From 202374a19ab2ed72a75488dc51461ceab9193c00 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Wed, 9 Nov 2022 20:21:32 +0200 Subject: [PATCH 001/761] Bump version to 8.0.0-preview.1 --- Directory.Build.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Build.props b/Directory.Build.props index 6d7a7869ab..135024f015 100644 --- a/Directory.Build.props +++ b/Directory.Build.props @@ -1,6 +1,6 @@  - 7.0.0 + 8.0.0-preview.1 latest true enable From 14b505b3921624e662c91c57a3d040ffeb549e72 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Thu, 10 Nov 2022 13:29:46 +0200 Subject: [PATCH 002/761] Reset public API snapshots (#4749) --- src/Npgsql.GeoJSON/PublicAPI.Shipped.txt | 2 +- src/Npgsql.Json.NET/PublicAPI.Shipped.txt | 2 +- .../PublicAPI.Unshipped.txt | 2 +- src/Npgsql/PublicAPI.Shipped.txt | 367 +++++------------- src/Npgsql/PublicAPI.Unshipped.txt | 366 ----------------- 5 files changed, 108 insertions(+), 631 deletions(-) diff --git a/src/Npgsql.GeoJSON/PublicAPI.Shipped.txt b/src/Npgsql.GeoJSON/PublicAPI.Shipped.txt index 13ced34dc9..a5e3b621d4 100644 --- a/src/Npgsql.GeoJSON/PublicAPI.Shipped.txt +++ b/src/Npgsql.GeoJSON/PublicAPI.Shipped.txt @@ -5,4 +5,4 @@ Npgsql.GeoJSONOptions.LongCRS = 4 -> Npgsql.GeoJSONOptions Npgsql.GeoJSONOptions.None = 0 -> Npgsql.GeoJSONOptions Npgsql.GeoJSONOptions.ShortCRS = 2 -> Npgsql.GeoJSONOptions Npgsql.NpgsqlGeoJSONExtensions -static Npgsql.NpgsqlGeoJSONExtensions.UseGeoJson(this Npgsql.TypeMapping.INpgsqlTypeMapper! mapper, Npgsql.GeoJSONOptions options = Npgsql.GeoJSONOptions.None, bool geographyAsDefault = false) -> Npgsql.TypeMapping.INpgsqlTypeMapper! \ No newline at end of file +static Npgsql.NpgsqlGeoJSONExtensions.UseGeoJson(this Npgsql.TypeMapping.INpgsqlTypeMapper! mapper, Npgsql.GeoJSONOptions options = Npgsql.GeoJSONOptions.None, bool geographyAsDefault = false) -> Npgsql.TypeMapping.INpgsqlTypeMapper! 
diff --git a/src/Npgsql.Json.NET/PublicAPI.Shipped.txt b/src/Npgsql.Json.NET/PublicAPI.Shipped.txt index 48ddf42ce5..dd615d73a6 100644 --- a/src/Npgsql.Json.NET/PublicAPI.Shipped.txt +++ b/src/Npgsql.Json.NET/PublicAPI.Shipped.txt @@ -1,3 +1,3 @@ #nullable enable Npgsql.NpgsqlJsonNetExtensions -static Npgsql.NpgsqlJsonNetExtensions.UseJsonNet(this Npgsql.TypeMapping.INpgsqlTypeMapper! mapper, System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null, Newtonsoft.Json.JsonSerializerSettings? settings = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! \ No newline at end of file +static Npgsql.NpgsqlJsonNetExtensions.UseJsonNet(this Npgsql.TypeMapping.INpgsqlTypeMapper! mapper, System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null, Newtonsoft.Json.JsonSerializerSettings? settings = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! diff --git a/src/Npgsql.NetTopologySuite/PublicAPI.Unshipped.txt b/src/Npgsql.NetTopologySuite/PublicAPI.Unshipped.txt index 5f282702bb..ab058de62d 100644 --- a/src/Npgsql.NetTopologySuite/PublicAPI.Unshipped.txt +++ b/src/Npgsql.NetTopologySuite/PublicAPI.Unshipped.txt @@ -1 +1 @@ - \ No newline at end of file +#nullable enable diff --git a/src/Npgsql/PublicAPI.Shipped.txt b/src/Npgsql/PublicAPI.Shipped.txt index 79818d3afd..6f09e92dd3 100644 --- a/src/Npgsql/PublicAPI.Shipped.txt +++ b/src/Npgsql/PublicAPI.Shipped.txt @@ -1,6 +1,4 @@ #nullable enable -abstract Npgsql.Logging.NpgsqlLogger.IsEnabled(Npgsql.Logging.NpgsqlLogLevel level) -> bool -abstract Npgsql.Logging.NpgsqlLogger.Log(Npgsql.Logging.NpgsqlLogLevel level, int connectorId, string! msg, System.Exception? exception = null) -> void abstract Npgsql.Replication.PgOutput.Messages.UpdateMessage.NewRow.get -> Npgsql.Replication.PgOutput.ReplicationTuple! abstract NpgsqlTypes.NpgsqlTsQuery.Equals(NpgsqlTypes.NpgsqlTsQuery? 
other) -> bool const Npgsql.NpgsqlConnection.DefaultPort = 5432 -> int @@ -241,18 +239,6 @@ const Npgsql.PostgresErrorCodes.WindowingError = "42P20" -> string! const Npgsql.PostgresErrorCodes.WithCheckOptionViolation = "44000" -> string! const Npgsql.PostgresErrorCodes.WrongObjectType = "42809" -> string! const Npgsql.PostgresErrorCodes.ZeroLengthCharacterString = "2200F" -> string! -const NpgsqlTypes.NpgsqlDate.MaxYear = 5874897 -> int -const NpgsqlTypes.NpgsqlDate.MinYear = -4714 -> int -const NpgsqlTypes.NpgsqlTimeSpan.DaysPerMonth = 30 -> int -const NpgsqlTypes.NpgsqlTimeSpan.HoursPerDay = 24 -> int -const NpgsqlTypes.NpgsqlTimeSpan.MonthsPerYear = 12 -> int -const NpgsqlTypes.NpgsqlTimeSpan.TicksPerDay = 864000000000 -> long -const NpgsqlTypes.NpgsqlTimeSpan.TicksPerHour = 36000000000 -> long -const NpgsqlTypes.NpgsqlTimeSpan.TicksPerMicrosecond = 10 -> long -const NpgsqlTypes.NpgsqlTimeSpan.TicksPerMillsecond = 10000 -> long -const NpgsqlTypes.NpgsqlTimeSpan.TicksPerMinute = 600000000 -> long -const NpgsqlTypes.NpgsqlTimeSpan.TicksPerMonth = 25920000000000 -> long -const NpgsqlTypes.NpgsqlTimeSpan.TicksPerSecond = 10000000 -> long Npgsql.ArrayNullabilityMode Npgsql.ArrayNullabilityMode.Always = 1 -> Npgsql.ArrayNullabilityMode Npgsql.ArrayNullabilityMode.Never = 0 -> Npgsql.ArrayNullabilityMode @@ -265,21 +251,6 @@ Npgsql.BackendMessages.FieldDescription.TypeSize.set -> void Npgsql.INpgsqlNameTranslator Npgsql.INpgsqlNameTranslator.TranslateMemberName(string! clrName) -> string! Npgsql.INpgsqlNameTranslator.TranslateTypeName(string! clrName) -> string! -Npgsql.Logging.ConsoleLoggingProvider -Npgsql.Logging.ConsoleLoggingProvider.ConsoleLoggingProvider(Npgsql.Logging.NpgsqlLogLevel minLevel = Npgsql.Logging.NpgsqlLogLevel.Info, bool printLevel = false, bool printConnectorId = false) -> void -Npgsql.Logging.ConsoleLoggingProvider.CreateLogger(string! name) -> Npgsql.Logging.NpgsqlLogger! 
-Npgsql.Logging.INpgsqlLoggingProvider -Npgsql.Logging.INpgsqlLoggingProvider.CreateLogger(string! name) -> Npgsql.Logging.NpgsqlLogger! -Npgsql.Logging.NpgsqlLogger -Npgsql.Logging.NpgsqlLogger.NpgsqlLogger() -> void -Npgsql.Logging.NpgsqlLogLevel -Npgsql.Logging.NpgsqlLogLevel.Debug = 2 -> Npgsql.Logging.NpgsqlLogLevel -Npgsql.Logging.NpgsqlLogLevel.Error = 5 -> Npgsql.Logging.NpgsqlLogLevel -Npgsql.Logging.NpgsqlLogLevel.Fatal = 6 -> Npgsql.Logging.NpgsqlLogLevel -Npgsql.Logging.NpgsqlLogLevel.Info = 3 -> Npgsql.Logging.NpgsqlLogLevel -Npgsql.Logging.NpgsqlLogLevel.Trace = 1 -> Npgsql.Logging.NpgsqlLogLevel -Npgsql.Logging.NpgsqlLogLevel.Warn = 4 -> Npgsql.Logging.NpgsqlLogLevel -Npgsql.Logging.NpgsqlLogManager Npgsql.NameTranslation.NpgsqlNullNameTranslator Npgsql.NameTranslation.NpgsqlNullNameTranslator.NpgsqlNullNameTranslator() -> void Npgsql.NameTranslation.NpgsqlNullNameTranslator.TranslateMemberName(string! clrName) -> string! @@ -295,6 +266,8 @@ Npgsql.NpgsqlBatch Npgsql.NpgsqlBatch.BatchCommands.get -> Npgsql.NpgsqlBatchCommandCollection! Npgsql.NpgsqlBatch.Connection.get -> Npgsql.NpgsqlConnection? Npgsql.NpgsqlBatch.Connection.set -> void +Npgsql.NpgsqlBatch.EnableErrorBarriers.get -> bool +Npgsql.NpgsqlBatch.EnableErrorBarriers.set -> void Npgsql.NpgsqlBatch.ExecuteReader(System.Data.CommandBehavior behavior = System.Data.CommandBehavior.Default) -> Npgsql.NpgsqlDataReader! Npgsql.NpgsqlBatch.ExecuteReaderAsync(System.Data.CommandBehavior behavior, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! Npgsql.NpgsqlBatch.ExecuteReaderAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! @@ -302,6 +275,8 @@ Npgsql.NpgsqlBatch.NpgsqlBatch(Npgsql.NpgsqlConnection? connection = null, Npgsq Npgsql.NpgsqlBatch.Transaction.get -> Npgsql.NpgsqlTransaction? 
Npgsql.NpgsqlBatch.Transaction.set -> void Npgsql.NpgsqlBatchCommand +Npgsql.NpgsqlBatchCommand.AppendErrorBarrier.get -> bool? +Npgsql.NpgsqlBatchCommand.AppendErrorBarrier.set -> void Npgsql.NpgsqlBatchCommand.NpgsqlBatchCommand() -> void Npgsql.NpgsqlBatchCommand.NpgsqlBatchCommand(string! commandText) -> void Npgsql.NpgsqlBatchCommand.OID.get -> uint @@ -355,7 +330,6 @@ Npgsql.NpgsqlBinaryImporter.WriteRowAsync(System.Threading.CancellationToken can Npgsql.NpgsqlCommand Npgsql.NpgsqlCommand.AllResultTypesAreUnknown.get -> bool Npgsql.NpgsqlCommand.AllResultTypesAreUnknown.set -> void -Npgsql.NpgsqlCommand.Clone() -> Npgsql.NpgsqlCommand! Npgsql.NpgsqlCommand.Connection.get -> Npgsql.NpgsqlConnection? Npgsql.NpgsqlCommand.Connection.set -> void Npgsql.NpgsqlCommand.CreateParameter() -> Npgsql.NpgsqlParameter! @@ -409,16 +383,10 @@ Npgsql.NpgsqlConnection.FullState.get -> System.Data.ConnectionState Npgsql.NpgsqlConnection.HasIntegerDateTimes.get -> bool Npgsql.NpgsqlConnection.Host.get -> string? Npgsql.NpgsqlConnection.IntegratedSecurity.get -> bool -Npgsql.NpgsqlConnection.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> void -Npgsql.NpgsqlConnection.MapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> void Npgsql.NpgsqlConnection.Notice -> Npgsql.NoticeEventHandler? Npgsql.NpgsqlConnection.Notification -> Npgsql.NotificationEventHandler? Npgsql.NpgsqlConnection.NpgsqlConnection() -> void Npgsql.NpgsqlConnection.NpgsqlConnection(string? connectionString) -> void -Npgsql.NpgsqlConnection.PhysicalOpenAsyncCallback.get -> Npgsql.PhysicalOpenAsyncCallback? -Npgsql.NpgsqlConnection.PhysicalOpenAsyncCallback.set -> void -Npgsql.NpgsqlConnection.PhysicalOpenCallback.get -> Npgsql.PhysicalOpenCallback? 
-Npgsql.NpgsqlConnection.PhysicalOpenCallback.set -> void Npgsql.NpgsqlConnection.Port.get -> int Npgsql.NpgsqlConnection.PostgresParameters.get -> System.Collections.Generic.IReadOnlyDictionary! Npgsql.NpgsqlConnection.PostgreSqlVersion.get -> System.Version! @@ -428,7 +396,7 @@ Npgsql.NpgsqlConnection.ProvideClientCertificatesCallback.set -> void Npgsql.NpgsqlConnection.ProvidePasswordCallback.get -> Npgsql.ProvidePasswordCallback? Npgsql.NpgsqlConnection.ProvidePasswordCallback.set -> void Npgsql.NpgsqlConnection.ReloadTypes() -> void -Npgsql.NpgsqlConnection.Settings.get -> Npgsql.NpgsqlConnectionStringBuilder! +Npgsql.NpgsqlConnection.ReloadTypesAsync() -> System.Threading.Tasks.Task! Npgsql.NpgsqlConnection.Timezone.get -> string! Npgsql.NpgsqlConnection.TypeMapper.get -> Npgsql.TypeMapping.INpgsqlTypeMapper! Npgsql.NpgsqlConnection.UnprepareAll() -> void @@ -586,10 +554,6 @@ Npgsql.NpgsqlConnectionStringBuilder.WriteBufferSize.get -> int Npgsql.NpgsqlConnectionStringBuilder.WriteBufferSize.set -> void Npgsql.NpgsqlConnectionStringBuilder.WriteCoalescingBufferThresholdBytes.get -> int Npgsql.NpgsqlConnectionStringBuilder.WriteCoalescingBufferThresholdBytes.set -> void -Npgsql.NpgsqlConnectionStringPropertyAttribute -Npgsql.NpgsqlConnectionStringPropertyAttribute.NpgsqlConnectionStringPropertyAttribute() -> void -Npgsql.NpgsqlConnectionStringPropertyAttribute.NpgsqlConnectionStringPropertyAttribute(params string![]! synonyms) -> void -Npgsql.NpgsqlConnectionStringPropertyAttribute.Synonyms.get -> string![]! Npgsql.NpgsqlCopyTextReader Npgsql.NpgsqlCopyTextReader.Cancel() -> void Npgsql.NpgsqlCopyTextReader.CancelAsync() -> System.Threading.Tasks.Task! @@ -617,17 +581,44 @@ Npgsql.NpgsqlDataReader.GetColumnSchema() -> System.Collections.ObjectModel.Read Npgsql.NpgsqlDataReader.GetColumnSchemaAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task!>! 
Npgsql.NpgsqlDataReader.GetData(int ordinal) -> Npgsql.NpgsqlNestedDataReader! Npgsql.NpgsqlDataReader.GetDataTypeOID(int ordinal) -> uint -Npgsql.NpgsqlDataReader.GetDate(int ordinal) -> NpgsqlTypes.NpgsqlDate -Npgsql.NpgsqlDataReader.GetInterval(int ordinal) -> NpgsqlTypes.NpgsqlTimeSpan Npgsql.NpgsqlDataReader.GetPostgresType(int ordinal) -> Npgsql.PostgresTypes.PostgresType! Npgsql.NpgsqlDataReader.GetStreamAsync(int ordinal, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! Npgsql.NpgsqlDataReader.GetTextReaderAsync(int ordinal, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! Npgsql.NpgsqlDataReader.GetTimeSpan(int ordinal) -> System.TimeSpan -Npgsql.NpgsqlDataReader.GetTimeStamp(int ordinal) -> NpgsqlTypes.NpgsqlDateTime Npgsql.NpgsqlDataReader.IsOnRow.get -> bool Npgsql.NpgsqlDataReader.ReaderClosed -> System.EventHandler? Npgsql.NpgsqlDataReader.Rows.get -> ulong Npgsql.NpgsqlDataReader.Statements.get -> System.Collections.Generic.IReadOnlyList! +Npgsql.NpgsqlDataSource +Npgsql.NpgsqlDataSource.CreateBatch() -> Npgsql.NpgsqlBatch! +Npgsql.NpgsqlDataSource.CreateCommand(string? commandText = null) -> Npgsql.NpgsqlCommand! +Npgsql.NpgsqlDataSource.CreateConnection() -> Npgsql.NpgsqlConnection! +Npgsql.NpgsqlDataSource.OpenConnection() -> Npgsql.NpgsqlConnection! +Npgsql.NpgsqlDataSource.OpenConnectionAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.ValueTask +Npgsql.NpgsqlDataSource.Password.set -> void +Npgsql.NpgsqlDataSourceBuilder +Npgsql.NpgsqlDataSourceBuilder.AddTypeResolverFactory(Npgsql.Internal.TypeHandling.TypeHandlerResolverFactory! resolverFactory) -> void +Npgsql.NpgsqlDataSourceBuilder.Build() -> Npgsql.NpgsqlDataSource! 
+Npgsql.NpgsqlDataSourceBuilder.BuildMultiHost() -> Npgsql.NpgsqlMultiHostDataSource! +Npgsql.NpgsqlDataSourceBuilder.ConnectionString.get -> string! +Npgsql.NpgsqlDataSourceBuilder.ConnectionStringBuilder.get -> Npgsql.NpgsqlConnectionStringBuilder! +Npgsql.NpgsqlDataSourceBuilder.DefaultNameTranslator.get -> Npgsql.INpgsqlNameTranslator! +Npgsql.NpgsqlDataSourceBuilder.DefaultNameTranslator.set -> void +Npgsql.NpgsqlDataSourceBuilder.EnableParameterLogging(bool parameterLoggingEnabled = true) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.MapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +Npgsql.NpgsqlDataSourceBuilder.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +Npgsql.NpgsqlDataSourceBuilder.MapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +Npgsql.NpgsqlDataSourceBuilder.NpgsqlDataSourceBuilder(string? connectionString = null) -> void +Npgsql.NpgsqlDataSourceBuilder.UnmapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool +Npgsql.NpgsqlDataSourceBuilder.UnmapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool +Npgsql.NpgsqlDataSourceBuilder.UnmapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool +Npgsql.NpgsqlDataSourceBuilder.UseClientCertificate(System.Security.Cryptography.X509Certificates.X509Certificate? clientCertificate) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.UseClientCertificates(System.Security.Cryptography.X509Certificates.X509CertificateCollection? clientCertificates) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.UseClientCertificatesCallback(System.Action? 
clientCertificatesCallback) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.UseLoggerFactory(Microsoft.Extensions.Logging.ILoggerFactory? loggerFactory) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.UsePeriodicPasswordProvider(System.Func>? passwordProvider, System.TimeSpan successRefreshInterval, System.TimeSpan failureRefreshInterval) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.UsePhysicalConnectionInitializer(System.Action? connectionInitializer, System.Func? connectionInitializerAsync) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.UseUserCertificateValidationCallback(System.Net.Security.RemoteCertificateValidationCallback! userCertificateValidationCallback) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlException Npgsql.NpgsqlException.BatchCommand.get -> Npgsql.NpgsqlBatchCommand? Npgsql.NpgsqlException.BatchCommand.set -> void @@ -659,6 +650,13 @@ Npgsql.NpgsqlLargeObjectStream.GetLengthAsync(System.Threading.CancellationToken Npgsql.NpgsqlLargeObjectStream.Has64BitSupport.get -> bool Npgsql.NpgsqlLargeObjectStream.SeekAsync(long offset, System.IO.SeekOrigin origin, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! Npgsql.NpgsqlLargeObjectStream.SetLength(long value, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! +Npgsql.NpgsqlLoggingConfiguration +Npgsql.NpgsqlMultiHostDataSource +Npgsql.NpgsqlMultiHostDataSource.ClearDatabaseStates() -> void +Npgsql.NpgsqlMultiHostDataSource.CreateConnection(Npgsql.TargetSessionAttributes targetSessionAttributes) -> Npgsql.NpgsqlConnection! +Npgsql.NpgsqlMultiHostDataSource.OpenConnection(Npgsql.TargetSessionAttributes targetSessionAttributes) -> Npgsql.NpgsqlConnection! 
+Npgsql.NpgsqlMultiHostDataSource.OpenConnectionAsync(Npgsql.TargetSessionAttributes targetSessionAttributes, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.ValueTask +Npgsql.NpgsqlMultiHostDataSource.WithTargetSession(Npgsql.TargetSessionAttributes targetSessionAttributes) -> Npgsql.NpgsqlDataSource! Npgsql.NpgsqlNestedDataReader Npgsql.NpgsqlNestedDataReader.GetData(int ordinal) -> Npgsql.NpgsqlNestedDataReader! Npgsql.NpgsqlNoticeEventArgs @@ -742,8 +740,6 @@ Npgsql.NpgsqlTracingOptions Npgsql.NpgsqlTracingOptions.NpgsqlTracingOptions() -> void Npgsql.NpgsqlTransaction Npgsql.NpgsqlTransaction.Connection.get -> Npgsql.NpgsqlConnection? -Npgsql.PhysicalOpenAsyncCallback -Npgsql.PhysicalOpenCallback Npgsql.PostgresErrorCodes Npgsql.PostgresException Npgsql.PostgresException.Code.get -> string! @@ -859,6 +855,7 @@ Npgsql.Replication.LogicalSlotSnapshotInitMode.Use = 1 -> Npgsql.Replication.Log Npgsql.Replication.PgOutput.Messages.BeginMessage Npgsql.Replication.PgOutput.Messages.BeginMessage.TransactionCommitTimestamp.get -> System.DateTime Npgsql.Replication.PgOutput.Messages.BeginMessage.TransactionFinalLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber +Npgsql.Replication.PgOutput.Messages.BeginPrepareMessage Npgsql.Replication.PgOutput.Messages.CommitMessage Npgsql.Replication.PgOutput.Messages.CommitMessage.CommitFlags Npgsql.Replication.PgOutput.Messages.CommitMessage.CommitFlags.None = 0 -> Npgsql.Replication.PgOutput.Messages.CommitMessage.CommitFlags @@ -866,6 +863,13 @@ Npgsql.Replication.PgOutput.Messages.CommitMessage.CommitLsn.get -> NpgsqlTypes. 
Npgsql.Replication.PgOutput.Messages.CommitMessage.Flags.get -> Npgsql.Replication.PgOutput.Messages.CommitMessage.CommitFlags Npgsql.Replication.PgOutput.Messages.CommitMessage.TransactionCommitTimestamp.get -> System.DateTime Npgsql.Replication.PgOutput.Messages.CommitMessage.TransactionEndLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber +Npgsql.Replication.PgOutput.Messages.CommitPreparedMessage +Npgsql.Replication.PgOutput.Messages.CommitPreparedMessage.CommitPreparedEndLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber +Npgsql.Replication.PgOutput.Messages.CommitPreparedMessage.CommitPreparedFlags +Npgsql.Replication.PgOutput.Messages.CommitPreparedMessage.CommitPreparedFlags.None = 0 -> Npgsql.Replication.PgOutput.Messages.CommitPreparedMessage.CommitPreparedFlags +Npgsql.Replication.PgOutput.Messages.CommitPreparedMessage.CommitPreparedLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber +Npgsql.Replication.PgOutput.Messages.CommitPreparedMessage.Flags.get -> Npgsql.Replication.PgOutput.Messages.CommitPreparedMessage.CommitPreparedFlags +Npgsql.Replication.PgOutput.Messages.CommitPreparedMessage.TransactionCommitTimestamp.get -> System.DateTime Npgsql.Replication.PgOutput.Messages.DefaultUpdateMessage Npgsql.Replication.PgOutput.Messages.DeleteMessage Npgsql.Replication.PgOutput.Messages.DeleteMessage.Relation.get -> Npgsql.Replication.PgOutput.Messages.RelationMessage! @@ -892,6 +896,16 @@ Npgsql.Replication.PgOutput.Messages.OriginMessage.OriginCommitLsn.get -> Npgsql Npgsql.Replication.PgOutput.Messages.OriginMessage.OriginName.get -> string! Npgsql.Replication.PgOutput.Messages.PgOutputReplicationMessage Npgsql.Replication.PgOutput.Messages.PgOutputReplicationMessage.PgOutputReplicationMessage() -> void +Npgsql.Replication.PgOutput.Messages.PreparedTransactionControlMessage +Npgsql.Replication.PgOutput.Messages.PreparedTransactionControlMessage.TransactionGid.get -> string! 
+Npgsql.Replication.PgOutput.Messages.PrepareMessage +Npgsql.Replication.PgOutput.Messages.PrepareMessage.Flags.get -> Npgsql.Replication.PgOutput.Messages.PrepareMessage.PrepareFlags +Npgsql.Replication.PgOutput.Messages.PrepareMessage.PrepareFlags +Npgsql.Replication.PgOutput.Messages.PrepareMessage.PrepareFlags.None = 0 -> Npgsql.Replication.PgOutput.Messages.PrepareMessage.PrepareFlags +Npgsql.Replication.PgOutput.Messages.PrepareMessageBase +Npgsql.Replication.PgOutput.Messages.PrepareMessageBase.PrepareEndLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber +Npgsql.Replication.PgOutput.Messages.PrepareMessageBase.PrepareLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber +Npgsql.Replication.PgOutput.Messages.PrepareMessageBase.TransactionPrepareTimestamp.get -> System.DateTime Npgsql.Replication.PgOutput.Messages.RelationMessage Npgsql.Replication.PgOutput.Messages.RelationMessage.Column Npgsql.Replication.PgOutput.Messages.RelationMessage.Column.Column() -> void @@ -918,6 +932,14 @@ Npgsql.Replication.PgOutput.Messages.RelationMessageColumn.DataTypeId.get -> uin Npgsql.Replication.PgOutput.Messages.RelationMessageColumn.Flags.get -> byte Npgsql.Replication.PgOutput.Messages.RelationMessageColumn.RelationMessageColumn() -> void Npgsql.Replication.PgOutput.Messages.RelationMessageColumn.TypeModifier.get -> int +Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage +Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage.Flags.get -> Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage.RollbackPreparedFlags +Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage.PreparedTransactionEndLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber +Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage.RollbackPreparedEndLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber +Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage.RollbackPreparedFlags +Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage.RollbackPreparedFlags.None = 0 -> 
Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage.RollbackPreparedFlags +Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage.TransactionPrepareTimestamp.get -> System.DateTime +Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage.TransactionRollbackTimestamp.get -> System.DateTime Npgsql.Replication.PgOutput.Messages.StreamAbortMessage Npgsql.Replication.PgOutput.Messages.StreamAbortMessage.SubtransactionXid.get -> uint Npgsql.Replication.PgOutput.Messages.StreamCommitMessage @@ -925,6 +947,10 @@ Npgsql.Replication.PgOutput.Messages.StreamCommitMessage.CommitLsn.get -> Npgsql Npgsql.Replication.PgOutput.Messages.StreamCommitMessage.Flags.get -> byte Npgsql.Replication.PgOutput.Messages.StreamCommitMessage.TransactionCommitTimestamp.get -> System.DateTime Npgsql.Replication.PgOutput.Messages.StreamCommitMessage.TransactionEndLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber +Npgsql.Replication.PgOutput.Messages.StreamPrepareMessage +Npgsql.Replication.PgOutput.Messages.StreamPrepareMessage.Flags.get -> Npgsql.Replication.PgOutput.Messages.StreamPrepareMessage.StreamPrepareFlags +Npgsql.Replication.PgOutput.Messages.StreamPrepareMessage.StreamPrepareFlags +Npgsql.Replication.PgOutput.Messages.StreamPrepareMessage.StreamPrepareFlags.None = 0 -> Npgsql.Replication.PgOutput.Messages.StreamPrepareMessage.StreamPrepareFlags Npgsql.Replication.PgOutput.Messages.StreamStartMessage Npgsql.Replication.PgOutput.Messages.StreamStartMessage.StreamSegmentIndicator.get -> byte Npgsql.Replication.PgOutput.Messages.StreamStopMessage @@ -952,11 +978,12 @@ Npgsql.Replication.PgOutput.PgOutputReplicationOptions Npgsql.Replication.PgOutput.PgOutputReplicationOptions.Binary.get -> bool? Npgsql.Replication.PgOutput.PgOutputReplicationOptions.Equals(Npgsql.Replication.PgOutput.PgOutputReplicationOptions? other) -> bool Npgsql.Replication.PgOutput.PgOutputReplicationOptions.Messages.get -> bool? 
-Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PgOutputReplicationOptions(string! publicationName, ulong protocolVersion, bool? binary = null, bool? streaming = null, bool? messages = null) -> void -Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PgOutputReplicationOptions(System.Collections.Generic.IEnumerable! publicationNames, ulong protocolVersion, bool? binary = null, bool? streaming = null, bool? messages = null) -> void +Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PgOutputReplicationOptions(string! publicationName, ulong protocolVersion, bool? binary = null, bool? streaming = null, bool? messages = null, bool? twoPhase = null) -> void +Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PgOutputReplicationOptions(System.Collections.Generic.IEnumerable! publicationNames, ulong protocolVersion, bool? binary = null, bool? streaming = null, bool? messages = null, bool? twoPhase = null) -> void Npgsql.Replication.PgOutput.PgOutputReplicationOptions.ProtocolVersion.get -> ulong Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PublicationNames.get -> System.Collections.Generic.List! Npgsql.Replication.PgOutput.PgOutputReplicationOptions.Streaming.get -> bool? +Npgsql.Replication.PgOutput.PgOutputReplicationOptions.TwoPhase.get -> bool? Npgsql.Replication.PgOutput.PgOutputReplicationSlot Npgsql.Replication.PgOutput.PgOutputReplicationSlot.PgOutputReplicationSlot(Npgsql.Replication.PgOutput.PgOutputReplicationSlot! slot) -> void Npgsql.Replication.PgOutput.PgOutputReplicationSlot.PgOutputReplicationSlot(Npgsql.Replication.ReplicationSlotOptions options) -> void @@ -985,10 +1012,14 @@ Npgsql.Replication.PhysicalReplicationConnection Npgsql.Replication.PhysicalReplicationConnection.CreateReplicationSlot(string! slotName, bool isTemporary = false, bool reserveWal = false, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! 
Npgsql.Replication.PhysicalReplicationConnection.PhysicalReplicationConnection() -> void Npgsql.Replication.PhysicalReplicationConnection.PhysicalReplicationConnection(string? connectionString) -> void -Npgsql.Replication.PhysicalReplicationConnection.StartReplication(Npgsql.Replication.PhysicalReplicationSlot? slot, NpgsqlTypes.NpgsqlLogSequenceNumber walLocation, System.Threading.CancellationToken cancellationToken, uint timeline = 0) -> System.Collections.Generic.IAsyncEnumerable! -Npgsql.Replication.PhysicalReplicationConnection.StartReplication(NpgsqlTypes.NpgsqlLogSequenceNumber walLocation, System.Threading.CancellationToken cancellationToken, uint timeline = 0) -> System.Collections.Generic.IAsyncEnumerable! +Npgsql.Replication.PhysicalReplicationConnection.ReadReplicationSlot(string! slotName, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +Npgsql.Replication.PhysicalReplicationConnection.StartReplication(Npgsql.Replication.PhysicalReplicationSlot! slot, System.Threading.CancellationToken cancellationToken) -> System.Collections.Generic.IAsyncEnumerable! +Npgsql.Replication.PhysicalReplicationConnection.StartReplication(Npgsql.Replication.PhysicalReplicationSlot? slot, NpgsqlTypes.NpgsqlLogSequenceNumber walLocation, System.Threading.CancellationToken cancellationToken, ulong timeline = 0) -> System.Collections.Generic.IAsyncEnumerable! +Npgsql.Replication.PhysicalReplicationConnection.StartReplication(NpgsqlTypes.NpgsqlLogSequenceNumber walLocation, System.Threading.CancellationToken cancellationToken, ulong timeline = 0) -> System.Collections.Generic.IAsyncEnumerable! Npgsql.Replication.PhysicalReplicationSlot -Npgsql.Replication.PhysicalReplicationSlot.PhysicalReplicationSlot(string! slotName) -> void +Npgsql.Replication.PhysicalReplicationSlot.PhysicalReplicationSlot(string! slotName, NpgsqlTypes.NpgsqlLogSequenceNumber? restartLsn = null, ulong? 
restartTimeline = null) -> void +Npgsql.Replication.PhysicalReplicationSlot.RestartLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber? +Npgsql.Replication.PhysicalReplicationSlot.RestartTimeline.get -> ulong? Npgsql.Replication.ReplicationConnection Npgsql.Replication.ReplicationConnection.CommandTimeout.get -> System.TimeSpan Npgsql.Replication.ReplicationConnection.CommandTimeout.set -> void @@ -1087,6 +1118,8 @@ Npgsql.Schema.NpgsqlDbColumn.IsAliased.get -> bool? Npgsql.Schema.NpgsqlDbColumn.IsAliased.set -> void Npgsql.Schema.NpgsqlDbColumn.IsAutoIncrement.get -> bool? Npgsql.Schema.NpgsqlDbColumn.IsAutoIncrement.set -> void +Npgsql.Schema.NpgsqlDbColumn.IsIdentity.get -> bool? +Npgsql.Schema.NpgsqlDbColumn.IsIdentity.set -> void Npgsql.Schema.NpgsqlDbColumn.IsKey.get -> bool? Npgsql.Schema.NpgsqlDbColumn.IsKey.set -> void Npgsql.Schema.NpgsqlDbColumn.IsLong.get -> bool? @@ -1118,6 +1151,7 @@ Npgsql.SslMode.Require = 3 -> Npgsql.SslMode Npgsql.SslMode.VerifyCA = 4 -> Npgsql.SslMode Npgsql.SslMode.VerifyFull = 5 -> Npgsql.SslMode Npgsql.StatementType +Npgsql.StatementType.Call = 11 -> Npgsql.StatementType Npgsql.StatementType.Copy = 8 -> Npgsql.StatementType Npgsql.StatementType.CreateTableAs = 5 -> Npgsql.StatementType Npgsql.StatementType.Delete = 3 -> Npgsql.StatementType @@ -1132,6 +1166,7 @@ Npgsql.StatementType.Update = 4 -> Npgsql.StatementType Npgsql.TypeMapping.INpgsqlTypeMapper Npgsql.TypeMapping.INpgsqlTypeMapper.AddTypeResolverFactory(Npgsql.Internal.TypeHandling.TypeHandlerResolverFactory! resolverFactory) -> void Npgsql.TypeMapping.INpgsqlTypeMapper.DefaultNameTranslator.get -> Npgsql.INpgsqlNameTranslator! +Npgsql.TypeMapping.INpgsqlTypeMapper.DefaultNameTranslator.set -> void Npgsql.TypeMapping.INpgsqlTypeMapper.MapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! Npgsql.TypeMapping.INpgsqlTypeMapper.MapComposite(string? 
pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! Npgsql.TypeMapping.INpgsqlTypeMapper.MapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! @@ -1170,78 +1205,6 @@ NpgsqlTypes.NpgsqlCircle.X.get -> double NpgsqlTypes.NpgsqlCircle.X.set -> void NpgsqlTypes.NpgsqlCircle.Y.get -> double NpgsqlTypes.NpgsqlCircle.Y.set -> void -NpgsqlTypes.NpgsqlDate -NpgsqlTypes.NpgsqlDate.Add(in NpgsqlTypes.NpgsqlTimeSpan interval) -> NpgsqlTypes.NpgsqlDate -NpgsqlTypes.NpgsqlDate.AddDays(int days) -> NpgsqlTypes.NpgsqlDate -NpgsqlTypes.NpgsqlDate.AddMonths(int months) -> NpgsqlTypes.NpgsqlDate -NpgsqlTypes.NpgsqlDate.AddYears(int years) -> NpgsqlTypes.NpgsqlDate -NpgsqlTypes.NpgsqlDate.Compare(NpgsqlTypes.NpgsqlDate x, NpgsqlTypes.NpgsqlDate y) -> int -NpgsqlTypes.NpgsqlDate.Compare(object? x, object? y) -> int -NpgsqlTypes.NpgsqlDate.CompareTo(NpgsqlTypes.NpgsqlDate other) -> int -NpgsqlTypes.NpgsqlDate.CompareTo(object? 
o) -> int -NpgsqlTypes.NpgsqlDate.Day.get -> int -NpgsqlTypes.NpgsqlDate.DayOfWeek.get -> System.DayOfWeek -NpgsqlTypes.NpgsqlDate.DayOfYear.get -> int -NpgsqlTypes.NpgsqlDate.Equals(NpgsqlTypes.NpgsqlDate other) -> bool -NpgsqlTypes.NpgsqlDate.IsFinite.get -> bool -NpgsqlTypes.NpgsqlDate.IsInfinity.get -> bool -NpgsqlTypes.NpgsqlDate.IsLeapYear.get -> bool -NpgsqlTypes.NpgsqlDate.IsNegativeInfinity.get -> bool -NpgsqlTypes.NpgsqlDate.Month.get -> int -NpgsqlTypes.NpgsqlDate.NpgsqlDate() -> void -NpgsqlTypes.NpgsqlDate.NpgsqlDate(int year, int month, int day) -> void -NpgsqlTypes.NpgsqlDate.NpgsqlDate(NpgsqlTypes.NpgsqlDate copyFrom) -> void -NpgsqlTypes.NpgsqlDate.NpgsqlDate(System.DateOnly date) -> void -NpgsqlTypes.NpgsqlDate.NpgsqlDate(System.DateTime dateTime) -> void -NpgsqlTypes.NpgsqlDate.Subtract(in NpgsqlTypes.NpgsqlTimeSpan interval) -> NpgsqlTypes.NpgsqlDate -NpgsqlTypes.NpgsqlDate.Year.get -> int -NpgsqlTypes.NpgsqlDateTime -NpgsqlTypes.NpgsqlDateTime.Add(in NpgsqlTypes.NpgsqlTimeSpan value) -> NpgsqlTypes.NpgsqlDateTime -NpgsqlTypes.NpgsqlDateTime.Add(System.TimeSpan value) -> NpgsqlTypes.NpgsqlDateTime -NpgsqlTypes.NpgsqlDateTime.AddDays(double value) -> NpgsqlTypes.NpgsqlDateTime -NpgsqlTypes.NpgsqlDateTime.AddHours(double value) -> NpgsqlTypes.NpgsqlDateTime -NpgsqlTypes.NpgsqlDateTime.AddMilliseconds(double value) -> NpgsqlTypes.NpgsqlDateTime -NpgsqlTypes.NpgsqlDateTime.AddMinutes(double value) -> NpgsqlTypes.NpgsqlDateTime -NpgsqlTypes.NpgsqlDateTime.AddMonths(int value) -> NpgsqlTypes.NpgsqlDateTime -NpgsqlTypes.NpgsqlDateTime.AddSeconds(double value) -> NpgsqlTypes.NpgsqlDateTime -NpgsqlTypes.NpgsqlDateTime.AddTicks(long value) -> NpgsqlTypes.NpgsqlDateTime -NpgsqlTypes.NpgsqlDateTime.AddYears(int value) -> NpgsqlTypes.NpgsqlDateTime -NpgsqlTypes.NpgsqlDateTime.Compare(NpgsqlTypes.NpgsqlDateTime x, NpgsqlTypes.NpgsqlDateTime y) -> int -NpgsqlTypes.NpgsqlDateTime.Compare(object? x, object? 
y) -> int -NpgsqlTypes.NpgsqlDateTime.CompareTo(NpgsqlTypes.NpgsqlDateTime other) -> int -NpgsqlTypes.NpgsqlDateTime.CompareTo(object? o) -> int -NpgsqlTypes.NpgsqlDateTime.Date.get -> NpgsqlTypes.NpgsqlDate -NpgsqlTypes.NpgsqlDateTime.Day.get -> int -NpgsqlTypes.NpgsqlDateTime.DayOfWeek.get -> System.DayOfWeek -NpgsqlTypes.NpgsqlDateTime.DayOfYear.get -> int -NpgsqlTypes.NpgsqlDateTime.Equals(NpgsqlTypes.NpgsqlDateTime other) -> bool -NpgsqlTypes.NpgsqlDateTime.Hour.get -> int -NpgsqlTypes.NpgsqlDateTime.IsFinite.get -> bool -NpgsqlTypes.NpgsqlDateTime.IsInfinity.get -> bool -NpgsqlTypes.NpgsqlDateTime.IsLeapYear.get -> bool -NpgsqlTypes.NpgsqlDateTime.IsNegativeInfinity.get -> bool -NpgsqlTypes.NpgsqlDateTime.Kind.get -> System.DateTimeKind -NpgsqlTypes.NpgsqlDateTime.Millisecond.get -> int -NpgsqlTypes.NpgsqlDateTime.Minute.get -> int -NpgsqlTypes.NpgsqlDateTime.Month.get -> int -NpgsqlTypes.NpgsqlDateTime.Normalize() -> NpgsqlTypes.NpgsqlDateTime -NpgsqlTypes.NpgsqlDateTime.NpgsqlDateTime() -> void -NpgsqlTypes.NpgsqlDateTime.NpgsqlDateTime(int year, int month, int day, int hours, int minutes, int seconds, int milliseconds, System.DateTimeKind kind = System.DateTimeKind.Unspecified) -> void -NpgsqlTypes.NpgsqlDateTime.NpgsqlDateTime(int year, int month, int day, int hours, int minutes, int seconds, System.DateTimeKind kind = System.DateTimeKind.Unspecified) -> void -NpgsqlTypes.NpgsqlDateTime.NpgsqlDateTime(long ticks) -> void -NpgsqlTypes.NpgsqlDateTime.NpgsqlDateTime(long ticks, System.DateTimeKind kind) -> void -NpgsqlTypes.NpgsqlDateTime.NpgsqlDateTime(NpgsqlTypes.NpgsqlDate date) -> void -NpgsqlTypes.NpgsqlDateTime.NpgsqlDateTime(NpgsqlTypes.NpgsqlDate date, System.TimeSpan time, System.DateTimeKind kind = System.DateTimeKind.Unspecified) -> void -NpgsqlTypes.NpgsqlDateTime.NpgsqlDateTime(System.DateTime dateTime) -> void -NpgsqlTypes.NpgsqlDateTime.Second.get -> int -NpgsqlTypes.NpgsqlDateTime.Subtract(in NpgsqlTypes.NpgsqlTimeSpan interval) -> 
NpgsqlTypes.NpgsqlDateTime -NpgsqlTypes.NpgsqlDateTime.Subtract(NpgsqlTypes.NpgsqlDateTime timestamp) -> NpgsqlTypes.NpgsqlTimeSpan -NpgsqlTypes.NpgsqlDateTime.Ticks.get -> long -NpgsqlTypes.NpgsqlDateTime.Time.get -> System.TimeSpan -NpgsqlTypes.NpgsqlDateTime.ToDateTime() -> System.DateTime -NpgsqlTypes.NpgsqlDateTime.ToLocalTime() -> NpgsqlTypes.NpgsqlDateTime -NpgsqlTypes.NpgsqlDateTime.ToUniversalTime() -> NpgsqlTypes.NpgsqlDateTime -NpgsqlTypes.NpgsqlDateTime.Year.get -> int NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.Abstime = 33 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.Array = -2147483648 -> NpgsqlTypes.NpgsqlDbType @@ -1436,46 +1399,6 @@ NpgsqlTypes.NpgsqlTid.Equals(NpgsqlTypes.NpgsqlTid other) -> bool NpgsqlTypes.NpgsqlTid.NpgsqlTid() -> void NpgsqlTypes.NpgsqlTid.NpgsqlTid(uint blockNumber, ushort offsetNumber) -> void NpgsqlTypes.NpgsqlTid.OffsetNumber.get -> ushort -NpgsqlTypes.NpgsqlTimeSpan -NpgsqlTypes.NpgsqlTimeSpan.Add(in NpgsqlTypes.NpgsqlTimeSpan interval) -> NpgsqlTypes.NpgsqlTimeSpan -NpgsqlTypes.NpgsqlTimeSpan.Canonicalize() -> NpgsqlTypes.NpgsqlTimeSpan -NpgsqlTypes.NpgsqlTimeSpan.CompareTo(NpgsqlTypes.NpgsqlTimeSpan other) -> int -NpgsqlTypes.NpgsqlTimeSpan.CompareTo(object? 
other) -> int -NpgsqlTypes.NpgsqlTimeSpan.Days.get -> int -NpgsqlTypes.NpgsqlTimeSpan.Duration() -> NpgsqlTypes.NpgsqlTimeSpan -NpgsqlTypes.NpgsqlTimeSpan.Equals(NpgsqlTypes.NpgsqlTimeSpan other) -> bool -NpgsqlTypes.NpgsqlTimeSpan.Hours.get -> int -NpgsqlTypes.NpgsqlTimeSpan.JustifyDays() -> NpgsqlTypes.NpgsqlTimeSpan -NpgsqlTypes.NpgsqlTimeSpan.JustifyInterval() -> NpgsqlTypes.NpgsqlTimeSpan -NpgsqlTypes.NpgsqlTimeSpan.JustifyMonths() -> NpgsqlTypes.NpgsqlTimeSpan -NpgsqlTypes.NpgsqlTimeSpan.Microseconds.get -> int -NpgsqlTypes.NpgsqlTimeSpan.Milliseconds.get -> int -NpgsqlTypes.NpgsqlTimeSpan.Minutes.get -> int -NpgsqlTypes.NpgsqlTimeSpan.Months.get -> int -NpgsqlTypes.NpgsqlTimeSpan.Negate() -> NpgsqlTypes.NpgsqlTimeSpan -NpgsqlTypes.NpgsqlTimeSpan.NpgsqlTimeSpan() -> void -NpgsqlTypes.NpgsqlTimeSpan.NpgsqlTimeSpan(int days, int hours, int minutes, int seconds) -> void -NpgsqlTypes.NpgsqlTimeSpan.NpgsqlTimeSpan(int days, int hours, int minutes, int seconds, int milliseconds) -> void -NpgsqlTypes.NpgsqlTimeSpan.NpgsqlTimeSpan(int months, int days, int hours, int minutes, int seconds, int milliseconds) -> void -NpgsqlTypes.NpgsqlTimeSpan.NpgsqlTimeSpan(int months, int days, long ticks) -> void -NpgsqlTypes.NpgsqlTimeSpan.NpgsqlTimeSpan(int years, int months, int days, int hours, int minutes, int seconds, int milliseconds) -> void -NpgsqlTypes.NpgsqlTimeSpan.NpgsqlTimeSpan(long ticks) -> void -NpgsqlTypes.NpgsqlTimeSpan.NpgsqlTimeSpan(System.TimeSpan timespan) -> void -NpgsqlTypes.NpgsqlTimeSpan.Seconds.get -> int -NpgsqlTypes.NpgsqlTimeSpan.Subtract(in NpgsqlTypes.NpgsqlTimeSpan interval) -> NpgsqlTypes.NpgsqlTimeSpan -NpgsqlTypes.NpgsqlTimeSpan.Ticks.get -> long -NpgsqlTypes.NpgsqlTimeSpan.Time.get -> System.TimeSpan -NpgsqlTypes.NpgsqlTimeSpan.TotalDays.get -> double -NpgsqlTypes.NpgsqlTimeSpan.TotalHours.get -> double -NpgsqlTypes.NpgsqlTimeSpan.TotalMicroseconds.get -> double -NpgsqlTypes.NpgsqlTimeSpan.TotalMilliseconds.get -> double 
-NpgsqlTypes.NpgsqlTimeSpan.TotalMinutes.get -> double -NpgsqlTypes.NpgsqlTimeSpan.TotalMonths.get -> double -NpgsqlTypes.NpgsqlTimeSpan.TotalSeconds.get -> double -NpgsqlTypes.NpgsqlTimeSpan.TotalTicks.get -> long -NpgsqlTypes.NpgsqlTimeSpan.UnjustifyDays() -> NpgsqlTypes.NpgsqlTimeSpan -NpgsqlTypes.NpgsqlTimeSpan.UnjustifyInterval() -> NpgsqlTypes.NpgsqlTimeSpan -NpgsqlTypes.NpgsqlTimeSpan.UnjustifyMonths() -> NpgsqlTypes.NpgsqlTimeSpan NpgsqlTypes.NpgsqlTsQuery NpgsqlTypes.NpgsqlTsQuery.Kind.get -> NpgsqlTypes.NpgsqlTsQuery.NodeKind NpgsqlTypes.NpgsqlTsQuery.NodeKind @@ -1593,8 +1516,17 @@ override Npgsql.NpgsqlCommand.CommandTimeout.get -> int override Npgsql.NpgsqlCommand.CommandTimeout.set -> void override Npgsql.NpgsqlCommand.CommandType.get -> System.Data.CommandType override Npgsql.NpgsqlCommand.CommandType.set -> void +override Npgsql.NpgsqlCommand.CreateDbParameter() -> System.Data.Common.DbParameter! +override Npgsql.NpgsqlCommand.DbConnection.get -> System.Data.Common.DbConnection? +override Npgsql.NpgsqlCommand.DbConnection.set -> void +override Npgsql.NpgsqlCommand.DbParameterCollection.get -> System.Data.Common.DbParameterCollection! +override Npgsql.NpgsqlCommand.DbTransaction.get -> System.Data.Common.DbTransaction? +override Npgsql.NpgsqlCommand.DbTransaction.set -> void override Npgsql.NpgsqlCommand.DesignTimeVisible.get -> bool override Npgsql.NpgsqlCommand.DesignTimeVisible.set -> void +override Npgsql.NpgsqlCommand.Dispose(bool disposing) -> void +override Npgsql.NpgsqlCommand.ExecuteDbDataReader(System.Data.CommandBehavior behavior) -> System.Data.Common.DbDataReader! +override Npgsql.NpgsqlCommand.ExecuteDbDataReaderAsync(System.Data.CommandBehavior behavior, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! override Npgsql.NpgsqlCommand.ExecuteNonQuery() -> int override Npgsql.NpgsqlCommand.ExecuteNonQueryAsync(System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! 
override Npgsql.NpgsqlCommand.ExecuteScalar() -> object? @@ -1684,6 +1616,7 @@ override Npgsql.NpgsqlDataReader.ReadAsync(System.Threading.CancellationToken ca override Npgsql.NpgsqlDataReader.RecordsAffected.get -> int override Npgsql.NpgsqlDataReader.this[int ordinal].get -> object! override Npgsql.NpgsqlDataReader.this[string! name].get -> object! +override Npgsql.NpgsqlDataSource.ConnectionString.get -> string! override Npgsql.NpgsqlException.DbBatchCommand.get -> System.Data.Common.DbBatchCommand? override Npgsql.NpgsqlException.IsTransient.get -> bool override Npgsql.NpgsqlFactory.CanCreateBatch.get -> bool @@ -1696,6 +1629,7 @@ override Npgsql.NpgsqlFactory.CreateCommandBuilder() -> System.Data.Common.DbCom override Npgsql.NpgsqlFactory.CreateConnection() -> System.Data.Common.DbConnection! override Npgsql.NpgsqlFactory.CreateConnectionStringBuilder() -> System.Data.Common.DbConnectionStringBuilder! override Npgsql.NpgsqlFactory.CreateDataAdapter() -> System.Data.Common.DbDataAdapter! +override Npgsql.NpgsqlFactory.CreateDataSource(string! connectionString) -> System.Data.Common.DbDataSource! override Npgsql.NpgsqlFactory.CreateParameter() -> System.Data.Common.DbParameter! override Npgsql.NpgsqlLargeObjectStream.CanRead.get -> bool override Npgsql.NpgsqlLargeObjectStream.CanSeek.get -> bool @@ -1829,12 +1763,6 @@ override NpgsqlTypes.NpgsqlBox.ToString() -> string! override NpgsqlTypes.NpgsqlCircle.Equals(object? obj) -> bool override NpgsqlTypes.NpgsqlCircle.GetHashCode() -> int override NpgsqlTypes.NpgsqlCircle.ToString() -> string! -override NpgsqlTypes.NpgsqlDate.Equals(object? obj) -> bool -override NpgsqlTypes.NpgsqlDate.GetHashCode() -> int -override NpgsqlTypes.NpgsqlDate.ToString() -> string! -override NpgsqlTypes.NpgsqlDateTime.Equals(object? obj) -> bool -override NpgsqlTypes.NpgsqlDateTime.GetHashCode() -> int -override NpgsqlTypes.NpgsqlDateTime.ToString() -> string! override NpgsqlTypes.NpgsqlInet.Equals(object? 
obj) -> bool override NpgsqlTypes.NpgsqlInet.GetHashCode() -> int override NpgsqlTypes.NpgsqlInet.ToString() -> string! @@ -1868,9 +1796,6 @@ override NpgsqlTypes.NpgsqlRange.ToString() -> string! override NpgsqlTypes.NpgsqlTid.Equals(object? o) -> bool override NpgsqlTypes.NpgsqlTid.GetHashCode() -> int override NpgsqlTypes.NpgsqlTid.ToString() -> string! -override NpgsqlTypes.NpgsqlTimeSpan.Equals(object? obj) -> bool -override NpgsqlTypes.NpgsqlTimeSpan.GetHashCode() -> int -override NpgsqlTypes.NpgsqlTimeSpan.ToString() -> string! override NpgsqlTypes.NpgsqlTsQuery.Equals(object? obj) -> bool override NpgsqlTypes.NpgsqlTsQuery.GetHashCode() -> int override NpgsqlTypes.NpgsqlTsQuery.ToString() -> string! @@ -1911,24 +1836,19 @@ override sealed Npgsql.NpgsqlParameter.SourceColumnNullMapping.get -> bool override sealed Npgsql.NpgsqlParameter.SourceColumnNullMapping.set -> void override sealed Npgsql.NpgsqlParameter.SourceVersion.get -> System.Data.DataRowVersion override sealed Npgsql.NpgsqlParameter.SourceVersion.set -> void -static Npgsql.Logging.NpgsqlLogManager.IsParameterLoggingEnabled.get -> bool -static Npgsql.Logging.NpgsqlLogManager.IsParameterLoggingEnabled.set -> void -static Npgsql.Logging.NpgsqlLogManager.Provider.get -> Npgsql.Logging.INpgsqlLoggingProvider! -static Npgsql.Logging.NpgsqlLogManager.Provider.set -> void static Npgsql.NameTranslation.NpgsqlSnakeCaseNameTranslator.ConvertToSnakeCase(string! name) -> string! static Npgsql.NpgsqlCommandBuilder.DeriveParameters(Npgsql.NpgsqlCommand! command) -> void static Npgsql.NpgsqlConnection.ClearAllPools() -> void static Npgsql.NpgsqlConnection.ClearPool(Npgsql.NpgsqlConnection! connection) -> void static Npgsql.NpgsqlConnection.GlobalTypeMapper.get -> Npgsql.TypeMapping.INpgsqlTypeMapper! -static Npgsql.NpgsqlConnection.MapCompositeGlobally(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> void -static Npgsql.NpgsqlConnection.MapEnumGlobally(string? 
pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> void -static Npgsql.NpgsqlConnection.UnmapCompositeGlobally(string! pgName, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> void -static Npgsql.NpgsqlConnection.UnmapEnumGlobally(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> void -static Npgsql.Replication.Internal.LogicalReplicationConnectionExtensions.CreateLogicalReplicationSlot(this Npgsql.Replication.LogicalReplicationConnection! connection, string! slotName, string! outputPlugin, bool isTemporary = false, Npgsql.Replication.LogicalSlotSnapshotInitMode? slotSnapshotInitMode = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +static Npgsql.NpgsqlDataSource.Create(Npgsql.NpgsqlConnectionStringBuilder! connectionStringBuilder) -> Npgsql.NpgsqlDataSource! +static Npgsql.NpgsqlDataSource.Create(string! connectionString) -> Npgsql.NpgsqlDataSource! +static Npgsql.NpgsqlLoggingConfiguration.InitializeLogging(Microsoft.Extensions.Logging.ILoggerFactory! loggerFactory, bool parameterLoggingEnabled = false) -> void +static Npgsql.Replication.Internal.LogicalReplicationConnectionExtensions.CreateLogicalReplicationSlot(this Npgsql.Replication.LogicalReplicationConnection! connection, string! slotName, string! outputPlugin, bool isTemporary = false, Npgsql.Replication.LogicalSlotSnapshotInitMode? slotSnapshotInitMode = null, bool twoPhase = false, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! static Npgsql.Replication.Internal.LogicalReplicationConnectionExtensions.StartLogicalReplication(this Npgsql.Replication.LogicalReplicationConnection! connection, Npgsql.Replication.Internal.LogicalReplicationSlot! slot, System.Threading.CancellationToken cancellationToken, NpgsqlTypes.NpgsqlLogSequenceNumber? 
walLocation = null, System.Collections.Generic.IEnumerable>? options = null, bool bypassingStream = false) -> System.Collections.Generic.IAsyncEnumerable! -static Npgsql.Replication.PgOutputConnectionExtensions.CreatePgOutputReplicationSlot(this Npgsql.Replication.LogicalReplicationConnection! connection, string! slotName, bool temporarySlot = false, Npgsql.Replication.LogicalSlotSnapshotInitMode? slotSnapshotInitMode = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +static Npgsql.Replication.PgOutputConnectionExtensions.CreatePgOutputReplicationSlot(this Npgsql.Replication.LogicalReplicationConnection! connection, string! slotName, bool temporarySlot = false, Npgsql.Replication.LogicalSlotSnapshotInitMode? slotSnapshotInitMode = null, bool twoPhase = false, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! static Npgsql.Replication.PgOutputConnectionExtensions.StartReplication(this Npgsql.Replication.LogicalReplicationConnection! connection, Npgsql.Replication.PgOutput.PgOutputReplicationSlot! slot, Npgsql.Replication.PgOutput.PgOutputReplicationOptions! options, System.Threading.CancellationToken cancellationToken, NpgsqlTypes.NpgsqlLogSequenceNumber? walLocation = null) -> System.Collections.Generic.IAsyncEnumerable! -static Npgsql.Replication.TestDecodingConnectionExtensions.CreateTestDecodingReplicationSlot(this Npgsql.Replication.LogicalReplicationConnection! connection, string! slotName, bool temporarySlot = false, Npgsql.Replication.LogicalSlotSnapshotInitMode? slotSnapshotInitMode = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +static Npgsql.Replication.TestDecodingConnectionExtensions.CreateTestDecodingReplicationSlot(this Npgsql.Replication.LogicalReplicationConnection! connection, string! 
slotName, bool temporarySlot = false, Npgsql.Replication.LogicalSlotSnapshotInitMode? slotSnapshotInitMode = null, bool twoPhase = false, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! static Npgsql.Replication.TestDecodingConnectionExtensions.StartReplication(this Npgsql.Replication.LogicalReplicationConnection! connection, Npgsql.Replication.TestDecoding.TestDecodingReplicationSlot! slot, System.Threading.CancellationToken cancellationToken, Npgsql.Replication.TestDecoding.TestDecodingOptions? options = null, NpgsqlTypes.NpgsqlLogSequenceNumber? walLocation = null) -> System.Collections.Generic.IAsyncEnumerable! static NpgsqlTypes.NpgsqlBox.operator !=(NpgsqlTypes.NpgsqlBox x, NpgsqlTypes.NpgsqlBox y) -> bool static NpgsqlTypes.NpgsqlBox.operator ==(NpgsqlTypes.NpgsqlBox x, NpgsqlTypes.NpgsqlBox y) -> bool @@ -1936,45 +1856,6 @@ static NpgsqlTypes.NpgsqlBox.Parse(string! s) -> NpgsqlTypes.NpgsqlBox static NpgsqlTypes.NpgsqlCircle.operator !=(NpgsqlTypes.NpgsqlCircle x, NpgsqlTypes.NpgsqlCircle y) -> bool static NpgsqlTypes.NpgsqlCircle.operator ==(NpgsqlTypes.NpgsqlCircle x, NpgsqlTypes.NpgsqlCircle y) -> bool static NpgsqlTypes.NpgsqlCircle.Parse(string! 
s) -> NpgsqlTypes.NpgsqlCircle -static NpgsqlTypes.NpgsqlDate.explicit operator NpgsqlTypes.NpgsqlDate(System.DateOnly date) -> NpgsqlTypes.NpgsqlDate -static NpgsqlTypes.NpgsqlDate.explicit operator NpgsqlTypes.NpgsqlDate(System.DateTime date) -> NpgsqlTypes.NpgsqlDate -static NpgsqlTypes.NpgsqlDate.explicit operator System.DateOnly(NpgsqlTypes.NpgsqlDate date) -> System.DateOnly -static NpgsqlTypes.NpgsqlDate.explicit operator System.DateTime(NpgsqlTypes.NpgsqlDate date) -> System.DateTime -static NpgsqlTypes.NpgsqlDate.Now.get -> NpgsqlTypes.NpgsqlDate -static NpgsqlTypes.NpgsqlDate.operator !=(NpgsqlTypes.NpgsqlDate x, NpgsqlTypes.NpgsqlDate y) -> bool -static NpgsqlTypes.NpgsqlDate.operator +(NpgsqlTypes.NpgsqlDate date, NpgsqlTypes.NpgsqlTimeSpan interval) -> NpgsqlTypes.NpgsqlDate -static NpgsqlTypes.NpgsqlDate.operator +(NpgsqlTypes.NpgsqlTimeSpan interval, NpgsqlTypes.NpgsqlDate date) -> NpgsqlTypes.NpgsqlDate -static NpgsqlTypes.NpgsqlDate.operator -(NpgsqlTypes.NpgsqlDate date, NpgsqlTypes.NpgsqlTimeSpan interval) -> NpgsqlTypes.NpgsqlDate -static NpgsqlTypes.NpgsqlDate.operator -(NpgsqlTypes.NpgsqlDate dateX, NpgsqlTypes.NpgsqlDate dateY) -> NpgsqlTypes.NpgsqlTimeSpan -static NpgsqlTypes.NpgsqlDate.operator <(NpgsqlTypes.NpgsqlDate x, NpgsqlTypes.NpgsqlDate y) -> bool -static NpgsqlTypes.NpgsqlDate.operator <=(NpgsqlTypes.NpgsqlDate x, NpgsqlTypes.NpgsqlDate y) -> bool -static NpgsqlTypes.NpgsqlDate.operator ==(NpgsqlTypes.NpgsqlDate x, NpgsqlTypes.NpgsqlDate y) -> bool -static NpgsqlTypes.NpgsqlDate.operator >(NpgsqlTypes.NpgsqlDate x, NpgsqlTypes.NpgsqlDate y) -> bool -static NpgsqlTypes.NpgsqlDate.operator >=(NpgsqlTypes.NpgsqlDate x, NpgsqlTypes.NpgsqlDate y) -> bool -static NpgsqlTypes.NpgsqlDate.Parse(string! 
str) -> NpgsqlTypes.NpgsqlDate -static NpgsqlTypes.NpgsqlDate.ToDateOnly(NpgsqlTypes.NpgsqlDate date) -> System.DateOnly -static NpgsqlTypes.NpgsqlDate.ToDateTime(NpgsqlTypes.NpgsqlDate date) -> System.DateTime -static NpgsqlTypes.NpgsqlDate.Today.get -> NpgsqlTypes.NpgsqlDate -static NpgsqlTypes.NpgsqlDate.Tomorrow.get -> NpgsqlTypes.NpgsqlDate -static NpgsqlTypes.NpgsqlDate.ToNpgsqlDate(System.DateOnly date) -> NpgsqlTypes.NpgsqlDate -static NpgsqlTypes.NpgsqlDate.ToNpgsqlDate(System.DateTime date) -> NpgsqlTypes.NpgsqlDate -static NpgsqlTypes.NpgsqlDate.TryParse(string! str, out NpgsqlTypes.NpgsqlDate date) -> bool -static NpgsqlTypes.NpgsqlDate.Yesterday.get -> NpgsqlTypes.NpgsqlDate -static NpgsqlTypes.NpgsqlDateTime.explicit operator System.DateTime(NpgsqlTypes.NpgsqlDateTime npgsqlDateTime) -> System.DateTime -static NpgsqlTypes.NpgsqlDateTime.implicit operator NpgsqlTypes.NpgsqlDateTime(System.DateTime dateTime) -> NpgsqlTypes.NpgsqlDateTime -static NpgsqlTypes.NpgsqlDateTime.Now.get -> NpgsqlTypes.NpgsqlDateTime -static NpgsqlTypes.NpgsqlDateTime.operator !=(NpgsqlTypes.NpgsqlDateTime x, NpgsqlTypes.NpgsqlDateTime y) -> bool -static NpgsqlTypes.NpgsqlDateTime.operator +(NpgsqlTypes.NpgsqlDateTime timestamp, NpgsqlTypes.NpgsqlTimeSpan interval) -> NpgsqlTypes.NpgsqlDateTime -static NpgsqlTypes.NpgsqlDateTime.operator +(NpgsqlTypes.NpgsqlTimeSpan interval, NpgsqlTypes.NpgsqlDateTime timestamp) -> NpgsqlTypes.NpgsqlDateTime -static NpgsqlTypes.NpgsqlDateTime.operator -(NpgsqlTypes.NpgsqlDateTime timestamp, NpgsqlTypes.NpgsqlTimeSpan interval) -> NpgsqlTypes.NpgsqlDateTime -static NpgsqlTypes.NpgsqlDateTime.operator -(NpgsqlTypes.NpgsqlDateTime x, NpgsqlTypes.NpgsqlDateTime y) -> NpgsqlTypes.NpgsqlTimeSpan -static NpgsqlTypes.NpgsqlDateTime.operator <(NpgsqlTypes.NpgsqlDateTime x, NpgsqlTypes.NpgsqlDateTime y) -> bool -static NpgsqlTypes.NpgsqlDateTime.operator <=(NpgsqlTypes.NpgsqlDateTime x, NpgsqlTypes.NpgsqlDateTime y) -> bool -static 
NpgsqlTypes.NpgsqlDateTime.operator ==(NpgsqlTypes.NpgsqlDateTime x, NpgsqlTypes.NpgsqlDateTime y) -> bool -static NpgsqlTypes.NpgsqlDateTime.operator >(NpgsqlTypes.NpgsqlDateTime x, NpgsqlTypes.NpgsqlDateTime y) -> bool -static NpgsqlTypes.NpgsqlDateTime.operator >=(NpgsqlTypes.NpgsqlDateTime x, NpgsqlTypes.NpgsqlDateTime y) -> bool -static NpgsqlTypes.NpgsqlDateTime.Parse(string! str) -> NpgsqlTypes.NpgsqlDateTime -static NpgsqlTypes.NpgsqlDateTime.ToNpgsqlDateTime(System.DateTime dateTime) -> NpgsqlTypes.NpgsqlDateTime static NpgsqlTypes.NpgsqlInet.explicit operator System.Net.IPAddress!(NpgsqlTypes.NpgsqlInet inet) -> System.Net.IPAddress! static NpgsqlTypes.NpgsqlInet.implicit operator NpgsqlTypes.NpgsqlInet(System.Net.IPAddress! ip) -> NpgsqlTypes.NpgsqlInet static NpgsqlTypes.NpgsqlInet.operator !=(NpgsqlTypes.NpgsqlInet x, NpgsqlTypes.NpgsqlInet y) -> bool @@ -2019,32 +1900,6 @@ static NpgsqlTypes.NpgsqlRange.Parse(string! value) -> NpgsqlTypes.NpgsqlRang static NpgsqlTypes.NpgsqlRange.RangeTypeConverter.Register() -> void static NpgsqlTypes.NpgsqlTid.operator !=(NpgsqlTypes.NpgsqlTid left, NpgsqlTypes.NpgsqlTid right) -> bool static NpgsqlTypes.NpgsqlTid.operator ==(NpgsqlTypes.NpgsqlTid left, NpgsqlTypes.NpgsqlTid right) -> bool -static NpgsqlTypes.NpgsqlTimeSpan.Compare(NpgsqlTypes.NpgsqlTimeSpan x, NpgsqlTypes.NpgsqlTimeSpan y) -> int -static NpgsqlTypes.NpgsqlTimeSpan.explicit operator System.TimeSpan(NpgsqlTypes.NpgsqlTimeSpan interval) -> System.TimeSpan -static NpgsqlTypes.NpgsqlTimeSpan.FromDays(double days) -> NpgsqlTypes.NpgsqlTimeSpan -static NpgsqlTypes.NpgsqlTimeSpan.FromHours(double hours) -> NpgsqlTypes.NpgsqlTimeSpan -static NpgsqlTypes.NpgsqlTimeSpan.FromMicroseconds(double micro) -> NpgsqlTypes.NpgsqlTimeSpan -static NpgsqlTypes.NpgsqlTimeSpan.FromMilliseconds(double milli) -> NpgsqlTypes.NpgsqlTimeSpan -static NpgsqlTypes.NpgsqlTimeSpan.FromMinutes(double minutes) -> NpgsqlTypes.NpgsqlTimeSpan -static 
NpgsqlTypes.NpgsqlTimeSpan.FromMonths(double months) -> NpgsqlTypes.NpgsqlTimeSpan -static NpgsqlTypes.NpgsqlTimeSpan.FromSeconds(double seconds) -> NpgsqlTypes.NpgsqlTimeSpan -static NpgsqlTypes.NpgsqlTimeSpan.FromTicks(long ticks) -> NpgsqlTypes.NpgsqlTimeSpan -static NpgsqlTypes.NpgsqlTimeSpan.implicit operator NpgsqlTypes.NpgsqlTimeSpan(System.TimeSpan timespan) -> NpgsqlTypes.NpgsqlTimeSpan -static NpgsqlTypes.NpgsqlTimeSpan.operator !=(NpgsqlTypes.NpgsqlTimeSpan x, NpgsqlTypes.NpgsqlTimeSpan y) -> bool -static NpgsqlTypes.NpgsqlTimeSpan.operator +(NpgsqlTypes.NpgsqlTimeSpan x) -> NpgsqlTypes.NpgsqlTimeSpan -static NpgsqlTypes.NpgsqlTimeSpan.operator +(NpgsqlTypes.NpgsqlTimeSpan x, NpgsqlTypes.NpgsqlTimeSpan y) -> NpgsqlTypes.NpgsqlTimeSpan -static NpgsqlTypes.NpgsqlTimeSpan.operator -(NpgsqlTypes.NpgsqlTimeSpan x) -> NpgsqlTypes.NpgsqlTimeSpan -static NpgsqlTypes.NpgsqlTimeSpan.operator -(NpgsqlTypes.NpgsqlTimeSpan x, NpgsqlTypes.NpgsqlTimeSpan y) -> NpgsqlTypes.NpgsqlTimeSpan -static NpgsqlTypes.NpgsqlTimeSpan.operator <(NpgsqlTypes.NpgsqlTimeSpan x, NpgsqlTypes.NpgsqlTimeSpan y) -> bool -static NpgsqlTypes.NpgsqlTimeSpan.operator <=(NpgsqlTypes.NpgsqlTimeSpan x, NpgsqlTypes.NpgsqlTimeSpan y) -> bool -static NpgsqlTypes.NpgsqlTimeSpan.operator ==(NpgsqlTypes.NpgsqlTimeSpan x, NpgsqlTypes.NpgsqlTimeSpan y) -> bool -static NpgsqlTypes.NpgsqlTimeSpan.operator >(NpgsqlTypes.NpgsqlTimeSpan x, NpgsqlTypes.NpgsqlTimeSpan y) -> bool -static NpgsqlTypes.NpgsqlTimeSpan.operator >=(NpgsqlTypes.NpgsqlTimeSpan x, NpgsqlTypes.NpgsqlTimeSpan y) -> bool -static NpgsqlTypes.NpgsqlTimeSpan.Parse(string! 
str) -> NpgsqlTypes.NpgsqlTimeSpan -static NpgsqlTypes.NpgsqlTimeSpan.Plus(in NpgsqlTypes.NpgsqlTimeSpan x) -> NpgsqlTypes.NpgsqlTimeSpan -static NpgsqlTypes.NpgsqlTimeSpan.ToNpgsqlTimeSpan(System.TimeSpan timespan) -> NpgsqlTypes.NpgsqlTimeSpan -static NpgsqlTypes.NpgsqlTimeSpan.ToTimeSpan(in NpgsqlTypes.NpgsqlTimeSpan interval) -> System.TimeSpan -static NpgsqlTypes.NpgsqlTimeSpan.TryParse(string! str, out NpgsqlTypes.NpgsqlTimeSpan result) -> bool static NpgsqlTypes.NpgsqlTsQuery.operator !=(NpgsqlTypes.NpgsqlTsQuery? left, NpgsqlTypes.NpgsqlTsQuery? right) -> bool static NpgsqlTypes.NpgsqlTsQuery.operator ==(NpgsqlTypes.NpgsqlTsQuery? left, NpgsqlTypes.NpgsqlTsQuery? right) -> bool static NpgsqlTypes.NpgsqlTsQuery.Parse(string! value) -> NpgsqlTypes.NpgsqlTsQuery! @@ -2054,19 +1909,7 @@ static NpgsqlTypes.NpgsqlTsVector.Lexeme.WordEntryPos.operator !=(NpgsqlTypes.Np static NpgsqlTypes.NpgsqlTsVector.Lexeme.WordEntryPos.operator ==(NpgsqlTypes.NpgsqlTsVector.Lexeme.WordEntryPos left, NpgsqlTypes.NpgsqlTsVector.Lexeme.WordEntryPos right) -> bool static NpgsqlTypes.NpgsqlTsVector.Parse(string! value) -> NpgsqlTypes.NpgsqlTsVector! static readonly Npgsql.NpgsqlFactory.Instance -> Npgsql.NpgsqlFactory! 
-static readonly NpgsqlTypes.NpgsqlDate.Epoch -> NpgsqlTypes.NpgsqlDate -static readonly NpgsqlTypes.NpgsqlDate.Era -> NpgsqlTypes.NpgsqlDate -static readonly NpgsqlTypes.NpgsqlDate.Infinity -> NpgsqlTypes.NpgsqlDate -static readonly NpgsqlTypes.NpgsqlDate.MaxCalculableValue -> NpgsqlTypes.NpgsqlDate -static readonly NpgsqlTypes.NpgsqlDate.MinCalculableValue -> NpgsqlTypes.NpgsqlDate -static readonly NpgsqlTypes.NpgsqlDate.NegativeInfinity -> NpgsqlTypes.NpgsqlDate -static readonly NpgsqlTypes.NpgsqlDateTime.Epoch -> NpgsqlTypes.NpgsqlDateTime -static readonly NpgsqlTypes.NpgsqlDateTime.Era -> NpgsqlTypes.NpgsqlDateTime -static readonly NpgsqlTypes.NpgsqlDateTime.Infinity -> NpgsqlTypes.NpgsqlDateTime -static readonly NpgsqlTypes.NpgsqlDateTime.NegativeInfinity -> NpgsqlTypes.NpgsqlDateTime static readonly NpgsqlTypes.NpgsqlLogSequenceNumber.Invalid -> NpgsqlTypes.NpgsqlLogSequenceNumber static readonly NpgsqlTypes.NpgsqlRange.Empty -> NpgsqlTypes.NpgsqlRange -static readonly NpgsqlTypes.NpgsqlTimeSpan.MaxValue -> NpgsqlTypes.NpgsqlTimeSpan -static readonly NpgsqlTypes.NpgsqlTimeSpan.MinValue -> NpgsqlTypes.NpgsqlTimeSpan -static readonly NpgsqlTypes.NpgsqlTimeSpan.Zero -> NpgsqlTypes.NpgsqlTimeSpan -virtual Npgsql.Replication.PgOutput.ReplicationTuple.GetAsyncEnumerator(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Collections.Generic.IAsyncEnumerator! \ No newline at end of file +virtual Npgsql.NpgsqlCommand.Clone() -> Npgsql.NpgsqlCommand! +virtual Npgsql.Replication.PgOutput.ReplicationTuple.GetAsyncEnumerator(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Collections.Generic.IAsyncEnumerator! 
diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index b032a873c1..ab058de62d 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -1,367 +1 @@ #nullable enable -Npgsql.NpgsqlBatch.EnableErrorBarriers.get -> bool -Npgsql.NpgsqlBatch.EnableErrorBarriers.set -> void -Npgsql.NpgsqlBatchCommand.AppendErrorBarrier.get -> bool? -Npgsql.NpgsqlBatchCommand.AppendErrorBarrier.set -> void -Npgsql.NpgsqlConnection.ReloadTypesAsync() -> System.Threading.Tasks.Task! -Npgsql.NpgsqlDataSource -Npgsql.NpgsqlDataSource.CreateBatch() -> Npgsql.NpgsqlBatch! -Npgsql.NpgsqlDataSource.CreateCommand(string? commandText = null) -> Npgsql.NpgsqlCommand! -Npgsql.NpgsqlDataSource.CreateConnection() -> Npgsql.NpgsqlConnection! -Npgsql.NpgsqlDataSource.OpenConnection() -> Npgsql.NpgsqlConnection! -Npgsql.NpgsqlDataSource.OpenConnectionAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.ValueTask -Npgsql.NpgsqlDataSource.Password.set -> void -Npgsql.NpgsqlDataSourceBuilder -Npgsql.NpgsqlDataSourceBuilder.AddTypeResolverFactory(Npgsql.Internal.TypeHandling.TypeHandlerResolverFactory! resolverFactory) -> void -Npgsql.NpgsqlDataSourceBuilder.Build() -> Npgsql.NpgsqlDataSource! -Npgsql.NpgsqlDataSourceBuilder.BuildMultiHost() -> Npgsql.NpgsqlMultiHostDataSource! -Npgsql.NpgsqlDataSourceBuilder.ConnectionString.get -> string! -Npgsql.NpgsqlDataSourceBuilder.ConnectionStringBuilder.get -> Npgsql.NpgsqlConnectionStringBuilder! -Npgsql.NpgsqlDataSourceBuilder.DefaultNameTranslator.get -> Npgsql.INpgsqlNameTranslator! -Npgsql.NpgsqlDataSourceBuilder.DefaultNameTranslator.set -> void -Npgsql.NpgsqlDataSourceBuilder.EnableParameterLogging(bool parameterLoggingEnabled = true) -> Npgsql.NpgsqlDataSourceBuilder! -Npgsql.NpgsqlDataSourceBuilder.MapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? 
nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! -Npgsql.NpgsqlDataSourceBuilder.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! -Npgsql.NpgsqlDataSourceBuilder.MapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! -Npgsql.NpgsqlDataSourceBuilder.NpgsqlDataSourceBuilder(string? connectionString = null) -> void -Npgsql.NpgsqlDataSourceBuilder.UnmapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool -Npgsql.NpgsqlDataSourceBuilder.UnmapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool -Npgsql.NpgsqlDataSourceBuilder.UnmapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool -Npgsql.NpgsqlDataSourceBuilder.UseClientCertificate(System.Security.Cryptography.X509Certificates.X509Certificate? clientCertificate) -> Npgsql.NpgsqlDataSourceBuilder! -Npgsql.NpgsqlDataSourceBuilder.UseClientCertificates(System.Security.Cryptography.X509Certificates.X509CertificateCollection? clientCertificates) -> Npgsql.NpgsqlDataSourceBuilder! -Npgsql.NpgsqlDataSourceBuilder.UseClientCertificatesCallback(System.Action? clientCertificatesCallback) -> Npgsql.NpgsqlDataSourceBuilder! -Npgsql.NpgsqlDataSourceBuilder.UseLoggerFactory(Microsoft.Extensions.Logging.ILoggerFactory? loggerFactory) -> Npgsql.NpgsqlDataSourceBuilder! -Npgsql.NpgsqlDataSourceBuilder.UsePeriodicPasswordProvider(System.Func>? passwordProvider, System.TimeSpan successRefreshInterval, System.TimeSpan failureRefreshInterval) -> Npgsql.NpgsqlDataSourceBuilder! -Npgsql.NpgsqlDataSourceBuilder.UsePhysicalConnectionInitializer(System.Action? connectionInitializer, System.Func? connectionInitializerAsync) -> Npgsql.NpgsqlDataSourceBuilder! 
-Npgsql.NpgsqlDataSourceBuilder.UseUserCertificateValidationCallback(System.Net.Security.RemoteCertificateValidationCallback! userCertificateValidationCallback) -> Npgsql.NpgsqlDataSourceBuilder! -Npgsql.NpgsqlLoggingConfiguration -Npgsql.NpgsqlMultiHostDataSource -Npgsql.NpgsqlMultiHostDataSource.ClearDatabaseStates() -> void -Npgsql.NpgsqlMultiHostDataSource.CreateConnection(Npgsql.TargetSessionAttributes targetSessionAttributes) -> Npgsql.NpgsqlConnection! -Npgsql.NpgsqlMultiHostDataSource.OpenConnection(Npgsql.TargetSessionAttributes targetSessionAttributes) -> Npgsql.NpgsqlConnection! -Npgsql.NpgsqlMultiHostDataSource.OpenConnectionAsync(Npgsql.TargetSessionAttributes targetSessionAttributes, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.ValueTask -Npgsql.NpgsqlMultiHostDataSource.WithTargetSession(Npgsql.TargetSessionAttributes targetSessionAttributes) -> Npgsql.NpgsqlDataSource! -Npgsql.Replication.PgOutput.Messages.BeginPrepareMessage -Npgsql.Replication.PgOutput.Messages.CommitPreparedMessage -Npgsql.Replication.PgOutput.Messages.CommitPreparedMessage.CommitPreparedEndLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber -Npgsql.Replication.PgOutput.Messages.CommitPreparedMessage.CommitPreparedFlags -Npgsql.Replication.PgOutput.Messages.CommitPreparedMessage.CommitPreparedFlags.None = 0 -> Npgsql.Replication.PgOutput.Messages.CommitPreparedMessage.CommitPreparedFlags -Npgsql.Replication.PgOutput.Messages.CommitPreparedMessage.CommitPreparedLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber -Npgsql.Replication.PgOutput.Messages.CommitPreparedMessage.Flags.get -> Npgsql.Replication.PgOutput.Messages.CommitPreparedMessage.CommitPreparedFlags -Npgsql.Replication.PgOutput.Messages.CommitPreparedMessage.TransactionCommitTimestamp.get -> System.DateTime -Npgsql.Replication.PgOutput.Messages.PreparedTransactionControlMessage 
-Npgsql.Replication.PgOutput.Messages.PreparedTransactionControlMessage.TransactionGid.get -> string! -Npgsql.Replication.PgOutput.Messages.PrepareMessage -Npgsql.Replication.PgOutput.Messages.PrepareMessage.Flags.get -> Npgsql.Replication.PgOutput.Messages.PrepareMessage.PrepareFlags -Npgsql.Replication.PgOutput.Messages.PrepareMessage.PrepareFlags -Npgsql.Replication.PgOutput.Messages.PrepareMessage.PrepareFlags.None = 0 -> Npgsql.Replication.PgOutput.Messages.PrepareMessage.PrepareFlags -Npgsql.Replication.PgOutput.Messages.PrepareMessageBase -Npgsql.Replication.PgOutput.Messages.PrepareMessageBase.PrepareEndLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber -Npgsql.Replication.PgOutput.Messages.PrepareMessageBase.PrepareLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber -Npgsql.Replication.PgOutput.Messages.PrepareMessageBase.TransactionPrepareTimestamp.get -> System.DateTime -Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage -Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage.Flags.get -> Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage.RollbackPreparedFlags -Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage.PreparedTransactionEndLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber -Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage.RollbackPreparedEndLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber -Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage.RollbackPreparedFlags -Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage.RollbackPreparedFlags.None = 0 -> Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage.RollbackPreparedFlags -Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage.TransactionPrepareTimestamp.get -> System.DateTime -Npgsql.Replication.PgOutput.Messages.RollbackPreparedMessage.TransactionRollbackTimestamp.get -> System.DateTime -Npgsql.Replication.PgOutput.Messages.StreamPrepareMessage -Npgsql.Replication.PgOutput.Messages.StreamPrepareMessage.Flags.get -> 
Npgsql.Replication.PgOutput.Messages.StreamPrepareMessage.StreamPrepareFlags -Npgsql.Replication.PgOutput.Messages.StreamPrepareMessage.StreamPrepareFlags -Npgsql.Replication.PgOutput.Messages.StreamPrepareMessage.StreamPrepareFlags.None = 0 -> Npgsql.Replication.PgOutput.Messages.StreamPrepareMessage.StreamPrepareFlags -Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PgOutputReplicationOptions(string! publicationName, ulong protocolVersion, bool? binary = null, bool? streaming = null, bool? messages = null, bool? twoPhase = null) -> void -Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PgOutputReplicationOptions(System.Collections.Generic.IEnumerable! publicationNames, ulong protocolVersion, bool? binary = null, bool? streaming = null, bool? messages = null, bool? twoPhase = null) -> void -Npgsql.Replication.PgOutput.PgOutputReplicationOptions.TwoPhase.get -> bool? -Npgsql.Replication.PhysicalReplicationConnection.ReadReplicationSlot(string! slotName, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -Npgsql.Replication.PhysicalReplicationConnection.StartReplication(Npgsql.Replication.PhysicalReplicationSlot! slot, System.Threading.CancellationToken cancellationToken) -> System.Collections.Generic.IAsyncEnumerable! -Npgsql.Replication.PhysicalReplicationConnection.StartReplication(Npgsql.Replication.PhysicalReplicationSlot? slot, NpgsqlTypes.NpgsqlLogSequenceNumber walLocation, System.Threading.CancellationToken cancellationToken, ulong timeline = 0) -> System.Collections.Generic.IAsyncEnumerable! -Npgsql.Replication.PhysicalReplicationConnection.StartReplication(NpgsqlTypes.NpgsqlLogSequenceNumber walLocation, System.Threading.CancellationToken cancellationToken, ulong timeline = 0) -> System.Collections.Generic.IAsyncEnumerable! -Npgsql.Replication.PhysicalReplicationSlot.PhysicalReplicationSlot(string! slotName, NpgsqlTypes.NpgsqlLogSequenceNumber? 
restartLsn = null, ulong? restartTimeline = null) -> void -Npgsql.Replication.PhysicalReplicationSlot.RestartLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber? -Npgsql.Replication.PhysicalReplicationSlot.RestartTimeline.get -> ulong? -Npgsql.Schema.NpgsqlDbColumn.IsIdentity.get -> bool? -Npgsql.Schema.NpgsqlDbColumn.IsIdentity.set -> void -Npgsql.StatementType.Call = 11 -> Npgsql.StatementType -Npgsql.TypeMapping.INpgsqlTypeMapper.DefaultNameTranslator.set -> void -override Npgsql.NpgsqlCommand.CreateDbParameter() -> System.Data.Common.DbParameter! -override Npgsql.NpgsqlCommand.DbConnection.get -> System.Data.Common.DbConnection? -override Npgsql.NpgsqlCommand.DbConnection.set -> void -override Npgsql.NpgsqlCommand.DbParameterCollection.get -> System.Data.Common.DbParameterCollection! -override Npgsql.NpgsqlCommand.DbTransaction.get -> System.Data.Common.DbTransaction? -override Npgsql.NpgsqlCommand.DbTransaction.set -> void -override Npgsql.NpgsqlCommand.Dispose(bool disposing) -> void -override Npgsql.NpgsqlCommand.ExecuteDbDataReader(System.Data.CommandBehavior behavior) -> System.Data.Common.DbDataReader! -override Npgsql.NpgsqlCommand.ExecuteDbDataReaderAsync(System.Data.CommandBehavior behavior, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! -override Npgsql.NpgsqlDataSource.ConnectionString.get -> string! -override Npgsql.NpgsqlFactory.CreateDataSource(string! connectionString) -> System.Data.Common.DbDataSource! -static Npgsql.NpgsqlDataSource.Create(Npgsql.NpgsqlConnectionStringBuilder! connectionStringBuilder) -> Npgsql.NpgsqlDataSource! -static Npgsql.NpgsqlDataSource.Create(string! connectionString) -> Npgsql.NpgsqlDataSource! -static Npgsql.NpgsqlLoggingConfiguration.InitializeLogging(Microsoft.Extensions.Logging.ILoggerFactory! 
loggerFactory, bool parameterLoggingEnabled = false) -> void -static Npgsql.Replication.Internal.LogicalReplicationConnectionExtensions.CreateLogicalReplicationSlot(this Npgsql.Replication.LogicalReplicationConnection! connection, string! slotName, string! outputPlugin, bool isTemporary = false, Npgsql.Replication.LogicalSlotSnapshotInitMode? slotSnapshotInitMode = null, bool twoPhase = false, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -static Npgsql.Replication.PgOutputConnectionExtensions.CreatePgOutputReplicationSlot(this Npgsql.Replication.LogicalReplicationConnection! connection, string! slotName, bool temporarySlot = false, Npgsql.Replication.LogicalSlotSnapshotInitMode? slotSnapshotInitMode = null, bool twoPhase = false, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -static Npgsql.Replication.TestDecodingConnectionExtensions.CreateTestDecodingReplicationSlot(this Npgsql.Replication.LogicalReplicationConnection! connection, string! slotName, bool temporarySlot = false, Npgsql.Replication.LogicalSlotSnapshotInitMode? slotSnapshotInitMode = null, bool twoPhase = false, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -virtual Npgsql.NpgsqlCommand.Clone() -> Npgsql.NpgsqlCommand! - -*REMOVED*Npgsql.NpgsqlConnection.Settings.get -> Npgsql.NpgsqlConnectionStringBuilder! -*REMOVED*abstract Npgsql.Logging.NpgsqlLogger.IsEnabled(Npgsql.Logging.NpgsqlLogLevel level) -> bool -*REMOVED*abstract Npgsql.Logging.NpgsqlLogger.Log(Npgsql.Logging.NpgsqlLogLevel level, int connectorId, string! msg, System.Exception? 
exception = null) -> void -*REMOVED*const NpgsqlTypes.NpgsqlDate.MaxYear = 5874897 -> int -*REMOVED*const NpgsqlTypes.NpgsqlDate.MinYear = -4714 -> int -*REMOVED*const NpgsqlTypes.NpgsqlTimeSpan.DaysPerMonth = 30 -> int -*REMOVED*const NpgsqlTypes.NpgsqlTimeSpan.HoursPerDay = 24 -> int -*REMOVED*const NpgsqlTypes.NpgsqlTimeSpan.MonthsPerYear = 12 -> int -*REMOVED*const NpgsqlTypes.NpgsqlTimeSpan.TicksPerDay = 864000000000 -> long -*REMOVED*const NpgsqlTypes.NpgsqlTimeSpan.TicksPerHour = 36000000000 -> long -*REMOVED*const NpgsqlTypes.NpgsqlTimeSpan.TicksPerMicrosecond = 10 -> long -*REMOVED*const NpgsqlTypes.NpgsqlTimeSpan.TicksPerMillsecond = 10000 -> long -*REMOVED*const NpgsqlTypes.NpgsqlTimeSpan.TicksPerMinute = 600000000 -> long -*REMOVED*const NpgsqlTypes.NpgsqlTimeSpan.TicksPerMonth = 25920000000000 -> long -*REMOVED*const NpgsqlTypes.NpgsqlTimeSpan.TicksPerSecond = 10000000 -> long -*REMOVED*Npgsql.NpgsqlConnection.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> void -*REMOVED*Npgsql.NpgsqlConnection.MapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> void -*REMOVED*Npgsql.NpgsqlConnection.PhysicalOpenAsyncCallback.get -> Npgsql.PhysicalOpenAsyncCallback? -*REMOVED*Npgsql.NpgsqlConnection.PhysicalOpenAsyncCallback.set -> void -*REMOVED*Npgsql.NpgsqlConnection.PhysicalOpenCallback.get -> Npgsql.PhysicalOpenCallback? -*REMOVED*Npgsql.NpgsqlConnection.PhysicalOpenCallback.set -> void -*REMOVED*Npgsql.NpgsqlConnectionStringPropertyAttribute -*REMOVED*Npgsql.NpgsqlConnectionStringPropertyAttribute.NpgsqlConnectionStringPropertyAttribute() -> void -*REMOVED*Npgsql.NpgsqlConnectionStringPropertyAttribute.NpgsqlConnectionStringPropertyAttribute(params string![]! synonyms) -> void -*REMOVED*Npgsql.NpgsqlConnectionStringPropertyAttribute.Synonyms.get -> string![]! 
-*REMOVED*Npgsql.Logging.ConsoleLoggingProvider -*REMOVED*Npgsql.Logging.ConsoleLoggingProvider.ConsoleLoggingProvider(Npgsql.Logging.NpgsqlLogLevel minLevel = Npgsql.Logging.NpgsqlLogLevel.Info, bool printLevel = false, bool printConnectorId = false) -> void -*REMOVED*Npgsql.Logging.ConsoleLoggingProvider.CreateLogger(string! name) -> Npgsql.Logging.NpgsqlLogger! -*REMOVED*Npgsql.Logging.INpgsqlLoggingProvider -*REMOVED*Npgsql.Logging.INpgsqlLoggingProvider.CreateLogger(string! name) -> Npgsql.Logging.NpgsqlLogger! -*REMOVED*Npgsql.Logging.NpgsqlLogger -*REMOVED*Npgsql.Logging.NpgsqlLogger.NpgsqlLogger() -> void -*REMOVED*Npgsql.Logging.NpgsqlLogLevel -*REMOVED*Npgsql.Logging.NpgsqlLogLevel.Debug = 2 -> Npgsql.Logging.NpgsqlLogLevel -*REMOVED*Npgsql.Logging.NpgsqlLogLevel.Error = 5 -> Npgsql.Logging.NpgsqlLogLevel -*REMOVED*Npgsql.Logging.NpgsqlLogLevel.Fatal = 6 -> Npgsql.Logging.NpgsqlLogLevel -*REMOVED*Npgsql.Logging.NpgsqlLogLevel.Info = 3 -> Npgsql.Logging.NpgsqlLogLevel -*REMOVED*Npgsql.Logging.NpgsqlLogLevel.Trace = 1 -> Npgsql.Logging.NpgsqlLogLevel -*REMOVED*Npgsql.Logging.NpgsqlLogLevel.Warn = 4 -> Npgsql.Logging.NpgsqlLogLevel -*REMOVED*Npgsql.Logging.NpgsqlLogManager -*REMOVED*Npgsql.NpgsqlCommand.Clone() -> Npgsql.NpgsqlCommand! -*REMOVED*Npgsql.NpgsqlDataReader.GetDate(int ordinal) -> NpgsqlTypes.NpgsqlDate -*REMOVED*Npgsql.NpgsqlDataReader.GetInterval(int ordinal) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*Npgsql.NpgsqlDataReader.GetTimeStamp(int ordinal) -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*Npgsql.PhysicalOpenAsyncCallback -*REMOVED*Npgsql.PhysicalOpenCallback -*REMOVED*Npgsql.Replication.PhysicalReplicationSlot.PhysicalReplicationSlot(string! slotName) -> void -*REMOVED*Npgsql.Replication.PhysicalReplicationConnection.StartReplication(Npgsql.Replication.PhysicalReplicationSlot? 
slot, NpgsqlTypes.NpgsqlLogSequenceNumber walLocation, System.Threading.CancellationToken cancellationToken, uint timeline = 0) -> System.Collections.Generic.IAsyncEnumerable! -*REMOVED*Npgsql.Replication.PhysicalReplicationConnection.StartReplication(NpgsqlTypes.NpgsqlLogSequenceNumber walLocation, System.Threading.CancellationToken cancellationToken, uint timeline = 0) -> System.Collections.Generic.IAsyncEnumerable! -*REMOVED*Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PgOutputReplicationOptions(string! publicationName, ulong protocolVersion, bool? binary = null, bool? streaming = null, bool? messages = null) -> void -*REMOVED*Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PgOutputReplicationOptions(System.Collections.Generic.IEnumerable! publicationNames, ulong protocolVersion, bool? binary = null, bool? streaming = null, bool? messages = null) -> void -*REMOVED*NpgsqlTypes.NpgsqlDate -*REMOVED*NpgsqlTypes.NpgsqlDate.Add(in NpgsqlTypes.NpgsqlTimeSpan interval) -> NpgsqlTypes.NpgsqlDate -*REMOVED*NpgsqlTypes.NpgsqlDate.AddDays(int days) -> NpgsqlTypes.NpgsqlDate -*REMOVED*NpgsqlTypes.NpgsqlDate.AddMonths(int months) -> NpgsqlTypes.NpgsqlDate -*REMOVED*NpgsqlTypes.NpgsqlDate.AddYears(int years) -> NpgsqlTypes.NpgsqlDate -*REMOVED*NpgsqlTypes.NpgsqlDate.Compare(NpgsqlTypes.NpgsqlDate x, NpgsqlTypes.NpgsqlDate y) -> int -*REMOVED*NpgsqlTypes.NpgsqlDate.Compare(object? x, object? y) -> int -*REMOVED*NpgsqlTypes.NpgsqlDate.CompareTo(NpgsqlTypes.NpgsqlDate other) -> int -*REMOVED*NpgsqlTypes.NpgsqlDate.CompareTo(object? 
o) -> int -*REMOVED*NpgsqlTypes.NpgsqlDate.Day.get -> int -*REMOVED*NpgsqlTypes.NpgsqlDate.DayOfWeek.get -> System.DayOfWeek -*REMOVED*NpgsqlTypes.NpgsqlDate.DayOfYear.get -> int -*REMOVED*NpgsqlTypes.NpgsqlDate.Equals(NpgsqlTypes.NpgsqlDate other) -> bool -*REMOVED*NpgsqlTypes.NpgsqlDate.IsFinite.get -> bool -*REMOVED*NpgsqlTypes.NpgsqlDate.IsInfinity.get -> bool -*REMOVED*NpgsqlTypes.NpgsqlDate.IsLeapYear.get -> bool -*REMOVED*NpgsqlTypes.NpgsqlDate.IsNegativeInfinity.get -> bool -*REMOVED*NpgsqlTypes.NpgsqlDate.Month.get -> int -*REMOVED*NpgsqlTypes.NpgsqlDate.NpgsqlDate() -> void -*REMOVED*NpgsqlTypes.NpgsqlDate.NpgsqlDate(int year, int month, int day) -> void -*REMOVED*NpgsqlTypes.NpgsqlDate.NpgsqlDate(NpgsqlTypes.NpgsqlDate copyFrom) -> void -*REMOVED*NpgsqlTypes.NpgsqlDate.NpgsqlDate(System.DateOnly date) -> void -*REMOVED*NpgsqlTypes.NpgsqlDate.NpgsqlDate(System.DateTime dateTime) -> void -*REMOVED*NpgsqlTypes.NpgsqlDate.Subtract(in NpgsqlTypes.NpgsqlTimeSpan interval) -> NpgsqlTypes.NpgsqlDate -*REMOVED*NpgsqlTypes.NpgsqlDate.Year.get -> int -*REMOVED*NpgsqlTypes.NpgsqlDateTime -*REMOVED*NpgsqlTypes.NpgsqlDateTime.Add(in NpgsqlTypes.NpgsqlTimeSpan value) -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*NpgsqlTypes.NpgsqlDateTime.Add(System.TimeSpan value) -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*NpgsqlTypes.NpgsqlDateTime.AddDays(double value) -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*NpgsqlTypes.NpgsqlDateTime.AddHours(double value) -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*NpgsqlTypes.NpgsqlDateTime.AddMilliseconds(double value) -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*NpgsqlTypes.NpgsqlDateTime.AddMinutes(double value) -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*NpgsqlTypes.NpgsqlDateTime.AddMonths(int value) -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*NpgsqlTypes.NpgsqlDateTime.AddSeconds(double value) -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*NpgsqlTypes.NpgsqlDateTime.AddTicks(long value) -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*NpgsqlTypes.NpgsqlDateTime.AddYears(int value) 
-> NpgsqlTypes.NpgsqlDateTime -*REMOVED*NpgsqlTypes.NpgsqlDateTime.Compare(NpgsqlTypes.NpgsqlDateTime x, NpgsqlTypes.NpgsqlDateTime y) -> int -*REMOVED*NpgsqlTypes.NpgsqlDateTime.Compare(object? x, object? y) -> int -*REMOVED*NpgsqlTypes.NpgsqlDateTime.CompareTo(NpgsqlTypes.NpgsqlDateTime other) -> int -*REMOVED*NpgsqlTypes.NpgsqlDateTime.CompareTo(object? o) -> int -*REMOVED*NpgsqlTypes.NpgsqlDateTime.Date.get -> NpgsqlTypes.NpgsqlDate -*REMOVED*NpgsqlTypes.NpgsqlDateTime.Day.get -> int -*REMOVED*NpgsqlTypes.NpgsqlDateTime.DayOfWeek.get -> System.DayOfWeek -*REMOVED*NpgsqlTypes.NpgsqlDateTime.DayOfYear.get -> int -*REMOVED*NpgsqlTypes.NpgsqlDateTime.Equals(NpgsqlTypes.NpgsqlDateTime other) -> bool -*REMOVED*NpgsqlTypes.NpgsqlDateTime.Hour.get -> int -*REMOVED*NpgsqlTypes.NpgsqlDateTime.IsFinite.get -> bool -*REMOVED*NpgsqlTypes.NpgsqlDateTime.IsInfinity.get -> bool -*REMOVED*NpgsqlTypes.NpgsqlDateTime.IsLeapYear.get -> bool -*REMOVED*NpgsqlTypes.NpgsqlDateTime.IsNegativeInfinity.get -> bool -*REMOVED*NpgsqlTypes.NpgsqlDateTime.Kind.get -> System.DateTimeKind -*REMOVED*NpgsqlTypes.NpgsqlDateTime.Millisecond.get -> int -*REMOVED*NpgsqlTypes.NpgsqlDateTime.Minute.get -> int -*REMOVED*NpgsqlTypes.NpgsqlDateTime.Month.get -> int -*REMOVED*NpgsqlTypes.NpgsqlDateTime.Normalize() -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*NpgsqlTypes.NpgsqlDateTime.NpgsqlDateTime() -> void -*REMOVED*NpgsqlTypes.NpgsqlDateTime.NpgsqlDateTime(int year, int month, int day, int hours, int minutes, int seconds, int milliseconds, System.DateTimeKind kind = System.DateTimeKind.Unspecified) -> void -*REMOVED*NpgsqlTypes.NpgsqlDateTime.NpgsqlDateTime(int year, int month, int day, int hours, int minutes, int seconds, System.DateTimeKind kind = System.DateTimeKind.Unspecified) -> void -*REMOVED*NpgsqlTypes.NpgsqlDateTime.NpgsqlDateTime(long ticks) -> void -*REMOVED*NpgsqlTypes.NpgsqlDateTime.NpgsqlDateTime(long ticks, System.DateTimeKind kind) -> void 
-*REMOVED*NpgsqlTypes.NpgsqlDateTime.NpgsqlDateTime(NpgsqlTypes.NpgsqlDate date) -> void -*REMOVED*NpgsqlTypes.NpgsqlDateTime.NpgsqlDateTime(NpgsqlTypes.NpgsqlDate date, System.TimeSpan time, System.DateTimeKind kind = System.DateTimeKind.Unspecified) -> void -*REMOVED*NpgsqlTypes.NpgsqlDateTime.NpgsqlDateTime(System.DateTime dateTime) -> void -*REMOVED*NpgsqlTypes.NpgsqlDateTime.Second.get -> int -*REMOVED*NpgsqlTypes.NpgsqlDateTime.Subtract(in NpgsqlTypes.NpgsqlTimeSpan interval) -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*NpgsqlTypes.NpgsqlDateTime.Subtract(NpgsqlTypes.NpgsqlDateTime timestamp) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*NpgsqlTypes.NpgsqlDateTime.Ticks.get -> long -*REMOVED*NpgsqlTypes.NpgsqlDateTime.Time.get -> System.TimeSpan -*REMOVED*NpgsqlTypes.NpgsqlDateTime.ToDateTime() -> System.DateTime -*REMOVED*NpgsqlTypes.NpgsqlDateTime.ToLocalTime() -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*NpgsqlTypes.NpgsqlDateTime.ToUniversalTime() -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*NpgsqlTypes.NpgsqlDateTime.Year.get -> int -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.Add(in NpgsqlTypes.NpgsqlTimeSpan interval) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.Canonicalize() -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.CompareTo(NpgsqlTypes.NpgsqlTimeSpan other) -> int -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.CompareTo(object? 
other) -> int -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.Days.get -> int -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.Duration() -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.Equals(NpgsqlTypes.NpgsqlTimeSpan other) -> bool -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.Hours.get -> int -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.JustifyDays() -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.JustifyInterval() -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.JustifyMonths() -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.Microseconds.get -> int -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.Milliseconds.get -> int -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.Minutes.get -> int -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.Months.get -> int -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.Negate() -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.NpgsqlTimeSpan() -> void -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.NpgsqlTimeSpan(int days, int hours, int minutes, int seconds) -> void -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.NpgsqlTimeSpan(int days, int hours, int minutes, int seconds, int milliseconds) -> void -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.NpgsqlTimeSpan(int months, int days, int hours, int minutes, int seconds, int milliseconds) -> void -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.NpgsqlTimeSpan(int months, int days, long ticks) -> void -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.NpgsqlTimeSpan(int years, int months, int days, int hours, int minutes, int seconds, int milliseconds) -> void -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.NpgsqlTimeSpan(long ticks) -> void -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.NpgsqlTimeSpan(System.TimeSpan timespan) -> void -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.Seconds.get -> int -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.Subtract(in NpgsqlTypes.NpgsqlTimeSpan interval) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.Ticks.get -> long -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.Time.get -> System.TimeSpan 
-*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.TotalDays.get -> double -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.TotalHours.get -> double -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.TotalMicroseconds.get -> double -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.TotalMilliseconds.get -> double -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.TotalMinutes.get -> double -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.TotalMonths.get -> double -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.TotalSeconds.get -> double -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.TotalTicks.get -> long -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.UnjustifyDays() -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.UnjustifyInterval() -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*NpgsqlTypes.NpgsqlTimeSpan.UnjustifyMonths() -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*override NpgsqlTypes.NpgsqlDate.Equals(object? obj) -> bool -*REMOVED*override NpgsqlTypes.NpgsqlDate.GetHashCode() -> int -*REMOVED*override NpgsqlTypes.NpgsqlDate.ToString() -> string! -*REMOVED*override NpgsqlTypes.NpgsqlDateTime.Equals(object? obj) -> bool -*REMOVED*override NpgsqlTypes.NpgsqlDateTime.GetHashCode() -> int -*REMOVED*override NpgsqlTypes.NpgsqlDateTime.ToString() -> string! -*REMOVED*override NpgsqlTypes.NpgsqlTimeSpan.Equals(object? obj) -> bool -*REMOVED*override NpgsqlTypes.NpgsqlTimeSpan.GetHashCode() -> int -*REMOVED*override NpgsqlTypes.NpgsqlTimeSpan.ToString() -> string! -*REMOVED*static Npgsql.Logging.NpgsqlLogManager.IsParameterLoggingEnabled.get -> bool -*REMOVED*static Npgsql.Logging.NpgsqlLogManager.IsParameterLoggingEnabled.set -> void -*REMOVED*static Npgsql.Logging.NpgsqlLogManager.Provider.get -> Npgsql.Logging.INpgsqlLoggingProvider! -*REMOVED*static Npgsql.Logging.NpgsqlLogManager.Provider.set -> void -*REMOVED*static Npgsql.NpgsqlConnection.MapCompositeGlobally(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> void -*REMOVED*static Npgsql.NpgsqlConnection.MapEnumGlobally(string? pgName = null, Npgsql.INpgsqlNameTranslator? 
nameTranslator = null) -> void -*REMOVED*static Npgsql.NpgsqlConnection.UnmapCompositeGlobally(string! pgName, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> void -*REMOVED*static Npgsql.NpgsqlConnection.UnmapEnumGlobally(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> void -*REMOVED*static Npgsql.Replication.PgOutputConnectionExtensions.CreatePgOutputReplicationSlot(this Npgsql.Replication.LogicalReplicationConnection! connection, string! slotName, bool temporarySlot = false, Npgsql.Replication.LogicalSlotSnapshotInitMode? slotSnapshotInitMode = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -*REMOVED*static Npgsql.Replication.TestDecodingConnectionExtensions.CreateTestDecodingReplicationSlot(this Npgsql.Replication.LogicalReplicationConnection! connection, string! slotName, bool temporarySlot = false, Npgsql.Replication.LogicalSlotSnapshotInitMode? slotSnapshotInitMode = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! 
-*REMOVED*static NpgsqlTypes.NpgsqlDate.explicit operator NpgsqlTypes.NpgsqlDate(System.DateOnly date) -> NpgsqlTypes.NpgsqlDate -*REMOVED*static NpgsqlTypes.NpgsqlDate.explicit operator NpgsqlTypes.NpgsqlDate(System.DateTime date) -> NpgsqlTypes.NpgsqlDate -*REMOVED*static NpgsqlTypes.NpgsqlDate.explicit operator System.DateOnly(NpgsqlTypes.NpgsqlDate date) -> System.DateOnly -*REMOVED*static NpgsqlTypes.NpgsqlDate.explicit operator System.DateTime(NpgsqlTypes.NpgsqlDate date) -> System.DateTime -*REMOVED*static NpgsqlTypes.NpgsqlDate.Now.get -> NpgsqlTypes.NpgsqlDate -*REMOVED*static NpgsqlTypes.NpgsqlDate.operator !=(NpgsqlTypes.NpgsqlDate x, NpgsqlTypes.NpgsqlDate y) -> bool -*REMOVED*static NpgsqlTypes.NpgsqlDate.operator +(NpgsqlTypes.NpgsqlDate date, NpgsqlTypes.NpgsqlTimeSpan interval) -> NpgsqlTypes.NpgsqlDate -*REMOVED*static NpgsqlTypes.NpgsqlDate.operator +(NpgsqlTypes.NpgsqlTimeSpan interval, NpgsqlTypes.NpgsqlDate date) -> NpgsqlTypes.NpgsqlDate -*REMOVED*static NpgsqlTypes.NpgsqlDate.operator -(NpgsqlTypes.NpgsqlDate date, NpgsqlTypes.NpgsqlTimeSpan interval) -> NpgsqlTypes.NpgsqlDate -*REMOVED*static NpgsqlTypes.NpgsqlDate.operator -(NpgsqlTypes.NpgsqlDate dateX, NpgsqlTypes.NpgsqlDate dateY) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlDate.operator <(NpgsqlTypes.NpgsqlDate x, NpgsqlTypes.NpgsqlDate y) -> bool -*REMOVED*static NpgsqlTypes.NpgsqlDate.operator <=(NpgsqlTypes.NpgsqlDate x, NpgsqlTypes.NpgsqlDate y) -> bool -*REMOVED*static NpgsqlTypes.NpgsqlDate.operator ==(NpgsqlTypes.NpgsqlDate x, NpgsqlTypes.NpgsqlDate y) -> bool -*REMOVED*static NpgsqlTypes.NpgsqlDate.operator >(NpgsqlTypes.NpgsqlDate x, NpgsqlTypes.NpgsqlDate y) -> bool -*REMOVED*static NpgsqlTypes.NpgsqlDate.operator >=(NpgsqlTypes.NpgsqlDate x, NpgsqlTypes.NpgsqlDate y) -> bool -*REMOVED*static NpgsqlTypes.NpgsqlDate.Parse(string! 
str) -> NpgsqlTypes.NpgsqlDate -*REMOVED*static NpgsqlTypes.NpgsqlDate.ToDateOnly(NpgsqlTypes.NpgsqlDate date) -> System.DateOnly -*REMOVED*static NpgsqlTypes.NpgsqlDate.ToDateTime(NpgsqlTypes.NpgsqlDate date) -> System.DateTime -*REMOVED*static NpgsqlTypes.NpgsqlDate.Today.get -> NpgsqlTypes.NpgsqlDate -*REMOVED*static NpgsqlTypes.NpgsqlDate.Tomorrow.get -> NpgsqlTypes.NpgsqlDate -*REMOVED*static NpgsqlTypes.NpgsqlDate.ToNpgsqlDate(System.DateOnly date) -> NpgsqlTypes.NpgsqlDate -*REMOVED*static NpgsqlTypes.NpgsqlDate.ToNpgsqlDate(System.DateTime date) -> NpgsqlTypes.NpgsqlDate -*REMOVED*static NpgsqlTypes.NpgsqlDate.TryParse(string! str, out NpgsqlTypes.NpgsqlDate date) -> bool -*REMOVED*static NpgsqlTypes.NpgsqlDate.Yesterday.get -> NpgsqlTypes.NpgsqlDate -*REMOVED*static NpgsqlTypes.NpgsqlDateTime.explicit operator System.DateTime(NpgsqlTypes.NpgsqlDateTime npgsqlDateTime) -> System.DateTime -*REMOVED*static NpgsqlTypes.NpgsqlDateTime.implicit operator NpgsqlTypes.NpgsqlDateTime(System.DateTime dateTime) -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*static NpgsqlTypes.NpgsqlDateTime.Now.get -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*static NpgsqlTypes.NpgsqlDateTime.operator !=(NpgsqlTypes.NpgsqlDateTime x, NpgsqlTypes.NpgsqlDateTime y) -> bool -*REMOVED*static NpgsqlTypes.NpgsqlDateTime.operator +(NpgsqlTypes.NpgsqlDateTime timestamp, NpgsqlTypes.NpgsqlTimeSpan interval) -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*static NpgsqlTypes.NpgsqlDateTime.operator +(NpgsqlTypes.NpgsqlTimeSpan interval, NpgsqlTypes.NpgsqlDateTime timestamp) -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*static NpgsqlTypes.NpgsqlDateTime.operator -(NpgsqlTypes.NpgsqlDateTime timestamp, NpgsqlTypes.NpgsqlTimeSpan interval) -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*static NpgsqlTypes.NpgsqlDateTime.operator -(NpgsqlTypes.NpgsqlDateTime x, NpgsqlTypes.NpgsqlDateTime y) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlDateTime.operator <(NpgsqlTypes.NpgsqlDateTime x, NpgsqlTypes.NpgsqlDateTime y) 
-> bool -*REMOVED*static NpgsqlTypes.NpgsqlDateTime.operator <=(NpgsqlTypes.NpgsqlDateTime x, NpgsqlTypes.NpgsqlDateTime y) -> bool -*REMOVED*static NpgsqlTypes.NpgsqlDateTime.operator ==(NpgsqlTypes.NpgsqlDateTime x, NpgsqlTypes.NpgsqlDateTime y) -> bool -*REMOVED*static NpgsqlTypes.NpgsqlDateTime.operator >(NpgsqlTypes.NpgsqlDateTime x, NpgsqlTypes.NpgsqlDateTime y) -> bool -*REMOVED*static NpgsqlTypes.NpgsqlDateTime.operator >=(NpgsqlTypes.NpgsqlDateTime x, NpgsqlTypes.NpgsqlDateTime y) -> bool -*REMOVED*static NpgsqlTypes.NpgsqlDateTime.Parse(string! str) -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*static NpgsqlTypes.NpgsqlDateTime.ToNpgsqlDateTime(System.DateTime dateTime) -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.Compare(NpgsqlTypes.NpgsqlTimeSpan x, NpgsqlTypes.NpgsqlTimeSpan y) -> int -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.explicit operator System.TimeSpan(NpgsqlTypes.NpgsqlTimeSpan interval) -> System.TimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.FromDays(double days) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.FromHours(double hours) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.FromMicroseconds(double micro) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.FromMilliseconds(double milli) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.FromMinutes(double minutes) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.FromMonths(double months) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.FromSeconds(double seconds) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.FromTicks(long ticks) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.implicit operator NpgsqlTypes.NpgsqlTimeSpan(System.TimeSpan timespan) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.operator !=(NpgsqlTypes.NpgsqlTimeSpan 
x, NpgsqlTypes.NpgsqlTimeSpan y) -> bool -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.operator +(NpgsqlTypes.NpgsqlTimeSpan x) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.operator +(NpgsqlTypes.NpgsqlTimeSpan x, NpgsqlTypes.NpgsqlTimeSpan y) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.operator -(NpgsqlTypes.NpgsqlTimeSpan x) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.operator -(NpgsqlTypes.NpgsqlTimeSpan x, NpgsqlTypes.NpgsqlTimeSpan y) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.operator <(NpgsqlTypes.NpgsqlTimeSpan x, NpgsqlTypes.NpgsqlTimeSpan y) -> bool -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.operator <=(NpgsqlTypes.NpgsqlTimeSpan x, NpgsqlTypes.NpgsqlTimeSpan y) -> bool -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.operator ==(NpgsqlTypes.NpgsqlTimeSpan x, NpgsqlTypes.NpgsqlTimeSpan y) -> bool -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.operator >(NpgsqlTypes.NpgsqlTimeSpan x, NpgsqlTypes.NpgsqlTimeSpan y) -> bool -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.operator >=(NpgsqlTypes.NpgsqlTimeSpan x, NpgsqlTypes.NpgsqlTimeSpan y) -> bool -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.Parse(string! str) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.Plus(in NpgsqlTypes.NpgsqlTimeSpan x) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.ToNpgsqlTimeSpan(System.TimeSpan timespan) -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.ToTimeSpan(in NpgsqlTypes.NpgsqlTimeSpan interval) -> System.TimeSpan -*REMOVED*static NpgsqlTypes.NpgsqlTimeSpan.TryParse(string! 
str, out NpgsqlTypes.NpgsqlTimeSpan result) -> bool -*REMOVED*static readonly NpgsqlTypes.NpgsqlDate.Epoch -> NpgsqlTypes.NpgsqlDate -*REMOVED*static readonly NpgsqlTypes.NpgsqlDate.Era -> NpgsqlTypes.NpgsqlDate -*REMOVED*static readonly NpgsqlTypes.NpgsqlDate.Infinity -> NpgsqlTypes.NpgsqlDate -*REMOVED*static readonly NpgsqlTypes.NpgsqlDate.MaxCalculableValue -> NpgsqlTypes.NpgsqlDate -*REMOVED*static readonly NpgsqlTypes.NpgsqlDate.MinCalculableValue -> NpgsqlTypes.NpgsqlDate -*REMOVED*static readonly NpgsqlTypes.NpgsqlDate.NegativeInfinity -> NpgsqlTypes.NpgsqlDate -*REMOVED*static readonly NpgsqlTypes.NpgsqlDateTime.Epoch -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*static readonly NpgsqlTypes.NpgsqlDateTime.Era -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*static readonly NpgsqlTypes.NpgsqlDateTime.Infinity -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*static readonly NpgsqlTypes.NpgsqlDateTime.NegativeInfinity -> NpgsqlTypes.NpgsqlDateTime -*REMOVED*static readonly NpgsqlTypes.NpgsqlTimeSpan.MaxValue -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static readonly NpgsqlTypes.NpgsqlTimeSpan.MinValue -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static readonly NpgsqlTypes.NpgsqlTimeSpan.Zero -> NpgsqlTypes.NpgsqlTimeSpan -*REMOVED*static Npgsql.Replication.Internal.LogicalReplicationConnectionExtensions.CreateLogicalReplicationSlot(this Npgsql.Replication.LogicalReplicationConnection! connection, string! slotName, string! outputPlugin, bool isTemporary = false, Npgsql.Replication.LogicalSlotSnapshotInitMode? slotSnapshotInitMode = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! 
From 55ad6118300071b187bdc5d2840cae59925449de Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Sat, 12 Nov 2022 11:41:12 +0200 Subject: [PATCH 003/761] Fixes to DbDataSource (#4754) Fixes #4752 --- src/Npgsql/NpgsqlDataSource.cs | 25 ++++++++++++------------- src/Npgsql/Shims/DbDataSource.cs | 2 +- test/Npgsql.Tests/DataSourceTests.cs | 17 +++++++++++++++++ 3 files changed, 30 insertions(+), 14 deletions(-) diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index 170c103a6b..754050da02 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -120,15 +120,11 @@ internal NpgsqlDataSource( } } - /// - /// Returns a new, unopened connection from this data source. - /// + /// public new NpgsqlConnection CreateConnection() => NpgsqlConnection.FromDataSource(this); - /// - /// Returns a new, opened connection from this data source. - /// + /// public new NpgsqlConnection OpenConnection() { var connection = CreateConnection(); @@ -145,12 +141,11 @@ internal NpgsqlDataSource( } } - /// - /// Returns a new, opened connection from this data source. - /// - /// - /// An optional token to cancel the asynchronous operation. The default value is . - /// + /// + protected override DbConnection OpenDbConnection() + => OpenConnection(); + + /// public new async ValueTask OpenConnectionAsync(CancellationToken cancellationToken = default) { var connection = CreateConnection(); @@ -167,13 +162,17 @@ internal NpgsqlDataSource( } } + /// + protected override async ValueTask OpenDbConnectionAsync(CancellationToken cancellationToken = default) + => await OpenConnectionAsync(cancellationToken); + /// protected override DbConnection CreateDbConnection() => CreateConnection(); /// protected override DbCommand CreateDbCommand(string? 
commandText = null) - => CreateCommand(); + => CreateCommand(commandText); /// protected override DbBatch CreateDbBatch() diff --git a/src/Npgsql/Shims/DbDataSource.cs b/src/Npgsql/Shims/DbDataSource.cs index fd720bb65b..6951d427fb 100644 --- a/src/Npgsql/Shims/DbDataSource.cs +++ b/src/Npgsql/Shims/DbDataSource.cs @@ -40,7 +40,7 @@ public ValueTask OpenConnectionAsync(CancellationToken cancellatio => OpenDbConnectionAsync(cancellationToken); public DbCommand CreateCommand(string? commandText = null) - => CreateDbCommand(); + => CreateDbCommand(commandText); public DbBatch CreateBatch() => CreateDbBatch(); diff --git a/test/Npgsql.Tests/DataSourceTests.cs b/test/Npgsql.Tests/DataSourceTests.cs index adbed90c0d..10565d4615 100644 --- a/test/Npgsql.Tests/DataSourceTests.cs +++ b/test/Npgsql.Tests/DataSourceTests.cs @@ -1,5 +1,6 @@ using System; using System.Data; +using System.Data.Common; using System.Threading.Tasks; using NUnit.Framework; @@ -243,4 +244,20 @@ public async Task Cannot_get_connection_after_dispose_unpooled([Values] bool asy Assert.That(() => dataSource.OpenConnection(), Throws.Exception.TypeOf()); } } + + [Test] // #4752 + public async Task As_DbDataSource([Values] bool async) + { + await using DbDataSource dataSource = NpgsqlDataSource.Create(ConnectionString); + await using var connection = async + ? await dataSource.OpenConnectionAsync() + : dataSource.OpenConnection(); + Assert.That(connection.State, Is.EqualTo(ConnectionState.Open)); + + await using var command = dataSource.CreateCommand("SELECT 1"); + + Assert.That(async + ? 
await command.ExecuteScalarAsync() + : command.ExecuteScalar(), Is.EqualTo(1)); + } } From b3262989cb5c6f7974cddedede1028d9143450be Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 15 Nov 2022 22:22:43 +0100 Subject: [PATCH 004/761] Bump Microsoft.CodeAnalysis.CSharp from 4.3.1 to 4.4.0 (#4760) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 1eed6bee0c..be07cad736 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -13,7 +13,7 @@ - + From d0b91b10ae2e31a290486ca19c90580df00c43af Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Thu, 17 Nov 2022 09:32:39 +0100 Subject: [PATCH 005/761] Fix thread safety of NetTopologySuiteHandler (#4767) Fixes #4766 --- .../Internal/NetTopologySuiteHandler.cs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteHandler.cs b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteHandler.cs index b1cb3783e1..f75be9f4a7 100644 --- a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteHandler.cs +++ b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteHandler.cs @@ -22,7 +22,6 @@ partial class NetTopologySuiteHandler : NpgsqlTypeHandler, { readonly PostGisReader _reader; readonly PostGisWriter _writer; - readonly LengthStream _lengthStream = new(); internal NetTopologySuiteHandler(PostgresType postgresType, PostGisReader reader, PostGisWriter writer) : base(postgresType) @@ -91,9 +90,10 @@ int INpgsqlTypeHandler.ValidateAndGetLength(GeometryCollecti int ValidateAndGetLengthCore(Geometry value) { - _lengthStream.SetLength(0); - _writer.Write(value, _lengthStream); - return (int)_lengthStream.Length; + var lengthStream = new LengthStream(); + lengthStream.SetLength(0); + _writer.Write(value, lengthStream); + return (int)lengthStream.Length; } sealed class LengthStream : Stream 
From 031a36a496744689615a2515d44b94d28da44b82 Mon Sep 17 00:00:00 2001 From: Brar Piening Date: Thu, 17 Nov 2022 16:06:29 +0100 Subject: [PATCH 006/761] Set a valid activity span name in NpgsqlActivitySource.CommandStart (#4765) Fixes #4757 --- src/Npgsql/Internal/NpgsqlConnector.cs | 51 ++++++++++++++++----- src/Npgsql/KerberosUsernameProvider.cs | 61 ++++++++++++++------------ src/Npgsql/NpgsqlActivitySource.cs | 50 ++++++++++++++++++--- src/Npgsql/NpgsqlCommand.cs | 2 +- 4 files changed, 119 insertions(+), 45 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index cd1ef203bb..ca5d1b6eec 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -96,6 +96,17 @@ public sealed partial class NpgsqlConnector : IDisposable /// internal int BackendProcessId { get; private set; } + string? _inferredUserName; + + /// + /// The user name that has been inferred when the connector was opened + /// + internal string InferredUserName + { + get => _inferredUserName ?? 
throw new InvalidOperationException($"{nameof(InferredUserName)} cannot be accessed before the connector has been opened."); + private set => _inferredUserName = value; + } + bool SupportsPostgresCancellation => BackendProcessId != 0; /// @@ -682,29 +693,47 @@ void WriteStartupMessage(string username) WriteStartup(startupParams); } - async ValueTask GetUsernameAsync(bool async, CancellationToken cancellationToken) + ValueTask GetUsernameAsync(bool async, CancellationToken cancellationToken) { var username = Settings.Username; if (username?.Length > 0) - return username; + { + InferredUserName = username; + return new(username); + } username = PostgresEnvironment.User; if (username?.Length > 0) - return username; + { + InferredUserName = username; + return new(username); + } + + return GetUsernameAsyncInternal(); - if (!RuntimeInformation.IsOSPlatform(OSPlatform.Windows)) + async ValueTask GetUsernameAsyncInternal() { - username = await KerberosUsernameProvider.GetUsernameAsync(Settings.IncludeRealm, ConnectionLogger, async, cancellationToken); + if (!RuntimeInformation.IsOSPlatform(OSPlatform.Windows)) + { + username = await KerberosUsernameProvider.GetUsernameAsync(Settings.IncludeRealm, ConnectionLogger, async, + cancellationToken); + + if (username?.Length > 0) + { + InferredUserName = username; + return username; + } + } + username = Environment.UserName; if (username?.Length > 0) + { + InferredUserName = username; return username; - } - - username = Environment.UserName; - if (username?.Length > 0) - return username; + } - throw new NpgsqlException("No username could be found, please specify one explicitly"); + throw new NpgsqlException("No username could be found, please specify one explicitly"); + } } async Task RawOpen(SslMode sslMode, NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken, bool isFirstAttempt = true) diff --git a/src/Npgsql/KerberosUsernameProvider.cs b/src/Npgsql/KerberosUsernameProvider.cs index e2342775dd..63cc42fb88 
100644 --- a/src/Npgsql/KerberosUsernameProvider.cs +++ b/src/Npgsql/KerberosUsernameProvider.cs @@ -18,19 +18,16 @@ sealed class KerberosUsernameProvider static string? _principalWithRealm; static string? _principalWithoutRealm; -#pragma warning disable CS1998 - internal static async ValueTask GetUsernameAsync(bool includeRealm, ILogger connectionLogger, bool async, CancellationToken cancellationToken) -#pragma warning restore CS1998 + internal static ValueTask GetUsernameAsync(bool includeRealm, ILogger connectionLogger, bool async, CancellationToken cancellationToken) { if (_performedDetection) - return includeRealm ? _principalWithRealm : _principalWithoutRealm; + return new(includeRealm ? _principalWithRealm : _principalWithoutRealm); var klistPath = FindInPath("klist"); if (klistPath == null) { connectionLogger.LogDebug("klist not found in PATH, skipping Kerberos username detection"); - return null; + return new((string?)null); } - var processStartInfo = new ProcessStartInfo { FileName = klistPath, @@ -38,46 +35,54 @@ sealed class KerberosUsernameProvider RedirectStandardError = true, UseShellExecute = false }; + var process = Process.Start(processStartInfo); if (process is null) { connectionLogger.LogDebug("klist process could not be started"); - return null; + return new((string?)null); } + return GetUsernameAsyncInternal(); + +#pragma warning disable CS1998 + async ValueTask GetUsernameAsyncInternal() +#pragma warning restore CS1998 + { #if NET5_0_OR_GREATER - if (async) - await process.WaitForExitAsync(cancellationToken); - else - // ReSharper disable once MethodHasAsyncOverloadWithCancellation - process.WaitForExit(); + if (async) + await process.WaitForExitAsync(cancellationToken); + else + // ReSharper disable once MethodHasAsyncOverloadWithCancellation + process.WaitForExit(); #else // ReSharper disable once MethodHasAsyncOverload process.WaitForExit(); #endif - if (process.ExitCode != 0) - { - connectionLogger.LogDebug($"klist exited with code 
{process.ExitCode}: {process.StandardError.ReadToEnd()}"); - return null; - } + if (process.ExitCode != 0) + { + connectionLogger.LogDebug($"klist exited with code {process.ExitCode}: {process.StandardError.ReadToEnd()}"); + return null; + } - var line = default(string); - for (var i = 0; i < 2; i++) - // ReSharper disable once MethodHasAsyncOverload + var line = default(string); + for (var i = 0; i < 2; i++) + // ReSharper disable once MethodHasAsyncOverload #if NET7_0_OR_GREATER - if ((line = async ? await process.StandardOutput.ReadLineAsync(cancellationToken) : process.StandardOutput.ReadLine()) == null) + if ((line = async ? await process.StandardOutput.ReadLineAsync(cancellationToken) : process.StandardOutput.ReadLine()) == null) #elif NET5_0_OR_GREATER - if ((line = async ? await process.StandardOutput.ReadLineAsync() : process.StandardOutput.ReadLine()) == null) + if ((line = async ? await process.StandardOutput.ReadLineAsync() : process.StandardOutput.ReadLine()) == null) #else - if ((line = process.StandardOutput.ReadLine()) == null) + if ((line = process.StandardOutput.ReadLine()) == null) #endif - { - connectionLogger.LogDebug("Unexpected output from klist, aborting Kerberos username detection"); - return null; - } + { + connectionLogger.LogDebug("Unexpected output from klist, aborting Kerberos username detection"); + return null; + } - return ParseKListOutput(line!, includeRealm, connectionLogger); + return ParseKListOutput(line!, includeRealm, connectionLogger); + } } static string? 
ParseKListOutput(string line, bool includeRealm, ILogger connectionLogger) diff --git a/src/Npgsql/NpgsqlActivitySource.cs b/src/Npgsql/NpgsqlActivitySource.cs index 002cf4a638..ae6f46956d 100644 --- a/src/Npgsql/NpgsqlActivitySource.cs +++ b/src/Npgsql/NpgsqlActivitySource.cs @@ -1,5 +1,6 @@ using Npgsql.Internal; using System; +using System.Data; using System.Diagnostics; using System.Net; using System.Net.Sockets; @@ -20,19 +21,58 @@ static NpgsqlActivitySource() internal static bool IsEnabled => Source.HasListeners(); - internal static Activity? CommandStart(NpgsqlConnector connector, string sql) + internal static Activity? CommandStart(NpgsqlConnector connector, string commandText, CommandType commandType) { var settings = connector.Settings; - var activity = Source.StartActivity(settings.Database!, ActivityKind.Client); + + var dbName = settings.Database ?? connector.InferredUserName; + string? dbOperation = null; + string? dbSqlTable = null; + string activityName; + switch (commandType) + { + case CommandType.StoredProcedure: + dbOperation = NpgsqlCommand.EnableStoredProcedureCompatMode ? "SELECT" : "CALL"; + // In this case our activity name follows the concept of the CommandType.TableDirect case + // (" .") but replaces db.sql.table with the procedure name + // which seems to match the spec's intent without being explicitly specified that way (it suggests + // using the procedure name but doesn't mention using db.operation or db.name in that case). + activityName = $"{dbOperation} {dbName}.{commandText}"; + break; + case CommandType.TableDirect: + dbOperation = "SELECT"; + // The OpenTelemetry spec actually asks to include the database name into db.sql.table + // but then again mixes the concept of database and schema. + // As I interpret it, it actually wants db.sql.table to include the schema name and not the + // database name if the concept of schemas exists in the database system. 
+ // This also makes sense in the context of the activity name which otherwise would include the + // database name twice. + dbSqlTable = commandText; + activityName = $"{dbOperation} {dbName}.{dbSqlTable}"; + break; + case CommandType.Text: + activityName = dbName; + break; + default: + throw new ArgumentOutOfRangeException(nameof(commandType), commandType, null); + } + + var activity = Source.StartActivity(activityName, ActivityKind.Client); if (activity is not { IsAllDataRequested: true }) return activity; activity.SetTag("db.system", "postgresql"); activity.SetTag("db.connection_string", connector.UserFacingConnectionString); - activity.SetTag("db.user", settings.Username); - activity.SetTag("db.name", settings.Database); - activity.SetTag("db.statement", sql); + activity.SetTag("db.user", connector.InferredUserName); + // We trace the actual (maybe inferred) database name we're connected to, even if it + // wasn't specified in the connection string + activity.SetTag("db.name", dbName); + activity.SetTag("db.statement", commandText); activity.SetTag("db.connection_id", connector.Id); + if (dbOperation != null) + activity.SetTag("db.operation", dbOperation); + if (dbSqlTable != null) + activity.SetTag("db.sql.table", dbSqlTable); var endPoint = connector.ConnectedEndPoint; Debug.Assert(endPoint is not null); diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index e3d07c3ae9..c32a675b7f 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -1609,7 +1609,7 @@ internal void TraceCommandStart(NpgsqlConnector connector) { Debug.Assert(CurrentActivity is null); if (NpgsqlActivitySource.IsEnabled) - CurrentActivity = NpgsqlActivitySource.CommandStart(connector, CommandText); + CurrentActivity = NpgsqlActivitySource.CommandStart(connector, CommandText, CommandType); } internal void TraceReceivedFirstResponse() From cfad1c073e3ca9ad88a72cbe22431899bacc237c Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Thu, 17 Nov 2022 
18:40:03 +0300 Subject: [PATCH 007/761] Fix Unique_constraint test flakiness (#4768) --- test/Npgsql.Tests/SchemaTests.cs | 13 +++++++++---- test/Npgsql.Tests/SystemTransactionTests.cs | 2 +- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/test/Npgsql.Tests/SchemaTests.cs b/test/Npgsql.Tests/SchemaTests.cs index 83f9e859c6..5deee67a0d 100644 --- a/test/Npgsql.Tests/SchemaTests.cs +++ b/test/Npgsql.Tests/SchemaTests.cs @@ -380,11 +380,16 @@ public async Task Unique_constraint() Assert.That(columns.All(r => r["table_name"].Equals(table))); Assert.That(columns.All(r => r["constraint_type"].Equals("UNIQUE KEY"))); - Assert.That(columns[0]["column_name"], Is.EqualTo("f1")); - Assert.That(columns[0]["ordinal_number"], Is.EqualTo(1)); + Assert.That(columns.Count, Is.EqualTo(2)); - Assert.That(columns[1]["column_name"], Is.EqualTo("f2")); - Assert.That(columns[1]["ordinal_number"], Is.EqualTo(2)); + // Columns are not necessarily in the correct order + var firstColumn = columns.FirstOrDefault(x => (string)x["column_name"] == "f1")!; + Assert.NotNull(firstColumn); + Assert.That(firstColumn["ordinal_number"], Is.EqualTo(1)); + + var secondColumn = columns.FirstOrDefault(x => (string)x["column_name"] == "f2")!; + Assert.NotNull(secondColumn); + Assert.That(secondColumn["ordinal_number"], Is.EqualTo(2)); } [Test] diff --git a/test/Npgsql.Tests/SystemTransactionTests.cs b/test/Npgsql.Tests/SystemTransactionTests.cs index 27a9d057e1..ae8385e535 100644 --- a/test/Npgsql.Tests/SystemTransactionTests.cs +++ b/test/Npgsql.Tests/SystemTransactionTests.cs @@ -35,7 +35,7 @@ public void Explicit_enlist() [Test, Description("Single connection enlisting implicitly, committing")] public void Implicit_enlist() { - var conn = new NpgsqlConnection(ConnectionStringEnlistOn); + using var conn = new NpgsqlConnection(ConnectionStringEnlistOn); using (var scope = new TransactionScope()) { conn.Open(); From a06d63078d712a296c85d102ea7036ce815c255f Mon Sep 17 00:00:00 2001 From: 
Nikita Kazmin Date: Thu, 17 Nov 2022 18:56:03 +0300 Subject: [PATCH 008/761] Add support for writing json as ReadOnlyMemory (#4769) Closes #4748 --- src/Npgsql/Internal/NpgsqlWriteBuffer.cs | 11 ++++++++--- .../Internal/TypeHandlers/JsonHandler.cs | 10 ++++++++-- .../Internal/TypeHandlers/TextHandler.cs | 18 +++++++++++++++--- test/Npgsql.Tests/Types/JsonTests.cs | 4 ++++ test/Npgsql.Tests/Types/TextTests.cs | 5 +++++ 5 files changed, 40 insertions(+), 8 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs index 1a89cff985..d7586270bb 100644 --- a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs @@ -440,10 +440,15 @@ public void WriteBytes(ReadOnlySpan buf) WritePosition += buf.Length; } + public void WriteBytes(ReadOnlyMemory buf) + => WriteBytes(buf.Span); + + public void WriteBytes(byte[] buf) => WriteBytes(buf.AsSpan()); + public void WriteBytes(byte[] buf, int offset, int count) => WriteBytes(new ReadOnlySpan(buf, offset, count)); - public Task WriteBytesRaw(byte[] bytes, bool async, CancellationToken cancellationToken = default) + public Task WriteBytesRaw(ReadOnlyMemory bytes, bool async, CancellationToken cancellationToken = default) { if (bytes.Length <= WriteSpaceLeft) { @@ -452,7 +457,7 @@ public Task WriteBytesRaw(byte[] bytes, bool async, CancellationToken cancellati } return WriteBytesLong(this, async, bytes, cancellationToken); - static async Task WriteBytesLong(NpgsqlWriteBuffer buffer, bool async, byte[] bytes, CancellationToken cancellationToken) + static async Task WriteBytesLong(NpgsqlWriteBuffer buffer, bool async, ReadOnlyMemory bytes, CancellationToken cancellationToken) { if (bytes.Length <= buffer.Size) { @@ -470,7 +475,7 @@ static async Task WriteBytesLong(NpgsqlWriteBuffer buffer, bool async, byte[] by await buffer.Flush(async, cancellationToken); var writeLen = Math.Min(remaining, buffer.WriteSpaceLeft); var offset = bytes.Length - remaining; 
- buffer.WriteBytes(bytes, offset, writeLen); + buffer.WriteBytes(bytes.Slice(offset, writeLen)); remaining -= writeLen; } while (remaining > 0); diff --git a/src/Npgsql/Internal/TypeHandlers/JsonHandler.cs b/src/Npgsql/Internal/TypeHandlers/JsonHandler.cs index 6ccd11fc0a..6dbcf942b1 100644 --- a/src/Npgsql/Internal/TypeHandlers/JsonHandler.cs +++ b/src/Npgsql/Internal/TypeHandlers/JsonHandler.cs @@ -54,7 +54,8 @@ protected internal override int ValidateAndGetLengthCustom([DisallowNull] typeof(TAny) == typeof(char[]) || typeof(TAny) == typeof(ArraySegment) || typeof(TAny) == typeof(char) || - typeof(TAny) == typeof(byte[])) + typeof(TAny) == typeof(byte[]) || + typeof(TAny) == typeof(ReadOnlyMemory)) { return _textHandler.ValidateAndGetLength(value, ref lengthCache, parameter) + _headerLen; } @@ -102,6 +103,8 @@ protected override async Task WriteWithLengthCustom([DisallowNull] TAny va await _textHandler.Write((char)(object)value!, buf, lengthCache, parameter, async, cancellationToken); else if (typeof(TAny) == typeof(byte[])) await _textHandler.Write((byte[])(object)value!, buf, lengthCache, parameter, async, cancellationToken); + else if (typeof(TAny) == typeof(ReadOnlyMemory)) + await _textHandler.Write((ReadOnlyMemory)(object)value!, buf, lengthCache, parameter, async, cancellationToken); else if (typeof(TAny) == typeof(JsonDocument)) { var data = parameter?.ConvertedValue != null @@ -146,6 +149,7 @@ public override int ValidateObjectAndGetLength(object value, ref NpgsqlLengthCac ArraySegment s => ValidateAndGetLength(s, ref lengthCache, parameter), char s => ValidateAndGetLength(s, ref lengthCache, parameter), byte[] s => ValidateAndGetLength(s, ref lengthCache, parameter), + ReadOnlyMemory s => ValidateAndGetLength(s, ref lengthCache, parameter), JsonDocument jsonDocument => ValidateAndGetLength(jsonDocument, ref lengthCache, parameter), _ => ValidateAndGetLength(value, ref lengthCache, parameter) }; @@ -166,6 +170,7 @@ public override async Task 
WriteObjectWithLength(object? value, NpgsqlWriteBuffe ArraySegment s => WriteWithLengthCustom(s, buf, lengthCache, parameter, async, cancellationToken), char s => WriteWithLengthCustom(s, buf, lengthCache, parameter, async, cancellationToken), byte[] s => WriteWithLengthCustom(s, buf, lengthCache, parameter, async, cancellationToken), + ReadOnlyMemory s => WriteWithLengthCustom(s, buf, lengthCache, parameter, async, cancellationToken), JsonDocument jsonDocument => WriteWithLengthCustom(jsonDocument, buf, lengthCache, parameter, async, cancellationToken), _ => WriteWithLengthCustom(value, buf, lengthCache, parameter, async, cancellationToken), }); @@ -187,7 +192,8 @@ protected internal override async ValueTask ReadCustom(NpgsqlReadBuffer bu typeof(T) == typeof(char[]) || typeof(T) == typeof(ArraySegment) || typeof(T) == typeof(char) || - typeof(T) == typeof(byte[])) + typeof(T) == typeof(byte[]) || + typeof(T) == typeof(ReadOnlyMemory)) { return await _textHandler.Read(buf, byteLen, async, fieldDescription); } diff --git a/src/Npgsql/Internal/TypeHandlers/TextHandler.cs b/src/Npgsql/Internal/TypeHandlers/TextHandler.cs index e3c5f957d4..a85ccb6e1d 100644 --- a/src/Npgsql/Internal/TypeHandlers/TextHandler.cs +++ b/src/Npgsql/Internal/TypeHandlers/TextHandler.cs @@ -23,7 +23,7 @@ namespace Npgsql.Internal.TypeHandlers; /// Use it at your own risk. /// public partial class TextHandler : NpgsqlTypeHandler, INpgsqlTypeHandler, INpgsqlTypeHandler>, - INpgsqlTypeHandler, INpgsqlTypeHandler, ITextReaderHandler + INpgsqlTypeHandler, INpgsqlTypeHandler, INpgsqlTypeHandler>, ITextReaderHandler { // Text types are handled a bit more efficiently when sent as text than as binary // see https://github.com/npgsql/npgsql/issues/1210#issuecomment-235641670 @@ -182,6 +182,9 @@ static async ValueTask ReadLong(NpgsqlReadBuffer buf, byte[] bytes, int return bytes; } } + + ValueTask> INpgsqlTypeHandler>.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription) + => throw new NotSupportedException("Only writing ReadOnlyMemory to PostgreSQL text is supported, no reading."); #endregion @@ -244,6 +247,10 @@ public int ValidateAndGetLength(char value, ref NpgsqlLengthCache? lengthCache, public int ValidateAndGetLength(byte[] value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) => value.Length; + /// + public int ValidateAndGetLength(ReadOnlyMemory value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) + => value.Length; + /// public override Task Write(string value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) => WriteString(value, buf, lengthCache!, parameter, async, cancellationToken); @@ -290,8 +297,13 @@ static unsafe void WriteCharCore(char value, NpgsqlWriteBuffer buf) } } - /// - public Task Write(byte[] value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) + + public Task Write(byte[] value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, + CancellationToken cancellationToken = default) + => buf.WriteBytesRaw(value, async, cancellationToken); + + public Task Write(ReadOnlyMemory value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, + CancellationToken cancellationToken = default) => buf.WriteBytesRaw(value, async, cancellationToken); #endregion diff --git a/test/Npgsql.Tests/Types/JsonTests.cs b/test/Npgsql.Tests/Types/JsonTests.cs index c2a309d4ff..bb55b734eb 100644 --- a/test/Npgsql.Tests/Types/JsonTests.cs +++ b/test/Npgsql.Tests/Types/JsonTests.cs @@ -50,6 +50,10 @@ public async Task As_char_array() public async Task As_bytes() => await AssertType(Encoding.ASCII.GetBytes(@"{""K"": ""V""}"), @"{""K"": ""V""}", PostgresType, NpgsqlDbType, isDefault: false); + [Test] + public async Task Write_as_ReadOnlyMemory_of_byte() + => await AssertTypeWrite(new ReadOnlyMemory(Encoding.ASCII.GetBytes(@"{""K"": ""V""}")), @"{""K"": ""V""}", PostgresType, NpgsqlDbType, isDefault: false); + [Test] public async Task Write_as_ArraySegment_of_char() => await AssertTypeWrite( diff --git a/test/Npgsql.Tests/Types/TextTests.cs b/test/Npgsql.Tests/Types/TextTests.cs index e787122e7d..aa2e7d69a3 100644 --- a/test/Npgsql.Tests/Types/TextTests.cs +++ b/test/Npgsql.Tests/Types/TextTests.cs @@ -33,6 +33,11 @@ public Task Text_as_ArraySegment_of_chars() public Task Text_as_array_of_bytes() => AssertType(Encoding.UTF8.GetBytes("foo"), "foo", "text", NpgsqlDbType.Text, DbType.String, isDefault: false); + [Test] + public Task Text_as_ReadOnlyMemory_of_bytes() + => AssertTypeWrite(new ReadOnlyMemory(Encoding.UTF8.GetBytes("foo")), "foo", "text", NpgsqlDbType.Text, DbType.String, + isDefault: false); + [Test] public Task Char_as_char() => AssertType('f', "f", "character", NpgsqlDbType.Char, inferredDbType: DbType.String, isDefault: false); From ae60e37ca2a14afcc3f912975dd1f71ad37d43ae Mon Sep 17 00:00:00 2001 From: jam40jeff Date: Fri, 18 Nov 2022 11:55:56 -0500 Subject: [PATCH 009/761] Update CommandCompleteMessage.cs (#4772) Fix ParseNumber to use ulong internally. 
--- src/Npgsql/BackendMessages/CommandCompleteMessage.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Npgsql/BackendMessages/CommandCompleteMessage.cs b/src/Npgsql/BackendMessages/CommandCompleteMessage.cs index 6d9800a27f..63080052cb 100644 --- a/src/Npgsql/BackendMessages/CommandCompleteMessage.cs +++ b/src/Npgsql/BackendMessages/CommandCompleteMessage.cs @@ -111,7 +111,7 @@ static bool AreEqual(byte[] bytes, int pos, string s) static ulong ParseNumber(byte[] bytes, ref int pos) { Debug.Assert(bytes[pos] >= '0' && bytes[pos] <= '9'); - uint result = 0; + ulong result = 0; do { result = result * 10 + bytes[pos++] - '0'; From 341d7fc43417cb7ceb0a858a3e79e1c82158143b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 18 Nov 2022 22:44:52 +0100 Subject: [PATCH 010/761] Bump Scriban.Signed from 5.5.0 to 5.5.1 (#4775) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index be07cad736..7b25d48cbc 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -15,7 +15,7 @@ - + From d8c94dbc674fc55830e23813de0e51a88fc35247 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 21 Nov 2022 23:28:32 +0100 Subject: [PATCH 011/761] Bump NUnit3TestAdapter from 4.3.0 to 4.3.1 (#4779) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 7b25d48cbc..63de136aee 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -30,7 +30,7 @@ - + From 5fa455a655e163ee46717c463268383fcfa5c882 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 25 Nov 2022 10:58:01 +0100 Subject: [PATCH 012/761] Bump Newtonsoft.Json from 13.0.1 to 13.0.2 (#4785) --- Directory.Packages.props | 
2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 63de136aee..c84bdf09c5 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -22,7 +22,7 @@ - + From 011e9a5921e0bc9250e7458e7723db641cb99dc3 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Fri, 25 Nov 2022 13:26:53 +0300 Subject: [PATCH 013/761] Fix linux postgresql ci (#4786) --- .github/workflows/build.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index fe542161a2..d635b761d7 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -80,6 +80,9 @@ jobs: # First uninstall any PostgreSQL installed on the image dpkg-query -W --showformat='${Package}\n' 'postgresql-*' | xargs sudo dpkg -P postgresql + # Import the repository signing key + wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - + sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt/ jammy-pgdg main ${{ matrix.pg_major }}" >> /etc/apt/sources.list.d/pgdg.list' sudo apt-get update -qq sudo apt-get install -qq postgresql-${{ matrix.pg_major }} From 941db8044ebe5c96be233a3ae94ba94bdb9c6809 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Sat, 26 Nov 2022 12:05:16 +0100 Subject: [PATCH 014/761] Obsolete IntegratedSecurity (#4790) Closes #4789 --- src/Npgsql/Internal/NpgsqlConnector.Auth.cs | 3 -- .../Internal/NpgsqlConnector.OldAuth.cs | 3 -- src/Npgsql/Internal/NpgsqlConnector.cs | 1 - src/Npgsql/NpgsqlConnection.cs | 1 + src/Npgsql/NpgsqlConnectionStringBuilder.cs | 45 +++++++++---------- test/Npgsql.Tests/AuthenticationTests.cs | 10 +++-- test/Npgsql.Tests/SecurityTests.cs | 6 +-- 7 files changed, 30 insertions(+), 39 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs index 02a8890dde..09c92bd9c9 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs +++ 
b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs @@ -287,9 +287,6 @@ async Task AuthenticateMD5(string username, byte[] salt, bool async, Cancellatio #if NET7_0_OR_GREATER async Task AuthenticateGSS(bool async) { - if (!IntegratedSecurity) - throw new NpgsqlException("GSS/SSPI authentication but IntegratedSecurity not enabled"); - var targetName = $"{KerberosServiceName}/{Host}"; using var authContext = new NegotiateAuthentication(new NegotiateAuthenticationClientOptions{ TargetName = targetName}); diff --git a/src/Npgsql/Internal/NpgsqlConnector.OldAuth.cs b/src/Npgsql/Internal/NpgsqlConnector.OldAuth.cs index 9b8afffadd..9cb30d47f7 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.OldAuth.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.OldAuth.cs @@ -40,9 +40,6 @@ static byte[] Hi(string str, byte[] salt, int count) #if !NET7_0_OR_GREATER async Task AuthenticateGSS(bool async) { - if (!IntegratedSecurity) - throw new NpgsqlException("GSS/SSPI authentication but IntegratedSecurity not enabled"); - var targetName = $"{KerberosServiceName}/{Host}"; using var negotiateStream = new NegotiateStream(new GSSPasswordMessageStream(this), true); diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index ca5d1b6eec..3e4c0c0ba8 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -392,7 +392,6 @@ internal NpgsqlConnector(NpgsqlDataSource dataSource, NpgsqlConnection conn) internal string Database => Settings.Database!; string KerberosServiceName => Settings.KerberosServiceName; int ConnectionTimeout => Settings.Timeout; - bool IntegratedSecurity => Settings.IntegratedSecurity; /// /// The actual command timeout value that gets set on internal commands. 
diff --git a/src/Npgsql/NpgsqlConnection.cs b/src/Npgsql/NpgsqlConnection.cs index a14b07ee7e..2bdd5d566a 100644 --- a/src/Npgsql/NpgsqlConnection.cs +++ b/src/Npgsql/NpgsqlConnection.cs @@ -456,6 +456,7 @@ public override string ConnectionString /// /// Whether to use Windows integrated security to log in. /// + [Obsolete("The IntegratedSecurity parameter is no longer needed and does nothing.")] public bool IntegratedSecurity => Settings.IntegratedSecurity; /// diff --git a/src/Npgsql/NpgsqlConnectionStringBuilder.cs b/src/Npgsql/NpgsqlConnectionStringBuilder.cs index 26589cef52..6844db06cf 100644 --- a/src/Npgsql/NpgsqlConnectionStringBuilder.cs +++ b/src/Npgsql/NpgsqlConnectionStringBuilder.cs @@ -262,7 +262,7 @@ public string? Database string? _database; /// - /// The username to connect with. Not required if using IntegratedSecurity. + /// The username to connect with. Not required if using GSS/SSPI/Kerberos. /// [Category("Connection")] [Description("The username to connect with. Not required if using IntegratedSecurity.")] @@ -280,7 +280,7 @@ public string? Username string? _username; /// - /// The password to connect with. Not required if using IntegratedSecurity. + /// The password to connect with. Not required if using GSS/SSPI/Kerberos. /// [Category("Connection")] [Description("The password to connect with. Not required if using IntegratedSecurity.")] @@ -559,28 +559,6 @@ public bool CheckCertificateRevocation } bool _checkCertificateRevocation; - /// - /// Whether to use Windows integrated security to log in. 
- /// - [Category("Security")] - [Description("Whether to use Windows integrated security to log in.")] - [DisplayName("Integrated Security")] - [NpgsqlConnectionStringProperty] - public bool IntegratedSecurity - { - get => _integratedSecurity; - set - { - // No integrated security if we're on mono and .NET 4.5 because of ClaimsIdentity, - // see https://github.com/npgsql/Npgsql/issues/133 - if (value && Type.GetType("Mono.Runtime") != null) - throw new NotSupportedException("IntegratedSecurity is currently unsupported on mono and .NET 4.5 (see https://github.com/npgsql/Npgsql/issues/133)"); - _integratedSecurity = value; - SetValue(nameof(IntegratedSecurity), value); - } - } - bool _integratedSecurity; - /// /// The Kerberos service name to be used for authentication. /// @@ -1416,6 +1394,25 @@ public ServerCompatibilityMode ServerCompatibilityMode #region Properties - Obsolete + /// + /// Whether to use Windows integrated security to log in. + /// + [Category("Security")] + [Description("Whether to use Windows integrated security to log in.")] + [DisplayName("Integrated Security")] + [NpgsqlConnectionStringProperty] + [Obsolete("The IntegratedSecurity parameter is no longer needed and does nothing.")] + public bool IntegratedSecurity + { + get => _integratedSecurity; + set + { + _integratedSecurity = value; + SetValue(nameof(IntegratedSecurity), value); + } + } + bool _integratedSecurity; + /// /// Obsolete, see https://www.npgsql.org/doc/release-notes/6.0.html /// diff --git a/test/Npgsql.Tests/AuthenticationTests.cs b/test/Npgsql.Tests/AuthenticationTests.cs index 5231ff03c0..c26b665a34 100644 --- a/test/Npgsql.Tests/AuthenticationTests.cs +++ b/test/Npgsql.Tests/AuthenticationTests.cs @@ -18,7 +18,7 @@ public class AuthenticationTests : MultiplexingTestBase [NonParallelizable] // Sets environment variable public async Task Connect_UserNameFromEnvironment_Succeeds() { - var builder = new NpgsqlConnectionStringBuilder(ConnectionString) { IntegratedSecurity = 
false }; + var builder = new NpgsqlConnectionStringBuilder(ConnectionString); using var _ = SetEnvironmentVariable("PGUSER", builder.Username); builder.Username = null; using var __ = CreateTempPool(builder.ConnectionString, out var connectionString); @@ -29,7 +29,7 @@ public async Task Connect_UserNameFromEnvironment_Succeeds() [NonParallelizable] // Sets environment variable public async Task Connect_PasswordFromEnvironment_Succeeds() { - var builder = new NpgsqlConnectionStringBuilder(ConnectionString) { IntegratedSecurity = false }; + var builder = new NpgsqlConnectionStringBuilder(ConnectionString); using var _ = SetEnvironmentVariable("PGPASSWORD", builder.Password); builder.Password = null; using var __ = CreateTempPool(builder.ConnectionString, out var connectionString); @@ -279,7 +279,6 @@ Func OpenConnection(string? password, string? passFile) => async () = { builder.Password = password; builder.Passfile = passFile; - builder.IntegratedSecurity = false; builder.ApplicationName = $"{nameof(Password_source_precedence)}:{Guid.NewGuid()}"; using var pool = CreateTempPool(builder.ConnectionString, out var connectionString); @@ -341,7 +340,10 @@ public void Pool_by_password() public async Task AuthenticateIntegratedSecurity() { await using var dataSource = NpgsqlDataSource.Create(new NpgsqlConnectionStringBuilder(ConnectionString) - { IntegratedSecurity = true, Username = null, Password = null }); + { + Username = null, + Password = null + }); await using var c = await dataSource.OpenConnectionAsync(); Assert.That(c.State, Is.EqualTo(ConnectionState.Open)); } diff --git a/test/Npgsql.Tests/SecurityTests.cs b/test/Npgsql.Tests/SecurityTests.cs index a3240bde86..9f93caa36d 100644 --- a/test/Npgsql.Tests/SecurityTests.cs +++ b/test/Npgsql.Tests/SecurityTests.cs @@ -89,8 +89,8 @@ public void IntegratedSecurity_with_Username() if (username == null) throw new Exception("Could find username"); - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) { - 
IntegratedSecurity = true, + var connString = new NpgsqlConnectionStringBuilder(ConnectionString) + { Username = username, Password = null }.ToString(); @@ -113,7 +113,6 @@ public void IntegratedSecurity_without_Username() { var connString = new NpgsqlConnectionStringBuilder(ConnectionString) { - IntegratedSecurity = true, Username = null, Password = null }.ToString(); @@ -136,7 +135,6 @@ public void Connection_database_is_populated_on_Open() { var connString = new NpgsqlConnectionStringBuilder(ConnectionString) { - IntegratedSecurity = true, Username = null, Password = null, Database = null From 1c2e67c2f1a364d0a8f470b8e58c7f237f328581 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 29 Nov 2022 22:59:20 +0100 Subject: [PATCH 015/761] Bump NodaTime from 3.1.5 to 3.1.6 (#4794) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index c84bdf09c5..204f78fcb0 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -20,7 +20,7 @@ - + From dd14e4f048e422a9da2012e1b79f294d2f6107de Mon Sep 17 00:00:00 2001 From: Brar Piening Date: Sat, 3 Dec 2022 16:45:22 +0100 Subject: [PATCH 016/761] Remove package reference for System.Runtime.CompilerServices.Unsafe (#4797) This removes the package reference for System.Runtime.CompilerServices.Unsafe from TFM net7.0 and above. 
When testing, Npgsql actually built perfectly even when removing the package reference for all TFMs but this doesn't feel safe for now and also is not what is suggested by the documentation at: https://learn.microsoft.com/en-us/dotnet/core/compatibility/core-libraries/7.0/unsafe-package --- src/Npgsql/Npgsql.csproj | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/Npgsql/Npgsql.csproj b/src/Npgsql/Npgsql.csproj index 368e04ca57..7dd3999425 100644 --- a/src/Npgsql/Npgsql.csproj +++ b/src/Npgsql/Npgsql.csproj @@ -13,7 +13,6 @@ - @@ -38,6 +37,10 @@ + + + + From 6744638fcd12c48aebf61fd01b02f96b7405e8d8 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Wed, 7 Dec 2022 15:57:26 +0100 Subject: [PATCH 017/761] Update comment around reader waiting on the send task See https://github.com/npgsql/npgsql/issues/4804#issuecomment-1341052345 --- src/Npgsql/NpgsqlDataReader.cs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index c6fc499720..2b87c8f042 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -1155,9 +1155,9 @@ internal async Task Cleanup(bool async, bool connectionClosing = false, bool isD // If multiplexing isn't on, _sendTask contains the task for the writing of this command. // Make sure that this task, which may have executed asynchronously and in parallel with the reading, - // has completed, throwing any exceptions it generated. - // Note: if the following is removed, mysterious concurrent connection usage errors start happening - // on .NET Framework. + // has completed, throwing any exceptions it generated. If we don't do this, there's the possibility of a race condition where the + // user executes a new command after reader.Dispose() returns, but some additional write stuff is still finishing up from the last + // command. 
if (_sendTask != null) { try From b73f42e43a278d93a514ca09b1118119a614e88a Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Thu, 8 Dec 2022 14:21:28 +0300 Subject: [PATCH 018/761] Disable rich code navigations because of failures https://github.com/microsoft/RichCodeNavIndexer/issues/128 --- .github/workflows/rich-code-nav.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/rich-code-nav.yml b/.github/workflows/rich-code-nav.yml index 118c2e3e28..7b1c7588ae 100644 --- a/.github/workflows/rich-code-nav.yml +++ b/.github/workflows/rich-code-nav.yml @@ -14,6 +14,7 @@ env: jobs: build: + if: ${{ false }} # disable as it's failing, see https://github.com/microsoft/RichCodeNavIndexer/issues/128 runs-on: windows-latest steps: From b96d2cb4862e3638a1ba867e030d2551aa55fe7e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emmanuel=20Andr=C3=A9?= <2341261+manandre@users.noreply.github.com> Date: Mon, 12 Dec 2022 13:34:17 +0100 Subject: [PATCH 019/761] Move devcontainer to .NET 7 (#4811) * Move devcontainer to .NET 7 * Fix deprecated terminal configuration --- .devcontainer/devcontainer.json | 11 ++++++++--- .devcontainer/docker-compose.yml | 2 +- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index fbfe8dbc51..69115b9ad3 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -6,10 +6,15 @@ "workspaceFolder": "/workspace", "settings": { - "terminal.integrated.shell.linux": "/bin/bash", + "terminal.integrated.profiles.linux": { + "bash": { + "path": "/bin/bash" + } + }, + "terminal.integrated.defaultProfile.linux": "bash", "remote.extensionKind": { "ms-azuretools.vscode-docker": "workspace" - }, + } }, "extensions": [ @@ -22,7 +27,7 @@ "forwardPorts": [5432, 5050], "remoteEnv": { - "DeveloperBuild": "True", + "DeveloperBuild": "True" }, "postCreateCommand": "dotnet restore Npgsql.sln" diff --git a/.devcontainer/docker-compose.yml 
b/.devcontainer/docker-compose.yml index 7fa06cb7ea..4628935113 100644 --- a/.devcontainer/docker-compose.yml +++ b/.devcontainer/docker-compose.yml @@ -2,7 +2,7 @@ version: '3' services: npgsql-dev: - image: mcr.microsoft.com/dotnet/sdk:6.0 + image: mcr.microsoft.com/dotnet/sdk:7.0 volumes: - ..:/workspace:cached tty: true From fdbdf59a9aa6bddf3d031c86ddc029559bf58413 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 13 Dec 2022 21:14:27 +0000 Subject: [PATCH 020/761] Bump System.Text.Json from 7.0.0 to 7.0.1 (#4814) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 204f78fcb0..7bc6a963dd 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -5,7 +5,7 @@ - + From da39537cd1ba3bf8c6d12a4a73ee78623c3f38bc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 17 Dec 2022 00:38:55 +0100 Subject: [PATCH 021/761] Bump Microsoft.NET.Test.Sdk from 17.4.0 to 17.4.1 (#4819) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 7bc6a963dd..2027b14d58 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -29,7 +29,7 @@ - + From 46adc6282931e2d6a16ef24e0817fa171c7fe8d0 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Sat, 17 Dec 2022 11:52:57 +0300 Subject: [PATCH 022/761] Fix deadlock from concurrent read and write failure (#4807) Fixes #4804 --- src/Npgsql/NpgsqlDataReader.cs | 27 +++++++--- .../Util/ResettableCancellationTokenSource.cs | 54 ++++++++++++++++--- test/Npgsql.Tests/CommandTests.cs | 23 ++++++++ 3 files changed, 88 insertions(+), 16 deletions(-) diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 2b87c8f042..6006ae1a47 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ 
b/src/Npgsql/NpgsqlDataReader.cs @@ -1160,17 +1160,28 @@ internal async Task Cleanup(bool async, bool connectionClosing = false, bool isD // command. if (_sendTask != null) { - try + // If the connector is broken, we have no reason to wait for the sendTask to complete + // as we're not going to send anything else over it + // and that can lead to deadlocks (concurrent write and read failure, see #4804) + if (Connector.IsBroken) { - if (async) - await _sendTask; - else - _sendTask.GetAwaiter().GetResult(); + // Prevent unobserved Task notifications by observing the failed Task exception. + _ = _sendTask.ContinueWith(t => _ = t.Exception, CancellationToken.None, TaskContinuationOptions.OnlyOnFaulted, TaskScheduler.Current); } - catch (Exception e) + else { - // TODO: think of a better way to handle exceptions, see #1323 and #3163 - _commandLogger.LogDebug(e, "Exception caught while sending the request", Connector.Id); + try + { + if (async) + await _sendTask; + else + _sendTask.GetAwaiter().GetResult(); + } + catch (Exception e) + { + // TODO: think of a better way to handle exceptions, see #1323 and #3163 + _commandLogger.LogDebug(e, "Exception caught while sending the request", Connector.Id); + } } } diff --git a/src/Npgsql/Util/ResettableCancellationTokenSource.cs b/src/Npgsql/Util/ResettableCancellationTokenSource.cs index c61a07b99e..9bb507b1cb 100644 --- a/src/Npgsql/Util/ResettableCancellationTokenSource.cs +++ b/src/Npgsql/Util/ResettableCancellationTokenSource.cs @@ -48,10 +48,21 @@ public CancellationToken Start(CancellationToken cancellationToken = default) #if DEBUG Debug.Assert(!_isRunning); #endif - _cts.CancelAfter(Timeout); - if (_cts.IsCancellationRequested) + lock (lockObject) { - lock (lockObject) + // if there was an attempt to cancel while the connector was breaking + // we do nothing and return the default token + // as we're going to fail while reading or writing anyway + if (isDisposed) + { +#if DEBUG + _isRunning = true; +#endif + return 
CancellationToken.None; + } + + _cts.CancelAfter(Timeout); + if (_cts.IsCancellationRequested) { _cts.Dispose(); _cts = new CancellationTokenSource(Timeout); @@ -69,7 +80,17 @@ public CancellationToken Start(CancellationToken cancellationToken = default) /// Restart the timeout on the wrapped without reinitializing it, /// even if is already set to /// - public void RestartTimeoutWithoutReset() => _cts.CancelAfter(Timeout); + public void RestartTimeoutWithoutReset() + { + lock (lockObject) + { + // if there was an attempt to cancel while the connector was breaking + // we do nothing and return the default token + // as we're going to fail while reading or writing anyway + if (!isDisposed) + _cts.CancelAfter(Timeout); + } + } /// /// Reset the wrapper to contain a unstarted and uncancelled @@ -83,10 +104,21 @@ public CancellationToken Start(CancellationToken cancellationToken = default) public CancellationToken Reset(CancellationToken cancellationToken = default) { _registration.Dispose(); - _cts.CancelAfter(InfiniteTimeSpan); - if (_cts.IsCancellationRequested) + lock (lockObject) { - lock (lockObject) + // if there was an attempt to cancel while the connector was breaking + // we do nothing and return + // as we're going to fail while reading or writing anyway + if (isDisposed) + { +#if DEBUG + _isRunning = false; +#endif + return CancellationToken.None; + } + + _cts.CancelAfter(InfiniteTimeSpan); + if (_cts.IsCancellationRequested) { _cts.Dispose(); _cts = new CancellationTokenSource(); @@ -129,7 +161,13 @@ public void ResetCts() public void Stop() { _registration.Dispose(); - _cts.CancelAfter(InfiniteTimeSpan); + lock (lockObject) + { + // if there was an attempt to cancel while the connector was breaking + // we do nothing + if (!isDisposed) + _cts.CancelAfter(InfiniteTimeSpan); + } #if DEBUG _isRunning = false; #endif diff --git a/test/Npgsql.Tests/CommandTests.cs b/test/Npgsql.Tests/CommandTests.cs index 4f24eb5e18..21e9da6085 100644 --- 
a/test/Npgsql.Tests/CommandTests.cs +++ b/test/Npgsql.Tests/CommandTests.cs @@ -1384,6 +1384,29 @@ await server Assert.That(conn.FullState, Is.EqualTo(ConnectionState.Open)); } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4804")] + [Description("Concurrent write and read failure can lead to deadlocks while cleaning up the connector.")] + public async Task Concurrent_read_write_failure_deadlock() + { + if (IsMultiplexing) + return; + + await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); + using var _ = CreateTempPool(postmasterMock.ConnectionString, out var connectionString); + await using var conn = await OpenConnectionAsync(connectionString); + + await using var cmd = conn.CreateCommand(); + // Attempt to send a big enough query to fill buffers + // That way the write side should be stuck, waiting for the server to empty buffers + cmd.CommandText = new string('a', 8_000_000); + var queryTask = cmd.ExecuteNonQueryAsync(); + + var server = await postmasterMock.WaitForServerConnection(); + server.Close(); + + Assert.ThrowsAsync(async () => await queryTask); + } + #region Logging [Test] From 09a8387b4cf3597ff13e34eb4ac06e7b8c6a1d89 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Wed, 21 Dec 2022 14:19:54 +0300 Subject: [PATCH 023/761] Fix reusing NpgsqlBatch with autoprepare (#4821) Fixes #4264 --- src/Npgsql/NpgsqlBatchCommand.cs | 16 ++++++++++++++- src/Npgsql/NpgsqlCommand.cs | 34 +++++++++++++++++++++++--------- src/Npgsql/NpgsqlDataReader.cs | 2 ++ test/Npgsql.Tests/BatchTests.cs | 24 ++++++++++++++++++++++ 4 files changed, 66 insertions(+), 10 deletions(-) diff --git a/src/Npgsql/NpgsqlBatchCommand.cs b/src/Npgsql/NpgsqlBatchCommand.cs index 78aedc1f7e..9e45f45c99 100644 --- a/src/Npgsql/NpgsqlBatchCommand.cs +++ b/src/Npgsql/NpgsqlBatchCommand.cs @@ -20,7 +20,13 @@ public sealed class NpgsqlBatchCommand : DbBatchCommand public override string CommandText { get => _commandText; - set => _commandText = value ?? 
string.Empty; + set + { + _commandText = value ?? string.Empty; + + ResetPreparation(); + // TODO: Technically should do this also if the parameter list (or type) changes + } } /// @@ -153,6 +159,8 @@ internal PreparedStatement? PreparedStatement PreparedStatement? _preparedStatement; + internal NpgsqlConnector? ConnectorPreparedOn { get; set; } + internal bool IsPreparing; /// @@ -248,6 +256,12 @@ internal void ApplyCommandComplete(CommandCompleteMessage msg) OID = msg.OID; } + internal void ResetPreparation() + { + PreparedStatement = null; + ConnectorPreparedOn = null; + } + /// /// Returns the . /// diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index c32a675b7f..3550d821a2 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -1346,20 +1346,33 @@ internal virtual async ValueTask ExecuteReader(CommandBehavior { case true: Debug.Assert(_connectorPreparedOn != null); - if (_connectorPreparedOn != connector) - { - // The command was prepared, but since then the connector has changed. Detach all prepared statements. - foreach (var s in InternalBatchCommands) - s.PreparedStatement = null; - ResetPreparation(); - goto case false; - } - if (IsWrappedByBatch) + { foreach (var batchCommand in InternalBatchCommands) + { + if (batchCommand.ConnectorPreparedOn != connector) + { + foreach (var s in InternalBatchCommands) + s.ResetPreparation(); + ResetPreparation(); + goto case false; + } + batchCommand.Parameters.ProcessParameters(dataSource.TypeMapper, validateParameterValues, CommandType); + } + } else + { + if (_connectorPreparedOn != connector) + { + // The command was prepared, but since then the connector has changed. Detach all prepared statements. 
+ foreach (var s in InternalBatchCommands) + s.PreparedStatement = null; + ResetPreparation(); + goto case false; + } Parameters.ProcessParameters(dataSource.TypeMapper, validateParameterValues, CommandType); + } NpgsqlEventSource.Log.CommandStartPrepared(); break; @@ -1377,7 +1390,10 @@ internal virtual async ValueTask ExecuteReader(CommandBehavior ProcessRawQuery(connector.SqlQueryParser, connector.UseConformingStrings, batchCommand); if (connector.Settings.MaxAutoPrepare > 0 && batchCommand.TryAutoPrepare(connector)) + { + batchCommand.ConnectorPreparedOn = connector; numPrepared++; + } } } else diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 6006ae1a47..33bbfbcaf2 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -563,6 +563,8 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo { preparedStatement.State = PreparedState.Invalidated; Command.ResetPreparation(); + foreach (var s in Command.InternalBatchCommands) + s.ResetPreparation(); } } diff --git a/test/Npgsql.Tests/BatchTests.cs b/test/Npgsql.Tests/BatchTests.cs index 342076714a..e59d3b9195 100644 --- a/test/Npgsql.Tests/BatchTests.cs +++ b/test/Npgsql.Tests/BatchTests.cs @@ -731,6 +731,30 @@ public async Task ExecuteScalar_without_parameters() Assert.That(await batch.ExecuteScalarAsync(), Is.EqualTo(1)); } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4264")] + public async Task Batch_with_auto_prepare_reuse() + { + var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + { + MaxAutoPrepare = 20 + }; + + await using var conn = await OpenConnectionAsync(csb); + + var tempTableName = await CreateTempTable(conn, "id int"); + + await using var batch = new NpgsqlBatch(conn); + for (var i = 0; i < 2; ++i) + { + for (var j = 0; j < 10; ++j) + { + batch.BatchCommands.Add(new NpgsqlBatchCommand($"DELETE FROM {tempTableName} WHERE 1=0")); + } + await batch.ExecuteNonQueryAsync(); + 
batch.BatchCommands.Clear(); + } + } + #endregion Miscellaneous #region Logging From 2c81aed1419574152cf92fc091a58985f9c135d4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emmanuel=20Andr=C3=A9?= <2341261+manandre@users.noreply.github.com> Date: Wed, 21 Dec 2022 14:04:22 +0100 Subject: [PATCH 024/761] Fix header size for GeoJSON collections (#4828) Fixes #4827 --- src/Npgsql.GeoJSON/Internal/GeoJSONHandler.cs | 14 ++--- test/Npgsql.PluginTests/GeoJSONTests.cs | 61 +++++++++++++++++++ 2 files changed, 68 insertions(+), 7 deletions(-) diff --git a/src/Npgsql.GeoJSON/Internal/GeoJSONHandler.cs b/src/Npgsql.GeoJSON/Internal/GeoJSONHandler.cs index 4c3c90b866..4db9ffcc12 100644 --- a/src/Npgsql.GeoJSON/Internal/GeoJSONHandler.cs +++ b/src/Npgsql.GeoJSON/Internal/GeoJSONHandler.cs @@ -449,7 +449,7 @@ public async Task Write(Point value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? l public async Task Write(LineString value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) { var type = EwkbGeometryType.LineString; - var size = SizeOfHeader; + var size = SizeOfHeaderWithLength; var srid = GetSrid(value.CRS); if (srid != 0) { @@ -476,7 +476,7 @@ public async Task Write(LineString value, NpgsqlWriteBuffer buf, NpgsqlLengthCac public async Task Write(Polygon value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) { var type = EwkbGeometryType.Polygon; - var size = SizeOfHeader; + var size = SizeOfHeaderWithLength; var srid = GetSrid(value.CRS); if (srid != 0) { @@ -510,7 +510,7 @@ public async Task Write(Polygon value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? public async Task Write(MultiPoint value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) { var type = EwkbGeometryType.MultiPoint; - var size = SizeOfHeader; + var size = SizeOfHeaderWithLength; var srid = GetSrid(value.CRS); if (srid != 0) { @@ -537,7 +537,7 @@ public async Task Write(MultiPoint value, NpgsqlWriteBuffer buf, NpgsqlLengthCac public async Task Write(MultiLineString value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) { var type = EwkbGeometryType.MultiLineString; - var size = SizeOfHeader; + var size = SizeOfHeaderWithLength; var srid = GetSrid(value.CRS); if (srid != 0) { @@ -564,7 +564,7 @@ public async Task Write(MultiLineString value, NpgsqlWriteBuffer buf, NpgsqlLeng public async Task Write(MultiPolygon value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) { var type = EwkbGeometryType.MultiPolygon; - var size = SizeOfHeader; + var size = SizeOfHeaderWithLength; var srid = GetSrid(value.CRS); if (srid != 0) { @@ -590,7 +590,7 @@ public async Task Write(MultiPolygon value, NpgsqlWriteBuffer buf, NpgsqlLengthC public async Task Write(GeometryCollection value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) { var type = EwkbGeometryType.GeometryCollection; - var size = SizeOfHeader; + var size = SizeOfHeaderWithLength; var srid = GetSrid(value.CRS); if (srid != 0) { @@ -717,4 +717,4 @@ enum EwkbGeometryType : uint HasSrid = 0x20000000, HasM = 0x40000000, HasZ = 0x80000000 -} \ No newline at end of file +} diff --git a/test/Npgsql.PluginTests/GeoJSONTests.cs b/test/Npgsql.PluginTests/GeoJSONTests.cs index 2f44d0ec18..688295eee0 100644 --- a/test/Npgsql.PluginTests/GeoJSONTests.cs +++ b/test/Npgsql.PluginTests/GeoJSONTests.cs @@ -8,6 +8,7 @@ using GeoJSON.Net.Geometry; using Newtonsoft.Json; using Npgsql.Tests; +using NpgsqlTypes; using NUnit.Framework; using static Npgsql.Tests.TestUtil; @@ -285,6 +286,66 @@ public async Task Roundtrip_geometry_geography() } } + [Test, TestCaseSource(nameof(Tests))] + public async Task Import_geometry(TestData data) + { + await using var conn = await OpenConnectionAsync(options: GeoJSONOptions.BoundingBox); + var table = await CreateTempTable(conn, "field geometry"); + + await using (var writer = await conn.BeginBinaryImportAsync($"COPY {table} (field) FROM STDIN BINARY")) + { + await writer.StartRowAsync(); + await writer.WriteAsync(data.Geometry, NpgsqlDbType.Geometry); + + var rowsWritten = await writer.CompleteAsync(); + Assert.That(rowsWritten, Is.EqualTo(1)); + } + + await using var cmd = conn.CreateCommand(); + cmd.CommandText = $"SELECT field FROM {table}"; + await using var reader = await cmd.ExecuteReaderAsync(); + Assert.IsTrue(await reader.ReadAsync()); + var actual = reader.GetValue(0); + Assert.That(actual, Is.EqualTo(data.Geometry)); + } + + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4827")] + public async Task Import_big_geometry() + { + await using var conn = await OpenConnectionAsync(); + var table = await CreateTempTable(conn, "id text, field geometry"); + + var geometry = new MultiLineString(new[] { + new LineString( + 
Enumerable.Range(1, 507) + .Select(i => new Position(longitude: i, latitude: i)) + .Append(new Position(longitude: 1d, latitude: 1d))), + new LineString(new[] { + new Position(longitude: 1d, latitude: 1d), + new Position(longitude: 1d, latitude: 2d), + new Position(longitude: 1d, latitude: 3d), + new Position(longitude: 1d, latitude: 1d), + }) + }); + + await using (var writer = await conn.BeginBinaryImportAsync($"COPY {table} (id, field) FROM STDIN BINARY")) + { + await writer.StartRowAsync(); + await writer.WriteAsync("a", NpgsqlDbType.Text); + await writer.WriteAsync(geometry, NpgsqlDbType.Geometry); + + var rowsWritten = await writer.CompleteAsync(); + Assert.That(rowsWritten, Is.EqualTo(1)); + } + + await using var cmd = conn.CreateCommand(); + cmd.CommandText = $"SELECT field FROM {table}"; + await using var reader = await cmd.ExecuteReaderAsync(); + Assert.IsTrue(await reader.ReadAsync()); + var actual = reader.GetValue(0); + Assert.That(actual, Is.EqualTo(geometry)); + } + ValueTask OpenConnectionAsync(GeoJSONOptions options = GeoJSONOptions.None) => GetDataSource(options).OpenConnectionAsync(); From c04c113adf178cd78ac7aa73e87386991f1594f8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 21 Dec 2022 23:46:17 +0100 Subject: [PATCH 025/761] Bump OpenTelemetry.API from 1.3.1 to 1.3.2 (#4829) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 2027b14d58..47caa185c7 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -10,7 +10,7 @@ - + From 68a63a8319045a9ff3d36156515c1e93c5ab898c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emmanuel=20Andr=C3=A9?= <2341261+manandre@users.noreply.github.com> Date: Thu, 22 Dec 2022 09:32:33 +0100 Subject: [PATCH 026/761] Ensure required bytes when reading Polygon types (#4831) --- src/Npgsql.GeoJSON/Internal/GeoJSONHandler.cs | 4 +- 
test/Npgsql.PluginTests/GeoJSONTests.cs | 62 +++++++++++++++++++ 2 files changed, 65 insertions(+), 1 deletion(-) diff --git a/src/Npgsql.GeoJSON/Internal/GeoJSONHandler.cs b/src/Npgsql.GeoJSON/Internal/GeoJSONHandler.cs index 4db9ffcc12..ba040ed79d 100644 --- a/src/Npgsql.GeoJSON/Internal/GeoJSONHandler.cs +++ b/src/Npgsql.GeoJSON/Internal/GeoJSONHandler.cs @@ -167,6 +167,7 @@ async ValueTask ReadGeometryCore(NpgsqlReadBuffer buf, bool async var lines = new LineString[buf.ReadInt32(littleEndian)]; for (var i = 0; i < lines.Length; ++i) { + await buf.Ensure(SizeOfLength, async); var coordinates = new Position[buf.ReadInt32(littleEndian)]; for (var j = 0; j < coordinates.Length; ++j) { @@ -230,6 +231,7 @@ async ValueTask ReadGeometryCore(NpgsqlReadBuffer buf, bool async var lines = new LineString[buf.ReadInt32(littleEndian)]; for (var j = 0; j < lines.Length; ++j) { + await buf.Ensure(SizeOfLength, async); var coordinates = new Position[buf.ReadInt32(littleEndian)]; for (var k = 0; k < coordinates.Length; ++k) { @@ -498,7 +500,7 @@ public async Task Write(Polygon value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? 
for (var i = 0; i < lines.Count; ++i) { - if (buf.WriteSpaceLeft < 4) + if (buf.WriteSpaceLeft < SizeOfLength) await buf.Flush(async, cancellationToken); var coordinates = lines[i].Coordinates; buf.WriteInt32(coordinates.Count); diff --git a/test/Npgsql.PluginTests/GeoJSONTests.cs b/test/Npgsql.PluginTests/GeoJSONTests.cs index 688295eee0..0630eebc8d 100644 --- a/test/Npgsql.PluginTests/GeoJSONTests.cs +++ b/test/Npgsql.PluginTests/GeoJSONTests.cs @@ -346,6 +346,68 @@ public async Task Import_big_geometry() Assert.That(actual, Is.EqualTo(geometry)); } + [Test, TestCaseSource(nameof(Tests))] + public async Task Export_geometry(TestData data) + { + await using var conn = await OpenConnectionAsync(options: GeoJSONOptions.BoundingBox); + var table = await CreateTempTable(conn, "field geometry"); + + await using (var writer = await conn.BeginBinaryImportAsync($"COPY {table} (field) FROM STDIN BINARY")) + { + await writer.StartRowAsync(); + await writer.WriteAsync(data.Geometry, NpgsqlDbType.Geometry); + + var rowsWritten = await writer.CompleteAsync(); + Assert.That(rowsWritten, Is.EqualTo(1)); + } + + await using (var reader = await conn.BeginBinaryExportAsync($"COPY {table} (field) TO STDOUT BINARY")) + { + await reader.StartRowAsync(); + var field = await reader.ReadAsync(NpgsqlDbType.Geometry); + Assert.That(field, Is.EqualTo(data.Geometry)); + } + } + + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4830")] + public async Task Export_big_geometry() + { + await using var conn = await OpenConnectionAsync(); + var table = await CreateTempTable(conn, "id text, field geometry"); + + var geometry = new Polygon(new[] { + new LineString( + Enumerable.Range(1, 507) + .Select(i => new Position(longitude: i, latitude: i)) + .Append(new Position(longitude: 1d, latitude: 1d))), + new LineString(new[] { + new Position(longitude: 1d, latitude: 1d), + new Position(longitude: 1d, latitude: 2d), + new Position(longitude: 1d, latitude: 3d), + new Position(longitude: 1d, 
latitude: 1d), + }) + }); + + await using (var writer = await conn.BeginBinaryImportAsync($"COPY {table} (id, field) FROM STDIN BINARY")) + { + await writer.StartRowAsync(); + await writer.WriteAsync("aaaa", NpgsqlDbType.Text); + await writer.WriteAsync(geometry, NpgsqlDbType.Geometry); + + var rowsWritten = await writer.CompleteAsync(); + Assert.That(rowsWritten, Is.EqualTo(1)); + } + + await using (var reader = await conn.BeginBinaryExportAsync($"COPY {table} (id, field) TO STDOUT BINARY")) + { + await reader.StartRowAsync(); + var id = await reader.ReadAsync(); + var field = await reader.ReadAsync(NpgsqlDbType.Geometry); + Assert.That(id, Is.EqualTo("aaaa")); + Assert.That(field, Is.EqualTo(geometry)); + } + } + ValueTask OpenConnectionAsync(GeoJSONOptions options = GeoJSONOptions.None) => GetDataSource(options).OpenConnectionAsync(); From 266e92818fb9368b89c1016ce354f63a5a4a6615 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 26 Dec 2022 23:25:33 +0100 Subject: [PATCH 027/761] Bump BenchmarkDotNet.Diagnostics.Windows from 0.13.2 to 0.13.3 (#4836) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 47caa185c7..096acea08a 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -40,6 +40,6 @@ - + From 633b679c3f687c80bd2b1f3830e6013f63ec4324 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 26 Dec 2022 23:25:52 +0100 Subject: [PATCH 028/761] Bump BenchmarkDotNet from 0.13.2 to 0.13.3 (#4835) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 096acea08a..75db40709e 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -38,7 +38,7 @@ - + From 0b991a2169cc720062bf462ccf684ed8aa6952e5 Mon Sep 17 00:00:00 2001 
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Jan 2023 00:10:46 +0100 Subject: [PATCH 029/761] Bump Scriban.Signed from 5.5.1 to 5.5.2 (#4849) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 75db40709e..cacc5111d1 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -15,7 +15,7 @@ - + From 0acc808485db0f40e945e24e7d58119389f15dd5 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Sat, 7 Jan 2023 20:09:12 +0300 Subject: [PATCH 030/761] Properly shutdown multiplexing write loop while disposing datasource (#4844) Closes #4686 --- src/Npgsql/MultiplexingDataSource.cs | 43 +++++++++++++++----- src/Npgsql/NpgsqlCommand.cs | 11 +++++- src/Npgsql/NpgsqlDataSource.cs | 59 +++++++++++++++++++++------- test/Npgsql.Tests/DataSourceTests.cs | 16 +++++++- 4 files changed, 104 insertions(+), 25 deletions(-) diff --git a/src/Npgsql/MultiplexingDataSource.cs b/src/Npgsql/MultiplexingDataSource.cs index 2eb1763c3c..1aa2cbe0d9 100644 --- a/src/Npgsql/MultiplexingDataSource.cs +++ b/src/Npgsql/MultiplexingDataSource.cs @@ -21,6 +21,8 @@ sealed class MultiplexingDataSource : PoolingDataSource readonly ChannelReader _multiplexCommandReader; internal ChannelWriter MultiplexCommandWriter { get; } + readonly Task _multiplexWriteLoop; + /// /// When multiplexing is enabled, determines the maximum number of outgoing bytes to buffer before /// flushing to the network. @@ -56,14 +58,15 @@ internal MultiplexingDataSource( _connectionLogger = dataSourceConfig.LoggingConfiguration.ConnectionLogger; _commandLogger = dataSourceConfig.LoggingConfiguration.CommandLogger; - // TODO: Think about cleanup for this, e.g. 
completing the channel at application shutdown and/or - // pool clearing - _ = Task.Run(MultiplexingWriteLoop, CancellationToken.None) + _multiplexWriteLoop = Task.Run(MultiplexingWriteLoop, CancellationToken.None) .ContinueWith(t => { - // Note that we *must* observe the exception if the task is faulted. - _connectionLogger.LogError(t.Exception, "Exception in multiplexing write loop, this is an Npgsql bug, please file an issue."); - }, TaskContinuationOptions.OnlyOnFaulted); + if (t.IsFaulted) + { + // Note that MultiplexingWriteLoop should never throw an exception - everything should be caught and handled internally. + _connectionLogger.LogError(t.Exception, "Exception in multiplexing write loop, this is an Npgsql bug, please file an issue."); + } + }); } async Task MultiplexingWriteLoop() @@ -79,10 +82,18 @@ async Task MultiplexingWriteLoop() while (true) { NpgsqlConnector? connector; + NpgsqlCommand? command; - // Get a first command out. - if (!_multiplexCommandReader.TryRead(out var command)) - command = await _multiplexCommandReader.ReadAsync(); + try + { + // Get a first command out. 
+ if (!_multiplexCommandReader.TryRead(out command)) + command = await _multiplexCommandReader.ReadAsync(); + } + catch (ChannelClosedException) + { + return; + } try { @@ -349,6 +360,20 @@ static void CompleteWrite(NpgsqlConnector connector, ref MultiplexingStats stats // ReSharper disable once FunctionNeverReturns } + protected override void DisposeBase() + { + MultiplexCommandWriter.Complete(new ObjectDisposedException(nameof(MultiplexingDataSource))); + _multiplexWriteLoop.GetAwaiter().GetResult(); + base.DisposeBase(); + } + + protected override async ValueTask DisposeAsyncBase() + { + MultiplexCommandWriter.Complete(new ObjectDisposedException(nameof(MultiplexingDataSource))); + await _multiplexWriteLoop; + await base.DisposeAsyncBase(); + } + struct MultiplexingStats { internal Stopwatch Stopwatch; diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index 3550d821a2..07f3514d80 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -14,6 +14,7 @@ using NpgsqlTypes; using static Npgsql.Util.Statics; using System.Diagnostics.CodeAnalysis; +using System.Threading.Channels; using Microsoft.Extensions.Logging; using Npgsql.Internal; using Npgsql.Properties; @@ -1507,7 +1508,15 @@ internal virtual async ValueTask ExecuteReader(CommandBehavior // Previous behavior was to wait on reading, which throw the exception from ExecuteReader (and not from // the first read). But waiting on writing would allow us to do sync writing and async reading. ExecutionCompletion.Reset(); - await dataSource.MultiplexCommandWriter.WriteAsync(this, cancellationToken); + try + { + await dataSource.MultiplexCommandWriter.WriteAsync(this, cancellationToken); + } + catch (ChannelClosedException ex) + { + Debug.Assert(ex.InnerException is not null); + throw ex.InnerException; + } connector = await new ValueTask(ExecutionCompletion, ExecutionCompletion.Version); // TODO: Overload of StartBindingScope? 
conn.Connector = connector; diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index 754050da02..71b7d2a0bc 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -397,34 +397,65 @@ internal virtual bool TryRentEnlistedPending(Transaction transaction, NpgsqlConn #region Dispose /// - protected override void Dispose(bool disposing) + protected sealed override void Dispose(bool disposing) { if (disposing && Interlocked.CompareExchange(ref _isDisposed, 1, 0) == 0) + DisposeBase(); + } + + /// + protected virtual void DisposeBase() + { + var cancellationTokenSource = _timerPasswordProviderCancellationTokenSource; + if (cancellationTokenSource is not null) { - var cancellationTokenSource = _timerPasswordProviderCancellationTokenSource; - if (cancellationTokenSource is not null) - { - cancellationTokenSource.Cancel(); - cancellationTokenSource.Dispose(); - } + cancellationTokenSource.Cancel(); + cancellationTokenSource.Dispose(); + } - _passwordProviderTimer?.Dispose(); + _passwordProviderTimer?.Dispose(); - _setupMappingsSemaphore.Dispose(); + _setupMappingsSemaphore.Dispose(); - Clear(); - } + Clear(); } /// - protected override ValueTask DisposeAsyncCore() + protected sealed override ValueTask DisposeAsyncCore() { - // TODO: async Clear, #4499 - Dispose(true); + if (Interlocked.CompareExchange(ref _isDisposed, 1, 0) == 0) + return DisposeAsyncBase(); return default; } +#pragma warning disable CS1998 + /// + protected virtual async ValueTask DisposeAsyncBase() + { + var cancellationTokenSource = _timerPasswordProviderCancellationTokenSource; + if (cancellationTokenSource is not null) + { + cancellationTokenSource.Cancel(); + cancellationTokenSource.Dispose(); + } + + if (_passwordProviderTimer is not null) + { +#if NET5_0_OR_GREATER + await _passwordProviderTimer.DisposeAsync(); +#else + _passwordProviderTimer.Dispose(); +#endif + } + + _setupMappingsSemaphore.Dispose(); + + // TODO: async Clear, #4499 + Clear(); + 
} +#pragma warning restore CS1998 + [MethodImpl(MethodImplOptions.AggressiveInlining)] private protected void CheckDisposed() { diff --git a/test/Npgsql.Tests/DataSourceTests.cs b/test/Npgsql.Tests/DataSourceTests.cs index 10565d4615..6778f6260a 100644 --- a/test/Npgsql.Tests/DataSourceTests.cs +++ b/test/Npgsql.Tests/DataSourceTests.cs @@ -71,7 +71,7 @@ public async Task ExecuteReader_on_connectionless_command([Values] bool async) await using var command = dataSource.CreateCommand(); command.CommandText = "SELECT 1"; - using (var reader = async ? await command.ExecuteReaderAsync() : command.ExecuteReader()) + await using (var reader = async ? await command.ExecuteReaderAsync() : command.ExecuteReader()) { Assert.True(reader.Read()); Assert.That(reader.GetInt32(0), Is.EqualTo(1)); @@ -260,4 +260,18 @@ public async Task As_DbDataSource([Values] bool async) ? await command.ExecuteScalarAsync() : command.ExecuteScalar(), Is.EqualTo(1)); } + + [Test] + public async Task Executing_command_on_disposed_datasource([Values] bool multiplexing) + { + var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + { + Multiplexing = multiplexing + }; + DbDataSource dataSource = NpgsqlDataSource.Create(csb.ConnectionString); + await using (var _ = await dataSource.OpenConnectionAsync()) {} + await dataSource.DisposeAsync(); + await using var command = dataSource.CreateCommand("SELECT 1"); + Assert.ThrowsAsync(command.ExecuteNonQueryAsync); + } } From f47be0aefd464769c9694b95c51ca96b920e8b30 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Mon, 9 Jan 2023 15:17:30 +0300 Subject: [PATCH 031/761] Add support for writing JObject and JsonObject without NpgsqlDbType (#4857) Fixes #4537 --- .../JsonNetTypeHandlerResolverFactory.cs | 7 +- .../Internal/TypeHandlers/JsonHandler.cs | 66 ++++++++++++++++--- .../TypeMapping/BuiltInTypeHandlerResolver.cs | 20 +++++- test/Npgsql.PluginTests/JsonNetTests.cs | 45 +++++++++++-- test/Npgsql.Tests/Types/JsonTests.cs | 63 ++++++++++++++++++ 5 
files changed, 186 insertions(+), 15 deletions(-) diff --git a/src/Npgsql.Json.NET/Internal/JsonNetTypeHandlerResolverFactory.cs b/src/Npgsql.Json.NET/Internal/JsonNetTypeHandlerResolverFactory.cs index 9a047fff9e..830a589b26 100644 --- a/src/Npgsql.Json.NET/Internal/JsonNetTypeHandlerResolverFactory.cs +++ b/src/Npgsql.Json.NET/Internal/JsonNetTypeHandlerResolverFactory.cs @@ -1,6 +1,7 @@ using System; using System.Collections.Generic; using Newtonsoft.Json; +using Newtonsoft.Json.Linq; using Npgsql.Internal; using Npgsql.Internal.TypeHandling; using Npgsql.TypeMapping; @@ -23,7 +24,11 @@ public JsonNetTypeHandlerResolverFactory( _jsonClrTypes = jsonClrTypes ?? Array.Empty(); _settings = settings ?? new JsonSerializerSettings(); - _byType = new(); + _byType = new() + { + { typeof(JObject), "jsonb" }, + { typeof(JArray), "jsonb" } + }; if (jsonbClrTypes is not null) foreach (var type in jsonbClrTypes) diff --git a/src/Npgsql/Internal/TypeHandlers/JsonHandler.cs b/src/Npgsql/Internal/TypeHandlers/JsonHandler.cs index 6dbcf942b1..69fe07c45e 100644 --- a/src/Npgsql/Internal/TypeHandlers/JsonHandler.cs +++ b/src/Npgsql/Internal/TypeHandlers/JsonHandler.cs @@ -11,6 +11,10 @@ using Npgsql.TypeMapping; using NpgsqlTypes; +#if NET6_0_OR_GREATER || NETSTANDARD2_0 || NETSTANDARD2_1 +using System.Text.Json.Nodes; +#endif + namespace Npgsql.Internal.TypeHandlers; /// @@ -66,11 +70,25 @@ protected internal override int ValidateAndGetLengthCustom([DisallowNull] if (lengthCache.IsPopulated) return lengthCache.Get(); - var data = SerializeJsonDocument((JsonDocument)(object)value!); + var data = SerializeJsonDocument((JsonDocument)(object)value); if (parameter != null) parameter.ConvertedValue = data; return lengthCache.Set(data.Length + _headerLen); } + +#if NET6_0_OR_GREATER || NETSTANDARD2_0 || NETSTANDARD2_1 + if (typeof(TAny) == typeof(JsonObject) || typeof(TAny) == typeof(JsonArray)) + { + lengthCache ??= new NpgsqlLengthCache(1); + if (lengthCache.IsPopulated) + return 
lengthCache.Get(); + + var data = SerializeJsonObject((JsonNode)(object)value); + if (parameter != null) + parameter.ConvertedValue = data; + return lengthCache.Set(data.Length + _headerLen); + } +#endif // User POCO, need to serialize. At least internally ArrayPool buffers are used... var s = JsonSerializer.Serialize(value, _serializerOptions); @@ -94,30 +112,39 @@ protected override async Task WriteWithLengthCustom([DisallowNull] TAny va buf.WriteByte(JsonbProtocolVersion); if (typeof(TAny) == typeof(string)) - await _textHandler.Write((string)(object)value!, buf, lengthCache, parameter, async, cancellationToken); + await _textHandler.Write((string)(object)value, buf, lengthCache, parameter, async, cancellationToken); else if (typeof(TAny) == typeof(char[])) - await _textHandler.Write((char[])(object)value!, buf, lengthCache, parameter, async, cancellationToken); + await _textHandler.Write((char[])(object)value, buf, lengthCache, parameter, async, cancellationToken); else if (typeof(TAny) == typeof(ArraySegment)) - await _textHandler.Write((ArraySegment)(object)value!, buf, lengthCache, parameter, async, cancellationToken); + await _textHandler.Write((ArraySegment)(object)value, buf, lengthCache, parameter, async, cancellationToken); else if (typeof(TAny) == typeof(char)) - await _textHandler.Write((char)(object)value!, buf, lengthCache, parameter, async, cancellationToken); + await _textHandler.Write((char)(object)value, buf, lengthCache, parameter, async, cancellationToken); else if (typeof(TAny) == typeof(byte[])) - await _textHandler.Write((byte[])(object)value!, buf, lengthCache, parameter, async, cancellationToken); + await _textHandler.Write((byte[])(object)value, buf, lengthCache, parameter, async, cancellationToken); else if (typeof(TAny) == typeof(ReadOnlyMemory)) - await _textHandler.Write((ReadOnlyMemory)(object)value!, buf, lengthCache, parameter, async, cancellationToken); + await _textHandler.Write((ReadOnlyMemory)(object)value, buf, lengthCache, 
parameter, async, cancellationToken); else if (typeof(TAny) == typeof(JsonDocument)) { var data = parameter?.ConvertedValue != null ? (byte[])parameter.ConvertedValue - : SerializeJsonDocument((JsonDocument)(object)value!); + : SerializeJsonDocument((JsonDocument)(object)value); + await buf.WriteBytesRaw(data, async, cancellationToken); + } +#if NET6_0_OR_GREATER || NETSTANDARD2_0 || NETSTANDARD2_1 + else if (typeof(TAny) == typeof(JsonObject) || typeof(TAny) == typeof(JsonArray)) + { + var data = parameter?.ConvertedValue != null + ? (byte[])parameter.ConvertedValue + : SerializeJsonObject((JsonNode)(object)value); await buf.WriteBytesRaw(data, async, cancellationToken); } +#endif else { // User POCO, read serialized representation from the validation phase var s = parameter?.ConvertedValue != null ? (string)parameter.ConvertedValue - : JsonSerializer.Serialize(value!, value!.GetType(), _serializerOptions); + : JsonSerializer.Serialize(value, value.GetType(), _serializerOptions); await _textHandler.Write(s, buf, lengthCache, parameter, async, cancellationToken); } @@ -151,6 +178,10 @@ public override int ValidateObjectAndGetLength(object value, ref NpgsqlLengthCac byte[] s => ValidateAndGetLength(s, ref lengthCache, parameter), ReadOnlyMemory s => ValidateAndGetLength(s, ref lengthCache, parameter), JsonDocument jsonDocument => ValidateAndGetLength(jsonDocument, ref lengthCache, parameter), +#if NET6_0_OR_GREATER || NETSTANDARD2_0 || NETSTANDARD2_1 + JsonObject jsonObject => ValidateAndGetLength(jsonObject, ref lengthCache, parameter), + JsonArray jsonArray => ValidateAndGetLength(jsonArray, ref lengthCache, parameter), +#endif _ => ValidateAndGetLength(value, ref lengthCache, parameter) }; @@ -172,6 +203,10 @@ public override async Task WriteObjectWithLength(object? 
value, NpgsqlWriteBuffe byte[] s => WriteWithLengthCustom(s, buf, lengthCache, parameter, async, cancellationToken), ReadOnlyMemory s => WriteWithLengthCustom(s, buf, lengthCache, parameter, async, cancellationToken), JsonDocument jsonDocument => WriteWithLengthCustom(jsonDocument, buf, lengthCache, parameter, async, cancellationToken), +#if NET6_0_OR_GREATER || NETSTANDARD2_0 || NETSTANDARD2_1 + JsonObject jsonObject => WriteWithLengthCustom(jsonObject, buf, lengthCache, parameter, async, cancellationToken), + JsonArray jsonArray => WriteWithLengthCustom(jsonArray, buf, lengthCache, parameter, async, cancellationToken), +#endif _ => WriteWithLengthCustom(value, buf, lengthCache, parameter, async, cancellationToken), }); } @@ -243,4 +278,17 @@ byte[] SerializeJsonDocument(JsonDocument document) writer.Flush(); return stream.ToArray(); } + +#if NET6_0_OR_GREATER || NETSTANDARD2_0 || NETSTANDARD2_1 + byte[] SerializeJsonObject(JsonNode jsonObject) + { + // TODO: Writing is currently really inefficient - please don't criticize :) + // We need to implement one-pass writing to serialize directly to the buffer (or just switch to pipelines). 
+ using var stream = new MemoryStream(); + using var writer = new Utf8JsonWriter(stream); + jsonObject.WriteTo(writer); + writer.Flush(); + return stream.ToArray(); + } +#endif } \ No newline at end of file diff --git a/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs b/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs index f7e132fb5d..f350a793d4 100644 --- a/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs +++ b/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs @@ -23,6 +23,10 @@ using NpgsqlTypes; using static Npgsql.Util.Statics; +#if NET6_0_OR_GREATER || NETSTANDARD2_0 || NETSTANDARD2_1 +using System.Text.Json.Nodes; +#endif + namespace Npgsql.TypeMapping; sealed class BuiltInTypeHandlerResolver : TypeHandlerResolver @@ -54,7 +58,11 @@ sealed class BuiltInTypeHandlerResolver : TypeHandlerResolver { "name", new(NpgsqlDbType.Name, "name") }, { "refcursor", new(NpgsqlDbType.Refcursor, "refcursor") }, { "citext", new(NpgsqlDbType.Citext, "citext") }, - { "jsonb", new(NpgsqlDbType.Jsonb, "jsonb", typeof(JsonDocument)) }, + { "jsonb", new(NpgsqlDbType.Jsonb, "jsonb", typeof(JsonDocument) +#if NET6_0_OR_GREATER || NETSTANDARD2_0 || NETSTANDARD2_1 + , typeof(JsonObject), typeof(JsonArray) +#endif + ) }, { "json", new(NpgsqlDbType.Json, "json") }, { "jsonpath", new(NpgsqlDbType.JsonPath, "jsonpath") }, @@ -397,6 +405,10 @@ static BuiltInTypeHandlerResolver() { typeof(char), "text" }, { typeof(ArraySegment), "text" }, { typeof(JsonDocument), "jsonb" }, +#if NET6_0_OR_GREATER || NETSTANDARD2_0 || NETSTANDARD2_1 + { typeof(JsonObject), "jsonb" }, + { typeof(JsonArray), "jsonb" }, +#endif // Date/time types // The DateTime entry is for LegacyTimestampBehavior mode only. 
In regular mode we resolve through @@ -599,6 +611,12 @@ static DateTimeKind GetMultirangeKind(IList> multirange) return _textHandler; if (typeof(T) == typeof(JsonDocument)) return JsonbHandler(); +#if NET6_0_OR_GREATER || NETSTANDARD2_0 || NETSTANDARD2_1 + if (typeof(T) == typeof(JsonObject)) + return JsonbHandler(); + if (typeof(T) == typeof(JsonArray)) + return JsonbHandler(); +#endif // Date/time types // No resolution for DateTime, since that's value-dependent (Kind) diff --git a/test/Npgsql.PluginTests/JsonNetTests.cs b/test/Npgsql.PluginTests/JsonNetTests.cs index 49790a2172..e85f86f736 100644 --- a/test/Npgsql.PluginTests/JsonNetTests.cs +++ b/test/Npgsql.PluginTests/JsonNetTests.cs @@ -72,7 +72,9 @@ public Task Roundtrip_JObject() IsJsonb ? @"{""Bar"": 8}" : @"{""Bar"":8}", _pgTypeName, _npgsqlDbType, - isDefault: false, + // By default we map JObject to jsonb + isDefaultForWriting: IsJsonb, + isDefaultForReading: false, isNpgsqlDbTypeInferredFromClrType: false); [Test] @@ -83,7 +85,9 @@ public Task Roundtrip_JArray() IsJsonb ? "[1, 2, 3]" : "[1,2,3]", _pgTypeName, _npgsqlDbType, - isDefault: false, + // By default we map JArray to jsonb + isDefaultForWriting: IsJsonb, + isDefaultForReading: false, isNpgsqlDbTypeInferredFromClrType: false); [Test] @@ -168,6 +172,7 @@ await AssertType( isDefault: false, isNpgsqlDbTypeInferredFromClrType: false); } + [Test] public async Task Bug3464() { @@ -190,8 +195,37 @@ public class Bug3464Class public string? 
SomeString { get; set; } } - readonly NpgsqlDbType _npgsqlDbType; - readonly string _pgTypeName; + [Test] + [IssueLink("https://github.com/npgsql/npgsql/issues/4537")] + public async Task Write_jobject_array_without_npgsqldbtype() + { + // By default we map JObject to jsonb + if (!IsJsonb) + return; + + await using var conn = await JsonDataSource.OpenConnectionAsync(); + var tableName = await TestUtil.CreateTempTable(conn, "key SERIAL PRIMARY KEY, ingredients json[]"); + + await using var cmd = new NpgsqlCommand { Connection = conn }; + + var jsonObject1 = new JObject + { + { "name", "value1" }, + { "amount", 1 }, + { "unit", "ml" } + }; + + var jsonObject2 = new JObject + { + { "name", "value2" }, + { "amount", 2 }, + { "unit", "g" } + }; + + cmd.CommandText = $"INSERT INTO {tableName} (ingredients) VALUES (@p)"; + cmd.Parameters.Add(new("p", new[] { jsonObject1, jsonObject2 })); + await cmd.ExecuteNonQueryAsync(); + } class Foo { @@ -200,6 +234,9 @@ class Foo public override int GetHashCode() => Bar.GetHashCode(); } + readonly NpgsqlDbType _npgsqlDbType; + readonly string _pgTypeName; + [OneTimeSetUp] public void SetUp() { diff --git a/test/Npgsql.Tests/Types/JsonTests.cs b/test/Npgsql.Tests/Types/JsonTests.cs index bb55b734eb..649f46ba26 100644 --- a/test/Npgsql.Tests/Types/JsonTests.cs +++ b/test/Npgsql.Tests/Types/JsonTests.cs @@ -1,6 +1,7 @@ using System; using System.Text; using System.Text.Json; +using System.Text.Json.Nodes; using System.Threading.Tasks; using NpgsqlTypes; using NUnit.Framework; @@ -69,6 +70,34 @@ public async Task As_JsonDocument() isDefault: false, comparer: (x, y) => x.RootElement.GetProperty("K").GetString() == y.RootElement.GetProperty("K").GetString()); +#if NET6_0_OR_GREATER + [Test] + public Task Roundtrip_JsonObject() + => AssertType( + new JsonObject { ["Bar"] = 8 }, + IsJsonb ? 
@"{""Bar"": 8}" : @"{""Bar"":8}", + PostgresType, + NpgsqlDbType, + // By default we map JsonObject to jsonb + isDefaultForWriting: IsJsonb, + isDefaultForReading: false, + isNpgsqlDbTypeInferredFromClrType: false, + comparer: (x, y) => x.ToString() == y.ToString()); + + [Test] + public Task Roundtrip_JsonArray() + => AssertType( + new JsonArray { 1, 2, 3 }, + IsJsonb ? "[1, 2, 3]" : "[1,2,3]", + PostgresType, + NpgsqlDbType, + // By default we map JsonArray to jsonb + isDefaultForWriting: IsJsonb, + isDefaultForReading: false, + isNpgsqlDbTypeInferredFromClrType: false, + comparer: (x, y) => x.ToString() == y.ToString()); +#endif + [Test] public async Task As_poco() => await AssertType( @@ -141,6 +170,40 @@ public async Task Can_read_two_json_documents() Assert.That(car.RootElement.GetProperty("key").GetString(), Is.EqualTo("foo")); } +#if NET6_0_OR_GREATER + [Test] + [IssueLink("https://github.com/npgsql/npgsql/issues/4537")] + public async Task Write_jsonobject_array_without_npgsqldbtype() + { + // By default we map JsonObject to jsonb + if (!IsJsonb) + return; + + await using var conn = await OpenConnectionAsync(); + var tableName = await TestUtil.CreateTempTable(conn, "key SERIAL PRIMARY KEY, ingredients json[]"); + + await using var cmd = new NpgsqlCommand { Connection = conn }; + + var jsonObject1 = new JsonObject + { + { "name", "value1" }, + { "amount", 1 }, + { "unit", "ml" } + }; + + var jsonObject2 = new JsonObject + { + { "name", "value2" }, + { "amount", 2 }, + { "unit", "g" } + }; + + cmd.CommandText = $"INSERT INTO {tableName} (ingredients) VALUES (@p)"; + cmd.Parameters.Add(new("p", new[] { jsonObject1, jsonObject2 })); + await cmd.ExecuteNonQueryAsync(); + } +#endif + public JsonTests(MultiplexingMode multiplexingMode, NpgsqlDbType npgsqlDbType) : base(multiplexingMode) { From c9ba6120d9ae3641a236c52bed9de769e0e72b39 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Mon, 9 Jan 2023 19:36:49 +0100 Subject: [PATCH 032/761] Separate System.Text.Json 
support out to a plugin (#4860) And add API support for specifying JsonSerializerOptions and CLR types that map to JSON by default Closes #4846 Closes #4859 --- src/Npgsql.Json.NET/Internal/JsonHandler.cs | 100 ------ .../Internal/JsonNetJsonHandler.cs | 64 ++++ .../Internal/JsonNetTypeHandlerResolver.cs | 26 +- src/Npgsql.Json.NET/Internal/JsonbHandler.cs | 100 ------ .../NpgsqlJsonNetExtensions.cs | 16 +- src/Npgsql.Json.NET/PublicAPI.Unshipped.txt | 2 + .../Internal/TypeHandlers/JsonHandler.cs | 294 ------------------ .../Internal/TypeHandlers/JsonTextHandler.cs | 215 +++++++++++++ .../TypeHandlers/SystemTextJsonHandler.cs | 222 +++++++++++++ src/Npgsql/PublicAPI.Unshipped.txt | 2 + .../TypeMapping/BuiltInTypeHandlerResolver.cs | 33 +- .../TypeMapping/JsonTypeHandlerResolver.cs | 85 +++++ .../JsonTypeHandlerResolverFactory.cs | 50 +++ .../TypeMapping/NpgsqlJsonExtensions.cs | 34 ++ test/Npgsql.PluginTests/JsonNetTests.cs | 2 +- test/Npgsql.Tests/Support/TestBase.cs | 19 +- test/Npgsql.Tests/Types/JsonTests.cs | 108 ++++++- 17 files changed, 813 insertions(+), 559 deletions(-) delete mode 100644 src/Npgsql.Json.NET/Internal/JsonHandler.cs create mode 100644 src/Npgsql.Json.NET/Internal/JsonNetJsonHandler.cs delete mode 100644 src/Npgsql.Json.NET/Internal/JsonbHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/JsonHandler.cs create mode 100644 src/Npgsql/Internal/TypeHandlers/JsonTextHandler.cs create mode 100644 src/Npgsql/Internal/TypeHandlers/SystemTextJsonHandler.cs create mode 100644 src/Npgsql/TypeMapping/JsonTypeHandlerResolver.cs create mode 100644 src/Npgsql/TypeMapping/JsonTypeHandlerResolverFactory.cs create mode 100644 src/Npgsql/TypeMapping/NpgsqlJsonExtensions.cs diff --git a/src/Npgsql.Json.NET/Internal/JsonHandler.cs b/src/Npgsql.Json.NET/Internal/JsonHandler.cs deleted file mode 100644 index 56a6683a5f..0000000000 --- a/src/Npgsql.Json.NET/Internal/JsonHandler.cs +++ /dev/null @@ -1,100 +0,0 @@ -using System; -using 
System.Diagnostics.CodeAnalysis; -using System.Threading; -using System.Threading.Tasks; -using Newtonsoft.Json; -using Npgsql.BackendMessages; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Json.NET.Internal; - -class JsonHandler : Npgsql.Internal.TypeHandlers.JsonHandler -{ - readonly JsonSerializerSettings _settings; - - public JsonHandler(PostgresType postgresType, NpgsqlConnector connector, JsonSerializerSettings settings) - : base(postgresType, connector.TextEncoding, isJsonb: false) => _settings = settings; - - protected override async ValueTask ReadCustom(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - { - if (typeof(T) == typeof(string) || - typeof(T) == typeof(char[]) || - typeof(T) == typeof(ArraySegment) || - typeof(T) == typeof(char) || - typeof(T) == typeof(byte[])) - { - return await base.ReadCustom(buf, len, async, fieldDescription); - } - - // JSON.NET returns null if no JSON content was found. This means null may get returned even if T is a non-nullable reference - // type (for value types, an exception will be thrown). - return JsonConvert.DeserializeObject(await base.Read(buf, len, async, fieldDescription), _settings)!; - } - - protected override int ValidateAndGetLengthCustom([DisallowNull] T2 value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - { - if (typeof(T2) == typeof(string) || - typeof(T2) == typeof(char[]) || - typeof(T2) == typeof(ArraySegment) || - typeof(T2) == typeof(char) || - typeof(T2) == typeof(byte[])) - { - return base.ValidateAndGetLengthCustom(value, ref lengthCache, parameter); - } - - var serialized = JsonConvert.SerializeObject(value, _settings); - if (parameter != null) - parameter.ConvertedValue = serialized; - return base.ValidateAndGetLengthCustom(serialized, ref lengthCache, parameter); - } - - protected override Task WriteWithLengthCustom([DisallowNull] T2 value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (typeof(T2) == typeof(string) || - typeof(T2) == typeof(char[]) || - typeof(T2) == typeof(ArraySegment) || - typeof(T2) == typeof(char) || - typeof(T2) == typeof(byte[])) - { - return base.WriteWithLengthCustom(value, buf, lengthCache, parameter, async, cancellationToken); - } - - // User POCO, read serialized representation from the validation phase - var serialized = parameter?.ConvertedValue != null - ? (string)parameter.ConvertedValue - : JsonConvert.SerializeObject(value, _settings); - return base.WriteWithLengthCustom(serialized, buf, lengthCache, parameter, async, cancellationToken); - } - - public override int ValidateObjectAndGetLength(object value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - { - if (value is string || - value is char[] || - value is ArraySegment || - value is char || - value is byte[]) - { - return base.ValidateObjectAndGetLength(value, ref lengthCache, parameter); - } - - return ValidateAndGetLength(value, ref lengthCache, parameter); - } - - public override Task WriteObjectWithLength(object? value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) - { - if (value is null || - value is DBNull || - value is string || - value is char[] || - value is ArraySegment || - value is char || - value is byte[]) - { - return base.WriteObjectWithLength(value, buf, lengthCache, parameter, async, cancellationToken); - } - - return WriteWithLength(value, buf, lengthCache, parameter, async, cancellationToken); - } -} \ No newline at end of file diff --git a/src/Npgsql.Json.NET/Internal/JsonNetJsonHandler.cs b/src/Npgsql.Json.NET/Internal/JsonNetJsonHandler.cs new file mode 100644 index 0000000000..cbf8ca3ae2 --- /dev/null +++ b/src/Npgsql.Json.NET/Internal/JsonNetJsonHandler.cs @@ -0,0 +1,64 @@ +using System; +using System.Diagnostics.CodeAnalysis; +using System.Threading; +using System.Threading.Tasks; +using Newtonsoft.Json; +using Npgsql.BackendMessages; +using Npgsql.Internal; +using Npgsql.Internal.TypeHandlers; +using Npgsql.Internal.TypeHandling; +using Npgsql.PostgresTypes; + +namespace Npgsql.Json.NET.Internal; + +class JsonNetJsonHandler : JsonTextHandler +{ + readonly JsonSerializerSettings _settings; + + public JsonNetJsonHandler(PostgresType postgresType, NpgsqlConnector connector, bool isJsonb, JsonSerializerSettings settings) + : base(postgresType, connector.TextEncoding, isJsonb) => _settings = settings; + + protected override async ValueTask ReadCustom(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) + { + if (IsSupportedAsText()) + return await base.ReadCustom(buf, len, async, fieldDescription); + + // JSON.NET returns null if no JSON content was found. This means null may get returned even if T is a non-nullable reference + // type (for value types, an exception will be thrown). + return JsonConvert.DeserializeObject(await base.Read(buf, len, async, fieldDescription), _settings)!; + } + + protected override int ValidateAndGetLengthCustom([DisallowNull] TAny value, ref NpgsqlLengthCache? 
lengthCache, NpgsqlParameter? parameter) + { + if (IsSupportedAsText()) + return base.ValidateAndGetLengthCustom(value, ref lengthCache, parameter); + + var serialized = JsonConvert.SerializeObject(value, _settings); + if (parameter != null) + parameter.ConvertedValue = serialized; + return base.ValidateAndGetLengthCustom(serialized, ref lengthCache, parameter); + } + + protected override Task WriteWithLengthCustom([DisallowNull] TAny value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) + { + if (IsSupportedAsText()) + return base.WriteWithLengthCustom(value, buf, lengthCache, parameter, async, cancellationToken); + + // User POCO, read serialized representation from the validation phase + var serialized = parameter?.ConvertedValue != null + ? (string)parameter.ConvertedValue + : JsonConvert.SerializeObject(value, _settings); + return base.WriteWithLengthCustom(serialized, buf, lengthCache, parameter, async, cancellationToken); + } + + public override int ValidateObjectAndGetLength(object value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) + => IsSupported(value.GetType()) + ? base.ValidateObjectAndGetLength(value, ref lengthCache, parameter) + : ValidateAndGetLengthCustom(value, ref lengthCache, parameter); + + public override Task WriteObjectWithLength(object? value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, + NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) + => value is null or DBNull || IsSupported(value.GetType()) + ? 
base.WriteObjectWithLength(value, buf, lengthCache, parameter, async, cancellationToken) + : WriteWithLengthCustom(value, buf, lengthCache, parameter, async, cancellationToken); +} \ No newline at end of file diff --git a/src/Npgsql.Json.NET/Internal/JsonNetTypeHandlerResolver.cs b/src/Npgsql.Json.NET/Internal/JsonNetTypeHandlerResolver.cs index 25ce0d5e92..338e695f65 100644 --- a/src/Npgsql.Json.NET/Internal/JsonNetTypeHandlerResolver.cs +++ b/src/Npgsql.Json.NET/Internal/JsonNetTypeHandlerResolver.cs @@ -1,11 +1,9 @@ using System; using System.Collections.Generic; -using System.Data; using Newtonsoft.Json; using Npgsql.Internal; using Npgsql.Internal.TypeHandling; using Npgsql.PostgresTypes; -using Npgsql.TypeMapping; using NpgsqlTypes; namespace Npgsql.Json.NET.Internal; @@ -13,36 +11,28 @@ namespace Npgsql.Json.NET.Internal; public class JsonNetTypeHandlerResolver : TypeHandlerResolver { readonly NpgsqlDatabaseInfo _databaseInfo; - readonly JsonbHandler _jsonbHandler; - readonly JsonHandler _jsonHandler; + readonly JsonNetJsonHandler _jsonNetJsonbHandler; + readonly JsonNetJsonHandler _jsonNetJsonHandler; readonly Dictionary _dataTypeNamesByClrType; internal JsonNetTypeHandlerResolver( NpgsqlConnector connector, - Dictionary dataClrTypeNamesDataTypeNamesByClrClrType, + Dictionary dataTypeNamesByClrType, JsonSerializerSettings settings) { _databaseInfo = connector.DatabaseInfo; - _jsonbHandler = new JsonbHandler(PgType("jsonb"), connector, settings); - _jsonHandler = new JsonHandler(PgType("json"), connector, settings); + _jsonNetJsonbHandler = new JsonNetJsonHandler(PgType("jsonb"), connector, isJsonb: true, settings); + _jsonNetJsonHandler = new JsonNetJsonHandler(PgType("json"), connector, isJsonb: false, settings); - _dataTypeNamesByClrType = dataClrTypeNamesDataTypeNamesByClrClrType; + _dataTypeNamesByClrType = dataTypeNamesByClrType; } - public NpgsqlTypeHandler? 
ResolveNpgsqlDbType(NpgsqlDbType npgsqlDbType) - => npgsqlDbType switch - { - NpgsqlDbType.Jsonb => _jsonbHandler, - NpgsqlDbType.Json => _jsonHandler, - _ => null - }; - public override NpgsqlTypeHandler? ResolveByDataTypeName(string typeName) => typeName switch { - "jsonb" => _jsonbHandler, - "json" => _jsonHandler, + "jsonb" => _jsonNetJsonbHandler, + "json" => _jsonNetJsonHandler, _ => null }; diff --git a/src/Npgsql.Json.NET/Internal/JsonbHandler.cs b/src/Npgsql.Json.NET/Internal/JsonbHandler.cs deleted file mode 100644 index e9b88e3d34..0000000000 --- a/src/Npgsql.Json.NET/Internal/JsonbHandler.cs +++ /dev/null @@ -1,100 +0,0 @@ -using System; -using System.Diagnostics.CodeAnalysis; -using System.Threading; -using System.Threading.Tasks; -using Newtonsoft.Json; -using Npgsql.BackendMessages; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Json.NET.Internal; - -class JsonbHandler : Npgsql.Internal.TypeHandlers.JsonHandler -{ - readonly JsonSerializerSettings _settings; - - public JsonbHandler(PostgresType postgresType, NpgsqlConnector connector, JsonSerializerSettings settings) - : base(postgresType, connector.TextEncoding, isJsonb: true) => _settings = settings; - - protected override async ValueTask ReadCustom(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - { - if (typeof(T) == typeof(string) || - typeof(T) == typeof(char[]) || - typeof(T) == typeof(ArraySegment) || - typeof(T) == typeof(char) || - typeof(T) == typeof(byte[])) - { - return await base.ReadCustom(buf, len, async, fieldDescription); - } - - // JSON.NET returns null if no JSON content was found. This means null may get returned even if T is a non-nullable reference - // type (for value types, an exception will be thrown). 
- return JsonConvert.DeserializeObject(await base.Read(buf, len, async, fieldDescription), _settings)!; - } - - protected override int ValidateAndGetLengthCustom([DisallowNull] T2 value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - { - if (typeof(T2) == typeof(string) || - typeof(T2) == typeof(char[]) || - typeof(T2) == typeof(ArraySegment) || - typeof(T2) == typeof(char) || - typeof(T2) == typeof(byte[])) - { - return base.ValidateAndGetLengthCustom(value, ref lengthCache, parameter); - } - - var serialized = JsonConvert.SerializeObject(value, _settings); - if (parameter != null) - parameter.ConvertedValue = serialized; - return base.ValidateAndGetLengthCustom(serialized, ref lengthCache, parameter); - } - - protected override Task WriteWithLengthCustom([DisallowNull] T2 value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (typeof(T2) == typeof(string) || - typeof(T2) == typeof(char[]) || - typeof(T2) == typeof(ArraySegment) || - typeof(T2) == typeof(char) || - typeof(T2) == typeof(byte[])) - { - return base.WriteWithLengthCustom(value, buf, lengthCache, parameter, async, cancellationToken); - } - - // User POCO, read serialized representation from the validation phase - var serialized = parameter?.ConvertedValue != null - ? (string)parameter.ConvertedValue - : JsonConvert.SerializeObject(value, _settings); - return base.WriteWithLengthCustom(serialized, buf, lengthCache, parameter, async, cancellationToken); - } - - public override int ValidateObjectAndGetLength(object value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - { - if (value is string || - value is char[] || - value is ArraySegment || - value is char || - value is byte[]) - { - return base.ValidateObjectAndGetLength(value, ref lengthCache, parameter); - } - - return ValidateAndGetLengthCustom(value, ref lengthCache, parameter); - } - - public override Task WriteObjectWithLength(object? value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (value is null || - value is DBNull || - value is string || - value is char[] || - value is ArraySegment || - value is char || - value is byte[]) - { - return base.WriteObjectWithLength(value, buf, lengthCache, parameter, async, cancellationToken); - } - - return WriteWithLengthCustom(value, buf, lengthCache, parameter, async, cancellationToken); - } -} \ No newline at end of file diff --git a/src/Npgsql.Json.NET/NpgsqlJsonNetExtensions.cs b/src/Npgsql.Json.NET/NpgsqlJsonNetExtensions.cs index 06f0f2f661..bd3b7b41f8 100644 --- a/src/Npgsql.Json.NET/NpgsqlJsonNetExtensions.cs +++ b/src/Npgsql.Json.NET/NpgsqlJsonNetExtensions.cs @@ -15,15 +15,19 @@ public static class NpgsqlJsonNetExtensions /// /// Sets up JSON.NET mappings for the PostgreSQL json and jsonb types. /// - /// The type mapper to set up (global or connection-specific) - /// A list of CLR types to map to PostgreSQL jsonb (no need to specify NpgsqlDbType.Jsonb) - /// A list of CLR types to map to PostgreSQL json (no need to specify NpgsqlDbType.Json) - /// Optional settings to customize JSON serialization + /// The type mapper to set up. + /// Optional settings to customize JSON serialization. + /// + /// A list of CLR types to map to PostgreSQL jsonb (no need to specify ). + /// + /// + /// A list of CLR types to map to PostgreSQL json (no need to specify ). + /// public static INpgsqlTypeMapper UseJsonNet( this INpgsqlTypeMapper mapper, + JsonSerializerSettings? settings = null, Type[]? 
jsonbClrTypes = null, - Type[]? jsonClrTypes = null, - JsonSerializerSettings? settings = null) + Type[]? jsonClrTypes = null) { mapper.AddTypeResolverFactory(new JsonNetTypeHandlerResolverFactory(jsonbClrTypes, jsonClrTypes, settings)); return mapper; diff --git a/src/Npgsql.Json.NET/PublicAPI.Unshipped.txt b/src/Npgsql.Json.NET/PublicAPI.Unshipped.txt index ab058de62d..6372f0638d 100644 --- a/src/Npgsql.Json.NET/PublicAPI.Unshipped.txt +++ b/src/Npgsql.Json.NET/PublicAPI.Unshipped.txt @@ -1 +1,3 @@ #nullable enable +static Npgsql.NpgsqlJsonNetExtensions.UseJsonNet(this Npgsql.TypeMapping.INpgsqlTypeMapper! mapper, Newtonsoft.Json.JsonSerializerSettings? settings = null, System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +*REMOVED*static Npgsql.NpgsqlJsonNetExtensions.UseJsonNet(this Npgsql.TypeMapping.INpgsqlTypeMapper! mapper, System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null, Newtonsoft.Json.JsonSerializerSettings? settings = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! diff --git a/src/Npgsql/Internal/TypeHandlers/JsonHandler.cs b/src/Npgsql/Internal/TypeHandlers/JsonHandler.cs deleted file mode 100644 index 69fe07c45e..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/JsonHandler.cs +++ /dev/null @@ -1,294 +0,0 @@ -using System; -using System.Diagnostics.CodeAnalysis; -using System.IO; -using System.Text; -using System.Text.Json; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using Npgsql.TypeMapping; -using NpgsqlTypes; - -#if NET6_0_OR_GREATER || NETSTANDARD2_0 || NETSTANDARD2_1 -using System.Text.Json.Nodes; -#endif - -namespace Npgsql.Internal.TypeHandlers; - -/// -/// A type handler for the PostgreSQL json and jsonb data type. -/// -/// -/// See https://www.postgresql.org/docs/current/datatype-json.html. 
-/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public class JsonHandler : NpgsqlTypeHandler, ITextReaderHandler -{ - readonly JsonSerializerOptions _serializerOptions; - readonly TextHandler _textHandler; - readonly bool _isJsonb; - readonly int _headerLen; - - /// - /// Prepended to the string in the wire encoding - /// - const byte JsonbProtocolVersion = 1; - - static readonly JsonSerializerOptions DefaultSerializerOptions = new(); - - /// - public JsonHandler(PostgresType postgresType, Encoding encoding, bool isJsonb, JsonSerializerOptions? serializerOptions = null) - : base(postgresType) - { - _serializerOptions = serializerOptions ?? DefaultSerializerOptions; - _isJsonb = isJsonb; - _headerLen = isJsonb ? 1 : 0; - _textHandler = new TextHandler(postgresType, encoding); - } - - /// - protected internal override int ValidateAndGetLengthCustom([DisallowNull] TAny value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - { - if (typeof(TAny) == typeof(string) || - typeof(TAny) == typeof(char[]) || - typeof(TAny) == typeof(ArraySegment) || - typeof(TAny) == typeof(char) || - typeof(TAny) == typeof(byte[]) || - typeof(TAny) == typeof(ReadOnlyMemory)) - { - return _textHandler.ValidateAndGetLength(value, ref lengthCache, parameter) + _headerLen; - } - - if (typeof(TAny) == typeof(JsonDocument)) - { - lengthCache ??= new NpgsqlLengthCache(1); - if (lengthCache.IsPopulated) - return lengthCache.Get(); - - var data = SerializeJsonDocument((JsonDocument)(object)value); - if (parameter != null) - parameter.ConvertedValue = data; - return lengthCache.Set(data.Length + _headerLen); - } - -#if NET6_0_OR_GREATER || NETSTANDARD2_0 || NETSTANDARD2_1 - if (typeof(TAny) == typeof(JsonObject) || typeof(TAny) == typeof(JsonArray)) - { - lengthCache ??= new NpgsqlLengthCache(1); - if (lengthCache.IsPopulated) - return lengthCache.Get(); - - var data = SerializeJsonObject((JsonNode)(object)value); - if (parameter != null) - parameter.ConvertedValue = data; - return lengthCache.Set(data.Length + _headerLen); - } -#endif - - // User POCO, need to serialize. At least internally ArrayPool buffers are used... - var s = JsonSerializer.Serialize(value, _serializerOptions); - if (parameter != null) - parameter.ConvertedValue = s; - - return _textHandler.ValidateAndGetLength(s, ref lengthCache, parameter) + _headerLen; - } - - /// - protected override async Task WriteWithLengthCustom([DisallowNull] TAny value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - var spaceRequired = _isJsonb ? 
5 : 4; - - if (buf.WriteSpaceLeft < spaceRequired) - await buf.Flush(async, cancellationToken); - - buf.WriteInt32(ValidateAndGetLength(value, ref lengthCache, parameter)); - - if (_isJsonb) - buf.WriteByte(JsonbProtocolVersion); - - if (typeof(TAny) == typeof(string)) - await _textHandler.Write((string)(object)value, buf, lengthCache, parameter, async, cancellationToken); - else if (typeof(TAny) == typeof(char[])) - await _textHandler.Write((char[])(object)value, buf, lengthCache, parameter, async, cancellationToken); - else if (typeof(TAny) == typeof(ArraySegment)) - await _textHandler.Write((ArraySegment)(object)value, buf, lengthCache, parameter, async, cancellationToken); - else if (typeof(TAny) == typeof(char)) - await _textHandler.Write((char)(object)value, buf, lengthCache, parameter, async, cancellationToken); - else if (typeof(TAny) == typeof(byte[])) - await _textHandler.Write((byte[])(object)value, buf, lengthCache, parameter, async, cancellationToken); - else if (typeof(TAny) == typeof(ReadOnlyMemory)) - await _textHandler.Write((ReadOnlyMemory)(object)value, buf, lengthCache, parameter, async, cancellationToken); - else if (typeof(TAny) == typeof(JsonDocument)) - { - var data = parameter?.ConvertedValue != null - ? (byte[])parameter.ConvertedValue - : SerializeJsonDocument((JsonDocument)(object)value); - await buf.WriteBytesRaw(data, async, cancellationToken); - } -#if NET6_0_OR_GREATER || NETSTANDARD2_0 || NETSTANDARD2_1 - else if (typeof(TAny) == typeof(JsonObject) || typeof(TAny) == typeof(JsonArray)) - { - var data = parameter?.ConvertedValue != null - ? (byte[])parameter.ConvertedValue - : SerializeJsonObject((JsonNode)(object)value); - await buf.WriteBytesRaw(data, async, cancellationToken); - } -#endif - else - { - // User POCO, read serialized representation from the validation phase - var s = parameter?.ConvertedValue != null - ? 
(string)parameter.ConvertedValue - : JsonSerializer.Serialize(value, value.GetType(), _serializerOptions); - - await _textHandler.Write(s, buf, lengthCache, parameter, async, cancellationToken); - } - } - - /// - public override int ValidateAndGetLength(string value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthCustom(value, ref lengthCache, parameter); - - /// - public override async Task Write(string value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (_isJsonb) - { - if (buf.WriteSpaceLeft < 1) - await buf.Flush(async, cancellationToken); - buf.WriteByte(JsonbProtocolVersion); - } - - await _textHandler.Write(value, buf, lengthCache, parameter, async, cancellationToken); - } - - /// - public override int ValidateObjectAndGetLength(object value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => value switch - { - string s => ValidateAndGetLength(s, ref lengthCache, parameter), - char[] s => ValidateAndGetLength(s, ref lengthCache, parameter), - ArraySegment s => ValidateAndGetLength(s, ref lengthCache, parameter), - char s => ValidateAndGetLength(s, ref lengthCache, parameter), - byte[] s => ValidateAndGetLength(s, ref lengthCache, parameter), - ReadOnlyMemory s => ValidateAndGetLength(s, ref lengthCache, parameter), - JsonDocument jsonDocument => ValidateAndGetLength(jsonDocument, ref lengthCache, parameter), -#if NET6_0_OR_GREATER || NETSTANDARD2_0 || NETSTANDARD2_1 - JsonObject jsonObject => ValidateAndGetLength(jsonObject, ref lengthCache, parameter), - JsonArray jsonArray => ValidateAndGetLength(jsonArray, ref lengthCache, parameter), -#endif - _ => ValidateAndGetLength(value, ref lengthCache, parameter) - }; - - /// - public override async Task WriteObjectWithLength(object? value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) - { - // We call into WriteWithLength below, which assumes it as at least enough write space for the length - if (buf.WriteSpaceLeft < 4) - await buf.Flush(async, cancellationToken); - - await (value switch - { - null => WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken), - DBNull => WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken), - string s => WriteWithLengthCustom(s, buf, lengthCache, parameter, async, cancellationToken), - char[] s => WriteWithLengthCustom(s, buf, lengthCache, parameter, async, cancellationToken), - ArraySegment s => WriteWithLengthCustom(s, buf, lengthCache, parameter, async, cancellationToken), - char s => WriteWithLengthCustom(s, buf, lengthCache, parameter, async, cancellationToken), - byte[] s => WriteWithLengthCustom(s, buf, lengthCache, parameter, async, cancellationToken), - ReadOnlyMemory s => WriteWithLengthCustom(s, buf, lengthCache, parameter, async, cancellationToken), - JsonDocument jsonDocument => WriteWithLengthCustom(jsonDocument, buf, lengthCache, parameter, async, cancellationToken), -#if NET6_0_OR_GREATER || NETSTANDARD2_0 || NETSTANDARD2_1 - JsonObject jsonObject => WriteWithLengthCustom(jsonObject, buf, lengthCache, parameter, async, cancellationToken), - JsonArray jsonArray => WriteWithLengthCustom(jsonArray, buf, lengthCache, parameter, async, cancellationToken), -#endif - _ => WriteWithLengthCustom(value, buf, lengthCache, parameter, async, cancellationToken), - }); - } - - /// - protected internal override async ValueTask ReadCustom(NpgsqlReadBuffer buf, int byteLen, bool async, FieldDescription? 
fieldDescription = null) - { - if (_isJsonb) - { - await buf.Ensure(1, async); - var version = buf.ReadByte(); - if (version != JsonbProtocolVersion) - throw new NotSupportedException($"Don't know how to decode JSONB with wire format {version}, your connection is now broken"); - byteLen--; - } - - if (typeof(T) == typeof(string) || - typeof(T) == typeof(char[]) || - typeof(T) == typeof(ArraySegment) || - typeof(T) == typeof(char) || - typeof(T) == typeof(byte[]) || - typeof(T) == typeof(ReadOnlyMemory)) - { - return await _textHandler.Read(buf, byteLen, async, fieldDescription); - } - - // JsonDocument is a view over its provided buffer, so we can't return one over our internal buffer (see #2811), so we deserialize - // a string and get a JsonDocument from that. #2818 tracks improving this. - if (typeof(T) == typeof(JsonDocument)) - return (T)(object)JsonDocument.Parse(await _textHandler.Read(buf, byteLen, async, fieldDescription)); - - // User POCO - if (buf.ReadBytesLeft >= byteLen) - return JsonSerializer.Deserialize(buf.ReadSpan(byteLen), _serializerOptions)!; - -#if NET6_0_OR_GREATER - return (async - ? await JsonSerializer.DeserializeAsync(buf.GetStream(byteLen, canSeek: false), _serializerOptions) - : JsonSerializer.Deserialize(buf.GetStream(byteLen, canSeek: false), _serializerOptions))!; -#else - return JsonSerializer.Deserialize(await _textHandler.Read(buf, byteLen, async, fieldDescription), _serializerOptions)!; -#endif - } - - /// - public override ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription = null) - => ReadCustom(buf, len, async, fieldDescription); - - /// - public TextReader GetTextReader(Stream stream, NpgsqlReadBuffer buffer) - { - if (_isJsonb) - { - var version = stream.ReadByte(); - if (version != JsonbProtocolVersion) - throw new NpgsqlException($"Don't know how to decode jsonb with wire format {version}, your connection is now broken"); - } - - return _textHandler.GetTextReader(stream, buffer); - } - - byte[] SerializeJsonDocument(JsonDocument document) - { - // TODO: Writing is currently really inefficient - please don't criticize :) - // We need to implement one-pass writing to serialize directly to the buffer (or just switch to pipelines). - using var stream = new MemoryStream(); - using var writer = new Utf8JsonWriter(stream); - document.WriteTo(writer); - writer.Flush(); - return stream.ToArray(); - } - -#if NET6_0_OR_GREATER || NETSTANDARD2_0 || NETSTANDARD2_1 - byte[] SerializeJsonObject(JsonNode jsonObject) - { - // TODO: Writing is currently really inefficient - please don't criticize :) - // We need to implement one-pass writing to serialize directly to the buffer (or just switch to pipelines). - using var stream = new MemoryStream(); - using var writer = new Utf8JsonWriter(stream); - jsonObject.WriteTo(writer); - writer.Flush(); - return stream.ToArray(); - } -#endif -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/JsonTextHandler.cs b/src/Npgsql/Internal/TypeHandlers/JsonTextHandler.cs new file mode 100644 index 0000000000..ef1fa0593b --- /dev/null +++ b/src/Npgsql/Internal/TypeHandlers/JsonTextHandler.cs @@ -0,0 +1,215 @@ +using System; +using System.Diagnostics.CodeAnalysis; +using System.IO; +using System.Text; +using System.Threading; +using System.Threading.Tasks; +using Npgsql.BackendMessages; +using Npgsql.Internal.TypeHandling; +using Npgsql.PostgresTypes; + +namespace Npgsql.Internal.TypeHandlers; + +/// +/// A text-only type handler for the PostgreSQL json and jsonb data type. 
This handler does not support serialization/deserialization +/// with System.Text.Json or Json.NET. +/// +/// +/// See https://www.postgresql.org/docs/current/datatype-json.html. +/// +/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it +/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. +/// Use it at your own risk. +/// +public class JsonTextHandler : NpgsqlTypeHandler, ITextReaderHandler +{ + protected TextHandler TextHandler { get; } + readonly bool _isJsonb; + readonly int _headerLen; + + internal override bool PreferTextWrite => false; + + /// + /// Prepended to the string in the wire encoding + /// + const byte JsonbProtocolVersion = 1; + + /// + public JsonTextHandler(PostgresType postgresType, Encoding encoding, bool isJsonb) + : base(postgresType) + { + _isJsonb = isJsonb; + _headerLen = isJsonb ? 1 : 0; + TextHandler = new TextHandler(postgresType, encoding); + } + + protected bool IsSupportedAsText() + => typeof(T) == typeof(string) || + typeof(T) == typeof(char[]) || + typeof(T) == typeof(ArraySegment) || + typeof(T) == typeof(char) || + typeof(T) == typeof(byte[]) || + typeof(T) == typeof(ReadOnlyMemory); + + protected bool IsSupported(Type type) + => type == typeof(string) || + type == typeof(char[]) || + type == typeof(ArraySegment) || + type == typeof(char) || + type == typeof(byte[]) || + type == typeof(ReadOnlyMemory); + + protected bool TryValidateAndGetLengthCustom( + [DisallowNull] TAny value, + ref NpgsqlLengthCache? lengthCache, + NpgsqlParameter? parameter, + out int length) + { + if (IsSupportedAsText()) + { + length = TextHandler.ValidateAndGetLength(value, ref lengthCache, parameter) + _headerLen; + return true; + } + + length = 0; + return false; + } + + /// + protected internal override int ValidateAndGetLengthCustom([DisallowNull] TAny value, ref NpgsqlLengthCache? lengthCache, + NpgsqlParameter? 
parameter) + => IsSupportedAsText() + ? TextHandler.ValidateAndGetLength(value, ref lengthCache, parameter) + _headerLen + : throw new InvalidCastException( + $"Can't write CLR type {value.GetType()}. " + + "You may need to use the System.Text.Json or Json.NET plugins, see the docs for more information."); + + protected override async Task WriteWithLengthCustom( + [DisallowNull] TAny value, + NpgsqlWriteBuffer buf, + NpgsqlLengthCache? lengthCache, + NpgsqlParameter? parameter, + bool async, + CancellationToken cancellationToken) + { + var spaceRequired = _isJsonb ? 5 : 4; + + if (buf.WriteSpaceLeft < spaceRequired) + await buf.Flush(async, cancellationToken); + + buf.WriteInt32(ValidateAndGetLength(value, ref lengthCache, parameter)); + + if (_isJsonb) + buf.WriteByte(JsonbProtocolVersion); + + if (typeof(TAny) == typeof(string)) + await TextHandler.Write((string)(object)value, buf, lengthCache, parameter, async, cancellationToken); + else if (typeof(TAny) == typeof(char[])) + await TextHandler.Write((char[])(object)value, buf, lengthCache, parameter, async, cancellationToken); + else if (typeof(TAny) == typeof(ArraySegment)) + await TextHandler.Write((ArraySegment)(object)value, buf, lengthCache, parameter, async, cancellationToken); + else if (typeof(TAny) == typeof(char)) + await TextHandler.Write((char)(object)value, buf, lengthCache, parameter, async, cancellationToken); + else if (typeof(TAny) == typeof(byte[])) + await TextHandler.Write((byte[])(object)value, buf, lengthCache, parameter, async, cancellationToken); + else if (typeof(TAny) == typeof(ReadOnlyMemory)) + await TextHandler.Write((ReadOnlyMemory)(object)value, buf, lengthCache, parameter, async, cancellationToken); + else throw new InvalidCastException( + $"Can't write CLR type {value.GetType()}. " + + "You may need to use the System.Text.Json or Json.NET plugins, see the docs for more information."); + } + + /// + public override int ValidateAndGetLength(string value, ref NpgsqlLengthCache? 
lengthCache, NpgsqlParameter? parameter) + => ValidateAndGetLengthCustom(value, ref lengthCache, parameter); + + /// + public override async Task Write(string value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) + { + if (_isJsonb) + { + if (buf.WriteSpaceLeft < 1) + await buf.Flush(async, cancellationToken); + buf.WriteByte(JsonbProtocolVersion); + } + + await TextHandler.Write(value, buf, lengthCache, parameter, async, cancellationToken); + } + + /// + public override int ValidateObjectAndGetLength(object value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) + => value switch + { + string s => ValidateAndGetLength(s, ref lengthCache, parameter), + char[] s => ValidateAndGetLength(s, ref lengthCache, parameter), + ArraySegment s => ValidateAndGetLength(s, ref lengthCache, parameter), + char s => ValidateAndGetLength(s, ref lengthCache, parameter), + byte[] s => ValidateAndGetLength(s, ref lengthCache, parameter), + ReadOnlyMemory s => ValidateAndGetLength(s, ref lengthCache, parameter), + + _ => throw new InvalidCastException( + $"Can't write CLR type {value.GetType()}. " + + "You may need to use the System.Text.Json or Json.NET plugins, see the docs for more information.") + }; + + /// + public override async Task WriteObjectWithLength(object? value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) + { + // We call into WriteWithLength below, which assumes it as at least enough write space for the length + if (buf.WriteSpaceLeft < 4) + await buf.Flush(async, cancellationToken); + + await (value switch + { + null => WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken), + DBNull => WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken), + string s => WriteWithLengthCustom(s, buf, lengthCache, parameter, async, cancellationToken), + char[] s => WriteWithLengthCustom(s, buf, lengthCache, parameter, async, cancellationToken), + ArraySegment s => WriteWithLengthCustom(s, buf, lengthCache, parameter, async, cancellationToken), + char s => WriteWithLengthCustom(s, buf, lengthCache, parameter, async, cancellationToken), + byte[] s => WriteWithLengthCustom(s, buf, lengthCache, parameter, async, cancellationToken), + ReadOnlyMemory s => WriteWithLengthCustom(s, buf, lengthCache, parameter, async, cancellationToken), + + _ => throw new InvalidCastException( + $"Can't write CLR type {value.GetType()}. " + + "You may need to use the System.Text.Json or Json.NET plugins, see the docs for more information.") + }); + } + + /// + protected internal override async ValueTask ReadCustom(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) + { + if (_isJsonb) + { + await buf.Ensure(1, async); + var version = buf.ReadByte(); + if (version != JsonbProtocolVersion) + throw new NotSupportedException($"Don't know how to decode JSONB with wire format {version}, your connection is now broken"); + len--; + } + + if (IsSupportedAsText()) + return await TextHandler.Read(buf, len, async, fieldDescription); + + throw new InvalidCastException( + $"Can't read JSON as CLR type {typeof(T)}. 
" + + "You may need to use the System.Text.Json or Json.NET plugins, see the docs for more information."); + } + + /// + public override ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) + => ReadCustom(buf, len, async, fieldDescription); + + /// + public TextReader GetTextReader(Stream stream, NpgsqlReadBuffer buffer) + { + if (_isJsonb) + { + var version = stream.ReadByte(); + if (version != JsonbProtocolVersion) + throw new NpgsqlException($"Don't know how to decode jsonb with wire format {version}, your connection is now broken"); + } + + return TextHandler.GetTextReader(stream, buffer); + } +} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/SystemTextJsonHandler.cs b/src/Npgsql/Internal/TypeHandlers/SystemTextJsonHandler.cs new file mode 100644 index 0000000000..34f35b0d6f --- /dev/null +++ b/src/Npgsql/Internal/TypeHandlers/SystemTextJsonHandler.cs @@ -0,0 +1,222 @@ +using System; +using System.Diagnostics.CodeAnalysis; +using System.IO; +using System.Text; +using System.Text.Json; +using System.Threading; +using System.Threading.Tasks; +using Npgsql.BackendMessages; +using Npgsql.Internal.TypeHandling; +using Npgsql.PostgresTypes; + +#if NET6_0_OR_GREATER || NETSTANDARD2_0 || NETSTANDARD2_1 +using System.Text.Json.Nodes; +#endif + +namespace Npgsql.Internal.TypeHandlers; + +/// +/// A type handler for the PostgreSQL json and jsonb data type which uses System.Text.Json. +/// +/// +/// See https://www.postgresql.org/docs/current/datatype-json.html. +/// +/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it +/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. +/// Use it at your own risk. 
+/// +public class SystemTextJsonHandler : JsonTextHandler +{ + readonly JsonSerializerOptions _serializerOptions; + readonly bool _isJsonb; + readonly int _headerLen; + + /// + /// Prepended to the string in the wire encoding + /// + const byte JsonbProtocolVersion = 1; + + static readonly JsonSerializerOptions DefaultSerializerOptions = new(); + + /// + public SystemTextJsonHandler(PostgresType postgresType, Encoding encoding, bool isJsonb, JsonSerializerOptions? serializerOptions = null) + : base(postgresType, encoding, isJsonb) + { + _serializerOptions = serializerOptions ?? DefaultSerializerOptions; + _isJsonb = isJsonb; + _headerLen = isJsonb ? 1 : 0; + } + + /// + protected internal override int ValidateAndGetLengthCustom([DisallowNull] TAny value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) + { + if (IsSupportedAsText()) + return base.ValidateAndGetLengthCustom(value, ref lengthCache, parameter); + + if (typeof(TAny) == typeof(JsonDocument)) + { + lengthCache ??= new NpgsqlLengthCache(1); + if (lengthCache.IsPopulated) + return lengthCache.Get(); + + var data = SerializeJsonDocument((JsonDocument)(object)value); + if (parameter != null) + parameter.ConvertedValue = data; + return lengthCache.Set(data.Length + _headerLen); + } + +#if NET6_0_OR_GREATER || NETSTANDARD2_0 || NETSTANDARD2_1 + if (typeof(TAny) == typeof(JsonObject) || typeof(TAny) == typeof(JsonArray)) + { + lengthCache ??= new NpgsqlLengthCache(1); + if (lengthCache.IsPopulated) + return lengthCache.Get(); + + var data = SerializeJsonObject((JsonNode)(object)value); + if (parameter != null) + parameter.ConvertedValue = data; + return lengthCache.Set(data.Length + _headerLen); + } +#endif + + // User POCO, need to serialize. At least internally ArrayPool buffers are used... 
+ var s = JsonSerializer.Serialize(value, _serializerOptions); + if (parameter != null) + parameter.ConvertedValue = s; + + return TextHandler.ValidateAndGetLength(s, ref lengthCache, parameter) + _headerLen; + } + + /// + protected override async Task WriteWithLengthCustom([DisallowNull] TAny value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken) + { + if (IsSupportedAsText()) + { + await base.WriteWithLengthCustom(value, buf, lengthCache, parameter, async, cancellationToken); + return; + } + + var spaceRequired = _isJsonb ? 5 : 4; + + if (buf.WriteSpaceLeft < spaceRequired) + await buf.Flush(async, cancellationToken); + + buf.WriteInt32(ValidateAndGetLength(value, ref lengthCache, parameter)); + + if (_isJsonb) + buf.WriteByte(JsonbProtocolVersion); + + if (typeof(TAny) == typeof(JsonDocument)) + { + var data = parameter?.ConvertedValue != null + ? (byte[])parameter.ConvertedValue + : SerializeJsonDocument((JsonDocument)(object)value); + await buf.WriteBytesRaw(data, async, cancellationToken); + } +#if NET6_0_OR_GREATER || NETSTANDARD2_0 || NETSTANDARD2_1 + else if (typeof(TAny) == typeof(JsonObject) || typeof(TAny) == typeof(JsonArray)) + { + var data = parameter?.ConvertedValue != null + ? (byte[])parameter.ConvertedValue + : SerializeJsonObject((JsonNode)(object)value); + await buf.WriteBytesRaw(data, async, cancellationToken); + } +#endif + else + { + // User POCO, read serialized representation from the validation phase + var s = parameter?.ConvertedValue != null + ? (string)parameter.ConvertedValue + : JsonSerializer.Serialize(value, value.GetType(), _serializerOptions); + + await TextHandler.Write(s, buf, lengthCache, parameter, async, cancellationToken); + } + } + + /// + public override int ValidateObjectAndGetLength(object value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) + => IsSupported(value.GetType()) + ? 
base.ValidateObjectAndGetLength(value, ref lengthCache, parameter) + : value switch + { + JsonDocument jsonDocument => ValidateAndGetLengthCustom(jsonDocument, ref lengthCache, parameter), +#if NET6_0_OR_GREATER || NETSTANDARD2_0 || NETSTANDARD2_1 + JsonObject jsonObject => ValidateAndGetLengthCustom(jsonObject, ref lengthCache, parameter), + JsonArray jsonArray => ValidateAndGetLengthCustom(jsonArray, ref lengthCache, parameter), +#endif + _ => ValidateAndGetLengthCustom(value, ref lengthCache, parameter) + }; + + /// + public override Task WriteObjectWithLength(object? value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) + => value is null or DBNull || IsSupported(value.GetType()) + ? base.WriteObjectWithLength(value, buf, lengthCache, parameter, async, cancellationToken) + : value switch + { + JsonDocument jsonDocument => WriteWithLengthCustom(jsonDocument, buf, lengthCache, parameter, async, cancellationToken), +#if NET6_0_OR_GREATER || NETSTANDARD2_0 || NETSTANDARD2_1 + JsonObject jsonObject => WriteWithLengthCustom(jsonObject, buf, lengthCache, parameter, async, cancellationToken), + JsonArray jsonArray => WriteWithLengthCustom(jsonArray, buf, lengthCache, parameter, async, cancellationToken), +#endif + _ => WriteWithLengthCustom(value, buf, lengthCache, parameter, async, cancellationToken), + }; + + /// + protected internal override async ValueTask ReadCustom(NpgsqlReadBuffer buf, int byteLen, bool async, FieldDescription? 
fieldDescription) + { + if (IsSupportedAsText()) + { + return await base.ReadCustom(buf, byteLen, async, fieldDescription); + } + + if (_isJsonb) + { + await buf.Ensure(1, async); + var version = buf.ReadByte(); + if (version != JsonbProtocolVersion) + throw new NotSupportedException($"Don't know how to decode JSONB with wire format {version}, your connection is now broken"); + byteLen--; + } + + // JsonDocument is a view over its provided buffer, so we can't return one over our internal buffer (see #2811), so we deserialize + // a string and get a JsonDocument from that. #2818 tracks improving this. + if (typeof(T) == typeof(JsonDocument)) + return (T)(object)JsonDocument.Parse(await TextHandler.Read(buf, byteLen, async, fieldDescription)); + + // User POCO + if (buf.ReadBytesLeft >= byteLen) + return JsonSerializer.Deserialize(buf.ReadSpan(byteLen), _serializerOptions)!; + +#if NET6_0_OR_GREATER + return (async + ? await JsonSerializer.DeserializeAsync(buf.GetStream(byteLen, canSeek: false), _serializerOptions) + : JsonSerializer.Deserialize(buf.GetStream(byteLen, canSeek: false), _serializerOptions))!; +#else + return JsonSerializer.Deserialize(await TextHandler.Read(buf, byteLen, async, fieldDescription), _serializerOptions)!; +#endif + } + + byte[] SerializeJsonDocument(JsonDocument document) + { + // TODO: Writing is currently really inefficient - please don't criticize :) + // We need to implement one-pass writing to serialize directly to the buffer (or just switch to pipelines). + using var stream = new MemoryStream(); + using var writer = new Utf8JsonWriter(stream); + document.WriteTo(writer); + writer.Flush(); + return stream.ToArray(); + } + +#if NET6_0_OR_GREATER || NETSTANDARD2_0 || NETSTANDARD2_1 + byte[] SerializeJsonObject(JsonNode jsonObject) + { + // TODO: Writing is currently really inefficient - please don't criticize :) + // We need to implement one-pass writing to serialize directly to the buffer (or just switch to pipelines). 
+ using var stream = new MemoryStream(); + using var writer = new Utf8JsonWriter(stream); + jsonObject.WriteTo(writer); + writer.Flush(); + return stream.ToArray(); + } +#endif +} \ No newline at end of file diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index ab058de62d..a4f57b62f5 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -1 +1,3 @@ #nullable enable +Npgsql.NpgsqlJsonExtensions +static Npgsql.NpgsqlJsonExtensions.UseSystemTextJson(this Npgsql.TypeMapping.INpgsqlTypeMapper! mapper, System.Text.Json.JsonSerializerOptions? serializerOptions = null, System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! diff --git a/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs b/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs index f350a793d4..606c7b446a 100644 --- a/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs +++ b/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs @@ -3,12 +3,10 @@ using System.Collections.Generic; using System.Collections.Immutable; using System.Collections.Specialized; -using System.Data; using System.IO; using System.Net; using System.Net.NetworkInformation; using System.Numerics; -using System.Text.Json; using Npgsql.Internal; using Npgsql.Internal.TypeHandlers; using Npgsql.Internal.TypeHandlers.DateTimeHandlers; @@ -23,10 +21,6 @@ using NpgsqlTypes; using static Npgsql.Util.Statics; -#if NET6_0_OR_GREATER || NETSTANDARD2_0 || NETSTANDARD2_1 -using System.Text.Json.Nodes; -#endif - namespace Npgsql.TypeMapping; sealed class BuiltInTypeHandlerResolver : TypeHandlerResolver @@ -58,11 +52,7 @@ sealed class BuiltInTypeHandlerResolver : TypeHandlerResolver { "name", new(NpgsqlDbType.Name, "name") }, { "refcursor", new(NpgsqlDbType.Refcursor, "refcursor") }, { "citext", new(NpgsqlDbType.Citext, "citext") }, - { "jsonb", new(NpgsqlDbType.Jsonb, "jsonb", typeof(JsonDocument) -#if NET6_0_OR_GREATER 
|| NETSTANDARD2_0 || NETSTANDARD2_1 - , typeof(JsonObject), typeof(JsonArray) -#endif - ) }, + { "jsonb", new(NpgsqlDbType.Jsonb, "jsonb") }, { "json", new(NpgsqlDbType.Json, "json") }, { "jsonpath", new(NpgsqlDbType.JsonPath, "jsonpath") }, @@ -192,8 +182,8 @@ sealed class BuiltInTypeHandlerResolver : TypeHandlerResolver TextHandler? _nameHandler; TextHandler? _refcursorHandler; TextHandler? _citextHandler; - JsonHandler? _jsonbHandler; // Note that old version of PG (and Redshift) don't have jsonb - JsonHandler? _jsonHandler; + JsonTextHandler? _jsonbHandler; // Note that old version of PG (and Redshift) don't have jsonb + JsonTextHandler? _jsonHandler; JsonPathHandler? _jsonPathHandler; // Date/time types @@ -404,11 +394,6 @@ static BuiltInTypeHandlerResolver() { typeof(char[]), "text" }, { typeof(char), "text" }, { typeof(ArraySegment), "text" }, - { typeof(JsonDocument), "jsonb" }, -#if NET6_0_OR_GREATER || NETSTANDARD2_0 || NETSTANDARD2_1 - { typeof(JsonObject), "jsonb" }, - { typeof(JsonArray), "jsonb" }, -#endif // Date/time types // The DateTime entry is for LegacyTimestampBehavior mode only. In regular mode we resolve through @@ -609,14 +594,6 @@ static DateTimeKind GetMultirangeKind(IList> multirange) return _textHandler; if (typeof(T) == typeof(ArraySegment)) return _textHandler; - if (typeof(T) == typeof(JsonDocument)) - return JsonbHandler(); -#if NET6_0_OR_GREATER || NETSTANDARD2_0 || NETSTANDARD2_1 - if (typeof(T) == typeof(JsonObject)) - return JsonbHandler(); - if (typeof(T) == typeof(JsonArray)) - return JsonbHandler(); -#endif // Date/time types // No resolution for DateTime, since that's value-dependent (Kind) @@ -702,8 +679,8 @@ static DateTimeKind GetMultirangeKind(IList> multirange) NpgsqlTypeHandler? CitextHandler() => _citextHandler ??= _databaseInfo.TryGetPostgresTypeByName("citext", out var pgType) ? 
new TextHandler(pgType, _connector.TextEncoding) : null; - NpgsqlTypeHandler JsonbHandler() => _jsonbHandler ??= new JsonHandler(PgType("jsonb"), _connector.TextEncoding, isJsonb: true); - NpgsqlTypeHandler JsonHandler() => _jsonHandler ??= new JsonHandler(PgType("json"), _connector.TextEncoding, isJsonb: false); + NpgsqlTypeHandler JsonbHandler() => _jsonbHandler ??= new JsonTextHandler(PgType("jsonb"), _connector.TextEncoding, isJsonb: true); + NpgsqlTypeHandler JsonHandler() => _jsonHandler ??= new JsonTextHandler(PgType("json"), _connector.TextEncoding, isJsonb: false); NpgsqlTypeHandler JsonPathHandler() => _jsonPathHandler ??= new JsonPathHandler(PgType("jsonpath"), _connector.TextEncoding); // Date/time types diff --git a/src/Npgsql/TypeMapping/JsonTypeHandlerResolver.cs b/src/Npgsql/TypeMapping/JsonTypeHandlerResolver.cs new file mode 100644 index 0000000000..8522a48af5 --- /dev/null +++ b/src/Npgsql/TypeMapping/JsonTypeHandlerResolver.cs @@ -0,0 +1,85 @@ +using System; +using System.Collections.Generic; +using System.Text.Json; +using Npgsql.Internal; +using Npgsql.Internal.TypeHandlers; +using Npgsql.Internal.TypeHandling; +using Npgsql.PostgresTypes; +using NpgsqlTypes; + +#if NET6_0_OR_GREATER || NETSTANDARD2_0 || NETSTANDARD2_1 +using System.Text.Json.Nodes; +#endif + +namespace Npgsql.TypeMapping; + +sealed class JsonTypeHandlerResolver : TypeHandlerResolver +{ + readonly NpgsqlDatabaseInfo _databaseInfo; + readonly SystemTextJsonHandler? _jsonbHandler; // Note that old version of PG (and Redshift) don't have jsonb + readonly SystemTextJsonHandler? _jsonHandler; + readonly Dictionary? _userClrTypes; + + internal JsonTypeHandlerResolver( + NpgsqlConnector connector, + Dictionary? 
userClrTypes, + JsonSerializerOptions serializerOptions) + { + _databaseInfo = connector.DatabaseInfo; + + _jsonbHandler = new SystemTextJsonHandler(PgType("jsonb"), connector.TextEncoding, isJsonb: true, serializerOptions); + _jsonHandler = new SystemTextJsonHandler(PgType("json"), connector.TextEncoding, isJsonb: false, serializerOptions); + + _userClrTypes = userClrTypes; + } + + public override NpgsqlTypeHandler? ResolveByDataTypeName(string typeName) + => typeName switch + { + "jsonb" => _jsonbHandler, + "json" => _jsonHandler, + _ => null + }; + + public override NpgsqlTypeHandler? ResolveByClrType(Type type) + => ClrTypeToDataTypeName(type, _userClrTypes) is { } dataTypeName && ResolveByDataTypeName(dataTypeName) is { } handler + ? handler + : null; + + internal static string? ClrTypeToDataTypeName(Type type, Dictionary? clrTypes) + => type == typeof(JsonDocument) +#if NET6_0_OR_GREATER || NETSTANDARD2_0 || NETSTANDARD2_1 + || type == typeof(JsonObject) || type == typeof(JsonArray) +#endif + ? "jsonb" + : clrTypes is not null && clrTypes.TryGetValue(type, out var dataTypeName) ? dataTypeName : null; + + public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) + => DoGetMappingByDataTypeName(dataTypeName); + + internal static TypeMappingInfo? DoGetMappingByDataTypeName(string dataTypeName) + => dataTypeName switch + { + "jsonb" => new(NpgsqlDbType.Jsonb, "jsonb", typeof(JsonDocument) +#if NET6_0_OR_GREATER || NETSTANDARD2_0 || NETSTANDARD2_1 + , typeof(JsonObject), typeof(JsonArray) +#endif + ), + "json" => new(NpgsqlDbType.Json, "json"), + _ => null + }; + + public override NpgsqlTypeHandler? 
ResolveValueTypeGenerically(T value) + { + if (typeof(T) == typeof(JsonDocument)) + return _jsonbHandler; +#if NET6_0_OR_GREATER || NETSTANDARD2_0 || NETSTANDARD2_1 + if (typeof(T) == typeof(JsonObject) || typeof(T) == typeof(JsonArray)) + return _jsonbHandler; +#endif + + return null; + } + + PostgresType PgType(string pgTypeName) => _databaseInfo.GetPostgresTypeByName(pgTypeName); +} diff --git a/src/Npgsql/TypeMapping/JsonTypeHandlerResolverFactory.cs b/src/Npgsql/TypeMapping/JsonTypeHandlerResolverFactory.cs new file mode 100644 index 0000000000..d5221cad28 --- /dev/null +++ b/src/Npgsql/TypeMapping/JsonTypeHandlerResolverFactory.cs @@ -0,0 +1,50 @@ +using System; +using System.Collections.Generic; +using System.Text.Json; +using Npgsql.Internal; +using Npgsql.Internal.TypeHandling; + +namespace Npgsql.TypeMapping; + +sealed class JsonTypeHandlerResolverFactory : TypeHandlerResolverFactory +{ + readonly Type[] _jsonbClrTypes; + readonly Type[] _jsonClrTypes; + readonly JsonSerializerOptions _settings; + readonly Dictionary? _userClrTypes; + + public JsonTypeHandlerResolverFactory( + Type[]? jsonbClrTypes, + Type[]? jsonClrTypes, + JsonSerializerOptions? settings) + { + _jsonbClrTypes = jsonbClrTypes ?? Array.Empty(); + _jsonClrTypes = jsonClrTypes ?? Array.Empty(); + _settings = settings ?? new JsonSerializerOptions(); + + if (jsonbClrTypes is not null) + { + _userClrTypes ??= new(); + + foreach (var type in jsonbClrTypes) + _userClrTypes[type] = "jsonb"; + } + + if (jsonClrTypes is not null) + { + _userClrTypes ??= new(); + + foreach (var type in jsonClrTypes) + _userClrTypes[type] = "json"; + } + } + + public override TypeHandlerResolver Create(NpgsqlConnector connector) + => new JsonTypeHandlerResolver(connector, _userClrTypes, _settings); + + public override string? GetDataTypeNameByClrType(Type type) + => JsonTypeHandlerResolver.ClrTypeToDataTypeName(type, _userClrTypes); + + public override TypeMappingInfo? 
GetMappingByDataTypeName(string dataTypeName) + => JsonTypeHandlerResolver.DoGetMappingByDataTypeName(dataTypeName); +} diff --git a/src/Npgsql/TypeMapping/NpgsqlJsonExtensions.cs b/src/Npgsql/TypeMapping/NpgsqlJsonExtensions.cs new file mode 100644 index 0000000000..2a84cc4c02 --- /dev/null +++ b/src/Npgsql/TypeMapping/NpgsqlJsonExtensions.cs @@ -0,0 +1,34 @@ +using System; +using System.Text.Json; +using Npgsql.TypeMapping; +using NpgsqlTypes; + +// ReSharper disable once CheckNamespace +namespace Npgsql; + +/// +/// Extension allowing adding the System.Text.Json plugin to an Npgsql type mapper. +/// +public static class NpgsqlJsonExtensions +{ + /// + /// Sets up System.Text.Json mappings for the PostgreSQL json and jsonb types. + /// + /// The type mapper to set up. + /// Options to customize JSON serialization and deserialization. + /// + /// A list of CLR types to map to PostgreSQL jsonb (no need to specify ). + /// + /// + /// A list of CLR types to map to PostgreSQL json (no need to specify ). + /// + public static INpgsqlTypeMapper UseSystemTextJson( + this INpgsqlTypeMapper mapper, + JsonSerializerOptions? serializerOptions = null, + Type[]? jsonbClrTypes = null, + Type[]? 
jsonClrTypes = null) + { + mapper.AddTypeResolverFactory(new JsonTypeHandlerResolverFactory(jsonbClrTypes, jsonClrTypes, serializerOptions)); + return mapper; + } +} diff --git a/test/Npgsql.PluginTests/JsonNetTests.cs b/test/Npgsql.PluginTests/JsonNetTests.cs index e85f86f736..0eecf3ab8c 100644 --- a/test/Npgsql.PluginTests/JsonNetTests.cs +++ b/test/Npgsql.PluginTests/JsonNetTests.cs @@ -177,7 +177,7 @@ await AssertType( public async Task Bug3464() { var dataSourceBuilder = CreateDataSourceBuilder(); - dataSourceBuilder.UseJsonNet(new[] { typeof(Bug3464Class) }); + dataSourceBuilder.UseJsonNet(jsonbClrTypes: new[] { typeof(Bug3464Class) }); await using var dataSource = dataSourceBuilder.Build(); var expected = new Bug3464Class { SomeString = new string('5', 8174) }; diff --git a/test/Npgsql.Tests/Support/TestBase.cs b/test/Npgsql.Tests/Support/TestBase.cs index 0bea07a7b8..dae42ba644 100644 --- a/test/Npgsql.Tests/Support/TestBase.cs +++ b/test/Npgsql.Tests/Support/TestBase.cs @@ -60,7 +60,7 @@ public async Task AssertType( await using var connection = await dataSource.OpenConnectionAsync(); return await AssertType(connection, value, sqlLiteral, pgTypeName, npgsqlDbType, dbType, inferredDbType, isDefaultForReading, - isDefaultForWriting, isDefault, isNpgsqlDbTypeInferredFromClrType); + isDefaultForWriting, isDefault, isNpgsqlDbTypeInferredFromClrType, comparer); } public async Task AssertType( @@ -90,6 +90,23 @@ public async Task AssertTypeRead(string sqlLiteral, string pgTypeName, T e return await AssertTypeRead(connection, sqlLiteral, pgTypeName, expected, isDefault); } + public async Task AssertTypeWrite( + NpgsqlDataSource dataSource, + T value, + string expectedSqlLiteral, + string pgTypeName, + NpgsqlDbType npgsqlDbType, + DbType? dbType = null, + DbType? 
inferredDbType = null, + bool isDefault = true, + bool isNpgsqlDbTypeInferredFromClrType = true) + { + await using var connection = await dataSource.OpenConnectionAsync(); + + await AssertTypeWrite(connection, () => value, expectedSqlLiteral, pgTypeName, npgsqlDbType, dbType, inferredDbType, isDefault, + isNpgsqlDbTypeInferredFromClrType); + } + public Task AssertTypeWrite( T value, string expectedSqlLiteral, diff --git a/test/Npgsql.Tests/Types/JsonTests.cs b/test/Npgsql.Tests/Types/JsonTests.cs index 649f46ba26..88511e9e67 100644 --- a/test/Npgsql.Tests/Types/JsonTests.cs +++ b/test/Npgsql.Tests/Types/JsonTests.cs @@ -49,20 +49,22 @@ public async Task As_char_array() [Test] public async Task As_bytes() - => await AssertType(Encoding.ASCII.GetBytes(@"{""K"": ""V""}"), @"{""K"": ""V""}", PostgresType, NpgsqlDbType, isDefault: false); + => await AssertType(@"{""K"": ""V""}"u8.ToArray(), @"{""K"": ""V""}", PostgresType, NpgsqlDbType, isDefault: false); [Test] public async Task Write_as_ReadOnlyMemory_of_byte() - => await AssertTypeWrite(new ReadOnlyMemory(Encoding.ASCII.GetBytes(@"{""K"": ""V""}")), @"{""K"": ""V""}", PostgresType, NpgsqlDbType, isDefault: false); + => await AssertTypeWrite(new ReadOnlyMemory(@"{""K"": ""V""}"u8.ToArray()), @"{""K"": ""V""}", PostgresType, NpgsqlDbType, + isDefault: false); [Test] public async Task Write_as_ArraySegment_of_char() - => await AssertTypeWrite( - new ArraySegment(@"{""K"": ""V""}".ToCharArray()), @"{""K"": ""V""}", PostgresType, NpgsqlDbType, isDefault: false); + => await AssertTypeWrite(new ArraySegment(@"{""K"": ""V""}".ToCharArray()), @"{""K"": ""V""}", PostgresType, NpgsqlDbType, + isDefault: false); [Test] public async Task As_JsonDocument() => await AssertType( + JsonDataSource, JsonDocument.Parse(@"{""K"": ""V""}"), IsJsonb ? 
@"{""K"": ""V""}" : @"{""K"":""V""}", PostgresType, @@ -70,10 +72,18 @@ public async Task As_JsonDocument() isDefault: false, comparer: (x, y) => x.RootElement.GetProperty("K").GetString() == y.RootElement.GetProperty("K").GetString()); + [Test] + public async Task As_JsonDocument_supported_only_with_SystemTextJson() + => await AssertTypeUnsupported( + JsonDocument.Parse(@"{""K"": ""V""}"), + @"{""K"": ""V""}", + PostgresType); + #if NET6_0_OR_GREATER [Test] public Task Roundtrip_JsonObject() => AssertType( + JsonDataSource, new JsonObject { ["Bar"] = 8 }, IsJsonb ? @"{""Bar"": 8}" : @"{""Bar"":8}", PostgresType, @@ -87,6 +97,7 @@ public Task Roundtrip_JsonObject() [Test] public Task Roundtrip_JsonArray() => AssertType( + JsonDataSource, new JsonArray { 1, 2, 3 }, IsJsonb ? "[1, 2, 3]" : "[1,2,3]", PostgresType, @@ -101,13 +112,13 @@ public Task Roundtrip_JsonArray() [Test] public async Task As_poco() => await AssertType( + JsonDataSource, new WeatherForecast { Date = new DateTime(2019, 9, 1), Summary = "Partly cloudy", TemperatureC = 10 }, - // Warning: in theory jsonb order and whitespace may change across versions IsJsonb ? 
@"{""Date"": ""2019-09-01T00:00:00"", ""Summary"": ""Partly cloudy"", ""TemperatureC"": 10}" : @"{""Date"":""2019-09-01T00:00:00"",""TemperatureC"":10,""Summary"":""Partly cloudy""}", @@ -122,6 +133,7 @@ public async Task As_poco_long() var bigString = new string('x', Math.Max(conn.Settings.ReadBufferSize, conn.Settings.WriteBufferSize)); await AssertType( + JsonDataSource, new WeatherForecast { Date = new DateTime(2019, 9, 1), @@ -137,6 +149,18 @@ await AssertType( isDefault: false); } + [Test] + public async Task As_poco_supported_only_with_SystemTextJson() + => await AssertTypeUnsupported( + new WeatherForecast + { + Date = new DateTime(2019, 9, 1), + Summary = "Partly cloudy", + TemperatureC = 10 + }, + @"{""Date"": ""2019-09-01T00:00:00"", ""Summary"": ""Partly cloudy"", ""TemperatureC"": 10}", + PostgresType); + record WeatherForecast { public DateTime Date { get; set; } @@ -150,18 +174,18 @@ record WeatherForecast [IssueLink("https://github.com/npgsql/efcore.pg/issues/1082")] public async Task Can_read_two_json_documents() { - using var conn = await OpenConnectionAsync(); + await using var conn = await JsonDataSource.OpenConnectionAsync(); JsonDocument car; - using (var cmd = new NpgsqlCommand(@"SELECT '{""key"" : ""foo""}'::jsonb", conn)) - using (var reader = await cmd.ExecuteReaderAsync()) + await using (var cmd = new NpgsqlCommand(@"SELECT '{""key"" : ""foo""}'::jsonb", conn)) + await using (var reader = await cmd.ExecuteReaderAsync()) { reader.Read(); car = reader.GetFieldValue(0); } - using (var cmd = new NpgsqlCommand(@"SELECT '{""key"" : ""bar""}'::jsonb", conn)) - using (var reader = await cmd.ExecuteReaderAsync()) + await using (var cmd = new NpgsqlCommand(@"SELECT '{""key"" : ""bar""}'::jsonb", conn)) + await using (var reader = await cmd.ExecuteReaderAsync()) { reader.Read(); reader.GetFieldValue(0); @@ -179,7 +203,7 @@ public async Task Write_jsonobject_array_without_npgsqldbtype() if (!IsJsonb) return; - await using var conn = await 
OpenConnectionAsync(); + await using var conn = await JsonDataSource.OpenConnectionAsync(); var tableName = await TestUtil.CreateTempTable(conn, "key SERIAL PRIMARY KEY, ingredients json[]"); await using var cmd = new NpgsqlCommand { Connection = conn }; @@ -204,6 +228,67 @@ public async Task Write_jsonobject_array_without_npgsqldbtype() } #endif + [Test] + public async Task Custom_JsonSerializerOptions() + { + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.UseSystemTextJson(new JsonSerializerOptions { PropertyNamingPolicy = JsonNamingPolicy.CamelCase }); + await using var dataSource = dataSourceBuilder.Build(); + + await AssertTypeWrite( + dataSource, + new WeatherForecast + { + Date = new DateTime(2019, 9, 1), + Summary = "Partly cloudy", + TemperatureC = 10 + }, + IsJsonb + ? @"{""date"": ""2019-09-01T00:00:00"", ""summary"": ""Partly cloudy"", ""temperatureC"": 10}" + : @"{""date"":""2019-09-01T00:00:00"",""temperatureC"":10,""summary"":""Partly cloudy""}", + PostgresType, + NpgsqlDbType, + isDefault: false); + } + + [Test] + public async Task Poco_default_mapping() + { + var dataSourceBuilder = CreateDataSourceBuilder(); + if (IsJsonb) + dataSourceBuilder.UseSystemTextJson(jsonbClrTypes: new[] { typeof(WeatherForecast) }); + else + dataSourceBuilder.UseSystemTextJson(jsonClrTypes: new[] { typeof(WeatherForecast) }); + await using var dataSource = dataSourceBuilder.Build(); + + await AssertTypeWrite( + dataSource, + new WeatherForecast + { + Date = new DateTime(2019, 9, 1), + Summary = "Partly cloudy", + TemperatureC = 10 + }, + IsJsonb + ? 
@"{""Date"": ""2019-09-01T00:00:00"", ""Summary"": ""Partly cloudy"", ""TemperatureC"": 10}" + : @"{""Date"":""2019-09-01T00:00:00"",""TemperatureC"":10,""Summary"":""Partly cloudy""}", + PostgresType, + NpgsqlDbType, + isNpgsqlDbTypeInferredFromClrType: false); + } + + [OneTimeSetUp] + public void SetUp() + { + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.UseSystemTextJson(); + JsonDataSource = dataSourceBuilder.Build(); + } + + [OneTimeTearDown] + public async Task Teardown() + => await JsonDataSource.DisposeAsync(); + public JsonTests(MultiplexingMode multiplexingMode, NpgsqlDbType npgsqlDbType) : base(multiplexingMode) { @@ -215,4 +300,5 @@ public JsonTests(MultiplexingMode multiplexingMode, NpgsqlDbType npgsqlDbType) bool IsJsonb => NpgsqlDbType == NpgsqlDbType.Jsonb; string PostgresType => IsJsonb ? "jsonb" : "json"; readonly NpgsqlDbType NpgsqlDbType; + NpgsqlDataSource JsonDataSource = default!; } From 957c8dea8cdb09a5264174556d37af3f15df8f80 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Mon, 9 Jan 2023 21:24:35 +0100 Subject: [PATCH 033/761] Fix DateTime truncation when writing as date (#4863) Fixes #4861 --- .../Internal/TypeHandlers/DateTimeHandlers/DateHandler.cs | 2 +- test/Npgsql.Tests/Types/DateTimeTests.cs | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/DateHandler.cs b/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/DateHandler.cs index 42bcb93d42..0831306a67 100644 --- a/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/DateHandler.cs +++ b/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/DateHandler.cs @@ -76,7 +76,7 @@ public override void Write(DateTime value, NpgsqlWriteBuffer buf, NpgsqlParamete } } - buf.WriteInt32((value - BaseValueDateTime).Days); + buf.WriteInt32((value.Date - BaseValueDateTime).Days); } /// diff --git a/test/Npgsql.Tests/Types/DateTimeTests.cs b/test/Npgsql.Tests/Types/DateTimeTests.cs index e91cf5c7f2..87c8cbb5f2 
100644 --- a/test/Npgsql.Tests/Types/DateTimeTests.cs +++ b/test/Npgsql.Tests/Types/DateTimeTests.cs @@ -16,6 +16,10 @@ public class DateTimeTests : TestBase public Task Date_as_DateTime() => AssertType(new DateTime(2020, 10, 1), "2020-10-01", "date", NpgsqlDbType.Date, DbType.Date, isDefaultForWriting: false); + [Test] + public Task Date_as_DateTime_with_date_and_time_before_2000() + => AssertTypeWrite(new DateTime(1980, 10, 1, 11, 0, 0), "1980-10-01", "date", NpgsqlDbType.Date, DbType.Date, isDefault: false); + // Internal PostgreSQL representation (days since 2020-01-01), for out-of-range values. [Test] public Task Date_as_int() From 6c42d51208c24a9b1f3e70e243ad81ef50b1d90f Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Thu, 12 Jan 2023 12:56:20 +0300 Subject: [PATCH 034/761] =?UTF-8?q?Optimize=20cast=20check=20for=20NpgsqlB?= =?UTF-8?q?atchCommandCollection=20and=20NpgsqlParame=E2=80=A6=20(#4820)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/Npgsql/Internal/NpgsqlReadBuffer.cs | 7 +++---- src/Npgsql/NpgsqlBatchCommandCollection.cs | 20 +++++++++++++++----- src/Npgsql/NpgsqlParameterCollection.cs | 19 ++++++++++++++----- 3 files changed, 32 insertions(+), 14 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlReadBuffer.cs b/src/Npgsql/Internal/NpgsqlReadBuffer.cs index 50ef859a76..7eb8598f7b 100644 --- a/src/Npgsql/Internal/NpgsqlReadBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlReadBuffer.cs @@ -2,7 +2,6 @@ using System.Buffers; using System.Buffers.Binary; using System.Diagnostics; -using System.Diagnostics.CodeAnalysis; using System.IO; using System.Net.Sockets; using System.Runtime.CompilerServices; @@ -519,9 +518,6 @@ public int Read(Span output) public ValueTask ReadAsync(Memory output, CancellationToken cancellationToken = default) { - if (output.Length == 0) - return new ValueTask(0); - var readFromBuffer = Math.Min(ReadBytesLeft, output.Length); if (readFromBuffer > 0) { @@ -529,6 +525,9 @@ public 
ValueTask ReadAsync(Memory output, CancellationToken cancellat ReadPosition += readFromBuffer; return new ValueTask(readFromBuffer); } + + if (output.Length == 0) + return new ValueTask(0); return ReadAsyncLong(this, output, cancellationToken); diff --git a/src/Npgsql/NpgsqlBatchCommandCollection.cs b/src/Npgsql/NpgsqlBatchCommandCollection.cs index 7a345f609b..58227ac69a 100644 --- a/src/Npgsql/NpgsqlBatchCommandCollection.cs +++ b/src/Npgsql/NpgsqlBatchCommandCollection.cs @@ -1,6 +1,8 @@ using System; using System.Collections.Generic; using System.Data.Common; +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; namespace Npgsql; @@ -81,7 +83,7 @@ NpgsqlBatchCommand IList.this[int index] set => _list[index] = value; } - /// + /// public new NpgsqlBatchCommand this[int index] { get => _list[index]; @@ -97,8 +99,16 @@ protected override void SetBatchCommand(int index, DbBatchCommand batchCommand) => _list[index] = Cast(batchCommand); static NpgsqlBatchCommand Cast(DbBatchCommand? value) - => value is NpgsqlBatchCommand c - ? c - : throw new InvalidCastException( - $"The value \"{value}\" is not of type \"{nameof(NpgsqlBatchCommand)}\" and cannot be used in this batch command collection."); + { + var castedValue = value as NpgsqlBatchCommand; + if (castedValue is null) + ThrowInvalidCastException(value); + + return castedValue; + } + + [DoesNotReturn] + static void ThrowInvalidCastException(DbBatchCommand? 
value) => + throw new InvalidCastException( + $"The value \"{value}\" is not of type \"{nameof(NpgsqlBatchCommand)}\" and cannot be used in this batch command collection."); } \ No newline at end of file diff --git a/src/Npgsql/NpgsqlParameterCollection.cs b/src/Npgsql/NpgsqlParameterCollection.cs index 89a66244af..399d5279c5 100644 --- a/src/Npgsql/NpgsqlParameterCollection.cs +++ b/src/Npgsql/NpgsqlParameterCollection.cs @@ -5,6 +5,7 @@ using System.Data.Common; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; using Npgsql.TypeMapping; using NpgsqlTypes; @@ -565,7 +566,7 @@ public override void AddRange(Array values) throw new ArgumentNullException(nameof(values)); foreach (var parameter in values) - Add(Cast(parameter) ?? throw new ArgumentException("Collection contains a null value.", nameof(values))); + Add(Cast(parameter)); } /// @@ -748,10 +749,18 @@ internal void ProcessParameters(TypeMapper typeMapper, bool validateValues, Comm internal PlaceholderType PlaceholderType { get; set; } static NpgsqlParameter Cast(object? value) - => value is NpgsqlParameter p - ? p - : throw new InvalidCastException( - $"The value \"{value}\" is not of type \"{nameof(NpgsqlParameter)}\" and cannot be used in this parameter collection."); + { + var castedValue = value as NpgsqlParameter; + if (castedValue is null) + ThrowInvalidCastException(value); + + return castedValue; + } + + [DoesNotReturn] + static void ThrowInvalidCastException(object? 
value) => + throw new InvalidCastException( + $"The value \"{value}\" is not of type \"{nameof(NpgsqlParameter)}\" and cannot be used in this parameter collection."); } enum PlaceholderType From 29d1145d3ccc2fd89bdde1ce722321ab566dd549 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Thu, 12 Jan 2023 14:01:14 +0300 Subject: [PATCH 035/761] Remove some MethodImpl attributes, extract exceptions to help inlining (#4866) --- src/Npgsql/Internal/NpgsqlReadBuffer.cs | 2 +- src/Npgsql/Internal/NpgsqlWriteBuffer.cs | 1 - src/Npgsql/NpgsqlDataReader.cs | 63 +++++++++++++----------- src/Npgsql/ThrowHelper.cs | 8 +++ src/Npgsql/Util/PGUtil.cs | 8 ++- test/Npgsql.Tests/BatchTests.cs | 4 +- 6 files changed, 53 insertions(+), 33 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlReadBuffer.cs b/src/Npgsql/Internal/NpgsqlReadBuffer.cs index 7eb8598f7b..267d3b1951 100644 --- a/src/Npgsql/Internal/NpgsqlReadBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlReadBuffer.cs @@ -2,6 +2,7 @@ using System.Buffers; using System.Buffers.Binary; using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; using System.IO; using System.Net.Sockets; using System.Runtime.CompilerServices; @@ -438,7 +439,6 @@ T Read() return result; } - [MethodImpl(MethodImplOptions.NoInlining)] static void ThrowNotSpaceLeft() => throw new InvalidOperationException("There is not enough space left in the buffer."); diff --git a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs index d7586270bb..0585b82a25 100644 --- a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs @@ -336,7 +336,6 @@ void Write(T value) WritePosition += Unsafe.SizeOf(); } - [MethodImpl(MethodImplOptions.NoInlining)] static void ThrowNotSpaceLeft() => throw new InvalidOperationException("There is not enough space left in the buffer."); diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 33bbfbcaf2..6ac58b4c43 100644 --- 
a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -246,7 +246,7 @@ public override Task ReadAsync(CancellationToken cancellationToken) return null; } - var msg = Connector.ParseServerMessage(readBuf, messageCode, len, false)!; + var msg = Connector.ParseServerMessage(readBuf, BackendMessageCode.DataRow, len, false)!; Debug.Assert(msg.Code == BackendMessageCode.DataRow); ProcessMessage(msg); return true; @@ -520,6 +520,7 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo switch (msg.Code) { case BackendMessageCode.DataRow: + Connector.State = ConnectorState.Fetching; return true; case BackendMessageCode.CommandComplete: if (statement.AppendErrorBarrier ?? Command.EnableErrorBarriers) @@ -816,14 +817,16 @@ internal void ProcessMessage(IBackendMessage msg) return; default: - throw new Exception("Received unexpected backend message of type " + msg.Code); + ThrowUnexpectedBackendMessage(msg.Code); + return; } + + static void ThrowUnexpectedBackendMessage(BackendMessageCode code) + => throw new Exception("Received unexpected backend message of type " + code); } void ProcessDataRowMessage(DataRowMessage msg) { - Connector.State = ConnectorState.Fetching; - // The connector's buffer can actually change between DataRows: // If a large DataRow exceeding the connector's current read buffer arrives, and we're // reading in non-sequential mode, a new oversize buffer is allocated. We thus have to @@ -871,8 +874,6 @@ void ProcessDataRowMessage(DataRowMessage msg) #endregion - void Cancel() => Connector.PerformPostgresCancellation(); - /// /// Gets a value indicating the depth of nesting for the current row. Always returns zero. 
/// @@ -2275,7 +2276,7 @@ async Task SeekInColumnSequential(int posInColumn, bool async) Debug.Assert(_column > -1); if (posInColumn < PosInColumn) - throw new InvalidOperationException("Attempt to read a position in the column which has already been read"); + ThrowHelper.ThrowInvalidOperationException("Attempt to read a position in the column which has already been read"); if (posInColumn > ColumnLen) posInColumn = ColumnLen; @@ -2337,7 +2338,7 @@ void ConsumeRowNonSequential() { Debug.Assert(State == ReaderState.InResult || State == ReaderState.BeforeResult); - if (_columnStream != null) + if (_columnStream is not null) { _columnStream.Dispose(); _columnStream = null; @@ -2351,24 +2352,25 @@ void ConsumeRowNonSequential() #region Checks - [MethodImpl(MethodImplOptions.AggressiveInlining)] void CheckResultSet() { switch (State) { case ReaderState.BeforeResult: case ReaderState.InResult: - break; + return; case ReaderState.Closed: - throw new InvalidOperationException("The reader is closed"); + ThrowHelper.ThrowInvalidOperationException("The reader is closed"); + return; case ReaderState.Disposed: - throw new ObjectDisposedException(nameof(NpgsqlDataReader)); + ThrowHelper.ThrowObjectDisposedException(nameof(NpgsqlDataReader)); + return; default: - throw new InvalidOperationException("No resultset is currently being traversed"); + ThrowHelper.ThrowInvalidOperationException("No resultset is currently being traversed"); + return; } } - [MethodImpl(MethodImplOptions.AggressiveInlining)] FieldDescription CheckRowAndGetField(int column) { switch (State) @@ -2376,15 +2378,18 @@ FieldDescription CheckRowAndGetField(int column) case ReaderState.InResult: break; case ReaderState.Closed: - throw new InvalidOperationException("The reader is closed"); + ThrowHelper.ThrowInvalidOperationException("The reader is closed"); + break; case ReaderState.Disposed: - throw new ObjectDisposedException(nameof(NpgsqlDataReader)); + 
ThrowHelper.ThrowObjectDisposedException(nameof(NpgsqlDataReader)); + break; default: - throw new InvalidOperationException("No row is available"); + ThrowHelper.ThrowInvalidOperationException("No row is available"); + break; } if (column < 0 || column >= RowDescription!.Count) - throw new IndexOutOfRangeException($"Column must be between {0} and {RowDescription!.Count - 1}"); + ThrowColumnOutOfRange(RowDescription!.Count); return RowDescription[column]; } @@ -2393,38 +2398,40 @@ FieldDescription CheckRowAndGetField(int column) /// Checks that we have a RowDescription, but not necessary an actual resultset /// (for operations which work in SchemaOnly mode. /// - [MethodImpl(MethodImplOptions.AggressiveInlining)] FieldDescription GetField(int column) { - if (RowDescription == null) - throw new InvalidOperationException("No resultset is currently being traversed"); + if (RowDescription is null) + ThrowHelper.ThrowInvalidOperationException("No resultset is currently being traversed"); if (column < 0 || column >= RowDescription.Count) - throw new IndexOutOfRangeException($"Column must be between {0} and {RowDescription.Count - 1}"); + ThrowColumnOutOfRange(RowDescription.Count); return RowDescription[column]; } - [MethodImpl(MethodImplOptions.AggressiveInlining)] void CheckColumnStart() { Debug.Assert(_isSequential); if (PosInColumn != 0) - throw new InvalidOperationException("Attempt to read a position in the column which has already been read"); + ThrowHelper.ThrowInvalidOperationException("Attempt to read a position in the column which has already been read"); } - [MethodImpl(MethodImplOptions.AggressiveInlining)] void CheckClosedOrDisposed() { switch (State) { case ReaderState.Closed: - throw new InvalidOperationException("The reader is closed"); + ThrowHelper.ThrowInvalidOperationException("The reader is closed"); + return; case ReaderState.Disposed: - throw new ObjectDisposedException(nameof(NpgsqlDataReader)); + 
ThrowHelper.ThrowObjectDisposedException(nameof(NpgsqlDataReader)); + return; } } + static void ThrowColumnOutOfRange(int maxIndex) => + throw new IndexOutOfRangeException($"Column must be between {0} and {maxIndex - 1}"); + #endregion #region Misc @@ -2436,7 +2443,7 @@ void CheckClosedOrDisposed() internal void UnbindIfNecessary() { // We're closing the connection, but reader is not yet disposed - // We have to unbind the reader from the connector, otherwise there could be a concurency issues + // We have to unbind the reader from the connector, otherwise there could be a concurrency issues // See #3126 and #3290 if (State != ReaderState.Disposed) { diff --git a/src/Npgsql/ThrowHelper.cs b/src/Npgsql/ThrowHelper.cs index 1a05ab5e8b..ab3df07989 100644 --- a/src/Npgsql/ThrowHelper.cs +++ b/src/Npgsql/ThrowHelper.cs @@ -8,6 +8,14 @@ namespace Npgsql; static class ThrowHelper { + [DoesNotReturn] + internal static void ThrowInvalidOperationException(string message) + => throw new InvalidOperationException(message); + + [DoesNotReturn] + internal static void ThrowObjectDisposedException(string objectName) => + throw new ObjectDisposedException(objectName); + [DoesNotReturn] internal static void ThrowInvalidCastException_NotSupportedType(NpgsqlTypeHandler handler, NpgsqlParameter? parameter, Type type) { diff --git a/src/Npgsql/Util/PGUtil.cs b/src/Npgsql/Util/PGUtil.cs index b3746f2a65..67635cd8f4 100644 --- a/src/Npgsql/Util/PGUtil.cs +++ b/src/Npgsql/Util/PGUtil.cs @@ -44,7 +44,7 @@ internal static T ExpectAny(IBackendMessage msg, NpgsqlConnector connector) return default; } - [MethodImpl(MethodImplOptions.NoInlining), DoesNotReturn] + [DoesNotReturn] static void ThrowIfMsgWrongType(IBackendMessage msg, NpgsqlConnector connector) => throw connector.Break( new NpgsqlException($"Received backend message {msg.Code} while expecting {typeof(T).Name}. 
Please file a bug.")); @@ -135,8 +135,12 @@ internal static void ValidateBackendMessageCode(BackendMessageCode code) case BackendMessageCode.RowDescription: return; default: - throw new NpgsqlException("Unknown message code: " + code); + ThrowUnknownMessageCode(code); + return; } + + static void ThrowUnknownMessageCode(BackendMessageCode code) + => throw new NpgsqlException("Unknown message code: " + code); } [MethodImpl(MethodImplOptions.AggressiveInlining)] diff --git a/test/Npgsql.Tests/BatchTests.cs b/test/Npgsql.Tests/BatchTests.cs index e59d3b9195..a118a6be99 100644 --- a/test/Npgsql.Tests/BatchTests.cs +++ b/test/Npgsql.Tests/BatchTests.cs @@ -466,7 +466,9 @@ public async Task Batch_with_multiple_errors([Values] bool withErrorBarriers) [Test] public async Task Batch_close_dispose_reader_with_multiple_errors([Values] bool withErrorBarriers, [Values] bool dispose) { - await using var conn = await OpenConnectionAsync(); + // Create a temp pool since we dispose the reader (and check the state afterwards) and it can be reused by another connection + using var _ = CreateTempPool(ConnectionString, out var connString); + await using var conn = await OpenConnectionAsync(connString); var table = await CreateTempTable(conn, "id INT"); await using var batch = new NpgsqlBatch(conn) From 03c4707670f8ce5ed56fba73973c1c47257f1228 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Thu, 12 Jan 2023 22:16:35 +0300 Subject: [PATCH 036/761] Remove excessive try/catch from GetFieldValue (#4868) Closes #4867 --- src/Npgsql/NpgsqlDataReader.cs | 32 +++++++------------------------- 1 file changed, 7 insertions(+), 25 deletions(-) diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 6ac58b4c43..0a9e14a244 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -1715,31 +1715,13 @@ public override T GetFieldValue(int ordinal) ThrowHelper.ThrowInvalidCastException_NoValue(field); } - var position = Buffer.ReadPosition; - try - { 
- return NullableHandler.Exists - ? NullableHandler.Read(field.Handler, Buffer, ColumnLen, field) - : typeof(T) == typeof(object) - ? (T)field.Handler.ReadAsObject(Buffer, ColumnLen, field) - : field.Handler.Read(Buffer, ColumnLen, field); - } - catch - { - if (Connector.State != ConnectorState.Broken) - { - var writtenBytes = Buffer.ReadPosition - position; - var remainingBytes = ColumnLen - writtenBytes; - if (remainingBytes > 0) - Buffer.Skip(remainingBytes, false).GetAwaiter().GetResult(); - } - throw; - } - finally - { - // Important: position must still be updated - PosInColumn += ColumnLen; - } + // We don't handle exceptions or update PosInColumn + // As with non-sequential reads we always just move to the start/end of the column + return NullableHandler.Exists + ? NullableHandler.Read(field.Handler, Buffer, ColumnLen, field) + : typeof(T) == typeof(object) + ? (T)field.Handler.ReadAsObject(Buffer, ColumnLen, field) + : field.Handler.Read(Buffer, ColumnLen, field); } async ValueTask GetFieldValueSequential(int column, bool async, CancellationToken cancellationToken = default) From 6a02236d8df9b79f97cde8d53c4d7e1dd0c082f2 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Fri, 13 Jan 2023 18:00:11 +0300 Subject: [PATCH 037/761] Use ThrowHelper to throw exceptions (part 1) (#4872) Contributes to #2237 --- .../BackendMessages/RowDescriptionMessage.cs | 17 +++- src/Npgsql/Internal/NpgsqlConnector.cs | 46 ++++++---- .../Internal/NpgsqlReadBuffer.Stream.cs | 2 +- src/Npgsql/Internal/NpgsqlReadBuffer.cs | 2 +- .../Internal/NpgsqlWriteBuffer.Stream.cs | 2 +- src/Npgsql/Internal/NpgsqlWriteBuffer.cs | 2 +- src/Npgsql/NpgsqlBinaryExporter.cs | 8 +- src/Npgsql/NpgsqlCommand.cs | 43 ++++----- src/Npgsql/NpgsqlConnection.cs | 87 +++++++++++-------- src/Npgsql/NpgsqlDataReader.cs | 6 +- src/Npgsql/NpgsqlDataSource.cs | 3 +- src/Npgsql/NpgsqlParameter.cs | 2 +- src/Npgsql/NpgsqlParameterCollection.cs | 16 ++-- src/Npgsql/NpgsqlTransaction.cs | 4 +- 
src/Npgsql/PreparedStatementManager.cs | 4 +- src/Npgsql/PreparedTextReader.cs | 2 +- src/Npgsql/ThrowHelper.cs | 65 +++++++++++--- src/Npgsql/TypeMapping/TypeMapper.cs | 12 ++- src/Npgsql/Util/PGUtil.cs | 10 +-- 19 files changed, 208 insertions(+), 125 deletions(-) diff --git a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs index e50f5af795..b2b045a3e1 100644 --- a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs +++ b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs @@ -125,9 +125,11 @@ public FieldDescription this[int index] /// Given a string name, returns the field's ordinal index in the row. /// internal int GetFieldIndex(string name) - => TryGetFieldIndex(name, out var ret) - ? ret - : throw new IndexOutOfRangeException("Field not found in row: " + name); + { + if (!TryGetFieldIndex(name, out var ret)) + ThrowHelper.ThrowIndexOutOfRangeException($"Field not found in row: {name}"); + return ret; + } /// /// Given a string name, returns the field's ordinal index in the row. @@ -181,7 +183,14 @@ public Enumerator(RowDescriptionMessage rowDescription) => _rowDescription = rowDescription; public FieldDescription Current - => _pos >= 0 ? 
_rowDescription[_pos] : throw new InvalidOperationException(); + { + get + { + if (_pos < 0) + ThrowHelper.ThrowInvalidOperationException(); + return _rowDescription[_pos]; + } + } object IEnumerator.Current => Current; diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 3e4c0c0ba8..76783e41a8 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -1469,7 +1469,7 @@ internal ValueTask ReadMessage(bool async, DataRowLoadingMode d var authType = (AuthenticationRequestType)buf.ReadInt32(); return authType switch { - AuthenticationRequestType.AuthenticationOk => (AuthenticationRequestMessage)AuthenticationOkMessage.Instance, + AuthenticationRequestType.AuthenticationOk => AuthenticationOkMessage.Instance, AuthenticationRequestType.AuthenticationCleartextPassword => AuthenticationCleartextPasswordMessage.Instance, AuthenticationRequestType.AuthenticationMD5Password => AuthenticationMD5PasswordMessage.Load(buf), AuthenticationRequestType.AuthenticationGSS => AuthenticationGSSMessage.Instance, @@ -1540,14 +1540,23 @@ internal Task Rollback(bool async, CancellationToken cancellationToken = default } internal bool InTransaction - => TransactionStatus switch + { + get { - TransactionStatus.Idle => false, - TransactionStatus.Pending => true, - TransactionStatus.InTransactionBlock => true, - TransactionStatus.InFailedTransactionBlock => true, - _ => throw new InvalidOperationException($"Internal Npgsql bug: unexpected value {TransactionStatus} of enum {nameof(TransactionStatus)}. Please file a bug.") - }; + switch (TransactionStatus) + { + case TransactionStatus.Idle: + return false; + case TransactionStatus.Pending: + case TransactionStatus.InTransactionBlock: + case TransactionStatus.InFailedTransactionBlock: + return true; + default: + ThrowHelper.ThrowInvalidOperationException($"Internal Npgsql bug: unexpected value {{0}} of enum {nameof(TransactionStatus)}. 
Please file a bug.", TransactionStatus); + return false; + } + } + } /// /// Handles a new transaction indicator received on a ReadyForQuery message @@ -1562,7 +1571,7 @@ void ProcessNewTransactionStatus(TransactionStatus newStatus) switch (newStatus) { case TransactionStatus.Idle: - break; + return; case TransactionStatus.InTransactionBlock: case TransactionStatus.InFailedTransactionBlock: // In multiplexing mode, we can't support transaction in SQL: the connector must be removed from the @@ -1571,14 +1580,15 @@ void ProcessNewTransactionStatus(TransactionStatus newStatus) if (Connection is null) { Debug.Assert(Settings.Multiplexing); - throw new NotSupportedException("In multiplexing mode, transactions must be started with BeginTransaction"); + ThrowHelper.ThrowNotSupportedException("In multiplexing mode, transactions must be started with BeginTransaction"); } - break; + return; case TransactionStatus.Pending: - throw new Exception($"Internal Npgsql bug: invalid TransactionStatus {nameof(TransactionStatus.Pending)} received, should be frontend-only"); + ThrowHelper.ThrowInvalidOperationException($"Internal Npgsql bug: invalid TransactionStatus {nameof(TransactionStatus.Pending)} received, should be frontend-only"); + return; default: - throw new InvalidOperationException( - $"Internal Npgsql bug: unexpected value {newStatus} of enum {nameof(TransactionStatus)}. Please file a bug."); + ThrowHelper.ThrowInvalidOperationException($"Internal Npgsql bug: unexpected value {{0}} of enum {nameof(TransactionStatus)}. Please file a bug.", newStatus); + return; } } @@ -2172,7 +2182,8 @@ internal async Task Reset(bool async) endBindingScope = true; break; default: - throw new InvalidOperationException($"Internal Npgsql bug: unexpected value {TransactionStatus} of enum {nameof(TransactionStatus)}. Please file a bug."); + ThrowHelper.ThrowInvalidOperationException($"Internal Npgsql bug: unexpected value {TransactionStatus} of enum {nameof(TransactionStatus)}. 
Please file a bug."); + return; } if (_sendResetOnClose) @@ -2306,7 +2317,8 @@ UserAction DoStartUserAction(ConnectorState newState, NpgsqlCommand? command) break; case ConnectorState.Closed: case ConnectorState.Broken: - throw new InvalidOperationException("Connection is not open"); + ThrowHelper.ThrowInvalidOperationException("Connection is not open"); + break; case ConnectorState.Executing: case ConnectorState.Fetching: case ConnectorState.Waiting: @@ -2318,7 +2330,7 @@ UserAction DoStartUserAction(ConnectorState newState, NpgsqlCommand? command) ? new NpgsqlOperationInProgressException(State) : new NpgsqlOperationInProgressException(currentCommand); default: - throw new ArgumentOutOfRangeException(nameof(State), State, "Invalid connector state: " + State); + throw new ArgumentOutOfRangeException(nameof(State), State, $"Invalid connector state: {State}"); } Debug.Assert(IsReady); diff --git a/src/Npgsql/Internal/NpgsqlReadBuffer.Stream.cs b/src/Npgsql/Internal/NpgsqlReadBuffer.Stream.cs index 39ebad22a7..8734db3ffb 100644 --- a/src/Npgsql/Internal/NpgsqlReadBuffer.Stream.cs +++ b/src/Npgsql/Internal/NpgsqlReadBuffer.Stream.cs @@ -194,7 +194,7 @@ public override void Write(byte[] buffer, int offset, int count) void CheckDisposed() { if (IsDisposed) - throw new ObjectDisposedException(null); + ThrowHelper.ThrowObjectDisposedException(nameof(ColumnStream)); } protected override void Dispose(bool disposing) diff --git a/src/Npgsql/Internal/NpgsqlReadBuffer.cs b/src/Npgsql/Internal/NpgsqlReadBuffer.cs index 267d3b1951..c253f2d9e3 100644 --- a/src/Npgsql/Internal/NpgsqlReadBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlReadBuffer.cs @@ -440,7 +440,7 @@ T Read() } static void ThrowNotSpaceLeft() - => throw new InvalidOperationException("There is not enough space left in the buffer."); + => ThrowHelper.ThrowInvalidOperationException("There is not enough space left in the buffer."); public string ReadString(int byteLen) { diff --git 
a/src/Npgsql/Internal/NpgsqlWriteBuffer.Stream.cs b/src/Npgsql/Internal/NpgsqlWriteBuffer.Stream.cs index 46b5c8e41a..428fb0ec30 100644 --- a/src/Npgsql/Internal/NpgsqlWriteBuffer.Stream.cs +++ b/src/Npgsql/Internal/NpgsqlWriteBuffer.Stream.cs @@ -113,7 +113,7 @@ async Task WriteLong(byte[] buffer, int offset, int count, bool async, Cancellat void CheckDisposed() { if (_disposed) - throw new ObjectDisposedException(null); + ThrowHelper.ThrowObjectDisposedException(nameof(ParameterStream)); } protected override void Dispose(bool disposing) diff --git a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs index 0585b82a25..3b7d08d369 100644 --- a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs @@ -337,7 +337,7 @@ void Write(T value) } static void ThrowNotSpaceLeft() - => throw new InvalidOperationException("There is not enough space left in the buffer."); + => ThrowHelper.ThrowInvalidOperationException("There is not enough space left in the buffer."); public Task WriteString(string s, int byteLen, bool async, CancellationToken cancellationToken = default) => WriteString(s, s.Length, byteLen, async, cancellationToken); diff --git a/src/Npgsql/NpgsqlBinaryExporter.cs b/src/Npgsql/NpgsqlBinaryExporter.cs index 5415411062..9f5b5b2bcc 100644 --- a/src/Npgsql/NpgsqlBinaryExporter.cs +++ b/src/Npgsql/NpgsqlBinaryExporter.cs @@ -159,7 +159,7 @@ async ValueTask StartRow(bool async, CancellationToken cancellationToken = if (_column == NumColumns) _leftToReadInDataMsg = Expect(await _connector.ReadMessage(async), _connector).Length; else if (_column != -1) - throw new InvalidOperationException("Already in the middle of a row"); + ThrowHelper.ThrowInvalidOperationException("Already in the middle of a row"); await _buf.Ensure(2, async); _leftToReadInDataMsg -= 2; @@ -216,7 +216,7 @@ ValueTask Read(bool async, CancellationToken cancellationToken = default) CheckDisposed(); if (_column == -1 || _column == 
NumColumns) - throw new InvalidOperationException("Not reading a row"); + ThrowHelper.ThrowInvalidOperationException("Not reading a row"); var type = typeof(T); var handler = _typeHandlerCache[_column]; @@ -267,7 +267,7 @@ ValueTask Read(NpgsqlDbType type, bool async, CancellationToken cancellati { CheckDisposed(); if (_column == -1 || _column == NumColumns) - throw new InvalidOperationException("Not reading a row"); + ThrowHelper.ThrowInvalidOperationException("Not reading a row"); var handler = _typeHandlerCache[_column]; if (handler == null) @@ -372,7 +372,7 @@ async Task ReadColumnLenIfNeeded(bool async) void CheckDisposed() { if (_isDisposed) - throw new ObjectDisposedException(GetType().FullName, "The COPY operation has already ended."); + ThrowHelper.ThrowObjectDisposedException(nameof(NpgsqlBinaryExporter), "The COPY operation has already ended."); } #endregion diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index 07f3514d80..cf257b7937 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -188,9 +188,10 @@ public override string CommandText { Debug.Assert(!IsWrappedByBatch); - _commandText = State == CommandState.Idle - ? value ?? string.Empty - : throw new InvalidOperationException("An open data reader exists for this command."); + if (State != CommandState.Idle) + ThrowHelper.ThrowInvalidOperationException("An open data reader exists for this command."); + + _commandText = value ?? string.Empty; ResetPreparation(); // TODO: Technically should do this also if the parameter list (or type) changes @@ -827,7 +828,7 @@ internal void ProcessRawQuery(SqlQueryParser? 
parser, bool standardConformingStr : (batchCommand.CommandText, batchCommand.CommandType, batchCommand.Parameters); if (string.IsNullOrEmpty(commandText)) - throw new InvalidOperationException("CommandText property has not been initialized"); + ThrowHelper.ThrowInvalidOperationException("CommandText property has not been initialized"); switch (commandType) { @@ -862,7 +863,7 @@ internal void ProcessRawQuery(SqlQueryParser? parser, bool standardConformingStr case PlaceholderType.Named: if (!EnableSqlRewriting) - throw new NotSupportedException($"Named parameters are not supported when Npgsql.{nameof(EnableSqlRewriting)} is disabled"); + ThrowHelper.ThrowNotSupportedException($"Named parameters are not supported when Npgsql.{nameof(EnableSqlRewriting)} is disabled"); // The parser is cached on NpgsqlConnector - unless we're in multiplexing mode. parser ??= new SqlQueryParser(); @@ -871,7 +872,7 @@ internal void ProcessRawQuery(SqlQueryParser? parser, bool standardConformingStr { parser.ParseRawQuery(this, standardConformingStrings); if (InternalBatchCommands.Count > 1 && _parameters.HasOutputParameters) - throw new NotSupportedException("Commands with multiple queries cannot have out parameters"); + ThrowHelper.ThrowNotSupportedException("Commands with multiple queries cannot have out parameters"); for (var i = 0; i < InternalBatchCommands.Count; i++) ValidateParameterCount(InternalBatchCommands[i]); } @@ -879,18 +880,19 @@ internal void ProcessRawQuery(SqlQueryParser? 
parser, bool standardConformingStr { parser.ParseRawQuery(batchCommand, standardConformingStrings); if (batchCommand.Parameters.HasOutputParameters) - throw new NotSupportedException("Batches cannot cannot have out parameters"); + ThrowHelper.ThrowNotSupportedException("Batches cannot cannot have out parameters"); ValidateParameterCount(batchCommand); } break; case PlaceholderType.Mixed: - throw new NotSupportedException("Mixing named and positional parameters isn't supported"); + ThrowHelper.ThrowNotSupportedException("Mixing named and positional parameters isn't supported"); + break; default: - throw new ArgumentOutOfRangeException( - nameof(PlaceholderType), $"Unknown {nameof(PlaceholderType)} value: {Parameters.PlaceholderType}"); + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(PlaceholderType), $"Unknown {nameof(PlaceholderType)} value: {Parameters.PlaceholderType}"); + break; } break; @@ -927,7 +929,7 @@ internal void ProcessRawQuery(SqlQueryParser? parser, bool standardConformingStr if (parameter.IsPositional) { if (seenNamedParam) - throw new ArgumentException(NpgsqlStrings.PositionalParameterAfterNamed); + ThrowHelper.ThrowArgumentException(NpgsqlStrings.PositionalParameterAfterNamed); } else { @@ -958,14 +960,14 @@ internal void ProcessRawQuery(SqlQueryParser? parser, bool standardConformingStr break; default: - throw new InvalidOperationException($"Internal Npgsql bug: unexpected value {CommandType} of enum {nameof(CommandType)}. Please file a bug."); + ThrowHelper.ThrowInvalidOperationException($"Internal Npgsql bug: unexpected value {CommandType} of enum {nameof(CommandType)}. 
Please file a bug."); + break; } - [MethodImpl(MethodImplOptions.AggressiveInlining)] static void ValidateParameterCount(NpgsqlBatchCommand batchCommand) { if (batchCommand.PositionalParameters.Count > ushort.MaxValue) - throw new NpgsqlException($"A statement cannot have more than {ushort.MaxValue} parameters"); + ThrowHelper.ThrowNpgsqlException("A statement cannot have more than 65535 parameters"); } } @@ -1315,7 +1317,7 @@ internal virtual async ValueTask ExecuteReader(CommandBehavior { Debug.Assert(conn is null); if (behavior.HasFlag(CommandBehavior.CloseConnection)) - throw new ArgumentException($"{nameof(CommandBehavior.CloseConnection)} is not supported with {nameof(NpgsqlConnector)}", nameof(behavior)); + ThrowHelper.ThrowArgumentException($"{nameof(CommandBehavior.CloseConnection)} is not supported with {nameof(NpgsqlConnector)}", nameof(behavior)); connector = _connector; } else @@ -1484,8 +1486,7 @@ internal virtual async ValueTask ExecuteReader(CommandBehavior { // The waiting on the ExecutionCompletion ManualResetValueTaskSource is necessarily // asynchronous, so allowing sync would mean sync-over-async. - throw new NotSupportedException( - "Synchronous command execution is not supported when multiplexing is on"); + ThrowHelper.ThrowNotSupportedException("Synchronous command execution is not supported when multiplexing is on"); } if (IsWrappedByBatch) @@ -1793,15 +1794,14 @@ public virtual NpgsqlCommand Clone() return clone; } - [MethodImpl(MethodImplOptions.AggressiveInlining)] NpgsqlConnection? 
CheckAndGetConnection() { if (State == CommandState.Disposed) - throw new ObjectDisposedException(GetType().FullName); + ThrowHelper.ThrowObjectDisposedException(GetType().FullName); if (InternalConnection == null) { if (_connector is null) - throw new InvalidOperationException("Connection property has not been initialized."); + ThrowHelper.ThrowInvalidOperationException("Connection property has not been initialized."); return null; } switch (InternalConnection.FullState) @@ -1812,7 +1812,8 @@ public virtual NpgsqlCommand Clone() case ConnectionState.Open | ConnectionState.Fetching: return InternalConnection; default: - throw new InvalidOperationException("Connection is not open"); + ThrowHelper.ThrowInvalidOperationException("Connection is not open"); + return null; } } diff --git a/src/Npgsql/NpgsqlConnection.cs b/src/Npgsql/NpgsqlConnection.cs index 2bdd5d566a..f37df7b96b 100644 --- a/src/Npgsql/NpgsqlConnection.cs +++ b/src/Npgsql/NpgsqlConnection.cs @@ -482,25 +482,40 @@ public override string ConnectionString public ConnectionState FullState { // Note: we allow accessing the state after dispose, #164 - get => _fullState switch + get { - ConnectionState.Open => Connector == null - ? 
ConnectionState.Open // When unbound, we only know we're open - : Connector.State switch - { - ConnectorState.Ready => ConnectionState.Open, - ConnectorState.Executing => ConnectionState.Open | ConnectionState.Executing, - ConnectorState.Fetching => ConnectionState.Open | ConnectionState.Fetching, - ConnectorState.Copy => ConnectionState.Open | ConnectionState.Fetching, - ConnectorState.Replication => ConnectionState.Open | ConnectionState.Fetching, - ConnectorState.Waiting => ConnectionState.Open | ConnectionState.Fetching, - ConnectorState.Connecting => ConnectionState.Connecting, - ConnectorState.Broken => ConnectionState.Broken, - ConnectorState.Closed => throw new InvalidOperationException("Internal Npgsql bug: connection is in state Open but connector is in state Closed"), - _ => throw new InvalidOperationException($"Internal Npgsql bug: unexpected value {Connector.State} of enum {nameof(ConnectorState)}. Please file a bug.") - }, - _ => _fullState - }; + if (_fullState != ConnectionState.Open) + return _fullState; + + if (Connector is null) + return ConnectionState.Open; // When unbound, we only know we're open + + switch (Connector.State) + { + case ConnectorState.Ready: + return ConnectionState.Open; + case ConnectorState.Executing: + return ConnectionState.Open | ConnectionState.Executing; + case ConnectorState.Fetching: + return ConnectionState.Open | ConnectionState.Fetching; + case ConnectorState.Copy: + return ConnectionState.Open | ConnectionState.Fetching; + case ConnectorState.Replication: + return ConnectionState.Open | ConnectionState.Fetching; + case ConnectorState.Waiting: + return ConnectionState.Open | ConnectionState.Fetching; + case ConnectorState.Connecting: + return ConnectionState.Connecting; + case ConnectorState.Broken: + return ConnectionState.Broken; + case ConnectorState.Closed: + ThrowHelper.ThrowInvalidOperationException("Internal Npgsql bug: connection is in state Open but connector is in state Closed"); + return 
ConnectionState.Broken; + default: + ThrowHelper.ThrowInvalidOperationException($"Internal Npgsql bug: unexpected value {Connector.State} of enum {nameof(ConnectorState)}. Please file a bug."); + return ConnectionState.Broken; + } + } internal set { var originalOpen = _fullState.HasFlag(ConnectionState.Open); @@ -619,11 +634,11 @@ public override ConnectionState State async ValueTask BeginTransaction(IsolationLevel level, bool async, CancellationToken cancellationToken) { if (level == IsolationLevel.Chaos) - throw new NotSupportedException("Unsupported IsolationLevel: " + level); + ThrowHelper.ThrowNotSupportedException($"Unsupported IsolationLevel: {IsolationLevel.Chaos}"); CheckReady(); if (Connector is { InTransaction: true }) - throw new InvalidOperationException("A transaction is already in progress; nested/concurrent transactions aren't supported."); + ThrowHelper.ThrowInvalidOperationException("A transaction is already in progress; nested/concurrent transactions aren't supported."); // There was a committed/rolled back transaction, but it was not disposed var connector = ConnectorBindingScope == ConnectorBindingScope.Transaction @@ -1525,16 +1540,17 @@ void CheckOpen() case ConnectionState.Open | ConnectionState.Executing: case ConnectionState.Open | ConnectionState.Fetching: case ConnectionState.Connecting: - break; + return; case ConnectionState.Closed: case ConnectionState.Broken: - throw new InvalidOperationException("Connection is not open"); + ThrowHelper.ThrowInvalidOperationException("Connection is not open"); + return; default: - throw new ArgumentOutOfRangeException(); + ThrowHelper.ThrowArgumentOutOfRangeException(); + return; } } - [MethodImpl(MethodImplOptions.AggressiveInlining)] void CheckClosed() { CheckDisposed(); @@ -1543,25 +1559,25 @@ void CheckClosed() { case ConnectionState.Closed: case ConnectionState.Broken: - break; + return; case ConnectionState.Open: case ConnectionState.Connecting: case ConnectionState.Open | 
ConnectionState.Executing: case ConnectionState.Open | ConnectionState.Fetching: - throw new InvalidOperationException("Connection already open"); + ThrowHelper.ThrowInvalidOperationException("Connection already open"); + return; default: - throw new ArgumentOutOfRangeException(); + ThrowHelper.ThrowArgumentOutOfRangeException(); + return; } } - [MethodImpl(MethodImplOptions.AggressiveInlining)] void CheckDisposed() { if (_disposed) - throw new ObjectDisposedException(typeof(NpgsqlConnection).Name); + ThrowHelper.ThrowObjectDisposedException(nameof(NpgsqlConnection)); } - [MethodImpl(MethodImplOptions.AggressiveInlining)] internal void CheckReady() { CheckDisposed(); @@ -1570,15 +1586,18 @@ internal void CheckReady() { case ConnectionState.Open: case ConnectionState.Connecting: // We need to do type loading as part of connecting - break; + return; case ConnectionState.Closed: case ConnectionState.Broken: - throw new InvalidOperationException("Connection is not open"); + ThrowHelper.ThrowInvalidOperationException("Connection is not open"); + return; case ConnectionState.Open | ConnectionState.Executing: case ConnectionState.Open | ConnectionState.Fetching: - throw new InvalidOperationException("Connection is busy"); + ThrowHelper.ThrowInvalidOperationException("Connection is busy"); + return; default: - throw new ArgumentOutOfRangeException(); + ThrowHelper.ThrowArgumentOutOfRangeException(); + return; } } diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 0a9e14a244..a459155cdd 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -1483,7 +1483,7 @@ ValueTask GetStream(int ordinal, bool async, CancellationToken cancellat async ValueTask GetStreamInternal(FieldDescription field, int ordinal, bool async, CancellationToken cancellationToken = default) { if (_columnStream is { IsDisposed: false }) - throw new InvalidOperationException("A stream is already open for this reader"); + 
ThrowHelper.ThrowInvalidOperationException("A stream is already open for this reader"); using var registration = Connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); @@ -1950,10 +1950,10 @@ async Task IsDBNullAsyncInternal(int ordinal, CancellationToken cancellati public override int GetOrdinal(string name) { if (string.IsNullOrEmpty(name)) - throw new ArgumentException("name cannot be empty", nameof(name)); + ThrowHelper.ThrowArgumentException($"{nameof(name)} cannot be empty", nameof(name)); CheckClosedOrDisposed(); if (RowDescription is null) - throw new InvalidOperationException("No resultset is currently being traversed"); + ThrowHelper.ThrowInvalidOperationException("No resultset is currently being traversed"); return RowDescription.GetFieldIndex(name); } diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index 71b7d2a0bc..82cab03c01 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -456,11 +456,10 @@ protected virtual async ValueTask DisposeAsyncBase() } #pragma warning restore CS1998 - [MethodImpl(MethodImplOptions.AggressiveInlining)] private protected void CheckDisposed() { if (_isDisposed == 1) - throw new ObjectDisposedException(GetType().FullName); + ThrowHelper.ThrowObjectDisposedException(GetType().FullName); } #endregion diff --git a/src/Npgsql/NpgsqlParameter.cs b/src/Npgsql/NpgsqlParameter.cs index 96649f9ccc..6da98451d3 100644 --- a/src/Npgsql/NpgsqlParameter.cs +++ b/src/Npgsql/NpgsqlParameter.cs @@ -519,7 +519,7 @@ internal virtual void ResolveHandler(TypeMapper typeMapper) else { var parameterName = !string.IsNullOrEmpty(ParameterName) ? 
ParameterName : $"${Collection?.IndexOf(this) + 1}"; - throw new InvalidOperationException($"Parameter '{parameterName}' must have either its NpgsqlDbType or its DataTypeName or its Value set"); + ThrowHelper.ThrowInvalidOperationException($"Parameter '{parameterName}' must have either its NpgsqlDbType or its DataTypeName or its Value set"); } } diff --git a/src/Npgsql/NpgsqlParameterCollection.cs b/src/Npgsql/NpgsqlParameterCollection.cs index 399d5279c5..ed294f4b2d 100644 --- a/src/Npgsql/NpgsqlParameterCollection.cs +++ b/src/Npgsql/NpgsqlParameterCollection.cs @@ -208,9 +208,9 @@ internal void ChangeParameterName(NpgsqlParameter parameter, string? value) set { if (value is null) - throw new ArgumentNullException(nameof(value)); + ThrowHelper.ThrowArgumentNullException(nameof(value)); if (value.Collection is not null) - throw new InvalidOperationException("The parameter already belongs to a collection"); + ThrowHelper.ThrowInvalidOperationException("The parameter already belongs to a collection"); var oldValue = InternalList[index]; @@ -233,9 +233,9 @@ internal void ChangeParameterName(NpgsqlParameter parameter, string? 
value) public NpgsqlParameter Add(NpgsqlParameter value) { if (value is null) - throw new ArgumentNullException(nameof(value)); + ThrowHelper.ThrowArgumentNullException(nameof(value)); if (value.Collection is not null) - throw new InvalidOperationException("The parameter already belongs to a collection"); + ThrowHelper.ThrowInvalidOperationException("The parameter already belongs to a collection"); InternalList.Add(value); value.Collection = this; @@ -448,11 +448,11 @@ public override void Insert(int index, object value) public void Remove(string parameterName) { if (parameterName is null) - throw new ArgumentNullException(nameof(parameterName)); + ThrowHelper.ThrowArgumentNullException(nameof(parameterName)); var index = IndexOf(parameterName); if (index < 0) - throw new InvalidOperationException("No parameter with the specified name exists in the collection"); + ThrowHelper.ThrowInvalidOperationException("No parameter with the specified name exists in the collection"); RemoveAt(index); } @@ -626,9 +626,9 @@ public void Insert(int index, NpgsqlParameter item) public bool Remove(NpgsqlParameter item) { if (item == null) - throw new ArgumentNullException(nameof(item)); + ThrowHelper.ThrowArgumentNullException(nameof(item)); if (item.Collection != this) - throw new InvalidOperationException("The item does not belong to this collection"); + ThrowHelper.ThrowInvalidOperationException("The item does not belong to this collection"); var index = IndexOf(item); if (index >= 0) diff --git a/src/Npgsql/NpgsqlTransaction.cs b/src/Npgsql/NpgsqlTransaction.cs index 0f0cb20fc6..8d4ffbc00a 100644 --- a/src/Npgsql/NpgsqlTransaction.cs +++ b/src/Npgsql/NpgsqlTransaction.cs @@ -447,13 +447,13 @@ void CheckReady() { CheckDisposed(); if (IsCompleted) - throw new InvalidOperationException("This NpgsqlTransaction has completed; it is no longer usable."); + ThrowHelper.ThrowInvalidOperationException("This NpgsqlTransaction has completed; it is no longer usable."); } void CheckDisposed() 
{ if (IsDisposed) - throw new ObjectDisposedException(typeof(NpgsqlTransaction).Name, _disposeReason); + ThrowHelper.ThrowObjectDisposedException(nameof(NpgsqlTransaction), _disposeReason); } static bool RequiresQuoting(string identifier) diff --git a/src/Npgsql/PreparedStatementManager.cs b/src/Npgsql/PreparedStatementManager.cs index c7f18c52e5..227cd1314d 100644 --- a/src/Npgsql/PreparedStatementManager.cs +++ b/src/Npgsql/PreparedStatementManager.cs @@ -198,8 +198,8 @@ internal PreparedStatementManager(NpgsqlConnector connector) continue; default: - throw new Exception( - $"Invalid {nameof(PreparedState)} state {slot.State} encountered when scanning prepared statement slots"); + ThrowHelper.ThrowInvalidOperationException($"Invalid {nameof(PreparedState)} state {slot.State} encountered when scanning prepared statement slots"); + return null; } } diff --git a/src/Npgsql/PreparedTextReader.cs b/src/Npgsql/PreparedTextReader.cs index 145af2037a..8a2cf806d2 100644 --- a/src/Npgsql/PreparedTextReader.cs +++ b/src/Npgsql/PreparedTextReader.cs @@ -105,7 +105,7 @@ public override string ReadToEnd() void CheckDisposed() { if (_disposed || _stream.IsDisposed) - throw new ObjectDisposedException(null); + ThrowHelper.ThrowObjectDisposedException(nameof(PreparedTextReader)); } protected override void Dispose(bool disposing) diff --git a/src/Npgsql/ThrowHelper.cs b/src/Npgsql/ThrowHelper.cs index ab3df07989..760fde12d7 100644 --- a/src/Npgsql/ThrowHelper.cs +++ b/src/Npgsql/ThrowHelper.cs @@ -2,33 +2,42 @@ using System; using System.Diagnostics.CodeAnalysis; using System.Reflection; -using Npgsql.Internal.TypeHandling; namespace Npgsql; static class ThrowHelper { + [DoesNotReturn] + internal static void ThrowArgumentOutOfRangeException() + => throw new ArgumentOutOfRangeException(); + + [DoesNotReturn] + internal static void ThrowArgumentOutOfRangeException(string paramName, string message) + => throw new ArgumentOutOfRangeException(paramName, message); + + [DoesNotReturn] + 
internal static void ThrowInvalidOperationException() + => throw new InvalidOperationException(); + [DoesNotReturn] internal static void ThrowInvalidOperationException(string message) => throw new InvalidOperationException(message); [DoesNotReturn] - internal static void ThrowObjectDisposedException(string objectName) => - throw new ObjectDisposedException(objectName); + internal static void ThrowInvalidOperationException(string message, object argument) + => throw new InvalidOperationException(string.Format(message, argument)); [DoesNotReturn] - internal static void ThrowInvalidCastException_NotSupportedType(NpgsqlTypeHandler handler, NpgsqlParameter? parameter, Type type) - { - var parameterName = parameter is null - ? null - : parameter.TrimmedName == string.Empty - ? $"${parameter.Collection!.IndexOf(parameter) + 1}" - : parameter.TrimmedName; + internal static void ThrowObjectDisposedException(string? objectName) + => throw new ObjectDisposedException(objectName); - throw new InvalidCastException(parameterName is null - ? $"Cannot write a value of CLR type '{type}' as database type '{handler.PgDisplayName}'." - : $"Cannot write a value of CLR type '{type}' as database type '{handler.PgDisplayName}' for parameter '{parameterName}'."); - } + [DoesNotReturn] + internal static void ThrowObjectDisposedException(string objectName, string message) + => throw new ObjectDisposedException(objectName, message); + + [DoesNotReturn] + internal static void ThrowObjectDisposedException(string objectName, Exception? 
innerException) + => throw new ObjectDisposedException(objectName, innerException); [DoesNotReturn] internal static void ThrowInvalidCastException_NoValue(FieldDescription field) => @@ -45,4 +54,32 @@ internal static void ThrowInvalidOperationException_NoPropertySetter(Type type, [DoesNotReturn] internal static void ThrowInvalidOperationException_BinaryImportParametersMismatch(int columnCount, int valueCount) => throw new InvalidOperationException($"The binary import operation was started with {columnCount} column(s), but {valueCount} value(s) were provided."); + + [DoesNotReturn] + internal static void ThrowNpgsqlException(string message) + => throw new NpgsqlException(message); + + [DoesNotReturn] + internal static void ThrowArgumentException(string message) + => throw new ArgumentException(message); + + [DoesNotReturn] + internal static void ThrowArgumentException(string message, string paramName) + => throw new ArgumentException(message, paramName); + + [DoesNotReturn] + internal static void ThrowArgumentNullException(string paramName) + => throw new ArgumentNullException(paramName); + + [DoesNotReturn] + internal static void ThrowIndexOutOfRangeException(string message) + => throw new IndexOutOfRangeException(message); + + [DoesNotReturn] + internal static void ThrowNotSupportedException(string message) + => throw new NotSupportedException(message); + + [DoesNotReturn] + internal static void ThrowTimeoutException() + => throw new TimeoutException(); } \ No newline at end of file diff --git a/src/Npgsql/TypeMapping/TypeMapper.cs b/src/Npgsql/TypeMapping/TypeMapper.cs index 376725c90a..bfbff61aef 100644 --- a/src/Npgsql/TypeMapping/TypeMapper.cs +++ b/src/Npgsql/TypeMapping/TypeMapper.cs @@ -26,7 +26,15 @@ sealed class TypeMapper NpgsqlDatabaseInfo? _databaseInfo; internal NpgsqlDatabaseInfo DatabaseInfo - => _databaseInfo ?? 
throw new InvalidOperationException("Internal error: this type mapper hasn't yet been bound to a database info object"); + { + get + { + var databaseInfo = _databaseInfo; + if (databaseInfo is null) + ThrowHelper.ThrowInvalidOperationException("Internal error: this type mapper hasn't yet been bound to a database info object"); + return databaseInfo; + } + } volatile TypeHandlerResolver[] _resolvers; internal NpgsqlTypeHandler UnrecognizedTypeHandler { get; } @@ -497,7 +505,7 @@ internal bool TryGetMapping(PostgresType pgType, [NotNullWhen(true)] out TypeMap internal (NpgsqlDbType? npgsqlDbType, PostgresType postgresType) GetTypeInfoByOid(uint oid) { if (!DatabaseInfo.ByOID.TryGetValue(oid, out var pgType)) - throw new InvalidOperationException($"Couldn't find PostgreSQL type with OID {oid}"); + ThrowHelper.ThrowInvalidOperationException($"Couldn't find PostgreSQL type with OID {oid}"); foreach (var resolver in _resolvers) if (resolver.GetMappingByDataTypeName(pgType.FullName) is { } mapping) diff --git a/src/Npgsql/Util/PGUtil.cs b/src/Npgsql/Util/PGUtil.cs index 67635cd8f4..2f68598157 100644 --- a/src/Npgsql/Util/PGUtil.cs +++ b/src/Npgsql/Util/PGUtil.cs @@ -140,7 +140,7 @@ internal static void ValidateBackendMessageCode(BackendMessageCode code) } static void ThrowUnknownMessageCode(BackendMessageCode code) - => throw new NpgsqlException("Unknown message code: " + code); + => ThrowHelper.ThrowNpgsqlException($"Unknown message code: {code}"); } [MethodImpl(MethodImplOptions.AggressiveInlining)] @@ -160,9 +160,7 @@ enum FormatCode : short static class EnumerableExtensions { internal static string Join(this IEnumerable values, string separator) - { - return string.Join(separator, values); - } + => string.Join(separator, values); } static class ExceptionExtensions @@ -178,7 +176,7 @@ public readonly struct NpgsqlTimeout { readonly DateTime _expiration; - internal static NpgsqlTimeout Infinite = new(TimeSpan.Zero); + internal static readonly NpgsqlTimeout Infinite = 
new(TimeSpan.Zero); internal NpgsqlTimeout(TimeSpan expiration) => _expiration = expiration > TimeSpan.Zero @@ -190,7 +188,7 @@ internal NpgsqlTimeout(TimeSpan expiration) internal void Check() { if (HasExpired) - throw new TimeoutException(); + ThrowHelper.ThrowTimeoutException(); } internal void CheckAndApply(NpgsqlConnector connector) From d1990ce855c5977890ec43009b503f2a8afbc74f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 14 Jan 2023 00:26:54 +0100 Subject: [PATCH 038/761] Bump BenchmarkDotNet from 0.13.3 to 0.13.4 (#4876) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index cacc5111d1..0c432341f3 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -38,7 +38,7 @@ - + From cf04d6f137cd36ba9a93a417407a5413345d1435 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 14 Jan 2023 00:27:16 +0100 Subject: [PATCH 039/761] Bump BenchmarkDotNet.Diagnostics.Windows from 0.13.3 to 0.13.4 (#4877) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 0c432341f3..b98501df1b 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -40,6 +40,6 @@ - + From d226eae82086dcbbbeef66dba2e6cc853ea8e232 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Sat, 14 Jan 2023 18:16:38 +0300 Subject: [PATCH 040/761] Reenable rich code navigation (#4879) --- .github/workflows/rich-code-nav.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/rich-code-nav.yml b/.github/workflows/rich-code-nav.yml index 7b1c7588ae..118c2e3e28 100644 --- a/.github/workflows/rich-code-nav.yml +++ b/.github/workflows/rich-code-nav.yml @@ -14,7 +14,6 @@ env: jobs: build: - if: ${{ false }} # disable as it's failing, see 
https://github.com/microsoft/RichCodeNavIndexer/issues/128 runs-on: windows-latest steps: From 32c48b5e986c4282fd22c80213d045e8d16f2a6a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 14 Jan 2023 15:34:42 +0000 Subject: [PATCH 041/761] Bump Microsoft.Data.SqlClient from 5.0.0 to 5.0.1 (#4707) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index b98501df1b..ae61598b3b 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -39,7 +39,7 @@ - + From 7a6f915a9fe5a78d55675c43358a1d24af4e36f5 Mon Sep 17 00:00:00 2001 From: Brar Piening Date: Sun, 15 Jan 2023 20:33:41 +0100 Subject: [PATCH 042/761] Enable mapping ImmutableDictionary to hstore for .NET Standard (#4880) The mapping ImmutableDictionary <=> hstore is disabled in .NET Standard builds for no reason. --- .../Internal/TypeHandlers/HstoreHandler.cs | 41 ++++++------------- .../TypeMapping/BuiltInTypeHandlerResolver.cs | 8 +--- 2 files changed, 14 insertions(+), 35 deletions(-) diff --git a/src/Npgsql/Internal/TypeHandlers/HstoreHandler.cs b/src/Npgsql/Internal/TypeHandlers/HstoreHandler.cs index 57c10a2fbc..4754d89b14 100644 --- a/src/Npgsql/Internal/TypeHandlers/HstoreHandler.cs +++ b/src/Npgsql/Internal/TypeHandlers/HstoreHandler.cs @@ -1,5 +1,6 @@ using System; using System.Collections.Generic; +using System.Collections.Immutable; using System.Diagnostics; using System.Threading; using System.Threading.Tasks; @@ -7,10 +8,6 @@ using Npgsql.Internal.TypeHandling; using Npgsql.PostgresTypes; -#if NETCOREAPP1_0_OR_GREATER -using System.Collections.Immutable; -#endif - namespace Npgsql.Internal.TypeHandlers; /// @@ -26,10 +23,8 @@ namespace Npgsql.Internal.TypeHandlers; /// public class HstoreHandler : NpgsqlTypeHandler>, - INpgsqlTypeHandler> -#if NETCOREAPP1_0_OR_GREATER - , INpgsqlTypeHandler> -#endif + INpgsqlTypeHandler>, + 
INpgsqlTypeHandler> { /// /// The text handler to which we delegate encoding/decoding of the actual strings @@ -67,6 +62,11 @@ public int ValidateAndGetLength(IDictionary value, ref NpgsqlLe return lengthCache.Lengths[pos] = totalLen; } + /// + public int ValidateAndGetLength( + ImmutableDictionary value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) + => ValidateAndGetLength((IDictionary)value, ref lengthCache, parameter); + /// public override int ValidateAndGetLength(Dictionary value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) => ValidateAndGetLength(value, ref lengthCache, parameter); @@ -75,9 +75,7 @@ public override int ValidateAndGetLength(Dictionary value, ref public override int ValidateObjectAndGetLength(object? value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) => value switch { -#if NETCOREAPP1_0_OR_GREATER ImmutableDictionary converted => ((INpgsqlTypeHandler>)this).ValidateAndGetLength(converted, ref lengthCache, parameter), -#endif Dictionary converted => ((INpgsqlTypeHandler>)this).ValidateAndGetLength(converted, ref lengthCache, parameter), IDictionary converted => ((INpgsqlTypeHandler>)this).ValidateAndGetLength(converted, ref lengthCache, parameter), @@ -96,9 +94,7 @@ public override Task WriteObjectWithLength( CancellationToken cancellationToken = default) => value switch { -#if NETCOREAPP1_0_OR_GREATER ImmutableDictionary converted => WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), -#endif Dictionary converted => WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), IDictionary converted => WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), @@ -123,6 +119,11 @@ public async Task Write(IDictionary value, NpgsqlWriteBuffer bu } } + /// + public Task Write(ImmutableDictionary value, + NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) + => Write((IDictionary)value, buf, lengthCache, parameter, async, cancellationToken); + /// public override Task Write(Dictionary value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) => Write(value, buf, lengthCache, parameter, async, cancellationToken); @@ -164,21 +165,6 @@ async ValueTask ReadInto(T dictionary, int numElements, NpgsqlReadBuffer b NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) => new(Read(buf, len, async, fieldDescription).Result); - #endregion - -#if NETCOREAPP1_0_OR_GREATER - #region ImmutableDictionary - - /// - public int ValidateAndGetLength( - ImmutableDictionary value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength((IDictionary)value, ref lengthCache, parameter); - - /// - public Task Write(ImmutableDictionary value, - NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => Write((IDictionary)value, buf, lengthCache, parameter, async, cancellationToken); - async ValueTask> INpgsqlTypeHandler>.Read( NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription) { @@ -189,5 +175,4 @@ public Task Write(ImmutableDictionary value, } #endregion -#endif } diff --git a/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs b/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs index 606c7b446a..44b6dd809c 100644 --- a/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs +++ b/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs @@ -146,11 +146,7 @@ sealed class BuiltInTypeHandlerResolver : TypeHandlerResolver { "bit varying", new(NpgsqlDbType.Varbit, "bit varying", typeof(BitArray), typeof(BitVector32)) }, { "varbit", new(NpgsqlDbType.Varbit, "bit varying", typeof(BitArray), typeof(BitVector32)) }, { "bit", new(NpgsqlDbType.Bit, "bit") }, - { "hstore", new(NpgsqlDbType.Hstore, "hstore", typeof(Dictionary), typeof(IDictionary) -#if !NETSTANDARD2_0 && !NETSTANDARD2_1 - , typeof(ImmutableDictionary) -#endif - ) }, + { "hstore", new(NpgsqlDbType.Hstore, "hstore", typeof(Dictionary), typeof(IDictionary), typeof(ImmutableDictionary)) }, // Internal types { "int2vector", new(NpgsqlDbType.Int2Vector, "int2vector") }, @@ -446,9 +442,7 @@ static BuiltInTypeHandlerResolver() { typeof(BitArray), "bit varying" }, { typeof(BitVector32), "bit varying" }, { typeof(Dictionary), "hstore" }, -#if !NETSTANDARD2_0 && !NETSTANDARD2_1 { typeof(ImmutableDictionary), "hstore" }, -#endif // Internal types { typeof(NpgsqlLogSequenceNumber), "pg_lsn" }, From 282814e41243e3b7135f4b4902673c7070afdd77 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Tue, 17 Jan 2023 18:47:01 +0300 Subject: [PATCH 043/761] Optimize inlining (part 2) (#4881) Contributes to #2237 --- src/Npgsql/Internal/NpgsqlConnector.cs | 78 ++++++------- src/Npgsql/NpgsqlCommand.cs | 4 +- src/Npgsql/NpgsqlConnection.cs | 4 +- src/Npgsql/NpgsqlParameter.cs | 24 ++-- src/Npgsql/NpgsqlParameterCollection.cs | 13 ++- src/Npgsql/SqlQueryParser.cs | 7 +- src/Npgsql/ThrowHelper.cs | 21 ++++ src/Npgsql/TypeMapping/TypeMapper.cs | 140 +++++++++++++----------- 8 files changed, 169 
insertions(+), 122 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 76783e41a8..1eabf630c4 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -2272,42 +2272,12 @@ internal UserAction StartUserAction( CancellationToken cancellationToken = default, bool attemptPgCancellation = true) { - // If keepalive is enabled, we must protect state transitions with a SemaphoreSlim - // (which itself must be protected by a lock, since its dispose isn't thread-safe). + // If keepalive is enabled, we must protect state transitions with a lock. // This will make the keepalive abort safely if a user query is in progress, and make // the user query wait if a keepalive is in progress. - - // If keepalive isn't enabled, we don't use the semaphore and rely only on the connector's + // If keepalive isn't enabled, we don't use the lock and rely only on the connector's // state (updated via Interlocked.Exchange) to detect concurrent use, on a best-effort basis. - if (!_isKeepAliveEnabled) - return DoStartUserAction(newState, command); - - lock (this) - { - if (!IsConnected) - { - throw IsBroken - ? new NpgsqlException("The connection was previously broken because of the following exception", _breakReason) - : new NpgsqlException("The connection is closed"); - } - - // Disable keepalive, it will be restarted at the end of the user action - _keepAliveTimer!.Change(Timeout.Infinite, Timeout.Infinite); - - try - { - // Check that the connector is ready. - return DoStartUserAction(newState, command); - } - catch (Exception ex) when (ex is not NpgsqlOperationInProgressException) - { - // We failed, but there is no current operation. - // As such, we re-enable the keepalive. - var keepAlive = Settings.KeepAlive * 1000; - _keepAliveTimer!.Change(keepAlive, keepAlive); - throw; - } - } + return _isKeepAliveEnabled ? 
DoStartUserActionWithKeepAlive(newState, command) : DoStartUserAction(newState, command); UserAction DoStartUserAction(ConnectorState newState, NpgsqlCommand? command) { @@ -2326,11 +2296,14 @@ UserAction DoStartUserAction(ConnectorState newState, NpgsqlCommand? command) case ConnectorState.Connecting: case ConnectorState.Copy: var currentCommand = _currentCommand; - throw currentCommand == null - ? new NpgsqlOperationInProgressException(State) - : new NpgsqlOperationInProgressException(currentCommand); + if (currentCommand is null) + ThrowHelper.ThrowNpgsqlOperationInProgressException(State); + else + ThrowHelper.ThrowNpgsqlOperationInProgressException(currentCommand); + break; default: - throw new ArgumentOutOfRangeException(nameof(State), State, $"Invalid connector state: {State}"); + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(State), "Invalid connector state: {0}", State); + break; } Debug.Assert(IsReady); @@ -2350,6 +2323,37 @@ UserAction DoStartUserAction(ConnectorState newState, NpgsqlCommand? command) return new UserAction(this); } + + UserAction DoStartUserActionWithKeepAlive(ConnectorState newState, NpgsqlCommand? command) + { + lock (this) + { + if (!IsConnected) + { + if (IsBroken) + ThrowHelper.ThrowNpgsqlException("The connection was previously broken because of the following exception", _breakReason); + else + ThrowHelper.ThrowNpgsqlException("The connection is closed"); + } + + // Disable keepalive, it will be restarted at the end of the user action + _keepAliveTimer!.Change(Timeout.Infinite, Timeout.Infinite); + + try + { + // Check that the connector is ready. + return DoStartUserAction(newState, command); + } + catch (Exception ex) when (ex is not NpgsqlOperationInProgressException) + { + // We failed, but there is no current operation. + // As such, we re-enable the keepalive. 
+ var keepAlive = Settings.KeepAlive * 1000; + _keepAliveTimer!.Change(keepAlive, keepAlive); + throw; + } + } + } } internal void EndUserAction() diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index cf257b7937..a35a189965 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -891,7 +891,7 @@ internal void ProcessRawQuery(SqlQueryParser? parser, bool standardConformingStr break; default: - ThrowHelper.ThrowArgumentOutOfRangeException(nameof(PlaceholderType), $"Unknown {nameof(PlaceholderType)} value: {Parameters.PlaceholderType}"); + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(PlaceholderType), $"Unknown {nameof(PlaceholderType)} value: {{0}}", Parameters.PlaceholderType); break; } @@ -960,7 +960,7 @@ internal void ProcessRawQuery(SqlQueryParser? parser, bool standardConformingStr break; default: - ThrowHelper.ThrowInvalidOperationException($"Internal Npgsql bug: unexpected value {CommandType} of enum {nameof(CommandType)}. Please file a bug."); + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(CommandType), $"Internal Npgsql bug: unexpected value {{0}} of enum {nameof(CommandType)}. Please file a bug.", commandType); break; } diff --git a/src/Npgsql/NpgsqlConnection.cs b/src/Npgsql/NpgsqlConnection.cs index f37df7b96b..66fbc759eb 100644 --- a/src/Npgsql/NpgsqlConnection.cs +++ b/src/Npgsql/NpgsqlConnection.cs @@ -512,7 +512,7 @@ public ConnectionState FullState ThrowHelper.ThrowInvalidOperationException("Internal Npgsql bug: connection is in state Open but connector is in state Closed"); return ConnectionState.Broken; default: - ThrowHelper.ThrowInvalidOperationException($"Internal Npgsql bug: unexpected value {Connector.State} of enum {nameof(ConnectorState)}. Please file a bug."); + ThrowHelper.ThrowInvalidOperationException($"Internal Npgsql bug: unexpected value {{0}} of enum {nameof(ConnectorState)}. 
Please file a bug.", Connector.State); return ConnectionState.Broken; } } @@ -634,7 +634,7 @@ public override ConnectionState State async ValueTask BeginTransaction(IsolationLevel level, bool async, CancellationToken cancellationToken) { if (level == IsolationLevel.Chaos) - ThrowHelper.ThrowNotSupportedException($"Unsupported IsolationLevel: {IsolationLevel.Chaos}"); + ThrowHelper.ThrowNotSupportedException($"Unsupported IsolationLevel: {nameof(IsolationLevel.Chaos)}"); CheckReady(); if (Connector is { InTransaction: true }) diff --git a/src/Npgsql/NpgsqlParameter.cs b/src/Npgsql/NpgsqlParameter.cs index 6da98451d3..c8b45c7d26 100644 --- a/src/Npgsql/NpgsqlParameter.cs +++ b/src/Npgsql/NpgsqlParameter.cs @@ -510,13 +510,21 @@ internal virtual void ResolveHandler(TypeMapper typeMapper) if (Handler is not null) return; - if (_npgsqlDbType.HasValue) - Handler = typeMapper.ResolveByNpgsqlDbType(_npgsqlDbType.Value); - else if (_dataTypeName is not null) - Handler = typeMapper.ResolveByDataTypeName(_dataTypeName); - else if (_value is not null) - Handler = typeMapper.ResolveByValue(_value); - else + Resolve(typeMapper); + + void Resolve(TypeMapper typeMapper) + { + if (_npgsqlDbType.HasValue) + Handler = typeMapper.ResolveByNpgsqlDbType(_npgsqlDbType.Value); + else if (_dataTypeName is not null) + Handler = typeMapper.ResolveByDataTypeName(_dataTypeName); + else if (_value is not null) + Handler = typeMapper.ResolveByValue(_value); + else + ThrowInvalidOperationException(); + } + + void ThrowInvalidOperationException() { var parameterName = !string.IsNullOrEmpty(ParameterName) ? 
ParameterName : $"${Collection?.IndexOf(this) + 1}"; ThrowHelper.ThrowInvalidOperationException($"Parameter '{parameterName}' must have either its NpgsqlDbType or its DataTypeName or its Value set"); @@ -534,7 +542,7 @@ internal virtual int ValidateAndGetLength() if (_value is DBNull) return 0; if (_value == null) - throw new InvalidCastException($"Parameter {ParameterName} must be set"); + ThrowHelper.ThrowInvalidCastException("Parameter {0} must be set", ParameterName); var lengthCache = LengthCache; var len = Handler!.ValidateObjectAndGetLength(_value, ref lengthCache, this); diff --git a/src/Npgsql/NpgsqlParameterCollection.cs b/src/Npgsql/NpgsqlParameterCollection.cs index ed294f4b2d..03adb65794 100644 --- a/src/Npgsql/NpgsqlParameterCollection.cs +++ b/src/Npgsql/NpgsqlParameterCollection.cs @@ -705,8 +705,8 @@ internal void ProcessParameters(TypeMapper typeMapper, bool validateValues, Comm case PlaceholderType.Mixed: break; default: - throw new ArgumentOutOfRangeException( - nameof(PlaceholderType), $"Unknown {nameof(PlaceholderType)} value: {PlaceholderType}"); + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(PlaceholderType), $"Unknown {nameof(PlaceholderType)} value: {{0}}", PlaceholderType); + break; } switch (p.Direction) @@ -716,13 +716,13 @@ internal void ProcessParameters(TypeMapper typeMapper, bool validateValues, Comm case ParameterDirection.InputOutput: if (PlaceholderType == PlaceholderType.Positional && commandType != CommandType.StoredProcedure) - throw new NotSupportedException("Output parameters are not supported in positional mode (unless used with CommandType.StoredProcedure)"); + ThrowHelper.ThrowNotSupportedException("Output parameters are not supported in positional mode (unless used with CommandType.StoredProcedure)"); HasOutputParameters = true; break; case ParameterDirection.Output: if (PlaceholderType == PlaceholderType.Positional && commandType != CommandType.StoredProcedure) - throw new NotSupportedException("Output parameters 
are not supported in positional mode (unless used with CommandType.StoredProcedure)"); + ThrowHelper.ThrowNotSupportedException("Output parameters are not supported in positional mode (unless used with CommandType.StoredProcedure)"); HasOutputParameters = true; continue; @@ -731,8 +731,9 @@ internal void ProcessParameters(TypeMapper typeMapper, bool validateValues, Comm continue; default: - throw new ArgumentOutOfRangeException(nameof(ParameterDirection), - $"Unhandled {nameof(ParameterDirection)} value: {p.Direction}"); + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(ParameterDirection), + $"Unhandled {nameof(ParameterDirection)} value: {{0}}", p.Direction); + break; } p.Bind(typeMapper); diff --git a/src/Npgsql/SqlQueryParser.cs b/src/Npgsql/SqlQueryParser.cs index 52b88fe036..2f554223ea 100644 --- a/src/Npgsql/SqlQueryParser.cs +++ b/src/Npgsql/SqlQueryParser.cs @@ -207,7 +207,7 @@ void ParseRawQuery( } if (!parameter.IsInputDirection) - throw new Exception($"Parameter '{paramName}' referenced in SQL but is an out-only parameter"); + ThrowHelper.ThrowInvalidOperationException("Parameter '{0}' referenced in SQL but is an out-only parameter", paramName); batchCommand.PositionalParameters.Add(parameter); index = _paramIndexMap[paramName] = batchCommand.PositionalParameters.Count; @@ -466,9 +466,8 @@ void ParseRawQuery( if (command is null) { - throw new NotSupportedException( - $"Specifying multiple SQL statements in a single {nameof(NpgsqlBatchCommand)} isn't supported, " + - "please remove all semicolons."); + ThrowHelper.ThrowNotSupportedException($"Specifying multiple SQL statements in a single {nameof(NpgsqlBatchCommand)} isn't supported, " + + "please remove all semicolons."); } statementIndex++; diff --git a/src/Npgsql/ThrowHelper.cs b/src/Npgsql/ThrowHelper.cs index 760fde12d7..14478be7be 100644 --- a/src/Npgsql/ThrowHelper.cs +++ b/src/Npgsql/ThrowHelper.cs @@ -2,6 +2,7 @@ using System; using System.Diagnostics.CodeAnalysis; using System.Reflection; 
+using Npgsql.Internal; namespace Npgsql; @@ -15,6 +16,10 @@ internal static void ThrowArgumentOutOfRangeException() internal static void ThrowArgumentOutOfRangeException(string paramName, string message) => throw new ArgumentOutOfRangeException(paramName, message); + [DoesNotReturn] + internal static void ThrowArgumentOutOfRangeException(string paramName, string message, object argument) + => throw new ArgumentOutOfRangeException(paramName, string.Format(message, argument)); + [DoesNotReturn] internal static void ThrowInvalidOperationException() => throw new InvalidOperationException(); @@ -39,6 +44,10 @@ internal static void ThrowObjectDisposedException(string objectName, string mess internal static void ThrowObjectDisposedException(string objectName, Exception? innerException) => throw new ObjectDisposedException(objectName, innerException); + [DoesNotReturn] + internal static void ThrowInvalidCastException(string message, object argument) + => throw new InvalidCastException(string.Format(message, argument)); + [DoesNotReturn] internal static void ThrowInvalidCastException_NoValue(FieldDescription field) => throw new InvalidCastException($"Column '{field.Name}' is null."); @@ -59,6 +68,18 @@ internal static void ThrowInvalidOperationException_BinaryImportParametersMismat internal static void ThrowNpgsqlException(string message) => throw new NpgsqlException(message); + [DoesNotReturn] + internal static void ThrowNpgsqlException(string message, Exception? 
innerException) + => throw new NpgsqlException(message, innerException); + + [DoesNotReturn] + internal static void ThrowNpgsqlOperationInProgressException(NpgsqlCommand command) + => throw new NpgsqlOperationInProgressException(command); + + [DoesNotReturn] + internal static void ThrowNpgsqlOperationInProgressException(ConnectorState state) + => throw new NpgsqlOperationInProgressException(state); + [DoesNotReturn] internal static void ThrowArgumentException(string message) => throw new ArgumentException(message); diff --git a/src/Npgsql/TypeMapping/TypeMapper.cs b/src/Npgsql/TypeMapping/TypeMapper.cs index bfbff61aef..ec46502e4d 100644 --- a/src/Npgsql/TypeMapping/TypeMapper.cs +++ b/src/Npgsql/TypeMapping/TypeMapper.cs @@ -136,61 +136,66 @@ internal NpgsqlTypeHandler ResolveByNpgsqlDbType(NpgsqlDbType npgsqlDbType) if (_handlersByNpgsqlDbType.TryGetValue(npgsqlDbType, out var handler)) return handler; - lock (_writeLock) + return ResolveLong(); + + NpgsqlTypeHandler ResolveLong() { - // First, try to resolve as a base type; translate the NpgsqlDbType to a PG data type name and look that up. - if (GlobalTypeMapper.NpgsqlDbTypeToDataTypeName(npgsqlDbType) is { } dataTypeName) + lock (_writeLock) { - foreach (var resolver in _resolvers) + // First, try to resolve as a base type; translate the NpgsqlDbType to a PG data type name and look that up. 
+ if (GlobalTypeMapper.NpgsqlDbTypeToDataTypeName(npgsqlDbType) is { } dataTypeName) { - try + foreach (var resolver in _resolvers) { - if ((handler = resolver.ResolveByDataTypeName(dataTypeName)) is not null) - return _handlersByNpgsqlDbType[npgsqlDbType] = handler; - } - catch (Exception e) - { - _commandLogger.LogError(e, - $"Type resolver {resolver.GetType().Name} threw exception while resolving NpgsqlDbType {npgsqlDbType}"); + try + { + if ((handler = resolver.ResolveByDataTypeName(dataTypeName)) is not null) + return _handlersByNpgsqlDbType[npgsqlDbType] = handler; + } + catch (Exception e) + { + _commandLogger.LogError(e, + $"Type resolver {resolver.GetType().Name} threw exception while resolving NpgsqlDbType {npgsqlDbType}"); + } } } - } - if (npgsqlDbType.HasFlag(NpgsqlDbType.Array)) - { - var elementHandler = ResolveByNpgsqlDbType(npgsqlDbType & ~NpgsqlDbType.Array); + if (npgsqlDbType.HasFlag(NpgsqlDbType.Array)) + { + var elementHandler = ResolveByNpgsqlDbType(npgsqlDbType & ~NpgsqlDbType.Array); - if (elementHandler.PostgresType.Array is not { } pgArrayType) - throw new ArgumentException( - $"No array type could be found in the database for element {elementHandler.PostgresType}"); + if (elementHandler.PostgresType.Array is not { } pgArrayType) + throw new ArgumentException( + $"No array type could be found in the database for element {elementHandler.PostgresType}"); - return _handlersByNpgsqlDbType[npgsqlDbType] = - elementHandler.CreateArrayHandler(pgArrayType, Connector.Settings.ArrayNullabilityMode); - } + return _handlersByNpgsqlDbType[npgsqlDbType] = + elementHandler.CreateArrayHandler(pgArrayType, Connector.Settings.ArrayNullabilityMode); + } - if (npgsqlDbType.HasFlag(NpgsqlDbType.Range)) - { - var subtypeHandler = ResolveByNpgsqlDbType(npgsqlDbType & ~NpgsqlDbType.Range); + if (npgsqlDbType.HasFlag(NpgsqlDbType.Range)) + { + var subtypeHandler = ResolveByNpgsqlDbType(npgsqlDbType & ~NpgsqlDbType.Range); - if (subtypeHandler.PostgresType.Range 
is not { } pgRangeType) - throw new ArgumentException( - $"No range type could be found in the database for subtype {subtypeHandler.PostgresType}"); + if (subtypeHandler.PostgresType.Range is not { } pgRangeType) + throw new ArgumentException( + $"No range type could be found in the database for subtype {subtypeHandler.PostgresType}"); - return _handlersByNpgsqlDbType[npgsqlDbType] = subtypeHandler.CreateRangeHandler(pgRangeType); - } + return _handlersByNpgsqlDbType[npgsqlDbType] = subtypeHandler.CreateRangeHandler(pgRangeType); + } - if (npgsqlDbType.HasFlag(NpgsqlDbType.Multirange)) - { - var subtypeHandler = ResolveByNpgsqlDbType(npgsqlDbType & ~NpgsqlDbType.Multirange); + if (npgsqlDbType.HasFlag(NpgsqlDbType.Multirange)) + { + var subtypeHandler = ResolveByNpgsqlDbType(npgsqlDbType & ~NpgsqlDbType.Multirange); - if (subtypeHandler.PostgresType.Range?.Multirange is not { } pgMultirangeType) - throw new ArgumentException(string.Format(NpgsqlStrings.NoMultirangeTypeFound, subtypeHandler.PostgresType)); + if (subtypeHandler.PostgresType.Range?.Multirange is not { } pgMultirangeType) + throw new ArgumentException(string.Format(NpgsqlStrings.NoMultirangeTypeFound, subtypeHandler.PostgresType)); - return _handlersByNpgsqlDbType[npgsqlDbType] = subtypeHandler.CreateMultirangeHandler(pgMultirangeType); - } + return _handlersByNpgsqlDbType[npgsqlDbType] = subtypeHandler.CreateMultirangeHandler(pgMultirangeType); + } - throw new NpgsqlException($"The NpgsqlDbType '{npgsqlDbType}' isn't present in your database. " + - "You may need to install an extension or upgrade to a newer version."); + throw new NpgsqlException($"The NpgsqlDbType '{npgsqlDbType}' isn't present in your database. 
" + + "You may need to install an extension or upgrade to a newer version."); + } } } @@ -202,22 +207,27 @@ internal NpgsqlTypeHandler ResolveByDataTypeName(string typeName) if (_handlersByDataTypeName.TryGetValue(typeName, out var handler)) return handler; - lock (_writeLock) + return ResolveLong(); + + NpgsqlTypeHandler? ResolveLong() { - foreach (var resolver in _resolvers) + lock (_writeLock) { - try - { - if ((handler = resolver.ResolveByDataTypeName(typeName)) is not null) - return _handlersByDataTypeName[typeName] = handler; - } - catch (Exception e) + foreach (var resolver in _resolvers) { - _commandLogger.LogError(e, $"Type resolver {resolver.GetType().Name} threw exception while resolving data type name {typeName}"); + try + { + if ((handler = resolver.ResolveByDataTypeName(typeName)) is not null) + return _handlersByDataTypeName[typeName] = handler; + } + catch (Exception e) + { + _commandLogger.LogError(e, $"Type resolver {resolver.GetType().Name} threw exception while resolving data type name {typeName}"); + } } - } - return null; + return null; + } } } @@ -310,7 +320,6 @@ internal NpgsqlTypeHandler ResolveByValue(T value) return ResolveByValue((object)value); } - [MethodImpl(MethodImplOptions.AggressiveInlining)] internal NpgsqlTypeHandler ResolveByValue(object value) { // We resolve as follows: @@ -323,22 +332,27 @@ internal NpgsqlTypeHandler ResolveByValue(object value) if (_handlersByClrType.TryGetValue(type, out var handler)) return handler; - foreach (var resolver in _resolvers) + return ResolveLong(); + + NpgsqlTypeHandler ResolveLong() { - try - { - if ((handler = resolver.ResolveValueDependentValue(value)) is not null) - return handler; - } - catch (Exception e) + foreach (var resolver in _resolvers) { - _commandLogger.LogError(e, $"Type resolver {resolver.GetType().Name} threw exception while resolving value with type {type}"); + try + { + if ((handler = resolver.ResolveValueDependentValue(value)) is not null) + return handler; + } + catch 
(Exception e) + { + _commandLogger.LogError(e, $"Type resolver {resolver.GetType().Name} threw exception while resolving value with type {type}"); + } } - } - // ResolveByClrType either throws, or resolves a handler and caches it in _handlersByClrType (where it would be found above the - // next time we resolve this type) - return ResolveByClrType(type); + // ResolveByClrType either throws, or resolves a handler and caches it in _handlersByClrType (where it would be found above the + // next time we resolve this type) + return ResolveByClrType(type); + } } // TODO: This is needed as a separate method only because of binary COPY, see #3957 From f6cfc75941b1f6858a24369a7506a49102a97f6f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 18 Jan 2023 23:11:31 +0100 Subject: [PATCH 044/761] Bump Microsoft.CodeAnalysis.PublicApiAnalyzers from 3.3.3 to 3.3.4 (#4887) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index ae61598b3b..6d2dc06fa1 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -3,7 +3,7 @@ - + From 203adb3428c4b7728d24c880c8483b2ca2e28a3a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 19 Jan 2023 12:40:17 +0100 Subject: [PATCH 045/761] Bump Microsoft.CodeAnalysis.Analyzers from 3.3.3 to 3.3.4 (#4886) --- Directory.Packages.props | 2 +- src/Npgsql.SourceGenerators/Npgsql.SourceGenerators.csproj | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 6d2dc06fa1..00202626ac 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -14,7 +14,7 @@ - + diff --git a/src/Npgsql.SourceGenerators/Npgsql.SourceGenerators.csproj b/src/Npgsql.SourceGenerators/Npgsql.SourceGenerators.csproj index e10f1cf3e2..434936cefe 100644 --- 
a/src/Npgsql.SourceGenerators/Npgsql.SourceGenerators.csproj +++ b/src/Npgsql.SourceGenerators/Npgsql.SourceGenerators.csproj @@ -3,7 +3,8 @@ netstandard2.0 1591 - true + true + false From e4309401c63c14076714a69ac2a0048ac984d1cf Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Thu, 19 Jan 2023 16:36:37 +0300 Subject: [PATCH 046/761] Remove closures introduced in 282814e (#4891) --- src/Npgsql/Internal/NpgsqlConnector.cs | 12 ++++++++---- .../Internal/TypeHandlers/TextHandler.cs | 12 ++++-------- src/Npgsql/TypeMapping/TypeMapper.cs | 18 +++++++++--------- 3 files changed, 21 insertions(+), 21 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 1eabf630c4..ddad186f7d 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -2277,9 +2277,12 @@ internal UserAction StartUserAction( // the user query wait if a keepalive is in progress. // If keepalive isn't enabled, we don't use the lock and rely only on the connector's // state (updated via Interlocked.Exchange) to detect concurrent use, on a best-effort basis. - return _isKeepAliveEnabled ? DoStartUserActionWithKeepAlive(newState, command) : DoStartUserAction(newState, command); + return _isKeepAliveEnabled + ? DoStartUserActionWithKeepAlive(newState, command, cancellationToken, attemptPgCancellation) + : DoStartUserAction(newState, command, cancellationToken, attemptPgCancellation); - UserAction DoStartUserAction(ConnectorState newState, NpgsqlCommand? command) + UserAction DoStartUserAction(ConnectorState newState, NpgsqlCommand? command, + CancellationToken cancellationToken, bool attemptPgCancellation) { switch (State) { @@ -2324,7 +2327,8 @@ UserAction DoStartUserAction(ConnectorState newState, NpgsqlCommand? command) return new UserAction(this); } - UserAction DoStartUserActionWithKeepAlive(ConnectorState newState, NpgsqlCommand? 
command) + UserAction DoStartUserActionWithKeepAlive(ConnectorState newState, NpgsqlCommand? command, + CancellationToken cancellationToken, bool attemptPgCancellation) { lock (this) { @@ -2342,7 +2346,7 @@ UserAction DoStartUserActionWithKeepAlive(ConnectorState newState, NpgsqlCommand try { // Check that the connector is ready. - return DoStartUserAction(newState, command); + return DoStartUserAction(newState, command, cancellationToken, attemptPgCancellation); } catch (Exception ex) when (ex is not NpgsqlOperationInProgressException) { diff --git a/src/Npgsql/Internal/TypeHandlers/TextHandler.cs b/src/Npgsql/Internal/TypeHandlers/TextHandler.cs index a85ccb6e1d..bd249a8730 100644 --- a/src/Npgsql/Internal/TypeHandlers/TextHandler.cs +++ b/src/Npgsql/Internal/TypeHandlers/TextHandler.cs @@ -50,8 +50,7 @@ static async ValueTask ReadLong(NpgsqlReadBuffer buf, int byteLen, bool if (byteLen <= buf.Size) { // The string's byte representation can fit in our read buffer, read it. - while (buf.ReadBytesLeft < byteLen) - await buf.ReadMore(async); + await buf.Ensure(byteLen, async); return buf.ReadString(byteLen); } @@ -84,8 +83,7 @@ async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, in if (byteLen <= buf.Size) { // The string's byte representation can fit in our read buffer, read it. - while (buf.ReadBytesLeft < byteLen) - await buf.ReadMore(async); + await buf.Ensure(byteLen, async); return buf.ReadChars(byteLen); } @@ -111,8 +109,7 @@ async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int le { // Make sure we have enough bytes in the buffer for a single character var maxBytes = Math.Min(buf.TextEncoding.GetMaxByteCount(1), len); - while (buf.ReadBytesLeft < maxBytes) - await buf.ReadMore(async); + await buf.Ensure(maxBytes, async); return ReadCharCore(); @@ -155,8 +152,7 @@ static async ValueTask ReadLong(NpgsqlReadBuffer buf, byte[] bytes, int if (byteLen <= buf.Size) { // The bytes can fit in our read buffer, read it. 
- while (buf.ReadBytesLeft < byteLen) - await buf.ReadMore(async); + await buf.Ensure(byteLen, async); buf.ReadBytes(bytes, 0, byteLen); return bytes; } diff --git a/src/Npgsql/TypeMapping/TypeMapper.cs b/src/Npgsql/TypeMapping/TypeMapper.cs index ec46502e4d..d56092e86e 100644 --- a/src/Npgsql/TypeMapping/TypeMapper.cs +++ b/src/Npgsql/TypeMapping/TypeMapper.cs @@ -136,9 +136,9 @@ internal NpgsqlTypeHandler ResolveByNpgsqlDbType(NpgsqlDbType npgsqlDbType) if (_handlersByNpgsqlDbType.TryGetValue(npgsqlDbType, out var handler)) return handler; - return ResolveLong(); + return ResolveLong(npgsqlDbType); - NpgsqlTypeHandler ResolveLong() + NpgsqlTypeHandler ResolveLong(NpgsqlDbType npgsqlDbType) { lock (_writeLock) { @@ -149,7 +149,7 @@ NpgsqlTypeHandler ResolveLong() { try { - if ((handler = resolver.ResolveByDataTypeName(dataTypeName)) is not null) + if (resolver.ResolveByDataTypeName(dataTypeName) is { } handler) return _handlersByNpgsqlDbType[npgsqlDbType] = handler; } catch (Exception e) @@ -207,9 +207,9 @@ internal NpgsqlTypeHandler ResolveByDataTypeName(string typeName) if (_handlersByDataTypeName.TryGetValue(typeName, out var handler)) return handler; - return ResolveLong(); + return ResolveLong(typeName); - NpgsqlTypeHandler? ResolveLong() + NpgsqlTypeHandler? 
ResolveLong(string typeName) { lock (_writeLock) { @@ -217,7 +217,7 @@ internal NpgsqlTypeHandler ResolveByDataTypeName(string typeName) { try { - if ((handler = resolver.ResolveByDataTypeName(typeName)) is not null) + if (resolver.ResolveByDataTypeName(typeName) is { } handler) return _handlersByDataTypeName[typeName] = handler; } catch (Exception e) @@ -332,15 +332,15 @@ internal NpgsqlTypeHandler ResolveByValue(object value) if (_handlersByClrType.TryGetValue(type, out var handler)) return handler; - return ResolveLong(); + return ResolveLong(value, type); - NpgsqlTypeHandler ResolveLong() + NpgsqlTypeHandler ResolveLong(object value, Type type) { foreach (var resolver in _resolvers) { try { - if ((handler = resolver.ResolveValueDependentValue(value)) is not null) + if (resolver.ResolveValueDependentValue(value) is { } handler) return handler; } catch (Exception e) From ac3b739b9805f6fddf97c19ef36d7b04dee3cd43 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Thu, 19 Jan 2023 22:53:45 +0300 Subject: [PATCH 047/761] Fix generic explosion from MultirangeHandler with native AOT (#4892) Contributes to #4799 --- .../TypeHandlers/MultirangeHandler.cs | 28 ++++++++++++++++--- src/Npgsql/NpgsqlConnectionStringBuilder.cs | 1 + 2 files changed, 25 insertions(+), 4 deletions(-) diff --git a/src/Npgsql/Internal/TypeHandlers/MultirangeHandler.cs b/src/Npgsql/Internal/TypeHandlers/MultirangeHandler.cs index c6b68096ab..96abf09317 100644 --- a/src/Npgsql/Internal/TypeHandlers/MultirangeHandler.cs +++ b/src/Npgsql/Internal/TypeHandlers/MultirangeHandler.cs @@ -9,7 +9,9 @@ namespace Npgsql.Internal.TypeHandlers; -public partial class MultirangeHandler : NpgsqlTypeHandler[]>, +// NOTE: This cannot inherit from NpgsqlTypeHandler[]>, since that triggers infinite generic recursion in Native AOT +public partial class MultirangeHandler : NpgsqlTypeHandler, + INpgsqlTypeHandler[]>, INpgsqlTypeHandler>> { /// @@ -22,7 +24,7 @@ public MultirangeHandler(PostgresMultirangeType 
pgMultirangeType, RangeHandler RangeHandler = rangeHandler; - public override ValueTask[]> Read( + public ValueTask[]> Read( NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) => ReadMultirangeArray(buf, len, async, fieldDescription); @@ -64,7 +66,10 @@ protected async ValueTask>> ReadMultirangeList[] value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) + public override async ValueTask ReadAsObject(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) + => await Read(buf, len, async, fieldDescription); + + public int ValidateAndGetLength(NpgsqlRange[] value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) => ValidateAndGetLengthMultirange(value, ref lengthCache, parameter); public int ValidateAndGetLength(List> value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) @@ -89,7 +94,7 @@ protected int ValidateAndGetLengthMultirange( return sum; } - public override Task Write( + public Task Write( NpgsqlRange[] value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, @@ -123,6 +128,21 @@ public async Task WriteMultirange( for (var i = 0; i < value.Count; i++) await RangeHandler.WriteWithLength(value[i], buf, lengthCache, parameter: null, async, cancellationToken); } + + public override Type GetFieldType(FieldDescription? fieldDescription = null) => typeof(NpgsqlRange[]); + public override Type GetProviderSpecificFieldType(FieldDescription? 
fieldDescription = null) => typeof(NpgsqlRange[]); + + /// + public override NpgsqlTypeHandler CreateArrayHandler(PostgresArrayType pgArrayType, ArrayNullabilityMode arrayNullabilityMode) + => throw new NotSupportedException(); + + /// + public override NpgsqlTypeHandler CreateRangeHandler(PostgresType pgRangeType) + => throw new NotSupportedException(); + + /// + public override NpgsqlTypeHandler CreateMultirangeHandler(PostgresMultirangeType pgMultirangeType) + => throw new NotSupportedException(); } public class MultirangeHandler : MultirangeHandler, diff --git a/src/Npgsql/NpgsqlConnectionStringBuilder.cs b/src/Npgsql/NpgsqlConnectionStringBuilder.cs index 6844db06cf..b655c4a846 100644 --- a/src/Npgsql/NpgsqlConnectionStringBuilder.cs +++ b/src/Npgsql/NpgsqlConnectionStringBuilder.cs @@ -1705,6 +1705,7 @@ public void CopyTo(KeyValuePair[] array, int arrayIndex) #region ICustomTypeDescriptor /// + [RequiresUnreferencedCode("PropertyDescriptor's PropertyType cannot be statically discovered.")] protected override void GetProperties(Hashtable propertyDescriptors) { // Tweak which properties are exposed via TypeDescriptor. 
This affects the VS DDEX From e87cff3e5058ac1bb470f55cd9a1dc760426621c Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Sat, 21 Jan 2023 18:58:06 +0300 Subject: [PATCH 048/761] Use NpgsqlDataSource in all tests (#4882) Closes #4572 --- Npgsql.sln.DotSettings | 1 + src/Npgsql/NpgsqlConnection.cs | 2 +- .../LegacyNodaTimeTests.cs | 7 +- .../NodaTimeInfinityTests.cs | 6 +- test/Npgsql.NodaTime.Tests/NodaTimeTests.cs | 6 +- test/Npgsql.Tests/AuthenticationTests.cs | 137 +++--- test/Npgsql.Tests/AutoPrepareTests.cs | 261 +++++----- test/Npgsql.Tests/BatchTests.cs | 12 +- test/Npgsql.Tests/BugTests.cs | 104 ++-- test/Npgsql.Tests/CommandBuilderTests.cs | 8 +- test/Npgsql.Tests/CommandTests.cs | 146 +++--- test/Npgsql.Tests/ConnectionTests.cs | 356 +++++++------- test/Npgsql.Tests/CopyTests.cs | 4 +- test/Npgsql.Tests/DataSourceTests.cs | 2 +- .../DistributedTransactionTests.cs | 70 ++- test/Npgsql.Tests/ExceptionTests.cs | 24 +- test/Npgsql.Tests/MultipleHostsTests.cs | 167 +++---- test/Npgsql.Tests/NotificationTests.cs | 38 +- test/Npgsql.Tests/PoolManagerTests.cs | 9 +- test/Npgsql.Tests/PoolTests.cs | 449 +++++++----------- test/Npgsql.Tests/PrepareTests.cs | 68 +-- test/Npgsql.Tests/ReaderTests.cs | 128 ++--- .../Replication/CommonReplicationTests.cs | 18 +- test/Npgsql.Tests/SecurityTests.cs | 180 +++---- test/Npgsql.Tests/Support/TestBase.cs | 107 +++-- test/Npgsql.Tests/SystemTransactionTests.cs | 236 ++++----- .../TaskTimeoutAndCancellationTest.cs | 6 + test/Npgsql.Tests/TransactionTests.cs | 62 ++- test/Npgsql.Tests/Types/ArrayTests.cs | 207 ++++---- test/Npgsql.Tests/Types/CompositeTests.cs | 2 + test/Npgsql.Tests/Types/DateTimeTests.cs | 6 +- test/Npgsql.Tests/Types/DomainTests.cs | 9 +- test/Npgsql.Tests/Types/EnumTests.cs | 4 +- .../Npgsql.Tests/Types/LegacyDateTimeTests.cs | 6 +- test/Npgsql.Tests/Types/RangeTests.cs | 16 +- 35 files changed, 1344 insertions(+), 1520 deletions(-) diff --git a/Npgsql.sln.DotSettings b/Npgsql.sln.DotSettings index 
69c98554d3..51e889192d 100644 --- a/Npgsql.sln.DotSettings +++ b/Npgsql.sln.DotSettings @@ -121,6 +121,7 @@ True True True + True True True True diff --git a/src/Npgsql/NpgsqlConnection.cs b/src/Npgsql/NpgsqlConnection.cs index 66fbc759eb..5e009a79c5 100644 --- a/src/Npgsql/NpgsqlConnection.cs +++ b/src/Npgsql/NpgsqlConnection.cs @@ -434,7 +434,7 @@ public override string ConnectionString /// Gets the time (in seconds) to wait while trying to execute a command /// before terminating the attempt and generating an error. /// - /// The time (in seconds) to wait for a command to complete. The default value is 20 seconds. + /// The time (in seconds) to wait for a command to complete. The default value is 30 seconds. public int CommandTimeout => Settings.CommandTimeout; /// diff --git a/test/Npgsql.NodaTime.Tests/LegacyNodaTimeTests.cs b/test/Npgsql.NodaTime.Tests/LegacyNodaTimeTests.cs index 89ee298192..67c4202ff4 100644 --- a/test/Npgsql.NodaTime.Tests/LegacyNodaTimeTests.cs +++ b/test/Npgsql.NodaTime.Tests/LegacyNodaTimeTests.cs @@ -51,15 +51,14 @@ public Task Timestamptz_OffsetDateTime_infinite_values_are_not_supported() #region Support - protected override async ValueTask OpenConnectionAsync(string? connectionString = null) + protected override async ValueTask OpenConnectionAsync() { - var conn = new NpgsqlConnection(connectionString ?? ConnectionString); - await conn.OpenAsync(); + var conn = await base.OpenConnectionAsync(); await conn.ExecuteNonQueryAsync("SET TimeZone='Europe/Berlin'"); return conn; } - protected override NpgsqlConnection OpenConnection(string? 
connectionString = null) + protected override NpgsqlConnection OpenConnection() => throw new NotSupportedException(); #pragma warning disable CS1998 // Release code blocks below lack await diff --git a/test/Npgsql.NodaTime.Tests/NodaTimeInfinityTests.cs b/test/Npgsql.NodaTime.Tests/NodaTimeInfinityTests.cs index caa623e249..66bc0ca1fb 100644 --- a/test/Npgsql.NodaTime.Tests/NodaTimeInfinityTests.cs +++ b/test/Npgsql.NodaTime.Tests/NodaTimeInfinityTests.cs @@ -266,14 +266,14 @@ public async Task DateConvertInfinity() } } - protected override async ValueTask OpenConnectionAsync(string? connectionString = null) + protected override async ValueTask OpenConnectionAsync() { - var conn = await base.OpenConnectionAsync(connectionString); + var conn = await base.OpenConnectionAsync(); await conn.ExecuteNonQueryAsync("SET TimeZone='Europe/Berlin'"); return conn; } - protected override NpgsqlConnection OpenConnection(string? connectionString = null) + protected override NpgsqlConnection OpenConnection() => throw new NotSupportedException(); public NodaTimeInfinityTests(bool disableDateTimeInfinityConversions) diff --git a/test/Npgsql.NodaTime.Tests/NodaTimeTests.cs b/test/Npgsql.NodaTime.Tests/NodaTimeTests.cs index 48ddfd265a..1aa6784261 100644 --- a/test/Npgsql.NodaTime.Tests/NodaTimeTests.cs +++ b/test/Npgsql.NodaTime.Tests/NodaTimeTests.cs @@ -646,14 +646,14 @@ public async Task Bug3438() #region Support - protected override async ValueTask OpenConnectionAsync(string? connectionString = null) + protected override async ValueTask OpenConnectionAsync() { - var conn = await base.OpenConnectionAsync(connectionString); + var conn = await base.OpenConnectionAsync(); await conn.ExecuteNonQueryAsync("SET TimeZone='Europe/Berlin'"); return conn; } - protected override NpgsqlConnection OpenConnection(string? 
connectionString = null) + protected override NpgsqlConnection OpenConnection() => throw new NotSupportedException(); #endregion Support diff --git a/test/Npgsql.Tests/AuthenticationTests.cs b/test/Npgsql.Tests/AuthenticationTests.cs index c26b665a34..b0173e36a2 100644 --- a/test/Npgsql.Tests/AuthenticationTests.cs +++ b/test/Npgsql.Tests/AuthenticationTests.cs @@ -18,22 +18,18 @@ public class AuthenticationTests : MultiplexingTestBase [NonParallelizable] // Sets environment variable public async Task Connect_UserNameFromEnvironment_Succeeds() { - var builder = new NpgsqlConnectionStringBuilder(ConnectionString); - using var _ = SetEnvironmentVariable("PGUSER", builder.Username); - builder.Username = null; - using var __ = CreateTempPool(builder.ConnectionString, out var connectionString); - using var ___ = await OpenConnectionAsync(connectionString); + using var _ = SetEnvironmentVariable("PGUSER", new NpgsqlConnectionStringBuilder(ConnectionString).Username); + await using var dataSource = CreateDataSource(csb => csb.Username = null); + await using var __ = await dataSource.OpenConnectionAsync(); } [Test] [NonParallelizable] // Sets environment variable public async Task Connect_PasswordFromEnvironment_Succeeds() { - var builder = new NpgsqlConnectionStringBuilder(ConnectionString); - using var _ = SetEnvironmentVariable("PGPASSWORD", builder.Password); - builder.Password = null; - using var __ = CreateTempPool(builder.ConnectionString, out var connectionString); - using var ___ = await OpenConnectionAsync(connectionString); + using var _ = SetEnvironmentVariable("PGPASSWORD", new NpgsqlConnectionStringBuilder(ConnectionString).Password); + await using var dataSource = CreateDataSource(csb => csb.Passfile = null); + await using var __ = await dataSource.OpenConnectionAsync(); } [Test] @@ -142,18 +138,17 @@ public async Task Use_pgpass_from_connection_string() { using var resetPassword = SetEnvironmentVariable("PGPASSWORD", null); var builder = new 
NpgsqlConnectionStringBuilder(ConnectionString); - - var password = builder.Password; - builder.Password = null; - var passFile = Path.GetTempFileName(); - File.WriteAllText(passFile, $"*:*:*:{builder.Username}:{password}"); - builder.Passfile = passFile; + File.WriteAllText(passFile, $"*:*:*:{builder.Username}:{builder.Password}"); try { - using var pool = CreateTempPool(builder.ConnectionString, out var connectionString); - using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(csb => + { + csb.Passfile = null; + csb.Passfile = passFile; + }); + await using var conn = await dataSource.OpenConnectionAsync(); } finally { @@ -167,18 +162,14 @@ public async Task Use_pgpass_from_environment_variable() { using var resetPassword = SetEnvironmentVariable("PGPASSWORD", null); var builder = new NpgsqlConnectionStringBuilder(ConnectionString); - - var password = builder.Password; - builder.Password = null; - var passFile = Path.GetTempFileName(); - File.WriteAllText(passFile, $"*:*:*:{builder.Username}:{password}"); + File.WriteAllText(passFile, $"*:*:*:{builder.Username}:{builder.Password}"); using var passFileVariable = SetEnvironmentVariable("PGPASSFILE", passFile); try { - using var pool = CreateTempPool(builder.ConnectionString, out var connectionString); - using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(csb => csb.Password = null); + await using var conn = await dataSource.OpenConnectionAsync(); } finally { @@ -191,10 +182,6 @@ public async Task Use_pgpass_from_environment_variable() public async Task Use_pgpass_from_homedir() { using var resetPassword = SetEnvironmentVariable("PGPASSWORD", null); - var builder = new NpgsqlConnectionStringBuilder(ConnectionString); - - var password = builder.Password; - builder.Password = null; string? 
dirToDelete = null; string passFile; @@ -222,9 +209,10 @@ public async Task Use_pgpass_from_homedir() try { - File.WriteAllText(passFile, $"*:*:*:{builder.Username}:{password}"); - using var pool = CreateTempPool(builder.ConnectionString, out var connectionString); - using var conn = await OpenConnectionAsync(connectionString); + var builder = new NpgsqlConnectionStringBuilder(ConnectionString); + File.WriteAllText(passFile, $"*:*:*:{builder.Username}:{builder.Password}"); + await using var dataSource = CreateDataSource(csb => csb.Passfile = null); + await using var conn = await dataSource.OpenConnectionAsync(); } finally { @@ -243,8 +231,8 @@ public async Task Use_pgpass_from_homedir() public void Password_source_precedence() { using var resetPassword = SetEnvironmentVariable("PGPASSWORD", null); - var builder = new NpgsqlConnectionStringBuilder(ConnectionString); + var builder = new NpgsqlConnectionStringBuilder(ConnectionString); var password = builder.Password; var passwordBad = password + "_bad"; @@ -257,51 +245,66 @@ public void Password_source_precedence() File.WriteAllText(passFile, $"*:*:*:{builder.Username}:{password}"); File.WriteAllText(passFileBad, $"*:*:*:{builder.Username}:{passwordBad}"); - using (var passFileVariable = SetEnvironmentVariable("PGPASSFILE", passFileBad)) + using (SetEnvironmentVariable("PGPASSFILE", passFileBad)) { // Password from the connection string goes first - using (var passwordVariable = SetEnvironmentVariable("PGPASSWORD", passwordBad)) - Assert.That(OpenConnection(password, passFileBad), Throws.Nothing); + using (SetEnvironmentVariable("PGPASSWORD", passwordBad)) + { + using var dataSource1 = CreateDataSource(csb => + { + csb.Password = password; + csb.Passfile = passFileBad; + }); + + Assert.That(() => dataSource1.OpenConnection(), Throws.Nothing); + } // Password from the environment variable goes second - using (var passwordVariable = SetEnvironmentVariable("PGPASSWORD", password)) - Assert.That(OpenConnection(password: 
null, passFileBad), Throws.Nothing); + using (SetEnvironmentVariable("PGPASSWORD", password)) + { + using var dataSource2 = CreateDataSource(csb => + { + csb.Password = null; + csb.Passfile = passFileBad; + }); + + Assert.That(() => dataSource2.OpenConnection(), Throws.Nothing); + } // Passfile from the connection string goes third - Assert.That(OpenConnection(password: null, passFile: passFile), Throws.Nothing); + using var dataSource3 = CreateDataSource(csb => + { + csb.Password = null; + csb.Passfile = passFile; + }); + + Assert.That(() => dataSource3.OpenConnection(), Throws.Nothing); } // Passfile from the environment variable goes fourth - using (var passFileVariable = SetEnvironmentVariable("PGPASSFILE", passFile)) - Assert.That(OpenConnection(password: null, passFile: null), Throws.Nothing); - - Func OpenConnection(string? password, string? passFile) => async () => + using (SetEnvironmentVariable("PGPASSFILE", passFile)) { - builder.Password = password; - builder.Passfile = passFile; - builder.ApplicationName = $"{nameof(Password_source_precedence)}:{Guid.NewGuid()}"; + using var dataSource4 = CreateDataSource(csb => + { + csb.Password = null; + csb.Passfile = null; + }); - using var pool = CreateTempPool(builder.ConnectionString, out var connectionString); - using var connection = await OpenConnectionAsync(connectionString); - }; + Assert.That(() => dataSource4.OpenConnection(), Throws.Nothing); + } } [Test, Description("Connects with a bad password to ensure the proper error is thrown")] public void Authentication_failure() { - var builder = new NpgsqlConnectionStringBuilder(ConnectionString) - { - Password = "bad" - }; - using (CreateTempPool(builder, out var connectionString)) - using (var conn = new NpgsqlConnection(connectionString)) - { - Assert.That(() => conn.OpenAsync(), Throws.Exception - .TypeOf() - .With.Property(nameof(PostgresException.SqlState)).StartsWith("28") - ); - Assert.That(conn.FullState, Is.EqualTo(ConnectionState.Closed)); - } + 
using var dataSource = CreateDataSource(csb => csb.Password = "bad"); + using var conn = dataSource.CreateConnection(); + + Assert.That(() => conn.OpenAsync(), Throws.Exception + .TypeOf() + .With.Property(nameof(PostgresException.SqlState)).StartsWith("28") + ); + Assert.That(conn.FullState, Is.EqualTo(ConnectionState.Closed)); } [Test, Description("Simulates a timeout during the authentication phase")] @@ -310,13 +313,13 @@ public async Task Timeout_during_authentication() { var builder = new NpgsqlConnectionStringBuilder(ConnectionString) { Timeout = 1 }; await using var postmasterMock = new PgPostmasterMock(builder.ConnectionString); - using var _ = CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - - var __ = postmasterMock.AcceptServer(); + _ = postmasterMock.AcceptServer(); // The server will accept a connection from the client, but will not respond to the client's authentication // request. This should trigger a timeout - Assert.That(async () => await OpenConnectionAsync(connectionString), + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var connection = dataSource.CreateConnection(); + Assert.That(async () => await connection.OpenAsync(), Throws.Exception.TypeOf() .With.InnerException.TypeOf()); } @@ -344,7 +347,7 @@ public async Task AuthenticateIntegratedSecurity() Username = null, Password = null }); - await using var c = await dataSource.OpenConnectionAsync(); + await using var c = await dataSource.OpenConnectionAsync(); Assert.That(c.State, Is.EqualTo(ConnectionState.Open)); } diff --git a/test/Npgsql.Tests/AutoPrepareTests.cs b/test/Npgsql.Tests/AutoPrepareTests.cs index c81affc542..2ce7171fa0 100644 --- a/test/Npgsql.Tests/AutoPrepareTests.cs +++ b/test/Npgsql.Tests/AutoPrepareTests.cs @@ -13,14 +13,12 @@ public class AutoPrepareTests : TestBase [Test] public void Basic() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = 
CreateDataSource(csb => { - MaxAutoPrepare = 10, - AutoPrepareMinUsages = 2 - }; - - using var conn = OpenConnection(csb); - conn.UnprepareAll(); + csb.MaxAutoPrepare = 10; + csb.AutoPrepareMinUsages = 2; + }); + using var conn = dataSource.OpenConnection(); using var checkCmd = new NpgsqlCommand(CountPreparedStatements, conn); checkCmd.Prepare(); @@ -49,14 +47,12 @@ public void Basic() [Test, Description("Passes the maximum limit for autoprepared statements, recycling the least-recently used one")] public void Recycle() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - AutoPrepareMinUsages = 2, - MaxAutoPrepare = 2 - }; - - using var conn = OpenConnection(csb); - conn.UnprepareAll(); + csb.AutoPrepareMinUsages = 2; + csb.MaxAutoPrepare = 2; + }); + using var conn = dataSource.OpenConnection(); using var checkCmd = new NpgsqlCommand(CountPreparedStatements, conn); checkCmd.Prepare(); @@ -92,15 +88,13 @@ public void Recycle() [Test] public void Persist() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - MaxAutoPrepare = 10, - AutoPrepareMinUsages = 2 - }; + csb.MaxAutoPrepare = 10; + csb.AutoPrepareMinUsages = 2; + }); - using var _ = CreateTempPool(csb, out var connString); - - using (var conn = OpenConnection(connString)) + using (var conn = dataSource.OpenConnection()) using (var checkCmd = new NpgsqlCommand(CountPreparedStatements, conn)) { checkCmd.Prepare(); @@ -110,7 +104,7 @@ public void Persist() // We now have two prepared statements which should be persisted - using (var conn = OpenConnection(connString)) + using (var conn = dataSource.OpenConnection()) using (var checkCmd = new NpgsqlCommand(CountPreparedStatements, conn)) { checkCmd.Prepare(); @@ -127,14 +121,12 @@ public void Persist() [Test] public async Task Positional_parameter() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var 
dataSource = CreateDataSource(csb => { - AutoPrepareMinUsages = 2, - MaxAutoPrepare = 2 - }; - - await using var conn = await OpenConnectionAsync(csb); - conn.UnprepareAll(); + csb.AutoPrepareMinUsages = 2; + csb.MaxAutoPrepare = 2; + }); + await using var conn = await dataSource.OpenConnectionAsync(); await using var checkCmd = new NpgsqlCommand(CountPreparedStatements, conn); await checkCmd.PrepareAsync(); @@ -153,13 +145,12 @@ public async Task Positional_parameter() [Test] public void Promote_auto_to_explicit() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - MaxAutoPrepare = 10, - AutoPrepareMinUsages = 2 - }; - using var conn = OpenConnection(csb); - conn.UnprepareAll(); + csb.MaxAutoPrepare = 10; + csb.AutoPrepareMinUsages = 2; + }); + using var conn = dataSource.OpenConnection(); using var checkCmd = new NpgsqlCommand(CountPreparedStatements, conn); using var cmd1 = new NpgsqlCommand("SELECT 1", conn); using var cmd2 = new NpgsqlCommand("SELECT 1", conn); @@ -182,13 +173,12 @@ public void Promote_auto_to_explicit() [Test] public void Candidate_eject() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - MaxAutoPrepare = 10, - AutoPrepareMinUsages = 3 - }; - using var conn = OpenConnection(csb); - conn.UnprepareAll(); + csb.MaxAutoPrepare = 10; + csb.AutoPrepareMinUsages = 3; + }); + using var conn = dataSource.OpenConnection(); using var cmd = conn.CreateCommand(); for (var i = 0; i < PreparedStatementManager.CandidateCount; i++) @@ -223,13 +213,12 @@ public void Candidate_eject() [Test] public void One_command_same_sql_twice() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - MaxAutoPrepare = 10, - AutoPrepareMinUsages = 2 - }; - using var conn = OpenConnection(csb); - conn.UnprepareAll(); + csb.MaxAutoPrepare = 10; + csb.AutoPrepareMinUsages = 2; + }); + 
using var conn = dataSource.OpenConnection(); using var cmd = new NpgsqlCommand("SELECT 1; SELECT 1; SELECT 1; SELECT 1", conn); //cmd.Prepare(); //Assert.That(cmd.IsPrepared, Is.True); @@ -240,14 +229,13 @@ public void One_command_same_sql_twice() [Test] public void Across_close_open_different_connector() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - MaxAutoPrepare = 10, - AutoPrepareMinUsages = 2 - }; - using var _ = CreateTempPool(csb, out var connString); - using var conn1 = new NpgsqlConnection(connString); - using var conn2 = new NpgsqlConnection(connString); + using var dataSource = CreateDataSource(csb => + { + csb.MaxAutoPrepare = 10; + csb.AutoPrepareMinUsages = 2; + }); + using var conn1 = dataSource.CreateConnection(); + using var conn2 = dataSource.CreateConnection(); using var cmd = new NpgsqlCommand("SELECT 1", conn1); conn1.Open(); cmd.ExecuteNonQuery(); cmd.ExecuteNonQuery(); @@ -266,14 +254,12 @@ public void Across_close_open_different_connector() [Test] public void Unprepare_all() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - MaxAutoPrepare = 10, - AutoPrepareMinUsages = 2 - }; - - using var conn = OpenConnection(csb); - conn.UnprepareAll(); + csb.MaxAutoPrepare = 10; + csb.AutoPrepareMinUsages = 2; + }); + using var conn = dataSource.OpenConnection(); using var cmd = new NpgsqlCommand("SELECT 1", conn); cmd.Prepare(); // Explicit conn.ExecuteNonQuery("SELECT 2"); conn.ExecuteNonQuery("SELECT 2"); // Auto @@ -285,14 +271,12 @@ public void Unprepare_all() [Test, Description("Prepares the same SQL with different parameters (overloading)")] public void Overloaded_sql() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - MaxAutoPrepare = 10, - AutoPrepareMinUsages = 2 - }; - - using var conn = OpenConnection(csb); - conn.UnprepareAll(); + csb.MaxAutoPrepare = 10; + 
csb.AutoPrepareMinUsages = 2; + }); + using var conn = dataSource.OpenConnection(); using (var cmd = new NpgsqlCommand("SELECT @p", conn)) { cmd.Parameters.AddWithValue("p", NpgsqlDbType.Integer, 8); @@ -319,14 +303,12 @@ public void Derive_parameters_for_auto_prepared_statement() { const string query = "SELECT @p::integer"; const int answer = 42; - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - MaxAutoPrepare = 10, - AutoPrepareMinUsages = 2 - }; - - using var conn = OpenConnection(csb); - conn.UnprepareAll(); + csb.MaxAutoPrepare = 10; + csb.AutoPrepareMinUsages = 2; + }); + using var conn = dataSource.OpenConnection(); using var checkCmd = new NpgsqlCommand(CountPreparedStatements, conn); using var cmd = new NpgsqlCommand(query, conn); checkCmd.Prepare(); @@ -351,12 +333,12 @@ public void Derive_parameters_for_auto_prepared_statement() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/2644")] public void Row_description_properly_cloned() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - MaxAutoPrepare = 10, - AutoPrepareMinUsages = 2 - }; - using var conn = OpenConnection(csb); + csb.MaxAutoPrepare = 10; + csb.AutoPrepareMinUsages = 2; + }); + using var conn = dataSource.OpenConnection(); conn.UnprepareAll(); using var cmd1 = new NpgsqlCommand("SELECT 1 AS foo", conn); using var cmd2 = new NpgsqlCommand("SELECT 1 AS bar", conn); @@ -371,55 +353,47 @@ public void Row_description_properly_cloned() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/3106")] public async Task Dont_auto_prepare_more_than_max_statements_in_batch() { - var builder = new NpgsqlConnectionStringBuilder(ConnectionString) - { - MaxAutoPrepare = 50, - }; + const int maxAutoPrepare = 50; - await using var connection = await OpenConnectionAsync(builder); - connection.UnprepareAll(); + await using var dataSource = CreateDataSource(csb => 
csb.MaxAutoPrepare = maxAutoPrepare); + await using var connection = await dataSource.OpenConnectionAsync(); for (var i = 0; i < 100; i++) { - using var command = connection.CreateCommand(); + await using var command = connection.CreateCommand(); command.CommandText = string.Join("", Enumerable.Range(0, 100).Select(n => $"SELECT {n};")); await command.ExecuteNonQueryAsync(); } - Assert.That(await connection.ExecuteScalarAsync(CountPreparedStatements), Is.LessThanOrEqualTo(builder.MaxAutoPrepare)); + Assert.That(await connection.ExecuteScalarAsync(CountPreparedStatements), Is.LessThanOrEqualTo(maxAutoPrepare)); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/3106")] public async Task Dont_auto_prepare_more_than_max_statements_in_batch_random() { - var builder = new NpgsqlConnectionStringBuilder(ConnectionString) - { - MaxAutoPrepare = 10, - }; + const int maxAutoPrepare = 10; - await using var connection = await OpenConnectionAsync(builder); - connection.UnprepareAll(); + await using var dataSource = CreateDataSource(csb => csb.MaxAutoPrepare = maxAutoPrepare); + await using var connection = await dataSource.OpenConnectionAsync(); var random = new Random(1); for (var i = 0; i < 100; i++) { - using var command = connection.CreateCommand(); + await using var command = connection.CreateCommand(); command.CommandText = string.Join("", Enumerable.Range(0, 100).Select(n => $"SELECT {random.Next(200)};")); await command.ExecuteNonQueryAsync(); } - Assert.That(await connection.ExecuteScalarAsync(CountPreparedStatements), Is.LessThanOrEqualTo(builder.MaxAutoPrepare)); + Assert.That(await connection.ExecuteScalarAsync(CountPreparedStatements), Is.LessThanOrEqualTo(maxAutoPrepare)); } [Test] public async Task Replace_and_execute_within_same_batch() { - var builder = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - MaxAutoPrepare = 1, - AutoPrepareMinUsages = 2 - }; - - await using var connection = await 
OpenConnectionAsync(builder); - connection.UnprepareAll(); + csb.MaxAutoPrepare = 1; + csb.AutoPrepareMinUsages = 2; + }); + await using var connection = await dataSource.OpenConnectionAsync(); for (var i = 0; i < 2; i++) await connection.ExecuteNonQueryAsync("SELECT 1"); @@ -429,26 +403,26 @@ public async Task Replace_and_execute_within_same_batch() } // Exclude some internal Npgsql queries which include pg_type as well as the count statement itself - const string CountPreparedStatements = @" + const string CountPreparedStatements = """ SELECT COUNT(*) FROM pg_prepared_statements - WHERE statement NOT LIKE '%pg_prepared_statements%' - AND statement NOT LIKE '%pg_type%'"; +WHERE statement NOT LIKE '%pg_prepared_statements%' +AND statement NOT LIKE '%pg_type%' +"""; [Test, IssueLink("https://github.com/npgsql/npgsql/issues/2665")] public async Task Auto_prepared_command_failure() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - MaxAutoPrepare = 10, - AutoPrepareMinUsages = 2 - }; - await using var conn = await OpenConnectionAsync(csb); + csb.MaxAutoPrepare = 10; + csb.AutoPrepareMinUsages = 2; + }); + await using var conn = await dataSource.OpenConnectionAsync(); var tableName = await GetTempTableName(conn); - conn.UnprepareAll(); await conn.ExecuteNonQueryAsync($"CREATE TABLE {tableName} (id integer)"); - using (var command = new NpgsqlCommand($"INSERT INTO {tableName} (id) VALUES (1)", conn)) + await using (var command = new NpgsqlCommand($"INSERT INTO {tableName} (id) VALUES (1)", conn)) { await command.ExecuteNonQueryAsync(); await conn.ExecuteNonQueryAsync($"DROP TABLE {tableName}"); @@ -457,7 +431,7 @@ public async Task Auto_prepared_command_failure() await conn.ExecuteNonQueryAsync($"CREATE TABLE {tableName} (id integer)"); - using (var command = new NpgsqlCommand($"INSERT INTO {tableName} (id) VALUES (1)", conn)) + await using (var command = new NpgsqlCommand($"INSERT INTO 
{tableName} (id) VALUES (1)", conn)) { await command.ExecuteNonQueryAsync(); await command.ExecuteNonQueryAsync(); @@ -467,14 +441,12 @@ public async Task Auto_prepared_command_failure() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/3002")] public void Replace_with_bad_sql() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - MaxAutoPrepare = 2, - AutoPrepareMinUsages = 1 - }; - - using var conn = OpenConnection(csb); - conn.UnprepareAll(); + csb.MaxAutoPrepare = 2; + csb.AutoPrepareMinUsages = 1; + }); + using var conn = dataSource.OpenConnection(); conn.ExecuteNonQuery("SELECT 1"); conn.ExecuteNonQuery("SELECT 2"); @@ -500,21 +472,21 @@ public void Replace_with_bad_sql() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4082")] public async Task Batch_statement_execution_error_cleanup() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - MaxAutoPrepare = 2, - AutoPrepareMinUsages = 1 - }; - - await using var conn = await OpenConnectionAsync(csb); + csb.MaxAutoPrepare = 2; + csb.AutoPrepareMinUsages = 1; + }); + await using var conn = await dataSource.OpenConnectionAsync(); var funcName = await GetTempFunctionName(conn); // Create a function we can use to raise an error with a single statement - conn.ExecuteNonQuery(@$" - CREATE OR REPLACE FUNCTION {funcName}() RETURNS VOID AS - 'BEGIN RAISE EXCEPTION ''testexception'' USING ERRCODE = ''12345'', DETAIL = ''testdetail''; END;' - LANGUAGE 'plpgsql'; - "); + await conn.ExecuteNonQueryAsync( +$""" +CREATE OR REPLACE FUNCTION {funcName}() RETURNS VOID AS + 'BEGIN RAISE EXCEPTION ''testexception'' USING ERRCODE = ''12345'', DETAIL = ''testdetail''; END;' +LANGUAGE 'plpgsql'; +"""); conn.UnprepareAll(); @@ -545,14 +517,12 @@ public async Task Batch_statement_execution_error_cleanup() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4404")] public async 
Task SchemaOnly() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - AutoPrepareMinUsages = 2, - MaxAutoPrepare = 10, - }; - - using var _ = CreateTempPool(csb, out var connString); - await using var conn = await OpenConnectionAsync(connString); + csb.AutoPrepareMinUsages = 2; + csb.MaxAutoPrepare = 10; + }); + await using var conn = await dataSource.OpenConnectionAsync(); await using var cmd = new NpgsqlCommand("SELECT 1", conn); for (var i = 0; i < 5; i++) @@ -564,13 +534,12 @@ public async Task SchemaOnly() [Test] public async Task Auto_prepared_statement_invalidation() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - MaxAutoPrepare = 10, - AutoPrepareMinUsages = 2 - }; - - await using var connection = await OpenConnectionAsync(csb); + csb.MaxAutoPrepare = 10; + csb.AutoPrepareMinUsages = 2; + }); + await using var connection = await dataSource.OpenConnectionAsync(); var table = await CreateTempTable(connection, "foo int"); await using var command = new NpgsqlCommand($"SELECT * FROM {table}", connection); diff --git a/test/Npgsql.Tests/BatchTests.cs b/test/Npgsql.Tests/BatchTests.cs index a118a6be99..2e7e3666fa 100644 --- a/test/Npgsql.Tests/BatchTests.cs +++ b/test/Npgsql.Tests/BatchTests.cs @@ -467,8 +467,8 @@ public async Task Batch_with_multiple_errors([Values] bool withErrorBarriers) public async Task Batch_close_dispose_reader_with_multiple_errors([Values] bool withErrorBarriers, [Values] bool dispose) { // Create a temp pool since we dispose the reader (and check the state afterwards) and it can be reused by another connection - using var _ = CreateTempPool(ConnectionString, out var connString); - await using var conn = await OpenConnectionAsync(connString); + await using var dataSource = CreateDataSource(); + await using var conn = await dataSource.OpenConnectionAsync(); var table = await 
CreateTempTable(conn, "id INT"); await using var batch = new NpgsqlBatch(conn) @@ -736,12 +736,8 @@ public async Task ExecuteScalar_without_parameters() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4264")] public async Task Batch_with_auto_prepare_reuse() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - MaxAutoPrepare = 20 - }; - - await using var conn = await OpenConnectionAsync(csb); + await using var dataSource = CreateDataSource(csb => csb.MaxAutoPrepare = 20); + await using var conn = await dataSource.OpenConnectionAsync(); var tempTableName = await CreateTempTable(conn, "id int"); diff --git a/test/Npgsql.Tests/BugTests.cs b/test/Npgsql.Tests/BugTests.cs index fac6e71c92..66cd34e65c 100644 --- a/test/Npgsql.Tests/BugTests.cs +++ b/test/Npgsql.Tests/BugTests.cs @@ -125,7 +125,8 @@ public async Task Bug3600() CommandTimeout = 1, }; await using var postmasterMock = PgPostmasterMock.Start(csb.ConnectionString); - await using var conn = await OpenConnectionAsync(postmasterMock.ConnectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); var serverMock = await postmasterMock.WaitForServerConnection(); await serverMock .WriteCopyInResponse() @@ -152,26 +153,25 @@ public async Task Bug1497() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1558")] public void Bug1558() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - Pooling = false, - Enlist = true - }; + csb.Pooling = false; + csb.Enlist = true; + }); using var tx = new TransactionScope(); - using var conn = new NpgsqlConnection(csb.ToString()); - conn.Open(); + using var conn = dataSource.OpenConnection(); } [Test] public void Bug1695() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - Pooling = false, - MaxAutoPrepare = 10, - 
AutoPrepareMinUsages = 1 - }; - using var conn = OpenConnection(csb); + csb.Pooling = false; + csb.MaxAutoPrepare = 10; + csb.AutoPrepareMinUsages = 1; + }); + using var conn = dataSource.OpenConnection(); using (var cmd = new NpgsqlCommand("SELECT 1; SELECT 2", conn)) using (var reader = cmd.ExecuteReader()) { @@ -288,13 +288,13 @@ public async Task Bug2046() [Test] public void Bug1761() { - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - Enlist = true, - Pooling = true, - MinPoolSize = 1, - MaxPoolSize = 1 - }.ConnectionString; + csb.Enlist = true; + csb.Pooling = true; + csb.MinPoolSize = 1; + csb.MaxPoolSize = 1; + }); for (var i = 0; i < 2; i++) { @@ -306,7 +306,7 @@ public void Bug1761() // Ambient transaction is now unusable, attempts to enlist to it will fail. We should recover // properly from this failure. - using (var connection = OpenConnection(connString)) + using (var connection = dataSource.OpenConnection()) using (var cmd = new NpgsqlCommand("SELECT 1", connection)) { cmd.CommandText = "select 1;"; @@ -375,32 +375,31 @@ enum Bug2278EnumType [Test] [IssueLink("https://github.com/npgsql/npgsql/issues/2178")] - public void Bug2178() + public async Task Bug2178() { - var builder = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - AutoPrepareMinUsages = 2, - MaxAutoPrepare = 2 - }; - using var conn = new NpgsqlConnection(builder.ConnectionString); - using var cmd = new NpgsqlCommand(); - conn.Open(); + csb.AutoPrepareMinUsages = 2; + csb.MaxAutoPrepare = 2; + }); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand(); cmd.Connection = conn; cmd.CommandText = "SELECT 1"; - cmd.ExecuteScalar(); - cmd.ExecuteScalar(); + await cmd.ExecuteScalarAsync(); + await cmd.ExecuteScalarAsync(); Assert.That(cmd.IsPrepared); // Now executing a faulty command multiple times 
cmd.CommandText = "SELECT * FROM public.dummy_table_name"; for (var i = 0; i < 3; ++i) { - Assert.Throws(() => cmd.ExecuteScalar()); + Assert.ThrowsAsync(async () => await cmd.ExecuteScalarAsync()); } cmd.CommandText = "SELECT 1"; - cmd.ExecuteScalar(); + await cmd.ExecuteScalarAsync(); Assert.That(cmd.IsPrepared); } @@ -1100,11 +1099,9 @@ CREATE TEMP TABLE ""OrganisatieQmo_Organisatie_QueryModelObjects_Imp"" [Test, IssueLink("https://github.com/npgsql/npgsql/issues/2849")] public async Task Chunked_string_write_buffer_encoding_space() { - var builder = new NpgsqlConnectionStringBuilder(ConnectionString); - // write buffer size must be 8192 for this test to work - // so guard against changes to the default / a change in the test harness - builder.WriteBufferSize = 8192; - await using var conn = await OpenConnectionAsync(builder.ConnectionString); + // write buffer size must be 8192 for this test to work so guard against changes to the default / a change in the test harness + await using var dataSource = CreateDataSource(csb => csb.WriteBufferSize = 8192); + await using var conn = await dataSource.OpenConnectionAsync(); var tableName = await CreateTempTable(conn, "col1 text, col2 text"); @@ -1128,11 +1125,9 @@ public async Task Chunked_string_write_buffer_encoding_space() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/2849")] public async Task Chunked_char_array_write_buffer_encoding_space() { - var builder = new NpgsqlConnectionStringBuilder(ConnectionString); - // write buffer size must be 8192 for this test to work - // so guard against changes to the default / a change in the test harness - builder.WriteBufferSize = 8192; - await using var conn = await OpenConnectionAsync(builder.ConnectionString); + // write buffer size must be 8192 for this test to work so guard against changes to the default / a change in the test harness + await using var dataSource = CreateDataSource(csb => csb.WriteBufferSize = 8192); + await using var conn = await 
dataSource.OpenConnectionAsync(); var tableName = await CreateTempTable(conn, "col1 text, col2 text"); @@ -1215,9 +1210,9 @@ public void Bug3117() { const string OkCommand = "SELECT 1"; const string ErrorCommand = "SELECT * FROM public.imnotexist"; - using (var conn = new NpgsqlConnection(ConnectionString)) + using var dataSource = CreateDataSource(); + using (var conn = dataSource.OpenConnection()) { - conn.Open(); var okCommand = new NpgsqlCommand(OkCommand, conn); okCommand.Prepare(); using (okCommand.ExecuteReader()) { } @@ -1228,13 +1223,11 @@ public void Bug3117() .With.Property(nameof(PostgresException.SqlState)).EqualTo(PostgresErrorCodes.UndefinedTable)); } - using (var conn = new NpgsqlConnection(ConnectionString)) + using (var conn = dataSource.OpenConnection()) { - conn.Open(); var okCommand = new NpgsqlCommand(OkCommand, conn); okCommand.Prepare(); using (okCommand.ExecuteReader()) { } - conn.UnprepareAll(); } } @@ -1321,7 +1314,8 @@ public async Task Bug3924() }; await using var postmaster = PgPostmasterMock.Start(csb.ConnectionString); - await using var conn = await OpenConnectionAsync(postmaster.ConnectionString); + await using var dataSource = CreateDataSource(postmaster.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); var serverMock = await postmaster.WaitForServerConnection(); using (var cmd = conn.CreateCommand()) @@ -1360,10 +1354,10 @@ public async Task Bug4099() MaxPoolSize = 1 }; await using var postmaster = PgPostmasterMock.Start(csb.ConnectionString); - await using var firstConn = await OpenConnectionAsync(postmaster.ConnectionString); - await using var secondConn = await OpenConnectionAsync(postmaster.ConnectionString); + await using var dataSource = CreateDataSource(postmaster.ConnectionString); + await using var firstConn = await dataSource.OpenConnectionAsync(); + await using var secondConn = await dataSource.OpenConnectionAsync(); - var byteArrayLength = csb.WriteBufferSize + 100; var firstQuery = 
firstConn.ExecuteScalarAsync("SELECT data"); var server = await postmaster.WaitForServerConnection(); @@ -1399,12 +1393,12 @@ await server [IssueLink("https://github.com/npgsql/npgsql/issues/4123")] public async Task Bug4123() { - using var conn = OpenConnection(); - using var cmd = new NpgsqlCommand("SELECT 1", conn); - using var rdr = await cmd.ExecuteReaderAsync(); + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT 1", conn); + await using var rdr = await cmd.ExecuteReaderAsync(); await rdr.ReadAsync(); - using var stream = await rdr.GetStreamAsync(0); + await using var stream = await rdr.GetStreamAsync(0); Assert.DoesNotThrowAsync(stream.FlushAsync); Assert.DoesNotThrow(stream.Flush); diff --git a/test/Npgsql.Tests/CommandBuilderTests.cs b/test/Npgsql.Tests/CommandBuilderTests.cs index 90b146d344..e917b7f6b3 100644 --- a/test/Npgsql.Tests/CommandBuilderTests.cs +++ b/test/Npgsql.Tests/CommandBuilderTests.cs @@ -80,9 +80,9 @@ public async Task DeriveParameters_text_prepared_statement() { const string query = "SELECT @p::integer"; const int answer = 42; - using var _ = CreateTempPool(ConnectionString, out var connString); - using var conn = await OpenConnectionAsync(connString); - using var cmd = new NpgsqlCommand(query, conn); + await using var dataSource = CreateDataSource(); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand(query, conn); cmd.Parameters.AddWithValue("@p", NpgsqlDbType.Integer, answer); cmd.Prepare(); Assert.That(conn.Connector!.PreparedStatementManager.NumPrepared, Is.EqualTo(1)); @@ -102,8 +102,6 @@ public async Task DeriveParameters_text_prepared_statement() Assert.That(conn.Connector.PreparedStatementManager.NumPrepared, Is.EqualTo(1)); cmd.Parameters["@p"].Value = answer; Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(answer)); - - conn.UnprepareAll(); } [Test, Description("Tests parameter derivation for array parameters in 
parameterized queries (CommandType.Text)")] diff --git a/test/Npgsql.Tests/CommandTests.cs b/test/Npgsql.Tests/CommandTests.cs index 21e9da6085..8d04f0a128 100644 --- a/test/Npgsql.Tests/CommandTests.cs +++ b/test/Npgsql.Tests/CommandTests.cs @@ -145,10 +145,9 @@ public async Task Timeout() if (IsMultiplexing) return; // Multiplexing, Timeout - // Mono throws a socket exception with WouldBlock instead of TimedOut (see #1330) - var isMono = Type.GetType("Mono.Runtime") != null; - using var conn = await OpenConnectionAsync(ConnectionString + ";CommandTimeout=1"); - using var cmd = CreateSleepCommand(conn, 10); + await using var dataSource = CreateDataSource(csb => csb.CommandTimeout = 1); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = CreateSleepCommand(conn, 10); Assert.That(() => cmd.ExecuteNonQuery(), Throws.Exception .TypeOf() .With.InnerException.TypeOf() @@ -163,8 +162,9 @@ public async Task Timeout_async_soft() if (IsMultiplexing) return; // Multiplexing, Timeout - using var conn = await OpenConnectionAsync(builder => builder.CommandTimeout = 1); - using var cmd = CreateSleepCommand(conn, 10); + await using var dataSource = CreateDataSource(csb => csb.CommandTimeout = 1); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = CreateSleepCommand(conn, 10); Assert.That(async () => await cmd.ExecuteNonQueryAsync(), Throws.Exception .TypeOf() @@ -181,8 +181,8 @@ public async Task Timeout_async_hard() var builder = new NpgsqlConnectionStringBuilder(ConnectionString) { CommandTimeout = 1 }; await using var postmasterMock = PgPostmasterMock.Start(builder.ConnectionString); - using var _ = CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); await 
postmasterMock.WaitForServerConnection(); var processId = conn.ProcessID; @@ -202,13 +202,9 @@ public async Task Timeout_from_connection_string() { Assert.That(NpgsqlConnector.MinimumInternalCommandTimeout, Is.Not.EqualTo(NpgsqlCommand.DefaultTimeout)); var timeout = NpgsqlConnector.MinimumInternalCommandTimeout; - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) - { - CommandTimeout = timeout - }.ToString(); - using var conn = new NpgsqlConnection(connString); - var command = new NpgsqlCommand("SELECT 1", conn); - conn.Open(); + await using var dataSource = CreateDataSource(csb => csb.CommandTimeout = timeout); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var command = new NpgsqlCommand("SELECT 1", conn); Assert.That(command.CommandTimeout, Is.EqualTo(timeout)); command.CommandTimeout = 10; await command.ExecuteScalarAsync(); @@ -218,29 +214,25 @@ public async Task Timeout_from_connection_string() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/395")] public async Task Timeout_switch_connection() { - using (var conn = new NpgsqlConnection(ConnectionString)) + var csb = new NpgsqlConnectionStringBuilder(ConnectionString); + if (csb.CommandTimeout >= 100 && csb.CommandTimeout < 105) + IgnoreExceptOnBuildServer("Bad default command timeout"); + + await using var dataSource1 = CreateDataSource(ConnectionString + ";CommandTimeout=100"); + await using var c1 = dataSource1.CreateConnection(); + await using var cmd = c1.CreateCommand(); + Assert.That(cmd.CommandTimeout, Is.EqualTo(100)); + await using var dataSource2 = CreateDataSource(ConnectionString + ";CommandTimeout=101"); + await using (var c2 = dataSource2.CreateConnection()) { - if (conn.CommandTimeout >= 100 && conn.CommandTimeout < 105) - TestUtil.IgnoreExceptOnBuildServer("Bad default command timeout"); + cmd.Connection = c2; + Assert.That(cmd.CommandTimeout, Is.EqualTo(101)); } - - using (var c1 = await OpenConnectionAsync(ConnectionString + 
";CommandTimeout=100")) + cmd.CommandTimeout = 102; + await using (var c2 = dataSource2.CreateConnection()) { - using (var cmd = c1.CreateCommand()) - { - Assert.That(cmd.CommandTimeout, Is.EqualTo(100)); - using (var c2 = new NpgsqlConnection(ConnectionString + ";CommandTimeout=101")) - { - cmd.Connection = c2; - Assert.That(cmd.CommandTimeout, Is.EqualTo(101)); - } - cmd.CommandTimeout = 102; - using (var c2 = new NpgsqlConnection(ConnectionString + ";CommandTimeout=101")) - { - cmd.Connection = c2; - Assert.That(cmd.CommandTimeout, Is.EqualTo(102)); - } - } + cmd.Connection = c2; + Assert.That(cmd.CommandTimeout, Is.EqualTo(102)); } } @@ -252,8 +244,8 @@ public async Task Prepare_timeout_hard([Values] SyncOrAsync async) var builder = new NpgsqlConnectionStringBuilder(ConnectionString) { CommandTimeout = 1 }; await using var postmasterMock = PgPostmasterMock.Start(builder.ConnectionString); - using var _ = CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); await postmasterMock.WaitForServerConnection(); var processId = conn.ProcessID; @@ -285,8 +277,8 @@ public async Task Cancel() if (IsMultiplexing) return; - using var conn = await OpenConnectionAsync(); - using var cmd = CreateSleepCommand(conn, 5); + await using var conn = await OpenConnectionAsync(); + await using var cmd = CreateSleepCommand(conn, 5); var queryTask = Task.Run(() => cmd.ExecuteNonQuery()); // We have to be sure the command's state is InProgress, otherwise the cancellation request will never be sent @@ -306,7 +298,7 @@ public async Task Cancel_async_immediately() return; // Multiplexing, cancellation await using var conn = await OpenConnectionAsync(); - using var cmd = conn.CreateCommand(); + await using var cmd = conn.CreateCommand(); cmd.CommandText = 
"SELECT 1"; var t = cmd.ExecuteScalarAsync(new(canceled: true)); @@ -325,7 +317,7 @@ public async Task Cancel_async_soft() return; // Multiplexing, cancellation await using var conn = await OpenConnectionAsync(); - using var cmd = CreateSleepCommand(conn); + await using var cmd = CreateSleepCommand(conn); using var cancellationSource = new CancellationTokenSource(); var t = cmd.ExecuteNonQueryAsync(cancellationSource.Token); cancellationSource.Cancel(); @@ -346,8 +338,8 @@ public async Task Cancel_async_hard() return; // Multiplexing, cancellation await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); - using var _ = CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); await postmasterMock.WaitForServerConnection(); var processId = conn.ProcessID; @@ -378,14 +370,14 @@ public async Task Bug3466([Values(false, true)] bool isBroken) Pooling = false }; await using var postmasterMock = PgPostmasterMock.Start(csb.ToString(), completeCancellationImmediately: false); - using var _ = CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); var serverMock = await postmasterMock.WaitForServerConnection(); var processId = conn.ProcessID; using var cancellationSource = new CancellationTokenSource(); - using var cmd = new NpgsqlCommand("SELECT 1", conn) + await using var cmd = new NpgsqlCommand("SELECT 1", conn) { CommandTimeout = 4 }; @@ -427,9 +419,9 @@ await serverMock [Explicit("Timing-sensitive")] public async Task Cancel_cross_command() { - using var conn = await OpenConnectionAsync(); - 
using var cmd1 = CreateSleepCommand(conn, 2); - using var cmd2 = new NpgsqlCommand("SELECT 1", conn); + await using var conn = await OpenConnectionAsync(); + await using var cmd1 = CreateSleepCommand(conn, 2); + await using var cmd2 = new NpgsqlCommand("SELECT 1", conn); var cancelTask = Task.Factory.StartNew(() => { Thread.Sleep(300); @@ -531,12 +523,12 @@ public async Task SingleRow([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepa if (prepare == PrepareOrNot.Prepared && IsMultiplexing) return; - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT 1, 2 UNION SELECT 3, 4", conn); + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT 1, 2 UNION SELECT 3, 4", conn); if (prepare == PrepareOrNot.Prepared) cmd.Prepare(); - using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SingleRow); + await using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SingleRow); Assert.That(() => reader.GetInt32(0), Throws.Exception.TypeOf()); Assert.That(reader.Read(), Is.True); Assert.That(reader.GetInt32(0), Is.EqualTo(1)); @@ -737,8 +729,8 @@ public async Task Generic_parameter() [Test] public async Task CommandText_not_set() { - using var conn = await OpenConnectionAsync(); - using (var cmd = new NpgsqlCommand()) + await using var conn = await OpenConnectionAsync(); + await using (var cmd = new NpgsqlCommand()) { cmd.Connection = conn; Assert.That(cmd.ExecuteNonQueryAsync, Throws.Exception.TypeOf()); @@ -747,16 +739,16 @@ public async Task CommandText_not_set() cmd.CommandText = ""; } - using (var cmd = conn.CreateCommand()) + await using (var cmd = conn.CreateCommand()) Assert.That(cmd.ExecuteNonQueryAsync, Throws.Exception.TypeOf()); } [Test] public async Task ExecuteScalar() { - using var conn = await OpenConnectionAsync(); + await using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "name TEXT"); - using var command = new 
NpgsqlCommand($"SELECT name FROM {table}", conn); + await using var command = new NpgsqlCommand($"SELECT name FROM {table}", conn); Assert.That(command.ExecuteScalarAsync, Is.Null); await conn.ExecuteNonQueryAsync($"INSERT INTO {table} (name) VALUES (NULL)"); @@ -771,8 +763,8 @@ public async Task ExecuteScalar() [Test] public async Task ExecuteNonQuery() { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand { Connection = conn }; + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand { Connection = conn }; var table = await CreateTempTable(conn, "name TEXT"); cmd.CommandText = $"INSERT INTO {table} (name) VALUES ('John')"; @@ -788,7 +780,7 @@ public async Task ExecuteNonQuery() [Test, Description("Makes sure a command is unusable after it is disposed")] public async Task Dispose() { - using var conn = await OpenConnectionAsync(); + await using var conn = await OpenConnectionAsync(); var cmd = new NpgsqlCommand("SELECT 1", conn); cmd.Dispose(); Assert.That(() => cmd.ExecuteScalarAsync(), Throws.Exception.TypeOf()); @@ -800,7 +792,7 @@ public async Task Dispose() [Test, Description("Disposing a command with an open reader does not close the reader. 
This is the SqlClient behavior.")] public async Task Command_Dispose_does_not_close_reader() { - using var conn = await OpenConnectionAsync(); + await using var conn = await OpenConnectionAsync(); var cmd = new NpgsqlCommand("SELECT 1, 2", conn); await cmd.ExecuteReaderAsync(); cmd.Dispose(); @@ -811,12 +803,12 @@ public async Task Command_Dispose_does_not_close_reader() [Test] public async Task Non_standards_conforming_strings() { - using var _ = CreateTempPool(ConnectionString, out var connString); - await using var conn = await OpenConnectionAsync(connString); + await using var dataSource = CreateDataSource(); + await using var conn = await dataSource.OpenConnectionAsync(); if (IsMultiplexing) { - Assert.That(() => conn.ExecuteNonQueryAsync("set standard_conforming_strings=off"), + Assert.That(async () => await conn.ExecuteNonQueryAsync("set standard_conforming_strings=off"), Throws.Exception.TypeOf()); } else @@ -830,12 +822,12 @@ public async Task Non_standards_conforming_strings() [Test] public async Task Parameter_and_operator_unclear() { - using var conn = await OpenConnectionAsync(); + await using var conn = await OpenConnectionAsync(); //Without parenthesis the meaning of [, . and potentially other characters is //a syntax error. See comment in NpgsqlCommand.GetClearCommandText() on "usually-redundant parenthesis". 
- using var command = new NpgsqlCommand("select :arr[2]", conn); + await using var command = new NpgsqlCommand("select :arr[2]", conn); command.Parameters.AddWithValue(":arr", new int[] {5, 4, 3, 2, 1}); - using var rdr = await command.ExecuteReaderAsync(); + await using var rdr = await command.ExecuteReaderAsync(); rdr.Read(); Assert.AreEqual(rdr.GetInt32(0), 4); } @@ -845,7 +837,7 @@ public async Task Parameter_and_operator_unclear() [TestCase(CommandBehavior.SequentialAccess)] public async Task Statement_mapped_output_parameters(CommandBehavior behavior) { - using var conn = await OpenConnectionAsync(); + await using var conn = await OpenConnectionAsync(); var command = new NpgsqlCommand("select 3, 4 as param1, 5 as param2, 6;", conn); var p = new NpgsqlParameter("param2", NpgsqlDbType.Integer); @@ -863,7 +855,7 @@ public async Task Statement_mapped_output_parameters(CommandBehavior behavior) p.Value = -1; command.Parameters.Add(p); - using var reader = await command.ExecuteReaderAsync(behavior); + await using var reader = await command.ExecuteReaderAsync(behavior); Assert.AreEqual(4, command.Parameters["param1"].Value); Assert.AreEqual(5, command.Parameters["param2"].Value); @@ -983,8 +975,8 @@ public async Task Send_NpgsqlDbType_Unknown([Values(PrepareOrNot.NotPrepared, Pr public async Task Invalid_UTF8() { const string badString = "SELECT 'abc\uD801\uD802d'"; - using var _ = CreateTempPool(ConnectionString, out var connString); - using var conn = await OpenConnectionAsync(connString); + await using var dataSource = CreateDataSource(); + using var conn = await dataSource.OpenConnectionAsync(); Assert.That(() => conn.ExecuteScalarAsync(badString), Throws.Exception.TypeOf()); } @@ -1301,8 +1293,8 @@ public async Task Bug3509() KeepAlive = 1, }; await using var postmasterMock = PgPostmasterMock.Start(csb.ToString()); - using var _ = CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - await using var conn = await 
OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); var serverMock = await postmasterMock.WaitForServerConnection(); // Wait for a keepalive to arrive at the server, reply with an error await serverMock.WaitForData(); @@ -1366,8 +1358,8 @@ public async Task Postgres_connection_errors_not_break_connection() return; await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); - using var _ = CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); await using var cmd = conn.CreateCommand(); cmd.CommandText = "SELECT 1"; @@ -1392,8 +1384,8 @@ public async Task Concurrent_read_write_failure_deadlock() return; await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); - using var _ = CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); await using var cmd = conn.CreateCommand(); // Attempt to send a big enough query to fill buffers diff --git a/test/Npgsql.Tests/ConnectionTests.cs b/test/Npgsql.Tests/ConnectionTests.cs index fd61b4426f..754e5d9b40 100644 --- a/test/Npgsql.Tests/ConnectionTests.cs +++ b/test/Npgsql.Tests/ConnectionTests.cs @@ -26,7 +26,7 @@ public class ConnectionTests : MultiplexingTestBase [Test, Description("Makes sure the connection goes through the proper state lifecycle")] public async Task Basic_lifecycle() { - using var conn = new NpgsqlConnection(ConnectionString); + await using var conn = CreateConnection(); var 
eventOpen = false; var eventClosed = false; @@ -211,16 +211,13 @@ public void Invalid_Username() [Test] public void Bad_database() { - var builder = new NpgsqlConnectionStringBuilder(ConnectionString) - { - Database = "does_not_exist" - }; - using (CreateTempPool(builder, out var connectionString)) - using (var conn = new NpgsqlConnection(connectionString)) - Assert.That(() => conn.Open(), - Throws.Exception.TypeOf() - .With.Property(nameof(PostgresException.SqlState)).EqualTo(PostgresErrorCodes.InvalidCatalogName) - ); + using var dataSource = CreateDataSource(csb => csb.Database = "does_not_exist"); + using var conn = dataSource.CreateConnection(); + + Assert.That(() => conn.Open(), + Throws.Exception.TypeOf() + .With.Property(nameof(PostgresException.SqlState)).EqualTo(PostgresErrorCodes.InvalidCatalogName) + ); } [Test, Description("Tests that mandatory connection string parameters are indeed mandatory")] @@ -239,16 +236,13 @@ public async Task Fail_connect_then_succeed([Values] bool pooling) await conn1.ExecuteNonQueryAsync($"DROP DATABASE IF EXISTS \"{dbName}\""); try { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - Database = dbName, - Pooling = pooling - }; - - // Create a temp pool to allow us to drop database at the end of the test - using var _ = CreateTempPool(csb, out var connString); + csb.Database = dbName; + csb.Pooling = pooling; + }); - await using var conn2 = new NpgsqlConnection(connString); + await using var conn2 = dataSource.CreateConnection(); var pgEx = Assert.ThrowsAsync(conn2.OpenAsync)!; Assert.That(pgEx.SqlState, Is.EqualTo(PostgresErrorCodes.InvalidCatalogName)); // database doesn't exist Assert.That(conn2.FullState, Is.EqualTo(ConnectionState.Closed)); @@ -267,6 +261,8 @@ public async Task Fail_connect_then_succeed([Values] bool pooling) [Test] public void Open_timeout_unknown_ip([Values(true, false)] bool async) { + const int timeoutSeconds = 2; + var 
unknownIp = Environment.GetEnvironmentVariable("NPGSQL_UNKNOWN_IP"); if (unknownIp is null) { @@ -274,13 +270,12 @@ public void Open_timeout_unknown_ip([Values(true, false)] bool async) return; } - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - Host = unknownIp, - Timeout = 2 - }; - using var _ = CreateTempPool(csb, out var connString); - using var conn = new NpgsqlConnection(connString); + csb.Host = unknownIp; + csb.Timeout = timeoutSeconds; + }); + using var conn = dataSource.CreateConnection(); var sw = Stopwatch.StartNew(); if (async) @@ -296,8 +291,8 @@ public void Open_timeout_unknown_ip([Values(true, false)] bool async) .With.InnerException.TypeOf()); } - Assert.That(sw.Elapsed.TotalMilliseconds, Is.GreaterThanOrEqualTo((csb.Timeout * 1000) - 100), - $"Timeout was supposed to happen after {csb.Timeout} seconds, but fired after {sw.Elapsed.TotalSeconds}"); + Assert.That(sw.Elapsed.TotalMilliseconds, Is.GreaterThanOrEqualTo(timeoutSeconds * 1000 - 100), + $"Timeout was supposed to happen after {timeoutSeconds} seconds, but fired after {sw.Elapsed.TotalSeconds}"); Assert.That(conn.State, Is.EqualTo(ConnectionState.Closed)); } @@ -344,10 +339,8 @@ public async Task Client_encoding_env_var() // Note that the pool is unaware of the environment variable, so if a connection is // returned from the pool it may contain the wrong client_encoding using var _ = SetEnvironmentVariable("PGCLIENTENCODING", "SQL_ASCII"); - using var __ = CreateTempPool(ConnectionString, out var connectionString); - - var connString = new NpgsqlConnectionStringBuilder(connectionString); - using var conn = await OpenConnectionAsync(connString); + await using var dataSource = CreateDataSource(); + await using var conn = await dataSource.OpenConnectionAsync(); Assert.That(await conn.ExecuteScalarAsync("SHOW client_encoding"), Is.EqualTo("SQL_ASCII")); } @@ -356,8 +349,8 @@ public async Task Client_encoding_connection_param() { 
using (var conn = await OpenConnectionAsync()) Assert.That(await conn.ExecuteScalarAsync("SHOW client_encoding"), Is.Not.EqualTo("SQL_ASCII")); - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) { ClientEncoding = "SQL_ASCII" }; - using (var conn = await OpenConnectionAsync(connString)) + await using var dataSource = CreateDataSource(csb => csb.ClientEncoding = "SQL_ASCII"); + using (var conn = await dataSource.OpenConnectionAsync()) Assert.That(await conn.ExecuteScalarAsync("SHOW client_encoding"), Is.EqualTo("SQL_ASCII")); } @@ -380,8 +373,8 @@ public async Task Timezone_env_var() // Note that the pool is unaware of the environment variable, so if a connection is // returned from the pool it may contain the wrong timezone using var _ = SetEnvironmentVariable("PGTZ", newTimezone); - using var __ = CreateTempPool(ConnectionString, out var connectionString); - using var conn2 = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(); + using var conn2 = await dataSource.OpenConnectionAsync(); Assert.That(await conn2.ExecuteScalarAsync("SHOW TIMEZONE"), Is.EqualTo(newTimezone)); } @@ -396,12 +389,8 @@ public async Task Timezone_connection_param() : "Africa/Bamako"; } - var _ = CreateTempPool(ConnectionString, out var connString); - var builder = new NpgsqlConnectionStringBuilder(connString) - { - Timezone = newTimezone - }; - using (var conn = await OpenConnectionAsync(builder.ConnectionString)) + await using var dataSource = CreateDataSource(csb => csb.Timezone = newTimezone); + using (var conn = await dataSource.OpenConnectionAsync()) Assert.That(await conn.ExecuteScalarAsync("SHOW TIMEZONE"), Is.EqualTo(newTimezone)); } @@ -463,17 +452,13 @@ public async Task Unix_domain_socket() return; } - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - Host = dir - }; - try { - await using var conn = await OpenConnectionAsync(csb); + await using var dataSource = CreateDataSource(csb => csb.Host = 
dir); + await using var conn = await dataSource.OpenConnectionAsync(); await using var tx = await conn.BeginTransactionAsync(); Assert.That(await conn.ExecuteScalarAsync("SELECT 1", tx), Is.EqualTo(1)); - Assert.That(conn.DataSource, Is.EqualTo(Path.Combine(csb.Host, $".s.PGSQL.{port}"))); + Assert.That(conn.DataSource, Is.EqualTo(Path.Combine(dir, $".s.PGSQL.{port}"))); } catch (Exception ex) { @@ -502,7 +487,8 @@ public async Task Unix_abstract_domain_socket() try { - await using var conn = await OpenConnectionAsync(csb); + await using var dataSource = CreateDataSource(csb.ToString()); + await using var conn = await dataSource.OpenConnectionAsync(); await using var tx = await conn.BeginTransactionAsync(); Assert.That(await conn.ExecuteScalarAsync("SELECT 1", tx), Is.EqualTo(1)); Assert.That(conn.DataSource, Is.EqualTo(Path.Combine(csb.Host, $".s.PGSQL.{csb.Port}"))); @@ -724,13 +710,8 @@ public async Task Reload_types_keepalive_concurrent() if (IsMultiplexing) Assert.Ignore("Multiplexing doesn't support keepalive"); - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - KeepAlive = 1, - }; - using var _ = CreateTempPool(csb, out var connString); - - await using var conn = await OpenConnectionAsync(connString); + await using var dataSource = CreateDataSource(csb => csb.KeepAlive = 1); + await using var conn = await dataSource.OpenConnectionAsync(); var startTimestamp = Stopwatch.GetTimestamp(); // Give a few seconds for a KeepAlive to possibly perform @@ -782,17 +763,13 @@ public void ChangeDatabase_connection_on_closed_connection_throws() [Test, Description("Tests closing a connector while a reader is open")] public async Task Close_during_read([Values(PooledOrNot.Pooled, PooledOrNot.Unpooled)] PooledOrNot pooled) { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString); - if (pooled == PooledOrNot.Unpooled) - { - if (IsMultiplexing) - return; // Multiplexing requires pooling - csb.Pooling = false; - } + if (IsMultiplexing && pooled == 
PooledOrNot.Unpooled) + return; // Multiplexing requires pooling - using var conn = await OpenConnectionAsync(csb); - using (var cmd = new NpgsqlCommand("SELECT 1", conn)) - using (var reader = await cmd.ExecuteReaderAsync()) + await using var dataSource = CreateDataSource(csb => csb.Pooling = pooled == PooledOrNot.Pooled); + await using var conn = await dataSource.OpenConnectionAsync(); + await using (var cmd = new NpgsqlCommand("SELECT 1", conn)) + await using (var reader = await cmd.ExecuteReaderAsync()) { reader.Read(); conn.Close(); @@ -808,19 +785,18 @@ public async Task Close_during_read([Values(PooledOrNot.Pooled, PooledOrNot.Unpo [Test] public async Task Search_path() { - using var conn = await OpenConnectionAsync(new NpgsqlConnectionStringBuilder(ConnectionString) { SearchPath = "foo" }); + await using var dataSource = CreateDataSource(csb => csb.SearchPath = "foo"); + await using var conn = await dataSource.OpenConnectionAsync(); Assert.That(await conn.ExecuteScalarAsync("SHOW search_path"), Contains.Substring("foo")); } [Test] public async Task Set_options() { - using var _ = CreateTempPool(new NpgsqlConnectionStringBuilder(ConnectionString) - { - Options = "-c default_transaction_isolation=serializable -c default_transaction_deferrable=on -c foo.bar=My\\ Famous\\\\Thing" - }, out var connectionString); - - using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(csb => + csb.Options = + "-c default_transaction_isolation=serializable -c default_transaction_deferrable=on -c foo.bar=My\\ Famous\\\\Thing"); + await using var conn = await dataSource.OpenConnectionAsync(); Assert.That(await conn.ExecuteScalarAsync("SHOW default_transaction_isolation"), Is.EqualTo("serializable")); Assert.That(await conn.ExecuteScalarAsync("SHOW default_transaction_deferrable"), Is.EqualTo("on")); @@ -835,10 +811,9 @@ public async Task Connector_not_initialized_exception() for (var i = 0; i < 2; i++) { - using var 
connection = new NpgsqlConnection(ConnectionString); - connection.Open(); + await using var connection = await OpenConnectionAsync(); command.Connection = connection; - var tx = connection.BeginTransaction(); + await using var tx = await connection.BeginTransactionAsync(); await command.ExecuteScalarAsync(); await tx.CommitAsync(); } @@ -875,11 +850,9 @@ public void Change_ApplicationName_with_connection_string_builder() [Test, Description("Makes sure notices are probably received and emitted as events")] public async Task Notice() { - await using var conn = await OpenConnectionAsync(new NpgsqlConnectionStringBuilder(ConnectionString) - { - // Make sure messages are in English - Options = "-c lc_messages=en_US.UTF-8" - }); + // Make sure messages are in English + await using var dataSource = CreateDataSource(csb => csb.Options = "-c lc_messages=en_US.UTF-8"); + await using var conn = await dataSource.OpenConnectionAsync(); var function = await GetTempFunctionName(conn); await conn.ExecuteNonQueryAsync($@" CREATE OR REPLACE FUNCTION {function}() RETURNS VOID AS @@ -1082,30 +1055,30 @@ public async Task DatabaseInfo_is_shared() if (IsMultiplexing) return; // Create a temp pool to make sure the second connection will be new and not idle - using var _ = CreateTempPool(ConnectionString, out var connString); - using var conn1 = await OpenConnectionAsync(connString); + await using var dataSource = CreateDataSource(); + await using var conn1 = await dataSource.OpenConnectionAsync(); // Call RealoadTypes to force reload DatabaseInfo conn1.ReloadTypes(); - using var conn2 = await OpenConnectionAsync(connString); + await using var conn2 = await dataSource.OpenConnectionAsync(); Assert.That(conn1.Connector!.DatabaseInfo, Is.SameAs(conn2.Connector!.DatabaseInfo)); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/736")] public async Task ManyOpenClose() { + await using var dataSource = CreateDataSource(); // The connector's _sentRfqPrependedMessages is a byte, too 
many open/closes made it overflow for (var i = 0; i < 255; i++) { - using var conn = new NpgsqlConnection(ConnectionString); - conn.Open(); + await using var conn = await dataSource.OpenConnectionAsync(); } - using (var conn = new NpgsqlConnection(ConnectionString)) + await using (var conn = dataSource.CreateConnection()) { - conn.Open(); + await conn.OpenAsync(); } - using (var conn = new NpgsqlConnection(ConnectionString)) + await using (var conn = dataSource.CreateConnection()) { - conn.Open(); + await conn.OpenAsync(); Assert.That(await conn.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1)); } } @@ -1113,13 +1086,14 @@ public async Task ManyOpenClose() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/736")] public async Task Many_open_close_with_transaction() { + await using var dataSource = CreateDataSource(); // The connector's _sentRfqPrependedMessages is a byte, too many open/closes made it overflow for (var i = 0; i < 255; i++) { - using var conn = await OpenConnectionAsync(); - conn.BeginTransaction(); + await using var conn = await dataSource.OpenConnectionAsync(); + await conn.BeginTransactionAsync(); } - using (var conn = await OpenConnectionAsync()) + await using (var conn = await dataSource.OpenConnectionAsync()) Assert.That(await conn.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1)); } @@ -1132,19 +1106,18 @@ public async Task Rollback_on_close() // Npgsql 3.0.0 to 3.0.4 prepended a rollback for the next time the connector is used, as an optimization. // This caused some issues (#927) and was removed. 
- // Clear connections in pool as we're going to need to reopen the same connection - var dummyConn = new NpgsqlConnection(ConnectionString); - NpgsqlConnection.ClearPool(dummyConn); + await using var dataSource = CreateDataSource(); int processId; - using (var conn = await OpenConnectionAsync()) + await using (var conn = await dataSource.OpenConnectionAsync()) { processId = conn.Connector!.BackendProcessId; - conn.BeginTransaction(); + await conn.BeginTransactionAsync(); await conn.ExecuteNonQueryAsync("SELECT 1"); Assert.That(conn.Connector.TransactionStatus, Is.EqualTo(TransactionStatus.InTransactionBlock)); } - using (var conn = await OpenConnectionAsync()) + + await using (var conn = await dataSource.OpenConnectionAsync()) { Assert.That(conn.Connector!.BackendProcessId, Is.EqualTo(processId)); Assert.That(conn.Connector.TransactionStatus, Is.EqualTo(TransactionStatus.Idle)); @@ -1153,17 +1126,18 @@ public async Task Rollback_on_close() [Test, Description("Tests an exception happening when sending the Terminate message while closing a ready connector")] [IssueLink("https://github.com/npgsql/npgsql/issues/777")] - [Ignore("Flaky")] public async Task Exception_during_close() { - var dataSourceBuilder = CreateDataSourceBuilder(); - dataSourceBuilder.ConnectionStringBuilder.Pooling = false; - await using var dataSource = dataSourceBuilder.Build(); - using var conn = await dataSource.OpenConnectionAsync(); + // Pooling must be on to use multiplexing + if (IsMultiplexing) + return; + + await using var dataSource = CreateDataSource(csb => csb.Pooling = false); + await using var conn = await dataSource.OpenConnectionAsync(); var connectorId = conn.ProcessID; using (var conn2 = await OpenConnectionAsync()) - conn2.ExecuteNonQuery($"SELECT pg_terminate_backend({connectorId})"); + await conn2.ExecuteNonQueryAsync($"SELECT pg_terminate_backend({connectorId})"); conn.Close(); } @@ -1171,13 +1145,8 @@ public async Task Exception_during_close() [Test, Description("Some 
pseudo-PG database don't support pg_type loading, we have a minimal DatabaseInfo for this")] public async Task NoTypeLoading() { - var builder = new NpgsqlConnectionStringBuilder(ConnectionString) - { - ServerCompatibilityMode = ServerCompatibilityMode.NoTypeLoading - }; - - using var _ = CreateTempPool(builder, out var connectionString); - using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(csb => csb.ServerCompatibilityMode = ServerCompatibilityMode.NoTypeLoading); + await using var conn = await dataSource.OpenConnectionAsync(); Assert.That(await conn.ExecuteScalarAsync("SELECT 8"), Is.EqualTo(8)); Assert.That(await conn.ExecuteScalarAsync("SELECT 'foo'"), Is.EqualTo("foo")); @@ -1242,16 +1211,14 @@ public async Task Non_UTF8_Encoding() try { // Insert some win1252 data - var goodBuilder = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var goodDataSource = CreateDataSource(csb => { - Database = "sqlascii", - Encoding = "windows-1252", - ClientEncoding = "sql-ascii", - }; - - using var _ = CreateTempPool(goodBuilder, out var goodConnectionString); + csb.Database = "sqlascii"; + csb.Encoding = "windows-1252"; + csb.ClientEncoding = "sql-ascii"; + }); - await using (var conn = await OpenConnectionAsync(goodConnectionString)) + await using (var conn = await goodDataSource.OpenConnectionAsync()) { const string value = "éàç"; await conn.ExecuteNonQueryAsync("CREATE TABLE foo (bar TEXT)"); @@ -1268,12 +1235,8 @@ public async Task Non_UTF8_Encoding() } // A normal connection with the default UTF8 encoding and client_encoding should fail - var badBuilder = new NpgsqlConnectionStringBuilder(ConnectionString) - { - Database = "sqlascii", - }; - using var __ = CreateTempPool(badBuilder, out var badConnectionString); - await using (var conn = await OpenConnectionAsync(badConnectionString)) + await using var badDataSource = CreateDataSource(csb => csb.Database = "sqlascii"); + await using (var conn = 
await badDataSource.OpenConnectionAsync()) { Assert.That(async () => await conn.ExecuteScalarAsync("SELECT * FROM foo"), Throws.Exception.TypeOf() @@ -1294,40 +1257,38 @@ public async Task Oversize_buffer() if (IsMultiplexing) return; - using (CreateTempPool(ConnectionString, out var connectionString)) - using (var conn = await OpenConnectionAsync(connectionString)) - { - var csb = new NpgsqlConnectionStringBuilder(connectionString); - - Assert.That(conn.Connector!.ReadBuffer.Size, Is.EqualTo(csb.ReadBufferSize)); + await using var dataSource = CreateDataSource(); + await using var conn = await dataSource.OpenConnectionAsync(); + var csb = new NpgsqlConnectionStringBuilder(ConnectionString); - // Read a big row, we should now be using an oversize buffer - var bigString1 = new string('x', conn.Connector.ReadBuffer.Size + 1); - using (var cmd = new NpgsqlCommand($"SELECT '{bigString1}'", conn)) - using (var reader = await cmd.ExecuteReaderAsync()) - { - reader.Read(); - Assert.That(reader.GetString(0), Is.EqualTo(bigString1)); - } - var size1 = conn.Connector.ReadBuffer.Size; - Assert.That(conn.Connector.ReadBuffer.Size, Is.GreaterThan(csb.ReadBufferSize)); + Assert.That(conn.Connector!.ReadBuffer.Size, Is.EqualTo(csb.ReadBufferSize)); - // Even bigger oversize buffer - var bigString2 = new string('x', conn.Connector.ReadBuffer.Size + 1); - using (var cmd = new NpgsqlCommand($"SELECT '{bigString2}'", conn)) - using (var reader = await cmd.ExecuteReaderAsync()) - { - reader.Read(); - Assert.That(reader.GetString(0), Is.EqualTo(bigString2)); - } - Assert.That(conn.Connector.ReadBuffer.Size, Is.GreaterThan(size1)); + // Read a big row, we should now be using an oversize buffer + var bigString1 = new string('x', conn.Connector.ReadBuffer.Size + 1); + using (var cmd = new NpgsqlCommand($"SELECT '{bigString1}'", conn)) + using (var reader = await cmd.ExecuteReaderAsync()) + { + reader.Read(); + Assert.That(reader.GetString(0), Is.EqualTo(bigString1)); + } + var size1 = 
conn.Connector.ReadBuffer.Size; + Assert.That(conn.Connector.ReadBuffer.Size, Is.GreaterThan(csb.ReadBufferSize)); - var processId = conn.ProcessID; - conn.Close(); - conn.Open(); - Assert.That(conn.ProcessID, Is.EqualTo(processId)); - Assert.That(conn.Connector.ReadBuffer.Size, Is.EqualTo(csb.ReadBufferSize)); + // Even bigger oversize buffer + var bigString2 = new string('x', conn.Connector.ReadBuffer.Size + 1); + using (var cmd = new NpgsqlCommand($"SELECT '{bigString2}'", conn)) + using (var reader = await cmd.ExecuteReaderAsync()) + { + reader.Read(); + Assert.That(reader.GetString(0), Is.EqualTo(bigString2)); } + Assert.That(conn.Connector.ReadBuffer.Size, Is.GreaterThan(size1)); + + var processId = conn.ProcessID; + conn.Close(); + conn.Open(); + Assert.That(conn.ProcessID, Is.EqualTo(processId)); + Assert.That(conn.Connector.ReadBuffer.Size, Is.EqualTo(csb.ReadBufferSize)); } #region Keepalive @@ -1335,22 +1296,16 @@ public async Task Oversize_buffer() [Test, Explicit, Description("Turns on TCP keepalive and sleeps forever, good for wiresharking")] public async Task TcpKeepaliveTime() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - TcpKeepAliveTime = 2 - }; - using (await OpenConnectionAsync(csb)) + await using var dataSource = CreateDataSource(csb => csb.TcpKeepAliveTime = 2); + using (await dataSource.OpenConnectionAsync()) Thread.Sleep(Timeout.Infinite); } [Test, Explicit, Description("Turns on TCP keepalive and sleeps forever, good for wiresharking")] public async Task TcpKeepalive() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - TcpKeepAlive = true - }; - using (await OpenConnectionAsync(csb)) + await using var dataSource = CreateDataSource(csb => csb.TcpKeepAlive = true); + await using (await dataSource.OpenConnectionAsync()) Thread.Sleep(Timeout.Infinite); } @@ -1360,21 +1315,18 @@ public async Task Keepalive_with_failed_transaction() if (IsMultiplexing) return; - var csb = new 
NpgsqlConnectionStringBuilder(ConnectionString) - { - KeepAlive = 1 - }; - using var conn = await OpenConnectionAsync(csb); - using var tx = await conn.BeginTransactionAsync(); + await using var dataSource = CreateDataSource(csb => csb.KeepAlive = 1); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var tx = await conn.BeginTransactionAsync(); - Assert.Throws(() => conn.ExecuteScalar("SELECT non_existent_table")); + Assert.ThrowsAsync(async () => await conn.ExecuteScalarAsync("SELECT non_existent_table")); // Connection is now in a failed transaction state. Wait a bit to allow for the keepalive to execute. Thread.Sleep(3000); await tx.RollbackAsync(); // Confirm that the connection is still open and usable - Assert.That(conn.ExecuteScalar("SELECT 1"), Is.EqualTo(1)); + Assert.That(await conn.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1)); } #endregion Keepalive @@ -1401,8 +1353,8 @@ public async Task Connect_OptionsFromEnvironment_Succeeds() { using (SetEnvironmentVariable("PGOPTIONS", "-c default_transaction_isolation=serializable -c default_transaction_deferrable=on -c foo.bar=My\\ Famous\\\\Thing")) { - using var _ = CreateTempPool(ConnectionString, out var connectionString); - using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(); + await using var conn = await dataSource.OpenConnectionAsync(); Assert.That(await conn.ExecuteScalarAsync("SHOW default_transaction_isolation"), Is.EqualTo("serializable")); Assert.That(await conn.ExecuteScalarAsync("SHOW default_transaction_deferrable"), Is.EqualTo("on")); Assert.That(await conn.ExecuteScalarAsync("SHOW foo.bar"), Is.EqualTo("My Famous\\Thing")); @@ -1414,22 +1366,22 @@ public async Task Connect_OptionsFromEnvironment_Succeeds() [TestCase(false, TestName = "NoNoResetOnClose")] public async Task NoResetOnClose(bool noResetOnClose) { - var builder = new NpgsqlConnectionStringBuilder(ConnectionString) + var originalApplicationName = 
new NpgsqlConnectionStringBuilder(ConnectionString).ApplicationName ?? ""; + + await using var dataSource = CreateDataSource(csb => { - MaxPoolSize = 1, - NoResetOnClose = noResetOnClose - }; - using var _ = CreateTempPool(builder, out var connectionString); - var original = new NpgsqlConnectionStringBuilder(connectionString).ApplicationName; + csb.MaxPoolSize = 1; + csb.NoResetOnClose = noResetOnClose; + }); - using var conn = await OpenConnectionAsync(connectionString); + await using var conn = await dataSource.OpenConnectionAsync(); await conn.ExecuteNonQueryAsync("SET application_name = 'modified'"); await conn.CloseAsync(); await conn.OpenAsync(); Assert.That(await conn.ExecuteScalarAsync("SHOW application_name"), Is.EqualTo( noResetOnClose || IsMultiplexing ? "modified" - : original)); + : originalApplicationName)); } #region Physical connection initialization @@ -1437,6 +1389,9 @@ public async Task NoResetOnClose(bool noResetOnClose) [Test] public async Task PhysicalConnectionInitializer_sync() { + if (IsMultiplexing) // Sync I/O + return; + await using var adminConn = await OpenConnectionAsync(); var table = await CreateTempTable(adminConn, "ID INTEGER"); @@ -1461,6 +1416,11 @@ public async Task PhysicalConnectionInitializer_sync() [Test] public async Task PhysicalConnectionInitializer_async() { + // With multiplexing the connector might become idle at undetermined point after the query is executed. + // Which is why we ignore it. 
+ if (IsMultiplexing) + return; + await using var adminConn = await OpenConnectionAsync(); var table = await CreateTempTable(adminConn, "ID INTEGER"); @@ -1488,8 +1448,6 @@ public async Task PhysicalConnectionInitializer_sync_with_break() if (IsMultiplexing) // Sync I/O return; - await using var adminConn = await OpenConnectionAsync(); - var dataSourceBuilder = CreateDataSourceBuilder(); dataSourceBuilder.UsePhysicalConnectionInitializer( conn => @@ -1510,8 +1468,6 @@ public async Task PhysicalConnectionInitializer_sync_with_break() [Test] public async Task PhysicalConnectionInitializer_async_with_break() { - await using var adminConn = await OpenConnectionAsync(); - var dataSourceBuilder = CreateDataSourceBuilder(); dataSourceBuilder.UsePhysicalConnectionInitializer( _ => throw new NotSupportedException(), @@ -1535,8 +1491,6 @@ public async Task PhysicalConnectionInitializer_async_throws_on_second_open() // With multiplexing a physical connection might open on NpgsqlConnection.OpenAsync (if there was no completed bootstrap beforehand) // or on NpgsqlCommand.ExecuteReaderAsync. // We've already tested the first case in PhysicalConnectionInitializer_async_throws above, testing the second one below. 
- await using var adminConn = await OpenConnectionAsync(); - var count = 0; var dataSourceBuilder = CreateDataSourceBuilder(); dataSourceBuilder.UsePhysicalConnectionInitializer( @@ -1549,9 +1503,21 @@ public async Task PhysicalConnectionInitializer_async_throws_on_second_open() }); await using var dataSource = dataSourceBuilder.Build(); - Assert.DoesNotThrowAsync(async () => await dataSource.OpenConnectionAsync()); + await using var conn1 = dataSource.CreateConnection(); + Assert.DoesNotThrowAsync(async () => await conn1.OpenAsync()); - var exception = Assert.ThrowsAsync(async () => await dataSource.OpenConnectionAsync())!; + // We start a transaction specifically for multiplexing (to bind a connector to the connection) + await using var tx = await conn1.BeginTransactionAsync(); + + await using var conn2 = dataSource.CreateConnection(); + Exception exception; + if (IsMultiplexing) + { + await conn2.OpenAsync(); + exception = Assert.ThrowsAsync(async () => await conn2.BeginTransactionAsync())!; + } + else + exception = Assert.ThrowsAsync(async () => await conn2.OpenAsync())!; Assert.That(exception.Message, Is.EqualTo("INTENTIONAL FAILURE")); } @@ -1587,9 +1553,9 @@ public async Task Breaking_connection_while_loading_database_info() if (IsMultiplexing) return; - using var _ = CreateTempPool(ConnectionString, out var connString); + await using var dataSource = CreateDataSource(); - await using var firstConn = new NpgsqlConnection(connString); + await using var firstConn = dataSource.CreateConnection(); NpgsqlDatabaseInfo.RegisterFactory(new BreakingDatabaseInfoFactory()); try { @@ -1602,7 +1568,7 @@ public async Task Breaking_connection_while_loading_database_info() } await firstConn.OpenAsync(); - await using var secondConn = await OpenConnectionAsync(connString); + await using var secondConn = await dataSource.OpenConnectionAsync(); await secondConn.CloseAsync(); await firstConn.ReloadTypesAsync(); diff --git a/test/Npgsql.Tests/CopyTests.cs 
b/test/Npgsql.Tests/CopyTests.cs index adac0916f4..87f5159dd5 100644 --- a/test/Npgsql.Tests/CopyTests.cs +++ b/test/Npgsql.Tests/CopyTests.cs @@ -1089,8 +1089,8 @@ public async Task Copy_is_not_supported_in_regular_command_execution() { // Run in a separate pool to protect other queries in multiplexing // because we're going to break the connection on CopyInResponse - using var _ = CreateTempPool(ConnectionString, out var connectionString); - using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(); + using var conn = await dataSource.OpenConnectionAsync(); var table = await CreateTempTable(conn, "foo INT"); Assert.That(() => conn.ExecuteNonQuery($@"COPY {table} (foo) FROM stdin"), Throws.Exception.TypeOf()); diff --git a/test/Npgsql.Tests/DataSourceTests.cs b/test/Npgsql.Tests/DataSourceTests.cs index 6778f6260a..81cd68661b 100644 --- a/test/Npgsql.Tests/DataSourceTests.cs +++ b/test/Npgsql.Tests/DataSourceTests.cs @@ -11,7 +11,7 @@ namespace Npgsql.Tests; public class DataSourceTests : TestBase { [Test] - public async Task CreateConnection() + public new async Task CreateConnection() { await using var dataSource = NpgsqlDataSource.Create(ConnectionString); await using var connection = dataSource.CreateConnection(); diff --git a/test/Npgsql.Tests/DistributedTransactionTests.cs b/test/Npgsql.Tests/DistributedTransactionTests.cs index 856861fa3a..c0eaccf0eb 100644 --- a/test/Npgsql.Tests/DistributedTransactionTests.cs +++ b/test/Npgsql.Tests/DistributedTransactionTests.cs @@ -7,7 +7,6 @@ using System.Text; using System.Threading; using System.Transactions; -using Npgsql.Internal; using NUnit.Framework; using static Npgsql.Tests.TestUtil; @@ -22,9 +21,11 @@ public void Two_connections_rollback_implicit_enlistment() using var adminConn = OpenConnection(); var table = CreateTempTable(adminConn, "name TEXT"); + var dataSource = EnlistOnDataSource; + using (new TransactionScope()) - using (var conn1 = 
OpenConnection(ConnectionStringEnlistOn)) - using (var conn2 = OpenConnection(ConnectionStringEnlistOn)) + using (var conn1 = dataSource.OpenConnection()) + using (var conn2 = dataSource.OpenConnection()) { conn1.ExecuteNonQuery($"INSERT INTO {table} (name) VALUES ('test1')"); conn2.ExecuteNonQuery($"INSERT INTO {table} (name) VALUES ('test2')"); @@ -44,8 +45,10 @@ public void Two_connections_rollback_explicit_enlistment() using var adminConn = OpenConnection(); var table = CreateTempTable(adminConn, "name TEXT"); - using (var conn1 = OpenConnection(ConnectionStringEnlistOff)) - using (var conn2 = OpenConnection(ConnectionStringEnlistOff)) + var dataSource = EnlistOffDataSource; + + using (var conn1 = dataSource.OpenConnection()) + using (var conn2 = dataSource.OpenConnection()) using (new TransactionScope()) { conn1.EnlistTransaction(Transaction.Current); @@ -69,9 +72,11 @@ public void Two_connections_commit() using var adminConn = OpenConnection(); var table = CreateTempTable(adminConn, "name TEXT"); + var dataSource = EnlistOnDataSource; + using (var scope = new TransactionScope()) - using (var conn1 = OpenConnection(ConnectionStringEnlistOn)) - using (var conn2 = OpenConnection(ConnectionStringEnlistOn)) + using (var conn1 = dataSource.OpenConnection()) + using (var conn2 = dataSource.OpenConnection()) { conn1.ExecuteNonQuery($"INSERT INTO {table} (name) VALUES ('test1')"); conn2.ExecuteNonQuery($"INSERT INTO {table} (name) VALUES ('test2')"); @@ -91,7 +96,7 @@ public void Two_connections_commit() public void Two_connections_with_failure() { // Use our own data source since this test breaks the connection with a critical failure, affecting database state tracking. 
- using var dataSource = NpgsqlDataSource.Create(ConnectionStringEnlistOn); + using var dataSource = CreateDataSource(csb => csb.Enlist = true); using var adminConn = dataSource.OpenConnection(); var table = CreateTempTable(adminConn, "name TEXT"); @@ -114,24 +119,24 @@ public void Two_connections_with_failure() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1737")] public void Multiple_unpooled_connections_do_not_reuse() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - Pooling = false, - Enlist = true - }; + csb.Pooling = false; + csb.Enlist = true; + }); using var scope = new TransactionScope(); int processId; - using (var conn1 = OpenConnection(csb)) + using (var conn1 = dataSource.OpenConnection()) using (var cmd = new NpgsqlCommand("SELECT 1", conn1)) { processId = conn1.ProcessID; cmd.ExecuteNonQuery(); } - using (var conn2 = OpenConnection(csb)) + using (var conn2 = dataSource.OpenConnection()) using (var cmd = new NpgsqlCommand("SELECT 1", conn2)) { // The connection reuse optimization isn't implemented for unpooled connections (though it could be) @@ -149,13 +154,15 @@ public void Transaction_race([Values(false, true)] bool distributed) using var adminConn = OpenConnection(); var table = CreateTempTable(adminConn, "name TEXT"); + var dataSource = EnlistOnDataSource; + for (var i = 1; i <= 100; i++) { var eventQueue = new ConcurrentQueue(); try { using (var tx = new TransactionScope()) - using (var conn1 = OpenConnection(ConnectionStringEnlistOn)) + using (var conn1 = dataSource.OpenConnection()) { eventQueue.Enqueue(new TransactionEvent("Scope started, connection enlisted")); conn1.ExecuteNonQuery($"INSERT INTO {table} (name) VALUES ('test1')"); @@ -221,12 +228,14 @@ public void Connection_reuse_race_after_transaction([Values(false, true)] bool d using var adminConn = OpenConnection(); var table = CreateTempTable(adminConn, "name TEXT"); + var dataSource = EnlistOffDataSource; 
+ for (var i = 1; i <= 100; i++) { var eventQueue = new ConcurrentQueue(); try { - using var conn1 = OpenConnection(ConnectionStringEnlistOff); + using var conn1 = dataSource.OpenConnection(); using (var scope = new TransactionScope()) { @@ -273,12 +282,14 @@ public void Connection_reuse_race_after_rollback([Values(false, true)] bool dist using var adminConn = OpenConnection(); var table = CreateTempTable(adminConn, "name TEXT"); + var dataSource = EnlistOffDataSource; + for (var i = 1; i <= 100; i++) { var eventQueue = new ConcurrentQueue(); try { - using var conn1 = OpenConnection(ConnectionStringEnlistOff); + using var conn1 = dataSource.OpenConnection(); using (new TransactionScope()) { @@ -326,12 +337,14 @@ public void Connection_reuse_race_chaining_transaction([Values(false, true)] boo using var adminConn = OpenConnection(); var table = CreateTempTable(adminConn, "name TEXT"); + var dataSource = EnlistOffDataSource; + for (var i = 1; i <= 100; i++) { var eventQueue = new ConcurrentQueue(); try { - using var conn1 = OpenConnection(ConnectionStringEnlistOff); + using var conn1 = dataSource.OpenConnection(); using (var scope = new TransactionScope()) { @@ -427,7 +440,8 @@ void AssertNoPreparedTransactions() int GetNumberOfPreparedTransactions() { - using (var conn = OpenConnection(ConnectionStringEnlistOff)) + var dataSource = EnlistOffDataSource; + using (var conn = dataSource.OpenConnection()) using (var cmd = new NpgsqlCommand("SELECT COUNT(*) FROM pg_prepared_xacts WHERE database = @database", conn)) { cmd.Parameters.Add(new NpgsqlParameter("database", conn.Database)); @@ -444,11 +458,9 @@ static void AssertNoDistributedIdentifier() static void AssertHasDistributedIdentifier() => Assert.That(Transaction.Current?.TransactionInformation.DistributedIdentifier ?? 
Guid.Empty, Is.Not.EqualTo(Guid.Empty), "Distributed identifier not found"); - public string ConnectionStringEnlistOn - => new NpgsqlConnectionStringBuilder(ConnectionString) { Enlist = true }.ToString(); + NpgsqlDataSource EnlistOnDataSource { get; set; } = default!; - public string ConnectionStringEnlistOff - => new NpgsqlConnectionStringBuilder(ConnectionString) { Enlist = false }.ToString(); + NpgsqlDataSource EnlistOffDataSource { get; set; } = default!; static string FormatEventQueue(ConcurrentQueue eventQueue) { @@ -606,6 +618,18 @@ public void OneTimeSetUp() } foreach (var xactGid in lingeringTransactions) connection.ExecuteNonQuery($"ROLLBACK PREPARED '{xactGid}'"); + + EnlistOnDataSource = CreateDataSource(csb => csb.Enlist = true); + EnlistOffDataSource = CreateDataSource(csb => csb.Enlist = false); + } + + [OneTimeTearDown] + public void OnTimeTearDown() + { + EnlistOnDataSource?.Dispose(); + EnlistOnDataSource = null!; + EnlistOffDataSource?.Dispose(); + EnlistOffDataSource = null!; } [SetUp] diff --git a/test/Npgsql.Tests/ExceptionTests.cs b/test/Npgsql.Tests/ExceptionTests.cs index 4c35b5bf66..101d0b67f6 100644 --- a/test/Npgsql.Tests/ExceptionTests.cs +++ b/test/Npgsql.Tests/ExceptionTests.cs @@ -16,16 +16,15 @@ public class ExceptionTests : TestBase [Test, Description("Generates a basic server-side exception, checks that it's properly raised and populated")] public void Basic() { - using var conn = OpenConnection(new NpgsqlConnectionStringBuilder(ConnectionString) - { - // Make sure messages are in English - Options = "-c lc_messages=en_US.UTF-8" - }); - conn.ExecuteNonQuery(@" - CREATE OR REPLACE FUNCTION pg_temp.emit_exception() RETURNS VOID AS - 'BEGIN RAISE EXCEPTION ''testexception'' USING ERRCODE = ''12345'', DETAIL = ''testdetail''; END;' - LANGUAGE 'plpgsql'; - "); + // Make sure messages are in English + using var dataSource = CreateDataSource(csb => csb.Options = "-c lc_messages=en_US.UTF-8"); + using var conn = 
dataSource.OpenConnection(); + conn.ExecuteNonQuery( +""" +CREATE OR REPLACE FUNCTION pg_temp.emit_exception() RETURNS VOID AS + 'BEGIN RAISE EXCEPTION ''testexception'' USING ERRCODE = ''12345'', DETAIL = ''testdetail''; END;' +LANGUAGE 'plpgsql'; +"""); PostgresException ex = null!; try @@ -93,9 +92,8 @@ await conn.ExecuteNonQueryAsync($@" [Test] public async Task IncludeErrorDetail() { - var builder = new NpgsqlConnectionStringBuilder(ConnectionString) { IncludeErrorDetail = true }; - using var _ = CreateTempPool(builder, out var connectionStringWithDetails); - await using var conn = await OpenConnectionAsync(connectionStringWithDetails); + await using var dataSource = CreateDataSource(csb => csb.IncludeErrorDetail = true); + await using var conn = await dataSource.OpenConnectionAsync(); var raiseExceptionFunc = await GetTempFunctionName(conn); var raiseNoticeFunc = await GetTempFunctionName(conn); diff --git a/test/Npgsql.Tests/MultipleHostsTests.cs b/test/Npgsql.Tests/MultipleHostsTests.cs index e588efbee6..bcf02a233d 100644 --- a/test/Npgsql.Tests/MultipleHostsTests.cs +++ b/test/Npgsql.Tests/MultipleHostsTests.cs @@ -25,28 +25,28 @@ public class MultipleHostsTests : TestBase { static readonly object[] MyCases = { - new object[] { "standby", new[] { Primary, Standby }, 1 }, - new object[] { "standby", new[] { PrimaryReadOnly, Standby }, 1 }, - new object[] { "prefer-standby", new[] { Primary, Standby }, 1 }, - new object[] { "prefer-standby", new[] { PrimaryReadOnly, Standby }, 1 }, - new object[] { "prefer-standby", new[] { Primary, Primary }, 0 }, - new object[] { "primary", new[] { Standby, Primary }, 1 }, - new object[] { "primary", new[] { Standby, PrimaryReadOnly }, 1 }, - new object[] { "prefer-primary", new[] { Standby, Primary }, 1 }, - new object[] { "prefer-primary", new[] { Standby, PrimaryReadOnly }, 1 }, - new object[] { "prefer-primary", new[] { Standby, Standby }, 0 }, - new object[] { "any", new[] { Standby, Primary }, 0 }, - new object[] { 
"any", new[] { Primary, Standby }, 0 }, - new object[] { "any", new[] { PrimaryReadOnly, Standby }, 0 }, - new object[] { "read-write", new[] { Standby, Primary }, 1 }, - new object[] { "read-write", new[] { PrimaryReadOnly, Primary }, 1 }, - new object[] { "read-only", new[] { Primary, Standby }, 1 }, - new object[] { "read-only", new[] { PrimaryReadOnly, Standby }, 0 } + new object[] { TargetSessionAttributes.Standby, new[] { Primary, Standby }, 1 }, + new object[] { TargetSessionAttributes.Standby, new[] { PrimaryReadOnly, Standby }, 1 }, + new object[] { TargetSessionAttributes.PreferStandby, new[] { Primary, Standby }, 1 }, + new object[] { TargetSessionAttributes.PreferStandby, new[] { PrimaryReadOnly, Standby }, 1 }, + new object[] { TargetSessionAttributes.PreferStandby, new[] { Primary, Primary }, 0 }, + new object[] { TargetSessionAttributes.Primary, new[] { Standby, Primary }, 1 }, + new object[] { TargetSessionAttributes.Primary, new[] { Standby, PrimaryReadOnly }, 1 }, + new object[] { TargetSessionAttributes.PreferPrimary, new[] { Standby, Primary }, 1 }, + new object[] { TargetSessionAttributes.PreferPrimary, new[] { Standby, PrimaryReadOnly }, 1 }, + new object[] { TargetSessionAttributes.PreferPrimary, new[] { Standby, Standby }, 0 }, + new object[] { TargetSessionAttributes.Any, new[] { Standby, Primary }, 0 }, + new object[] { TargetSessionAttributes.Any, new[] { Primary, Standby }, 0 }, + new object[] { TargetSessionAttributes.Any, new[] { PrimaryReadOnly, Standby }, 0 }, + new object[] { TargetSessionAttributes.ReadWrite, new[] { Standby, Primary }, 1 }, + new object[] { TargetSessionAttributes.ReadWrite, new[] { PrimaryReadOnly, Primary }, 1 }, + new object[] { TargetSessionAttributes.ReadOnly, new[] { Primary, Standby }, 1 }, + new object[] { TargetSessionAttributes.ReadOnly, new[] { PrimaryReadOnly, Standby }, 0 } }; [Test] [TestCaseSource(nameof(MyCases))] - public async Task Connect_to_correct_host_pooled(string targetSessionAttributes, 
MockState[] servers, int expectedServer) + public async Task Connect_to_correct_host_pooled(TargetSessionAttributes targetSessionAttributes, MockState[] servers, int expectedServer) { var postmasters = servers.Select(s => PgPostmasterMock.Start(state: s)).ToArray(); await using var __ = new DisposableWrapper(postmasters); @@ -54,13 +54,13 @@ public async Task Connect_to_correct_host_pooled(string targetSessionAttributes, var connectionStringBuilder = new NpgsqlConnectionStringBuilder { Host = MultipleHosts(postmasters), - TargetSessionAttributes = targetSessionAttributes, ServerCompatibilityMode = ServerCompatibilityMode.NoTypeLoading, Pooling = true }; - using var pool = CreateTempPool(connectionStringBuilder, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = new NpgsqlDataSourceBuilder(connectionStringBuilder.ConnectionString) + .BuildMultiHost(); + await using var conn = await dataSource.OpenConnectionAsync(targetSessionAttributes); Assert.That(conn.Port, Is.EqualTo(postmasters[expectedServer].Port)); @@ -70,7 +70,7 @@ public async Task Connect_to_correct_host_pooled(string targetSessionAttributes, [Test] [TestCaseSource(nameof(MyCases))] - public async Task Connect_to_correct_host_unpooled(string targetSessionAttributes, MockState[] servers, int expectedServer) + public async Task Connect_to_correct_host_unpooled(TargetSessionAttributes targetSessionAttributes, MockState[] servers, int expectedServer) { var postmasters = servers.Select(s => PgPostmasterMock.Start(state: s)).ToArray(); await using var __ = new DisposableWrapper(postmasters); @@ -78,13 +78,13 @@ public async Task Connect_to_correct_host_unpooled(string targetSessionAttribute var connectionStringBuilder = new NpgsqlConnectionStringBuilder { Host = MultipleHosts(postmasters), - TargetSessionAttributes = targetSessionAttributes, ServerCompatibilityMode = ServerCompatibilityMode.NoTypeLoading, Pooling = false }; - using var 
pool = CreateTempPool(connectionStringBuilder, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = new NpgsqlDataSourceBuilder(connectionStringBuilder.ConnectionString) + .BuildMultiHost(); + await using var conn = await dataSource.OpenConnectionAsync(targetSessionAttributes); Assert.That(conn.Port, Is.EqualTo(postmasters[expectedServer].Port)); @@ -95,7 +95,7 @@ public async Task Connect_to_correct_host_unpooled(string targetSessionAttribute [Test] [TestCaseSource(nameof(MyCases))] public async Task Connect_to_correct_host_with_available_idle( - string targetSessionAttributes, MockState[] servers, int expectedServer) + TargetSessionAttributes targetSessionAttributes, MockState[] servers, int expectedServer) { var postmasters = servers.Select(s => PgPostmasterMock.Start(state: s)).ToArray(); await using var __ = new DisposableWrapper(postmasters); @@ -105,29 +105,26 @@ public async Task Connect_to_correct_host_with_available_idle( var connectionStringBuilder = new NpgsqlConnectionStringBuilder { Host = MultipleHosts(postmasters), - TargetSessionAttributes = servers[0] switch - { - Primary => "read-write", - PrimaryReadOnly => "read-only", - Standby => "standby", - _ => throw new ArgumentOutOfRangeException() - }, ServerCompatibilityMode = ServerCompatibilityMode.NoTypeLoading, }; - using var pool = CreateTempPool(connectionStringBuilder, out var connectionString); - await using (_ = await OpenConnectionAsync(connectionString)) + await using var dataSource = new NpgsqlDataSourceBuilder(connectionStringBuilder.ConnectionString) + .BuildMultiHost(); + var idleConnTargetSessionAttributes = servers[0] switch + { + Primary => TargetSessionAttributes.ReadWrite, + PrimaryReadOnly => TargetSessionAttributes.ReadOnly, + Standby => TargetSessionAttributes.Standby, + _ => throw new ArgumentOutOfRangeException() + }; + await using (_ = await 
dataSource.OpenConnectionAsync(idleConnTargetSessionAttributes)) { // Do nothing, close to have an idle connection in the pool. } // Now connect with the test TargetSessionAttributes - connectionString = new NpgsqlConnectionStringBuilder(connectionString) - { - TargetSessionAttributes = targetSessionAttributes.ToString() - }.ConnectionString; - await using var conn = await OpenConnectionAsync(connectionString); + await using var conn = await dataSource.OpenConnectionAsync(targetSessionAttributes); Assert.That(conn.Port, Is.EqualTo(postmasters[expectedServer].Port)); @@ -136,11 +133,11 @@ public async Task Connect_to_correct_host_with_available_idle( } [Test] - [TestCase("standby", new[] { Primary, Primary })] - [TestCase("primary", new[] { Standby, Standby })] - [TestCase("read-write", new[] { PrimaryReadOnly, Standby })] - [TestCase("read-only", new[] { Primary, Primary })] - public async Task Valid_host_not_found(string targetSessionAttributes, MockState[] servers) + [TestCase(TargetSessionAttributes.Standby, new[] { Primary, Primary })] + [TestCase(TargetSessionAttributes.Primary, new[] { Standby, Standby })] + [TestCase(TargetSessionAttributes.ReadWrite, new[] { PrimaryReadOnly, Standby })] + [TestCase(TargetSessionAttributes.ReadOnly, new[] { Primary, Primary })] + public async Task Valid_host_not_found(TargetSessionAttributes targetSessionAttributes, MockState[] servers) { var postmasters = servers.Select(s => PgPostmasterMock.Start(state: s)).ToArray(); await using var __ = new DisposableWrapper(postmasters); @@ -149,12 +146,12 @@ public async Task Valid_host_not_found(string targetSessionAttributes, MockState { Host = MultipleHosts(postmasters), ServerCompatibilityMode = ServerCompatibilityMode.NoTypeLoading, - TargetSessionAttributes = targetSessionAttributes }; - using var pool = CreateTempPool(connectionStringBuilder.ConnectionString, out var connectionString); + await using var dataSource = new 
NpgsqlDataSourceBuilder(connectionStringBuilder.ConnectionString) + .BuildMultiHost(); - var exception = Assert.ThrowsAsync(async () => await OpenConnectionAsync(connectionString))!; + var exception = Assert.ThrowsAsync(async () => await dataSource.OpenConnectionAsync(targetSessionAttributes))!; Assert.That(exception.Message, Is.EqualTo("No suitable host was found.")); Assert.That(exception.InnerException, Is.Null); @@ -185,8 +182,9 @@ public void All_hosts_are_down() { Host = $"{localEndPoint1.Address}:{localEndPoint1.Port},{localEndPoint2.Address}:{localEndPoint2.Port}" }.ConnectionString; + using var dataSource = new NpgsqlDataSourceBuilder(connectionString).BuildMultiHost(); - var exception = Assert.ThrowsAsync(async () => await OpenConnectionAsync(connectionString))!; + var exception = Assert.ThrowsAsync(async () => await dataSource.OpenConnectionAsync(TargetSessionAttributes.Any))!; var aggregateException = (AggregateException)exception.InnerException!; Assert.That(aggregateException.InnerExceptions, Has.Count.EqualTo(2)); @@ -210,13 +208,12 @@ public async Task All_hosts_are_unavailable( { Host = MultipleHosts(primaryPostmaster, standbyPostmaster), ServerCompatibilityMode = ServerCompatibilityMode.NoTypeLoading, - TargetSessionAttributes = "any", Pooling = pooling, }; - using var _ = CreateTempPool(builder.ConnectionString, out var connectionString); + await using var dataSource = new NpgsqlDataSourceBuilder(builder.ConnectionString).BuildMultiHost(); - var ex = Assert.ThrowsAsync(async () => await OpenConnectionAsync(connectionString))!; + var ex = Assert.ThrowsAsync(async () => await dataSource.OpenConnectionAsync(TargetSessionAttributes.Any))!; Assert.That(ex.SqlState, Is.EqualTo(errorCode)); } @@ -238,7 +235,9 @@ public async Task First_host_is_down() ServerCompatibilityMode = ServerCompatibilityMode.NoTypeLoading }.ConnectionString; - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = new 
NpgsqlDataSourceBuilder(connectionString).BuildMultiHost(); + + await using var conn = await dataSource.OpenConnectionAsync(TargetSessionAttributes.Any); Assert.That(conn.Port, Is.EqualTo(postmaster.Port)); } @@ -261,12 +260,13 @@ public async Task TargetSessionAttributes_with_single_host(string targetSessionA { await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); using var pool = CreateTempPool(postmasterMock.ConnectionString, out connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var conn = new NpgsqlConnection(connectionString); + await conn.OpenAsync(); _ = await postmasterMock.WaitForServerConnection(); } else { - Assert.That(() => OpenConnectionAsync(connectionString), Throws.Exception.TypeOf()); + Assert.That(() => new NpgsqlConnection(connectionString), Throws.Exception.TypeOf()); } } @@ -291,9 +291,10 @@ public async Task TargetSessionAttributes_uses_environment_variable() Assert.That(builder.TargetSessionAttributes, Is.Null); - using var _ = CreateTempPool(builder.ConnectionString, out var connectionString); + await using var dataSource = new NpgsqlDataSourceBuilder(builder.ConnectionString) + .BuildMultiHost(); - await using var conn = await OpenConnectionAsync(connectionString); + await using var conn = await dataSource.OpenConnectionAsync(); Assert.That(conn.Port, Is.EqualTo(standbyPostmaster.Port)); } @@ -346,34 +347,35 @@ public async Task Connect_with_load_balancing() LoadBalanceHosts = true, }; - using var _ = CreateTempPool(defaultCsb.ConnectionString, out var defaultConnectionString); + await using var dataSource = new NpgsqlDataSourceBuilder(defaultCsb.ConnectionString) + .BuildMultiHost(); NpgsqlConnector firstConnector; NpgsqlConnector secondConnector; - await using (var firstConnection = await OpenConnectionAsync(defaultConnectionString)) + await using (var firstConnection = await dataSource.OpenConnectionAsync()) { firstConnector = firstConnection.Connector!; } - await 
using (var secondConnection = await OpenConnectionAsync(defaultConnectionString)) + await using (var secondConnection = await dataSource.OpenConnectionAsync()) { secondConnector = secondConnection.Connector!; } Assert.AreNotSame(firstConnector, secondConnector); - await using (var firstBalancedConnection = await OpenConnectionAsync(defaultConnectionString)) + await using (var firstBalancedConnection = await dataSource.OpenConnectionAsync()) { Assert.AreSame(firstConnector, firstBalancedConnection.Connector); } - await using (var secondBalancedConnection = await OpenConnectionAsync(defaultConnectionString)) + await using (var secondBalancedConnection = await dataSource.OpenConnectionAsync()) { Assert.AreSame(secondConnector, secondBalancedConnection.Connector); } - await using (var thirdBalancedConnection = await OpenConnectionAsync(defaultConnectionString)) + await using (var thirdBalancedConnection = await dataSource.OpenConnectionAsync()) { Assert.AreSame(firstConnector, thirdBalancedConnection.Connector); } @@ -393,33 +395,34 @@ public async Task Connect_without_load_balancing() LoadBalanceHosts = false, }; - using var _ = CreateTempPool(defaultCsb.ConnectionString, out var defaultConnectionString); + await using var dataSource = new NpgsqlDataSourceBuilder(defaultCsb.ConnectionString) + .BuildMultiHost(); NpgsqlConnector firstConnector; NpgsqlConnector secondConnector; - await using (var firstConnection = await OpenConnectionAsync(defaultConnectionString)) + await using (var firstConnection = await dataSource.OpenConnectionAsync()) { firstConnector = firstConnection.Connector!; } - await using (var secondConnection = await OpenConnectionAsync(defaultConnectionString)) + await using (var secondConnection = await dataSource.OpenConnectionAsync()) { Assert.AreSame(firstConnector, secondConnection.Connector); } - await using (var firstConnection = await OpenConnectionAsync(defaultConnectionString)) - await using (var secondConnection = await 
OpenConnectionAsync(defaultConnectionString)) + await using (var firstConnection = await dataSource.OpenConnectionAsync()) + await using (var secondConnection = await dataSource.OpenConnectionAsync()) { secondConnector = secondConnection.Connector!; } Assert.AreNotSame(firstConnector, secondConnector); - await using (var firstUnbalancedConnection = await OpenConnectionAsync(defaultConnectionString)) + await using (var firstUnbalancedConnection = await dataSource.OpenConnectionAsync()) { Assert.AreSame(firstConnector, firstUnbalancedConnection.Connector); } - await using (var secondUnbalancedConnection = await OpenConnectionAsync(defaultConnectionString)) + await using (var secondUnbalancedConnection = await dataSource.OpenConnectionAsync()) { Assert.AreSame(firstConnector, secondUnbalancedConnection.Connector); } @@ -437,11 +440,11 @@ public async Task Connect_state_changing_hosts([Values] bool alwaysCheckHostStat ServerCompatibilityMode = ServerCompatibilityMode.NoTypeLoading, MaxPoolSize = 1, HostRecheckSeconds = alwaysCheckHostState ? 
0 : int.MaxValue, - TargetSessionAttributes = "prefer-primary", NoResetOnClose = true, }; - using var _ = CreateTempPool(defaultCsb.ConnectionString, out var defaultConnectionString); + await using var dataSource = new NpgsqlDataSourceBuilder(defaultCsb.ConnectionString) + .BuildMultiHost(); NpgsqlConnector firstConnector; NpgsqlConnector secondConnector; @@ -474,14 +477,14 @@ public async Task Connect_state_changing_hosts([Values] bool alwaysCheckHostStat await server.SendMockState(Primary); }); - await using (var firstConnection = await OpenConnectionAsync(defaultConnectionString)) - await using (var secondConnection = await OpenConnectionAsync(defaultConnectionString)) + await using (var firstConnection = await dataSource.OpenConnectionAsync(TargetSessionAttributes.PreferPrimary)) + await using (var secondConnection = await dataSource.OpenConnectionAsync(TargetSessionAttributes.PreferPrimary)) { firstConnector = firstConnection.Connector!; secondConnector = secondConnection.Connector!; } - await using var thirdConnection = await OpenConnectionAsync(defaultConnectionString); + await using var thirdConnection = await dataSource.OpenConnectionAsync(TargetSessionAttributes.PreferPrimary); Assert.AreSame(alwaysCheckHostState ? secondConnector : firstConnector, thirdConnection.Connector); await firstServerTask; @@ -569,25 +572,23 @@ public async Task Offline_state_on_query_execution_pg_critical_failure() [Test, NonParallelizable] public async Task Offline_state_on_query_execution_pg_non_critical_failure() { - PoolManager.Reset(); - - var csb = new NpgsqlConnectionStringBuilder(ConnectionString); - await using var conn = await OpenConnectionAsync(csb); + await using var dataSource = CreateDataSource(); + await using var conn = await dataSource.OpenConnectionAsync(); // Starting with PG14 we get the cluster's state from PG automatically var expectedState = conn.PostgreSqlVersion.Major > 13 ? 
DatabaseState.PrimaryReadWrite : DatabaseState.Unknown; - var state = conn.NpgsqlDataSource.GetDatabaseState(); + var state = dataSource.GetDatabaseState(); Assert.That(state, Is.EqualTo(expectedState)); - Assert.That(conn.NpgsqlDataSource.Statistics.Total, Is.EqualTo(1)); + Assert.That(dataSource.Statistics.Total, Is.EqualTo(1)); var ex = Assert.ThrowsAsync(() => conn.ExecuteNonQueryAsync("SELECT abc"))!; Assert.That(ex.SqlState, Is.EqualTo(PostgresErrorCodes.UndefinedColumn)); Assert.That(conn.State, Is.EqualTo(ConnectionState.Open)); - state = conn.NpgsqlDataSource.GetDatabaseState(); + state = dataSource.GetDatabaseState(); Assert.That(state, Is.EqualTo(expectedState)); - Assert.That(conn.NpgsqlDataSource.Statistics.Total, Is.EqualTo(1)); + Assert.That(dataSource.Statistics.Total, Is.EqualTo(1)); } [Test] diff --git a/test/Npgsql.Tests/NotificationTests.cs b/test/Npgsql.Tests/NotificationTests.cs index 0092dfdad4..403a81312b 100644 --- a/test/Npgsql.Tests/NotificationTests.cs +++ b/test/Npgsql.Tests/NotificationTests.cs @@ -89,9 +89,9 @@ public void Wait_with_timeout() [Test] public void Wait_with_prepended_message() { - using var _ = CreateTempPool(ConnectionString, out var connString); - using (OpenConnection(connString)) {} // A DISCARD ALL is now prepended in the connection's write buffer - using var conn = OpenConnection(connString); + using var dataSource = CreateDataSource(); + using (dataSource.OpenConnection()) {} // A DISCARD ALL is now prepended in the connection's write buffer + using var conn = dataSource.OpenConnection(); Assert.That(conn.Wait(100), Is.EqualTo(false)); } @@ -120,23 +120,23 @@ public void WaitAsync_with_timeout() } [Test] - public async Task Wait_with_keepalive() + public void Wait_with_keepalive() { var notify = GetUniqueIdentifier(nameof(NotificationTests)); - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - KeepAlive = 1, - Pooling = false - }; - using var conn = 
OpenConnection(csb); - using var notifyingConn = OpenConnection(); + csb.KeepAlive = 1; + csb.Pooling = false; + }); + using var conn = dataSource.OpenConnection(); + using var notifyingConn = dataSource.OpenConnection(); conn.ExecuteNonQuery($"LISTEN {notify}"); var notificationTask = Task.Delay(2000).ContinueWith(t => notifyingConn.ExecuteNonQuery($"NOTIFY {notify}")); conn.Wait(); Assert.That(conn.ExecuteScalar("SELECT 1"), Is.EqualTo(1)); // A safeguard against closing an active connection - await notificationTask; + notificationTask.GetAwaiter().GetResult(); //Assert.That(TestLoggerSink.Records, Has.Some.With.Property("EventId").EqualTo(new EventId(NpgsqlEventId.Keepalive))); } @@ -145,18 +145,18 @@ public async Task WaitAsync_with_keepalive() { var notify = GetUniqueIdentifier(nameof(NotificationTests)); - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - KeepAlive = 1, - Pooling = false - }; - using var conn = OpenConnection(csb); - using var notifyingConn = OpenConnection(); - conn.ExecuteNonQuery($"LISTEN {notify}"); + csb.KeepAlive = 1; + csb.Pooling = false; + }); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var notifyingConn = await dataSource.OpenConnectionAsync(); + await conn.ExecuteNonQueryAsync($"LISTEN {notify}"); var notificationTask = Task.Delay(2000).ContinueWith(t => notifyingConn.ExecuteNonQuery($"NOTIFY {notify}")); await conn.WaitAsync(); //Assert.That(TestLoggerSink.Records, Has.Some.With.Property("EventId").EqualTo(new EventId(NpgsqlEventId.Keepalive))); - Assert.That(conn.ExecuteScalar("SELECT 1"), Is.EqualTo(1)); + Assert.That(await conn.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1)); // A safeguard against closing an active connection await notificationTask; } diff --git a/test/Npgsql.Tests/PoolManagerTests.cs b/test/Npgsql.Tests/PoolManagerTests.cs index 08c38dcb78..afd716dab5 100644 --- a/test/Npgsql.Tests/PoolManagerTests.cs +++ 
b/test/Npgsql.Tests/PoolManagerTests.cs @@ -40,7 +40,8 @@ public void Many_pools() [Test] public void ClearAllPools() { - using (OpenConnection()) {} + using (var conn = new NpgsqlConnection(ConnectionString)) + conn.Open(); // Now have one connection in the pool Assert.That(PoolManager.Pools.TryGetValue(ConnectionString, out var pool), Is.True); Assert.That(pool!.Statistics.Idle, Is.EqualTo(1)); @@ -54,9 +55,11 @@ public void ClearAllPools() public void ClearAllPools_with_busy() { NpgsqlDataSource? pool; - using (OpenConnection()) + using (var conn = new NpgsqlConnection(ConnectionString)) { - using (OpenConnection()) { } + conn.Open(); + using (var anotherConn = new NpgsqlConnection(ConnectionString)) + anotherConn.Open(); // We have one idle, one busy NpgsqlConnection.ClearAllPools(); diff --git a/test/Npgsql.Tests/PoolTests.cs b/test/Npgsql.Tests/PoolTests.cs index 2929884306..eda0bbedf7 100644 --- a/test/Npgsql.Tests/PoolTests.cs +++ b/test/Npgsql.Tests/PoolTests.cs @@ -9,104 +9,82 @@ namespace Npgsql.Tests; -[NonParallelizable] class PoolTests : TestBase { [Test] - public void MinPoolSize_equals_MaxPoolSize() + public async Task MinPoolSize_equals_MaxPoolSize() { - using var conn = CreateConnection(new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - ApplicationName = nameof(MinPoolSize_equals_MaxPoolSize), - MinPoolSize = 30, - MaxPoolSize = 30 - }.ToString()); - conn.Open(); + csb.MinPoolSize = 30; + csb.MaxPoolSize = 30; + }); + await using var conn = await dataSource.OpenConnectionAsync(); } [Test] public void MinPoolSize_bigger_than_MaxPoolSize_throws() - { - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) + => Assert.ThrowsAsync(async () => { - ApplicationName = nameof(MinPoolSize_bigger_than_MaxPoolSize_throws), - MinPoolSize = 2, - MaxPoolSize = 1 - }.ToString(); - - Assert.Throws(() => CreateConnection(connString)); - } + await using var dataSource = CreateDataSource(csb 
=> + { + csb.MinPoolSize = 2; + csb.MaxPoolSize = 1; + }); + }); [Test] - public void Reuse_connector_before_creating_new() + public async Task Reuse_connector_before_creating_new() { - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) - { - ApplicationName = nameof(Reuse_connector_before_creating_new), - }.ToString(); - - using var conn = CreateConnection(connString); - conn.Open(); + await using var dataSource = CreateDataSource(); + await using var conn = await dataSource.OpenConnectionAsync(); var backendId = conn.Connector!.BackendProcessId; - conn.Close(); - conn.Open(); + await conn.CloseAsync(); + await conn.OpenAsync(); Assert.That(conn.Connector.BackendProcessId, Is.EqualTo(backendId)); } [Test] - public void Get_connector_from_exhausted_pool() + public async Task Get_connector_from_exhausted_pool([Values(true, false)] bool async) { - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - ApplicationName = nameof(Get_connector_from_exhausted_pool), - MaxPoolSize = 1, - Timeout = 0 - }.ToString(); + csb.MaxPoolSize = 1; + csb.Timeout = 0; + }); - using var conn1 = CreateConnection(connString); - conn1.Open(); + await using var conn1 = await dataSource.OpenConnectionAsync(); // Pool is exhausted - using var conn2 = CreateConnection(connString); - _ = Task.Delay(1000).ContinueWith(_ => conn1.Close()); - conn2.Open(); - } - - //[Test, Explicit] - public async Task Get_connector_from_exhausted_pool_async() - { - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) - { - ApplicationName = nameof(Get_connector_from_exhausted_pool_async), - MaxPoolSize = 1, - Timeout = 0 - }.ToString(); - - using var conn1 = CreateConnection(connString); - await conn1.OpenAsync(); - - // Pool is exhausted - using var conn2 = CreateConnection(connString); - using (new Timer(o => conn1.Close(), null, 1000, Timeout.Infinite)) + await using var conn2 = 
dataSource.CreateConnection(); + _ = Task.Delay(1000).ContinueWith(async _ => + { + if (async) + await conn1.CloseAsync(); + else + conn1.Close(); + }); + if (async) await conn2.OpenAsync(); + else + conn2.Open(); } [Test] public async Task Timeout_getting_connector_from_exhausted_pool([Values(true, false)] bool async) { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - MaxPoolSize = 1, - Timeout = 2 - }; + csb.MaxPoolSize = 1; + csb.Timeout = 2; + }); - using var _ = CreateTempPool(csb, out var connString); - using (var conn1 = CreateConnection(connString)) + await using (var conn1 = dataSource.CreateConnection()) { await conn1.OpenAsync(); // Pool is now exhausted - await using var conn2 = CreateConnection(connString); + await using var conn2 = dataSource.CreateConnection(); var e = async ? Assert.ThrowsAsync(async () => await conn2.OpenAsync())! : Assert.Throws(() => conn2.Open())!; @@ -115,112 +93,69 @@ public async Task Timeout_getting_connector_from_exhausted_pool([Values(true, fa } // conn1 should now be back in the pool as idle - using (var conn3 = CreateConnection(connString)) - conn3.Open(); - } - - [Test] - public async Task Timeout_getting_connector_from_exhausted_pool_async() - { - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) - { - ApplicationName = nameof(Timeout_getting_connector_from_exhausted_pool_async), - MaxPoolSize = 1, - Timeout = 2 - }.ToString(); - - using (var conn1 = CreateConnection(connString)) - { - await conn1.OpenAsync(); - - // Pool is exhausted - using (var conn2 = CreateConnection(connString)) - Assert.That(async () => await conn2.OpenAsync(), Throws.Exception.TypeOf()); - } - // conn1 should now be back in the pool as idle - using (var conn3 = CreateConnection(connString)) - conn3.Open(); + await using var conn3 = await dataSource.OpenConnectionAsync(); } [Test] [Explicit("Timing-based")] public async Task OpenAsync_cancel() { - var 
connString = new NpgsqlConnectionStringBuilder(ConnectionString) - { - ApplicationName = nameof(OpenAsync_cancel), - MaxPoolSize = 1, - }.ToString(); + await using var dataSource = CreateDataSource(csb => csb.MaxPoolSize = 1); + await using var conn1 = await dataSource.OpenConnectionAsync(); - using var conn1 = CreateConnection(connString); - await conn1.OpenAsync(); - - Assert.True(PoolManager.Pools.TryGetValue(connString, out var pool)); - AssertPoolState(pool, open: 1, idle: 0); + AssertPoolState(dataSource, open: 1, idle: 0); // Pool is exhausted - using (var conn2 = CreateConnection(connString)) + await using (var conn2 = dataSource.CreateConnection()) { var cts = new CancellationTokenSource(1000); var openTask = conn2.OpenAsync(cts.Token); - AssertPoolState(pool, open: 1, idle: 0); + AssertPoolState(dataSource, open: 1, idle: 0); Assert.That(async () => await openTask, Throws.Exception.TypeOf()); } - AssertPoolState(pool, open: 1, idle: 0); - using (var conn2 = CreateConnection(connString)) - using (new Timer(o => conn1.Close(), null, 1000, Timeout.Infinite)) + AssertPoolState(dataSource, open: 1, idle: 0); + await using (var conn2 = dataSource.CreateConnection()) + await using (new Timer(o => conn1.Close(), null, 1000, Timeout.Infinite)) { await conn2.OpenAsync(); - AssertPoolState(pool, open: 1, idle: 0); + AssertPoolState(dataSource, open: 1, idle: 0); } - AssertPoolState(pool, open: 1, idle: 1); + AssertPoolState(dataSource, open: 1, idle: 1); } [Test, Description("Makes sure that when a pooled connection is closed it's properly reset, and that parameter settings aren't leaked")] - public void ResetOnClose() + public async Task ResetOnClose() { - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) - { - ApplicationName = nameof(ResetOnClose), - SearchPath = "public" - }.ToString(); - using var conn = CreateConnection(connString); - conn.Open(); - Assert.That(conn.ExecuteScalar("SHOW search_path"), Is.Not.Contains("pg_temp")); + await 
using var dataSource = CreateDataSource(csb => csb.SearchPath = "public"); + await using var conn = await dataSource.OpenConnectionAsync(); + Assert.That(await conn.ExecuteScalarAsync("SHOW search_path"), Is.Not.Contains("pg_temp")); var backendId = conn.Connector!.BackendProcessId; - conn.ExecuteNonQuery("SET search_path=pg_temp"); - conn.Close(); + await conn.ExecuteNonQueryAsync("SET search_path=pg_temp"); + await conn.CloseAsync(); - conn.Open(); + await conn.OpenAsync(); Assert.That(conn.Connector.BackendProcessId, Is.EqualTo(backendId)); - Assert.That(conn.ExecuteScalar("SHOW search_path"), Is.EqualTo("public")); + Assert.That(await conn.ExecuteScalarAsync("SHOW search_path"), Is.EqualTo("public")); } [Test] public void ConnectionPruningInterval_zero_throws() - { - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) + => Assert.ThrowsAsync(async () => { - ApplicationName = nameof(ConnectionPruningInterval_zero_throws), - ConnectionPruningInterval = 0 - }.ToString(); - - Assert.Throws(() => OpenConnection(connString)); - } + await using var dataSource = CreateDataSource(csb => csb.ConnectionPruningInterval = 0); + }); [Test] public void ConnectionPruningInterval_bigger_than_ConnectionIdleLifetime_throws() - { - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) + => Assert.ThrowsAsync(async () => { - ApplicationName = nameof(ConnectionPruningInterval_bigger_than_ConnectionIdleLifetime_throws), - ConnectionIdleLifetime = 1, - ConnectionPruningInterval = 2 - }.ToString(); - - Assert.Throws(() => OpenConnection(connString)); - } + await using var dataSource = CreateDataSource(csb => + { + csb.ConnectionIdleLifetime = 1; + csb.ConnectionPruningInterval = 2; + }); + }); [Theory, Explicit("Slow, and flaky under pressure, based on timing")] [TestCase(0, 2, 1, 2)] // min pool size 0, sample twice @@ -229,26 +164,24 @@ public void ConnectionPruningInterval_bigger_than_ConnectionIdleLifetime_throws( [TestCase(2, 3, 2, 2)] // test 
rounding up, should sample twice. [TestCase(2, 1, 1, 1)] // test sample once. [TestCase(2, 20, 3, 7)] // test high samples. - public void Prune_idle_connectors(int minPoolSize, int connectionIdleLifeTime, int connectionPruningInterval, int samples) + public async Task Prune_idle_connectors(int minPoolSize, int connectionIdleLifeTime, int connectionPruningInterval, int samples) { - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - ApplicationName = nameof(Prune_idle_connectors), - MinPoolSize = minPoolSize, - ConnectionIdleLifetime = connectionIdleLifeTime, - ConnectionPruningInterval = connectionPruningInterval - }.ToString(); + csb.MinPoolSize = minPoolSize; + csb.ConnectionIdleLifetime = connectionIdleLifeTime; + csb.ConnectionPruningInterval = connectionPruningInterval; + }); var connectionPruningIntervalMs = connectionPruningInterval * 1000; - using var conn1 = OpenConnection(connString); - using var conn2 = OpenConnection(connString); - using var conn3 = OpenConnection(connString); - Assert.True(PoolManager.Pools.TryGetValue(connString, out var pool)); + await using var conn1 = await dataSource.OpenConnectionAsync(); + await using var conn2 = await dataSource.OpenConnectionAsync(); + await using var conn3 = await dataSource.OpenConnectionAsync(); - conn1.Close(); - conn2.Close(); - AssertPoolState(pool!, open: 3, idle: 2); + await conn1.CloseAsync(); + await conn2.CloseAsync(); + AssertPoolState(dataSource!, open: 3, idle: 2); var paddingMs = 100; // 100ms var sleepInterval = connectionPruningIntervalMs + paddingMs; @@ -259,7 +192,7 @@ public void Prune_idle_connectors(int minPoolSize, int connectionIdleLifeTime, i total += sleepInterval; Thread.Sleep(sleepInterval); // ConnectionIdleLifetime not yet reached. - AssertPoolState(pool, open: 3, idle: 2); + AssertPoolState(dataSource, open: 3, idle: 2); } // final cycle to do pruning. 
@@ -267,65 +200,49 @@ public void Prune_idle_connectors(int minPoolSize, int connectionIdleLifeTime, i // ConnectionIdleLifetime reached, we still have one connection open minimum, // and as a result we have minPoolSize - 1 idle connections. - AssertPoolState(pool, open: Math.Max(1, minPoolSize), idle: Math.Max(0, minPoolSize - 1)); + AssertPoolState(dataSource, open: Math.Max(1, minPoolSize), idle: Math.Max(0, minPoolSize - 1)); } [Test, Description("Makes sure that when a waiting async open is is given a connection, the continuation is executed in the TP rather than on the closing thread")] - public void Close_releases_waiter_on_another_thread() + public async Task Close_releases_waiter_on_another_thread() { - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) - { - ApplicationName = nameof(Close_releases_waiter_on_another_thread), - MaxPoolSize = 1 - }.ToString(); - var conn1 = CreateConnection(connString); - try - { - conn1.Open(); // Pool is now exhausted + await using var dataSource = CreateDataSource(csb => csb.MaxPoolSize = 1); + await using var conn1 = await dataSource.OpenConnectionAsync(); // Pool is now exhausted - Assert.True(PoolManager.Pools.TryGetValue(connString, out var pool)); - AssertPoolState(pool, open: 1, idle: 0); + AssertPoolState(dataSource, open: 1, idle: 0); - Func> asyncOpener = async () => + Func> asyncOpener = async () => + { + using (var conn2 = dataSource.CreateConnection()) { - using (var conn2 = CreateConnection(connString)) - { - await conn2.OpenAsync(); - AssertPoolState(pool, open: 1, idle: 0); - } - AssertPoolState(pool, open: 1, idle: 1); - return Environment.CurrentManagedThreadId; - }; + await conn2.OpenAsync(); + AssertPoolState(dataSource, open: 1, idle: 0); + } + AssertPoolState(dataSource, open: 1, idle: 1); + return Environment.CurrentManagedThreadId; + }; - // Start an async open which will not complete as the pool is exhausted. 
- var asyncOpenerTask = asyncOpener(); - conn1.Close(); // Complete the async open by closing conn1 - var asyncOpenerThreadId = asyncOpenerTask.GetAwaiter().GetResult(); - AssertPoolState(pool, open: 1, idle: 1); + // Start an async open which will not complete as the pool is exhausted. + var asyncOpenerTask = asyncOpener(); + conn1.Close(); // Complete the async open by closing conn1 + var asyncOpenerThreadId = asyncOpenerTask.GetAwaiter().GetResult(); + AssertPoolState(dataSource, open: 1, idle: 1); - Assert.That(asyncOpenerThreadId, Is.Not.EqualTo(Environment.CurrentManagedThreadId)); - } - finally - { - conn1.Close(); - NpgsqlConnection.ClearPool(conn1); - } + Assert.That(asyncOpenerThreadId, Is.Not.EqualTo(Environment.CurrentManagedThreadId)); } [Test] //TODO: parallelize - public void Release_waiter_on_connection_failure() + public async Task Release_waiter_on_connection_failure() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - Port = 9999, - MaxPoolSize = 1 - }; + csb.Port = 9999; + csb.MaxPoolSize = 1; + }); - using var _ = CreateTempPool(csb, out var connString); var tasks = Enumerable.Range(0, 2).Select(i => Task.Run(async () => { - await using var conn = CreateConnection(connString); - await conn.OpenAsync(); + await using var conn = await dataSource.OpenConnectionAsync(); })).ToArray(); var ex = Assert.Throws(() => Task.WaitAll(tasks))!; @@ -341,19 +258,31 @@ public void ClearPool(int iterations) { var connString = new NpgsqlConnectionStringBuilder(ConnectionString) { - ApplicationName = nameof(ClearPool) + ApplicationName = nameof(ClearPool) + iterations }.ToString(); - NpgsqlConnection conn; - for (var i = 0; i < iterations; i++) + NpgsqlConnection? 
conn = null; + try { - using (conn = OpenConnection(connString)) { } - // Now have one connection in the pool - Assert.True(PoolManager.Pools.TryGetValue(connString, out var pool)); - AssertPoolState(pool, open: 1, idle: 1); + for (var i = 0; i < iterations; i++) + { + using (conn = new NpgsqlConnection(connString)) + { + conn.Open(); + } - NpgsqlConnection.ClearPool(conn); - AssertPoolState(pool, open: 0, idle: 0); + // Now have one connection in the pool + Assert.True(PoolManager.Pools.TryGetValue(connString, out var pool)); + AssertPoolState(pool, open: 1, idle: 1); + + NpgsqlConnection.ClearPool(conn); + AssertPoolState(pool, open: 0, idle: 0); + } + } + finally + { + if (conn is not null) + NpgsqlConnection.ClearPool(conn); } } @@ -365,16 +294,26 @@ public void ClearPool_with_busy() ApplicationName = nameof(ClearPool_with_busy) }.ToString(); - NpgsqlDataSource? pool; - using (var conn = OpenConnection(connString)) + var conn = new NpgsqlConnection(connString); + try { - NpgsqlConnection.ClearPool(conn); - // conn is still busy but should get closed when returned to the pool + NpgsqlDataSource? 
pool; + using (conn) + { + conn.Open(); + NpgsqlConnection.ClearPool(conn); + // conn is still busy but should get closed when returned to the pool + + Assert.True(PoolManager.Pools.TryGetValue(connString, out pool)); + AssertPoolState(pool, open: 1, idle: 0); + } - Assert.True(PoolManager.Pools.TryGetValue(connString, out pool)); - AssertPoolState(pool, open: 1, idle: 0); + AssertPoolState(pool, open: 0, idle: 0); + } + finally + { + NpgsqlConnection.ClearPool(conn); } - AssertPoolState(pool, open: 0, idle: 0); } [Test] @@ -384,26 +323,24 @@ public void ClearPool_with_no_pool() { ApplicationName = nameof(ClearPool_with_no_pool) }.ToString(); - using var conn = CreateConnection(connString); + using var conn = new NpgsqlConnection(connString); NpgsqlConnection.ClearPool(conn); } [Test, Description("https://github.com/npgsql/npgsql/commit/45e33ecef21f75f51a625c7b919a50da3ed8e920#r28239653")] public void Open_physical_failure() { - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - ApplicationName = nameof(Open_physical_failure), - Port = 44444, - MaxPoolSize = 1 - }.ToString(); - using var conn = CreateConnection(connString); + csb.Port = 44444; + csb.MaxPoolSize = 1; + }); + using var conn = dataSource.CreateConnection(); for (var i = 0; i < 1; i++) Assert.That(() => conn.Open(), Throws.Exception .TypeOf() .With.InnerException.TypeOf()); - Assert.True(PoolManager.Pools.TryGetValue(connString, out var pool)); - AssertPoolState(pool, open: 0, idle: 0); + AssertPoolState(dataSource, open: 0, idle: 0); } //[Test, Explicit] @@ -411,13 +348,9 @@ public void Open_physical_failure() //[TestCase(10, 10, 30, false)] //[TestCase(10, 20, 30, true)] //[TestCase(10, 20, 30, false)] - public void Exercise_pool(int maxPoolSize, int numTasks, int seconds, bool async) + public async Task Exercise_pool(int maxPoolSize, int numTasks, int seconds, bool async) { - var connString = new 
NpgsqlConnectionStringBuilder(ConnectionString) - { - ApplicationName = nameof(Exercise_pool), - MaxPoolSize = maxPoolSize - }.ToString(); + await using var dataSource = CreateDataSource(csb => csb.MaxPoolSize = maxPoolSize); Console.WriteLine($"Spinning up {numTasks} parallel tasks for {seconds} seconds (MaxPoolSize={maxPoolSize})..."); StopFlag = 0; @@ -425,7 +358,7 @@ public void Exercise_pool(int maxPoolSize, int numTasks, int seconds, bool async { while (StopFlag == 0) { - using var conn = CreateConnection(connString); + await using var conn = dataSource.CreateConnection(); if (async) await conn.OpenAsync(); else @@ -443,14 +376,8 @@ public void Exercise_pool(int maxPoolSize, int numTasks, int seconds, bool async [Test] public async Task ConnectionLifetime() { - var builder = new NpgsqlConnectionStringBuilder(ConnectionString) - { - ConnectionLifetime = 1 - }; - - using var _ = CreateTempPool(builder, out var connectionString); - await using var conn = new NpgsqlConnection(connectionString); - await conn.OpenAsync(); + await using var dataSource = CreateDataSource(csb => csb.ConnectionLifetime = 1); + await using var conn = await dataSource.OpenConnectionAsync(); var processId = conn.ProcessID; await conn.CloseAsync(); @@ -477,24 +404,23 @@ void AssertPoolState(NpgsqlDataSource? pool, int open, int idle) // With MaxPoolSize=1, opens many connections in parallel and executes a simple SELECT. 
Since there's only one // physical connection, all operations will be completely serialized [Test] - public Task OnePhysicalConnectionManyCommands() + public async Task OnePhysicalConnectionManyCommands() { const int numParallelCommands = 10000; - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - MaxPoolSize = 1, - MaxAutoPrepare = 5, - AutoPrepareMinUsages = 5, - Timeout = 0 - }.ToString(); + csb.MaxPoolSize = 1; + csb.MaxAutoPrepare = 5; + csb.AutoPrepareMinUsages = 5; + csb.Timeout = 0; + }); - return Task.WhenAll(Enumerable.Range(0, numParallelCommands) + await Task.WhenAll(Enumerable.Range(0, numParallelCommands) .Select(async i => { - using var conn = new NpgsqlConnection(connString); - await conn.OpenAsync(); - using var cmd = new NpgsqlCommand("SELECT " + i, conn); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT " + i, conn); var result = await cmd.ExecuteScalarAsync(); Assert.That(result, Is.EqualTo(i)); })); @@ -506,42 +432,23 @@ public Task OnePhysicalConnectionManyCommands() // TODO: Test not tested [Test] [Ignore("Multiplexing: fails")] - public void MultiplexedCommandDoesntGetExecutedOnTransactionedConnector() + public async Task MultiplexedCommandDoesntGetExecutedOnTransactionedConnector() { - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - MaxPoolSize = 1, - Timeout = 1 - }.ToString(); + csb.MaxPoolSize = 1; + csb.Timeout = 1; + }); - using var connWithTx = OpenConnection(connString); - using var tx = connWithTx.BeginTransaction(); + await using var connWithTx = await dataSource.OpenConnectionAsync(); + await using var tx = await connWithTx.BeginTransactionAsync(); // connWithTx should now be bound with the only physical connector available. 
// Any commands execute should timeout - using var conn2 = OpenConnection(connString); - using var cmd = new NpgsqlCommand("SELECT 1", conn2); + await using var conn2 = await dataSource.OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT 1", conn2); Assert.ThrowsAsync(() => cmd.ExecuteScalarAsync()); } - protected override NpgsqlConnection CreateConnection(string? connectionString = null) - { - var conn = base.CreateConnection(connectionString); - _cleanup.Add(conn); - return conn; - } - - readonly List _cleanup = new(); - - [TearDown] - public void Cleanup() - { - foreach (var c in _cleanup) - { - NpgsqlConnection.ClearPool(c); - } - _cleanup.Clear(); - } - #endregion } diff --git a/test/Npgsql.Tests/PrepareTests.cs b/test/Npgsql.Tests/PrepareTests.cs index 12f8d2e3b5..f1233c6df2 100644 --- a/test/Npgsql.Tests/PrepareTests.cs +++ b/test/Npgsql.Tests/PrepareTests.cs @@ -216,11 +216,8 @@ public void Double_prepare_different_sql() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/395")] public void Across_close_open_same_connector() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - ApplicationName = nameof(PrepareTests) + '.' + nameof(Across_close_open_same_connector) - }; - using var conn = OpenConnectionAndUnprepare(csb); + using var dataSource = CreateDataSource(); + using var conn = dataSource.OpenConnection(); using var cmd = new NpgsqlCommand("SELECT 1", conn); cmd.Prepare(); Assert.That(cmd.IsPrepared, Is.True); @@ -232,18 +229,14 @@ public void Across_close_open_same_connector() Assert.That(cmd.ExecuteScalar(), Is.EqualTo(1)); cmd.Prepare(); Assert.That(cmd.ExecuteScalar(), Is.EqualTo(1)); - NpgsqlConnection.ClearPool(conn); } [Test] public void Across_close_open_different_connector() { - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) - { - ApplicationName = nameof(PrepareTests) + '.' 
+ nameof(Across_close_open_different_connector) - }.ToString(); - using var conn1 = new NpgsqlConnection(connString); - using var conn2 = new NpgsqlConnection(connString); + using var dataSource = CreateDataSource(); + using var conn1 = dataSource.CreateConnection(); + using var conn2 = dataSource.CreateConnection(); using var cmd = new NpgsqlCommand("SELECT 1", conn1); conn1.Open(); cmd.Prepare(); @@ -257,17 +250,13 @@ public void Across_close_open_different_connector() Assert.That(cmd.ExecuteScalar(), Is.EqualTo(1)); // Execute unprepared cmd.Prepare(); Assert.That(cmd.ExecuteScalar(), Is.EqualTo(1)); - NpgsqlConnection.ClearPool(conn1); } [Test] public void Reuse_prepared_statement() { - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) - { - ApplicationName = nameof(PrepareTests) + '.' + nameof(Reuse_prepared_statement) - }.ToString(); - using var conn1 = OpenConnection(connString); + using var dataSource = CreateDataSource(); + using var conn1 = dataSource.OpenConnection(); var preparedStatement = ""; using (var cmd1 = new NpgsqlCommand("SELECT @p", conn1)) { @@ -286,7 +275,6 @@ public void Reuse_prepared_statement() Assert.That(cmd2.InternalBatchCommands[0].PreparedStatement!.Name, Is.EqualTo(preparedStatement)); Assert.That(cmd2.ExecuteScalar(), Is.EqualTo(8)); } - NpgsqlConnection.ClearPool(conn1); } [Test] @@ -388,12 +376,12 @@ public void One_command_same_sql_twice() [Test] public void One_command_same_sql_auto_prepare() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - MaxAutoPrepare = 5, - AutoPrepareMinUsages = 2 - }; - using var conn = OpenConnectionAndUnprepare(csb); + csb.MaxAutoPrepare = 5; + csb.AutoPrepareMinUsages = 2; + }); + using var conn = dataSource.OpenConnection(); var sql = new StringBuilder(); for (var i = 0; i < 2 + 1; i++) sql.Append("SELECT 1;"); @@ -535,12 +523,8 @@ public void Persistent_across_commands() [Test, Description("Basic persistent 
prepared system scenario. Checks that statement is not deallocated in the backend after connection close.")] public void Persistent_across_connections() { - var connSettings = new NpgsqlConnectionStringBuilder(ConnectionString) - { - ApplicationName = nameof(Persistent_across_connections) - }; - - using var conn = OpenConnectionAndUnprepare(connSettings); + using var dataSource = CreateDataSource(); + using var conn = dataSource.OpenConnection(); var processId = conn.ProcessID; AssertNumPreparedStatements(conn, 0); @@ -564,8 +548,6 @@ public void Persistent_across_connections() } AssertNumPreparedStatements(conn, 1, "Prepared statement deallocated"); Assert.That(GetPreparedStatements(conn).Single(), Is.EqualTo(stmtName), "Prepared statement name changed unexpectedly"); - - NpgsqlConnection.ClearPool(conn); } [Test, Description("Makes sure that calling Prepare() twice on a command does not deallocate or make a new one after the first prepared statement when command does not change")] @@ -741,8 +723,8 @@ public void Prepare_multiple_commands_with_parameters() [Test] public void Multiplexing_not_supported() { - var builder = new NpgsqlConnectionStringBuilder(ConnectionString) { Multiplexing = true }; - using var conn = OpenConnection(builder); + using var dataSource = CreateDataSource(csb => csb.Multiplexing = true); + using var conn = dataSource.OpenConnection(); using var cmd = new NpgsqlCommand("SELECT 1", conn); Assert.That(() => cmd.Prepare(), Throws.Exception.TypeOf()); @@ -752,13 +734,12 @@ public void Multiplexing_not_supported() [Test] public async Task Explicitly_prepared_statement_invalidation() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - MaxAutoPrepare = 10, - AutoPrepareMinUsages = 2 - }; - - await using var connection = await OpenConnectionAsync(csb); + csb.MaxAutoPrepare = 10; + csb.AutoPrepareMinUsages = 2; + }); + await using var connection = await 
dataSource.OpenConnectionAsync(); var table = await CreateTempTable(connection, "foo int"); await using var command = new NpgsqlCommand($"SELECT * FROM {table}", connection); @@ -777,16 +758,13 @@ public async Task Explicitly_prepared_statement_invalidation() Assert.False(command.IsPrepared); } - NpgsqlConnection OpenConnectionAndUnprepare(string? connectionString = null) + NpgsqlConnection OpenConnectionAndUnprepare() { - var conn = OpenConnection(connectionString); + var conn = OpenConnection(); conn.UnprepareAll(); return conn; } - NpgsqlConnection OpenConnectionAndUnprepare(NpgsqlConnectionStringBuilder csb) - => OpenConnectionAndUnprepare(csb.ToString()); - void AssertNumPreparedStatements(NpgsqlConnection conn, int expected) => Assert.That(conn.ExecuteScalar("SELECT COUNT(*) FROM pg_prepared_statements WHERE statement NOT LIKE '%FROM pg_prepared_statements%'"), Is.EqualTo(expected)); diff --git a/test/Npgsql.Tests/ReaderTests.cs b/test/Npgsql.Tests/ReaderTests.cs index 5dc6bd6534..bf940fab2d 100644 --- a/test/Npgsql.Tests/ReaderTests.cs +++ b/test/Npgsql.Tests/ReaderTests.cs @@ -365,11 +365,8 @@ public async Task GetDataTypeName(string typeName, string? 
normalizedName = null [Test] public async Task GetDataTypeName_enum() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - MaxPoolSize = 1 - }; - await using var conn = await OpenConnectionAsync(csb); + await using var dataSource = CreateDataSource(csb => csb.MaxPoolSize = 1); + await using var conn = await dataSource.OpenConnectionAsync(); var typeName = await GetTempTypeName(conn); await conn.ExecuteNonQueryAsync($"CREATE TYPE {typeName} AS ENUM ('one')"); await Task.Yield(); // TODO: fix multiplexing deadlock bug @@ -383,11 +380,8 @@ public async Task GetDataTypeName_enum() [Test] public async Task GetDataTypeName_domain() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - MaxPoolSize = 1 - }; - await using var conn = await OpenConnectionAsync(csb); + await using var dataSource = CreateDataSource(csb => csb.MaxPoolSize = 1); + await using var conn = await dataSource.OpenConnectionAsync(); var typeName = await GetTempTypeName(conn); await conn.ExecuteNonQueryAsync($"CREATE DOMAIN {typeName} AS VARCHAR(10)"); await Task.Yield(); // TODO: fix multiplexing deadlock bug @@ -530,8 +524,8 @@ public async Task Reader_dispose_state_does_not_leak() var startReaderClosedTcs = new TaskCompletionSource(TaskCreationOptions.RunContinuationsAsynchronously); var continueReaderClosedTcs = new TaskCompletionSource(TaskCreationOptions.RunContinuationsAsynchronously); - using var _ = CreateTempPool(ConnectionString, out var connectionString); - await using var conn1 = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(); + await using var conn1 = await dataSource.OpenConnectionAsync(); var connID = conn1.Connector!.Id; var readerCloseTask = Task.Run(async () => { @@ -546,7 +540,7 @@ public async Task Reader_dispose_state_does_not_leak() }); await startReaderClosedTcs.Task; - await using var conn2 = await OpenConnectionAsync(connectionString); + await using var conn2 = await 
dataSource.OpenConnectionAsync(); Assert.That(conn2.Connector!.Id, Is.EqualTo(connID)); using var cmd = conn2.CreateCommand(); cmd.CommandText = "SELECT 1"; @@ -851,7 +845,7 @@ public async Task HasRows([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepare command.CommandText = $"INSERT INTO {table} (name) VALUES ('foo'); SELECT * FROM {table}"; if (prepare == PrepareOrNot.Prepared) command.Prepare(); - using (var reader = await command.ExecuteReaderAsync()) + using (var reader = await command.ExecuteReaderAsync(Behavior)) { Assert.That(reader.HasRows, Is.True); reader.Read(); @@ -927,14 +921,14 @@ public async Task Invalid_cast() using var conn = await OpenConnectionAsync(); // Chunking type handler using (var cmd = new NpgsqlCommand("SELECT 'foo'", conn)) - using (var reader = await cmd.ExecuteReaderAsync()) + using (var reader = await cmd.ExecuteReaderAsync(Behavior)) { reader.Read(); Assert.That(() => reader.GetInt32(0), Throws.Exception.TypeOf()); } // Simple type handler using (var cmd = new NpgsqlCommand("SELECT 1", conn)) - using (var reader = await cmd.ExecuteReaderAsync()) + using (var reader = await cmd.ExecuteReaderAsync(Behavior)) { reader.Read(); Assert.That(() => reader.GetDateTime(0), Throws.Exception.TypeOf()); @@ -947,7 +941,7 @@ public async Task Many_reads() { using var conn = await OpenConnectionAsync(); using var cmd = new NpgsqlCommand($"SELECT generate_series(1, {conn.Settings.ReadBufferSize})", conn); - using var reader = await cmd.ExecuteReaderAsync(); + using var reader = await cmd.ExecuteReaderAsync(Behavior); for (var i = 1; i <= conn.Settings.ReadBufferSize; i++) { Assert.That(reader.Read(), Is.True); @@ -959,6 +953,10 @@ public async Task Many_reads() [Test] public async Task Nullable_scalar() { + // We read the same column multiple times + if (IsSequential) + return; + using var conn = await OpenConnectionAsync(); using var cmd = new NpgsqlCommand("SELECT @p1, @p2", conn); var p1 = new NpgsqlParameter { ParameterName = "p1", Value = 
DBNull.Value, NpgsqlDbType = NpgsqlDbType.Smallint }; @@ -967,7 +965,7 @@ public async Task Nullable_scalar() Assert.That(p2.DbType, Is.EqualTo(DbType.Int16)); cmd.Parameters.Add(p1); cmd.Parameters.Add(p2); - using var reader = await cmd.ExecuteReaderAsync(); + using var reader = await cmd.ExecuteReaderAsync(Behavior); reader.Read(); for (var i = 0; i < cmd.Parameters.Count; i++) @@ -1107,14 +1105,12 @@ public async Task Reader_reuse_on_dispose() [Test] public async Task Unbound_reader_reuse() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - MinPoolSize = 1, - MaxPoolSize = 1, - }; - using var _ = CreateTempPool(csb.ToString(), out var connectionString); - - await using var conn1 = await OpenConnectionAsync(connectionString); + csb.MinPoolSize = 1; + csb.MaxPoolSize = 1; + }); + await using var conn1 = await dataSource.OpenConnectionAsync(); using var cmd1 = conn1.CreateCommand(); cmd1.CommandText = "SELECT 1"; var reader1 = await cmd1.ExecuteReaderAsync(Behavior); @@ -1127,7 +1123,7 @@ public async Task Unbound_reader_reuse() await conn1.CloseAsync(); } - await using var conn2 = await OpenConnectionAsync(connectionString); + await using var conn2 = await dataSource.OpenConnectionAsync(); using var cmd2 = conn2.CreateCommand(); cmd2.CommandText = "SELECT 2"; var reader2 = await cmd2.ExecuteReaderAsync(Behavior); @@ -1141,7 +1137,7 @@ public async Task Unbound_reader_reuse() await conn2.CloseAsync(); } - await using var conn3 = await OpenConnectionAsync(connectionString); + await using var conn3 = await dataSource.OpenConnectionAsync(); using var cmd3 = conn3.CreateCommand(); cmd3.CommandText = "SELECT 3"; var reader3 = await cmd3.ExecuteReaderAsync(Behavior); @@ -1163,8 +1159,8 @@ public async Task Bug3772() return; await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); - using var _ = CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - await 
using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); var pgMock = await postmasterMock.WaitForServerConnection(); pgMock @@ -1210,8 +1206,8 @@ await pgMock public async Task Dispose_does_not_swallow_exceptions([Values(true, false)] bool async) { await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); - using var _ = CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); var pgMock = await postmasterMock.WaitForServerConnection(); // Write responses for the query, but break the connection before sending CommandComplete/ReadyForQuery @@ -1223,7 +1219,7 @@ await pgMock .FlushAsync(); using var cmd = new NpgsqlCommand("SELECT 1", conn); - using var reader = await cmd.ExecuteReaderAsync(); + using var reader = await cmd.ExecuteReaderAsync(Behavior); await reader.ReadAsync(); pgMock.Close(); @@ -1552,7 +1548,7 @@ public async Task TextReader_zero_length_column() await using var cmd = conn.CreateCommand(); cmd.CommandText = "SELECT ''"; - await using var reader = await cmd.ExecuteReaderAsync(); + await using var reader = await cmd.ExecuteReaderAsync(Behavior); Assert.IsTrue(await reader.ReadAsync()); using var textReader = reader.GetTextReader(0); @@ -1707,8 +1703,8 @@ public async Task ReadAsync_cancel_command_soft() return; // Multiplexing, cancellation await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); - using var _ = CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); 
+ await using var conn = await dataSource.OpenConnectionAsync(); // Write responses to the query we're about to send, with a single data row (we'll attempt to read two) var pgMock = await postmasterMock.WaitForServerConnection(); @@ -1720,7 +1716,7 @@ await pgMock .FlushAsync(); using var cmd = new NpgsqlCommand("SELECT some_int FROM some_table", conn); - await using (var reader = await cmd.ExecuteReaderAsync()) + await using (var reader = await cmd.ExecuteReaderAsync(Behavior)) { // Successfully read the first row Assert.True(await reader.ReadAsync()); @@ -1756,8 +1752,8 @@ public async Task ReadAsync_cancel_soft() return; // Multiplexing, cancellation await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); - using var _ = CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); // Write responses to the query we're about to send, with a single data row (we'll attempt to read two) var pgMock = await postmasterMock.WaitForServerConnection(); @@ -1769,7 +1765,7 @@ await pgMock .FlushAsync(); using var cmd = new NpgsqlCommand("SELECT some_int FROM some_table", conn); - await using (var reader = await cmd.ExecuteReaderAsync()) + await using (var reader = await cmd.ExecuteReaderAsync(Behavior)) { // Successfully read the first row Assert.True(await reader.ReadAsync()); @@ -1807,8 +1803,8 @@ public async Task NextResult_cancel_soft() return; // Multiplexing, cancellation await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); - using var _ = CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await 
dataSource.OpenConnectionAsync(); // Write responses to the query we're about to send, only for the first resultset (we'll attempt to read two) var pgMock = await postmasterMock.WaitForServerConnection(); @@ -1821,7 +1817,7 @@ await pgMock .FlushAsync(); using var cmd = new NpgsqlCommand("SELECT 1; SELECT 2", conn); - await using (var reader = await cmd.ExecuteReaderAsync()) + await using (var reader = await cmd.ExecuteReaderAsync(Behavior)) { // Successfully read the first resultset Assert.True(await reader.ReadAsync()); @@ -1859,8 +1855,8 @@ public async Task ReadAsync_cancel_hard([Values(true, false)] bool passCancelled return; // Multiplexing, cancellation await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); - using var _ = CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); // Write responses to the query we're about to send, with a single data row (we'll attempt to read two) var pgMock = await postmasterMock.WaitForServerConnection(); @@ -1903,8 +1899,8 @@ public async Task NextResultAsync_cancel_hard([Values(true, false)] bool passCan return; // Multiplexing, cancellation await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); - using var _ = CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); // Write responses to the query we're about to send, with a single data row (we'll attempt to read two) var pgMock = await postmasterMock.WaitForServerConnection(); @@ -1951,8 +1947,8 @@ public async Task GetFieldValueAsync_sequential_cancel([Values(true, 
false)] boo return; await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); - using var _ = CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); // Write responses to the query we're about to send, with a single data row (we'll attempt to read two) var pgMock = await postmasterMock.WaitForServerConnection(); @@ -1989,8 +1985,8 @@ public async Task IsDBNullAsync_sequential_cancel([Values(true, false)] bool pas return; await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); - using var _ = CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); // Write responses to the query we're about to send, with a single data row (we'll attempt to read two) var pgMock = await postmasterMock.WaitForServerConnection(); @@ -2023,8 +2019,8 @@ public async Task Cancel_multiplexing_disabled() if (!IsMultiplexing) return; - using var _ = CreateTempPool(ConnectionString, out var connString); - await using var conn = await OpenConnectionAsync(connString); + await using var dataSource = CreateDataSource(); + await using var conn = await dataSource.OpenConnectionAsync(); await using var cmd = new NpgsqlCommand("SELECT generate_series(1, 100); SELECT generate_series(1, 100)", conn); await using var reader = await cmd.ExecuteReaderAsync(Behavior); var cancelledToken = new CancellationToken(canceled: true); @@ -2048,13 +2044,15 @@ public async Task GetFieldValueAsync_sequential_timeout() if (!IsSequential) return; - var csb = new NpgsqlConnectionStringBuilder(ConnectionString); - 
csb.CommandTimeout = 3; - csb.CancellationTimeout = 15000; + var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + { + CommandTimeout = 3, + CancellationTimeout = 15000 + }; await using var postmasterMock = PgPostmasterMock.Start(csb.ToString()); - using var _ = CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); // Write responses to the query we're about to send, with a single data row (we'll attempt to read two) var pgMock = await postmasterMock.WaitForServerConnection(); @@ -2086,13 +2084,15 @@ public async Task IsDBNullAsync_sequential_timeout() if (!IsSequential) return; - var csb = new NpgsqlConnectionStringBuilder(ConnectionString); - csb.CommandTimeout = 3; - csb.CancellationTimeout = 15000; + var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + { + CommandTimeout = 3, + CancellationTimeout = 15000 + }; await using var postmasterMock = PgPostmasterMock.Start(csb.ToString()); - using var _ = CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); // Write responses to the query we're about to send, with a single data row (we'll attempt to read two) var pgMock = await postmasterMock.WaitForServerConnection(); @@ -2122,8 +2122,8 @@ public async Task Bug3446() return; // Multiplexing, cancellation await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); - using var _ = CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = 
CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); var pgMock = await postmasterMock.WaitForServerConnection(); await pgMock diff --git a/test/Npgsql.Tests/Replication/CommonReplicationTests.cs b/test/Npgsql.Tests/Replication/CommonReplicationTests.cs index c00c7a8ecc..9033d14e31 100644 --- a/test/Npgsql.Tests/Replication/CommonReplicationTests.cs +++ b/test/Npgsql.Tests/Replication/CommonReplicationTests.cs @@ -317,10 +317,8 @@ await c.ExecuteNonQueryAsync(@$" // will occupy the connection it is bound to. var insertTask = Task.Run(async () => { - await using var insertConn = await OpenConnectionAsync(new NpgsqlConnectionStringBuilder(ConnectionString) - { - Options = "-c synchronous_commit=on" - }); + await using var dataSource = CreateDataSource(csb => csb.Options = "-c synchronous_commit=on"); + await using var insertConn = await dataSource.OpenConnectionAsync(); await insertConn.ExecuteNonQueryAsync($"INSERT INTO {tableName} (name) VALUES ('{value1String}')"); }); @@ -353,10 +351,8 @@ await c.ExecuteNonQueryAsync(@$" var value2String = Guid.NewGuid().ToString("B"); insertTask = Task.Run(async () => { - await using var insertConn = OpenConnection(new NpgsqlConnectionStringBuilder(ConnectionString) - { - Options = "-c synchronous_commit=remote_apply" - }); + await using var dataSource = CreateDataSource(csb => csb.Options = "-c synchronous_commit=remote_apply"); + await using var insertConn = await dataSource.OpenConnectionAsync(); await insertConn.ExecuteNonQueryAsync($"INSERT INTO {tableName} (name) VALUES ('{value2String}')"); }); @@ -382,10 +378,8 @@ await c.ExecuteNonQueryAsync(@$" var value3String = Guid.NewGuid().ToString("B"); insertTask = Task.Run(async () => { - await using var insertConn = OpenConnection(new NpgsqlConnectionStringBuilder(ConnectionString) - { - Options = "-c synchronous_commit=remote_write" - }); + await using var dataSource = CreateDataSource(csb => csb.Options = "-c 
synchronous_commit=remote_write"); + await using var insertConn = await dataSource.OpenConnectionAsync(); await insertConn.ExecuteNonQueryAsync($"INSERT INTO {tableName} (name) VALUES ('{value3String}')"); }); diff --git a/test/Npgsql.Tests/SecurityTests.cs b/test/Npgsql.Tests/SecurityTests.cs index 9f93caa36d..49e6030c34 100644 --- a/test/Npgsql.Tests/SecurityTests.cs +++ b/test/Npgsql.Tests/SecurityTests.cs @@ -13,13 +13,12 @@ public class SecurityTests : TestBase [Test, Description("Establishes an SSL connection, assuming a self-signed server certificate")] public void Basic_ssl() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - SslMode = SslMode.Require, - TrustServerCertificate = true - }; - - using var conn = OpenConnection(csb); + csb.SslMode = SslMode.Require; + csb.TrustServerCertificate = true; + }); + using var conn = dataSource.OpenConnection(); Assert.That(conn.IsSecure, Is.True); } @@ -29,13 +28,12 @@ public void Default_user_uses_md5_password() if (!IsOnBuildServer) Assert.Ignore("Only executed in CI"); - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - SslMode = SslMode.Require, - TrustServerCertificate = true - }; - - using var conn = OpenConnection(csb); + csb.SslMode = SslMode.Require; + csb.TrustServerCertificate = true; + }); + using var conn = dataSource.OpenConnection(); Assert.That(conn.IsScram, Is.False); Assert.That(conn.IsScramPlus, Is.False); } @@ -59,13 +57,12 @@ public void Reject_self_signed_certificate([Values(SslMode.VerifyCA, SslMode.Ver [Test, Description("Makes sure that ssl_renegotiation_limit is always 0, renegotiation is buggy")] public void No_ssl_renegotiation() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - SslMode = SslMode.Require, - TrustServerCertificate = true - }; - - using var conn = OpenConnection(csb); + 
csb.SslMode = SslMode.Require; + csb.TrustServerCertificate = true; + }); + using var conn = dataSource.OpenConnection(); Assert.That(conn.ExecuteScalar("SHOW ssl_renegotiation_limit"), Is.EqualTo("0")); conn.ExecuteNonQuery("DISCARD ALL"); Assert.That(conn.ExecuteScalar("SHOW ssl_renegotiation_limit"), Is.EqualTo("0")); @@ -74,11 +71,8 @@ public void No_ssl_renegotiation() [Test, Description("Makes sure that when SSL is disabled IsSecure returns false")] public void IsSecure_without_ssl() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - SslMode = SslMode.Disable - }; - using var conn = OpenConnection(csb); + using var dataSource = CreateDataSource(csb => csb.SslMode = SslMode.Disable); + using var conn = dataSource.OpenConnection(); Assert.That(conn.IsSecure, Is.False); } @@ -157,13 +151,12 @@ public void Connection_database_is_populated_on_Open() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1718")] public void Bug1718() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - SslMode = SslMode.Require, - TrustServerCertificate = true - }; - - using var conn = OpenConnection(csb); + csb.SslMode = SslMode.Require; + csb.TrustServerCertificate = true; + }); + using var conn = dataSource.OpenConnection(); using var cmd = CreateSleepCommand(conn, 10000); var cts = new CancellationTokenSource(1000).Token; Assert.That(async () => await cmd.ExecuteNonQueryAsync(cts), Throws.Exception @@ -175,18 +168,17 @@ public void Bug1718() [Test] public void ScramPlus() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - SslMode = SslMode.Require, - Username = "npgsql_tests_scram", - Password = "npgsql_tests_scram", - TrustServerCertificate = true - }; - try { - using var conn = OpenConnection(csb); - // scram-sha-256-plus only works begining from PostgreSQL 11 + using var dataSource = CreateDataSource(csb => + { + csb.SslMode = SslMode.Require; + csb.Username = 
"npgsql_tests_scram"; + csb.Password = "npgsql_tests_scram"; + csb.TrustServerCertificate = true; + }); + using var conn = dataSource.OpenConnection(); + // scram-sha-256-plus only works beginning from PostgreSQL 11 if (conn.PostgreSqlVersion.Major >= 11) { Assert.That(conn.IsScramPlus, Is.True); @@ -207,18 +199,17 @@ public async Task Connect_with_only_ssl_allowed_user([Values] bool multiplexing, Assert.Ignore("Multiplexing doesn't support keepalive"); } - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - SslMode = SslMode.Allow, - Username = "npgsql_tests_ssl", - Password = "npgsql_tests_ssl", - Multiplexing = multiplexing, - KeepAlive = keepAlive ? 10 : 0 - }; - try { - await using var conn = await OpenConnectionAsync(csb); + await using var dataSource = CreateDataSource(csb => + { + csb.SslMode = SslMode.Allow; + csb.Username = "npgsql_tests_ssl"; + csb.Password = "npgsql_tests_ssl"; + csb.Multiplexing = multiplexing; + csb.KeepAlive = keepAlive ? 10 : 0; + }); + await using var conn = await dataSource.OpenConnectionAsync(); Assert.IsTrue(conn.IsSecure); } catch (Exception e) when (!IsOnBuildServer) @@ -231,26 +222,21 @@ public async Task Connect_with_only_ssl_allowed_user([Values] bool multiplexing, [Test] public void SslMode_Require_throws_without_TSC() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - SslMode = SslMode.Require - }; - - var ex = Assert.ThrowsAsync(async () => await OpenConnectionAsync(csb))!; + using var dataSource = CreateDataSource(csb => csb.SslMode = SslMode.Require); + var ex = Assert.ThrowsAsync(async () => await dataSource.OpenConnectionAsync())!; Assert.That(ex.Message, Is.EqualTo(NpgsqlStrings.CannotUseSslModeRequireWithoutTrustServerCertificate)); } [Test] public async Task SslMode_Require_with_callback_without_TSC() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - SslMode = SslMode.Require, - TrustServerCertificate = 
false, - Pooling = false - }; - - using var connection = CreateConnection(csb.ToString()); + csb.SslMode = SslMode.Require; + csb.TrustServerCertificate = false; + csb.Pooling = false; + }); + await using var connection = dataSource.CreateConnection(); connection.UserCertificateValidationCallback = (_, _, _, _) => true; await connection.OpenAsync(); @@ -264,18 +250,17 @@ public async Task Connect_with_only_non_ssl_allowed_user([Values] bool multiplex Assert.Ignore("Multiplexing doesn't support keepalive"); } - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - SslMode = SslMode.Prefer, - Username = "npgsql_tests_nossl", - Password = "npgsql_tests_nossl", - Multiplexing = multiplexing, - KeepAlive = keepAlive ? 10 : 0 - }; - try { - await using var conn = await OpenConnectionAsync(csb); + await using var dataSource = CreateDataSource(csb => + { + csb.SslMode = SslMode.Prefer; + csb.Username = "npgsql_tests_nossl"; + csb.Password = "npgsql_tests_nossl"; + csb.Multiplexing = multiplexing; + csb.KeepAlive = keepAlive ? 
10 : 0; + }); + await using var conn = await dataSource.OpenConnectionAsync(); Assert.IsFalse(conn.IsSecure); } catch (Exception e) when (!IsOnBuildServer) @@ -340,12 +325,8 @@ public async Task Connection_UserCertificateValidationCallback_is_invoked([Value [Test] public void Connect_with_Verify_and_callback_throws([Values(SslMode.VerifyCA, SslMode.VerifyFull)] SslMode sslMode) { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - SslMode = sslMode - }; - - var connection = CreateConnection(csb.ToString()); + using var dataSource = CreateDataSource(csb => csb.SslMode = sslMode); + using var connection = dataSource.CreateConnection(); connection.UserCertificateValidationCallback = (_, _, _, _) => true; var ex = Assert.ThrowsAsync(async () => await connection.OpenAsync())!; @@ -355,13 +336,12 @@ public void Connect_with_Verify_and_callback_throws([Values(SslMode.VerifyCA, Ss [Test] public void Connect_with_RootCertificate_and_callback_throws() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - SslMode = SslMode.Require, - RootCertificate = "foo" - }; - - var connection = CreateConnection(csb.ToString()); + csb.SslMode = SslMode.Require; + csb.RootCertificate = "foo"; + }); + using var connection = dataSource.CreateConnection(); connection.UserCertificateValidationCallback = (_, _, _, _) => true; var ex = Assert.ThrowsAsync(async () => await connection.OpenAsync())!; @@ -372,21 +352,20 @@ public void Connect_with_RootCertificate_and_callback_throws() [IssueLink("https://github.com/npgsql/npgsql/issues/4305")] public async Task Bug4305_Secure([Values] bool async) { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - SslMode = SslMode.Require, - Username = "npgsql_tests_ssl", - Password = "npgsql_tests_ssl", - MaxPoolSize = 1, - TrustServerCertificate = true - }; - using var _ = CreateTempPool(csb, out var 
connString); + csb.SslMode = SslMode.Require; + csb.Username = "npgsql_tests_ssl"; + csb.Password = "npgsql_tests_ssl"; + csb.MaxPoolSize = 1; + csb.TrustServerCertificate = true; + }); NpgsqlConnection conn = default!; try { - conn = await OpenConnectionAsync(connString); + conn = await dataSource.OpenConnectionAsync(); Assert.IsTrue(conn.IsSecure); } catch (Exception e) when (!IsOnBuildServer) @@ -422,20 +401,19 @@ public async Task Bug4305_Secure([Values] bool async) [IssueLink("https://github.com/npgsql/npgsql/issues/4305")] public async Task Bug4305_not_Secure([Values] bool async) { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - SslMode = SslMode.Disable, - Username = "npgsql_tests_nossl", - Password = "npgsql_tests_nossl", - MaxPoolSize = 1 - }; - using var _ = CreateTempPool(csb, out var connString); + csb.SslMode = SslMode.Disable; + csb.Username = "npgsql_tests_nossl"; + csb.Password = "npgsql_tests_nossl"; + csb.MaxPoolSize = 1; + }); NpgsqlConnection conn = default!; try { - conn = await OpenConnectionAsync(connString); + conn = await dataSource.OpenConnectionAsync(); Assert.IsFalse(conn.IsSecure); } catch (Exception e) when (!IsOnBuildServer) diff --git a/test/Npgsql.Tests/Support/TestBase.cs b/test/Npgsql.Tests/Support/TestBase.cs index dae42ba644..29b11adfaa 100644 --- a/test/Npgsql.Tests/Support/TestBase.cs +++ b/test/Npgsql.Tests/Support/TestBase.cs @@ -1,4 +1,5 @@ using System; +using System.Collections.Concurrent; using System.Collections.Generic; using System.Data; using System.Diagnostics; @@ -20,7 +21,11 @@ public abstract class TestBase /// public virtual string ConnectionString => TestUtil.ConnectionString; - static SemaphoreSlim DatabaseCreationLock = new(1); + static readonly SemaphoreSlim DatabaseCreationLock = new(1); + + static readonly object dataSourceLockObject = new(); + + static ConcurrentDictionary DataSources = new(StringComparer.Ordinal); #region Type 
testing @@ -348,10 +353,36 @@ public bool Equals(T? x, T? y) protected static readonly NpgsqlDataSource SharedDataSource = NpgsqlDataSource.Create(TestUtil.ConnectionString); protected virtual NpgsqlDataSourceBuilder CreateDataSourceBuilder() - => new(TestUtil.ConnectionString); + => new(ConnectionString); protected virtual NpgsqlDataSource CreateDataSource() - => NpgsqlDataSource.Create(TestUtil.ConnectionString); + => CreateDataSource(ConnectionString); + + protected virtual NpgsqlDataSource CreateDataSource(string connectionString) + => NpgsqlDataSource.Create(connectionString); + + protected virtual NpgsqlDataSource CreateDataSource(Action connectionStringBuilderAction) + { + var connectionStringBuilder = new NpgsqlConnectionStringBuilder(ConnectionString); + connectionStringBuilderAction(connectionStringBuilder); + return NpgsqlDataSource.Create(connectionStringBuilder); + } + + protected static NpgsqlDataSource GetDataSource(string connectionString) + { + if (!DataSources.TryGetValue(connectionString, out var dataSource)) + { + lock (dataSourceLockObject) + { + if (!DataSources.TryGetValue(connectionString, out dataSource)) + { + DataSources[connectionString] = dataSource = NpgsqlDataSource.Create(connectionString); + } + } + } + + return dataSource; + } protected virtual NpgsqlDataSource CreateLoggingDataSource( out ListLoggerProvider listLoggerProvider, @@ -372,55 +403,55 @@ protected virtual NpgsqlDataSource CreateLoggingDataSource( return builder.Build(); } - protected virtual NpgsqlConnection CreateConnection(string? connectionString = null) - => new(connectionString ?? 
ConnectionString); + protected virtual NpgsqlConnection CreateConnection() + => GetDataSource(ConnectionString).CreateConnection(); - protected virtual NpgsqlConnection CreateConnection(Action builderAction) + protected virtual NpgsqlConnection OpenConnection() { - var builder = new NpgsqlConnectionStringBuilder(ConnectionString); - builderAction(builder); - return new NpgsqlConnection(builder.ConnectionString); + var connection = CreateConnection(); + try + { + OpenConnection(connection, async: false).GetAwaiter().GetResult(); + return connection; + } + catch + { + connection.Dispose(); + throw; + } } - protected virtual NpgsqlConnection OpenConnection(string? connectionString = null) - => OpenConnection(connectionString, async: false).GetAwaiter().GetResult(); - - protected virtual NpgsqlConnection OpenConnection(Action builderAction) + protected virtual async ValueTask OpenConnectionAsync() { - var builder = new NpgsqlConnectionStringBuilder(ConnectionString); - builderAction(builder); - return OpenConnection(builder.ConnectionString, async: false).GetAwaiter().GetResult(); - } - - protected virtual ValueTask OpenConnectionAsync(string? connectionString = null) - => OpenConnection(connectionString, async: true); - - protected virtual ValueTask OpenConnectionAsync( - Action builderAction) - { - var builder = new NpgsqlConnectionStringBuilder(ConnectionString); - builderAction(builder); - return OpenConnection(builder.ConnectionString, async: true); + var connection = CreateConnection(); + try + { + await OpenConnection(connection, async: true); + return connection; + } + catch + { + await connection.DisposeAsync(); + throw; + } } - ValueTask OpenConnection(string? 
connectionString, bool async) + static Task OpenConnection(NpgsqlConnection conn, bool async) { return OpenConnectionInternal(hasLock: false); - async ValueTask OpenConnectionInternal(bool hasLock) + async Task OpenConnectionInternal(bool hasLock) { - var conn = CreateConnection(connectionString); try { if (async) await conn.OpenAsync(); else conn.Open(); - return conn; } catch (PostgresException e) { - if (e.SqlState == PostgresErrorCodes.InvalidPassword && connectionString == TestUtil.DefaultConnectionString) + if (e.SqlState == PostgresErrorCodes.InvalidPassword) throw new Exception("Please create a user npgsql_tests as follows: CREATE USER npgsql_tests PASSWORD 'npgsql_tests' SUPERUSER"); if (e.SqlState == PostgresErrorCodes.InvalidCatalogName) @@ -430,7 +461,7 @@ async ValueTask OpenConnectionInternal(bool hasLock) DatabaseCreationLock.Wait(); try { - return await OpenConnectionInternal(hasLock: true); + await OpenConnectionInternal(hasLock: true); } finally { @@ -439,7 +470,7 @@ async ValueTask OpenConnectionInternal(bool hasLock) } // Database does not exist and we have the lock, proceed to creation - var builder = new NpgsqlConnectionStringBuilder(connectionString ?? ConnectionString) + var builder = new NpgsqlConnectionStringBuilder(TestUtil.ConnectionString) { Pooling = false, Multiplexing = false, @@ -456,7 +487,7 @@ async ValueTask OpenConnectionInternal(bool hasLock) await conn.OpenAsync(); else conn.Open(); - return conn; + return; } throw; @@ -464,12 +495,6 @@ async ValueTask OpenConnectionInternal(bool hasLock) } } - protected NpgsqlConnection OpenConnection(NpgsqlConnectionStringBuilder csb) - => OpenConnection(csb.ToString()); - - protected virtual ValueTask OpenConnectionAsync(NpgsqlConnectionStringBuilder csb) - => OpenConnectionAsync(csb.ToString()); - // In PG under 9.1 you can't do SELECT pg_sleep(2) in binary because that function returns void and PG doesn't know // how to transfer that. So cast to text server-side. 
protected static NpgsqlCommand CreateSleepCommand(NpgsqlConnection conn, int seconds = 1000) diff --git a/test/Npgsql.Tests/SystemTransactionTests.cs b/test/Npgsql.Tests/SystemTransactionTests.cs index ae8385e535..4e5aaca63a 100644 --- a/test/Npgsql.Tests/SystemTransactionTests.cs +++ b/test/Npgsql.Tests/SystemTransactionTests.cs @@ -1,24 +1,26 @@ using System; using System.Data; +using System.Threading; using System.Transactions; using NUnit.Framework; +using static Npgsql.Tests.TestUtil; namespace Npgsql.Tests; // This test suite contains ambient transaction tests, except those involving distributed transactions which are only // supported on .NET Framework / Windows. Distributed transaction tests are in DistributedTransactionTests. -[NonParallelizable] public class SystemTransactionTests : TestBase { [Test, Description("Single connection enlisting explicitly, committing")] public void Explicit_enlist() { - using var conn = new NpgsqlConnection(ConnectionStringEnlistOff); - conn.Open(); + var dataSource = EnlistOffDataSource; + var tableName = CreateTempTable(dataSource, "name TEXT"); + using var conn = dataSource.OpenConnection(); using (var scope = new TransactionScope()) { conn.EnlistTransaction(Transaction.Current); - Assert.That(conn.ExecuteNonQuery(@"INSERT INTO data (name) VALUES ('test')"), Is.EqualTo(1), "Unexpected insert rowcount"); + Assert.That(conn.ExecuteNonQuery(@$"INSERT INTO {tableName} (name) VALUES ('test')"), Is.EqualTo(1), "Unexpected insert rowcount"); AssertNoDistributedIdentifier(); AssertNoPreparedTransactions(); scope.Complete(); @@ -27,7 +29,7 @@ public void Explicit_enlist() AssertNoPreparedTransactions(); using (var tx = conn.BeginTransaction()) { - Assert.That(conn.ExecuteScalar(@"SELECT COUNT(*) FROM data"), Is.EqualTo(1), "Unexpected data count"); + Assert.That(conn.ExecuteScalar(@$"SELECT COUNT(*) FROM {tableName}"), Is.EqualTo(1), "Unexpected data count"); tx.Rollback(); } } @@ -35,18 +37,20 @@ public void Explicit_enlist() 
[Test, Description("Single connection enlisting implicitly, committing")] public void Implicit_enlist() { - using var conn = new NpgsqlConnection(ConnectionStringEnlistOn); + var dataSource = EnlistOnDataSource; + var tableName = CreateTempTable(dataSource, "name TEXT"); + using var conn = dataSource.CreateConnection(); using (var scope = new TransactionScope()) { conn.Open(); - Assert.That(conn.ExecuteNonQuery(@"INSERT INTO data (name) VALUES ('test')"), Is.EqualTo(1), "Unexpected insert rowcount"); + Assert.That(conn.ExecuteNonQuery(@$"INSERT INTO {tableName} (name) VALUES ('test')"), Is.EqualTo(1), "Unexpected insert rowcount"); AssertNoDistributedIdentifier(); AssertNoPreparedTransactions(); scope.Complete(); } using (var tx = conn.BeginTransaction()) { - Assert.That(conn.ExecuteScalar(@"SELECT COUNT(*) FROM data"), Is.EqualTo(1), "Unexpected data count"); + Assert.That(conn.ExecuteScalar(@$"SELECT COUNT(*) FROM {tableName}"), Is.EqualTo(1), "Unexpected data count"); tx.Rollback(); } } @@ -54,37 +58,41 @@ public void Implicit_enlist() [Test] public void Enlist_Off() { + var dataSource = EnlistOffDataSource; + var tableName = CreateTempTable(dataSource, "name TEXT"); using (new TransactionScope()) - using (var conn1 = OpenConnection(ConnectionStringEnlistOff)) - using (var conn2 = OpenConnection(ConnectionStringEnlistOff)) + using (var conn1 = dataSource.OpenConnection()) + using (var conn2 = dataSource.OpenConnection()) { Assert.That(conn1.EnlistedTransaction, Is.Null); - Assert.That(conn1.ExecuteNonQuery(@"INSERT INTO data (name) VALUES ('test')"), Is.EqualTo(1), "Unexpected insert rowcount"); - Assert.That(conn2.ExecuteScalar("SELECT COUNT(*) FROM data"), Is.EqualTo(1), "Unexpected data count"); + Assert.That(conn1.ExecuteNonQuery(@$"INSERT INTO {tableName} (name) VALUES ('test')"), Is.EqualTo(1), "Unexpected insert rowcount"); + Assert.That(conn2.ExecuteScalar($"SELECT COUNT(*) FROM {tableName}"), Is.EqualTo(1), "Unexpected data count"); } // Scope disposed 
and not completed => rollback, but no enlistment, so changes should still be there. - using (var conn3 = OpenConnection(ConnectionStringEnlistOff)) + using (var conn3 = dataSource.OpenConnection()) { - Assert.That(conn3.ExecuteScalar("SELECT COUNT(*) FROM data"), Is.EqualTo(1), "Insert unexpectedly rollback-ed"); + Assert.That(conn3.ExecuteScalar($"SELECT COUNT(*) FROM {tableName}"), Is.EqualTo(1), "Insert unexpectedly rollback-ed"); } } [Test, Description("Single connection enlisting explicitly, rollback")] public void Rollback_explicit_enlist() { - using var conn = OpenConnection(); + using var dataSource = CreateDataSource(); + var tableName = CreateTempTable(dataSource, "name TEXT"); + using var conn = dataSource.OpenConnection(); using (new TransactionScope()) { conn.EnlistTransaction(Transaction.Current); - Assert.That(conn.ExecuteNonQuery(@"INSERT INTO data (name) VALUES ('test')"), Is.EqualTo(1), "Unexpected insert rowcount"); + Assert.That(conn.ExecuteNonQuery(@$"INSERT INTO {tableName} (name) VALUES ('test')"), Is.EqualTo(1), "Unexpected insert rowcount"); // No commit } AssertNoDistributedIdentifier(); AssertNoPreparedTransactions(); using (var tx = conn.BeginTransaction()) { - Assert.That(conn.ExecuteScalar(@"SELECT COUNT(*) FROM data"), Is.EqualTo(0), "Unexpected data count"); + Assert.That(conn.ExecuteScalar(@$"SELECT COUNT(*) FROM {tableName}"), Is.EqualTo(0), "Unexpected data count"); tx.Rollback(); } } @@ -93,36 +101,36 @@ public void Rollback_explicit_enlist() [IssueLink("https://github.com/npgsql/npgsql/issues/2408")] public void Rollback_implicit_enlist([Values(true, false)] bool pooling) { - var connectionString = new NpgsqlConnectionStringBuilder(ConnectionStringEnlistOn) - { - Pooling = pooling - }.ToString(); + using var dataSource = CreateDataSource(csb => csb.Pooling = pooling); + var tableName = CreateTempTable(dataSource, "name TEXT"); using (new TransactionScope()) - using (var conn = OpenConnection(connectionString)) + using (var conn 
= dataSource.OpenConnection()) { - Assert.That(conn.ExecuteNonQuery(@"INSERT INTO data (name) VALUES ('test')"), Is.EqualTo(1), "Unexpected insert rowcount"); + Assert.That(conn.ExecuteNonQuery(@$"INSERT INTO {tableName} (name) VALUES ('test')"), Is.EqualTo(1), "Unexpected insert rowcount"); AssertNoDistributedIdentifier(); AssertNoPreparedTransactions(); // No commit } - AssertNumberOfRows(0); + AssertNumberOfRows(0, tableName); } [Test] public void Two_consecutive_connections() { + var dataSource = EnlistOnDataSource; + var tableName = CreateTempTable(dataSource, "name TEXT"); using (var scope = new TransactionScope()) { - using (var conn1 = OpenConnection(ConnectionStringEnlistOn)) + using (var conn1 = dataSource.OpenConnection()) { - Assert.That(conn1.ExecuteNonQuery(@"INSERT INTO data (name) VALUES ('test1')"), Is.EqualTo(1), "Unexpected first insert rowcount"); + Assert.That(conn1.ExecuteNonQuery(@$"INSERT INTO {tableName} (name) VALUES ('test1')"), Is.EqualTo(1), "Unexpected first insert rowcount"); } - using (var conn2 = OpenConnection(ConnectionStringEnlistOn)) + using (var conn2 = dataSource.OpenConnection()) { - Assert.That(conn2.ExecuteNonQuery(@"INSERT INTO data (name) VALUES ('test2')"), Is.EqualTo(1), "Unexpected second insert rowcount"); + Assert.That(conn2.ExecuteNonQuery(@$"INSERT INTO {tableName} (name) VALUES ('test2')"), Is.EqualTo(1), "Unexpected second insert rowcount"); } // Consecutive connections used in same scope should not promote the transaction to distributed. 
@@ -130,65 +138,66 @@ public void Two_consecutive_connections() AssertNoPreparedTransactions(); scope.Complete(); } - AssertNumberOfRows(2); + AssertNumberOfRows(2, tableName); } [Test] public void Close_connection() { - var connString = new NpgsqlConnectionStringBuilder(ConnectionStringEnlistOn) - { - ApplicationName = nameof(Close_connection), - }.ToString(); + // We assert the number of idle connections below + using var dataSource = CreateDataSource(csb => csb.Enlist = true); + var tableName = CreateTempTable(dataSource, "name TEXT"); using (var scope = new TransactionScope()) - using (var conn = OpenConnection(connString)) + using (var conn = dataSource.OpenConnection()) { - Assert.That(conn.ExecuteNonQuery(@"INSERT INTO data (name) VALUES ('test')"), Is.EqualTo(1), "Unexpected insert rowcount"); + Assert.That(conn.ExecuteNonQuery(@$"INSERT INTO {tableName} (name) VALUES ('test')"), Is.EqualTo(1), "Unexpected insert rowcount"); conn.Close(); AssertNoDistributedIdentifier(); AssertNoPreparedTransactions(); scope.Complete(); } - AssertNumberOfRows(1); - Assert.True(PoolManager.Pools.TryGetValue(connString, out var pool)); - Assert.That(pool!.Statistics.Idle, Is.EqualTo(1)); - - using (var conn = new NpgsqlConnection(connString)) - NpgsqlConnection.ClearPool(conn); + AssertNumberOfRows(1, tableName); + Assert.That(dataSource.Statistics.Idle, Is.EqualTo(1)); } [Test] public void Enlist_to_two_transactions() { - using var conn = OpenConnection(ConnectionStringEnlistOff); + var dataSource = EnlistOffDataSource; + var tableName = CreateTempTable(dataSource, "name TEXT"); + using var conn = dataSource.OpenConnection(); var ctx = new CommittableTransaction(); conn.EnlistTransaction(ctx); Assert.That(() => conn.EnlistTransaction(new CommittableTransaction()), Throws.Exception.TypeOf()); ctx.Rollback(); using var tx = conn.BeginTransaction(); - Assert.That(conn.ExecuteScalar(@"SELECT COUNT(*) FROM data"), Is.EqualTo(0)); + Assert.That(conn.ExecuteScalar(@$"SELECT 
COUNT(*) FROM {tableName}"), Is.EqualTo(0)); tx.Rollback(); } [Test] public void Enlist_twice_to_same_transaction() { - using var conn = OpenConnection(ConnectionStringEnlistOff); + var dataSource = EnlistOffDataSource; + var tableName = CreateTempTable(dataSource, "name TEXT"); + using var conn = dataSource.OpenConnection(); var ctx = new CommittableTransaction(); conn.EnlistTransaction(ctx); conn.EnlistTransaction(ctx); ctx.Rollback(); using var tx = conn.BeginTransaction(); - Assert.That(conn.ExecuteScalar(@"SELECT COUNT(*) FROM data"), Is.EqualTo(0)); + Assert.That(conn.ExecuteScalar(@$"SELECT COUNT(*) FROM {tableName}"), Is.EqualTo(0)); tx.Rollback(); } [Test] public void Scope_after_scope() { - using var conn = OpenConnection(ConnectionStringEnlistOff); + var dataSource = EnlistOffDataSource; + var tableName = CreateTempTable(dataSource, "name TEXT"); + using var conn = dataSource.OpenConnection(); using (new TransactionScope()) conn.EnlistTransaction(Transaction.Current); using (new TransactionScope()) @@ -196,7 +205,7 @@ public void Scope_after_scope() using (var tx = conn.BeginTransaction()) { - Assert.That(conn.ExecuteScalar(@"SELECT COUNT(*) FROM data"), Is.EqualTo(0)); + Assert.That(conn.ExecuteScalar(@$"SELECT COUNT(*) FROM {tableName}"), Is.EqualTo(0)); tx.Rollback(); } } @@ -204,49 +213,57 @@ public void Scope_after_scope() [Test] public void Reuse_connection() { + // We check the ProcessID below + using var dataSource = CreateDataSource(csb => csb.Enlist = true); + var tableName = CreateTempTable(dataSource, "name TEXT"); using (var scope = new TransactionScope()) - using (var conn = new NpgsqlConnection(ConnectionStringEnlistOn)) + using (var conn = dataSource.CreateConnection()) { conn.Open(); var processId = conn.ProcessID; - conn.ExecuteNonQuery(@"INSERT INTO data (name) VALUES ('test1')"); + conn.ExecuteNonQuery(@$"INSERT INTO {tableName} (name) VALUES ('test1')"); conn.Close(); conn.Open(); Assert.That(conn.ProcessID, Is.EqualTo(processId)); - 
conn.ExecuteNonQuery(@"INSERT INTO data (name) VALUES ('test2')"); + conn.ExecuteNonQuery(@$"INSERT INTO {tableName} (name) VALUES ('test2')"); conn.Close(); scope.Complete(); } - AssertNumberOfRows(2); + AssertNumberOfRows(2, tableName); } [Test] public void Reuse_connection_rollback() { + // We check the ProcessID below + using var dataSource = CreateDataSource(csb => csb.Enlist = true); + var tableName = CreateTempTable(dataSource, "name TEXT"); using (new TransactionScope()) - using (var conn = new NpgsqlConnection(ConnectionStringEnlistOn)) + using (var conn = dataSource.CreateConnection()) { conn.Open(); var processId = conn.ProcessID; - conn.ExecuteNonQuery(@"INSERT INTO data (name) VALUES ('test1')"); + conn.ExecuteNonQuery(@$"INSERT INTO {tableName} (name) VALUES ('test1')"); conn.Close(); conn.Open(); Assert.That(conn.ProcessID, Is.EqualTo(processId)); - conn.ExecuteNonQuery(@"INSERT INTO data (name) VALUES ('test2')"); + conn.ExecuteNonQuery(@$"INSERT INTO {tableName} (name) VALUES ('test2')"); conn.Close(); // No commit } - AssertNumberOfRows(0); + AssertNumberOfRows(0, tableName); } [Test, Ignore("Timeout doesn't seem to fire on .NET Core / Linux")] public void Timeout_triggers_rollback_while_busy() { - using (var conn = OpenConnection(ConnectionStringEnlistOff)) + var dataSource = EnlistOffDataSource; + var tableName = CreateTempTable(dataSource, "name TEXT"); + using (var conn = dataSource.OpenConnection()) { using (new TransactionScope(TransactionScopeOption.Required, TimeSpan.FromSeconds(1))) { @@ -258,15 +275,17 @@ public void Timeout_triggers_rollback_while_busy() } } - AssertNumberOfRows(0); + AssertNumberOfRows(0, tableName); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1579")] public void Schema_connection_should_not_enlist() { + var dataSource = EnlistOnDataSource; + var tableName = CreateTempTable(dataSource, "name TEXT"); using var tran = new TransactionScope(); - using var conn = OpenConnection(ConnectionStringEnlistOn); - 
using var cmd = new NpgsqlCommand("SELECT * FROM data", conn); + using var conn = dataSource.OpenConnection(); + using var cmd = new NpgsqlCommand($"SELECT * FROM {tableName}", conn); using var reader = cmd.ExecuteReader(CommandBehavior.KeyInfo); reader.GetColumnSchema(); AssertNoDistributedIdentifier(); @@ -277,16 +296,14 @@ public void Schema_connection_should_not_enlist() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1737")] public void Single_unpooled_connection() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + using var dataSource = CreateDataSource(csb => { - Pooling = false, - Enlist = true - }; - - + csb.Pooling = false; + csb.Enlist = true; + }); using var scope = new TransactionScope(); - using (var conn = OpenConnection(csb)) + using (var conn = dataSource.OpenConnection()) using (var cmd = new NpgsqlCommand("SELECT 1", conn)) cmd.ExecuteNonQuery(); @@ -297,13 +314,9 @@ public void Single_unpooled_connection() [IssueLink("https://github.com/npgsql/npgsql/issues/3863")] public void Break_connector_while_in_transaction_scope_with_rollback([Values] bool pooling) { - var csb = new NpgsqlConnectionStringBuilder(ConnectionStringEnlistOn) - { - Pooling = pooling, - }; - + using var dataSource = CreateDataSource(csb => csb.Pooling = pooling); using var scope = new TransactionScope(); - var conn = OpenConnection(csb); + var conn = dataSource.OpenConnection(); conn.ExecuteNonQuery("SELECT 1"); conn.Connector!.Break(new Exception(nameof(Break_connector_while_in_transaction_scope_with_rollback))); @@ -313,15 +326,11 @@ public void Break_connector_while_in_transaction_scope_with_rollback([Values] bo [IssueLink("https://github.com/npgsql/npgsql/issues/3863")] public void Break_connector_while_in_transaction_scope_with_commit([Values] bool pooling) { - var csb = new NpgsqlConnectionStringBuilder(ConnectionStringEnlistOn) - { - Pooling = pooling, - }; - + using var dataSource = CreateDataSource(csb => csb.Pooling = pooling); var ex = 
Assert.Throws(() => { using var scope = new TransactionScope(); - var conn = OpenConnection(csb); + var conn = dataSource.OpenConnection(); conn.ExecuteNonQuery("SELECT 1"); conn.Connector!.Break(new Exception(nameof(Break_connector_while_in_transaction_scope_with_commit))); @@ -337,11 +346,7 @@ public void Break_connector_while_in_transaction_scope_with_commit([Values] bool [IssueLink("https://github.com/npgsql/npgsql/issues/4085")] public void Open_connection_with_enlist_and_aborted_TransactionScope() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - Enlist = true - }; - + var dataSource = EnlistOnDataSource; for (var i = 0; i < 2; i++) { using var outerScope = new TransactionScope(); @@ -355,7 +360,7 @@ public void Open_connection_with_enlist_and_aborted_TransactionScope() { } - var ex = Assert.Throws(() => OpenConnection(csb))!; + var ex = Assert.Throws(() => dataSource.OpenConnection())!; Assert.That(ex.Message, Is.EqualTo("The operation is not valid for the state of the transaction.")); } } @@ -363,16 +368,18 @@ public void Open_connection_with_enlist_and_aborted_TransactionScope() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1594")] public void Bug1594() { + var dataSource = EnlistOnDataSource; + var tableName = CreateTempTable(dataSource, "name TEXT"); using var outerScope = new TransactionScope(); - using (var conn = OpenConnection(ConnectionStringEnlistOn)) + using (var conn = dataSource.OpenConnection()) using (var innerScope1 = new TransactionScope()) { - conn.ExecuteNonQuery(@"INSERT INTO data (name) VALUES ('test1')"); + conn.ExecuteNonQuery(@$"INSERT INTO {tableName} (name) VALUES ('test1')"); innerScope1.Complete(); } - using (OpenConnection(ConnectionStringEnlistOn)) + using (dataSource.OpenConnection()) using (new TransactionScope()) { // Don't complete, triggering rollback @@ -386,61 +393,56 @@ void AssertNoPreparedTransactions() int GetNumberOfPreparedTransactions() { - using var conn = 
OpenConnection(ConnectionStringEnlistOff); + var dataSource = EnlistOffDataSource; + using var conn = dataSource.OpenConnection(); using var cmd = new NpgsqlCommand("SELECT COUNT(*) FROM pg_prepared_xacts WHERE database = @database", conn); cmd.Parameters.Add(new NpgsqlParameter("database", conn.Database)); return (int)(long)cmd.ExecuteScalar()!; } - void AssertNumberOfRows(int expected) - => Assert.That(_controlConn.ExecuteScalar(@"SELECT COUNT(*) FROM data"), Is.EqualTo(expected), "Unexpected data count"); + void AssertNumberOfRows(int expected, string tableName) + { + using var conn = OpenConnection(); + Assert.That(conn.ExecuteScalar(@$"SELECT COUNT(*) FROM {tableName}"), Is.EqualTo(expected), "Unexpected data count"); + } static void AssertNoDistributedIdentifier() => Assert.That(Transaction.Current?.TransactionInformation.DistributedIdentifier ?? Guid.Empty, Is.EqualTo(Guid.Empty), "Distributed identifier found"); - public readonly string ConnectionStringEnlistOn; - public readonly string ConnectionStringEnlistOff; - #endregion Utilities #region Setup - public SystemTransactionTests() - { - ConnectionStringEnlistOn = new NpgsqlConnectionStringBuilder(ConnectionString) { Enlist = true }.ToString(); - ConnectionStringEnlistOff = new NpgsqlConnectionStringBuilder(ConnectionString) { Enlist = false }.ToString(); - } + NpgsqlDataSource EnlistOnDataSource { get; set; } = default!; - NpgsqlConnection _controlConn = default!; + NpgsqlDataSource EnlistOffDataSource { get; set; } = default!; [OneTimeSetUp] public void OneTimeSetUp() { - _controlConn = OpenConnection(); - - // All tests in this fixture should have exclusive access to the database they're running on. - // If we run these tests in parallel (i.e. two builds in parallel) they will interfere. - // Solve this by taking a PostgreSQL advisory lock for the lifetime of the fixture. 
- _controlConn.ExecuteNonQuery("SELECT pg_advisory_lock(666)"); - - _controlConn.ExecuteNonQuery("DROP TABLE IF EXISTS data"); - _controlConn.ExecuteNonQuery("CREATE TABLE data (name TEXT)"); + EnlistOnDataSource = CreateDataSource(csb => csb.Enlist = true); + EnlistOffDataSource = CreateDataSource(csb => csb.Enlist = false); } - [SetUp] - public void SetUp() + [OneTimeTearDown] + public void OnTimeTearDown() { - _controlConn.ExecuteNonQuery("TRUNCATE data"); + EnlistOnDataSource?.Dispose(); + EnlistOnDataSource = null!; + EnlistOffDataSource?.Dispose(); + EnlistOffDataSource = null!; } -#pragma warning disable CS8625 - [OneTimeTearDown] - public void OneTimeTearDown() + internal static string CreateTempTable(NpgsqlDataSource dataSource, string columns) { - _controlConn?.Close(); - _controlConn = null; + var tableName = "temp_table" + Interlocked.Increment(ref _tempTableCounter); + dataSource.ExecuteNonQuery(@$" +START TRANSACTION; SELECT pg_advisory_xact_lock(0); +DROP TABLE IF EXISTS {tableName} CASCADE; +COMMIT; +CREATE TABLE {tableName} ({columns})"); + return tableName; } -#pragma warning restore CS8625 #endregion } diff --git a/test/Npgsql.Tests/TaskTimeoutAndCancellationTest.cs b/test/Npgsql.Tests/TaskTimeoutAndCancellationTest.cs index a3848de442..e3759d35e9 100644 --- a/test/Npgsql.Tests/TaskTimeoutAndCancellationTest.cs +++ b/test/Npgsql.Tests/TaskTimeoutAndCancellationTest.cs @@ -6,6 +6,7 @@ namespace Npgsql.Tests; +[NonParallelizable] // To make sure unobserved tasks from other tests do not leak public class TaskTimeoutAndCancellationTest : TestBase { const int TestResultValue = 777; @@ -88,6 +89,11 @@ public Task DelayedFaultedTaskCancellation(string testCase) => RunDelayedFaulted static async Task RunDelayedFaultedTaskTestAsync(Func, Task> test) { + // Run the garbage collector to collect unobserved Tasks from other tests. + GC.Collect(); + GC.WaitForPendingFinalizers(); + GC.Collect(); + Exception? 
unobservedTaskException = null; // Subscribe to UnobservedTaskException event to store the Exception, if any. diff --git a/test/Npgsql.Tests/TransactionTests.cs b/test/Npgsql.Tests/TransactionTests.cs index 87b963d65d..ca09836cc7 100644 --- a/test/Npgsql.Tests/TransactionTests.cs +++ b/test/Npgsql.Tests/TransactionTests.cs @@ -180,13 +180,13 @@ public async Task Empty_rollback() [Test, Description("Disposes an empty transaction")] public async Task Empty_Dispose() { - using var _ = CreateTempPool(ConnectionString, out var connString); + await using var dataSource = CreateDataSource(); - using (var conn = await OpenConnectionAsync(connString)) + using (var conn = await dataSource.OpenConnectionAsync()) using (conn.BeginTransaction()) { } - using (var conn = await OpenConnectionAsync(connString)) + using (var conn = await dataSource.OpenConnectionAsync()) { // Make sure the pending BEGIN TRANSACTION didn't leak from the previous open Assert.That(async () => await conn.ExecuteNonQueryAsync("SAVEPOINT foo"), @@ -325,12 +325,9 @@ public async Task Failed_transaction_cannot_rollback_to_savepoint_with_custom_ti [IssueLink("https://github.com/npgsql/npgsql/issues/719")] public async Task Failed_transaction_on_close_with_custom_timeout() { - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) - { - Pooling = true - }.ToString(); + await using var dataSource = CreateDataSource(csb => csb.Pooling = true); - await using var conn = await OpenConnectionAsync(connString); + await using var conn = await dataSource.OpenConnectionAsync(); conn.BeginTransaction(); var backendProcessId = conn.ProcessID; @@ -427,8 +424,8 @@ public async Task Savepoint_quoted() public async Task Savepoint_prepends() { await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); - using var _ = CreateTempPool(postmasterMock.ConnectionString, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = 
CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); var pgMock = await postmasterMock.WaitForServerConnection(); using var tx = conn.BeginTransaction(); @@ -500,6 +497,7 @@ public async Task IsCompleted_rollback_failed() [Parallelizable(ParallelScope.None)] public async Task Transaction_not_supported() { + // TODO: rewrite to DataSource if (IsMultiplexing) Assert.Ignore("Need to rethink/redo dummy transaction mode"); @@ -511,7 +509,8 @@ public async Task Transaction_not_supported() NpgsqlDatabaseInfo.RegisterFactory(new NoTransactionDatabaseInfoFactory()); try { - using var conn = await OpenConnectionAsync(connString); + using var conn = new NpgsqlConnection(connString); + await conn.OpenAsync(); using var tx = conn.BeginTransaction(); // Detect that we're not really in a transaction @@ -527,19 +526,23 @@ public async Task Transaction_not_supported() NpgsqlDatabaseInfo.ResetFactories(); } - using (var conn = await OpenConnectionAsync(connString)) + using (var conn = new NpgsqlConnection(connString)) { + await conn.OpenAsync(); NpgsqlConnection.ClearPool(conn); conn.ReloadTypes(); } // Check that everything is back to normal - using (var conn = await OpenConnectionAsync(connString)) - using (var tx = conn.BeginTransaction()) + using (var conn = new NpgsqlConnection(connString)) { - var prevTxId = conn.ExecuteScalar("SELECT txid_current()"); - var nextTxId = conn.ExecuteScalar("SELECT txid_current()"); - Assert.That(nextTxId, Is.EqualTo(prevTxId)); + await conn.OpenAsync(); + using (var tx = conn.BeginTransaction()) + { + var prevTxId = conn.ExecuteScalar("SELECT txid_current()"); + var nextTxId = conn.ExecuteScalar("SELECT txid_current()"); + Assert.That(nextTxId, Is.EqualTo(prevTxId)); + } } } @@ -552,7 +555,7 @@ public async Task Bug3248_Dispose_transaction_Rollback() return; using var conn = await OpenConnectionAsync(); - await using (var tx = conn.BeginTransaction()) + await using (var tx = await 
conn.BeginTransactionAsync()) { Assert.That(conn.Connector, Is.Not.Null); Assert.That(async () => await conn.ExecuteScalarAsync("SELECT * FROM \"unknown_table\"", tx: tx), @@ -618,17 +621,16 @@ public async Task Access_connection_on_completed_transaction() [Test] public async Task Unbound_transaction_reuse() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + await using var dataSource = CreateDataSource(csb => { - MinPoolSize = 1, - MaxPoolSize = 1, - }; - using var __ = CreateTempPool(csb.ToString(), out var connectionString); + csb.MinPoolSize = 1; + csb.MaxPoolSize = 1; + }); await using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "name TEXT"); - await using var conn1 = await OpenConnectionAsync(connectionString); + await using var conn1 = await dataSource.OpenConnectionAsync(); var tx1 = conn1.BeginTransaction(); await using (var ___ = tx1) { @@ -645,7 +647,7 @@ public async Task Unbound_transaction_reuse() await conn1.CloseAsync(); } - await using var conn2 = await OpenConnectionAsync(connectionString); + await using var conn2 = await dataSource.OpenConnectionAsync(); var tx2 = conn2.BeginTransaction(); await using (var ___ = tx2) { @@ -663,7 +665,7 @@ public async Task Unbound_transaction_reuse() await conn2.CloseAsync(); } - await using var conn3 = await OpenConnectionAsync(connectionString); + await using var conn3 = await dataSource.OpenConnectionAsync(); var tx3 = conn3.BeginTransaction(); await using (var ___ = tx3) { @@ -688,12 +690,8 @@ public async Task Bug3686() if (IsMultiplexing) return; - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - Pooling = false - }; - - await using var conn = await OpenConnectionAsync(csb); + await using var dataSource = CreateDataSource(csb => csb.Pooling = false); + await using var conn = await dataSource.OpenConnectionAsync(); await using var tx = await conn.BeginTransactionAsync(); await conn.ExecuteNonQueryAsync("SELECT 1", tx); await 
tx.CommitAsync(); diff --git a/test/Npgsql.Tests/Types/ArrayTests.cs b/test/Npgsql.Tests/Types/ArrayTests.cs index 8a048067d0..043f19508d 100644 --- a/test/Npgsql.Tests/Types/ArrayTests.cs +++ b/test/Npgsql.Tests/Types/ArrayTests.cs @@ -12,6 +12,8 @@ namespace Npgsql.Tests.Types; +// ReSharper disable BitwiseOperatorOnEnumWithoutFlags + /// /// Tests on PostgreSQL arrays /// @@ -26,41 +28,36 @@ public async Task Array_resolution() if (IsMultiplexing) Assert.Ignore("Multiplexing, ReloadTypes"); - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - ApplicationName = nameof(Array_resolution), // Prevent backend type caching in TypeHandlerRegistry - Pooling = false - }; - - using var conn = await OpenConnectionAsync(csb); + await using var dataSource = CreateDataSource(csb => csb.Pooling = false); + await using var conn = await dataSource.OpenConnectionAsync(); // Resolve type by NpgsqlDbType - using (var cmd = new NpgsqlCommand("SELECT @p", conn)) + await using (var cmd = new NpgsqlCommand("SELECT @p", conn)) { cmd.Parameters.AddWithValue("p", NpgsqlDbType.Array | NpgsqlDbType.Integer, DBNull.Value); - using var reader = await cmd.ExecuteReaderAsync(); + await using var reader = await cmd.ExecuteReaderAsync(); reader.Read(); Assert.That(reader.GetDataTypeName(0), Is.EqualTo("integer[]")); } // Resolve type by ClrType (type inference) - conn.ReloadTypes(); - using (var cmd = new NpgsqlCommand("SELECT @p", conn)) + await conn.ReloadTypesAsync(); + await using (var cmd = new NpgsqlCommand("SELECT @p", conn)) { - cmd.Parameters.Add(new NpgsqlParameter { ParameterName = "p", Value = new int[0] }); - using var reader = await cmd.ExecuteReaderAsync(); + cmd.Parameters.Add(new NpgsqlParameter { ParameterName = "p", Value = Array.Empty() }); + await using var reader = await cmd.ExecuteReaderAsync(); reader.Read(); Assert.That(reader.GetDataTypeName(0), Is.EqualTo("integer[]")); } // Resolve type by DataTypeName - conn.ReloadTypes(); - using (var cmd = new 
NpgsqlCommand("SELECT @p", conn)) + await conn.ReloadTypesAsync(); + await using (var cmd = new NpgsqlCommand("SELECT @p", conn)) { cmd.Parameters.Add(new NpgsqlParameter { ParameterName="p", DataTypeName = "integer[]", Value = DBNull.Value }); - using (var reader = await cmd.ExecuteReaderAsync()) + await using (var reader = await cmd.ExecuteReaderAsync()) { reader.Read(); Assert.That(reader.GetDataTypeName(0), Is.EqualTo("integer[]")); @@ -68,9 +65,9 @@ public async Task Array_resolution() } // Resolve type by OID (read) - conn.ReloadTypes(); - using (var cmd = new NpgsqlCommand("SELECT '{1, 3}'::INTEGER[]", conn)) - using (var reader = await cmd.ExecuteReaderAsync()) + await conn.ReloadTypesAsync(); + await using (var cmd = new NpgsqlCommand("SELECT '{1, 3}'::INTEGER[]", conn)) + await using (var reader = await cmd.ExecuteReaderAsync()) { reader.Read(); Assert.That(reader.GetDataTypeName(0), Is.EqualTo("integer[]")); @@ -81,11 +78,10 @@ public async Task Array_resolution() [Test] public async Task Bind_int_then_array_of_int() { - using var pool = CreateTempPool(ConnectionString, out var connString); - using var conn = new NpgsqlConnection(connString); - await conn.OpenAsync(); + await using var dataSource = CreateDataSource(); + await using var conn = await dataSource.OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT 1", conn); + await using var cmd = new NpgsqlCommand("SELECT 1", conn); _ = await cmd.ExecuteScalarAsync(); cmd.CommandText = "SELECT ARRAY[1,2]"; @@ -95,8 +91,8 @@ public async Task Bind_int_then_array_of_int() [Test, Description("Roundtrips a simple, one-dimensional array of ints")] public async Task Ints() { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p1, @p2, @p3", conn); + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT @p1, @p2, @p3", conn); var expected = new[] { 1, 5, 9 }; var p1 = new NpgsqlParameter("p1", 
NpgsqlDbType.Array | NpgsqlDbType.Integer); @@ -123,8 +119,8 @@ public async Task Ints() [Test, Description("Roundtrips a simple, one-dimensional array of int? values")] public async Task Nullable_ints() { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p1, @p2, @p3", conn); + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT @p1, @p2, @p3", conn); var expected = new int?[] { 1, 5, null, 9 }; var p1 = new NpgsqlParameter("p1", NpgsqlDbType.Array | NpgsqlDbType.Integer); @@ -149,9 +145,9 @@ public async Task Nullable_ints() [Test, Description("Checks that PG arrays containing nulls can't be read as CLR arrays of non-nullable value types.")] public async Task Nullable_ints_cannot_be_read_as_non_nullable() { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT '{1, NULL, 2}'::integer[]", conn); - using var reader = await cmd.ExecuteReaderAsync(); + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT '{1, NULL, 2}'::integer[]", conn); + await using var reader = await cmd.ExecuteReaderAsync(); reader.Read(); Assert.That(() => reader.GetFieldValue(0), Throws.Exception.TypeOf()); @@ -165,12 +161,14 @@ public async Task Nullable_ints_cannot_be_read_as_non_nullable() [TestCase(ArrayNullabilityMode.PerInstance)] public async Task Value_type_array_nullabilities(ArrayNullabilityMode mode) { - using var pool = CreateTempPool(new NpgsqlConnectionStringBuilder(ConnectionString){ ArrayNullabilityMode = mode}, out var connectionString); - await using var conn = await OpenConnectionAsync(connectionString); - await using var cmd = new NpgsqlCommand("SELECT onedim, twodim FROM (VALUES" + - "('{1, 2, 3, 4}'::int[],'{{1, 2},{3, 4}}'::int[][])," + - "('{5, NULL, 6, 7}'::int[],'{{5, NULL},{6, 7}}'::int[][])" + - ") AS x(onedim,twodim)", conn); + await using var dataSource = CreateDataSource(csb => 
csb.ArrayNullabilityMode = mode); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand( +""" +SELECT onedim, twodim FROM (VALUES +('{1, 2, 3, 4}'::int[],'{{1, 2},{3, 4}}'::int[][]), +('{5, NULL, 6, 7}'::int[],'{{5, NULL},{6, 7}}'::int[][])) AS x(onedim,twodim) +""", conn); await using var reader = await cmd.ExecuteReaderAsync(); switch (mode) @@ -238,10 +236,10 @@ public async Task Value_type_array_nullabilities(ArrayNullabilityMode mode) [Test] public async Task Empty_array() { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p", conn); + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT @p", conn); - cmd.Parameters.Add(new NpgsqlParameter("p", NpgsqlDbType.Array | NpgsqlDbType.Integer) { Value = new int[0] }); + cmd.Parameters.Add(new NpgsqlParameter("p", NpgsqlDbType.Array | NpgsqlDbType.Integer) { Value = Array.Empty() }); var reader = await cmd.ExecuteReaderAsync(); reader.Read(); @@ -252,8 +250,8 @@ public async Task Empty_array() [Test, Description("Roundtrips an empty multi-dimensional array.")] public async Task Empty_multidimensional_array() { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p", conn); + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT @p", conn); var expected = new int[0, 0]; cmd.Parameters.AddWithValue("p", NpgsqlDbType.Array | NpgsqlDbType.Integer, expected); @@ -267,8 +265,8 @@ public async Task Empty_multidimensional_array() [Test, Description("Verifies that an InvalidOperationException is thrown when the returned array has a different number of dimensions from what was requested.")] public async Task Wrong_array_dimensions_throws() { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT ARRAY[[1], [2]]", conn); + await using var conn = await 
OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT ARRAY[[1], [2]]", conn); var reader = await cmd.ExecuteReaderAsync(); reader.Read(); @@ -280,8 +278,8 @@ public async Task Wrong_array_dimensions_throws() [Test, Description("Verifies that an attempt to read an Array of value types that contains null values as array of a non-nullable type fails.")] public async Task Read_null_as_non_nullable_array_throws() { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p1", conn); + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT @p1", conn); var expected = new int?[] { 1, 5, null, 9 }; cmd.Parameters.AddWithValue("p1", NpgsqlDbType.Array | NpgsqlDbType.Integer, expected); @@ -299,8 +297,8 @@ public async Task Read_null_as_non_nullable_array_throws() [Test, Description("Verifies that an attempt to read an Array of value types that contains null values as List of a non-nullable type fails.")] public async Task Read_null_as_non_nullable_list_throws() { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p1", conn); + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT @p1", conn); var expected = new int?[] { 1, 5, null, 9 }; cmd.Parameters.AddWithValue("p1", NpgsqlDbType.Array | NpgsqlDbType.Integer, expected); @@ -317,17 +315,17 @@ public async Task Read_null_as_non_nullable_list_throws() [Test, Description("Roundtrips a large, one-dimensional array of ints that will be chunked")] public async Task Long_one_dimensional() { - using var conn = await OpenConnectionAsync(); + await using var conn = await OpenConnectionAsync(); var expected = new int[conn.Settings.WriteBufferSize/4 + 100]; for (var i = 0; i < expected.Length; i++) expected[i] = i; - using var cmd = new NpgsqlCommand("SELECT @p", conn); + await using var cmd = new NpgsqlCommand("SELECT @p", conn); var p = new 
NpgsqlParameter {ParameterName = "p", Value = expected}; cmd.Parameters.Add(p); - using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SequentialAccess); + await using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SequentialAccess); reader.Read(); Assert.That(reader[0], Is.EqualTo(expected)); } @@ -335,17 +333,17 @@ public async Task Long_one_dimensional() [Test, Description("Roundtrips a large, two-dimensional array of ints that will be chunked")] public async Task Long_two_dimensional() { - using var conn = await OpenConnectionAsync(); + await using var conn = await OpenConnectionAsync(); var len = conn.Settings.WriteBufferSize/2 + 100; var expected = new int[2, len]; for (var i = 0; i < len; i++) expected[0, i] = i; for (var i = 0; i < len; i++) expected[1, i] = i; - using var cmd = new NpgsqlCommand("SELECT @p", conn); + await using var cmd = new NpgsqlCommand("SELECT @p", conn); var p = new NpgsqlParameter {ParameterName = "p", Value = expected}; cmd.Parameters.Add(p); - using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SequentialAccess); + await using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SequentialAccess); reader.Read(); Assert.That(reader[0], Is.EqualTo(expected)); } @@ -353,14 +351,14 @@ public async Task Long_two_dimensional() [Test, Description("Roundtrips a long, one-dimensional array of strings, including a null")] public async Task Strings_with_null() { - using var conn = await OpenConnectionAsync(); + await using var conn = await OpenConnectionAsync(); var largeString = new StringBuilder(); largeString.Append('a', conn.Settings.WriteBufferSize); var expected = new[] {"value1", null, largeString.ToString(), "val3"}; - using var cmd = new NpgsqlCommand("SELECT @p", conn); + await using var cmd = new NpgsqlCommand("SELECT @p", conn); var p = new NpgsqlParameter("p", NpgsqlDbType.Array | NpgsqlDbType.Text) {Value = expected}; cmd.Parameters.Add(p); - using var reader = await 
cmd.ExecuteReaderAsync(CommandBehavior.SequentialAccess); + await using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SequentialAccess); reader.Read(); Assert.That(reader.GetFieldValue(0), Is.EqualTo(expected)); } @@ -368,9 +366,9 @@ public async Task Strings_with_null() [Test, Description("Roundtrips a zero-dimensional array of ints, should return empty one-dimensional")] public async Task Zero_dimensional() { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p", conn); - var expected = new int[0]; + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT @p", conn); + var expected = Array.Empty(); var p = new NpgsqlParameter("p", NpgsqlDbType.Array | NpgsqlDbType.Integer) { Value = expected }; cmd.Parameters.Add(p); var reader = await cmd.ExecuteReaderAsync(); @@ -384,8 +382,8 @@ public async Task Zero_dimensional() [Test, Description("Roundtrips a two-dimensional array of ints")] public async Task Two_dimensional_ints() { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p1, @p2", conn); + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT @p1, @p2", conn); var expected = new[,] { { 1, 2, 3 }, { 7, 8, 9 } }; var p1 = new NpgsqlParameter("p1", NpgsqlDbType.Array | NpgsqlDbType.Integer); var p2 = new NpgsqlParameter { ParameterName = "p2", Value = expected }; @@ -402,16 +400,16 @@ public async Task Two_dimensional_ints() [Test, Description("Reads an one-dimensional array with lower bound != 0")] public async Task Read_non_zero_lower_bounded() { - using var conn = await OpenConnectionAsync(); - using (var cmd = new NpgsqlCommand("SELECT '[2:3]={ 8, 9 }'::INT[]", conn)) - using (var reader = await cmd.ExecuteReaderAsync()) + await using var conn = await OpenConnectionAsync(); + await using (var cmd = new NpgsqlCommand("SELECT '[2:3]={ 8, 9 }'::INT[]", conn)) + await using 
(var reader = await cmd.ExecuteReaderAsync()) { reader.Read(); Assert.That(reader.GetFieldValue(0), Is.EqualTo(new[] {8, 9})); } - using (var cmd = new NpgsqlCommand("SELECT '[2:3][2:3]={ {8,9}, {1,2} }'::INT[][]", conn)) - using (var reader = await cmd.ExecuteReaderAsync()) + await using (var cmd = new NpgsqlCommand("SELECT '[2:3][2:3]={ {8,9}, {1,2} }'::INT[][]", conn)) + await using (var reader = await cmd.ExecuteReaderAsync()) { reader.Read(); Assert.That(reader.GetFieldValue(0), Is.EqualTo(new[,] {{8, 9}, {1, 2}})); @@ -421,15 +419,15 @@ public async Task Read_non_zero_lower_bounded() [Test, Description("Roundtrips a one-dimensional array of bytea values")] public async Task Array_of_byte_arrays() { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p1, @p2", conn); + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT @p1, @p2", conn); var expected = new[] { new byte[] { 1, 2 }, new byte[] { 3, 4, } }; var p1 = new NpgsqlParameter("p1", NpgsqlDbType.Array | NpgsqlDbType.Bytea); var p2 = new NpgsqlParameter { ParameterName = "p2", Value = expected }; cmd.Parameters.Add(p1); cmd.Parameters.Add(p2); p1.Value = expected; - using var reader = await cmd.ExecuteReaderAsync(); + await using var reader = await cmd.ExecuteReaderAsync(); reader.Read(); Assert.That(reader.GetValue(0), Is.EqualTo(expected)); Assert.That(reader.GetFieldValue(0), Is.EqualTo(expected)); @@ -442,8 +440,8 @@ public async Task Array_of_byte_arrays() // ReSharper disable once InconsistentNaming public async Task IList_non_generic() { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p", conn); + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT @p", conn); var expected = new ArrayList(new[] { 1, 2, 3 }); var p = new NpgsqlParameter("p", NpgsqlDbType.Array | NpgsqlDbType.Integer) { Value = expected }; 
cmd.Parameters.Add(p); @@ -454,14 +452,14 @@ public async Task IList_non_generic() // ReSharper disable once InconsistentNaming public async Task IList_generic() { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p1, @p2", conn); + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT @p1, @p2", conn); var expected = new[] { 1, 2, 3 }.ToList(); var p1 = new NpgsqlParameter { ParameterName = "p1", Value = expected }; var p2 = new NpgsqlParameter { ParameterName = "p2", Value = expected }; cmd.Parameters.Add(p1); cmd.Parameters.Add(p2); - using var reader = await cmd.ExecuteReaderAsync(); + await using var reader = await cmd.ExecuteReaderAsync(); reader.Read(); Assert.That(reader.GetValue(0), Is.EqualTo(expected)); Assert.That(reader.GetFieldValue>(1), Is.EqualTo(expected)); @@ -471,12 +469,12 @@ public async Task IList_generic() // ReSharper disable once InconsistentNaming public async Task IList_generic_fails_for_multidimensional_array() { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p1", conn); + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT @p1", conn); var expected = new[,] { { 1, 2 }, { 3, 4 } }; var p1 = new NpgsqlParameter { ParameterName = "p1", Value = expected }; cmd.Parameters.Add(p1); - using var reader = await cmd.ExecuteReaderAsync(); + await using var reader = await cmd.ExecuteReaderAsync(); reader.Read(); Assert.That(reader.GetValue(0), Is.EqualTo(expected)); var exception = Assert.Throws(() => @@ -489,8 +487,8 @@ public async Task IList_generic_fails_for_multidimensional_array() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/844")] public async Task IEnumerable_throws_friendly_exception() { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p1", conn); + await using var conn = await OpenConnectionAsync(); 
+ await using var cmd = new NpgsqlCommand("SELECT @p1", conn); cmd.Parameters.AddWithValue("p1", Enumerable.Range(1, 3)); Assert.That(async () => await cmd.ExecuteScalarAsync(), Throws.Exception.TypeOf().With.Message.Contains("array or List")); } @@ -499,8 +497,8 @@ public async Task IEnumerable_throws_friendly_exception() public async Task Mixed_element_types() { var mixedList = new ArrayList { 1, "yo" }; - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p1", conn); + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT @p1", conn); cmd.Parameters.AddWithValue("p1", NpgsqlDbType.Array | NpgsqlDbType.Integer, mixedList); Assert.That(async () => await cmd.ExecuteNonQueryAsync(), Throws.Exception .TypeOf() @@ -513,8 +511,8 @@ public async Task Jagged_arrays_not_supported() var jagged = new int[2][]; jagged[0] = new[] { 8 }; jagged[1] = new[] { 8, 10 }; - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p1", conn); + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT @p1", conn); cmd.Parameters.AddWithValue("p1", NpgsqlDbType.Array | NpgsqlDbType.Integer, jagged); Assert.That(async () => await cmd.ExecuteNonQueryAsync(), Throws.Exception .TypeOf() @@ -524,7 +522,7 @@ public async Task Jagged_arrays_not_supported() [Test, Description("Checks that ILists are properly serialized as arrays of their underlying types")] public async Task List_type_resolution() { - using var conn = await OpenConnectionAsync(ConnectionString); + await using var conn = await OpenConnectionAsync(); await AssertIListRoundtrips(conn, new[] { 1, 2, 3 }); await AssertIListRoundtrips(conn, new IntList { 1, 2, 3 }); await AssertIListRoundtrips(conn, new MisleadingIntList() { 1, 2, 3 }); @@ -547,16 +545,16 @@ public async Task Array_of_domain() if (IsMultiplexing) Assert.Ignore("Multiplexing, ReloadTypes"); - using var 
conn = await OpenConnectionAsync(); - TestUtil.MinimumPgVersion(conn, "11.0", "Arrays of domains were introduced in PostgreSQL 11"); - conn.ExecuteNonQuery("CREATE DOMAIN pg_temp.posint AS integer CHECK (VALUE > 0);"); - conn.ReloadTypes(); - using var cmd = new NpgsqlCommand("SELECT @p1::posint[], @p2::posint[][]", conn); + await using var conn = await OpenConnectionAsync(); + MinimumPgVersion(conn, "11.0", "Arrays of domains were introduced in PostgreSQL 11"); + await conn.ExecuteNonQueryAsync("CREATE DOMAIN pg_temp.posint AS integer CHECK (VALUE > 0);"); + await conn.ReloadTypesAsync(); + await using var cmd = new NpgsqlCommand("SELECT @p1::posint[], @p2::posint[][]", conn); var oneDim = new[] { 1, 3, 5, 9 }; var twoDim = new[,] { { 1, 3 }, { 5, 9 } }; cmd.Parameters.AddWithValue("p1", NpgsqlDbType.Integer | NpgsqlDbType.Array, oneDim); cmd.Parameters.AddWithValue("p2", NpgsqlDbType.Integer | NpgsqlDbType.Array, twoDim); - using var reader = cmd.ExecuteReader(); + await using var reader = cmd.ExecuteReader(); reader.Read(); Assert.That(reader.GetValue(0), Is.EqualTo(oneDim)); @@ -579,17 +577,20 @@ public async Task Domain_of_array() if (IsMultiplexing) Assert.Ignore("Multiplexing, ReloadTypes"); - using var conn = await OpenConnectionAsync(); - TestUtil.MinimumPgVersion(conn, "11.0", "Domains over arrays were introduced in PostgreSQL 11"); - conn.ExecuteNonQuery("CREATE DOMAIN pg_temp.int_array_1d AS int[] CHECK(array_length(VALUE, 1) = 4);" + - "CREATE DOMAIN pg_temp.int_array_2d AS int[][] CHECK(array_length(VALUE, 2) = 2);"); - conn.ReloadTypes(); - using var cmd = new NpgsqlCommand("SELECT @p1::int_array_1d, @p2::int_array_2d", conn); + await using var conn = await OpenConnectionAsync(); + MinimumPgVersion(conn, "11.0", "Domains over arrays were introduced in PostgreSQL 11"); + await conn.ExecuteNonQueryAsync( +""" +CREATE DOMAIN pg_temp.int_array_1d AS int[] CHECK(array_length(VALUE, 1) = 4); +CREATE DOMAIN pg_temp.int_array_2d AS int[][] 
CHECK(array_length(VALUE, 2) = 2); +"""); + await conn.ReloadTypesAsync(); + await using var cmd = new NpgsqlCommand("SELECT @p1::int_array_1d, @p2::int_array_2d", conn); var oneDim = new[] { 1, 3, 5, 9 }; var twoDim = new[,] { { 1, 3 }, { 5, 9 } }; cmd.Parameters.AddWithValue("p1", NpgsqlDbType.Integer | NpgsqlDbType.Array, oneDim); cmd.Parameters.AddWithValue("p2", NpgsqlDbType.Integer | NpgsqlDbType.Array, twoDim); - using var reader = cmd.ExecuteReader(); + await using var reader = cmd.ExecuteReader(); reader.Read(); Assert.That(reader.GetValue(0), Is.EqualTo(oneDim)); @@ -608,9 +609,9 @@ public async Task Domain_of_array() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/3417")] public async Task Read_two_empty_arrays() { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT '{}'::INT[], '{}'::INT[]", conn); - using var reader = await cmd.ExecuteReaderAsync(); + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT '{}'::INT[], '{}'::INT[]", conn); + await using var reader = await cmd.ExecuteReaderAsync(); await reader.ReadAsync(); Assert.AreSame(reader.GetFieldValue(0), reader.GetFieldValue(1)); // Unlike T[], List is mutable so we should not return the same instance @@ -619,17 +620,19 @@ public async Task Read_two_empty_arrays() async Task AssertIListRoundtrips(NpgsqlConnection conn, IEnumerable value) { - using var cmd = new NpgsqlCommand("SELECT @p", conn); + await using var cmd = new NpgsqlCommand("SELECT @p", conn); cmd.Parameters.Add(new NpgsqlParameter { ParameterName = "p", Value = value }); - using var reader = await cmd.ExecuteReaderAsync(); + await using var reader = await cmd.ExecuteReaderAsync(); reader.Read(); Assert.That(reader.GetDataTypeName(0), Is.EqualTo("integer[]")); Assert.That(reader[0], Is.EqualTo(value.ToArray())); } class IntList : List { } + // ReSharper disable UnusedTypeParameter class MisleadingIntList : List { } + // ReSharper restore 
UnusedTypeParameter public ArrayTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} } diff --git a/test/Npgsql.Tests/Types/CompositeTests.cs b/test/Npgsql.Tests/Types/CompositeTests.cs index 75bf214fff..2795968470 100644 --- a/test/Npgsql.Tests/Types/CompositeTests.cs +++ b/test/Npgsql.Tests/Types/CompositeTests.cs @@ -255,6 +255,8 @@ public async Task Table_as_composite([Values] bool enabled) else { Assert.ThrowsAsync(DoAssertion); + // Start a transaction specifically for multiplexing (to bind a connector to the connection) + await using var tx = await connection.BeginTransactionAsync(); Assert.Null(connection.Connector!.DatabaseInfo.CompositeTypes.SingleOrDefault(c => c.Name.Contains(table))); Assert.Null(connection.Connector!.DatabaseInfo.ArrayTypes.SingleOrDefault(c => c.Name.Contains(table))); diff --git a/test/Npgsql.Tests/Types/DateTimeTests.cs b/test/Npgsql.Tests/Types/DateTimeTests.cs index 87c8cbb5f2..f387387dcc 100644 --- a/test/Npgsql.Tests/Types/DateTimeTests.cs +++ b/test/Npgsql.Tests/Types/DateTimeTests.cs @@ -419,13 +419,13 @@ public Task Interval_with_months_cannot_read_as_TimeSpan() #endregion - protected override async ValueTask OpenConnectionAsync(string? connectionString = null) + protected override async ValueTask OpenConnectionAsync() { - var conn = await base.OpenConnectionAsync(connectionString); + var conn = await base.OpenConnectionAsync(); await conn.ExecuteNonQueryAsync("SET TimeZone='Europe/Berlin'"); return conn; } - protected override NpgsqlConnection OpenConnection(string? 
connectionString = null) + protected override NpgsqlConnection OpenConnection() => throw new NotSupportedException(); } diff --git a/test/Npgsql.Tests/Types/DomainTests.cs b/test/Npgsql.Tests/Types/DomainTests.cs index fe415116c1..4faaceb212 100644 --- a/test/Npgsql.Tests/Types/DomainTests.cs +++ b/test/Npgsql.Tests/Types/DomainTests.cs @@ -13,13 +13,8 @@ public async Task Domain_resolution() if (IsMultiplexing) Assert.Ignore("Multiplexing, ReloadTypes"); - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - ApplicationName = nameof(Domain_resolution), // Prevent backend type caching in TypeHandlerRegistry - Pooling = false - }; - - using var conn = await OpenConnectionAsync(csb); + await using var dataSource = CreateDataSource(csb => csb.Pooling = false); + await using var conn = await dataSource.OpenConnectionAsync(); var type = await GetTempTypeName(conn); await conn.ExecuteNonQueryAsync($"CREATE DOMAIN {type} AS text"); diff --git a/test/Npgsql.Tests/Types/EnumTests.cs b/test/Npgsql.Tests/Types/EnumTests.cs index 1f450c0a35..bc8a4992d5 100644 --- a/test/Npgsql.Tests/Types/EnumTests.cs +++ b/test/Npgsql.Tests/Types/EnumTests.cs @@ -150,8 +150,8 @@ enum Enum2 { Alpha } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1779")] public async Task GetPostgresType() { - using var _ = CreateTempPool(ConnectionString, out var connectionString); - using var conn = await OpenConnectionAsync(connectionString); + await using var dataSource = CreateDataSource(); + using var conn = await dataSource.OpenConnectionAsync(); var type = await GetTempTypeName(conn); await conn.ExecuteNonQueryAsync($"CREATE TYPE {type} AS ENUM ('sad', 'ok', 'happy')"); conn.ReloadTypes(); diff --git a/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs b/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs index f03444ca26..fbf1b537d1 100644 --- a/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs +++ b/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs @@ -49,14 +49,14 @@ public Task 
Timestamptz_local_DateTime_converts() isDefaultForWriting: false); } - protected override async ValueTask OpenConnectionAsync(string? connectionString = null) + protected override async ValueTask OpenConnectionAsync() { - var conn = await base.OpenConnectionAsync(connectionString); + var conn = await base.OpenConnectionAsync(); await conn.ExecuteNonQueryAsync("SET TimeZone='Europe/Berlin'"); return conn; } - protected override NpgsqlConnection OpenConnection(string? connectionString = null) + protected override NpgsqlConnection OpenConnection() => throw new NotSupportedException(); [OneTimeSetUp] diff --git a/test/Npgsql.Tests/Types/RangeTests.cs b/test/Npgsql.Tests/Types/RangeTests.cs index 4d489ad108..8764c304c5 100644 --- a/test/Npgsql.Tests/Types/RangeTests.cs +++ b/test/Npgsql.Tests/Types/RangeTests.cs @@ -22,13 +22,8 @@ public async Task Range_resolution() if (IsMultiplexing) Assert.Ignore("Multiplexing, ReloadTypes"); - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - ApplicationName = nameof(Range_resolution), // Prevent backend type caching in TypeHandlerRegistry - Pooling = false - }; - - using var conn = await OpenConnectionAsync(csb); + await using var dataSource = CreateDataSource(csb => csb.Pooling = false); + await using var conn = await dataSource.OpenConnectionAsync(); // Resolve type by NpgsqlDbType using (var cmd = new NpgsqlCommand("SELECT @p", conn)) @@ -103,11 +98,8 @@ public async Task Range() [NonParallelizable] public async Task Range_with_long_subtype() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - MaxPoolSize = 1 - }; - await using var conn = await OpenConnectionAsync(csb); + await using var dataSource = CreateDataSource(csb => csb.MaxPoolSize = 1); + await using var conn = await dataSource.OpenConnectionAsync(); var typeName = await GetTempTypeName(conn); await conn.ExecuteNonQueryAsync($"CREATE TYPE {typeName} AS RANGE(subtype=text)"); From ce3e97e56b19edd99876f42041df329e51730972 Mon Sep 17 
00:00:00 2001 From: Nikita Kazmin Date: Sun, 22 Jan 2023 16:11:43 +0300 Subject: [PATCH 049/761] Add Native AOT to CI (#4901) Closes #4897 --- .github/workflows/nativeAOT.yml | 95 +++++++ Directory.Packages.props | 3 + test/MStatDumper/MStatDumper.csproj | 14 ++ test/MStatDumper/Program.cs | 235 ++++++++++++++++++ .../Npgsql.NativeAotTests.csproj | 19 ++ .../Program.cs | 16 +- .../Npgsql.TrimmingTests.csproj | 14 -- 7 files changed, 375 insertions(+), 21 deletions(-) create mode 100644 .github/workflows/nativeAOT.yml create mode 100644 test/MStatDumper/MStatDumper.csproj create mode 100644 test/MStatDumper/Program.cs create mode 100644 test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj rename test/{Npgsql.TrimmingTests => Npgsql.NativeAotTests}/Program.cs (51%) delete mode 100644 test/Npgsql.TrimmingTests/Npgsql.TrimmingTests.csproj diff --git a/.github/workflows/nativeAOT.yml b/.github/workflows/nativeAOT.yml new file mode 100644 index 0000000000..08abaa8b82 --- /dev/null +++ b/.github/workflows/nativeAOT.yml @@ -0,0 +1,95 @@ +name: NativeAOT + +on: + push: + branches: + - main + - 'hotfix/**' + tags: + - '*' + pull_request: + +env: + dotnet_sdk_version: '7.0.102' + DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true + +jobs: + build: + runs-on: ${{ matrix.os }} + + strategy: + fail-fast: false + matrix: + os: [ubuntu-22.04] + pg_major: [15] + + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: NuGet Cache + uses: actions/cache@v3 + with: + path: ~/.nuget/packages + key: ${{ runner.os }}-nuget-${{ hashFiles('**/Directory.Build.targets') }} + restore-keys: | + ${{ runner.os }}-nuget- + + - name: Setup .NET Core SDK + uses: actions/setup-dotnet@v3.0.3 + with: + dotnet-version: | + ${{ env.dotnet_sdk_version }} + + - name: Setup Native AOT prerequisites + run: sudo apt-get install clang zlib1g-dev + shell: bash + + - name: Build + run: dotnet publish test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj -r linux-x64 -c Release -f net7.0 + shell: bash + + - 
name: Start PostgreSQL ${{ matrix.pg_major }} (Linux) + if: startsWith(matrix.os, 'ubuntu') + run: | + # First uninstall any PostgreSQL installed on the image + dpkg-query -W --showformat='${Package}\n' 'postgresql-*' | xargs sudo dpkg -P postgresql + + # Import the repository signing key + wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - + + sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt/ jammy-pgdg main ${{ matrix.pg_major }}" >> /etc/apt/sources.list.d/pgdg.list' + sudo apt-get update -qq + sudo apt-get install -qq postgresql-${{ matrix.pg_major }} + export PGDATA=/etc/postgresql/${{ matrix.pg_major }}/main + + sudo -u postgres psql -c "CREATE USER npgsql_tests SUPERUSER PASSWORD 'npgsql_tests'" + sudo -u postgres psql -c "CREATE DATABASE npgsql_tests" + + sudo pg_ctlcluster ${{ matrix.pg_major }} main restart + + # Uncomment the following to SSH into the agent running the build (https://github.com/mxschmitt/action-tmate) + #- uses: actions/checkout@v3 + #- name: Setup tmate session + # uses: mxschmitt/action-tmate@v3 + + - name: Run + run: test/Npgsql.NativeAotTests/bin/Release/net7.0/linux-x64/native/Npgsql.NativeAotTests + + - name: Write binary size to summary + run: | + size="$(ls -l test/Npgsql.NativeAotTests/bin/Release/net7.0/linux-x64/native/Npgsql.NativeAotTests | cut -d ' ' -f 5)" + echo "Binary size is $size bytes ($((size / (1024 * 1024))) mb)" >> $GITHUB_STEP_SUMMARY + + - name: Dump mstat + run: dotnet run --project test/MStatDumper/MStatDumper.csproj -c release -- "test/Npgsql.NativeAotTests/obj/Release/net7.0/linux-x64/native/Npgsql.NativeAotTests.mstat" md >> $GITHUB_STEP_SUMMARY + + - name: Assert binary size + run: | + size="$(ls -l test/Npgsql.NativeAotTests/bin/Release/net7.0/linux-x64/native/Npgsql.NativeAotTests | cut -d ' ' -f 5)" + echo "Binary size is $size bytes ($((size / (1024 * 1024))) mb)" + + if (( size > 36700160 )); then + echo "Binary size exceeds 35mb threshold" + exit 1 
+ fi \ No newline at end of file diff --git a/Directory.Packages.props b/Directory.Packages.props index 00202626ac..3228f538d8 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -41,5 +41,8 @@ + + + diff --git a/test/MStatDumper/MStatDumper.csproj b/test/MStatDumper/MStatDumper.csproj new file mode 100644 index 0000000000..2421fe3b15 --- /dev/null +++ b/test/MStatDumper/MStatDumper.csproj @@ -0,0 +1,14 @@ + + + + Exe + net7.0 + enable + disable + + + + + + + diff --git a/test/MStatDumper/Program.cs b/test/MStatDumper/Program.cs new file mode 100644 index 0000000000..11ec532030 --- /dev/null +++ b/test/MStatDumper/Program.cs @@ -0,0 +1,235 @@ +using Mono.Cecil; +using Mono.Cecil.Rocks; + +namespace MStatDumper +{ + internal class Program + { + static void Main(string[] args) + { + if (args.Length == 0) + { + throw new Exception("Must provide the path to mstat file. It's in {project}/obj/Release/{TFM}/{os}/native/{project}.mstat"); + } + + var markDownStyleOutput = args.Length > 1 && args[1] == "md"; + + var asm = AssemblyDefinition.ReadAssembly(args[0]); + var globalType = (TypeDefinition)asm.MainModule.LookupToken(0x02000001); + + var types = globalType.Methods.First(x => x.Name == "Types"); + var typeStats = GetTypes(types).ToList(); + var typeSize = typeStats.Sum(x => x.Size); + var typesByModules = typeStats.GroupBy(x => x.Type.Scope).Select(x => new { x.Key.Name, Sum = x.Sum(x => x.Size) }).ToList(); + if (markDownStyleOutput) + { + Console.WriteLine("
"); + Console.WriteLine($"Types Total Size {typeSize:n0}"); + Console.WriteLine(); + Console.WriteLine("
"); + Console.WriteLine(); + Console.WriteLine("| Name | Size |"); + Console.WriteLine("| --- | --- |"); + foreach (var m in typesByModules.OrderByDescending(x => x.Sum)) + { + Console.WriteLine($"| {m.Name.Replace("`", "\\`")} | {m.Sum:n0} |"); + } + Console.WriteLine(); + Console.WriteLine("
"); + } + else + { + Console.WriteLine($"// ********** Types Total Size {typeSize:n0}"); + foreach (var m in typesByModules.OrderByDescending(x => x.Sum)) + { + Console.WriteLine($"{m.Name,-70} {m.Sum,9:n0}"); + } + Console.WriteLine("// **********"); + } + + Console.WriteLine(); + + var methods = globalType.Methods.First(x => x.Name == "Methods"); + var methodStats = GetMethods(methods).ToList(); + var methodSize = methodStats.Sum(x => x.Size + x.GcInfoSize + x.EhInfoSize); + var methodsByModules = methodStats.GroupBy(x => x.Method.DeclaringType.Scope).Select(x => new { x.Key.Name, Sum = x.Sum(x => x.Size + x.GcInfoSize + x.EhInfoSize) }).ToList(); + if (markDownStyleOutput) + { + Console.WriteLine("
"); + Console.WriteLine($"Methods Total Size {methodSize:n0}"); + Console.WriteLine(); + Console.WriteLine("
"); + Console.WriteLine(); + Console.WriteLine("| Name | Size |"); + Console.WriteLine("| --- | --- |"); + foreach (var m in methodsByModules.OrderByDescending(x => x.Sum)) + { + Console.WriteLine($"| {m.Name.Replace("`", "\\`")} | {m.Sum:n0} |"); + } + Console.WriteLine(); + Console.WriteLine("
"); + } + else + { + Console.WriteLine($"// ********** Methods Total Size {methodSize:n0}"); + foreach (var m in methodsByModules.OrderByDescending(x => x.Sum)) + { + Console.WriteLine($"{m.Name,-70} {m.Sum,9:n0}"); + } + Console.WriteLine("// **********"); + } + + Console.WriteLine(); + + string FindNamespace(TypeReference type) + { + var current = type; + while (true) + { + if (!String.IsNullOrEmpty(current.Namespace)) + { + return current.Namespace; + } + + if (current.DeclaringType == null) + { + return current.Name; + } + + current = current.DeclaringType; + } + } + + var methodsByNamespace = methodStats.Select(x => new TypeStats { Type = x.Method.DeclaringType, Size = x.Size + x.GcInfoSize + x.EhInfoSize }).Concat(typeStats).GroupBy(x => FindNamespace(x.Type)).Select(x => new { x.Key, Sum = x.Sum(x => x.Size) }).ToList(); + if (markDownStyleOutput) + { + Console.WriteLine("
"); + Console.WriteLine("Size By Namespace"); + Console.WriteLine(); + Console.WriteLine("
"); + Console.WriteLine(); + Console.WriteLine("| Name | Size |"); + Console.WriteLine("| --- | --- |"); + foreach (var m in methodsByNamespace.OrderByDescending(x => x.Sum)) + { + Console.WriteLine($"| {m.Key.Replace("`", "\\`")} | {m.Sum:n0} |"); + } + Console.WriteLine(); + Console.WriteLine("
"); + } + else + { + Console.WriteLine("// ********** Size By Namespace"); + foreach (var m in methodsByNamespace.OrderByDescending(x => x.Sum)) + { + Console.WriteLine($"{m.Key,-70} {m.Sum,9:n0}"); + } + Console.WriteLine("// **********"); + } + + Console.WriteLine(); + + var blobs = globalType.Methods.First(x => x.Name == "Blobs"); + var blobStats = GetBlobs(blobs).ToList(); + var blobSize = blobStats.Sum(x => x.Size); + if (markDownStyleOutput) + { + Console.WriteLine("
"); + Console.WriteLine($"Blobs Total Size {blobSize:n0}"); + Console.WriteLine(); + Console.WriteLine("
"); + Console.WriteLine(); + Console.WriteLine("| Name | Size |"); + Console.WriteLine("| --- | --- |"); + foreach (var m in blobStats.OrderByDescending(x => x.Size)) + { + Console.WriteLine($"| {m.Name.Replace("`", "\\`")} | {m.Size:n0} |"); + } + Console.WriteLine(); + Console.WriteLine("
"); + } + else + { + Console.WriteLine($"// ********** Blobs Total Size {blobSize:n0}"); + foreach (var m in blobStats.OrderByDescending(x => x.Size)) + { + Console.WriteLine($"{m.Name,-70} {m.Size,9:n0}"); + } + Console.WriteLine("// **********"); + } + } + + public static IEnumerable GetTypes(MethodDefinition types) + { + types.Body.SimplifyMacros(); + var il = types.Body.Instructions; + for (int i = 0; i + 2 < il.Count; i += 2) + { + var type = (TypeReference)il[i + 0].Operand; + var size = (int)il[i + 1].Operand; + yield return new TypeStats + { + Type = type, + Size = size + }; + } + } + + public static IEnumerable GetMethods(MethodDefinition methods) + { + methods.Body.SimplifyMacros(); + var il = methods.Body.Instructions; + for (int i = 0; i + 4 < il.Count; i += 4) + { + var method = (MethodReference)il[i + 0].Operand; + var size = (int)il[i + 1].Operand; + var gcInfoSize = (int)il[i + 2].Operand; + var ehInfoSize = (int)il[i + 3].Operand; + yield return new MethodStats + { + Method = method, + Size = size, + GcInfoSize = gcInfoSize, + EhInfoSize = ehInfoSize + }; + } + } + + public static IEnumerable GetBlobs(MethodDefinition blobs) + { + blobs.Body.SimplifyMacros(); + var il = blobs.Body.Instructions; + for (int i = 0; i + 2 < il.Count; i += 2) + { + var name = (string)il[i + 0].Operand; + var size = (int)il[i + 1].Operand; + yield return new BlobStats + { + Name = name, + Size = size + }; + } + } + } + + public class TypeStats + { + public string MethodName { get; set; } + public TypeReference Type { get; set; } + public int Size { get; set; } + } + + public class MethodStats + { + public MethodReference Method { get; set; } + public int Size { get; set; } + public int GcInfoSize { get; set; } + public int EhInfoSize { get; set; } + } + + public class BlobStats + { + public string Name { get; set; } + public int Size { get; set; } + } +} diff --git a/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj 
b/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj new file mode 100644 index 0000000000..3d4dee6080 --- /dev/null +++ b/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj @@ -0,0 +1,19 @@ + + + exe + true + net7.0 + true + true + true + true + true + true + + + + + + + + diff --git a/test/Npgsql.TrimmingTests/Program.cs b/test/Npgsql.NativeAotTests/Program.cs similarity index 51% rename from test/Npgsql.TrimmingTests/Program.cs rename to test/Npgsql.NativeAotTests/Program.cs index 2d5a226288..59e6daf35b 100644 --- a/test/Npgsql.TrimmingTests/Program.cs +++ b/test/Npgsql.NativeAotTests/Program.cs @@ -4,13 +4,15 @@ var connectionString = Environment.GetEnvironmentVariable("NPGSQL_TEST_DB") ?? "Server=localhost;Username=npgsql_tests;Password=npgsql_tests;Database=npgsql_tests;Timeout=0;Command Timeout=0"; -await using var conn = new NpgsqlConnection(connectionString); +await using var dataSource = NpgsqlDataSource.Create(connectionString); + +await using var conn = dataSource.CreateConnection(); await conn.OpenAsync(); await using var cmd = new NpgsqlCommand("SELECT 'Hello World'", conn); await using var reader = await cmd.ExecuteReaderAsync(); -while (await reader.ReadAsync()) -{ - var value = reader.GetFieldValue(0); - if (value != "Hello World") - throw new Exception($"Got {value} instead of the expected 'Hello World'"); -} +if (!await reader.ReadAsync()) + throw new Exception("Got nothing from the database"); + +var value = reader.GetFieldValue(0); +if (value != "Hello World") + throw new Exception($"Got {value} instead of the expected 'Hello World'"); \ No newline at end of file diff --git a/test/Npgsql.TrimmingTests/Npgsql.TrimmingTests.csproj b/test/Npgsql.TrimmingTests/Npgsql.TrimmingTests.csproj deleted file mode 100644 index de35192b4b..0000000000 --- a/test/Npgsql.TrimmingTests/Npgsql.TrimmingTests.csproj +++ /dev/null @@ -1,14 +0,0 @@ - - - exe - linux-x64 - true - link - - - - - - - - From 93e1ae5471c3366984fe2b71ad8c046b73dee860 Mon Sep 17 
00:00:00 2001 From: Shay Rojansky Date: Mon, 23 Jan 2023 08:54:07 +0100 Subject: [PATCH 050/761] Use preinstalled PG in NativeAOT CI workflow (#4903) Continues #4897 --- .../{nativeAOT.yml => native-aot.yml} | 25 +++++-------------- Npgsql.sln | 12 +++++++++ 2 files changed, 18 insertions(+), 19 deletions(-) rename .github/workflows/{nativeAOT.yml => native-aot.yml} (75%) diff --git a/.github/workflows/nativeAOT.yml b/.github/workflows/native-aot.yml similarity index 75% rename from .github/workflows/nativeAOT.yml rename to .github/workflows/native-aot.yml index 08abaa8b82..e23f8fc80b 100644 --- a/.github/workflows/nativeAOT.yml +++ b/.github/workflows/native-aot.yml @@ -49,30 +49,17 @@ jobs: run: dotnet publish test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj -r linux-x64 -c Release -f net7.0 shell: bash - - name: Start PostgreSQL ${{ matrix.pg_major }} (Linux) - if: startsWith(matrix.os, 'ubuntu') - run: | - # First uninstall any PostgreSQL installed on the image - dpkg-query -W --showformat='${Package}\n' 'postgresql-*' | xargs sudo dpkg -P postgresql - - # Import the repository signing key - wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - - - sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt/ jammy-pgdg main ${{ matrix.pg_major }}" >> /etc/apt/sources.list.d/pgdg.list' - sudo apt-get update -qq - sudo apt-get install -qq postgresql-${{ matrix.pg_major }} - export PGDATA=/etc/postgresql/${{ matrix.pg_major }}/main - - sudo -u postgres psql -c "CREATE USER npgsql_tests SUPERUSER PASSWORD 'npgsql_tests'" - sudo -u postgres psql -c "CREATE DATABASE npgsql_tests" - - sudo pg_ctlcluster ${{ matrix.pg_major }} main restart - # Uncomment the following to SSH into the agent running the build (https://github.com/mxschmitt/action-tmate) #- uses: actions/checkout@v3 #- name: Setup tmate session # uses: mxschmitt/action-tmate@v3 + - name: Start PostgreSQL + run: | + sudo systemctl start postgresql.service + sudo 
-u postgres psql -c "CREATE USER npgsql_tests SUPERUSER PASSWORD 'npgsql_tests'" + sudo -u postgres psql -c "CREATE DATABASE npgsql_tests OWNER npgsql_tests" + - name: Run run: test/Npgsql.NativeAotTests/bin/Release/net7.0/linux-x64/native/Npgsql.NativeAotTests diff --git a/Npgsql.sln b/Npgsql.sln index 9e3ebb7af6..50c6a5c0c8 100644 --- a/Npgsql.sln +++ b/Npgsql.sln @@ -46,12 +46,15 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Github", "Github", "{BA7B6F .github\dependabot.yml = .github\dependabot.yml .github\workflows\codeql-analysis.yml = .github\workflows\codeql-analysis.yml .github\workflows\rich-code-nav.yml = .github\workflows\rich-code-nav.yml + .github\workflows\native-aot.yml = .github\workflows\native-aot.yml EndProjectSection EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Npgsql.DependencyInjection", "src\Npgsql.DependencyInjection\Npgsql.DependencyInjection.csproj", "{B58E12EB-E43D-4D77-894E-5157D2269836}" EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Npgsql.DependencyInjection.Tests", "test\Npgsql.DependencyInjection.Tests\Npgsql.DependencyInjection.Tests.csproj", "{EB2530FC-69F7-4DCB-A8B3-3671A157ED32}" EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Npgsql.NativeAotTests", "test\Npgsql.NativeAotTests\Npgsql.NativeAotTests.csproj", "{20F2E9D6-A69E-4BAE-9236-574B0AA59139}" +EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU @@ -172,6 +175,14 @@ Global {EB2530FC-69F7-4DCB-A8B3-3671A157ED32}.Release|Any CPU.Build.0 = Release|Any CPU {EB2530FC-69F7-4DCB-A8B3-3671A157ED32}.Release|x86.ActiveCfg = Release|Any CPU {EB2530FC-69F7-4DCB-A8B3-3671A157ED32}.Release|x86.Build.0 = Release|Any CPU + {20F2E9D6-A69E-4BAE-9236-574B0AA59139}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {20F2E9D6-A69E-4BAE-9236-574B0AA59139}.Debug|Any CPU.Build.0 = Debug|Any CPU + {20F2E9D6-A69E-4BAE-9236-574B0AA59139}.Debug|x86.ActiveCfg = Debug|Any CPU + 
{20F2E9D6-A69E-4BAE-9236-574B0AA59139}.Debug|x86.Build.0 = Debug|Any CPU + {20F2E9D6-A69E-4BAE-9236-574B0AA59139}.Release|Any CPU.ActiveCfg = Release|Any CPU + {20F2E9D6-A69E-4BAE-9236-574B0AA59139}.Release|Any CPU.Build.0 = Release|Any CPU + {20F2E9D6-A69E-4BAE-9236-574B0AA59139}.Release|x86.ActiveCfg = Release|Any CPU + {20F2E9D6-A69E-4BAE-9236-574B0AA59139}.Release|x86.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE @@ -192,6 +203,7 @@ Global {BA7B6F53-D24D-45AC-927A-266857EA8D1E} = {004A2E0F-D34A-44D4-8DF0-D2BC63B57073} {B58E12EB-E43D-4D77-894E-5157D2269836} = {8537E50E-CF7F-49CB-B4EF-3E2A1B11F050} {EB2530FC-69F7-4DCB-A8B3-3671A157ED32} = {ED612DB1-AB32-4603-95E7-891BACA71C39} + {20F2E9D6-A69E-4BAE-9236-574B0AA59139} = {ED612DB1-AB32-4603-95E7-891BACA71C39} EndGlobalSection GlobalSection(ExtensibilityGlobals) = postSolution SolutionGuid = {C90AEECD-DB4C-4BE6-B506-16A449852FB8} From 7c40906c88115243d827cd763808355722d6c782 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Mon, 23 Jan 2023 22:12:15 +0100 Subject: [PATCH 051/761] Remove geometry type regex parsing logic (#4902) And also pgpass-related regex use Closes #4896 --- src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs | 103 +++++----- src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs | 186 ++++-------------- src/Npgsql/PgPassFile.cs | 53 +++-- src/Npgsql/PublicAPI.Unshipped.txt | 7 + src/Npgsql/Util/PGUtil.cs | 6 - test/Npgsql.Tests/PgPassEntryTests.cs | 3 +- test/Npgsql.Tests/PgPassFileTests.cs | 3 +- .../SnakeCaseNameTranslatorTests.cs | 5 +- test/Npgsql.Tests/TypesTests.cs | 3 +- 9 files changed, 139 insertions(+), 230 deletions(-) diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs b/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs index ddc8b712ec..e83b69edad 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs @@ -253,18 +253,27 @@ public static NpgsqlTsQuery Parse(string value) if (pos >= value.Length) goto Finish; 
ch = value[pos]; - if (ch == '*') + switch (ch) + { + case '*': ((NpgsqlTsQueryLexeme)valStack.Peek()).IsPrefixSearch = true; - else if (ch == 'a' || ch == 'A') + break; + case 'a' or 'A': ((NpgsqlTsQueryLexeme)valStack.Peek()).Weights |= NpgsqlTsQueryLexeme.Weight.A; - else if (ch == 'b' || ch == 'B') + break; + case 'b' or 'B': ((NpgsqlTsQueryLexeme)valStack.Peek()).Weights |= NpgsqlTsQueryLexeme.Weight.B; - else if (ch == 'c' || ch == 'C') + break; + case 'c' or 'C': ((NpgsqlTsQueryLexeme)valStack.Peek()).Weights |= NpgsqlTsQueryLexeme.Weight.C; - else if (ch == 'd' || ch == 'D') + break; + case 'd' or 'D': ((NpgsqlTsQueryLexeme)valStack.Peek()).Weights |= NpgsqlTsQueryLexeme.Weight.D; - else + break; + default: goto PushedVal; + } + pos++; goto InWeightInfo; @@ -338,12 +347,12 @@ public static NpgsqlTsQuery Parse(string value) } /// - public override int GetHashCode() => - throw new NotImplementedException(); + public override int GetHashCode() + => throw new NotSupportedException("Must be overridden"); /// - public override bool Equals(object? obj) => - obj is NpgsqlTsQuery query && query.Equals(this); + public override bool Equals(object? obj) + => obj is NpgsqlTsQuery query && query.Equals(this); /// /// Returns a value indicating whether this instance and a specified object represent the same value. @@ -358,9 +367,8 @@ public override bool Equals(object? obj) => /// The first object to compare. /// The second object to compare. /// if and are equal; otherwise, . - public static bool operator ==(NpgsqlTsQuery? left, NpgsqlTsQuery? right) => - left is null ? right is null : left.Equals(right); - + public static bool operator ==(NpgsqlTsQuery? left, NpgsqlTsQuery? right) + => left is null ? right is null : left.Equals(right); /// /// Indicates whether the values of two specified objects are not equal. @@ -368,8 +376,8 @@ public override bool Equals(object? obj) => /// The first object to compare. /// The second object to compare. 
/// if and are not equal; otherwise, . - public static bool operator !=(NpgsqlTsQuery? left, NpgsqlTsQuery? right) => - left is null ? right is not null : !left.Equals(right); + public static bool operator !=(NpgsqlTsQuery? left, NpgsqlTsQuery? right) + => left is null ? right is not null : !left.Equals(right); } readonly struct NpgsqlTsQueryOperator @@ -506,15 +514,15 @@ internal override void WriteCore(StringBuilder sb, bool first = false) } /// - public override bool Equals(NpgsqlTsQuery? other) => - other is NpgsqlTsQueryLexeme lexeme && - lexeme.Text == Text && - lexeme.Weights == Weights && - lexeme.IsPrefixSearch == IsPrefixSearch; + public override bool Equals(NpgsqlTsQuery? other) + => other is NpgsqlTsQueryLexeme lexeme && + lexeme.Text == Text && + lexeme.Weights == Weights && + lexeme.IsPrefixSearch == IsPrefixSearch; /// - public override int GetHashCode() => - HashCode.Combine(Text, Weights, IsPrefixSearch); + public override int GetHashCode() + => HashCode.Combine(Text, Weights, IsPrefixSearch); } /// @@ -555,13 +563,12 @@ internal override void WriteCore(StringBuilder sb, bool first = false) } /// - public override bool Equals(NpgsqlTsQuery? other) => - other is NpgsqlTsQueryNot not && - not.Child == Child; + public override bool Equals(NpgsqlTsQuery? other) + => other is NpgsqlTsQueryNot not && not.Child == Child; /// - public override int GetHashCode() => - Child?.GetHashCode() ?? 0; + public override int GetHashCode() + => Child?.GetHashCode() ?? 0; } /// @@ -611,14 +618,12 @@ internal override void WriteCore(StringBuilder sb, bool first = false) } /// - public override bool Equals(NpgsqlTsQuery? other) => - other is NpgsqlTsQueryAnd and && - and.Left == Left && - and.Right == Right; + public override bool Equals(NpgsqlTsQuery? 
other) + => other is NpgsqlTsQueryAnd and && and.Left == Left && and.Right == Right; /// - public override int GetHashCode() => - HashCode.Combine(Left, Right); + public override int GetHashCode() + => HashCode.Combine(Left, Right); } /// @@ -649,14 +654,12 @@ internal override void WriteCore(StringBuilder sb, bool first = false) } /// - public override bool Equals(NpgsqlTsQuery? other) => - other is NpgsqlTsQueryOr or && - or.Left == Left && - or.Right == Right; + public override bool Equals(NpgsqlTsQuery? other) + => other is NpgsqlTsQueryOr or && or.Left == Left && or.Right == Right; /// - public override int GetHashCode() => - HashCode.Combine(Left, Right); + public override int GetHashCode() + => HashCode.Combine(Left, Right); } /// @@ -708,15 +711,15 @@ internal override void WriteCore(StringBuilder sb, bool first = false) } /// - public override bool Equals(NpgsqlTsQuery? other) => - other is NpgsqlTsQueryFollowedBy followedBy && - followedBy.Left == Left && - followedBy.Right == Right && - followedBy.Distance == Distance; + public override bool Equals(NpgsqlTsQuery? other) + => other is NpgsqlTsQueryFollowedBy followedBy && + followedBy.Left == Left && + followedBy.Right == Right && + followedBy.Distance == Distance; /// - public override int GetHashCode() => - HashCode.Combine(Left, Right, Distance); + public override int GetHashCode() + => HashCode.Combine(Left, Right, Distance); } /// @@ -732,10 +735,10 @@ public NpgsqlTsQueryEmpty() : base(NodeKind.Empty) {} internal override void WriteCore(StringBuilder sb, bool first = false) { } /// - public override bool Equals(NpgsqlTsQuery? other) => - other is NpgsqlTsQueryEmpty; + public override bool Equals(NpgsqlTsQuery? 
other) + => other is NpgsqlTsQueryEmpty; /// - public override int GetHashCode() => - Kind.GetHashCode(); + public override int GetHashCode() + => Kind.GetHashCode(); } \ No newline at end of file diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs b/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs index 4a6c4d112b..cc68603b9c 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs @@ -1,13 +1,10 @@ using System; using System.Collections; using System.Collections.Generic; -using System.Diagnostics; using System.Globalization; using System.Net; using System.Net.Sockets; using System.Text; -using System.Text.RegularExpressions; -using Npgsql.Util; #pragma warning disable 1591 @@ -22,8 +19,6 @@ namespace NpgsqlTypes; /// public struct NpgsqlPoint : IEquatable { - static readonly Regex Regex = new(@"\((-?\d+.?\d*),(-?\d+.?\d*)\)"); - public double X { get; set; } public double Y { get; set; } @@ -46,17 +41,7 @@ public override bool Equals(object? obj) public static bool operator !=(NpgsqlPoint x, NpgsqlPoint y) => !(x == y); public override int GetHashCode() - => X.GetHashCode() ^ PGUtil.RotateShift(Y.GetHashCode(), PGUtil.BitsInInt / 2); - - public static NpgsqlPoint Parse(string s) - { - var m = Regex.Match(s); - if (!m.Success) { - throw new FormatException("Not a valid point: " + s); - } - return new NpgsqlPoint(double.Parse(m.Groups[1].ToString(), NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat), - double.Parse(m.Groups[2].ToString(), NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat)); - } + => HashCode.Combine(X, Y); public override string ToString() => string.Format(CultureInfo.InvariantCulture, "({0},{1})", X, Y); @@ -70,8 +55,6 @@ public override string ToString() /// public struct NpgsqlLine : IEquatable { - static readonly Regex Regex = new(@"\{(-?\d+.?\d*),(-?\d+.?\d*),(-?\d+.?\d*)\}"); - public double A { get; set; } public double B { get; set; } public double C { get; set; } @@ -84,24 +67,14 @@ public 
NpgsqlLine(double a, double b, double c) C = c; } - public static NpgsqlLine Parse(string s) - { - var m = Regex.Match(s); - if (!m.Success) - throw new FormatException("Not a valid line: " + s); - return new NpgsqlLine( - double.Parse(m.Groups[1].ToString(), NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat), - double.Parse(m.Groups[2].ToString(), NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat), - double.Parse(m.Groups[3].ToString(), NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat) - ); - } - public override string ToString() => string.Format(CultureInfo.InvariantCulture, "{{{0},{1},{2}}}", A, B, C); - public override int GetHashCode() => A.GetHashCode() * B.GetHashCode() * C.GetHashCode(); + public override int GetHashCode() + => HashCode.Combine(A, B, C); - public bool Equals(NpgsqlLine other) => A == other.A && B == other.B && C == other.C; + public bool Equals(NpgsqlLine other) + => A == other.A && B == other.B && C == other.C; public override bool Equals(object? obj) => obj is NpgsqlLine line && Equals(line); @@ -115,8 +88,6 @@ public override bool Equals(object? 
obj) /// public struct NpgsqlLSeg : IEquatable { - static readonly Regex Regex = new(@"\[\((-?\d+.?\d*),(-?\d+.?\d*)\),\((-?\d+.?\d*),(-?\d+.?\d*)\)\]"); - public NpgsqlPoint Start { get; set; } public NpgsqlPoint End { get; set; } @@ -130,34 +101,17 @@ public NpgsqlLSeg(NpgsqlPoint start, NpgsqlPoint end) public NpgsqlLSeg(double startx, double starty, double endx, double endy) : this() { Start = new NpgsqlPoint(startx, starty); - End = new NpgsqlPoint(endx, endy); - } - - public static NpgsqlLSeg Parse(string s) - { - var m = Regex.Match(s); - if (!m.Success) { - throw new FormatException("Not a valid line: " + s); - } - return new NpgsqlLSeg( - double.Parse(m.Groups[1].ToString(), NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat), - double.Parse(m.Groups[2].ToString(), NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat), - double.Parse(m.Groups[3].ToString(), NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat), - double.Parse(m.Groups[4].ToString(), NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat) - ); - + End = new NpgsqlPoint(endx, endy); } public override string ToString() => string.Format(CultureInfo.InvariantCulture, "[{0},{1}]", Start, End); public override int GetHashCode() - => Start.X.GetHashCode() ^ - PGUtil.RotateShift(Start.Y.GetHashCode(), PGUtil.BitsInInt / 4) ^ - PGUtil.RotateShift(End.X.GetHashCode(), PGUtil.BitsInInt / 2) ^ - PGUtil.RotateShift(End.Y.GetHashCode(), PGUtil.BitsInInt * 3 / 4); + => HashCode.Combine(Start.X, Start.Y, End.X, End.Y); - public bool Equals(NpgsqlLSeg other) => Start == other.Start && End == other.End; + public bool Equals(NpgsqlLSeg other) + => Start == other.Start && End == other.End; public override bool Equals(object? obj) => obj is NpgsqlLSeg seg && Equals(seg); @@ -174,8 +128,6 @@ public override bool Equals(object? 
obj) /// public struct NpgsqlBox : IEquatable { - static readonly Regex Regex = new(@"\((-?\d+.?\d*),(-?\d+.?\d*)\),\((-?\d+.?\d*),(-?\d+.?\d*)\)"); - public NpgsqlPoint UpperRight { get; set; } public NpgsqlPoint LowerLeft { get; set; } @@ -197,7 +149,8 @@ public NpgsqlBox(double top, double right, double bottom, double left) public bool IsEmpty => Width == 0 || Height == 0; - public bool Equals(NpgsqlBox other) => UpperRight == other.UpperRight && LowerLeft == other.LowerLeft; + public bool Equals(NpgsqlBox other) + => UpperRight == other.UpperRight && LowerLeft == other.LowerLeft; public override bool Equals(object? obj) => obj is NpgsqlBox box && Equals(box); @@ -207,22 +160,8 @@ public override bool Equals(object? obj) public override string ToString() => string.Format(CultureInfo.InvariantCulture, "{0},{1}", UpperRight, LowerLeft); - public static NpgsqlBox Parse(string s) - { - var m = Regex.Match(s); - return new NpgsqlBox( - new NpgsqlPoint(double.Parse(m.Groups[1].ToString(), NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat), - double.Parse(m.Groups[2].ToString(), NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat)), - new NpgsqlPoint(double.Parse(m.Groups[3].ToString(), NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat), - double.Parse(m.Groups[4].ToString(), NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat)) - ); - } - public override int GetHashCode() - => Top.GetHashCode() ^ - PGUtil.RotateShift(Right.GetHashCode(), PGUtil.BitsInInt / 4) ^ - PGUtil.RotateShift(Bottom.GetHashCode(), PGUtil.BitsInInt / 2) ^ - PGUtil.RotateShift(LowerLeft.GetHashCode(), PGUtil.BitsInInt * 3 / 4); + => HashCode.Combine(Top, Right, Bottom, LowerLeft); } /// @@ -297,15 +236,16 @@ public override bool Equals(object? obj) public override int GetHashCode() { - var ret = 266370105;//seed with something other than zero to make paths of all zeros hash differently. 
+ var hashCode = new HashCode(); + hashCode.Add(Open); + foreach (var point in this) { - //The ideal amount to shift each value is one that would evenly spread it throughout - //the resultant bytes. Using the current result % 32 is essentially using a random value - //but one that will be the same on subsequent calls. - ret ^= PGUtil.RotateShift(point.GetHashCode(), ret % PGUtil.BitsInInt); + hashCode.Add(point.X); + hashCode.Add(point.Y); } - return Open ? ret : -ret; + + return hashCode.ToHashCode(); } public override string ToString() @@ -323,28 +263,6 @@ public override string ToString() sb.Append(Open ? ']' : ')'); return sb.ToString(); } - - public static NpgsqlPath Parse(string s) - { - var open = s[0] switch - { - '[' => true, - '(' => false, - _ => throw new Exception("Invalid path string: " + s) - }; - Debug.Assert(s[s.Length - 1] == (open ? ']' : ')')); - var result = new NpgsqlPath(open); - var i = 1; - while (true) - { - var i2 = s.IndexOf(')', i); - result.Add(NpgsqlPoint.Parse(s.Substring(i, i2 - i + 1))); - if (s[i2 + 1] != ',') - break; - i = i2 + 2; - } - return result; - } } /// @@ -355,16 +273,12 @@ public struct NpgsqlPolygon : IList, IEquatable readonly List _points; public NpgsqlPolygon(IEnumerable points) - { - _points = new List(points); - } + => _points = new List(points); public NpgsqlPolygon(params NpgsqlPoint[] points) : this ((IEnumerable) points) {} public NpgsqlPolygon(int capacity) - { - _points = new List(capacity); - } + => _points = new List(capacity); public NpgsqlPoint this[int index] { @@ -407,30 +321,15 @@ public override bool Equals(object? obj) public override int GetHashCode() { - var ret = 266370105;//seed with something other than zero to make paths of all zeros hash differently. + var hashCode = new HashCode(); + foreach (var point in this) { - //The ideal amount to shift each value is one that would evenly spread it throughout - //the resultant bytes. 
Using the current result % 32 is essentially using a random value - //but one that will be the same on subsequent calls. - ret ^= PGUtil.RotateShift(point.GetHashCode(), ret % PGUtil.BitsInInt); + hashCode.Add(point.X); + hashCode.Add(point.Y); } - return ret; - } - public static NpgsqlPolygon Parse(string s) - { - var points = new List(); - var i = 1; - while (true) - { - var i2 = s.IndexOf(')', i); - points.Add(NpgsqlPoint.Parse(s.Substring(i, i2 - i + 1))); - if (s[i2 + 1] != ',') - break; - i = i2 + 2; - } - return new NpgsqlPolygon(points); + return hashCode.ToHashCode(); } public override string ToString() @@ -456,8 +355,6 @@ public override string ToString() /// public struct NpgsqlCircle : IEquatable { - static readonly Regex Regex = new(@"<\((-?\d+.?\d*),(-?\d+.?\d*)\),(\d+.?\d*)>"); - public double X { get; set; } public double Y { get; set; } public double Radius { get; set; } @@ -480,11 +377,7 @@ public NpgsqlCircle(double x, double y, double radius) : this() public NpgsqlPoint Center { get => new(X, Y); - set - { - X = value.X; - Y = value.Y; - } + set => (X, Y) = (value.X, value.Y); } // ReSharper disable CompareOfFloatsByEqualityOperator @@ -495,19 +388,6 @@ public bool Equals(NpgsqlCircle other) public override bool Equals(object? 
obj) => obj is NpgsqlCircle circle && Equals(circle); - public static NpgsqlCircle Parse(string s) - { - var m = Regex.Match(s); - if (!m.Success) - throw new FormatException("Not a valid circle: " + s); - - return new NpgsqlCircle( - double.Parse(m.Groups[1].ToString(), NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat), - double.Parse(m.Groups[2].ToString(), NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat), - double.Parse(m.Groups[3].ToString(), NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat) - ); - } - public override string ToString() => string.Format(CultureInfo.InvariantCulture, "<({0},{1}),{2}>", X, Y, Radius); @@ -515,7 +395,7 @@ public override string ToString() public static bool operator !=(NpgsqlCircle x, NpgsqlCircle y) => !(x == y); public override int GetHashCode() - => X.GetHashCode() * Y.GetHashCode() * Radius.GetHashCode(); + => HashCode.Combine(X, Y, Radius); } /// @@ -554,9 +434,11 @@ public NpgsqlInet(string addr) if (addr.IndexOf('/') > 0) { var addrbits = addr.Split('/'); - if (addrbits.GetUpperBound(0) != 1) { + if (addrbits.GetUpperBound(0) != 1) + { throw new FormatException("Invalid number of parts in CIDR specification"); } + Address = IPAddress.Parse(addrbits[0]); Netmask = int.Parse(addrbits[1]); } @@ -569,11 +451,12 @@ public NpgsqlInet(string addr) public override string ToString() { - if ((Address.AddressFamily == AddressFamily.InterNetwork && Netmask == 32) || + if ((Address.AddressFamily == AddressFamily.InterNetwork && Netmask == 32) || (Address.AddressFamily == AddressFamily.InterNetworkV6 && Netmask == 128)) { return Address.ToString(); } + return $"{Address}/{Netmask}"; } @@ -589,7 +472,6 @@ public static IPAddress ToIPAddress(NpgsqlInet inet) public static NpgsqlInet ToNpgsqlInet(IPAddress? ip) => ip is null ? default : new NpgsqlInet(ip); - //=> ReferenceEquals(ip, null) ? 
default : new NpgsqlInet(ip); public static implicit operator NpgsqlInet(IPAddress ip) => ToNpgsqlInet(ip); @@ -605,7 +487,7 @@ public override bool Equals(object? obj) => obj is NpgsqlInet inet && Equals(inet); public override int GetHashCode() - => PGUtil.RotateShift(Address.GetHashCode(), Netmask%32); + => HashCode.Combine(Address, Netmask); public static bool operator ==(NpgsqlInet x, NpgsqlInet y) => x.Equals(y); public static bool operator !=(NpgsqlInet x, NpgsqlInet y) => !(x == y); diff --git a/src/Npgsql/PgPassFile.cs b/src/Npgsql/PgPassFile.cs index 36adf68325..2b5df681fd 100644 --- a/src/Npgsql/PgPassFile.cs +++ b/src/Npgsql/PgPassFile.cs @@ -2,7 +2,7 @@ using System.Collections.Generic; using System.IO; using System.Linq; -using System.Text.RegularExpressions; +using System.Text; namespace Npgsql; @@ -56,8 +56,6 @@ public PgPassFile(string fileName) /// internal sealed class Entry { - const string PgPassWildcard = "*"; - #region Fields and Properties /// @@ -110,24 +108,53 @@ internal sealed class Entry /// Entry is not formatted as hostname:port:database:username:password or non-wildcard port is not a number internal static Entry Parse(string serializedEntry) { - var parts = Regex.Split(serializedEntry, @"(?(5); - var processedParts = parts - .Select(part => part.Replace("\\:", ":").Replace("\\\\", "\\")) // unescape any escaped characters - .Select(part => part == PgPassWildcard ? null : part) - .ToArray(); + var builder = new StringBuilder(); + for (var pos = 0; pos < serializedEntry.Length; pos++) + { + var c = serializedEntry[pos]; + + switch (c) + { + case '\\' when pos < serializedEntry.Length - 1: + // Strip backslash before colon or backslash, otherwise preserve it + c = serializedEntry[++pos]; + if (c is not (':' or '\\')) + { + builder.Append('\\'); + } + + builder.Append(c); + continue; + + case ':': + var part = builder.ToString(); + parts.Add(part == "*" ? 
null : part); + builder.Clear(); + continue; + + default: + builder.Append(c); + continue; + } + } + + var lastPart = builder.ToString(); + parts.Add(lastPart == "*" ? null : lastPart); + + if (parts.Count != 5) + throw new FormatException("pgpass entry was not well-formed. Please ensure all non-comment entries are formatted as hostname:port:database:username:password. If colon is included, it must be escaped like \\:."); int? port = null; - if (processedParts[1] != null) + if (parts[1] != null) { - if (!int.TryParse(processedParts[1], out var tempPort)) + if (!int.TryParse(parts[1], out var tempPort)) throw new FormatException("pgpass entry was not formatted correctly. Port must be a valid integer."); port = tempPort; } - return new Entry(processedParts[0], port, processedParts[2], processedParts[3], processedParts[4]); + return new Entry(parts[0], port, parts[2], parts[3], parts[4]); } #endregion diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index a4f57b62f5..d8acd7915a 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -1,3 +1,10 @@ #nullable enable Npgsql.NpgsqlJsonExtensions static Npgsql.NpgsqlJsonExtensions.UseSystemTextJson(this Npgsql.TypeMapping.INpgsqlTypeMapper! mapper, System.Text.Json.JsonSerializerOptions? serializerOptions = null, System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +*REMOVED*static NpgsqlTypes.NpgsqlBox.Parse(string! s) -> NpgsqlTypes.NpgsqlBox +*REMOVED*static NpgsqlTypes.NpgsqlCircle.Parse(string! s) -> NpgsqlTypes.NpgsqlCircle +*REMOVED*static NpgsqlTypes.NpgsqlLine.Parse(string! s) -> NpgsqlTypes.NpgsqlLine +*REMOVED*static NpgsqlTypes.NpgsqlLSeg.Parse(string! s) -> NpgsqlTypes.NpgsqlLSeg +*REMOVED*static NpgsqlTypes.NpgsqlPath.Parse(string! s) -> NpgsqlTypes.NpgsqlPath +*REMOVED*static NpgsqlTypes.NpgsqlPoint.Parse(string! 
s) -> NpgsqlTypes.NpgsqlPoint +*REMOVED*static NpgsqlTypes.NpgsqlPolygon.Parse(string! s) -> NpgsqlTypes.NpgsqlPolygon diff --git a/src/Npgsql/Util/PGUtil.cs b/src/Npgsql/Util/PGUtil.cs index 2f68598157..81a81d586b 100644 --- a/src/Npgsql/Util/PGUtil.cs +++ b/src/Npgsql/Util/PGUtil.cs @@ -102,8 +102,6 @@ static class PGUtil internal static readonly UTF8Encoding UTF8Encoding = new(false, true); internal static readonly UTF8Encoding RelaxedUTF8Encoding = new(false, false); - internal const int BitsInInt = sizeof(int) * 8; - internal static void ValidateBackendMessageCode(BackendMessageCode code) { switch (code) @@ -143,10 +141,6 @@ static void ThrowUnknownMessageCode(BackendMessageCode code) => ThrowHelper.ThrowNpgsqlException($"Unknown message code: {code}"); } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal static int RotateShift(int val, int shift) - => (val << shift) | (val >> (BitsInInt - shift)); - internal static readonly Task TrueTask = Task.FromResult(true); internal static readonly Task FalseTask = Task.FromResult(false); } diff --git a/test/Npgsql.Tests/PgPassEntryTests.cs b/test/Npgsql.Tests/PgPassEntryTests.cs index 53fafd348d..9db518aabc 100644 --- a/test/Npgsql.Tests/PgPassEntryTests.cs +++ b/test/Npgsql.Tests/PgPassEntryTests.cs @@ -4,7 +4,6 @@ namespace Npgsql.Tests; -[TestFixture] public class PgPassEntryTests { [Test] @@ -98,4 +97,4 @@ public void Match_true_for_null_query() Assert.That(isMatch, Is.True); } -} \ No newline at end of file +} diff --git a/test/Npgsql.Tests/PgPassFileTests.cs b/test/Npgsql.Tests/PgPassFileTests.cs index dd076b5dec..593e522e89 100644 --- a/test/Npgsql.Tests/PgPassFileTests.cs +++ b/test/Npgsql.Tests/PgPassFileTests.cs @@ -4,7 +4,6 @@ namespace Npgsql.Tests; -[TestFixture] public class PgPassFileTests { [Test] @@ -51,4 +50,4 @@ public void DeleteTestFile() if (File.Exists(_pgpassFile)) File.Delete(_pgpassFile); } -} \ No newline at end of file +} diff --git 
a/test/Npgsql.Tests/SnakeCaseNameTranslatorTests.cs b/test/Npgsql.Tests/SnakeCaseNameTranslatorTests.cs index 7a43f70ad1..c61d75c628 100644 --- a/test/Npgsql.Tests/SnakeCaseNameTranslatorTests.cs +++ b/test/Npgsql.Tests/SnakeCaseNameTranslatorTests.cs @@ -5,7 +5,6 @@ namespace Npgsql.Tests; -[TestFixture] public class SnakeCaseNameTranslatorTests { [Test, TestCaseSource(typeof(SnakeCaseNameTranslatorTests), nameof(TestCases))] @@ -47,6 +46,6 @@ public string TranslateMemberName(string value, bool legacyMode) }.SelectMany(x => new[] { new TestCaseData(x.value, true).Returns(x.legacyResult), - new TestCaseData(x.value, false).Returns(x.result), + new TestCaseData(x.value, false).Returns(x.result), }); -} \ No newline at end of file +} diff --git a/test/Npgsql.Tests/TypesTests.cs b/test/Npgsql.Tests/TypesTests.cs index 690250aa68..2e2c95400c 100644 --- a/test/Npgsql.Tests/TypesTests.cs +++ b/test/Npgsql.Tests/TypesTests.cs @@ -11,7 +11,6 @@ namespace Npgsql.Tests; /// /// Tests NpgsqlTypes.* independent of a database /// -[TestFixture] public class TypesTests { [Test] @@ -207,4 +206,4 @@ public void NpgsqlInet() #pragma warning disable CS8625 } #pragma warning restore 618 -} \ No newline at end of file +} From 218a160e396636f2c56ca7215d264ce644b1464f Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Sat, 28 Jan 2023 21:47:59 +0300 Subject: [PATCH 052/761] Fix cancellation being able to cancel prepended queries (#4907) Fixes #4906 --- src/Npgsql/Internal/NpgsqlConnector.cs | 250 ++++++++++++++++--------- src/Npgsql/NpgsqlCommand.cs | 4 +- test/Npgsql.Tests/CommandTests.cs | 74 ++++++++ 3 files changed, 232 insertions(+), 96 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index ddad186f7d..e5013d9ee9 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -147,6 +147,13 @@ internal string InferredUserName /// internal int PendingPrependedResponses { get; set; } + /// + 
/// A ManualResetEventSlim used to make sure a cancellation request doesn't run + /// while we're reading responses for the prepended query + /// as we can't gracefully handle their cancellation. + /// + readonly ManualResetEventSlim ReadingPrependedMessagesMRE = new(initialState: true); + internal NpgsqlDataReader? CurrentReader; internal PreparedStatementManager PreparedStatementManager { get; } @@ -218,7 +225,20 @@ internal void FlagAsWritableForMultiplexing() /// cancellation is delivered. This reduces the chance that a cancellation meant for a previous /// command will accidentally cancel a later one, see #615. /// - internal object CancelLock { get; } + object CancelLock { get; } = new(); + + /// + /// A lock that's taken to make sure no other concurrent operation is running. + /// Break takes it to set the state of the connector. + /// Anyone else should immediately check the state and exit + /// if the connector is closed. + /// + object SyncObj { get; } = new(); + + /// + /// A lock that's used to wait for the Cleanup to complete while breaking the connection. + /// + object CleanupLock { get; } = new(); readonly bool _isKeepAliveEnabled; readonly Timer? _keepAliveTimer; @@ -353,8 +373,6 @@ internal NpgsqlConnector(NpgsqlDataSource dataSource, NpgsqlConnection conn) Settings = dataSource.Settings; PostgresParameters = new Dictionary(); - CancelLock = new object(); - _isKeepAliveEnabled = Settings.KeepAlive > 0; if (_isKeepAliveEnabled) _keepAliveTimer = new Timer(PerformKeepAlive, null, Timeout.Infinite, Timeout.Infinite); @@ -511,7 +529,7 @@ internal async Task Open(NpgsqlTimeout timeout, bool async, CancellationToken ca // Start the keep alive mechanism to work by scheduling the timer. // Otherwise, it doesn't work for cases when no query executed during // the connection lifetime in case of a new connector. 
- lock (this) + lock (SyncObj) { var keepAlive = Settings.KeepAlive * 1000; _keepAliveTimer!.Change(keepAlive, keepAlive); @@ -1292,9 +1310,14 @@ internal ValueTask ReadMessage(bool async, DataRowLoadingMode d connector.ReadBuffer.Timeout = TimeSpan.FromMilliseconds(connector.InternalCommandTimeout); for (; connector.PendingPrependedResponses > 0; connector.PendingPrependedResponses--) await ReadMessageLong(connector, async, DataRowLoadingMode.Skip, readingNotifications: false, isReadingPrependedMessage: true); + // We've read all the prepended response. + // Allow cancellation to proceed. + connector.ReadingPrependedMessagesMRE.Set(); } - catch (PostgresException e) + catch (Exception e) { + // Prepended queries should never fail. + // If they do, we're not even going to attempt to salvage the connector. throw connector.Break(e); } } @@ -1671,18 +1694,39 @@ static RemoteCertificateValidationCallback SslRootValidation(string certRootPath #region Cancel + internal void ResetCancellation() + { + // If a cancellation is in progress, wait for it to "complete" before proceeding (#615) + lock (CancelLock) + { + if (PendingPrependedResponses > 0) + ReadingPrependedMessagesMRE.Reset(); + Debug.Assert(ReadingPrependedMessagesMRE.IsSet || PendingPrependedResponses > 0); + } + } + internal void PerformUserCancellation() { var connection = Connection; if (connection is null || connection.ConnectorBindingScope == ConnectorBindingScope.Reader) return; - // There's a subtle race condition where cancellation may be happening just as Break is called. Break takes the connector lock, and - // then ends the user action; this disposes the cancellation token registration, which waits until the cancellation callback - // completes. But the callback needs to take the connector lock below, which led to a deadlock (#4654). - // As a result, Break takes CancelLock, and we abort the cancellation attempt immediately if we can't get it here. 
- if (!Monitor.TryEnter(CancelLock)) - return; + // Take the lock first to make sure there is no concurrent Break. + // We should be safe to take it as Break only take it to set the state. + lock (SyncObj) + { + // The connector is dead, exit gracefully. + if (!IsConnected) + return; + // The connector is still alive, take the CancelLock before exiting SingleUseLock. + // If a break will happen after, it's going to wait for the cancellation to complete. + Monitor.Enter(CancelLock); + } + + // Wait before we've read all responses for the prepended queries + // as we can't gracefully handle their cancellation. + // Break makes sure that it's going to be set even if we fail while reading them. + ReadingPrependedMessagesMRE.Wait(); try { @@ -1695,28 +1739,18 @@ internal void PerformUserCancellation() { if (cancellationTimeout > 0) { - lock (this) - { - if (!IsConnected) - return; - UserTimeout = cancellationTimeout; - ReadBuffer.Timeout = TimeSpan.FromMilliseconds(cancellationTimeout); - ReadBuffer.Cts.CancelAfter(cancellationTimeout); - } + UserTimeout = cancellationTimeout; + ReadBuffer.Timeout = TimeSpan.FromMilliseconds(cancellationTimeout); + ReadBuffer.Cts.CancelAfter(cancellationTimeout); } return; } } - lock (this) - { - if (!IsConnected) - return; - UserTimeout = -1; - ReadBuffer.Timeout = _cancelImmediatelyTimeout; - ReadBuffer.Cts.Cancel(); - } + UserTimeout = -1; + ReadBuffer.Timeout = _cancelImmediatelyTimeout; + ReadBuffer.Cts.Cancel(); } finally { @@ -1791,8 +1825,7 @@ void DoCancelRequest(int backendProcessId, int backendSecretKey) } finally { - lock (this) - FullCleanup(); + FullCleanup(); } } @@ -1898,7 +1931,7 @@ copyOperation is NpgsqlCopyTextWriter || // very unlikely to block (plus locking would need to be worked out) internal void Close() { - lock (this) + lock (SyncObj) { if (IsReady) { @@ -1927,9 +1960,10 @@ internal void Close() } State = ConnectorState.Closed; - FullCleanup(); - LogMessages.ClosedPhysicalConnection(ConnectionLogger, Host, 
Port, Database, UserFacingConnectionString, Id); } + + FullCleanup(); + LogMessages.ClosedPhysicalConnection(ConnectionLogger, Host, Port, Database, UserFacingConnectionString, Id); } internal bool TryRemovePendingEnlistedConnector(Transaction transaction) @@ -1958,23 +1992,24 @@ internal Exception Break(Exception reason) { Debug.Assert(!IsClosed); - // See PerformUserCancellation on why we take CancelLock - lock (CancelLock) - lock (this) + Monitor.Enter(SyncObj); + + if (State == ConnectorState.Broken) { - if (State == ConnectorState.Broken) - return reason; + // We're already broken. + // Exit SingleUseLock to unblock other threads (like cancellation). + Monitor.Exit(SyncObj); + // Wait for the break to complete before going forward. + lock (CleanupLock) { } + return reason; + } - // Note we only set the cluster to offline and clear the pool if the connection is being broken (we're in this method), - // *and* the exception indicates that the PG cluster really is down; the latter includes any IO/timeout issue, - // but does not include e.g. authentication failure or timeouts with disabled cancellation. - if (reason is NpgsqlException { IsTransient: true } ne && - (ne.InnerException is not TimeoutException || Settings.CancellationTimeout != -1) || - reason is PostgresException pe && PostgresErrorCodes.IsCriticalFailure(pe)) - { - DataSource.UpdateDatabaseState(DatabaseState.Offline, DateTime.UtcNow, Settings.HostRecheckSecondsTranslated); - DataSource.Clear(); - } + try + { + // If we're broken while reading prepended messages + // the cancellation request might still be waiting on the MRE. + // Unblock it. + ReadingPrependedMessagesMRE.Set(); LogMessages.BreakingConnection(ConnectionLogger, Id, reason); @@ -1982,62 +2017,94 @@ internal Exception Break(Exception reason) // the original reason for the break before actually closing the socket etc. 
Interlocked.CompareExchange(ref _breakReason, reason, null); State = ConnectorState.Broken; + // Take the CleanupLock while in SingleUseLock to make sure concurrent Break doesn't take it first. + Monitor.Enter(CleanupLock); + } + finally + { + // Unblock other threads (like cancellation) to proceed and exit gracefully. + Monitor.Exit(SyncObj); + } + + try + { + // Make sure there is no concurrent cancellation in process + lock (CancelLock) + { + // Note we only set the cluster to offline and clear the pool if the connection is being broken (we're in this method), + // *and* the exception indicates that the PG cluster really is down; the latter includes any IO/timeout issue, + // but does not include e.g. authentication failure or timeouts with disabled cancellation. + if (reason is NpgsqlException { IsTransient: true } ne && + (ne.InnerException is not TimeoutException || Settings.CancellationTimeout != -1) || + reason is PostgresException pe && PostgresErrorCodes.IsCriticalFailure(pe)) + { + DataSource.UpdateDatabaseState(DatabaseState.Offline, DateTime.UtcNow, Settings.HostRecheckSecondsTranslated); + DataSource.Clear(); + } - var connection = Connection; + var connection = Connection; - FullCleanup(); + FullCleanup(); - if (connection is not null) - { - var closeLockTaken = connection.TakeCloseLock(); - Debug.Assert(closeLockTaken); - if (Settings.ReplicationMode == ReplicationMode.Off) + if (connection is not null) { - // When a connector is broken, we immediately "return" it to the pool (i.e. update the pool state so reflect the - // connector no longer being open). Upper layers such as EF may check DbConnection.ConnectionState, and only close if - // it's closed; so we can't set the state to Closed and expect the user to still close (in order to return to the pool). - // On the other hand leaving the state Open could indicate to the user that the connection is functional. 
- // (see https://github.com/npgsql/npgsql/issues/3705#issuecomment-839908772) - Connection = null; - if (connection.ConnectorBindingScope != ConnectorBindingScope.None) - Return(); - connection.EnlistedTransaction = null; - connection.Connector = null; - connection.ConnectorBindingScope = ConnectorBindingScope.None; + var closeLockTaken = connection.TakeCloseLock(); + Debug.Assert(closeLockTaken); + if (Settings.ReplicationMode == ReplicationMode.Off) + { + // When a connector is broken, we immediately "return" it to the pool (i.e. update the pool state so reflect the + // connector no longer being open). Upper layers such as EF may check DbConnection.ConnectionState, and only close if + // it's closed; so we can't set the state to Closed and expect the user to still close (in order to return to the pool). + // On the other hand leaving the state Open could indicate to the user that the connection is functional. + // (see https://github.com/npgsql/npgsql/issues/3705#issuecomment-839908772) + Connection = null; + if (connection.ConnectorBindingScope != ConnectorBindingScope.None) + Return(); + connection.EnlistedTransaction = null; + connection.Connector = null; + connection.ConnectorBindingScope = ConnectorBindingScope.None; + } + + connection.FullState = ConnectionState.Broken; + connection.ReleaseCloseLock(); } - connection.FullState = ConnectionState.Broken; - connection.ReleaseCloseLock(); + return reason; } - - return reason; + } + finally + { + Monitor.Exit(CleanupLock); } } void FullCleanup() { - Debug.Assert(Monitor.IsEntered(this)); - - if (Settings.Multiplexing) + lock (CleanupLock) { - FlagAsNotWritableForMultiplexing(); + if (Settings.Multiplexing) + { + FlagAsNotWritableForMultiplexing(); - // Note that in multiplexing, this could be called from the read loop, while the write loop is - // writing into the channel. 
To make sure this race condition isn't a problem, the channel currently - // isn't set up with SingleWriter (since at this point it doesn't do anything). - CommandsInFlightWriter!.Complete(); + // Note that in multiplexing, this could be called from the read loop, while the write loop is + // writing into the channel. To make sure this race condition isn't a problem, the channel currently + // isn't set up with SingleWriter (since at this point it doesn't do anything). + CommandsInFlightWriter!.Complete(); - // The connector's read loop has a continuation to observe and log any exception coming out - // (see Open) - } + // The connector's read loop has a continuation to observe and log any exception coming out + // (see Open) + } - ConnectionLogger.LogTrace("Cleaning up connector", Id); - Cleanup(); + ConnectionLogger.LogTrace("Cleaning up connector", Id); + Cleanup(); - if (_isKeepAliveEnabled) - { - _keepAliveTimer!.Change(Timeout.Infinite, Timeout.Infinite); - _keepAliveTimer.Dispose(); + if (_isKeepAliveEnabled) + { + _keepAliveTimer!.Change(Timeout.Infinite, Timeout.Infinite); + _keepAliveTimer.Dispose(); + } + + ReadingPrependedMessagesMRE.Dispose(); } } @@ -2330,7 +2397,7 @@ UserAction DoStartUserAction(ConnectorState newState, NpgsqlCommand? command, UserAction DoStartUserActionWithKeepAlive(ConnectorState newState, NpgsqlCommand? command, CancellationToken cancellationToken, bool attemptPgCancellation) { - lock (this) + lock (SyncObj) { if (!IsConnected) { @@ -2368,7 +2435,7 @@ internal void EndUserAction() if (_isKeepAliveEnabled) { - lock (this) + lock (SyncObj) { if (IsReady || !IsConnected) return; @@ -2410,10 +2477,7 @@ internal void EndUserAction() void PerformKeepAlive(object? state) { Debug.Assert(_isKeepAliveEnabled); - - // SemaphoreSlim.Dispose() isn't thread-safe - it may be in progress so we shouldn't try to wait on it; - // we need a standard lock to protect it. 
- if (!Monitor.TryEnter(this)) + if (!Monitor.TryEnter(SyncObj)) return; try @@ -2446,7 +2510,7 @@ void PerformKeepAlive(object? state) } finally { - Monitor.Exit(this); + Monitor.Exit(SyncObj); } } #pragma warning restore CA1801 // Review unused parameters diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index a35a189965..264f1a721f 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -1434,9 +1434,7 @@ internal virtual async ValueTask ExecuteReader(CommandBehavior TraceCommandStart(connector); // If a cancellation is in progress, wait for it to "complete" before proceeding (#615) - lock (connector.CancelLock) - { - } + connector.ResetCancellation(); // We do not wait for the entire send to complete before proceeding to reading - // the sending continues in parallel with the user's reading. Waiting for the diff --git a/test/Npgsql.Tests/CommandTests.cs b/test/Npgsql.Tests/CommandTests.cs index 8d04f0a128..eba1bd4d5b 100644 --- a/test/Npgsql.Tests/CommandTests.cs +++ b/test/Npgsql.Tests/CommandTests.cs @@ -1399,6 +1399,80 @@ public async Task Concurrent_read_write_failure_deadlock() Assert.ThrowsAsync(async () => await queryTask); } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4906")] + [Description("Make sure we don't cancel a prepended query (and do not deadlock in case of a failure)")] + public async Task Not_cancel_prepended_query([Values] bool failPrependedQuery) + { + if (IsMultiplexing) + return; + + await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); + var csb = new NpgsqlConnectionStringBuilder(postmasterMock.ConnectionString) + { + NoResetOnClose = false + }; + await using var dataSource = CreateDataSource(csb.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); + // reopen connection to append prepended query + await conn.CloseAsync(); + await conn.OpenAsync(); + + using var cts = new CancellationTokenSource(); + var queryTask = 
conn.ExecuteNonQueryAsync("SELECT 1", cancellationToken: cts.Token); + + var server = await postmasterMock.WaitForServerConnection(); + await server.ExpectSimpleQuery("DISCARD ALL"); + await server.ExpectExtendedQuery(); + + var cancelTask = Task.Run(cts.Cancel); + var cancellationRequestTask = postmasterMock.WaitForCancellationRequest().AsTask(); + // Give 1 second to make sure we didn't send cancellation request + await Task.Delay(1000); + Assert.IsFalse(cancelTask.IsCompleted); + Assert.IsFalse(cancellationRequestTask.IsCompleted); + + if (failPrependedQuery) + { + await server + .WriteErrorResponse(PostgresErrorCodes.SyntaxError) + .WriteReadyForQuery() + .FlushAsync(); + + await cancelTask; + await cancellationRequestTask; + + Assert.ThrowsAsync(async () => await queryTask); + Assert.That(conn.State, Is.EqualTo(ConnectionState.Closed)); + return; + } + + await server + .WriteCommandComplete() + .WriteReadyForQuery() + .FlushAsync(); + + await cancelTask; + await cancellationRequestTask; + + await server + .WriteErrorResponse(PostgresErrorCodes.QueryCanceled) + .WriteReadyForQuery() + .FlushAsync(); + + Assert.ThrowsAsync(async () => await queryTask); + + queryTask = conn.ExecuteNonQueryAsync("SELECT 1"); + await server.ExpectExtendedQuery(); + await server + .WriteParseComplete() + .WriteBindComplete() + .WriteNoData() + .WriteCommandComplete() + .WriteReadyForQuery() + .FlushAsync(); + await queryTask; + } + #region Logging [Test] From 029ec4a10a3daaff34b70bdc5f593c7e6f42dad5 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Tue, 31 Jan 2023 23:29:47 +0300 Subject: [PATCH 053/761] Fix not checking for oversize buffer while loading backend types (#4912) Fixes #4911 --- src/Npgsql/PostgresDatabaseInfo.cs | 37 +++++++++++++------------- test/Npgsql.Tests/NotificationTests.cs | 16 +++++++++++ 2 files changed, 35 insertions(+), 18 deletions(-) diff --git a/src/Npgsql/PostgresDatabaseInfo.cs b/src/Npgsql/PostgresDatabaseInfo.cs index a8a82fccd4..4d640fb261 100644 
--- a/src/Npgsql/PostgresDatabaseInfo.cs +++ b/src/Npgsql/PostgresDatabaseInfo.cs @@ -299,7 +299,6 @@ static string SanitizeForReplicationConnection(string str) } await conn.Flush(async); var byOID = new Dictionary(); - var buf = conn.ReadBuffer; // First read the PostgreSQL version Expect(await conn.ReadMessage(async), conn); @@ -307,8 +306,10 @@ static string SanitizeForReplicationConnection(string str) // We read the message in non-sequential mode which buffers the whole message. // There is no need to ensure data within the message boundaries Expect(await conn.ReadMessage(async), conn); - buf.Skip(2); // Column count - LongVersion = ReadNonNullableString(buf); + // Note that here and below we don't assign ReadBuffer to a variable + // because we might allocate oversize buffer + conn.ReadBuffer.Skip(2); // Column count + LongVersion = ReadNonNullableString(conn.ReadBuffer); Expect(await conn.ReadMessage(async), conn); if (isReplicationConnection) Expect(await conn.ReadMessage(async), conn); @@ -322,15 +323,15 @@ static string SanitizeForReplicationConnection(string str) if (msg is not DataRowMessage) break; - buf.Skip(2); // Column count - var nspname = ReadNonNullableString(buf); - var oid = uint.Parse(ReadNonNullableString(buf), NumberFormatInfo.InvariantInfo); + conn.ReadBuffer.Skip(2); // Column count + var nspname = ReadNonNullableString(conn.ReadBuffer); + var oid = uint.Parse(ReadNonNullableString(conn.ReadBuffer), NumberFormatInfo.InvariantInfo); Debug.Assert(oid != 0); - var typname = ReadNonNullableString(buf); - var typtype = ReadNonNullableString(buf)[0]; - var typnotnull = ReadNonNullableString(buf)[0] == 't'; - var len = buf.ReadInt32(); - var elemtypoid = len == -1 ? 
0 : uint.Parse(buf.ReadString(len), NumberFormatInfo.InvariantInfo); + var typname = ReadNonNullableString(conn.ReadBuffer); + var typtype = ReadNonNullableString(conn.ReadBuffer)[0]; + var typnotnull = ReadNonNullableString(conn.ReadBuffer)[0] == 't'; + var len = conn.ReadBuffer.ReadInt32(); + var elemtypoid = len == -1 ? 0 : uint.Parse(conn.ReadBuffer.ReadString(len), NumberFormatInfo.InvariantInfo); switch (typtype) { @@ -436,10 +437,10 @@ static string SanitizeForReplicationConnection(string str) if (msg is not DataRowMessage) break; - buf.Skip(2); // Column count - var oid = uint.Parse(ReadNonNullableString(buf), NumberFormatInfo.InvariantInfo); - var attname = ReadNonNullableString(buf); - var atttypid = uint.Parse(ReadNonNullableString(buf), NumberFormatInfo.InvariantInfo); + conn.ReadBuffer.Skip(2); // Column count + var oid = uint.Parse(ReadNonNullableString(conn.ReadBuffer), NumberFormatInfo.InvariantInfo); + var attname = ReadNonNullableString(conn.ReadBuffer); + var atttypid = uint.Parse(ReadNonNullableString(conn.ReadBuffer), NumberFormatInfo.InvariantInfo); if (oid != currentOID) { @@ -498,9 +499,9 @@ static string SanitizeForReplicationConnection(string str) if (msg is not DataRowMessage) break; - buf.Skip(2); // Column count - var oid = uint.Parse(ReadNonNullableString(buf), NumberFormatInfo.InvariantInfo); - var enumlabel = ReadNonNullableString(buf); + conn.ReadBuffer.Skip(2); // Column count + var oid = uint.Parse(ReadNonNullableString(conn.ReadBuffer), NumberFormatInfo.InvariantInfo); + var enumlabel = ReadNonNullableString(conn.ReadBuffer); if (oid != currentOID) { currentOID = oid; diff --git a/test/Npgsql.Tests/NotificationTests.cs b/test/Npgsql.Tests/NotificationTests.cs index 403a81312b..8f3810a779 100644 --- a/test/Npgsql.Tests/NotificationTests.cs +++ b/test/Npgsql.Tests/NotificationTests.cs @@ -212,4 +212,20 @@ public void WaitAsync_breaks_connection() Assert.That(pgEx.SqlState, Is.EqualTo(PostgresErrorCodes.AdminShutdown)); 
Assert.That(conn.FullState, Is.EqualTo(ConnectionState.Broken)); } + + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4911")] + public async Task Big_notice_while_loading_types() + { + await using var adminConn = await OpenConnectionAsync(); + // Max notification payload is 8000 + await using var dataSource = CreateDataSource(csb => csb.ReadBufferSize = 4096); + await using var conn = await dataSource.OpenConnectionAsync(); + + var notify = GetUniqueIdentifier(nameof(Big_notice_while_loading_types)); + await conn.ExecuteNonQueryAsync($"LISTEN {notify}"); + var payload = new string('a', 5000); + await adminConn.ExecuteNonQueryAsync($"NOTIFY {notify}, '{payload}'"); + + await conn.ReloadTypesAsync(); + } } From fc50c7e5b313b6b58de9f549b056d8e24a1e2290 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Wed, 1 Feb 2023 00:02:40 +0300 Subject: [PATCH 054/761] Remove IDisposable from NpgsqlConnector (#4913) --- src/Npgsql/Internal/NpgsqlConnector.cs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index e5013d9ee9..023759c427 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -31,7 +31,7 @@ namespace Npgsql.Internal; /// Represents a connection to a PostgreSQL backend. Unlike NpgsqlConnection objects, which are /// exposed to users, connectors are internal to Npgsql and are recycled by the connection pool. /// -public sealed partial class NpgsqlConnector : IDisposable +public sealed partial class NpgsqlConnector { #region Fields and Properties @@ -1971,9 +1971,6 @@ internal bool TryRemovePendingEnlistedConnector(Transaction transaction) internal void Return() => DataSource.Return(this); - /// - public void Dispose() => Close(); - /// /// Called when an unexpected message has been received during an action. Breaks the /// connector and returns the appropriate message. 
From 2c2ae90b646b9364edad4edc17b04cefe62f85af Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Wed, 1 Feb 2023 14:17:36 +0300 Subject: [PATCH 055/761] Optimize NpgsqlConnector.ReadMessage (#4914) --- src/Npgsql/Internal/NpgsqlConnector.cs | 258 ++++++++++++------------- src/Npgsql/NpgsqlDataReader.cs | 13 +- 2 files changed, 134 insertions(+), 137 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 023759c427..aa277a7d1e 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -1252,23 +1252,18 @@ internal void PrependInternalMessage(byte[] rawMessage, int responseMessageCount #region Backend message processing - internal ValueTask ReadMessage(bool async, DataRowLoadingMode dataRowLoadingMode = DataRowLoadingMode.NonSequential) - => ReadMessage(async, dataRowLoadingMode, readingNotifications: false)!; - internal ValueTask ReadMessageWithNotifications(bool async) - => ReadMessage(async, DataRowLoadingMode.NonSequential, readingNotifications: true); + => ReadMessageLong(async, DataRowLoadingMode.NonSequential, readingNotifications: true); - ValueTask ReadMessage( + internal ValueTask ReadMessage( bool async, - DataRowLoadingMode dataRowLoadingMode, - bool readingNotifications) + DataRowLoadingMode dataRowLoadingMode = DataRowLoadingMode.NonSequential) { if (PendingPrependedResponses > 0 || - dataRowLoadingMode != DataRowLoadingMode.NonSequential || - readingNotifications || + dataRowLoadingMode == DataRowLoadingMode.Skip || ReadBuffer.ReadBytesLeft < 5) { - return ReadMessageLong(this, async, dataRowLoadingMode, readingNotifications); + return ReadMessageLong(async, dataRowLoadingMode, readingNotifications: false)!; } var messageCode = (BackendMessageCode)ReadBuffer.ReadByte(); @@ -1279,7 +1274,7 @@ internal ValueTask ReadMessage(bool async, DataRowLoadingMode d case BackendMessageCode.ParameterStatus: case BackendMessageCode.ErrorResponse: 
ReadBuffer.ReadPosition--; - return ReadMessageLong(this, async, dataRowLoadingMode, readingNotifications: false); + return ReadMessageLong(async, dataRowLoadingMode, readingNotifications: false)!; case BackendMessageCode.ReadyForQuery: break; } @@ -1289,158 +1284,157 @@ internal ValueTask ReadMessage(bool async, DataRowLoadingMode d if (len > ReadBuffer.ReadBytesLeft) { ReadBuffer.ReadPosition -= 5; - return ReadMessageLong(this, async, dataRowLoadingMode, readingNotifications: false); + return ReadMessageLong(async, dataRowLoadingMode, readingNotifications: false)!; } - return new ValueTask(ParseServerMessage(ReadBuffer, messageCode, len, false)); + return new ValueTask(ParseServerMessage(ReadBuffer, messageCode, len, false))!; + } - static async ValueTask ReadMessageLong( - NpgsqlConnector connector, - bool async, - DataRowLoadingMode dataRowLoadingMode, - bool readingNotifications, - bool isReadingPrependedMessage = false) + async ValueTask ReadMessageLong( + bool async, + DataRowLoadingMode dataRowLoadingMode, + bool readingNotifications, + bool isReadingPrependedMessage = false) + { + // First read the responses of any prepended messages. + if (PendingPrependedResponses > 0 && !isReadingPrependedMessage) { - // First read the responses of any prepended messages. - if (connector.PendingPrependedResponses > 0 && !isReadingPrependedMessage) + try { - try - { - // TODO: There could be room for optimization here, rather than the async call(s) - connector.ReadBuffer.Timeout = TimeSpan.FromMilliseconds(connector.InternalCommandTimeout); - for (; connector.PendingPrependedResponses > 0; connector.PendingPrependedResponses--) - await ReadMessageLong(connector, async, DataRowLoadingMode.Skip, readingNotifications: false, isReadingPrependedMessage: true); - // We've read all the prepended response. - // Allow cancellation to proceed. - connector.ReadingPrependedMessagesMRE.Set(); - } - catch (Exception e) - { - // Prepended queries should never fail. 
- // If they do, we're not even going to attempt to salvage the connector. - throw connector.Break(e); - } + // TODO: There could be room for optimization here, rather than the async call(s) + ReadBuffer.Timeout = TimeSpan.FromMilliseconds(InternalCommandTimeout); + for (; PendingPrependedResponses > 0; PendingPrependedResponses--) + await ReadMessageLong(async, DataRowLoadingMode.Skip, readingNotifications: false, isReadingPrependedMessage: true); + // We've read all the prepended response. + // Allow cancellation to proceed. + ReadingPrependedMessagesMRE.Set(); + } + catch (Exception e) + { + // Prepended queries should never fail. + // If they do, we're not even going to attempt to salvage the connector. + throw Break(e); } + } - PostgresException? error = null; + PostgresException? error = null; - try - { - connector.ReadBuffer.Timeout = TimeSpan.FromMilliseconds(connector.UserTimeout); + try + { + ReadBuffer.Timeout = TimeSpan.FromMilliseconds(UserTimeout); - while (true) + while (true) + { + await ReadBuffer.Ensure(5, async, readingNotifications); + var messageCode = (BackendMessageCode)ReadBuffer.ReadByte(); + PGUtil.ValidateBackendMessageCode(messageCode); + var len = ReadBuffer.ReadInt32() - 4; // Transmitted length includes itself + + if ((messageCode == BackendMessageCode.DataRow && + dataRowLoadingMode != DataRowLoadingMode.NonSequential) || + messageCode == BackendMessageCode.CopyData) { - await connector.ReadBuffer.Ensure(5, async, readingNotifications); - var messageCode = (BackendMessageCode)connector.ReadBuffer.ReadByte(); - PGUtil.ValidateBackendMessageCode(messageCode); - var len = connector.ReadBuffer.ReadInt32() - 4; // Transmitted length includes itself - - if ((messageCode == BackendMessageCode.DataRow && - dataRowLoadingMode != DataRowLoadingMode.NonSequential) || - messageCode == BackendMessageCode.CopyData) + if (dataRowLoadingMode == DataRowLoadingMode.Skip) { - if (dataRowLoadingMode == DataRowLoadingMode.Skip) - { - await 
connector.ReadBuffer.Skip(len, async); - continue; - } + await ReadBuffer.Skip(len, async); + continue; } - else if (len > connector.ReadBuffer.ReadBytesLeft) + } + else if (len > ReadBuffer.ReadBytesLeft) + { + if (len > ReadBuffer.Size) { - if (len > connector.ReadBuffer.Size) - { - var oversizeBuffer = connector.ReadBuffer.AllocateOversize(len); + var oversizeBuffer = ReadBuffer.AllocateOversize(len); - if (connector._origReadBuffer == null) - connector._origReadBuffer = connector.ReadBuffer; - else - connector.ReadBuffer.Dispose(); - - connector.ReadBuffer = oversizeBuffer; - } + if (_origReadBuffer == null) + _origReadBuffer = ReadBuffer; + else + ReadBuffer.Dispose(); - await connector.ReadBuffer.Ensure(len, async); + ReadBuffer = oversizeBuffer; } - var msg = connector.ParseServerMessage(connector.ReadBuffer, messageCode, len, isReadingPrependedMessage); + await ReadBuffer.Ensure(len, async); + } - switch (messageCode) - { - case BackendMessageCode.ErrorResponse: - Debug.Assert(msg == null); + var msg = ParseServerMessage(ReadBuffer, messageCode, len, isReadingPrependedMessage); - // An ErrorResponse is (almost) always followed by a ReadyForQuery. Save the error - // and throw it as an exception when the ReadyForQuery is received (next). - error = PostgresException.Load( - connector.ReadBuffer, - connector.Settings.IncludeErrorDetail, - connector.LoggingConfiguration.ExceptionLogger); + switch (messageCode) + { + case BackendMessageCode.ErrorResponse: + Debug.Assert(msg == null); - if (connector.State == ConnectorState.Connecting) - { - // During the startup/authentication phase, an ErrorResponse isn't followed by - // an RFQ. Instead, the server closes the connection immediately - throw error; - } + // An ErrorResponse is (almost) always followed by a ReadyForQuery. Save the error + // and throw it as an exception when the ReadyForQuery is received (next). 
+ error = PostgresException.Load( + ReadBuffer, + Settings.IncludeErrorDetail, + LoggingConfiguration.ExceptionLogger); - if (PostgresErrorCodes.IsCriticalFailure(error, clusterError: false)) - { - // Consider the connection dead - throw connector.Break(error); - } + if (State == ConnectorState.Connecting) + { + // During the startup/authentication phase, an ErrorResponse isn't followed by + // an RFQ. Instead, the server closes the connection immediately + throw error; + } - continue; + if (PostgresErrorCodes.IsCriticalFailure(error, clusterError: false)) + { + // Consider the connection dead + throw Break(error); + } - case BackendMessageCode.ReadyForQuery: - if (error != null) - { - NpgsqlEventSource.Log.CommandFailed(); - throw error; - } + continue; - break; - - // Asynchronous messages which can come anytime, they have already been handled - // in ParseServerMessage. Read the next message. - case BackendMessageCode.NoticeResponse: - case BackendMessageCode.NotificationResponse: - case BackendMessageCode.ParameterStatus: - Debug.Assert(msg == null); - if (!readingNotifications) - continue; - return null; + case BackendMessageCode.ReadyForQuery: + if (error != null) + { + NpgsqlEventSource.Log.CommandFailed(); + throw error; } - Debug.Assert(msg != null, "Message is null for code: " + messageCode); - return msg; - } - } - catch (PostgresException e) - { - // TODO: move it up the stack, like #3126 did (relevant for non-command-execution scenarios, like COPY) - if (connector.CurrentReader is null) - connector.EndUserAction(); + break; - if (e.SqlState == PostgresErrorCodes.QueryCanceled && connector.PostgresCancellationPerformed) - { - // The query could be canceled because of a user cancellation or a timeout - raise the proper exception. - // If _postgresCancellationPerformed is false, this is an unsolicited cancellation - - // just bubble up thePostgresException. - throw connector.UserCancellationRequested - ? 
new OperationCanceledException("Query was cancelled", e, connector.UserCancellationToken) - : new NpgsqlException("Exception while reading from stream", - new TimeoutException("Timeout during reading attempt")); + // Asynchronous messages which can come anytime, they have already been handled + // in ParseServerMessage. Read the next message. + case BackendMessageCode.NoticeResponse: + case BackendMessageCode.NotificationResponse: + case BackendMessageCode.ParameterStatus: + Debug.Assert(msg == null); + if (!readingNotifications) + continue; + return null; } - throw; + Debug.Assert(msg != null, "Message is null for code: " + messageCode); + return msg; } - catch (NpgsqlException) + } + catch (PostgresException e) + { + // TODO: move it up the stack, like #3126 did (relevant for non-command-execution scenarios, like COPY) + if (CurrentReader is null) + EndUserAction(); + + if (e.SqlState == PostgresErrorCodes.QueryCanceled && PostgresCancellationPerformed) { - // An ErrorResponse isn't followed by ReadyForQuery - if (error != null) - ExceptionDispatchInfo.Capture(error).Throw(); - throw; + // The query could be canceled because of a user cancellation or a timeout - raise the proper exception. + // If _postgresCancellationPerformed is false, this is an unsolicited cancellation - + // just bubble up thePostgresException. + throw UserCancellationRequested + ? new OperationCanceledException("Query was cancelled", e, UserCancellationToken) + : new NpgsqlException("Exception while reading from stream", + new TimeoutException("Timeout during reading attempt")); } + + throw; + } + catch (NpgsqlException) + { + // An ErrorResponse isn't followed by ReadyForQuery + if (error != null) + ExceptionDispatchInfo.Capture(error).Throw(); + throw; } } @@ -2802,7 +2796,7 @@ enum TransactionStatus : byte /// /// Specifies how to load/parse DataRow messages as they're received from the backend. 
/// -internal enum DataRowLoadingMode +enum DataRowLoadingMode { /// /// Load DataRows in non-sequential mode diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index a459155cdd..5a8040801a 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -281,7 +281,8 @@ async Task Read(bool async, CancellationToken cancellationToken = default) case ReaderState.Disposed: return false; default: - throw new ArgumentOutOfRangeException(); + ThrowHelper.ThrowArgumentOutOfRangeException(); + return false; } var msg = await ReadMessage(async); @@ -410,7 +411,8 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo case ReaderState.Disposed: return false; default: - throw new ArgumentOutOfRangeException(); + ThrowHelper.ThrowArgumentOutOfRangeException(); + return false; } Debug.Assert(State == ReaderState.BetweenResults); @@ -478,8 +480,8 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo { // Statement did not generate a resultset (e.g. 
INSERT) // Read and process its completion message and move on to the next statement - - msg = await ReadMessage(async); + // No need to read sequentially as it's not a DataRow + msg = await Connector.ReadMessage(async); switch (msg.Code) { case BackendMessageCode.CommandComplete: @@ -679,7 +681,8 @@ async Task NextResultSchemaOnly(bool async, bool isConsuming = false, Canc case ReaderState.Disposed: return false; default: - throw new ArgumentOutOfRangeException(); + ThrowHelper.ThrowArgumentOutOfRangeException(); + return false; } for (StatementIndex++; StatementIndex < _statements.Count; StatementIndex++) From 7b7f888c6157581e93a089441f248688856d8f8f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 4 Feb 2023 12:53:07 +0200 Subject: [PATCH 056/761] Bump Scriban.Signed from 5.5.2 to 5.6.0 (#4915) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 3228f538d8..f7f58c6931 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -15,7 +15,7 @@ - + From 7e512e52c043ec22a73bf4d5a22c69647784ac9f Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Fri, 10 Feb 2023 18:12:26 +0300 Subject: [PATCH 057/761] Fix write buffer's WriteSpaceLeft check while closing prepared statement (#4921) Fixes #4920 --- .../Internal/NpgsqlConnector.FrontendMessages.cs | 2 +- test/Npgsql.Tests/PrepareTests.cs | 13 +++++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.FrontendMessages.cs b/src/Npgsql/Internal/NpgsqlConnector.FrontendMessages.cs index c61f7b48bd..c38f39575a 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.FrontendMessages.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.FrontendMessages.cs @@ -251,7 +251,7 @@ internal Task WriteClose(StatementOrPortal type, string name, bool async, Cancel sizeof(byte) + // Statement or portal name.Length + 
sizeof(byte); // Statement or portal name plus null terminator - if (WriteBuffer.WriteSpaceLeft < 10) + if (WriteBuffer.WriteSpaceLeft < len) return FlushAndWrite(len, type, name, async, cancellationToken); Write(len, type, name); diff --git a/test/Npgsql.Tests/PrepareTests.cs b/test/Npgsql.Tests/PrepareTests.cs index f1233c6df2..f980069385 100644 --- a/test/Npgsql.Tests/PrepareTests.cs +++ b/test/Npgsql.Tests/PrepareTests.cs @@ -758,6 +758,19 @@ public async Task Explicitly_prepared_statement_invalidation() Assert.False(command.IsPrepared); } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4920")] + public async Task Explicit_prepare_unprepare_many_queries() + { + // Set a specific buffer's size to trigger #4920 + await using var dataSource = CreateDataSource(csb => csb.WriteBufferSize = 5002); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = conn.CreateCommand(); + + cmd.CommandText = string.Join(';', Enumerable.Range(1, 500).Select(x => $"SELECT {x}")); + await cmd.PrepareAsync(); + await cmd.UnprepareAsync(); + } + NpgsqlConnection OpenConnectionAndUnprepare() { var conn = OpenConnection(); From ad7dee8efa2afd25fc64a8e8bea8da7ca3b13ad8 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Fri, 10 Feb 2023 22:12:20 +0300 Subject: [PATCH 058/761] Fix executing connectionless command on NpgsqlDataSource with multiplexing (#4841) Fixes #4840 --- src/Npgsql/MultiplexingDataSource.cs | 2 +- test/Npgsql.Tests/DataSourceTests.cs | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/src/Npgsql/MultiplexingDataSource.cs b/src/Npgsql/MultiplexingDataSource.cs index 1aa2cbe0d9..03c8718216 100644 --- a/src/Npgsql/MultiplexingDataSource.cs +++ b/src/Npgsql/MultiplexingDataSource.cs @@ -109,7 +109,7 @@ async Task MultiplexingWriteLoop() } connector = await OpenNewConnector( - command.Connection!, + command.InternalConnection!, new NpgsqlTimeout(TimeSpan.FromSeconds(Settings.Timeout)), async: 
true, CancellationToken.None); diff --git a/test/Npgsql.Tests/DataSourceTests.cs b/test/Npgsql.Tests/DataSourceTests.cs index 81cd68661b..e1d53c06f6 100644 --- a/test/Npgsql.Tests/DataSourceTests.cs +++ b/test/Npgsql.Tests/DataSourceTests.cs @@ -274,4 +274,24 @@ public async Task Executing_command_on_disposed_datasource([Values] bool multipl await using var command = dataSource.CreateCommand("SELECT 1"); Assert.ThrowsAsync(command.ExecuteNonQueryAsync); } + + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4840")] + public async Task Multiplexing_connectionless_command_open_connection() + { + var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + { + Multiplexing = true + }; + await using var dataSource = NpgsqlDataSource.Create(csb.ConnectionString); + + await using var conn = await dataSource.OpenConnectionAsync(); + await using var _ = await conn.BeginTransactionAsync(); + + await using var command = dataSource.CreateCommand(); + command.CommandText = "SELECT 1"; + + await using var reader = await command.ExecuteReaderAsync(); + Assert.True(reader.Read()); + Assert.That(reader.GetInt32(0), Is.EqualTo(1)); + } } From ec382f223fb39a363341541dce6e8d3813b6f5ba Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Sat, 11 Feb 2023 18:47:06 +0300 Subject: [PATCH 059/761] Dump method's size by class (#4925) --- test/MStatDumper/Program.cs | 116 +++++++++++++++++++++++++++++++++--- 1 file changed, 108 insertions(+), 8 deletions(-) diff --git a/test/MStatDumper/Program.cs b/test/MStatDumper/Program.cs index 11ec532030..561b0cd73e 100644 --- a/test/MStatDumper/Program.cs +++ b/test/MStatDumper/Program.cs @@ -32,7 +32,12 @@ static void Main(string[] args) Console.WriteLine("| --- | --- |"); foreach (var m in typesByModules.OrderByDescending(x => x.Sum)) { - Console.WriteLine($"| {m.Name.Replace("`", "\\`")} | {m.Sum:n0} |"); + var name = m.Name + .Replace("`", "\\`") + .Replace("<", "<") + .Replace(">", ">") + .Replace("|", "\\|"); + Console.WriteLine($"| 
{name} | {m.Sum:n0} |"); } Console.WriteLine(); Console.WriteLine(""); @@ -64,7 +69,12 @@ static void Main(string[] args) Console.WriteLine("| --- | --- |"); foreach (var m in methodsByModules.OrderByDescending(x => x.Sum)) { - Console.WriteLine($"| {m.Name.Replace("`", "\\`")} | {m.Sum:n0} |"); + var name = m.Name + .Replace("`", "\\`") + .Replace("<", "<") + .Replace(">", ">") + .Replace("|", "\\|"); + Console.WriteLine($"| {name} | {m.Sum:n0} |"); } Console.WriteLine(); Console.WriteLine(""); @@ -86,7 +96,7 @@ string FindNamespace(TypeReference type) var current = type; while (true) { - if (!String.IsNullOrEmpty(current.Namespace)) + if (!string.IsNullOrEmpty(current.Namespace)) { return current.Namespace; } @@ -112,7 +122,12 @@ string FindNamespace(TypeReference type) Console.WriteLine("| --- | --- |"); foreach (var m in methodsByNamespace.OrderByDescending(x => x.Sum)) { - Console.WriteLine($"| {m.Key.Replace("`", "\\`")} | {m.Sum:n0} |"); + var name = m.Key + .Replace("`", "\\`") + .Replace("<", "<") + .Replace(">", ">") + .Replace("|", "\\|"); + Console.WriteLine($"| {name} | {m.Sum:n0} |"); } Console.WriteLine(); Console.WriteLine(""); @@ -143,7 +158,12 @@ string FindNamespace(TypeReference type) Console.WriteLine("| --- | --- |"); foreach (var m in blobStats.OrderByDescending(x => x.Size)) { - Console.WriteLine($"| {m.Name.Replace("`", "\\`")} | {m.Size:n0} |"); + var name = m.Name + .Replace("`", "\\`") + .Replace("<", "<") + .Replace(">", ">") + .Replace("|", "\\|"); + Console.WriteLine($"| {name} | {m.Size:n0} |"); } Console.WriteLine(); Console.WriteLine(""); @@ -157,13 +177,93 @@ string FindNamespace(TypeReference type) } Console.WriteLine("// **********"); } + + if (markDownStyleOutput) + { + var methodsByClass = methodStats + .Where(x => x.Method.DeclaringType.Scope.Name == "Npgsql") + .GroupBy(x => GetClassName(x.Method)) + .OrderByDescending(x => x.Sum(x => x.Size + x.GcInfoSize + x.EhInfoSize)) + .Take(20) + .ToList(); + + static string 
GetClassName(MethodReference methodReference) + { + var type = methodReference.DeclaringType.DeclaringType ?? methodReference.DeclaringType; + return type.Namespace + "." + type.Name; + } + + Console.WriteLine("
"); + Console.WriteLine("Top 20 Npgsql Classes By Methods Size"); + Console.WriteLine(); + Console.WriteLine("
"); + Console.WriteLine(); + Console.WriteLine("| Name | Size |"); + Console.WriteLine("| --- | --- |"); + foreach (var m in methodsByClass + .Select(x => new { Name = x.Key, Sum = x.Sum(x => x.Size + x.GcInfoSize + x.EhInfoSize) }) + .OrderByDescending(x => x.Sum)) + { + var name = m.Name + .Replace("`", "\\`") + .Replace("<", "<") + .Replace(">", ">") + .Replace("|", "\\|"); + Console.WriteLine($"| {name} | {m.Sum:n0} |"); + } + + Console.WriteLine(); + Console.WriteLine("
"); + + foreach (var g in methodsByClass + .OrderByDescending(x => x.Sum(x => x.Size + x.GcInfoSize + x.EhInfoSize))) + { + Console.WriteLine(); + Console.WriteLine("
"); + Console.WriteLine($"\"{g.Key}\" Methods ({g.Sum(x => x.Size + x.GcInfoSize + x.EhInfoSize):n0} bytes)"); + Console.WriteLine(); + Console.WriteLine("
"); + Console.WriteLine(); + Console.WriteLine("| Name | Size |"); + Console.WriteLine("| --- | --- |"); + foreach (var m in g + .GroupBy(x => GetMethodName(x.Method)) + .Select(x => new { Name = x.Key, Size = x.Sum(x => x.Size + x.GcInfoSize + x.EhInfoSize)}) + .OrderByDescending(x => x.Size)) + { + var methodName = m.Name + .Replace("`", "\\`") + .Replace("<", "<") + .Replace(">", ">") + .Replace("|", "\\|"); + Console.WriteLine($"| {methodName} | {m.Size:n0} |"); + } + Console.WriteLine(); + Console.WriteLine("
"); + Console.WriteLine(); + Console.WriteLine("
"); + + static string GetMethodName(MethodReference methodReference) + { + if (methodReference.DeclaringType.DeclaringType is null) + { + return methodReference.Name; + } + + return methodReference.DeclaringType.Name; + } + } + + Console.WriteLine(); + Console.WriteLine("
"); + } } public static IEnumerable GetTypes(MethodDefinition types) { types.Body.SimplifyMacros(); var il = types.Body.Instructions; - for (int i = 0; i + 2 < il.Count; i += 2) + for (var i = 0; i + 2 < il.Count; i += 2) { var type = (TypeReference)il[i + 0].Operand; var size = (int)il[i + 1].Operand; @@ -179,7 +279,7 @@ public static IEnumerable GetMethods(MethodDefinition methods) { methods.Body.SimplifyMacros(); var il = methods.Body.Instructions; - for (int i = 0; i + 4 < il.Count; i += 4) + for (var i = 0; i + 4 < il.Count; i += 4) { var method = (MethodReference)il[i + 0].Operand; var size = (int)il[i + 1].Operand; @@ -199,7 +299,7 @@ public static IEnumerable GetBlobs(MethodDefinition blobs) { blobs.Body.SimplifyMacros(); var il = blobs.Body.Instructions; - for (int i = 0; i + 2 < il.Count; i += 2) + for (var i = 0; i + 2 < il.Count; i += 2) { var name = (string)il[i + 0].Operand; var size = (int)il[i + 1].Operand; From dbf2a4f6e6cf4f8abe55231289ab0481ce8c0c6e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 15 Feb 2023 11:07:40 +0200 Subject: [PATCH 060/761] Bump System.Diagnostics.DiagnosticSource from 6.0.0 to 7.0.1 (#4931) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index f7f58c6931..27af7a082c 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -9,7 +9,7 @@ - + From 9783c2d69b68ffb101de6d1232453a4d7fe39cb0 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Wed, 15 Feb 2023 10:08:20 +0100 Subject: [PATCH 061/761] Revert "Bump System.Diagnostics.DiagnosticSource from 6.0.0 to 7.0.1 (#4931)" This reverts commit dbf2a4f6e6cf4f8abe55231289ab0481ce8c0c6e. 
--- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 27af7a082c..f7f58c6931 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -9,7 +9,7 @@ - + From db48ea2d5acecd892244d788ef5927c3e166bfda Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 15 Feb 2023 11:09:33 +0200 Subject: [PATCH 062/761] Bump System.Text.Json from 7.0.1 to 7.0.2 (#4930) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index f7f58c6931..3f00f3403b 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -5,7 +5,7 @@ - + From b50e54011f7c94cf765103fcd278ebef41bffe50 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 18 Feb 2023 11:12:39 +0200 Subject: [PATCH 063/761] Bump BenchmarkDotNet from 0.13.4 to 0.13.5 (#4937) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 3f00f3403b..c4384e1a9b 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -38,7 +38,7 @@ - + From a2e3640b96cdc4ba9e2bdc61de45fb806d0b0865 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 18 Feb 2023 11:13:01 +0200 Subject: [PATCH 064/761] Bump BenchmarkDotNet.Diagnostics.Windows from 0.13.4 to 0.13.5 (#4936) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index c4384e1a9b..2e93e648b2 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -40,7 +40,7 @@ - + From 81ebee13773a199cc3515a152f0f0cdcbf5ded01 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Sat, 18 Feb 2023 12:54:16 +0300 
Subject: [PATCH 065/761] Optimize some of the tests by making them parallelizable (#4933) Contributes to #4665 --- test/Npgsql.NodaTime.Tests/NodaTimeInfinityTests.cs | 2 +- test/Npgsql.PluginTests/JsonNetTests.cs | 1 - test/Npgsql.Tests/ConnectionTests.cs | 3 +-- test/Npgsql.Tests/MultipleHostsTests.cs | 1 - test/Npgsql.Tests/NpgsqlParameterCollectionTests.cs | 4 +++- test/Npgsql.Tests/ReadBufferTests.cs | 2 +- test/Npgsql.Tests/TestUtil.cs | 8 ++------ test/Npgsql.Tests/Types/DateTimeInfinityTests.cs | 2 +- test/Npgsql.Tests/WriteBufferTests.cs | 2 +- 9 files changed, 10 insertions(+), 15 deletions(-) diff --git a/test/Npgsql.NodaTime.Tests/NodaTimeInfinityTests.cs b/test/Npgsql.NodaTime.Tests/NodaTimeInfinityTests.cs index 66bc0ca1fb..b719449e1d 100644 --- a/test/Npgsql.NodaTime.Tests/NodaTimeInfinityTests.cs +++ b/test/Npgsql.NodaTime.Tests/NodaTimeInfinityTests.cs @@ -12,8 +12,8 @@ namespace Npgsql.NodaTime.Tests; [TestFixture(false)] #if DEBUG [TestFixture(true)] -#endif [NonParallelizable] +#endif public class NodaTimeInfinityTests : TestBase { [Test] // #4715 diff --git a/test/Npgsql.PluginTests/JsonNetTests.cs b/test/Npgsql.PluginTests/JsonNetTests.cs index 0eecf3ab8c..c3da30a386 100644 --- a/test/Npgsql.PluginTests/JsonNetTests.cs +++ b/test/Npgsql.PluginTests/JsonNetTests.cs @@ -15,7 +15,6 @@ namespace Npgsql.PluginTests; /// /// Tests for the Npgsql.Json.NET mapping plugin /// -[NonParallelizable] [TestFixture(NpgsqlDbType.Jsonb)] [TestFixture(NpgsqlDbType.Json)] public class JsonNetTests : TestBase diff --git a/test/Npgsql.Tests/ConnectionTests.cs b/test/Npgsql.Tests/ConnectionTests.cs index 754e5d9b40..0deb4b6773 100644 --- a/test/Npgsql.Tests/ConnectionTests.cs +++ b/test/Npgsql.Tests/ConnectionTests.cs @@ -416,7 +416,7 @@ public async Task Timezone_connection_param() "localhost:5432", "localhost:5432" })] - [Test, IssueLink("https://github.com/npgsql/npgsql/issues/3802"), NonParallelizable] + [Test, 
IssueLink("https://github.com/npgsql/npgsql/issues/3802")] public string[] ConnectionString_Host(string host) { var dataSourceBuilder = new NpgsqlDataSourceBuilder @@ -1049,7 +1049,6 @@ public async Task Clone_with_data_source() } [Test] - [NonParallelizable] // Anyone can reload DatabaseInfo between us opening a connection public async Task DatabaseInfo_is_shared() { if (IsMultiplexing) diff --git a/test/Npgsql.Tests/MultipleHostsTests.cs b/test/Npgsql.Tests/MultipleHostsTests.cs index bcf02a233d..b7988c9f78 100644 --- a/test/Npgsql.Tests/MultipleHostsTests.cs +++ b/test/Npgsql.Tests/MultipleHostsTests.cs @@ -20,7 +20,6 @@ namespace Npgsql.Tests; -[NonParallelizable] public class MultipleHostsTests : TestBase { static readonly object[] MyCases = diff --git a/test/Npgsql.Tests/NpgsqlParameterCollectionTests.cs b/test/Npgsql.Tests/NpgsqlParameterCollectionTests.cs index 94c84b2747..87875117d2 100644 --- a/test/Npgsql.Tests/NpgsqlParameterCollectionTests.cs +++ b/test/Npgsql.Tests/NpgsqlParameterCollectionTests.cs @@ -7,9 +7,11 @@ namespace Npgsql.Tests; -[NonParallelizable] // This test class has global effects on case sensitive matching in param collection. [TestFixture(CompatMode.OnePass)] +#if DEBUG [TestFixture(CompatMode.TwoPass)] +[NonParallelizable] // This test class has global effects on case sensitive matching in param collection. 
+#endif public class NpgsqlParameterCollectionTests { readonly CompatMode _compatMode; diff --git a/test/Npgsql.Tests/ReadBufferTests.cs b/test/Npgsql.Tests/ReadBufferTests.cs index 2d76a39bf8..9246479355 100644 --- a/test/Npgsql.Tests/ReadBufferTests.cs +++ b/test/Npgsql.Tests/ReadBufferTests.cs @@ -8,7 +8,7 @@ namespace Npgsql.Tests; -[NonParallelizable] // Parallel access to a single buffer +[FixtureLifeCycle(LifeCycle.InstancePerTestCase)] // Parallel access to a single buffer class ReadBufferTests { [Test] diff --git a/test/Npgsql.Tests/TestUtil.cs b/test/Npgsql.Tests/TestUtil.cs index ecfdd85ff3..1eccd4b90e 100644 --- a/test/Npgsql.Tests/TestUtil.cs +++ b/test/Npgsql.Tests/TestUtil.cs @@ -117,10 +117,6 @@ static async Task EnsureExtension(NpgsqlConnection conn, string extension, strin conn.ExecuteNonQuery($"CREATE EXTENSION IF NOT EXISTS {extension}"); conn.ReloadTypes(); - - // Multiplexing doesn't really support reloading types, since each connector uses its own connector type mapper when reading, - // which is different from the pool-wise connector mapper (which is used when writing). 
- NpgsqlConnection.ClearPool(conn); } /// @@ -370,8 +366,8 @@ internal static IDisposable DisableSqlRewriting() NpgsqlCommand.EnableSqlRewriting = false; return new DeferredExecutionDisposable(() => NpgsqlCommand.EnableSqlRewriting = true); #else - Assert.Ignore("Cannot disable SQL rewriting in RELEASE builds"); - throw new NotSupportedException("Cannot disable SQL rewriting in RELEASE builds"); + Assert.Ignore("Cannot disable SQL rewriting in RELEASE builds"); + throw new NotSupportedException("Cannot disable SQL rewriting in RELEASE builds"); #endif } diff --git a/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs b/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs index 3bcf87378f..fc316adfee 100644 --- a/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs +++ b/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs @@ -9,8 +9,8 @@ namespace Npgsql.Tests.Types; [TestFixture(true)] #if DEBUG [TestFixture(false)] -#endif [NonParallelizable] +#endif public class DateTimeInfinityTests : TestBase, IDisposable { [Test] diff --git a/test/Npgsql.Tests/WriteBufferTests.cs b/test/Npgsql.Tests/WriteBufferTests.cs index fe8dc2e7d5..19603b1741 100644 --- a/test/Npgsql.Tests/WriteBufferTests.cs +++ b/test/Npgsql.Tests/WriteBufferTests.cs @@ -5,7 +5,7 @@ namespace Npgsql.Tests; -[NonParallelizable] // Parallel access to a single buffer +[FixtureLifeCycle(LifeCycle.InstancePerTestCase)] // Parallel access to a single buffer class WriteBufferTests { [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1275")] From 09fa2a218d9ccc467d47a80348bbf03aa9cb0ccf Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Sat, 18 Feb 2023 13:41:12 +0300 Subject: [PATCH 066/761] Replace --output with --property for pack in ci to fix 7.0.2 breaking change (#4938) https://github.com/dotnet/sdk/issues/30625#issuecomment-1433569096 --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 
d635b761d7..9be7434c50 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -351,7 +351,7 @@ jobs: dotnet-version: ${{ env.dotnet_sdk_version }} - name: Pack - run: dotnet pack Npgsql.sln --configuration Release --output nupkgs --version-suffix "ci.$(date -u +%Y%m%dT%H%M%S)+sha.${GITHUB_SHA:0:9}" -p:ContinuousIntegrationBuild=true + run: dotnet pack Npgsql.sln --configuration Release --property:PackageOutputPath=nupkgs --version-suffix "ci.$(date -u +%Y%m%dT%H%M%S)+sha.${GITHUB_SHA:0:9}" -p:ContinuousIntegrationBuild=true - name: Upload artifacts (nupkg) uses: actions/upload-artifact@v3 From af17de802bde4a3f8ac09195861ae32eeb725900 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Sat, 18 Feb 2023 14:14:41 +0300 Subject: [PATCH 067/761] Fix 09fa2a218d9ccc467d47a80348bbf03aa9cb0ccf by passing full path --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 9be7434c50..bd2750f79a 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -351,7 +351,7 @@ jobs: dotnet-version: ${{ env.dotnet_sdk_version }} - name: Pack - run: dotnet pack Npgsql.sln --configuration Release --property:PackageOutputPath=nupkgs --version-suffix "ci.$(date -u +%Y%m%dT%H%M%S)+sha.${GITHUB_SHA:0:9}" -p:ContinuousIntegrationBuild=true + run: dotnet pack Npgsql.sln --configuration Release --property:PackageOutputPath="$PWD/nupkgs" --version-suffix "ci.$(date -u +%Y%m%dT%H%M%S)+sha.${GITHUB_SHA:0:9}" -p:ContinuousIntegrationBuild=true - name: Upload artifacts (nupkg) uses: actions/upload-artifact@v3 From 888d2360f60c42c1654656bda47924b489aab668 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Tue, 21 Feb 2023 01:54:55 +0300 Subject: [PATCH 068/761] Fix nullability for NpgsqlBinaryImporter.WriteRow (#4929) Fixes #4919 --- src/Npgsql/NpgsqlBinaryImporter.cs | 28 ++++++++++++++-------------- src/Npgsql/PublicAPI.Unshipped.txt | 4 ++++ 2 files 
changed, 18 insertions(+), 14 deletions(-) diff --git a/src/Npgsql/NpgsqlBinaryImporter.cs b/src/Npgsql/NpgsqlBinaryImporter.cs index 3da41700a5..15af6e85a5 100644 --- a/src/Npgsql/NpgsqlBinaryImporter.cs +++ b/src/Npgsql/NpgsqlBinaryImporter.cs @@ -157,7 +157,7 @@ async Task StartRow(bool async, CancellationToken cancellationToken = default) /// corruption will occur. If in doubt, use to manually /// specify the type. /// - public void Write([AllowNull] T value) => Write(value, false).GetAwaiter().GetResult(); + public void Write(T value) => Write(value, false).GetAwaiter().GetResult(); /// /// Writes a single column in the current row. @@ -171,7 +171,7 @@ async Task StartRow(bool async, CancellationToken cancellationToken = default) /// corruption will occur. If in doubt, use to manually /// specify the type. /// - public Task WriteAsync([AllowNull] T value, CancellationToken cancellationToken = default) + public Task WriteAsync(T value, CancellationToken cancellationToken = default) { if (cancellationToken.IsCancellationRequested) return Task.FromCanceled(cancellationToken); @@ -179,7 +179,7 @@ public Task WriteAsync([AllowNull] T value, CancellationToken cancellationTok return Write(value, true, cancellationToken); } - Task Write([AllowNull] T value, bool async, CancellationToken cancellationToken = default) + Task Write(T value, bool async, CancellationToken cancellationToken = default) { CheckColumnIndex(); @@ -206,7 +206,7 @@ Task Write([AllowNull] T value, bool async, CancellationToken cancellationTok /// must be specified as . /// /// The .NET type of the column to be written. - public void Write([AllowNull] T value, NpgsqlDbType npgsqlDbType) => + public void Write(T value, NpgsqlDbType npgsqlDbType) => Write(value, npgsqlDbType, false).GetAwaiter().GetResult(); /// @@ -223,7 +223,7 @@ public void Write([AllowNull] T value, NpgsqlDbType npgsqlDbType) => /// An optional token to cancel the asynchronous operation. The default value is . 
/// /// The .NET type of the column to be written. - public Task WriteAsync([AllowNull] T value, NpgsqlDbType npgsqlDbType, CancellationToken cancellationToken = default) + public Task WriteAsync(T value, NpgsqlDbType npgsqlDbType, CancellationToken cancellationToken = default) { if (cancellationToken.IsCancellationRequested) return Task.FromCanceled(cancellationToken); @@ -231,7 +231,7 @@ public Task WriteAsync([AllowNull] T value, NpgsqlDbType npgsqlDbType, Cancel return Write(value, npgsqlDbType, true, cancellationToken); } - Task Write([AllowNull] T value, NpgsqlDbType npgsqlDbType, bool async, CancellationToken cancellationToken = default) + Task Write(T value, NpgsqlDbType npgsqlDbType, bool async, CancellationToken cancellationToken = default) { CheckColumnIndex(); @@ -260,7 +260,7 @@ Task Write([AllowNull] T value, NpgsqlDbType npgsqlDbType, bool async, Cancel /// the database. This parameter and be used to unambiguously specify the type. /// /// The .NET type of the column to be written. - public void Write([AllowNull] T value, string dataTypeName) => + public void Write(T value, string dataTypeName) => Write(value, dataTypeName, false).GetAwaiter().GetResult(); /// @@ -275,7 +275,7 @@ public void Write([AllowNull] T value, string dataTypeName) => /// An optional token to cancel the asynchronous operation. The default value is . /// /// The .NET type of the column to be written. 
- public Task WriteAsync([AllowNull] T value, string dataTypeName, CancellationToken cancellationToken = default) + public Task WriteAsync(T value, string dataTypeName, CancellationToken cancellationToken = default) { if (cancellationToken.IsCancellationRequested) return Task.FromCanceled(cancellationToken); @@ -283,7 +283,7 @@ public Task WriteAsync([AllowNull] T value, string dataTypeName, Cancellation return Write(value, dataTypeName, true, cancellationToken); } - Task Write([AllowNull] T value, string dataTypeName, bool async, CancellationToken cancellationToken = default) + Task Write(T value, string dataTypeName, bool async, CancellationToken cancellationToken = default) { CheckColumnIndex(); @@ -303,7 +303,7 @@ Task Write([AllowNull] T value, string dataTypeName, bool async, Cancellation return Write(value, p, async, cancellationToken); } - async Task Write([AllowNull] T value, NpgsqlParameter param, bool async, CancellationToken cancellationToken = default) + async Task Write(T value, NpgsqlParameter param, bool async, CancellationToken cancellationToken = default) { CheckReady(); if (_column == -1) @@ -372,7 +372,7 @@ async Task WriteNull(bool async, CancellationToken cancellationToken = default) /// on each value. /// /// An array of column values to be written as a single row - public void WriteRow(params object[] values) => WriteRow(false, CancellationToken.None, values).GetAwaiter().GetResult(); + public void WriteRow(params object?[] values) => WriteRow(false, CancellationToken.None, values).GetAwaiter().GetResult(); /// /// Writes an entire row of columns. @@ -383,7 +383,7 @@ async Task WriteNull(bool async, CancellationToken cancellationToken = default) /// An optional token to cancel the asynchronous operation. The default value is . 
/// /// An array of column values to be written as a single row - public Task WriteRowAsync(CancellationToken cancellationToken = default, params object[] values) + public Task WriteRowAsync(CancellationToken cancellationToken = default, params object?[] values) { if (cancellationToken.IsCancellationRequested) return Task.FromCanceled(cancellationToken); @@ -391,7 +391,7 @@ public Task WriteRowAsync(CancellationToken cancellationToken = default, params return WriteRow(true, cancellationToken, values); } - async Task WriteRow(bool async, CancellationToken cancellationToken = default, params object[] values) + async Task WriteRow(bool async, CancellationToken cancellationToken = default, params object?[] values) { await StartRow(async, cancellationToken); foreach (var value in values) @@ -613,4 +613,4 @@ enum ImporterState } #endregion Enums -} \ No newline at end of file +} diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index d8acd7915a..9614eca619 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -1,4 +1,6 @@ #nullable enable +Npgsql.NpgsqlBinaryImporter.WriteRow(params object?[]! values) -> void +Npgsql.NpgsqlBinaryImporter.WriteRowAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken), params object?[]! values) -> System.Threading.Tasks.Task! Npgsql.NpgsqlJsonExtensions static Npgsql.NpgsqlJsonExtensions.UseSystemTextJson(this Npgsql.TypeMapping.INpgsqlTypeMapper! mapper, System.Text.Json.JsonSerializerOptions? serializerOptions = null, System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! *REMOVED*static NpgsqlTypes.NpgsqlBox.Parse(string! s) -> NpgsqlTypes.NpgsqlBox @@ -8,3 +10,5 @@ static Npgsql.NpgsqlJsonExtensions.UseSystemTextJson(this Npgsql.TypeMapping.INp *REMOVED*static NpgsqlTypes.NpgsqlPath.Parse(string! 
s) -> NpgsqlTypes.NpgsqlPath *REMOVED*static NpgsqlTypes.NpgsqlPoint.Parse(string! s) -> NpgsqlTypes.NpgsqlPoint *REMOVED*static NpgsqlTypes.NpgsqlPolygon.Parse(string! s) -> NpgsqlTypes.NpgsqlPolygon +*REMOVED*Npgsql.NpgsqlBinaryImporter.WriteRow(params object![]! values) -> void +*REMOVED*Npgsql.NpgsqlBinaryImporter.WriteRowAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken), params object![]! values) -> System.Threading.Tasks.Task! \ No newline at end of file From 08ab92dd2225d8c77a766cf55f0f01c5f1a9e9a3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 21 Feb 2023 23:03:49 +0100 Subject: [PATCH 069/761] Bump Microsoft.Data.SqlClient from 5.0.1 to 5.1.0 (#4894) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 2e93e648b2..5cfdfce4c9 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -39,7 +39,7 @@ - + From 63f31bcca1852b600c46bcece780fb8bc65744f5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 21 Feb 2023 23:02:44 +0000 Subject: [PATCH 070/761] Bump Microsoft.NET.Test.Sdk from 17.4.1 to 17.5.0 (#4940) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 5cfdfce4c9..d422b331e4 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -29,7 +29,7 @@ - + From 64a0bfac7711489a59e4c59f6bf6464745248865 Mon Sep 17 00:00:00 2001 From: Brar Piening Date: Sun, 26 Feb 2023 18:38:17 +0100 Subject: [PATCH 071/761] Update the .NET TFMs for Npgsql 8 (#4878) - Remove netcoreapp3.1 and net5.0 (out of support) - Add net8.0 - Upgrade DeveloperBuild to net8.0 --- .devcontainer/docker-compose.yml | 3 ++- .github/workflows/build.yml | 10 ++++----- 
.github/workflows/codeql-analysis.yml | 15 +++++++++++-- .github/workflows/native-aot.yml | 12 +++++----- .github/workflows/rich-code-nav.yml | 2 +- Directory.Packages.props | 22 ++++++++++++++----- global.json | 2 +- .../Npgsql.DependencyInjection.csproj | 4 ++-- src/Npgsql.GeoJSON/Npgsql.GeoJSON.csproj | 2 +- src/Npgsql.Json.NET/Npgsql.Json.NET.csproj | 2 +- .../Npgsql.NetTopologySuite.csproj | 2 +- src/Npgsql.NodaTime/Npgsql.NodaTime.csproj | 4 ++-- .../Npgsql.OpenTelemetry.csproj | 2 +- src/Npgsql/Internal/NpgsqlConnector.cs | 7 ++++-- .../Internal/TypeHandlers/ByteaHandler.cs | 7 +----- .../TypeHandlers/SystemTextJsonHandler.cs | 15 +------------ src/Npgsql/Npgsql.csproj | 13 ++++------- .../TypeMapping/JsonTypeHandlerResolver.cs | 11 +--------- test/Directory.Build.props | 13 +++++++++-- test/MStatDumper/MStatDumper.csproj | 3 ++- .../Npgsql.DependencyInjection.Tests.csproj | 2 +- .../Npgsql.NativeAotTests.csproj | 3 ++- test/Npgsql.Tests/Types/ByteaTests.cs | 2 -- 23 files changed, 80 insertions(+), 78 deletions(-) diff --git a/.devcontainer/docker-compose.yml b/.devcontainer/docker-compose.yml index 4628935113..a58f310f4f 100644 --- a/.devcontainer/docker-compose.yml +++ b/.devcontainer/docker-compose.yml @@ -2,7 +2,8 @@ version: '3' services: npgsql-dev: - image: mcr.microsoft.com/dotnet/sdk:7.0 + # Source for tags: https://mcr.microsoft.com/v2/dotnet/sdk/tags/list + image: mcr.microsoft.com/dotnet/sdk:8.0.100-preview.1 volumes: - ..:/workspace:cached tty: true diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index bd2750f79a..218414cc30 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -10,7 +10,7 @@ on: pull_request: env: - dotnet_sdk_version: '7.0.100' + dotnet_sdk_version: '8.0.100-preview.1.23115.2' postgis_version: 3 DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true # Windows comes with PG pre-installed, and defines the PGPASSWORD environment variable. 
Remove it as it interferes @@ -27,12 +27,12 @@ jobs: os: [ubuntu-22.04, windows-2022] pg_major: [15, 14, 13, 12, 11, 10] config: [Release] - test_tfm: [net7.0] + test_tfm: [net8.0] include: - os: ubuntu-22.04 pg_major: 15 config: Debug - test_tfm: net7.0 + test_tfm: net8.0 - os: ubuntu-22.04 pg_major: 15 config: Release @@ -40,11 +40,11 @@ jobs: - os: macos-12 pg_major: 14 config: Release - test_tfm: net7.0 + test_tfm: net8.0 # - os: ubuntu-22.04 # pg_major: 15 # config: Release -# test_tfm: net7.0 +# test_tfm: net8.0 # pg_prerelease: 'PG Prerelease' outputs: diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 945fa9bd46..cf3be202e6 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -26,6 +26,9 @@ on: schedule: - cron: '21 0 * * 4' +env: + dotnet_sdk_version: '8.0.100-preview.1.23115.2' + jobs: analyze: name: Analyze @@ -56,10 +59,18 @@ jobs: # Prefix the list here with "+" to use these queries and those in the config file. # queries: ./path/to/local/query, your-org/your-repo/queries@main + - name: Setup .NET Core SDK + uses: actions/setup-dotnet@v3.0.3 + with: + dotnet-version: ${{ env.dotnet_sdk_version }} + + - name: Build + run: dotnet build -c Release + # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - - name: Autobuild - uses: github/codeql-action/autobuild@v2 + #- name: Autobuild + # uses: github/codeql-action/autobuild@v2 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 https://git.io/JvXDl diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index e23f8fc80b..240f8ff74d 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -10,7 +10,7 @@ on: pull_request: env: - dotnet_sdk_version: '7.0.102' + dotnet_sdk_version: '8.0.100-preview.1.23115.2' DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true jobs: @@ -46,7 +46,7 @@ jobs: shell: bash - name: Build - run: dotnet publish test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj -r linux-x64 -c Release -f net7.0 + run: dotnet publish test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj -r linux-x64 -c Release -f net8.0 shell: bash # Uncomment the following to SSH into the agent running the build (https://github.com/mxschmitt/action-tmate) @@ -61,19 +61,19 @@ jobs: sudo -u postgres psql -c "CREATE DATABASE npgsql_tests OWNER npgsql_tests" - name: Run - run: test/Npgsql.NativeAotTests/bin/Release/net7.0/linux-x64/native/Npgsql.NativeAotTests + run: test/Npgsql.NativeAotTests/bin/Release/net8.0/linux-x64/native/Npgsql.NativeAotTests - name: Write binary size to summary run: | - size="$(ls -l test/Npgsql.NativeAotTests/bin/Release/net7.0/linux-x64/native/Npgsql.NativeAotTests | cut -d ' ' -f 5)" + size="$(ls -l test/Npgsql.NativeAotTests/bin/Release/net8.0/linux-x64/native/Npgsql.NativeAotTests | cut -d ' ' -f 5)" echo "Binary size is $size bytes ($((size / (1024 * 1024))) mb)" >> $GITHUB_STEP_SUMMARY - name: Dump mstat - run: dotnet run --project test/MStatDumper/MStatDumper.csproj -c release -- "test/Npgsql.NativeAotTests/obj/Release/net7.0/linux-x64/native/Npgsql.NativeAotTests.mstat" md >> $GITHUB_STEP_SUMMARY + run: dotnet run --project test/MStatDumper/MStatDumper.csproj -c release -f net8.0 -- "test/Npgsql.NativeAotTests/obj/Release/net8.0/linux-x64/native/Npgsql.NativeAotTests.mstat" md >> $GITHUB_STEP_SUMMARY - name: Assert binary size run: | - size="$(ls -l 
test/Npgsql.NativeAotTests/bin/Release/net7.0/linux-x64/native/Npgsql.NativeAotTests | cut -d ' ' -f 5)" + size="$(ls -l test/Npgsql.NativeAotTests/bin/Release/net8.0/linux-x64/native/Npgsql.NativeAotTests | cut -d ' ' -f 5)" echo "Binary size is $size bytes ($((size / (1024 * 1024))) mb)" if (( size > 36700160 )); then diff --git a/.github/workflows/rich-code-nav.yml b/.github/workflows/rich-code-nav.yml index 118c2e3e28..1a7ef579ec 100644 --- a/.github/workflows/rich-code-nav.yml +++ b/.github/workflows/rich-code-nav.yml @@ -9,7 +9,7 @@ on: - '*' env: - dotnet_sdk_version: '7.0.100' + dotnet_sdk_version: '8.0.100-preview.1.23115.2' DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true jobs: diff --git a/Directory.Packages.props b/Directory.Packages.props index d422b331e4..6fa76f742a 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -1,15 +1,9 @@  - - - - - - @@ -45,4 +39,20 @@ + + + + + + + + + + + + + + + + diff --git a/global.json b/global.json index 15a6b04b3b..cfe92c4218 100644 --- a/global.json +++ b/global.json @@ -1,6 +1,6 @@ { "sdk": { - "version": "7.0.100", + "version": "8.0.100", "rollForward": "latestMajor", "allowPrerelease": "true" } diff --git a/src/Npgsql.DependencyInjection/Npgsql.DependencyInjection.csproj b/src/Npgsql.DependencyInjection/Npgsql.DependencyInjection.csproj index bd23ae4e27..97dba6845c 100644 --- a/src/Npgsql.DependencyInjection/Npgsql.DependencyInjection.csproj +++ b/src/Npgsql.DependencyInjection/Npgsql.DependencyInjection.csproj @@ -3,8 +3,8 @@ Shay Rojansky - netstandard2.0;net7.0 - net7.0 + netstandard2.0;net7.0 + net8.0 npgsql;postgresql;postgres;ado;ado.net;database;sql;di;dependency injection README.md diff --git a/src/Npgsql.GeoJSON/Npgsql.GeoJSON.csproj b/src/Npgsql.GeoJSON/Npgsql.GeoJSON.csproj index bff8f6cede..6441951b97 100644 --- a/src/Npgsql.GeoJSON/Npgsql.GeoJSON.csproj +++ b/src/Npgsql.GeoJSON/Npgsql.GeoJSON.csproj @@ -4,7 +4,7 @@ GeoJSON plugin for Npgsql, allowing mapping of PostGIS geometry types to 
GeoJSON types. npgsql;postgresql;postgres;postgis;geojson;spatial;ado;ado.net;database;sql netstandard2.0 - net7.0 + net8.0 diff --git a/src/Npgsql.Json.NET/Npgsql.Json.NET.csproj b/src/Npgsql.Json.NET/Npgsql.Json.NET.csproj index 94d5b9f4d7..abd1a4ea6d 100644 --- a/src/Npgsql.Json.NET/Npgsql.Json.NET.csproj +++ b/src/Npgsql.Json.NET/Npgsql.Json.NET.csproj @@ -4,7 +4,7 @@ Json.NET plugin for Npgsql, allowing transparent serialization/deserialization of JSON objects directly to and from the database. npgsql;postgresql;json;postgres;ado;ado.net;database;sql netstandard2.0 - net7.0 + net8.0 diff --git a/src/Npgsql.NetTopologySuite/Npgsql.NetTopologySuite.csproj b/src/Npgsql.NetTopologySuite/Npgsql.NetTopologySuite.csproj index 82a1efa1e6..e09653ac97 100644 --- a/src/Npgsql.NetTopologySuite/Npgsql.NetTopologySuite.csproj +++ b/src/Npgsql.NetTopologySuite/Npgsql.NetTopologySuite.csproj @@ -5,7 +5,7 @@ npgsql;postgresql;postgres;postgis;spatial;nettopologysuite;nts;ado;ado.net;database;sql README.md netstandard2.0 - net7.0 + net8.0 $(NoWarn);NU5104 diff --git a/src/Npgsql.NodaTime/Npgsql.NodaTime.csproj b/src/Npgsql.NodaTime/Npgsql.NodaTime.csproj index f83f6382e3..1fc55fae1e 100644 --- a/src/Npgsql.NodaTime/Npgsql.NodaTime.csproj +++ b/src/Npgsql.NodaTime/Npgsql.NodaTime.csproj @@ -4,8 +4,8 @@ NodaTime plugin for Npgsql, allowing mapping of PostgreSQL date/time types to NodaTime types. 
npgsql;postgresql;postgres;nodatime;date;time;ado;ado;net;database;sql README.md - netstandard2.0;net6.0 - net7.0 + netstandard2.0;net6.0 + net8.0 diff --git a/src/Npgsql.OpenTelemetry/Npgsql.OpenTelemetry.csproj b/src/Npgsql.OpenTelemetry/Npgsql.OpenTelemetry.csproj index 7f9fea3eea..d2b8e620a7 100644 --- a/src/Npgsql.OpenTelemetry/Npgsql.OpenTelemetry.csproj +++ b/src/Npgsql.OpenTelemetry/Npgsql.OpenTelemetry.csproj @@ -3,7 +3,7 @@ Shay Rojansky netstandard2.0 - net7.0 + net8.0 npgsql;postgresql;postgres;ado;ado.net;database;sql;opentelemetry;tracing;diagnostics;instrumentation README.md diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index aa277a7d1e..5ee3dbf3c1 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -825,7 +825,10 @@ async Task RawOpen(SslMode sslMode, NpgsqlTimeout timeout, bool async, Cancellat cert = new X509Certificate2(cert.Export(X509ContentType.Pkcs12)); } #else - throw new NotSupportedException("PEM certificates are only supported with .NET 5 and higher"); + // Technically PEM certificates are supported as of .NET 5 but we don't build for the net5.0 + // TFM anymore since .NET 5 is out of support + // This is a breaking change for .NET 5 as of Npgsql 8! 
+ throw new NotSupportedException("PEM certificates are only supported with .NET 6 and higher"); #endif } @@ -879,8 +882,8 @@ async Task RawOpen(SslMode sslMode, NpgsqlTimeout timeout, bool async, Cancellat var sslStream = new SslStream(_stream, leaveInnerStreamOpen: false, certificateValidationCallback); var sslProtocols = SslProtocols.None; - // On .NET Framework SslProtocols.None can be disabled, see #3718 #if NETSTANDARD2_0 + // On .NET Framework SslProtocols.None can be disabled, see #3718 sslProtocols = SslProtocols.Tls | SslProtocols.Tls11 | SslProtocols.Tls12; #endif diff --git a/src/Npgsql/Internal/TypeHandlers/ByteaHandler.cs b/src/Npgsql/Internal/TypeHandlers/ByteaHandler.cs index e4d1a0df1a..785250989e 100644 --- a/src/Npgsql/Internal/TypeHandlers/ByteaHandler.cs +++ b/src/Npgsql/Internal/TypeHandlers/ByteaHandler.cs @@ -18,10 +18,7 @@ namespace Npgsql.Internal.TypeHandlers; /// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. /// Use it at your own risk. /// -public partial class ByteaHandler : NpgsqlTypeHandler, INpgsqlTypeHandler>, INpgsqlTypeHandler -#if !NETSTANDARD2_0 - , INpgsqlTypeHandler>, INpgsqlTypeHandler> -#endif +public partial class ByteaHandler : NpgsqlTypeHandler, INpgsqlTypeHandler>, INpgsqlTypeHandler, INpgsqlTypeHandler>, INpgsqlTypeHandler> { public ByteaHandler(PostgresType pgType) : base(pgType) {} @@ -113,7 +110,6 @@ async Task Write(byte[] value, NpgsqlWriteBuffer buf, int offset, int count, boo Task Write(Stream value, NpgsqlWriteBuffer buf, int count, bool async, CancellationToken cancellationToken = default) => buf.WriteStreamRaw(value, count, async, cancellationToken); -#if !NETSTANDARD2_0 /// public int ValidateAndGetLength(Memory value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) => ValidateAndGetLength(value.Length, parameter); @@ -149,5 +145,4 @@ ValueTask> INpgsqlTypeHandler>.Read(Np ValueTask> INpgsqlTypeHandler>.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) => throw new NotSupportedException("Only writing Memory to PostgreSQL bytea is supported, no reading."); -#endif } \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/SystemTextJsonHandler.cs b/src/Npgsql/Internal/TypeHandlers/SystemTextJsonHandler.cs index 34f35b0d6f..5ba3f03b3e 100644 --- a/src/Npgsql/Internal/TypeHandlers/SystemTextJsonHandler.cs +++ b/src/Npgsql/Internal/TypeHandlers/SystemTextJsonHandler.cs @@ -3,16 +3,13 @@ using System.IO; using System.Text; using System.Text.Json; +using System.Text.Json.Nodes; using System.Threading; using System.Threading.Tasks; using Npgsql.BackendMessages; using Npgsql.Internal.TypeHandling; using Npgsql.PostgresTypes; -#if NET6_0_OR_GREATER || NETSTANDARD2_0 || NETSTANDARD2_1 -using System.Text.Json.Nodes; -#endif - namespace Npgsql.Internal.TypeHandlers; /// @@ -65,7 +62,6 @@ protected internal override int ValidateAndGetLengthCustom([DisallowNull] return lengthCache.Set(data.Length + _headerLen); } -#if NET6_0_OR_GREATER || NETSTANDARD2_0 || NETSTANDARD2_1 if (typeof(TAny) == typeof(JsonObject) || typeof(TAny) == typeof(JsonArray)) { lengthCache ??= new NpgsqlLengthCache(1); @@ -77,7 +73,6 @@ protected internal override int ValidateAndGetLengthCustom([DisallowNull] parameter.ConvertedValue = data; return lengthCache.Set(data.Length + _headerLen); } -#endif // User POCO, need to serialize. At least internally ArrayPool buffers are used... 
var s = JsonSerializer.Serialize(value, _serializerOptions); @@ -113,7 +108,6 @@ protected override async Task WriteWithLengthCustom([DisallowNull] TAny va : SerializeJsonDocument((JsonDocument)(object)value); await buf.WriteBytesRaw(data, async, cancellationToken); } -#if NET6_0_OR_GREATER || NETSTANDARD2_0 || NETSTANDARD2_1 else if (typeof(TAny) == typeof(JsonObject) || typeof(TAny) == typeof(JsonArray)) { var data = parameter?.ConvertedValue != null @@ -121,7 +115,6 @@ protected override async Task WriteWithLengthCustom([DisallowNull] TAny va : SerializeJsonObject((JsonNode)(object)value); await buf.WriteBytesRaw(data, async, cancellationToken); } -#endif else { // User POCO, read serialized representation from the validation phase @@ -140,10 +133,8 @@ public override int ValidateObjectAndGetLength(object value, ref NpgsqlLengthCac : value switch { JsonDocument jsonDocument => ValidateAndGetLengthCustom(jsonDocument, ref lengthCache, parameter), -#if NET6_0_OR_GREATER || NETSTANDARD2_0 || NETSTANDARD2_1 JsonObject jsonObject => ValidateAndGetLengthCustom(jsonObject, ref lengthCache, parameter), JsonArray jsonArray => ValidateAndGetLengthCustom(jsonArray, ref lengthCache, parameter), -#endif _ => ValidateAndGetLengthCustom(value, ref lengthCache, parameter) }; @@ -154,10 +145,8 @@ public override Task WriteObjectWithLength(object? 
value, NpgsqlWriteBuffer buf, : value switch { JsonDocument jsonDocument => WriteWithLengthCustom(jsonDocument, buf, lengthCache, parameter, async, cancellationToken), -#if NET6_0_OR_GREATER || NETSTANDARD2_0 || NETSTANDARD2_1 JsonObject jsonObject => WriteWithLengthCustom(jsonObject, buf, lengthCache, parameter, async, cancellationToken), JsonArray jsonArray => WriteWithLengthCustom(jsonArray, buf, lengthCache, parameter, async, cancellationToken), -#endif _ => WriteWithLengthCustom(value, buf, lengthCache, parameter, async, cancellationToken), }; @@ -207,7 +196,6 @@ byte[] SerializeJsonDocument(JsonDocument document) return stream.ToArray(); } -#if NET6_0_OR_GREATER || NETSTANDARD2_0 || NETSTANDARD2_1 byte[] SerializeJsonObject(JsonNode jsonObject) { // TODO: Writing is currently really inefficient - please don't criticize :) @@ -218,5 +206,4 @@ byte[] SerializeJsonObject(JsonNode jsonObject) writer.Flush(); return stream.ToArray(); } -#endif } \ No newline at end of file diff --git a/src/Npgsql/Npgsql.csproj b/src/Npgsql/Npgsql.csproj index 7dd3999425..310da0dad0 100644 --- a/src/Npgsql/Npgsql.csproj +++ b/src/Npgsql/Npgsql.csproj @@ -5,10 +5,8 @@ Npgsql is the open source .NET data provider for PostgreSQL. 
npgsql;postgresql;postgres;ado;ado.net;database;sql README.md - - netstandard2.0;netstandard2.1;netcoreapp3.1;net5.0;net6.0;net7.0 - net7.0 + netstandard2.0;netstandard2.1;net6.0;net7.0;net8.0 + net8.0 @@ -19,7 +17,7 @@ - + @@ -31,13 +29,10 @@ - - - - + diff --git a/src/Npgsql/TypeMapping/JsonTypeHandlerResolver.cs b/src/Npgsql/TypeMapping/JsonTypeHandlerResolver.cs index 8522a48af5..ba599cd9d5 100644 --- a/src/Npgsql/TypeMapping/JsonTypeHandlerResolver.cs +++ b/src/Npgsql/TypeMapping/JsonTypeHandlerResolver.cs @@ -1,16 +1,13 @@ using System; using System.Collections.Generic; using System.Text.Json; +using System.Text.Json.Nodes; using Npgsql.Internal; using Npgsql.Internal.TypeHandlers; using Npgsql.Internal.TypeHandling; using Npgsql.PostgresTypes; using NpgsqlTypes; -#if NET6_0_OR_GREATER || NETSTANDARD2_0 || NETSTANDARD2_1 -using System.Text.Json.Nodes; -#endif - namespace Npgsql.TypeMapping; sealed class JsonTypeHandlerResolver : TypeHandlerResolver @@ -48,9 +45,7 @@ internal JsonTypeHandlerResolver( internal static string? ClrTypeToDataTypeName(Type type, Dictionary? clrTypes) => type == typeof(JsonDocument) -#if NET6_0_OR_GREATER || NETSTANDARD2_0 || NETSTANDARD2_1 || type == typeof(JsonObject) || type == typeof(JsonArray) -#endif ? "jsonb" : clrTypes is not null && clrTypes.TryGetValue(type, out var dataTypeName) ? 
dataTypeName : null; @@ -61,9 +56,7 @@ internal JsonTypeHandlerResolver( => dataTypeName switch { "jsonb" => new(NpgsqlDbType.Jsonb, "jsonb", typeof(JsonDocument) -#if NET6_0_OR_GREATER || NETSTANDARD2_0 || NETSTANDARD2_1 , typeof(JsonObject), typeof(JsonArray) -#endif ), "json" => new(NpgsqlDbType.Json, "json"), _ => null @@ -73,10 +66,8 @@ internal JsonTypeHandlerResolver( { if (typeof(T) == typeof(JsonDocument)) return _jsonbHandler; -#if NET6_0_OR_GREATER || NETSTANDARD2_0 || NETSTANDARD2_1 if (typeof(T) == typeof(JsonObject) || typeof(T) == typeof(JsonArray)) return _jsonbHandler; -#endif return null; } diff --git a/test/Directory.Build.props b/test/Directory.Build.props index 4aa685bc22..59f7665837 100644 --- a/test/Directory.Build.props +++ b/test/Directory.Build.props @@ -2,14 +2,23 @@ - net7.0;netcoreapp3.1 - net7.0 + net8.0;netcoreapp3.1 + net8.0 false $(NoWarn);CA2252 + + + true + + diff --git a/test/MStatDumper/MStatDumper.csproj b/test/MStatDumper/MStatDumper.csproj index 2421fe3b15..3cab4d57fd 100644 --- a/test/MStatDumper/MStatDumper.csproj +++ b/test/MStatDumper/MStatDumper.csproj @@ -2,7 +2,8 @@ Exe - net7.0 + + net8.0 enable disable diff --git a/test/Npgsql.DependencyInjection.Tests/Npgsql.DependencyInjection.Tests.csproj b/test/Npgsql.DependencyInjection.Tests/Npgsql.DependencyInjection.Tests.csproj index f577d83d58..9637e56366 100644 --- a/test/Npgsql.DependencyInjection.Tests/Npgsql.DependencyInjection.Tests.csproj +++ b/test/Npgsql.DependencyInjection.Tests/Npgsql.DependencyInjection.Tests.csproj @@ -1,7 +1,7 @@ - net7.0 + net8.0 diff --git a/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj b/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj index 3d4dee6080..5a18264e56 100644 --- a/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj +++ b/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj @@ -2,7 +2,8 @@ exe true - net7.0 + + net8.0 true true true diff --git a/test/Npgsql.Tests/Types/ByteaTests.cs 
b/test/Npgsql.Tests/Types/ByteaTests.cs index 56f533dba4..f29f6e490b 100644 --- a/test/Npgsql.Tests/Types/ByteaTests.cs +++ b/test/Npgsql.Tests/Types/ByteaTests.cs @@ -34,7 +34,6 @@ public async Task Bytea_long() await Bytea(array, sqlLiteral); } -#if !NETSTANDARD2_0 [Test] public Task Write_as_Memory() => AssertTypeWrite( @@ -52,7 +51,6 @@ public Task Write_as_ReadOnlyMemory() [Test] public Task Read_as_ReadOnlyMemory_not_supported() => AssertTypeUnsupportedRead, NotSupportedException>("\\x010203", "bytea"); -#endif [Test] public Task Write_as_ArraySegment() From 2ed040bea2da5b2beb4819a502f5d14d06eee634 Mon Sep 17 00:00:00 2001 From: Brar Piening Date: Sun, 26 Feb 2023 19:50:00 +0100 Subject: [PATCH 072/761] Update license year to 2023 --- LICENSE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LICENSE b/LICENSE index b102b0e388..efec310cda 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2002-2021, Npgsql +Copyright (c) 2002-2023, Npgsql Permission to use, copy, modify, and distribute this software and its documentation for any purpose, without fee, and without a written agreement From d110eef2bb8281f22bfacbed53db17f30540caa9 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Mon, 27 Feb 2023 00:55:47 +0300 Subject: [PATCH 073/761] Rewrite type handler lookup (#4900) Closes #4481 Closes #4429 --- .../Internal/GeoJSONTypeHandlerResolver.cs | 4 +- .../Internal/JsonNetTypeHandlerResolver.cs | 4 +- .../NetTopologySuiteTypeHandlerResolver.cs | 4 +- .../Internal/NodaTimeTypeHandlerResolver.cs | 4 +- .../TypeHandling/TypeHandlerResolver.cs | 9 +- .../TypeMapping/BuiltInTypeHandlerResolver.cs | 4 +- .../TypeMapping/JsonTypeHandlerResolver.cs | 4 +- src/Npgsql/TypeMapping/TypeMapper.cs | 245 +++++++++--------- test/Npgsql.Tests/ReaderTests.cs | 2 +- test/Npgsql.Tests/TypeMapperTests.cs | 2 +- 10 files changed, 151 insertions(+), 131 deletions(-) diff --git a/src/Npgsql.GeoJSON/Internal/GeoJSONTypeHandlerResolver.cs 
b/src/Npgsql.GeoJSON/Internal/GeoJSONTypeHandlerResolver.cs index 862dc80947..6291760011 100644 --- a/src/Npgsql.GeoJSON/Internal/GeoJSONTypeHandlerResolver.cs +++ b/src/Npgsql.GeoJSON/Internal/GeoJSONTypeHandlerResolver.cs @@ -76,8 +76,8 @@ internal GeoJSONTypeHandlerResolver(NpgsqlConnector connector, GeoJSONOptions op ? "geography" : "geometry"; - public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) - => DoGetMappingByDataTypeName(dataTypeName); + public override TypeMappingInfo? GetMappingByPostgresType(PostgresType type) + => DoGetMappingByDataTypeName(type.Name); internal static TypeMappingInfo? DoGetMappingByDataTypeName(string dataTypeName) => dataTypeName switch diff --git a/src/Npgsql.Json.NET/Internal/JsonNetTypeHandlerResolver.cs b/src/Npgsql.Json.NET/Internal/JsonNetTypeHandlerResolver.cs index 338e695f65..d925405063 100644 --- a/src/Npgsql.Json.NET/Internal/JsonNetTypeHandlerResolver.cs +++ b/src/Npgsql.Json.NET/Internal/JsonNetTypeHandlerResolver.cs @@ -44,8 +44,8 @@ internal JsonNetTypeHandlerResolver( internal static string? ClrTypeToDataTypeName(Type type, Dictionary clrTypes) => clrTypes.TryGetValue(type, out var dataTypeName) ? dataTypeName : null; - public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) - => DoGetMappingByDataTypeName(dataTypeName); + public override TypeMappingInfo? GetMappingByPostgresType(PostgresType type) + => DoGetMappingByDataTypeName(type.Name); internal static TypeMappingInfo? 
DoGetMappingByDataTypeName(string dataTypeName) => dataTypeName switch diff --git a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeHandlerResolver.cs b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeHandlerResolver.cs index f327e93eb6..660664028f 100644 --- a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeHandlerResolver.cs +++ b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeHandlerResolver.cs @@ -58,8 +58,8 @@ internal NetTopologySuiteTypeHandlerResolver( ? "geography" : "geometry"; - public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) - => DoGetMappingByDataTypeName(dataTypeName); + public override TypeMappingInfo? GetMappingByPostgresType(PostgresType type) + => DoGetMappingByDataTypeName(type.Name); internal static TypeMappingInfo? DoGetMappingByDataTypeName(string dataTypeName) => dataTypeName switch diff --git a/src/Npgsql.NodaTime/Internal/NodaTimeTypeHandlerResolver.cs b/src/Npgsql.NodaTime/Internal/NodaTimeTypeHandlerResolver.cs index 3d51c7f82c..56c01b04b2 100644 --- a/src/Npgsql.NodaTime/Internal/NodaTimeTypeHandlerResolver.cs +++ b/src/Npgsql.NodaTime/Internal/NodaTimeTypeHandlerResolver.cs @@ -178,8 +178,8 @@ internal NodaTimeTypeHandlerResolver(NpgsqlConnector connector) return null; } - public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) - => DoGetMappingByDataTypeName(dataTypeName); + public override TypeMappingInfo? GetMappingByPostgresType(PostgresType type) + => DoGetMappingByDataTypeName(type.Name); internal static TypeMappingInfo? 
DoGetMappingByDataTypeName(string dataTypeName) => dataTypeName switch diff --git a/src/Npgsql/Internal/TypeHandling/TypeHandlerResolver.cs b/src/Npgsql/Internal/TypeHandling/TypeHandlerResolver.cs index 4170f75c2b..43431073de 100644 --- a/src/Npgsql/Internal/TypeHandling/TypeHandlerResolver.cs +++ b/src/Npgsql/Internal/TypeHandling/TypeHandlerResolver.cs @@ -1,4 +1,5 @@ using System; +using Npgsql.PostgresTypes; namespace Npgsql.Internal.TypeHandling; @@ -18,6 +19,12 @@ public abstract class TypeHandlerResolver /// public abstract NpgsqlTypeHandler? ResolveByClrType(Type type); + /// + /// Resolves a type handler given a PostgreSQL type. + /// + public virtual NpgsqlTypeHandler? ResolveByPostgresType(PostgresType type) + => ResolveByDataTypeName(type.Name); + public virtual NpgsqlTypeHandler? ResolveValueDependentValue(object value) => null; public virtual NpgsqlTypeHandler? ResolveValueTypeGenerically(T value) => null; @@ -26,5 +33,5 @@ public abstract class TypeHandlerResolver /// Gets type mapping information for a given PostgreSQL type. /// Invoked in scenarios when mapping information is required, rather than a type handler for reading or writing. /// - public abstract TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName); + public abstract TypeMappingInfo? GetMappingByPostgresType(PostgresType type); } \ No newline at end of file diff --git a/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs b/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs index 44b6dd809c..caad112e5f 100644 --- a/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs +++ b/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs @@ -650,8 +650,8 @@ static DateTimeKind GetMultirangeKind(IList> multirange) internal static string? ClrTypeToDataTypeName(Type type) => ClrTypeToDataTypeNameTable.TryGetValue(type, out var dataTypeName) ? dataTypeName : null; - public override TypeMappingInfo? 
GetMappingByDataTypeName(string dataTypeName) - => DoGetMappingByDataTypeName(dataTypeName); + public override TypeMappingInfo? GetMappingByPostgresType(PostgresType type) + => DoGetMappingByDataTypeName(type.Name); internal static TypeMappingInfo? DoGetMappingByDataTypeName(string dataTypeName) => Mappings.TryGetValue(dataTypeName, out var mapping) ? mapping : null; diff --git a/src/Npgsql/TypeMapping/JsonTypeHandlerResolver.cs b/src/Npgsql/TypeMapping/JsonTypeHandlerResolver.cs index ba599cd9d5..8265a886cd 100644 --- a/src/Npgsql/TypeMapping/JsonTypeHandlerResolver.cs +++ b/src/Npgsql/TypeMapping/JsonTypeHandlerResolver.cs @@ -49,8 +49,8 @@ internal JsonTypeHandlerResolver( ? "jsonb" : clrTypes is not null && clrTypes.TryGetValue(type, out var dataTypeName) ? dataTypeName : null; - public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) - => DoGetMappingByDataTypeName(dataTypeName); + public override TypeMappingInfo? GetMappingByPostgresType(PostgresType type) + => DoGetMappingByDataTypeName(type.Name); internal static TypeMappingInfo? DoGetMappingByDataTypeName(string dataTypeName) => dataTypeName switch diff --git a/src/Npgsql/TypeMapping/TypeMapper.cs b/src/Npgsql/TypeMapping/TypeMapper.cs index d56092e86e..b28758ed15 100644 --- a/src/Npgsql/TypeMapping/TypeMapper.cs +++ b/src/Npgsql/TypeMapping/TypeMapper.cs @@ -5,7 +5,6 @@ using System.Diagnostics.CodeAnalysis; using System.Linq; using System.Reflection; -using System.Runtime.CompilerServices; using Microsoft.Extensions.Logging; using Npgsql.Internal; using Npgsql.Internal.TypeHandlers; @@ -103,31 +102,33 @@ internal bool TryResolveByOID(uint oid, [NotNullWhen(true)] out NpgsqlTypeHandle if (_handlersByOID.TryGetValue(oid, out handler)) return true; - if (!DatabaseInfo.ByOID.TryGetValue(oid, out var pgType)) - return false; + return TryResolveLong(oid, out handler); - lock (_writeLock) + bool TryResolveLong(uint oid, [NotNullWhen(true)] out NpgsqlTypeHandler? 
handler) { - if ((handler = ResolveByDataTypeNameCore(pgType.FullName)) is not null) + if (!DatabaseInfo.ByOID.TryGetValue(oid, out var pgType)) { - _handlersByOID[oid] = handler; - return true; + handler = null; + return false; } - - if ((handler = ResolveByDataTypeNameCore(pgType.Name)) is not null) - { - _handlersByOID[oid] = handler; - return true; - } - - if ((handler = ResolveComplexTypeByDataTypeName(pgType.FullName, throwOnError: false)) is not null) + + lock (_writeLock) { - _handlersByOID[oid] = handler; - return true; - } + if ((handler = ResolveByPostgresType(pgType)) is not null) + { + _handlersByOID[oid] = handler; + return true; + } - handler = null; - return false; + if ((handler = ResolveComplexTypeByDataTypeName(pgType.FullName, throwOnError: false)) is not null) + { + _handlersByOID[oid] = handler; + return true; + } + + handler = null; + return false; + } } } @@ -231,6 +232,35 @@ internal NpgsqlTypeHandler ResolveByDataTypeName(string typeName) } } + NpgsqlTypeHandler? ResolveByPostgresType(PostgresType type) + { + if (_handlersByDataTypeName.TryGetValue(type.FullName, out var handler)) + return handler; + + return ResolveLong(type); + + NpgsqlTypeHandler? ResolveLong(PostgresType type) + { + lock (_writeLock) + { + foreach (var resolver in _resolvers) + { + try + { + if (resolver.ResolveByPostgresType(type) is { } handler) + return _handlersByDataTypeName[type.FullName] = handler; + } + catch (Exception e) + { + _commandLogger.LogError(e, $"Type resolver {resolver.GetType().Name} threw exception while resolving data type name {type.FullName}"); + } + } + + return null; + } + } + } + NpgsqlTypeHandler? 
ResolveComplexTypeByDataTypeName(string typeName, bool throwOnError) { lock (_writeLock) @@ -361,107 +391,110 @@ internal NpgsqlTypeHandler ResolveByClrType(Type type) if (_handlersByClrType.TryGetValue(type, out var handler)) return handler; - lock (_writeLock) + return ResolveLong(type); + + NpgsqlTypeHandler ResolveLong(Type type) { - foreach (var resolver in _resolvers) + lock (_writeLock) { - try - { - if ((handler = resolver.ResolveByClrType(type)) is not null) - return _handlersByClrType[type] = handler; - } - catch (Exception e) + foreach (var resolver in _resolvers) { - _commandLogger.LogError(e, $"Type resolver {resolver.GetType().Name} threw exception while resolving value with type {type}"); + try + { + if (resolver.ResolveByClrType(type) is { } handler) + return _handlersByClrType[type] = handler; + } + catch (Exception e) + { + _commandLogger.LogError(e, $"Type resolver {resolver.GetType().Name} threw exception while resolving value with type {type}"); + } } - } - // Try to see if it is an array type - var arrayElementType = GetArrayListElementType(type); - if (arrayElementType is not null) - { - // With PG14, we map arrays over range types to PG multiranges by default, not to regular arrays over ranges. - if (arrayElementType.IsGenericType && - arrayElementType.GetGenericTypeDefinition() == typeof(NpgsqlRange<>) && - DatabaseInfo.Version.IsGreaterOrEqual(14)) + // Try to see if it is an array type + var arrayElementType = GetArrayListElementType(type); + if (arrayElementType is not null) { - var subtypeType = arrayElementType.GetGenericArguments()[0]; + // With PG14, we map arrays over range types to PG multiranges by default, not to regular arrays over ranges. 
+ if (arrayElementType.IsGenericType && + arrayElementType.GetGenericTypeDefinition() == typeof(NpgsqlRange<>) && + DatabaseInfo.Version.IsGreaterOrEqual(14)) + { + var subtypeType = arrayElementType.GetGenericArguments()[0]; - return ResolveByClrType(subtypeType) is - { PostgresType : { Range : { Multirange: { } pgMultirangeType } } } subtypeHandler - ? _handlersByClrType[type] = subtypeHandler.CreateMultirangeHandler(pgMultirangeType) - : throw new NotSupportedException($"The CLR range type {type} isn't supported by Npgsql or your PostgreSQL."); - } + return ResolveByClrType(subtypeType) is + { PostgresType : { Range : { Multirange: { } pgMultirangeType } } } subtypeHandler + ? _handlersByClrType[type] = subtypeHandler.CreateMultirangeHandler(pgMultirangeType) + : throw new NotSupportedException($"The CLR range type {type} isn't supported by Npgsql or your PostgreSQL."); + } - if (ResolveByClrType(arrayElementType) is not { } elementHandler) - throw new ArgumentException($"Array type over CLR type {arrayElementType.Name} isn't supported by Npgsql"); + if (ResolveByClrType(arrayElementType) is not { } elementHandler) + throw new ArgumentException($"Array type over CLR type {arrayElementType.Name} isn't supported by Npgsql"); - if (elementHandler.PostgresType.Array is not { } pgArrayType) - throw new ArgumentException( - $"No array type could be found in the database for element {elementHandler.PostgresType}"); + if (elementHandler.PostgresType.Array is not { } pgArrayType) + throw new ArgumentException( + $"No array type could be found in the database for element {elementHandler.PostgresType}"); - return _handlersByClrType[type] = - elementHandler.CreateArrayHandler(pgArrayType, Connector.Settings.ArrayNullabilityMode); - } + return _handlersByClrType[type] = + elementHandler.CreateArrayHandler(pgArrayType, Connector.Settings.ArrayNullabilityMode); + } - if (Nullable.GetUnderlyingType(type) is { } underlyingType && ResolveByClrType(underlyingType) is { } 
underlyingHandler) - return _handlersByClrType[type] = underlyingHandler; + if (Nullable.GetUnderlyingType(type) is { } underlyingType && ResolveByClrType(underlyingType) is { } underlyingHandler) + return _handlersByClrType[type] = underlyingHandler; - if (type.IsEnum) - { - return DatabaseInfo.GetPostgresTypeByName(GetPgName(type, _defaultNameTranslator)) is PostgresEnumType pgEnumType - ? _handlersByClrType[type] = new UnmappedEnumHandler(pgEnumType, _defaultNameTranslator, Connector.TextEncoding) - : throw new NotSupportedException( - $"Could not find a PostgreSQL enum type corresponding to {type.Name}. " + - "Consider mapping the enum before usage, refer to the documentation for more details."); - } + if (type.IsEnum) + { + return DatabaseInfo.GetPostgresTypeByName(GetPgName(type, _defaultNameTranslator)) is PostgresEnumType pgEnumType + ? _handlersByClrType[type] = new UnmappedEnumHandler(pgEnumType, _defaultNameTranslator, Connector.TextEncoding) + : throw new NotSupportedException( + $"Could not find a PostgreSQL enum type corresponding to {type.Name}. " + + "Consider mapping the enum before usage, refer to the documentation for more details."); + } - // TODO: We can make the following compatible with reflection-free mode by having NpgsqlRange implement some interface, and - // check for that. - if (type.IsGenericType && type.GetGenericTypeDefinition() == typeof(NpgsqlRange<>)) - { - var subtypeType = type.GetGenericArguments()[0]; + // TODO: We can make the following compatible with reflection-free mode by having NpgsqlRange implement some interface, and + // check for that. + if (type.IsGenericType && type.GetGenericTypeDefinition() == typeof(NpgsqlRange<>)) + { + var subtypeType = type.GetGenericArguments()[0]; - return ResolveByClrType(subtypeType) is { PostgresType : { Range : { } pgRangeType } } subtypeHandler - ? 
_handlersByClrType[type] = subtypeHandler.CreateRangeHandler(pgRangeType) - : throw new NotSupportedException($"The CLR range type {type} isn't supported by Npgsql or your PostgreSQL."); - } + return ResolveByClrType(subtypeType) is { PostgresType : { Range : { } pgRangeType } } subtypeHandler + ? _handlersByClrType[type] = subtypeHandler.CreateRangeHandler(pgRangeType) + : throw new NotSupportedException($"The CLR range type {type} isn't supported by Npgsql or your PostgreSQL."); + } - if (typeof(IEnumerable).IsAssignableFrom(type)) - throw new NotSupportedException("IEnumerable parameters are not supported, pass an array or List instead"); + if (typeof(IEnumerable).IsAssignableFrom(type)) + throw new NotSupportedException("IEnumerable parameters are not supported, pass an array or List instead"); - throw new NotSupportedException($"The CLR type {type} isn't natively supported by Npgsql or your PostgreSQL. " + - $"To use it with a PostgreSQL composite you need to specify {nameof(NpgsqlParameter.DataTypeName)} or to map it, please refer to the documentation."); - } + throw new NotSupportedException($"The CLR type {type} isn't natively supported by Npgsql or your PostgreSQL. " + + $"To use it with a PostgreSQL composite you need to specify {nameof(NpgsqlParameter.DataTypeName)} or to map it, please refer to the documentation."); + } - static Type? GetArrayListElementType(Type type) - { - var typeInfo = type.GetTypeInfo(); - if (typeInfo.IsArray) - return GetUnderlyingType(type.GetElementType()!); // The use of bang operator is justified here as Type.GetElementType() only returns null for the Array base class which can't be mapped in a useful way. + static Type? GetArrayListElementType(Type type) + { + var typeInfo = type.GetTypeInfo(); + if (typeInfo.IsArray) + return GetUnderlyingType(type.GetElementType()!); // The use of bang operator is justified here as Type.GetElementType() only returns null for the Array base class which can't be mapped in a useful way. 
- var ilist = typeInfo.ImplementedInterfaces.FirstOrDefault(x => x.GetTypeInfo().IsGenericType && x.GetGenericTypeDefinition() == typeof(IList<>)); - if (ilist != null) - return GetUnderlyingType(ilist.GetGenericArguments()[0]); + var ilist = typeInfo.ImplementedInterfaces.FirstOrDefault(x => x.GetTypeInfo().IsGenericType && x.GetGenericTypeDefinition() == typeof(IList<>)); + if (ilist != null) + return GetUnderlyingType(ilist.GetGenericArguments()[0]); - if (typeof(IList).IsAssignableFrom(type)) - throw new NotSupportedException("Non-generic IList is a supported parameter, but the NpgsqlDbType parameter must be set on the parameter"); + if (typeof(IList).IsAssignableFrom(type)) + throw new NotSupportedException("Non-generic IList is a supported parameter, but the NpgsqlDbType parameter must be set on the parameter"); - return null; + return null; - Type GetUnderlyingType(Type t) - => Nullable.GetUnderlyingType(t) ?? t; + Type GetUnderlyingType(Type t) + => Nullable.GetUnderlyingType(t) ?? t; + } } } + #endregion Type handler lookup + internal bool TryGetMapping(PostgresType pgType, [NotNullWhen(true)] out TypeMappingInfo? mapping) { foreach (var resolver in _resolvers) - if ((mapping = resolver.GetMappingByDataTypeName(pgType.FullName)) is not null) - return true; - - foreach (var resolver in _resolvers) - if ((mapping = resolver.GetMappingByDataTypeName(pgType.Name)) is not null) + if ((mapping = resolver.GetMappingByPostgresType(pgType)) is not null) return true; switch (pgType) @@ -514,33 +547,13 @@ internal bool TryGetMapping(PostgresType pgType, [NotNullWhen(true)] out TypeMap return false; } - #endregion Type handler lookup - internal (NpgsqlDbType? 
npgsqlDbType, PostgresType postgresType) GetTypeInfoByOid(uint oid) { if (!DatabaseInfo.ByOID.TryGetValue(oid, out var pgType)) ThrowHelper.ThrowInvalidOperationException($"Couldn't find PostgreSQL type with OID {oid}"); - foreach (var resolver in _resolvers) - if (resolver.GetMappingByDataTypeName(pgType.FullName) is { } mapping) - return (mapping.NpgsqlDbType, pgType); - - foreach (var resolver in _resolvers) - if (resolver.GetMappingByDataTypeName(pgType.Name) is { } mapping) - return (mapping.NpgsqlDbType, pgType); - - switch (pgType) - { - case PostgresArrayType pgArrayType: - var (elementNpgsqlDbType, _) = GetTypeInfoByOid(pgArrayType.Element.OID); - if (elementNpgsqlDbType.HasValue) - return new(elementNpgsqlDbType | NpgsqlDbType.Array, pgType); - break; - - case PostgresDomainType pgDomainType: - var (baseNpgsqlDbType, _) = GetTypeInfoByOid(pgDomainType.BaseType.OID); - return new(baseNpgsqlDbType, pgType); - } + if (TryGetMapping(pgType, out var mapping)) + return (mapping.NpgsqlDbType, pgType); return (null, pgType); } diff --git a/test/Npgsql.Tests/ReaderTests.cs b/test/Npgsql.Tests/ReaderTests.cs index bf940fab2d..0373394584 100644 --- a/test/Npgsql.Tests/ReaderTests.cs +++ b/test/Npgsql.Tests/ReaderTests.cs @@ -2187,7 +2187,7 @@ class ExplodingTypeHandlerResolver : TypeHandlerResolver public override NpgsqlTypeHandler? ResolveByDataTypeName(string typeName) => typeName == "integer" ? new ExplodingTypeHandler(null!, _safe) : null; public override NpgsqlTypeHandler? 
ResolveByClrType(Type type) => null; - public override TypeMappingInfo GetMappingByDataTypeName(string dataTypeName) => throw new NotImplementedException(); + public override TypeMappingInfo GetMappingByPostgresType(PostgresType type) => throw new NotImplementedException(); } } diff --git a/test/Npgsql.Tests/TypeMapperTests.cs b/test/Npgsql.Tests/TypeMapperTests.cs index 15be807767..34f8f598e1 100644 --- a/test/Npgsql.Tests/TypeMapperTests.cs +++ b/test/Npgsql.Tests/TypeMapperTests.cs @@ -184,7 +184,7 @@ public CitextToStringTypeHandlerResolver(NpgsqlConnector connector) => type == typeof(string) ? new TextHandler(_pgCitextType, _connector.TextEncoding) : null; public override NpgsqlTypeHandler? ResolveByDataTypeName(string typeName) => null; - public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) => throw new NotSupportedException(); + public override TypeMappingInfo? GetMappingByPostgresType(PostgresType type) => throw new NotSupportedException(); } } From 98a41c5bae88277aa0101c7300687ad0adb10453 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Mon, 27 Feb 2023 15:01:47 +0300 Subject: [PATCH 074/761] Make range and multirange opt-int to help with AOT size (#4899) Closes #4898 --- .../GeoJSONTypeHandlerResolverFactory.cs | 5 +- .../JsonNetTypeHandlerResolverFactory.cs | 9 +- ...TopologySuiteTypeHandlerResolverFactory.cs | 5 +- .../Internal/NodaTimeTypeHandlerResolver.cs | 12 + .../NodaTimeTypeHandlerResolverFactory.cs | 6 +- .../BackendMessages/RowDescriptionMessage.cs | 1 + src/Npgsql/Internal/NpgsqlConnector.cs | 1 + .../CompositeHandlers/CompositeHandler.cs | 1 + .../Internal/TypeHandlers/RecordHandler.cs | 1 + .../TypeHandling/TypeHandlerResolver.cs | 6 + .../TypeHandlerResolverFactory.cs | 4 +- .../{ => Internal}/TypeMapping/TypeMapper.cs | 135 ++++-------- src/Npgsql/NpgsqlBinaryExporter.cs | 1 + src/Npgsql/NpgsqlNestedDataReader.cs | 1 + src/Npgsql/NpgsqlParameter.cs | 1 + src/Npgsql/NpgsqlParameterCollection.cs | 1 + 
src/Npgsql/NpgsqlParameter`.cs | 1 + src/Npgsql/PublicAPI.Unshipped.txt | 2 + .../TypeMapping/BuiltInTypeHandlerResolver.cs | 52 ++--- .../BuiltInTypeHandlerResolverFactory.cs | 3 +- .../JsonTypeHandlerResolverFactory.cs | 7 +- .../TypeMapping/NpgsqlRangeExtensions.cs | 20 ++ .../TypeMapping/RangeTypeHandlerResolver.cs | 206 ++++++++++++++++++ .../RangeTypeHandlerResolverFactory.cs | 24 ++ test/Npgsql.Benchmarks/ResolveHandler.cs | 1 + .../LegacyNodaTimeTests.cs | 1 + .../NodaTimeSetupFixture.cs | 2 +- test/Npgsql.Tests/ConnectionTests.cs | 6 +- test/Npgsql.Tests/ReaderTests.cs | 3 +- test/Npgsql.Tests/Support/TestBase.cs | 9 + test/Npgsql.Tests/TypeMapperTests.cs | 4 +- test/Npgsql.Tests/Types/DateTimeTests.cs | 17 +- test/Npgsql.Tests/Types/MultirangeTests.cs | 19 ++ test/Npgsql.Tests/Types/RangeTests.cs | 20 +- 34 files changed, 434 insertions(+), 153 deletions(-) rename src/Npgsql/{ => Internal}/TypeMapping/TypeMapper.cs (81%) create mode 100644 src/Npgsql/TypeMapping/NpgsqlRangeExtensions.cs create mode 100644 src/Npgsql/TypeMapping/RangeTypeHandlerResolver.cs create mode 100644 src/Npgsql/TypeMapping/RangeTypeHandlerResolverFactory.cs diff --git a/src/Npgsql.GeoJSON/Internal/GeoJSONTypeHandlerResolverFactory.cs b/src/Npgsql.GeoJSON/Internal/GeoJSONTypeHandlerResolverFactory.cs index 16d8ae54d9..b575592bc2 100644 --- a/src/Npgsql.GeoJSON/Internal/GeoJSONTypeHandlerResolverFactory.cs +++ b/src/Npgsql.GeoJSON/Internal/GeoJSONTypeHandlerResolverFactory.cs @@ -1,6 +1,7 @@ using System; using Npgsql.Internal; using Npgsql.Internal.TypeHandling; +using Npgsql.Internal.TypeMapping; using Npgsql.TypeMapping; namespace Npgsql.GeoJSON.Internal; @@ -13,7 +14,7 @@ public class GeoJSONTypeHandlerResolverFactory : TypeHandlerResolverFactory public GeoJSONTypeHandlerResolverFactory(GeoJSONOptions options, bool geographyAsDefault) => (_options, _geographyAsDefault) = (options, geographyAsDefault); - public override TypeHandlerResolver Create(NpgsqlConnector connector) + 
public override TypeHandlerResolver Create(TypeMapper typeMapper, NpgsqlConnector connector) => new GeoJSONTypeHandlerResolver(connector, _options, _geographyAsDefault); public override string? GetDataTypeNameByClrType(Type type) @@ -21,4 +22,4 @@ public override TypeHandlerResolver Create(NpgsqlConnector connector) public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) => GeoJSONTypeHandlerResolver.DoGetMappingByDataTypeName(dataTypeName); -} \ No newline at end of file +} diff --git a/src/Npgsql.Json.NET/Internal/JsonNetTypeHandlerResolverFactory.cs b/src/Npgsql.Json.NET/Internal/JsonNetTypeHandlerResolverFactory.cs index 830a589b26..a4b81ccace 100644 --- a/src/Npgsql.Json.NET/Internal/JsonNetTypeHandlerResolverFactory.cs +++ b/src/Npgsql.Json.NET/Internal/JsonNetTypeHandlerResolverFactory.cs @@ -4,14 +4,13 @@ using Newtonsoft.Json.Linq; using Npgsql.Internal; using Npgsql.Internal.TypeHandling; +using Npgsql.Internal.TypeMapping; using Npgsql.TypeMapping; namespace Npgsql.Json.NET.Internal; public class JsonNetTypeHandlerResolverFactory : TypeHandlerResolverFactory { - readonly Type[] _jsonbClrTypes; - readonly Type[] _jsonClrTypes; readonly JsonSerializerSettings _settings; readonly Dictionary _byType; @@ -20,8 +19,6 @@ public JsonNetTypeHandlerResolverFactory( Type[]? jsonClrTypes, JsonSerializerSettings? settings) { - _jsonbClrTypes = jsonbClrTypes ?? Array.Empty(); - _jsonClrTypes = jsonClrTypes ?? Array.Empty(); _settings = settings ?? new JsonSerializerSettings(); _byType = new() @@ -39,7 +36,7 @@ public JsonNetTypeHandlerResolverFactory( _byType[type] = "json"; } - public override TypeHandlerResolver Create(NpgsqlConnector connector) + public override TypeHandlerResolver Create(TypeMapper typeMapper, NpgsqlConnector connector) => new JsonNetTypeHandlerResolver(connector, _byType, _settings); public override string? 
GetDataTypeNameByClrType(Type type) @@ -48,4 +45,4 @@ public override TypeHandlerResolver Create(NpgsqlConnector connector) public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) => JsonNetTypeHandlerResolver.DoGetMappingByDataTypeName(dataTypeName); -} \ No newline at end of file +} diff --git a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeHandlerResolverFactory.cs b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeHandlerResolverFactory.cs index a758e8ac02..d503707f7e 100644 --- a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeHandlerResolverFactory.cs +++ b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeHandlerResolverFactory.cs @@ -4,6 +4,7 @@ using NetTopologySuite.Geometries; using Npgsql.Internal; using Npgsql.Internal.TypeHandling; +using Npgsql.Internal.TypeMapping; using Npgsql.TypeMapping; namespace Npgsql.NetTopologySuite.Internal; @@ -27,7 +28,7 @@ public NetTopologySuiteTypeHandlerResolverFactory( _geographyAsDefault = geographyAsDefault; } - public override TypeHandlerResolver Create(NpgsqlConnector connector) + public override TypeHandlerResolver Create(TypeMapper typeMapper, NpgsqlConnector connector) => new NetTopologySuiteTypeHandlerResolver(connector, _coordinateSequenceFactory, _precisionModel, _handleOrdinates, _geographyAsDefault); @@ -36,4 +37,4 @@ public override TypeHandlerResolver Create(NpgsqlConnector connector) public override TypeMappingInfo? 
GetMappingByDataTypeName(string dataTypeName) => NetTopologySuiteTypeHandlerResolver.DoGetMappingByDataTypeName(dataTypeName); -} \ No newline at end of file +} diff --git a/src/Npgsql.NodaTime/Internal/NodaTimeTypeHandlerResolver.cs b/src/Npgsql.NodaTime/Internal/NodaTimeTypeHandlerResolver.cs index 56c01b04b2..81b3cffa94 100644 --- a/src/Npgsql.NodaTime/Internal/NodaTimeTypeHandlerResolver.cs +++ b/src/Npgsql.NodaTime/Internal/NodaTimeTypeHandlerResolver.cs @@ -78,6 +78,18 @@ internal NodaTimeTypeHandlerResolver(NpgsqlConnector connector) ? handler : null; + public override NpgsqlTypeHandler? ResolveByNpgsqlDbType(NpgsqlDbType npgsqlDbType) + => npgsqlDbType switch + { + NpgsqlDbType.TimestampTzRange => TsTzRange(), + NpgsqlDbType.DateRange => DateRange(), + NpgsqlDbType.TimestampTzMultirange => TsTzMultirange(), + NpgsqlDbType.DateMultirange => DateMultirange(), + NpgsqlDbType.TimestampTzRange | NpgsqlDbType.Array => TsTzRangeArray(), + NpgsqlDbType.DateRange | NpgsqlDbType.Array => TsTzRangeArray(), + _ => null + }; + public override NpgsqlTypeHandler? 
ResolveValueTypeGenerically(T value) { // This method only ever gets called for value types, and relies on the JIT specializing the method for T by eliding all the diff --git a/src/Npgsql.NodaTime/Internal/NodaTimeTypeHandlerResolverFactory.cs b/src/Npgsql.NodaTime/Internal/NodaTimeTypeHandlerResolverFactory.cs index c0eeb6709c..01a49b85d1 100644 --- a/src/Npgsql.NodaTime/Internal/NodaTimeTypeHandlerResolverFactory.cs +++ b/src/Npgsql.NodaTime/Internal/NodaTimeTypeHandlerResolverFactory.cs @@ -1,12 +1,14 @@ using System; using Npgsql.Internal; using Npgsql.Internal.TypeHandling; +using Npgsql.Internal.TypeMapping; +using Npgsql.TypeMapping; namespace Npgsql.NodaTime.Internal; public class NodaTimeTypeHandlerResolverFactory : TypeHandlerResolverFactory { - public override TypeHandlerResolver Create(NpgsqlConnector connector) + public override TypeHandlerResolver Create(TypeMapper typeMapper, NpgsqlConnector connector) => new NodaTimeTypeHandlerResolver(connector); public override string? GetDataTypeNameByClrType(Type type) @@ -14,4 +16,4 @@ public override TypeHandlerResolver Create(NpgsqlConnector connector) public override TypeMappingInfo? 
GetMappingByDataTypeName(string dataTypeName) => NodaTimeTypeHandlerResolver.DoGetMappingByDataTypeName(dataTypeName); -} \ No newline at end of file +} diff --git a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs index b2b045a3e1..b2a9a6f111 100644 --- a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs +++ b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs @@ -6,6 +6,7 @@ using Npgsql.Internal; using Npgsql.Internal.TypeHandlers; using Npgsql.Internal.TypeHandling; +using Npgsql.Internal.TypeMapping; using Npgsql.PostgresTypes; using Npgsql.Replication.PgOutput.Messages; using Npgsql.TypeMapping; diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 5ee3dbf3c1..0b7e994482 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -23,6 +23,7 @@ using static Npgsql.Util.Statics; using System.Transactions; using Microsoft.Extensions.Logging; +using Npgsql.Internal.TypeMapping; using Npgsql.Properties; namespace Npgsql.Internal; diff --git a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeHandler.cs b/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeHandler.cs index 7df3267f37..5079b24b1d 100644 --- a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeHandler.cs +++ b/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeHandler.cs @@ -8,6 +8,7 @@ using System.Threading.Tasks; using Npgsql.BackendMessages; using Npgsql.Internal.TypeHandling; +using Npgsql.Internal.TypeMapping; using Npgsql.PostgresTypes; using Npgsql.TypeMapping; using NpgsqlTypes; diff --git a/src/Npgsql/Internal/TypeHandlers/RecordHandler.cs b/src/Npgsql/Internal/TypeHandlers/RecordHandler.cs index 04b16bca62..15b778fc10 100644 --- a/src/Npgsql/Internal/TypeHandlers/RecordHandler.cs +++ b/src/Npgsql/Internal/TypeHandlers/RecordHandler.cs @@ -5,6 +5,7 @@ using System.Threading.Tasks; using Npgsql.BackendMessages; 
using Npgsql.Internal.TypeHandling; +using Npgsql.Internal.TypeMapping; using Npgsql.PostgresTypes; using Npgsql.TypeMapping; diff --git a/src/Npgsql/Internal/TypeHandling/TypeHandlerResolver.cs b/src/Npgsql/Internal/TypeHandling/TypeHandlerResolver.cs index 43431073de..3c47e55f06 100644 --- a/src/Npgsql/Internal/TypeHandling/TypeHandlerResolver.cs +++ b/src/Npgsql/Internal/TypeHandling/TypeHandlerResolver.cs @@ -1,4 +1,5 @@ using System; +using NpgsqlTypes; using Npgsql.PostgresTypes; namespace Npgsql.Internal.TypeHandling; @@ -14,6 +15,11 @@ public abstract class TypeHandlerResolver /// See . public abstract NpgsqlTypeHandler? ResolveByDataTypeName(string typeName); + /// + /// Resolves a type handler for a given NpgsqlDbType. + /// + public virtual NpgsqlTypeHandler? ResolveByNpgsqlDbType(NpgsqlDbType npgsqlDbType) => null; + /// /// Resolves a type handler given a .NET CLR type. /// diff --git a/src/Npgsql/Internal/TypeHandling/TypeHandlerResolverFactory.cs b/src/Npgsql/Internal/TypeHandling/TypeHandlerResolverFactory.cs index 78d3b5bc47..ac195afbdc 100644 --- a/src/Npgsql/Internal/TypeHandling/TypeHandlerResolverFactory.cs +++ b/src/Npgsql/Internal/TypeHandling/TypeHandlerResolverFactory.cs @@ -1,10 +1,12 @@ using System; +using Npgsql.Internal.TypeMapping; +using Npgsql.TypeMapping; namespace Npgsql.Internal.TypeHandling; public abstract class TypeHandlerResolverFactory { - public abstract TypeHandlerResolver Create(NpgsqlConnector connector); + public abstract TypeHandlerResolver Create(TypeMapper typeMapper, NpgsqlConnector connector); public abstract string? GetDataTypeNameByClrType(Type clrType); public virtual string? 
GetDataTypeNameByValueDependentValue(object value) => null; diff --git a/src/Npgsql/TypeMapping/TypeMapper.cs b/src/Npgsql/Internal/TypeMapping/TypeMapper.cs similarity index 81% rename from src/Npgsql/TypeMapping/TypeMapper.cs rename to src/Npgsql/Internal/TypeMapping/TypeMapper.cs index b28758ed15..0710268eed 100644 --- a/src/Npgsql/TypeMapping/TypeMapper.cs +++ b/src/Npgsql/Internal/TypeMapping/TypeMapper.cs @@ -6,18 +6,18 @@ using System.Linq; using System.Reflection; using Microsoft.Extensions.Logging; -using Npgsql.Internal; using Npgsql.Internal.TypeHandlers; using Npgsql.Internal.TypeHandling; -using Npgsql.Internal.TypeMapping; using Npgsql.PostgresTypes; -using Npgsql.Properties; -using Npgsql.Util; +using Npgsql.TypeMapping; using NpgsqlTypes; -namespace Npgsql.TypeMapping; +namespace Npgsql.Internal.TypeMapping; -sealed class TypeMapper +/// +/// Type mapper used to map types to type handlers. +/// +public sealed class TypeMapper { internal NpgsqlConnector Connector { get; } readonly object _writeLock = new(); @@ -70,7 +70,7 @@ internal void Initialize( var resolvers = new TypeHandlerResolver[resolverFactories.Count]; for (var i = 0; i < resolverFactories.Count; i++) - resolvers[i] = resolverFactories[i].Create(Connector); + resolvers[i] = resolverFactories[i].Create(this, Connector); _resolvers = resolvers; foreach (var userTypeMapping in userTypeMappings.Values) @@ -94,7 +94,7 @@ internal void Initialize( /// /// A PostgreSQL type OID /// A type handler that can be used to encode and decode values. - internal NpgsqlTypeHandler ResolveByOID(uint oid) + public NpgsqlTypeHandler ResolveByOID(uint oid) => TryResolveByOID(oid, out var result) ? result : UnrecognizedTypeHandler; internal bool TryResolveByOID(uint oid, [NotNullWhen(true)] out NpgsqlTypeHandler? handler) @@ -132,7 +132,12 @@ bool TryResolveLong(uint oid, [NotNullWhen(true)] out NpgsqlTypeHandler? 
handler } } - internal NpgsqlTypeHandler ResolveByNpgsqlDbType(NpgsqlDbType npgsqlDbType) + /// + /// Looks up a type handler by NpgsqlDbType. + /// + /// Parameter's NpgsqlDbType + /// A type handler that can be used to encode and decode values. + public NpgsqlTypeHandler ResolveByNpgsqlDbType(NpgsqlDbType npgsqlDbType) { if (_handlersByNpgsqlDbType.TryGetValue(npgsqlDbType, out var handler)) return handler; @@ -161,6 +166,22 @@ NpgsqlTypeHandler ResolveLong(NpgsqlDbType npgsqlDbType) } } + // Can't find (or translate) PG data type name by NpgsqlDbType. + // This might happen because of flags (like Array, Range or Multirange). + foreach (var resolver in _resolvers) + { + try + { + if (resolver.ResolveByNpgsqlDbType(npgsqlDbType) is { } handler) + return _handlersByNpgsqlDbType[npgsqlDbType] = handler; + } + catch (Exception e) + { + _commandLogger.LogError(e, + $"Type resolver {resolver.GetType().Name} threw exception while resolving NpgsqlDbType {npgsqlDbType}"); + } + } + if (npgsqlDbType.HasFlag(NpgsqlDbType.Array)) { var elementHandler = ResolveByNpgsqlDbType(npgsqlDbType & ~NpgsqlDbType.Array); @@ -173,27 +194,6 @@ NpgsqlTypeHandler ResolveLong(NpgsqlDbType npgsqlDbType) elementHandler.CreateArrayHandler(pgArrayType, Connector.Settings.ArrayNullabilityMode); } - if (npgsqlDbType.HasFlag(NpgsqlDbType.Range)) - { - var subtypeHandler = ResolveByNpgsqlDbType(npgsqlDbType & ~NpgsqlDbType.Range); - - if (subtypeHandler.PostgresType.Range is not { } pgRangeType) - throw new ArgumentException( - $"No range type could be found in the database for subtype {subtypeHandler.PostgresType}"); - - return _handlersByNpgsqlDbType[npgsqlDbType] = subtypeHandler.CreateRangeHandler(pgRangeType); - } - - if (npgsqlDbType.HasFlag(NpgsqlDbType.Multirange)) - { - var subtypeHandler = ResolveByNpgsqlDbType(npgsqlDbType & ~NpgsqlDbType.Multirange); - - if (subtypeHandler.PostgresType.Range?.Multirange is not { } pgMultirangeType) - throw new 
ArgumentException(string.Format(NpgsqlStrings.NoMultirangeTypeFound, subtypeHandler.PostgresType)); - - return _handlersByNpgsqlDbType[npgsqlDbType] = subtypeHandler.CreateMultirangeHandler(pgMultirangeType); - } - throw new NpgsqlException($"The NpgsqlDbType '{npgsqlDbType}' isn't present in your database. " + "You may need to install an extension or upgrade to a newer version."); } @@ -277,18 +277,6 @@ internal NpgsqlTypeHandler ResolveByDataTypeName(string typeName) elementHandler.CreateArrayHandler(pgArrayType, Connector.Settings.ArrayNullabilityMode); } - case PostgresRangeType pgRangeType: - { - var subtypeHandler = ResolveByOID(pgRangeType.Subtype.OID); - return _handlersByDataTypeName[typeName] = subtypeHandler.CreateRangeHandler(pgRangeType); - } - - case PostgresMultirangeType pgMultirangeType: - { - var subtypeHandler = ResolveByOID(pgMultirangeType.Subrange.Subtype.OID); - return _handlersByDataTypeName[typeName] = subtypeHandler.CreateMultirangeHandler(pgMultirangeType); - } - case PostgresEnumType pgEnumType: { // A mapped enum would have been registered in _extraHandlersByDataTypeName and bound above - this is unmapped. @@ -312,6 +300,16 @@ internal NpgsqlTypeHandler ResolveByDataTypeName(string typeName) $"Composite type '{pgCompositeType}' must be mapped with Npgsql before being used, see the docs.") : null; +#pragma warning disable CS0618 + case PostgresRangeType: + case PostgresMultirangeType: + return throwOnError + ? throw new NotSupportedException( + $"'{pgType}' is a range type; please call {nameof(NpgsqlRangeExtensions.UseRange)} on {nameof(NpgsqlDataSourceBuilder)} or on {nameof(NpgsqlConnection)}.{nameof(NpgsqlConnection.GlobalTypeMapper)} to enable ranges. 
" + + "See https://www.npgsql.org/doc/types/ranges.html for more information.") + : null; +#pragma warning restore CS0618 + default: throw new ArgumentOutOfRangeException($"Unhandled PostgreSQL type type: {pgType.GetType()}"); } @@ -386,7 +384,12 @@ NpgsqlTypeHandler ResolveLong(object value, Type type) } // TODO: This is needed as a separate method only because of binary COPY, see #3957 - internal NpgsqlTypeHandler ResolveByClrType(Type type) + /// + /// Looks up a type handler by CLR Type. + /// + /// Parameter's CLR type + /// A type handler that can be used to encode and decode values. + public NpgsqlTypeHandler ResolveByClrType(Type type) { if (_handlersByClrType.TryGetValue(type, out var handler)) return handler; @@ -414,19 +417,6 @@ NpgsqlTypeHandler ResolveLong(Type type) var arrayElementType = GetArrayListElementType(type); if (arrayElementType is not null) { - // With PG14, we map arrays over range types to PG multiranges by default, not to regular arrays over ranges. - if (arrayElementType.IsGenericType && - arrayElementType.GetGenericTypeDefinition() == typeof(NpgsqlRange<>) && - DatabaseInfo.Version.IsGreaterOrEqual(14)) - { - var subtypeType = arrayElementType.GetGenericArguments()[0]; - - return ResolveByClrType(subtypeType) is - { PostgresType : { Range : { Multirange: { } pgMultirangeType } } } subtypeHandler - ? 
_handlersByClrType[type] = subtypeHandler.CreateMultirangeHandler(pgMultirangeType) - : throw new NotSupportedException($"The CLR range type {type} isn't supported by Npgsql or your PostgreSQL."); - } - if (ResolveByClrType(arrayElementType) is not { } elementHandler) throw new ArgumentException($"Array type over CLR type {arrayElementType.Name} isn't supported by Npgsql"); @@ -450,17 +440,6 @@ NpgsqlTypeHandler ResolveLong(Type type) "Consider mapping the enum before usage, refer to the documentation for more details."); } - // TODO: We can make the following compatible with reflection-free mode by having NpgsqlRange implement some interface, and - // check for that. - if (type.IsGenericType && type.GetGenericTypeDefinition() == typeof(NpgsqlRange<>)) - { - var subtypeType = type.GetGenericArguments()[0]; - - return ResolveByClrType(subtypeType) is { PostgresType : { Range : { } pgRangeType } } subtypeHandler - ? _handlersByClrType[type] = subtypeHandler.CreateRangeHandler(pgRangeType) - : throw new NotSupportedException($"The CLR range type {type} isn't supported by Npgsql or your PostgreSQL."); - } - if (typeof(IEnumerable).IsAssignableFrom(type)) throw new NotSupportedException("IEnumerable parameters are not supported, pass an array or List instead"); @@ -508,28 +487,6 @@ internal bool TryGetMapping(PostgresType pgType, [NotNullWhen(true)] out TypeMap break; - case PostgresRangeType pgRangeType: - { - if (TryGetMapping(pgRangeType.Subtype, out var subtypeMapping)) - { - mapping = new(subtypeMapping.NpgsqlDbType | NpgsqlDbType.Range, pgType.DisplayName); - return true; - } - - break; - } - - case PostgresMultirangeType pgMultirangeType: - { - if (TryGetMapping(pgMultirangeType.Subrange.Subtype, out var subtypeMapping)) - { - mapping = new(subtypeMapping.NpgsqlDbType | NpgsqlDbType.Multirange, pgType.DisplayName); - return true; - } - - break; - } - case PostgresDomainType pgDomainType: if (TryGetMapping(pgDomainType.BaseType, out var baseMapping)) { diff --git 
a/src/Npgsql/NpgsqlBinaryExporter.cs b/src/Npgsql/NpgsqlBinaryExporter.cs index 9f5b5b2bcc..f772334feb 100644 --- a/src/Npgsql/NpgsqlBinaryExporter.cs +++ b/src/Npgsql/NpgsqlBinaryExporter.cs @@ -7,6 +7,7 @@ using Npgsql.BackendMessages; using Npgsql.Internal; using Npgsql.Internal.TypeHandling; +using Npgsql.Internal.TypeMapping; using Npgsql.TypeMapping; using NpgsqlTypes; using static Npgsql.Util.Statics; diff --git a/src/Npgsql/NpgsqlNestedDataReader.cs b/src/Npgsql/NpgsqlNestedDataReader.cs index d8e1b927ed..744b34dd1d 100644 --- a/src/Npgsql/NpgsqlNestedDataReader.cs +++ b/src/Npgsql/NpgsqlNestedDataReader.cs @@ -10,6 +10,7 @@ using System.Globalization; using System.IO; using System.Runtime.CompilerServices; +using Npgsql.Internal.TypeMapping; namespace Npgsql; diff --git a/src/Npgsql/NpgsqlParameter.cs b/src/Npgsql/NpgsqlParameter.cs index c8b45c7d26..3be8758799 100644 --- a/src/Npgsql/NpgsqlParameter.cs +++ b/src/Npgsql/NpgsqlParameter.cs @@ -7,6 +7,7 @@ using System.Threading.Tasks; using Npgsql.Internal; using Npgsql.Internal.TypeHandling; +using Npgsql.Internal.TypeMapping; using Npgsql.PostgresTypes; using Npgsql.TypeMapping; using Npgsql.Util; diff --git a/src/Npgsql/NpgsqlParameterCollection.cs b/src/Npgsql/NpgsqlParameterCollection.cs index 03adb65794..3f1e139b08 100644 --- a/src/Npgsql/NpgsqlParameterCollection.cs +++ b/src/Npgsql/NpgsqlParameterCollection.cs @@ -6,6 +6,7 @@ using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.Runtime.CompilerServices; +using Npgsql.Internal.TypeMapping; using Npgsql.TypeMapping; using NpgsqlTypes; diff --git a/src/Npgsql/NpgsqlParameter`.cs b/src/Npgsql/NpgsqlParameter`.cs index 0271952431..a0487a9aec 100644 --- a/src/Npgsql/NpgsqlParameter`.cs +++ b/src/Npgsql/NpgsqlParameter`.cs @@ -3,6 +3,7 @@ using System.Threading; using System.Threading.Tasks; using Npgsql.Internal; +using Npgsql.Internal.TypeMapping; using Npgsql.TypeMapping; using NpgsqlTypes; using static Npgsql.Util.Statics; diff 
--git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index 9614eca619..40c160bf26 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -2,7 +2,9 @@ Npgsql.NpgsqlBinaryImporter.WriteRow(params object?[]! values) -> void Npgsql.NpgsqlBinaryImporter.WriteRowAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken), params object?[]! values) -> System.Threading.Tasks.Task! Npgsql.NpgsqlJsonExtensions +Npgsql.NpgsqlRangeExtensions static Npgsql.NpgsqlJsonExtensions.UseSystemTextJson(this Npgsql.TypeMapping.INpgsqlTypeMapper! mapper, System.Text.Json.JsonSerializerOptions? serializerOptions = null, System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +static Npgsql.NpgsqlRangeExtensions.UseRange(this Npgsql.TypeMapping.INpgsqlTypeMapper! mapper) -> Npgsql.TypeMapping.INpgsqlTypeMapper! *REMOVED*static NpgsqlTypes.NpgsqlBox.Parse(string! s) -> NpgsqlTypes.NpgsqlBox *REMOVED*static NpgsqlTypes.NpgsqlCircle.Parse(string! s) -> NpgsqlTypes.NpgsqlCircle *REMOVED*static NpgsqlTypes.NpgsqlLine.Parse(string! s) -> NpgsqlTypes.NpgsqlLine diff --git a/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs b/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs index caad112e5f..33cacd8b6a 100644 --- a/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs +++ b/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs @@ -245,10 +245,6 @@ sealed class BuiltInTypeHandlerResolver : TypeHandlerResolver // Complex type handlers over timestamp/timestamptz (because DateTime is value-dependent) NpgsqlTypeHandler? _timestampArrayHandler; NpgsqlTypeHandler? _timestampTzArrayHandler; - NpgsqlTypeHandler? _timestampRangeHandler; - NpgsqlTypeHandler? _timestampTzRangeHandler; - NpgsqlTypeHandler? _timestampMultirangeHandler; - NpgsqlTypeHandler? 
_timestampTzMultirangeHandler; #endregion Cached handlers @@ -493,12 +489,6 @@ static BuiltInTypeHandlerResolver() // mix incompatible Kinds, that will fail during validation. For empty arrays it doesn't matter. IList array => ArrayHandler(array.Count == 0 ? DateTimeKind.Unspecified : array[0].Kind), - NpgsqlRange range => RangeHandler(!range.LowerBoundInfinite ? range.LowerBound.Kind : - !range.UpperBoundInfinite ? range.UpperBound.Kind : DateTimeKind.Unspecified), - - NpgsqlRange[] multirange => MultirangeHandler(GetMultirangeKind(multirange)), - List> multirange => MultirangeHandler(GetMultirangeKind(multirange)), - _ => null }; @@ -508,32 +498,6 @@ NpgsqlTypeHandler ArrayHandler(DateTimeKind kind) (PostgresArrayType)PgType("timestamp with time zone[]"), _connector.Settings.ArrayNullabilityMode) : _timestampArrayHandler ??= _timestampHandler.CreateArrayHandler( (PostgresArrayType)PgType("timestamp without time zone[]"), _connector.Settings.ArrayNullabilityMode); - - NpgsqlTypeHandler RangeHandler(DateTimeKind kind) - => kind == DateTimeKind.Utc - ? _timestampTzRangeHandler ??= _timestampTzHandler.CreateRangeHandler((PostgresRangeType)PgType("tstzrange")) - : _timestampRangeHandler ??= _timestampHandler.CreateRangeHandler((PostgresRangeType)PgType("tsrange")); - - NpgsqlTypeHandler MultirangeHandler(DateTimeKind kind) - => kind == DateTimeKind.Utc - ? _timestampTzMultirangeHandler ??= _timestampTzHandler.CreateMultirangeHandler((PostgresMultirangeType)PgType("tstzmultirange")) - : _timestampMultirangeHandler ??= _timestampHandler.CreateMultirangeHandler((PostgresMultirangeType)PgType("tsmultirange")); - } - - static DateTimeKind GetRangeKind(NpgsqlRange range) - => !range.LowerBoundInfinite - ? range.LowerBound.Kind - : !range.UpperBoundInfinite - ? 
range.UpperBound.Kind - : DateTimeKind.Unspecified; - - static DateTimeKind GetMultirangeKind(IList> multirange) - { - for (var i = 0; i < multirange.Count; i++) - if (!multirange[i].IsEmpty) - return GetRangeKind(multirange[i]); - - return DateTimeKind.Unspecified; } internal static string? ValueDependentValueToDataTypeName(object value) @@ -559,6 +523,22 @@ static DateTimeKind GetMultirangeKind(IList> multirange) _ => null }; } + + static DateTimeKind GetRangeKind(NpgsqlRange range) + => !range.LowerBoundInfinite + ? range.LowerBound.Kind + : !range.UpperBoundInfinite + ? range.UpperBound.Kind + : DateTimeKind.Unspecified; + + static DateTimeKind GetMultirangeKind(IList> multirange) + { + for (var i = 0; i < multirange.Count; i++) + if (!multirange[i].IsEmpty) + return GetRangeKind(multirange[i]); + + return DateTimeKind.Unspecified; + } public override NpgsqlTypeHandler? ResolveValueTypeGenerically(T value) { diff --git a/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolverFactory.cs b/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolverFactory.cs index 6902ef5aaa..14c2c8149a 100644 --- a/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolverFactory.cs +++ b/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolverFactory.cs @@ -1,12 +1,13 @@ using System; using Npgsql.Internal; using Npgsql.Internal.TypeHandling; +using Npgsql.Internal.TypeMapping; namespace Npgsql.TypeMapping; sealed class BuiltInTypeHandlerResolverFactory : TypeHandlerResolverFactory { - public override TypeHandlerResolver Create(NpgsqlConnector connector) + public override TypeHandlerResolver Create(TypeMapper typeMapper, NpgsqlConnector connector) => new BuiltInTypeHandlerResolver(connector); public override string? 
GetDataTypeNameByClrType(Type clrType) diff --git a/src/Npgsql/TypeMapping/JsonTypeHandlerResolverFactory.cs b/src/Npgsql/TypeMapping/JsonTypeHandlerResolverFactory.cs index d5221cad28..7568e138be 100644 --- a/src/Npgsql/TypeMapping/JsonTypeHandlerResolverFactory.cs +++ b/src/Npgsql/TypeMapping/JsonTypeHandlerResolverFactory.cs @@ -3,13 +3,12 @@ using System.Text.Json; using Npgsql.Internal; using Npgsql.Internal.TypeHandling; +using Npgsql.Internal.TypeMapping; namespace Npgsql.TypeMapping; sealed class JsonTypeHandlerResolverFactory : TypeHandlerResolverFactory { - readonly Type[] _jsonbClrTypes; - readonly Type[] _jsonClrTypes; readonly JsonSerializerOptions _settings; readonly Dictionary? _userClrTypes; @@ -18,8 +17,6 @@ public JsonTypeHandlerResolverFactory( Type[]? jsonClrTypes, JsonSerializerOptions? settings) { - _jsonbClrTypes = jsonbClrTypes ?? Array.Empty(); - _jsonClrTypes = jsonClrTypes ?? Array.Empty(); _settings = settings ?? new JsonSerializerOptions(); if (jsonbClrTypes is not null) @@ -39,7 +36,7 @@ public JsonTypeHandlerResolverFactory( } } - public override TypeHandlerResolver Create(NpgsqlConnector connector) + public override TypeHandlerResolver Create(TypeMapper typeMapper, NpgsqlConnector connector) => new JsonTypeHandlerResolver(connector, _userClrTypes, _settings); public override string? GetDataTypeNameByClrType(Type type) diff --git a/src/Npgsql/TypeMapping/NpgsqlRangeExtensions.cs b/src/Npgsql/TypeMapping/NpgsqlRangeExtensions.cs new file mode 100644 index 0000000000..16c9533a61 --- /dev/null +++ b/src/Npgsql/TypeMapping/NpgsqlRangeExtensions.cs @@ -0,0 +1,20 @@ +using Npgsql.TypeMapping; + +// ReSharper disable once CheckNamespace +namespace Npgsql; + +/// +/// Extension allowing adding range and multirange mappings to an Npgsql type mapper. +/// +public static class NpgsqlRangeExtensions +{ + /// + /// Sets up mappings for the PostgreSQL range and multirange types. + /// + /// The type mapper to set up. 
+ public static INpgsqlTypeMapper UseRange(this INpgsqlTypeMapper mapper) + { + mapper.AddTypeResolverFactory(new RangeTypeHandlerResolverFactory()); + return mapper; + } +} diff --git a/src/Npgsql/TypeMapping/RangeTypeHandlerResolver.cs b/src/Npgsql/TypeMapping/RangeTypeHandlerResolver.cs new file mode 100644 index 0000000000..1386383a63 --- /dev/null +++ b/src/Npgsql/TypeMapping/RangeTypeHandlerResolver.cs @@ -0,0 +1,206 @@ +using System; +using System.Collections; +using System.Collections.Generic; +using System.Linq; +using System.Reflection; +using Npgsql.Internal; +using Npgsql.Internal.TypeHandlers.DateTimeHandlers; +using Npgsql.Internal.TypeHandling; +using Npgsql.Internal.TypeMapping; +using Npgsql.PostgresTypes; +using Npgsql.Properties; +using Npgsql.Util; +using NpgsqlTypes; +using static Npgsql.Util.Statics; + +namespace Npgsql.TypeMapping; + +sealed class RangeTypeHandlerResolver : TypeHandlerResolver +{ + readonly TypeMapper _typeMapper; + readonly NpgsqlDatabaseInfo _databaseInfo; + + readonly TimestampHandler _timestampHandler; + readonly TimestampTzHandler _timestampTzHandler; + + NpgsqlTypeHandler? _timestampRangeHandler; + NpgsqlTypeHandler? _timestampTzRangeHandler; + NpgsqlTypeHandler? _timestampMultirangeHandler; + NpgsqlTypeHandler? _timestampTzMultirangeHandler; + + internal RangeTypeHandlerResolver(TypeMapper typeMapper, NpgsqlConnector connector) + { + _typeMapper = typeMapper; + _databaseInfo = connector.DatabaseInfo; + + _timestampHandler = new TimestampHandler(PgType("timestamp without time zone")); + _timestampTzHandler = new TimestampTzHandler(PgType("timestamp with time zone")); + } + + public override NpgsqlTypeHandler? 
ResolveByDataTypeName(string typeName) + { + if (_databaseInfo.GetPostgresTypeByName(typeName) is not { } pgType) + return null; + + return pgType switch + { + PostgresRangeType pgRangeType + => _typeMapper.ResolveByOID(pgRangeType.Subtype.OID).CreateRangeHandler(pgRangeType), + PostgresMultirangeType pgMultirangeType + => _typeMapper.ResolveByOID(pgMultirangeType.Subrange.Subtype.OID).CreateMultirangeHandler(pgMultirangeType), + _ => null + }; + } + + public override NpgsqlTypeHandler? ResolveByNpgsqlDbType(NpgsqlDbType npgsqlDbType) + { + if (npgsqlDbType.HasFlag(NpgsqlDbType.Range)) + { + var subtypeHandler = _typeMapper.ResolveByNpgsqlDbType(npgsqlDbType & ~NpgsqlDbType.Range); + + if (subtypeHandler.PostgresType.Range is not { } pgRangeType) + throw new ArgumentException( + $"No range type could be found in the database for subtype {subtypeHandler.PostgresType}"); + + return subtypeHandler.CreateRangeHandler(pgRangeType); + } + + if (npgsqlDbType.HasFlag(NpgsqlDbType.Multirange)) + { + var subtypeHandler = _typeMapper.ResolveByNpgsqlDbType(npgsqlDbType & ~NpgsqlDbType.Multirange); + + if (subtypeHandler.PostgresType.Range?.Multirange is not { } pgMultirangeType) + throw new ArgumentException(string.Format(NpgsqlStrings.NoMultirangeTypeFound, subtypeHandler.PostgresType)); + + return subtypeHandler.CreateMultirangeHandler(pgMultirangeType); + } + + // Not a range or multirange + return null; + } + + public override NpgsqlTypeHandler? ResolveByClrType(Type type) + { + // Try to see if it is an array type + var arrayElementType = GetArrayListElementType(type); + if (arrayElementType is not null) + { + // With PG14, we map arrays over range types to PG multiranges by default, not to regular arrays over ranges. 
+ if (arrayElementType.IsGenericType && + arrayElementType.GetGenericTypeDefinition() == typeof(NpgsqlRange<>) && + _databaseInfo.Version.IsGreaterOrEqual(14)) + { + var arraySubtypeType = arrayElementType.GetGenericArguments()[0]; + + return _typeMapper.ResolveByClrType(arraySubtypeType) is + { PostgresType : { Range : { Multirange: { } pgMultirangeType } } } arraySubtypeHandler + ? arraySubtypeHandler.CreateMultirangeHandler(pgMultirangeType) + : throw new NotSupportedException($"The CLR range type {type} isn't supported by Npgsql or your PostgreSQL."); + } + } + + // TODO: We can make the following compatible with reflection-free mode by having NpgsqlRange implement some interface, and + // check for that. + if (!type.IsGenericType || type.GetGenericTypeDefinition() != typeof(NpgsqlRange<>)) + return null; + + var subtypeType = type.GetGenericArguments()[0]; + + return _typeMapper.ResolveByClrType(subtypeType) is { PostgresType : { Range : { } pgRangeType } } subtypeHandler + ? subtypeHandler.CreateRangeHandler(pgRangeType) + : throw new NotSupportedException($"The CLR range type {type} isn't supported by Npgsql or your PostgreSQL."); + + static Type? GetArrayListElementType(Type type) + { + var typeInfo = type.GetTypeInfo(); + if (typeInfo.IsArray) + return GetUnderlyingType(type.GetElementType()!); // The use of bang operator is justified here as Type.GetElementType() only returns null for the Array base class which can't be mapped in a useful way. + + var ilist = typeInfo.ImplementedInterfaces.FirstOrDefault(x => x.GetTypeInfo().IsGenericType && x.GetGenericTypeDefinition() == typeof(IList<>)); + if (ilist != null) + return GetUnderlyingType(ilist.GetGenericArguments()[0]); + + if (typeof(IList).IsAssignableFrom(type)) + throw new NotSupportedException("Non-generic IList is a supported parameter, but the NpgsqlDbType parameter must be set on the parameter"); + + return null; + + Type GetUnderlyingType(Type t) + => Nullable.GetUnderlyingType(t) ?? 
t; + } + } + + public override TypeMappingInfo? GetMappingByPostgresType(PostgresType type) + { + switch (type) + { + case PostgresRangeType pgRangeType: + { + if (_typeMapper.TryGetMapping(pgRangeType.Subtype, out var subtypeMapping)) + { + return new(subtypeMapping.NpgsqlDbType | NpgsqlDbType.Range, type.DisplayName); + } + + break; + } + + case PostgresMultirangeType pgMultirangeType: + { + if (_typeMapper.TryGetMapping(pgMultirangeType.Subrange.Subtype, out var subtypeMapping)) + { + return new(subtypeMapping.NpgsqlDbType | NpgsqlDbType.Multirange, type.DisplayName); + } + + break; + } + } + + return null; + } + + public override NpgsqlTypeHandler? ResolveValueDependentValue(object value) + { + // In LegacyTimestampBehavior, DateTime isn't value-dependent, and handled above in ClrTypeToDataTypeNameTable like other types + if (LegacyTimestampBehavior) + return null; + + return value switch + { + NpgsqlRange range => RangeHandler(!range.LowerBoundInfinite ? range.LowerBound.Kind : + !range.UpperBoundInfinite ? range.UpperBound.Kind : DateTimeKind.Unspecified), + + NpgsqlRange[] multirange => MultirangeHandler(GetMultirangeKind(multirange)), + List> multirange => MultirangeHandler(GetMultirangeKind(multirange)), + + _ => null + }; + + NpgsqlTypeHandler RangeHandler(DateTimeKind kind) + => kind == DateTimeKind.Utc + ? _timestampTzRangeHandler ??= _timestampTzHandler.CreateRangeHandler((PostgresRangeType)PgType("tstzrange")) + : _timestampRangeHandler ??= _timestampHandler.CreateRangeHandler((PostgresRangeType)PgType("tsrange")); + + NpgsqlTypeHandler MultirangeHandler(DateTimeKind kind) + => kind == DateTimeKind.Utc + ? _timestampTzMultirangeHandler ??= _timestampTzHandler.CreateMultirangeHandler((PostgresMultirangeType)PgType("tstzmultirange")) + : _timestampMultirangeHandler ??= _timestampHandler.CreateMultirangeHandler((PostgresMultirangeType)PgType("tsmultirange")); + } + + static DateTimeKind GetRangeKind(NpgsqlRange range) + => !range.LowerBoundInfinite + ? 
range.LowerBound.Kind + : !range.UpperBoundInfinite + ? range.UpperBound.Kind + : DateTimeKind.Unspecified; + + static DateTimeKind GetMultirangeKind(IList> multirange) + { + for (var i = 0; i < multirange.Count; i++) + if (!multirange[i].IsEmpty) + return GetRangeKind(multirange[i]); + + return DateTimeKind.Unspecified; + } + + PostgresType PgType(string pgTypeName) => _databaseInfo.GetPostgresTypeByName(pgTypeName); +} diff --git a/src/Npgsql/TypeMapping/RangeTypeHandlerResolverFactory.cs b/src/Npgsql/TypeMapping/RangeTypeHandlerResolverFactory.cs new file mode 100644 index 0000000000..a1bb608a98 --- /dev/null +++ b/src/Npgsql/TypeMapping/RangeTypeHandlerResolverFactory.cs @@ -0,0 +1,24 @@ +using System; +using Npgsql.Internal; +using Npgsql.Internal.TypeHandling; +using Npgsql.Internal.TypeMapping; + +namespace Npgsql.TypeMapping; + +sealed class RangeTypeHandlerResolverFactory : TypeHandlerResolverFactory +{ + public override TypeHandlerResolver Create(TypeMapper typeMapper, NpgsqlConnector connector) + => new RangeTypeHandlerResolver(typeMapper, connector); + + // Here and below we don't resolve anything. + // Instead BuiltInTypeHandlerResolver will resolve mappings for us. + // This is so we don't need to add RangeTypeHandlerResolverFactory to GlobalTypeMapper + public override string? GetDataTypeNameByClrType(Type clrType) + => null; + + public override string? GetDataTypeNameByValueDependentValue(object value) + => null; + + public override TypeMappingInfo? 
GetMappingByDataTypeName(string dataTypeName) + => null; +} diff --git a/test/Npgsql.Benchmarks/ResolveHandler.cs b/test/Npgsql.Benchmarks/ResolveHandler.cs index 1b8bf6a8af..419b3e179c 100644 --- a/test/Npgsql.Benchmarks/ResolveHandler.cs +++ b/test/Npgsql.Benchmarks/ResolveHandler.cs @@ -1,5 +1,6 @@ using BenchmarkDotNet.Attributes; using Npgsql.Internal.TypeHandling; +using Npgsql.Internal.TypeMapping; using Npgsql.TypeMapping; using NpgsqlTypes; diff --git a/test/Npgsql.NodaTime.Tests/LegacyNodaTimeTests.cs b/test/Npgsql.NodaTime.Tests/LegacyNodaTimeTests.cs index 67c4202ff4..0792671448 100644 --- a/test/Npgsql.NodaTime.Tests/LegacyNodaTimeTests.cs +++ b/test/Npgsql.NodaTime.Tests/LegacyNodaTimeTests.cs @@ -72,6 +72,7 @@ public async Task Setup() // Clear any previous cached mappings/handlers in case tests were executed before the legacy flag was set. NpgsqlConnection.GlobalTypeMapper.Reset(); + NpgsqlConnection.GlobalTypeMapper.UseRange(); NpgsqlConnection.GlobalTypeMapper.UseNodaTime(); await using var connection = await OpenConnectionAsync(); await connection.ReloadTypesAsync(); diff --git a/test/Npgsql.NodaTime.Tests/NodaTimeSetupFixture.cs b/test/Npgsql.NodaTime.Tests/NodaTimeSetupFixture.cs index 25ab4f58cd..fdd68ba676 100644 --- a/test/Npgsql.NodaTime.Tests/NodaTimeSetupFixture.cs +++ b/test/Npgsql.NodaTime.Tests/NodaTimeSetupFixture.cs @@ -10,7 +10,7 @@ public class NodaTimeSetupFixture { #pragma warning disable CS0618 // GlobalTypeMapper is obsolete [OneTimeSetUp] - public void OneTimeSetUp() => NpgsqlConnection.GlobalTypeMapper.UseNodaTime(); + public void OneTimeSetUp() => NpgsqlConnection.GlobalTypeMapper.UseRange().UseNodaTime(); [OneTimeTearDown] public void OneTimeTearDown() => NpgsqlConnection.GlobalTypeMapper.Reset(); diff --git a/test/Npgsql.Tests/ConnectionTests.cs b/test/Npgsql.Tests/ConnectionTests.cs index 0deb4b6773..9e93f2486b 100644 --- a/test/Npgsql.Tests/ConnectionTests.cs +++ b/test/Npgsql.Tests/ConnectionTests.cs @@ -1144,7 +1144,7 
@@ public async Task Exception_during_close() [Test, Description("Some pseudo-PG database don't support pg_type loading, we have a minimal DatabaseInfo for this")] public async Task NoTypeLoading() { - await using var dataSource = CreateDataSource(csb => csb.ServerCompatibilityMode = ServerCompatibilityMode.NoTypeLoading); + await using var dataSource = CreateDataSourceWithRanges(csb => csb.ServerCompatibilityMode = ServerCompatibilityMode.NoTypeLoading); await using var conn = await dataSource.OpenConnectionAsync(); Assert.That(await conn.ExecuteScalarAsync("SELECT 8"), Is.EqualTo(8)); @@ -1170,8 +1170,8 @@ public async Task NoTypeLoading() }; Assert.That(async () => await cmd.ExecuteScalarAsync(), - Throws.Exception.TypeOf() - .With.Message.EqualTo(string.Format(NpgsqlStrings.NoMultirangeTypeFound, "integer"))); + Throws.Exception.TypeOf() + .With.Message.EqualTo("The NpgsqlDbType 'IntegerMultirange' isn't present in your database. You may need to install an extension or upgrade to a newer version.")); } } diff --git a/test/Npgsql.Tests/ReaderTests.cs b/test/Npgsql.Tests/ReaderTests.cs index 0373394584..08c5d78d18 100644 --- a/test/Npgsql.Tests/ReaderTests.cs +++ b/test/Npgsql.Tests/ReaderTests.cs @@ -11,6 +11,7 @@ using Npgsql.BackendMessages; using Npgsql.Internal; using Npgsql.Internal.TypeHandling; +using Npgsql.Internal.TypeMapping; using Npgsql.PostgresTypes; using Npgsql.Tests.Support; using Npgsql.TypeMapping; @@ -2172,7 +2173,7 @@ class ExplodingTypeHandlerResolverFactory : TypeHandlerResolverFactory { readonly bool _safe; public ExplodingTypeHandlerResolverFactory(bool safe) => _safe = safe; - public override TypeHandlerResolver Create(NpgsqlConnector connector) => new ExplodingTypeHandlerResolver(_safe); + public override TypeHandlerResolver Create(TypeMapper typeMapper, NpgsqlConnector connector) => new ExplodingTypeHandlerResolver(_safe); public override TypeMappingInfo GetMappingByDataTypeName(string dataTypeName) => throw new 
NotSupportedException(); public override string? GetDataTypeNameByClrType(Type clrType) => throw new NotSupportedException(); diff --git a/test/Npgsql.Tests/Support/TestBase.cs b/test/Npgsql.Tests/Support/TestBase.cs index 29b11adfaa..5dd5da05dd 100644 --- a/test/Npgsql.Tests/Support/TestBase.cs +++ b/test/Npgsql.Tests/Support/TestBase.cs @@ -368,6 +368,15 @@ protected virtual NpgsqlDataSource CreateDataSource(Action? connectionStringBuilderAction = null) + { + var connectionStringBuilder = new NpgsqlConnectionStringBuilder(ConnectionString); + connectionStringBuilderAction?.Invoke(connectionStringBuilder); + var dataSourceBuilder = new NpgsqlDataSourceBuilder(connectionStringBuilder.ConnectionString); + dataSourceBuilder.UseRange(); + return dataSourceBuilder.Build(); + } + protected static NpgsqlDataSource GetDataSource(string connectionString) { if (!DataSources.TryGetValue(connectionString, out var dataSource)) diff --git a/test/Npgsql.Tests/TypeMapperTests.cs b/test/Npgsql.Tests/TypeMapperTests.cs index 34f8f598e1..0237bd617a 100644 --- a/test/Npgsql.Tests/TypeMapperTests.cs +++ b/test/Npgsql.Tests/TypeMapperTests.cs @@ -2,9 +2,11 @@ using Npgsql.Internal.TypeHandlers; using Npgsql.Internal.TypeHandling; using Npgsql.PostgresTypes; +using Npgsql.TypeMapping; using NUnit.Framework; using System; using System.Threading.Tasks; +using Npgsql.Internal.TypeMapping; using static Npgsql.Tests.TestUtil; namespace Npgsql.Tests; @@ -162,7 +164,7 @@ await conn.ExecuteNonQueryAsync(@$" class CitextToStringTypeHandlerResolverFactory : TypeHandlerResolverFactory { - public override TypeHandlerResolver Create(NpgsqlConnector connector) + public override TypeHandlerResolver Create(TypeMapper typeMapper, NpgsqlConnector connector) => new CitextToStringTypeHandlerResolver(connector); public override TypeMappingInfo GetMappingByDataTypeName(string dataTypeName) => throw new NotSupportedException(); diff --git a/test/Npgsql.Tests/Types/DateTimeTests.cs 
b/test/Npgsql.Tests/Types/DateTimeTests.cs index f387387dcc..d121674f94 100644 --- a/test/Npgsql.Tests/Types/DateTimeTests.cs +++ b/test/Npgsql.Tests/Types/DateTimeTests.cs @@ -419,9 +419,24 @@ public Task Interval_with_months_cannot_read_as_TimeSpan() #endregion + NpgsqlDataSource DataSourceWithRanges { get; set; } = default!; + + [OneTimeSetUp] + public void OneTimeSetUp() => DataSourceWithRanges = CreateDataSourceWithRanges(); + + [OneTimeTearDown] + public async Task TearDown() + { + if (DataSourceWithRanges is not null) + { + await DataSourceWithRanges.DisposeAsync(); + DataSourceWithRanges = null!; + } + } + protected override async ValueTask OpenConnectionAsync() { - var conn = await base.OpenConnectionAsync(); + var conn = await DataSourceWithRanges.OpenConnectionAsync(); await conn.ExecuteNonQueryAsync("SET TimeZone='Europe/Berlin'"); return conn; } diff --git a/test/Npgsql.Tests/Types/MultirangeTests.cs b/test/Npgsql.Tests/Types/MultirangeTests.cs index 9479e72eff..dbf87dace6 100644 --- a/test/Npgsql.Tests/Types/MultirangeTests.cs +++ b/test/Npgsql.Tests/Types/MultirangeTests.cs @@ -193,10 +193,29 @@ async Task WriteInternal(IList> multirange) } } + NpgsqlDataSource DataSourceWithRanges { get; set; } = default!; + [OneTimeSetUp] public async Task Setup() { + DataSourceWithRanges = CreateDataSourceWithRanges(); await using var conn = await OpenConnectionAsync(); MinimumPgVersion(conn, "14.0", "Multirange types were introduced in PostgreSQL 14"); } + + [OneTimeTearDown] + public async Task TearDown() + { + if (DataSourceWithRanges is not null) + { + await DataSourceWithRanges.DisposeAsync(); + DataSourceWithRanges = null!; + } + } + + protected override ValueTask OpenConnectionAsync() + => DataSourceWithRanges.OpenConnectionAsync(); + + protected override NpgsqlConnection OpenConnection() + => throw new NotSupportedException(); } diff --git a/test/Npgsql.Tests/Types/RangeTests.cs b/test/Npgsql.Tests/Types/RangeTests.cs index 8764c304c5..c02781f04a 100644 
--- a/test/Npgsql.Tests/Types/RangeTests.cs +++ b/test/Npgsql.Tests/Types/RangeTests.cs @@ -22,7 +22,7 @@ public async Task Range_resolution() if (IsMultiplexing) Assert.Ignore("Multiplexing, ReloadTypes"); - await using var dataSource = CreateDataSource(csb => csb.Pooling = false); + await using var dataSource = CreateDataSourceWithRanges(csb => csb.Pooling = false); await using var conn = await dataSource.OpenConnectionAsync(); // Resolve type by NpgsqlDbType @@ -98,7 +98,7 @@ public async Task Range() [NonParallelizable] public async Task Range_with_long_subtype() { - await using var dataSource = CreateDataSource(csb => csb.MaxPoolSize = 1); + await using var dataSource = CreateDataSourceWithRanges(csb => csb.MaxPoolSize = 1); await using var conn = await dataSource.OpenConnectionAsync(); var typeName = await GetTempTypeName(conn); @@ -247,13 +247,29 @@ await AssertType( isNpgsqlDbTypeInferredFromClrType: false); } + NpgsqlDataSource DataSourceWithRanges { get; set; } = default!; + [OneTimeSetUp] public async Task OneTimeSetUp() { + DataSourceWithRanges = CreateDataSourceWithRanges(); using var conn = await OpenConnectionAsync(); MinimumPgVersion(conn, "9.2.0"); } + [OneTimeTearDown] + public async Task TearDown() + { + await DataSourceWithRanges.DisposeAsync(); + DataSourceWithRanges = null!; + } + + protected override ValueTask OpenConnectionAsync() + => DataSourceWithRanges.OpenConnectionAsync(); + + protected override NpgsqlConnection OpenConnection() + => throw new NotSupportedException(); + #region ParseTests [Theory] From 91ab104286ff5c6ac307ad17dab96386ee307e97 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Mon, 27 Feb 2023 15:44:16 +0300 Subject: [PATCH 075/761] Reduce AOT size of ArrayHandler (#4926) Contributes to #4799 --- .../Internal/TypeHandlers/ArrayHandler.cs | 179 +++++++++--------- .../Internal/TypeHandlers/BitStringHandler.cs | 24 --- test/Npgsql.Tests/Types/NetworkTypeTests.cs | 11 +- 3 files changed, 95 insertions(+), 119 deletions(-) 
diff --git a/src/Npgsql/Internal/TypeHandlers/ArrayHandler.cs b/src/Npgsql/Internal/TypeHandlers/ArrayHandler.cs index 742f503726..cfc44aba67 100644 --- a/src/Npgsql/Internal/TypeHandlers/ArrayHandler.cs +++ b/src/Npgsql/Internal/TypeHandlers/ArrayHandler.cs @@ -200,6 +200,89 @@ protected async ValueTask> ReadList(N #endregion Read + #region Write + + // Take care of multi-dimensional arrays and non-generic IList, we have no choice but to box/unbox + protected int ValidateAndGetLengthNonGeneric(ICollection value, ref NpgsqlLengthCache lengthCache) + { + var asMultidimensional = value as Array; + var dimensions = asMultidimensional?.Rank ?? 1; + + // Leave empty slot for the entire array length, and go ahead an populate the element slots + var pos = lengthCache.Position; + var len = + 4 + // dimensions + 4 + // has_nulls (unused) + 4 + // type OID + dimensions * 8 + // number of dimensions * (length + lower bound) + 4 * value.Count; // sum of element lengths + + lengthCache.Set(0); + NpgsqlLengthCache? elemLengthCache = lengthCache; + + foreach (var element in value) + { + if (element is null) + continue; + + try + { + len += ElementHandler.ValidateObjectAndGetLength(element, ref elemLengthCache, null); + } + catch (Exception e) + { + throw MixedTypesOrJaggedArrayException(e); + } + } + + lengthCache.Lengths[pos] = len; + return len; + } + + protected async Task WriteNonGeneric(ICollection value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, bool async, CancellationToken cancellationToken = default) + { + var asArray = value as Array; + var dimensions = asArray?.Rank ?? 1; + + var len = + 4 + // ndim + 4 + // has_nulls + 4 + // element_oid + dimensions * 8; // dim (4) + lBound (4) + + if (buf.WriteSpaceLeft < len) + { + await buf.Flush(async, cancellationToken); + Debug.Assert(buf.WriteSpaceLeft >= len, "Buffer too small for header"); + } + + buf.WriteInt32(dimensions); + buf.WriteInt32(1); // HasNulls=1. Not actually used by the backend. 
+ buf.WriteUInt32(ElementHandler.PostgresType.OID); + if (asArray != null) + { + for (var i = 0; i < dimensions; i++) + { + buf.WriteInt32(asArray.GetLength(i)); + buf.WriteInt32(LowerBound); // We don't map .NET lower bounds to PG + } + } + else + { + buf.WriteInt32(value.Count); + buf.WriteInt32(LowerBound); // We don't map .NET lower bounds to PG + } + + foreach (var element in value) + await ElementHandler.WriteObjectWithLength(element, buf, lengthCache, null, async, cancellationToken); + } + + protected static Exception MixedTypesOrJaggedArrayException(Exception innerException) + => new("While trying to write an array, one of its elements failed validation. " + + "You may be trying to mix types in a non-generic IList, or to write a jagged array.", innerException); + + #endregion Write + #region Static generic caching helpers internal static class ElementTypeInfo @@ -297,12 +380,8 @@ public override async ValueTask ReadAsObject(NpgsqlReadBuffer buf, int l #region Write - static Exception MixedTypesOrJaggedArrayException(Exception innerException) - => new("While trying to write an array, one of its elements failed validation. " + - "You may be trying to mix types in a non-generic IList, or to write a jagged array.", innerException); - - static Exception CantWriteTypeException(Type type) - => new InvalidCastException($"Can't write type {type} as an array of {typeof(TElement)}"); + static InvalidCastException CantWriteTypeException(Type type) + => new($"Can't write type '{type}' as an array of {typeof(TElement)}"); // Since TAny isn't constrained to class? or struct (C# doesn't have a non-nullable constraint that doesn't limit us to either struct or class), // we must use the bang operator here to tell the compiler that a null value will never be returned. 
@@ -363,43 +442,6 @@ int ValidateAndGetLengthGeneric(ICollection value, ref NpgsqlLengthCac return len; } - // Take care of multi-dimensional arrays and non-generic IList, we have no choice but to box/unbox - int ValidateAndGetLengthNonGeneric(ICollection value, ref NpgsqlLengthCache lengthCache) - { - var asMultidimensional = value as Array; - var dimensions = asMultidimensional?.Rank ?? 1; - - // Leave empty slot for the entire array length, and go ahead an populate the element slots - var pos = lengthCache.Position; - var len = - 4 + // dimensions - 4 + // has_nulls (unused) - 4 + // type OID - dimensions * 8 + // number of dimensions * (length + lower bound) - 4 * value.Count; // sum of element lengths - - lengthCache.Set(0); - NpgsqlLengthCache? elemLengthCache = lengthCache; - - foreach (var element in value) - { - if (element is null) - continue; - - try - { - len += ElementHandler.ValidateObjectAndGetLength(element, ref elemLengthCache, null); - } - catch (Exception e) - { - throw MixedTypesOrJaggedArrayException(e); - } - } - - lengthCache.Lengths[pos] = len; - return len; - } - protected override Task WriteWithLengthCustom([DisallowNull] TAny value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken) { buf.WriteInt32(ValidateAndGetLength(value, ref lengthCache, parameter)); @@ -444,44 +486,6 @@ async Task WriteGeneric(ICollection value, NpgsqlWriteBuffer buf, Npgs await ElementHandler.WriteWithLength(element, buf, lengthCache, null, async, cancellationToken); } - async Task WriteNonGeneric(ICollection value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, bool async, CancellationToken cancellationToken = default) - { - var asArray = value as Array; - var dimensions = asArray?.Rank ?? 
1; - - var len = - 4 + // ndim - 4 + // has_nulls - 4 + // element_oid - dimensions * 8; // dim (4) + lBound (4) - - if (buf.WriteSpaceLeft < len) - { - await buf.Flush(async, cancellationToken); - Debug.Assert(buf.WriteSpaceLeft >= len, "Buffer too small for header"); - } - - buf.WriteInt32(dimensions); - buf.WriteInt32(1); // HasNulls=1. Not actually used by the backend. - buf.WriteUInt32(ElementHandler.PostgresType.OID); - if (asArray != null) - { - for (var i = 0; i < dimensions; i++) - { - buf.WriteInt32(asArray.GetLength(i)); - buf.WriteInt32(LowerBound); // We don't map .NET lower bounds to PG - } - } - else - { - buf.WriteInt32(value.Count); - buf.WriteInt32(LowerBound); // We don't map .NET lower bounds to PG - } - - foreach (var element in value) - await ElementHandler.WriteObjectWithLength(element, buf, lengthCache, null, async, cancellationToken); - } - #endregion } @@ -495,19 +499,6 @@ sealed class ArrayHandlerWithPsv : ArrayHandler public ArrayHandlerWithPsv(PostgresType arrayPostgresType, NpgsqlTypeHandler elementHandler, ArrayNullabilityMode arrayNullabilityMode) : base(arrayPostgresType, elementHandler, arrayNullabilityMode) { } - protected internal override async ValueTask ReadCustom(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - { - if (ArrayTypeInfo.ElementType == typeof(TElementPsv)) - { - if (ArrayTypeInfo.IsArray) - return (TRequestedArray)(object)await ReadArray(buf, async, typeof(TRequestedArray).GetArrayRank()); - - if (ArrayTypeInfo.IsList) - return (TRequestedArray)(object)await ReadList(buf, async); - } - return await base.ReadCustom(buf, len, async, fieldDescription); - } - internal override object ReadPsvAsObject(NpgsqlReadBuffer buf, int len, FieldDescription? 
fieldDescription = null) => ReadPsvAsObject(buf, len, false, fieldDescription).GetAwaiter().GetResult(); diff --git a/src/Npgsql/Internal/TypeHandlers/BitStringHandler.cs b/src/Npgsql/Internal/TypeHandlers/BitStringHandler.cs index 7ffa97b0c1..27c3eece3c 100644 --- a/src/Npgsql/Internal/TypeHandlers/BitStringHandler.cs +++ b/src/Npgsql/Internal/TypeHandlers/BitStringHandler.cs @@ -266,30 +266,6 @@ public class BitStringArrayHandler : ArrayHandler public BitStringArrayHandler(PostgresType postgresType, BitStringHandler elementHandler, ArrayNullabilityMode arrayNullabilityMode) : base(postgresType, elementHandler, arrayNullabilityMode) {} - /// - protected internal override async ValueTask ReadCustom(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - { - if (ArrayTypeInfo.ElementType == typeof(BitArray)) - { - if (ArrayTypeInfo.IsArray) - return (TRequestedArray)(object)await ReadArray(buf, async); - - if (ArrayTypeInfo.IsList) - return (TRequestedArray)(object)await ReadList(buf, async); - } - - if (ArrayTypeInfo.ElementType == typeof(bool)) - { - if (ArrayTypeInfo.IsArray) - return (TRequestedArray)(object)await ReadArray(buf, async); - - if (ArrayTypeInfo.IsList) - return (TRequestedArray)(object)await ReadList(buf, async); - } - - return await base.ReadCustom(buf, len, async, fieldDescription); - } - public override async ValueTask ReadAsObject(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) => fieldDescription?.TypeModifier == 1 ? 
await ReadArray(buf, async) diff --git a/test/Npgsql.Tests/Types/NetworkTypeTests.cs b/test/Npgsql.Tests/Types/NetworkTypeTests.cs index 5e7de43989..63d456b9cd 100644 --- a/test/Npgsql.Tests/Types/NetworkTypeTests.cs +++ b/test/Npgsql.Tests/Types/NetworkTypeTests.cs @@ -40,6 +40,15 @@ public Task Inet_v6_as_tuple() NpgsqlDbType.Inet, isDefaultForReading: false); + [Test] + public Task Inet_v6_array_as_tuple() + => AssertType( + new[] { (IPAddress.Parse("2001:1db8:85a3:1142:1000:8a2e:1370:7334"), 24) }, + "{2001:1db8:85a3:1142:1000:8a2e:1370:7334/24}", + "inet[]", + NpgsqlDbType.Inet | NpgsqlDbType.Array, + isDefaultForReading: false); + [Test, IssueLink("https://github.com/dotnet/corefx/issues/33373")] public Task IPAddress_Any() => AssertTypeWrite(IPAddress.Any, "0.0.0.0/32", "inet", NpgsqlDbType.Inet); @@ -127,4 +136,4 @@ public async Task Macaddr_write_validation() } public NetworkTypeTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} -} \ No newline at end of file +} From acd1d32ccb1e00a240d4390000395650ceaa7eb3 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Mon, 27 Feb 2023 15:53:15 +0300 Subject: [PATCH 076/761] Add recycling for NpgsqlBatch (#4847) Closes #4224 --- src/Npgsql/NpgsqlBatch.cs | 26 +++++++++++++++++++++ src/Npgsql/NpgsqlCommand.cs | 35 ++++++++++++++++++----------- src/Npgsql/NpgsqlConnection.cs | 29 +++++++++++++++++++----- src/Npgsql/NpgsqlDataReader.cs | 12 +++------- src/Npgsql/NpgsqlDataSourceBatch.cs | 1 - src/Npgsql/PublicAPI.Unshipped.txt | 3 ++- 6 files changed, 76 insertions(+), 30 deletions(-) diff --git a/src/Npgsql/NpgsqlBatch.cs b/src/Npgsql/NpgsqlBatch.cs index 0b86bb3164..06e15b4988 100644 --- a/src/Npgsql/NpgsqlBatch.cs +++ b/src/Npgsql/NpgsqlBatch.cs @@ -1,3 +1,4 @@ +using System; using System.Data; using System.Data.Common; using System.Threading; @@ -98,6 +99,7 @@ internal bool AllResultTypesAreUnknown /// The in which the executes. public NpgsqlBatch(NpgsqlConnection? 
connection = null, NpgsqlTransaction? transaction = null) { + GC.SuppressFinalize(this); Command = new(DefaultBatchCommandsSize); BatchCommands = new NpgsqlBatchCommandCollection(Command.InternalBatchCommands); @@ -107,12 +109,14 @@ public NpgsqlBatch(NpgsqlConnection? connection = null, NpgsqlTransaction? trans internal NpgsqlBatch(NpgsqlConnector connector) { + GC.SuppressFinalize(this); Command = new(connector, DefaultBatchCommandsSize); BatchCommands = new NpgsqlBatchCommandCollection(Command.InternalBatchCommands); } private protected NpgsqlBatch(NpgsqlDataSourceCommand command) { + GC.SuppressFinalize(this); Command = command; BatchCommands = new NpgsqlBatchCommandCollection(Command.InternalBatchCommands); } @@ -171,4 +175,26 @@ public override Task PrepareAsync(CancellationToken cancellationToken = default) /// public override void Cancel() => Command.Cancel(); + + /// + public override void Dispose() + { + Command.ResetTransaction(); + if (Command.IsCacheable && Connection is not null && Connection.CachedBatch is null) + { + BatchCommands.Clear(); + Command.Reset(); + Connection.CachedBatch = this; + return; + } + + Command.IsCacheable = false; + } + + internal static NpgsqlBatch CreateCachedBatch(NpgsqlConnection connection) + { + var batch = new NpgsqlBatch(connection); + batch.Command.IsCacheable = true; + return batch; + } } \ No newline at end of file diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index 264f1a721f..1a39bf291a 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -70,7 +70,7 @@ public class NpgsqlCommand : DbCommand, ICloneable, IComponent /// /// Whether this command is cached by and returned by . 
/// - internal bool IsCached { get; set; } + internal bool IsCacheable { get; set; } #if DEBUG internal static bool EnableSqlRewriting; @@ -169,7 +169,7 @@ internal NpgsqlCommand(NpgsqlConnector connector, int batchCommandCapacity) => _connector = connector; internal static NpgsqlCommand CreateCachedCommand(NpgsqlConnection connection) - => new(null, connection) { IsCached = true }; + => new(null, connection) { IsCacheable = true }; #endregion Constructors @@ -1604,31 +1604,38 @@ public override void Cancel() /// protected override void Dispose(bool disposing) { - _transaction = null; + ResetTransaction(); State = CommandState.Disposed; - if (IsCached && InternalConnection is not null && InternalConnection.CachedCommand is null) + if (IsCacheable && InternalConnection is not null && InternalConnection.CachedCommand is null) { - // TODO: Optimize NpgsqlParameterCollection to recycle NpgsqlParameter instances as well - // TODO: Statements isn't cleared/recycled, leaving this for now, since it'll be replaced by the new batching API - - _commandText = string.Empty; - CommandType = CommandType.Text; - _parameters.Clear(); + Reset(); InternalConnection.CachedCommand = this; return; } - IsCached = false; + IsCacheable = false; + } + + internal void Reset() + { + // TODO: Optimize NpgsqlParameterCollection to recycle NpgsqlParameter instances as well + // TODO: Statements isn't cleared/recycled, leaving this for now, since it'll be replaced by the new batching API + _commandText = string.Empty; + CommandType = CommandType.Text; + _parameters.Clear(); + _timeout = null; + _allResultTypesAreUnknown = false; + EnableErrorBarriers = false; } + internal void ResetTransaction() => _transaction = null; + #endregion #region Tracing - #endregion Tracing - internal void TraceCommandStart(NpgsqlConnector connector) { Debug.Assert(CurrentActivity is null); @@ -1662,6 +1669,8 @@ internal void TraceSetException(Exception e) } } + #endregion Tracing + #region Misc NpgsqlBatchCommand 
TruncateStatementsToOne() diff --git a/src/Npgsql/NpgsqlConnection.cs b/src/Npgsql/NpgsqlConnection.cs index 5e009a79c5..d88af4411b 100644 --- a/src/Npgsql/NpgsqlConnection.cs +++ b/src/Npgsql/NpgsqlConnection.cs @@ -71,11 +71,6 @@ internal NpgsqlDataSource NpgsqlDataSource } } - /// - /// A cached command handed out by , which is returned when disposed. Useful for reducing allocations. - /// - internal NpgsqlCommand? CachedCommand { get; set; } - /// /// Flag used to make sure we never double-close a connection, returning it twice to the pool. /// @@ -556,6 +551,11 @@ public override ConnectionState State #region Command / Batch creation + /// + /// A cached command handed out by , which is returned when disposed. Useful for reducing allocations. + /// + internal NpgsqlCommand? CachedCommand { get; set; } + /// /// Creates and returns a /// object associated with the . @@ -582,6 +582,11 @@ public override ConnectionState State return NpgsqlCommand.CreateCachedCommand(this); } + /// + /// A cached batch handed out by , which is returned when disposed. Useful for reducing allocations. + /// + internal NpgsqlBatch? CachedBatch { get; set; } + #if NET6_0_OR_GREATER /// public override bool CanCreateBatch => true; @@ -590,7 +595,19 @@ public override ConnectionState State protected override DbBatch CreateDbBatch() => CreateBatch(); /// - public new NpgsqlBatch CreateBatch() => new(this); + public new NpgsqlBatch CreateBatch() + { + CheckDisposed(); + + var cachedBatch = CachedBatch; + if (cachedBatch is not null) + { + CachedBatch = null; + return cachedBatch; + } + + return NpgsqlBatch.CreateCachedBatch(this); + } #else /// /// Creates and returns a object associated with the . 
diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 5a8040801a..908e03a022 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -553,10 +553,7 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo // Prevent the command or batch from being recycled (by the connection) when it's disposed. This is important since // the exception is very likely to escape the using statement of the command, and by that time some other user may // already be using the recycled instance. - if (!Command.IsWrappedByBatch) - { - Command.IsCached = false; - } + Command.IsCacheable = false; // If the schema of a table changes after a statement is prepared on that table, PostgreSQL errors with // 0A000: cached plan must not change result type. 0A000 seems like a non-specific code, but it's very unlikely the @@ -758,13 +755,10 @@ async Task NextResultSchemaOnly(bool async, bool isConsuming = false, Canc { postgresException.BatchCommand = _statements[StatementIndex]; - // Prevent the command or batch from by recycled (by the connection) when it's disposed. This is important since + // Prevent the command or batch from being recycled (by the connection) when it's disposed. This is important since // the exception is very likely to escape the using statement of the command, and by that time some other user may // already be using the recycled instance. - if (!Command.IsWrappedByBatch) - { - Command.IsCached = false; - } + Command.IsCacheable = false; } // An error means all subsequent statements were skipped by PostgreSQL. 
diff --git a/src/Npgsql/NpgsqlDataSourceBatch.cs b/src/Npgsql/NpgsqlDataSourceBatch.cs index b3235c9d46..fa239ee8e6 100644 --- a/src/Npgsql/NpgsqlDataSourceBatch.cs +++ b/src/Npgsql/NpgsqlDataSourceBatch.cs @@ -1,5 +1,4 @@ using System; -using System.Data; using System.Data.Common; using System.Threading; using System.Threading.Tasks; diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index 40c160bf26..95df6e3864 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -1,4 +1,5 @@ #nullable enable +override Npgsql.NpgsqlBatch.Dispose() -> void Npgsql.NpgsqlBinaryImporter.WriteRow(params object?[]! values) -> void Npgsql.NpgsqlBinaryImporter.WriteRowAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken), params object?[]! values) -> System.Threading.Tasks.Task! Npgsql.NpgsqlJsonExtensions @@ -13,4 +14,4 @@ static Npgsql.NpgsqlRangeExtensions.UseRange(this Npgsql.TypeMapping.INpgsqlType *REMOVED*static NpgsqlTypes.NpgsqlPoint.Parse(string! s) -> NpgsqlTypes.NpgsqlPoint *REMOVED*static NpgsqlTypes.NpgsqlPolygon.Parse(string! s) -> NpgsqlTypes.NpgsqlPolygon *REMOVED*Npgsql.NpgsqlBinaryImporter.WriteRow(params object![]! values) -> void -*REMOVED*Npgsql.NpgsqlBinaryImporter.WriteRowAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken), params object![]! values) -> System.Threading.Tasks.Task! \ No newline at end of file +*REMOVED*Npgsql.NpgsqlBinaryImporter.WriteRowAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken), params object![]! values) -> System.Threading.Tasks.Task! 
From 333bfea35154bb9758f22eb8b7ad71dc993e12ce Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Mon, 27 Feb 2023 16:17:39 +0300 Subject: [PATCH 077/761] Allow providing CA certificate via a property (#4858) Closes #4267 --- src/Npgsql/Internal/NpgsqlConnector.cs | 33 ++-- src/Npgsql/NpgsqlDataSource.cs | 7 +- src/Npgsql/NpgsqlDataSourceBuilder.cs | 32 +++- src/Npgsql/NpgsqlDataSourceConfiguration.cs | 3 +- .../Properties/NpgsqlStrings.Designer.cs | 148 +++++++++++++----- src/Npgsql/Properties/NpgsqlStrings.resx | 3 + src/Npgsql/PublicAPI.Unshipped.txt | 2 + 7 files changed, 175 insertions(+), 53 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 0b7e994482..16672fbe87 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -842,6 +842,8 @@ async Task RawOpen(SslMode sslMode, NpgsqlTimeout timeout, bool async, Cancellat var checkCertificateRevocation = Settings.CheckCertificateRevocation; RemoteCertificateValidationCallback? certificateValidationCallback; + X509Certificate2? caCert; + string? certRootPath = null; if (UserCertificateValidationCallback is not null) { @@ -851,6 +853,9 @@ async Task RawOpen(SslMode sslMode, NpgsqlTimeout timeout, bool async, Cancellat if (Settings.RootCertificate is not null) throw new ArgumentException(string.Format(NpgsqlStrings.CannotUseSslRootCertificateWithUserCallback)); + if (DataSource.RootCertificateCallback is not null) + throw new ArgumentException(string.Format(NpgsqlStrings.CannotUseValidationRootCertificateCallbackWithUserCallback)); + certificateValidationCallback = UserCertificateValidationCallback; } else if (sslMode is SslMode.Prefer or SslMode.Require) @@ -861,10 +866,11 @@ async Task RawOpen(SslMode sslMode, NpgsqlTimeout timeout, bool async, Cancellat certificateValidationCallback = SslTrustServerValidation; checkCertificateRevocation = false; } - else if ((Settings.RootCertificate ?? 
PostgresEnvironment.SslCertRoot ?? PostgresEnvironment.SslCertRootDefault) is - { } certRootPath) + else if ((caCert = DataSource.RootCertificateCallback?.Invoke()) is not null || + (certRootPath = Settings.RootCertificate ?? + PostgresEnvironment.SslCertRoot ?? PostgresEnvironment.SslCertRootDefault) is not null) { - certificateValidationCallback = SslRootValidation(certRootPath, sslMode == SslMode.VerifyFull); + certificateValidationCallback = SslRootValidation(sslMode == SslMode.VerifyFull, certRootPath, caCert); } else if (sslMode == SslMode.VerifyCA) { @@ -1650,8 +1656,8 @@ internal void ClearTransaction(Exception? disposeReason = null) (sender, certificate, chain, sslPolicyErrors) => true; - static RemoteCertificateValidationCallback SslRootValidation(string certRootPath, bool verifyFull) => - (sender, certificate, chain, sslPolicyErrors) => + static RemoteCertificateValidationCallback SslRootValidation(bool verifyFull, string? certRootPath, X509Certificate2? caCertificate) + => (_, certificate, chain, sslPolicyErrors) => { if (certificate is null || chain is null) return false; @@ -1670,13 +1676,22 @@ static RemoteCertificateValidationCallback SslRootValidation(string certRootPath var certs = new X509Certificate2Collection(); + if (certRootPath is null) + { + Debug.Assert(caCertificate is not null); + certs.Add(caCertificate); + } + else + { + Debug.Assert(caCertificate is null); #if NET5_0_OR_GREATER - if (Path.GetExtension(certRootPath).ToUpperInvariant() != ".PFX") - certs.ImportFromPemFile(certRootPath); + if (Path.GetExtension(certRootPath).ToUpperInvariant() != ".PFX") + certs.ImportFromPemFile(certRootPath); #endif - if (certs.Count == 0) - certs.Add(new X509Certificate2(certRootPath)); + if (certs.Count == 0) + certs.Add(new X509Certificate2(certRootPath)); + } #if NET5_0_OR_GREATER chain.ChainPolicy.CustomTrustStore.AddRange(certs); diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index 82cab03c01..ebc9aa8fe5 100644 --- 
a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -100,7 +100,8 @@ internal NpgsqlDataSource( _userTypeMappings, _defaultNameTranslator, ConnectionInitializer, - ConnectionInitializerAsync) + ConnectionInitializerAsync, + RootCertificateCallback) = dataSourceConfig; _connectionLogger = LoggingConfiguration.ConnectionLogger; @@ -301,6 +302,8 @@ async Task RefreshPassword() } #endregion Password management + + internal Func? RootCertificateCallback { get; } internal abstract ValueTask Get( NpgsqlConnection conn, NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken); @@ -464,7 +467,7 @@ private protected void CheckDisposed() #endregion - class DatabaseStateInfo + sealed class DatabaseStateInfo { internal readonly DatabaseState State; internal readonly NpgsqlTimeout Timeout; diff --git a/src/Npgsql/NpgsqlDataSourceBuilder.cs b/src/Npgsql/NpgsqlDataSourceBuilder.cs index 9cc6eebf8a..b9a77f67e6 100644 --- a/src/Npgsql/NpgsqlDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlDataSourceBuilder.cs @@ -26,6 +26,8 @@ public class NpgsqlDataSourceBuilder : INpgsqlTypeMapper RemoteCertificateValidationCallback? _userCertificateValidationCallback; Action? _clientCertificatesCallback; + Func? _rootCertificateCallback; + Func>? _periodicPasswordProvider; TimeSpan _periodicPasswordSuccessRefreshInterval, _periodicPasswordFailureRefreshInterval; @@ -153,6 +155,33 @@ public NpgsqlDataSourceBuilder UseClientCertificatesCallback(Action + /// Sets the that will be used validate SSL certificate, received from the server. + /// + /// The CA certificate. + /// The same builder instance so that multiple calls can be chained. + public NpgsqlDataSourceBuilder UseRootCertificate(X509Certificate2? rootCertificate) + => rootCertificate is null + ? UseRootCertificateCallback(null) + : UseRootCertificateCallback(() => rootCertificate); + + /// + /// Specifies a callback that will be used to validate SSL certificate, received from the server. 
+ /// + /// The callback to get CA certificate. + /// The same builder instance so that multiple calls can be chained. + /// + /// This overload, which accepts a callback, is suitable for scenarios where the certificate rotates + /// and might change during the lifetime of the application. + /// When that's not the case, use the overload which directly accepts the certificate. + /// + public NpgsqlDataSourceBuilder UseRootCertificateCallback(Func? rootCertificateCallback) + { + _rootCertificateCallback = rootCertificateCallback; + + return this; + } + /// /// Configures a periodic password provider, which is automatically called by the data source at some regular interval. This is the /// recommended way to fetch a rotating access token. @@ -389,7 +418,8 @@ _loggerFactory is null _userTypeMappings, DefaultNameTranslator, _syncConnectionInitializer, - _asyncConnectionInitializer); + _asyncConnectionInitializer, + _rootCertificateCallback); } void ValidateMultiHost() diff --git a/src/Npgsql/NpgsqlDataSourceConfiguration.cs b/src/Npgsql/NpgsqlDataSourceConfiguration.cs index fb48053500..33dd71617f 100644 --- a/src/Npgsql/NpgsqlDataSourceConfiguration.cs +++ b/src/Npgsql/NpgsqlDataSourceConfiguration.cs @@ -20,4 +20,5 @@ sealed record NpgsqlDataSourceConfiguration( Dictionary UserTypeMappings, INpgsqlNameTranslator DefaultNameTranslator, Action? ConnectionInitializer, - Func? ConnectionInitializerAsync); + Func? ConnectionInitializerAsync, + Func? 
RootCertificateCallback); diff --git a/src/Npgsql/Properties/NpgsqlStrings.Designer.cs b/src/Npgsql/Properties/NpgsqlStrings.Designer.cs index ab4bb536bf..99cf71ecfc 100644 --- a/src/Npgsql/Properties/NpgsqlStrings.Designer.cs +++ b/src/Npgsql/Properties/NpgsqlStrings.Designer.cs @@ -11,32 +11,46 @@ namespace Npgsql.Properties { using System; - [System.CodeDom.Compiler.GeneratedCodeAttribute("System.Resources.Tools.StronglyTypedResourceBuilder", "4.0.0.0")] - [System.Diagnostics.DebuggerNonUserCodeAttribute()] - [System.Runtime.CompilerServices.CompilerGeneratedAttribute()] + /// + /// A strongly-typed resource class, for looking up localized strings, etc. + /// + // This class was auto-generated by the StronglyTypedResourceBuilder + // class via a tool like ResGen or Visual Studio. + // To add or remove a member, edit your .ResX file then rerun ResGen + // with the /str option, or rebuild your VS project. + [global::System.CodeDom.Compiler.GeneratedCodeAttribute("System.Resources.Tools.StronglyTypedResourceBuilder", "4.0.0.0")] + [global::System.Diagnostics.DebuggerNonUserCodeAttribute()] + [global::System.Runtime.CompilerServices.CompilerGeneratedAttribute()] internal class NpgsqlStrings { - private static System.Resources.ResourceManager resourceMan; + private static global::System.Resources.ResourceManager resourceMan; - private static System.Globalization.CultureInfo resourceCulture; + private static global::System.Globalization.CultureInfo resourceCulture; - [System.Diagnostics.CodeAnalysis.SuppressMessageAttribute("Microsoft.Performance", "CA1811:AvoidUncalledPrivateCode")] + [global::System.Diagnostics.CodeAnalysis.SuppressMessageAttribute("Microsoft.Performance", "CA1811:AvoidUncalledPrivateCode")] internal NpgsqlStrings() { } - [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Advanced)] - internal static System.Resources.ResourceManager ResourceManager { + /// + /// Returns the cached ResourceManager instance used 
by this class. + /// + [global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)] + internal static global::System.Resources.ResourceManager ResourceManager { get { - if (object.Equals(null, resourceMan)) { - System.Resources.ResourceManager temp = new System.Resources.ResourceManager("Npgsql.Properties.NpgsqlStrings", typeof(NpgsqlStrings).Assembly); + if (object.ReferenceEquals(resourceMan, null)) { + global::System.Resources.ResourceManager temp = new global::System.Resources.ResourceManager("Npgsql.Properties.NpgsqlStrings", typeof(NpgsqlStrings).Assembly); resourceMan = temp; } return resourceMan; } } - [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Advanced)] - internal static System.Globalization.CultureInfo Culture { + /// + /// Overrides the current thread's CurrentUICulture property for all + /// resource lookups using this strongly typed resource class. + /// + [global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)] + internal static global::System.Globalization.CultureInfo Culture { get { return resourceCulture; } @@ -45,90 +59,144 @@ internal static System.Globalization.CultureInfo Culture { } } - internal static string CannotUseSslVerifyWithUserCallback { + /// + /// Looks up a localized string similar to '{0}' must be positive.. + /// + internal static string ArgumentMustBePositive { get { - return ResourceManager.GetString("CannotUseSslVerifyWithUserCallback", resourceCulture); + return ResourceManager.GetString("ArgumentMustBePositive", resourceCulture); } } - internal static string CannotUseSslRootCertificateWithUserCallback { + /// + /// Looks up a localized string similar to Cannot read infinity value since Npgsql.DisableDateTimeInfinityConversions is enabled.. 
+ /// + internal static string CannotReadInfinityValue { get { - return ResourceManager.GetString("CannotUseSslRootCertificateWithUserCallback", resourceCulture); + return ResourceManager.GetString("CannotReadInfinityValue", resourceCulture); } } - internal static string CannotUseSslModeRequireWithoutTrustServerCertificate { + /// + /// Looks up a localized string similar to Cannot read interval values with non-zero months as TimeSpan, since that type doesn't support months. Consider using NodaTime Period which better corresponds to PostgreSQL interval, or read the value as NpgsqlInterval, or transform the interval to not contain months or years in PostgreSQL before reading it.. + /// + internal static string CannotReadIntervalWithMonthsAsTimeSpan { get { - return ResourceManager.GetString("CannotUseSslModeRequireWithoutTrustServerCertificate", resourceCulture); + return ResourceManager.GetString("CannotReadIntervalWithMonthsAsTimeSpan", resourceCulture); } } - internal static string CannotUseTrustServerCertificate { + /// + /// Looks up a localized string similar to When registering a password provider, a password or password file may not be set.. + /// + internal static string CannotSetBothPasswordProviderAndPassword { get { - return ResourceManager.GetString("CannotUseTrustServerCertificate", resourceCulture); + return ResourceManager.GetString("CannotSetBothPasswordProviderAndPassword", resourceCulture); } } - internal static string NoMultirangeTypeFound { + /// + /// Looks up a localized string similar to When creating a multi-host data source, TargetSessionAttributes cannot be specified. Create without TargetSessionAttributes, and then obtain DataSource wrappers from it. Consult the docs for more information.. 
+ /// + internal static string CannotSpecifyTargetSessionAttributes { get { - return ResourceManager.GetString("NoMultirangeTypeFound", resourceCulture); + return ResourceManager.GetString("CannotSpecifyTargetSessionAttributes", resourceCulture); } } - internal static string NotSupportedOnDataSourceCommand { + /// + /// Looks up a localized string similar to To validate server certificates, please use VerifyFull or VerifyCA instead of Require. To disable validation, explicitly set 'Trust Server Certificate' to true. See https://www.npgsql.org/doc/release-notes/6.0.html for more details.. + /// + internal static string CannotUseSslModeRequireWithoutTrustServerCertificate { get { - return ResourceManager.GetString("NotSupportedOnDataSourceCommand", resourceCulture); + return ResourceManager.GetString("CannotUseSslModeRequireWithoutTrustServerCertificate", resourceCulture); } } - internal static string NotSupportedOnDataSourceBatch { + /// + /// Looks up a localized string similar to RootCertificate cannot be used in conjunction with UserCertificateValidationCallback; when registering a validation callback, perform whatever validation you require in that callback.. + /// + internal static string CannotUseSslRootCertificateWithUserCallback { get { - return ResourceManager.GetString("NotSupportedOnDataSourceBatch", resourceCulture); + return ResourceManager.GetString("CannotUseSslRootCertificateWithUserCallback", resourceCulture); } } - internal static string CannotSetBothPasswordProviderAndPassword { + /// + /// Looks up a localized string similar to SslMode.{0} cannot be used in conjunction with UserCertificateValidationCallback; when registering a validation callback, perform whatever validation you require in that callback.. 
+ /// + internal static string CannotUseSslVerifyWithUserCallback { get { - return ResourceManager.GetString("CannotSetBothPasswordProviderAndPassword", resourceCulture); + return ResourceManager.GetString("CannotUseSslVerifyWithUserCallback", resourceCulture); } } - internal static string PasswordProviderMissing { + /// + /// Looks up a localized string similar to TrustServerCertificate=true is not supported with SslMode={0}. + /// + internal static string CannotUseTrustServerCertificate { get { - return ResourceManager.GetString("PasswordProviderMissing", resourceCulture); + return ResourceManager.GetString("CannotUseTrustServerCertificate", resourceCulture); } } - internal static string ArgumentMustBePositive { + /// + /// Looks up a localized string similar to ValidationRootCertificateCallback cannot be used in conjunction with UserCertificateValidationCallback; when registering a validation callback, perform whatever validation you require in that callback.. + /// + internal static string CannotUseValidationRootCertificateCallbackWithUserCallback { get { - return ResourceManager.GetString("ArgumentMustBePositive", resourceCulture); + return ResourceManager.GetString("CannotUseValidationRootCertificateCallbackWithUserCallback", resourceCulture); } } - internal static string CannotSpecifyTargetSessionAttributes { + /// + /// Looks up a localized string similar to No multirange type could be found in the database for subtype {0}.. + /// + internal static string NoMultirangeTypeFound { get { - return ResourceManager.GetString("CannotSpecifyTargetSessionAttributes", resourceCulture); + return ResourceManager.GetString("NoMultirangeTypeFound", resourceCulture); } } - internal static string CannotReadIntervalWithMonthsAsTimeSpan { + /// + /// Looks up a localized string similar to Connection and transaction access is not supported on batches created from DbDataSource.. 
+ /// + internal static string NotSupportedOnDataSourceBatch { get { - return ResourceManager.GetString("CannotReadIntervalWithMonthsAsTimeSpan", resourceCulture); + return ResourceManager.GetString("NotSupportedOnDataSourceBatch", resourceCulture); } } - internal static string PositionalParameterAfterNamed { + /// + /// Looks up a localized string similar to Connection and transaction access is not supported on commands created from DbDataSource.. + /// + internal static string NotSupportedOnDataSourceCommand { get { - return ResourceManager.GetString("PositionalParameterAfterNamed", resourceCulture); + return ResourceManager.GetString("NotSupportedOnDataSourceCommand", resourceCulture); } } - internal static string CannotReadInfinityValue { + /// + /// Looks up a localized string similar to The right type of password provider (sync or async) was not found.. + /// + internal static string PasswordProviderMissing { get { - return ResourceManager.GetString("CannotReadInfinityValue", resourceCulture); + return ResourceManager.GetString("PasswordProviderMissing", resourceCulture); + } + } + + /// + /// Looks up a localized string similar to When using CommandType.StoredProcedure, all positional parameters must come before named parameters.. + /// + internal static string PositionalParameterAfterNamed { + get { + return ResourceManager.GetString("PositionalParameterAfterNamed", resourceCulture); } } + /// + /// Looks up a localized string similar to Both sync and async connection initializers must be provided.. 
+ /// internal static string SyncAndAsyncConnectionInitializersRequired { get { return ResourceManager.GetString("SyncAndAsyncConnectionInitializersRequired", resourceCulture); diff --git a/src/Npgsql/Properties/NpgsqlStrings.resx b/src/Npgsql/Properties/NpgsqlStrings.resx index 89f6d038d9..b52726e42d 100644 --- a/src/Npgsql/Properties/NpgsqlStrings.resx +++ b/src/Npgsql/Properties/NpgsqlStrings.resx @@ -63,4 +63,7 @@ Both sync and async connection initializers must be provided. + + ValidationRootCertificateCallback cannot be used in conjunction with UserCertificateValidationCallback; when registering a validation callback, perform whatever validation you require in that callback. + \ No newline at end of file diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index 95df6e3864..fa22d1fa41 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -1,4 +1,6 @@ #nullable enable +Npgsql.NpgsqlDataSourceBuilder.UseRootCertificate(System.Security.Cryptography.X509Certificates.X509Certificate2? rootCertificate) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.UseRootCertificateCallback(System.Func? rootCertificateCallback) -> Npgsql.NpgsqlDataSourceBuilder! override Npgsql.NpgsqlBatch.Dispose() -> void Npgsql.NpgsqlBinaryImporter.WriteRow(params object?[]! values) -> void Npgsql.NpgsqlBinaryImporter.WriteRowAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken), params object?[]! values) -> System.Threading.Tasks.Task! 
From 83ab433cf2d80c930bac8a73cda6bbec586e8080 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Mon, 27 Feb 2023 17:57:41 +0100 Subject: [PATCH 078/761] Reduce nullable and array reflection and code generation (#3813) * Block scoped * Remove nullable and array reflection and code generation * Split array and list info to better support reflection free mode which only ships with array reflection * Fix feedback # Conflicts: # src/Npgsql/Internal/TypeHandlers/ArrayHandler.cs * Move types up * File scoped * Reduce boilerplate and address feedback --- .../Internal/TypeHandlers/ArrayHandler.cs | 139 +++++++++++------- .../Internal/TypeHandlers/BitStringHandler.cs | 35 ++++- .../Internal/TypeHandling/NullableHandler.cs | 87 +++++------ 3 files changed, 152 insertions(+), 109 deletions(-) diff --git a/src/Npgsql/Internal/TypeHandlers/ArrayHandler.cs b/src/Npgsql/Internal/TypeHandlers/ArrayHandler.cs index cfc44aba67..3929ead33e 100644 --- a/src/Npgsql/Internal/TypeHandlers/ArrayHandler.cs +++ b/src/Npgsql/Internal/TypeHandlers/ArrayHandler.cs @@ -3,8 +3,6 @@ using System.Collections.Generic; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; -using System.Linq.Expressions; -using System.Reflection; using System.Threading; using System.Threading.Tasks; using Npgsql.BackendMessages; @@ -30,9 +28,6 @@ public abstract class ArrayHandler : NpgsqlTypeHandler private protected NpgsqlTypeHandler ElementHandler { get; } private protected ArrayNullabilityMode ArrayNullabilityMode { get; } - static readonly MethodInfo ReadArrayMethod = typeof(ArrayHandler).GetMethod(nameof(ReadArray), BindingFlags.NonPublic | BindingFlags.Instance)!; - static readonly MethodInfo ReadListMethod = typeof(ArrayHandler).GetMethod(nameof(ReadList), BindingFlags.NonPublic | BindingFlags.Instance)!; - /// protected ArrayHandler(PostgresType arrayPostgresType, NpgsqlTypeHandler elementHandler, ArrayNullabilityMode arrayNullabilityMode, int lowerBound = 1) : base(arrayPostgresType) @@ -63,10 
+58,10 @@ public override NpgsqlTypeHandler CreateMultirangeHandler(PostgresMultirangeType protected internal override async ValueTask ReadCustom(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) { if (ArrayTypeInfo.IsArray) - return (TRequestedArray)(object)await ArrayTypeInfo.ReadArrayFunc(this, buf, async); + return (TRequestedArray)await ArrayTypeInfo.ReadArray(this, buf, async); - if (ArrayTypeInfo.IsList) - return await ArrayTypeInfo.ReadListFunc(this, buf, async); + if (ListTypeInfo.IsList) + return (TRequestedArray)await ListTypeInfo.ReadList(this, buf, async); throw new InvalidCastException(fieldDescription == null ? $"Can't cast database type to {typeof(TRequestedArray).Name}" @@ -77,7 +72,7 @@ protected internal override async ValueTask ReadCustom /// Reads an array of element type from the given buffer . /// - protected async ValueTask ReadArray(NpgsqlReadBuffer buf, bool async, int expectedDimensions = 0, bool readAsObject = false) + protected internal async ValueTask ReadArray(NpgsqlReadBuffer buf, bool async, int expectedDimensions = 0, bool readAsObject = false) { await buf.Ensure(12, async); var dimensions = buf.ReadInt32(); @@ -145,7 +140,7 @@ protected async ValueTask ReadArray(NpgsqlReadBuffer b var element = len == -1 ? (object?)null : NullableHandler.Exists - ? await NullableHandler.ReadAsync!(ElementHandler, buf, len, async) + ? await NullableHandler.ReadAsync(ElementHandler, buf, len, async) : await ElementHandler.Read(buf, len, async); result.SetValue(element, indices); @@ -170,7 +165,7 @@ protected async ValueTask ReadArray(NpgsqlReadBuffer b /// /// Reads a generic list containing elements of type from the given buffer . 
/// - protected async ValueTask> ReadList(NpgsqlReadBuffer buf, bool async) + protected internal async ValueTask ReadList(NpgsqlReadBuffer buf, bool async) { await buf.Ensure(12, async); var dimensions = buf.ReadInt32(); @@ -288,70 +283,61 @@ protected static Exception MixedTypesOrJaggedArrayException(Exception innerExcep internal static class ElementTypeInfo { public static readonly bool IsNonNullable = - typeof(TElement).IsValueType && Nullable.GetUnderlyingType(typeof(TElement)) is null; + typeof(TElement).IsValueType && default(TElement) is not null; public static readonly Type NullableElementType = IsNonNullable ? typeof(Nullable<>).MakeGenericType(typeof(TElement)) : typeof(TElement); } - internal static class ArrayTypeInfo + internal abstract class ArrayTypeInfo { // ReSharper disable StaticMemberInGenericType - public static readonly bool IsArray; - public static readonly bool IsList; - public static readonly Type? ElementType; - - public static readonly Func> ReadArrayFunc = default!; - public static readonly Func> ReadListFunc = default!; + public static readonly Type? ElementType = typeof(TArray).IsArray ? typeof(TArray).GetElementType() : null; // ReSharper restore StaticMemberInGenericType - static ArrayTypeInfo() - { - var type = typeof(TArrayOrList); - IsArray = type.IsArray; - IsList = type.IsGenericType && type.GetGenericTypeDefinition() == typeof(List<>); + public static bool IsArray => ElementType is not null; - ElementType = IsArray - ? type.GetElementType() - : IsList - ? type.GetGenericArguments()[0] - : null; + static ArrayTypeInfo? 
_derivedInstance; + static ArrayTypeInfo DerivedInstance + { + get + { + Debug.Assert(ElementType is not null); + return (_derivedInstance ??= (ArrayTypeInfo?)Activator.CreateInstance(typeof(ArrayHandler<,>).MakeGenericType(typeof(TArray), ElementType), typeof(TArray).GetArrayRank()))!; + } + } - if (ElementType == null) - return; + public static ValueTask ReadArray(ArrayHandler handler, NpgsqlReadBuffer buf, bool async, bool readAsObject = false) + => DerivedInstance.Read(handler, buf, async, readAsObject); - // Initialize delegates - var arrayHandlerParam = Expression.Parameter(typeof(ArrayHandler), "arrayHandler"); - var bufferParam = Expression.Parameter(typeof(NpgsqlReadBuffer), "buf"); - var asyncParam = Expression.Parameter(typeof(bool), "async"); + protected abstract ValueTask Read(ArrayHandler handler, NpgsqlReadBuffer buf, bool async, bool readAsObject = false); + } + internal abstract class ListTypeInfo + { + // ReSharper disable StaticMemberInGenericType + public static readonly Type? ElementType = + typeof(TList).IsGenericType && typeof(TList).GetGenericTypeDefinition() == typeof(List<>) ? + typeof(TList).GetGenericArguments()[0] : null; + // ReSharper restore StaticMemberInGenericType - if (IsArray) - { - ReadArrayFunc = Expression - .Lambda>>( - Expression.Call( - arrayHandlerParam, - ReadArrayMethod.MakeGenericMethod(ElementType), - bufferParam, asyncParam, Expression.Constant(type.GetArrayRank()), Expression.Constant(false, typeof(bool))), - arrayHandlerParam, bufferParam, asyncParam) - .Compile(); - } + public static bool IsList => ElementType is not null; - if (IsList) + static ListTypeInfo? 
_derivedInstance; + static ListTypeInfo DerivedInstance + { + get { - ReadListFunc = Expression - .Lambda>>( - Expression.Call( - arrayHandlerParam, - ReadListMethod.MakeGenericMethod(ElementType), - bufferParam, asyncParam), - arrayHandlerParam, bufferParam, asyncParam) - .Compile(); + Debug.Assert(ElementType is not null); + return _derivedInstance ??= (ListTypeInfo?)Activator.CreateInstance(typeof(ListHandler<,>).MakeGenericType(typeof(TList), ElementType))!; } } - } + public static ValueTask ReadList(ArrayHandler handler, NpgsqlReadBuffer buf, bool async) + => DerivedInstance.Read(handler, buf, async); + + protected abstract ValueTask Read(ArrayHandler handler, NpgsqlReadBuffer buf, bool async); + } #endregion Static generic caching helpers } @@ -499,9 +485,48 @@ sealed class ArrayHandlerWithPsv : ArrayHandler public ArrayHandlerWithPsv(PostgresType arrayPostgresType, NpgsqlTypeHandler elementHandler, ArrayNullabilityMode arrayNullabilityMode) : base(arrayPostgresType, elementHandler, arrayNullabilityMode) { } + protected internal override async ValueTask ReadCustom(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) + { + if (ArrayTypeInfo.IsArray) + { + return ArrayTypeInfo.ElementType == typeof(TElementPsv) + ? (TRequestedArray)await ReadArray(buf, async, typeof(TRequestedArray).GetArrayRank()) + : (TRequestedArray)await ArrayTypeInfo.ReadArray(this, buf, async); + } + + // We evaluate List last to better support reflection free mode + // https://github.com/dotnet/runtimelab/blob/f2fd03035c1c02a0b904537b6f38906035f14689/docs/using-nativeaot/reflection-free-mode.md + if (ListTypeInfo.IsList) + { + return ListTypeInfo.ElementType == typeof(TElementPsv) + ? (TRequestedArray)await ReadList(buf, async) + : (TRequestedArray)await ListTypeInfo.ReadList(this, buf, async); + } + + throw new InvalidCastException(fieldDescription == null + ? 
$"Can't cast database type to {typeof(TRequestedArray).Name}" + : $"Can't cast database type {fieldDescription.Handler.PgDisplayName} to {typeof(TRequestedArray).Name}" + ); + } + internal override object ReadPsvAsObject(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) => ReadPsvAsObject(buf, len, false, fieldDescription).GetAwaiter().GetResult(); internal override async ValueTask ReadPsvAsObject(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) => await ReadArray(buf, async, readAsObject: true); -} \ No newline at end of file +} + +sealed class ArrayHandler : ArrayHandler.ArrayTypeInfo +{ + readonly int _arrayRank; + public ArrayHandler(int arrayRank) => _arrayRank = arrayRank; + + protected override ValueTask Read(ArrayHandler handler, NpgsqlReadBuffer buf, bool async, bool readAsObject = false) + => handler.ReadArray(buf, async, _arrayRank, readAsObject); +} + +sealed class ListHandler : ArrayHandler.ListTypeInfo +{ + protected override ValueTask Read(ArrayHandler handler, NpgsqlReadBuffer buf, bool async) => + handler.ReadList(buf, async); +} diff --git a/src/Npgsql/Internal/TypeHandlers/BitStringHandler.cs b/src/Npgsql/Internal/TypeHandlers/BitStringHandler.cs index 27c3eece3c..aac090eb5e 100644 --- a/src/Npgsql/Internal/TypeHandlers/BitStringHandler.cs +++ b/src/Npgsql/Internal/TypeHandlers/BitStringHandler.cs @@ -266,8 +266,41 @@ public class BitStringArrayHandler : ArrayHandler public BitStringArrayHandler(PostgresType postgresType, BitStringHandler elementHandler, ArrayNullabilityMode arrayNullabilityMode) : base(postgresType, elementHandler, arrayNullabilityMode) {} + /// + protected internal override async ValueTask ReadCustom(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription = null) + { + if (ArrayTypeInfo.IsArray) + { + if(ArrayTypeInfo.ElementType == typeof(BitArray)) + return (TRequestedArray)await ReadArray(buf, async); + + if(ArrayTypeInfo.ElementType == typeof(bool)) + return (TRequestedArray)await ReadArray(buf, async); + + return (TRequestedArray)await ArrayTypeInfo.ReadArray(this, buf, async); + } + + // We evaluate List last to better support reflection free mode + // https://github.com/dotnet/runtimelab/blob/f2fd03035c1c02a0b904537b6f38906035f14689/docs/using-nativeaot/reflection-free-mode.md + if (ListTypeInfo.IsList) + { + if (ListTypeInfo.ElementType == typeof(BitArray)) + return (TRequestedArray)await ReadList(buf, async); + + if (ListTypeInfo.ElementType == typeof(bool)) + return (TRequestedArray)await ReadList(buf, async); + + return (TRequestedArray)await ListTypeInfo.ReadList(this, buf, async); + } + + throw new InvalidCastException(fieldDescription == null + ? $"Can't cast database type to {typeof(TRequestedArray).Name}" + : $"Can't cast database type {fieldDescription.Handler.PgDisplayName} to {typeof(TRequestedArray).Name}" + ); + } + public override async ValueTask ReadAsObject(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) => fieldDescription?.TypeModifier == 1 ? 
await ReadArray(buf, async) : await ReadArray(buf, async); -} \ No newline at end of file +} diff --git a/src/Npgsql/Internal/TypeHandling/NullableHandler.cs b/src/Npgsql/Internal/TypeHandling/NullableHandler.cs index e3bb0619ce..89fd5a0cb4 100644 --- a/src/Npgsql/Internal/TypeHandling/NullableHandler.cs +++ b/src/Npgsql/Internal/TypeHandling/NullableHandler.cs @@ -1,69 +1,54 @@ using System; -using System.Diagnostics.CodeAnalysis; -using System.Reflection; +using System.Diagnostics; using System.Threading; using System.Threading.Tasks; using Npgsql.BackendMessages; -// ReSharper disable StaticMemberInGenericType namespace Npgsql.Internal.TypeHandling; -delegate T ReadDelegate(NpgsqlTypeHandler handler, NpgsqlReadBuffer buffer, int columnLength, FieldDescription? fieldDescription = null); -delegate ValueTask ReadAsyncDelegate(NpgsqlTypeHandler handler, NpgsqlReadBuffer buffer, int columnLen, bool async, FieldDescription? fieldDescription = null); - -delegate int ValidateAndGetLengthDelegate(NpgsqlTypeHandler handler, T value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter); -delegate Task WriteAsyncDelegate(NpgsqlTypeHandler handler, T value, NpgsqlWriteBuffer buffer, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default); - -static class NullableHandler +abstract class NullableHandler { - public static readonly Type? UnderlyingType; - public static readonly ReadDelegate Read = null!; - public static readonly ReadAsyncDelegate ReadAsync = null!; - public static readonly ValidateAndGetLengthDelegate ValidateAndGetLength = null!; - public static readonly WriteAsyncDelegate WriteAsync = null!; - - public static bool Exists => UnderlyingType != null; + static NullableHandler? 
_derivedInstance; + public static bool Exists => default(T) is null && typeof(T).IsValueType; - static NullableHandler() + static NullableHandler DerivedInstance { - UnderlyingType = Nullable.GetUnderlyingType(typeof(T)); - - if (UnderlyingType == null) - return; - - Read = NullableHandler.CreateDelegate>(UnderlyingType, NullableHandler.ReadMethod); - ReadAsync = NullableHandler.CreateDelegate>(UnderlyingType, NullableHandler.ReadAsyncMethod); - ValidateAndGetLength = NullableHandler.CreateDelegate>(UnderlyingType, NullableHandler.ValidateMethod); - WriteAsync = NullableHandler.CreateDelegate>(UnderlyingType, NullableHandler.WriteAsyncMethod); + get + { + Debug.Assert(Exists); + return _derivedInstance ??= (NullableHandler?)Activator.CreateInstance(typeof(NullableHandler<,>).MakeGenericType(typeof(T), typeof(T).GenericTypeArguments[0]))!; + } } + + public static T Read(NpgsqlTypeHandler handler, NpgsqlReadBuffer buffer, int columnLength, FieldDescription? fieldDescription = null) => + DerivedInstance.ReadImpl(handler, buffer, columnLength, fieldDescription); + public static ValueTask ReadAsync(NpgsqlTypeHandler handler, NpgsqlReadBuffer buffer, int columnLength, bool async, FieldDescription? fieldDescription = null) => + DerivedInstance.ReadAsyncImpl(handler, buffer, columnLength, async, fieldDescription); + public static int ValidateAndGetLength(NpgsqlTypeHandler handler, T value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) => + DerivedInstance.ValidateAndGetLengthImpl(handler, value, ref lengthCache, parameter); + public static Task WriteAsync(NpgsqlTypeHandler handler, T value, NpgsqlWriteBuffer buffer, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) => + DerivedInstance.WriteAsyncImpl(handler, value, buffer, lengthCache, parameter, async, cancellationToken); + + protected abstract T ReadImpl(NpgsqlTypeHandler handler, NpgsqlReadBuffer buffer, int columnLength, FieldDescription? fieldDescription = null); + protected abstract ValueTask ReadAsyncImpl(NpgsqlTypeHandler handler, NpgsqlReadBuffer buffer, int columnLen, bool async, FieldDescription? fieldDescription = null); + protected abstract int ValidateAndGetLengthImpl(NpgsqlTypeHandler handler, T value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter); + protected abstract Task WriteAsyncImpl(NpgsqlTypeHandler handler, T value, NpgsqlWriteBuffer buffer, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default); } -static class NullableHandler +class NullableHandler : NullableHandler + where TUnderlying : struct { - internal static readonly MethodInfo ReadMethod = new ReadDelegate(Read).Method.GetGenericMethodDefinition(); - internal static readonly MethodInfo ReadAsyncMethod = new ReadAsyncDelegate(ReadAsync).Method.GetGenericMethodDefinition(); - internal static readonly MethodInfo ValidateMethod = new ValidateAndGetLengthDelegate(ValidateAndGetLength).Method.GetGenericMethodDefinition(); - internal static readonly MethodInfo WriteAsyncMethod = new WriteAsyncDelegate(WriteAsync).Method.GetGenericMethodDefinition(); - - static T? Read(NpgsqlTypeHandler handler, NpgsqlReadBuffer buffer, int columnLength, FieldDescription? fieldDescription) - where T : struct - => handler.Read(buffer, columnLength, fieldDescription); + protected override T ReadImpl(NpgsqlTypeHandler handler, NpgsqlReadBuffer buffer, int columnLength, FieldDescription? 
fieldDescription = null) + => (T)(object)handler.Read(buffer, columnLength, fieldDescription); - static async ValueTask ReadAsync(NpgsqlTypeHandler handler, NpgsqlReadBuffer buffer, int columnLength, bool async, FieldDescription? fieldDescription) - where T : struct - => await handler.Read(buffer, columnLength, async, fieldDescription); + protected override async ValueTask ReadAsyncImpl(NpgsqlTypeHandler handler, NpgsqlReadBuffer buffer, int columnLength, bool async, FieldDescription? fieldDescription = null) + => (T)(object)await handler.Read(buffer, columnLength, async, fieldDescription); - static int ValidateAndGetLength(NpgsqlTypeHandler handler, T? value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - where T : struct - => value.HasValue ? handler.ValidateAndGetLength(value.Value, ref lengthCache, parameter) : 0; + protected override int ValidateAndGetLengthImpl(NpgsqlTypeHandler handler, T value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) => + value != null ? handler.ValidateAndGetLength(((TUnderlying?)(object)value).Value, ref lengthCache, parameter) : 0; - static Task WriteAsync(NpgsqlTypeHandler handler, T? value, NpgsqlWriteBuffer buffer, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - where T : struct - => value.HasValue - ? handler.WriteWithLength(value.Value, buffer, lengthCache, parameter, async, cancellationToken) + protected override Task WriteAsyncImpl(NpgsqlTypeHandler handler, T value, NpgsqlWriteBuffer buffer, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) + => value != null + ? 
handler.WriteWithLength(((TUnderlying?)(object)value).Value, buffer, lengthCache, parameter, async, cancellationToken) : handler.WriteWithLength(DBNull.Value, buffer, lengthCache, parameter, async, cancellationToken); - - internal static TDelegate CreateDelegate(Type underlyingType, MethodInfo method) - where TDelegate : Delegate - => (TDelegate)method.MakeGenericMethod(underlyingType).CreateDelegate(typeof(TDelegate)); -} \ No newline at end of file +} From 819342833da3245cb47182ca297ed4d22d7ce6a2 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Mon, 27 Feb 2023 22:19:37 +0300 Subject: [PATCH 079/761] Upload mstat as artifact for Native AOT (#4953) --- .github/workflows/native-aot.yml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index 240f8ff74d..2975988ff3 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -71,6 +71,13 @@ jobs: - name: Dump mstat run: dotnet run --project test/MStatDumper/MStatDumper.csproj -c release -f net8.0 -- "test/Npgsql.NativeAotTests/obj/Release/net8.0/linux-x64/native/Npgsql.NativeAotTests.mstat" md >> $GITHUB_STEP_SUMMARY + - name: Upload mstat + uses: actions/upload-artifact@v3.1.2 + with: + name: npgsql.mstat + path: "test/Npgsql.NativeAotTests/obj/Release/net8.0/linux-x64/native/Npgsql.NativeAotTests.mstat" + retention-days: 3 + - name: Assert binary size run: | size="$(ls -l test/Npgsql.NativeAotTests/bin/Release/net8.0/linux-x64/native/Npgsql.NativeAotTests | cut -d ' ' -f 5)" @@ -79,4 +86,4 @@ jobs: if (( size > 36700160 )); then echo "Binary size exceeds 35mb threshold" exit 1 - fi \ No newline at end of file + fi From e3712ee38c7aa450c9f00e7664b2ecf0b6eb557f Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Mon, 27 Feb 2023 22:27:13 +0300 Subject: [PATCH 080/761] Fix 300kb regression (#4954) --- .../Internal/TypeHandlers/ArrayHandler.cs | 24 -------------- 
.../Internal/TypeHandlers/BitStringHandler.cs | 33 ------------------- 2 files changed, 57 deletions(-) diff --git a/src/Npgsql/Internal/TypeHandlers/ArrayHandler.cs b/src/Npgsql/Internal/TypeHandlers/ArrayHandler.cs index 3929ead33e..1ab1025014 100644 --- a/src/Npgsql/Internal/TypeHandlers/ArrayHandler.cs +++ b/src/Npgsql/Internal/TypeHandlers/ArrayHandler.cs @@ -485,30 +485,6 @@ sealed class ArrayHandlerWithPsv : ArrayHandler public ArrayHandlerWithPsv(PostgresType arrayPostgresType, NpgsqlTypeHandler elementHandler, ArrayNullabilityMode arrayNullabilityMode) : base(arrayPostgresType, elementHandler, arrayNullabilityMode) { } - protected internal override async ValueTask ReadCustom(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - { - if (ArrayTypeInfo.IsArray) - { - return ArrayTypeInfo.ElementType == typeof(TElementPsv) - ? (TRequestedArray)await ReadArray(buf, async, typeof(TRequestedArray).GetArrayRank()) - : (TRequestedArray)await ArrayTypeInfo.ReadArray(this, buf, async); - } - - // We evaluate List last to better support reflection free mode - // https://github.com/dotnet/runtimelab/blob/f2fd03035c1c02a0b904537b6f38906035f14689/docs/using-nativeaot/reflection-free-mode.md - if (ListTypeInfo.IsList) - { - return ListTypeInfo.ElementType == typeof(TElementPsv) - ? (TRequestedArray)await ReadList(buf, async) - : (TRequestedArray)await ListTypeInfo.ReadList(this, buf, async); - } - - throw new InvalidCastException(fieldDescription == null - ? $"Can't cast database type to {typeof(TRequestedArray).Name}" - : $"Can't cast database type {fieldDescription.Handler.PgDisplayName} to {typeof(TRequestedArray).Name}" - ); - } - internal override object ReadPsvAsObject(NpgsqlReadBuffer buf, int len, FieldDescription? 
fieldDescription = null) => ReadPsvAsObject(buf, len, false, fieldDescription).GetAwaiter().GetResult(); diff --git a/src/Npgsql/Internal/TypeHandlers/BitStringHandler.cs b/src/Npgsql/Internal/TypeHandlers/BitStringHandler.cs index aac090eb5e..68ab42132b 100644 --- a/src/Npgsql/Internal/TypeHandlers/BitStringHandler.cs +++ b/src/Npgsql/Internal/TypeHandlers/BitStringHandler.cs @@ -266,39 +266,6 @@ public class BitStringArrayHandler : ArrayHandler public BitStringArrayHandler(PostgresType postgresType, BitStringHandler elementHandler, ArrayNullabilityMode arrayNullabilityMode) : base(postgresType, elementHandler, arrayNullabilityMode) {} - /// - protected internal override async ValueTask ReadCustom(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - { - if (ArrayTypeInfo.IsArray) - { - if(ArrayTypeInfo.ElementType == typeof(BitArray)) - return (TRequestedArray)await ReadArray(buf, async); - - if(ArrayTypeInfo.ElementType == typeof(bool)) - return (TRequestedArray)await ReadArray(buf, async); - - return (TRequestedArray)await ArrayTypeInfo.ReadArray(this, buf, async); - } - - // We evaluate List last to better support reflection free mode - // https://github.com/dotnet/runtimelab/blob/f2fd03035c1c02a0b904537b6f38906035f14689/docs/using-nativeaot/reflection-free-mode.md - if (ListTypeInfo.IsList) - { - if (ListTypeInfo.ElementType == typeof(BitArray)) - return (TRequestedArray)await ReadList(buf, async); - - if (ListTypeInfo.ElementType == typeof(bool)) - return (TRequestedArray)await ReadList(buf, async); - - return (TRequestedArray)await ListTypeInfo.ReadList(this, buf, async); - } - - throw new InvalidCastException(fieldDescription == null - ? $"Can't cast database type to {typeof(TRequestedArray).Name}" - : $"Can't cast database type {fieldDescription.Handler.PgDisplayName} to {typeof(TRequestedArray).Name}" - ); - } - public override async ValueTask ReadAsObject(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription = null) => fieldDescription?.TypeModifier == 1 ? await ReadArray(buf, async) From 2c62155cdd2646d03f6d863c1d4db033f8e38d50 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Feb 2023 23:54:48 +0100 Subject: [PATCH 081/761] Bump OpenTelemetry.API from 1.3.2 to 1.4.0 (#4958) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 6fa76f742a..10a3eb6142 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -4,7 +4,7 @@ - + From 9eb17523adf11a85a838b8b197c4862a205ab28b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Feb 2023 23:55:13 +0100 Subject: [PATCH 082/761] Bump NUnit3TestAdapter from 4.3.1 to 4.4.0 (#4956) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 10a3eb6142..22b57ba02a 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -24,7 +24,7 @@ - + From e36a82a1525e0dff6f71296d3e208111a8cad61b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Feb 2023 23:55:35 +0100 Subject: [PATCH 083/761] Bump Scriban.Signed from 5.6.0 to 5.7.0 (#4957) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 22b57ba02a..cb3c6dbbaf 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -9,7 +9,7 @@ - + From 26acd911af6861fb767b472b125b5c57cddd63b7 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Tue, 28 Feb 2023 08:25:39 +0300 Subject: [PATCH 084/761] Set a few more flags to reduce AOT binary size (#4947) --- .github/workflows/native-aot.yml | 4 ++-- test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj | 8 +++++--- 2 
files changed, 7 insertions(+), 5 deletions(-) diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index 2975988ff3..01651189a2 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -83,7 +83,7 @@ jobs: size="$(ls -l test/Npgsql.NativeAotTests/bin/Release/net8.0/linux-x64/native/Npgsql.NativeAotTests | cut -d ' ' -f 5)" echo "Binary size is $size bytes ($((size / (1024 * 1024))) mb)" - if (( size > 36700160 )); then - echo "Binary size exceeds 35mb threshold" + if (( size > 15728640 )); then + echo "Binary size exceeds 15mb threshold" exit 1 fi diff --git a/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj b/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj index 5a18264e56..967308c218 100644 --- a/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj +++ b/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj @@ -5,11 +5,13 @@ net8.0 true - true true - true - true true + false + true + false + Size + true From 057132e172550e4d08f7b2750d2bdd83dc19098d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 1 Mar 2023 14:06:35 +0100 Subject: [PATCH 085/761] Bump Microsoft.CodeAnalysis.CSharp from 4.4.0 to 4.5.0 (#4948) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index cb3c6dbbaf..2dc7122b70 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -7,7 +7,7 @@ - + From 1016b19095aeaf769cc522fc8c37e8ada4ac9312 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 1 Mar 2023 23:08:11 +0100 Subject: [PATCH 086/761] Bump NUnit3TestAdapter from 4.4.0 to 4.4.2 (#4960) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 2dc7122b70..3674055471 100644 --- 
a/Directory.Packages.props +++ b/Directory.Packages.props @@ -24,7 +24,7 @@ - + From bceb9e134c36786d348f833cfc3b1a64cf77cbc4 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Thu, 2 Mar 2023 16:06:24 +0300 Subject: [PATCH 087/761] Fix closing unpooled connection with transaction scope (#4964) Fixes #4963 --- src/Npgsql/NpgsqlConnection.cs | 7 ++--- src/Npgsql/UnpooledDataSource.cs | 9 ------ .../DistributedTransactionTests.cs | 31 ------------------- test/Npgsql.Tests/SystemTransactionTests.cs | 22 +++++++++++++ 4 files changed, 24 insertions(+), 45 deletions(-) diff --git a/src/Npgsql/NpgsqlConnection.cs b/src/Npgsql/NpgsqlConnection.cs index d88af4411b..2c785b9bef 100644 --- a/src/Npgsql/NpgsqlConnection.cs +++ b/src/Npgsql/NpgsqlConnection.cs @@ -889,13 +889,10 @@ async Task CloseAsync(bool async) connector.Connection = null; - // If pooled, close the connection and disconnect it from the resource manager but leave the - // connector in an enlisted pending list in the pool. If another connection is opened within + // Close the connection and disconnect it from the resource manager but leave the + // connector in an enlisted pending list in the data source. If another connection is opened within // the same transaction scope, we will reuse this connector to avoid escalating to a distributed // transaction - // If a *non-pooled* connection is being closed but is enlisted in an ongoing - // TransactionScope, we do nothing - simply detach the connector from the connection and leave - // it open. It will be closed when the TransactionScope is disposed. 
_dataSource?.AddPendingEnlistedConnector(connector, EnlistedTransaction); EnlistedTransaction = null; diff --git a/src/Npgsql/UnpooledDataSource.cs b/src/Npgsql/UnpooledDataSource.cs index a1ff6659bd..8226524635 100644 --- a/src/Npgsql/UnpooledDataSource.cs +++ b/src/Npgsql/UnpooledDataSource.cs @@ -48,13 +48,4 @@ internal override void Return(NpgsqlConnector connector) } internal override void Clear() {} - - internal override bool TryRentEnlistedPending(Transaction transaction, NpgsqlConnection connection, - [NotNullWhen(true)] out NpgsqlConnector? connector) - { - connector = null; - return false; - } - - internal override bool TryRemovePendingEnlistedConnector(NpgsqlConnector connector, Transaction transaction) => false; } \ No newline at end of file diff --git a/test/Npgsql.Tests/DistributedTransactionTests.cs b/test/Npgsql.Tests/DistributedTransactionTests.cs index c0eaccf0eb..93e350cc11 100644 --- a/test/Npgsql.Tests/DistributedTransactionTests.cs +++ b/test/Npgsql.Tests/DistributedTransactionTests.cs @@ -116,37 +116,6 @@ public void Two_connections_with_failure() AssertNumberOfRows(adminConn, table, 0); } - [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1737")] - public void Multiple_unpooled_connections_do_not_reuse() - { - using var dataSource = CreateDataSource(csb => - { - csb.Pooling = false; - csb.Enlist = true; - }); - - using var scope = new TransactionScope(); - - int processId; - - using (var conn1 = dataSource.OpenConnection()) - using (var cmd = new NpgsqlCommand("SELECT 1", conn1)) - { - processId = conn1.ProcessID; - cmd.ExecuteNonQuery(); - } - - using (var conn2 = dataSource.OpenConnection()) - using (var cmd = new NpgsqlCommand("SELECT 1", conn2)) - { - // The connection reuse optimization isn't implemented for unpooled connections (though it could be) - Assert.That(conn2.ProcessID, Is.Not.EqualTo(processId)); - cmd.ExecuteNonQuery(); - } - - scope.Complete(); - } - [Test(Description = "Transaction race, bool distributed")] 
[Explicit("Fails on Appveyor (https://ci.appveyor.com/project/roji/npgsql/build/3.3.0-250)")] public void Transaction_race([Values(false, true)] bool distributed) diff --git a/test/Npgsql.Tests/SystemTransactionTests.cs b/test/Npgsql.Tests/SystemTransactionTests.cs index 4e5aaca63a..b71c949259 100644 --- a/test/Npgsql.Tests/SystemTransactionTests.cs +++ b/test/Npgsql.Tests/SystemTransactionTests.cs @@ -310,6 +310,28 @@ public void Single_unpooled_connection() scope.Complete(); } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4963")] + public void Single_unpooled_closed_connection() + { + using var dataSource = CreateDataSource(csb => + { + csb.Pooling = false; + csb.Enlist = true; + }); + + using (var scope = new TransactionScope()) + using (var conn = dataSource.OpenConnection()) + using (var cmd = new NpgsqlCommand("SELECT 1", conn)) + { + cmd.ExecuteNonQuery(); + conn.Close(); + Assert.That(dataSource.Statistics.Total, Is.EqualTo(1)); + scope.Complete(); + } + + Assert.That(dataSource.Statistics.Total, Is.EqualTo(0)); + } + [Test] [IssueLink("https://github.com/npgsql/npgsql/issues/3863")] public void Break_connector_while_in_transaction_scope_with_rollback([Values] bool pooling) From 24c056f5145d68a898d2dce13f3c03bc7b3f36ae Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Fri, 3 Mar 2023 14:49:16 +0100 Subject: [PATCH 088/761] Array code bloat reduction (#4961) * Through the eye of the needle * Add create op to reduce behavioral changes * Merge the interface and the struct into the core handler * Reduce generic bloat in type handler base classes * Reduce WriteWithLength bloat * Fix simple type reading * Remove WriteNull again * Address feedback --- .../Internal/NodaTimeTypeHandlerResolver.cs | 6 +- .../Internal/TypeHandlers/ArrayHandler.cs | 632 +++++++++++------- .../Internal/TypeHandlers/BitStringHandler.cs | 11 +- .../Internal/TypeHandlers/HstoreHandler.cs | 16 +- .../InternalTypeHandlers/Int2VectorHandler.cs | 11 +- 
.../InternalTypeHandlers/OIDVectorHandler.cs | 11 +- .../Internal/TypeHandlers/JsonTextHandler.cs | 14 +- .../TypeHandlers/MultirangeHandler.cs | 12 +- .../Internal/TypeHandlers/RangeHandler.cs | 12 +- .../TypeHandling/INpgsqlTypeHandler.cs | 25 +- .../TypeHandling/NpgsqlSimpleTypeHandler.cs | 8 +- .../NpgsqlSimpleTypeHandlerWithPsv.cs | 6 +- .../TypeHandling/NpgsqlTypeHandler.cs | 11 +- .../TypeHandling/NpgsqlTypeHandler`.cs | 8 +- .../Shims/ConcurrentDictionaryExtensions.cs | 18 + test/Npgsql.Tests/Types/ArrayTests.cs | 4 +- test/Npgsql.Tests/Types/BitStringTests.cs | 20 +- 17 files changed, 493 insertions(+), 332 deletions(-) create mode 100644 src/Npgsql/Shims/ConcurrentDictionaryExtensions.cs diff --git a/src/Npgsql.NodaTime/Internal/NodaTimeTypeHandlerResolver.cs b/src/Npgsql.NodaTime/Internal/NodaTimeTypeHandlerResolver.cs index 81b3cffa94..117bd72cc5 100644 --- a/src/Npgsql.NodaTime/Internal/NodaTimeTypeHandlerResolver.cs +++ b/src/Npgsql.NodaTime/Internal/NodaTimeTypeHandlerResolver.cs @@ -232,9 +232,9 @@ NpgsqlTypeHandler DateMultirange() NpgsqlTypeHandler TsTzRangeArray() => _timestampTzRangeArray ??= - new ArrayHandler((PostgresArrayType)PgType("tstzrange[]"), TsTzRange(), _arrayNullabilityMode); + new ArrayHandler((PostgresArrayType)PgType("tstzrange[]"), TsTzRange(), _arrayNullabilityMode); NpgsqlTypeHandler DateRangeArray() => _dateRangeArray ??= - new ArrayHandler((PostgresArrayType)PgType("daterange[]"), DateRange(), _arrayNullabilityMode); -} \ No newline at end of file + new ArrayHandler((PostgresArrayType)PgType("daterange[]"), DateRange(), _arrayNullabilityMode); +} diff --git a/src/Npgsql/Internal/TypeHandlers/ArrayHandler.cs b/src/Npgsql/Internal/TypeHandlers/ArrayHandler.cs index 1ab1025014..07cd89a469 100644 --- a/src/Npgsql/Internal/TypeHandlers/ArrayHandler.cs +++ b/src/Npgsql/Internal/TypeHandlers/ArrayHandler.cs @@ -1,8 +1,10 @@ using System; using System.Collections; +using System.Collections.Concurrent; using 
System.Collections.Generic; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; using System.Threading; using System.Threading.Tasks; using Npgsql.BackendMessages; @@ -13,7 +15,6 @@ namespace Npgsql.Internal.TypeHandlers; /// /// Non-generic base class for all type handlers which handle PostgreSQL arrays. -/// Extend from instead. /// /// /// https://www.postgresql.org/docs/current/static/arrays.html. @@ -22,19 +23,22 @@ namespace Npgsql.Internal.TypeHandlers; /// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. /// Use it at your own risk. /// -public abstract class ArrayHandler : NpgsqlTypeHandler +public class ArrayHandler : NpgsqlTypeHandler { - private protected int LowerBound { get; } // The lower bound value sent to the backend when writing arrays. Normally 1 (the PG default) but is 0 for OIDVector. - private protected NpgsqlTypeHandler ElementHandler { get; } - private protected ArrayNullabilityMode ArrayNullabilityMode { get; } - - /// - protected ArrayHandler(PostgresType arrayPostgresType, NpgsqlTypeHandler elementHandler, ArrayNullabilityMode arrayNullabilityMode, int lowerBound = 1) - : base(arrayPostgresType) + readonly Type _defaultArrayType; + readonly Type _psvArrayType; + readonly ConcurrentDictionary _concreteHandlers = new(); + protected int LowerBound { get; } + protected NpgsqlTypeHandler ElementHandler { get; } + protected ArrayNullabilityMode ArrayNullabilityMode { get; } + + public ArrayHandler(PostgresType arrayPostgresType, NpgsqlTypeHandler elementHandler, ArrayNullabilityMode arrayNullabilityMode, int lowerBound = 1) : base(arrayPostgresType) { LowerBound = lowerBound; ElementHandler = elementHandler; ArrayNullabilityMode = arrayNullabilityMode; + _psvArrayType = elementHandler.GetProviderSpecificFieldType().MakeArrayType(); + _defaultArrayType = elementHandler.GetFieldType().MakeArrayType(); } public override Type 
GetFieldType(FieldDescription? fieldDescription = null) => typeof(Array); @@ -52,70 +56,223 @@ public override NpgsqlTypeHandler CreateRangeHandler(PostgresType pgRangeType) public override NpgsqlTypeHandler CreateMultirangeHandler(PostgresMultirangeType pgMultirangeType) => throw new NotSupportedException(); - #region Read + ArrayHandlerCore CreateHandler(Type elementType) + => (ArrayHandlerCore)Activator.CreateInstance(typeof(ArrayHandlerCore<>).MakeGenericType(elementType), ElementHandler, ArrayNullabilityMode, LowerBound)!; + + internal override ValueTask ReadPsvAsObject(NpgsqlReadBuffer buf, int len, bool async, + FieldDescription? fieldDescription = null) + { + var handler = _concreteHandlers.GetOrAdd(_psvArrayType, static (_, instance) => instance.CreateHandler(instance.ElementHandler.GetProviderSpecificFieldType()), this); + return handler.ReadArrayAsObject(buf, async); + } /// - protected internal override async ValueTask ReadCustom(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) + protected internal override async ValueTask ReadCustom(NpgsqlReadBuffer buf, int len, bool async, + FieldDescription? fieldDescription) + { + return (TArray)await ReadGenericAsObject(buf, async, fieldDescription); + + // Sync helper to keep the code size cost of ReadCustom low. + ValueTask ReadGenericAsObject(NpgsqlReadBuffer buf, bool async, FieldDescription? fieldDescription) + { + if (ArrayTypeInfo.IsArray) + return GetOrAddHandler().ReadArray(buf, async, ArrayTypeInfo.ArrayRank); + + if (ListTypeInfo.IsList) + return GetOrAddHandler().ReadList(buf, async); + + throw new InvalidCastException(fieldDescription == null + ? $"Can't cast database type to {typeof(TArray).Name}" + : $"Can't cast database type {fieldDescription.Handler.PgDisplayName} to {typeof(TArray).Name}" + ); + } + } + + /// + public override ValueTask ReadAsObject(NpgsqlReadBuffer buf, int len, bool async, + FieldDescription? 
fieldDescription = null) + => ReadAsObject(ElementHandler.GetFieldType(), buf, len, async, fieldDescription); + + protected async ValueTask ReadAsObject(Type elementType, NpgsqlReadBuffer buf, int len, bool async, + FieldDescription? fieldDescription = null) { - if (ArrayTypeInfo.IsArray) - return (TRequestedArray)await ArrayTypeInfo.ReadArray(this, buf, async); + if (!elementType.IsValueType || ArrayNullabilityMode is ArrayNullabilityMode.Never) + return await GetOrAddObjectHandler(elementType).ReadArrayAsObject(buf, async); - if (ListTypeInfo.IsList) - return (TRequestedArray)await ListTypeInfo.ReadList(this, buf, async); + if (ArrayNullabilityMode is ArrayNullabilityMode.Always) + return await GetOrAddObjectHandler(typeof(Nullable<>).MakeGenericType(elementType)).ReadArrayAsObject(buf, async); - throw new InvalidCastException(fieldDescription == null - ? $"Can't cast database type to {typeof(TRequestedArray).Name}" - : $"Can't cast database type {fieldDescription.Handler.PgDisplayName} to {typeof(TRequestedArray).Name}" - ); + // We need to peek at the data to call into the right handler. + await buf.Ensure(sizeof(int) * 2, async); + var origPos = buf.ReadPosition; + var _ = buf.ReadInt32(); + var containsNulls = buf.ReadInt32() == 1; + buf.ReadPosition = origPos; + + return containsNulls + ? await GetOrAddObjectHandler(typeof(Nullable<>).MakeGenericType(elementType)).ReadArrayAsObject(buf, async) + : await GetOrAddObjectHandler(elementType).ReadArrayAsObject(buf, async); + } + + ArrayHandlerCore GetOrAddObjectHandler(Type elementType) + { + var arrayType = + elementType == ElementHandler.GetFieldType() + ? _defaultArrayType + : elementType.MakeArrayType(); + + return _concreteHandlers.GetOrAdd(arrayType, + static (t, instance) => instance.CreateHandler(t.GetElementType()!), this); } + /// + public override int ValidateObjectAndGetLength(object value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) + => GetOrAddObjectHandler(ElementHandler.GetFieldType()).ValidateAndGetElementLength(value, ref lengthCache); + + /// + protected internal override int ValidateAndGetLengthCustom([DisallowNull] TArray value, ref NpgsqlLengthCache? lengthCache, + NpgsqlParameter? parameter) + => GetOrAddHandler().ValidateAndGetElementLength(value, ref lengthCache); + + /// + public override Task WriteObjectWithLength(object? value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, + NpgsqlParameter? parameter, bool async, + CancellationToken cancellationToken = default) + { + if (value is null or DBNull) + { + buf.WriteInt32(-1); + return Task.CompletedTask; + } + return GetOrAddObjectHandler(ElementHandler.GetFieldType()).WriteElementWithLength(value, buf, lengthCache, async, cancellationToken); + } + + protected override Task WriteWithLengthCustom([DisallowNull]TArray value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, + CancellationToken cancellationToken) + => GetOrAddHandler().WriteElementWithLength(value, buf, lengthCache, async, cancellationToken); + + private protected ArrayHandlerCore GetOrAddHandler() + => _concreteHandlers.GetOrAdd(typeof(TArray), static (_, instance) => + { + if (ArrayTypeInfo.IsArray) + return instance.CreateHandler(ArrayTypeInfo.ElementType); + + if (ListTypeInfo.IsList) + return instance.CreateHandler(ListTypeInfo.ElementType); + + return null!; + }, this); + + static class ArrayTypeInfo + { + // ReSharper disable StaticMemberInGenericType + public static readonly Type? ElementType = typeof(TArray).IsArray ? typeof(TArray).GetElementType() : null; + public static readonly int ArrayRank = ElementType is not null ? 
typeof(TArray).GetArrayRank() : 0; + // ReSharper restore StaticMemberInGenericType + + [MemberNotNullWhen(true, nameof(ElementType))] + public static bool IsArray => ElementType is not null; + } + + static class ListTypeInfo + { + // ReSharper disable StaticMemberInGenericType + public static readonly Type? ElementType = typeof(TList).IsGenericType && typeof(TList).GetGenericTypeDefinition() == typeof(List<>) ? typeof(TList).GetGenericArguments()[0] : null; + // ReSharper restore StaticMemberInGenericType + + [MemberNotNullWhen(true, nameof(ElementType))] + public static bool IsList => ElementType is not null; + } +} + +abstract class ArrayHandlerCore +{ + internal const string ReadNonNullableCollectionWithNullsExceptionMessage = + "Cannot read a non-nullable collection of elements because the returned array contains nulls. " + + "Call GetFieldValue with a nullable array instead."; + + readonly int _lowerBound; + public ArrayNullabilityMode ArrayNullabilityMode { get; } + + protected ArrayHandlerCore(ArrayNullabilityMode arrayNullabilityMode, int lowerBound = 1) + { + ArrayNullabilityMode = arrayNullabilityMode; + _lowerBound = lowerBound; + } + + public ValueTask ReadArray(NpgsqlReadBuffer buf, bool async, int expectedDimensions = 0) + => ReadArray(buf, async, expectedDimensions, readAsObject: false); + + public ValueTask ReadArrayAsObject(NpgsqlReadBuffer buf, bool async, int expectedDimensions = 0) + => ReadArray(buf, async, expectedDimensions, readAsObject: true); + + protected abstract Type ElementType { get; } + protected abstract bool IsNonNullable { get; } + protected abstract bool IsGenericCollection(object value, out int count); + protected abstract NpgsqlTypeHandler ElementHandler { get; } + protected abstract object CreateCollection(bool isArray, int capacity); + protected abstract ValueTask ReadElement(bool isArray, object values, int index, NpgsqlReadBuffer buf, int length, bool async, + FieldDescription? 
fieldDescription = null); + protected abstract ValueTask ReadElement(Array array, int[] indices, NpgsqlReadBuffer buf, int length, bool async, + FieldDescription? fieldDescription = null); + protected abstract int ValidateAndGetElementLength(bool isArray, object values, int index, ref NpgsqlLengthCache? lengthCache, + NpgsqlParameter? parameter); + protected abstract ValueTask WriteElementWithLength(bool isArray, object values, int index, NpgsqlWriteBuffer buf, + NpgsqlLengthCache? lengthCache, + NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken); + /// - /// Reads an array of element type from the given buffer . + /// Reads an array of element type from the given buffer . /// - protected internal async ValueTask ReadArray(NpgsqlReadBuffer buf, bool async, int expectedDimensions = 0, bool readAsObject = false) + async ValueTask ReadArray(NpgsqlReadBuffer buf, bool async, int expectedDimensions, bool readAsObject) { await buf.Ensure(12, async); var dimensions = buf.ReadInt32(); var containsNulls = buf.ReadInt32() == 1; buf.ReadUInt32(); // Element OID. Ignored. + var nullableElementType = IsNonNullable + ? typeof(Nullable<>).MakeGenericType(ElementType) + : ElementType; + var returnType = readAsObject ? ArrayNullabilityMode switch { - ArrayNullabilityMode.Never => ElementTypeInfo.IsNonNullable && containsNulls + ArrayNullabilityMode.Never => IsNonNullable && containsNulls ? throw new InvalidOperationException(ReadNonNullableCollectionWithNullsExceptionMessage) - : typeof(TRequestedElement), - ArrayNullabilityMode.Always => ElementTypeInfo.NullableElementType, + : ElementType, + ArrayNullabilityMode.Always => nullableElementType, ArrayNullabilityMode.PerInstance => containsNulls - ? ElementTypeInfo.NullableElementType - : typeof(TRequestedElement), + ? nullableElementType + : ElementType, _ => throw new ArgumentOutOfRangeException() } - : ElementTypeInfo.IsNonNullable && containsNulls + : IsNonNullable && containsNulls ? 
throw new InvalidOperationException(ReadNonNullableCollectionWithNullsExceptionMessage) - : typeof(TRequestedElement); + : ElementType; if (dimensions == 0) return expectedDimensions > 1 ? Array.CreateInstance(returnType, new int[expectedDimensions]) - : returnType == typeof(TRequestedElement) - ? Array.Empty() - : Array.CreateInstance(returnType, 0); + : CreateCollection(isArray: true, 0); if (expectedDimensions > 0 && dimensions != expectedDimensions) throw new InvalidOperationException($"Cannot read an array with {expectedDimensions} dimension(s) from an array with {dimensions} dimension(s)"); - if (dimensions == 1 && returnType == typeof(TRequestedElement)) + if (dimensions == 1 && returnType == ElementType) { await buf.Ensure(8, async); var arrayLength = buf.ReadInt32(); buf.ReadInt32(); // Lower bound - var oneDimensional = new TRequestedElement[arrayLength]; - for (var i = 0; i < oneDimensional.Length; i++) - oneDimensional[i] = await ElementHandler.ReadWithLength(buf, async); - + var oneDimensional = CreateCollection(isArray: true, arrayLength); + for (var i = 0; i < arrayLength; i++) + { + await buf.Ensure(4, async); + var len = buf.ReadInt32(); + await ReadElement(isArray: true, oneDimensional, i, buf, len, async); + } return oneDimensional; } @@ -137,13 +294,10 @@ protected internal async ValueTask ReadArray(NpgsqlRe { await buf.Ensure(4, async); var len = buf.ReadInt32(); - var element = len == -1 - ? (object?)null - : NullableHandler.Exists - ? await NullableHandler.ReadAsync(ElementHandler, buf, len, async) - : await ElementHandler.Read(buf, len, async); - - result.SetValue(element, indices); + if (len == -1) + result.SetValue(null, indices); + else + await ReadElement(result, indices, buf, len, async); // TODO: Overly complicated/inefficient... indices[dimensions - 1]++; @@ -163,9 +317,9 @@ protected internal async ValueTask ReadArray(NpgsqlRe } /// - /// Reads a generic list containing elements of type from the given buffer . 
+ /// Reads a generic list containing elements from the given buffer . /// - protected internal async ValueTask ReadList(NpgsqlReadBuffer buf, bool async) + public async ValueTask ReadList(NpgsqlReadBuffer buf, bool async) { await buf.Ensure(12, async); var dimensions = buf.ReadInt32(); @@ -173,35 +327,62 @@ protected internal async ValueTask ReadList(NpgsqlRea buf.ReadUInt32(); // Element OID. Ignored. if (dimensions == 0) - return new List(); + return CreateCollection(isArray: false, 0); if (dimensions > 1) - throw new NotSupportedException($"Can't read multidimensional array as List<{typeof(TRequestedElement).Name}>"); - if (ElementTypeInfo.IsNonNullable && containsNulls) + throw new NotSupportedException($"Can't read multidimensional array as List<{ElementType.Name}>"); + + if (containsNulls && IsNonNullable) throw new InvalidOperationException(ReadNonNullableCollectionWithNullsExceptionMessage); await buf.Ensure(8, async); var length = buf.ReadInt32(); buf.ReadInt32(); // We don't care about the lower bounds - var list = new List(length); + var list = CreateCollection(isArray: false, length); for (var i = 0; i < length; i++) - list.Add(await ElementHandler.ReadWithLength(buf, async)); + { + var len = buf.ReadInt32(); + await ReadElement(isArray: false, list, i, buf, len, async); + } return list; } - internal const string ReadNonNullableCollectionWithNullsExceptionMessage = - "Cannot read a non-nullable collection of elements because the returned array contains nulls. 
" + - "Call GetFieldValue with a nullable array instead."; + // Handle single-dimensional arrays and generic IList + public int ValidateAndGetElementLength(object value, int count, ref NpgsqlLengthCache lengthCache) + { + // Leave empty slot for the entire array length, and go ahead an populate the element slots + var pos = lengthCache.Position; + var len = + 4 + // dimensions + 4 + // has_nulls (unused) + 4 + // type OID + 1 * 8 + // number of dimensions (1) * (length + lower bound) + 4 * count; // sum of element lengths - #endregion Read + lengthCache.Set(0); + var elemLengthCache = lengthCache; + + var isArray = value is Array; + for (var i = 0; i < count; i++) + { + try + { + len += ValidateAndGetElementLength(isArray, value, i, ref elemLengthCache, null); + } + catch (Exception e) + { + throw MixedTypesOrJaggedArrayException(e); + } + } - #region Write + lengthCache.Lengths[pos] = len; + return len; + } // Take care of multi-dimensional arrays and non-generic IList, we have no choice but to box/unbox - protected int ValidateAndGetLengthNonGeneric(ICollection value, ref NpgsqlLengthCache lengthCache) + public int ValidateAndGetLengthAsObject(ICollection value, ref NpgsqlLengthCache lengthCache) { - var asMultidimensional = value as Array; - var dimensions = asMultidimensional?.Rank ?? 1; + var dimensions = (value as Array)?.Rank ?? 1; // Leave empty slot for the entire array length, and go ahead an populate the element slots var pos = lengthCache.Position; @@ -213,8 +394,9 @@ protected int ValidateAndGetLengthNonGeneric(ICollection value, ref NpgsqlLength 4 * value.Count; // sum of element lengths lengthCache.Set(0); - NpgsqlLengthCache? 
elemLengthCache = lengthCache; + var elemLengthCache = lengthCache; + var elementHandler = ElementHandler; foreach (var element in value) { if (element is null) @@ -222,7 +404,7 @@ protected int ValidateAndGetLengthNonGeneric(ICollection value, ref NpgsqlLength try { - len += ElementHandler.ValidateObjectAndGetLength(element, ref elemLengthCache, null); + len += elementHandler.ValidateObjectAndGetLength(element, ref elemLengthCache, null); } catch (Exception e) { @@ -234,7 +416,7 @@ protected int ValidateAndGetLengthNonGeneric(ICollection value, ref NpgsqlLength return len; } - protected async Task WriteNonGeneric(ICollection value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, bool async, CancellationToken cancellationToken = default) + public async Task WriteAsObject(ICollection value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, bool async, CancellationToken cancellationToken = default) { var asArray = value as Array; var dimensions = asArray?.Rank ?? 1; @@ -251,258 +433,188 @@ protected async Task WriteNonGeneric(ICollection value, NpgsqlWriteBuffer buf, N Debug.Assert(buf.WriteSpaceLeft >= len, "Buffer too small for header"); } + var elementHandler = ElementHandler; buf.WriteInt32(dimensions); buf.WriteInt32(1); // HasNulls=1. Not actually used by the backend. 
- buf.WriteUInt32(ElementHandler.PostgresType.OID); + buf.WriteUInt32(elementHandler.PostgresType.OID); if (asArray != null) { for (var i = 0; i < dimensions; i++) { buf.WriteInt32(asArray.GetLength(i)); - buf.WriteInt32(LowerBound); // We don't map .NET lower bounds to PG + buf.WriteInt32(_lowerBound); // We don't map .NET lower bounds to PG } } else { buf.WriteInt32(value.Count); - buf.WriteInt32(LowerBound); // We don't map .NET lower bounds to PG + buf.WriteInt32(_lowerBound); // We don't map .NET lower bounds to PG } foreach (var element in value) - await ElementHandler.WriteObjectWithLength(element, buf, lengthCache, null, async, cancellationToken); + await elementHandler.WriteObjectWithLength(element, buf, lengthCache, null, async, cancellationToken); } - protected static Exception MixedTypesOrJaggedArrayException(Exception innerException) - => new("While trying to write an array, one of its elements failed validation. " + - "You may be trying to mix types in a non-generic IList, or to write a jagged array.", innerException); - - #endregion Write - - #region Static generic caching helpers - - internal static class ElementTypeInfo + public async Task Write(object value, int count, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, bool async, CancellationToken cancellationToken = default) { - public static readonly bool IsNonNullable = - typeof(TElement).IsValueType && default(TElement) is not null; - - public static readonly Type NullableElementType = IsNonNullable - ? typeof(Nullable<>).MakeGenericType(typeof(TElement)) - : typeof(TElement); - } - - internal abstract class ArrayTypeInfo - { - // ReSharper disable StaticMemberInGenericType - public static readonly Type? ElementType = typeof(TArray).IsArray ? typeof(TArray).GetElementType() : null; - // ReSharper restore StaticMemberInGenericType - - public static bool IsArray => ElementType is not null; - - static ArrayTypeInfo? 
_derivedInstance; - static ArrayTypeInfo DerivedInstance + var len = + 4 + // dimensions + 4 + // has_nulls (unused) + 4 + // type OID + 1 * 8; // number of dimensions (1) * (length + lower bound) + if (buf.WriteSpaceLeft < len) { - get - { - Debug.Assert(ElementType is not null); - return (_derivedInstance ??= (ArrayTypeInfo?)Activator.CreateInstance(typeof(ArrayHandler<,>).MakeGenericType(typeof(TArray), ElementType), typeof(TArray).GetArrayRank()))!; - } + await buf.Flush(async, cancellationToken); + Debug.Assert(buf.WriteSpaceLeft >= len, "Buffer too small for header"); } - public static ValueTask ReadArray(ArrayHandler handler, NpgsqlReadBuffer buf, bool async, bool readAsObject = false) - => DerivedInstance.Read(handler, buf, async, readAsObject); + var elementHandler = ElementHandler; + buf.WriteInt32(1); + buf.WriteInt32(1); // has_nulls = 1. Not actually used by the backend. + buf.WriteUInt32(elementHandler.PostgresType.OID); + buf.WriteInt32(count); + buf.WriteInt32(_lowerBound); // We don't map .NET lower bounds to PG - protected abstract ValueTask Read(ArrayHandler handler, NpgsqlReadBuffer buf, bool async, bool readAsObject = false); + var isArray = value is Array; + for (var i = 0; i < count; i++) + await WriteElementWithLength(isArray, value, i, buf, lengthCache, null, async, cancellationToken); } - internal abstract class ListTypeInfo - { - // ReSharper disable StaticMemberInGenericType - public static readonly Type? ElementType = - typeof(TList).IsGenericType && typeof(TList).GetGenericTypeDefinition() == typeof(List<>) ? - typeof(TList).GetGenericArguments()[0] : null; - // ReSharper restore StaticMemberInGenericType - public static bool IsList => ElementType is not null; + static Exception MixedTypesOrJaggedArrayException(Exception innerException) + => new("While trying to write an array, one of its elements failed validation. 
" + + "You may be trying to mix types in a non-generic IList, or to write a jagged array.", innerException); - static ListTypeInfo? _derivedInstance; - static ListTypeInfo DerivedInstance - { - get - { - Debug.Assert(ElementType is not null); - return _derivedInstance ??= (ListTypeInfo?)Activator.CreateInstance(typeof(ListHandler<,>).MakeGenericType(typeof(TList), ElementType))!; - } - } + public int ValidateAndGetElementLength(object value, ref NpgsqlLengthCache? lengthCache) + { + lengthCache ??= new NpgsqlLengthCache(1); + if (lengthCache.IsPopulated) + return lengthCache.Get(); - public static ValueTask ReadList(ArrayHandler handler, NpgsqlReadBuffer buf, bool async) - => DerivedInstance.Read(handler, buf, async); + return value switch + { + _ when IsGenericCollection(value, out var count) => ValidateAndGetElementLength(value, count, ref lengthCache), + ICollection nonGeneric => ValidateAndGetLengthAsObject(nonGeneric, ref lengthCache), + _ => throw CantWriteTypeException(value.GetType()) + }; + } - protected abstract ValueTask Read(ArrayHandler handler, NpgsqlReadBuffer buf, bool async); + public Task WriteElementWithLength(object value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, bool async, CancellationToken cancellationToken) + { + buf.WriteInt32(ValidateAndGetElementLength(value, ref lengthCache)); + return value switch + { + _ when IsGenericCollection(value, out var count) => Write(value, count, buf, lengthCache, async, cancellationToken), + ICollection nonGeneric => WriteAsObject(nonGeneric, buf, lengthCache, async, cancellationToken), + _ => throw CantWriteTypeException(value.GetType()) + }; } - #endregion Static generic caching helpers + + InvalidCastException CantWriteTypeException(Type type) + => new($"Can't write type '{type}' as an array of {ElementType}"); } -/// -/// Base class for all type handlers which handle PostgreSQL arrays. -/// -/// -/// https://www.postgresql.org/docs/current/static/arrays.html. 
-/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public class ArrayHandler : ArrayHandler +sealed class ArrayHandlerCore : ArrayHandlerCore { - /// - public ArrayHandler(PostgresType arrayPostgresType, NpgsqlTypeHandler elementHandler, ArrayNullabilityMode arrayNullabilityMode, int lowerBound = 1) - : base(arrayPostgresType, elementHandler, arrayNullabilityMode, lowerBound) {} - - #region Read - - public override async ValueTask ReadAsObject(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - => await ReadArray(buf, async, readAsObject: true); - - #endregion + readonly NpgsqlTypeHandler _elementHandler; - #region Write + public ArrayHandlerCore(NpgsqlTypeHandler nonNullableElementHandler, ArrayNullabilityMode arrayNullabilityMode, int lowerBound = 1) + : base(arrayNullabilityMode, lowerBound) + => _elementHandler = nonNullableElementHandler; - static InvalidCastException CantWriteTypeException(Type type) - => new($"Can't write type '{type}' as an array of {typeof(TElement)}"); + protected override Type ElementType => typeof(TElement); + protected override bool IsNonNullable => typeof(TElement).IsValueType && default(TElement) is not null; - // Since TAny isn't constrained to class? or struct (C# doesn't have a non-nullable constraint that doesn't limit us to either struct or class), - // we must use the bang operator here to tell the compiler that a null value will never be returned. + protected override bool IsGenericCollection(object value, out int count) + { + if (value is ICollection collection) + { + count = collection.Count; + return true; + } - /// - protected internal override int ValidateAndGetLengthCustom([DisallowNull] TAny value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - => ValidateAndGetLength(value, ref lengthCache); + count = 0; + return false; + } - /// - public override int ValidateObjectAndGetLength(object? value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => value is null || value == DBNull.Value - ? 0 - : ValidateAndGetLength(value!, ref lengthCache); + protected override NpgsqlTypeHandler ElementHandler => _elementHandler; - int ValidateAndGetLength(object value, ref NpgsqlLengthCache? lengthCache) + protected override object CreateCollection(bool isArray, int capacity) => isArray switch { - lengthCache ??= new NpgsqlLengthCache(1); - if (lengthCache.IsPopulated) - return lengthCache.Get(); - if (value is ICollection generic) - return ValidateAndGetLengthGeneric(generic, ref lengthCache); - if (value is ICollection nonGeneric) - return ValidateAndGetLengthNonGeneric(nonGeneric, ref lengthCache); - throw CantWriteTypeException(value.GetType()); - } + true => capacity is 0 ? Array.Empty() : new TElement[capacity], + false => new List() + }; - // Handle single-dimensional arrays and generic IList - int ValidateAndGetLengthGeneric(ICollection value, ref NpgsqlLengthCache lengthCache) + protected override ValueTask ReadElement(bool isArray, object values, int index, NpgsqlReadBuffer buf, int length, bool async, FieldDescription? fieldDescription = null) { - // Leave empty slot for the entire array length, and go ahead an populate the element slots - var pos = lengthCache.Position; - var len = - 4 + // dimensions - 4 + // has_nulls (unused) - 4 + // type OID - 1 * 8 + // number of dimensions (1) * (length + lower bound) - 4 * value.Count; // sum of element lengths - - lengthCache.Set(0); - var elemLengthCache = lengthCache; - - foreach (var element in value) + // We want a generic mutation so we unfortunately need the null check on this side. 
+ if (length == -1) { - if (element is null) - continue; - - try - { - len += ElementHandler.ValidateAndGetLength(element, ref elemLengthCache, null); - } - catch (Exception e) - { - throw MixedTypesOrJaggedArrayException(e); - } + SetResult(isArray, values, index, (TElement?)(object?)null); + return new ValueTask(); } - lengthCache.Lengths[pos] = len; - return len; - } - - protected override Task WriteWithLengthCustom([DisallowNull] TAny value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken) - { - buf.WriteInt32(ValidateAndGetLength(value, ref lengthCache, parameter)); - - if (value is ICollection list) - return WriteGeneric(list, buf, lengthCache, async, cancellationToken); + var task = + NullableHandler.Exists + ? NullableHandler.ReadAsync(_elementHandler, buf, length, async, fieldDescription) + : _elementHandler.Read(buf, length, async, fieldDescription); - if (value is ICollection nonGeneric) - return WriteNonGeneric(nonGeneric, buf, lengthCache, async, cancellationToken); + if (!task.IsCompletedSuccessfully) + return Core(isArray, values, index, task); - throw CantWriteTypeException(value.GetType()); - } + SetResult(isArray, values, index, task.GetAwaiter().GetResult()); + return new ValueTask(); - // The default WriteObjectWithLength casts the type handler to INpgsqlTypeHandler, but that's not sufficient for - // us (need to handle many types of T, e.g. int[], int[,]...) - /// - public override Task WriteObjectWithLength(object? value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => value is null || value is DBNull - ? 
WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken) - : WriteWithLength(value, buf, lengthCache, parameter, async, cancellationToken); + static async ValueTask Core(bool isArray, object values, int index, ValueTask task) + => SetResult(isArray, values, index, await task); - async Task WriteGeneric(ICollection value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, bool async, CancellationToken cancellationToken = default) - { - var len = - 4 + // dimensions - 4 + // has_nulls (unused) - 4 + // type OID - 1 * 8; // number of dimensions (1) * (length + lower bound) - if (buf.WriteSpaceLeft < len) + static void SetResult(bool isArray, object values, int index, TElement? result) { - await buf.Flush(async, cancellationToken); - Debug.Assert(buf.WriteSpaceLeft >= len, "Buffer too small for header"); + Debug.Assert(isArray ? values is TElement?[] : values is List); + if (isArray) + Unsafe.As(ref values)[index] = result; + else + Unsafe.As>(ref values).Add(result); } - - buf.WriteInt32(1); - buf.WriteInt32(1); // has_nulls = 1. Not actually used by the backend. - buf.WriteUInt32(ElementHandler.PostgresType.OID); - buf.WriteInt32(value.Count); - buf.WriteInt32(LowerBound); // We don't map .NET lower bounds to PG - - foreach (var element in value) - await ElementHandler.WriteWithLength(element, buf, lengthCache, null, async, cancellationToken); } - #endregion -} - -/// -/// https://www.postgresql.org/docs/current/static/arrays.html -/// -/// The .NET type contained as an element within this array -/// The .NET provider-specific type contained as an element within this array -sealed class ArrayHandlerWithPsv : ArrayHandler -{ - public ArrayHandlerWithPsv(PostgresType arrayPostgresType, NpgsqlTypeHandler elementHandler, ArrayNullabilityMode arrayNullabilityMode) - : base(arrayPostgresType, elementHandler, arrayNullabilityMode) { } - - internal override object ReadPsvAsObject(NpgsqlReadBuffer buf, int len, FieldDescription? 
fieldDescription = null) - => ReadPsvAsObject(buf, len, false, fieldDescription).GetAwaiter().GetResult(); + protected override async ValueTask ReadElement(Array array, int[] indices, NpgsqlReadBuffer buf, int length, bool async, FieldDescription? fieldDescription = null) + { + // Null check is handled in ArrayHandlerOps to reduce code size. + var result = + NullableHandler.Exists + ? await NullableHandler.ReadAsync(_elementHandler, buf, length, async, fieldDescription) + : await _elementHandler.Read(buf, length, async, fieldDescription); - internal override async ValueTask ReadPsvAsObject(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - => await ReadArray(buf, async, readAsObject: true); -} + array.SetValue(result, indices); + } -sealed class ArrayHandler : ArrayHandler.ArrayTypeInfo -{ - readonly int _arrayRank; - public ArrayHandler(int arrayRank) => _arrayRank = arrayRank; + protected override int ValidateAndGetElementLength(bool isArray, object values, int index, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) + { + Debug.Assert(isArray ? values is TElement?[] : values is List); + var element = + isArray + ? Unsafe.As(ref values)[index] + : Unsafe.As>(ref values)[index]; - protected override ValueTask Read(ArrayHandler handler, NpgsqlReadBuffer buf, bool async, bool readAsObject = false) - => handler.ReadArray(buf, async, _arrayRank, readAsObject); -} + return element is null + ? 0 + : NullableHandler.Exists + ? NullableHandler.ValidateAndGetLength(_elementHandler, element, ref lengthCache, parameter) + : _elementHandler.ValidateAndGetLength(element, ref lengthCache, parameter); + } -sealed class ListHandler : ArrayHandler.ListTypeInfo -{ - protected override ValueTask Read(ArrayHandler handler, NpgsqlReadBuffer buf, bool async) => - handler.ReadList(buf, async); + protected override async ValueTask WriteElementWithLength(bool isArray, object values, int index, NpgsqlWriteBuffer buf, NpgsqlLengthCache? 
lengthCache, + NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken) + { + Debug.Assert(isArray ? values is TElement?[] : values is List); + var element = + isArray + ? Unsafe.As(ref values)[index] + : Unsafe.As>(ref values)[index]; + + if (NullableHandler.Exists) + await NullableHandler.WriteAsync(_elementHandler, element!, buf, lengthCache, parameter, async, cancellationToken); + else + await _elementHandler.WriteWithLength(element!, buf, lengthCache, parameter, async, cancellationToken); + } } diff --git a/src/Npgsql/Internal/TypeHandlers/BitStringHandler.cs b/src/Npgsql/Internal/TypeHandlers/BitStringHandler.cs index 68ab42132b..5d3e67c1ac 100644 --- a/src/Npgsql/Internal/TypeHandlers/BitStringHandler.cs +++ b/src/Npgsql/Internal/TypeHandlers/BitStringHandler.cs @@ -260,14 +260,15 @@ public async Task Write(string value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? /// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. /// Use it at your own risk. /// -public class BitStringArrayHandler : ArrayHandler +public class BitStringArrayHandler : ArrayHandler { /// public BitStringArrayHandler(PostgresType postgresType, BitStringHandler elementHandler, ArrayNullabilityMode arrayNullabilityMode) - : base(postgresType, elementHandler, arrayNullabilityMode) {} + : base(postgresType, elementHandler, arrayNullabilityMode) + { } - public override async ValueTask ReadAsObject(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) + public override ValueTask ReadAsObject(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) => fieldDescription?.TypeModifier == 1 - ? await ReadArray(buf, async) - : await ReadArray(buf, async); + ? 
base.ReadAsObject(typeof(bool), buf, len, async, fieldDescription) + : base.ReadAsObject(buf, len, async, fieldDescription); } diff --git a/src/Npgsql/Internal/TypeHandlers/HstoreHandler.cs b/src/Npgsql/Internal/TypeHandlers/HstoreHandler.cs index 4754d89b14..0b8613d979 100644 --- a/src/Npgsql/Internal/TypeHandlers/HstoreHandler.cs +++ b/src/Npgsql/Internal/TypeHandlers/HstoreHandler.cs @@ -75,9 +75,9 @@ public override int ValidateAndGetLength(Dictionary value, ref public override int ValidateObjectAndGetLength(object? value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) => value switch { - ImmutableDictionary converted => ((INpgsqlTypeHandler>)this).ValidateAndGetLength(converted, ref lengthCache, parameter), - Dictionary converted => ((INpgsqlTypeHandler>)this).ValidateAndGetLength(converted, ref lengthCache, parameter), - IDictionary converted => ((INpgsqlTypeHandler>)this).ValidateAndGetLength(converted, ref lengthCache, parameter), + ImmutableDictionary converted => ValidateAndGetLength(converted, ref lengthCache, parameter), + Dictionary converted => ValidateAndGetLength(converted, ref lengthCache, parameter), + IDictionary converted => ValidateAndGetLength(converted, ref lengthCache, parameter), DBNull => 0, null => 0, @@ -94,9 +94,9 @@ public override Task WriteObjectWithLength( CancellationToken cancellationToken = default) => value switch { - ImmutableDictionary converted => WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), - Dictionary converted => WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), - IDictionary converted => WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), + ImmutableDictionary converted => ((INpgsqlTypeHandler>)this).WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), + Dictionary converted => ((INpgsqlTypeHandler>)this).WriteWithLength(converted, buf, lengthCache, parameter, async, 
cancellationToken), + IDictionary converted => ((INpgsqlTypeHandler>)this).WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), DBNull => WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken), null => WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken), @@ -114,8 +114,8 @@ public async Task Write(IDictionary value, NpgsqlWriteBuffer bu foreach (var kv in value) { - await _textHandler.WriteWithLength(kv.Key, buf, lengthCache, parameter, async, cancellationToken); - await _textHandler.WriteWithLength(kv.Value, buf, lengthCache, parameter, async, cancellationToken); + await ((INpgsqlTypeHandler)_textHandler).WriteWithLength(kv.Key, buf, lengthCache, parameter, async, cancellationToken); + await ((INpgsqlTypeHandler)_textHandler).WriteWithLength(kv.Value, buf, lengthCache, parameter, async, cancellationToken); } } diff --git a/src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/Int2VectorHandler.cs b/src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/Int2VectorHandler.cs index 95fae5dcb9..1523b66d69 100644 --- a/src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/Int2VectorHandler.cs +++ b/src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/Int2VectorHandler.cs @@ -1,9 +1,6 @@ -using System; -using Npgsql.Internal.TypeHandlers.NumericHandlers; +using Npgsql.Internal.TypeHandlers.NumericHandlers; using Npgsql.Internal.TypeHandling; using Npgsql.PostgresTypes; -using Npgsql.TypeMapping; -using NpgsqlTypes; namespace Npgsql.Internal.TypeHandlers.InternalTypeHandlers; @@ -11,11 +8,11 @@ namespace Npgsql.Internal.TypeHandlers.InternalTypeHandlers; /// An int2vector is simply a regular array of shorts, with the sole exception that its lower bound must /// be 0 (we send 1 for regular arrays). 
/// -sealed class Int2VectorHandler : ArrayHandler +sealed class Int2VectorHandler : ArrayHandler { public Int2VectorHandler(PostgresType arrayPostgresType, PostgresType postgresShortType) : base(arrayPostgresType, new Int16Handler(postgresShortType), ArrayNullabilityMode.Never, 0) { } public override NpgsqlTypeHandler CreateArrayHandler(PostgresArrayType pgArrayType, ArrayNullabilityMode arrayNullabilityMode) - => new ArrayHandler>(pgArrayType, this, arrayNullabilityMode); -} \ No newline at end of file + => new ArrayHandler(pgArrayType, this, arrayNullabilityMode); +} diff --git a/src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/OIDVectorHandler.cs b/src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/OIDVectorHandler.cs index d17f069871..00b3a57aa1 100644 --- a/src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/OIDVectorHandler.cs +++ b/src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/OIDVectorHandler.cs @@ -1,9 +1,6 @@ -using System; -using Npgsql.Internal.TypeHandlers.NumericHandlers; +using Npgsql.Internal.TypeHandlers.NumericHandlers; using Npgsql.Internal.TypeHandling; using Npgsql.PostgresTypes; -using Npgsql.TypeMapping; -using NpgsqlTypes; namespace Npgsql.Internal.TypeHandlers.InternalTypeHandlers; @@ -11,11 +8,11 @@ namespace Npgsql.Internal.TypeHandlers.InternalTypeHandlers; /// An OIDVector is simply a regular array of uints, with the sole exception that its lower bound must /// be 0 (we send 1 for regular arrays). 
/// -sealed class OIDVectorHandler : ArrayHandler +sealed class OIDVectorHandler : ArrayHandler { public OIDVectorHandler(PostgresType oidvectorType, PostgresType oidType) : base(oidvectorType, new UInt32Handler(oidType), ArrayNullabilityMode.Never, 0) { } public override NpgsqlTypeHandler CreateArrayHandler(PostgresArrayType pgArrayType, ArrayNullabilityMode arrayNullabilityMode) - => new ArrayHandler>(pgArrayType, this, arrayNullabilityMode); -} \ No newline at end of file + => new ArrayHandler(pgArrayType, this, arrayNullabilityMode); +} diff --git a/src/Npgsql/Internal/TypeHandlers/JsonTextHandler.cs b/src/Npgsql/Internal/TypeHandlers/JsonTextHandler.cs index ef1fa0593b..2842370336 100644 --- a/src/Npgsql/Internal/TypeHandlers/JsonTextHandler.cs +++ b/src/Npgsql/Internal/TypeHandlers/JsonTextHandler.cs @@ -153,13 +153,8 @@ public override int ValidateObjectAndGetLength(object value, ref NpgsqlLengthCac }; /// - public override async Task WriteObjectWithLength(object? value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - // We call into WriteWithLength below, which assumes it as at least enough write space for the length - if (buf.WriteSpaceLeft < 4) - await buf.Flush(async, cancellationToken); - - await (value switch + public override Task WriteObjectWithLength(object? value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) + => value switch { null => WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken), DBNull => WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken), @@ -173,8 +168,7 @@ public override async Task WriteObjectWithLength(object? value, NpgsqlWriteBuffe _ => throw new InvalidCastException( $"Can't write CLR type {value.GetType()}. 
" + "You may need to use the System.Text.Json or Json.NET plugins, see the docs for more information.") - }); - } + }; /// protected internal override async ValueTask ReadCustom(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) @@ -212,4 +206,4 @@ public TextReader GetTextReader(Stream stream, NpgsqlReadBuffer buffer) return TextHandler.GetTextReader(stream, buffer); } -} \ No newline at end of file +} diff --git a/src/Npgsql/Internal/TypeHandlers/MultirangeHandler.cs b/src/Npgsql/Internal/TypeHandlers/MultirangeHandler.cs index 96abf09317..5d2d738d8b 100644 --- a/src/Npgsql/Internal/TypeHandlers/MultirangeHandler.cs +++ b/src/Npgsql/Internal/TypeHandlers/MultirangeHandler.cs @@ -126,7 +126,7 @@ public async Task WriteMultirange( buf.WriteInt32(value.Count); for (var i = 0; i < value.Count; i++) - await RangeHandler.WriteWithLength(value[i], buf, lengthCache, parameter: null, async, cancellationToken); + await ((INpgsqlTypeHandler>)RangeHandler).WriteWithLength(value[i], buf, lengthCache, parameter: null, async, cancellationToken); } public override Type GetFieldType(FieldDescription? fieldDescription = null) => typeof(NpgsqlRange[]); @@ -200,13 +200,13 @@ public override int ValidateObjectAndGetLength(object? value, ref NpgsqlLengthCa public override Task WriteObjectWithLength(object? value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) => value switch { - NpgsqlRange[] converted => WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), - NpgsqlRange[] converted => WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), - List> converted => WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), - List> converted => WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), + NpgsqlRange[] converted => ((INpgsqlTypeHandler[]>)this).WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), + NpgsqlRange[] converted => ((INpgsqlTypeHandler[]>)this).WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), + List> converted => ((INpgsqlTypeHandler>>)this).WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), + List> converted => ((INpgsqlTypeHandler>>)this).WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), DBNull => WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken), null => WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken), _ => throw new InvalidCastException($"Can't write CLR type {value.GetType()} with handler type RangeHandler") }; -} \ No newline at end of file +} diff --git a/src/Npgsql/Internal/TypeHandlers/RangeHandler.cs b/src/Npgsql/Internal/TypeHandlers/RangeHandler.cs index 8c9792aabb..f3cbb51192 100644 --- a/src/Npgsql/Internal/TypeHandlers/RangeHandler.cs +++ b/src/Npgsql/Internal/TypeHandlers/RangeHandler.cs @@ -38,7 +38,7 @@ public RangeHandler(PostgresType rangePostgresType, NpgsqlTypeHandler subtypeHan /// public override NpgsqlTypeHandler CreateArrayHandler(PostgresArrayType pgArrayType, ArrayNullabilityMode arrayNullabilityMode) - => new ArrayHandler>(pgArrayType, this, arrayNullabilityMode); + => new ArrayHandler(pgArrayType, 
this, arrayNullabilityMode); /// public override NpgsqlTypeHandler CreateRangeHandler(PostgresType pgRangeType) @@ -167,8 +167,8 @@ public Task Write(NpgsqlRange value, NpgsqlWriteBuffer buf, NpgsqlLen public override int ValidateObjectAndGetLength(object? value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) => value switch { - NpgsqlRange converted => ((INpgsqlTypeHandler>)this).ValidateAndGetLength(converted, ref lengthCache, parameter), - NpgsqlRange converted => ((INpgsqlTypeHandler>)this).ValidateAndGetLength(converted, ref lengthCache, parameter), + NpgsqlRange converted => ValidateAndGetLength(converted, ref lengthCache, parameter), + NpgsqlRange converted => ValidateAndGetLength(converted, ref lengthCache, parameter), DBNull => 0, null => 0, @@ -178,11 +178,11 @@ public override int ValidateObjectAndGetLength(object? value, ref NpgsqlLengthCa public override Task WriteObjectWithLength(object? value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) => value switch { - NpgsqlRange converted => WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), - NpgsqlRange converted => WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), + NpgsqlRange converted => ((INpgsqlTypeHandler>)this).WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), + NpgsqlRange converted => ((INpgsqlTypeHandler>)this).WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), DBNull => WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken), null => WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken), _ => throw new InvalidCastException($"Can't write CLR type {value.GetType()} with handler type RangeHandler") }; -} \ No newline at end of file +} diff --git a/src/Npgsql/Internal/TypeHandling/INpgsqlTypeHandler.cs b/src/Npgsql/Internal/TypeHandling/INpgsqlTypeHandler.cs index 939b7f92a0..e1a4dc125b 100644 --- a/src/Npgsql/Internal/TypeHandling/INpgsqlTypeHandler.cs +++ b/src/Npgsql/Internal/TypeHandling/INpgsqlTypeHandler.cs @@ -1,4 +1,5 @@ -using System.Diagnostics.CodeAnalysis; +using System; +using System.Diagnostics.CodeAnalysis; using System.Threading; using System.Threading.Tasks; using Npgsql.BackendMessages; @@ -34,7 +35,7 @@ public interface INpgsqlTypeHandler /// information relevant to the write process (e.g. ). /// /// The number of bytes required to write the value. - int ValidateAndGetLength([DisallowNull] T value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter); + int ValidateAndGetLength([DisallowNull] T value, [NotNullIfNotNull(nameof(lengthCache))]ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter); /// /// Writes a value to the provided buffer. @@ -53,4 +54,22 @@ public interface INpgsqlTypeHandler /// An optional token to cancel the asynchronous operation. 
The default value is . /// Task Write([DisallowNull] T value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default); -} \ No newline at end of file +} + +static class INpgsqlTypeHandlerExtensions +{ + public static async Task WriteWithLength(this INpgsqlTypeHandler handler, T? value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) + { + if (buf.WriteSpaceLeft < 4) + await buf.Flush(async, cancellationToken); + + if (value is null or DBNull) + { + buf.WriteInt32(-1); + return; + } + + buf.WriteInt32(handler.ValidateAndGetLength(value, ref lengthCache, parameter)); + await handler.Write(value, buf, lengthCache, parameter, async, cancellationToken); + } +} diff --git a/src/Npgsql/Internal/TypeHandling/NpgsqlSimpleTypeHandler.cs b/src/Npgsql/Internal/TypeHandling/NpgsqlSimpleTypeHandler.cs index 84757171ae..5a9bbde2cf 100644 --- a/src/Npgsql/Internal/TypeHandling/NpgsqlSimpleTypeHandler.cs +++ b/src/Npgsql/Internal/TypeHandling/NpgsqlSimpleTypeHandler.cs @@ -35,6 +35,12 @@ protected NpgsqlSimpleTypeHandler(PostgresType postgresType) : base(postgresType public sealed override ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) => throw new NotSupportedException(); + public override async ValueTask ReadAsObject(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription = null) + { + await buf.Ensure(len, async); + return Read(buf, len, fieldDescription)!; + } + #region Write /// @@ -75,4 +81,4 @@ public sealed override int ValidateAndGetLength(TDefault value, ref NpgsqlLength => throw new NotSupportedException(); #endregion -} \ No newline at end of file +} diff --git a/src/Npgsql/Internal/TypeHandling/NpgsqlSimpleTypeHandlerWithPsv.cs b/src/Npgsql/Internal/TypeHandling/NpgsqlSimpleTypeHandlerWithPsv.cs index f8e02a4a48..d6d69b2df1 100644 --- a/src/Npgsql/Internal/TypeHandling/NpgsqlSimpleTypeHandlerWithPsv.cs +++ b/src/Npgsql/Internal/TypeHandling/NpgsqlSimpleTypeHandlerWithPsv.cs @@ -94,9 +94,5 @@ internal override async ValueTask ReadPsvAsObject(NpgsqlReadBuffer buf, public override Type GetProviderSpecificFieldType(FieldDescription? fieldDescription = null) => typeof(TPsv); - /// - public override NpgsqlTypeHandler CreateArrayHandler(PostgresArrayType pgArrayType, ArrayNullabilityMode arrayNullabilityMode) - => new ArrayHandlerWithPsv(pgArrayType, this, arrayNullabilityMode); - #endregion Misc -} \ No newline at end of file +} diff --git a/src/Npgsql/Internal/TypeHandling/NpgsqlTypeHandler.cs b/src/Npgsql/Internal/TypeHandling/NpgsqlTypeHandler.cs index f799a47351..0eaa1906cb 100644 --- a/src/Npgsql/Internal/TypeHandling/NpgsqlTypeHandler.cs +++ b/src/Npgsql/Internal/TypeHandling/NpgsqlTypeHandler.cs @@ -135,7 +135,7 @@ internal async ValueTask ReadWithLength(NpgsqlReadBuffer buf, bool a /// [MethodImpl(MethodImplOptions.AggressiveInlining)] protected internal int ValidateAndGetLength( - [DisallowNull] TAny value, [NotNullIfNotNull("lengthCache")] ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) + [DisallowNull] TAny value, [NotNullIfNotNull(nameof(lengthCache))] ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) { Debug.Assert(value is not DBNull); @@ -148,7 +148,10 @@ protected internal int ValidateAndGetLength( } protected internal virtual int ValidateAndGetLengthCustom( - [DisallowNull] TAny value, [NotNullIfNotNull("lengthCache")] ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) + [DisallowNull] TAny value, [NotNullIfNotNull(nameof(lengthCache))] ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) => + ValidateAndGetLengthCustomCore(parameter, typeof(TAny), PgDisplayName); + + static int ValidateAndGetLengthCustomCore(NpgsqlParameter? parameter, Type type, string displayName) { var parameterName = parameter is null ? null @@ -159,8 +162,8 @@ protected internal virtual int ValidateAndGetLengthCustom( : parameter.TrimmedName; throw new InvalidCastException(parameterName is null - ? $"Cannot write a value of CLR type '{typeof(TAny)}' as database type '{PgDisplayName}'." - : $"Cannot write a value of CLR type '{typeof(TAny)}' as database type '{PgDisplayName}' for parameter '{parameterName}'."); + ? $"Cannot write a value of CLR type '{type}' as database type '{displayName}'." + : $"Cannot write a value of CLR type '{type}' as database type '{displayName}' for parameter '{parameterName}'."); } /// diff --git a/src/Npgsql/Internal/TypeHandling/NpgsqlTypeHandler`.cs b/src/Npgsql/Internal/TypeHandling/NpgsqlTypeHandler`.cs index aa7f0b6606..3ef86dc8c8 100644 --- a/src/Npgsql/Internal/TypeHandling/NpgsqlTypeHandler`.cs +++ b/src/Npgsql/Internal/TypeHandling/NpgsqlTypeHandler`.cs @@ -40,7 +40,7 @@ protected NpgsqlTypeHandler(PostgresType postgresType) : base(postgresType) {} // Since TAny isn't constrained to class? or struct (C# doesn't have a non-nullable constraint that doesn't limit us to either struct or class), // we must use the bang operator here to tell the compiler that a null value will never returned. public override async ValueTask ReadAsObject(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription = null) - => (await Read(buf, len, async, fieldDescription))!; + => (await Read(buf, len, async, fieldDescription))!; #endregion Read @@ -49,7 +49,7 @@ public override async ValueTask ReadAsObject(NpgsqlReadBuffer buf, int l /// /// Called to validate and get the length of a value of a generic . /// - public abstract int ValidateAndGetLength(TDefault value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter); + public abstract int ValidateAndGetLength(TDefault value, [NotNullIfNotNull(nameof(lengthCache))] ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter); /// /// Called to write the value of a generic . @@ -65,7 +65,7 @@ public override async ValueTask ReadAsObject(NpgsqlReadBuffer buf, int l /// public override NpgsqlTypeHandler CreateArrayHandler(PostgresArrayType pgArrayType, ArrayNullabilityMode arrayNullabilityMode) - => new ArrayHandler(pgArrayType, this, arrayNullabilityMode); + => new ArrayHandler(pgArrayType, this, arrayNullabilityMode); /// public override NpgsqlTypeHandler CreateRangeHandler(PostgresType pgRangeType) @@ -76,4 +76,4 @@ public override NpgsqlTypeHandler CreateMultirangeHandler(PostgresMultirangeType => new MultirangeHandler(pgMultirangeType, (RangeHandler)CreateRangeHandler(pgMultirangeType.Subrange)); #endregion Misc -} \ No newline at end of file +} diff --git a/src/Npgsql/Shims/ConcurrentDictionaryExtensions.cs b/src/Npgsql/Shims/ConcurrentDictionaryExtensions.cs new file mode 100644 index 0000000000..c752cf2199 --- /dev/null +++ b/src/Npgsql/Shims/ConcurrentDictionaryExtensions.cs @@ -0,0 +1,18 @@ +using System; +using System.Collections.Concurrent; + +namespace System.Collections.Concurrent; + +#if NETSTANDARD2_0 +static class ConcurrentDictionaryExtensions +{ + public static TValue GetOrAdd(this ConcurrentDictionary instance, TKey key, + Func valueFactory, TArg factoryArgument) + { + // The actual closure capture exists in a local function to prevent a display class allocation at the start 
of the method. + return instance.TryGetValue(key, out var value) ? value : GetOrAddWithClosure(instance, key, valueFactory, factoryArgument); + + static TValue GetOrAddWithClosure(ConcurrentDictionary instance, TKey key, Func valuefactory, TArg factoryargument) => instance.GetOrAdd(key, key => valuefactory(key, factoryargument)); + } +} +#endif diff --git a/test/Npgsql.Tests/Types/ArrayTests.cs b/test/Npgsql.Tests/Types/ArrayTests.cs index 043f19508d..5e56c75c50 100644 --- a/test/Npgsql.Tests/Types/ArrayTests.cs +++ b/test/Npgsql.Tests/Types/ArrayTests.cs @@ -290,7 +290,7 @@ public async Task Read_null_as_non_nullable_array_throws() Assert.That( () => reader.GetFieldValue(0), Throws.Exception.TypeOf() - .With.Message.EqualTo(ArrayHandler.ReadNonNullableCollectionWithNullsExceptionMessage)); + .With.Message.EqualTo(ArrayHandlerCore.ReadNonNullableCollectionWithNullsExceptionMessage)); } @@ -309,7 +309,7 @@ public async Task Read_null_as_non_nullable_list_throws() Assert.That( () => reader.GetFieldValue>(0), Throws.Exception.TypeOf() - .With.Message.EqualTo(ArrayHandler.ReadNonNullableCollectionWithNullsExceptionMessage)); + .With.Message.EqualTo(ArrayHandlerCore.ReadNonNullableCollectionWithNullsExceptionMessage)); } [Test, Description("Roundtrips a large, one-dimensional array of ints that will be chunked")] diff --git a/test/Npgsql.Tests/Types/BitStringTests.cs b/test/Npgsql.Tests/Types/BitStringTests.cs index ac02dab77e..4a22a2a9e6 100644 --- a/test/Npgsql.Tests/Types/BitStringTests.cs +++ b/test/Npgsql.Tests/Types/BitStringTests.cs @@ -98,6 +98,24 @@ public async Task Array_of_single_bits() Assert.That(reader.GetFieldType(0), Is.EqualTo(typeof(Array))); } + [Test] + public async Task Array_of_single_bits_and_null() + { + var dataSource = CreateDataSource(builder => builder.ArrayNullabilityMode = ArrayNullabilityMode.Always); + using var conn = await dataSource.OpenConnectionAsync(); + using var cmd = new NpgsqlCommand("SELECT @p::BIT(1)[]", conn); + var expected 
= new bool?[] { true, false, null }; + var p = new NpgsqlParameter("p", NpgsqlDbType.Array | NpgsqlDbType.Bit) {Value = expected}; + cmd.Parameters.Add(p); + p.Value = expected; + using var reader = await cmd.ExecuteReaderAsync(); + reader.Read(); + var x = reader.GetValue(0); + Assert.That(reader.GetValue(0), Is.EqualTo(expected)); + Assert.That(reader.GetFieldValue(0), Is.EqualTo(expected)); + Assert.That(reader.GetFieldType(0), Is.EqualTo(typeof(Array))); + } + [Test] public Task Write_as_string() => AssertTypeWrite("010101", "010101", "bit varying", NpgsqlDbType.Varbit, isDefault: false); @@ -111,4 +129,4 @@ public Task Read_as_string_is_not_supported() => AssertTypeUnsupportedRead("010101", "bit varying"); public BitStringTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} -} \ No newline at end of file +} From e81b12d918b071132ed009e57a33278075556372 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Fri, 3 Mar 2023 19:15:30 +0100 Subject: [PATCH 089/761] Introduce NpgsqlSlimDataSourceBuilder (#4967) Part of #4965 --- Npgsql.sln.DotSettings | 1 + src/Npgsql/NpgsqlConnection.cs | 26 +- src/Npgsql/NpgsqlDataSource.cs | 2 - src/Npgsql/NpgsqlDataSourceBuilder.cs | 227 ++------- src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 460 ++++++++++++++++++ src/Npgsql/PublicAPI.Unshipped.txt | 29 +- .../JsonTypeHandlerResolverFactory.cs | 6 +- test/Npgsql.NativeAotTests/Program.cs | 5 +- test/Npgsql.Tests/ConnectionTests.cs | 2 +- test/Npgsql.Tests/Support/TestBase.cs | 46 +- test/Npgsql.Tests/Types/DateTimeTests.cs | 17 +- test/Npgsql.Tests/Types/JsonTests.cs | 40 +- test/Npgsql.Tests/Types/MultirangeTests.cs | 16 - test/Npgsql.Tests/Types/RangeTests.cs | 26 +- 14 files changed, 607 insertions(+), 296 deletions(-) create mode 100644 src/Npgsql/NpgsqlSlimDataSourceBuilder.cs diff --git a/Npgsql.sln.DotSettings b/Npgsql.sln.DotSettings index 51e889192d..246b17c72b 100644 --- a/Npgsql.sln.DotSettings +++ b/Npgsql.sln.DotSettings @@ -91,6 +91,7 @@ True True True 
+ True True True True diff --git a/src/Npgsql/NpgsqlConnection.cs b/src/Npgsql/NpgsqlConnection.cs index 2c785b9bef..d7b8b404eb 100644 --- a/src/Npgsql/NpgsqlConnection.cs +++ b/src/Npgsql/NpgsqlConnection.cs @@ -15,10 +15,8 @@ using System.Transactions; using Microsoft.Extensions.Logging; using Npgsql.Internal; -using Npgsql.NameTranslation; using Npgsql.TypeMapping; using Npgsql.Util; -using NpgsqlTypes; using IsolationLevel = System.Data.IsolationLevel; namespace Npgsql; @@ -92,6 +90,8 @@ internal NpgsqlDataSource NpgsqlDataSource public INpgsqlTypeMapper TypeMapper => throw new NotSupportedException(); + static ICloningInstantiator? _cloningInstantiator; + /// /// The default TCP/IP port for PostgreSQL. /// @@ -219,6 +219,9 @@ void SetupDataSource() dataSourceBuilder.EnableParameterLogging(NpgsqlLoggingConfiguration.GlobalIsParameterLoggingEnabled); var newDataSource = dataSourceBuilder.Build(); + // See Clone() on the following line: + _cloningInstantiator = new CloningInstantiator(); + _dataSource = PoolManager.Pools.GetOrAdd(canonical, newDataSource); if (_dataSource == newDataSource) { @@ -1817,8 +1820,14 @@ object ICloneable.Clone() { CheckDisposed(); + // For NativeAOT code size reduction, we avoid instantiating a connection here directly with + // `new NpgsqlConnection(_connectionString)`, since that would bring in the default data source builder, and with it various + // features which significantly increase binary size (ranges, System.Text.Json...). Instead, we pass through a "cloning + // instantiator" abstraction, where the implementation only ever gets set if SetupDataSource above is called (in which case the + // default data source is brought in anyway). + Debug.Assert(_dataSource is not null || _cloningInstantiator is not null); var conn = _dataSource is null - ? new NpgsqlConnection(_connectionString) + ? 
_cloningInstantiator!.Instantiate(_connectionString) : _dataSource.CreateConnection(); conn.ProvideClientCertificatesCallback = ProvideClientCertificatesCallback; @@ -1859,6 +1868,17 @@ public NpgsqlConnection CloneWith(string connectionString) }; } + interface ICloningInstantiator + { + public NpgsqlConnection Instantiate(string connectionString); + } + + sealed class CloningInstantiator : ICloningInstantiator + { + public NpgsqlConnection Instantiate(string connectionString) + => new(connectionString); + } + /// /// This method changes the current database by disconnecting from the actual /// database and connecting to the specified. diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index ebc9aa8fe5..94dd64732b 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -4,7 +4,6 @@ using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.Net.Security; -using System.Runtime.CompilerServices; using System.Security.Cryptography.X509Certificates; using System.Threading; using System.Threading.Tasks; @@ -14,7 +13,6 @@ using Npgsql.Internal.TypeHandling; using Npgsql.Internal.TypeMapping; using Npgsql.Properties; -using Npgsql.TypeMapping; using Npgsql.Util; namespace Npgsql; diff --git a/src/Npgsql/NpgsqlDataSourceBuilder.cs b/src/Npgsql/NpgsqlDataSourceBuilder.cs index b9a77f67e6..192e071f08 100644 --- a/src/Npgsql/NpgsqlDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlDataSourceBuilder.cs @@ -1,63 +1,47 @@ using System; -using System.Collections.Generic; using System.Diagnostics.CodeAnalysis; using System.Net.Security; -using System.Reflection; using System.Security.Cryptography.X509Certificates; using System.Threading; using System.Threading.Tasks; using Microsoft.Extensions.Logging; using Npgsql.Internal.TypeHandling; -using Npgsql.Internal.TypeMapping; -using Npgsql.Properties; using Npgsql.TypeMapping; -using NpgsqlTypes; namespace Npgsql; /// /// Provides a simple API for configuring and 
creating an , from which database connections can be obtained. /// -public class NpgsqlDataSourceBuilder : INpgsqlTypeMapper +public sealed class NpgsqlDataSourceBuilder : INpgsqlTypeMapper { - ILoggerFactory? _loggerFactory; - bool _sensitiveDataLoggingEnabled; - - RemoteCertificateValidationCallback? _userCertificateValidationCallback; - Action? _clientCertificatesCallback; - - Func? _rootCertificateCallback; - - Func>? _periodicPasswordProvider; - TimeSpan _periodicPasswordSuccessRefreshInterval, _periodicPasswordFailureRefreshInterval; - - readonly List _resolverFactories = new(); - readonly Dictionary _userTypeMappings = new(); + readonly NpgsqlSlimDataSourceBuilder _internalBuilder; /// - public INpgsqlNameTranslator DefaultNameTranslator { get; set; } = GlobalTypeMapper.Instance.DefaultNameTranslator; - - Action? _syncConnectionInitializer; - Func? _asyncConnectionInitializer; + public INpgsqlNameTranslator DefaultNameTranslator + { + get => _internalBuilder.DefaultNameTranslator; + set => _internalBuilder.DefaultNameTranslator = value; + } /// /// A connection string builder that can be used to configured the connection string on the builder. /// - public NpgsqlConnectionStringBuilder ConnectionStringBuilder { get; } + public NpgsqlConnectionStringBuilder ConnectionStringBuilder => _internalBuilder.ConnectionStringBuilder; /// /// Returns the connection string, as currently configured on the builder. /// - public string ConnectionString => ConnectionStringBuilder.ToString(); + public string ConnectionString => _internalBuilder.ConnectionString; /// /// Constructs a new , optionally starting out from the given . /// public NpgsqlDataSourceBuilder(string? connectionString = null) { - ConnectionStringBuilder = new NpgsqlConnectionStringBuilder(connectionString); + _internalBuilder = new(connectionString); - ResetTypeMappings(); + AddDefaultFeatures(); } /// @@ -67,7 +51,7 @@ public NpgsqlDataSourceBuilder(string? 
connectionString = null) /// The same builder instance so that multiple calls can be chained. public NpgsqlDataSourceBuilder UseLoggerFactory(ILoggerFactory? loggerFactory) { - _loggerFactory = loggerFactory; + _internalBuilder.UseLoggerFactory(loggerFactory); return this; } @@ -80,7 +64,7 @@ public NpgsqlDataSourceBuilder UseLoggerFactory(ILoggerFactory? loggerFactory) /// The same builder instance so that multiple calls can be chained. public NpgsqlDataSourceBuilder EnableParameterLogging(bool parameterLoggingEnabled = true) { - _sensitiveDataLoggingEnabled = parameterLoggingEnabled; + _internalBuilder.EnableParameterLogging(parameterLoggingEnabled); return this; } @@ -101,11 +85,9 @@ public NpgsqlDataSourceBuilder EnableParameterLogging(bool parameterLoggingEnabl /// /// /// The same builder instance so that multiple calls can be chained. - public NpgsqlDataSourceBuilder UseUserCertificateValidationCallback( - RemoteCertificateValidationCallback userCertificateValidationCallback) + public NpgsqlDataSourceBuilder UseUserCertificateValidationCallback(RemoteCertificateValidationCallback userCertificateValidationCallback) { - _userCertificateValidationCallback = userCertificateValidationCallback; - + _internalBuilder.UseUserCertificateValidationCallback(userCertificateValidationCallback); return this; } @@ -116,11 +98,8 @@ public NpgsqlDataSourceBuilder UseUserCertificateValidationCallback( /// The same builder instance so that multiple calls can be chained. public NpgsqlDataSourceBuilder UseClientCertificate(X509Certificate? clientCertificate) { - if (clientCertificate is null) - return UseClientCertificatesCallback(null); - - var clientCertificates = new X509CertificateCollection { clientCertificate }; - return UseClientCertificates(clientCertificates); + _internalBuilder.UseClientCertificate(clientCertificate); + return this; } /// @@ -129,7 +108,10 @@ public NpgsqlDataSourceBuilder UseClientCertificate(X509Certificate? 
clientCerti /// The client certificate collection to be sent to PostgreSQL when opening a connection. /// The same builder instance so that multiple calls can be chained. public NpgsqlDataSourceBuilder UseClientCertificates(X509CertificateCollection? clientCertificates) - => UseClientCertificatesCallback(clientCertificates is null ? null : certs => certs.AddRange(clientCertificates)); + { + _internalBuilder.UseClientCertificates(clientCertificates); + return this; + } /// /// Specifies a callback to modify the collection of SSL/TLS client certificates which Npgsql will send to PostgreSQL for @@ -150,8 +132,7 @@ public NpgsqlDataSourceBuilder UseClientCertificates(X509CertificateCollection? /// The same builder instance so that multiple calls can be chained. public NpgsqlDataSourceBuilder UseClientCertificatesCallback(Action? clientCertificatesCallback) { - _clientCertificatesCallback = clientCertificatesCallback; - + _internalBuilder.UseClientCertificatesCallback(clientCertificatesCallback); return this; } @@ -161,9 +142,10 @@ public NpgsqlDataSourceBuilder UseClientCertificatesCallback(ActionThe CA certificate. /// The same builder instance so that multiple calls can be chained. public NpgsqlDataSourceBuilder UseRootCertificate(X509Certificate2? rootCertificate) - => rootCertificate is null - ? UseRootCertificateCallback(null) - : UseRootCertificateCallback(() => rootCertificate); + { + _internalBuilder.UseRootCertificate(rootCertificate); + return this; + } /// /// Specifies a callback that will be used to validate SSL certificate, received from the server. @@ -177,8 +159,7 @@ public NpgsqlDataSourceBuilder UseRootCertificate(X509Certificate2? rootCertific /// public NpgsqlDataSourceBuilder UseRootCertificateCallback(Func? 
rootCertificateCallback) { - _rootCertificateCallback = rootCertificateCallback; - + _internalBuilder.UseRootCertificateCallback(rootCertificateCallback); return this; } @@ -207,17 +188,7 @@ public NpgsqlDataSourceBuilder UsePeriodicPasswordProvider( TimeSpan successRefreshInterval, TimeSpan failureRefreshInterval) { - if (successRefreshInterval < TimeSpan.Zero) - throw new ArgumentException( - string.Format(NpgsqlStrings.ArgumentMustBePositive, nameof(successRefreshInterval)), nameof(successRefreshInterval)); - if (failureRefreshInterval < TimeSpan.Zero) - throw new ArgumentException( - string.Format(NpgsqlStrings.ArgumentMustBePositive, nameof(failureRefreshInterval)), nameof(failureRefreshInterval)); - - _periodicPasswordProvider = passwordProvider; - _periodicPasswordSuccessRefreshInterval = successRefreshInterval; - _periodicPasswordFailureRefreshInterval = failureRefreshInterval; - + _internalBuilder.UsePeriodicPasswordProvider(passwordProvider, successRefreshInterval, failureRefreshInterval); return this; } @@ -227,46 +198,26 @@ public NpgsqlDataSourceBuilder UsePeriodicPasswordProvider( /// public void AddTypeResolverFactory(TypeHandlerResolverFactory resolverFactory) - => _resolverFactories.Insert(0, resolverFactory); + => _internalBuilder.AddTypeResolverFactory(resolverFactory); /// public INpgsqlTypeMapper MapEnum(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) where TEnum : struct, Enum { - if (pgName != null && pgName.Trim() == "") - throw new ArgumentException("pgName can't be empty", nameof(pgName)); - - nameTranslator ??= DefaultNameTranslator; - pgName ??= GetPgName(typeof(TEnum), nameTranslator); - - _userTypeMappings[pgName] = new UserEnumTypeMapping(pgName, nameTranslator); + _internalBuilder.MapEnum(pgName, nameTranslator); return this; } /// public bool UnmapEnum(string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) where TEnum : struct, Enum - { - if (pgName != null && pgName.Trim() == "") - throw new ArgumentException("pgName can't be empty", nameof(pgName)); - - nameTranslator ??= DefaultNameTranslator; - pgName ??= GetPgName(typeof(TEnum), nameTranslator); - - return _userTypeMappings.Remove(pgName); - } + => _internalBuilder.UnmapEnum(pgName, nameTranslator); /// [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] public INpgsqlTypeMapper MapComposite(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) { - if (pgName != null && pgName.Trim() == "") - throw new ArgumentException("pgName can't be empty", nameof(pgName)); - - nameTranslator ??= DefaultNameTranslator; - pgName ??= GetPgName(typeof(T), nameTranslator); - - _userTypeMappings[pgName] = new UserCompositeTypeMapping(pgName, nameTranslator); + _internalBuilder.MapComposite(pgName, nameTranslator); return this; } @@ -274,57 +225,26 @@ public INpgsqlTypeMapper MapComposite(string? pgName = null, INpgsqlNameTrans [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] public INpgsqlTypeMapper MapComposite(Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) { - var openMethod = typeof(NpgsqlDataSourceBuilder).GetMethod(nameof(MapComposite), new[] { typeof(string), typeof(INpgsqlNameTranslator) })!; - var method = openMethod.MakeGenericMethod(clrType); - method.Invoke(this, new object?[] { pgName, nameTranslator }); - + _internalBuilder.MapComposite(clrType, pgName, nameTranslator); return this; } /// [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] public bool UnmapComposite(string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) - => UnmapComposite(typeof(T), pgName, nameTranslator); + => _internalBuilder.UnmapComposite(pgName, nameTranslator); /// [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] public bool UnmapComposite(Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) - { - if (pgName != null && pgName.Trim() == "") - throw new ArgumentException("pgName can't be empty", nameof(pgName)); - - nameTranslator ??= DefaultNameTranslator; - pgName ??= GetPgName(clrType, nameTranslator); - - return _userTypeMappings.Remove(pgName); - } + => _internalBuilder.UnmapComposite(clrType, pgName, nameTranslator); void INpgsqlTypeMapper.Reset() - => ResetTypeMappings(); - - void ResetTypeMappings() { - var globalMapper = GlobalTypeMapper.Instance; - globalMapper.Lock.EnterReadLock(); - try - { - _resolverFactories.Clear(); - foreach (var resolverFactory in globalMapper.ResolverFactories) - _resolverFactories.Add(resolverFactory); - - _userTypeMappings.Clear(); - foreach (var kv in globalMapper.UserTypeMappings) - _userTypeMappings[kv.Key] = kv.Value; - } - finally - { - globalMapper.Lock.ExitReadLock(); - } - } + ((INpgsqlTypeMapper)_internalBuilder).Reset(); - static string GetPgName(Type clrType, INpgsqlNameTranslator nameTranslator) - => clrType.GetCustomAttribute()?.PgName - ?? nameTranslator.TranslateTypeName(clrType.Name); + AddDefaultFeatures(); + } #endregion Type mapping @@ -353,12 +273,7 @@ public NpgsqlDataSourceBuilder UsePhysicalConnectionInitializer( Action? connectionInitializer, Func? 
connectionInitializerAsync) { - if (connectionInitializer is null != connectionInitializerAsync is null) - throw new ArgumentException(NpgsqlStrings.SyncAndAsyncConnectionInitializersRequired); - - _syncConnectionInitializer = connectionInitializer; - _asyncConnectionInitializer = connectionInitializerAsync; - + _internalBuilder.UsePhysicalConnectionInitializer(connectionInitializer, connectionInitializerAsync); return this; } @@ -366,69 +281,19 @@ public NpgsqlDataSourceBuilder UsePhysicalConnectionInitializer( /// Builds and returns an which is ready for use. /// public NpgsqlDataSource Build() - { - var config = PrepareConfiguration(); - - if (ConnectionStringBuilder.Host!.Contains(",")) - { - ValidateMultiHost(); - - return new NpgsqlMultiHostDataSource(ConnectionStringBuilder, config); - } - - return ConnectionStringBuilder.Multiplexing - ? new MultiplexingDataSource(ConnectionStringBuilder, config) - : ConnectionStringBuilder.Pooling - ? new PoolingDataSource(ConnectionStringBuilder, config) - : new UnpooledDataSource(ConnectionStringBuilder, config); - } + => _internalBuilder.Build(); /// /// Builds and returns a which is ready for use for load-balancing and failover scenarios. /// public NpgsqlMultiHostDataSource BuildMultiHost() - { - var config = PrepareConfiguration(); - - ValidateMultiHost(); - - return new(ConnectionStringBuilder, config); - } - - NpgsqlDataSourceConfiguration PrepareConfiguration() - { - ConnectionStringBuilder.PostProcessAndValidate(); - - if (_periodicPasswordProvider is not null && - (ConnectionStringBuilder.Password is not null || ConnectionStringBuilder.Passfile is not null)) - { - throw new NotSupportedException(NpgsqlStrings.CannotSetBothPasswordProviderAndPassword); - } - - return new( - _loggerFactory is null - ? 
NpgsqlLoggingConfiguration.NullConfiguration - : new NpgsqlLoggingConfiguration(_loggerFactory, _sensitiveDataLoggingEnabled), - _userCertificateValidationCallback, - _clientCertificatesCallback, - _periodicPasswordProvider, - _periodicPasswordSuccessRefreshInterval, - _periodicPasswordFailureRefreshInterval, - _resolverFactories, - _userTypeMappings, - DefaultNameTranslator, - _syncConnectionInitializer, - _asyncConnectionInitializer, - _rootCertificateCallback); - } + => _internalBuilder.BuildMultiHost(); - void ValidateMultiHost() + void AddDefaultFeatures() { - if (ConnectionStringBuilder.TargetSessionAttributes is not null) - throw new InvalidOperationException(NpgsqlStrings.CannotSpecifyTargetSessionAttributes); - if (ConnectionStringBuilder.Multiplexing) - throw new NotSupportedException("Multiplexing is not supported with multiple hosts"); - if (ConnectionStringBuilder.ReplicationMode != ReplicationMode.Off) - throw new NotSupportedException("Replication is not supported with multiple hosts"); + // If a resolver factory is already registered, we don't replace it. This is to allow customized factories (e.g. JSON with + // specific settings) to flow from the global type mapper to the data source. 
+ _internalBuilder.AddTypeResolverFactory(new JsonTypeHandlerResolverFactory(), replaceIfExists: false); + _internalBuilder.AddTypeResolverFactory(new RangeTypeHandlerResolverFactory(), replaceIfExists: false); } } diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs new file mode 100644 index 0000000000..11d1ad6109 --- /dev/null +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -0,0 +1,460 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; +using System.Net.Security; +using System.Reflection; +using System.Security.Cryptography.X509Certificates; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.Extensions.Logging; +using Npgsql.Internal.TypeHandling; +using Npgsql.Internal.TypeMapping; +using Npgsql.Properties; +using Npgsql.TypeMapping; +using NpgsqlTypes; + +namespace Npgsql; + +/// +/// Provides a simple API for configuring and creating an , from which database connections can be obtained. +/// +/// +/// On this builder, various features are disabled by default; unless you're looking to save on code size (e.g. when publishing with +/// NativeAOT), use instead. +/// +public sealed class NpgsqlSlimDataSourceBuilder : INpgsqlTypeMapper +{ + ILoggerFactory? _loggerFactory; + bool _sensitiveDataLoggingEnabled; + + RemoteCertificateValidationCallback? _userCertificateValidationCallback; + Action? _clientCertificatesCallback; + + Func? _rootCertificateCallback; + + Func>? _periodicPasswordProvider; + TimeSpan _periodicPasswordSuccessRefreshInterval, _periodicPasswordFailureRefreshInterval; + + readonly List _resolverFactories = new(); + readonly Dictionary _userTypeMappings = new(); + + /// + public INpgsqlNameTranslator DefaultNameTranslator { get; set; } = GlobalTypeMapper.Instance.DefaultNameTranslator; + + Action? _syncConnectionInitializer; + Func? 
_asyncConnectionInitializer; + + /// + /// A connection string builder that can be used to configured the connection string on the builder. + /// + public NpgsqlConnectionStringBuilder ConnectionStringBuilder { get; } + + /// + /// Returns the connection string, as currently configured on the builder. + /// + public string ConnectionString => ConnectionStringBuilder.ToString(); + + /// + /// Constructs a new , optionally starting out from the given + /// . + /// + public NpgsqlSlimDataSourceBuilder(string? connectionString = null) + { + ConnectionStringBuilder = new NpgsqlConnectionStringBuilder(connectionString); + + ResetTypeMappings(); + } + + /// + /// Sets the that will be used for logging. + /// + /// The logger factory to be used. + /// The same builder instance so that multiple calls can be chained. + public NpgsqlSlimDataSourceBuilder UseLoggerFactory(ILoggerFactory? loggerFactory) + { + _loggerFactory = loggerFactory; + return this; + } + + /// + /// Enables parameters to be included in logging. This includes potentially sensitive information from data sent to PostgreSQL. + /// You should only enable this flag in development, or if you have the appropriate security measures in place based on the + /// sensitivity of this data. + /// + /// If , then sensitive data is logged. + /// The same builder instance so that multiple calls can be chained. + public NpgsqlSlimDataSourceBuilder EnableParameterLogging(bool parameterLoggingEnabled = true) + { + _sensitiveDataLoggingEnabled = parameterLoggingEnabled; + return this; + } + + #region Authentication + + /// + /// When using SSL/TLS, this is a callback that allows customizing how the PostgreSQL-provided certificate is verified. This is an + /// advanced API, consider using or instead. + /// + /// The callback containing custom callback verification logic. + /// + /// + /// Cannot be used in conjunction with , or + /// . + /// + /// + /// See . 
+ /// + /// + /// The same builder instance so that multiple calls can be chained. + public NpgsqlSlimDataSourceBuilder UseUserCertificateValidationCallback( + RemoteCertificateValidationCallback userCertificateValidationCallback) + { + _userCertificateValidationCallback = userCertificateValidationCallback; + + return this; + } + + /// + /// Specifies an SSL/TLS certificate which Npgsql will send to PostgreSQL for certificate-based authentication. + /// + /// The client certificate to be sent to PostgreSQL when opening a connection. + /// The same builder instance so that multiple calls can be chained. + public NpgsqlSlimDataSourceBuilder UseClientCertificate(X509Certificate? clientCertificate) + { + if (clientCertificate is null) + return UseClientCertificatesCallback(null); + + var clientCertificates = new X509CertificateCollection { clientCertificate }; + return UseClientCertificates(clientCertificates); + } + + /// + /// Specifies a collection of SSL/TLS certificates which Npgsql will send to PostgreSQL for certificate-based authentication. + /// + /// The client certificate collection to be sent to PostgreSQL when opening a connection. + /// The same builder instance so that multiple calls can be chained. + public NpgsqlSlimDataSourceBuilder UseClientCertificates(X509CertificateCollection? clientCertificates) + => UseClientCertificatesCallback(clientCertificates is null ? null : certs => certs.AddRange(clientCertificates)); + + /// + /// Specifies a callback to modify the collection of SSL/TLS client certificates which Npgsql will send to PostgreSQL for + /// certificate-based authentication. This is an advanced API, consider using or + /// instead. + /// + /// The callback to modify the client certificate collection. + /// + /// + /// The callback is invoked every time a physical connection is opened, and is therefore suitable for rotating short-lived client + /// certificates. 
Simply make sure the certificate collection argument has the up-to-date certificate(s). + /// + /// + /// The callback's collection argument already includes any client certificates specified via the connection string or environment + /// variables. + /// + /// + /// The same builder instance so that multiple calls can be chained. + public NpgsqlSlimDataSourceBuilder UseClientCertificatesCallback(Action? clientCertificatesCallback) + { + _clientCertificatesCallback = clientCertificatesCallback; + + return this; + } + + /// + /// Sets the that will be used validate SSL certificate, received from the server. + /// + /// The CA certificate. + /// The same builder instance so that multiple calls can be chained. + public NpgsqlSlimDataSourceBuilder UseRootCertificate(X509Certificate2? rootCertificate) + => rootCertificate is null + ? UseRootCertificateCallback(null) + : UseRootCertificateCallback(() => rootCertificate); + + /// + /// Specifies a callback that will be used to validate SSL certificate, received from the server. + /// + /// The callback to get CA certificate. + /// The same builder instance so that multiple calls can be chained. + /// + /// This overload, which accepts a callback, is suitable for scenarios where the certificate rotates + /// and might change during the lifetime of the application. + /// When that's not the case, use the overload which directly accepts the certificate. + /// + public NpgsqlSlimDataSourceBuilder UseRootCertificateCallback(Func? rootCertificateCallback) + { + _rootCertificateCallback = rootCertificateCallback; + + return this; + } + + /// + /// Configures a periodic password provider, which is automatically called by the data source at some regular interval. This is the + /// recommended way to fetch a rotating access token. + /// + /// A callback which returns the password to be sent to PostgreSQL. + /// How long to cache the password before re-invoking the callback. 
+ /// + /// If a password refresh attempt fails, it will be re-attempted with this interval. + /// This should typically be much lower than . + /// + /// The same builder instance so that multiple calls can be chained. + /// + /// + /// The provided callback is invoked in a timer, and not when opening connections. It therefore doesn't affect opening time. + /// + /// + /// The provided cancellation token is only triggered when the entire data source is disposed. If you'd like to apply a timeout to the + /// token fetching, do so within the provided callback. + /// + /// + public NpgsqlSlimDataSourceBuilder UsePeriodicPasswordProvider( + Func>? passwordProvider, + TimeSpan successRefreshInterval, + TimeSpan failureRefreshInterval) + { + if (successRefreshInterval < TimeSpan.Zero) + throw new ArgumentException( + string.Format(NpgsqlStrings.ArgumentMustBePositive, nameof(successRefreshInterval)), nameof(successRefreshInterval)); + if (failureRefreshInterval < TimeSpan.Zero) + throw new ArgumentException( + string.Format(NpgsqlStrings.ArgumentMustBePositive, nameof(failureRefreshInterval)), nameof(failureRefreshInterval)); + + _periodicPasswordProvider = passwordProvider; + _periodicPasswordSuccessRefreshInterval = successRefreshInterval; + _periodicPasswordFailureRefreshInterval = failureRefreshInterval; + + return this; + } + + #endregion Authentication + + #region Type mapping + + /// + public void AddTypeResolverFactory(TypeHandlerResolverFactory resolverFactory) + => AddTypeResolverFactory(resolverFactory, replaceIfExists: true); + + internal void AddTypeResolverFactory(TypeHandlerResolverFactory resolverFactory, bool replaceIfExists) + { + var type = resolverFactory.GetType(); + + for (var i = 0; i < _resolverFactories.Count; i++) + { + if (_resolverFactories[i].GetType() == type) + { + if (replaceIfExists) + { + _resolverFactories.RemoveAt(i); + break; + } + + return; + } + } + + _resolverFactories.Insert(0, resolverFactory); + } + + /// + public 
INpgsqlTypeMapper MapEnum(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + where TEnum : struct, Enum + { + if (pgName != null && pgName.Trim() == "") + throw new ArgumentException("pgName can't be empty", nameof(pgName)); + + nameTranslator ??= DefaultNameTranslator; + pgName ??= GetPgName(typeof(TEnum), nameTranslator); + + _userTypeMappings[pgName] = new UserEnumTypeMapping(pgName, nameTranslator); + return this; + } + + /// + public bool UnmapEnum(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + where TEnum : struct, Enum + { + if (pgName != null && pgName.Trim() == "") + throw new ArgumentException("pgName can't be empty", nameof(pgName)); + + nameTranslator ??= DefaultNameTranslator; + pgName ??= GetPgName(typeof(TEnum), nameTranslator); + + return _userTypeMappings.Remove(pgName); + } + + /// + [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] + public INpgsqlTypeMapper MapComposite(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + { + if (pgName != null && pgName.Trim() == "") + throw new ArgumentException("pgName can't be empty", nameof(pgName)); + + nameTranslator ??= DefaultNameTranslator; + pgName ??= GetPgName(typeof(T), nameTranslator); + + _userTypeMappings[pgName] = new UserCompositeTypeMapping(pgName, nameTranslator); + return this; + } + + /// + [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] + public INpgsqlTypeMapper MapComposite(Type clrType, string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) + { + var openMethod = typeof(NpgsqlSlimDataSourceBuilder).GetMethod(nameof(MapComposite), new[] { typeof(string), typeof(INpgsqlNameTranslator) })!; + var method = openMethod.MakeGenericMethod(clrType); + method.Invoke(this, new object?[] { pgName, nameTranslator }); + + return this; + } + + /// + [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] + public bool UnmapComposite(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + => UnmapComposite(typeof(T), pgName, nameTranslator); + + /// + [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] + public bool UnmapComposite(Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + { + if (pgName != null && pgName.Trim() == "") + throw new ArgumentException("pgName can't be empty", nameof(pgName)); + + nameTranslator ??= DefaultNameTranslator; + pgName ??= GetPgName(clrType, nameTranslator); + + return _userTypeMappings.Remove(pgName); + } + + void INpgsqlTypeMapper.Reset() + => ResetTypeMappings(); + + void ResetTypeMappings() + { + var globalMapper = GlobalTypeMapper.Instance; + globalMapper.Lock.EnterReadLock(); + try + { + _resolverFactories.Clear(); + foreach (var resolverFactory in globalMapper.ResolverFactories) + _resolverFactories.Add(resolverFactory); + + _userTypeMappings.Clear(); + foreach (var kv in globalMapper.UserTypeMappings) + _userTypeMappings[kv.Key] = kv.Value; + } + finally + { + globalMapper.Lock.ExitReadLock(); + } + } + + static string GetPgName(Type clrType, INpgsqlNameTranslator nameTranslator) + => clrType.GetCustomAttribute()?.PgName + ?? nameTranslator.TranslateTypeName(clrType.Name); + + #endregion Type mapping + + /// + /// Register a connection initializer, which allows executing arbitrary commands when a physical database connection is first opened. 
+ /// + /// + /// A synchronous connection initialization lambda, which will be called from when a new physical + /// connection is opened. + /// + /// + /// An asynchronous connection initialization lambda, which will be called from + /// when a new physical connection is opened. + /// + /// + /// If an initializer is registered, both sync and async versions must be provided. If you do not use sync APIs in your code, simply + /// throw , which would also catch accidental cases of sync opening. + /// + /// + /// Take care that the setting you apply in the initializer does not get reverted when the connection is returned to the pool, since + /// Npgsql sends DISCARD ALL by default. The option can be used to + /// turn this off. + /// + /// The same builder instance so that multiple calls can be chained. + public NpgsqlSlimDataSourceBuilder UsePhysicalConnectionInitializer( + Action? connectionInitializer, + Func? connectionInitializerAsync) + { + if (connectionInitializer is null != connectionInitializerAsync is null) + throw new ArgumentException(NpgsqlStrings.SyncAndAsyncConnectionInitializersRequired); + + _syncConnectionInitializer = connectionInitializer; + _asyncConnectionInitializer = connectionInitializerAsync; + + return this; + } + + /// + /// Builds and returns an which is ready for use. + /// + public NpgsqlDataSource Build() + { + var config = PrepareConfiguration(); + + if (ConnectionStringBuilder.Host!.Contains(",")) + { + ValidateMultiHost(); + + return new NpgsqlMultiHostDataSource(ConnectionStringBuilder, config); + } + + return ConnectionStringBuilder.Multiplexing + ? new MultiplexingDataSource(ConnectionStringBuilder, config) + : ConnectionStringBuilder.Pooling + ? new PoolingDataSource(ConnectionStringBuilder, config) + : new UnpooledDataSource(ConnectionStringBuilder, config); + } + + /// + /// Builds and returns a which is ready for use for load-balancing and failover scenarios. 
+ /// + public NpgsqlMultiHostDataSource BuildMultiHost() + { + var config = PrepareConfiguration(); + + ValidateMultiHost(); + + return new(ConnectionStringBuilder, config); + } + + NpgsqlDataSourceConfiguration PrepareConfiguration() + { + ConnectionStringBuilder.PostProcessAndValidate(); + + if (_periodicPasswordProvider is not null && + (ConnectionStringBuilder.Password is not null || ConnectionStringBuilder.Passfile is not null)) + { + throw new NotSupportedException(NpgsqlStrings.CannotSetBothPasswordProviderAndPassword); + } + + return new( + _loggerFactory is null + ? NpgsqlLoggingConfiguration.NullConfiguration + : new NpgsqlLoggingConfiguration(_loggerFactory, _sensitiveDataLoggingEnabled), + _userCertificateValidationCallback, + _clientCertificatesCallback, + _periodicPasswordProvider, + _periodicPasswordSuccessRefreshInterval, + _periodicPasswordFailureRefreshInterval, + _resolverFactories, + _userTypeMappings, + DefaultNameTranslator, + _syncConnectionInitializer, + _asyncConnectionInitializer, + _rootCertificateCallback); + } + + void ValidateMultiHost() + { + if (ConnectionStringBuilder.TargetSessionAttributes is not null) + throw new InvalidOperationException(NpgsqlStrings.CannotSpecifyTargetSessionAttributes); + if (ConnectionStringBuilder.Multiplexing) + throw new NotSupportedException("Multiplexing is not supported with multiple hosts"); + if (ConnectionStringBuilder.ReplicationMode != ReplicationMode.Off) + throw new NotSupportedException("Replication is not supported with multiple hosts"); + } +} diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index fa22d1fa41..d8d07a072f 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -1,11 +1,36 @@ #nullable enable -Npgsql.NpgsqlDataSourceBuilder.UseRootCertificate(System.Security.Cryptography.X509Certificates.X509Certificate2? rootCertificate) -> Npgsql.NpgsqlDataSourceBuilder! 
-Npgsql.NpgsqlDataSourceBuilder.UseRootCertificateCallback(System.Func? rootCertificateCallback) -> Npgsql.NpgsqlDataSourceBuilder! override Npgsql.NpgsqlBatch.Dispose() -> void Npgsql.NpgsqlBinaryImporter.WriteRow(params object?[]! values) -> void Npgsql.NpgsqlBinaryImporter.WriteRowAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken), params object?[]! values) -> System.Threading.Tasks.Task! +Npgsql.NpgsqlDataSourceBuilder.UseRootCertificate(System.Security.Cryptography.X509Certificates.X509Certificate2? rootCertificate) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.UseRootCertificateCallback(System.Func? rootCertificateCallback) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlJsonExtensions Npgsql.NpgsqlRangeExtensions +Npgsql.NpgsqlSlimDataSourceBuilder +Npgsql.NpgsqlSlimDataSourceBuilder.AddTypeResolverFactory(Npgsql.Internal.TypeHandling.TypeHandlerResolverFactory! resolverFactory) -> void +Npgsql.NpgsqlSlimDataSourceBuilder.Build() -> Npgsql.NpgsqlDataSource! +Npgsql.NpgsqlSlimDataSourceBuilder.BuildMultiHost() -> Npgsql.NpgsqlMultiHostDataSource! +Npgsql.NpgsqlSlimDataSourceBuilder.ConnectionString.get -> string! +Npgsql.NpgsqlSlimDataSourceBuilder.ConnectionStringBuilder.get -> Npgsql.NpgsqlConnectionStringBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.DefaultNameTranslator.get -> Npgsql.INpgsqlNameTranslator! +Npgsql.NpgsqlSlimDataSourceBuilder.DefaultNameTranslator.set -> void +Npgsql.NpgsqlSlimDataSourceBuilder.EnableParameterLogging(bool parameterLoggingEnabled = true) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.MapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +Npgsql.NpgsqlSlimDataSourceBuilder.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! 
+Npgsql.NpgsqlSlimDataSourceBuilder.MapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +Npgsql.NpgsqlSlimDataSourceBuilder.NpgsqlSlimDataSourceBuilder(string? connectionString = null) -> void +Npgsql.NpgsqlSlimDataSourceBuilder.UnmapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool +Npgsql.NpgsqlSlimDataSourceBuilder.UnmapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool +Npgsql.NpgsqlSlimDataSourceBuilder.UnmapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool +Npgsql.NpgsqlSlimDataSourceBuilder.UseClientCertificate(System.Security.Cryptography.X509Certificates.X509Certificate? clientCertificate) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.UseClientCertificates(System.Security.Cryptography.X509Certificates.X509CertificateCollection? clientCertificates) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.UseClientCertificatesCallback(System.Action? clientCertificatesCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.UseLoggerFactory(Microsoft.Extensions.Logging.ILoggerFactory? loggerFactory) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.UsePeriodicPasswordProvider(System.Func>? passwordProvider, System.TimeSpan successRefreshInterval, System.TimeSpan failureRefreshInterval) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.UsePhysicalConnectionInitializer(System.Action? connectionInitializer, System.Func? connectionInitializerAsync) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.UseRootCertificate(System.Security.Cryptography.X509Certificates.X509Certificate2? rootCertificate) -> Npgsql.NpgsqlSlimDataSourceBuilder! 
+Npgsql.NpgsqlSlimDataSourceBuilder.UseRootCertificateCallback(System.Func? rootCertificateCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.UseUserCertificateValidationCallback(System.Net.Security.RemoteCertificateValidationCallback! userCertificateValidationCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! static Npgsql.NpgsqlJsonExtensions.UseSystemTextJson(this Npgsql.TypeMapping.INpgsqlTypeMapper! mapper, System.Text.Json.JsonSerializerOptions? serializerOptions = null, System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! static Npgsql.NpgsqlRangeExtensions.UseRange(this Npgsql.TypeMapping.INpgsqlTypeMapper! mapper) -> Npgsql.TypeMapping.INpgsqlTypeMapper! *REMOVED*static NpgsqlTypes.NpgsqlBox.Parse(string! s) -> NpgsqlTypes.NpgsqlBox diff --git a/src/Npgsql/TypeMapping/JsonTypeHandlerResolverFactory.cs b/src/Npgsql/TypeMapping/JsonTypeHandlerResolverFactory.cs index 7568e138be..4fb7b03eb3 100644 --- a/src/Npgsql/TypeMapping/JsonTypeHandlerResolverFactory.cs +++ b/src/Npgsql/TypeMapping/JsonTypeHandlerResolverFactory.cs @@ -13,9 +13,9 @@ sealed class JsonTypeHandlerResolverFactory : TypeHandlerResolverFactory readonly Dictionary? _userClrTypes; public JsonTypeHandlerResolverFactory( - Type[]? jsonbClrTypes, - Type[]? jsonClrTypes, - JsonSerializerOptions? settings) + Type[]? jsonbClrTypes = null, + Type[]? jsonClrTypes = null, + JsonSerializerOptions? settings = null) { _settings = settings ?? new JsonSerializerOptions(); diff --git a/test/Npgsql.NativeAotTests/Program.cs b/test/Npgsql.NativeAotTests/Program.cs index 59e6daf35b..098c978296 100644 --- a/test/Npgsql.NativeAotTests/Program.cs +++ b/test/Npgsql.NativeAotTests/Program.cs @@ -4,7 +4,8 @@ var connectionString = Environment.GetEnvironmentVariable("NPGSQL_TEST_DB") ?? 
"Server=localhost;Username=npgsql_tests;Password=npgsql_tests;Database=npgsql_tests;Timeout=0;Command Timeout=0"; -await using var dataSource = NpgsqlDataSource.Create(connectionString); +var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(connectionString); +await using var dataSource = dataSourceBuilder.Build(); await using var conn = dataSource.CreateConnection(); await conn.OpenAsync(); @@ -15,4 +16,4 @@ var value = reader.GetFieldValue(0); if (value != "Hello World") - throw new Exception($"Got {value} instead of the expected 'Hello World'"); \ No newline at end of file + throw new Exception($"Got {value} instead of the expected 'Hello World'"); diff --git a/test/Npgsql.Tests/ConnectionTests.cs b/test/Npgsql.Tests/ConnectionTests.cs index 9e93f2486b..fd76a31284 100644 --- a/test/Npgsql.Tests/ConnectionTests.cs +++ b/test/Npgsql.Tests/ConnectionTests.cs @@ -1144,7 +1144,7 @@ public async Task Exception_during_close() [Test, Description("Some pseudo-PG database don't support pg_type loading, we have a minimal DatabaseInfo for this")] public async Task NoTypeLoading() { - await using var dataSource = CreateDataSourceWithRanges(csb => csb.ServerCompatibilityMode = ServerCompatibilityMode.NoTypeLoading); + await using var dataSource = CreateDataSource(csb => csb.ServerCompatibilityMode = ServerCompatibilityMode.NoTypeLoading); await using var conn = await dataSource.OpenConnectionAsync(); Assert.That(await conn.ExecuteScalarAsync("SELECT 8"), Is.EqualTo(8)); diff --git a/test/Npgsql.Tests/Support/TestBase.cs b/test/Npgsql.Tests/Support/TestBase.cs index 5dd5da05dd..04756dd15c 100644 --- a/test/Npgsql.Tests/Support/TestBase.cs +++ b/test/Npgsql.Tests/Support/TestBase.cs @@ -283,15 +283,17 @@ void CheckInference() } } - public async Task AssertTypeUnsupported(T value, string sqlLiteral, string pgTypeName) + public async Task AssertTypeUnsupported(T value, string sqlLiteral, string pgTypeName, NpgsqlDataSource? 
dataSource = null) { - await AssertTypeUnsupportedRead(sqlLiteral, pgTypeName); - await AssertTypeUnsupportedWrite(value, pgTypeName); + await AssertTypeUnsupportedRead(sqlLiteral, pgTypeName, dataSource); + await AssertTypeUnsupportedWrite(value, pgTypeName, dataSource); } - public async Task AssertTypeUnsupportedRead(string sqlLiteral, string pgTypeName) + public async Task AssertTypeUnsupportedRead(string sqlLiteral, string pgTypeName, NpgsqlDataSource? dataSource = null) { - await using var conn = await OpenConnectionAsync(); + dataSource ??= DefaultDataSource; + + await using var conn = await dataSource.OpenConnectionAsync(); await using var cmd = new NpgsqlCommand($"SELECT '{sqlLiteral}'::{pgTypeName}", conn); await using var reader = await cmd.ExecuteReaderAsync(); await reader.ReadAsync(); @@ -299,13 +301,15 @@ public async Task AssertTypeUnsupportedRead(string sqlLiteral, string pgTypeName Assert.That(() => reader.GetValue(0), Throws.Exception.TypeOf()); } - public Task AssertTypeUnsupportedRead(string sqlLiteral, string pgTypeName) - => AssertTypeUnsupportedRead(sqlLiteral, pgTypeName); + public Task AssertTypeUnsupportedRead(string sqlLiteral, string pgTypeName, NpgsqlDataSource? dataSource = null) + => AssertTypeUnsupportedRead(sqlLiteral, pgTypeName, dataSource); - public async Task AssertTypeUnsupportedRead(string sqlLiteral, string pgTypeName) + public async Task AssertTypeUnsupportedRead(string sqlLiteral, string pgTypeName, NpgsqlDataSource? 
dataSource = null) where TException : Exception { - await using var conn = await OpenConnectionAsync(); + dataSource ??= DefaultDataSource; + + await using var conn = await dataSource.OpenConnectionAsync(); await using var cmd = new NpgsqlCommand($"SELECT '{sqlLiteral}'::{pgTypeName}", conn); await using var reader = await cmd.ExecuteReaderAsync(); await reader.ReadAsync(); @@ -313,13 +317,15 @@ public async Task AssertTypeUnsupportedRead(string sql return Assert.Throws(() => reader.GetFieldValue(0))!; } - public Task AssertTypeUnsupportedWrite(T value, string? pgTypeName = null) - => AssertTypeUnsupportedWrite(value, pgTypeName); + public Task AssertTypeUnsupportedWrite(T value, string? pgTypeName = null, NpgsqlDataSource? dataSource = null) + => AssertTypeUnsupportedWrite(value, pgTypeName, dataSource); - public async Task AssertTypeUnsupportedWrite(T value, string? pgTypeName = null) + public async Task AssertTypeUnsupportedWrite(T value, string? pgTypeName = null, NpgsqlDataSource? dataSource = null) where TException : Exception { - await using var conn = await OpenConnectionAsync(); + dataSource ??= DefaultDataSource; + + await using var conn = await dataSource.OpenConnectionAsync(); await using var cmd = new NpgsqlCommand("SELECT $1", conn) { Parameters = { new() { Value = value } } @@ -368,15 +374,6 @@ protected virtual NpgsqlDataSource CreateDataSource(Action? 
connectionStringBuilderAction = null) - { - var connectionStringBuilder = new NpgsqlConnectionStringBuilder(ConnectionString); - connectionStringBuilderAction?.Invoke(connectionStringBuilder); - var dataSourceBuilder = new NpgsqlDataSourceBuilder(connectionStringBuilder.ConnectionString); - dataSourceBuilder.UseRange(); - return dataSourceBuilder.Build(); - } - protected static NpgsqlDataSource GetDataSource(string connectionString) { if (!DataSources.TryGetValue(connectionString, out var dataSource)) @@ -412,8 +409,11 @@ protected virtual NpgsqlDataSource CreateLoggingDataSource( return builder.Build(); } + protected NpgsqlDataSource DefaultDataSource + => GetDataSource(ConnectionString); + protected virtual NpgsqlConnection CreateConnection() - => GetDataSource(ConnectionString).CreateConnection(); + => DefaultDataSource.CreateConnection(); protected virtual NpgsqlConnection OpenConnection() { diff --git a/test/Npgsql.Tests/Types/DateTimeTests.cs b/test/Npgsql.Tests/Types/DateTimeTests.cs index d121674f94..f387387dcc 100644 --- a/test/Npgsql.Tests/Types/DateTimeTests.cs +++ b/test/Npgsql.Tests/Types/DateTimeTests.cs @@ -419,24 +419,9 @@ public Task Interval_with_months_cannot_read_as_TimeSpan() #endregion - NpgsqlDataSource DataSourceWithRanges { get; set; } = default!; - - [OneTimeSetUp] - public void OneTimeSetUp() => DataSourceWithRanges = CreateDataSourceWithRanges(); - - [OneTimeTearDown] - public async Task TearDown() - { - if (DataSourceWithRanges is not null) - { - await DataSourceWithRanges.DisposeAsync(); - DataSourceWithRanges = null!; - } - } - protected override async ValueTask OpenConnectionAsync() { - var conn = await DataSourceWithRanges.OpenConnectionAsync(); + var conn = await base.OpenConnectionAsync(); await conn.ExecuteNonQueryAsync("SET TimeZone='Europe/Berlin'"); return conn; } diff --git a/test/Npgsql.Tests/Types/JsonTests.cs b/test/Npgsql.Tests/Types/JsonTests.cs index 88511e9e67..353cb01792 100644 --- 
a/test/Npgsql.Tests/Types/JsonTests.cs +++ b/test/Npgsql.Tests/Types/JsonTests.cs @@ -64,7 +64,6 @@ public async Task Write_as_ArraySegment_of_char() [Test] public async Task As_JsonDocument() => await AssertType( - JsonDataSource, JsonDocument.Parse(@"{""K"": ""V""}"), IsJsonb ? @"{""K"": ""V""}" : @"{""K"":""V""}", PostgresType, @@ -74,16 +73,20 @@ public async Task As_JsonDocument() [Test] public async Task As_JsonDocument_supported_only_with_SystemTextJson() - => await AssertTypeUnsupported( + { + await using var slimDataSource = new NpgsqlSlimDataSourceBuilder(ConnectionString).Build(); + + await AssertTypeUnsupported( JsonDocument.Parse(@"{""K"": ""V""}"), @"{""K"": ""V""}", - PostgresType); + PostgresType, + slimDataSource); + } #if NET6_0_OR_GREATER [Test] public Task Roundtrip_JsonObject() => AssertType( - JsonDataSource, new JsonObject { ["Bar"] = 8 }, IsJsonb ? @"{""Bar"": 8}" : @"{""Bar"":8}", PostgresType, @@ -97,7 +100,6 @@ public Task Roundtrip_JsonObject() [Test] public Task Roundtrip_JsonArray() => AssertType( - JsonDataSource, new JsonArray { 1, 2, 3 }, IsJsonb ? 
"[1, 2, 3]" : "[1,2,3]", PostgresType, @@ -112,7 +114,6 @@ public Task Roundtrip_JsonArray() [Test] public async Task As_poco() => await AssertType( - JsonDataSource, new WeatherForecast { Date = new DateTime(2019, 9, 1), @@ -133,7 +134,6 @@ public async Task As_poco_long() var bigString = new string('x', Math.Max(conn.Settings.ReadBufferSize, conn.Settings.WriteBufferSize)); await AssertType( - JsonDataSource, new WeatherForecast { Date = new DateTime(2019, 9, 1), @@ -151,7 +151,10 @@ await AssertType( [Test] public async Task As_poco_supported_only_with_SystemTextJson() - => await AssertTypeUnsupported( + { + await using var slimDataSource = new NpgsqlSlimDataSourceBuilder(ConnectionString).Build(); + + await AssertTypeUnsupported( new WeatherForecast { Date = new DateTime(2019, 9, 1), @@ -159,7 +162,9 @@ public async Task As_poco_supported_only_with_SystemTextJson() TemperatureC = 10 }, @"{""Date"": ""2019-09-01T00:00:00"", ""Summary"": ""Partly cloudy"", ""TemperatureC"": 10}", - PostgresType); + PostgresType, + slimDataSource); + } record WeatherForecast { @@ -174,7 +179,7 @@ record WeatherForecast [IssueLink("https://github.com/npgsql/efcore.pg/issues/1082")] public async Task Can_read_two_json_documents() { - await using var conn = await JsonDataSource.OpenConnectionAsync(); + await using var conn = await OpenConnectionAsync(); JsonDocument car; await using (var cmd = new NpgsqlCommand(@"SELECT '{""key"" : ""foo""}'::jsonb", conn)) @@ -203,7 +208,7 @@ public async Task Write_jsonobject_array_without_npgsqldbtype() if (!IsJsonb) return; - await using var conn = await JsonDataSource.OpenConnectionAsync(); + await using var conn = await OpenConnectionAsync(); var tableName = await TestUtil.CreateTempTable(conn, "key SERIAL PRIMARY KEY, ingredients json[]"); await using var cmd = new NpgsqlCommand { Connection = conn }; @@ -277,18 +282,6 @@ await AssertTypeWrite( isNpgsqlDbTypeInferredFromClrType: false); } - [OneTimeSetUp] - public void SetUp() - { - var 
dataSourceBuilder = CreateDataSourceBuilder(); - dataSourceBuilder.UseSystemTextJson(); - JsonDataSource = dataSourceBuilder.Build(); - } - - [OneTimeTearDown] - public async Task Teardown() - => await JsonDataSource.DisposeAsync(); - public JsonTests(MultiplexingMode multiplexingMode, NpgsqlDbType npgsqlDbType) : base(multiplexingMode) { @@ -300,5 +293,4 @@ public JsonTests(MultiplexingMode multiplexingMode, NpgsqlDbType npgsqlDbType) bool IsJsonb => NpgsqlDbType == NpgsqlDbType.Jsonb; string PostgresType => IsJsonb ? "jsonb" : "json"; readonly NpgsqlDbType NpgsqlDbType; - NpgsqlDataSource JsonDataSource = default!; } diff --git a/test/Npgsql.Tests/Types/MultirangeTests.cs b/test/Npgsql.Tests/Types/MultirangeTests.cs index dbf87dace6..0162fc78ed 100644 --- a/test/Npgsql.Tests/Types/MultirangeTests.cs +++ b/test/Npgsql.Tests/Types/MultirangeTests.cs @@ -193,29 +193,13 @@ async Task WriteInternal(IList> multirange) } } - NpgsqlDataSource DataSourceWithRanges { get; set; } = default!; - [OneTimeSetUp] public async Task Setup() { - DataSourceWithRanges = CreateDataSourceWithRanges(); await using var conn = await OpenConnectionAsync(); MinimumPgVersion(conn, "14.0", "Multirange types were introduced in PostgreSQL 14"); } - [OneTimeTearDown] - public async Task TearDown() - { - if (DataSourceWithRanges is not null) - { - await DataSourceWithRanges.DisposeAsync(); - DataSourceWithRanges = null!; - } - } - - protected override ValueTask OpenConnectionAsync() - => DataSourceWithRanges.OpenConnectionAsync(); - protected override NpgsqlConnection OpenConnection() => throw new NotSupportedException(); } diff --git a/test/Npgsql.Tests/Types/RangeTests.cs b/test/Npgsql.Tests/Types/RangeTests.cs index c02781f04a..33a1365ec0 100644 --- a/test/Npgsql.Tests/Types/RangeTests.cs +++ b/test/Npgsql.Tests/Types/RangeTests.cs @@ -22,8 +22,8 @@ public async Task Range_resolution() if (IsMultiplexing) Assert.Ignore("Multiplexing, ReloadTypes"); - await using var dataSource = 
CreateDataSourceWithRanges(csb => csb.Pooling = false); - await using var conn = await dataSource.OpenConnectionAsync(); + await using var dataSource = CreateDataSource(csb => csb.Pooling = false); + await using var conn = await OpenConnectionAsync(); // Resolve type by NpgsqlDbType using (var cmd = new NpgsqlCommand("SELECT @p", conn)) @@ -98,7 +98,7 @@ public async Task Range() [NonParallelizable] public async Task Range_with_long_subtype() { - await using var dataSource = CreateDataSourceWithRanges(csb => csb.MaxPoolSize = 1); + await using var dataSource = CreateDataSource(csb => csb.MaxPoolSize = 1); await using var conn = await dataSource.OpenConnectionAsync(); var typeName = await GetTempTypeName(conn); @@ -247,26 +247,6 @@ await AssertType( isNpgsqlDbTypeInferredFromClrType: false); } - NpgsqlDataSource DataSourceWithRanges { get; set; } = default!; - - [OneTimeSetUp] - public async Task OneTimeSetUp() - { - DataSourceWithRanges = CreateDataSourceWithRanges(); - using var conn = await OpenConnectionAsync(); - MinimumPgVersion(conn, "9.2.0"); - } - - [OneTimeTearDown] - public async Task TearDown() - { - await DataSourceWithRanges.DisposeAsync(); - DataSourceWithRanges = null!; - } - - protected override ValueTask OpenConnectionAsync() - => DataSourceWithRanges.OpenConnectionAsync(); - protected override NpgsqlConnection OpenConnection() => throw new NotSupportedException(); From b06f19718a27c8457a910a4cd6fc1e940b486877 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Fri, 3 Mar 2023 20:49:12 +0100 Subject: [PATCH 090/761] Fixup to mapping changes (#4968) Add default resolver factories at the end rather than at the beginning. 
--- src/Npgsql/NpgsqlConnection.cs | 17 ++-------- src/Npgsql/NpgsqlDataSourceBuilder.cs | 6 ++-- src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 39 +++++++++++++++++------ 3 files changed, 35 insertions(+), 27 deletions(-) diff --git a/src/Npgsql/NpgsqlConnection.cs b/src/Npgsql/NpgsqlConnection.cs index d7b8b404eb..b9d97cd374 100644 --- a/src/Npgsql/NpgsqlConnection.cs +++ b/src/Npgsql/NpgsqlConnection.cs @@ -90,7 +90,7 @@ internal NpgsqlDataSource NpgsqlDataSource public INpgsqlTypeMapper TypeMapper => throw new NotSupportedException(); - static ICloningInstantiator? _cloningInstantiator; + static Func? _cloningInstantiator; /// /// The default TCP/IP port for PostgreSQL. @@ -220,7 +220,7 @@ void SetupDataSource() var newDataSource = dataSourceBuilder.Build(); // See Clone() on the following line: - _cloningInstantiator = new CloningInstantiator(); + _cloningInstantiator = s => new NpgsqlConnection(s); _dataSource = PoolManager.Pools.GetOrAdd(canonical, newDataSource); if (_dataSource == newDataSource) @@ -1827,7 +1827,7 @@ object ICloneable.Clone() // default data source is brought in anyway). Debug.Assert(_dataSource is not null || _cloningInstantiator is not null); var conn = _dataSource is null - ? _cloningInstantiator!.Instantiate(_connectionString) + ? _cloningInstantiator!(_connectionString) : _dataSource.CreateConnection(); conn.ProvideClientCertificatesCallback = ProvideClientCertificatesCallback; @@ -1868,17 +1868,6 @@ public NpgsqlConnection CloneWith(string connectionString) }; } - interface ICloningInstantiator - { - public NpgsqlConnection Instantiate(string connectionString); - } - - sealed class CloningInstantiator : ICloningInstantiator - { - public NpgsqlConnection Instantiate(string connectionString) - => new(connectionString); - } - /// /// This method changes the current database by disconnecting from the actual /// database and connecting to the specified. 
diff --git a/src/Npgsql/NpgsqlDataSourceBuilder.cs b/src/Npgsql/NpgsqlDataSourceBuilder.cs index 192e071f08..75870c8226 100644 --- a/src/Npgsql/NpgsqlDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlDataSourceBuilder.cs @@ -291,9 +291,7 @@ public NpgsqlMultiHostDataSource BuildMultiHost() void AddDefaultFeatures() { - // If a resolver factory is already registered, we don't replace it. This is to allow customized factories (e.g. JSON with - // specific settings) to flow from the global type mapper to the data source. - _internalBuilder.AddTypeResolverFactory(new JsonTypeHandlerResolverFactory(), replaceIfExists: false); - _internalBuilder.AddTypeResolverFactory(new RangeTypeHandlerResolverFactory(), replaceIfExists: false); + _internalBuilder.AddDefaultTypeResolverFactory(new JsonTypeHandlerResolverFactory()); + _internalBuilder.AddDefaultTypeResolverFactory(new RangeTypeHandlerResolverFactory()); } } diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index 11d1ad6109..7b5359f73f 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -232,9 +232,6 @@ public NpgsqlSlimDataSourceBuilder UsePeriodicPasswordProvider( /// public void AddTypeResolverFactory(TypeHandlerResolverFactory resolverFactory) - => AddTypeResolverFactory(resolverFactory, replaceIfExists: true); - - internal void AddTypeResolverFactory(TypeHandlerResolverFactory resolverFactory, bool replaceIfExists) { var type = resolverFactory.GetType(); @@ -242,17 +239,41 @@ internal void AddTypeResolverFactory(TypeHandlerResolverFactory resolverFactory, { if (_resolverFactories[i].GetType() == type) { - if (replaceIfExists) - { - _resolverFactories.RemoveAt(i); - break; - } + _resolverFactories.RemoveAt(i); + break; + } + } + + _resolverFactories.Insert(0, resolverFactory); + } + + internal void AddDefaultTypeResolverFactory(TypeHandlerResolverFactory resolverFactory) + { + // For these "default" resolvers: + // 1. 
If they were already added in the global type mapper, we don't want to replace them (there may be custom user config, e.g. + // for JSON. + // 2. They can't be at the start, since then they'd override a user-added resolver in global (e.g. the range handler would override + // NodaTime, but NodaTime has special handling for tstzrange, mapping it to Interval in addition to NpgsqlRange). + // 3. They also can't be at the end, since then they'd be overridden by builtin (builtin has limited JSON handler, but we want + // the System.Text.Json handler to take precedence. + // So we (currently) add these at the end, but before the builtin resolver. + var type = resolverFactory.GetType(); + // 1st pass to skip if the resolver already exists from the global type mapper + for (var i = 0; i < _resolverFactories.Count; i++) + if (_resolverFactories[i].GetType() == type) + return; + + for (var i = 0; i < _resolverFactories.Count; i++) + { + if (_resolverFactories[i] is BuiltInTypeHandlerResolverFactory) + { + _resolverFactories.Insert(i, resolverFactory); return; } } - _resolverFactories.Insert(0, resolverFactory); + throw new Exception("No built-in resolver factory found"); } /// From eabbc8a1f6bad1ec0633c893ea0905799b3f52be Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Fri, 3 Mar 2023 21:08:15 +0100 Subject: [PATCH 091/761] Fix release nuget pack in CI --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 218414cc30..de43678e2c 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -385,7 +385,7 @@ jobs: dotnet-version: ${{ env.dotnet_sdk_version }} - name: Pack - run: dotnet pack --configuration Release --output nupkgs -p:ContinuousIntegrationBuild=true + run: dotnet pack --configuration Release --property:PackageOutputPath="$PWD/nupkgs" nupkgs -p:ContinuousIntegrationBuild=true - name: Upload artifacts uses: actions/upload-artifact@v3 From 
d3893872e9978e6c5a7e40f4aa64a35ac5a210d2 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Fri, 3 Mar 2023 21:32:56 +0100 Subject: [PATCH 092/761] Fixup --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index de43678e2c..6c0057c3c9 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -385,7 +385,7 @@ jobs: dotnet-version: ${{ env.dotnet_sdk_version }} - name: Pack - run: dotnet pack --configuration Release --property:PackageOutputPath="$PWD/nupkgs" nupkgs -p:ContinuousIntegrationBuild=true + run: dotnet pack Npgsql.sln --configuration Release --property:PackageOutputPath="$PWD/nupkgs" -p:ContinuousIntegrationBuild=true - name: Upload artifacts uses: actions/upload-artifact@v3 From caa6b2a883292fb0a27f3c9537c31e36032716a9 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Sat, 4 Mar 2023 15:52:37 +0100 Subject: [PATCH 093/761] Dispose X509Certificate2 when the connector is cleaned up (#4975) Fixes #4969 --- src/Npgsql/Internal/NpgsqlConnector.cs | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 16672fbe87..823bce9f89 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -283,6 +283,8 @@ internal bool PostgresCancellationPerformed internal bool AttemptPostgresCancellation { get; private set; } static readonly TimeSpan _cancelImmediatelyTimeout = TimeSpan.FromMilliseconds(-1); + X509Certificate2? 
_certificate; + internal NpgsqlLoggingConfiguration LoggingConfiguration { get; } internal ILogger ConnectionLogger { get; } @@ -756,7 +758,6 @@ async ValueTask GetUsernameAsyncInternal() async Task RawOpen(SslMode sslMode, NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken, bool isFirstAttempt = true) { - var cert = default(X509Certificate2?); try { if (async) @@ -815,15 +816,15 @@ async Task RawOpen(SslMode sslMode, NpgsqlTimeout timeout, bool async, Cancellat #if NET5_0_OR_GREATER // It's PEM time var keyPath = Settings.SslKey ?? PostgresEnvironment.SslKey ?? PostgresEnvironment.SslKeyDefault; - cert = string.IsNullOrEmpty(password) + _certificate = string.IsNullOrEmpty(password) ? X509Certificate2.CreateFromPemFile(certPath, keyPath) : X509Certificate2.CreateFromEncryptedPemFile(certPath, password, keyPath); if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows)) { // Windows crypto API has a bug with pem certs // See #3650 - using var previousCert = cert; - cert = new X509Certificate2(cert.Export(X509ContentType.Pkcs12)); + using var previousCert = _certificate; + _certificate = new X509Certificate2(_certificate.Export(X509ContentType.Pkcs12)); } #else // Technically PEM certificates are supported as of .NET 5 but we don't build for the net5.0 @@ -833,8 +834,8 @@ async Task RawOpen(SslMode sslMode, NpgsqlTimeout timeout, bool async, Cancellat #endif } - cert ??= new X509Certificate2(certPath, password); - clientCertificates.Add(cert); + _certificate ??= new X509Certificate2(certPath, password); + clientCertificates.Add(_certificate); } ClientCertificatesCallback?.Invoke(clientCertificates); @@ -851,10 +852,10 @@ async Task RawOpen(SslMode sslMode, NpgsqlTimeout timeout, bool async, Cancellat throw new ArgumentException(string.Format(NpgsqlStrings.CannotUseSslVerifyWithUserCallback, sslMode)); if (Settings.RootCertificate is not null) - throw new ArgumentException(string.Format(NpgsqlStrings.CannotUseSslRootCertificateWithUserCallback)); + 
throw new ArgumentException(NpgsqlStrings.CannotUseSslRootCertificateWithUserCallback); if (DataSource.RootCertificateCallback is not null) - throw new ArgumentException(string.Format(NpgsqlStrings.CannotUseValidationRootCertificateCallbackWithUserCallback)); + throw new ArgumentException(NpgsqlStrings.CannotUseValidationRootCertificateCallbackWithUserCallback); certificateValidationCallback = UserCertificateValidationCallback; } @@ -921,7 +922,8 @@ async Task RawOpen(SslMode sslMode, NpgsqlTimeout timeout, bool async, Cancellat } catch { - cert?.Dispose(); + _certificate?.Dispose(); + _certificate = null; _stream?.Dispose(); _stream = null!; @@ -2179,6 +2181,12 @@ void Cleanup() Connection = null; PostgresParameters.Clear(); _currentCommand = null; + + if (_certificate is not null) + { + _certificate.Dispose(); + _certificate = null; + } } void GenerateResetMessage() From 7d1dd87e6b10f8a544ef087e2a90508f721c83b7 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Mon, 6 Mar 2023 14:08:51 +0100 Subject: [PATCH 094/761] Bump version to 8.0.0-preview.2 --- Directory.Build.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Build.props b/Directory.Build.props index 135024f015..a6626dce0c 100644 --- a/Directory.Build.props +++ b/Directory.Build.props @@ -1,6 +1,6 @@  - 8.0.0-preview.1 + 8.0.0-preview.2 latest true enable From c2ecdbb5457c16c2b102b57867ba845d85a45b39 Mon Sep 17 00:00:00 2001 From: Jon Roberts Date: Tue, 7 Mar 2023 12:34:54 -0600 Subject: [PATCH 095/761] Add SupportsSavepoints property to NpgsqlTransaction (#4978) --- Npgsql.sln.DotSettings | 1 + src/Npgsql/NpgsqlTransaction.cs | 14 +++++++++++++- src/Npgsql/PublicAPI.Shipped.txt | 1 + 3 files changed, 15 insertions(+), 1 deletion(-) diff --git a/Npgsql.sln.DotSettings b/Npgsql.sln.DotSettings index 246b17c72b..890df2d4be 100644 --- a/Npgsql.sln.DotSettings +++ b/Npgsql.sln.DotSettings @@ -123,6 +123,7 @@ True True True + True True True True diff --git 
a/src/Npgsql/NpgsqlTransaction.cs b/src/Npgsql/NpgsqlTransaction.cs index 8d4ffbc00a..5dbaaadb85 100644 --- a/src/Npgsql/NpgsqlTransaction.cs +++ b/src/Npgsql/NpgsqlTransaction.cs @@ -356,6 +356,18 @@ public Task ReleaseAsync(string name, CancellationToken cancellationToken = defa return Release(name, true, cancellationToken); } + /// + /// Indicates whether this transaction supports database savepoints. + /// +#if NET5_0_OR_GREATER + public override bool SupportsSavepoints +#else + public bool SupportsSavepoints +#endif + { + get => _connector.DatabaseInfo.SupportsTransactions; + } + #endregion #region Dispose @@ -500,4 +512,4 @@ internal void UnbindIfNecessary() } #endregion -} \ No newline at end of file +} diff --git a/src/Npgsql/PublicAPI.Shipped.txt b/src/Npgsql/PublicAPI.Shipped.txt index 6f09e92dd3..87a3c12c5d 100644 --- a/src/Npgsql/PublicAPI.Shipped.txt +++ b/src/Npgsql/PublicAPI.Shipped.txt @@ -1741,6 +1741,7 @@ override Npgsql.NpgsqlTransaction.RollbackAsync(string! name, System.Threading.C override Npgsql.NpgsqlTransaction.RollbackAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! override Npgsql.NpgsqlTransaction.Save(string! name) -> void override Npgsql.NpgsqlTransaction.SaveAsync(string! name, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +override Npgsql.NpgsqlTransaction.SupportsSavepoints.get -> bool override Npgsql.PostgresException.GetObjectData(System.Runtime.Serialization.SerializationInfo! info, System.Runtime.Serialization.StreamingContext context) -> void override Npgsql.PostgresException.IsTransient.get -> bool override Npgsql.PostgresException.SqlState.get -> string! 
From 57f09d026c32082fac2c3ca5c4dd3377a8e71794 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Tue, 7 Mar 2023 20:29:35 +0100 Subject: [PATCH 096/761] Get rid of provider-specific values (#4980) Closes #4977 --- .../Internal/DateMultirangeHandler.cs | 1 - .../Internal/DateRangeHandler.cs | 1 - .../Internal/TimestampTzMultirangeHandler.cs | 1 - .../Internal/TimestampTzRangeHandler.cs | 1 - .../Internal/TypeHandlers/ArrayHandler.cs | 10 -- .../Internal/TypeHandlers/BitStringHandler.cs | 3 - .../TypeHandlers/MultirangeHandler.cs | 1 - .../NetworkHandlers/InetHandler.cs | 10 +- .../Internal/TypeHandlers/RangeHandler.cs | 1 - .../NpgsqlSimpleTypeHandlerWithPsv.cs | 98 ------------------- .../TypeHandling/NpgsqlTypeHandler.cs | 19 ---- .../TypeHandling/NpgsqlTypeHandler`.cs | 1 - src/Npgsql/NpgsqlDataReader.cs | 78 --------------- src/Npgsql/NpgsqlNestedDataReader.cs | 29 ------ src/Npgsql/PublicAPI.Unshipped.txt | 6 ++ 15 files changed, 12 insertions(+), 248 deletions(-) delete mode 100644 src/Npgsql/Internal/TypeHandling/NpgsqlSimpleTypeHandlerWithPsv.cs diff --git a/src/Npgsql.NodaTime/Internal/DateMultirangeHandler.cs b/src/Npgsql.NodaTime/Internal/DateMultirangeHandler.cs index 050a2f492a..167b8eb310 100644 --- a/src/Npgsql.NodaTime/Internal/DateMultirangeHandler.cs +++ b/src/Npgsql.NodaTime/Internal/DateMultirangeHandler.cs @@ -22,7 +22,6 @@ public DateMultirangeHandler(PostgresMultirangeType multirangePostgresType, Date => _dateIntervalHandler = rangeHandler; public override Type GetFieldType(FieldDescription? fieldDescription = null) => typeof(DateInterval[]); - public override Type GetProviderSpecificFieldType(FieldDescription? fieldDescription = null) => typeof(DateInterval[]); public override async ValueTask ReadAsObject(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription = null) diff --git a/src/Npgsql.NodaTime/Internal/DateRangeHandler.cs b/src/Npgsql.NodaTime/Internal/DateRangeHandler.cs index cd24f191ce..601a0cfb45 100644 --- a/src/Npgsql.NodaTime/Internal/DateRangeHandler.cs +++ b/src/Npgsql.NodaTime/Internal/DateRangeHandler.cs @@ -24,7 +24,6 @@ public DateRangeHandler(PostgresType rangePostgresType, NpgsqlTypeHandler subtyp } public override Type GetFieldType(FieldDescription? fieldDescription = null) => typeof(DateInterval); - public override Type GetProviderSpecificFieldType(FieldDescription? fieldDescription = null) => typeof(DateInterval); public override async ValueTask ReadAsObject(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) diff --git a/src/Npgsql.NodaTime/Internal/TimestampTzMultirangeHandler.cs b/src/Npgsql.NodaTime/Internal/TimestampTzMultirangeHandler.cs index b752d0e820..a13bb091b2 100644 --- a/src/Npgsql.NodaTime/Internal/TimestampTzMultirangeHandler.cs +++ b/src/Npgsql.NodaTime/Internal/TimestampTzMultirangeHandler.cs @@ -22,7 +22,6 @@ public partial class TimestampTzMultirangeHandler : MultirangeHandler, readonly INpgsqlTypeHandler _intervalHandler; public override Type GetFieldType(FieldDescription? fieldDescription = null) => typeof(Interval[]); - public override Type GetProviderSpecificFieldType(FieldDescription? 
fieldDescription = null) => typeof(Interval[]); public TimestampTzMultirangeHandler(PostgresMultirangeType pgMultirangeType, TimestampTzRangeHandler rangeHandler) : base(pgMultirangeType, rangeHandler) diff --git a/src/Npgsql.NodaTime/Internal/TimestampTzRangeHandler.cs b/src/Npgsql.NodaTime/Internal/TimestampTzRangeHandler.cs index b4998fc0d9..8205cc17ef 100644 --- a/src/Npgsql.NodaTime/Internal/TimestampTzRangeHandler.cs +++ b/src/Npgsql.NodaTime/Internal/TimestampTzRangeHandler.cs @@ -16,7 +16,6 @@ public partial class TimestampTzRangeHandler : RangeHandler, INpgsqlTypeHandler>, INpgsqlTypeHandler> { public override Type GetFieldType(FieldDescription? fieldDescription = null) => typeof(Interval); - public override Type GetProviderSpecificFieldType(FieldDescription? fieldDescription = null) => typeof(Interval); public TimestampTzRangeHandler(PostgresType rangePostgresType, NpgsqlTypeHandler subtypeHandler) : base(rangePostgresType, subtypeHandler) diff --git a/src/Npgsql/Internal/TypeHandlers/ArrayHandler.cs b/src/Npgsql/Internal/TypeHandlers/ArrayHandler.cs index 07cd89a469..bc1f100322 100644 --- a/src/Npgsql/Internal/TypeHandlers/ArrayHandler.cs +++ b/src/Npgsql/Internal/TypeHandlers/ArrayHandler.cs @@ -26,7 +26,6 @@ namespace Npgsql.Internal.TypeHandlers; public class ArrayHandler : NpgsqlTypeHandler { readonly Type _defaultArrayType; - readonly Type _psvArrayType; readonly ConcurrentDictionary _concreteHandlers = new(); protected int LowerBound { get; } protected NpgsqlTypeHandler ElementHandler { get; } @@ -37,12 +36,10 @@ public ArrayHandler(PostgresType arrayPostgresType, NpgsqlTypeHandler elementHan LowerBound = lowerBound; ElementHandler = elementHandler; ArrayNullabilityMode = arrayNullabilityMode; - _psvArrayType = elementHandler.GetProviderSpecificFieldType().MakeArrayType(); _defaultArrayType = elementHandler.GetFieldType().MakeArrayType(); } public override Type GetFieldType(FieldDescription? 
fieldDescription = null) => typeof(Array); - public override Type GetProviderSpecificFieldType(FieldDescription? fieldDescription = null) => typeof(Array); /// public override NpgsqlTypeHandler CreateArrayHandler(PostgresArrayType pgArrayType, ArrayNullabilityMode arrayNullabilityMode) @@ -59,13 +56,6 @@ public override NpgsqlTypeHandler CreateMultirangeHandler(PostgresMultirangeType ArrayHandlerCore CreateHandler(Type elementType) => (ArrayHandlerCore)Activator.CreateInstance(typeof(ArrayHandlerCore<>).MakeGenericType(elementType), ElementHandler, ArrayNullabilityMode, LowerBound)!; - internal override ValueTask ReadPsvAsObject(NpgsqlReadBuffer buf, int len, bool async, - FieldDescription? fieldDescription = null) - { - var handler = _concreteHandlers.GetOrAdd(_psvArrayType, static (_, instance) => instance.CreateHandler(instance.ElementHandler.GetProviderSpecificFieldType()), this); - return handler.ReadArrayAsObject(buf, async); - } - /// protected internal override async ValueTask ReadCustom(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) diff --git a/src/Npgsql/Internal/TypeHandlers/BitStringHandler.cs b/src/Npgsql/Internal/TypeHandlers/BitStringHandler.cs index 5d3e67c1ac..b448463343 100644 --- a/src/Npgsql/Internal/TypeHandlers/BitStringHandler.cs +++ b/src/Npgsql/Internal/TypeHandlers/BitStringHandler.cs @@ -32,9 +32,6 @@ public BitStringHandler(PostgresType pgType) : base(pgType) {} public override Type GetFieldType(FieldDescription? fieldDescription = null) => fieldDescription != null && fieldDescription.TypeModifier == 1 ? typeof(bool) : typeof(BitArray); - public override Type GetProviderSpecificFieldType(FieldDescription? 
fieldDescription = null) - => GetFieldType(fieldDescription); - // BitString requires a special array handler which returns bool or BitArray /// public override NpgsqlTypeHandler CreateArrayHandler(PostgresArrayType pgArrayType, ArrayNullabilityMode arrayNullabilityMode) diff --git a/src/Npgsql/Internal/TypeHandlers/MultirangeHandler.cs b/src/Npgsql/Internal/TypeHandlers/MultirangeHandler.cs index 5d2d738d8b..f0f2c11827 100644 --- a/src/Npgsql/Internal/TypeHandlers/MultirangeHandler.cs +++ b/src/Npgsql/Internal/TypeHandlers/MultirangeHandler.cs @@ -130,7 +130,6 @@ public async Task WriteMultirange( } public override Type GetFieldType(FieldDescription? fieldDescription = null) => typeof(NpgsqlRange[]); - public override Type GetProviderSpecificFieldType(FieldDescription? fieldDescription = null) => typeof(NpgsqlRange[]); /// public override NpgsqlTypeHandler CreateArrayHandler(PostgresArrayType pgArrayType, ArrayNullabilityMode arrayNullabilityMode) diff --git a/src/Npgsql/Internal/TypeHandlers/NetworkHandlers/InetHandler.cs b/src/Npgsql/Internal/TypeHandlers/NetworkHandlers/InetHandler.cs index 276ca158f7..ed10be3ef8 100644 --- a/src/Npgsql/Internal/TypeHandlers/NetworkHandlers/InetHandler.cs +++ b/src/Npgsql/Internal/TypeHandlers/NetworkHandlers/InetHandler.cs @@ -21,7 +21,8 @@ namespace Npgsql.Internal.TypeHandlers.NetworkHandlers; /// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. /// Use it at your own risk. 
/// -public partial class InetHandler : NpgsqlSimpleTypeHandlerWithPsv, +public partial class InetHandler : NpgsqlSimpleTypeHandler, + INpgsqlSimpleTypeHandler<(IPAddress Address, int Subnet)>, INpgsqlSimpleTypeHandler { // ReSharper disable InconsistentNaming @@ -58,7 +59,8 @@ internal static (IPAddress Address, int Subnet) DoRead( #pragma warning restore CA1801 // Review unused parameters /// - protected override (IPAddress Address, int Subnet) ReadPsv(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) + (IPAddress Address, int Subnet) INpgsqlSimpleTypeHandler<(IPAddress Address, int Subnet)>.Read( + NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) => DoRead(buf, len, fieldDescription, false); NpgsqlInet INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) @@ -76,7 +78,7 @@ public override int ValidateAndGetLength(IPAddress value, NpgsqlParameter? param => GetLength(value); /// - public override int ValidateAndGetLength((IPAddress Address, int Subnet) value, NpgsqlParameter? parameter) + public int ValidateAndGetLength((IPAddress Address, int Subnet) value, NpgsqlParameter? parameter) => GetLength(value.Address); /// @@ -88,7 +90,7 @@ public override void Write(IPAddress value, NpgsqlWriteBuffer buf, NpgsqlParamet => DoWrite(value, -1, buf, false); /// - public override void Write((IPAddress Address, int Subnet) value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) + public void Write((IPAddress Address, int Subnet) value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) => DoWrite(value.Address, value.Subnet, buf, false); /// diff --git a/src/Npgsql/Internal/TypeHandlers/RangeHandler.cs b/src/Npgsql/Internal/TypeHandlers/RangeHandler.cs index f3cbb51192..0c108696e1 100644 --- a/src/Npgsql/Internal/TypeHandlers/RangeHandler.cs +++ b/src/Npgsql/Internal/TypeHandlers/RangeHandler.cs @@ -34,7 +34,6 @@ public RangeHandler(PostgresType rangePostgresType, NpgsqlTypeHandler subtypeHan => SubtypeHandler = subtypeHandler; public override Type GetFieldType(FieldDescription? fieldDescription = null) => typeof(NpgsqlRange); - public override Type GetProviderSpecificFieldType(FieldDescription? fieldDescription = null) => typeof(NpgsqlRange); /// public override NpgsqlTypeHandler CreateArrayHandler(PostgresArrayType pgArrayType, ArrayNullabilityMode arrayNullabilityMode) diff --git a/src/Npgsql/Internal/TypeHandling/NpgsqlSimpleTypeHandlerWithPsv.cs b/src/Npgsql/Internal/TypeHandling/NpgsqlSimpleTypeHandlerWithPsv.cs deleted file mode 100644 index d6d69b2df1..0000000000 --- a/src/Npgsql/Internal/TypeHandling/NpgsqlSimpleTypeHandlerWithPsv.cs +++ /dev/null @@ -1,98 +0,0 @@ -using System; -using System.Data.Common; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandlers; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandling; - -/// -/// A simple type handler that supports a provider-specific value in addition to its default value. -/// This is necessary mainly in cases where the CLR type cannot represent the full range of the PostgreSQL type, and a custom CLR type -/// is needed. The provider-specific type will be returned from calls to -/// . -/// -/// -/// The default CLR type that this handler will read and write. For example, calling -/// on a column with this handler will return a value with type . -/// Type handlers can support additional types by implementing . -/// -/// The provider-specific CLR type that this handler will read and write. 
-public abstract class NpgsqlSimpleTypeHandlerWithPsv : NpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler -{ - public NpgsqlSimpleTypeHandlerWithPsv(PostgresType pgType) : base(pgType) {} - - #region Read - - /// - /// Reads a value of type with the given length from the provided buffer, - /// with the assumption that it is entirely present in the provided memory buffer and no I/O will be - /// required. - /// - /// The buffer from which to read. - /// The byte length of the value. The buffer might not contain the full length, requiring I/O to be performed. - /// Additional PostgreSQL information about the type, such as the length in varchar(30). - /// The fully-read value. - protected abstract TPsv ReadPsv(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null); - - TPsv INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => ReadPsv(buf, len, fieldDescription); - - // Since TAny isn't constrained to class? or struct (C# doesn't have a non-nullable constraint that doesn't limit us to either struct or class), - // we must use the bang operator here to tell the compiler that a null value will never returned. - - /// - /// Reads a column as the type handler's provider-specific type, assuming that it is already entirely - /// in memory (i.e. no I/O is necessary). Called by in non-sequential mode, which - /// buffers entire rows in memory. - /// - internal override object ReadPsvAsObject(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => Read(buf, len, fieldDescription)!; - - /// - /// Reads a column as the type handler's provider-specific type. If it is not already entirely in - /// memory, sync or async I/O will be performed as specified by . - /// - internal override async ValueTask ReadPsvAsObject(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription = null) - => (await Read(buf, len, async, fieldDescription))!; - - #endregion Read - - #region Write - - /// - /// Responsible for validating that a value represents a value of the correct and which can be - /// written for PostgreSQL - if the value cannot be written for any reason, an exception shold be thrown. - /// Also returns the byte length needed to write the value. - /// - /// The value to be written to PostgreSQL - /// - /// The instance where this value resides. Can be used to access additional - /// information relevant to the write process (e.g. ). - /// - /// The number of bytes required to write the value. - public abstract int ValidateAndGetLength(TPsv value, NpgsqlParameter? parameter); - - /// - /// Writes a value to the provided buffer, with the assumption that there is enough space in the buffer - /// (no I/O will occur). The Npgsql core will have taken care of that. - /// - /// The value to write. - /// The buffer to which to write. - /// - /// The instance where this value resides. Can be used to access additional - /// information relevant to the write process (e.g. ). - /// - public abstract void Write(TPsv value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter); - - #endregion Write - - #region Misc - - public override Type GetProviderSpecificFieldType(FieldDescription? fieldDescription = null) - => typeof(TPsv); - - #endregion Misc -} diff --git a/src/Npgsql/Internal/TypeHandling/NpgsqlTypeHandler.cs b/src/Npgsql/Internal/TypeHandling/NpgsqlTypeHandler.cs index 0eaa1906cb..e9cdf8dd4d 100644 --- a/src/Npgsql/Internal/TypeHandling/NpgsqlTypeHandler.cs +++ b/src/Npgsql/Internal/TypeHandling/NpgsqlTypeHandler.cs @@ -91,24 +91,6 @@ internal object ReadAsObject(NpgsqlReadBuffer buf, int len, FieldDescription? fi return ReadAsObject(buf, len, async: false, fieldDescription).Result; } - /// - /// Reads a column as the type handler's provider-specific type. 
If it is not already entirely in - /// memory, sync or async I/O will be performed as specified by . - /// - internal virtual ValueTask ReadPsvAsObject(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - => ReadAsObject(buf, len, async, fieldDescription); - - /// - /// Version of that's called when we know the entire value - /// is already buffered in memory (i.e. in non-sequential mode). - /// - internal virtual object ReadPsvAsObject(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - { - Debug.Assert(buf.ReadBytesLeft >= len); - - return ReadPsvAsObject(buf, len, async: false, fieldDescription).Result; - } - /// /// Reads a value from the buffer, assuming our read position is at the value's preceding length. /// If the length is -1 (null), this method will return the default value. @@ -259,7 +241,6 @@ protected virtual Task WriteWithLengthCustom( #region Misc public abstract Type GetFieldType(FieldDescription? fieldDescription = null); - public abstract Type GetProviderSpecificFieldType(FieldDescription? fieldDescription = null); internal virtual bool PreferTextWrite => false; diff --git a/src/Npgsql/Internal/TypeHandling/NpgsqlTypeHandler`.cs b/src/Npgsql/Internal/TypeHandling/NpgsqlTypeHandler`.cs index 3ef86dc8c8..ae1e0eee5c 100644 --- a/src/Npgsql/Internal/TypeHandling/NpgsqlTypeHandler`.cs +++ b/src/Npgsql/Internal/TypeHandling/NpgsqlTypeHandler`.cs @@ -61,7 +61,6 @@ public override async ValueTask ReadAsObject(NpgsqlReadBuffer buf, int l #region Misc public override Type GetFieldType(FieldDescription? fieldDescription = null) => typeof(TDefault); - public override Type GetProviderSpecificFieldType(FieldDescription? 
fieldDescription = null) => typeof(TDefault); /// public override NpgsqlTypeHandler CreateArrayHandler(PostgresArrayType pgArrayType, ArrayNullabilityMode arrayNullabilityMode) diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 908e03a022..18f6f50019 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -1833,51 +1833,6 @@ public override object GetValue(int ordinal) return result; } - /// - /// Gets the value of the specified column as an instance of . - /// - /// The zero-based column ordinal. - /// The value of the specified column. - public override object GetProviderSpecificValue(int ordinal) - { - var fieldDescription = CheckRowAndGetField(ordinal); - - if (_isSequential) - { - SeekToColumnSequential(ordinal, false).GetAwaiter().GetResult(); - CheckColumnStart(); - } - else - SeekToColumnNonSequential(ordinal); - - if (ColumnLen == -1) - return DBNull.Value; - - var position = Buffer.ReadPosition; - try - { - return _isSequential - ? fieldDescription.Handler.ReadPsvAsObject(Buffer, ColumnLen, false, fieldDescription).GetAwaiter().GetResult() - : fieldDescription.Handler.ReadPsvAsObject(Buffer, ColumnLen, fieldDescription); - } - catch - { - if (Connector.State != ConnectorState.Broken) - { - var writtenBytes = Buffer.ReadPosition - position; - var remainingBytes = ColumnLen - writtenBytes; - if (remainingBytes > 0) - Buffer.Skip(remainingBytes, false).GetAwaiter().GetResult(); - } - throw; - } - finally - { - // Important: position must still be updated - PosInColumn += ColumnLen; - } - } - /// /// Gets the value of the specified column as an instance of . /// @@ -1988,39 +1943,6 @@ public override Type GetFieldType(int ordinal) => Command.ObjectResultTypes?[ordinal] ?? GetField(ordinal).FieldType; - /// - /// Returns the provider-specific field type of the specified column. - /// - /// The zero-based column ordinal. - /// The Type object that describes the data type of the specified column. 
- public override Type GetProviderSpecificFieldType(int ordinal) - { - var fieldDescription = GetField(ordinal); - return fieldDescription.Handler.GetProviderSpecificFieldType(fieldDescription); - } - - /// - /// Gets all provider-specific attribute columns in the collection for the current row. - /// - /// An array of Object into which to copy the attribute columns. - /// The number of instances of in the array. - public override int GetProviderSpecificValues(object[] values) - { - if (values == null) - throw new ArgumentNullException(nameof(values)); - if (State != ReaderState.InResult) - { - throw State == ReaderState.Disposed - ? new ObjectDisposedException(nameof(NpgsqlDataReader)) - : new InvalidOperationException("No row is available"); - } - - var count = Math.Min(FieldCount, values.Length); - for (var i = 0; i < count; i++) - values[i] = GetProviderSpecificValue(i); - return count; - } - /// /// Returns an that can be used to iterate through the rows in the data reader. /// diff --git a/src/Npgsql/NpgsqlNestedDataReader.cs b/src/Npgsql/NpgsqlNestedDataReader.cs index 744b34dd1d..bd10db458c 100644 --- a/src/Npgsql/NpgsqlNestedDataReader.cs +++ b/src/Npgsql/NpgsqlNestedDataReader.cs @@ -348,35 +348,6 @@ public override T GetFieldValue(int ordinal) : field.Handler.Read(Buffer, field.Length, fieldDescription: null); } - /// - public override Type GetProviderSpecificFieldType(int ordinal) - { - var column = CheckRowAndColumn(ordinal); - return column.TypeHandler.GetProviderSpecificFieldType(); - } - - /// - public override object GetProviderSpecificValue(int ordinal) - { - var column = CheckRowAndColumnAndSeek(ordinal); - if (column.Length == -1) - return DBNull.Value; - return column.Handler.ReadPsvAsObject(Buffer, column.Length); - } - - /// - public override int GetProviderSpecificValues(object[] values) - { - if (values == null) - throw new ArgumentNullException(nameof(values)); - CheckOnRow(); - - var count = Math.Min(FieldCount, values.Length); - for (var 
i = 0; i < count; i++) - values[i] = GetProviderSpecificValue(i); - return count; - } - /// public override bool Read() { diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index d8d07a072f..11b765fce2 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -42,3 +42,9 @@ static Npgsql.NpgsqlRangeExtensions.UseRange(this Npgsql.TypeMapping.INpgsqlType *REMOVED*static NpgsqlTypes.NpgsqlPolygon.Parse(string! s) -> NpgsqlTypes.NpgsqlPolygon *REMOVED*Npgsql.NpgsqlBinaryImporter.WriteRow(params object![]! values) -> void *REMOVED*Npgsql.NpgsqlBinaryImporter.WriteRowAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken), params object![]! values) -> System.Threading.Tasks.Task! +*REMOVED*override Npgsql.NpgsqlDataReader.GetProviderSpecificFieldType(int ordinal) -> System.Type! +*REMOVED*override Npgsql.NpgsqlDataReader.GetProviderSpecificValue(int ordinal) -> object! +*REMOVED*override Npgsql.NpgsqlDataReader.GetProviderSpecificValues(object![]! values) -> int +*REMOVED*override Npgsql.NpgsqlNestedDataReader.GetProviderSpecificFieldType(int ordinal) -> System.Type! +*REMOVED*override Npgsql.NpgsqlNestedDataReader.GetProviderSpecificValue(int ordinal) -> object! +*REMOVED*override Npgsql.NpgsqlNestedDataReader.GetProviderSpecificValues(object![]! 
values) -> int From dda36e6767ee8fac66340c2c899aa33103ab30c5 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Tue, 7 Mar 2023 23:43:02 +0100 Subject: [PATCH 097/761] Make encryption opt-in on NpgsqlSlimDataSourceBuilder (#4976) Closes #4966 --- src/Npgsql/Internal/NpgsqlConnector.cs | 226 ++++++++++-------- src/Npgsql/Internal/TypeMapping/TypeMapper.cs | 2 +- src/Npgsql/NpgsqlDataSource.cs | 2 + src/Npgsql/NpgsqlDataSourceBuilder.cs | 22 ++ src/Npgsql/NpgsqlDataSourceConfiguration.cs | 3 + src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 55 ++++- .../Properties/NpgsqlStrings.Designer.cs | 152 ++++-------- src/Npgsql/Properties/NpgsqlStrings.resx | 3 + src/Npgsql/PublicAPI.Unshipped.txt | 8 +- .../TypeMapping/NpgsqlJsonExtensions.cs | 34 --- .../TypeMapping/NpgsqlRangeExtensions.cs | 20 -- .../LegacyNodaTimeTests.cs | 1 - .../NodaTimeSetupFixture.cs | 2 +- 13 files changed, 261 insertions(+), 269 deletions(-) delete mode 100644 src/Npgsql/TypeMapping/NpgsqlJsonExtensions.cs delete mode 100644 src/Npgsql/TypeMapping/NpgsqlRangeExtensions.cs diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 823bce9f89..869b88f08c 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -283,7 +283,7 @@ internal bool PostgresCancellationPerformed internal bool AttemptPostgresCancellation { get; private set; } static readonly TimeSpan _cancelImmediatelyTimeout = TimeSpan.FromMilliseconds(-1); - X509Certificate2? _certificate; + IDisposable? 
_certificate; internal NpgsqlLoggingConfiguration LoggingConfiguration { get; } @@ -786,8 +786,12 @@ async Task RawOpen(SslMode sslMode, NpgsqlTimeout timeout, bool async, Cancellat IsSecure = false; - if (sslMode is SslMode.Prefer or SslMode.Require or SslMode.VerifyCA or SslMode.VerifyFull) + if ((sslMode is SslMode.Prefer && DataSource.EncryptionNegotiator is not null) || + sslMode is SslMode.Require or SslMode.VerifyCA or SslMode.VerifyFull) { + if (DataSource.EncryptionNegotiator is null) + throw new InvalidOperationException(NpgsqlStrings.EncryptionDisabled); + WriteSslRequest(); await Flush(async, cancellationToken); @@ -804,136 +808,152 @@ async Task RawOpen(SslMode sslMode, NpgsqlTimeout timeout, bool async, Cancellat throw new NpgsqlException("SSL connection requested. No SSL enabled connection from this host is configured."); break; case 'S': - var clientCertificates = new X509Certificate2Collection(); - var certPath = Settings.SslCertificate ?? PostgresEnvironment.SslCert ?? PostgresEnvironment.SslCertDefault; + await DataSource.EncryptionNegotiator(this, sslMode, timeout, async, isFirstAttempt); + break; + } - if (certPath != null) - { - var password = Settings.SslPassword; + if (ReadBuffer.ReadBytesLeft > 0) + throw new NpgsqlException("Additional unencrypted data received after SSL negotiation - this should never happen, and may be an indication of a man-in-the-middle attack."); + } - if (Path.GetExtension(certPath).ToUpperInvariant() != ".PFX") - { + ConnectionLogger.LogTrace("Socket connected to {Host}:{Port}", Host, Port); + } + catch + { + _stream?.Dispose(); + _stream = null!; + + _baseStream?.Dispose(); + _baseStream = null!; + + _socket?.Dispose(); + _socket = null!; + + throw; + } + } + + internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, bool async, bool isFirstAttempt) + { + var clientCertificates = new X509Certificate2Collection(); + var certPath = Settings.SslCertificate ?? PostgresEnvironment.SslCert ?? 
PostgresEnvironment.SslCertDefault; + + if (certPath != null) + { + var password = Settings.SslPassword; + + X509Certificate2? cert = null; + if (Path.GetExtension(certPath).ToUpperInvariant() != ".PFX") + { #if NET5_0_OR_GREATER - // It's PEM time - var keyPath = Settings.SslKey ?? PostgresEnvironment.SslKey ?? PostgresEnvironment.SslKeyDefault; - _certificate = string.IsNullOrEmpty(password) - ? X509Certificate2.CreateFromPemFile(certPath, keyPath) - : X509Certificate2.CreateFromEncryptedPemFile(certPath, password, keyPath); - if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows)) - { - // Windows crypto API has a bug with pem certs - // See #3650 - using var previousCert = _certificate; - _certificate = new X509Certificate2(_certificate.Export(X509ContentType.Pkcs12)); - } + // It's PEM time + var keyPath = Settings.SslKey ?? PostgresEnvironment.SslKey ?? PostgresEnvironment.SslKeyDefault; + cert = string.IsNullOrEmpty(password) + ? X509Certificate2.CreateFromPemFile(certPath, keyPath) + : X509Certificate2.CreateFromEncryptedPemFile(certPath, password, keyPath); + if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows)) + { + // Windows crypto API has a bug with pem certs + // See #3650 + using var previousCert = cert; + cert = new X509Certificate2(cert.Export(X509ContentType.Pkcs12)); + } + #else - // Technically PEM certificates are supported as of .NET 5 but we don't build for the net5.0 - // TFM anymore since .NET 5 is out of support - // This is a breaking change for .NET 5 as of Npgsql 8! - throw new NotSupportedException("PEM certificates are only supported with .NET 6 and higher"); + // Technically PEM certificates are supported as of .NET 5 but we don't build for the net5.0 + // TFM anymore since .NET 5 is out of support + // This is a breaking change for .NET 5 as of Npgsql 8! 
+ throw new NotSupportedException("PEM certificates are only supported with .NET 6 and higher"); #endif - } + } - _certificate ??= new X509Certificate2(certPath, password); - clientCertificates.Add(_certificate); - } + cert ??= new X509Certificate2(certPath, password); + clientCertificates.Add(cert); - ClientCertificatesCallback?.Invoke(clientCertificates); + _certificate = cert; + } - var checkCertificateRevocation = Settings.CheckCertificateRevocation; + try + { + ClientCertificatesCallback?.Invoke(clientCertificates); - RemoteCertificateValidationCallback? certificateValidationCallback; - X509Certificate2? caCert; - string? certRootPath = null; + var checkCertificateRevocation = Settings.CheckCertificateRevocation; - if (UserCertificateValidationCallback is not null) - { - if (sslMode is SslMode.VerifyCA or SslMode.VerifyFull) - throw new ArgumentException(string.Format(NpgsqlStrings.CannotUseSslVerifyWithUserCallback, sslMode)); + RemoteCertificateValidationCallback? certificateValidationCallback; + X509Certificate2? caCert; + string? 
certRootPath = null; - if (Settings.RootCertificate is not null) - throw new ArgumentException(NpgsqlStrings.CannotUseSslRootCertificateWithUserCallback); + if (UserCertificateValidationCallback is not null) + { + if (sslMode is SslMode.VerifyCA or SslMode.VerifyFull) + throw new ArgumentException(string.Format(NpgsqlStrings.CannotUseSslVerifyWithUserCallback, sslMode)); - if (DataSource.RootCertificateCallback is not null) - throw new ArgumentException(NpgsqlStrings.CannotUseValidationRootCertificateCallbackWithUserCallback); + if (Settings.RootCertificate is not null) + throw new ArgumentException(NpgsqlStrings.CannotUseSslRootCertificateWithUserCallback); - certificateValidationCallback = UserCertificateValidationCallback; - } - else if (sslMode is SslMode.Prefer or SslMode.Require) - { - if (isFirstAttempt && sslMode is SslMode.Require && !Settings.TrustServerCertificate) - throw new ArgumentException(NpgsqlStrings.CannotUseSslModeRequireWithoutTrustServerCertificate); + if (DataSource.RootCertificateCallback is not null) + throw new ArgumentException(NpgsqlStrings.CannotUseValidationRootCertificateCallbackWithUserCallback); - certificateValidationCallback = SslTrustServerValidation; - checkCertificateRevocation = false; - } - else if ((caCert = DataSource.RootCertificateCallback?.Invoke()) is not null || - (certRootPath = Settings.RootCertificate ?? - PostgresEnvironment.SslCertRoot ?? 
PostgresEnvironment.SslCertRootDefault) is not null) - { - certificateValidationCallback = SslRootValidation(sslMode == SslMode.VerifyFull, certRootPath, caCert); - } - else if (sslMode == SslMode.VerifyCA) - { - certificateValidationCallback = SslVerifyCAValidation; - } - else - { - Debug.Assert(sslMode == SslMode.VerifyFull); - certificateValidationCallback = SslVerifyFullValidation; - } + certificateValidationCallback = UserCertificateValidationCallback; + } + else if (sslMode is SslMode.Prefer or SslMode.Require) + { + if (isFirstAttempt && sslMode is SslMode.Require && !Settings.TrustServerCertificate) + throw new ArgumentException(NpgsqlStrings.CannotUseSslModeRequireWithoutTrustServerCertificate); + + certificateValidationCallback = SslTrustServerValidation; + checkCertificateRevocation = false; + } + else if ((caCert = DataSource.RootCertificateCallback?.Invoke()) is not null || + (certRootPath = Settings.RootCertificate ?? + PostgresEnvironment.SslCertRoot ?? PostgresEnvironment.SslCertRootDefault) is not null) + { + certificateValidationCallback = SslRootValidation(sslMode == SslMode.VerifyFull, certRootPath, caCert); + } + else if (sslMode == SslMode.VerifyCA) + { + certificateValidationCallback = SslVerifyCAValidation; + } + else + { + Debug.Assert(sslMode == SslMode.VerifyFull); + certificateValidationCallback = SslVerifyFullValidation; + } - timeout.CheckAndApply(this); + timeout.CheckAndApply(this); - try - { - var sslStream = new SslStream(_stream, leaveInnerStreamOpen: false, certificateValidationCallback); + try + { + var sslStream = new SslStream(_stream, leaveInnerStreamOpen: false, certificateValidationCallback); - var sslProtocols = SslProtocols.None; + var sslProtocols = SslProtocols.None; #if NETSTANDARD2_0 - // On .NET Framework SslProtocols.None can be disabled, see #3718 - sslProtocols = SslProtocols.Tls | SslProtocols.Tls11 | SslProtocols.Tls12; + // On .NET Framework SslProtocols.None can be disabled, see #3718 + sslProtocols = 
SslProtocols.Tls | SslProtocols.Tls11 | SslProtocols.Tls12; #endif - if (async) - await sslStream.AuthenticateAsClientAsync(Host, clientCertificates, sslProtocols, checkCertificateRevocation); - else - sslStream.AuthenticateAsClient(Host, clientCertificates, sslProtocols, checkCertificateRevocation); - - _stream = sslStream; - } - catch (Exception e) - { - throw new NpgsqlException("Exception while performing SSL handshake", e); - } - - ReadBuffer.Underlying = _stream; - WriteBuffer.Underlying = _stream; - IsSecure = true; - ConnectionLogger.LogTrace("SSL negotiation successful"); - break; - } + if (async) + await sslStream.AuthenticateAsClientAsync(Host, clientCertificates, sslProtocols, checkCertificateRevocation); + else + sslStream.AuthenticateAsClient(Host, clientCertificates, sslProtocols, checkCertificateRevocation); - if (ReadBuffer.ReadBytesLeft > 0) - throw new NpgsqlException("Additional unencrypted data received after SSL negotiation - this should never happen, and may be an indication of a man-in-the-middle attack."); + _stream = sslStream; + } + catch (Exception e) + { + throw new NpgsqlException("Exception while performing SSL handshake", e); } - ConnectionLogger.LogTrace("Socket connected to {Host}:{Port}", Host, Port); + ReadBuffer.Underlying = _stream; + WriteBuffer.Underlying = _stream; + IsSecure = true; + ConnectionLogger.LogTrace("SSL negotiation successful"); } catch { _certificate?.Dispose(); _certificate = null; - _stream?.Dispose(); - _stream = null!; - - _baseStream?.Dispose(); - _baseStream = null!; - - _socket?.Dispose(); - _socket = null!; - throw; } } diff --git a/src/Npgsql/Internal/TypeMapping/TypeMapper.cs b/src/Npgsql/Internal/TypeMapping/TypeMapper.cs index 0710268eed..04bee664f1 100644 --- a/src/Npgsql/Internal/TypeMapping/TypeMapper.cs +++ b/src/Npgsql/Internal/TypeMapping/TypeMapper.cs @@ -305,7 +305,7 @@ internal NpgsqlTypeHandler ResolveByDataTypeName(string typeName) case PostgresMultirangeType: return throwOnError ? 
throw new NotSupportedException( - $"'{pgType}' is a range type; please call {nameof(NpgsqlRangeExtensions.UseRange)} on {nameof(NpgsqlDataSourceBuilder)} or on {nameof(NpgsqlConnection)}.{nameof(NpgsqlConnection.GlobalTypeMapper)} to enable ranges. " + + $"'{pgType}' is a range type; please call {nameof(NpgsqlSlimDataSourceBuilder.EnableRanges)} on {nameof(NpgsqlSlimDataSourceBuilder)} to enable ranges. " + "See https://www.npgsql.org/doc/types/ranges.html for more information.") : null; #pragma warning restore CS0618 diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index 94dd64732b..303b1b0534 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -43,6 +43,7 @@ public abstract class NpgsqlDataSource : DbDataSource /// internal NpgsqlDatabaseInfo DatabaseInfo { get; set; } = null!; // Initialized at bootstrapping + internal Func? EncryptionNegotiator { get; } internal RemoteCertificateValidationCallback? UserCertificateValidationCallback { get; } internal Action? 
ClientCertificatesCallback { get; } @@ -89,6 +90,7 @@ internal NpgsqlDataSource( Configuration = dataSourceConfig; (LoggingConfiguration, + EncryptionNegotiator, UserCertificateValidationCallback, ClientCertificatesCallback, _periodicPasswordProvider, diff --git a/src/Npgsql/NpgsqlDataSourceBuilder.cs b/src/Npgsql/NpgsqlDataSourceBuilder.cs index 75870c8226..525f00df4b 100644 --- a/src/Npgsql/NpgsqlDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlDataSourceBuilder.cs @@ -2,11 +2,13 @@ using System.Diagnostics.CodeAnalysis; using System.Net.Security; using System.Security.Cryptography.X509Certificates; +using System.Text.Json; using System.Threading; using System.Threading.Tasks; using Microsoft.Extensions.Logging; using Npgsql.Internal.TypeHandling; using Npgsql.TypeMapping; +using NpgsqlTypes; namespace Npgsql; @@ -200,6 +202,25 @@ public NpgsqlDataSourceBuilder UsePeriodicPasswordProvider( public void AddTypeResolverFactory(TypeHandlerResolverFactory resolverFactory) => _internalBuilder.AddTypeResolverFactory(resolverFactory); + /// + /// Sets up System.Text.Json mappings for the PostgreSQL json and jsonb types. + /// + /// Options to customize JSON serialization and deserialization. + /// + /// A list of CLR types to map to PostgreSQL jsonb (no need to specify ). + /// + /// + /// A list of CLR types to map to PostgreSQL json (no need to specify ). + /// + public NpgsqlDataSourceBuilder UseSystemTextJson( + JsonSerializerOptions? serializerOptions = null, + Type[]? jsonbClrTypes = null, + Type[]? jsonClrTypes = null) + { + AddTypeResolverFactory(new JsonTypeHandlerResolverFactory(jsonbClrTypes, jsonClrTypes, serializerOptions)); + return this; + } + /// public INpgsqlTypeMapper MapEnum(string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) where TEnum : struct, Enum @@ -291,6 +312,7 @@ public NpgsqlMultiHostDataSource BuildMultiHost() void AddDefaultFeatures() { + _internalBuilder.EnableEncryption(); _internalBuilder.AddDefaultTypeResolverFactory(new JsonTypeHandlerResolverFactory()); _internalBuilder.AddDefaultTypeResolverFactory(new RangeTypeHandlerResolverFactory()); } diff --git a/src/Npgsql/NpgsqlDataSourceConfiguration.cs b/src/Npgsql/NpgsqlDataSourceConfiguration.cs index 33dd71617f..fe35dd33e0 100644 --- a/src/Npgsql/NpgsqlDataSourceConfiguration.cs +++ b/src/Npgsql/NpgsqlDataSourceConfiguration.cs @@ -4,13 +4,16 @@ using System.Security.Cryptography.X509Certificates; using System.Threading; using System.Threading.Tasks; +using Npgsql.Internal; using Npgsql.Internal.TypeHandling; using Npgsql.Internal.TypeMapping; +using Npgsql.Util; namespace Npgsql; sealed record NpgsqlDataSourceConfiguration( NpgsqlLoggingConfiguration LoggingConfiguration, + Func? EncryptionNegotiator, RemoteCertificateValidationCallback? UserCertificateValidationCallback, Action? ClientCertificatesCallback, Func>? PeriodicPasswordProvider, diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index 7b5359f73f..7c3fae98bc 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -4,13 +4,16 @@ using System.Net.Security; using System.Reflection; using System.Security.Cryptography.X509Certificates; +using System.Text.Json; using System.Threading; using System.Threading.Tasks; using Microsoft.Extensions.Logging; +using Npgsql.Internal; using Npgsql.Internal.TypeHandling; using Npgsql.Internal.TypeMapping; using Npgsql.Properties; using Npgsql.TypeMapping; +using Npgsql.Util; using NpgsqlTypes; namespace Npgsql; @@ -27,9 +30,9 @@ public sealed class NpgsqlSlimDataSourceBuilder : INpgsqlTypeMapper ILoggerFactory? _loggerFactory; bool _sensitiveDataLoggingEnabled; + Func? 
_encryptionNegotiator; RemoteCertificateValidationCallback? _userCertificateValidationCallback; Action? _clientCertificatesCallback; - Func? _rootCertificateCallback; Func>? _periodicPasswordProvider; @@ -375,6 +378,50 @@ static string GetPgName(Type clrType, INpgsqlNameTranslator nameTranslator) #endregion Type mapping + #region Optional opt-ins + + /// + /// Sets up mappings for the PostgreSQL range and multirange types. + /// + public NpgsqlSlimDataSourceBuilder EnableRanges() + { + AddTypeResolverFactory(new RangeTypeHandlerResolverFactory()); + return this; + } + + /// + /// Sets up System.Text.Json mappings for the PostgreSQL json and jsonb types. + /// + /// Options to customize JSON serialization and deserialization. + /// + /// A list of CLR types to map to PostgreSQL jsonb (no need to specify ). + /// + /// + /// A list of CLR types to map to PostgreSQL json (no need to specify ). + /// + public NpgsqlSlimDataSourceBuilder UseSystemTextJson( + JsonSerializerOptions? serializerOptions = null, + Type[]? jsonbClrTypes = null, + Type[]? jsonClrTypes = null) + { + AddTypeResolverFactory(new JsonTypeHandlerResolverFactory(jsonbClrTypes, jsonClrTypes, serializerOptions)); + return this; + } + + /// + /// Enables the possibility to use TLS/SSl encryption for connections to PostgreSQL. This does not guarantee that encryption will + /// actually be used; see for more details. + /// + public NpgsqlSlimDataSourceBuilder EnableEncryption() + { + _encryptionNegotiator = static (connector, sslMode, timeout, async, isFirstAttempt) + => connector.NegotiateEncryption(sslMode, timeout, async, isFirstAttempt); + + return this; + } + + #endregion Optional opt-ins + /// /// Register a connection initializer, which allows executing arbitrary commands when a physical database connection is first opened. 
/// @@ -446,6 +493,11 @@ NpgsqlDataSourceConfiguration PrepareConfiguration() { ConnectionStringBuilder.PostProcessAndValidate(); + if (_encryptionNegotiator is null && (_userCertificateValidationCallback is not null || _clientCertificatesCallback is not null)) + { + throw new InvalidOperationException(NpgsqlStrings.EncryptionDisabled); + } + if (_periodicPasswordProvider is not null && (ConnectionStringBuilder.Password is not null || ConnectionStringBuilder.Passfile is not null)) { @@ -456,6 +508,7 @@ NpgsqlDataSourceConfiguration PrepareConfiguration() _loggerFactory is null ? NpgsqlLoggingConfiguration.NullConfiguration : new NpgsqlLoggingConfiguration(_loggerFactory, _sensitiveDataLoggingEnabled), + _encryptionNegotiator, _userCertificateValidationCallback, _clientCertificatesCallback, _periodicPasswordProvider, diff --git a/src/Npgsql/Properties/NpgsqlStrings.Designer.cs b/src/Npgsql/Properties/NpgsqlStrings.Designer.cs index 99cf71ecfc..a2335feba4 100644 --- a/src/Npgsql/Properties/NpgsqlStrings.Designer.cs +++ b/src/Npgsql/Properties/NpgsqlStrings.Designer.cs @@ -11,46 +11,32 @@ namespace Npgsql.Properties { using System; - /// - /// A strongly-typed resource class, for looking up localized strings, etc. - /// - // This class was auto-generated by the StronglyTypedResourceBuilder - // class via a tool like ResGen or Visual Studio. - // To add or remove a member, edit your .ResX file then rerun ResGen - // with the /str option, or rebuild your VS project. 
- [global::System.CodeDom.Compiler.GeneratedCodeAttribute("System.Resources.Tools.StronglyTypedResourceBuilder", "4.0.0.0")] - [global::System.Diagnostics.DebuggerNonUserCodeAttribute()] - [global::System.Runtime.CompilerServices.CompilerGeneratedAttribute()] + [System.CodeDom.Compiler.GeneratedCodeAttribute("System.Resources.Tools.StronglyTypedResourceBuilder", "4.0.0.0")] + [System.Diagnostics.DebuggerNonUserCodeAttribute()] + [System.Runtime.CompilerServices.CompilerGeneratedAttribute()] internal class NpgsqlStrings { - private static global::System.Resources.ResourceManager resourceMan; + private static System.Resources.ResourceManager resourceMan; - private static global::System.Globalization.CultureInfo resourceCulture; + private static System.Globalization.CultureInfo resourceCulture; - [global::System.Diagnostics.CodeAnalysis.SuppressMessageAttribute("Microsoft.Performance", "CA1811:AvoidUncalledPrivateCode")] + [System.Diagnostics.CodeAnalysis.SuppressMessageAttribute("Microsoft.Performance", "CA1811:AvoidUncalledPrivateCode")] internal NpgsqlStrings() { } - /// - /// Returns the cached ResourceManager instance used by this class. 
- /// - [global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)] - internal static global::System.Resources.ResourceManager ResourceManager { + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Advanced)] + internal static System.Resources.ResourceManager ResourceManager { get { - if (object.ReferenceEquals(resourceMan, null)) { - global::System.Resources.ResourceManager temp = new global::System.Resources.ResourceManager("Npgsql.Properties.NpgsqlStrings", typeof(NpgsqlStrings).Assembly); + if (object.Equals(null, resourceMan)) { + System.Resources.ResourceManager temp = new System.Resources.ResourceManager("Npgsql.Properties.NpgsqlStrings", typeof(NpgsqlStrings).Assembly); resourceMan = temp; } return resourceMan; } } - /// - /// Overrides the current thread's CurrentUICulture property for all - /// resource lookups using this strongly typed resource class. - /// - [global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)] - internal static global::System.Globalization.CultureInfo Culture { + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Advanced)] + internal static System.Globalization.CultureInfo Culture { get { return resourceCulture; } @@ -59,148 +45,106 @@ internal NpgsqlStrings() { } } - /// - /// Looks up a localized string similar to '{0}' must be positive.. - /// - internal static string ArgumentMustBePositive { + internal static string CannotUseSslVerifyWithUserCallback { get { - return ResourceManager.GetString("ArgumentMustBePositive", resourceCulture); + return ResourceManager.GetString("CannotUseSslVerifyWithUserCallback", resourceCulture); } } - /// - /// Looks up a localized string similar to Cannot read infinity value since Npgsql.DisableDateTimeInfinityConversions is enabled.. 
- /// - internal static string CannotReadInfinityValue { + internal static string CannotUseSslRootCertificateWithUserCallback { get { - return ResourceManager.GetString("CannotReadInfinityValue", resourceCulture); + return ResourceManager.GetString("CannotUseSslRootCertificateWithUserCallback", resourceCulture); } } - /// - /// Looks up a localized string similar to Cannot read interval values with non-zero months as TimeSpan, since that type doesn't support months. Consider using NodaTime Period which better corresponds to PostgreSQL interval, or read the value as NpgsqlInterval, or transform the interval to not contain months or years in PostgreSQL before reading it.. - /// - internal static string CannotReadIntervalWithMonthsAsTimeSpan { + internal static string CannotUseSslModeRequireWithoutTrustServerCertificate { get { - return ResourceManager.GetString("CannotReadIntervalWithMonthsAsTimeSpan", resourceCulture); + return ResourceManager.GetString("CannotUseSslModeRequireWithoutTrustServerCertificate", resourceCulture); } } - /// - /// Looks up a localized string similar to When registering a password provider, a password or password file may not be set.. - /// - internal static string CannotSetBothPasswordProviderAndPassword { + internal static string CannotUseTrustServerCertificate { get { - return ResourceManager.GetString("CannotSetBothPasswordProviderAndPassword", resourceCulture); + return ResourceManager.GetString("CannotUseTrustServerCertificate", resourceCulture); } } - /// - /// Looks up a localized string similar to When creating a multi-host data source, TargetSessionAttributes cannot be specified. Create without TargetSessionAttributes, and then obtain DataSource wrappers from it. Consult the docs for more information.. 
- /// - internal static string CannotSpecifyTargetSessionAttributes { + internal static string EncryptionDisabled { get { - return ResourceManager.GetString("CannotSpecifyTargetSessionAttributes", resourceCulture); + return ResourceManager.GetString("EncryptionDisabled", resourceCulture); } } - /// - /// Looks up a localized string similar to To validate server certificates, please use VerifyFull or VerifyCA instead of Require. To disable validation, explicitly set 'Trust Server Certificate' to true. See https://www.npgsql.org/doc/release-notes/6.0.html for more details.. - /// - internal static string CannotUseSslModeRequireWithoutTrustServerCertificate { + internal static string NoMultirangeTypeFound { get { - return ResourceManager.GetString("CannotUseSslModeRequireWithoutTrustServerCertificate", resourceCulture); + return ResourceManager.GetString("NoMultirangeTypeFound", resourceCulture); } } - /// - /// Looks up a localized string similar to RootCertificate cannot be used in conjunction with UserCertificateValidationCallback; when registering a validation callback, perform whatever validation you require in that callback.. - /// - internal static string CannotUseSslRootCertificateWithUserCallback { + internal static string NotSupportedOnDataSourceCommand { get { - return ResourceManager.GetString("CannotUseSslRootCertificateWithUserCallback", resourceCulture); + return ResourceManager.GetString("NotSupportedOnDataSourceCommand", resourceCulture); } } - /// - /// Looks up a localized string similar to SslMode.{0} cannot be used in conjunction with UserCertificateValidationCallback; when registering a validation callback, perform whatever validation you require in that callback.. 
- /// - internal static string CannotUseSslVerifyWithUserCallback { + internal static string NotSupportedOnDataSourceBatch { get { - return ResourceManager.GetString("CannotUseSslVerifyWithUserCallback", resourceCulture); + return ResourceManager.GetString("NotSupportedOnDataSourceBatch", resourceCulture); } } - /// - /// Looks up a localized string similar to TrustServerCertificate=true is not supported with SslMode={0}. - /// - internal static string CannotUseTrustServerCertificate { + internal static string CannotSetBothPasswordProviderAndPassword { get { - return ResourceManager.GetString("CannotUseTrustServerCertificate", resourceCulture); + return ResourceManager.GetString("CannotSetBothPasswordProviderAndPassword", resourceCulture); } } - /// - /// Looks up a localized string similar to ValidationRootCertificateCallback cannot be used in conjunction with UserCertificateValidationCallback; when registering a validation callback, perform whatever validation you require in that callback.. - /// - internal static string CannotUseValidationRootCertificateCallbackWithUserCallback { + internal static string PasswordProviderMissing { get { - return ResourceManager.GetString("CannotUseValidationRootCertificateCallbackWithUserCallback", resourceCulture); + return ResourceManager.GetString("PasswordProviderMissing", resourceCulture); } } - /// - /// Looks up a localized string similar to No multirange type could be found in the database for subtype {0}.. - /// - internal static string NoMultirangeTypeFound { + internal static string ArgumentMustBePositive { get { - return ResourceManager.GetString("NoMultirangeTypeFound", resourceCulture); + return ResourceManager.GetString("ArgumentMustBePositive", resourceCulture); } } - /// - /// Looks up a localized string similar to Connection and transaction access is not supported on batches created from DbDataSource.. 
- /// - internal static string NotSupportedOnDataSourceBatch { + internal static string CannotSpecifyTargetSessionAttributes { get { - return ResourceManager.GetString("NotSupportedOnDataSourceBatch", resourceCulture); + return ResourceManager.GetString("CannotSpecifyTargetSessionAttributes", resourceCulture); } } - /// - /// Looks up a localized string similar to Connection and transaction access is not supported on commands created from DbDataSource.. - /// - internal static string NotSupportedOnDataSourceCommand { + internal static string CannotReadIntervalWithMonthsAsTimeSpan { get { - return ResourceManager.GetString("NotSupportedOnDataSourceCommand", resourceCulture); + return ResourceManager.GetString("CannotReadIntervalWithMonthsAsTimeSpan", resourceCulture); } } - /// - /// Looks up a localized string similar to The right type of password provider (sync or async) was not found.. - /// - internal static string PasswordProviderMissing { + internal static string PositionalParameterAfterNamed { get { - return ResourceManager.GetString("PasswordProviderMissing", resourceCulture); + return ResourceManager.GetString("PositionalParameterAfterNamed", resourceCulture); } } - /// - /// Looks up a localized string similar to When using CommandType.StoredProcedure, all positional parameters must come before named parameters.. - /// - internal static string PositionalParameterAfterNamed { + internal static string CannotReadInfinityValue { get { - return ResourceManager.GetString("PositionalParameterAfterNamed", resourceCulture); + return ResourceManager.GetString("CannotReadInfinityValue", resourceCulture); } } - /// - /// Looks up a localized string similar to Both sync and async connection initializers must be provided.. 
- /// internal static string SyncAndAsyncConnectionInitializersRequired { get { return ResourceManager.GetString("SyncAndAsyncConnectionInitializersRequired", resourceCulture); } } + + internal static string CannotUseValidationRootCertificateCallbackWithUserCallback { + get { + return ResourceManager.GetString("CannotUseValidationRootCertificateCallbackWithUserCallback", resourceCulture); + } + } } } diff --git a/src/Npgsql/Properties/NpgsqlStrings.resx b/src/Npgsql/Properties/NpgsqlStrings.resx index b52726e42d..25fb8f1501 100644 --- a/src/Npgsql/Properties/NpgsqlStrings.resx +++ b/src/Npgsql/Properties/NpgsqlStrings.resx @@ -30,6 +30,9 @@ TrustServerCertificate=true is not supported with SslMode={0} + + NpgsqlSlimDataSourceBuilder is being used, and encryption hasn't been enabled, call EnableEncryption() on NpgsqlSlimDataSourceBuilder to enable it. + No multirange type could be found in the database for subtype {0}. diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index 11b765fce2..f33e426b0a 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -4,8 +4,7 @@ Npgsql.NpgsqlBinaryImporter.WriteRow(params object?[]! values) -> void Npgsql.NpgsqlBinaryImporter.WriteRowAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken), params object?[]! values) -> System.Threading.Tasks.Task! Npgsql.NpgsqlDataSourceBuilder.UseRootCertificate(System.Security.Cryptography.X509Certificates.X509Certificate2? rootCertificate) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.UseRootCertificateCallback(System.Func? rootCertificateCallback) -> Npgsql.NpgsqlDataSourceBuilder! -Npgsql.NpgsqlJsonExtensions -Npgsql.NpgsqlRangeExtensions +Npgsql.NpgsqlDataSourceBuilder.UseSystemTextJson(System.Text.Json.JsonSerializerOptions? serializerOptions = null, System.Type![]? jsonbClrTypes = null, System.Type![]? 
jsonClrTypes = null) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder Npgsql.NpgsqlSlimDataSourceBuilder.AddTypeResolverFactory(Npgsql.Internal.TypeHandling.TypeHandlerResolverFactory! resolverFactory) -> void Npgsql.NpgsqlSlimDataSourceBuilder.Build() -> Npgsql.NpgsqlDataSource! @@ -14,7 +13,9 @@ Npgsql.NpgsqlSlimDataSourceBuilder.ConnectionString.get -> string! Npgsql.NpgsqlSlimDataSourceBuilder.ConnectionStringBuilder.get -> Npgsql.NpgsqlConnectionStringBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.DefaultNameTranslator.get -> Npgsql.INpgsqlNameTranslator! Npgsql.NpgsqlSlimDataSourceBuilder.DefaultNameTranslator.set -> void +Npgsql.NpgsqlSlimDataSourceBuilder.EnableEncryption() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableParameterLogging(bool parameterLoggingEnabled = true) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableRanges() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.MapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! Npgsql.NpgsqlSlimDataSourceBuilder.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! Npgsql.NpgsqlSlimDataSourceBuilder.MapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! @@ -30,9 +31,8 @@ Npgsql.NpgsqlSlimDataSourceBuilder.UsePeriodicPasswordProvider(System.Func? connectionInitializer, System.Func? connectionInitializerAsync) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UseRootCertificate(System.Security.Cryptography.X509Certificates.X509Certificate2? rootCertificate) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UseRootCertificateCallback(System.Func? rootCertificateCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! 
+Npgsql.NpgsqlSlimDataSourceBuilder.UseSystemTextJson(System.Text.Json.JsonSerializerOptions? serializerOptions = null, System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UseUserCertificateValidationCallback(System.Net.Security.RemoteCertificateValidationCallback! userCertificateValidationCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! -static Npgsql.NpgsqlJsonExtensions.UseSystemTextJson(this Npgsql.TypeMapping.INpgsqlTypeMapper! mapper, System.Text.Json.JsonSerializerOptions? serializerOptions = null, System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! -static Npgsql.NpgsqlRangeExtensions.UseRange(this Npgsql.TypeMapping.INpgsqlTypeMapper! mapper) -> Npgsql.TypeMapping.INpgsqlTypeMapper! *REMOVED*static NpgsqlTypes.NpgsqlBox.Parse(string! s) -> NpgsqlTypes.NpgsqlBox *REMOVED*static NpgsqlTypes.NpgsqlCircle.Parse(string! s) -> NpgsqlTypes.NpgsqlCircle *REMOVED*static NpgsqlTypes.NpgsqlLine.Parse(string! s) -> NpgsqlTypes.NpgsqlLine diff --git a/src/Npgsql/TypeMapping/NpgsqlJsonExtensions.cs b/src/Npgsql/TypeMapping/NpgsqlJsonExtensions.cs deleted file mode 100644 index 2a84cc4c02..0000000000 --- a/src/Npgsql/TypeMapping/NpgsqlJsonExtensions.cs +++ /dev/null @@ -1,34 +0,0 @@ -using System; -using System.Text.Json; -using Npgsql.TypeMapping; -using NpgsqlTypes; - -// ReSharper disable once CheckNamespace -namespace Npgsql; - -/// -/// Extension allowing adding the System.Text.Json plugin to an Npgsql type mapper. -/// -public static class NpgsqlJsonExtensions -{ - /// - /// Sets up System.Text.Json mappings for the PostgreSQL json and jsonb types. - /// - /// The type mapper to set up. - /// Options to customize JSON serialization and deserialization. - /// - /// A list of CLR types to map to PostgreSQL jsonb (no need to specify ). 
- /// - /// - /// A list of CLR types to map to PostgreSQL json (no need to specify ). - /// - public static INpgsqlTypeMapper UseSystemTextJson( - this INpgsqlTypeMapper mapper, - JsonSerializerOptions? serializerOptions = null, - Type[]? jsonbClrTypes = null, - Type[]? jsonClrTypes = null) - { - mapper.AddTypeResolverFactory(new JsonTypeHandlerResolverFactory(jsonbClrTypes, jsonClrTypes, serializerOptions)); - return mapper; - } -} diff --git a/src/Npgsql/TypeMapping/NpgsqlRangeExtensions.cs b/src/Npgsql/TypeMapping/NpgsqlRangeExtensions.cs deleted file mode 100644 index 16c9533a61..0000000000 --- a/src/Npgsql/TypeMapping/NpgsqlRangeExtensions.cs +++ /dev/null @@ -1,20 +0,0 @@ -using Npgsql.TypeMapping; - -// ReSharper disable once CheckNamespace -namespace Npgsql; - -/// -/// Extension allowing adding range and multirange mappings to an Npgsql type mapper. -/// -public static class NpgsqlRangeExtensions -{ - /// - /// Sets up mappings for the PostgreSQL range and multirange types. - /// - /// The type mapper to set up. - public static INpgsqlTypeMapper UseRange(this INpgsqlTypeMapper mapper) - { - mapper.AddTypeResolverFactory(new RangeTypeHandlerResolverFactory()); - return mapper; - } -} diff --git a/test/Npgsql.NodaTime.Tests/LegacyNodaTimeTests.cs b/test/Npgsql.NodaTime.Tests/LegacyNodaTimeTests.cs index 0792671448..67c4202ff4 100644 --- a/test/Npgsql.NodaTime.Tests/LegacyNodaTimeTests.cs +++ b/test/Npgsql.NodaTime.Tests/LegacyNodaTimeTests.cs @@ -72,7 +72,6 @@ public async Task Setup() // Clear any previous cached mappings/handlers in case tests were executed before the legacy flag was set. 
NpgsqlConnection.GlobalTypeMapper.Reset(); - NpgsqlConnection.GlobalTypeMapper.UseRange(); NpgsqlConnection.GlobalTypeMapper.UseNodaTime(); await using var connection = await OpenConnectionAsync(); await connection.ReloadTypesAsync(); diff --git a/test/Npgsql.NodaTime.Tests/NodaTimeSetupFixture.cs b/test/Npgsql.NodaTime.Tests/NodaTimeSetupFixture.cs index fdd68ba676..25ab4f58cd 100644 --- a/test/Npgsql.NodaTime.Tests/NodaTimeSetupFixture.cs +++ b/test/Npgsql.NodaTime.Tests/NodaTimeSetupFixture.cs @@ -10,7 +10,7 @@ public class NodaTimeSetupFixture { #pragma warning disable CS0618 // GlobalTypeMapper is obsolete [OneTimeSetUp] - public void OneTimeSetUp() => NpgsqlConnection.GlobalTypeMapper.UseRange().UseNodaTime(); + public void OneTimeSetUp() => NpgsqlConnection.GlobalTypeMapper.UseNodaTime(); [OneTimeTearDown] public void OneTimeTearDown() => NpgsqlConnection.GlobalTypeMapper.Reset(); From dbfae1a7e6d6e7547902e47e0c1fe136f1cf7b5f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 9 Mar 2023 01:01:05 +0100 Subject: [PATCH 098/761] Bump Newtonsoft.Json from 13.0.2 to 13.0.3 (#4981) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 3674055471..2c9572810e 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -16,7 +16,7 @@ - + From 5461fa883c29fde74d9e00c9dac8556f0fe4a4e9 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Thu, 9 Mar 2023 22:30:06 +0300 Subject: [PATCH 099/761] =?UTF-8?q?Allow=20returning=20non-empty=20NpgsqlC?= =?UTF-8?q?onection.DataSource=20with=20closed=20conn=E2=80=A6=20(#4987)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Allow returning non-empty NpgsqlConection.DataSource with closed connection if single host Closes #4985 --- src/Npgsql/NpgsqlConnection.cs | 2 +- src/Npgsql/NpgsqlConnectionStringBuilder.cs | 6 +++--- 
test/Npgsql.Tests/ConnectionTests.cs | 18 ++++++++++++------ 3 files changed, 16 insertions(+), 10 deletions(-) diff --git a/src/Npgsql/NpgsqlConnection.cs b/src/Npgsql/NpgsqlConnection.cs index b9d97cd374..1a42a961c8 100644 --- a/src/Npgsql/NpgsqlConnection.cs +++ b/src/Npgsql/NpgsqlConnection.cs @@ -449,7 +449,7 @@ public override string ConnectionString /// The name of the database server (host and port). If the connection uses a Unix-domain socket, /// the path to that socket is returned. The default value is the empty string. /// - public override string DataSource => Connector?.Settings.DataSourceCached ?? string.Empty; + public override string DataSource => Connector?.Settings.DataSourceCached ?? _dataSource?.Settings.DataSourceCached ?? string.Empty; /// /// Whether to use Windows integrated security to log in. diff --git a/src/Npgsql/NpgsqlConnectionStringBuilder.cs b/src/Npgsql/NpgsqlConnectionStringBuilder.cs index b655c4a846..3370056a94 100644 --- a/src/Npgsql/NpgsqlConnectionStringBuilder.cs +++ b/src/Npgsql/NpgsqlConnectionStringBuilder.cs @@ -27,9 +27,9 @@ public sealed partial class NpgsqlConnectionStringBuilder : DbConnectionStringBu /// string? _dataSourceCached; - internal string DataSourceCached - => _dataSourceCached ??= _host is null - ? string.Empty + internal string? DataSourceCached + => _dataSourceCached ??= _host is null || _host.Contains(',') + ? null : IsUnixSocket(_host, _port, out var socketPath, replaceForAbstract: false) ? 
socketPath : $"tcp://{_host}:{_port}"; diff --git a/test/Npgsql.Tests/ConnectionTests.cs b/test/Npgsql.Tests/ConnectionTests.cs index fd76a31284..dd8d06cf11 100644 --- a/test/Npgsql.Tests/ConnectionTests.cs +++ b/test/Npgsql.Tests/ConnectionTests.cs @@ -500,17 +500,23 @@ public async Task Unix_abstract_domain_socket() } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/903")] - public async Task DataSource_property() + public void DataSource_property() { using var conn = new NpgsqlConnection(); Assert.That(conn.DataSource, Is.EqualTo(string.Empty)); - conn.ConnectionString = ConnectionString; - Assert.That(conn.DataSource, Is.EqualTo(string.Empty)); + var csb = new NpgsqlConnectionStringBuilder(ConnectionString); - await conn.OpenAsync(); - await using var _ = await conn.BeginTransactionAsync(); - Assert.That(conn.DataSource, Is.EqualTo($"tcp://{conn.Host}:{conn.Port}")); + conn.ConnectionString = csb.ConnectionString; + Assert.That(conn.DataSource, Is.EqualTo($"tcp://{csb.Host}:{csb.Port}")); + + // Multiplexing isn't supported with multiple hosts + if (IsMultiplexing) + return; + + csb.Host = "127.0.0.1, 127.0.0.2"; + conn.ConnectionString = csb.ConnectionString; + Assert.That(conn.DataSource, Is.EqualTo(string.Empty)); } #region Server version From f0fc46cfe29e375a97643d245f25a02873f859f4 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Fri, 10 Mar 2023 17:06:03 +0300 Subject: [PATCH 100/761] Add a separate resolver for records (#4971) Part of #4965 --- .../Internal/TypeHandlers/RecordHandler.cs | 2 - .../TypeHandlers/UnsupportedHandler.cs | 48 +++++++++++++++++++ src/Npgsql/NpgsqlDataSourceBuilder.cs | 1 + src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 9 ++++ src/Npgsql/PublicAPI.Unshipped.txt | 1 + .../TypeMapping/BuiltInTypeHandlerResolver.cs | 4 +- .../TypeMapping/RecordTypeHandlerResolver.cs | 31 ++++++++++++ .../RecordTypeHandlerResolverFactory.cs | 22 +++++++++ test/Npgsql.Tests/Types/MiscTypeTests.cs | 28 +++++++++++ 9 files changed, 141 
insertions(+), 5 deletions(-) create mode 100644 src/Npgsql/Internal/TypeHandlers/UnsupportedHandler.cs create mode 100644 src/Npgsql/TypeMapping/RecordTypeHandlerResolver.cs create mode 100644 src/Npgsql/TypeMapping/RecordTypeHandlerResolverFactory.cs diff --git a/src/Npgsql/Internal/TypeHandlers/RecordHandler.cs b/src/Npgsql/Internal/TypeHandlers/RecordHandler.cs index 15b778fc10..9a255e02d3 100644 --- a/src/Npgsql/Internal/TypeHandlers/RecordHandler.cs +++ b/src/Npgsql/Internal/TypeHandlers/RecordHandler.cs @@ -1,13 +1,11 @@ using System; using System.Linq; -using System.Runtime.CompilerServices; using System.Threading; using System.Threading.Tasks; using Npgsql.BackendMessages; using Npgsql.Internal.TypeHandling; using Npgsql.Internal.TypeMapping; using Npgsql.PostgresTypes; -using Npgsql.TypeMapping; namespace Npgsql.Internal.TypeHandlers; diff --git a/src/Npgsql/Internal/TypeHandlers/UnsupportedHandler.cs b/src/Npgsql/Internal/TypeHandlers/UnsupportedHandler.cs new file mode 100644 index 0000000000..2d1f22f893 --- /dev/null +++ b/src/Npgsql/Internal/TypeHandlers/UnsupportedHandler.cs @@ -0,0 +1,48 @@ +using System; +using System.Threading; +using System.Threading.Tasks; +using Npgsql.BackendMessages; +using Npgsql.Internal.TypeHandling; +using Npgsql.PostgresTypes; + +namespace Npgsql.Internal.TypeHandlers; + +sealed class UnsupportedHandler : NpgsqlTypeHandler +{ + readonly string _exceptionMessage; + + public UnsupportedHandler(PostgresType postgresType, string exceptionMessage) : base(postgresType) + => _exceptionMessage = exceptionMessage; + + public override ValueTask ReadAsObject(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) + => throw new NotSupportedException(_exceptionMessage); + + public override int ValidateObjectAndGetLength(object value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) + => throw new NotSupportedException(_exceptionMessage); + + public override Task WriteObjectWithLength(object? value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, + CancellationToken cancellationToken = default) + => throw new NotSupportedException(_exceptionMessage); + + protected internal override ValueTask ReadCustom(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) + => throw new NotSupportedException(_exceptionMessage); + + protected override Task WriteWithLengthCustom(TAny value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, + CancellationToken cancellationToken) + => throw new NotSupportedException(_exceptionMessage); + + protected internal override int ValidateAndGetLengthCustom(TAny value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) + => throw new NotSupportedException(_exceptionMessage); + + public override Type GetFieldType(FieldDescription? 
fieldDescription = null) + => throw new NotSupportedException(_exceptionMessage); + + public override NpgsqlTypeHandler CreateArrayHandler(PostgresArrayType pgArrayType, ArrayNullabilityMode arrayNullabilityMode) + => throw new NotSupportedException(_exceptionMessage); + + public override NpgsqlTypeHandler CreateRangeHandler(PostgresType pgRangeType) + => throw new NotSupportedException(_exceptionMessage); + + public override NpgsqlTypeHandler CreateMultirangeHandler(PostgresMultirangeType pgMultirangeType) + => throw new NotSupportedException(_exceptionMessage); +} diff --git a/src/Npgsql/NpgsqlDataSourceBuilder.cs b/src/Npgsql/NpgsqlDataSourceBuilder.cs index 525f00df4b..e10a8f5005 100644 --- a/src/Npgsql/NpgsqlDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlDataSourceBuilder.cs @@ -315,5 +315,6 @@ void AddDefaultFeatures() _internalBuilder.EnableEncryption(); _internalBuilder.AddDefaultTypeResolverFactory(new JsonTypeHandlerResolverFactory()); _internalBuilder.AddDefaultTypeResolverFactory(new RangeTypeHandlerResolverFactory()); + _internalBuilder.AddDefaultTypeResolverFactory(new RecordTypeHandlerResolverFactory()); } } diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index 7c3fae98bc..b95d9b667f 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -408,6 +408,15 @@ public NpgsqlSlimDataSourceBuilder UseSystemTextJson( return this; } + /// + /// Sets up mappings for the PostgreSQL record type. + /// + public NpgsqlSlimDataSourceBuilder EnableRecords() + { + AddTypeResolverFactory(new RecordTypeHandlerResolverFactory()); + return this; + } + /// /// Enables the possibility to use TLS/SSl encryption for connections to PostgreSQL. This does not guarantee that encryption will /// actually be used; see for more details. 
diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index f33e426b0a..4a34fad271 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -1,4 +1,5 @@ #nullable enable +Npgsql.NpgsqlSlimDataSourceBuilder.EnableRecords() -> Npgsql.NpgsqlSlimDataSourceBuilder! override Npgsql.NpgsqlBatch.Dispose() -> void Npgsql.NpgsqlBinaryImporter.WriteRow(params object?[]! values) -> void Npgsql.NpgsqlBinaryImporter.WriteRowAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken), params object?[]! values) -> System.Threading.Tasks.Task! diff --git a/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs b/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs index 33cacd8b6a..d4a109755a 100644 --- a/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs +++ b/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs @@ -228,7 +228,6 @@ sealed class BuiltInTypeHandlerResolver : TypeHandlerResolver UuidHandler? _uuidHandler; BitStringHandler? _bitVaryingHandler; BitStringHandler? _bitHandler; - RecordHandler? _recordHandler; VoidHandler? _voidHandler; HstoreHandler? 
_hstoreHandler; @@ -344,7 +343,7 @@ internal BuiltInTypeHandlerResolver(NpgsqlConnector connector) "pg_lsn" => PgLsnHandler(), "tid" => TidHandler(), "char" => InternalCharHandler(), - "record" => RecordHandler(), + "record" => new UnsupportedHandler(PgType("record"), $"Records aren't supported; please call {nameof(NpgsqlSlimDataSourceBuilder.EnableRecords)} on {nameof(NpgsqlSlimDataSourceBuilder)} to enable records."), "void" => VoidHandler(), "unknown" => UnknownHandler(), @@ -715,7 +714,6 @@ static DateTimeKind GetMultirangeKind(IList> multirange) NpgsqlTypeHandler PgLsnHandler() => _pgLsnHandler ??= new PgLsnHandler(PgType("pg_lsn")); NpgsqlTypeHandler TidHandler() => _tidHandler ??= new TidHandler(PgType("tid")); NpgsqlTypeHandler InternalCharHandler() => _internalCharHandler ??= new InternalCharHandler(PgType("char")); - NpgsqlTypeHandler RecordHandler() => _recordHandler ??= new RecordHandler(PgType("record"), _connector.TypeMapper); NpgsqlTypeHandler VoidHandler() => _voidHandler ??= new VoidHandler(PgType("void")); NpgsqlTypeHandler UnknownHandler() => _unknownHandler ??= new UnknownTypeHandler(_connector.TextEncoding); diff --git a/src/Npgsql/TypeMapping/RecordTypeHandlerResolver.cs b/src/Npgsql/TypeMapping/RecordTypeHandlerResolver.cs new file mode 100644 index 0000000000..438e91cf9d --- /dev/null +++ b/src/Npgsql/TypeMapping/RecordTypeHandlerResolver.cs @@ -0,0 +1,31 @@ +using System; +using Npgsql.Internal; +using Npgsql.Internal.TypeHandlers; +using Npgsql.Internal.TypeHandling; +using Npgsql.Internal.TypeMapping; +using Npgsql.PostgresTypes; + +namespace Npgsql.TypeMapping; + +sealed class RecordTypeHandlerResolver : TypeHandlerResolver +{ + readonly TypeMapper _typeMapper; + readonly NpgsqlDatabaseInfo _databaseInfo; + + RecordHandler? 
_recordHandler; + + public RecordTypeHandlerResolver(TypeMapper typeMapper, NpgsqlConnector connector) + { + _typeMapper = typeMapper; + _databaseInfo = connector.DatabaseInfo; + } + + public override NpgsqlTypeHandler? ResolveByDataTypeName(string typeName) + => typeName == "record" ? GetHandler() : null; + + public override NpgsqlTypeHandler? ResolveByClrType(Type type) => null; + + public override TypeMappingInfo? GetMappingByPostgresType(PostgresType type) => null; + + NpgsqlTypeHandler GetHandler() => _recordHandler ??= new RecordHandler(_databaseInfo.GetPostgresTypeByName("record"), _typeMapper); +} diff --git a/src/Npgsql/TypeMapping/RecordTypeHandlerResolverFactory.cs b/src/Npgsql/TypeMapping/RecordTypeHandlerResolverFactory.cs new file mode 100644 index 0000000000..a29a35d555 --- /dev/null +++ b/src/Npgsql/TypeMapping/RecordTypeHandlerResolverFactory.cs @@ -0,0 +1,22 @@ +using System; +using Npgsql.Internal; +using Npgsql.Internal.TypeHandling; +using Npgsql.Internal.TypeMapping; + +namespace Npgsql.TypeMapping; + +sealed class RecordTypeHandlerResolverFactory : TypeHandlerResolverFactory +{ + public override TypeHandlerResolver Create(TypeMapper typeMapper, NpgsqlConnector connector) + => new RecordTypeHandlerResolver(typeMapper, connector); + + // Records aren't mapped to anything + public override string? GetDataTypeNameByClrType(Type clrType) + => null; + + public override string? GetDataTypeNameByValueDependentValue(object value) + => null; + + public override TypeMappingInfo? 
GetMappingByDataTypeName(string dataTypeName) + => null; +} diff --git a/test/Npgsql.Tests/Types/MiscTypeTests.cs b/test/Npgsql.Tests/Types/MiscTypeTests.cs index f0ab30fb47..9c0667c645 100644 --- a/test/Npgsql.Tests/Types/MiscTypeTests.cs +++ b/test/Npgsql.Tests/Types/MiscTypeTests.cs @@ -3,6 +3,7 @@ using System.Threading.Tasks; using NpgsqlTypes; using NUnit.Framework; +using NUnit.Framework.Constraints; namespace Npgsql.Tests.Types; @@ -131,6 +132,33 @@ public async Task Read_Record_as_Tuple() public Task Write_Record_is_not_supported() => AssertTypeUnsupportedWrite(new object[] { 1, "foo" }, "record"); + [Test] + public async Task Records_supported_only_with_EnableRecords([Values] bool withMappings) + { + const string unsupportedMessage = + "Records aren't supported; please call EnableRecords on NpgsqlSlimDataSourceBuilder to enable records."; + Func assertExpr = () => withMappings + ? Throws.Nothing + : Throws.Exception + .TypeOf() + .With.Property("Message").EqualTo(unsupportedMessage); + + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + if (withMappings) + dataSourceBuilder.EnableRecords(); + var dataSource = dataSourceBuilder.Build(); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = conn.CreateCommand(); + + // RecordHandler doesn't support writing, so we only check for reading + cmd.CommandText = "SELECT ('one'::text, 2)"; + await using var reader = await cmd.ExecuteReaderAsync(); + await reader.ReadAsync(); + + Assert.That(() => reader.GetValue(0), assertExpr()); + Assert.That(() => reader.GetFieldValue(0), assertExpr()); + } + #endregion Record [Test, Description("Makes sure that setting DbType.Object makes Npgsql infer the type")] From bfbd9036123523920b65e83f6e7b490f3925cd39 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Fri, 10 Mar 2023 18:37:44 +0300 Subject: [PATCH 101/761] Always throw NpgsqlException on timeout (#4990) --- src/Npgsql/ThrowHelper.cs | 4 ++-- 
src/Npgsql/Util/PGUtil.cs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Npgsql/ThrowHelper.cs b/src/Npgsql/ThrowHelper.cs index 14478be7be..a0d33f0050 100644 --- a/src/Npgsql/ThrowHelper.cs +++ b/src/Npgsql/ThrowHelper.cs @@ -101,6 +101,6 @@ internal static void ThrowNotSupportedException(string message) => throw new NotSupportedException(message); [DoesNotReturn] - internal static void ThrowTimeoutException() - => throw new TimeoutException(); + internal static void ThrowNpgsqlExceptionWithInnerTimeoutException(string message) + => throw new NpgsqlException(message, new TimeoutException()); } \ No newline at end of file diff --git a/src/Npgsql/Util/PGUtil.cs b/src/Npgsql/Util/PGUtil.cs index 81a81d586b..006da40d91 100644 --- a/src/Npgsql/Util/PGUtil.cs +++ b/src/Npgsql/Util/PGUtil.cs @@ -182,7 +182,7 @@ internal NpgsqlTimeout(TimeSpan expiration) internal void Check() { if (HasExpired) - ThrowHelper.ThrowTimeoutException(); + ThrowHelper.ThrowNpgsqlExceptionWithInnerTimeoutException("The operation has timed out"); } internal void CheckAndApply(NpgsqlConnector connector) From ee89f6e2e60a2dbba23594e5a1d7388fd622b340 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Sun, 12 Mar 2023 17:11:58 +0300 Subject: [PATCH 102/761] Make an exception for COPY TO queries with NpgsqlDataReader more clear --- src/Npgsql/NpgsqlDataReader.cs | 3 +++ test/Npgsql.Tests/CopyTests.cs | 16 ++++++++++++++-- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 18f6f50019..3ddaa70e4f 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -491,6 +491,9 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo throw Connector.Break(new NotSupportedException( "COPY isn't supported in regular command execution - see https://www.npgsql.org/doc/copy.html for documentation on COPY with Npgsql. 
" + "If you are trying to execute a SQL script created by pg_dump, pass the '--inserts' switch to disable generating COPY statements.")); + case BackendMessageCode.CopyOutResponse: + throw Connector.Break(new NotSupportedException( + "COPY isn't supported in regular command execution - see https://www.npgsql.org/doc/copy.html for documentation on COPY with Npgsql.")); default: throw Connector.UnexpectedMessageReceived(msg.Code); } diff --git a/test/Npgsql.Tests/CopyTests.cs b/test/Npgsql.Tests/CopyTests.cs index 87f5159dd5..7b5095ef55 100644 --- a/test/Npgsql.Tests/CopyTests.cs +++ b/test/Npgsql.Tests/CopyTests.cs @@ -1085,17 +1085,29 @@ public async Task Within_transaction() } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4199")] - public async Task Copy_is_not_supported_in_regular_command_execution() + public async Task Copy_from_is_not_supported_in_regular_command_execution() { // Run in a separate pool to protect other queries in multiplexing // because we're going to break the connection on CopyInResponse await using var dataSource = CreateDataSource(); - using var conn = await dataSource.OpenConnectionAsync(); + await using var conn = await dataSource.OpenConnectionAsync(); var table = await CreateTempTable(conn, "foo INT"); Assert.That(() => conn.ExecuteNonQuery($@"COPY {table} (foo) FROM stdin"), Throws.Exception.TypeOf()); } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4974")] + public async Task Copy_to_is_not_supported_in_regular_command_execution() + { + // Run in a separate pool to protect other queries in multiplexing + // because we're going to break the connection on CopyInResponse + await using var dataSource = CreateDataSource(); + await using var conn = await dataSource.OpenConnectionAsync(); + var table = await CreateTempTable(conn, "foo INT"); + + Assert.That(() => conn.ExecuteNonQuery($@"COPY {table} (foo) TO stdin"), Throws.Exception.TypeOf()); + } + #endregion #region Utils From 
ba0747642796fcd070e5e03ee779085c355896a5 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Sun, 12 Mar 2023 17:12:57 +0300 Subject: [PATCH 103/761] Revert "Make an exception for COPY TO queries with NpgsqlDataReader more clear" This reverts commit ee89f6e2e60a2dbba23594e5a1d7388fd622b340. --- src/Npgsql/NpgsqlDataReader.cs | 3 --- test/Npgsql.Tests/CopyTests.cs | 16 ++-------------- 2 files changed, 2 insertions(+), 17 deletions(-) diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 3ddaa70e4f..18f6f50019 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -491,9 +491,6 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo throw Connector.Break(new NotSupportedException( "COPY isn't supported in regular command execution - see https://www.npgsql.org/doc/copy.html for documentation on COPY with Npgsql. " + "If you are trying to execute a SQL script created by pg_dump, pass the '--inserts' switch to disable generating COPY statements.")); - case BackendMessageCode.CopyOutResponse: - throw Connector.Break(new NotSupportedException( - "COPY isn't supported in regular command execution - see https://www.npgsql.org/doc/copy.html for documentation on COPY with Npgsql.")); default: throw Connector.UnexpectedMessageReceived(msg.Code); } diff --git a/test/Npgsql.Tests/CopyTests.cs b/test/Npgsql.Tests/CopyTests.cs index 7b5095ef55..87f5159dd5 100644 --- a/test/Npgsql.Tests/CopyTests.cs +++ b/test/Npgsql.Tests/CopyTests.cs @@ -1085,29 +1085,17 @@ public async Task Within_transaction() } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4199")] - public async Task Copy_from_is_not_supported_in_regular_command_execution() + public async Task Copy_is_not_supported_in_regular_command_execution() { // Run in a separate pool to protect other queries in multiplexing // because we're going to break the connection on CopyInResponse await using var dataSource = CreateDataSource(); - await 
using var conn = await dataSource.OpenConnectionAsync(); + using var conn = await dataSource.OpenConnectionAsync(); var table = await CreateTempTable(conn, "foo INT"); Assert.That(() => conn.ExecuteNonQuery($@"COPY {table} (foo) FROM stdin"), Throws.Exception.TypeOf()); } - [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4974")] - public async Task Copy_to_is_not_supported_in_regular_command_execution() - { - // Run in a separate pool to protect other queries in multiplexing - // because we're going to break the connection on CopyInResponse - await using var dataSource = CreateDataSource(); - await using var conn = await dataSource.OpenConnectionAsync(); - var table = await CreateTempTable(conn, "foo INT"); - - Assert.That(() => conn.ExecuteNonQuery($@"COPY {table} (foo) TO stdin"), Throws.Exception.TypeOf()); - } - #endregion #region Utils From 24f86ed2572ff48158b8e93aa3547d8380f4c754 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Tue, 14 Mar 2023 08:01:20 +0300 Subject: [PATCH 104/761] Make an exception for COPY TO queries with NpgsqlDataReader more clear (#4992) Closes #4974 --- src/Npgsql/NpgsqlDataReader.cs | 3 +++ test/Npgsql.Tests/CopyTests.cs | 16 ++++++++++++++-- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 18f6f50019..3ddaa70e4f 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -491,6 +491,9 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo throw Connector.Break(new NotSupportedException( "COPY isn't supported in regular command execution - see https://www.npgsql.org/doc/copy.html for documentation on COPY with Npgsql. 
" + "If you are trying to execute a SQL script created by pg_dump, pass the '--inserts' switch to disable generating COPY statements.")); + case BackendMessageCode.CopyOutResponse: + throw Connector.Break(new NotSupportedException( + "COPY isn't supported in regular command execution - see https://www.npgsql.org/doc/copy.html for documentation on COPY with Npgsql.")); default: throw Connector.UnexpectedMessageReceived(msg.Code); } diff --git a/test/Npgsql.Tests/CopyTests.cs b/test/Npgsql.Tests/CopyTests.cs index 87f5159dd5..7b5095ef55 100644 --- a/test/Npgsql.Tests/CopyTests.cs +++ b/test/Npgsql.Tests/CopyTests.cs @@ -1085,17 +1085,29 @@ public async Task Within_transaction() } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4199")] - public async Task Copy_is_not_supported_in_regular_command_execution() + public async Task Copy_from_is_not_supported_in_regular_command_execution() { // Run in a separate pool to protect other queries in multiplexing // because we're going to break the connection on CopyInResponse await using var dataSource = CreateDataSource(); - using var conn = await dataSource.OpenConnectionAsync(); + await using var conn = await dataSource.OpenConnectionAsync(); var table = await CreateTempTable(conn, "foo INT"); Assert.That(() => conn.ExecuteNonQuery($@"COPY {table} (foo) FROM stdin"), Throws.Exception.TypeOf()); } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4974")] + public async Task Copy_to_is_not_supported_in_regular_command_execution() + { + // Run in a separate pool to protect other queries in multiplexing + // because we're going to break the connection on CopyInResponse + await using var dataSource = CreateDataSource(); + await using var conn = await dataSource.OpenConnectionAsync(); + var table = await CreateTempTable(conn, "foo INT"); + + Assert.That(() => conn.ExecuteNonQuery($@"COPY {table} (foo) TO stdin"), Throws.Exception.TypeOf()); + } + #endregion #region Utils From 
99e7324ffce965558fa2a46337e890c1a6983e94 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Wed, 15 Mar 2023 01:40:10 +0300 Subject: [PATCH 105/761] Remove EndUserAction on pg error from ReadMessageLong (#4924) --- src/Npgsql/BackendMessages/AuthenticationMessages.cs | 4 ++-- src/Npgsql/BackendMessages/BackendKeyDataMessage.cs | 4 ++-- src/Npgsql/Internal/NpgsqlConnector.cs | 6 ------ src/Npgsql/Internal/TypeHandling/TypeMappingInfo.cs | 2 +- src/Npgsql/NameTranslation/NpgsqlNullNameTranslator.cs | 2 +- src/Npgsql/NameTranslation/NpgsqlSnakeCaseNameTranslator.cs | 2 +- src/Npgsql/NpgsqlBinaryImporter.cs | 4 ++-- src/Npgsql/NpgsqlDataSource.cs | 2 +- src/Npgsql/NpgsqlRawCopyStream.cs | 3 +-- src/Npgsql/NpgsqlTypes/PgNameAttribute.cs | 6 ++---- src/Npgsql/Util/PGUtil.cs | 2 +- 11 files changed, 14 insertions(+), 23 deletions(-) diff --git a/src/Npgsql/BackendMessages/AuthenticationMessages.cs b/src/Npgsql/BackendMessages/AuthenticationMessages.cs index 415f2a8577..31a6c06e24 100644 --- a/src/Npgsql/BackendMessages/AuthenticationMessages.cs +++ b/src/Npgsql/BackendMessages/AuthenticationMessages.cs @@ -40,7 +40,7 @@ sealed class AuthenticationMD5PasswordMessage : AuthenticationRequestMessage { internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.AuthenticationMD5Password; - internal byte[] Salt { get; private set; } + internal byte[] Salt { get; } internal static AuthenticationMD5PasswordMessage Load(NpgsqlReadBuffer buf) { @@ -75,7 +75,7 @@ sealed class AuthenticationGSSContinueMessage : AuthenticationRequestMessage { internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.AuthenticationGSSContinue; - internal byte[] AuthenticationData { get; private set; } + internal byte[] AuthenticationData { get; } internal static AuthenticationGSSContinueMessage Load(NpgsqlReadBuffer buf, int len) { diff --git a/src/Npgsql/BackendMessages/BackendKeyDataMessage.cs 
b/src/Npgsql/BackendMessages/BackendKeyDataMessage.cs index cf6506b02f..2140048c38 100644 --- a/src/Npgsql/BackendMessages/BackendKeyDataMessage.cs +++ b/src/Npgsql/BackendMessages/BackendKeyDataMessage.cs @@ -6,8 +6,8 @@ sealed class BackendKeyDataMessage : IBackendMessage { public BackendMessageCode Code => BackendMessageCode.BackendKeyData; - internal int BackendProcessId { get; private set; } - internal int BackendSecretKey { get; private set; } + internal int BackendProcessId { get; } + internal int BackendSecretKey { get; } internal BackendKeyDataMessage(NpgsqlReadBuffer buf) { diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 869b88f08c..54ce7d75b8 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -1307,8 +1307,6 @@ internal ValueTask ReadMessage( case BackendMessageCode.ErrorResponse: ReadBuffer.ReadPosition--; return ReadMessageLong(async, dataRowLoadingMode, readingNotifications: false)!; - case BackendMessageCode.ReadyForQuery: - break; } PGUtil.ValidateBackendMessageCode(messageCode); @@ -1444,10 +1442,6 @@ internal ValueTask ReadMessage( } catch (PostgresException e) { - // TODO: move it up the stack, like #3126 did (relevant for non-command-execution scenarios, like COPY) - if (CurrentReader is null) - EndUserAction(); - if (e.SqlState == PostgresErrorCodes.QueryCanceled && PostgresCancellationPerformed) { // The query could be canceled because of a user cancellation or a timeout - raise the proper exception. diff --git a/src/Npgsql/Internal/TypeHandling/TypeMappingInfo.cs b/src/Npgsql/Internal/TypeHandling/TypeMappingInfo.cs index 8d61d1e13b..d669739e6f 100644 --- a/src/Npgsql/Internal/TypeHandling/TypeMappingInfo.cs +++ b/src/Npgsql/Internal/TypeHandling/TypeMappingInfo.cs @@ -5,7 +5,7 @@ namespace Npgsql.Internal.TypeHandling; -public class TypeMappingInfo +public sealed class TypeMappingInfo { public TypeMappingInfo(NpgsqlDbType? 
npgsqlDbType, string? dataTypeName, Type clrType) => (NpgsqlDbType, DataTypeName, ClrTypes) = (npgsqlDbType, dataTypeName, new[] { clrType }); diff --git a/src/Npgsql/NameTranslation/NpgsqlNullNameTranslator.cs b/src/Npgsql/NameTranslation/NpgsqlNullNameTranslator.cs index 30ff5b9725..f754169a72 100644 --- a/src/Npgsql/NameTranslation/NpgsqlNullNameTranslator.cs +++ b/src/Npgsql/NameTranslation/NpgsqlNullNameTranslator.cs @@ -5,7 +5,7 @@ namespace Npgsql.NameTranslation; /// /// A name translator which preserves CLR names (e.g. SomeClass) when mapping names to the database. /// -public class NpgsqlNullNameTranslator : INpgsqlNameTranslator +public sealed class NpgsqlNullNameTranslator : INpgsqlNameTranslator { /// /// Given a CLR type name (e.g class, struct, enum), translates its name to a database type name. diff --git a/src/Npgsql/NameTranslation/NpgsqlSnakeCaseNameTranslator.cs b/src/Npgsql/NameTranslation/NpgsqlSnakeCaseNameTranslator.cs index 963e1f4e2e..a93661d9e1 100644 --- a/src/Npgsql/NameTranslation/NpgsqlSnakeCaseNameTranslator.cs +++ b/src/Npgsql/NameTranslation/NpgsqlSnakeCaseNameTranslator.cs @@ -9,7 +9,7 @@ namespace Npgsql.NameTranslation; /// A name translator which converts standard CLR names (e.g. SomeClass) to snake-case database /// names (some_class) /// -public class NpgsqlSnakeCaseNameTranslator : INpgsqlNameTranslator +public sealed class NpgsqlSnakeCaseNameTranslator : INpgsqlNameTranslator { /// /// Creates a new . 
diff --git a/src/Npgsql/NpgsqlBinaryImporter.cs b/src/Npgsql/NpgsqlBinaryImporter.cs index 15af6e85a5..be963c1552 100644 --- a/src/Npgsql/NpgsqlBinaryImporter.cs +++ b/src/Npgsql/NpgsqlBinaryImporter.cs @@ -551,7 +551,6 @@ async ValueTask CloseAsync(bool async, CancellationToken cancellationToken = def throw new Exception("Invalid state: " + _state); } - _connector.EndUserAction(); Cleanup(); } @@ -566,8 +565,9 @@ void Cleanup() if (connector != null) { + connector.EndUserAction(); connector.CurrentCopyOperation = null; - _connector.Connection?.EndBindingScope(ConnectorBindingScope.Copy); + connector.Connection?.EndBindingScope(ConnectorBindingScope.Copy); _connector = null; } diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index 303b1b0534..713154d607 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -41,7 +41,7 @@ public abstract class NpgsqlDataSource : DbDataSource /// /// Information about PostgreSQL and PostgreSQL-like databases (e.g. type definitions, capabilities...). /// - internal NpgsqlDatabaseInfo DatabaseInfo { get; set; } = null!; // Initialized at bootstrapping + internal NpgsqlDatabaseInfo DatabaseInfo { get; private set; } = null!; // Initialized at bootstrapping internal Func? EncryptionNegotiator { get; } internal RemoteCertificateValidationCallback? 
UserCertificateValidationCallback { get; } diff --git a/src/Npgsql/NpgsqlRawCopyStream.cs b/src/Npgsql/NpgsqlRawCopyStream.cs index c0ef7989db..fc503d0746 100644 --- a/src/Npgsql/NpgsqlRawCopyStream.cs +++ b/src/Npgsql/NpgsqlRawCopyStream.cs @@ -372,7 +372,6 @@ async Task Cancel(bool async) } catch (PostgresException e) { - _connector.EndUserAction(); Cleanup(); if (e.SqlState != PostgresErrorCodes.QueryCanceled) @@ -442,7 +441,6 @@ async ValueTask DisposeAsync(bool disposing, bool async) } finally { - _connector.EndUserAction(); Cleanup(); } } @@ -452,6 +450,7 @@ void Cleanup() { Debug.Assert(!_isDisposed); LogMessages.CopyOperationCompleted(_copyLogger, _connector.Id); + _connector.EndUserAction(); _connector.CurrentCopyOperation = null; _connector.Connection?.EndBindingScope(ConnectorBindingScope.Copy); _connector = null; diff --git a/src/Npgsql/NpgsqlTypes/PgNameAttribute.cs b/src/Npgsql/NpgsqlTypes/PgNameAttribute.cs index 8e284111aa..48cbc955e4 100644 --- a/src/Npgsql/NpgsqlTypes/PgNameAttribute.cs +++ b/src/Npgsql/NpgsqlTypes/PgNameAttribute.cs @@ -18,14 +18,12 @@ public class PgNameAttribute : Attribute /// /// The name of PostgreSQL field that corresponds to this CLR property or field /// - public string PgName { get; private set; } + public string PgName { get; } /// /// Indicates that this property or field corresponds to a PostgreSQL field with the specified name /// /// The name of PostgreSQL field that corresponds to this CLR property or field public PgNameAttribute(string pgName) - { - PgName = pgName; - } + => PgName = pgName; } diff --git a/src/Npgsql/Util/PGUtil.cs b/src/Npgsql/Util/PGUtil.cs index 006da40d91..1b9aa0ce2a 100644 --- a/src/Npgsql/Util/PGUtil.cs +++ b/src/Npgsql/Util/PGUtil.cs @@ -25,7 +25,7 @@ static Statics() LegacyTimestampBehavior = AppContext.TryGetSwitch("Npgsql.EnableLegacyTimestampBehavior", out var enabled) && enabled; DisableDateTimeInfinityConversions = AppContext.TryGetSwitch("Npgsql.DisableDateTimeInfinityConversions", 
out enabled) && enabled; } - + internal static T Expect(IBackendMessage msg, NpgsqlConnector connector) { if (msg.GetType() != typeof(T)) From 799dca0d9060078c1bbf9d5658fb840a54f5d43f Mon Sep 17 00:00:00 2001 From: steveoh Date: Wed, 15 Mar 2023 01:53:38 +0000 Subject: [PATCH 106/761] Update READMEs for NpgsqlDataSource --- README.md | 8 ++++++-- src/Npgsql.NetTopologySuite/README.md | 14 +++++++++----- src/Npgsql.NodaTime/README.md | 13 ++++++++----- 3 files changed, 23 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index d8e53746f1..2b7bfef019 100644 --- a/README.md +++ b/README.md @@ -17,10 +17,14 @@ For the full documentation, please visit [the Npgsql website](https://www.npgsql Here's a basic code snippet to get you started: ```csharp +using Npgsql; + var connString = "Host=myserver;Username=mylogin;Password=mypass;Database=mydatabase"; -await using var conn = new NpgsqlConnection(connString); -await conn.OpenAsync(); +var dataSourceBuilder = new NpgsqlDataSourceBuilder(connString); +var dataSource = dataSourceBuilder.Build(); + +var conn = await dataSource.OpenConnectionAsync(); // Insert some data await using (var cmd = new NpgsqlCommand("INSERT INTO data (some_field) VALUES (@p)", conn)) diff --git a/src/Npgsql.NetTopologySuite/README.md b/src/Npgsql.NetTopologySuite/README.md index ad2f46289b..c38f46c10b 100644 --- a/src/Npgsql.NetTopologySuite/README.md +++ b/src/Npgsql.NetTopologySuite/README.md @@ -2,15 +2,19 @@ Npgsql is the open source .NET data provider for PostgreSQL. It allows you to co This package is an Npgsql plugin which allows you to interact with spatial data provided by the PostgreSQL [PostGIS extension](https://postgis.net); PostGIS is a mature, standard extension considered to provide top-of-the-line database spatial features. 
On the .NET side, the plugin adds support for the types from the [NetTopologySuite library](https://github.com/NetTopologySuite/NetTopologySuite), allowing you to read and write them directly to PostgreSQL. -To use the NetTopologySuite plugin, simply add a dependency on this package and set it up at program startup: +To use the NetTopologySuite plugin, add a dependency on this package and create a NpgsqlDataSource. ```csharp -NpgsqlConnection.GlobalTypeMapper.UseNetTopologySuite(); -``` +using Npgsql; +using NetTopologySuite.Geometries; -Once this is done, you can simply use NetTopologySuite types when interacting with PostgreSQL: +var dataSourceBuilder = new NpgsqlDataSourceBuilder(ConnectionString); + +dataSourceBuilder.UseNetTopologySuite(); + +var dataSource = dataSourceBuilder.Build(); +var conn = await dataSource.OpenConnectionAsync(); -```csharp var point = new Point(new Coordinate(1d, 1d)); conn.ExecuteNonQuery("CREATE TEMP TABLE data (geom GEOMETRY)"); using (var cmd = new NpgsqlCommand("INSERT INTO data (geom) VALUES (@p)", conn)) diff --git a/src/Npgsql.NodaTime/README.md b/src/Npgsql.NodaTime/README.md index d9bb2e7634..d24070920b 100644 --- a/src/Npgsql.NodaTime/README.md +++ b/src/Npgsql.NodaTime/README.md @@ -2,15 +2,18 @@ Npgsql is the open source .NET data provider for PostgreSQL. It allows you to co This package is an Npgsql plugin which allows you to use the [NodaTime](https://nodatime.org) date/time library when interacting with PostgreSQL; this provides a better and safer API for dealing with date and time data. -To use the NodaTime plugin, simply add a dependency on this package and set it up at program startup: +To use the NodaTime plugin, add a dependency on this package and create a NpgsqlDataSource. Once this is done, you can use NodaTime types when interacting with PostgreSQL, just as you would use e.g. 
`DateTime`: ```csharp -NpgsqlConnection.GlobalTypeMapper.UseNodaTime(); -``` +using Npgsql; -Once this is done, you can simply use NodaTime types when interacting with PostgreSQL, just as you would use e.g. `DateTime`: +var dataSourceBuilder = new NpgsqlDataSourceBuilder(ConnectionString); + +dataSourceBuilder.UseNodaTime(); + +var dataSource = dataSourceBuilder.Build(); +var conn = await dataSource.OpenConnectionAsync(); -```csharp // Write NodaTime Instant to PostgreSQL "timestamp with time zone" (UTC) using (var cmd = new NpgsqlCommand(@"INSERT INTO mytable (my_timestamptz) VALUES (@p)", conn)) { From 531cc2507beb9ed2f4f94c577bce1cee713963a2 Mon Sep 17 00:00:00 2001 From: Brar Piening Date: Wed, 15 Mar 2023 08:03:55 +0100 Subject: [PATCH 107/761] Upgrade to .NET 8 preview 2 (#4995) --- .devcontainer/docker-compose.yml | 2 +- .github/workflows/build.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/native-aot.yml | 2 +- .github/workflows/rich-code-nav.yml | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.devcontainer/docker-compose.yml b/.devcontainer/docker-compose.yml index a58f310f4f..5d5be53d5a 100644 --- a/.devcontainer/docker-compose.yml +++ b/.devcontainer/docker-compose.yml @@ -3,7 +3,7 @@ version: '3' services: npgsql-dev: # Source for tags: https://mcr.microsoft.com/v2/dotnet/sdk/tags/list - image: mcr.microsoft.com/dotnet/sdk:8.0.100-preview.1 + image: mcr.microsoft.com/dotnet/sdk:8.0.100-preview.2 volumes: - ..:/workspace:cached tty: true diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 6c0057c3c9..4e595464b0 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -10,7 +10,7 @@ on: pull_request: env: - dotnet_sdk_version: '8.0.100-preview.1.23115.2' + dotnet_sdk_version: '8.0.100-preview.2.23157.25' postgis_version: 3 DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true # Windows comes with PG pre-installed, and defines the PGPASSWORD environment variable. 
Remove it as it interferes diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index cf3be202e6..6203474b64 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -27,7 +27,7 @@ on: - cron: '21 0 * * 4' env: - dotnet_sdk_version: '8.0.100-preview.1.23115.2' + dotnet_sdk_version: '8.0.100-preview.2.23157.25' jobs: analyze: diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index 01651189a2..b72773ddd8 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -10,7 +10,7 @@ on: pull_request: env: - dotnet_sdk_version: '8.0.100-preview.1.23115.2' + dotnet_sdk_version: '8.0.100-preview.2.23157.25' DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true jobs: diff --git a/.github/workflows/rich-code-nav.yml b/.github/workflows/rich-code-nav.yml index 1a7ef579ec..336aaeeb6c 100644 --- a/.github/workflows/rich-code-nav.yml +++ b/.github/workflows/rich-code-nav.yml @@ -9,7 +9,7 @@ on: - '*' env: - dotnet_sdk_version: '8.0.100-preview.1.23115.2' + dotnet_sdk_version: '8.0.100-preview.2.23157.25' DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true jobs: From 021c82b890c56c97a7045b159739847ce972b703 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Thu, 16 Mar 2023 00:02:39 +0300 Subject: [PATCH 108/761] Fix range resolver --- src/Npgsql/TypeMapping/RangeTypeHandlerResolver.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Npgsql/TypeMapping/RangeTypeHandlerResolver.cs b/src/Npgsql/TypeMapping/RangeTypeHandlerResolver.cs index 1386383a63..1243eaa82a 100644 --- a/src/Npgsql/TypeMapping/RangeTypeHandlerResolver.cs +++ b/src/Npgsql/TypeMapping/RangeTypeHandlerResolver.cs @@ -39,7 +39,7 @@ internal RangeTypeHandlerResolver(TypeMapper typeMapper, NpgsqlConnector connect public override NpgsqlTypeHandler? 
ResolveByDataTypeName(string typeName) { - if (_databaseInfo.GetPostgresTypeByName(typeName) is not { } pgType) + if (_databaseInfo.TryGetPostgresTypeByName(typeName, out var pgType)) return null; return pgType switch From 52b79458fd8ba6820e0f2e2c560ce5ee3970a090 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Thu, 16 Mar 2023 00:05:10 +0300 Subject: [PATCH 109/761] Fix a few usages of GetPostgresTypeByName --- src/Npgsql/Internal/TypeMapping/TypeMapper.cs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/Npgsql/Internal/TypeMapping/TypeMapper.cs b/src/Npgsql/Internal/TypeMapping/TypeMapper.cs index 04bee664f1..847a86ae7e 100644 --- a/src/Npgsql/Internal/TypeMapping/TypeMapper.cs +++ b/src/Npgsql/Internal/TypeMapping/TypeMapper.cs @@ -265,8 +265,7 @@ internal NpgsqlTypeHandler ResolveByDataTypeName(string typeName) { lock (_writeLock) { - if (DatabaseInfo.GetPostgresTypeByName(typeName) is not { } pgType) - throw new NotSupportedException("Could not find PostgreSQL type " + typeName); + var pgType = DatabaseInfo.GetPostgresTypeByName(typeName); switch (pgType) { @@ -433,7 +432,7 @@ NpgsqlTypeHandler ResolveLong(Type type) if (type.IsEnum) { - return DatabaseInfo.GetPostgresTypeByName(GetPgName(type, _defaultNameTranslator)) is PostgresEnumType pgEnumType + return DatabaseInfo.TryGetPostgresTypeByName(GetPgName(type, _defaultNameTranslator), out var pgType) && pgType is PostgresEnumType pgEnumType ? _handlersByClrType[type] = new UnmappedEnumHandler(pgEnumType, _defaultNameTranslator, Connector.TextEncoding) : throw new NotSupportedException( $"Could not find a PostgreSQL enum type corresponding to {type.Name}. " + From 3c5d3ef74e94f326137bef440727984766d2989a Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Thu, 16 Mar 2023 00:09:14 +0300 Subject: [PATCH 110/761] Revert "Fix a few usages of GetPostgresTypeByName" This reverts commit 52b79458fd8ba6820e0f2e2c560ce5ee3970a090. This reverts commit 021c82b890c56c97a7045b159739847ce972b703. 
--- src/Npgsql/Internal/TypeMapping/TypeMapper.cs | 5 +++-- src/Npgsql/TypeMapping/RangeTypeHandlerResolver.cs | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/Npgsql/Internal/TypeMapping/TypeMapper.cs b/src/Npgsql/Internal/TypeMapping/TypeMapper.cs index 847a86ae7e..04bee664f1 100644 --- a/src/Npgsql/Internal/TypeMapping/TypeMapper.cs +++ b/src/Npgsql/Internal/TypeMapping/TypeMapper.cs @@ -265,7 +265,8 @@ internal NpgsqlTypeHandler ResolveByDataTypeName(string typeName) { lock (_writeLock) { - var pgType = DatabaseInfo.GetPostgresTypeByName(typeName); + if (DatabaseInfo.GetPostgresTypeByName(typeName) is not { } pgType) + throw new NotSupportedException("Could not find PostgreSQL type " + typeName); switch (pgType) { @@ -432,7 +433,7 @@ NpgsqlTypeHandler ResolveLong(Type type) if (type.IsEnum) { - return DatabaseInfo.TryGetPostgresTypeByName(GetPgName(type, _defaultNameTranslator), out var pgType) && pgType is PostgresEnumType pgEnumType + return DatabaseInfo.GetPostgresTypeByName(GetPgName(type, _defaultNameTranslator)) is PostgresEnumType pgEnumType ? _handlersByClrType[type] = new UnmappedEnumHandler(pgEnumType, _defaultNameTranslator, Connector.TextEncoding) : throw new NotSupportedException( $"Could not find a PostgreSQL enum type corresponding to {type.Name}. " + diff --git a/src/Npgsql/TypeMapping/RangeTypeHandlerResolver.cs b/src/Npgsql/TypeMapping/RangeTypeHandlerResolver.cs index 1243eaa82a..1386383a63 100644 --- a/src/Npgsql/TypeMapping/RangeTypeHandlerResolver.cs +++ b/src/Npgsql/TypeMapping/RangeTypeHandlerResolver.cs @@ -39,7 +39,7 @@ internal RangeTypeHandlerResolver(TypeMapper typeMapper, NpgsqlConnector connect public override NpgsqlTypeHandler? 
ResolveByDataTypeName(string typeName) { - if (_databaseInfo.TryGetPostgresTypeByName(typeName, out var pgType)) + if (_databaseInfo.GetPostgresTypeByName(typeName) is not { } pgType) return null; return pgType switch From 1566d76ea941f0268b07dab29fd5d8e0e71a9e1f Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Sat, 18 Mar 2023 17:27:36 +0100 Subject: [PATCH 111/761] Clone connection string builder when building data source (#5000) --- src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 11 ++++++----- test/Npgsql.Tests/DataSourceTests.cs | 13 +++++++++++++ 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index b95d9b667f..22ff84a75b 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -50,7 +50,7 @@ public sealed class NpgsqlSlimDataSourceBuilder : INpgsqlTypeMapper /// /// A connection string builder that can be used to configured the connection string on the builder. /// - public NpgsqlConnectionStringBuilder ConnectionStringBuilder { get; } + public NpgsqlConnectionStringBuilder ConnectionStringBuilder { get; private set; } /// /// Returns the connection string, as currently configured on the builder. @@ -471,19 +471,20 @@ public NpgsqlSlimDataSourceBuilder UsePhysicalConnectionInitializer( public NpgsqlDataSource Build() { var config = PrepareConfiguration(); + var connectionStringBuilder = ConnectionStringBuilder.Clone(); if (ConnectionStringBuilder.Host!.Contains(",")) { ValidateMultiHost(); - return new NpgsqlMultiHostDataSource(ConnectionStringBuilder, config); + return new NpgsqlMultiHostDataSource(connectionStringBuilder, config); } return ConnectionStringBuilder.Multiplexing - ? new MultiplexingDataSource(ConnectionStringBuilder, config) + ? new MultiplexingDataSource(connectionStringBuilder, config) : ConnectionStringBuilder.Pooling - ? 
new PoolingDataSource(ConnectionStringBuilder, config) - : new UnpooledDataSource(ConnectionStringBuilder, config); + ? new PoolingDataSource(connectionStringBuilder, config) + : new UnpooledDataSource(connectionStringBuilder, config); } /// diff --git a/test/Npgsql.Tests/DataSourceTests.cs b/test/Npgsql.Tests/DataSourceTests.cs index e1d53c06f6..b6a7266012 100644 --- a/test/Npgsql.Tests/DataSourceTests.cs +++ b/test/Npgsql.Tests/DataSourceTests.cs @@ -294,4 +294,17 @@ public async Task Multiplexing_connectionless_command_open_connection() Assert.True(reader.Read()); Assert.That(reader.GetInt32(0), Is.EqualTo(1)); } + + [Test] + public async Task Connection_string_builder_settings_are_frozen_on_Build() + { + var builder = CreateDataSourceBuilder(); + builder.ConnectionStringBuilder.ApplicationName = "foo"; + await using var dataSource = builder.Build(); + + builder.ConnectionStringBuilder.ApplicationName = "bar"; + + await using var command = dataSource.CreateCommand("SHOW application_name"); + Assert.That(await command.ExecuteScalarAsync(), Is.EqualTo("foo")); + } } From c9b07213406a8a5a2647fad69fd152313318066d Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Sat, 18 Mar 2023 17:35:06 +0100 Subject: [PATCH 112/761] Fixup --- src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index 22ff84a75b..75d249f88f 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -50,7 +50,7 @@ public sealed class NpgsqlSlimDataSourceBuilder : INpgsqlTypeMapper /// /// A connection string builder that can be used to configured the connection string on the builder. 
/// - public NpgsqlConnectionStringBuilder ConnectionStringBuilder { get; private set; } + public NpgsqlConnectionStringBuilder ConnectionStringBuilder { get; } /// /// Returns the connection string, as currently configured on the builder. @@ -496,7 +496,7 @@ public NpgsqlMultiHostDataSource BuildMultiHost() ValidateMultiHost(); - return new(ConnectionStringBuilder, config); + return new(ConnectionStringBuilder.Clone(), config); } NpgsqlDataSourceConfiguration PrepareConfiguration() From 023a3e21187be1a2b2696084456331d480cbf81b Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Sun, 19 Mar 2023 01:11:27 +0100 Subject: [PATCH 113/761] Mstat report improvements (#5002) --- test/MStatDumper/Program.cs | 47 +++++++++++++++++++++++++++++-------- 1 file changed, 37 insertions(+), 10 deletions(-) diff --git a/test/MStatDumper/Program.cs b/test/MStatDumper/Program.cs index 561b0cd73e..0fdb12ffa2 100644 --- a/test/MStatDumper/Program.cs +++ b/test/MStatDumper/Program.cs @@ -184,7 +184,7 @@ string FindNamespace(TypeReference type) .Where(x => x.Method.DeclaringType.Scope.Name == "Npgsql") .GroupBy(x => GetClassName(x.Method)) .OrderByDescending(x => x.Sum(x => x.Size + x.GcInfoSize + x.EhInfoSize)) - .Take(20) + .Take(100) .ToList(); static string GetClassName(MethodReference methodReference) @@ -194,14 +194,14 @@ static string GetClassName(MethodReference methodReference) } Console.WriteLine("
"); - Console.WriteLine("Top 20 Npgsql Classes By Methods Size"); + Console.WriteLine("Top 100 Npgsql Classes By Methods Size"); Console.WriteLine(); Console.WriteLine("
"); Console.WriteLine(); - Console.WriteLine("| Name | Size |"); - Console.WriteLine("| --- | --- |"); + Console.WriteLine("| Name | Size | Total Instantiations |"); + Console.WriteLine("| --- | --- | --- |"); foreach (var m in methodsByClass - .Select(x => new { Name = x.Key, Sum = x.Sum(x => x.Size + x.GcInfoSize + x.EhInfoSize) }) + .Select(x => new { Name = x.Key, Sum = x.Sum(x => x.Size + x.GcInfoSize + x.EhInfoSize), Count = x.Count() }) .OrderByDescending(x => x.Sum)) { var name = m.Name @@ -209,7 +209,7 @@ static string GetClassName(MethodReference methodReference) .Replace("<", "<") .Replace(">", ">") .Replace("|", "\\|"); - Console.WriteLine($"| {name} | {m.Sum:n0} |"); + Console.WriteLine($"| {name} | {m.Sum:n0} | {m.Count} |"); } Console.WriteLine(); @@ -224,11 +224,11 @@ static string GetClassName(MethodReference methodReference) Console.WriteLine(); Console.WriteLine("
"); Console.WriteLine(); - Console.WriteLine("| Name | Size |"); - Console.WriteLine("| --- | --- |"); + Console.WriteLine("| Name | Size | Instantiations |"); + Console.WriteLine("| --- | --- | --- |"); foreach (var m in g .GroupBy(x => GetMethodName(x.Method)) - .Select(x => new { Name = x.Key, Size = x.Sum(x => x.Size + x.GcInfoSize + x.EhInfoSize)}) + .Select(x => new { Name = x.Key, Size = x.Sum(x => x.Size + x.GcInfoSize + x.EhInfoSize), Count = x.Count()}) .OrderByDescending(x => x.Size)) { var methodName = m.Name @@ -236,7 +236,7 @@ static string GetClassName(MethodReference methodReference) .Replace("<", "<") .Replace(">", ">") .Replace("|", "\\|"); - Console.WriteLine($"| {methodName} | {m.Size:n0} |"); + Console.WriteLine($"| {methodName} | {m.Size:n0} | {m.Count} |"); } Console.WriteLine(); Console.WriteLine("
"); @@ -256,6 +256,33 @@ static string GetMethodName(MethodReference methodReference) Console.WriteLine(); Console.WriteLine(""); + + var filteredTypeStats = GetTypes(types) + .Where(x => x.Type.Scope.Name == "Npgsql") + .GroupBy(x => x.Type.Name) + .OrderByDescending(x => x.Sum(x => x.Size)) + .Take(100) + .ToList(); + Console.WriteLine("
"); + Console.WriteLine($"Top 100 Npgsql Types By Size"); + Console.WriteLine(); + Console.WriteLine("
"); + Console.WriteLine(); + Console.WriteLine("| Name | Size | Instantiations |"); + Console.WriteLine("| --- | --- | --- |"); + foreach (var m in filteredTypeStats) + { + var name = m.Key + .Replace("`", "\\`") + .Replace("<", "<") + .Replace(">", ">") + .Replace("|", "\\|"); + Console.WriteLine($"| {name} | {m.Sum(x => x.Size):n0} | {m.Count()} |"); + } + Console.WriteLine(); + Console.WriteLine("
"); + + Console.WriteLine(); } } From c9b8bce549a89ecf4baead24c82f1eb50417ebcd Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Mon, 20 Mar 2023 19:30:38 +0300 Subject: [PATCH 114/761] Split TypeMapperResolver from TypeHandlerResolver (#4973) Closes #4972 --- .../Internal/GeoJSONTypeHandlerResolver.cs | 11 - .../GeoJSONTypeHandlerResolverFactory.cs | 6 +- .../Internal/GeoJsonTypeMappingResolver.cs | 31 ++ .../Internal/JsonNetTypeHandlerResolver.cs | 11 - .../JsonNetTypeHandlerResolverFactory.cs | 7 +- .../Internal/JsonNetTypeMappingResolver.cs | 32 ++ .../NetTopologySuiteTypeHandlerResolver.cs | 20 +- ...TopologySuiteTypeHandlerResolverFactory.cs | 9 +- .../NetTopologySuiteTypeMappingResolver.cs | 39 +++ .../Internal/NodaTimeTypeHandlerResolver.cs | 87 +----- .../NodaTimeTypeHandlerResolverFactory.cs | 8 +- .../Internal/NodaTimeTypeMappingResolver.cs | 102 ++++++ .../TypeHandling/TypeHandlerResolver.cs | 6 - .../TypeHandlerResolverFactory.cs | 15 +- src/Npgsql/Internal/TypeMapping/TypeMapper.cs | 52 +++- .../TypeMapping/TypeMappingResolver.cs | 24 ++ src/Npgsql/NpgsqlDataSourceBuilder.cs | 4 +- src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 4 +- .../TypeMapping/BuiltInTypeHandlerResolver.cs | 294 +----------------- .../BuiltInTypeHandlerResolverFactory.cs | 10 +- .../TypeMapping/BuiltInTypeMappingResolver.cs | 257 +++++++++++++++ src/Npgsql/TypeMapping/GlobalTypeMapper.cs | 86 ++++- .../TypeMapping/RangeTypeHandlerResolver.cs | 30 +- .../RangeTypeHandlerResolverFactory.cs | 15 +- .../TypeMapping/RangeTypeMappingResolver.cs | 118 +++++++ .../TypeMapping/RecordTypeHandlerResolver.cs | 2 - .../RecordTypeHandlerResolverFactory.cs | 10 - ...s => SystemTextJsonTypeHandlerResolver.cs} | 26 +- ...stemTextJsonTypeHandlerResolverFactory.cs} | 12 +- .../SystemTextJsonTypeMappingResolver.cs | 42 +++ test/Npgsql.Tests/ReaderTests.cs | 5 - test/Npgsql.Tests/TypeMapperTests.cs | 10 +- 32 files changed, 781 insertions(+), 604 deletions(-) create mode 100644 
src/Npgsql.GeoJSON/Internal/GeoJsonTypeMappingResolver.cs create mode 100644 src/Npgsql.Json.NET/Internal/JsonNetTypeMappingResolver.cs create mode 100644 src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeMappingResolver.cs create mode 100644 src/Npgsql.NodaTime/Internal/NodaTimeTypeMappingResolver.cs create mode 100644 src/Npgsql/Internal/TypeMapping/TypeMappingResolver.cs create mode 100644 src/Npgsql/TypeMapping/BuiltInTypeMappingResolver.cs create mode 100644 src/Npgsql/TypeMapping/RangeTypeMappingResolver.cs rename src/Npgsql/TypeMapping/{JsonTypeHandlerResolver.cs => SystemTextJsonTypeHandlerResolver.cs} (61%) rename src/Npgsql/TypeMapping/{JsonTypeHandlerResolverFactory.cs => SystemTextJsonTypeHandlerResolverFactory.cs} (66%) create mode 100644 src/Npgsql/TypeMapping/SystemTextJsonTypeMappingResolver.cs diff --git a/src/Npgsql.GeoJSON/Internal/GeoJSONTypeHandlerResolver.cs b/src/Npgsql.GeoJSON/Internal/GeoJSONTypeHandlerResolver.cs index 6291760011..a937c1d62b 100644 --- a/src/Npgsql.GeoJSON/Internal/GeoJSONTypeHandlerResolver.cs +++ b/src/Npgsql.GeoJSON/Internal/GeoJSONTypeHandlerResolver.cs @@ -76,16 +76,5 @@ internal GeoJSONTypeHandlerResolver(NpgsqlConnector connector, GeoJSONOptions op ? "geography" : "geometry"; - public override TypeMappingInfo? GetMappingByPostgresType(PostgresType type) - => DoGetMappingByDataTypeName(type.Name); - - internal static TypeMappingInfo? DoGetMappingByDataTypeName(string dataTypeName) - => dataTypeName switch - { - "geometry" => new(NpgsqlDbType.Geometry, "geometry"), - "geography" => new(NpgsqlDbType.Geography, "geography"), - _ => null - }; - PostgresType? PgType(string pgTypeName) => _databaseInfo.TryGetPostgresTypeByName(pgTypeName, out var pgType) ? 
pgType : null; } \ No newline at end of file diff --git a/src/Npgsql.GeoJSON/Internal/GeoJSONTypeHandlerResolverFactory.cs b/src/Npgsql.GeoJSON/Internal/GeoJSONTypeHandlerResolverFactory.cs index b575592bc2..aae2c9102a 100644 --- a/src/Npgsql.GeoJSON/Internal/GeoJSONTypeHandlerResolverFactory.cs +++ b/src/Npgsql.GeoJSON/Internal/GeoJSONTypeHandlerResolverFactory.cs @@ -17,9 +17,5 @@ public GeoJSONTypeHandlerResolverFactory(GeoJSONOptions options, bool geographyA public override TypeHandlerResolver Create(TypeMapper typeMapper, NpgsqlConnector connector) => new GeoJSONTypeHandlerResolver(connector, _options, _geographyAsDefault); - public override string? GetDataTypeNameByClrType(Type type) - => GeoJSONTypeHandlerResolver.ClrTypeToDataTypeName(type, _geographyAsDefault); - - public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) - => GeoJSONTypeHandlerResolver.DoGetMappingByDataTypeName(dataTypeName); + public override TypeMappingResolver CreateMappingResolver() => new GeoJsonTypeMappingResolver(_geographyAsDefault); } diff --git a/src/Npgsql.GeoJSON/Internal/GeoJsonTypeMappingResolver.cs b/src/Npgsql.GeoJSON/Internal/GeoJsonTypeMappingResolver.cs new file mode 100644 index 0000000000..621e0389a2 --- /dev/null +++ b/src/Npgsql.GeoJSON/Internal/GeoJsonTypeMappingResolver.cs @@ -0,0 +1,31 @@ +using System; +using Npgsql.Internal.TypeHandling; +using Npgsql.Internal.TypeMapping; +using Npgsql.PostgresTypes; +using NpgsqlTypes; + +namespace Npgsql.GeoJSON.Internal; + +public class GeoJsonTypeMappingResolver : TypeMappingResolver +{ + readonly bool _geographyAsDefault; + + public GeoJsonTypeMappingResolver(bool geographyAsDefault) => _geographyAsDefault = geographyAsDefault; + + public override string? GetDataTypeNameByClrType(Type type) + => GeoJSONTypeHandlerResolver.ClrTypeToDataTypeName(type, _geographyAsDefault); + + public override TypeMappingInfo? 
GetMappingByDataTypeName(string dataTypeName) + => DoGetMappingByDataTypeName(dataTypeName); + + public override TypeMappingInfo? GetMappingByPostgresType(TypeMapper mapper, PostgresType type) + => DoGetMappingByDataTypeName(type.Name); + + static TypeMappingInfo? DoGetMappingByDataTypeName(string dataTypeName) + => dataTypeName switch + { + "geometry" => new(NpgsqlDbType.Geometry, "geometry"), + "geography" => new(NpgsqlDbType.Geography, "geography"), + _ => null + }; +} diff --git a/src/Npgsql.Json.NET/Internal/JsonNetTypeHandlerResolver.cs b/src/Npgsql.Json.NET/Internal/JsonNetTypeHandlerResolver.cs index d925405063..04bb63bdf1 100644 --- a/src/Npgsql.Json.NET/Internal/JsonNetTypeHandlerResolver.cs +++ b/src/Npgsql.Json.NET/Internal/JsonNetTypeHandlerResolver.cs @@ -44,16 +44,5 @@ internal JsonNetTypeHandlerResolver( internal static string? ClrTypeToDataTypeName(Type type, Dictionary clrTypes) => clrTypes.TryGetValue(type, out var dataTypeName) ? dataTypeName : null; - public override TypeMappingInfo? GetMappingByPostgresType(PostgresType type) - => DoGetMappingByDataTypeName(type.Name); - - internal static TypeMappingInfo? 
DoGetMappingByDataTypeName(string dataTypeName) - => dataTypeName switch - { - "jsonb" => new(NpgsqlDbType.Jsonb, "jsonb"), - "json" => new(NpgsqlDbType.Json, "json"), - _ => null - }; - PostgresType PgType(string pgTypeName) => _databaseInfo.GetPostgresTypeByName(pgTypeName); } \ No newline at end of file diff --git a/src/Npgsql.Json.NET/Internal/JsonNetTypeHandlerResolverFactory.cs b/src/Npgsql.Json.NET/Internal/JsonNetTypeHandlerResolverFactory.cs index a4b81ccace..739efc6d2c 100644 --- a/src/Npgsql.Json.NET/Internal/JsonNetTypeHandlerResolverFactory.cs +++ b/src/Npgsql.Json.NET/Internal/JsonNetTypeHandlerResolverFactory.cs @@ -39,10 +39,5 @@ public JsonNetTypeHandlerResolverFactory( public override TypeHandlerResolver Create(TypeMapper typeMapper, NpgsqlConnector connector) => new JsonNetTypeHandlerResolver(connector, _byType, _settings); - public override string? GetDataTypeNameByClrType(Type type) - => JsonNetTypeHandlerResolver.ClrTypeToDataTypeName(type, _byType); - - public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) - => JsonNetTypeHandlerResolver.DoGetMappingByDataTypeName(dataTypeName); - + public override TypeMappingResolver CreateMappingResolver() => new JsonNetTypeMappingResolver(_byType); } diff --git a/src/Npgsql.Json.NET/Internal/JsonNetTypeMappingResolver.cs b/src/Npgsql.Json.NET/Internal/JsonNetTypeMappingResolver.cs new file mode 100644 index 0000000000..c8ba31da7a --- /dev/null +++ b/src/Npgsql.Json.NET/Internal/JsonNetTypeMappingResolver.cs @@ -0,0 +1,32 @@ +using System; +using System.Collections.Generic; +using Npgsql.Internal.TypeHandling; +using Npgsql.Internal.TypeMapping; +using Npgsql.PostgresTypes; +using NpgsqlTypes; + +namespace Npgsql.Json.NET.Internal; + +public class JsonNetTypeMappingResolver : TypeMappingResolver +{ + readonly Dictionary _byType; + + public JsonNetTypeMappingResolver(Dictionary byType) => _byType = byType; + + public override string? 
GetDataTypeNameByClrType(Type type) + => JsonNetTypeHandlerResolver.ClrTypeToDataTypeName(type, _byType); + + public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) + => DoGetMappingByDataTypeName(dataTypeName); + + public override TypeMappingInfo? GetMappingByPostgresType(TypeMapper typeMapper, PostgresType type) + => DoGetMappingByDataTypeName(type.Name); + + static TypeMappingInfo? DoGetMappingByDataTypeName(string dataTypeName) + => dataTypeName switch + { + "jsonb" => new(NpgsqlDbType.Jsonb, "jsonb"), + "json" => new(NpgsqlDbType.Json, "json"), + _ => null + }; +} diff --git a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeHandlerResolver.cs b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeHandlerResolver.cs index 660664028f..8f270ac90f 100644 --- a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeHandlerResolver.cs +++ b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeHandlerResolver.cs @@ -47,27 +47,9 @@ internal NetTopologySuiteTypeHandlerResolver( }; public override NpgsqlTypeHandler? ResolveByClrType(Type type) - => ClrTypeToDataTypeName(type, _geographyAsDefault) is { } dataTypeName && ResolveByDataTypeName(dataTypeName) is { } handler + => NetTopologySuiteTypeMappingResolver.ClrTypeToDataTypeName(type, _geographyAsDefault) is { } dataTypeName && ResolveByDataTypeName(dataTypeName) is { } handler ? handler : null; - internal static string? ClrTypeToDataTypeName(Type type, bool geographyAsDefault) - => type != typeof(Geometry) && type.BaseType != typeof(Geometry) && type.BaseType != typeof(GeometryCollection) - ? null - : geographyAsDefault - ? "geography" - : "geometry"; - - public override TypeMappingInfo? GetMappingByPostgresType(PostgresType type) - => DoGetMappingByDataTypeName(type.Name); - - internal static TypeMappingInfo? 
DoGetMappingByDataTypeName(string dataTypeName) - => dataTypeName switch - { - "geometry" => new(NpgsqlDbType.Geometry, "geometry"), - "geography" => new(NpgsqlDbType.Geography, "geography"), - _ => null - }; - PostgresType? PgType(string pgTypeName) => _databaseInfo.TryGetPostgresTypeByName(pgTypeName, out var pgType) ? pgType : null; } \ No newline at end of file diff --git a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeHandlerResolverFactory.cs b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeHandlerResolverFactory.cs index d503707f7e..1aed03a058 100644 --- a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeHandlerResolverFactory.cs +++ b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeHandlerResolverFactory.cs @@ -1,11 +1,8 @@ -using System; -using System.Data; using NetTopologySuite; using NetTopologySuite.Geometries; using Npgsql.Internal; using Npgsql.Internal.TypeHandling; using Npgsql.Internal.TypeMapping; -using Npgsql.TypeMapping; namespace Npgsql.NetTopologySuite.Internal; @@ -32,9 +29,5 @@ public override TypeHandlerResolver Create(TypeMapper typeMapper, NpgsqlConnecto => new NetTopologySuiteTypeHandlerResolver(connector, _coordinateSequenceFactory, _precisionModel, _handleOrdinates, _geographyAsDefault); - public override string? GetDataTypeNameByClrType(Type type) - => NetTopologySuiteTypeHandlerResolver.ClrTypeToDataTypeName(type, _geographyAsDefault); - - public override TypeMappingInfo? 
GetMappingByDataTypeName(string dataTypeName) - => NetTopologySuiteTypeHandlerResolver.DoGetMappingByDataTypeName(dataTypeName); + public override TypeMappingResolver CreateMappingResolver() => new NetTopologySuiteTypeMappingResolver(_geographyAsDefault); } diff --git a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeMappingResolver.cs b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeMappingResolver.cs new file mode 100644 index 0000000000..bc502af5a6 --- /dev/null +++ b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeMappingResolver.cs @@ -0,0 +1,39 @@ +using System; +using NetTopologySuite.Geometries; +using Npgsql.Internal.TypeHandling; +using Npgsql.Internal.TypeMapping; +using Npgsql.PostgresTypes; +using NpgsqlTypes; + +namespace Npgsql.NetTopologySuite.Internal; + +public class NetTopologySuiteTypeMappingResolver : TypeMappingResolver +{ + readonly bool _geographyAsDefault; + + public NetTopologySuiteTypeMappingResolver(bool geographyAsDefault) => _geographyAsDefault = geographyAsDefault; + + public override string? GetDataTypeNameByClrType(Type type) + => ClrTypeToDataTypeName(type, _geographyAsDefault); + + public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) + => DoGetMappingByDataTypeName(dataTypeName); + + public override TypeMappingInfo? GetMappingByPostgresType(TypeMapper typeMapper, PostgresType type) + => DoGetMappingByDataTypeName(type.Name); + + internal static string? ClrTypeToDataTypeName(Type type, bool geographyAsDefault) + => type != typeof(Geometry) && type.BaseType != typeof(Geometry) && type.BaseType != typeof(GeometryCollection) + ? null + : geographyAsDefault + ? "geography" + : "geometry"; + + static TypeMappingInfo? 
DoGetMappingByDataTypeName(string dataTypeName) + => dataTypeName switch + { + "geometry" => new(NpgsqlDbType.Geometry, "geometry"), + "geography" => new(NpgsqlDbType.Geography, "geography"), + _ => null + }; +} diff --git a/src/Npgsql.NodaTime/Internal/NodaTimeTypeHandlerResolver.cs b/src/Npgsql.NodaTime/Internal/NodaTimeTypeHandlerResolver.cs index 117bd72cc5..c0b1cc60c6 100644 --- a/src/Npgsql.NodaTime/Internal/NodaTimeTypeHandlerResolver.cs +++ b/src/Npgsql.NodaTime/Internal/NodaTimeTypeHandlerResolver.cs @@ -1,5 +1,4 @@ using System; -using System.Collections.Generic; using NodaTime; using Npgsql.Internal; using Npgsql.Internal.TypeHandlers; @@ -74,7 +73,7 @@ internal NodaTimeTypeHandlerResolver(NpgsqlConnector connector) }; public override NpgsqlTypeHandler? ResolveByClrType(Type type) - => ClrTypeToDataTypeName(type) is { } dataTypeName && ResolveByDataTypeName(dataTypeName) is { } handler + => NodaTimeTypeMappingResolver.ClrTypeToDataTypeName(type) is { } dataTypeName && ResolveByDataTypeName(dataTypeName) is { } handler ? handler : null; @@ -131,90 +130,6 @@ internal NodaTimeTypeHandlerResolver(NpgsqlConnector connector) return null; } - internal static string? ClrTypeToDataTypeName(Type type) - { - if (type == typeof(Instant)) - return LegacyTimestampBehavior ? 
"timestamp without time zone" : "timestamp with time zone"; - - if (type == typeof(LocalDateTime)) - return "timestamp without time zone"; - if (type == typeof(ZonedDateTime) || type == typeof(OffsetDateTime)) - return "timestamp with time zone"; - if (type == typeof(LocalDate)) - return "date"; - if (type == typeof(LocalTime)) - return "time without time zone"; - if (type == typeof(OffsetTime)) - return "time with time zone"; - if (type == typeof(Period) || type == typeof(Duration)) - return "interval"; - - // Ranges - if (type == typeof(NpgsqlRange)) - return "tsrange"; - - if (type == typeof(Interval) || - type == typeof(NpgsqlRange) || - type == typeof(NpgsqlRange) || - type == typeof(NpgsqlRange)) - { - return "tstzrange"; - } - - if (type == typeof(DateInterval) || type == typeof(NpgsqlRange)) - return "daterange"; - - // Multiranges - if (type == typeof(NpgsqlRange[]) || type == typeof(List>)) - return "tsmultirange"; - - if (type == typeof(Interval[]) || - type == typeof(List) || - type == typeof(NpgsqlRange[]) || - type == typeof(List>) || - type == typeof(NpgsqlRange[]) || - type == typeof(List>) || - type == typeof(NpgsqlRange[]) || - type == typeof(List>)) - { - return "tstzmultirange"; - } - if (type == typeof(DateInterval[]) || - type == typeof(List) || - type == typeof(NpgsqlRange[]) || - type == typeof(List>)) - { - return "datemultirange"; - } - - return null; - } - - public override TypeMappingInfo? GetMappingByPostgresType(PostgresType type) - => DoGetMappingByDataTypeName(type.Name); - - internal static TypeMappingInfo? 
DoGetMappingByDataTypeName(string dataTypeName) - => dataTypeName switch - { - "timestamp" or "timestamp without time zone" => new(NpgsqlDbType.Timestamp, "timestamp without time zone"), - "timestamptz" or "timestamp with time zone" => new(NpgsqlDbType.TimestampTz, "timestamp with time zone"), - "date" => new(NpgsqlDbType.Date, "date"), - "time without time zone" => new(NpgsqlDbType.Time, "time without time zone"), - "time with time zone" => new(NpgsqlDbType.TimeTz, "time with time zone"), - "interval" => new(NpgsqlDbType.Interval, "interval"), - - "tsrange" => new(NpgsqlDbType.TimestampRange, "tsrange"), - "tstzrange" => new(NpgsqlDbType.TimestampTzRange, "tstzrange"), - "daterange" => new(NpgsqlDbType.DateRange, "daterange"), - - "tsmultirange" => new(NpgsqlDbType.TimestampMultirange, "tsmultirange"), - "tstzmultirange" => new(NpgsqlDbType.TimestampTzMultirange, "tstzmultirange"), - "datemultirange" => new(NpgsqlDbType.DateMultirange, "datemultirange"), - - _ => null - }; - - PostgresType PgType(string pgTypeName) => _databaseInfo.GetPostgresTypeByName(pgTypeName); TimestampTzRangeHandler TsTzRange() diff --git a/src/Npgsql.NodaTime/Internal/NodaTimeTypeHandlerResolverFactory.cs b/src/Npgsql.NodaTime/Internal/NodaTimeTypeHandlerResolverFactory.cs index 01a49b85d1..d1034e7f5e 100644 --- a/src/Npgsql.NodaTime/Internal/NodaTimeTypeHandlerResolverFactory.cs +++ b/src/Npgsql.NodaTime/Internal/NodaTimeTypeHandlerResolverFactory.cs @@ -1,8 +1,6 @@ -using System; using Npgsql.Internal; using Npgsql.Internal.TypeHandling; using Npgsql.Internal.TypeMapping; -using Npgsql.TypeMapping; namespace Npgsql.NodaTime.Internal; @@ -11,9 +9,7 @@ public class NodaTimeTypeHandlerResolverFactory : TypeHandlerResolverFactory public override TypeHandlerResolver Create(TypeMapper typeMapper, NpgsqlConnector connector) => new NodaTimeTypeHandlerResolver(connector); - public override string? 
GetDataTypeNameByClrType(Type type) - => NodaTimeTypeHandlerResolver.ClrTypeToDataTypeName(type); + public override TypeMappingResolver CreateMappingResolver() => new NodaTimeTypeMappingResolver(); - public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) - => NodaTimeTypeHandlerResolver.DoGetMappingByDataTypeName(dataTypeName); + public override TypeMappingResolver CreateGlobalMappingResolver() => new NodaTimeTypeMappingResolver(); } diff --git a/src/Npgsql.NodaTime/Internal/NodaTimeTypeMappingResolver.cs b/src/Npgsql.NodaTime/Internal/NodaTimeTypeMappingResolver.cs new file mode 100644 index 0000000000..b5e1e51f6e --- /dev/null +++ b/src/Npgsql.NodaTime/Internal/NodaTimeTypeMappingResolver.cs @@ -0,0 +1,102 @@ +using System; +using System.Collections.Generic; +using NodaTime; +using Npgsql.Internal.TypeHandling; +using Npgsql.Internal.TypeMapping; +using Npgsql.PostgresTypes; +using NpgsqlTypes; +using static Npgsql.NodaTime.Internal.NodaTimeUtils; + +namespace Npgsql.NodaTime.Internal; + +public class NodaTimeTypeMappingResolver : TypeMappingResolver +{ + public override string? GetDataTypeNameByClrType(Type type) + => ClrTypeToDataTypeName(type); + + public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) + => DoGetMappingByDataTypeName(dataTypeName); + + public override TypeMappingInfo? GetMappingByPostgresType(TypeMapper mapper, PostgresType type) + => DoGetMappingByDataTypeName(type.Name); + + static TypeMappingInfo? 
DoGetMappingByDataTypeName(string dataTypeName) + => dataTypeName switch + { + "timestamp" or "timestamp without time zone" => new(NpgsqlDbType.Timestamp, "timestamp without time zone"), + "timestamptz" or "timestamp with time zone" => new(NpgsqlDbType.TimestampTz, "timestamp with time zone"), + "date" => new(NpgsqlDbType.Date, "date"), + "time without time zone" => new(NpgsqlDbType.Time, "time without time zone"), + "time with time zone" => new(NpgsqlDbType.TimeTz, "time with time zone"), + "interval" => new(NpgsqlDbType.Interval, "interval"), + + "tsrange" => new(NpgsqlDbType.TimestampRange, "tsrange"), + "tstzrange" => new(NpgsqlDbType.TimestampTzRange, "tstzrange"), + "daterange" => new(NpgsqlDbType.DateRange, "daterange"), + + "tsmultirange" => new(NpgsqlDbType.TimestampMultirange, "tsmultirange"), + "tstzmultirange" => new(NpgsqlDbType.TimestampTzMultirange, "tstzmultirange"), + "datemultirange" => new(NpgsqlDbType.DateMultirange, "datemultirange"), + + _ => null + }; + + internal static string? ClrTypeToDataTypeName(Type type) + { + if (type == typeof(Instant)) + return LegacyTimestampBehavior ? 
"timestamp without time zone" : "timestamp with time zone"; + + if (type == typeof(LocalDateTime)) + return "timestamp without time zone"; + if (type == typeof(ZonedDateTime) || type == typeof(OffsetDateTime)) + return "timestamp with time zone"; + if (type == typeof(LocalDate)) + return "date"; + if (type == typeof(LocalTime)) + return "time without time zone"; + if (type == typeof(OffsetTime)) + return "time with time zone"; + if (type == typeof(Period) || type == typeof(Duration)) + return "interval"; + + // Ranges + if (type == typeof(NpgsqlRange)) + return "tsrange"; + + if (type == typeof(Interval) || + type == typeof(NpgsqlRange) || + type == typeof(NpgsqlRange) || + type == typeof(NpgsqlRange)) + { + return "tstzrange"; + } + + if (type == typeof(DateInterval) || type == typeof(NpgsqlRange)) + return "daterange"; + + // Multiranges + if (type == typeof(NpgsqlRange[]) || type == typeof(List>)) + return "tsmultirange"; + + if (type == typeof(Interval[]) || + type == typeof(List) || + type == typeof(NpgsqlRange[]) || + type == typeof(List>) || + type == typeof(NpgsqlRange[]) || + type == typeof(List>) || + type == typeof(NpgsqlRange[]) || + type == typeof(List>)) + { + return "tstzmultirange"; + } + if (type == typeof(DateInterval[]) || + type == typeof(List) || + type == typeof(NpgsqlRange[]) || + type == typeof(List>)) + { + return "datemultirange"; + } + + return null; + } +} diff --git a/src/Npgsql/Internal/TypeHandling/TypeHandlerResolver.cs b/src/Npgsql/Internal/TypeHandling/TypeHandlerResolver.cs index 3c47e55f06..feb6a719e7 100644 --- a/src/Npgsql/Internal/TypeHandling/TypeHandlerResolver.cs +++ b/src/Npgsql/Internal/TypeHandling/TypeHandlerResolver.cs @@ -34,10 +34,4 @@ public abstract class TypeHandlerResolver public virtual NpgsqlTypeHandler? ResolveValueDependentValue(object value) => null; public virtual NpgsqlTypeHandler? ResolveValueTypeGenerically(T value) => null; - - /// - /// Gets type mapping information for a given PostgreSQL type. 
- /// Invoked in scenarios when mapping information is required, rather than a type handler for reading or writing. - /// - public abstract TypeMappingInfo? GetMappingByPostgresType(PostgresType type); } \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandling/TypeHandlerResolverFactory.cs b/src/Npgsql/Internal/TypeHandling/TypeHandlerResolverFactory.cs index ac195afbdc..c1d5030b75 100644 --- a/src/Npgsql/Internal/TypeHandling/TypeHandlerResolverFactory.cs +++ b/src/Npgsql/Internal/TypeHandling/TypeHandlerResolverFactory.cs @@ -1,6 +1,4 @@ -using System; using Npgsql.Internal.TypeMapping; -using Npgsql.TypeMapping; namespace Npgsql.Internal.TypeHandling; @@ -8,16 +6,7 @@ public abstract class TypeHandlerResolverFactory { public abstract TypeHandlerResolver Create(TypeMapper typeMapper, NpgsqlConnector connector); - public abstract string? GetDataTypeNameByClrType(Type clrType); - public virtual string? GetDataTypeNameByValueDependentValue(object value) => null; - public abstract TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName); -} + public virtual TypeMappingResolver? CreateMappingResolver() => null; -static class TypeHandlerResolverFactoryExtensions -{ - internal static TypeMappingInfo? GetMappingByClrType(this TypeHandlerResolverFactory factory, Type clrType) - => factory.GetDataTypeNameByClrType(clrType) is { } dataTypeName ? factory.GetMappingByDataTypeName(dataTypeName) : null; - - internal static TypeMappingInfo? GetMappingByValueDependentValue(this TypeHandlerResolverFactory factory, object value) - => factory.GetDataTypeNameByValueDependentValue(value) is { } dataTypeName ? factory.GetMappingByDataTypeName(dataTypeName) : null; + public virtual TypeMappingResolver? 
CreateGlobalMappingResolver() => null; } \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeMapping/TypeMapper.cs b/src/Npgsql/Internal/TypeMapping/TypeMapper.cs index 04bee664f1..eb6bb75f48 100644 --- a/src/Npgsql/Internal/TypeMapping/TypeMapper.cs +++ b/src/Npgsql/Internal/TypeMapping/TypeMapper.cs @@ -35,7 +35,8 @@ internal NpgsqlDatabaseInfo DatabaseInfo } } - volatile TypeHandlerResolver[] _resolvers; + volatile TypeHandlerResolver[] _handlerResolvers; + volatile TypeMappingResolver[] _mappingResolvers; internal NpgsqlTypeHandler UnrecognizedTypeHandler { get; } readonly ConcurrentDictionary _handlersByOID = new(); @@ -55,7 +56,8 @@ internal TypeMapper(NpgsqlConnector connector, INpgsqlNameTranslator defaultName Connector = connector; _defaultNameTranslator = defaultNameTranslator; UnrecognizedTypeHandler = new UnknownTypeHandler(Connector.TextEncoding); - _resolvers = Array.Empty(); + _handlerResolvers = Array.Empty(); + _mappingResolvers = Array.Empty(); _commandLogger = connector.LoggingConfiguration.CommandLogger; } @@ -68,10 +70,26 @@ internal void Initialize( { _databaseInfo = databaseInfo; - var resolvers = new TypeHandlerResolver[resolverFactories.Count]; + var handlerResolvers = new TypeHandlerResolver[resolverFactories.Count]; + var mappingResolvers = new List(resolverFactories.Count); for (var i = 0; i < resolverFactories.Count; i++) - resolvers[i] = resolverFactories[i].Create(this, Connector); - _resolvers = resolvers; + { + handlerResolvers[i] = resolverFactories[i].Create(this, Connector); + var mappingResolver = resolverFactories[i].CreateMappingResolver(); + if (mappingResolver is not null) + mappingResolvers.Add(mappingResolver); + } + + // Add global mapper resolvers in backwards because they're inserted in the beginning + for (var i = resolverFactories.Count - 1; i >= 0; i--) + { + var globalMappingResolver = resolverFactories[i].CreateGlobalMappingResolver(); + if (globalMappingResolver is not null) + 
GlobalTypeMapper.Instance.TryAddMappingResolver(globalMappingResolver); + } + + _handlerResolvers = handlerResolvers; + _mappingResolvers = mappingResolvers.ToArray(); foreach (var userTypeMapping in userTypeMappings.Values) { @@ -151,7 +169,7 @@ NpgsqlTypeHandler ResolveLong(NpgsqlDbType npgsqlDbType) // First, try to resolve as a base type; translate the NpgsqlDbType to a PG data type name and look that up. if (GlobalTypeMapper.NpgsqlDbTypeToDataTypeName(npgsqlDbType) is { } dataTypeName) { - foreach (var resolver in _resolvers) + foreach (var resolver in _handlerResolvers) { try { @@ -168,7 +186,7 @@ NpgsqlTypeHandler ResolveLong(NpgsqlDbType npgsqlDbType) // Can't find (or translate) PG data type name by NpgsqlDbType. // This might happen because of flags (like Array, Range or Multirange). - foreach (var resolver in _resolvers) + foreach (var resolver in _handlerResolvers) { try { @@ -214,7 +232,7 @@ internal NpgsqlTypeHandler ResolveByDataTypeName(string typeName) { lock (_writeLock) { - foreach (var resolver in _resolvers) + foreach (var resolver in _handlerResolvers) { try { @@ -243,7 +261,7 @@ internal NpgsqlTypeHandler ResolveByDataTypeName(string typeName) { lock (_writeLock) { - foreach (var resolver in _resolvers) + foreach (var resolver in _handlerResolvers) { try { @@ -265,8 +283,7 @@ internal NpgsqlTypeHandler ResolveByDataTypeName(string typeName) { lock (_writeLock) { - if (DatabaseInfo.GetPostgresTypeByName(typeName) is not { } pgType) - throw new NotSupportedException("Could not find PostgreSQL type " + typeName); + var pgType = DatabaseInfo.GetPostgresTypeByName(typeName); switch (pgType) { @@ -327,7 +344,7 @@ internal NpgsqlTypeHandler ResolveByValue(T value) // do a dictionary lookup (the JIT elides type checks in generic methods for value types) NpgsqlTypeHandler? 
handler; - foreach (var resolver in _resolvers) + foreach (var resolver in _handlerResolvers) { try { @@ -364,7 +381,7 @@ internal NpgsqlTypeHandler ResolveByValue(object value) NpgsqlTypeHandler ResolveLong(object value, Type type) { - foreach (var resolver in _resolvers) + foreach (var resolver in _handlerResolvers) { try { @@ -400,7 +417,7 @@ NpgsqlTypeHandler ResolveLong(Type type) { lock (_writeLock) { - foreach (var resolver in _resolvers) + foreach (var resolver in _handlerResolvers) { try { @@ -433,7 +450,8 @@ NpgsqlTypeHandler ResolveLong(Type type) if (type.IsEnum) { - return DatabaseInfo.GetPostgresTypeByName(GetPgName(type, _defaultNameTranslator)) is PostgresEnumType pgEnumType + return DatabaseInfo.TryGetPostgresTypeByName(GetPgName(type, _defaultNameTranslator), out var pgType) + && pgType is PostgresEnumType pgEnumType ? _handlersByClrType[type] = new UnmappedEnumHandler(pgEnumType, _defaultNameTranslator, Connector.TextEncoding) : throw new NotSupportedException( $"Could not find a PostgreSQL enum type corresponding to {type.Name}. " + @@ -472,8 +490,8 @@ Type GetUnderlyingType(Type t) internal bool TryGetMapping(PostgresType pgType, [NotNullWhen(true)] out TypeMappingInfo? mapping) { - foreach (var resolver in _resolvers) - if ((mapping = resolver.GetMappingByPostgresType(pgType)) is not null) + foreach (var resolver in _mappingResolvers) + if ((mapping = resolver.GetMappingByPostgresType(this, pgType)) is not null) return true; switch (pgType) diff --git a/src/Npgsql/Internal/TypeMapping/TypeMappingResolver.cs b/src/Npgsql/Internal/TypeMapping/TypeMappingResolver.cs new file mode 100644 index 0000000000..a4f6673d3a --- /dev/null +++ b/src/Npgsql/Internal/TypeMapping/TypeMappingResolver.cs @@ -0,0 +1,24 @@ +using System; +using Npgsql.Internal.TypeHandling; +using Npgsql.PostgresTypes; + +namespace Npgsql.Internal.TypeMapping; + +public abstract class TypeMappingResolver +{ + public abstract string? 
GetDataTypeNameByClrType(Type clrType); + public virtual string? GetDataTypeNameByValueDependentValue(object value) => null; + public abstract TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName); + + /// + /// Gets type mapping information for a given PostgreSQL type. + /// Invoked in scenarios when mapping information is required, rather than a type handler for reading or writing. + /// + public abstract TypeMappingInfo? GetMappingByPostgresType(TypeMapper typeMapper, PostgresType type); + + internal TypeMappingInfo? GetMappingByValueDependentValue(object value) + => GetDataTypeNameByValueDependentValue(value) is { } dataTypeName ? GetMappingByDataTypeName(dataTypeName) : null; + + internal TypeMappingInfo? GetMappingByClrType(Type clrType) + => GetDataTypeNameByClrType(clrType) is { } dataTypeName ? GetMappingByDataTypeName(dataTypeName) : null; +} \ No newline at end of file diff --git a/src/Npgsql/NpgsqlDataSourceBuilder.cs b/src/Npgsql/NpgsqlDataSourceBuilder.cs index e10a8f5005..bf517b870e 100644 --- a/src/Npgsql/NpgsqlDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlDataSourceBuilder.cs @@ -217,7 +217,7 @@ public NpgsqlDataSourceBuilder UseSystemTextJson( Type[]? jsonbClrTypes = null, Type[]? 
jsonClrTypes = null) { - AddTypeResolverFactory(new JsonTypeHandlerResolverFactory(jsonbClrTypes, jsonClrTypes, serializerOptions)); + AddTypeResolverFactory(new SystemTextJsonTypeHandlerResolverFactory(jsonbClrTypes, jsonClrTypes, serializerOptions)); return this; } @@ -313,7 +313,7 @@ public NpgsqlMultiHostDataSource BuildMultiHost() void AddDefaultFeatures() { _internalBuilder.EnableEncryption(); - _internalBuilder.AddDefaultTypeResolverFactory(new JsonTypeHandlerResolverFactory()); + _internalBuilder.AddDefaultTypeResolverFactory(new SystemTextJsonTypeHandlerResolverFactory()); _internalBuilder.AddDefaultTypeResolverFactory(new RangeTypeHandlerResolverFactory()); _internalBuilder.AddDefaultTypeResolverFactory(new RecordTypeHandlerResolverFactory()); } diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index 75d249f88f..18eacfbfab 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -359,7 +359,7 @@ void ResetTypeMappings() try { _resolverFactories.Clear(); - foreach (var resolverFactory in globalMapper.ResolverFactories) + foreach (var resolverFactory in globalMapper.HandlerResolverFactories) _resolverFactories.Add(resolverFactory); _userTypeMappings.Clear(); @@ -404,7 +404,7 @@ public NpgsqlSlimDataSourceBuilder UseSystemTextJson( Type[]? jsonbClrTypes = null, Type[]? 
jsonClrTypes = null) { - AddTypeResolverFactory(new JsonTypeHandlerResolverFactory(jsonbClrTypes, jsonClrTypes, serializerOptions)); + AddTypeResolverFactory(new SystemTextJsonTypeHandlerResolverFactory(jsonbClrTypes, jsonClrTypes, serializerOptions)); return this; } diff --git a/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs b/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs index d4a109755a..e8b8af26cf 100644 --- a/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs +++ b/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs @@ -1,7 +1,5 @@ using System; -using System.Collections; using System.Collections.Generic; -using System.Collections.Immutable; using System.Collections.Specialized; using System.IO; using System.Net; @@ -28,137 +26,6 @@ sealed class BuiltInTypeHandlerResolver : TypeHandlerResolver readonly NpgsqlConnector _connector; readonly NpgsqlDatabaseInfo _databaseInfo; - static readonly Type ReadOnlyIPAddressType = IPAddress.Loopback.GetType(); - - static readonly Dictionary Mappings = new() - { - // Numeric types - { "smallint", new(NpgsqlDbType.Smallint, "smallint", typeof(short), typeof(byte), typeof(sbyte)) }, - { "integer", new(NpgsqlDbType.Integer, "integer", typeof(int)) }, - { "int", new(NpgsqlDbType.Integer, "integer", typeof(int)) }, - { "bigint", new(NpgsqlDbType.Bigint, "bigint", typeof(long)) }, - { "real", new(NpgsqlDbType.Real, "real", typeof(float)) }, - { "double precision", new(NpgsqlDbType.Double, "double precision", typeof(double)) }, - { "numeric", new(NpgsqlDbType.Numeric, "numeric", typeof(decimal), typeof(BigInteger)) }, - { "decimal", new(NpgsqlDbType.Numeric, "numeric", typeof(decimal), typeof(BigInteger)) }, - { "money", new(NpgsqlDbType.Money, "money") }, - - // Text types - { "text", new(NpgsqlDbType.Text, "text", typeof(string), typeof(char[]), typeof(char), typeof(ArraySegment)) }, - { "xml", new(NpgsqlDbType.Xml, "xml") }, - { "character varying", new(NpgsqlDbType.Varchar, "character varying") }, - { 
"varchar", new(NpgsqlDbType.Varchar, "character varying") }, - { "character", new(NpgsqlDbType.Char, "character") }, - { "name", new(NpgsqlDbType.Name, "name") }, - { "refcursor", new(NpgsqlDbType.Refcursor, "refcursor") }, - { "citext", new(NpgsqlDbType.Citext, "citext") }, - { "jsonb", new(NpgsqlDbType.Jsonb, "jsonb") }, - { "json", new(NpgsqlDbType.Json, "json") }, - { "jsonpath", new(NpgsqlDbType.JsonPath, "jsonpath") }, - - // Date/time types - { "timestamp without time zone", new(NpgsqlDbType.Timestamp, "timestamp without time zone", typeof(DateTime)) }, - { "timestamp", new(NpgsqlDbType.Timestamp, "timestamp without time zone", typeof(DateTime)) }, - { "timestamp with time zone", new(NpgsqlDbType.TimestampTz, "timestamp with time zone", typeof(DateTimeOffset)) }, - { "timestamptz", new(NpgsqlDbType.TimestampTz, "timestamp with time zone", typeof(DateTimeOffset)) }, - { "date", new(NpgsqlDbType.Date, "date" -#if NET6_0_OR_GREATER - , typeof(DateOnly) -#endif - ) }, - { "time without time zone", new(NpgsqlDbType.Time, "time without time zone" -#if NET6_0_OR_GREATER - , typeof(TimeOnly) -#endif - ) }, - { "time", new(NpgsqlDbType.Time, "time without time zone" -#if NET6_0_OR_GREATER - , typeof(TimeOnly) -#endif - ) }, - { "time with time zone", new(NpgsqlDbType.TimeTz, "time with time zone") }, - { "timetz", new(NpgsqlDbType.TimeTz, "time with time zone") }, - { "interval", new(NpgsqlDbType.Interval, "interval", typeof(TimeSpan)) }, - - { "timestamp without time zone[]", new(NpgsqlDbType.Array | NpgsqlDbType.Timestamp, "timestamp without time zone[]") }, - { "timestamp with time zone[]", new(NpgsqlDbType.Array | NpgsqlDbType.TimestampTz, "timestamp with time zone[]") }, - - { "int4range", new(NpgsqlDbType.IntegerRange, "int4range") }, - { "int8range", new(NpgsqlDbType.BigIntRange, "int8range") }, - { "numrange", new(NpgsqlDbType.NumericRange, "numrange") }, - { "daterange", new(NpgsqlDbType.DateRange, "daterange") }, - { "tsrange", 
new(NpgsqlDbType.TimestampRange, "tsrange") }, - { "tstzrange", new(NpgsqlDbType.TimestampTzRange, "tstzrange") }, - - { "int4multirange", new(NpgsqlDbType.IntegerMultirange, "int4range") }, - { "int8multirange", new(NpgsqlDbType.BigIntMultirange, "int8range") }, - { "nummultirange", new(NpgsqlDbType.NumericMultirange, "numrange") }, - { "datemultirange", new(NpgsqlDbType.DateMultirange, "datemultirange") }, - { "tsmultirange", new(NpgsqlDbType.TimestampMultirange, "tsmultirange") }, - { "tstzmultirange", new(NpgsqlDbType.TimestampTzMultirange, "tstzmultirange") }, - - // Network types - { "cidr", new(NpgsqlDbType.Cidr, "cidr") }, -#pragma warning disable 618 - { "inet", new(NpgsqlDbType.Inet, "inet", typeof(IPAddress), typeof((IPAddress Address, int Subnet)), typeof(NpgsqlInet), ReadOnlyIPAddressType) }, -#pragma warning restore 618 - { "macaddr", new(NpgsqlDbType.MacAddr, "macaddr", typeof(PhysicalAddress)) }, - { "macaddr8", new(NpgsqlDbType.MacAddr8, "macaddr8") }, - - // Full-text search types - { "tsquery", new(NpgsqlDbType.TsQuery, "tsquery", - typeof(NpgsqlTsQuery), typeof(NpgsqlTsQueryAnd), typeof(NpgsqlTsQueryEmpty), typeof(NpgsqlTsQueryFollowedBy), - typeof(NpgsqlTsQueryLexeme), typeof(NpgsqlTsQueryNot), typeof(NpgsqlTsQueryOr), typeof(NpgsqlTsQueryBinOp) - ) }, - { "tsvector", new(NpgsqlDbType.TsVector, "tsvector", typeof(NpgsqlTsVector)) }, - - // Geometry types - { "box", new(NpgsqlDbType.Box, "box", typeof(NpgsqlBox)) }, - { "circle", new(NpgsqlDbType.Circle, "circle", typeof(NpgsqlCircle)) }, - { "line", new(NpgsqlDbType.Line, "line", typeof(NpgsqlLine)) }, - { "lseg", new(NpgsqlDbType.LSeg, "lseg", typeof(NpgsqlLSeg)) }, - { "path", new(NpgsqlDbType.Path, "path", typeof(NpgsqlPath)) }, - { "point", new(NpgsqlDbType.Point, "point", typeof(NpgsqlPoint)) }, - { "polygon", new(NpgsqlDbType.Polygon, "polygon", typeof(NpgsqlPolygon)) }, - - // LTree types - { "lquery", new(NpgsqlDbType.LQuery, "lquery") }, - { "ltree", new(NpgsqlDbType.LTree, "ltree") }, 
- { "ltxtquery", new(NpgsqlDbType.LTxtQuery, "ltxtquery") }, - - // UInt types - { "oid", new(NpgsqlDbType.Oid, "oid") }, - { "xid", new(NpgsqlDbType.Xid, "xid") }, - { "xid8", new(NpgsqlDbType.Xid8, "xid8") }, - { "cid", new(NpgsqlDbType.Cid, "cid") }, - { "regtype", new(NpgsqlDbType.Regtype, "regtype") }, - { "regconfig", new(NpgsqlDbType.Regconfig, "regconfig") }, - - // Misc types - { "boolean", new(NpgsqlDbType.Boolean, "boolean", typeof(bool)) }, - { "bool", new(NpgsqlDbType.Boolean, "boolean", typeof(bool)) }, - { "bytea", new(NpgsqlDbType.Bytea, "bytea", typeof(byte[]), typeof(ArraySegment) -#if !NETSTANDARD2_0 - , typeof(ReadOnlyMemory), typeof(Memory) -#endif - ) }, - { "uuid", new(NpgsqlDbType.Uuid, "uuid", typeof(Guid)) }, - { "bit varying", new(NpgsqlDbType.Varbit, "bit varying", typeof(BitArray), typeof(BitVector32)) }, - { "varbit", new(NpgsqlDbType.Varbit, "bit varying", typeof(BitArray), typeof(BitVector32)) }, - { "bit", new(NpgsqlDbType.Bit, "bit") }, - { "hstore", new(NpgsqlDbType.Hstore, "hstore", typeof(Dictionary), typeof(IDictionary), typeof(ImmutableDictionary)) }, - - // Internal types - { "int2vector", new(NpgsqlDbType.Int2Vector, "int2vector") }, - { "oidvector", new(NpgsqlDbType.Oidvector, "oidvector") }, - { "pg_lsn", new(NpgsqlDbType.PgLsn, "pg_lsn", typeof(NpgsqlLogSequenceNumber)) }, - { "tid", new(NpgsqlDbType.Tid, "tid", typeof(NpgsqlTid)) }, - { "char", new(NpgsqlDbType.InternalChar, "char") }, - - // Special types - { "unknown", new(NpgsqlDbType.Unknown, "unknown") }, - }; - #region Cached handlers // Numeric types @@ -353,7 +220,7 @@ internal BuiltInTypeHandlerResolver(NpgsqlConnector connector) public override NpgsqlTypeHandler? 
ResolveByClrType(Type type) { - if (!ClrTypeToDataTypeNameTable.TryGetValue(type, out var dataTypeName)) + if (BuiltInTypeMappingResolver.ClrTypeToDataTypeName(type) is not { } dataTypeName) { if (!type.IsSubclassOf(typeof(Stream))) return null; @@ -364,116 +231,6 @@ internal BuiltInTypeHandlerResolver(NpgsqlConnector connector) return ResolveByDataTypeName(dataTypeName); } - static readonly Dictionary ClrTypeToDataTypeNameTable; - - static BuiltInTypeHandlerResolver() - { - ClrTypeToDataTypeNameTable = new() - { - // Numeric types - { typeof(byte), "smallint" }, - { typeof(short), "smallint" }, - { typeof(int), "integer" }, - { typeof(long), "bigint" }, - { typeof(float), "real" }, - { typeof(double), "double precision" }, - { typeof(decimal), "decimal" }, - { typeof(BigInteger), "decimal" }, - - // Text types - { typeof(string), "text" }, - { typeof(char[]), "text" }, - { typeof(char), "text" }, - { typeof(ArraySegment), "text" }, - - // Date/time types - // The DateTime entry is for LegacyTimestampBehavior mode only. 
In regular mode we resolve through - // ResolveValueDependentValue below - { typeof(DateTime), "timestamp without time zone" }, - { typeof(DateTimeOffset), "timestamp with time zone" }, -#if NET6_0_OR_GREATER - { typeof(DateOnly), "date" }, - { typeof(TimeOnly), "time without time zone" }, -#endif - { typeof(TimeSpan), "interval" }, - { typeof(NpgsqlInterval), "interval" }, - - // Network types - { typeof(IPAddress), "inet" }, - // See ReadOnlyIPAddress below - { typeof((IPAddress Address, int Subnet)), "inet" }, -#pragma warning disable 618 - { typeof(NpgsqlInet), "inet" }, -#pragma warning restore 618 - { typeof(PhysicalAddress), "macaddr" }, - - // Full-text types - { typeof(NpgsqlTsVector), "tsvector" }, - { typeof(NpgsqlTsQueryLexeme), "tsquery" }, - { typeof(NpgsqlTsQueryAnd), "tsquery" }, - { typeof(NpgsqlTsQueryOr), "tsquery" }, - { typeof(NpgsqlTsQueryNot), "tsquery" }, - { typeof(NpgsqlTsQueryEmpty), "tsquery" }, - { typeof(NpgsqlTsQueryFollowedBy), "tsquery" }, - - // Geometry types - { typeof(NpgsqlBox), "box" }, - { typeof(NpgsqlCircle), "circle" }, - { typeof(NpgsqlLine), "line" }, - { typeof(NpgsqlLSeg), "lseg" }, - { typeof(NpgsqlPath), "path" }, - { typeof(NpgsqlPoint), "point" }, - { typeof(NpgsqlPolygon), "polygon" }, - - // Misc types - { typeof(bool), "boolean" }, - { typeof(byte[]), "bytea" }, - { typeof(ArraySegment), "bytea" }, -#if !NETSTANDARD2_0 - { typeof(ReadOnlyMemory), "bytea" }, - { typeof(Memory), "bytea" }, -#endif - { typeof(Guid), "uuid" }, - { typeof(BitArray), "bit varying" }, - { typeof(BitVector32), "bit varying" }, - { typeof(Dictionary), "hstore" }, - { typeof(ImmutableDictionary), "hstore" }, - - // Internal types - { typeof(NpgsqlLogSequenceNumber), "pg_lsn" }, - { typeof(NpgsqlTid), "tid" }, - { typeof(DBNull), "unknown" }, - - // Built-in range types - { typeof(NpgsqlRange), "int4range" }, - { typeof(NpgsqlRange), "int8range" }, - { typeof(NpgsqlRange), "numrange" }, -#if NET6_0_OR_GREATER - { typeof(NpgsqlRange), 
"daterange" }, -#endif - - // Built-in multirange types - { typeof(NpgsqlRange[]), "int4multirange" }, - { typeof(List>), "int4multirange" }, - { typeof(NpgsqlRange[]), "int8multirange" }, - { typeof(List>), "int8multirange" }, - { typeof(NpgsqlRange[]), "nummultirange" }, - { typeof(List>), "nummultirange" }, -#if NET6_0_OR_GREATER - { typeof(NpgsqlRange[]), "datemultirange" }, - { typeof(List>), "datemultirange" }, -#endif - }; - - // Recent versions of .NET Core have an internal ReadOnlyIPAddress type (returned e.g. for IPAddress.Loopback) - // But older versions don't have it - if (ReadOnlyIPAddressType != typeof(IPAddress)) - ClrTypeToDataTypeNameTable[ReadOnlyIPAddressType] = "inet"; - - if (LegacyTimestampBehavior) - ClrTypeToDataTypeNameTable[typeof(DateTime)] = "timestamp without time zone"; - } - public override NpgsqlTypeHandler? ResolveValueDependentValue(object value) { // In LegacyTimestampBehavior, DateTime isn't value-dependent, and handled above in ClrTypeToDataTypeNameTable like other types @@ -499,46 +256,6 @@ NpgsqlTypeHandler ArrayHandler(DateTimeKind kind) (PostgresArrayType)PgType("timestamp without time zone[]"), _connector.Settings.ArrayNullabilityMode); } - internal static string? ValueDependentValueToDataTypeName(object value) - { - // In LegacyTimestampBehavior, DateTime isn't value-dependent, and handled above in ClrTypeToDataTypeNameTable like other types - if (LegacyTimestampBehavior) - return null; - - return value switch - { - DateTime dateTime => dateTime.Kind == DateTimeKind.Utc ? "timestamp with time zone" : "timestamp without time zone", - - // For arrays/lists, return timestamp or timestamptz based on the kind of the first DateTime; if the user attempts to - // mix incompatible Kinds, that will fail during validation. For empty arrays it doesn't matter. - IList array => array.Count == 0 - ? "timestamp without time zone[]" - : array[0].Kind == DateTimeKind.Utc ? 
"timestamp with time zone[]" : "timestamp without time zone[]", - - NpgsqlRange range => GetRangeKind(range) == DateTimeKind.Utc ? "tstzrange" : "tsrange", - - NpgsqlRange[] multirange => GetMultirangeKind(multirange) == DateTimeKind.Utc ? "tstzmultirange" : "tsmultirange", - - _ => null - }; - } - - static DateTimeKind GetRangeKind(NpgsqlRange range) - => !range.LowerBoundInfinite - ? range.LowerBound.Kind - : !range.UpperBoundInfinite - ? range.UpperBound.Kind - : DateTimeKind.Unspecified; - - static DateTimeKind GetMultirangeKind(IList> multirange) - { - for (var i = 0; i < multirange.Count; i++) - if (!multirange[i].IsEmpty) - return GetRangeKind(multirange[i]); - - return DateTimeKind.Unspecified; - } - public override NpgsqlTypeHandler? ResolveValueTypeGenerically(T value) { // This method only ever gets called for value types, and relies on the JIT specializing the method for T by eliding all the @@ -626,15 +343,6 @@ static DateTimeKind GetMultirangeKind(IList> multirange) return null; } - internal static string? ClrTypeToDataTypeName(Type type) - => ClrTypeToDataTypeNameTable.TryGetValue(type, out var dataTypeName) ? dataTypeName : null; - - public override TypeMappingInfo? GetMappingByPostgresType(PostgresType type) - => DoGetMappingByDataTypeName(type.Name); - - internal static TypeMappingInfo? DoGetMappingByDataTypeName(string dataTypeName) - => Mappings.TryGetValue(dataTypeName, out var mapping) ? 
mapping : null; - PostgresType PgType(string pgTypeName) => _databaseInfo.GetPostgresTypeByName(pgTypeName); #region Handler accessors diff --git a/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolverFactory.cs b/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolverFactory.cs index 14c2c8149a..2912b97249 100644 --- a/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolverFactory.cs +++ b/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolverFactory.cs @@ -1,4 +1,3 @@ -using System; using Npgsql.Internal; using Npgsql.Internal.TypeHandling; using Npgsql.Internal.TypeMapping; @@ -10,12 +9,5 @@ sealed class BuiltInTypeHandlerResolverFactory : TypeHandlerResolverFactory public override TypeHandlerResolver Create(TypeMapper typeMapper, NpgsqlConnector connector) => new BuiltInTypeHandlerResolver(connector); - public override string? GetDataTypeNameByClrType(Type clrType) - => BuiltInTypeHandlerResolver.ClrTypeToDataTypeName(clrType); - - public override string? GetDataTypeNameByValueDependentValue(object value) - => BuiltInTypeHandlerResolver.ValueDependentValueToDataTypeName(value); - - public override TypeMappingInfo? 
GetMappingByDataTypeName(string dataTypeName) - => BuiltInTypeHandlerResolver.DoGetMappingByDataTypeName(dataTypeName); + public override TypeMappingResolver CreateMappingResolver() => new BuiltInTypeMappingResolver(); } \ No newline at end of file diff --git a/src/Npgsql/TypeMapping/BuiltInTypeMappingResolver.cs b/src/Npgsql/TypeMapping/BuiltInTypeMappingResolver.cs new file mode 100644 index 0000000000..0d344247b0 --- /dev/null +++ b/src/Npgsql/TypeMapping/BuiltInTypeMappingResolver.cs @@ -0,0 +1,257 @@ +using System; +using System.Collections; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Collections.Specialized; +using System.Net; +using System.Net.NetworkInformation; +using System.Numerics; +using Npgsql.Internal.TypeHandling; +using Npgsql.Internal.TypeMapping; +using Npgsql.PostgresTypes; +using NpgsqlTypes; +using static Npgsql.Util.Statics; + +namespace Npgsql.TypeMapping; + +sealed class BuiltInTypeMappingResolver : TypeMappingResolver +{ + static readonly Type ReadOnlyIPAddressType = IPAddress.Loopback.GetType(); + + static readonly Dictionary Mappings = new() + { + // Numeric types + { "smallint", new(NpgsqlDbType.Smallint, "smallint", typeof(short), typeof(byte), typeof(sbyte)) }, + { "integer", new(NpgsqlDbType.Integer, "integer", typeof(int)) }, + { "int", new(NpgsqlDbType.Integer, "integer", typeof(int)) }, + { "bigint", new(NpgsqlDbType.Bigint, "bigint", typeof(long)) }, + { "real", new(NpgsqlDbType.Real, "real", typeof(float)) }, + { "double precision", new(NpgsqlDbType.Double, "double precision", typeof(double)) }, + { "numeric", new(NpgsqlDbType.Numeric, "numeric", typeof(decimal), typeof(BigInteger)) }, + { "decimal", new(NpgsqlDbType.Numeric, "numeric", typeof(decimal), typeof(BigInteger)) }, + { "money", new(NpgsqlDbType.Money, "money") }, + + // Text types + { "text", new(NpgsqlDbType.Text, "text", typeof(string), typeof(char[]), typeof(char), typeof(ArraySegment)) }, + { "xml", new(NpgsqlDbType.Xml, 
"xml") }, + { "character varying", new(NpgsqlDbType.Varchar, "character varying") }, + { "varchar", new(NpgsqlDbType.Varchar, "character varying") }, + { "character", new(NpgsqlDbType.Char, "character") }, + { "name", new(NpgsqlDbType.Name, "name") }, + { "refcursor", new(NpgsqlDbType.Refcursor, "refcursor") }, + { "citext", new(NpgsqlDbType.Citext, "citext") }, + { "jsonb", new(NpgsqlDbType.Jsonb, "jsonb") }, + { "json", new(NpgsqlDbType.Json, "json") }, + { "jsonpath", new(NpgsqlDbType.JsonPath, "jsonpath") }, + + // Date/time types + { "timestamp without time zone", new(NpgsqlDbType.Timestamp, "timestamp without time zone", typeof(DateTime)) }, + { "timestamp", new(NpgsqlDbType.Timestamp, "timestamp without time zone", typeof(DateTime)) }, + { "timestamp with time zone", new(NpgsqlDbType.TimestampTz, "timestamp with time zone", typeof(DateTimeOffset)) }, + { "timestamptz", new(NpgsqlDbType.TimestampTz, "timestamp with time zone", typeof(DateTimeOffset)) }, + { "date", new(NpgsqlDbType.Date, "date" +#if NET6_0_OR_GREATER + , typeof(DateOnly) +#endif + ) }, + { "time without time zone", new(NpgsqlDbType.Time, "time without time zone" +#if NET6_0_OR_GREATER + , typeof(TimeOnly) +#endif + ) }, + { "time", new(NpgsqlDbType.Time, "time without time zone" +#if NET6_0_OR_GREATER + , typeof(TimeOnly) +#endif + ) }, + { "time with time zone", new(NpgsqlDbType.TimeTz, "time with time zone") }, + { "timetz", new(NpgsqlDbType.TimeTz, "time with time zone") }, + { "interval", new(NpgsqlDbType.Interval, "interval", typeof(TimeSpan)) }, + + { "timestamp without time zone[]", new(NpgsqlDbType.Array | NpgsqlDbType.Timestamp, "timestamp without time zone[]") }, + { "timestamp with time zone[]", new(NpgsqlDbType.Array | NpgsqlDbType.TimestampTz, "timestamp with time zone[]") }, + + // Network types + { "cidr", new(NpgsqlDbType.Cidr, "cidr") }, +#pragma warning disable 618 + { "inet", new(NpgsqlDbType.Inet, "inet", typeof(IPAddress), typeof((IPAddress Address, int Subnet)), 
typeof(NpgsqlInet), ReadOnlyIPAddressType) }, +#pragma warning restore 618 + { "macaddr", new(NpgsqlDbType.MacAddr, "macaddr", typeof(PhysicalAddress)) }, + { "macaddr8", new(NpgsqlDbType.MacAddr8, "macaddr8") }, + + // Full-text search types + { "tsquery", new(NpgsqlDbType.TsQuery, "tsquery", + typeof(NpgsqlTsQuery), typeof(NpgsqlTsQueryAnd), typeof(NpgsqlTsQueryEmpty), typeof(NpgsqlTsQueryFollowedBy), + typeof(NpgsqlTsQueryLexeme), typeof(NpgsqlTsQueryNot), typeof(NpgsqlTsQueryOr), typeof(NpgsqlTsQueryBinOp) + ) }, + { "tsvector", new(NpgsqlDbType.TsVector, "tsvector", typeof(NpgsqlTsVector)) }, + + // Geometry types + { "box", new(NpgsqlDbType.Box, "box", typeof(NpgsqlBox)) }, + { "circle", new(NpgsqlDbType.Circle, "circle", typeof(NpgsqlCircle)) }, + { "line", new(NpgsqlDbType.Line, "line", typeof(NpgsqlLine)) }, + { "lseg", new(NpgsqlDbType.LSeg, "lseg", typeof(NpgsqlLSeg)) }, + { "path", new(NpgsqlDbType.Path, "path", typeof(NpgsqlPath)) }, + { "point", new(NpgsqlDbType.Point, "point", typeof(NpgsqlPoint)) }, + { "polygon", new(NpgsqlDbType.Polygon, "polygon", typeof(NpgsqlPolygon)) }, + + // LTree types + { "lquery", new(NpgsqlDbType.LQuery, "lquery") }, + { "ltree", new(NpgsqlDbType.LTree, "ltree") }, + { "ltxtquery", new(NpgsqlDbType.LTxtQuery, "ltxtquery") }, + + // UInt types + { "oid", new(NpgsqlDbType.Oid, "oid") }, + { "xid", new(NpgsqlDbType.Xid, "xid") }, + { "xid8", new(NpgsqlDbType.Xid8, "xid8") }, + { "cid", new(NpgsqlDbType.Cid, "cid") }, + { "regtype", new(NpgsqlDbType.Regtype, "regtype") }, + { "regconfig", new(NpgsqlDbType.Regconfig, "regconfig") }, + + // Misc types + { "boolean", new(NpgsqlDbType.Boolean, "boolean", typeof(bool)) }, + { "bool", new(NpgsqlDbType.Boolean, "boolean", typeof(bool)) }, + { "bytea", new(NpgsqlDbType.Bytea, "bytea", typeof(byte[]), typeof(ArraySegment) +#if !NETSTANDARD2_0 + , typeof(ReadOnlyMemory), typeof(Memory) +#endif + ) }, + { "uuid", new(NpgsqlDbType.Uuid, "uuid", typeof(Guid)) }, + { "bit varying", 
new(NpgsqlDbType.Varbit, "bit varying", typeof(BitArray), typeof(BitVector32)) }, + { "varbit", new(NpgsqlDbType.Varbit, "bit varying", typeof(BitArray), typeof(BitVector32)) }, + { "bit", new(NpgsqlDbType.Bit, "bit") }, + { "hstore", new(NpgsqlDbType.Hstore, "hstore", typeof(Dictionary), typeof(IDictionary), typeof(ImmutableDictionary)) }, + + // Internal types + { "int2vector", new(NpgsqlDbType.Int2Vector, "int2vector") }, + { "oidvector", new(NpgsqlDbType.Oidvector, "oidvector") }, + { "pg_lsn", new(NpgsqlDbType.PgLsn, "pg_lsn", typeof(NpgsqlLogSequenceNumber)) }, + { "tid", new(NpgsqlDbType.Tid, "tid", typeof(NpgsqlTid)) }, + { "char", new(NpgsqlDbType.InternalChar, "char") }, + + // Special types + { "unknown", new(NpgsqlDbType.Unknown, "unknown") }, + }; + + static readonly Dictionary ClrTypeToDataTypeNameTable; + + static BuiltInTypeMappingResolver() + { + ClrTypeToDataTypeNameTable = new() + { + // Numeric types + { typeof(byte), "smallint" }, + { typeof(short), "smallint" }, + { typeof(int), "integer" }, + { typeof(long), "bigint" }, + { typeof(float), "real" }, + { typeof(double), "double precision" }, + { typeof(decimal), "decimal" }, + { typeof(BigInteger), "decimal" }, + + // Text types + { typeof(string), "text" }, + { typeof(char[]), "text" }, + { typeof(char), "text" }, + { typeof(ArraySegment), "text" }, + + // Date/time types + // The DateTime entry is for LegacyTimestampBehavior mode only. 
In regular mode we resolve through + // ResolveValueDependentValue below + { typeof(DateTime), "timestamp without time zone" }, + { typeof(DateTimeOffset), "timestamp with time zone" }, +#if NET6_0_OR_GREATER + { typeof(DateOnly), "date" }, + { typeof(TimeOnly), "time without time zone" }, +#endif + { typeof(TimeSpan), "interval" }, + { typeof(NpgsqlInterval), "interval" }, + + // Network types + { typeof(IPAddress), "inet" }, + // See ReadOnlyIPAddress below + { typeof((IPAddress Address, int Subnet)), "inet" }, +#pragma warning disable 618 + { typeof(NpgsqlInet), "inet" }, +#pragma warning restore 618 + { typeof(PhysicalAddress), "macaddr" }, + + // Full-text types + { typeof(NpgsqlTsVector), "tsvector" }, + { typeof(NpgsqlTsQueryLexeme), "tsquery" }, + { typeof(NpgsqlTsQueryAnd), "tsquery" }, + { typeof(NpgsqlTsQueryOr), "tsquery" }, + { typeof(NpgsqlTsQueryNot), "tsquery" }, + { typeof(NpgsqlTsQueryEmpty), "tsquery" }, + { typeof(NpgsqlTsQueryFollowedBy), "tsquery" }, + + // Geometry types + { typeof(NpgsqlBox), "box" }, + { typeof(NpgsqlCircle), "circle" }, + { typeof(NpgsqlLine), "line" }, + { typeof(NpgsqlLSeg), "lseg" }, + { typeof(NpgsqlPath), "path" }, + { typeof(NpgsqlPoint), "point" }, + { typeof(NpgsqlPolygon), "polygon" }, + + // Misc types + { typeof(bool), "boolean" }, + { typeof(byte[]), "bytea" }, + { typeof(ArraySegment), "bytea" }, +#if !NETSTANDARD2_0 + { typeof(ReadOnlyMemory), "bytea" }, + { typeof(Memory), "bytea" }, +#endif + { typeof(Guid), "uuid" }, + { typeof(BitArray), "bit varying" }, + { typeof(BitVector32), "bit varying" }, + { typeof(Dictionary), "hstore" }, + { typeof(ImmutableDictionary), "hstore" }, + + // Internal types + { typeof(NpgsqlLogSequenceNumber), "pg_lsn" }, + { typeof(NpgsqlTid), "tid" }, + { typeof(DBNull), "unknown" } + }; + + // Recent versions of .NET Core have an internal ReadOnlyIPAddress type (returned e.g. 
for IPAddress.Loopback) + // But older versions don't have it + if (ReadOnlyIPAddressType != typeof(IPAddress)) + ClrTypeToDataTypeNameTable[ReadOnlyIPAddressType] = "inet"; + + if (LegacyTimestampBehavior) + ClrTypeToDataTypeNameTable[typeof(DateTime)] = "timestamp without time zone"; + } + + public override string? GetDataTypeNameByClrType(Type clrType) + => ClrTypeToDataTypeName(clrType); + + internal static string? ClrTypeToDataTypeName(Type clrType) + => ClrTypeToDataTypeNameTable.TryGetValue(clrType, out var dataTypeName) ? dataTypeName : null; + + public override string? GetDataTypeNameByValueDependentValue(object value) + { + // In LegacyTimestampBehavior, DateTime isn't value-dependent, and handled above in ClrTypeToDataTypeNameTable like other types + if (LegacyTimestampBehavior) + return null; + + return value switch + { + DateTime dateTime => dateTime.Kind == DateTimeKind.Utc ? "timestamp with time zone" : "timestamp without time zone", + + // For arrays/lists, return timestamp or timestamptz based on the kind of the first DateTime; if the user attempts to + // mix incompatible Kinds, that will fail during validation. For empty arrays it doesn't matter. + IList array => array.Count == 0 + ? "timestamp without time zone[]" + : array[0].Kind == DateTimeKind.Utc ? "timestamp with time zone[]" : "timestamp without time zone[]", + + _ => null + }; + } + + public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) + => Mappings.TryGetValue(dataTypeName, out var mapping) ? mapping : null; + + public override TypeMappingInfo? 
GetMappingByPostgresType(TypeMapper typeMapper, PostgresType type) + => GetMappingByDataTypeName(type.Name); +} diff --git a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs index 9e65675a97..a13abf5ea0 100644 --- a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs +++ b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs @@ -20,7 +20,8 @@ sealed class GlobalTypeMapper : INpgsqlTypeMapper public INpgsqlNameTranslator DefaultNameTranslator { get; set; } = new NpgsqlSnakeCaseNameTranslator(); - internal List ResolverFactories { get; } = new(); + internal List HandlerResolverFactories { get; } = new(); + List MappingResolvers { get; } = new(); public ConcurrentDictionary UserTypeMappings { get; } = new(); readonly ConcurrentDictionary _mappingsByClrType = new(); @@ -155,17 +156,21 @@ public void AddTypeResolverFactory(TypeHandlerResolverFactory resolverFactory) // we replace an existing resolver of the same CLR type. var type = resolverFactory.GetType(); - if (ResolverFactories[0].GetType() == type) - ResolverFactories[0] = resolverFactory; + if (HandlerResolverFactories[0].GetType() == type) + HandlerResolverFactories[0] = resolverFactory; else { - for (var i = 0; i < ResolverFactories.Count; i++) - if (ResolverFactories[i].GetType() == type) - ResolverFactories.RemoveAt(i); + for (var i = 0; i < HandlerResolverFactories.Count; i++) + if (HandlerResolverFactories[i].GetType() == type) + HandlerResolverFactories.RemoveAt(i); - ResolverFactories.Insert(0, resolverFactory); + HandlerResolverFactories.Insert(0, resolverFactory); } + var mappingResolver = resolverFactory.CreateMappingResolver(); + if (mappingResolver is not null) + AddMappingResolver(mappingResolver, overwrite: true); + RecordChange(); } finally @@ -174,13 +179,66 @@ public void AddTypeResolverFactory(TypeHandlerResolverFactory resolverFactory) } } + internal void TryAddMappingResolver(TypeMappingResolver resolver) + { + Lock.EnterWriteLock(); + try + { + // For global mapper 
resolvers we don't need to overwrite them in case we add another of the same type + // because they shouldn't have a state. + // The only exception is whenever a user adds a resolver factory to global type mapper specifically. + // In that case we create a local mapper resolver and always overwrite the one we already have + // as it can have settings (e.g. json serialization) + if (AddMappingResolver(resolver, overwrite: false)) + RecordChange(); + } + finally + { + Lock.ExitWriteLock(); + } + } + + bool AddMappingResolver(TypeMappingResolver resolver, bool overwrite) + { + // Since EFCore.PG plugins (and possibly other users) repeatedly call NpgsqlConnection.GlobalTypeMapped.UseNodaTime, + // we replace an existing resolver of the same CLR type. + var type = resolver.GetType(); + + if (MappingResolvers[0].GetType() == type) + { + if (!overwrite) + return false; + MappingResolvers[0] = resolver; + } + else + { + for (var i = 0; i < MappingResolvers.Count; i++) + { + if (MappingResolvers[i].GetType() == type) + { + if (!overwrite) + return false; + MappingResolvers.RemoveAt(i); + break; + } + } + + MappingResolvers.Insert(0, resolver); + } + + return true; + } + public void Reset() { Lock.EnterWriteLock(); try { - ResolverFactories.Clear(); - ResolverFactories.Add(new BuiltInTypeHandlerResolverFactory()); + HandlerResolverFactories.Clear(); + HandlerResolverFactories.Add(new BuiltInTypeHandlerResolverFactory()); + + MappingResolvers.Clear(); + MappingResolvers.Add(new BuiltInTypeMappingResolver()); UserTypeMappings.Clear(); @@ -219,11 +277,11 @@ internal bool TryResolveMappingByValue(object value, [NotNullWhen(true)] out Typ if (_mappingsByClrType.TryGetValue(type, out typeMapping)) return true; - foreach (var resolverFactory in ResolverFactories) - if ((typeMapping = resolverFactory.GetMappingByValueDependentValue(value)) is not null) + foreach (var resolver in MappingResolvers) + if ((typeMapping = resolver.GetMappingByValueDependentValue(value)) is not null) 
return true; - return TryResolveMappingByClrType(value.GetType(), out typeMapping); + return TryResolveMappingByClrType(type, out typeMapping); } finally { @@ -235,9 +293,9 @@ bool TryResolveMappingByClrType(Type clrType, [NotNullWhen(true)] out TypeMappin if (_mappingsByClrType.TryGetValue(clrType, out typeMapping)) return true; - foreach (var resolverFactory in ResolverFactories) + foreach (var resolver in MappingResolvers) { - if ((typeMapping = resolverFactory.GetMappingByClrType(clrType)) is not null) + if ((typeMapping = resolver.GetMappingByClrType(clrType)) is not null) { _mappingsByClrType[clrType] = typeMapping; return true; diff --git a/src/Npgsql/TypeMapping/RangeTypeHandlerResolver.cs b/src/Npgsql/TypeMapping/RangeTypeHandlerResolver.cs index 1386383a63..ba3f0f44d2 100644 --- a/src/Npgsql/TypeMapping/RangeTypeHandlerResolver.cs +++ b/src/Npgsql/TypeMapping/RangeTypeHandlerResolver.cs @@ -39,7 +39,7 @@ internal RangeTypeHandlerResolver(TypeMapper typeMapper, NpgsqlConnector connect public override NpgsqlTypeHandler? ResolveByDataTypeName(string typeName) { - if (_databaseInfo.GetPostgresTypeByName(typeName) is not { } pgType) + if (!_databaseInfo.TryGetPostgresTypeByName(typeName, out var pgType)) return null; return pgType switch @@ -130,34 +130,6 @@ Type GetUnderlyingType(Type t) } } - public override TypeMappingInfo? GetMappingByPostgresType(PostgresType type) - { - switch (type) - { - case PostgresRangeType pgRangeType: - { - if (_typeMapper.TryGetMapping(pgRangeType.Subtype, out var subtypeMapping)) - { - return new(subtypeMapping.NpgsqlDbType | NpgsqlDbType.Range, type.DisplayName); - } - - break; - } - - case PostgresMultirangeType pgMultirangeType: - { - if (_typeMapper.TryGetMapping(pgMultirangeType.Subrange.Subtype, out var subtypeMapping)) - { - return new(subtypeMapping.NpgsqlDbType | NpgsqlDbType.Multirange, type.DisplayName); - } - - break; - } - } - - return null; - } - public override NpgsqlTypeHandler? 
ResolveValueDependentValue(object value) { // In LegacyTimestampBehavior, DateTime isn't value-dependent, and handled above in ClrTypeToDataTypeNameTable like other types diff --git a/src/Npgsql/TypeMapping/RangeTypeHandlerResolverFactory.cs b/src/Npgsql/TypeMapping/RangeTypeHandlerResolverFactory.cs index a1bb608a98..bc7212eda8 100644 --- a/src/Npgsql/TypeMapping/RangeTypeHandlerResolverFactory.cs +++ b/src/Npgsql/TypeMapping/RangeTypeHandlerResolverFactory.cs @@ -1,5 +1,4 @@ -using System; -using Npgsql.Internal; +using Npgsql.Internal; using Npgsql.Internal.TypeHandling; using Npgsql.Internal.TypeMapping; @@ -10,15 +9,7 @@ sealed class RangeTypeHandlerResolverFactory : TypeHandlerResolverFactory public override TypeHandlerResolver Create(TypeMapper typeMapper, NpgsqlConnector connector) => new RangeTypeHandlerResolver(typeMapper, connector); - // Here and below we don't resolve anything. - // Instead BuiltInTypeHandlerResolver will resolve mappings for us. - // This is so we don't need to add RangeTypeHandlerResolverFactory to GlobalTypeMapper - public override string? GetDataTypeNameByClrType(Type clrType) - => null; + public override TypeMappingResolver CreateMappingResolver() => new RangeTypeMappingResolver(); - public override string? GetDataTypeNameByValueDependentValue(object value) - => null; - - public override TypeMappingInfo? 
GetMappingByDataTypeName(string dataTypeName) - => null; + public override TypeMappingResolver CreateGlobalMappingResolver() => new RangeTypeMappingResolver(); } diff --git a/src/Npgsql/TypeMapping/RangeTypeMappingResolver.cs b/src/Npgsql/TypeMapping/RangeTypeMappingResolver.cs new file mode 100644 index 0000000000..5061a9780c --- /dev/null +++ b/src/Npgsql/TypeMapping/RangeTypeMappingResolver.cs @@ -0,0 +1,118 @@ +using System; +using System.Collections.Generic; +using Npgsql.Internal.TypeHandling; +using Npgsql.Internal.TypeMapping; +using Npgsql.PostgresTypes; +using NpgsqlTypes; +using static Npgsql.Util.Statics; + +namespace Npgsql.TypeMapping; + +sealed class RangeTypeMappingResolver : TypeMappingResolver +{ + static readonly Dictionary Mappings = new() + { + { "int4range", new(NpgsqlDbType.IntegerRange, "int4range") }, + { "int8range", new(NpgsqlDbType.BigIntRange, "int8range") }, + { "numrange", new(NpgsqlDbType.NumericRange, "numrange") }, + { "daterange", new(NpgsqlDbType.DateRange, "daterange") }, + { "tsrange", new(NpgsqlDbType.TimestampRange, "tsrange") }, + { "tstzrange", new(NpgsqlDbType.TimestampTzRange, "tstzrange") }, + + { "int4multirange", new(NpgsqlDbType.IntegerMultirange, "int4range") }, + { "int8multirange", new(NpgsqlDbType.BigIntMultirange, "int8range") }, + { "nummultirange", new(NpgsqlDbType.NumericMultirange, "numrange") }, + { "datemultirange", new(NpgsqlDbType.DateMultirange, "datemultirange") }, + { "tsmultirange", new(NpgsqlDbType.TimestampMultirange, "tsmultirange") }, + { "tstzmultirange", new(NpgsqlDbType.TimestampTzMultirange, "tstzmultirange") } + }; + + static readonly Dictionary ClrTypeToDataTypeNameTable = new() + { + // Built-in range types + { typeof(NpgsqlRange), "int4range" }, + { typeof(NpgsqlRange), "int8range" }, + { typeof(NpgsqlRange), "numrange" }, +#if NET6_0_OR_GREATER + { typeof(NpgsqlRange), "daterange" }, +#endif + + // Built-in multirange types + { typeof(NpgsqlRange[]), "int4multirange" }, + { typeof(List>), 
"int4multirange" }, + { typeof(NpgsqlRange[]), "int8multirange" }, + { typeof(List>), "int8multirange" }, + { typeof(NpgsqlRange[]), "nummultirange" }, + { typeof(List>), "nummultirange" }, +#if NET6_0_OR_GREATER + { typeof(NpgsqlRange[]), "datemultirange" }, + { typeof(List>), "datemultirange" }, +#endif + }; + + public override string? GetDataTypeNameByClrType(Type clrType) + => ClrTypeToDataTypeNameTable.TryGetValue(clrType, out var dataTypeName) ? dataTypeName : null; + + public override string? GetDataTypeNameByValueDependentValue(object value) + { + // In LegacyTimestampBehavior, DateTime isn't value-dependent, and handled above in ClrTypeToDataTypeNameTable like other types + if (LegacyTimestampBehavior) + return null; + + return value switch + { + NpgsqlRange range => GetRangeKind(range) == DateTimeKind.Utc ? "tstzrange" : "tsrange", + + NpgsqlRange[] multirange => GetMultirangeKind(multirange) == DateTimeKind.Utc ? "tstzmultirange" : "tsmultirange", + + _ => null + }; + } + + public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) + => Mappings.TryGetValue(dataTypeName, out var mapping) ? mapping : null; + + public override TypeMappingInfo? GetMappingByPostgresType(TypeMapper mapper, PostgresType type) + { + switch (type) + { + case PostgresRangeType pgRangeType: + { + if (mapper.TryGetMapping(pgRangeType.Subtype, out var subtypeMapping)) + { + return new(subtypeMapping.NpgsqlDbType | NpgsqlDbType.Range, type.DisplayName); + } + + break; + } + + case PostgresMultirangeType pgMultirangeType: + { + if (mapper.TryGetMapping(pgMultirangeType.Subrange.Subtype, out var subtypeMapping)) + { + return new(subtypeMapping.NpgsqlDbType | NpgsqlDbType.Multirange, type.DisplayName); + } + + break; + } + } + + return null; + } + + static DateTimeKind GetRangeKind(NpgsqlRange range) + => !range.LowerBoundInfinite + ? range.LowerBound.Kind + : !range.UpperBoundInfinite + ? 
range.UpperBound.Kind + : DateTimeKind.Unspecified; + + static DateTimeKind GetMultirangeKind(IList> multirange) + { + for (var i = 0; i < multirange.Count; i++) + if (!multirange[i].IsEmpty) + return GetRangeKind(multirange[i]); + + return DateTimeKind.Unspecified; + } +} diff --git a/src/Npgsql/TypeMapping/RecordTypeHandlerResolver.cs b/src/Npgsql/TypeMapping/RecordTypeHandlerResolver.cs index 438e91cf9d..df0a44f4e4 100644 --- a/src/Npgsql/TypeMapping/RecordTypeHandlerResolver.cs +++ b/src/Npgsql/TypeMapping/RecordTypeHandlerResolver.cs @@ -25,7 +25,5 @@ public RecordTypeHandlerResolver(TypeMapper typeMapper, NpgsqlConnector connecto public override NpgsqlTypeHandler? ResolveByClrType(Type type) => null; - public override TypeMappingInfo? GetMappingByPostgresType(PostgresType type) => null; - NpgsqlTypeHandler GetHandler() => _recordHandler ??= new RecordHandler(_databaseInfo.GetPostgresTypeByName("record"), _typeMapper); } diff --git a/src/Npgsql/TypeMapping/RecordTypeHandlerResolverFactory.cs b/src/Npgsql/TypeMapping/RecordTypeHandlerResolverFactory.cs index a29a35d555..e308fb03e4 100644 --- a/src/Npgsql/TypeMapping/RecordTypeHandlerResolverFactory.cs +++ b/src/Npgsql/TypeMapping/RecordTypeHandlerResolverFactory.cs @@ -9,14 +9,4 @@ sealed class RecordTypeHandlerResolverFactory : TypeHandlerResolverFactory { public override TypeHandlerResolver Create(TypeMapper typeMapper, NpgsqlConnector connector) => new RecordTypeHandlerResolver(typeMapper, connector); - - // Records aren't mapped to anything - public override string? GetDataTypeNameByClrType(Type clrType) - => null; - - public override string? GetDataTypeNameByValueDependentValue(object value) - => null; - - public override TypeMappingInfo? 
GetMappingByDataTypeName(string dataTypeName) - => null; } diff --git a/src/Npgsql/TypeMapping/JsonTypeHandlerResolver.cs b/src/Npgsql/TypeMapping/SystemTextJsonTypeHandlerResolver.cs similarity index 61% rename from src/Npgsql/TypeMapping/JsonTypeHandlerResolver.cs rename to src/Npgsql/TypeMapping/SystemTextJsonTypeHandlerResolver.cs index 8265a886cd..051d8f546e 100644 --- a/src/Npgsql/TypeMapping/JsonTypeHandlerResolver.cs +++ b/src/Npgsql/TypeMapping/SystemTextJsonTypeHandlerResolver.cs @@ -6,18 +6,17 @@ using Npgsql.Internal.TypeHandlers; using Npgsql.Internal.TypeHandling; using Npgsql.PostgresTypes; -using NpgsqlTypes; namespace Npgsql.TypeMapping; -sealed class JsonTypeHandlerResolver : TypeHandlerResolver +sealed class SystemTextJsonTypeHandlerResolver : TypeHandlerResolver { readonly NpgsqlDatabaseInfo _databaseInfo; readonly SystemTextJsonHandler? _jsonbHandler; // Note that old version of PG (and Redshift) don't have jsonb readonly SystemTextJsonHandler? _jsonHandler; readonly Dictionary? _userClrTypes; - internal JsonTypeHandlerResolver( + internal SystemTextJsonTypeHandlerResolver( NpgsqlConnector connector, Dictionary? userClrTypes, JsonSerializerOptions serializerOptions) @@ -39,29 +38,10 @@ internal JsonTypeHandlerResolver( }; public override NpgsqlTypeHandler? ResolveByClrType(Type type) - => ClrTypeToDataTypeName(type, _userClrTypes) is { } dataTypeName && ResolveByDataTypeName(dataTypeName) is { } handler + => SystemTextJsonTypeMappingResolver.ClrTypeToDataTypeName(type, _userClrTypes) is { } dataTypeName && ResolveByDataTypeName(dataTypeName) is { } handler ? handler : null; - internal static string? ClrTypeToDataTypeName(Type type, Dictionary? clrTypes) - => type == typeof(JsonDocument) - || type == typeof(JsonObject) || type == typeof(JsonArray) - ? "jsonb" - : clrTypes is not null && clrTypes.TryGetValue(type, out var dataTypeName) ? dataTypeName : null; - - public override TypeMappingInfo? 
GetMappingByPostgresType(PostgresType type) - => DoGetMappingByDataTypeName(type.Name); - - internal static TypeMappingInfo? DoGetMappingByDataTypeName(string dataTypeName) - => dataTypeName switch - { - "jsonb" => new(NpgsqlDbType.Jsonb, "jsonb", typeof(JsonDocument) - , typeof(JsonObject), typeof(JsonArray) - ), - "json" => new(NpgsqlDbType.Json, "json"), - _ => null - }; - public override NpgsqlTypeHandler? ResolveValueTypeGenerically(T value) { if (typeof(T) == typeof(JsonDocument)) diff --git a/src/Npgsql/TypeMapping/JsonTypeHandlerResolverFactory.cs b/src/Npgsql/TypeMapping/SystemTextJsonTypeHandlerResolverFactory.cs similarity index 66% rename from src/Npgsql/TypeMapping/JsonTypeHandlerResolverFactory.cs rename to src/Npgsql/TypeMapping/SystemTextJsonTypeHandlerResolverFactory.cs index 4fb7b03eb3..26f593933e 100644 --- a/src/Npgsql/TypeMapping/JsonTypeHandlerResolverFactory.cs +++ b/src/Npgsql/TypeMapping/SystemTextJsonTypeHandlerResolverFactory.cs @@ -7,12 +7,12 @@ namespace Npgsql.TypeMapping; -sealed class JsonTypeHandlerResolverFactory : TypeHandlerResolverFactory +sealed class SystemTextJsonTypeHandlerResolverFactory : TypeHandlerResolverFactory { readonly JsonSerializerOptions _settings; readonly Dictionary? _userClrTypes; - public JsonTypeHandlerResolverFactory( + public SystemTextJsonTypeHandlerResolverFactory( Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerOptions? settings = null) @@ -37,11 +37,9 @@ public JsonTypeHandlerResolverFactory( } public override TypeHandlerResolver Create(TypeMapper typeMapper, NpgsqlConnector connector) - => new JsonTypeHandlerResolver(connector, _userClrTypes, _settings); + => new SystemTextJsonTypeHandlerResolver(connector, _userClrTypes, _settings); - public override string? 
GetDataTypeNameByClrType(Type type) - => JsonTypeHandlerResolver.ClrTypeToDataTypeName(type, _userClrTypes); + public override TypeMappingResolver CreateMappingResolver() => new SystemTextJsonTypeMappingResolver(_userClrTypes); - public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) - => JsonTypeHandlerResolver.DoGetMappingByDataTypeName(dataTypeName); + public override TypeMappingResolver CreateGlobalMappingResolver() => new SystemTextJsonTypeMappingResolver(userClrTypes: null); } diff --git a/src/Npgsql/TypeMapping/SystemTextJsonTypeMappingResolver.cs b/src/Npgsql/TypeMapping/SystemTextJsonTypeMappingResolver.cs new file mode 100644 index 0000000000..2ec4bb2544 --- /dev/null +++ b/src/Npgsql/TypeMapping/SystemTextJsonTypeMappingResolver.cs @@ -0,0 +1,42 @@ +using System; +using System.Collections.Generic; +using System.Text.Json; +using System.Text.Json.Nodes; +using Npgsql.Internal.TypeHandling; +using Npgsql.Internal.TypeMapping; +using Npgsql.PostgresTypes; +using NpgsqlTypes; + +namespace Npgsql.TypeMapping; + +sealed class SystemTextJsonTypeMappingResolver : TypeMappingResolver +{ + readonly Dictionary? _userClrTypes; + + public SystemTextJsonTypeMappingResolver(Dictionary? userClrTypes) => _userClrTypes = userClrTypes; + + public override string? GetDataTypeNameByClrType(Type type) + => ClrTypeToDataTypeName(type, _userClrTypes); + + public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) + => DoGetMappingByDataTypeName(dataTypeName); + + public override TypeMappingInfo? GetMappingByPostgresType(TypeMapper mapper, PostgresType type) + => DoGetMappingByDataTypeName(type.Name); + + internal static string? ClrTypeToDataTypeName(Type type, Dictionary? clrTypes) + => type == typeof(JsonDocument) + || type == typeof(JsonObject) || type == typeof(JsonArray) + ? "jsonb" + : clrTypes is not null && clrTypes.TryGetValue(type, out var dataTypeName) ? dataTypeName : null; + + static TypeMappingInfo? 
DoGetMappingByDataTypeName(string dataTypeName) + => dataTypeName switch + { + "jsonb" => new(NpgsqlDbType.Jsonb, "jsonb", typeof(JsonDocument) + , typeof(JsonObject), typeof(JsonArray) + ), + "json" => new(NpgsqlDbType.Json, "json"), + _ => null + }; +} diff --git a/test/Npgsql.Tests/ReaderTests.cs b/test/Npgsql.Tests/ReaderTests.cs index 08c5d78d18..d106599b9d 100644 --- a/test/Npgsql.Tests/ReaderTests.cs +++ b/test/Npgsql.Tests/ReaderTests.cs @@ -2175,10 +2175,6 @@ class ExplodingTypeHandlerResolverFactory : TypeHandlerResolverFactory public ExplodingTypeHandlerResolverFactory(bool safe) => _safe = safe; public override TypeHandlerResolver Create(TypeMapper typeMapper, NpgsqlConnector connector) => new ExplodingTypeHandlerResolver(_safe); - public override TypeMappingInfo GetMappingByDataTypeName(string dataTypeName) => throw new NotSupportedException(); - public override string? GetDataTypeNameByClrType(Type clrType) => throw new NotSupportedException(); - public override string? GetDataTypeNameByValueDependentValue(object value) => throw new NotSupportedException(); - class ExplodingTypeHandlerResolver : TypeHandlerResolver { readonly bool _safe; @@ -2188,7 +2184,6 @@ class ExplodingTypeHandlerResolver : TypeHandlerResolver public override NpgsqlTypeHandler? ResolveByDataTypeName(string typeName) => typeName == "integer" ? new ExplodingTypeHandler(null!, _safe) : null; public override NpgsqlTypeHandler? 
ResolveByClrType(Type type) => null; - public override TypeMappingInfo GetMappingByPostgresType(PostgresType type) => throw new NotImplementedException(); } } diff --git a/test/Npgsql.Tests/TypeMapperTests.cs b/test/Npgsql.Tests/TypeMapperTests.cs index 0237bd617a..55db858600 100644 --- a/test/Npgsql.Tests/TypeMapperTests.cs +++ b/test/Npgsql.Tests/TypeMapperTests.cs @@ -40,7 +40,7 @@ public async Task Global_mapping() // But they do affect on new data sources await using var dataSource2 = CreateDataSource(); - Assert.ThrowsAsync(() => AssertType(dataSource2, Mood.Happy, "happy", type, npgsqlDbType: null)); + Assert.ThrowsAsync(() => AssertType(dataSource2, Mood.Happy, "happy", type, npgsqlDbType: null)); } finally { @@ -73,7 +73,7 @@ public async Task Global_mapping_reset() // But they do affect on new data sources await using var dataSource2 = CreateDataSource(); - Assert.ThrowsAsync(() => AssertType(dataSource2, Mood.Happy, "happy", type, npgsqlDbType: null)); + Assert.ThrowsAsync(() => AssertType(dataSource2, Mood.Happy, "happy", type, npgsqlDbType: null)); } finally { @@ -167,10 +167,6 @@ class CitextToStringTypeHandlerResolverFactory : TypeHandlerResolverFactory public override TypeHandlerResolver Create(TypeMapper typeMapper, NpgsqlConnector connector) => new CitextToStringTypeHandlerResolver(connector); - public override TypeMappingInfo GetMappingByDataTypeName(string dataTypeName) => throw new NotSupportedException(); - public override string GetDataTypeNameByClrType(Type clrType) => throw new NotSupportedException(); - public override string GetDataTypeNameByValueDependentValue(object value) => throw new NotSupportedException(); - class CitextToStringTypeHandlerResolver : TypeHandlerResolver { readonly NpgsqlConnector _connector; @@ -185,8 +181,6 @@ public CitextToStringTypeHandlerResolver(NpgsqlConnector connector) public override NpgsqlTypeHandler? ResolveByClrType(Type type) => type == typeof(string) ? 
new TextHandler(_pgCitextType, _connector.TextEncoding) : null; public override NpgsqlTypeHandler? ResolveByDataTypeName(string typeName) => null; - - public override TypeMappingInfo? GetMappingByPostgresType(PostgresType type) => throw new NotSupportedException(); } } From 10557d7e99a84a968a329a0b2c5a6f1c83c52bd1 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Mon, 20 Mar 2023 23:08:56 +0100 Subject: [PATCH 115/761] Bump version to 8.0.0-preview.3 --- Directory.Build.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Build.props b/Directory.Build.props index a6626dce0c..56e747a60f 100644 --- a/Directory.Build.props +++ b/Directory.Build.props @@ -1,6 +1,6 @@  - 8.0.0-preview.2 + 8.0.0-preview.3 latest true enable From fbc7538b268f4827d021e50092b5f1ddc5b72f49 Mon Sep 17 00:00:00 2001 From: yoshihikoueno <38683757+yoshihikoueno@users.noreply.github.com> Date: Tue, 21 Mar 2023 20:20:48 +0900 Subject: [PATCH 116/761] Check the message after sending a PasswordMessage (#5006) --- src/Npgsql/Internal/NpgsqlConnector.Auth.cs | 53 ++++++++++----------- 1 file changed, 25 insertions(+), 28 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs index 09c92bd9c9..69b9d09892 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs @@ -19,35 +19,38 @@ partial class NpgsqlConnector { async Task Authenticate(string username, NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken) { - timeout.CheckAndApply(this); - var msg = ExpectAny(await ReadMessage(async), this); - switch (msg.AuthRequestType) + while (true) { - case AuthenticationRequestType.AuthenticationOk: - return; + timeout.CheckAndApply(this); + var msg = ExpectAny(await ReadMessage(async), this); + switch (msg.AuthRequestType) + { + case AuthenticationRequestType.AuthenticationOk: + return; - case AuthenticationRequestType.AuthenticationCleartextPassword: - 
await AuthenticateCleartext(username, async, cancellationToken); - return; + case AuthenticationRequestType.AuthenticationCleartextPassword: + await AuthenticateCleartext(username, async, cancellationToken); + break; - case AuthenticationRequestType.AuthenticationMD5Password: - await AuthenticateMD5(username, ((AuthenticationMD5PasswordMessage)msg).Salt, async, cancellationToken); - return; + case AuthenticationRequestType.AuthenticationMD5Password: + await AuthenticateMD5(username, ((AuthenticationMD5PasswordMessage)msg).Salt, async, cancellationToken); + break; - case AuthenticationRequestType.AuthenticationSASL: - await AuthenticateSASL(((AuthenticationSASLMessage)msg).Mechanisms, username, async, cancellationToken); - return; + case AuthenticationRequestType.AuthenticationSASL: + await AuthenticateSASL(((AuthenticationSASLMessage)msg).Mechanisms, username, async, cancellationToken); + break; - case AuthenticationRequestType.AuthenticationGSS: - case AuthenticationRequestType.AuthenticationSSPI: - await AuthenticateGSS(async); - return; + case AuthenticationRequestType.AuthenticationGSS: + case AuthenticationRequestType.AuthenticationSSPI: + await AuthenticateGSS(async); + return; - case AuthenticationRequestType.AuthenticationGSSContinue: - throw new NpgsqlException("Can't start auth cycle with AuthenticationGSSContinue"); + case AuthenticationRequestType.AuthenticationGSSContinue: + throw new NpgsqlException("Can't start auth cycle with AuthenticationGSSContinue"); - default: - throw new NotSupportedException($"Authentication method not supported (Received: {msg.AuthRequestType})"); + default: + throw new NotSupportedException($"Authentication method not supported (Received: {msg.AuthRequestType})"); + } } } @@ -62,7 +65,6 @@ async Task AuthenticateCleartext(string username, bool async, CancellationToken await WritePassword(encoded, async, cancellationToken); await Flush(async, cancellationToken); - ExpectAny(await ReadMessage(async), this); } async Task 
AuthenticateSASL(List mechanisms, string username, bool async, CancellationToken cancellationToken = default) @@ -204,10 +206,6 @@ async Task AuthenticateSASL(List mechanisms, string username, bool async if (scramFinalServerMsg.ServerSignature != Convert.ToBase64String(serverSignature)) throw new NpgsqlException("[SCRAM] Unable to verify server signature"); - var okMsg = ExpectAny(await ReadMessage(async), this); - if (okMsg.AuthRequestType != AuthenticationRequestType.AuthenticationOk) - throw new NpgsqlException("[SASL] Expected AuthenticationOK message"); - static string GetNonce() { @@ -281,7 +279,6 @@ async Task AuthenticateMD5(string username, byte[] salt, bool async, Cancellatio await WritePassword(result, async, cancellationToken); await Flush(async, cancellationToken); - ExpectAny(await ReadMessage(async), this); } #if NET7_0_OR_GREATER From e698b5612b326199addbced4daa66d8d05274b7b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 24 Mar 2023 08:20:09 +0100 Subject: [PATCH 117/761] Bump NodaTime from 3.1.6 to 3.1.7 (#5012)Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 2c9572810e..544d8d8d22 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -14,7 +14,7 @@ - + From fef68e68457a415d09516398b5ef819ed6fd6f09 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 25 Mar 2023 00:20:33 +0100 Subject: [PATCH 118/761] Bump NodaTime from 3.1.7 to 3.1.8 (#5015) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 544d8d8d22..1581f05b4a 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -14,7 +14,7 @@ - + From 
43a2e983f41aced973d7071eb0fdbea1ca7a8773 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Sun, 26 Mar 2023 00:15:53 +0300 Subject: [PATCH 119/761] Remove authentication-related encryption code (#5013) Closes #4966 --- src/Npgsql/Internal/EncryptionHandler.cs | 39 +++++++ src/Npgsql/Internal/NpgsqlConnector.Auth.cs | 118 ++++++++++---------- src/Npgsql/Internal/NpgsqlConnector.cs | 11 +- src/Npgsql/NpgsqlDataSource.cs | 9 +- src/Npgsql/NpgsqlDataSourceConfiguration.cs | 6 +- src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 15 +-- 6 files changed, 115 insertions(+), 83 deletions(-) create mode 100644 src/Npgsql/Internal/EncryptionHandler.cs diff --git a/src/Npgsql/Internal/EncryptionHandler.cs b/src/Npgsql/Internal/EncryptionHandler.cs new file mode 100644 index 0000000000..c03d11ef20 --- /dev/null +++ b/src/Npgsql/Internal/EncryptionHandler.cs @@ -0,0 +1,39 @@ +using System; +using System.Security.Cryptography.X509Certificates; +using System.Threading.Tasks; +using Npgsql.Properties; +using Npgsql.Util; + +namespace Npgsql.Internal; + +class EncryptionHandler +{ + public virtual bool SupportEncryption => false; + + public virtual Func? RootCertificateCallback + { + get => throw new InvalidOperationException(NpgsqlStrings.EncryptionDisabled); + set => throw new InvalidOperationException(NpgsqlStrings.EncryptionDisabled); + } + + public virtual Task NegotiateEncryption(NpgsqlConnector connector, SslMode sslMode, NpgsqlTimeout timeout, bool async, bool isFirstAttempt) + => throw new InvalidOperationException(NpgsqlStrings.EncryptionDisabled); + + public virtual void AuthenticateSASLSha256Plus(NpgsqlConnector connector, ref string mechanism, ref string cbindFlag, ref string cbind, + ref bool successfulBind) + => throw new InvalidOperationException(NpgsqlStrings.EncryptionDisabled); +} + +sealed class RealEncryptionHandler : EncryptionHandler +{ + public override bool SupportEncryption => true; + + public override Func? 
RootCertificateCallback { get; set; } + + public override Task NegotiateEncryption(NpgsqlConnector connector, SslMode sslMode, NpgsqlTimeout timeout, bool async, bool isFirstAttempt) + => connector.NegotiateEncryption(sslMode, timeout, async, isFirstAttempt); + + public override void AuthenticateSASLSha256Plus(NpgsqlConnector connector, ref string mechanism, ref string cbindFlag, ref string cbind, + ref bool successfulBind) + => connector.AuthenticateSASLSha256Plus(ref mechanism, ref cbindFlag, ref cbind, ref successfulBind); +} diff --git a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs index 69b9d09892..1c928c627d 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs @@ -37,7 +37,8 @@ async Task Authenticate(string username, NpgsqlTimeout timeout, bool async, Canc break; case AuthenticationRequestType.AuthenticationSASL: - await AuthenticateSASL(((AuthenticationSASLMessage)msg).Mechanisms, username, async, cancellationToken); + await AuthenticateSASL(((AuthenticationSASLMessage)msg).Mechanisms, username, async, + cancellationToken); break; case AuthenticationRequestType.AuthenticationGSS: @@ -67,7 +68,7 @@ async Task AuthenticateCleartext(string username, bool async, CancellationToken await Flush(async, cancellationToken); } - async Task AuthenticateSASL(List mechanisms, string username, bool async, CancellationToken cancellationToken = default) + async Task AuthenticateSASL(List mechanisms, string username, bool async, CancellationToken cancellationToken) { // At the time of writing PostgreSQL only supports SCRAM-SHA-256 and SCRAM-SHA-256-PLUS var supportsSha256 = mechanisms.Contains("SCRAM-SHA-256"); @@ -82,61 +83,7 @@ async Task AuthenticateSASL(List mechanisms, string username, bool async var successfulBind = false; if (supportsSha256Plus) - { - var sslStream = (SslStream)_stream; - if (sslStream.RemoteCertificate is null) - { - ConnectionLogger.LogWarning("Remote 
certificate null, falling back to SCRAM-SHA-256"); - } - else - { - using var remoteCertificate = new X509Certificate2(sslStream.RemoteCertificate); - // Checking for hashing algorithms - HashAlgorithm? hashAlgorithm = null; - var algorithmName = remoteCertificate.SignatureAlgorithm.FriendlyName; - if (algorithmName is null) - { - ConnectionLogger.LogWarning("Signature algorithm was null, falling back to SCRAM-SHA-256"); - } - else if (algorithmName.StartsWith("sha1", StringComparison.OrdinalIgnoreCase) || - algorithmName.StartsWith("md5", StringComparison.OrdinalIgnoreCase) || - algorithmName.StartsWith("sha256", StringComparison.OrdinalIgnoreCase)) - { - hashAlgorithm = SHA256.Create(); - } - else if (algorithmName.StartsWith("sha384", StringComparison.OrdinalIgnoreCase)) - { - hashAlgorithm = SHA384.Create(); - } - else if (algorithmName.StartsWith("sha512", StringComparison.OrdinalIgnoreCase)) - { - hashAlgorithm = SHA512.Create(); - } - else - { - ConnectionLogger.LogWarning( - $"Support for signature algorithm {algorithmName} is not yet implemented, falling back to SCRAM-SHA-256"); - } - - if (hashAlgorithm != null) - { - using var _ = hashAlgorithm; - - // RFC 5929 - mechanism = "SCRAM-SHA-256-PLUS"; - // PostgreSQL only supports tls-server-end-point binding - cbindFlag = "p=tls-server-end-point"; - // SCRAM-SHA-256-PLUS depends on using ssl stream, so it's fine - var cbindFlagBytes = Encoding.UTF8.GetBytes($"{cbindFlag},,"); - - var certificateHash = hashAlgorithm.ComputeHash(remoteCertificate.GetRawCertData()); - var cbindBytes = cbindFlagBytes.Concat(certificateHash).ToArray(); - cbind = Convert.ToBase64String(cbindBytes); - successfulBind = true; - IsScramPlus = true; - } - } - } + DataSource.EncryptionHandler.AuthenticateSASLSha256Plus(this, ref mechanism, ref cbindFlag, ref cbind, ref successfulBind); if (!successfulBind && supportsSha256) { @@ -217,6 +164,63 @@ static string GetNonce() } } + internal void AuthenticateSASLSha256Plus(ref string 
mechanism, ref string cbindFlag, ref string cbind, + ref bool successfulBind) + { + var sslStream = (SslStream)_stream; + if (sslStream.RemoteCertificate is null) + { + ConnectionLogger.LogWarning("Remote certificate null, falling back to SCRAM-SHA-256"); + return; + } + + using var remoteCertificate = new X509Certificate2(sslStream.RemoteCertificate); + // Checking for hashing algorithms + HashAlgorithm? hashAlgorithm = null; + var algorithmName = remoteCertificate.SignatureAlgorithm.FriendlyName; + if (algorithmName is null) + { + ConnectionLogger.LogWarning("Signature algorithm was null, falling back to SCRAM-SHA-256"); + } + else if (algorithmName.StartsWith("sha1", StringComparison.OrdinalIgnoreCase) || + algorithmName.StartsWith("md5", StringComparison.OrdinalIgnoreCase) || + algorithmName.StartsWith("sha256", StringComparison.OrdinalIgnoreCase)) + { + hashAlgorithm = SHA256.Create(); + } + else if (algorithmName.StartsWith("sha384", StringComparison.OrdinalIgnoreCase)) + { + hashAlgorithm = SHA384.Create(); + } + else if (algorithmName.StartsWith("sha512", StringComparison.OrdinalIgnoreCase)) + { + hashAlgorithm = SHA512.Create(); + } + else + { + ConnectionLogger.LogWarning( + $"Support for signature algorithm {algorithmName} is not yet implemented, falling back to SCRAM-SHA-256"); + } + + if (hashAlgorithm != null) + { + using var _ = hashAlgorithm; + + // RFC 5929 + mechanism = "SCRAM-SHA-256-PLUS"; + // PostgreSQL only supports tls-server-end-point binding + cbindFlag = "p=tls-server-end-point"; + // SCRAM-SHA-256-PLUS depends on using ssl stream, so it's fine + var cbindFlagBytes = Encoding.UTF8.GetBytes($"{cbindFlag},,"); + + var certificateHash = hashAlgorithm.ComputeHash(remoteCertificate.GetRawCertData()); + var cbindBytes = cbindFlagBytes.Concat(certificateHash).ToArray(); + cbind = Convert.ToBase64String(cbindBytes); + successfulBind = true; + IsScramPlus = true; + } + } + #if NET6_0_OR_GREATER static byte[] Hi(string str, byte[] salt, int count) 
=> Rfc2898DeriveBytes.Pbkdf2(str, salt, count, HashAlgorithmName.SHA256, 256 / 8); diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 54ce7d75b8..c47d53d25c 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -786,12 +786,9 @@ async Task RawOpen(SslMode sslMode, NpgsqlTimeout timeout, bool async, Cancellat IsSecure = false; - if ((sslMode is SslMode.Prefer && DataSource.EncryptionNegotiator is not null) || + if ((sslMode is SslMode.Prefer && DataSource.EncryptionHandler.SupportEncryption) || sslMode is SslMode.Require or SslMode.VerifyCA or SslMode.VerifyFull) { - if (DataSource.EncryptionNegotiator is null) - throw new InvalidOperationException(NpgsqlStrings.EncryptionDisabled); - WriteSslRequest(); await Flush(async, cancellationToken); @@ -808,7 +805,7 @@ async Task RawOpen(SslMode sslMode, NpgsqlTimeout timeout, bool async, Cancellat throw new NpgsqlException("SSL connection requested. No SSL enabled connection from this host is configured."); break; case 'S': - await DataSource.EncryptionNegotiator(this, sslMode, timeout, async, isFirstAttempt); + await DataSource.EncryptionHandler.NegotiateEncryption(this, sslMode, timeout, async, isFirstAttempt); break; } @@ -891,7 +888,7 @@ internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, if (Settings.RootCertificate is not null) throw new ArgumentException(NpgsqlStrings.CannotUseSslRootCertificateWithUserCallback); - if (DataSource.RootCertificateCallback is not null) + if (DataSource.EncryptionHandler.RootCertificateCallback is not null) throw new ArgumentException(NpgsqlStrings.CannotUseValidationRootCertificateCallbackWithUserCallback); certificateValidationCallback = UserCertificateValidationCallback; @@ -904,7 +901,7 @@ internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, certificateValidationCallback = SslTrustServerValidation; checkCertificateRevocation = false; } 
- else if ((caCert = DataSource.RootCertificateCallback?.Invoke()) is not null || + else if ((caCert = DataSource.EncryptionHandler.RootCertificateCallback?.Invoke()) is not null || (certRootPath = Settings.RootCertificate ?? PostgresEnvironment.SslCertRoot ?? PostgresEnvironment.SslCertRootDefault) is not null) { diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index 713154d607..64e65bca13 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -43,7 +43,7 @@ public abstract class NpgsqlDataSource : DbDataSource ///
internal NpgsqlDatabaseInfo DatabaseInfo { get; private set; } = null!; // Initialized at bootstrapping - internal Func? EncryptionNegotiator { get; } + internal EncryptionHandler EncryptionHandler { get; } internal RemoteCertificateValidationCallback? UserCertificateValidationCallback { get; } internal Action? ClientCertificatesCallback { get; } @@ -90,7 +90,7 @@ internal NpgsqlDataSource( Configuration = dataSourceConfig; (LoggingConfiguration, - EncryptionNegotiator, + EncryptionHandler, UserCertificateValidationCallback, ClientCertificatesCallback, _periodicPasswordProvider, @@ -100,8 +100,7 @@ internal NpgsqlDataSource( _userTypeMappings, _defaultNameTranslator, ConnectionInitializer, - ConnectionInitializerAsync, - RootCertificateCallback) + ConnectionInitializerAsync) = dataSourceConfig; _connectionLogger = LoggingConfiguration.ConnectionLogger; @@ -302,8 +301,6 @@ async Task RefreshPassword() } #endregion Password management - - internal Func? RootCertificateCallback { get; } internal abstract ValueTask Get( NpgsqlConnection conn, NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken); diff --git a/src/Npgsql/NpgsqlDataSourceConfiguration.cs b/src/Npgsql/NpgsqlDataSourceConfiguration.cs index fe35dd33e0..6a7b068868 100644 --- a/src/Npgsql/NpgsqlDataSourceConfiguration.cs +++ b/src/Npgsql/NpgsqlDataSourceConfiguration.cs @@ -7,13 +7,12 @@ using Npgsql.Internal; using Npgsql.Internal.TypeHandling; using Npgsql.Internal.TypeMapping; -using Npgsql.Util; namespace Npgsql; sealed record NpgsqlDataSourceConfiguration( NpgsqlLoggingConfiguration LoggingConfiguration, - Func? EncryptionNegotiator, + EncryptionHandler EncryptionHandler, RemoteCertificateValidationCallback? UserCertificateValidationCallback, Action? ClientCertificatesCallback, Func>? PeriodicPasswordProvider, @@ -23,5 +22,4 @@ sealed record NpgsqlDataSourceConfiguration( Dictionary UserTypeMappings, INpgsqlNameTranslator DefaultNameTranslator, Action? ConnectionInitializer, - Func? 
ConnectionInitializerAsync, - Func? RootCertificateCallback); + Func? ConnectionInitializerAsync); diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index 18eacfbfab..a5f1046e90 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -30,10 +30,9 @@ public sealed class NpgsqlSlimDataSourceBuilder : INpgsqlTypeMapper ILoggerFactory? _loggerFactory; bool _sensitiveDataLoggingEnabled; - Func? _encryptionNegotiator; + EncryptionHandler _encryptionHandler = new(); RemoteCertificateValidationCallback? _userCertificateValidationCallback; Action? _clientCertificatesCallback; - Func? _rootCertificateCallback; Func>? _periodicPasswordProvider; TimeSpan _periodicPasswordSuccessRefreshInterval, _periodicPasswordFailureRefreshInterval; @@ -185,7 +184,7 @@ public NpgsqlSlimDataSourceBuilder UseRootCertificate(X509Certificate2? rootCert /// public NpgsqlSlimDataSourceBuilder UseRootCertificateCallback(Func? rootCertificateCallback) { - _rootCertificateCallback = rootCertificateCallback; + _encryptionHandler.RootCertificateCallback = rootCertificateCallback; return this; } @@ -423,8 +422,7 @@ public NpgsqlSlimDataSourceBuilder EnableRecords() ///
public NpgsqlSlimDataSourceBuilder EnableEncryption() { - _encryptionNegotiator = static (connector, sslMode, timeout, async, isFirstAttempt) - => connector.NegotiateEncryption(sslMode, timeout, async, isFirstAttempt); + _encryptionHandler = new RealEncryptionHandler(); return this; } @@ -503,7 +501,7 @@ NpgsqlDataSourceConfiguration PrepareConfiguration() { ConnectionStringBuilder.PostProcessAndValidate(); - if (_encryptionNegotiator is null && (_userCertificateValidationCallback is not null || _clientCertificatesCallback is not null)) + if (!_encryptionHandler.SupportEncryption && (_userCertificateValidationCallback is not null || _clientCertificatesCallback is not null)) { throw new InvalidOperationException(NpgsqlStrings.EncryptionDisabled); } @@ -518,7 +516,7 @@ NpgsqlDataSourceConfiguration PrepareConfiguration() _loggerFactory is null ? NpgsqlLoggingConfiguration.NullConfiguration : new NpgsqlLoggingConfiguration(_loggerFactory, _sensitiveDataLoggingEnabled), - _encryptionNegotiator, + _encryptionHandler, _userCertificateValidationCallback, _clientCertificatesCallback, _periodicPasswordProvider, @@ -528,8 +526,7 @@ _loggerFactory is null _userTypeMappings, DefaultNameTranslator, _syncConnectionInitializer, - _asyncConnectionInitializer, - _rootCertificateCallback); + _asyncConnectionInitializer); } void ValidateMultiHost() From b28ea2e8b99c2b7f1dd9d9a49904c7287ccdc58c Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Wed, 29 Mar 2023 00:20:14 +0300 Subject: [PATCH 120/761] Correct docs on DI lifetime (#5020) --- .../NpgsqlServiceCollectionExtensions.cs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Npgsql.DependencyInjection/NpgsqlServiceCollectionExtensions.cs b/src/Npgsql.DependencyInjection/NpgsqlServiceCollectionExtensions.cs index a9333a0753..dd73e7c14e 100644 --- a/src/Npgsql.DependencyInjection/NpgsqlServiceCollectionExtensions.cs +++ b/src/Npgsql.DependencyInjection/NpgsqlServiceCollectionExtensions.cs @@ -22,7 +22,7 
@@ public static class NpgsqlServiceCollectionExtensions /// /// /// The lifetime with which to register the in the container. - /// Defaults to . + /// Defaults to . /// /// /// The lifetime with which to register the service in the container. @@ -44,7 +44,7 @@ public static IServiceCollection AddNpgsqlDataSource( /// An Npgsql connection string. /// /// The lifetime with which to register the in the container. - /// Defaults to . + /// Defaults to . /// /// /// The lifetime with which to register the service in the container. @@ -69,7 +69,7 @@ public static IServiceCollection AddNpgsqlDataSource( /// /// /// The lifetime with which to register the in the container. - /// Defaults to . + /// Defaults to . /// /// /// The lifetime with which to register the service in the container. @@ -93,7 +93,7 @@ public static IServiceCollection AddMultiHostNpgsqlDataSource( /// An Npgsql connection string. /// /// The lifetime with which to register the in the container. - /// Defaults to . + /// Defaults to . /// /// /// The lifetime with which to register the service in the container. 
From 8bb9d4123e14531b23a220dfe2c93464ed731e84 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 30 Mar 2023 01:45:43 +0200 Subject: [PATCH 121/761] Bump NodaTime from 3.1.8 to 3.1.9 (#5022)Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 1581f05b4a..83fbfe47b8 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -14,7 +14,7 @@ - + From 37ed406857f70706a5d6268640cf14b42235fa19 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 30 Mar 2023 01:46:23 +0200 Subject: [PATCH 122/761] Bump Microsoft.Data.SqlClient from 5.1.0 to 5.1.1 (#5023) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 83fbfe47b8..236c7922bb 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -33,7 +33,7 @@ - + From ae3309f259db536be5ca16a327550f2d654b670c Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Fri, 7 Apr 2023 16:36:06 +0300 Subject: [PATCH 123/761] Fix a possible timeout with gss/sspi auth (#5028) Fixes #4888 --- src/Npgsql/Internal/NpgsqlConnector.Auth.cs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs index 1c928c627d..8931a75462 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs @@ -300,13 +300,15 @@ async Task AuthenticateGSS(bool async) var response = ExpectAny(await ReadMessage(async), this); if (response.AuthRequestType == AuthenticationRequestType.AuthenticationOk) break; - var gssMsg = response as AuthenticationGSSContinueMessage; - if (gssMsg == null) + if 
(response is not AuthenticationGSSContinueMessage gssMsg) throw new NpgsqlException($"Received unexpected authentication request message {response.AuthRequestType}"); data = authContext.GetOutgoingBlob(gssMsg.AuthenticationData.AsSpan(), out statusCode)!; - if (statusCode == NegotiateAuthenticationStatusCode.Completed) + if (statusCode is not NegotiateAuthenticationStatusCode.Completed and not NegotiateAuthenticationStatusCode.ContinueNeeded) + throw new NpgsqlException($"Error while authenticating GSS/SSPI: {statusCode}"); + // We might get NegotiateAuthenticationStatusCode.Completed but the data will not be null + // This can happen if it's the first cycle, in which case we have to send that data to complete handshake (#4888) + if (data is null) continue; - Debug.Assert(statusCode == NegotiateAuthenticationStatusCode.ContinueNeeded); await WritePassword(data, 0, data.Length, async, UserCancellationToken); await Flush(async, UserCancellationToken); } From 3166703c1cf0a76300878d7bfbd0eae106050efb Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Wed, 12 Apr 2023 14:44:27 +0300 Subject: [PATCH 124/761] Upgrade to .NET 8 preview 3 (#5031) --- .devcontainer/docker-compose.yml | 2 +- .github/workflows/build.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/native-aot.yml | 6 +++--- .github/workflows/rich-code-nav.yml | 2 +- test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj | 1 - 6 files changed, 7 insertions(+), 8 deletions(-) diff --git a/.devcontainer/docker-compose.yml b/.devcontainer/docker-compose.yml index 5d5be53d5a..1eeaeb9fd8 100644 --- a/.devcontainer/docker-compose.yml +++ b/.devcontainer/docker-compose.yml @@ -3,7 +3,7 @@ version: '3' services: npgsql-dev: # Source for tags: https://mcr.microsoft.com/v2/dotnet/sdk/tags/list - image: mcr.microsoft.com/dotnet/sdk:8.0.100-preview.2 + image: mcr.microsoft.com/dotnet/sdk:8.0.100-preview.3 volumes: - ..:/workspace:cached tty: true diff --git a/.github/workflows/build.yml 
b/.github/workflows/build.yml index 4e595464b0..2617b8f408 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -10,7 +10,7 @@ on: pull_request: env: - dotnet_sdk_version: '8.0.100-preview.2.23157.25' + dotnet_sdk_version: '8.0.100-preview.3.23178.7' postgis_version: 3 DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true # Windows comes with PG pre-installed, and defines the PGPASSWORD environment variable. Remove it as it interferes diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 6203474b64..a12786b32f 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -27,7 +27,7 @@ on: - cron: '21 0 * * 4' env: - dotnet_sdk_version: '8.0.100-preview.2.23157.25' + dotnet_sdk_version: '8.0.100-preview.3.23178.7' jobs: analyze: diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index b72773ddd8..72a7da4d6d 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -10,7 +10,7 @@ on: pull_request: env: - dotnet_sdk_version: '8.0.100-preview.2.23157.25' + dotnet_sdk_version: '8.0.100-preview.3.23178.7' DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true jobs: @@ -83,7 +83,7 @@ jobs: size="$(ls -l test/Npgsql.NativeAotTests/bin/Release/net8.0/linux-x64/native/Npgsql.NativeAotTests | cut -d ' ' -f 5)" echo "Binary size is $size bytes ($((size / (1024 * 1024))) mb)" - if (( size > 15728640 )); then - echo "Binary size exceeds 15mb threshold" + if (( size > 7340032 )); then + echo "Binary size exceeds 7mb threshold" exit 1 fi diff --git a/.github/workflows/rich-code-nav.yml b/.github/workflows/rich-code-nav.yml index 336aaeeb6c..af153693b0 100644 --- a/.github/workflows/rich-code-nav.yml +++ b/.github/workflows/rich-code-nav.yml @@ -9,7 +9,7 @@ on: - '*' env: - dotnet_sdk_version: '8.0.100-preview.2.23157.25' + dotnet_sdk_version: '8.0.100-preview.3.23178.7' DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true jobs: diff --git 
a/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj b/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj index 967308c218..3396a51a92 100644 --- a/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj +++ b/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj @@ -7,7 +7,6 @@ true true true - false true false Size From d024d51e885190c227826980abb63c64d6e383fa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 20 Apr 2023 18:56:10 +0300 Subject: [PATCH 125/761] Bump Mono.Cecil from 0.11.4 to 0.11.5 (#5036) --- Directory.Packages.props | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 236c7922bb..c92ac4fcab 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -1,4 +1,4 @@ - + @@ -37,7 +37,7 @@ - + From 276bb7db351ed785e2ea3b74cce85d8a3ca49fff Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Sun, 23 Apr 2023 22:50:23 +0300 Subject: [PATCH 126/761] Fix deadlock while cancelling a query (#5033) Fixes #5032 --- src/Npgsql/Internal/NpgsqlConnector.cs | 16 ++++++++++------ test/Npgsql.Tests/CommandTests.cs | 2 ++ 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index c47d53d25c..2b04c5fcff 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -1734,7 +1734,7 @@ internal void ResetCancellation() internal void PerformUserCancellation() { var connection = Connection; - if (connection is null || connection.ConnectorBindingScope == ConnectorBindingScope.Reader) + if (connection is null || connection.ConnectorBindingScope == ConnectorBindingScope.Reader || UserCancellationRequested) return; // Take the lock first to make sure there is no concurrent Break. 
@@ -1749,13 +1749,17 @@ internal void PerformUserCancellation() Monitor.Enter(CancelLock); } - // Wait before we've read all responses for the prepended queries - // as we can't gracefully handle their cancellation. - // Break makes sure that it's going to be set even if we fail while reading them. - ReadingPrependedMessagesMRE.Wait(); - try { + // Wait before we've read all responses for the prepended queries + // as we can't gracefully handle their cancellation. + // Break makes sure that it's going to be set even if we fail while reading them. + + // We don't wait indefinitely to avoid deadlocks from synchronous CancellationToken.Register + // See #5032 + if (!ReadingPrependedMessagesMRE.Wait(0)) + return; + _userCancellationRequested = true; if (AttemptPostgresCancellation && SupportsPostgresCancellation) diff --git a/test/Npgsql.Tests/CommandTests.cs b/test/Npgsql.Tests/CommandTests.cs index eba1bd4d5b..c486d69890 100644 --- a/test/Npgsql.Tests/CommandTests.cs +++ b/test/Npgsql.Tests/CommandTests.cs @@ -311,6 +311,7 @@ public async Task Cancel_async_immediately() } [Test, Description("Cancels an async query with the cancellation token, with successful PG cancellation")] + [Explicit("Flaky due to #5033")] public async Task Cancel_async_soft() { if (IsMultiplexing) @@ -1401,6 +1402,7 @@ public async Task Concurrent_read_write_failure_deadlock() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4906")] [Description("Make sure we don't cancel a prepended query (and do not deadlock in case of a failure)")] + [Explicit("Flaky due to #5033")] public async Task Not_cancel_prepended_query([Values] bool failPrependedQuery) { if (IsMultiplexing) From e1f5d7f0977c8f422bdc88269bec0e13881cf56a Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Mon, 24 Apr 2023 15:40:24 +0200 Subject: [PATCH 127/761] Suppress CA2017 We pass the connector ID to log messages although it isn't referenced in the log message text. But it can still be retrieved programmatically etc. 
--- src/Npgsql/Npgsql.csproj | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Npgsql/Npgsql.csproj b/src/Npgsql/Npgsql.csproj index 310da0dad0..2b284d6c46 100644 --- a/src/Npgsql/Npgsql.csproj +++ b/src/Npgsql/Npgsql.csproj @@ -7,6 +7,7 @@ README.md netstandard2.0;netstandard2.1;net6.0;net7.0;net8.0 net8.0 + $(NoWarn);CA2017 From df811a625baaf302d187954d4f398678b9db9350 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Mon, 24 Apr 2023 15:41:26 +0200 Subject: [PATCH 128/761] Bump version to 8.0.0-preview.4 --- Directory.Build.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Build.props b/Directory.Build.props index 56e747a60f..756df95380 100644 --- a/Directory.Build.props +++ b/Directory.Build.props @@ -1,6 +1,6 @@  - 8.0.0-preview.3 + 8.0.0-preview.4 latest true enable From c90dbf1337382cf20858ec2c4c3728b42aac259b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 27 Apr 2023 22:07:14 +0000 Subject: [PATCH 129/761] Bump GitHubActionsTestLogger from 2.0.1 to 2.0.2 (#5042) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index c92ac4fcab..3a85ac90a1 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -27,7 +27,7 @@ - + From 5cd7fff1c43f987b4a2b9bbc88d59cfac150d03a Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Wed, 3 May 2023 16:36:16 +0300 Subject: [PATCH 130/761] Make full text search opt-in (#5011) Co-authored-by: Shay Rojansky --- .../Internal/GeoJsonTypeMappingResolver.cs | 3 - .../Internal/JsonNetTypeMappingResolver.cs | 3 - .../NetTopologySuiteTypeMappingResolver.cs | 3 - .../Internal/NodaTimeTypeMappingResolver.cs | 3 - .../TypeMapping/TypeMappingResolver.cs | 3 +- src/Npgsql/NpgsqlDataSourceBuilder.cs | 1 + src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 10 +++- .../Properties/NpgsqlStrings.Designer.cs | 12 ++++ 
src/Npgsql/Properties/NpgsqlStrings.resx | 6 ++ src/Npgsql/PublicAPI.Unshipped.txt | 1 + .../TypeMapping/BuiltInTypeHandlerResolver.cs | 56 ++++++++++++------- .../TypeMapping/BuiltInTypeMappingResolver.cs | 20 ------- .../FullTextSearchTypeHandlerResolver.cs | 34 +++++++++++ ...ullTextSearchTypeHandlerResolverFactory.cs | 15 +++++ .../FullTextSearchTypeMappingResolver.cs | 41 ++++++++++++++ .../SystemTextJsonTypeMappingResolver.cs | 3 - test/Npgsql.Tests/Support/TestBase.cs | 4 +- .../Npgsql.Tests/Types/FullTextSearchTests.cs | 36 +++++++++++- test/Npgsql.Tests/Types/MiscTypeTests.cs | 8 +-- 19 files changed, 197 insertions(+), 65 deletions(-) create mode 100644 src/Npgsql/TypeMapping/FullTextSearchTypeHandlerResolver.cs create mode 100644 src/Npgsql/TypeMapping/FullTextSearchTypeHandlerResolverFactory.cs create mode 100644 src/Npgsql/TypeMapping/FullTextSearchTypeMappingResolver.cs diff --git a/src/Npgsql.GeoJSON/Internal/GeoJsonTypeMappingResolver.cs b/src/Npgsql.GeoJSON/Internal/GeoJsonTypeMappingResolver.cs index 621e0389a2..137606538b 100644 --- a/src/Npgsql.GeoJSON/Internal/GeoJsonTypeMappingResolver.cs +++ b/src/Npgsql.GeoJSON/Internal/GeoJsonTypeMappingResolver.cs @@ -18,9 +18,6 @@ public class GeoJsonTypeMappingResolver : TypeMappingResolver public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) => DoGetMappingByDataTypeName(dataTypeName); - public override TypeMappingInfo? GetMappingByPostgresType(TypeMapper mapper, PostgresType type) - => DoGetMappingByDataTypeName(type.Name); - static TypeMappingInfo? 
DoGetMappingByDataTypeName(string dataTypeName) => dataTypeName switch { diff --git a/src/Npgsql.Json.NET/Internal/JsonNetTypeMappingResolver.cs b/src/Npgsql.Json.NET/Internal/JsonNetTypeMappingResolver.cs index c8ba31da7a..119882f37e 100644 --- a/src/Npgsql.Json.NET/Internal/JsonNetTypeMappingResolver.cs +++ b/src/Npgsql.Json.NET/Internal/JsonNetTypeMappingResolver.cs @@ -19,9 +19,6 @@ public class JsonNetTypeMappingResolver : TypeMappingResolver public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) => DoGetMappingByDataTypeName(dataTypeName); - public override TypeMappingInfo? GetMappingByPostgresType(TypeMapper typeMapper, PostgresType type) - => DoGetMappingByDataTypeName(type.Name); - static TypeMappingInfo? DoGetMappingByDataTypeName(string dataTypeName) => dataTypeName switch { diff --git a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeMappingResolver.cs b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeMappingResolver.cs index bc502af5a6..f087d6c55e 100644 --- a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeMappingResolver.cs +++ b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeMappingResolver.cs @@ -19,9 +19,6 @@ public class NetTopologySuiteTypeMappingResolver : TypeMappingResolver public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) => DoGetMappingByDataTypeName(dataTypeName); - public override TypeMappingInfo? GetMappingByPostgresType(TypeMapper typeMapper, PostgresType type) - => DoGetMappingByDataTypeName(type.Name); - internal static string? ClrTypeToDataTypeName(Type type, bool geographyAsDefault) => type != typeof(Geometry) && type.BaseType != typeof(Geometry) && type.BaseType != typeof(GeometryCollection) ? 
null diff --git a/src/Npgsql.NodaTime/Internal/NodaTimeTypeMappingResolver.cs b/src/Npgsql.NodaTime/Internal/NodaTimeTypeMappingResolver.cs index b5e1e51f6e..dd5f271050 100644 --- a/src/Npgsql.NodaTime/Internal/NodaTimeTypeMappingResolver.cs +++ b/src/Npgsql.NodaTime/Internal/NodaTimeTypeMappingResolver.cs @@ -16,9 +16,6 @@ public class NodaTimeTypeMappingResolver : TypeMappingResolver public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) => DoGetMappingByDataTypeName(dataTypeName); - - public override TypeMappingInfo? GetMappingByPostgresType(TypeMapper mapper, PostgresType type) - => DoGetMappingByDataTypeName(type.Name); static TypeMappingInfo? DoGetMappingByDataTypeName(string dataTypeName) => dataTypeName switch diff --git a/src/Npgsql/Internal/TypeMapping/TypeMappingResolver.cs b/src/Npgsql/Internal/TypeMapping/TypeMappingResolver.cs index a4f6673d3a..af426e6f2f 100644 --- a/src/Npgsql/Internal/TypeMapping/TypeMappingResolver.cs +++ b/src/Npgsql/Internal/TypeMapping/TypeMappingResolver.cs @@ -14,7 +14,8 @@ public abstract class TypeMappingResolver /// Gets type mapping information for a given PostgreSQL type. /// Invoked in scenarios when mapping information is required, rather than a type handler for reading or writing. ///
- public abstract TypeMappingInfo? GetMappingByPostgresType(TypeMapper typeMapper, PostgresType type); + public virtual TypeMappingInfo? GetMappingByPostgresType(TypeMapper typeMapper, PostgresType type) + => GetMappingByDataTypeName(type.Name); internal TypeMappingInfo? GetMappingByValueDependentValue(object value) => GetDataTypeNameByValueDependentValue(value) is { } dataTypeName ? GetMappingByDataTypeName(dataTypeName) : null; diff --git a/src/Npgsql/NpgsqlDataSourceBuilder.cs b/src/Npgsql/NpgsqlDataSourceBuilder.cs index bf517b870e..ace6c49869 100644 --- a/src/Npgsql/NpgsqlDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlDataSourceBuilder.cs @@ -316,5 +316,6 @@ void AddDefaultFeatures() _internalBuilder.AddDefaultTypeResolverFactory(new SystemTextJsonTypeHandlerResolverFactory()); _internalBuilder.AddDefaultTypeResolverFactory(new RangeTypeHandlerResolverFactory()); _internalBuilder.AddDefaultTypeResolverFactory(new RecordTypeHandlerResolverFactory()); + _internalBuilder.AddDefaultTypeResolverFactory(new FullTextSearchTypeHandlerResolverFactory()); } } diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index a5f1046e90..d3619c0635 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -13,7 +13,6 @@ using Npgsql.Internal.TypeMapping; using Npgsql.Properties; using Npgsql.TypeMapping; -using Npgsql.Util; using NpgsqlTypes; namespace Npgsql; @@ -416,6 +415,15 @@ public NpgsqlSlimDataSourceBuilder EnableRecords() return this; } + /// + /// Sets up mappings for the PostgreSQL tsquery and tsvector types. + /// + public NpgsqlSlimDataSourceBuilder EnableFullTextSearch() + { + AddTypeResolverFactory(new FullTextSearchTypeHandlerResolverFactory()); + return this; + } + /// /// Enables the possibility to use TLS/SSl encryption for connections to PostgreSQL. This does not guarantee that encryption will /// actually be used; see for more details. 
diff --git a/src/Npgsql/Properties/NpgsqlStrings.Designer.cs b/src/Npgsql/Properties/NpgsqlStrings.Designer.cs index a2335feba4..eac1b386ac 100644 --- a/src/Npgsql/Properties/NpgsqlStrings.Designer.cs +++ b/src/Npgsql/Properties/NpgsqlStrings.Designer.cs @@ -146,5 +146,17 @@ internal static string CannotUseValidationRootCertificateCallbackWithUserCallbac return ResourceManager.GetString("CannotUseValidationRootCertificateCallbackWithUserCallback", resourceCulture); } } + + internal static string RecordsNotEnabled { + get { + return ResourceManager.GetString("RecordsNotEnabled", resourceCulture); + } + } + + internal static string FullTextSearchNotEnabled { + get { + return ResourceManager.GetString("FullTextSearchNotEnabled", resourceCulture); + } + } } } diff --git a/src/Npgsql/Properties/NpgsqlStrings.resx b/src/Npgsql/Properties/NpgsqlStrings.resx index 25fb8f1501..27282cdb1b 100644 --- a/src/Npgsql/Properties/NpgsqlStrings.resx +++ b/src/Npgsql/Properties/NpgsqlStrings.resx @@ -69,4 +69,10 @@ ValidationRootCertificateCallback cannot be used in conjunction with UserCertificateValidationCallback; when registering a validation callback, perform whatever validation you require in that callback. + + Records aren't enabled; please call {0} on {1} to enable records. + + + Full-text search isn't enabled; please call {0} on {1} to enable full-text search. + \ No newline at end of file diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index 4a34fad271..b55b5f3830 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -1,4 +1,5 @@ #nullable enable +Npgsql.NpgsqlSlimDataSourceBuilder.EnableFullTextSearch() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableRecords() -> Npgsql.NpgsqlSlimDataSourceBuilder! override Npgsql.NpgsqlBatch.Dispose() -> void Npgsql.NpgsqlBinaryImporter.WriteRow(params object?[]! 
values) -> void diff --git a/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs b/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs index e8b8af26cf..a7370c1fec 100644 --- a/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs +++ b/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs @@ -16,6 +16,7 @@ using Npgsql.Internal.TypeHandlers.NumericHandlers; using Npgsql.Internal.TypeHandling; using Npgsql.PostgresTypes; +using Npgsql.Properties; using NpgsqlTypes; using static Npgsql.Util.Statics; @@ -63,10 +64,6 @@ sealed class BuiltInTypeHandlerResolver : TypeHandlerResolver MacaddrHandler? _macaddrHandler; MacaddrHandler? _macaddr8Handler; - // Full-text search types - TsQueryHandler? _tsQueryHandler; - TsVectorHandler? _tsVectorHandler; - // Geometry types BoxHandler? _boxHandler; CircleHandler? _circleHandler; @@ -170,10 +167,6 @@ internal BuiltInTypeHandlerResolver(NpgsqlConnector connector) "macaddr" => MacaddrHandler(), "macaddr8" => Macaddr8Handler(), - // Full-text search types - "tsquery" => TsQueryHandler(), - "tsvector" => TsVectorHandler(), - // Geometry types "box" => BoxHandler(), "circle" => CircleHandler(), @@ -210,25 +203,40 @@ internal BuiltInTypeHandlerResolver(NpgsqlConnector connector) "pg_lsn" => PgLsnHandler(), "tid" => TidHandler(), "char" => InternalCharHandler(), - "record" => new UnsupportedHandler(PgType("record"), $"Records aren't supported; please call {nameof(NpgsqlSlimDataSourceBuilder.EnableRecords)} on {nameof(NpgsqlSlimDataSourceBuilder)} to enable records."), "void" => VoidHandler(), "unknown" => UnknownHandler(), + // Types that are unsupported by default when using NpgsqlSlimDataSourceBuilder + "record" => UnsupportedRecordHandler(), + "tsvector" => UnsupportedTsVectorHandler(), + "tsquery" => UnsupportedTsQueryHandler(), + _ => null }; public override NpgsqlTypeHandler? 
ResolveByClrType(Type type) { - if (BuiltInTypeMappingResolver.ClrTypeToDataTypeName(type) is not { } dataTypeName) - { - if (!type.IsSubclassOf(typeof(Stream))) - return null; + if (BuiltInTypeMappingResolver.ClrTypeToDataTypeName(type) is { } dataTypeName) + return ResolveByDataTypeName(dataTypeName); - dataTypeName = "bytea"; - } + if (type.IsSubclassOf(typeof(Stream))) + return ResolveByDataTypeName("bytea"); - return ResolveByDataTypeName(dataTypeName); + switch (type.FullName) + { + case "NpgsqlTypes.NpgsqlTsVector": + case "NpgsqlTypes.NpgsqlTsQueryLexeme": + case "NpgsqlTypes.NpgsqlTsQueryAnd": + case "NpgsqlTypes.NpgsqlTsQueryOr": + case "NpgsqlTypes.NpgsqlTsQueryNot": + case "NpgsqlTypes.NpgsqlTsQueryEmpty": + case "NpgsqlTypes.NpgsqlTsQueryFollowedBy": + return UnsupportedTsQueryHandler(); + + default: + return null; + } } public override NpgsqlTypeHandler? ResolveValueDependentValue(object value) @@ -375,10 +383,6 @@ NpgsqlTypeHandler ArrayHandler(DateTimeKind kind) NpgsqlTypeHandler MacaddrHandler() => _macaddrHandler ??= new MacaddrHandler(PgType("macaddr")); NpgsqlTypeHandler Macaddr8Handler() => _macaddr8Handler ??= new MacaddrHandler(PgType("macaddr8")); - // Full-text search types - NpgsqlTypeHandler TsQueryHandler() => _tsQueryHandler ??= new TsQueryHandler(PgType("tsquery")); - NpgsqlTypeHandler TsVectorHandler() => _tsVectorHandler ??= new TsVectorHandler(PgType("tsvector")); - // Geometry types NpgsqlTypeHandler BoxHandler() => _boxHandler ??= new BoxHandler(PgType("box")); NpgsqlTypeHandler CircleHandler() => _circleHandler ??= new CircleHandler(PgType("circle")); @@ -424,6 +428,18 @@ NpgsqlTypeHandler ArrayHandler(DateTimeKind kind) NpgsqlTypeHandler InternalCharHandler() => _internalCharHandler ??= new InternalCharHandler(PgType("char")); NpgsqlTypeHandler VoidHandler() => _voidHandler ??= new VoidHandler(PgType("void")); + // Types that are unsupported by default when using NpgsqlSlimDataSourceBuilder + NpgsqlTypeHandler 
UnsupportedRecordHandler() => new UnsupportedHandler(PgType("record"), string.Format( + NpgsqlStrings.RecordsNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableRecords), nameof(NpgsqlSlimDataSourceBuilder))); + + NpgsqlTypeHandler UnsupportedTsVectorHandler() => new UnsupportedHandler(PgType("tsvector"), string.Format( + NpgsqlStrings.FullTextSearchNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableFullTextSearch), + nameof(NpgsqlSlimDataSourceBuilder))); + + NpgsqlTypeHandler UnsupportedTsQueryHandler() => new UnsupportedHandler(PgType("tsquery"), string.Format( + NpgsqlStrings.FullTextSearchNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableFullTextSearch), + nameof(NpgsqlSlimDataSourceBuilder))); + NpgsqlTypeHandler UnknownHandler() => _unknownHandler ??= new UnknownTypeHandler(_connector.TextEncoding); #endregion Handler accessors diff --git a/src/Npgsql/TypeMapping/BuiltInTypeMappingResolver.cs b/src/Npgsql/TypeMapping/BuiltInTypeMappingResolver.cs index 0d344247b0..8a236a86f6 100644 --- a/src/Npgsql/TypeMapping/BuiltInTypeMappingResolver.cs +++ b/src/Npgsql/TypeMapping/BuiltInTypeMappingResolver.cs @@ -8,7 +8,6 @@ using System.Numerics; using Npgsql.Internal.TypeHandling; using Npgsql.Internal.TypeMapping; -using Npgsql.PostgresTypes; using NpgsqlTypes; using static Npgsql.Util.Statics; @@ -79,13 +78,6 @@ sealed class BuiltInTypeMappingResolver : TypeMappingResolver { "macaddr", new(NpgsqlDbType.MacAddr, "macaddr", typeof(PhysicalAddress)) }, { "macaddr8", new(NpgsqlDbType.MacAddr8, "macaddr8") }, - // Full-text search types - { "tsquery", new(NpgsqlDbType.TsQuery, "tsquery", - typeof(NpgsqlTsQuery), typeof(NpgsqlTsQueryAnd), typeof(NpgsqlTsQueryEmpty), typeof(NpgsqlTsQueryFollowedBy), - typeof(NpgsqlTsQueryLexeme), typeof(NpgsqlTsQueryNot), typeof(NpgsqlTsQueryOr), typeof(NpgsqlTsQueryBinOp) - ) }, - { "tsvector", new(NpgsqlDbType.TsVector, "tsvector", typeof(NpgsqlTsVector)) }, - // Geometry types { "box", new(NpgsqlDbType.Box, "box", 
typeof(NpgsqlBox)) }, { "circle", new(NpgsqlDbType.Circle, "circle", typeof(NpgsqlCircle)) }, @@ -176,15 +168,6 @@ static BuiltInTypeMappingResolver() #pragma warning restore 618 { typeof(PhysicalAddress), "macaddr" }, - // Full-text types - { typeof(NpgsqlTsVector), "tsvector" }, - { typeof(NpgsqlTsQueryLexeme), "tsquery" }, - { typeof(NpgsqlTsQueryAnd), "tsquery" }, - { typeof(NpgsqlTsQueryOr), "tsquery" }, - { typeof(NpgsqlTsQueryNot), "tsquery" }, - { typeof(NpgsqlTsQueryEmpty), "tsquery" }, - { typeof(NpgsqlTsQueryFollowedBy), "tsquery" }, - // Geometry types { typeof(NpgsqlBox), "box" }, { typeof(NpgsqlCircle), "circle" }, @@ -251,7 +234,4 @@ static BuiltInTypeMappingResolver() public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) => Mappings.TryGetValue(dataTypeName, out var mapping) ? mapping : null; - - public override TypeMappingInfo? GetMappingByPostgresType(TypeMapper typeMapper, PostgresType type) - => GetMappingByDataTypeName(type.Name); } diff --git a/src/Npgsql/TypeMapping/FullTextSearchTypeHandlerResolver.cs b/src/Npgsql/TypeMapping/FullTextSearchTypeHandlerResolver.cs new file mode 100644 index 0000000000..38db435814 --- /dev/null +++ b/src/Npgsql/TypeMapping/FullTextSearchTypeHandlerResolver.cs @@ -0,0 +1,34 @@ +using System; +using Npgsql.Internal; +using Npgsql.Internal.TypeHandlers.FullTextSearchHandlers; +using Npgsql.Internal.TypeHandling; +using Npgsql.PostgresTypes; + +namespace Npgsql.TypeMapping; + +sealed class FullTextSearchTypeHandlerResolver : TypeHandlerResolver +{ + readonly NpgsqlDatabaseInfo _databaseInfo; + + public FullTextSearchTypeHandlerResolver(NpgsqlConnector connector) + => _databaseInfo = connector.DatabaseInfo; + + TsQueryHandler? _tsQueryHandler; + TsVectorHandler? _tsVectorHandler; + + public override NpgsqlTypeHandler? 
ResolveByDataTypeName(string typeName) => + typeName switch + { + "tsquery" => TsQueryHandler(), + "tsvector" => TsVectorHandler(), + _ => null + }; + + public override NpgsqlTypeHandler? ResolveByClrType(Type type) + => FullTextSearchTypeMappingResolver.ClrTypeToDataTypeName(type) is { } dataTypeName ? ResolveByDataTypeName(dataTypeName) : null; + + NpgsqlTypeHandler TsQueryHandler() => _tsQueryHandler ??= new TsQueryHandler(PgType("tsquery")); + NpgsqlTypeHandler TsVectorHandler() => _tsVectorHandler ??= new TsVectorHandler(PgType("tsvector")); + + PostgresType PgType(string pgTypeName) => _databaseInfo.GetPostgresTypeByName(pgTypeName); +} diff --git a/src/Npgsql/TypeMapping/FullTextSearchTypeHandlerResolverFactory.cs b/src/Npgsql/TypeMapping/FullTextSearchTypeHandlerResolverFactory.cs new file mode 100644 index 0000000000..cbfb8a9838 --- /dev/null +++ b/src/Npgsql/TypeMapping/FullTextSearchTypeHandlerResolverFactory.cs @@ -0,0 +1,15 @@ +using Npgsql.Internal; +using Npgsql.Internal.TypeHandling; +using Npgsql.Internal.TypeMapping; + +namespace Npgsql.TypeMapping; + +sealed class FullTextSearchTypeHandlerResolverFactory : TypeHandlerResolverFactory +{ + public override TypeHandlerResolver Create(TypeMapper typeMapper, NpgsqlConnector connector) => + new FullTextSearchTypeHandlerResolver(connector); + + public override TypeMappingResolver CreateMappingResolver() => new FullTextSearchTypeMappingResolver(); + + public override TypeMappingResolver CreateGlobalMappingResolver() => new FullTextSearchTypeMappingResolver(); +} diff --git a/src/Npgsql/TypeMapping/FullTextSearchTypeMappingResolver.cs b/src/Npgsql/TypeMapping/FullTextSearchTypeMappingResolver.cs new file mode 100644 index 0000000000..90185578c0 --- /dev/null +++ b/src/Npgsql/TypeMapping/FullTextSearchTypeMappingResolver.cs @@ -0,0 +1,41 @@ +using System; +using System.Collections.Generic; +using Npgsql.Internal.TypeHandling; +using Npgsql.Internal.TypeMapping; +using NpgsqlTypes; + +namespace 
Npgsql.TypeMapping; + +sealed class FullTextSearchTypeMappingResolver : TypeMappingResolver +{ + static readonly TypeMappingInfo TsQueryMappingInfo = new(NpgsqlDbType.TsQuery, "tsquery", + typeof(NpgsqlTsQuery), typeof(NpgsqlTsQueryAnd), typeof(NpgsqlTsQueryEmpty), typeof(NpgsqlTsQueryFollowedBy), + typeof(NpgsqlTsQueryLexeme), typeof(NpgsqlTsQueryNot), typeof(NpgsqlTsQueryOr), typeof(NpgsqlTsQueryBinOp)); + + static readonly TypeMappingInfo TsVectorMappingInfo = new(NpgsqlDbType.TsVector, "tsvector", typeof(NpgsqlTsVector)); + + static readonly Dictionary ClrTypeToDataTypeNameTable = new() + { + { typeof(NpgsqlTsVector), "tsvector" }, + { typeof(NpgsqlTsQueryLexeme), "tsquery" }, + { typeof(NpgsqlTsQueryAnd), "tsquery" }, + { typeof(NpgsqlTsQueryOr), "tsquery" }, + { typeof(NpgsqlTsQueryNot), "tsquery" }, + { typeof(NpgsqlTsQueryEmpty), "tsquery" }, + { typeof(NpgsqlTsQueryFollowedBy), "tsquery" }, + }; + + public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) + => dataTypeName switch + { + "tsquery" => TsQueryMappingInfo, + "tsvector" => TsVectorMappingInfo, + _ => null + }; + + public override string? GetDataTypeNameByClrType(Type clrType) + => ClrTypeToDataTypeName(clrType); + + internal static string? ClrTypeToDataTypeName(Type clrType) + => ClrTypeToDataTypeNameTable.TryGetValue(clrType, out var dataTypeName) ? dataTypeName : null; +} diff --git a/src/Npgsql/TypeMapping/SystemTextJsonTypeMappingResolver.cs b/src/Npgsql/TypeMapping/SystemTextJsonTypeMappingResolver.cs index 2ec4bb2544..b76820f718 100644 --- a/src/Npgsql/TypeMapping/SystemTextJsonTypeMappingResolver.cs +++ b/src/Npgsql/TypeMapping/SystemTextJsonTypeMappingResolver.cs @@ -21,9 +21,6 @@ sealed class SystemTextJsonTypeMappingResolver : TypeMappingResolver public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) => DoGetMappingByDataTypeName(dataTypeName); - public override TypeMappingInfo? 
GetMappingByPostgresType(TypeMapper mapper, PostgresType type) - => DoGetMappingByDataTypeName(type.Name); - internal static string? ClrTypeToDataTypeName(Type type, Dictionary? clrTypes) => type == typeof(JsonDocument) || type == typeof(JsonObject) || type == typeof(JsonArray) diff --git a/test/Npgsql.Tests/Support/TestBase.cs b/test/Npgsql.Tests/Support/TestBase.cs index 04756dd15c..126a3575fd 100644 --- a/test/Npgsql.Tests/Support/TestBase.cs +++ b/test/Npgsql.Tests/Support/TestBase.cs @@ -301,10 +301,10 @@ public async Task AssertTypeUnsupportedRead(string sqlLiteral, string pgTypeName Assert.That(() => reader.GetValue(0), Throws.Exception.TypeOf()); } - public Task AssertTypeUnsupportedRead(string sqlLiteral, string pgTypeName, NpgsqlDataSource? dataSource = null) + public Task AssertTypeUnsupportedRead(string sqlLiteral, string pgTypeName, NpgsqlDataSource? dataSource = null) => AssertTypeUnsupportedRead(sqlLiteral, pgTypeName, dataSource); - public async Task AssertTypeUnsupportedRead(string sqlLiteral, string pgTypeName, NpgsqlDataSource? dataSource = null) + public async Task AssertTypeUnsupportedRead(string sqlLiteral, string pgTypeName, NpgsqlDataSource? 
dataSource = null) where TException : Exception { dataSource ??= DefaultDataSource; diff --git a/test/Npgsql.Tests/Types/FullTextSearchTests.cs b/test/Npgsql.Tests/Types/FullTextSearchTests.cs index f039c5b587..ae759a295c 100644 --- a/test/Npgsql.Tests/Types/FullTextSearchTests.cs +++ b/test/Npgsql.Tests/Types/FullTextSearchTests.cs @@ -1,7 +1,10 @@ -using System.Collections; +using System; +using System.Collections; using System.Threading.Tasks; +using Npgsql.Properties; using NpgsqlTypes; using NUnit.Framework; +using NUnit.Framework.Constraints; namespace Npgsql.Tests.Types; @@ -57,4 +60,33 @@ public static IEnumerable TsQueryTestCases() => new[] [TestCaseSource(nameof(TsQueryTestCases))] public Task TsQuery(string sqlLiteral, NpgsqlTsQuery query) => AssertType(query, sqlLiteral, "tsquery", NpgsqlDbType.TsQuery); -} \ No newline at end of file + + [Test] + public async Task Full_text_search_supported_only_with_EnableFullTextSearch([Values] bool enableFullTextSearch) + { + var errorMessage = string.Format(NpgsqlStrings.FullTextSearchNotEnabled, "EnableFullTextSearch", "NpgsqlSlimDataSourceBuilder"); + + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + if (enableFullTextSearch) + dataSourceBuilder.EnableFullTextSearch(); + await using var dataSource = dataSourceBuilder.Build(); + + if (enableFullTextSearch) + { + await AssertType(new NpgsqlTsQueryLexeme("a"), "'a'", "tsquery", NpgsqlDbType.TsQuery); + await AssertType(NpgsqlTsVector.Parse("'1'"), "'1'", "tsvector", NpgsqlDbType.TsVector); + } + else + { + var exception = await AssertTypeUnsupportedRead("a", "tsquery", dataSource); + Assert.AreEqual(errorMessage, exception.Message); + exception = await AssertTypeUnsupportedWrite(new NpgsqlTsQueryLexeme("a"), pgTypeName: null, dataSource); + Assert.AreEqual(errorMessage, exception.Message); + + exception = await AssertTypeUnsupportedRead("1", "tsvector", dataSource); + Assert.AreEqual(errorMessage, exception.Message); + exception = await 
AssertTypeUnsupportedWrite(NpgsqlTsVector.Parse("'1'"), pgTypeName: null, dataSource); + Assert.AreEqual(errorMessage, exception.Message); + } + } +} diff --git a/test/Npgsql.Tests/Types/MiscTypeTests.cs b/test/Npgsql.Tests/Types/MiscTypeTests.cs index 9c0667c645..41555e776e 100644 --- a/test/Npgsql.Tests/Types/MiscTypeTests.cs +++ b/test/Npgsql.Tests/Types/MiscTypeTests.cs @@ -1,6 +1,7 @@ using System; using System.Data; using System.Threading.Tasks; +using Npgsql.Properties; using NpgsqlTypes; using NUnit.Framework; using NUnit.Framework.Constraints; @@ -135,18 +136,17 @@ public Task Write_Record_is_not_supported() [Test] public async Task Records_supported_only_with_EnableRecords([Values] bool withMappings) { - const string unsupportedMessage = - "Records aren't supported; please call EnableRecords on NpgsqlSlimDataSourceBuilder to enable records."; Func assertExpr = () => withMappings ? Throws.Nothing : Throws.Exception .TypeOf() - .With.Property("Message").EqualTo(unsupportedMessage); + .With.Property("Message") + .EqualTo(string.Format(NpgsqlStrings.RecordsNotEnabled, "EnableRecords", "NpgsqlSlimDataSourceBuilder")); var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); if (withMappings) dataSourceBuilder.EnableRecords(); - var dataSource = dataSourceBuilder.Build(); + await using var dataSource = dataSourceBuilder.Build(); await using var conn = await dataSource.OpenConnectionAsync(); await using var cmd = conn.CreateCommand(); From a04bdc191659eb0bfd512578ee35476ea5cc4185 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 11 May 2023 12:07:07 +0200 Subject: [PATCH 131/761] Bump GitHubActionsTestLogger from 2.0.2 to 2.1.0 (#5052) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 3a85ac90a1..c4ca5a5fc8 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ 
-27,7 +27,7 @@ - + From e4c0d3f49f65ca5a8a8678b012ab5a79beaf829d Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Fri, 12 May 2023 12:32:19 +0300 Subject: [PATCH 132/761] FIx multiple hosts with disabled sql rewriting (#5056) Fixes #5055 --- src/Npgsql/Internal/NpgsqlConnector.cs | 14 +++++++++++--- test/Npgsql.Tests/MultipleHostsTests.cs | 21 ++++++++++++++++++++- test/Npgsql.Tests/TestUtil.cs | 3 +++ 3 files changed, 34 insertions(+), 4 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 2b04c5fcff..fb4bdc339e 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -637,10 +637,12 @@ await OpenCore( internal async ValueTask QueryDatabaseState( NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken = default) { - using var cmd = CreateCommand("select pg_is_in_recovery(); SHOW default_transaction_read_only"); - cmd.CommandTimeout = (int)timeout.CheckAndGetTimeLeft().TotalSeconds; + using var batch = CreateBatch(); + batch.BatchCommands.Add(new NpgsqlBatchCommand("select pg_is_in_recovery()")); + batch.BatchCommands.Add(new NpgsqlBatchCommand("SHOW default_transaction_read_only")); + batch.Timeout = (int)timeout.CheckAndGetTimeLeft().TotalSeconds; - var reader = async ? await cmd.ExecuteReaderAsync(cancellationToken) : cmd.ExecuteReader(); + var reader = async ? await batch.ExecuteReaderAsync(cancellationToken) : batch.ExecuteReader(); try { if (async) @@ -2681,6 +2683,12 @@ internal async Task ExecuteInternalCommand(byte[] data, bool async, Cancellation /// A object. public NpgsqlCommand CreateCommand(string? cmdText = null) => new(cmdText, this); + /// + /// Creates and returns a object associated with the . + /// + /// A object. 
+ public NpgsqlBatch CreateBatch() => new NpgsqlBatch(this); + void ReadParameterStatus(ReadOnlySpan incomingName, ReadOnlySpan incomingValue) { byte[] rawName; diff --git a/test/Npgsql.Tests/MultipleHostsTests.cs b/test/Npgsql.Tests/MultipleHostsTests.cs index b7988c9f78..2b2c3f5304 100644 --- a/test/Npgsql.Tests/MultipleHostsTests.cs +++ b/test/Npgsql.Tests/MultipleHostsTests.cs @@ -891,7 +891,6 @@ await firstServer Assert.That(secondDataSource.GetDatabaseState(), Is.EqualTo(DatabaseState.PrimaryReadWrite)); } - // This is the only test in this class which actually connects to PostgreSQL (the others use the PostgreSQL mock) [Test, NonParallelizable] public void IntegrationTest([Values] bool loadBalancing, [Values] bool alwaysCheckHostState) { @@ -953,6 +952,26 @@ async Task Query(NpgsqlDataSource dataSource) } } + [Test] + [IssueLink("https://github.com/npgsql/npgsql/issues/5055")] + [NonParallelizable] // Disables sql rewriting + public async Task Multiple_hosts_with_disabled_sql_rewriting() + { + using var _ = DisableSqlRewriting(); + + var dataSourceBuilder = new NpgsqlDataSourceBuilder(ConnectionString) + { + ConnectionStringBuilder = + { + Host = "localhost,127.0.0.1", + Pooling = true, + HostRecheckSeconds = 0 + } + }; + await using var dataSource = dataSourceBuilder.BuildMultiHost(); + await using var conn = await dataSource.OpenConnectionAsync(); + } + [Test] public async Task DataSource_with_wrappers() { diff --git a/test/Npgsql.Tests/TestUtil.cs b/test/Npgsql.Tests/TestUtil.cs index 1eccd4b90e..1fa69cb6e1 100644 --- a/test/Npgsql.Tests/TestUtil.cs +++ b/test/Npgsql.Tests/TestUtil.cs @@ -363,6 +363,9 @@ internal static IDisposable SetCurrentCulture(CultureInfo culture) internal static IDisposable DisableSqlRewriting() { #if DEBUG + // We clear the pools to make sure we don't accidentally reuse a pool + // Since EnableSqlRewriting is a global change + PoolManager.Reset(); NpgsqlCommand.EnableSqlRewriting = false; return new DeferredExecutionDisposable(() 
=> NpgsqlCommand.EnableSqlRewriting = true); #else From 60c4f5241b1371123b2cfde28b35e6c6e42f1861 Mon Sep 17 00:00:00 2001 From: Rafi Shamim Date: Tue, 16 May 2023 03:56:53 -0400 Subject: [PATCH 133/761] Treat pg_users.user_sysid as uint rather than int (#5057) --- src/Npgsql/NpgsqlSchema.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Npgsql/NpgsqlSchema.cs b/src/Npgsql/NpgsqlSchema.cs index 75c5e857dc..e8d65ecbf1 100644 --- a/src/Npgsql/NpgsqlSchema.cs +++ b/src/Npgsql/NpgsqlSchema.cs @@ -293,7 +293,7 @@ static async Task GetUsers(NpgsqlConnection conn, string?[]? restrict { var users = new DataTable("Users") { Locale = CultureInfo.InvariantCulture }; - users.Columns.AddRange(new[] { new DataColumn("user_name"), new DataColumn("user_sysid", typeof(int)) }); + users.Columns.AddRange(new[] { new DataColumn("user_name"), new DataColumn("user_sysid", typeof(uint)) }); var getUsers = new StringBuilder(); From dfbbe898b04e4422da90552b0f09d61d65caead7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 17 May 2023 12:03:49 +0300 Subject: [PATCH 134/761] Bump Microsoft.NET.Test.Sdk from 17.5.0 to 17.6.0 (#5059) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index c4ca5a5fc8..388ef75bb4 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -23,7 +23,7 @@ - + From 03f3c85f5f3e34ce62d02fd6430a4950ca379629 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Wed, 17 May 2023 14:20:25 +0300 Subject: [PATCH 135/761] Upgrade to .NET 8 preview 4 (#5060) --- .devcontainer/docker-compose.yml | 2 +- .github/workflows/build.yml | 2 +- .github/workflows/codeql-analysis.yml | 3 ++- .github/workflows/native-aot.yml | 6 +++++- .github/workflows/rich-code-nav.yml | 2 +- src/Npgsql/NpgsqlException.cs | 5 ++++- src/Npgsql/PostgresException.cs | 8 +++++++- 
test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj | 3 ++- test/Npgsql.Tests/ExceptionTests.cs | 8 ++++++++ test/Npgsql.Tests/Npgsql.Tests.csproj | 3 +++ 10 files changed, 34 insertions(+), 8 deletions(-) diff --git a/.devcontainer/docker-compose.yml b/.devcontainer/docker-compose.yml index 1eeaeb9fd8..d65b99a1a0 100644 --- a/.devcontainer/docker-compose.yml +++ b/.devcontainer/docker-compose.yml @@ -3,7 +3,7 @@ version: '3' services: npgsql-dev: # Source for tags: https://mcr.microsoft.com/v2/dotnet/sdk/tags/list - image: mcr.microsoft.com/dotnet/sdk:8.0.100-preview.3 + image: mcr.microsoft.com/dotnet/sdk:8.0.100-preview.4 volumes: - ..:/workspace:cached tty: true diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 2617b8f408..5c30520cfe 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -10,7 +10,7 @@ on: pull_request: env: - dotnet_sdk_version: '8.0.100-preview.3.23178.7' + dotnet_sdk_version: '8.0.100-preview.4.23260.5' postgis_version: 3 DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true # Windows comes with PG pre-installed, and defines the PGPASSWORD environment variable. 
Remove it as it interferes diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index a12786b32f..5fe6c18a1e 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -27,7 +27,8 @@ on: - cron: '21 0 * * 4' env: - dotnet_sdk_version: '8.0.100-preview.3.23178.7' + dotnet_sdk_version: '8.0.100-preview.4.23260.5' + DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true jobs: analyze: diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index 72a7da4d6d..79e0cdfde3 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -10,7 +10,7 @@ on: pull_request: env: - dotnet_sdk_version: '8.0.100-preview.3.23178.7' + dotnet_sdk_version: '8.0.100-preview.4.23260.5' DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true jobs: @@ -68,10 +68,14 @@ jobs: size="$(ls -l test/Npgsql.NativeAotTests/bin/Release/net8.0/linux-x64/native/Npgsql.NativeAotTests | cut -d ' ' -f 5)" echo "Binary size is $size bytes ($((size / (1024 * 1024))) mb)" >> $GITHUB_STEP_SUMMARY + # Temporarily disabled due to NativeAOT bug, see dotnet/runtime#86376 - name: Dump mstat + if: ${{ false }} run: dotnet run --project test/MStatDumper/MStatDumper.csproj -c release -f net8.0 -- "test/Npgsql.NativeAotTests/obj/Release/net8.0/linux-x64/native/Npgsql.NativeAotTests.mstat" md >> $GITHUB_STEP_SUMMARY + # Temporarily disabled due to NativeAOT bug, see dotnet/runtime#86376 - name: Upload mstat + if: ${{ false }} uses: actions/upload-artifact@v3.1.2 with: name: npgsql.mstat diff --git a/.github/workflows/rich-code-nav.yml b/.github/workflows/rich-code-nav.yml index af153693b0..8a2a93ae98 100644 --- a/.github/workflows/rich-code-nav.yml +++ b/.github/workflows/rich-code-nav.yml @@ -9,7 +9,7 @@ on: - '*' env: - dotnet_sdk_version: '8.0.100-preview.3.23178.7' + dotnet_sdk_version: '8.0.100-preview.4.23260.5' DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true jobs: diff --git a/src/Npgsql/NpgsqlException.cs 
b/src/Npgsql/NpgsqlException.cs index 38b499b438..57c47a514c 100644 --- a/src/Npgsql/NpgsqlException.cs +++ b/src/Npgsql/NpgsqlException.cs @@ -70,7 +70,10 @@ public virtual bool IsTransient /// /// The SerializationInfo that holds the serialized object data about the exception being thrown. /// The StreamingContext that contains contextual information about the source or destination. +#if NET8_0_OR_GREATER + [Obsolete("This API supports obsolete formatter-based serialization. It should not be called or extended by application code.")] +#endif protected internal NpgsqlException(SerializationInfo info, StreamingContext context) : base(info, context) {} #endregion -} \ No newline at end of file +} diff --git a/src/Npgsql/PostgresException.cs b/src/Npgsql/PostgresException.cs index b5ebe5b99b..c4ba62d0a2 100644 --- a/src/Npgsql/PostgresException.cs +++ b/src/Npgsql/PostgresException.cs @@ -110,6 +110,9 @@ static string GetMessage(string sqlState, string messageText, int position, stri internal static PostgresException Load(NpgsqlReadBuffer buf, bool includeDetail, ILogger exceptionLogger) => new(ErrorOrNoticeMessage.Load(buf, includeDetail, exceptionLogger)); +#if NET8_0_OR_GREATER + [Obsolete("This API supports obsolete formatter-based serialization. It should not be called or extended by application code.")] +#endif internal PostgresException(SerializationInfo info, StreamingContext context) : base(info, context) { @@ -140,6 +143,9 @@ internal PostgresException(SerializationInfo info, StreamingContext context) ///
/// The to populate with data. /// The destination (see ) for this serialization. +#if NET8_0_OR_GREATER + [Obsolete("This API supports obsolete formatter-based serialization. It should not be called or extended by application code.")] +#endif public override void GetObjectData(SerializationInfo info, StreamingContext context) { base.GetObjectData(info, context); @@ -371,4 +377,4 @@ public override bool IsTransient public string? Routine { get; } #endregion -} \ No newline at end of file +} diff --git a/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj b/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj index 3396a51a92..218331fdbb 100644 --- a/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj +++ b/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj @@ -4,7 +4,8 @@ true net8.0 - true + + false true true true diff --git a/test/Npgsql.Tests/ExceptionTests.cs b/test/Npgsql.Tests/ExceptionTests.cs index 101d0b67f6..f9f8821c4d 100644 --- a/test/Npgsql.Tests/ExceptionTests.cs +++ b/test/Npgsql.Tests/ExceptionTests.cs @@ -210,6 +210,8 @@ public void NpgsqlException_IsTransient() Assert.False(new NpgsqlException("", new Exception("Inner Exception")).IsTransient); } +#pragma warning disable SYSLIB0051 +#pragma warning disable 618 [Test] public void PostgresException_IsTransient() { @@ -243,8 +245,11 @@ PostgresException CreateWithSqlState(string sqlState) return new PostgresException(info, default); } } +#pragma warning restore SYSLIB0051 +#pragma warning restore 618 #pragma warning disable SYSLIB0011 +#pragma warning disable SYSLIB0050 #pragma warning disable 618 [Test] public void Serialization() @@ -283,7 +288,9 @@ public void Serialization() SerializationInfo CreateSerializationInfo() => new(typeof(PostgresException), new FormatterConverter()); #pragma warning restore 618 #pragma warning restore SYSLIB0011 +#pragma warning disable SYSLIB0050 +#pragma warning disable SYSLIB0051 [Test] [IssueLink("https://github.com/npgsql/npgsql/issues/3204")] 
public void Base_exception_property_serialization() @@ -301,4 +308,5 @@ public void Base_exception_property_serialization() Assert.That(ex.Source, Is.EqualTo(info.GetValue("Source", typeof(string)))); Assert.That(ex.StackTrace, Is.EqualTo(info.GetValue("StackTraceString", typeof(string)))); } +#pragma warning restore SYSLIB0051 } diff --git a/test/Npgsql.Tests/Npgsql.Tests.csproj b/test/Npgsql.Tests/Npgsql.Tests.csproj index 7952ad6301..980b51d8aa 100644 --- a/test/Npgsql.Tests/Npgsql.Tests.csproj +++ b/test/Npgsql.Tests/Npgsql.Tests.csproj @@ -10,4 +10,7 @@ + + true + From 9835cb2db4b0ce5f6f993210ea06ca12dda090a8 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Wed, 17 May 2023 14:42:11 +0200 Subject: [PATCH 136/761] Bump version to 8.0.0-preview.5 --- Directory.Build.props | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Directory.Build.props b/Directory.Build.props index 756df95380..c677a6ee5d 100644 --- a/Directory.Build.props +++ b/Directory.Build.props @@ -1,6 +1,6 @@  - 8.0.0-preview.4 + 8.0.0-preview.5 latest true enable @@ -10,7 +10,7 @@ true true - Copyright 2022 © The Npgsql Development Team + Copyright 2023 © The Npgsql Development Team Npgsql PostgreSQL https://github.com/npgsql/npgsql From 2a8b408700bbc8198468f954a885285bcd523b03 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Wed, 17 May 2023 18:02:29 +0300 Subject: [PATCH 137/761] Use dotnet SDK 8.0.0-preview.5 daily build (#5061) * Use dotnet SDK 8.0.0-preview.5 daily build Because of mstat issues in preview4, https://github.com/dotnet/runtime/issues/86376 * Add preview package source * Fix source mapping * Update dumper to ignore dependency info * Cancel previous commit runs on PRs * Try size --------- Co-authored-by: Nino Floris --- .github/workflows/build.yml | 5 +++ .github/workflows/codeql-analysis.yml | 5 +++ .github/workflows/native-aot.yml | 38 +++++++++++++++---- NuGet.config | 6 --- test/MStatDumper/Program.cs | 20 ++++++---- .../Npgsql.NativeAotTests.csproj | 3 
+- 6 files changed, 55 insertions(+), 22 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 5c30520cfe..8262794a21 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -9,6 +9,11 @@ on: - '*' pull_request: +# Cancel previous PR branch commits (head_ref is only defined on PRs) +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + env: dotnet_sdk_version: '8.0.100-preview.4.23260.5' postgis_version: 3 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 5fe6c18a1e..4833295f70 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -26,6 +26,11 @@ on: schedule: - cron: '21 0 * * 4' +# Cancel previous PR branch commits (head_ref is only defined on PRs) +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + env: dotnet_sdk_version: '8.0.100-preview.4.23260.5' DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index 79e0cdfde3..fe43408c4a 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -9,10 +9,35 @@ on: - '*' pull_request: +# Cancel previous PR branch commits (head_ref is only defined on PRs) +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + env: - dotnet_sdk_version: '8.0.100-preview.4.23260.5' + dotnet_sdk_version: '8.0.100-preview.5.23266.8' DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true - + nuget_config: | + + + + + + + + + + + + + + + + + + + + jobs: build: runs-on: ${{ matrix.os }} @@ -41,12 +66,15 @@ jobs: dotnet-version: | ${{ env.dotnet_sdk_version }} + - name: Setup nuget config + run: echo "$nuget_config" > NuGet.config + - name: Setup Native AOT prerequisites run: sudo apt-get install clang zlib1g-dev shell: bash - name: Build 
- run: dotnet publish test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj -r linux-x64 -c Release -f net8.0 + run: dotnet publish test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj -r linux-x64 -c Release -f net8.0 -p:OptimizationPreference=Size shell: bash # Uncomment the following to SSH into the agent running the build (https://github.com/mxschmitt/action-tmate) @@ -68,14 +96,10 @@ jobs: size="$(ls -l test/Npgsql.NativeAotTests/bin/Release/net8.0/linux-x64/native/Npgsql.NativeAotTests | cut -d ' ' -f 5)" echo "Binary size is $size bytes ($((size / (1024 * 1024))) mb)" >> $GITHUB_STEP_SUMMARY - # Temporarily disabled due to NativeAOT bug, see dotnet/runtime#86376 - name: Dump mstat - if: ${{ false }} run: dotnet run --project test/MStatDumper/MStatDumper.csproj -c release -f net8.0 -- "test/Npgsql.NativeAotTests/obj/Release/net8.0/linux-x64/native/Npgsql.NativeAotTests.mstat" md >> $GITHUB_STEP_SUMMARY - # Temporarily disabled due to NativeAOT bug, see dotnet/runtime#86376 - name: Upload mstat - if: ${{ false }} uses: actions/upload-artifact@v3.1.2 with: name: npgsql.mstat diff --git a/NuGet.config b/NuGet.config index e49ffd89d8..a44ab2b88a 100644 --- a/NuGet.config +++ b/NuGet.config @@ -5,10 +5,4 @@ - - - - - - diff --git a/test/MStatDumper/Program.cs b/test/MStatDumper/Program.cs index 0fdb12ffa2..9a9fe89dfb 100644 --- a/test/MStatDumper/Program.cs +++ b/test/MStatDumper/Program.cs @@ -17,8 +17,10 @@ static void Main(string[] args) var asm = AssemblyDefinition.ReadAssembly(args[0]); var globalType = (TypeDefinition)asm.MainModule.LookupToken(0x02000001); + var versionMajor = asm.Name.Version.Major; + var types = globalType.Methods.First(x => x.Name == "Types"); - var typeStats = GetTypes(types).ToList(); + var typeStats = GetTypes(versionMajor, types).ToList(); var typeSize = typeStats.Sum(x => x.Size); var typesByModules = typeStats.GroupBy(x => x.Type.Scope).Select(x => new { x.Key.Name, Sum = x.Sum(x => x.Size) }).ToList(); if 
(markDownStyleOutput) @@ -55,7 +57,7 @@ static void Main(string[] args) Console.WriteLine(); var methods = globalType.Methods.First(x => x.Name == "Methods"); - var methodStats = GetMethods(methods).ToList(); + var methodStats = GetMethods(versionMajor, methods).ToList(); var methodSize = methodStats.Sum(x => x.Size + x.GcInfoSize + x.EhInfoSize); var methodsByModules = methodStats.GroupBy(x => x.Method.DeclaringType.Scope).Select(x => new { x.Key.Name, Sum = x.Sum(x => x.Size + x.GcInfoSize + x.EhInfoSize) }).ToList(); if (markDownStyleOutput) @@ -257,7 +259,7 @@ static string GetMethodName(MethodReference methodReference) Console.WriteLine(); Console.WriteLine(""); - var filteredTypeStats = GetTypes(types) + var filteredTypeStats = GetTypes(versionMajor, types) .Where(x => x.Type.Scope.Name == "Npgsql") .GroupBy(x => x.Type.Name) .OrderByDescending(x => x.Sum(x => x.Size)) @@ -286,11 +288,13 @@ static string GetMethodName(MethodReference methodReference) } } - public static IEnumerable GetTypes(MethodDefinition types) + public static IEnumerable GetTypes(int formatVersion, MethodDefinition types) { + var entrySize = formatVersion == 1 ? 2 : 3; + types.Body.SimplifyMacros(); var il = types.Body.Instructions; - for (var i = 0; i + 2 < il.Count; i += 2) + for (var i = 0; i + entrySize < il.Count; i += entrySize) { var type = (TypeReference)il[i + 0].Operand; var size = (int)il[i + 1].Operand; @@ -302,11 +306,13 @@ public static IEnumerable GetTypes(MethodDefinition types) } } - public static IEnumerable GetMethods(MethodDefinition methods) + public static IEnumerable GetMethods(int formatVersion, MethodDefinition methods) { + var entrySize = formatVersion == 1 ? 
4 : 5; + methods.Body.SimplifyMacros(); var il = methods.Body.Instructions; - for (var i = 0; i + 4 < il.Count; i += 4) + for (var i = 0; i + entrySize < il.Count; i += entrySize) { var method = (MethodReference)il[i + 0].Operand; var size = (int)il[i + 1].Operand; diff --git a/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj b/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj index 218331fdbb..3396a51a92 100644 --- a/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj +++ b/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj @@ -4,8 +4,7 @@ true net8.0 - - false + true true true true From 08e70d49be5b5ad63f975024dc5fd53847834dfc Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Wed, 17 May 2023 17:59:37 +0200 Subject: [PATCH 138/761] Rework nativeaot ci workflow package mappings (#5063) As some were obscured by the nuget cache --- .github/workflows/native-aot.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index fe43408c4a..3324cac4c9 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -29,11 +29,13 @@ env: - - + + + + From eb27542849c71faa1819ef04c55ab5a181bed3f5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 18 May 2023 10:18:47 +0200 Subject: [PATCH 139/761] Bump Microsoft.CodeAnalysis.CSharp from 4.5.0 to 4.6.0 (#5066) --- Directory.Packages.props | 2 +- src/Npgsql.SourceGenerators/TypeHandlerSourceGenerator.cs | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 388ef75bb4..58f2fcd832 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -7,7 +7,7 @@ - + diff --git a/src/Npgsql.SourceGenerators/TypeHandlerSourceGenerator.cs b/src/Npgsql.SourceGenerators/TypeHandlerSourceGenerator.cs index 75edca250a..d36cc41988 100644 --- 
a/src/Npgsql.SourceGenerators/TypeHandlerSourceGenerator.cs +++ b/src/Npgsql.SourceGenerators/TypeHandlerSourceGenerator.cs @@ -63,8 +63,8 @@ void AugmentTypeHandler( "System.Threading.Tasks", "Npgsql.Internal" }.Concat(classDeclarationSyntax.SyntaxTree.GetCompilationUnitRoot().Usings - .Where(u => u.Alias is null && u.StaticKeyword.IsKind(SyntaxKind.None)) - .Select(u => u.Name.ToString()))); + .Where(u => u.Name is not null && u.Alias is null && u.StaticKeyword.IsKind(SyntaxKind.None)) + .Select(u => u.Name!.ToString()))); var interfaces = typeSymbol.AllInterfaces .Where(i => i.OriginalDefinition.Equals(isSimple ? simpleTypeHandlerInterfaceSymbol : typeHandlerInterfaceSymbol, @@ -126,4 +126,4 @@ cds.BaseList is not null && } } } -} \ No newline at end of file +} From 2cc30c66cc90735ed96c21d8b834c9c86539eed4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 19 May 2023 10:59:51 +0200 Subject: [PATCH 140/761] Bump GitHubActionsTestLogger from 2.1.0 to 2.2.1 (#5068) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 58f2fcd832..51471fe6f2 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -27,7 +27,7 @@ - + From 05b4609e6b128744ea591e1bba4650e2810d772d Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Thu, 25 May 2023 15:15:39 +0300 Subject: [PATCH 141/761] Add some more error codes to the transient exception list (#5076) Closes #5073 --- src/Npgsql/PostgresErrorCodes.cs | 3 ++- src/Npgsql/PostgresException.cs | 3 +++ src/Npgsql/PublicAPI.Unshipped.txt | 3 ++- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/src/Npgsql/PostgresErrorCodes.cs b/src/Npgsql/PostgresErrorCodes.cs index 4dbbf904d8..95831bc0ef 100644 --- a/src/Npgsql/PostgresErrorCodes.cs +++ b/src/Npgsql/PostgresErrorCodes.cs @@ -390,6 +390,7 @@ public static class PostgresErrorCodes public const string 
CrashShutdown = "57P02"; public const string CannotConnectNow = "57P03"; public const string DatabaseDropped = "57P04"; + public const string IdleSessionTimeout = "57P05"; #endregion Class 57 - Operator Intervention @@ -479,4 +480,4 @@ public static class PostgresErrorCodes internal static bool IsCriticalFailure(PostgresException e, bool clusterError = true) => CriticalFailureCodes.Any(x => e.SqlState.StartsWith(x, StringComparison.Ordinal)) || !clusterError && e.SqlState == ProtocolViolation; // We only treat ProtocolViolation as critical for connection -} \ No newline at end of file +} diff --git a/src/Npgsql/PostgresException.cs b/src/Npgsql/PostgresException.cs index c4ba62d0a2..4b2377a363 100644 --- a/src/Npgsql/PostgresException.cs +++ b/src/Npgsql/PostgresException.cs @@ -231,6 +231,9 @@ public override bool IsTransient case PostgresErrorCodes.SqlClientUnableToEstablishSqlConnection: case PostgresErrorCodes.SqlServerRejectedEstablishmentOfSqlConnection: case PostgresErrorCodes.TransactionResolutionUnknown: + case PostgresErrorCodes.AdminShutdown: + case PostgresErrorCodes.CrashShutdown: + case PostgresErrorCodes.IdleSessionTimeout: return true; default: return false; diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index b55b5f3830..0bd82a5918 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -1,7 +1,7 @@ #nullable enable +const Npgsql.PostgresErrorCodes.IdleSessionTimeout = "57P05" -> string! Npgsql.NpgsqlSlimDataSourceBuilder.EnableFullTextSearch() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableRecords() -> Npgsql.NpgsqlSlimDataSourceBuilder! -override Npgsql.NpgsqlBatch.Dispose() -> void Npgsql.NpgsqlBinaryImporter.WriteRow(params object?[]! values) -> void Npgsql.NpgsqlBinaryImporter.WriteRowAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken), params object?[]! 
values) -> System.Threading.Tasks.Task! Npgsql.NpgsqlDataSourceBuilder.UseRootCertificate(System.Security.Cryptography.X509Certificates.X509Certificate2? rootCertificate) -> Npgsql.NpgsqlDataSourceBuilder! @@ -35,6 +35,7 @@ Npgsql.NpgsqlSlimDataSourceBuilder.UseRootCertificate(System.Security.Cryptograp Npgsql.NpgsqlSlimDataSourceBuilder.UseRootCertificateCallback(System.Func? rootCertificateCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UseSystemTextJson(System.Text.Json.JsonSerializerOptions? serializerOptions = null, System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UseUserCertificateValidationCallback(System.Net.Security.RemoteCertificateValidationCallback! userCertificateValidationCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! +override Npgsql.NpgsqlBatch.Dispose() -> void *REMOVED*static NpgsqlTypes.NpgsqlBox.Parse(string! s) -> NpgsqlTypes.NpgsqlBox *REMOVED*static NpgsqlTypes.NpgsqlCircle.Parse(string! s) -> NpgsqlTypes.NpgsqlCircle *REMOVED*static NpgsqlTypes.NpgsqlLine.Parse(string! 
s) -> NpgsqlTypes.NpgsqlLine From bc66a5094de92a2d7bc51e8af31373c363f287d4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 26 May 2023 08:03:55 +0200 Subject: [PATCH 142/761] Bump GitHubActionsTestLogger from 2.2.1 to 2.3.0 (#5081) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 51471fe6f2..933c74c612 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -27,7 +27,7 @@ - + From bbfb2aeb03b4e56fec969dc7f1f0be46de48f8ef Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 26 May 2023 08:04:31 +0200 Subject: [PATCH 143/761] Bump actions/setup-dotnet from 3.0.3 to 3.1.0 (#5080) --- .github/workflows/build.yml | 6 +++--- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/native-aot.yml | 2 +- .github/workflows/rich-code-nav.yml | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 8262794a21..3b920b209c 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -69,7 +69,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v3.0.3 + uses: actions/setup-dotnet@v3.1.0 with: dotnet-version: | ${{ env.dotnet_sdk_version }} @@ -351,7 +351,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v3.0.3 + uses: actions/setup-dotnet@v3.1.0 with: dotnet-version: ${{ env.dotnet_sdk_version }} @@ -385,7 +385,7 @@ jobs: uses: actions/checkout@v3 - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v3.0.3 + uses: actions/setup-dotnet@v3.1.0 with: dotnet-version: ${{ env.dotnet_sdk_version }} diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 4833295f70..2dfc3b6241 100644 --- 
a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -66,7 +66,7 @@ jobs: # queries: ./path/to/local/query, your-org/your-repo/queries@main - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v3.0.3 + uses: actions/setup-dotnet@v3.1.0 with: dotnet-version: ${{ env.dotnet_sdk_version }} diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index 3324cac4c9..722c49fc77 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -63,7 +63,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v3.0.3 + uses: actions/setup-dotnet@v3.1.0 with: dotnet-version: | ${{ env.dotnet_sdk_version }} diff --git a/.github/workflows/rich-code-nav.yml b/.github/workflows/rich-code-nav.yml index 8a2a93ae98..2a2386142c 100644 --- a/.github/workflows/rich-code-nav.yml +++ b/.github/workflows/rich-code-nav.yml @@ -29,7 +29,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v3.0.3 + uses: actions/setup-dotnet@v3.1.0 with: dotnet-version: ${{ env.dotnet_sdk_version }} From e5016d0ff9c119c3f9a3959825227cfc92965703 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 May 2023 00:14:10 +0200 Subject: [PATCH 144/761] Bump actions/setup-dotnet from 3.1.0 to 3.2.0 (#5083) --- .github/workflows/build.yml | 6 +++--- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/native-aot.yml | 2 +- .github/workflows/rich-code-nav.yml | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 3b920b209c..65e5a3e8e8 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -69,7 +69,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v3.1.0 + uses: actions/setup-dotnet@v3.2.0 with: dotnet-version: | ${{ env.dotnet_sdk_version }} @@ 
-351,7 +351,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v3.1.0 + uses: actions/setup-dotnet@v3.2.0 with: dotnet-version: ${{ env.dotnet_sdk_version }} @@ -385,7 +385,7 @@ jobs: uses: actions/checkout@v3 - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v3.1.0 + uses: actions/setup-dotnet@v3.2.0 with: dotnet-version: ${{ env.dotnet_sdk_version }} diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 2dfc3b6241..b0c232efd1 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -66,7 +66,7 @@ jobs: # queries: ./path/to/local/query, your-org/your-repo/queries@main - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v3.1.0 + uses: actions/setup-dotnet@v3.2.0 with: dotnet-version: ${{ env.dotnet_sdk_version }} diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index 722c49fc77..2dd007cfb0 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -63,7 +63,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v3.1.0 + uses: actions/setup-dotnet@v3.2.0 with: dotnet-version: | ${{ env.dotnet_sdk_version }} diff --git a/.github/workflows/rich-code-nav.yml b/.github/workflows/rich-code-nav.yml index 2a2386142c..4f65f04596 100644 --- a/.github/workflows/rich-code-nav.yml +++ b/.github/workflows/rich-code-nav.yml @@ -29,7 +29,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v3.1.0 + uses: actions/setup-dotnet@v3.2.0 with: dotnet-version: ${{ env.dotnet_sdk_version }} From b9d02056cdafd258d69fde1cf9c8b4e64f81b5c0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 31 May 2023 12:43:28 +0300 Subject: [PATCH 145/761] Bump NUnit3TestAdapter from 4.4.2 to 4.5.0 (#5084) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 933c74c612..8c588145e1 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -24,7 +24,7 @@ - + From 0abd28a07c3c502a4bfbcccbcc749592f25e339c Mon Sep 17 00:00:00 2001 From: kronic Date: Wed, 31 May 2023 16:36:40 +0300 Subject: [PATCH 146/761] Removed minor string allocation (#5087) --- src/Npgsql/NpgsqlConnection.cs | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/src/Npgsql/NpgsqlConnection.cs b/src/Npgsql/NpgsqlConnection.cs index 1a42a961c8..6d40ab730b 100644 --- a/src/Npgsql/NpgsqlConnection.cs +++ b/src/Npgsql/NpgsqlConnection.cs @@ -1180,7 +1180,7 @@ async Task BeginBinaryImport(string copyFromCommand, bool { if (copyFromCommand == null) throw new ArgumentNullException(nameof(copyFromCommand)); - if (!copyFromCommand.TrimStart().ToUpper().StartsWith("COPY", StringComparison.Ordinal)) + if (!IsValidCopyCommand(copyFromCommand)) throw new ArgumentException("Must contain a COPY FROM STDIN command!", nameof(copyFromCommand)); CheckReady(); @@ -1234,7 +1234,7 @@ async Task BeginBinaryExport(string copyToCommand, bool as { if (copyToCommand == null) throw new ArgumentNullException(nameof(copyToCommand)); - if (!copyToCommand.TrimStart().ToUpper().StartsWith("COPY", StringComparison.Ordinal)) + if (!IsValidCopyCommand(copyToCommand)) throw new ArgumentException("Must contain a COPY TO STDOUT command!", nameof(copyToCommand)); CheckReady(); @@ -1294,7 +1294,7 @@ async Task BeginTextImport(string copyFromCommand, bool async, Cance { if (copyFromCommand == null) throw new ArgumentNullException(nameof(copyFromCommand)); - if (!copyFromCommand.TrimStart().ToUpper().StartsWith("COPY", StringComparison.Ordinal)) + if (!IsValidCopyCommand(copyFromCommand)) throw new ArgumentException("Must contain a COPY FROM STDIN command!", nameof(copyFromCommand)); CheckReady(); @@ -1355,7 +1355,7 @@ async Task BeginTextExport(string copyToCommand, 
bool async, Cancell { if (copyToCommand == null) throw new ArgumentNullException(nameof(copyToCommand)); - if (!copyToCommand.TrimStart().ToUpper().StartsWith("COPY", StringComparison.Ordinal)) + if (!IsValidCopyCommand(copyToCommand)) throw new ArgumentException("Must contain a COPY TO STDOUT command!", nameof(copyToCommand)); CheckReady(); @@ -1416,7 +1416,7 @@ async Task BeginRawBinaryCopy(string copyCommand, bool asyn { if (copyCommand == null) throw new ArgumentNullException(nameof(copyCommand)); - if (!copyCommand.TrimStart().ToUpper().StartsWith("COPY", StringComparison.Ordinal)) + if (!IsValidCopyCommand(copyCommand)) throw new ArgumentException("Must contain a COPY TO STDOUT OR COPY FROM STDIN command!", nameof(copyCommand)); CheckReady(); @@ -1446,6 +1446,14 @@ async Task BeginRawBinaryCopy(string copyCommand, bool asyn } } + static bool IsValidCopyCommand(string copyCommand) + { + #if NET6_0_OR_GREATER || NETSTANDARD2_1 + return copyCommand.AsSpan().TrimStart().StartsWith("COPY", StringComparison.OrdinalIgnoreCase); + #else + return copyCommand.TrimStart().StartsWith("COPY", StringComparison.OrdinalIgnoreCase); + #endif + } #endregion #region Wait From 61cbc6e4bfc962579c3e03883ad53f463990bb8f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 1 Jun 2023 13:47:58 +0300 Subject: [PATCH 147/761] Bump GitHubActionsTestLogger from 2.3.0 to 2.3.1 (#5088) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 8c588145e1..fdacdd684a 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -27,7 +27,7 @@ - + From 85221df287e6d68b245f5d017dfa88391efc3247 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 2 Jun 2023 00:15:52 +0200 Subject: [PATCH 148/761] Bump GitHubActionsTestLogger from 2.3.1 to 2.3.2 (#5090) --- 
Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index fdacdd684a..594ee1c8a4 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -27,7 +27,7 @@ - + From 6a20496b16b35f71ea03ddf4a38bfa83c7604204 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 2 Jun 2023 00:17:24 +0200 Subject: [PATCH 149/761] Bump Microsoft.NET.Test.Sdk from 17.6.0 to 17.6.1 (#5089) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 594ee1c8a4..932da639e2 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -23,7 +23,7 @@ - + From 343eee14784149677e0d14313426498236a9f443 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Fri, 2 Jun 2023 16:44:39 +0200 Subject: [PATCH 150/761] Small test additions to numeric --- test/Npgsql.Tests/Types/NumericTests.cs | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/test/Npgsql.Tests/Types/NumericTests.cs b/test/Npgsql.Tests/Types/NumericTests.cs index d184221e31..b0cf9596f1 100644 --- a/test/Npgsql.Tests/Types/NumericTests.cs +++ b/test/Npgsql.Tests/Types/NumericTests.cs @@ -19,6 +19,7 @@ public class NumericTests : MultiplexingTestBase new object[] { "0.000000000001::numeric", 0.000000000001M }, new object[] { "0.00000001::numeric", 0.00000001M }, new object[] { "0.0001::numeric", 0.0001M }, + new object[] { "0.123456000000000100000000::numeric", 0.123456000000000100000000M }, new object[] { "1::numeric", 1M }, new object[] { "10000::numeric", 10000M }, new object[] { "100000000::numeric", 100000000M }, @@ -44,6 +45,7 @@ public class NumericTests : MultiplexingTestBase new object[] { "1E+24::numeric", 1000000000000000000000000M }, new object[] { "1E+28::numeric", 10000000000000000000000000000M }, + new object[] { 
"1.2222333344445555666677778888::numeric", 1.2222333344445555666677778888M }, new object[] { "11.222233334444555566667777888::numeric", 11.222233334444555566667777888M }, new object[] { "111.22223333444455556666777788::numeric", 111.22223333444455556666777788M }, new object[] { "1111.2222333344445555666677778::numeric", 1111.2222333344445555666677778M }, @@ -89,9 +91,8 @@ public async Task Read(string query, decimal expected) { using var conn = await OpenConnectionAsync(); using var cmd = new NpgsqlCommand("SELECT " + query, conn); - Assert.That( - decimal.GetBits((decimal)(await cmd.ExecuteScalarAsync())!), - Is.EqualTo(decimal.GetBits(expected))); + var value = (decimal)(await cmd.ExecuteScalarAsync())!; + Assert.That(decimal.GetBits(value), Is.EqualTo(decimal.GetBits(expected))); } [Test] @@ -150,15 +151,19 @@ public async Task Read_overflow_is_safe() [TestCaseSource(nameof(ReadWriteCases))] public async Task Read_BigInteger(string query, decimal expected) { + var bigInt = new BigInteger(expected); + using var conn = await OpenConnectionAsync(); + using var cmd = new NpgsqlCommand("SELECT " + query, conn); + using var rdr = await cmd.ExecuteReaderAsync(); + await rdr.ReadAsync(); + if (decimal.Floor(expected) == expected) - { - var bigInt = new BigInteger(expected); - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT " + query, conn); - using var rdr = await cmd.ExecuteReaderAsync(); - await rdr.ReadAsync(); Assert.That(rdr.GetFieldValue(0), Is.EqualTo(bigInt)); - } + else + Assert.That(() => rdr.GetFieldValue(0), + Throws.Exception + .With.TypeOf() + .With.Message.EqualTo("Numeric value with non-zero fractional digits not supported by BigInteger")); } [Test] From acc90a4c27eb27a6f9cd68f1f29446336af29529 Mon Sep 17 00:00:00 2001 From: kronic Date: Mon, 5 Jun 2023 11:19:42 +0300 Subject: [PATCH 151/761] Optimizing the allocation of temporary buffers (#5092) --- .../Internal/TypeHandlers/TextHandler.cs | 68 
+++++++++++-------- 1 file changed, 40 insertions(+), 28 deletions(-) diff --git a/src/Npgsql/Internal/TypeHandlers/TextHandler.cs b/src/Npgsql/Internal/TypeHandlers/TextHandler.cs index bd249a8730..0a8bd5d7d4 100644 --- a/src/Npgsql/Internal/TypeHandlers/TextHandler.cs +++ b/src/Npgsql/Internal/TypeHandlers/TextHandler.cs @@ -1,6 +1,5 @@ -using System; -using System.Data; -using System.Diagnostics; +using System; +using System.Buffers; using System.Diagnostics.CodeAnalysis; using System.IO; using System.Text; @@ -57,24 +56,30 @@ static async ValueTask ReadLong(NpgsqlReadBuffer buf, int byteLen, bool // Bad case: the string's byte representation doesn't fit in our buffer. // This is rare - will only happen in CommandBehavior.Sequential mode (otherwise the // entire row is in memory). Tweaking the buffer length via the connection string can - // help avoid this. - // Allocate a temporary byte buffer to hold the entire string and read it in chunks. - var tempBuf = new byte[byteLen]; - var pos = 0; - while (true) + var tempBuf = ArrayPool.Shared.Rent(byteLen); + + try { - var len = Math.Min(buf.ReadBytesLeft, byteLen - pos); - buf.ReadBytes(tempBuf, pos, len); - pos += len; - if (pos < byteLen) + var pos = 0; + while (true) { - await buf.ReadMore(async); - continue; + var len = Math.Min(buf.ReadBytesLeft, byteLen - pos); + buf.ReadBytes(tempBuf, pos, len); + pos += len; + if (pos < byteLen) + { + await buf.ReadMore(async); + continue; + } + break; } - break; + return buf.TextEncoding.GetString(tempBuf, 0, byteLen); + } + finally + { + ArrayPool.Shared.Return(tempBuf); } - return buf.TextEncoding.GetString(tempBuf); } } @@ -87,22 +92,29 @@ async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, in return buf.ReadChars(byteLen); } - // TODO: The following can be optimized with Decoder - no need to allocate a byte[] - var tempBuf = new byte[byteLen]; - var pos = 0; - while (true) + var tempBuf = ArrayPool.Shared.Rent(byteLen); + + try { - var len = 
Math.Min(buf.ReadBytesLeft, byteLen - pos); - buf.ReadBytes(tempBuf, pos, len); - pos += len; - if (pos < byteLen) + var pos = 0; + while (true) { - await buf.ReadMore(async); - continue; + var len = Math.Min(buf.ReadBytesLeft, byteLen - pos); + buf.ReadBytes(tempBuf, pos, len); + pos += len; + if (pos < byteLen) + { + await buf.ReadMore(async); + continue; + } + break; } - break; + return buf.TextEncoding.GetChars(tempBuf, 0, byteLen); + } + finally + { + ArrayPool.Shared.Return(tempBuf); } - return buf.TextEncoding.GetChars(tempBuf); } async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) From 5dedf798363e618fc59d7df77c925ec71db55239 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 6 Jun 2023 22:05:49 +0000 Subject: [PATCH 152/761] Bump Microsoft.NET.Test.Sdk from 17.6.1 to 17.6.2 (#5095) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 932da639e2..441e9853d3 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -23,7 +23,7 @@ - + From 6d288f0b0ef9d89232f7fb4fd79026ba1c5d9f5a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 7 Jun 2023 00:34:58 +0200 Subject: [PATCH 153/761] Bump OpenTelemetry.API from 1.4.0 to 1.5.0 (#5096) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 441e9853d3..03c0b74a2c 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -4,7 +4,7 @@ - + From 523354d58058bc76941f39e3d68bedb5ae6730ac Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Sat, 10 Jun 2023 12:23:52 +0300 Subject: [PATCH 154/761] Start CI builds on PG16 beta (#5100) --- .github/workflows/build.yml | 10 +++++----- src/Npgsql/PublicAPI.Unshipped.txt | 4 
++++ .../Replication/ReplicationConnection.cs | 18 ++++++++++++++---- .../ReplicationSystemIdentification.cs | 6 +++--- test/Npgsql.Tests/ConnectionTests.cs | 9 ++++++++- 5 files changed, 34 insertions(+), 13 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 65e5a3e8e8..aa273ec435 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -46,11 +46,11 @@ jobs: pg_major: 14 config: Release test_tfm: net8.0 -# - os: ubuntu-22.04 -# pg_major: 15 -# config: Release -# test_tfm: net8.0 -# pg_prerelease: 'PG Prerelease' + - os: ubuntu-22.04 + pg_major: 16 + config: Release + test_tfm: net8.0 + pg_prerelease: 'PG Prerelease' outputs: is_release: ${{ steps.analyze_tag.outputs.is_release }} diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index 0bd82a5918..db0d6a6f68 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -35,7 +35,11 @@ Npgsql.NpgsqlSlimDataSourceBuilder.UseRootCertificate(System.Security.Cryptograp Npgsql.NpgsqlSlimDataSourceBuilder.UseRootCertificateCallback(System.Func? rootCertificateCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UseSystemTextJson(System.Text.Json.JsonSerializerOptions? serializerOptions = null, System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UseUserCertificateValidationCallback(System.Net.Security.RemoteCertificateValidationCallback! userCertificateValidationCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.Replication.ReplicationConnection.TimelineHistory(ulong tli, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! 
+Npgsql.Replication.ReplicationSystemIdentification.Timeline.get -> ulong override Npgsql.NpgsqlBatch.Dispose() -> void +*REMOVED*Npgsql.Replication.ReplicationConnection.TimelineHistory(uint tli, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +*REMOVED*Npgsql.Replication.ReplicationSystemIdentification.Timeline.get -> uint *REMOVED*static NpgsqlTypes.NpgsqlBox.Parse(string! s) -> NpgsqlTypes.NpgsqlBox *REMOVED*static NpgsqlTypes.NpgsqlCircle.Parse(string! s) -> NpgsqlTypes.NpgsqlCircle *REMOVED*static NpgsqlTypes.NpgsqlLine.Parse(string! s) -> NpgsqlTypes.NpgsqlLine diff --git a/src/Npgsql/Replication/ReplicationConnection.cs b/src/Npgsql/Replication/ReplicationConnection.cs index 6a09c13811..0cec24e26f 100644 --- a/src/Npgsql/Replication/ReplicationConnection.cs +++ b/src/Npgsql/Replication/ReplicationConnection.cs @@ -320,8 +320,18 @@ public Task IdentifySystem(CancellationToken ca async Task IdentifySystemInternal(CancellationToken cancellationToken) { var row = await ReadSingleRow("IDENTIFY_SYSTEM", cancellationToken); + + var timeline = row[1] switch + { + ulong t => t, // PG 16 and above + uint t => t, // PG 15 and below + + _ => throw new NpgsqlException( + $"Unknown type '{row[1].GetType().Name}' received for timeline in a response for IDENTIFY_SYSTEM") + }; + return new ReplicationSystemIdentification( - (string)row[0], (uint)row[1], NpgsqlLogSequenceNumber.Parse((string)row[2]), (string)row[3]); + (string)row[0], timeline, NpgsqlLogSequenceNumber.Parse((string)row[2]), (string)row[3]); } } @@ -356,12 +366,12 @@ async Task ShowInternal(string parameterName, CancellationToken cancella /// An optional token to cancel the asynchronous operation. The default value is . 
/// /// The timeline history file for timeline tli - public Task TimelineHistory(uint tli, CancellationToken cancellationToken = default) + public Task TimelineHistory(ulong tli, CancellationToken cancellationToken = default) { using (NoSynchronizationContextScope.Enter()) return TimelineHistoryInternal(tli, cancellationToken); - async Task TimelineHistoryInternal(uint tli, CancellationToken cancellationToken) + async Task TimelineHistoryInternal(ulong tli, CancellationToken cancellationToken) { var result = await ReadSingleRow($"TIMELINE_HISTORY {tli:D}", cancellationToken); return new TimelineHistoryFile((string)result[0], (byte[])result[1]); @@ -958,4 +968,4 @@ internal void CheckDisposed() if (_isDisposed) throw new ObjectDisposedException(GetType().Name); } -} \ No newline at end of file +} diff --git a/src/Npgsql/Replication/ReplicationSystemIdentification.cs b/src/Npgsql/Replication/ReplicationSystemIdentification.cs index a3d91a6674..cd53a76f35 100644 --- a/src/Npgsql/Replication/ReplicationSystemIdentification.cs +++ b/src/Npgsql/Replication/ReplicationSystemIdentification.cs @@ -7,7 +7,7 @@ namespace Npgsql.Replication; ///
public class ReplicationSystemIdentification { - internal ReplicationSystemIdentification(string systemId, uint timeline, NpgsqlLogSequenceNumber xLogPos, string dbName) + internal ReplicationSystemIdentification(string systemId, ulong timeline, NpgsqlLogSequenceNumber xLogPos, string dbName) { SystemId = systemId; Timeline = timeline; @@ -24,7 +24,7 @@ internal ReplicationSystemIdentification(string systemId, uint timeline, NpgsqlL /// /// Current timeline ID. Also useful to check that the standby is consistent with the master. /// - public uint Timeline { get; } + public ulong Timeline { get; } /// /// Current WAL flush location. Useful to get a known location in the write-ahead log where streaming can start. @@ -35,4 +35,4 @@ internal ReplicationSystemIdentification(string systemId, uint timeline, NpgsqlL /// Database connected to. /// public string? DbName { get; } -} \ No newline at end of file +} diff --git a/test/Npgsql.Tests/ConnectionTests.cs b/test/Npgsql.Tests/ConnectionTests.cs index dd8d06cf11..19fb21b693 100644 --- a/test/Npgsql.Tests/ConnectionTests.cs +++ b/test/Npgsql.Tests/ConnectionTests.cs @@ -1210,9 +1210,16 @@ public async Task Non_UTF8_Encoding() { Encoding.RegisterProvider(CodePagesEncodingProvider.Instance); await using var adminConn = await OpenConnectionAsync(); + // Create the database with server encoding sql-ascii + // Starting with PG16, the default locale provider is icu, which does not support encoding sql_ascii. Specify libc explicitly as the + // locale provider (except for older versions where specifying explicitly isn't supported, and libc is the only possibility). await adminConn.ExecuteNonQueryAsync("DROP DATABASE IF EXISTS sqlascii"); - await adminConn.ExecuteNonQueryAsync("CREATE DATABASE sqlascii ENCODING 'sql_ascii' TEMPLATE template0"); + await adminConn.ExecuteNonQueryAsync( + adminConn.PostgreSqlVersion >= new Version(15, 0) + ? 
"CREATE DATABASE sqlascii ENCODING 'sql_ascii' LOCALE_PROVIDER libc TEMPLATE template0" + : "CREATE DATABASE sqlascii ENCODING 'sql_ascii' TEMPLATE template0"); + try { // Insert some win1252 data From 32e2ef7e12543f120bdf1559076d1e2897329a7c Mon Sep 17 00:00:00 2001 From: Brar Piening Date: Sun, 11 Jun 2023 18:53:20 +0200 Subject: [PATCH 155/761] Consistently handle TimeLineID as UInt32 everywhere (#5101) TimeLineID is an uint32 in PostgreSQL but since PostgreSQL's type system is lacking unsigned types there was no consistent mode of transmission in the streaming replication protocol and the timeline got transmitted both as integer and as bigint. As a consequence although we realized the TimeLineID's unsignedness we also had heterogenity regarding timeline id's ocurrences across Npgsql's replication API. AS of PG 16 the backend is consistently transmitting TimeLineID as bigint that (due to being backed by an uint32 field) can only have values >= 0. With this commit we accept TimeLineID as both integer and bigint and expose it as uint (UInt32) in all our public APIs. Closes #5046 --- src/Npgsql/PublicAPI.Unshipped.txt | 12 ++++-- .../PhysicalReplicationConnection.cs | 6 +-- .../Replication/PhysicalReplicationSlot.cs | 6 +-- .../Replication/ReplicationConnection.cs | 40 ++++++------------- .../ReplicationSystemIdentification.cs | 4 +- .../Replication/PhysicalReplicationTests.cs | 2 +- 6 files changed, 30 insertions(+), 40 deletions(-) diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index db0d6a6f68..a6e371bfe1 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -35,11 +35,11 @@ Npgsql.NpgsqlSlimDataSourceBuilder.UseRootCertificate(System.Security.Cryptograp Npgsql.NpgsqlSlimDataSourceBuilder.UseRootCertificateCallback(System.Func? rootCertificateCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UseSystemTextJson(System.Text.Json.JsonSerializerOptions? 
serializerOptions = null, System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UseUserCertificateValidationCallback(System.Net.Security.RemoteCertificateValidationCallback! userCertificateValidationCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.Replication.ReplicationConnection.TimelineHistory(ulong tli, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -Npgsql.Replication.ReplicationSystemIdentification.Timeline.get -> ulong +Npgsql.Replication.PhysicalReplicationConnection.StartReplication(Npgsql.Replication.PhysicalReplicationSlot? slot, NpgsqlTypes.NpgsqlLogSequenceNumber walLocation, System.Threading.CancellationToken cancellationToken, uint timeline = 0) -> System.Collections.Generic.IAsyncEnumerable! +Npgsql.Replication.PhysicalReplicationConnection.StartReplication(NpgsqlTypes.NpgsqlLogSequenceNumber walLocation, System.Threading.CancellationToken cancellationToken, uint timeline = 0) -> System.Collections.Generic.IAsyncEnumerable! +Npgsql.Replication.PhysicalReplicationSlot.PhysicalReplicationSlot(string! slotName, NpgsqlTypes.NpgsqlLogSequenceNumber? restartLsn = null, uint? restartTimeline = null) -> void +Npgsql.Replication.PhysicalReplicationSlot.RestartTimeline.get -> uint? override Npgsql.NpgsqlBatch.Dispose() -> void -*REMOVED*Npgsql.Replication.ReplicationConnection.TimelineHistory(uint tli, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -*REMOVED*Npgsql.Replication.ReplicationSystemIdentification.Timeline.get -> uint *REMOVED*static NpgsqlTypes.NpgsqlBox.Parse(string! s) -> NpgsqlTypes.NpgsqlBox *REMOVED*static NpgsqlTypes.NpgsqlCircle.Parse(string! s) -> NpgsqlTypes.NpgsqlCircle *REMOVED*static NpgsqlTypes.NpgsqlLine.Parse(string! 
s) -> NpgsqlTypes.NpgsqlLine @@ -55,3 +55,7 @@ override Npgsql.NpgsqlBatch.Dispose() -> void *REMOVED*override Npgsql.NpgsqlNestedDataReader.GetProviderSpecificFieldType(int ordinal) -> System.Type! *REMOVED*override Npgsql.NpgsqlNestedDataReader.GetProviderSpecificValue(int ordinal) -> object! *REMOVED*override Npgsql.NpgsqlNestedDataReader.GetProviderSpecificValues(object![]! values) -> int +*REMOVED*Npgsql.Replication.PhysicalReplicationConnection.StartReplication(Npgsql.Replication.PhysicalReplicationSlot? slot, NpgsqlTypes.NpgsqlLogSequenceNumber walLocation, System.Threading.CancellationToken cancellationToken, ulong timeline = 0) -> System.Collections.Generic.IAsyncEnumerable! +*REMOVED*Npgsql.Replication.PhysicalReplicationConnection.StartReplication(NpgsqlTypes.NpgsqlLogSequenceNumber walLocation, System.Threading.CancellationToken cancellationToken, ulong timeline = 0) -> System.Collections.Generic.IAsyncEnumerable! +*REMOVED*Npgsql.Replication.PhysicalReplicationSlot.PhysicalReplicationSlot(string! slotName, NpgsqlTypes.NpgsqlLogSequenceNumber? restartLsn = null, ulong? restartTimeline = null) -> void +*REMOVED*Npgsql.Replication.PhysicalReplicationSlot.RestartTimeline.get -> ulong? diff --git a/src/Npgsql/Replication/PhysicalReplicationConnection.cs b/src/Npgsql/Replication/PhysicalReplicationConnection.cs index d10d42e71b..9d5faeca98 100644 --- a/src/Npgsql/Replication/PhysicalReplicationConnection.cs +++ b/src/Npgsql/Replication/PhysicalReplicationConnection.cs @@ -119,7 +119,7 @@ async Task CreatePhysicalReplicationSlot(string slotNam public IAsyncEnumerable StartReplication(PhysicalReplicationSlot? 
slot, NpgsqlLogSequenceNumber walLocation, CancellationToken cancellationToken, - ulong timeline = default) + uint timeline = default) { using (NoSynchronizationContextScope.Enter()) return StartPhysicalReplication(slot, walLocation, cancellationToken, timeline); @@ -127,7 +127,7 @@ public IAsyncEnumerable StartReplication(PhysicalReplicationSlo async IAsyncEnumerable StartPhysicalReplication(PhysicalReplicationSlot? slot, NpgsqlLogSequenceNumber walLocation, [EnumeratorCancellation] CancellationToken cancellationToken, - ulong timeline) + uint timeline) { var builder = new StringBuilder("START_REPLICATION"); if (slot != null) @@ -162,7 +162,7 @@ async IAsyncEnumerable StartPhysicalReplication(PhysicalReplica /// A representing an that /// can be used to stream WAL entries in form of instances. public IAsyncEnumerable StartReplication( - NpgsqlLogSequenceNumber walLocation, CancellationToken cancellationToken, ulong timeline = default) + NpgsqlLogSequenceNumber walLocation, CancellationToken cancellationToken, uint timeline = default) => StartReplication(slot: null, walLocation: walLocation, timeline: timeline, cancellationToken: cancellationToken); /// diff --git a/src/Npgsql/Replication/PhysicalReplicationSlot.cs b/src/Npgsql/Replication/PhysicalReplicationSlot.cs index 9bc1018207..7aba817fe2 100644 --- a/src/Npgsql/Replication/PhysicalReplicationSlot.cs +++ b/src/Npgsql/Replication/PhysicalReplicationSlot.cs @@ -17,7 +17,7 @@ public class PhysicalReplicationSlot : ReplicationSlot /// The name of the existing replication slot /// The replication slot's restart_lsn /// The timeline ID associated to restart_lsn, following the current timeline history. - public PhysicalReplicationSlot(string slotName, NpgsqlLogSequenceNumber? restartLsn = null, ulong? restartTimeline = null) + public PhysicalReplicationSlot(string slotName, NpgsqlLogSequenceNumber? restartLsn = null, uint? 
restartTimeline = null) : base(slotName) { RestartLsn = restartLsn; @@ -32,5 +32,5 @@ public PhysicalReplicationSlot(string slotName, NpgsqlLogSequenceNumber? restart /// /// The timeline ID associated to restart_lsn, following the current timeline history. /// - public ulong? RestartTimeline { get; } -} \ No newline at end of file + public uint? RestartTimeline { get; } +} diff --git a/src/Npgsql/Replication/ReplicationConnection.cs b/src/Npgsql/Replication/ReplicationConnection.cs index 0cec24e26f..903e6b7b28 100644 --- a/src/Npgsql/Replication/ReplicationConnection.cs +++ b/src/Npgsql/Replication/ReplicationConnection.cs @@ -320,18 +320,8 @@ public Task IdentifySystem(CancellationToken ca async Task IdentifySystemInternal(CancellationToken cancellationToken) { var row = await ReadSingleRow("IDENTIFY_SYSTEM", cancellationToken); - - var timeline = row[1] switch - { - ulong t => t, // PG 16 and above - uint t => t, // PG 15 and below - - _ => throw new NpgsqlException( - $"Unknown type '{row[1].GetType().Name}' received for timeline in a response for IDENTIFY_SYSTEM") - }; - return new ReplicationSystemIdentification( - (string)row[0], timeline, NpgsqlLogSequenceNumber.Parse((string)row[2]), (string)row[3]); + (string)row[0], (uint)row[1], NpgsqlLogSequenceNumber.Parse((string)row[2]), (string)row[3]); } } @@ -366,12 +356,12 @@ async Task ShowInternal(string parameterName, CancellationToken cancella /// An optional token to cancel the asynchronous operation. The default value is . 
/// /// The timeline history file for timeline tli - public Task TimelineHistory(ulong tli, CancellationToken cancellationToken = default) + public Task TimelineHistory(uint tli, CancellationToken cancellationToken = default) { using (NoSynchronizationContextScope.Enter()) return TimelineHistoryInternal(tli, cancellationToken); - async Task TimelineHistoryInternal(ulong tli, CancellationToken cancellationToken) + async Task TimelineHistoryInternal(uint tli, CancellationToken cancellationToken) { var result = await ReadSingleRow($"TIMELINE_HISTORY {tli:D}", cancellationToken); return new TimelineHistoryFile((string)result[0], (byte[])result[1]); @@ -426,7 +416,7 @@ internal async Task CreateReplicationSlot(string command { case "physical": var restartLsn = (string?)result[1]; - var restartTli = (ulong?)result[2]; + var restartTli = (uint?)result[2]; return new PhysicalReplicationSlot( slotName.ToLowerInvariant(), restartLsn == null ? null : NpgsqlLogSequenceNumber.Parse(restartLsn), @@ -833,23 +823,19 @@ async Task ReadSingleRow(string command, CancellationToken cancellatio case "text": results[i] = buf.ReadString(len); continue; + // Currently in all instances where ReadSingleRow gets called, we expect unsigned integer values only, since that's always + // TimeLineID which is a uint32 in PostgreSQL that is sent as integer up to PG 15 and as bigint as of PG 16 + // (https://github.com/postgres/postgres/blob/57d0051706b897048063acc14c2c3454200c488f/src/include/access/xlogdefs.h#L59 and + // https://github.com/postgres/postgres/commit/ec40f3422412cfdc140b5d3f67db7fd2dac0f1e2). + // Because of this, it is safe to always parse the values we get as unit although, according to the row description message + // we formally could also get a signed int or long value. + // Whenever ReadSingleRow gets used in a new context we have to check, whether this contract is still + // valid in that context and if it isn't, adjust the method accordingly (e.g. 
by switching on the command). case "integer": - { - var str = buf.ReadString(len); - if (!uint.TryParse(str, NumberStyles.None, null, out var num)) - { - throw Connector.Break( - new NpgsqlException( - $"Could not parse '{str}' as unsigned integer in field {field.Name}")); - } - - results[i] = num; - continue; - } case "bigint": { var str = buf.ReadString(len); - if (!ulong.TryParse(str, NumberStyles.None, null, out var num)) + if (!uint.TryParse(str, NumberStyles.None, null, out var num)) { throw Connector.Break( new NpgsqlException( diff --git a/src/Npgsql/Replication/ReplicationSystemIdentification.cs b/src/Npgsql/Replication/ReplicationSystemIdentification.cs index cd53a76f35..7e6673e702 100644 --- a/src/Npgsql/Replication/ReplicationSystemIdentification.cs +++ b/src/Npgsql/Replication/ReplicationSystemIdentification.cs @@ -7,7 +7,7 @@ namespace Npgsql.Replication; /// public class ReplicationSystemIdentification { - internal ReplicationSystemIdentification(string systemId, ulong timeline, NpgsqlLogSequenceNumber xLogPos, string dbName) + internal ReplicationSystemIdentification(string systemId, uint timeline, NpgsqlLogSequenceNumber xLogPos, string dbName) { SystemId = systemId; Timeline = timeline; @@ -24,7 +24,7 @@ internal ReplicationSystemIdentification(string systemId, ulong timeline, Npgsql /// /// Current timeline ID. Also useful to check that the standby is consistent with the master. /// - public ulong Timeline { get; } + public uint Timeline { get; } /// /// Current WAL flush location. Useful to get a known location in the write-ahead log where streaming can start. 
diff --git a/test/Npgsql.Tests/Replication/PhysicalReplicationTests.cs b/test/Npgsql.Tests/Replication/PhysicalReplicationTests.cs index 948ffabc7a..59698b87ac 100644 --- a/test/Npgsql.Tests/Replication/PhysicalReplicationTests.cs +++ b/test/Npgsql.Tests/Replication/PhysicalReplicationTests.cs @@ -53,7 +53,7 @@ FROM pg_replication_slots await using var reader = await cmd.ExecuteReaderAsync(); Assert.That(reader.Read, Is.EqualTo(createSlot)); var expectedSlotName = createSlot ? reader.GetFieldValue(reader.GetOrdinal("slot_name")) : null; - var expectedTli = createSlot ? unchecked((ulong?)reader.GetFieldValue(reader.GetOrdinal("timeline_id"))) : null; + var expectedTli = createSlot ? (uint?)reader.GetFieldValue(reader.GetOrdinal("timeline_id")) : null; var expectedRestartLsn = createSlot ? reader.GetFieldValue(reader.GetOrdinal("restart_lsn")) : null; Assert.That(reader.Read, Is.False); await using var rc = await OpenReplicationConnectionAsync(); From f31a03778e3208874b2f775c558aaf62f425cf26 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 14 Jun 2023 00:31:03 +0200 Subject: [PATCH 156/761] Bump System.Text.Json from 7.0.2 to 7.0.3 (#5106) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 03c0b74a2c..a93b905e3a 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -46,7 +46,7 @@ - + From b8388c614ad532cb3bf7451c6465a83dabf71eca Mon Sep 17 00:00:00 2001 From: Brar Piening Date: Mon, 19 Jun 2023 13:14:35 +0200 Subject: [PATCH 157/761] Add Package Source Mapping configuration (#5109) --- NuGet.config | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/NuGet.config b/NuGet.config index a44ab2b88a..e49ffd89d8 100644 --- a/NuGet.config +++ b/NuGet.config @@ -5,4 +5,10 @@ + + + + + + From 1b8bb7e55971e30b4d8f6fd41287c2bdfd31cee3 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Mon, 19 
Jun 2023 13:51:05 +0200 Subject: [PATCH 158/761] Add empty constructors to Npgsql{Path,Polygon} (#5114) --- src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs | 12 +++++++++--- test/Npgsql.Tests/Types/GeometricTypeTests.cs | 2 +- test/Npgsql.Tests/TypesTests.cs | 8 ++++++++ 3 files changed, 18 insertions(+), 4 deletions(-) diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs b/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs index cc68603b9c..792a215774 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs @@ -172,7 +172,10 @@ public struct NpgsqlPath : IList, IEquatable readonly List _points; public bool Open { get; set; } - public NpgsqlPath(IEnumerable points, bool open) : this() + public NpgsqlPath() + => _points = new(); + + public NpgsqlPath(IEnumerable points, bool open) { _points = new List(points); Open = open; @@ -268,14 +271,17 @@ public override string ToString() /// /// Represents a PostgreSQL Polygon type. /// -public struct NpgsqlPolygon : IList, IEquatable +public readonly struct NpgsqlPolygon : IList, IEquatable { readonly List _points; + public NpgsqlPolygon() + => _points = new(); + public NpgsqlPolygon(IEnumerable points) => _points = new List(points); - public NpgsqlPolygon(params NpgsqlPoint[] points) : this ((IEnumerable) points) {} + public NpgsqlPolygon(params NpgsqlPoint[] points) : this((IEnumerable) points) {} public NpgsqlPolygon(int capacity) => _points = new List(capacity); diff --git a/test/Npgsql.Tests/Types/GeometricTypeTests.cs b/test/Npgsql.Tests/Types/GeometricTypeTests.cs index b5948b0e66..b4101cca14 100644 --- a/test/Npgsql.Tests/Types/GeometricTypeTests.cs +++ b/test/Npgsql.Tests/Types/GeometricTypeTests.cs @@ -61,4 +61,4 @@ public Task Circle() NpgsqlDbType.Circle); public GeometricTypeTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} -} \ No newline at end of file +} diff --git a/test/Npgsql.Tests/TypesTests.cs b/test/Npgsql.Tests/TypesTests.cs index 2e2c95400c..5dbfa844f3 100644 --- 
a/test/Npgsql.Tests/TypesTests.cs +++ b/test/Npgsql.Tests/TypesTests.cs @@ -184,6 +184,14 @@ public void TsQueryOperatorPrecedence() Assert.AreEqual(expectedGrouping.ToString(), query.ToString()); } + [Test] + public void NpgsqlPath_empty() + => Assert.That(new NpgsqlPath { new(1, 2) }, Is.EqualTo(new NpgsqlPath(new NpgsqlPoint(1, 2)))); + + [Test] + public void NpgsqlPolygon_empty() + => Assert.That(new NpgsqlPolygon { new(1, 2) }, Is.EqualTo(new NpgsqlPolygon(new NpgsqlPoint(1, 2)))); + [Test] public void Bug1011018() { From 9feab3ccd9a3a21049b8af38f3ae46fabbd5de98 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Mon, 19 Jun 2023 15:03:37 +0300 Subject: [PATCH 159/761] Fix reading a single char with TextHandler (#5111) Fixes #5110 --- .../Internal/TypeHandlers/TextHandler.cs | 25 ++++++++++++------- test/Npgsql.Tests/CommandTests.cs | 1 + test/Npgsql.Tests/CopyTests.cs | 19 ++++++++++++++ test/Npgsql.Tests/ReaderTests.cs | 18 +++++++++++++ 4 files changed, 54 insertions(+), 9 deletions(-) diff --git a/src/Npgsql/Internal/TypeHandlers/TextHandler.cs b/src/Npgsql/Internal/TypeHandlers/TextHandler.cs index 0a8bd5d7d4..5becd7c691 100644 --- a/src/Npgsql/Internal/TypeHandlers/TextHandler.cs +++ b/src/Npgsql/Internal/TypeHandlers/TextHandler.cs @@ -1,5 +1,6 @@ using System; using System.Buffers; +using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.IO; using System.Text; @@ -120,25 +121,31 @@ async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, in async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription) { // Make sure we have enough bytes in the buffer for a single character + // We can get here a much bigger length in case it's a string + // while we want to read only its first character var maxBytes = Math.Min(buf.TextEncoding.GetMaxByteCount(1), len); await buf.Ensure(maxBytes, async); - return ReadCharCore(); + var character = ReadCharCore(); - unsafe char ReadCharCore() + // We've been requested to read 'len' bytes, which is why we're going to skip them + // This is important for NpgsqlDataReader with CommandBehavior.SequentialAccess + // which tracks how many bytes it has to skip for the next column + await buf.Skip(len, async); + return character; + + char ReadCharCore() { var decoder = buf.TextEncoding.GetDecoder(); #if NETSTANDARD2_0 var singleCharArray = new char[1]; - decoder.Convert(buf.Buffer, buf.ReadPosition, maxBytes, singleCharArray, 0, 1, true, out var bytesUsed, out var charsUsed, out _); + decoder.Convert(buf.Buffer, buf.ReadPosition, maxBytes, singleCharArray, 0, 1, true, out _, out var charsUsed, out _); #else Span singleCharArray = stackalloc char[1]; - decoder.Convert(buf.Buffer.AsSpan(buf.ReadPosition, maxBytes), singleCharArray, true, out var bytesUsed, out var charsUsed, out _); + decoder.Convert(buf.Buffer.AsSpan(buf.ReadPosition, maxBytes), singleCharArray, true, out _, out var charsUsed, out _); #endif - buf.Skip(len - bytesUsed); - if (charsUsed < 1) throw new NpgsqlException("Could not read char - string was empty"); @@ -190,7 +197,7 @@ static async ValueTask ReadLong(NpgsqlReadBuffer buf, byte[] bytes, int return bytes; } } - + ValueTask> INpgsqlTypeHandler>.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription) => throw new NotSupportedException("Only writing ReadOnlyMemory to PostgreSQL text is supported, no reading."); @@ -320,8 +327,8 @@ public Task Write(ReadOnlyMemory value, NpgsqlWriteBuffer buf, NpgsqlLengt public virtual TextReader GetTextReader(Stream stream, NpgsqlReadBuffer buffer) { var byteLength = (int)(stream.Length - stream.Position); - return buffer.ReadBytesLeft >= byteLength - ? buffer.GetPreparedTextReader(_encoding.GetString(buffer.Buffer, buffer.ReadPosition, byteLength), stream) + return buffer.ReadBytesLeft >= byteLength + ? buffer.GetPreparedTextReader(_encoding.GetString(buffer.Buffer, buffer.ReadPosition, byteLength), stream) : new StreamReader(stream, _encoding); } } diff --git a/test/Npgsql.Tests/CommandTests.cs b/test/Npgsql.Tests/CommandTests.cs index c486d69890..aa31c2cabd 100644 --- a/test/Npgsql.Tests/CommandTests.cs +++ b/test/Npgsql.Tests/CommandTests.cs @@ -272,6 +272,7 @@ public async Task Prepare_timeout_hard([Values] SyncOrAsync async) #region Cancel [Test, Description("Basic cancellation scenario")] + [Ignore("Flaky, see https://github.com/npgsql/npgsql/issues/5070")] public async Task Cancel() { if (IsMultiplexing) diff --git a/test/Npgsql.Tests/CopyTests.cs b/test/Npgsql.Tests/CopyTests.cs index 7b5095ef55..04c372f90c 100644 --- a/test/Npgsql.Tests/CopyTests.cs +++ b/test/Npgsql.Tests/CopyTests.cs @@ -778,6 +778,25 @@ public async Task Binary_copy_throws_for_nullable() Assert.ThrowsAsync(async () => await writer.WriteAsync(value, NpgsqlDbType.Integer)); } + [Test] + [IssueLink("https://github.com/npgsql/npgsql/issues/5110")] + public async Task Binary_copy_read_char_column() + { + await using var conn = await OpenConnectionAsync(); + var tableName = await CreateTempTable(conn, "id serial, value char"); + + await using var cmd = conn.CreateCommand(); + cmd.CommandText = $"INSERT INTO {tableName}(value) VALUES ('d'), ('s')"; + await cmd.ExecuteNonQueryAsync(); + + await using var export = await 
conn.BeginBinaryExportAsync($"COPY {tableName}(id, value) TO STDOUT (FORMAT BINARY)"); + while (await export.StartRowAsync() != -1) + { + var id = export.Read(); + var value = export.Read(); + } + } + #endregion #region Text diff --git a/test/Npgsql.Tests/ReaderTests.cs b/test/Npgsql.Tests/ReaderTests.cs index d106599b9d..1abd46909e 100644 --- a/test/Npgsql.Tests/ReaderTests.cs +++ b/test/Npgsql.Tests/ReaderTests.cs @@ -1231,6 +1231,24 @@ await pgMock Assert.ThrowsAsync(async () => await reader.DisposeAsync()); } + [Test] + public async Task Read_string_as_char() + { + await using var conn = await OpenConnectionAsync(); + + await using var cmd = conn.CreateCommand(); + cmd.CommandText = "SELECT 'abcdefgh', 'ijklmnop'"; + + await using var reader = await cmd.ExecuteReaderAsync(Behavior); + Assert.IsTrue(await reader.ReadAsync()); + Assert.That(reader.GetChar(0), Is.EqualTo('a')); + if (Behavior == CommandBehavior.SequentialAccess) + Assert.Throws(() => reader.GetChar(0)); + else + Assert.That(reader.GetChar(0), Is.EqualTo('a')); + Assert.That(reader.GetChar(1), Is.EqualTo('i')); + } + #region GetBytes / GetStream [Test] From f4b80231bb4a3f4fae93263b9270edf86422e3d6 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Wed, 21 Jun 2023 18:30:11 +0200 Subject: [PATCH 160/761] Improve text handling (#5116) * Improve GetChar and seek in column implementations * Improve text handler encoding use * Move to 512 chars on the stack --- src/Npgsql/Internal/NpgsqlWriteBuffer.cs | 4 +- .../Internal/TypeHandlers/TextHandler.cs | 39 +--- src/Npgsql/NpgsqlDataReader.cs | 80 +++---- src/Npgsql/NpgsqlNestedDataReader.cs | 15 +- src/Npgsql/Shims/EncodingExtensions.cs | 212 ++++++++++++++++++ src/Npgsql/ThrowHelper.cs | 8 +- 6 files changed, 274 insertions(+), 84 deletions(-) create mode 100644 src/Npgsql/Shims/EncodingExtensions.cs diff --git a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs index 3b7d08d369..9dceb06511 100644 --- 
a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs @@ -424,13 +424,11 @@ internal void WriteChars(char[] chars, int offset, int len) WritePosition += TextEncoding.GetBytes(chars, offset, charCount, Buffer, WritePosition); } -#if !NETSTANDARD2_0 internal void WriteChars(ReadOnlySpan chars) { Debug.Assert(TextEncoding.GetByteCount(chars) <= WriteSpaceLeft); WritePosition += TextEncoding.GetBytes(chars, Buffer.AsSpan(WritePosition)); } -#endif public void WriteBytes(ReadOnlySpan buf) { @@ -627,4 +625,4 @@ internal byte[] GetContents() } #endregion -} \ No newline at end of file +} diff --git a/src/Npgsql/Internal/TypeHandlers/TextHandler.cs b/src/Npgsql/Internal/TypeHandlers/TextHandler.cs index 5becd7c691..a707c83efc 100644 --- a/src/Npgsql/Internal/TypeHandlers/TextHandler.cs +++ b/src/Npgsql/Internal/TypeHandlers/TextHandler.cs @@ -43,9 +43,9 @@ public override ValueTask Read(NpgsqlReadBuffer buf, int byteLen, bool a { return buf.ReadBytesLeft >= byteLen ? 
new ValueTask(buf.ReadString(byteLen)) - : ReadLong(buf, byteLen, async); + : ReadLong(_encoding, buf, byteLen, async); - static async ValueTask ReadLong(NpgsqlReadBuffer buf, int byteLen, bool async) + static async ValueTask ReadLong(Encoding encoding, NpgsqlReadBuffer buf, int byteLen, bool async) { if (byteLen <= buf.Size) { @@ -75,7 +75,7 @@ static async ValueTask ReadLong(NpgsqlReadBuffer buf, int byteLen, bool } break; } - return buf.TextEncoding.GetString(tempBuf, 0, byteLen); + return encoding.GetString(tempBuf, 0, byteLen); } finally { @@ -110,7 +110,7 @@ async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, in } break; } - return buf.TextEncoding.GetChars(tempBuf, 0, byteLen); + return _encoding.GetChars(tempBuf, 0, byteLen); } finally { @@ -123,7 +123,7 @@ async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int le // Make sure we have enough bytes in the buffer for a single character // We can get here a much bigger length in case it's a string // while we want to read only its first character - var maxBytes = Math.Min(buf.TextEncoding.GetMaxByteCount(1), len); + var maxBytes = Math.Min(_encoding.GetMaxByteCount(1), len); await buf.Ensure(maxBytes, async); var character = ReadCharCore(); @@ -136,20 +136,14 @@ async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int le char ReadCharCore() { - var decoder = buf.TextEncoding.GetDecoder(); - -#if NETSTANDARD2_0 - var singleCharArray = new char[1]; - decoder.Convert(buf.Buffer, buf.ReadPosition, maxBytes, singleCharArray, 0, 1, true, out _, out var charsUsed, out _); -#else - Span singleCharArray = stackalloc char[1]; - decoder.Convert(buf.Buffer.AsSpan(buf.ReadPosition, maxBytes), singleCharArray, true, out _, out var charsUsed, out _); -#endif - - if (charsUsed < 1) + var charSpan = buf.Buffer.AsSpan(buf.ReadPosition, maxBytes); + var chars = _encoding.GetCharCount(charSpan); + if (chars < 1) throw new NpgsqlException("Could not read char - string was empty"); - return 
singleCharArray[0]; + Span destination = stackalloc char[chars]; + _encoding.GetChars(charSpan, destination); + return destination[0]; } } @@ -248,12 +242,7 @@ public virtual int ValidateAndGetLength(ArraySegment value, ref NpgsqlLeng /// public int ValidateAndGetLength(char value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) { -#if NETSTANDARD2_0 - var singleCharArray = new char[1]; -#else Span singleCharArray = stackalloc char[1]; -#endif - singleCharArray[0] = value; return _encoding.GetByteCount(singleCharArray); } @@ -300,15 +289,9 @@ public async Task Write(char value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? le static unsafe void WriteCharCore(char value, NpgsqlWriteBuffer buf) { -#if NETSTANDARD2_0 - var singleCharArray = new char[1]; - singleCharArray[0] = value; - buf.WriteChars(singleCharArray, 0, 1); -#else Span singleCharArray = stackalloc char[1]; singleCharArray[0] = value; buf.WriteChars(singleCharArray); -#endif } } diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 3ddaa70e4f..8f8ee307b5 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -129,11 +129,6 @@ public sealed class NpgsqlDataReader : DbDataReader, IDbColumnSchemaGenerator /// NpgsqlReadBuffer.ColumnStream? _columnStream; - /// - /// Used for internal temporary purposes - /// - char[]? _tempCharBuf; - /// /// Used to keep track of every unique row this reader object ever traverses. /// This is used to detect whether nested DbDataReaders are still valid. @@ -1417,12 +1412,12 @@ public override int GetValues(object[] values) /// The actual number of bytes read. public override long GetBytes(int ordinal, long dataOffset, byte[]? 
buffer, int bufferOffset, int length) { - if (dataOffset < 0 || dataOffset > int.MaxValue) + if (dataOffset is < 0 or > int.MaxValue) throw new ArgumentOutOfRangeException(nameof(dataOffset), dataOffset, $"dataOffset must be between {0} and {int.MaxValue}"); if (buffer != null && (bufferOffset < 0 || bufferOffset >= buffer.Length + 1)) - throw new IndexOutOfRangeException($"bufferOffset must be between {0} and {(buffer.Length)}"); + throw new IndexOutOfRangeException($"bufferOffset must be between 0 and {buffer.Length}"); if (buffer != null && (length < 0 || length > buffer.Length - bufferOffset)) - throw new IndexOutOfRangeException($"length must be between {0} and {buffer.Length - bufferOffset}"); + throw new IndexOutOfRangeException($"length must be between 0 and {buffer.Length - bufferOffset}"); var field = CheckRowAndGetField(ordinal); var handler = field.Handler; @@ -1430,10 +1425,10 @@ public override long GetBytes(int ordinal, long dataOffset, byte[]? buffer, int throw new InvalidCastException("GetBytes() not supported for type " + field.Name); SeekToColumn(ordinal, false).GetAwaiter().GetResult(); - if (ColumnLen == -1) + if (ColumnLen is -1) ThrowHelper.ThrowInvalidCastException_NoValue(field); - if (buffer == null) + if (buffer is null) return ColumnLen; var dataOffset2 = (int)dataOffset; @@ -1513,12 +1508,12 @@ async ValueTask GetStreamInternal(FieldDescription field, int ordinal, b /// The actual number of characters read. public override long GetChars(int ordinal, long dataOffset, char[]? 
buffer, int bufferOffset, int length) { - if (dataOffset < 0 || dataOffset > int.MaxValue) - throw new ArgumentOutOfRangeException(nameof(dataOffset), dataOffset, $"dataOffset must be between {0} and {int.MaxValue}"); + if (dataOffset is < 0 or > int.MaxValue) + throw new ArgumentOutOfRangeException(nameof(dataOffset), dataOffset, $"dataOffset must be between 0 and {int.MaxValue}"); if (buffer != null && (bufferOffset < 0 || bufferOffset >= buffer.Length + 1)) - throw new IndexOutOfRangeException($"bufferOffset must be between {0} and {(buffer.Length)}"); + throw new IndexOutOfRangeException($"bufferOffset must be between 0 and {buffer.Length}"); if (buffer != null && (length < 0 || length > buffer.Length - bufferOffset)) - throw new IndexOutOfRangeException($"length must be between {0} and {buffer.Length - bufferOffset}"); + throw new IndexOutOfRangeException($"length must be between 0 and {buffer.Length - bufferOffset}"); var field = CheckRowAndGetField(ordinal); var handler = field.Handler as TextHandler; @@ -1570,31 +1565,30 @@ public override long GetChars(int ordinal, long dataOffset, char[]? 
buffer, int if (length == 0) return 0; - var (bytesRead, charsRead) = DecodeChars(decoder, buffer, bufferOffset, length, ColumnLen - PosInColumn); + var (bytesRead, charsRead) = DecodeChars(decoder, buffer.AsSpan(bufferOffset, length), ColumnLen - PosInColumn); PosInColumn += bytesRead; _charPos += charsRead; return charsRead; } - (int BytesRead, int CharsRead) DecodeChars(Decoder decoder, char[] output, int outputOffset, int charCount, int byteCount) + (int BytesRead, int CharsRead) DecodeChars(Decoder decoder, Span output, int byteCount) { var (bytesRead, charsRead) = (0, 0); + var outputLength = output.Length; while (true) { Buffer.Ensure(1); // Make sure we have at least some data - var maxBytes = Math.Min(byteCount - bytesRead, Buffer.ReadBytesLeft); - decoder.Convert(Buffer.Buffer, Buffer.ReadPosition, maxBytes, output, outputOffset, charCount - charsRead, false, - out var bytesUsed, out var charsUsed, out _); + var bytes = Buffer.Buffer.AsSpan(Buffer.ReadPosition, maxBytes); + decoder.Convert(bytes, output, false, out var bytesUsed, out var charsUsed, out _); Buffer.ReadPosition += bytesUsed; bytesRead += bytesUsed; charsRead += charsUsed; - if (charsRead == charCount || bytesRead == byteCount) + if (charsRead == outputLength || bytesRead == byteCount) break; - outputOffset += charsUsed; - Buffer.Clear(); + output = output.Slice(charsUsed); } return (bytesRead, charsRead); @@ -1602,13 +1596,11 @@ public override long GetChars(int ordinal, long dataOffset, char[]? 
buffer, int internal (int BytesSkipped, int CharsSkipped) SkipChars(Decoder decoder, int charCount, int byteCount) { - // TODO: Allocate on the stack with Span - if (_tempCharBuf == null) - _tempCharBuf = new char[1024]; + Span tempCharBuf = stackalloc char[512]; var (charsSkipped, bytesSkipped) = (0, 0); while (charsSkipped < charCount && bytesSkipped < byteCount) { - var (bytesRead, charsRead) = DecodeChars(decoder, _tempCharBuf, 0, Math.Min(charCount, _tempCharBuf.Length), byteCount); + var (bytesRead, charsRead) = DecodeChars(decoder, tempCharBuf.Slice(0, Math.Min(charCount, tempCharBuf.Length)), byteCount); bytesSkipped += bytesRead; charsSkipped += charsRead; } @@ -1649,7 +1641,7 @@ async ValueTask GetTextReader(int ordinal, bool async, CancellationT return handler.GetTextReader(stream, Buffer); } - throw new InvalidCastException($"The GetTextReader method is not supported for type {field.Handler.PgDisplayName}"); + throw new InvalidCastException($"The GetTextReader method is not supported for type {field.PostgresType.DisplayName}"); } #endregion @@ -1790,10 +1782,12 @@ public override object GetValue(int ordinal) { var fieldDescription = CheckRowAndGetField(ordinal); - if (_isSequential) { + if (_isSequential) + { SeekToColumnSequential(ordinal, false).GetAwaiter().GetResult(); CheckColumnStart(); - } else + } + else SeekToColumnNonSequential(ordinal); if (ColumnLen == -1) @@ -2105,7 +2099,7 @@ void SeekToColumnNonSequential(int column) for (var lastColumnRead = _columns.Count; column >= lastColumnRead; lastColumnRead++) { int lastColumnLen; - (Buffer.ReadPosition, lastColumnLen) = _columns[lastColumnRead-1]; + (Buffer.ReadPosition, lastColumnLen) = _columns[lastColumnRead - 1]; if (lastColumnLen != -1) Buffer.ReadPosition += lastColumnLen; var len = Buffer.ReadInt32(); @@ -2163,32 +2157,32 @@ async Task SeekToColumnSequential(int column, bool async, CancellationToken canc _column = column; } - Task SeekInColumn(int posInColumn, bool async, CancellationToken 
cancellationToken = default) + Task SeekInColumn(int dataOffset, bool async, CancellationToken cancellationToken = default) { if (_isSequential) - return SeekInColumnSequential(posInColumn, async); + return SeekInColumnSequential(dataOffset, async); - if (posInColumn > ColumnLen) - posInColumn = ColumnLen; + if (dataOffset >= ColumnLen) + ThrowHelper.ThrowArgumentOutOfRange_OutOfColumnBounds(nameof(dataOffset), ColumnLen); - Buffer.ReadPosition = _columns[_column].Offset + posInColumn; - PosInColumn = posInColumn; + Buffer.ReadPosition = _columns[_column].Offset + dataOffset; + PosInColumn = dataOffset; return Task.CompletedTask; - async Task SeekInColumnSequential(int posInColumn, bool async) + async Task SeekInColumnSequential(int dataOffset, bool async) { Debug.Assert(_column > -1); - if (posInColumn < PosInColumn) + if (dataOffset < PosInColumn) ThrowHelper.ThrowInvalidOperationException("Attempt to read a position in the column which has already been read"); - if (posInColumn > ColumnLen) - posInColumn = ColumnLen; + if (dataOffset >= ColumnLen) + ThrowHelper.ThrowArgumentOutOfRange_OutOfColumnBounds(nameof(dataOffset), ColumnLen); - if (posInColumn > PosInColumn) + if (dataOffset > PosInColumn) { - await Buffer.Skip(posInColumn - PosInColumn, async); - PosInColumn = posInColumn; + await Buffer.Skip(dataOffset - PosInColumn, async); + PosInColumn = dataOffset; } } } diff --git a/src/Npgsql/NpgsqlNestedDataReader.cs b/src/Npgsql/NpgsqlNestedDataReader.cs index bd10db458c..55234f5423 100644 --- a/src/Npgsql/NpgsqlNestedDataReader.cs +++ b/src/Npgsql/NpgsqlNestedDataReader.cs @@ -174,12 +174,12 @@ public override bool IsClosed /// public override long GetBytes(int ordinal, long dataOffset, byte[]? 
buffer, int bufferOffset, int length) { - if (dataOffset < 0 || dataOffset > int.MaxValue) - throw new ArgumentOutOfRangeException(nameof(dataOffset), dataOffset, $"dataOffset must be between {0} and {int.MaxValue}"); + if (dataOffset is < 0 or > int.MaxValue) + throw new ArgumentOutOfRangeException(nameof(dataOffset), dataOffset, $"dataOffset must be between 0 and {int.MaxValue}"); if (buffer != null && (bufferOffset < 0 || bufferOffset >= buffer.Length + 1)) - throw new IndexOutOfRangeException($"bufferOffset must be between {0} and {(buffer.Length)}"); + throw new IndexOutOfRangeException($"bufferOffset must be between 0 and {buffer.Length}"); if (buffer != null && (length < 0 || length > buffer.Length - bufferOffset)) - throw new IndexOutOfRangeException($"length must be between {0} and {buffer.Length - bufferOffset}"); + throw new IndexOutOfRangeException($"length must be between 0 and {buffer.Length - bufferOffset}"); var field = CheckRowAndColumnAndSeek(ordinal); var handler = field.Handler; @@ -190,9 +190,8 @@ public override long GetBytes(int ordinal, long dataOffset, byte[]? 
buffer, int throw new InvalidCastException("field is null"); var dataOffset2 = (int)dataOffset; - if (dataOffset2 > field.Length) - throw new ArgumentOutOfRangeException(nameof(dataOffset), - $"attempting to read out of bounds from the column data, dataOffset must be between {0} and {field.Length}"); + if (dataOffset2 >= field.Length) + ThrowHelper.ThrowArgumentOutOfRange_OutOfColumnBounds(nameof(dataOffset), field.Length); Buffer.ReadPosition += dataOffset2; @@ -483,4 +482,4 @@ enum ReaderState Closed, Disposed } -} \ No newline at end of file +} diff --git a/src/Npgsql/Shims/EncodingExtensions.cs b/src/Npgsql/Shims/EncodingExtensions.cs new file mode 100644 index 0000000000..792e225af7 --- /dev/null +++ b/src/Npgsql/Shims/EncodingExtensions.cs @@ -0,0 +1,212 @@ +using System.Buffers; +using System.Collections.Generic; +using System.Runtime.InteropServices; + +namespace System.Text; + +static class EncodingExtensions +{ +#if NETSTANDARD2_0 + public static unsafe int GetByteCount(this Encoding encoding, ReadOnlySpan chars) + { + fixed (char* charsPtr = chars) + { + return encoding.GetByteCount(charsPtr, chars.Length); + } + } + + public static unsafe int GetBytes(this Encoding encoding, ReadOnlySpan chars, Span bytes) + { + fixed (char* charsPtr = chars) + fixed (byte* bytesPtr = bytes) + { + return encoding.GetBytes(charsPtr, chars.Length, bytesPtr, bytes.Length); + } + } + + public static unsafe int GetCharCount(this Encoding encoding, ReadOnlySpan bytes) + { + fixed (byte* bytesPtr = bytes) + { + return encoding.GetCharCount(bytesPtr, bytes.Length); + } + } + + public static unsafe int GetCharCount(this Decoder encoding, ReadOnlySpan bytes, bool flush) + { + fixed (byte* bytesPtr = bytes) + { + return encoding.GetCharCount(bytesPtr, bytes.Length, flush); + } + } + + public static unsafe int GetChars(this Decoder encoding, ReadOnlySpan bytes, Span chars, bool flush) + { + fixed (byte* bytesPtr = bytes) + fixed (char* charsPtr = chars) + { + return 
encoding.GetChars(bytesPtr, bytes.Length, charsPtr, chars.Length, flush); + } + } + + public static unsafe int GetChars(this Encoding encoding, ReadOnlySpan bytes, Span chars) + { + fixed (byte* bytesPtr = bytes) + fixed (char* charsPtr = chars) + { + return encoding.GetChars(bytesPtr, bytes.Length, charsPtr, chars.Length); + } + } + + public static unsafe void Convert(this Encoder encoder, ReadOnlySpan chars, Span bytes, bool flush, out int charsUsed, out int bytesUsed, out bool completed) + { + fixed (char* charsPtr = chars) + fixed (byte* bytesPtr = bytes) + { + encoder.Convert(charsPtr, chars.Length, bytesPtr, bytes.Length, flush, out charsUsed, out bytesUsed, out completed); + } + } + + public static unsafe void Convert(this Decoder encoder, ReadOnlySpan bytes, Span chars, bool flush, out int bytesUsed, out int charsUsed, out bool completed) + { + fixed (byte* bytesPtr = bytes) + fixed (char* charsPtr = chars) + { + encoder.Convert(bytesPtr, bytes.Length, charsPtr, chars.Length, flush, out bytesUsed, out charsUsed, out completed); + } + } +#endif + +#if NETSTANDARD + /// + /// Decodes the specified to s using the specified + /// and outputs the result to . + /// + /// The which represents how the data in is encoded. + /// The to decode to characters. + /// The destination buffer to which the decoded characters will be written. + /// The number of chars written to . + /// Thrown if is not large enough to contain the encoded form of . + /// Thrown if contains data that cannot be decoded and is configured + /// to throw an exception when such data is seen. + public static int GetChars(this Encoding encoding, in ReadOnlySequence bytes, Span chars) + { + if (encoding is null) + throw new ArgumentNullException(nameof(encoding)); + + if (bytes.IsSingleSegment) + { + // If the incoming sequence is single-segment, one-shot this. 
+ + return encoding.GetChars(bytes.First.Span, chars); + } + else + { + // If the incoming sequence is multi-segment, create a stateful Decoder + // and use it as the workhorse. On the final iteration we'll pass flush=true. + + ReadOnlySequence remainingBytes = bytes; + int originalCharsLength = chars.Length; + Decoder decoder = encoding.GetDecoder(); + bool isFinalSegment; + + do + { + var firstSpan = remainingBytes.First.Span; + var next = remainingBytes.GetPosition(firstSpan.Length); + isFinalSegment = remainingBytes.IsSingleSegment; + + int charsWrittenJustNow = decoder.GetChars(firstSpan, chars, flush: isFinalSegment); + chars = chars.Slice(charsWrittenJustNow); + remainingBytes = remainingBytes.Slice(next); + } while (!isFinalSegment); + + return originalCharsLength - chars.Length; // total number of chars we wrote + } + } + + public static string GetString(this Encoding encoding, in ReadOnlySequence bytes) + { + if (encoding is null) + throw new ArgumentNullException(nameof(encoding)); + + // If the incoming sequence is single-segment, one-shot this. + if (bytes.IsSingleSegment) + { +#if NETSTANDARD2_1 + return encoding.GetString(bytes.First.Span); +#else + var rented = false; + byte[] arr; + var memory = bytes.First; + if (MemoryMarshal.TryGetArray(memory, out var segment)) + arr = segment.Array!; + else + { + rented = true; + arr = ArrayPool.Shared.Rent(memory.Length); + bytes.First.Span.CopyTo(arr); + } + var ret = encoding.GetString(arr, 0, memory.Length); + if (rented) + ArrayPool.Shared.Return(arr); + return ret; +#endif + } + + // If the incoming sequence is multi-segment, create a stateful Decoder + // and use it as the workhorse. On the final iteration we'll pass flush=true. + + var decoder = encoding.GetDecoder(); + + // Maintain a list of all the segments we'll need to concat together. + // These will be released back to the pool at the end of the method. 
+ + var listOfSegments = new List<(char[], int)>(); + var totalCharCount = 0; + + var remainingBytes = bytes; + bool isFinalSegment; + + do + { + var firstSpan = remainingBytes.First.Span; + var next = remainingBytes.GetPosition(firstSpan.Length); + isFinalSegment = remainingBytes.IsSingleSegment; + + var charCountThisIteration = decoder.GetCharCount(firstSpan, flush: isFinalSegment); // could throw ArgumentException if overflow would occur + var rentedArray = ArrayPool.Shared.Rent(charCountThisIteration); + var actualCharsWrittenThisIteration = decoder.GetChars(firstSpan, rentedArray, flush: isFinalSegment); + listOfSegments.Add((rentedArray, actualCharsWrittenThisIteration)); + + totalCharCount += actualCharsWrittenThisIteration; + if (totalCharCount < 0) + { + // If we overflowed, call string.Create, passing int.MaxValue. + // This will end up throwing the expected OutOfMemoryException + // since strings are limited to under int.MaxValue elements in length. + + totalCharCount = int.MaxValue; + break; + } + + remainingBytes = remainingBytes.Slice(next); + } while (!isFinalSegment); + + // Now build up the string to return, then release all of our scratch buffers + // back to the shared pool. 
+ var chars = ArrayPool.Shared.Rent(totalCharCount); + var span = chars.AsSpan(); + foreach (var (array, length) in listOfSegments) + { + array.AsSpan(0, length).CopyTo(span); + ArrayPool.Shared.Return(array); + span = span.Slice(length); + } + + var str = new string(chars); + ArrayPool.Shared.Return(chars); + return str; + } +#endif +} diff --git a/src/Npgsql/ThrowHelper.cs b/src/Npgsql/ThrowHelper.cs index a0d33f0050..57eaa5cc42 100644 --- a/src/Npgsql/ThrowHelper.cs +++ b/src/Npgsql/ThrowHelper.cs @@ -52,6 +52,10 @@ internal static void ThrowInvalidCastException(string message, object argument) internal static void ThrowInvalidCastException_NoValue(FieldDescription field) => throw new InvalidCastException($"Column '{field.Name}' is null."); + [DoesNotReturn] + internal static void ThrowArgumentOutOfRange_OutOfColumnBounds(string paramName, int columnLength) => + throw new ArgumentOutOfRangeException(paramName, $"The value is out of bounds from the column data, dataOffset must be between 0 and {columnLength}"); + [DoesNotReturn] internal static void ThrowInvalidOperationException_NoPropertyGetter(Type type, MemberInfo property) => throw new InvalidOperationException($"Composite type '{type}' cannot be written because the '{property}' property has no getter."); @@ -75,7 +79,7 @@ internal static void ThrowNpgsqlException(string message, Exception? 
innerExcept [DoesNotReturn] internal static void ThrowNpgsqlOperationInProgressException(NpgsqlCommand command) => throw new NpgsqlOperationInProgressException(command); - + [DoesNotReturn] internal static void ThrowNpgsqlOperationInProgressException(ConnectorState state) => throw new NpgsqlOperationInProgressException(state); @@ -103,4 +107,4 @@ internal static void ThrowNotSupportedException(string message) [DoesNotReturn] internal static void ThrowNpgsqlExceptionWithInnerTimeoutException(string message) => throw new NpgsqlException(message, new TimeoutException()); -} \ No newline at end of file +} From e496efa60710de55508c9687ca0453b8840b3a56 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Thu, 22 Jun 2023 10:25:15 +0200 Subject: [PATCH 161/761] Stop eagerly instantiating JSON type handlers (#5122) --- .../TypeMapping/BuiltInTypeHandlerResolver.cs | 5 ++- .../SystemTextJsonTypeHandlerResolver.cs | 36 ++++++++------- test/Npgsql.Tests/Types/JsonTests.cs | 44 +++++++++---------- 3 files changed, 46 insertions(+), 39 deletions(-) diff --git a/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs b/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs index a7370c1fec..fcdbb626d1 100644 --- a/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs +++ b/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs @@ -46,7 +46,10 @@ sealed class BuiltInTypeHandlerResolver : TypeHandlerResolver TextHandler? _nameHandler; TextHandler? _refcursorHandler; TextHandler? _citextHandler; - JsonTextHandler? _jsonbHandler; // Note that old version of PG (and Redshift) don't have jsonb + + // Note that old versions of PG - as well as some PG-like databases (Redshift, CockroachDB) don't have json/jsonb, so we create + // these handlers lazily rather than eagerly. + JsonTextHandler? _jsonbHandler; JsonTextHandler? _jsonHandler; JsonPathHandler? 
_jsonPathHandler; diff --git a/src/Npgsql/TypeMapping/SystemTextJsonTypeHandlerResolver.cs b/src/Npgsql/TypeMapping/SystemTextJsonTypeHandlerResolver.cs index 051d8f546e..a60f53d9c5 100644 --- a/src/Npgsql/TypeMapping/SystemTextJsonTypeHandlerResolver.cs +++ b/src/Npgsql/TypeMapping/SystemTextJsonTypeHandlerResolver.cs @@ -11,46 +11,50 @@ namespace Npgsql.TypeMapping; sealed class SystemTextJsonTypeHandlerResolver : TypeHandlerResolver { + readonly NpgsqlConnector _connector; readonly NpgsqlDatabaseInfo _databaseInfo; - readonly SystemTextJsonHandler? _jsonbHandler; // Note that old version of PG (and Redshift) don't have jsonb - readonly SystemTextJsonHandler? _jsonHandler; + readonly JsonSerializerOptions _serializerOptions; readonly Dictionary? _userClrTypes; + // Note that old versions of PG - as well as some PG-like databases (Redshift, CockroachDB) don't have json/jsonb, so we create + // these handlers lazily rather than eagerly. + SystemTextJsonHandler? _jsonbHandler; + SystemTextJsonHandler? _jsonHandler; + internal SystemTextJsonTypeHandlerResolver( NpgsqlConnector connector, Dictionary? userClrTypes, JsonSerializerOptions serializerOptions) { + _connector = connector; _databaseInfo = connector.DatabaseInfo; - - _jsonbHandler = new SystemTextJsonHandler(PgType("jsonb"), connector.TextEncoding, isJsonb: true, serializerOptions); - _jsonHandler = new SystemTextJsonHandler(PgType("json"), connector.TextEncoding, isJsonb: false, serializerOptions); - + _serializerOptions = serializerOptions; _userClrTypes = userClrTypes; } public override NpgsqlTypeHandler? ResolveByDataTypeName(string typeName) => typeName switch { - "jsonb" => _jsonbHandler, - "json" => _jsonHandler, + "jsonb" => JsonbHandler(), + "json" => JsonHandler(), _ => null }; public override NpgsqlTypeHandler? 
ResolveByClrType(Type type) - => SystemTextJsonTypeMappingResolver.ClrTypeToDataTypeName(type, _userClrTypes) is { } dataTypeName && ResolveByDataTypeName(dataTypeName) is { } handler + => SystemTextJsonTypeMappingResolver.ClrTypeToDataTypeName(type, _userClrTypes) is { } dataTypeName && + ResolveByDataTypeName(dataTypeName) is { } handler ? handler : null; public override NpgsqlTypeHandler? ResolveValueTypeGenerically(T value) - { - if (typeof(T) == typeof(JsonDocument)) - return _jsonbHandler; - if (typeof(T) == typeof(JsonObject) || typeof(T) == typeof(JsonArray)) - return _jsonbHandler; + => typeof(T) == typeof(JsonDocument) || typeof(T) == typeof(JsonObject) || typeof(T) == typeof(JsonArray) + ? JsonbHandler() + : null; - return null; - } + NpgsqlTypeHandler JsonbHandler() + => _jsonbHandler ??= new SystemTextJsonHandler(PgType("jsonb"), _connector.TextEncoding, isJsonb: true, _serializerOptions); + NpgsqlTypeHandler JsonHandler() + => _jsonHandler ??= new SystemTextJsonHandler(PgType("json"), _connector.TextEncoding, isJsonb: false, _serializerOptions); PostgresType PgType(string pgTypeName) => _databaseInfo.GetPostgresTypeByName(pgTypeName); } diff --git a/test/Npgsql.Tests/Types/JsonTests.cs b/test/Npgsql.Tests/Types/JsonTests.cs index 353cb01792..2323430773 100644 --- a/test/Npgsql.Tests/Types/JsonTests.cs +++ b/test/Npgsql.Tests/Types/JsonTests.cs @@ -16,7 +16,7 @@ public class JsonTests : MultiplexingTestBase { [Test] public async Task As_string() - => await AssertType(@"{""K"": ""V""}", @"{""K"": ""V""}", PostgresType, NpgsqlDbType, isDefaultForWriting: false); + => await AssertType("""{"K": "V"}""", """{"K": "V"}""", PostgresType, NpgsqlDbType, isDefaultForWriting: false); [Test] public async Task As_string_long() @@ -36,7 +36,7 @@ public async Task As_string_long() public async Task As_string_with_GetTextReader() { await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand($@"SELECT '{{""K"": 
""V""}}'::{PostgresType}", conn); + await using var cmd = new NpgsqlCommand($$"""SELECT '{"K": "V"}'::{{PostgresType}}""", conn); await using var reader = await cmd.ExecuteReaderAsync(); reader.Read(); using var textReader = await reader.GetTextReaderAsync(0); @@ -45,27 +45,27 @@ public async Task As_string_with_GetTextReader() [Test] public async Task As_char_array() - => await AssertType(@"{""K"": ""V""}".ToCharArray(), @"{""K"": ""V""}", PostgresType, NpgsqlDbType, isDefault: false); + => await AssertType("""{"K": "V"}""".ToCharArray(), """{"K": "V"}""", PostgresType, NpgsqlDbType, isDefault: false); [Test] public async Task As_bytes() - => await AssertType(@"{""K"": ""V""}"u8.ToArray(), @"{""K"": ""V""}", PostgresType, NpgsqlDbType, isDefault: false); + => await AssertType("""{"K": "V"}"""u8.ToArray(), """{"K": "V"}""", PostgresType, NpgsqlDbType, isDefault: false); [Test] public async Task Write_as_ReadOnlyMemory_of_byte() - => await AssertTypeWrite(new ReadOnlyMemory(@"{""K"": ""V""}"u8.ToArray()), @"{""K"": ""V""}", PostgresType, NpgsqlDbType, + => await AssertTypeWrite(new ReadOnlyMemory("""{"K": "V"}"""u8.ToArray()), """{"K": "V"}""", PostgresType, NpgsqlDbType, isDefault: false); [Test] public async Task Write_as_ArraySegment_of_char() - => await AssertTypeWrite(new ArraySegment(@"{""K"": ""V""}".ToCharArray()), @"{""K"": ""V""}", PostgresType, NpgsqlDbType, + => await AssertTypeWrite(new ArraySegment("""{"K": "V"}""".ToCharArray()), """{"K": "V"}""", PostgresType, NpgsqlDbType, isDefault: false); [Test] public async Task As_JsonDocument() => await AssertType( - JsonDocument.Parse(@"{""K"": ""V""}"), - IsJsonb ? @"{""K"": ""V""}" : @"{""K"":""V""}", + JsonDocument.Parse("""{"K": "V"}"""), + IsJsonb ? 
"""{"K": "V"}""" : """{"K":"V"}""", PostgresType, NpgsqlDbType, isDefault: false, @@ -77,8 +77,8 @@ public async Task As_JsonDocument_supported_only_with_SystemTextJson() await using var slimDataSource = new NpgsqlSlimDataSourceBuilder(ConnectionString).Build(); await AssertTypeUnsupported( - JsonDocument.Parse(@"{""K"": ""V""}"), - @"{""K"": ""V""}", + JsonDocument.Parse("""{"K": "V"}"""), + """{"K": "V"}""", PostgresType, slimDataSource); } @@ -88,7 +88,7 @@ await AssertTypeUnsupported( public Task Roundtrip_JsonObject() => AssertType( new JsonObject { ["Bar"] = 8 }, - IsJsonb ? @"{""Bar"": 8}" : @"{""Bar"":8}", + IsJsonb ? """{"Bar": 8}""" : """{"Bar":8}""", PostgresType, NpgsqlDbType, // By default we map JsonObject to jsonb @@ -121,8 +121,8 @@ public async Task As_poco() TemperatureC = 10 }, IsJsonb - ? @"{""Date"": ""2019-09-01T00:00:00"", ""Summary"": ""Partly cloudy"", ""TemperatureC"": 10}" - : @"{""Date"":""2019-09-01T00:00:00"",""TemperatureC"":10,""Summary"":""Partly cloudy""}", + ? """{"Date": "2019-09-01T00:00:00", "Summary": "Partly cloudy", "TemperatureC": 10}""" + : """{"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}""", PostgresType, NpgsqlDbType, isDefault: false); @@ -142,8 +142,8 @@ await AssertType( }, // Warning: in theory jsonb order and whitespace may change across versions IsJsonb - ? @"{""Date"": ""2019-09-01T00:00:00"", ""Summary"": """ + bigString + @""", ""TemperatureC"": 10}" - : @"{""Date"":""2019-09-01T00:00:00"",""TemperatureC"":10,""Summary"":""" + bigString + @"""}", + ? 
$$"""{"Date": "2019-09-01T00:00:00", "Summary": "{{bigString}}", "TemperatureC": 10}""" + : $$"""{"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"{{bigString}}"}""", PostgresType, NpgsqlDbType, isDefault: false); @@ -161,7 +161,7 @@ await AssertTypeUnsupported( Summary = "Partly cloudy", TemperatureC = 10 }, - @"{""Date"": ""2019-09-01T00:00:00"", ""Summary"": ""Partly cloudy"", ""TemperatureC"": 10}", + """{"Date": "2019-09-01T00:00:00", "Summary": "Partly cloudy", "TemperatureC": 10}""", PostgresType, slimDataSource); } @@ -182,14 +182,14 @@ public async Task Can_read_two_json_documents() await using var conn = await OpenConnectionAsync(); JsonDocument car; - await using (var cmd = new NpgsqlCommand(@"SELECT '{""key"" : ""foo""}'::jsonb", conn)) + await using (var cmd = new NpgsqlCommand("""SELECT '{"key" : "foo"}'::jsonb""", conn)) await using (var reader = await cmd.ExecuteReaderAsync()) { reader.Read(); car = reader.GetFieldValue(0); } - await using (var cmd = new NpgsqlCommand(@"SELECT '{""key"" : ""bar""}'::jsonb", conn)) + await using (var cmd = new NpgsqlCommand("""SELECT '{"key" : "bar"}'::jsonb""", conn)) await using (var reader = await cmd.ExecuteReaderAsync()) { reader.Read(); @@ -249,8 +249,8 @@ await AssertTypeWrite( TemperatureC = 10 }, IsJsonb - ? @"{""date"": ""2019-09-01T00:00:00"", ""summary"": ""Partly cloudy"", ""temperatureC"": 10}" - : @"{""date"":""2019-09-01T00:00:00"",""temperatureC"":10,""summary"":""Partly cloudy""}", + ? """{"date": "2019-09-01T00:00:00", "summary": "Partly cloudy", "temperatureC": 10}""" + : """{"date":"2019-09-01T00:00:00","temperatureC":10,"summary":"Partly cloudy"}""", PostgresType, NpgsqlDbType, isDefault: false); @@ -275,8 +275,8 @@ await AssertTypeWrite( TemperatureC = 10 }, IsJsonb - ? @"{""Date"": ""2019-09-01T00:00:00"", ""Summary"": ""Partly cloudy"", ""TemperatureC"": 10}" - : @"{""Date"":""2019-09-01T00:00:00"",""TemperatureC"":10,""Summary"":""Partly cloudy""}", + ? 
"""{"Date": "2019-09-01T00:00:00", "Summary": "Partly cloudy", "TemperatureC": 10}""" + : """{"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}""", PostgresType, NpgsqlDbType, isNpgsqlDbTypeInferredFromClrType: false); From f7364c84cf98fd0c15aa2a446c6750c7616135be Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 28 Jun 2023 10:25:15 +0200 Subject: [PATCH 162/761] Bump OpenTelemetry.API from 1.5.0 to 1.5.1 (#5133) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index a93b905e3a..223c936424 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -4,7 +4,7 @@ - + From 70e9b5499417a9db10c80a2872c09e6c9cee2aea Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 28 Jun 2023 10:25:57 +0200 Subject: [PATCH 163/761] Bump Microsoft.NET.Test.Sdk from 17.6.2 to 17.6.3 (#5132) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 223c936424..f06e33994a 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -23,7 +23,7 @@ - + From 9b03f3b6009f4f141b8f5e0cb9e0cee489f9cc93 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Tue, 4 Jul 2023 14:49:34 +0300 Subject: [PATCH 164/761] Stop running ci tests against pg 10 (#5140) Since it's not supported anymore --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index aa273ec435..3a7e11ab06 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -30,7 +30,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-22.04, windows-2022] - pg_major: [15, 14, 13, 12, 11, 10] + pg_major: [15, 14, 13, 12, 11] config: [Release] test_tfm: [net8.0] include: From 
c36dc1f1f56ad8aa8e05e3297ab02b2417cd6196 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 8 Jul 2023 19:24:09 +0200 Subject: [PATCH 165/761] Bump xunit from 2.4.2 to 2.5.0 (#5148) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index f06e33994a..80d6965d1e 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -25,7 +25,7 @@ - + From 3b417f472c717925e4b61e6bba44c480bb47cb61 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 11 Jul 2023 23:49:31 +0200 Subject: [PATCH 166/761] Bump BenchmarkDotNet from 0.13.5 to 0.13.6 (#5152) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 80d6965d1e..286653a6c3 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -32,7 +32,7 @@ - + From c07ca07954875412b131e9a1d82321814d885e9b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 12 Jul 2023 23:45:30 +0200 Subject: [PATCH 167/761] Bump BenchmarkDotNet.Diagnostics.Windows from 0.13.5 to 0.13.6 (#5153) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 286653a6c3..839d1a87e0 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -34,7 +34,7 @@ - + From 4e2c2b8d593b2fc6943864ce65073afded53adbc Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Fri, 21 Jul 2023 15:23:51 +0300 Subject: [PATCH 168/761] Add a test for case when we cancel a long-running query while reading rows (#5170) --- test/Npgsql.Tests/CommandTests.cs | 36 +++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/test/Npgsql.Tests/CommandTests.cs 
b/test/Npgsql.Tests/CommandTests.cs index aa31c2cabd..554080f16a 100644 --- a/test/Npgsql.Tests/CommandTests.cs +++ b/test/Npgsql.Tests/CommandTests.cs @@ -1476,6 +1476,42 @@ await server await queryTask; } + [Test] + public async Task Cancel_while_reading_from_long_running_query() + { + if (IsMultiplexing) + return; + + await using var conn = await OpenConnectionAsync(); + + await using var cmd = conn.CreateCommand(); + cmd.CommandText = """ +SELECT *, CASE WHEN "t"."i" = 50000 THEN pg_sleep(100) ELSE NULL END +FROM +( + SELECT generate_series(1, 1000000) AS "i" +) AS "t" +"""; + + using (var cts = new CancellationTokenSource()) + await using (var reader = await cmd.ExecuteReaderAsync(cts.Token)) + { + Assert.ThrowsAsync(async () => + { + var i = 0; + while (await reader.ReadAsync(cts.Token)) + { + i++; + if (i == 10) + cts.Cancel(); + } + }); + } + + cmd.CommandText = "SELECT 42"; + Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(42)); + } + #region Logging [Test] From 83aecda6b82ce9f0ca4b80773da883654b9e5db8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Y=C3=BCcel=20K=C4=B1van=C3=A7?= Date: Sat, 22 Jul 2023 20:42:07 +0300 Subject: [PATCH 169/761] Change NpgsqlSnakeCaseNameTranslator to use InvariantCulture by default, accept culture parameter (#5172) Closes #5169 --- .../NpgsqlSnakeCaseNameTranslator.cs | 38 ++++++-- src/Npgsql/PublicAPI.Shipped.txt | 6 +- .../SnakeCaseNameTranslatorTests.cs | 89 ++++++++++++------- 3 files changed, 88 insertions(+), 45 deletions(-) diff --git a/src/Npgsql/NameTranslation/NpgsqlSnakeCaseNameTranslator.cs b/src/Npgsql/NameTranslation/NpgsqlSnakeCaseNameTranslator.cs index a93661d9e1..cdb9bb40a8 100644 --- a/src/Npgsql/NameTranslation/NpgsqlSnakeCaseNameTranslator.cs +++ b/src/Npgsql/NameTranslation/NpgsqlSnakeCaseNameTranslator.cs @@ -11,11 +11,18 @@ namespace Npgsql.NameTranslation; /// public sealed class NpgsqlSnakeCaseNameTranslator : INpgsqlNameTranslator { + readonly CultureInfo _culture; + /// /// Creates a new . 
/// - public NpgsqlSnakeCaseNameTranslator() - : this(false) { } + /// + /// An object that supplies culture-specific casing rules. + /// This will be used when converting names to lower case. + /// If then will be used. + /// + public NpgsqlSnakeCaseNameTranslator(CultureInfo? culture = null) + : this(false, culture) { } /// /// Creates a new . @@ -23,8 +30,16 @@ public NpgsqlSnakeCaseNameTranslator() /// /// Uses the legacy naming convention if , otherwise it uses the new naming convention. /// - public NpgsqlSnakeCaseNameTranslator(bool legacyMode) - => LegacyMode = legacyMode; + /// + /// An object that supplies culture-specific casing rules. + /// This will be used when converting names to lower case. + /// If then will be used. + /// + public NpgsqlSnakeCaseNameTranslator(bool legacyMode, CultureInfo? culture = null) + { + LegacyMode = legacyMode; + _culture = culture ?? CultureInfo.InvariantCulture; + } bool LegacyMode { get; } @@ -42,15 +57,20 @@ public string TranslateMemberName(string clrName) throw new ArgumentNullException(nameof(clrName)); return LegacyMode - ? string.Concat(clrName.Select((c, i) => i > 0 && char.IsUpper(c) ? "_" + c.ToString() : c.ToString())).ToLower() - : ConvertToSnakeCase(clrName); + ? string.Concat(clrName.Select((c, i) => i > 0 && char.IsUpper(c) ? "_" + c.ToString() : c.ToString())).ToLower(_culture) + : ConvertToSnakeCase(clrName, _culture); } /// /// Converts a string to its snake_case equivalent. /// /// The value to convert. - public static string ConvertToSnakeCase(string name) + /// + /// An object that supplies culture-specific casing rules. + /// This will be used when converting names to lower case. + /// If then will be used. 
+ /// + public static string ConvertToSnakeCase(string name, CultureInfo culture) { if (string.IsNullOrEmpty(name)) return name; @@ -84,7 +104,7 @@ public static string ConvertToSnakeCase(string name) builder.Append('_'); } - currentChar = char.ToLower(currentChar); + currentChar = char.ToLower(currentChar, culture); break; case UnicodeCategory.LowercaseLetter: @@ -105,4 +125,4 @@ public static string ConvertToSnakeCase(string name) return builder.ToString(); } -} \ No newline at end of file +} diff --git a/src/Npgsql/PublicAPI.Shipped.txt b/src/Npgsql/PublicAPI.Shipped.txt index 87a3c12c5d..a3ebbabc6b 100644 --- a/src/Npgsql/PublicAPI.Shipped.txt +++ b/src/Npgsql/PublicAPI.Shipped.txt @@ -256,8 +256,8 @@ Npgsql.NameTranslation.NpgsqlNullNameTranslator.NpgsqlNullNameTranslator() -> vo Npgsql.NameTranslation.NpgsqlNullNameTranslator.TranslateMemberName(string! clrName) -> string! Npgsql.NameTranslation.NpgsqlNullNameTranslator.TranslateTypeName(string! clrName) -> string! Npgsql.NameTranslation.NpgsqlSnakeCaseNameTranslator -Npgsql.NameTranslation.NpgsqlSnakeCaseNameTranslator.NpgsqlSnakeCaseNameTranslator() -> void -Npgsql.NameTranslation.NpgsqlSnakeCaseNameTranslator.NpgsqlSnakeCaseNameTranslator(bool legacyMode) -> void +Npgsql.NameTranslation.NpgsqlSnakeCaseNameTranslator.NpgsqlSnakeCaseNameTranslator(System.Globalization.CultureInfo? culture = null) -> void +Npgsql.NameTranslation.NpgsqlSnakeCaseNameTranslator.NpgsqlSnakeCaseNameTranslator(bool legacyMode, System.Globalization.CultureInfo? culture = null) -> void Npgsql.NameTranslation.NpgsqlSnakeCaseNameTranslator.TranslateMemberName(string! clrName) -> string! Npgsql.NameTranslation.NpgsqlSnakeCaseNameTranslator.TranslateTypeName(string! clrName) -> string! 
Npgsql.NoticeEventHandler @@ -1837,7 +1837,7 @@ override sealed Npgsql.NpgsqlParameter.SourceColumnNullMapping.get -> bool override sealed Npgsql.NpgsqlParameter.SourceColumnNullMapping.set -> void override sealed Npgsql.NpgsqlParameter.SourceVersion.get -> System.Data.DataRowVersion override sealed Npgsql.NpgsqlParameter.SourceVersion.set -> void -static Npgsql.NameTranslation.NpgsqlSnakeCaseNameTranslator.ConvertToSnakeCase(string! name) -> string! +static Npgsql.NameTranslation.NpgsqlSnakeCaseNameTranslator.ConvertToSnakeCase(string! name, System.Globalization.CultureInfo! culture) -> string! static Npgsql.NpgsqlCommandBuilder.DeriveParameters(Npgsql.NpgsqlCommand! command) -> void static Npgsql.NpgsqlConnection.ClearAllPools() -> void static Npgsql.NpgsqlConnection.ClearPool(Npgsql.NpgsqlConnection! connection) -> void diff --git a/test/Npgsql.Tests/SnakeCaseNameTranslatorTests.cs b/test/Npgsql.Tests/SnakeCaseNameTranslatorTests.cs index c61d75c628..52de32bccf 100644 --- a/test/Npgsql.Tests/SnakeCaseNameTranslatorTests.cs +++ b/test/Npgsql.Tests/SnakeCaseNameTranslatorTests.cs @@ -1,4 +1,5 @@ using System.Collections.Generic; +using System.Globalization; using System.Linq; using Npgsql.NameTranslation; using NUnit.Framework; @@ -7,45 +8,67 @@ namespace Npgsql.Tests; public class SnakeCaseNameTranslatorTests { + static readonly CultureInfo trTRCulture = new("tr-TR"); + static readonly CultureInfo enUSCulture = new("en-US"); + [Test, TestCaseSource(typeof(SnakeCaseNameTranslatorTests), nameof(TestCases))] - public string TranslateTypeName(string value, bool legacyMode) - => new NpgsqlSnakeCaseNameTranslator(legacyMode).TranslateTypeName(value); + public string TranslateTypeName(CultureInfo? 
culture, string value, bool legacyMode) + => new NpgsqlSnakeCaseNameTranslator(legacyMode, culture).TranslateTypeName(value); [Test, TestCaseSource(typeof(SnakeCaseNameTranslatorTests), nameof(TestCases))] - public string TranslateMemberName(string value, bool legacyMode) - => new NpgsqlSnakeCaseNameTranslator(legacyMode).TranslateMemberName(value); + public string TranslateMemberName(CultureInfo? culture, string value, bool legacyMode) + => new NpgsqlSnakeCaseNameTranslator(legacyMode, culture).TranslateMemberName(value); - static IEnumerable TestCases => new (string value, string legacyResult, string result)[] + static IEnumerable TestCases => new (CultureInfo? culture, string value, string legacyResult, string result)[] { - ("Hi!! This is text. Time to test.", "hi!! _this is text. _time to test.", "hi_this_is_text_time_to_test"), - ("9999-12-31T23:59:59.9999999Z", "9999-12-31_t23:59:59.9999999_z", "9999_12_31t23_59_59_9999999z"), - ("FK_post_simple_blog_BlogId", "f_k_post_simple_blog__blog_id", "fk_post_simple_blog_blog_id"), - ("already_snake_case_ ", "already_snake_case_ ", "already_snake_case_"), - ("SHOUTING_CASE", "s_h_o_u_t_i_n_g__c_a_s_e", "shouting_case"), - ("IsJSONProperty", "is_j_s_o_n_property", "is_json_property"), - ("SnA__ kEcAsE", "sn_a__ k_ec_as_e", "sn_a__k_ec_as_e"), - ("SnA__kEcAsE", "sn_a__k_ec_as_e", "sn_a__k_ec_as_e"), - ("SnAkEcAsE", "sn_ak_ec_as_e", "sn_ak_ec_as_e"), - ("URLValue", "u_r_l_value", "url_value"), - ("Xml2Json", "xml2_json", "xml2json"), - (" IPhone ", " _i_phone ", "i_phone"), - ("I Phone", "i _phone", "i_phone"), - (" IPhone", " _i_phone", "i_phone"), - ("I Phone", "i _phone", "i_phone"), - ("IPhone", "i_phone", "i_phone"), - ("iPhone", "i_phone", "i_phone"), - ("IsCIA", "is_c_i_a", "is_cia"), - ("Person", "person", "person"), - ("ABC123", "a_b_c123", "abc123"), - ("VmQ", "vm_q", "vm_q"), - ("URL", "u_r_l", "url"), - ("AB1", "a_b1", "ab1"), - ("ID", "i_d", "id"), - ("I", "i", "i"), - ("", "", "") + (null, "Hi!! 
This is text. Time to test.", "hi!! _this is text. _time to test.", "hi_this_is_text_time_to_test"), + (null, "9999-12-31T23:59:59.9999999Z", "9999-12-31_t23:59:59.9999999_z", "9999_12_31t23_59_59_9999999z"), + (null, "FK_post_simple_blog_BlogId", "f_k_post_simple_blog__blog_id", "fk_post_simple_blog_blog_id"), + (null, "already_snake_case_ ", "already_snake_case_ ", "already_snake_case_"), + (null, "SHOUTING_CASE", "s_h_o_u_t_i_n_g__c_a_s_e", "shouting_case"), + (null, "IsJSONProperty", "is_j_s_o_n_property", "is_json_property"), + (null, "SnA__ kEcAsE", "sn_a__ k_ec_as_e", "sn_a__k_ec_as_e"), + (null, "SnA__kEcAsE", "sn_a__k_ec_as_e", "sn_a__k_ec_as_e"), + (null, "SnAkEcAsE", "sn_ak_ec_as_e", "sn_ak_ec_as_e"), + (null, "URLValue", "u_r_l_value", "url_value"), + (null, "Xml2Json", "xml2_json", "xml2json"), + (null, " IPhone ", " _i_phone ", "i_phone"), + (null, "I Phone", "i _phone", "i_phone"), + (null, " IPhone", " _i_phone", "i_phone"), + (null, "I Phone", "i _phone", "i_phone"), + (null, "IPhone", "i_phone", "i_phone"), + (null, "iPhone", "i_phone", "i_phone"), + (null, "IsCIA", "is_c_i_a", "is_cia"), + (null, "Person", "person", "person"), + (null, "ABC123", "a_b_c123", "abc123"), + (null, "VmQ", "vm_q", "vm_q"), + (null, "URL", "u_r_l", "url"), + (null, "AB1", "a_b1", "ab1"), + (null, "ID", "i_d", "id"), + (null, "I", "i", "i"), + (null, "", "", ""), + (trTRCulture, "IPhone", "ı_phone", "ı_phone"), // dotless I -> dotless ı + (enUSCulture, "IPhone", "i_phone", "i_phone"), + (CultureInfo.InvariantCulture, "IPhone", "i_phone", "i_phone"), }.SelectMany(x => new[] { - new TestCaseData(x.value, true).Returns(x.legacyResult), - new TestCaseData(x.value, false).Returns(x.result), + new TestCaseData(x.culture, x.value, true).Returns(x.legacyResult), + new TestCaseData(x.culture, x.value, false).Returns(x.result), }); + + [Test, Description("Checks translating a name with letter 'I' in Turkish locale with default setting (Invariant Culture)")] + [SetCulture("tr-TR")] 
+ public void TurkeyTest() + { + var translator = new NpgsqlSnakeCaseNameTranslator(); + var legacyTranslator = new NpgsqlSnakeCaseNameTranslator(true); + + const string clrName = "IPhone"; + const string expected = "i_phone"; + + Assert.AreEqual(expected, translator.TranslateMemberName(clrName)); + Assert.AreEqual(expected, translator.TranslateTypeName(clrName)); + Assert.AreEqual(expected, legacyTranslator.TranslateMemberName(clrName)); + Assert.AreEqual(expected, legacyTranslator.TranslateTypeName(clrName)); + } } From f7d36bffb0d53c95883893b701d505f0d23b08fd Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Mon, 24 Jul 2023 15:44:35 +0200 Subject: [PATCH 170/761] Implement OpenTelemetry metrics (#5158) * Implement Opentelemetry metrics Closes #3960 Closes #5108 --- Npgsql.sln | 1 + src/Npgsql/Internal/NpgsqlConnector.cs | 9 +- src/Npgsql/Internal/NpgsqlReadBuffer.cs | 15 +- src/Npgsql/Internal/NpgsqlWriteBuffer.cs | 10 +- src/Npgsql/MetricsReporter.cs | 237 ++++++++++++++++++++ src/Npgsql/NpgsqlActivitySource.cs | 12 +- src/Npgsql/NpgsqlCommand.cs | 10 +- src/Npgsql/NpgsqlConnection.cs | 8 +- src/Npgsql/NpgsqlDataReader.cs | 4 + src/Npgsql/NpgsqlDataSource.cs | 17 +- src/Npgsql/NpgsqlDataSourceBuilder.cs | 9 + src/Npgsql/NpgsqlDataSourceConfiguration.cs | 1 + src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 6 + src/Npgsql/PoolingDataSource.cs | 131 ++++++----- src/Npgsql/PublicAPI.Unshipped.txt | 4 + 15 files changed, 381 insertions(+), 93 deletions(-) create mode 100644 src/Npgsql/MetricsReporter.cs diff --git a/Npgsql.sln b/Npgsql.sln index 50c6a5c0c8..007681d5bb 100644 --- a/Npgsql.sln +++ b/Npgsql.sln @@ -32,6 +32,7 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Directory.Packages.props = Directory.Packages.props README.md = README.md global.json = global.json + NuGet.config = NuGet.config EndProjectSection EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Npgsql.SourceGenerators", 
"src\Npgsql.SourceGenerators\Npgsql.SourceGenerators.csproj", "{63026A19-60B8-4906-81CB-216F30E8094B}" diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index fb4bdc339e..31eaa0c21a 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -379,7 +379,7 @@ internal NpgsqlConnector(NpgsqlDataSource dataSource, NpgsqlConnection conn) _isKeepAliveEnabled = Settings.KeepAlive > 0; if (_isKeepAliveEnabled) _keepAliveTimer = new Timer(PerformKeepAlive, null, Timeout.Infinite, Timeout.Infinite); - + DataReader = new NpgsqlDataReader(this); // TODO: Not just for automatic preparation anymore... @@ -659,7 +659,7 @@ internal async ValueTask QueryDatabaseState( reader.NextResult(); reader.Read(); } - + _isTransactionReadOnly = reader.GetString(0) != "off"; var databaseState = UpdateDatabaseState(); @@ -1419,6 +1419,7 @@ internal ValueTask ReadMessage( if (error != null) { NpgsqlEventSource.Log.CommandFailed(); + DataSource.MetricsReporter.ReportCommandFailed(); throw error; } @@ -2106,7 +2107,7 @@ internal Exception Break(Exception reason) Monitor.Exit(CleanupLock); } } - + void FullCleanup() { lock (CleanupLock) @@ -2379,7 +2380,7 @@ internal UserAction StartUserAction( // the user query wait if a keepalive is in progress. // If keepalive isn't enabled, we don't use the lock and rely only on the connector's // state (updated via Interlocked.Exchange) to detect concurrent use, on a best-effort basis. - return _isKeepAliveEnabled + return _isKeepAliveEnabled ? 
DoStartUserActionWithKeepAlive(newState, command, cancellationToken, attemptPgCancellation) : DoStartUserAction(newState, command, cancellationToken, attemptPgCancellation); diff --git a/src/Npgsql/Internal/NpgsqlReadBuffer.cs b/src/Npgsql/Internal/NpgsqlReadBuffer.cs index c253f2d9e3..f854f27476 100644 --- a/src/Npgsql/Internal/NpgsqlReadBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlReadBuffer.cs @@ -25,14 +25,11 @@ public sealed partial class NpgsqlReadBuffer : IDisposable #region Fields and Properties public NpgsqlConnection Connection => Connector.Connection!; - internal readonly NpgsqlConnector Connector; - internal Stream Underlying { private get; set; } - readonly Socket? _underlyingSocket; - internal ResettableCancellationTokenSource Cts { get; } + readonly MetricsReporter? _metricsReporter; TimeSpan _preTranslatedTimeout = TimeSpan.Zero; @@ -99,7 +96,7 @@ internal TimeSpan Timeout #region Constructors internal NpgsqlReadBuffer( - NpgsqlConnector connector, + NpgsqlConnector? connector, Stream stream, Socket? socket, int size, @@ -112,9 +109,10 @@ internal NpgsqlReadBuffer( throw new ArgumentOutOfRangeException(nameof(size), size, "Buffer size must be at least " + MinimumSize); } - Connector = connector; + Connector = connector!; // TODO: Clean this up Underlying = stream; _underlyingSocket = socket; + _metricsReporter = connector?.DataSource.MetricsReporter; Cts = new ResettableCancellationTokenSource(); Buffer = usePool ? 
ArrayPool.Shared.Rent(size) : new byte[size]; Size = Buffer.Length; @@ -272,6 +270,7 @@ static Exception CreateException(NpgsqlConnector connector) buffer.Cts.Stop(); NpgsqlEventSource.Log.BytesRead(totalRead); + buffer._metricsReporter?.ReportBytesRead(totalRead); static Exception NpgsqlTimeoutException() => new NpgsqlException("Exception while reading from stream", TimeoutException()); @@ -525,7 +524,7 @@ public ValueTask ReadAsync(Memory output, CancellationToken cancellat ReadPosition += readFromBuffer; return new ValueTask(readFromBuffer); } - + if (output.Length == 0) return new ValueTask(0); @@ -562,7 +561,7 @@ public TextReader GetPreparedTextReader(string str, Stream stream) { if (_preparedTextReader is not { IsDisposed: true }) _preparedTextReader = new PreparedTextReader(); - + _preparedTextReader.Init(str, (ColumnStream)stream); return _preparedTextReader; } diff --git a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs index 9dceb06511..468c42cd75 100644 --- a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs @@ -28,8 +28,8 @@ public sealed partial class NpgsqlWriteBuffer : IDisposable internal Stream Underlying { private get; set; } readonly Socket? _underlyingSocket; - readonly ResettableCancellationTokenSource _timeoutCts; + readonly MetricsReporter? _metricsReporter; /// /// Timeout for sync and async writes @@ -87,7 +87,7 @@ internal TimeSpan Timeout #region Constructors internal NpgsqlWriteBuffer( - NpgsqlConnector connector, + NpgsqlConnector? connector, Stream stream, Socket? socket, int size, @@ -96,9 +96,10 @@ internal NpgsqlWriteBuffer( if (size < MinimumSize) throw new ArgumentOutOfRangeException(nameof(size), size, "Buffer size must be at least " + MinimumSize); - Connector = connector; + Connector = connector!; // TODO: Clean this up; only null when used from PregeneratedMessages, where we don't care. 
Underlying = stream; _underlyingSocket = socket; + _metricsReporter = connector?.DataSource.MetricsReporter!; _timeoutCts = new ResettableCancellationTokenSource(); Buffer = new byte[size]; Size = size; @@ -137,7 +138,7 @@ public async Task Flush(bool async, CancellationToken cancellationToken = defaul { await Underlying.WriteAsync(Buffer, 0, WritePosition, finalCt); await Underlying.FlushAsync(finalCt); - if (Timeout > TimeSpan.Zero) + if (Timeout > TimeSpan.Zero) _timeoutCts.Stop(); } else @@ -170,6 +171,7 @@ public async Task Flush(bool async, CancellationToken cancellationToken = defaul throw Connector.Break(new NpgsqlException("Exception while writing to stream", e)); } NpgsqlEventSource.Log.BytesWritten(WritePosition); + _metricsReporter?.ReportBytesWritten(WritePosition); //NpgsqlEventSource.Log.RequestFailed(); WritePosition = 0; diff --git a/src/Npgsql/MetricsReporter.cs b/src/Npgsql/MetricsReporter.cs new file mode 100644 index 0000000000..39a2328487 --- /dev/null +++ b/src/Npgsql/MetricsReporter.cs @@ -0,0 +1,237 @@ +using System; + +namespace Npgsql; + +#if NET7_0_OR_GREATER +using System.Collections.Generic; +using System.Diagnostics; +using System.Diagnostics.Metrics; +using System.Runtime.InteropServices; +using System.Threading; + +// .NET docs on metric instrumentation: https://learn.microsoft.com/en-us/dotnet/core/diagnostics/metrics-instrumentation +// OpenTelemetry semantic conventions for database metric: https://opentelemetry.io/docs/specs/otel/metrics/semantic_conventions/database-metrics +sealed class MetricsReporter : IDisposable +{ + const string Version = "0.1.0"; + + static readonly Meter Meter; + + static readonly UpDownCounter CommandsExecuting; + static readonly Counter CommandsFailed; + static readonly Histogram CommandDuration; + + static readonly Counter BytesWritten; + static readonly Counter BytesRead; + + static readonly UpDownCounter PendingConnectionRequests; + static readonly UpDownCounter ConnectionTimeouts; + static 
readonly Histogram ConnectionCreateTime; + + readonly NpgsqlDataSource _dataSource; + readonly KeyValuePair _poolNameTag; + + static readonly List Reporters = new(); + + CommandCounters _commandCounters; + + [StructLayout(LayoutKind.Explicit)] + struct CommandCounters + { + [FieldOffset(0)] internal int CommandsStarted; + [FieldOffset(4)] internal int PreparedCommandsStarted; + [FieldOffset(0)] internal long All; + } + + static MetricsReporter() + { + Meter = new("Npgsql", Version); + + // TODO: Add units + CommandsExecuting = + Meter.CreateUpDownCounter("db.client.commands.executing", "The number of currently executing database commands."); + CommandsFailed + = Meter.CreateCounter("db.client.commands.failed", "The number of database commands which have failed."); + CommandDuration + = Meter.CreateHistogram("db.client.commands.duration", "ms", "The duration of database commands, in milliseconds."); + + BytesWritten = Meter.CreateCounter("db.client.commands.bytes_written", "The number of bytes written."); + BytesRead = Meter.CreateCounter("db.client.commands.bytes_read", "The number of bytes read."); + + PendingConnectionRequests = Meter.CreateUpDownCounter( + "db.client.connections.pending_requests", + "The number of pending requests for an open connection, cumulative for the entire pool."); + ConnectionTimeouts = Meter.CreateUpDownCounter( + "db.client.connections.timeouts", + "The number of connection timeouts that have occurred trying to obtain a connection from the pool."); + ConnectionCreateTime + = Meter.CreateHistogram("db.client.connections.create_time", "ms", "The time it took to create a new connection."); + + // Observable metrics; these are for values we already track internally (and efficiently) inside the connection pool implementation. 
+ Meter.CreateObservableUpDownCounter( + "db.client.connections.usage", + GetConnectionUsage, + "The number of connections that are currently in state described by the state attribute."); + + // It's a bit ridiculous to manage "max connections" as an observable counter, given that it never changes for a given pool. + // However, we can't simply report it once at startup, since clients who connect later wouldn't have it. And since reporting it + // repeatedly isn't possible because we need to provide incremental figures, we just manage it as an observable counter. + Meter.CreateObservableUpDownCounter( + "db.client.connections.max", + GetMaxConnections, + "The maximum number of open connections allowed."); + + Meter.CreateObservableUpDownCounter( + "db.client.commands.prepared_ratio", + GetPreparedCommandsRatio, + "%", + "The ratio of prepared command executions."); + } + + public MetricsReporter(NpgsqlDataSource dataSource) + { + _dataSource = dataSource; + _poolNameTag = new KeyValuePair("pool.name", dataSource.Name); + + lock (Reporters) + { + Reporters.Add(this); + Reporters.Sort((x,y) => string.Compare(x._dataSource.Name, y._dataSource.Name, StringComparison.Ordinal)); + } + } + + internal long ReportCommandStart() + { + CommandsExecuting.Add(1, _poolNameTag); + Interlocked.Increment(ref _commandCounters.CommandsStarted); + + return CommandDuration.Enabled ? 
Stopwatch.GetTimestamp() : 0; + } + + internal void ReportCommandStop(long startTimestamp) + { + CommandsExecuting.Add(-1, _poolNameTag); + + if (CommandDuration.Enabled && startTimestamp > 0) + { + var duration = Stopwatch.GetElapsedTime(startTimestamp); + CommandDuration.Record(duration.TotalMilliseconds, _poolNameTag); + } + } + + internal void CommandStartPrepared() => Interlocked.Increment(ref _commandCounters.PreparedCommandsStarted); + + internal void ReportCommandFailed() => CommandsFailed.Add(1, _poolNameTag); + + internal void ReportBytesWritten(long bytesWritten) => BytesWritten.Add(bytesWritten, _poolNameTag); + internal void ReportBytesRead(long bytesRead) => BytesRead.Add(bytesRead, _poolNameTag); + + internal void ReportConnectionPoolTimeout() + => ConnectionTimeouts.Add(1, _poolNameTag); + + internal void ReportPendingConnectionRequestStart() + => PendingConnectionRequests.Add(1, _poolNameTag); + internal void ReportPendingConnectionRequestStop() + => PendingConnectionRequests.Add(-1, _poolNameTag); + + internal void ReportConnectionCreateTime(TimeSpan duration) + => ConnectionCreateTime.Record(duration.TotalMilliseconds, _poolNameTag); + + static IEnumerable> GetConnectionUsage() + { + lock (Reporters) + { + var measurements = new List>(); + + for (var i = 0; i < Reporters.Count; i++) + { + var reporter = Reporters[i]; + + if (reporter._dataSource is PoolingDataSource poolingDataSource) + { + var stats = poolingDataSource.Statistics; + + measurements.Add(new Measurement( + stats.Idle, + reporter._poolNameTag, + new KeyValuePair("state", "idle"))); + + measurements.Add(new Measurement( + stats.Busy, + reporter._poolNameTag, + new KeyValuePair("state", "used"))); + } + } + + return measurements; + } + } + + static IEnumerable> GetMaxConnections() + { + lock (Reporters) + { + var measurements = new List>(); + + foreach (var reporter in Reporters) + { + if (reporter._dataSource is PoolingDataSource poolingDataSource) + { + measurements.Add(new 
Measurement(poolingDataSource.MaxConnections, reporter._poolNameTag)); + } + } + + return measurements; + } + } + + static IEnumerable> GetPreparedCommandsRatio() + { + lock (Reporters) + { + var measurements = new Measurement[Reporters.Count]; + + for (var i = 0; i < Reporters.Count; i++) + { + var reporter = Reporters[i]; + + var counters = new CommandCounters + { + All = Interlocked.Exchange(ref reporter._commandCounters.All, default) + }; + + measurements[i] = new Measurement( + (double)counters.PreparedCommandsStarted / counters.CommandsStarted * 100, + reporter._poolNameTag); + } + + return measurements; + } + } + + public void Dispose() + { + lock (Reporters) + { + Reporters.Remove(this); + } + } +} +#else +// Unfortunately, UpDownCounter is only supported starting with net7.0, and since a lot of the metrics rely on it, +sealed class MetricsReporter : IDisposable +{ + public MetricsReporter(NpgsqlDataSource _) {} + internal long ReportCommandStart() => 0; + internal void ReportCommandStop(long startTimestamp) {} + internal void CommandStartPrepared() {} + internal void ReportCommandFailed() {} + internal void ReportBytesWritten(long bytesWritten) {} + internal void ReportBytesRead(long bytesRead) {} + internal void ReportConnectionPoolTimeout() {} + internal void ReportPendingConnectionRequestStart() {} + internal void ReportPendingConnectionRequestStop() {} + internal void ReportConnectionCreateTime(TimeSpan duration) {} + public void Dispose() {} +} +#endif diff --git a/src/Npgsql/NpgsqlActivitySource.cs b/src/Npgsql/NpgsqlActivitySource.cs index ae6f46956d..224bb2e658 100644 --- a/src/Npgsql/NpgsqlActivitySource.cs +++ b/src/Npgsql/NpgsqlActivitySource.cs @@ -4,20 +4,12 @@ using System.Diagnostics; using System.Net; using System.Net.Sockets; -using System.Reflection; namespace Npgsql; static class NpgsqlActivitySource { - static readonly ActivitySource Source; - - static NpgsqlActivitySource() - { - var assembly = typeof(NpgsqlActivitySource).Assembly; - 
var version = assembly.GetCustomAttribute()?.Version ?? "0.0.0"; - Source = new("Npgsql", version); - } + static readonly ActivitySource Source = new("Npgsql", "0.1.0"); internal static bool IsEnabled => Source.HasListeners(); @@ -125,4 +117,4 @@ internal static void SetException(Activity activity, Exception ex, bool escaped activity.SetTag("otel.status_description", ex is PostgresException pgEx ? pgEx.SqlState : ex.Message); activity.Dispose(); } -} \ No newline at end of file +} diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index 1a39bf291a..b3a4d268bf 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -1342,6 +1342,7 @@ internal virtual async ValueTask ExecuteReader(CommandBehavior Task? sendTask; var validateParameterValues = !behavior.HasFlag(CommandBehavior.SchemaOnly); + long startTimestamp; try { @@ -1360,7 +1361,7 @@ internal virtual async ValueTask ExecuteReader(CommandBehavior ResetPreparation(); goto case false; } - + batchCommand.Parameters.ProcessParameters(dataSource.TypeMapper, validateParameterValues, CommandType); } } @@ -1378,6 +1379,7 @@ internal virtual async ValueTask ExecuteReader(CommandBehavior } NpgsqlEventSource.Log.CommandStartPrepared(); + connector.DataSource.MetricsReporter.CommandStartPrepared(); break; case false: @@ -1414,7 +1416,10 @@ internal virtual async ValueTask ExecuteReader(CommandBehavior { _connectorPreparedOn = connector; if (numPrepared == InternalBatchCommands.Count) + { NpgsqlEventSource.Log.CommandStartPrepared(); + connector.DataSource.MetricsReporter.CommandStartPrepared(); + } } break; @@ -1431,6 +1436,7 @@ internal virtual async ValueTask ExecuteReader(CommandBehavior } NpgsqlEventSource.Log.CommandStart(CommandText); + startTimestamp = connector.DataSource.MetricsReporter.ReportCommandStart(); TraceCommandStart(connector); // If a cancellation is in progress, wait for it to "complete" before proceeding (#615) @@ -1461,7 +1467,7 @@ internal virtual async 
ValueTask ExecuteReader(CommandBehavior // TODO: DRY the following with multiplexing, but be careful with the cancellation registration... var reader = connector.DataReader; - reader.Init(this, behavior, InternalBatchCommands, sendTask); + reader.Init(this, behavior, InternalBatchCommands, startTimestamp, sendTask); connector.CurrentReader = reader; if (async) await reader.NextResultAsync(cancellationToken); diff --git a/src/Npgsql/NpgsqlConnection.cs b/src/Npgsql/NpgsqlConnection.cs index 6d40ab730b..627dcb1443 100644 --- a/src/Npgsql/NpgsqlConnection.cs +++ b/src/Npgsql/NpgsqlConnection.cs @@ -232,7 +232,9 @@ void SetupDataSource() foreach (var hostPool in multiHostConnectorPool.Pools) NpgsqlEventSource.Log.DataSourceCreated(hostPool); else + { NpgsqlEventSource.Log.DataSourceCreated(newDataSource); + } } else newDataSource.Dispose(); @@ -853,7 +855,7 @@ internal Task Close(bool async) return Task.CompletedTask; } - return CloseAsync(async); + return CloseAsync(async); } async Task CloseAsync(bool async) @@ -914,7 +916,7 @@ async Task CloseAsync(bool async) // We're already doing the same in the NpgsqlConnector.Reset for pooled connections // TODO: move reset logic to ConnectorSource.Return connector.Transaction?.UnbindIfNecessary(); - } + } if (Settings.Multiplexing) { @@ -1228,7 +1230,7 @@ public Task BeginBinaryExportAsync(string copyToCommand, C { using (NoSynchronizationContextScope.Enter()) return BeginBinaryExport(copyToCommand, async: true, cancellationToken); - } + } async Task BeginBinaryExport(string copyToCommand, bool async, CancellationToken cancellationToken = default) { diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 8f8ee307b5..f59d8e9de1 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -137,6 +137,7 @@ public sealed class NpgsqlDataReader : DbDataReader, IDbColumnSchemaGenerator internal NpgsqlNestedDataReader? 
CachedFreeNestedDataReader; + long _startTimestamp; readonly ILogger _commandLogger; internal NpgsqlDataReader(NpgsqlConnector connector) @@ -149,6 +150,7 @@ internal void Init( NpgsqlCommand command, CommandBehavior behavior, List statements, + long startTimestamp = 0, Task? sendTask = null) { Command = command; @@ -161,6 +163,7 @@ internal void Init( _sendTask = sendTask; State = ReaderState.BetweenResults; _recordsAffected = null; + _startTimestamp = startTimestamp; } #region Read @@ -1189,6 +1192,7 @@ internal async Task Cleanup(bool async, bool connectionClosing = false, bool isD if (_commandLogger.IsEnabled(LogLevel.Information)) Command.LogExecutingCompleted(Connector, executing: false); NpgsqlEventSource.Log.CommandStop(); + Connector.DataSource.MetricsReporter.ReportCommandStop(_startTimestamp); Connector.EndUserAction(); // The reader shouldn't be unbound, if we're disposing - so the state is set prematurely diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index 64e65bca13..510513d0fb 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -67,6 +67,9 @@ public abstract class NpgsqlDataSource : DbDataSource private protected readonly Dictionary> _pendingEnlistedConnectors = new(); + internal MetricsReporter MetricsReporter { get; } + internal string Name { get; } + internal abstract (int Total, int Idle, int Busy) Statistics { get; } volatile int _isDisposed; @@ -89,7 +92,8 @@ internal NpgsqlDataSource( Configuration = dataSourceConfig; - (LoggingConfiguration, + (var name, + LoggingConfiguration, EncryptionHandler, UserCertificateValidationCallback, ClientCertificatesCallback, @@ -118,6 +122,9 @@ internal NpgsqlDataSource( // in GetPasswordAsync. _passwordRefreshTask = Task.Run(RefreshPassword); } + + Name = name ?? 
ConnectionString; + MetricsReporter = new MetricsReporter(this); } /// @@ -338,7 +345,7 @@ internal DatabaseState UpdateDatabaseState( Debug.Assert(this is not NpgsqlMultiHostDataSource); var databaseStateInfo = _databaseStateInfo; - + if (!ignoreTimeStamp && timeStamp <= databaseStateInfo.TimeStamp) return _databaseStateInfo.State; @@ -414,8 +421,8 @@ protected virtual void DisposeBase() } _passwordProviderTimer?.Dispose(); - _setupMappingsSemaphore.Dispose(); + MetricsReporter.Dispose(); // TODO: This is probably too early, dispose only when all connections have been closed? Clear(); } @@ -463,7 +470,7 @@ private protected void CheckDisposed() } #endregion - + sealed class DatabaseStateInfo { internal readonly DatabaseState State; @@ -472,7 +479,7 @@ sealed class DatabaseStateInfo internal readonly DateTime TimeStamp; public DatabaseStateInfo() : this(default, default, default) {} - + public DatabaseStateInfo(DatabaseState state, NpgsqlTimeout timeout, DateTime timeStamp) => (State, Timeout, TimeStamp) = (state, timeout, timeStamp); } diff --git a/src/Npgsql/NpgsqlDataSourceBuilder.cs b/src/Npgsql/NpgsqlDataSourceBuilder.cs index ace6c49869..356fa48cb3 100644 --- a/src/Npgsql/NpgsqlDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlDataSourceBuilder.cs @@ -19,6 +19,15 @@ public sealed class NpgsqlDataSourceBuilder : INpgsqlTypeMapper { readonly NpgsqlSlimDataSourceBuilder _internalBuilder; + /// + /// A diagnostics name used by Npgsql when generating tracing, logging and metrics. + /// + public string? Name + { + get => _internalBuilder.Name; + set => _internalBuilder.Name = value; + } + /// public INpgsqlNameTranslator DefaultNameTranslator { diff --git a/src/Npgsql/NpgsqlDataSourceConfiguration.cs b/src/Npgsql/NpgsqlDataSourceConfiguration.cs index 6a7b068868..40aec62171 100644 --- a/src/Npgsql/NpgsqlDataSourceConfiguration.cs +++ b/src/Npgsql/NpgsqlDataSourceConfiguration.cs @@ -11,6 +11,7 @@ namespace Npgsql; sealed record NpgsqlDataSourceConfiguration( + string? 
Name, NpgsqlLoggingConfiguration LoggingConfiguration, EncryptionHandler EncryptionHandler, RemoteCertificateValidationCallback? UserCertificateValidationCallback, diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index d3619c0635..97a7dd7a34 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -55,6 +55,11 @@ public sealed class NpgsqlSlimDataSourceBuilder : INpgsqlTypeMapper /// public string ConnectionString => ConnectionStringBuilder.ToString(); + /// + /// A diagnostics name used by Npgsql when generating tracing, logging and metrics. + /// + public string? Name { get; set; } + /// /// Constructs a new , optionally starting out from the given /// . @@ -521,6 +526,7 @@ NpgsqlDataSourceConfiguration PrepareConfiguration() } return new( + Name, _loggerFactory is null ? NpgsqlLoggingConfiguration.NullConfiguration : new NpgsqlLoggingConfiguration(_loggerFactory, _sensitiveDataLoggingEnabled), diff --git a/src/Npgsql/PoolingDataSource.cs b/src/Npgsql/PoolingDataSource.cs index f6a87c9e9b..64e32b3fc0 100644 --- a/src/Npgsql/PoolingDataSource.cs +++ b/src/Npgsql/PoolingDataSource.cs @@ -16,8 +16,9 @@ class PoolingDataSource : NpgsqlDataSource { #region Fields and properties - readonly int _max; - readonly int _min; + internal int MaxConnections { get; } + internal int MinConnections { get; } + readonly TimeSpan _connectionLifetime; volatile int _numConnectors; @@ -93,8 +94,8 @@ internal PoolingDataSource( _idleConnectorReader = idleChannel.Reader; IdleConnectorWriter = idleChannel.Writer; - _max = settings.MaxPoolSize; - _min = settings.MinPoolSize; + MaxConnections = settings.MaxPoolSize; + MinConnections = settings.MinPoolSize; if (settings.ConnectionPruningInterval == 0) throw new ArgumentException("ConnectionPruningInterval can't be 0."); @@ -111,7 +112,7 @@ internal PoolingDataSource( _pruningTimerEnabled = false; _connectionLifetime = 
TimeSpan.FromSeconds(settings.ConnectionLifetime); - Connectors = new NpgsqlConnector[_max]; + Connectors = new NpgsqlConnector[MaxConnections]; _logger = LoggingConfiguration.ConnectionLogger; } @@ -139,56 +140,66 @@ async ValueTask RentAsync( using var linkedSource = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken); var finalToken = linkedSource.Token; linkedSource.CancelAfter(timeout.CheckAndGetTimeLeft()); + MetricsReporter.ReportPendingConnectionRequestStart(); - while (true) + try { - try + while (true) { - if (async) + try { - connector = await _idleConnectorReader.ReadAsync(finalToken); - if (CheckIdleConnector(connector)) - return connector; - } - else - { - // Channels don't have a sync API. To avoid sync-over-async issues, we use a special single- - // thread synchronization context which ensures that callbacks are executed on a dedicated - // thread. - // Note that AsTask isn't safe here for getting the result, since it still causes some continuation code - // to get executed on the TP (which can cause deadlocks). - using (SingleThreadSynchronizationContext.Enter()) - using (var mre = new ManualResetEventSlim()) + if (async) { - _idleConnectorReader.WaitToReadAsync(finalToken).GetAwaiter().OnCompleted(() => mre.Set()); - mre.Wait(finalToken); + connector = await _idleConnectorReader.ReadAsync(finalToken); + if (CheckIdleConnector(connector)) + return connector; + } + else + { + // Channels don't have a sync API. To avoid sync-over-async issues, we use a special single- + // thread synchronization context which ensures that callbacks are executed on a dedicated + // thread. + // Note that AsTask isn't safe here for getting the result, since it still causes some continuation code + // to get executed on the TP (which can cause deadlocks). 
+ using (SingleThreadSynchronizationContext.Enter()) + using (var mre = new ManualResetEventSlim()) + { + _idleConnectorReader.WaitToReadAsync(finalToken).GetAwaiter().OnCompleted(() => mre.Set()); + mre.Wait(finalToken); + } } } - } - catch (OperationCanceledException) - { - cancellationToken.ThrowIfCancellationRequested(); - Debug.Assert(finalToken.IsCancellationRequested); - throw new NpgsqlException( - $"The connection pool has been exhausted, either raise 'Max Pool Size' (currently {_max}) " + - $"or 'Timeout' (currently {Settings.Timeout} seconds) in your connection string.", - new TimeoutException()); - } - catch (ChannelClosedException) - { - throw new NpgsqlException("The connection pool has been shut down."); - } + catch (OperationCanceledException) + { + cancellationToken.ThrowIfCancellationRequested(); + Debug.Assert(finalToken.IsCancellationRequested); + + MetricsReporter.ReportConnectionPoolTimeout(); + throw new NpgsqlException( + $"The connection pool has been exhausted, either raise 'Max Pool Size' (currently {MaxConnections}) " + + $"or 'Timeout' (currently {Settings.Timeout} seconds) in your connection string.", + new TimeoutException()); + } + catch (ChannelClosedException) + { + throw new NpgsqlException("The connection pool has been shut down."); + } - // If we're here, our waiting attempt on the idle connector channel was released with a null - // (or bad connector), or we're in sync mode. Check again if a new idle connector has appeared since we last checked. - if (TryGetIdleConnector(out connector)) - return connector; + // If we're here, our waiting attempt on the idle connector channel was released with a null + // (or bad connector), or we're in sync mode. Check again if a new idle connector has appeared since we last checked. 
+ if (TryGetIdleConnector(out connector)) + return connector; - // We might have closed a connector in the meantime and no longer be at max capacity - // so try to open a new connector and if that fails, loop again. - connector = await OpenNewConnector(conn, timeout, async, cancellationToken); - if (connector != null) - return connector; + // We might have closed a connector in the meantime and no longer be at max capacity + // so try to open a new connector and if that fails, loop again. + connector = await OpenNewConnector(conn, timeout, async, cancellationToken); + if (connector != null) + return connector; + } + } + finally + { + MetricsReporter.ReportPendingConnectionRequestStop(); } } } @@ -250,7 +261,7 @@ bool CheckIdleConnector([NotNullWhen(true)] NpgsqlConnector? connector) NpgsqlConnection conn, NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken) { // As long as we're under max capacity, attempt to increase the connector count and open a new connection. - for (var numConnectors = _numConnectors; numConnectors < _max; numConnectors = _numConnectors) + for (var numConnectors = _numConnectors; numConnectors < MaxConnections; numConnectors = _numConnectors) { // Note that we purposefully don't use SpinWait for this: https://github.com/dotnet/coreclr/pull/21437 if (Interlocked.CompareExchange(ref _numConnectors, numConnectors + 1, numConnectors) != numConnectors) @@ -259,22 +270,28 @@ bool CheckIdleConnector([NotNullWhen(true)] NpgsqlConnector? connector) try { // We've managed to increase the open counter, open a physical connections. 
+#if NET7_0_OR_GREATER + var startTime = Stopwatch.GetTimestamp(); +#endif var connector = new NpgsqlConnector(this, conn) { ClearCounter = _clearCounter }; await connector.Open(timeout, async, cancellationToken); +#if NET7_0_OR_GREATER + MetricsReporter.ReportConnectionCreateTime(Stopwatch.GetElapsedTime(startTime)); +#endif var i = 0; - for (; i < _max; i++) + for (; i < MaxConnections; i++) if (Interlocked.CompareExchange(ref Connectors[i], connector, null) == null) break; - Debug.Assert(i < _max, $"Could not find free slot in {Connectors} when opening."); - if (i == _max) + Debug.Assert(i < MaxConnections, $"Could not find free slot in {Connectors} when opening."); + if (i == MaxConnections) throw new NpgsqlException($"Could not find free slot in {Connectors} when opening. Please report a bug."); // Only start pruning if we've incremented open count past _min. // Note that we don't do it only once, on equality, because the thread which incremented open count past _min might get exception // on NpgsqlConnector.Open due to timeout, CancellationToken or other reasons. - if (numConnectors >= _min) + if (numConnectors >= MinConnections) UpdatePruningTimer(); return connector; @@ -357,14 +374,14 @@ void CloseConnector(NpgsqlConnector connector) } var i = 0; - for (; i < _max; i++) + for (; i < MaxConnections; i++) if (Interlocked.CompareExchange(ref Connectors[i], null, connector) == connector) break; // If CloseConnector is being called from within OpenNewConnector (e.g. an error happened during a connection initializer which // causes the connector to Break, and therefore return the connector), then we haven't yet added the connector to Connectors. // In this case, there's no state to revert here (that's all taken care of in OpenNewConnector), skip it. 
- if (i == _max) + if (i == MaxConnections) return; var numConnectors = Interlocked.Decrement(ref _numConnectors); @@ -376,7 +393,7 @@ void CloseConnector(NpgsqlConnector connector) IdleConnectorWriter.TryWrite(null); // Only turn off the timer one time, when it was this Close that brought Open back to _min. - if (numConnectors == _min) + if (numConnectors == MinConnections) UpdatePruningTimer(); } @@ -392,12 +409,12 @@ void UpdatePruningTimer() lock (_pruningTimer) { var numConnectors = _numConnectors; - if (numConnectors > _min && !_pruningTimerEnabled) + if (numConnectors > MinConnections && !_pruningTimerEnabled) { _pruningTimerEnabled = true; _pruningTimer.Change(_pruningSamplingInterval, Timeout.InfiniteTimeSpan); } - else if (numConnectors <= _min && _pruningTimerEnabled) + else if (numConnectors <= MinConnections && _pruningTimerEnabled) { _pruningTimer.Change(Timeout.Infinite, Timeout.Infinite); _pruningSampleIndex = 0; @@ -434,7 +451,7 @@ static void PruneIdleConnectors(object? state) } while (toPrune > 0 && - pool._numConnectors > pool._min && + pool._numConnectors > pool.MinConnections && pool._idleConnectorReader.TryRead(out var connector) && connector != null) { diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index a6e371bfe1..6c86897df3 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -4,6 +4,8 @@ Npgsql.NpgsqlSlimDataSourceBuilder.EnableFullTextSearch() -> Npgsql.NpgsqlSlimDa Npgsql.NpgsqlSlimDataSourceBuilder.EnableRecords() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlBinaryImporter.WriteRow(params object?[]! values) -> void Npgsql.NpgsqlBinaryImporter.WriteRowAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken), params object?[]! values) -> System.Threading.Tasks.Task! +Npgsql.NpgsqlDataSourceBuilder.Name.get -> string? 
+Npgsql.NpgsqlDataSourceBuilder.Name.set -> void Npgsql.NpgsqlDataSourceBuilder.UseRootCertificate(System.Security.Cryptography.X509Certificates.X509Certificate2? rootCertificate) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.UseRootCertificateCallback(System.Func? rootCertificateCallback) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.UseSystemTextJson(System.Text.Json.JsonSerializerOptions? serializerOptions = null, System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> Npgsql.NpgsqlDataSourceBuilder! @@ -21,6 +23,8 @@ Npgsql.NpgsqlSlimDataSourceBuilder.EnableRanges() -> Npgsql.NpgsqlSlimDataSource Npgsql.NpgsqlSlimDataSourceBuilder.MapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! Npgsql.NpgsqlSlimDataSourceBuilder.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! Npgsql.NpgsqlSlimDataSourceBuilder.MapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +Npgsql.NpgsqlSlimDataSourceBuilder.Name.get -> string? +Npgsql.NpgsqlSlimDataSourceBuilder.Name.set -> void Npgsql.NpgsqlSlimDataSourceBuilder.NpgsqlSlimDataSourceBuilder(string? connectionString = null) -> void Npgsql.NpgsqlSlimDataSourceBuilder.UnmapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool Npgsql.NpgsqlSlimDataSourceBuilder.UnmapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? 
nameTranslator = null) -> bool From b3282aa6124184162b66dd4ab828041f872bc602 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Mon, 24 Jul 2023 17:12:52 +0300 Subject: [PATCH 171/761] Add channel binding parameter to connection string (#5171) Closes #5138 --- src/Npgsql/Internal/NpgsqlConnector.Auth.cs | 39 ++++++++++++++++---- src/Npgsql/NpgsqlConnectionStringBuilder.cs | 38 +++++++++++++++++++ src/Npgsql/PublicAPI.Unshipped.txt | 6 +++ test/Npgsql.Tests/SecurityTests.cs | 41 +++++++++++++++++++++ 4 files changed, 117 insertions(+), 7 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs index 8931a75462..d0d3e135e8 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs @@ -71,28 +71,40 @@ async Task AuthenticateCleartext(string username, bool async, CancellationToken async Task AuthenticateSASL(List mechanisms, string username, bool async, CancellationToken cancellationToken) { // At the time of writing PostgreSQL only supports SCRAM-SHA-256 and SCRAM-SHA-256-PLUS - var supportsSha256 = mechanisms.Contains("SCRAM-SHA-256"); - var supportsSha256Plus = mechanisms.Contains("SCRAM-SHA-256-PLUS"); - if (!supportsSha256 && !supportsSha256Plus) + var serverSupportsSha256 = mechanisms.Contains("SCRAM-SHA-256"); + var clientSupportsSha256 = serverSupportsSha256 && Settings.ChannelBinding != ChannelBinding.Require; + var serverSupportsSha256Plus = mechanisms.Contains("SCRAM-SHA-256-PLUS"); + var clientSupportsSha256Plus = serverSupportsSha256Plus && Settings.ChannelBinding != ChannelBinding.Disable; + if (!clientSupportsSha256 && !clientSupportsSha256Plus) + { + if (serverSupportsSha256 && Settings.ChannelBinding == ChannelBinding.Require) + throw new NpgsqlException($"Couldn't connect because {nameof(ChannelBinding)} is set to {nameof(ChannelBinding.Require)} " + + "but the server doesn't support SCRAM-SHA-256-PLUS"); + if (serverSupportsSha256Plus && 
Settings.ChannelBinding == ChannelBinding.Disable) + throw new NpgsqlException($"Couldn't connect because {nameof(ChannelBinding)} is set to {nameof(ChannelBinding.Disable)} " + + "but the server doesn't support SCRAM-SHA-256"); + throw new NpgsqlException("No supported SASL mechanism found (only SCRAM-SHA-256 and SCRAM-SHA-256-PLUS are supported for now). " + "Mechanisms received from server: " + string.Join(", ", mechanisms)); + } var mechanism = string.Empty; var cbindFlag = string.Empty; var cbind = string.Empty; var successfulBind = false; - if (supportsSha256Plus) + if (clientSupportsSha256Plus) DataSource.EncryptionHandler.AuthenticateSASLSha256Plus(this, ref mechanism, ref cbindFlag, ref cbind, ref successfulBind); - if (!successfulBind && supportsSha256) + if (!successfulBind && serverSupportsSha256) { mechanism = "SCRAM-SHA-256"; // We can get here if PostgreSQL supports only SCRAM-SHA-256 or there was an error while binding to SCRAM-SHA-256-PLUS + // Or the user specifically requested to not use bindings // So, we set 'n' (client does not support binding) if there was an error while binding // or 'y' (client supports but server doesn't) in other case - cbindFlag = supportsSha256Plus ? "n" : "y"; - cbind = supportsSha256Plus ? "biws" : "eSws"; + cbindFlag = serverSupportsSha256Plus ? "n" : "y"; + cbind = serverSupportsSha256Plus ? "biws" : "eSws"; successfulBind = true; IsScram = true; } @@ -167,6 +179,19 @@ static string GetNonce() internal void AuthenticateSASLSha256Plus(ref string mechanism, ref string cbindFlag, ref string cbind, ref bool successfulBind) { + // The check below is copied from libpq (with commentary) + // https://github.com/postgres/postgres/blob/98640f960eb9ed80cf90de3ef5d2e829b785b3eb/src/interfaces/libpq/fe-auth.c#L507-L517 + + // The server offered SCRAM-SHA-256-PLUS, but the connection + // is not SSL-encrypted. That's not sane. Perhaps SSL was + // stripped by a proxy? 
There's no point in continuing, + // because the server will reject the connection anyway if we + // try authenticate without channel binding even though both + // the client and server supported it. The SCRAM exchange + // checks for that, to prevent downgrade attacks. + if (!IsSecure) + throw new NpgsqlException("Server offered SCRAM-SHA-256-PLUS authentication over a non-SSL connection"); + var sslStream = (SslStream)_stream; if (sslStream.RemoteCertificate is null) { diff --git a/src/Npgsql/NpgsqlConnectionStringBuilder.cs b/src/Npgsql/NpgsqlConnectionStringBuilder.cs index 3370056a94..ab6caf82a2 100644 --- a/src/Npgsql/NpgsqlConnectionStringBuilder.cs +++ b/src/Npgsql/NpgsqlConnectionStringBuilder.cs @@ -653,6 +653,25 @@ public bool IncludeErrorDetail } bool _includeErrorDetail; + /// + /// Controls whether channel binding is required, disabled or preferred, depending on server support. + /// + [Category("Security")] + [Description("Controls whether channel binding is required, disabled or preferred, depending on server support.")] + [DisplayName("Channel Binding")] + [DefaultValue(ChannelBinding.Prefer)] + [NpgsqlConnectionStringProperty] + public ChannelBinding ChannelBinding + { + get => _channelBinding; + set + { + _channelBinding = value; + SetValue(nameof(ChannelBinding), value); + } + } + ChannelBinding _channelBinding; + #endregion #region Properties - Pooling @@ -1808,6 +1827,25 @@ public enum SslMode VerifyFull } +/// +/// Specifies how to manage channel binding. +/// +public enum ChannelBinding +{ + /// + /// Channel binding is disabled. If the server requires channel binding, the connection will fail. + /// + Disable, + /// + /// Prefer channel binding if the server allows it, but connect without it if not. + /// + Prefer, + /// + /// Fail the connection if the server doesn't support channel binding. 
+ /// + Require +} + /// /// Specifies how the mapping of arrays of /// value types diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index 6c86897df3..badf54239b 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -1,5 +1,11 @@ #nullable enable const Npgsql.PostgresErrorCodes.IdleSessionTimeout = "57P05" -> string! +Npgsql.ChannelBinding +Npgsql.ChannelBinding.Disable = 0 -> Npgsql.ChannelBinding +Npgsql.ChannelBinding.Prefer = 1 -> Npgsql.ChannelBinding +Npgsql.ChannelBinding.Require = 2 -> Npgsql.ChannelBinding +Npgsql.NpgsqlConnectionStringBuilder.ChannelBinding.get -> Npgsql.ChannelBinding +Npgsql.NpgsqlConnectionStringBuilder.ChannelBinding.set -> void Npgsql.NpgsqlSlimDataSourceBuilder.EnableFullTextSearch() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableRecords() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlBinaryImporter.WriteRow(params object?[]! values) -> void diff --git a/test/Npgsql.Tests/SecurityTests.cs b/test/Npgsql.Tests/SecurityTests.cs index 49e6030c34..56f8f69efa 100644 --- a/test/Npgsql.Tests/SecurityTests.cs +++ b/test/Npgsql.Tests/SecurityTests.cs @@ -181,6 +181,47 @@ public void ScramPlus() // scram-sha-256-plus only works beginning from PostgreSQL 11 if (conn.PostgreSqlVersion.Major >= 11) { + Assert.That(conn.IsScram, Is.False); + Assert.That(conn.IsScramPlus, Is.True); + } + else + { + Assert.That(conn.IsScram, Is.True); + Assert.That(conn.IsScramPlus, Is.False); + } + } + catch (Exception e) when (!IsOnBuildServer) + { + Console.WriteLine(e); + Assert.Ignore("scram-sha-256-plus doesn't seem to be set up"); + } + } + + [Test] + public void ScramPlus_channel_binding([Values] ChannelBinding channelBinding) + { + try + { + using var dataSource = CreateDataSource(csb => + { + csb.SslMode = SslMode.Require; + csb.Username = "npgsql_tests_scram"; + csb.Password = "npgsql_tests_scram"; + csb.TrustServerCertificate = true; + 
csb.ChannelBinding = channelBinding; + }); + // scram-sha-256-plus only works beginning from PostgreSQL 11 + MinimumPgVersion(dataSource, "11.0"); + using var conn = dataSource.OpenConnection(); + + if (channelBinding == ChannelBinding.Disable) + { + Assert.That(conn.IsScram, Is.True); + Assert.That(conn.IsScramPlus, Is.False); + } + else + { + Assert.That(conn.IsScram, Is.False); Assert.That(conn.IsScramPlus, Is.True); } } From 0124e0ae2b29e0fd4410e7cbeeb1565ee967dd42 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Tue, 25 Jul 2023 11:29:03 +0200 Subject: [PATCH 172/761] Enable the new OpenTelemetry metrics for net6.0 (#5174) Closes #3960 --- Directory.Packages.props | 4 ++-- src/Npgsql/MetricsReporter.cs | 14 ++++++++++++-- src/Npgsql/Npgsql.csproj | 2 +- 3 files changed, 15 insertions(+), 5 deletions(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 839d1a87e0..4bdb731496 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -1,6 +1,6 @@ - + @@ -49,10 +49,10 @@ - + diff --git a/src/Npgsql/MetricsReporter.cs b/src/Npgsql/MetricsReporter.cs index 39a2328487..120f99e496 100644 --- a/src/Npgsql/MetricsReporter.cs +++ b/src/Npgsql/MetricsReporter.cs @@ -2,7 +2,7 @@ namespace Npgsql; -#if NET7_0_OR_GREATER +#if NET6_0_OR_GREATER using System.Collections.Generic; using System.Diagnostics; using System.Diagnostics.Metrics; @@ -114,7 +114,11 @@ internal void ReportCommandStop(long startTimestamp) if (CommandDuration.Enabled && startTimestamp > 0) { +#if NET7_0_OR_GREATER var duration = Stopwatch.GetElapsedTime(startTimestamp); +#else + var duration = new TimeSpan((long)((Stopwatch.GetTimestamp() - startTimestamp) * StopWatchTickFrequency)); +#endif CommandDuration.Record(duration.TotalMilliseconds, _poolNameTag); } } @@ -216,9 +220,15 @@ public void Dispose() Reporters.Remove(this); } } + +#if !NET7_0_OR_GREATER + const long TicksPerMicrosecond = 10; + const long TicksPerMillisecond = TicksPerMicrosecond * 1000; + const long 
TicksPerSecond = TicksPerMillisecond * 1000; // 10,000,000 + static readonly double StopWatchTickFrequency = (double)TicksPerSecond / Stopwatch.Frequency; +#endif } #else -// Unfortunately, UpDownCounter is only supported starting with net7.0, and since a lot of the metrics rely on it, sealed class MetricsReporter : IDisposable { public MetricsReporter(NpgsqlDataSource _) {} diff --git a/src/Npgsql/Npgsql.csproj b/src/Npgsql/Npgsql.csproj index 2b284d6c46..0c1c0600ff 100644 --- a/src/Npgsql/Npgsql.csproj +++ b/src/Npgsql/Npgsql.csproj @@ -30,11 +30,11 @@ - + From b4974532f2c0482470885434201c7be4ac60569e Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Wed, 26 Jul 2023 16:19:26 +0300 Subject: [PATCH 173/761] Obsolete Trust Server Certificate connection string parameter (#5176) Closes #4010 --- src/Npgsql/Internal/NpgsqlConnector.cs | 3 - src/Npgsql/NpgsqlConnectionStringBuilder.cs | 39 ++--- .../Properties/NpgsqlStrings.Designer.cs | 165 ++++++++++++------ src/Npgsql/Properties/NpgsqlStrings.resx | 6 - test/Npgsql.Tests/SecurityTests.cs | 30 ---- 5 files changed, 128 insertions(+), 115 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 31eaa0c21a..e9cc5b76ec 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -897,9 +897,6 @@ internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, } else if (sslMode is SslMode.Prefer or SslMode.Require) { - if (isFirstAttempt && sslMode is SslMode.Require && !Settings.TrustServerCertificate) - throw new ArgumentException(NpgsqlStrings.CannotUseSslModeRequireWithoutTrustServerCertificate); - certificateValidationCallback = SslTrustServerValidation; checkCertificateRevocation = false; } diff --git a/src/Npgsql/NpgsqlConnectionStringBuilder.cs b/src/Npgsql/NpgsqlConnectionStringBuilder.cs index ab6caf82a2..b927807844 100644 --- a/src/Npgsql/NpgsqlConnectionStringBuilder.cs +++ 
b/src/Npgsql/NpgsqlConnectionStringBuilder.cs @@ -450,24 +450,6 @@ public SslMode SslMode } SslMode _sslMode; - /// - /// Whether to trust the server certificate without validating it. - /// - [Category("Security")] - [Description("Whether to trust the server certificate without validating it.")] - [DisplayName("Trust Server Certificate")] - [NpgsqlConnectionStringProperty] - public bool TrustServerCertificate - { - get => _trustServerCertificate; - set - { - _trustServerCertificate = value; - SetValue(nameof(TrustServerCertificate), value); - } - } - bool _trustServerCertificate; - /// /// Location of a client certificate to be sent to the server. /// @@ -1573,6 +1555,25 @@ public bool IncludeErrorDetails set => IncludeErrorDetail = value; } + /// + /// Whether to trust the server certificate without validating it. + /// + [Category("Security")] + [Description("Whether to trust the server certificate without validating it.")] + [DisplayName("Trust Server Certificate")] + [Obsolete("The TrustServerCertificate parameter is no longer needed and does nothing.")] + [NpgsqlConnectionStringProperty] + public bool TrustServerCertificate + { + get => _trustServerCertificate; + set + { + _trustServerCertificate = value; + SetValue(nameof(TrustServerCertificate), value); + } + } + bool _trustServerCertificate; + #endregion #region Misc @@ -1583,8 +1584,6 @@ internal void PostProcessAndValidate() throw new ArgumentException("Host can't be null"); if (Multiplexing && !Pooling) throw new ArgumentException("Pooling must be on to use multiplexing"); - if (TrustServerCertificate && SslMode is SslMode.Allow or SslMode.VerifyCA or SslMode.VerifyFull) - throw new ArgumentException(NpgsqlStrings.CannotUseTrustServerCertificate); if (!Host.Contains(',')) { diff --git a/src/Npgsql/Properties/NpgsqlStrings.Designer.cs b/src/Npgsql/Properties/NpgsqlStrings.Designer.cs index eac1b386ac..5f0847543f 100644 --- a/src/Npgsql/Properties/NpgsqlStrings.Designer.cs +++ 
b/src/Npgsql/Properties/NpgsqlStrings.Designer.cs @@ -11,32 +11,46 @@ namespace Npgsql.Properties { using System; - [System.CodeDom.Compiler.GeneratedCodeAttribute("System.Resources.Tools.StronglyTypedResourceBuilder", "4.0.0.0")] - [System.Diagnostics.DebuggerNonUserCodeAttribute()] - [System.Runtime.CompilerServices.CompilerGeneratedAttribute()] + /// + /// A strongly-typed resource class, for looking up localized strings, etc. + /// + // This class was auto-generated by the StronglyTypedResourceBuilder + // class via a tool like ResGen or Visual Studio. + // To add or remove a member, edit your .ResX file then rerun ResGen + // with the /str option, or rebuild your VS project. + [global::System.CodeDom.Compiler.GeneratedCodeAttribute("System.Resources.Tools.StronglyTypedResourceBuilder", "4.0.0.0")] + [global::System.Diagnostics.DebuggerNonUserCodeAttribute()] + [global::System.Runtime.CompilerServices.CompilerGeneratedAttribute()] internal class NpgsqlStrings { - private static System.Resources.ResourceManager resourceMan; + private static global::System.Resources.ResourceManager resourceMan; - private static System.Globalization.CultureInfo resourceCulture; + private static global::System.Globalization.CultureInfo resourceCulture; - [System.Diagnostics.CodeAnalysis.SuppressMessageAttribute("Microsoft.Performance", "CA1811:AvoidUncalledPrivateCode")] + [global::System.Diagnostics.CodeAnalysis.SuppressMessageAttribute("Microsoft.Performance", "CA1811:AvoidUncalledPrivateCode")] internal NpgsqlStrings() { } - [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Advanced)] - internal static System.Resources.ResourceManager ResourceManager { + /// + /// Returns the cached ResourceManager instance used by this class. 
+ /// + [global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)] + internal static global::System.Resources.ResourceManager ResourceManager { get { - if (object.Equals(null, resourceMan)) { - System.Resources.ResourceManager temp = new System.Resources.ResourceManager("Npgsql.Properties.NpgsqlStrings", typeof(NpgsqlStrings).Assembly); + if (object.ReferenceEquals(resourceMan, null)) { + global::System.Resources.ResourceManager temp = new global::System.Resources.ResourceManager("Npgsql.Properties.NpgsqlStrings", typeof(NpgsqlStrings).Assembly); resourceMan = temp; } return resourceMan; } } - [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Advanced)] - internal static System.Globalization.CultureInfo Culture { + /// + /// Overrides the current thread's CurrentUICulture property for all + /// resource lookups using this strongly typed resource class. + /// + [global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)] + internal static global::System.Globalization.CultureInfo Culture { get { return resourceCulture; } @@ -45,117 +59,156 @@ internal static System.Globalization.CultureInfo Culture { } } - internal static string CannotUseSslVerifyWithUserCallback { - get { - return ResourceManager.GetString("CannotUseSslVerifyWithUserCallback", resourceCulture); - } - } - - internal static string CannotUseSslRootCertificateWithUserCallback { - get { - return ResourceManager.GetString("CannotUseSslRootCertificateWithUserCallback", resourceCulture); - } - } - - internal static string CannotUseSslModeRequireWithoutTrustServerCertificate { + /// + /// Looks up a localized string similar to '{0}' must be positive.. 
+ /// + internal static string ArgumentMustBePositive { get { - return ResourceManager.GetString("CannotUseSslModeRequireWithoutTrustServerCertificate", resourceCulture); + return ResourceManager.GetString("ArgumentMustBePositive", resourceCulture); } } - internal static string CannotUseTrustServerCertificate { + /// + /// Looks up a localized string similar to Cannot read infinity value since Npgsql.DisableDateTimeInfinityConversions is enabled.. + /// + internal static string CannotReadInfinityValue { get { - return ResourceManager.GetString("CannotUseTrustServerCertificate", resourceCulture); + return ResourceManager.GetString("CannotReadInfinityValue", resourceCulture); } } - internal static string EncryptionDisabled { + /// + /// Looks up a localized string similar to Cannot read interval values with non-zero months as TimeSpan, since that type doesn't support months. Consider using NodaTime Period which better corresponds to PostgreSQL interval, or read the value as NpgsqlInterval, or transform the interval to not contain months or years in PostgreSQL before reading it.. + /// + internal static string CannotReadIntervalWithMonthsAsTimeSpan { get { - return ResourceManager.GetString("EncryptionDisabled", resourceCulture); + return ResourceManager.GetString("CannotReadIntervalWithMonthsAsTimeSpan", resourceCulture); } } - internal static string NoMultirangeTypeFound { + /// + /// Looks up a localized string similar to When registering a password provider, a password or password file may not be set.. + /// + internal static string CannotSetBothPasswordProviderAndPassword { get { - return ResourceManager.GetString("NoMultirangeTypeFound", resourceCulture); + return ResourceManager.GetString("CannotSetBothPasswordProviderAndPassword", resourceCulture); } } - internal static string NotSupportedOnDataSourceCommand { + /// + /// Looks up a localized string similar to When creating a multi-host data source, TargetSessionAttributes cannot be specified. 
Create without TargetSessionAttributes, and then obtain DataSource wrappers from it. Consult the docs for more information.. + /// + internal static string CannotSpecifyTargetSessionAttributes { get { - return ResourceManager.GetString("NotSupportedOnDataSourceCommand", resourceCulture); + return ResourceManager.GetString("CannotSpecifyTargetSessionAttributes", resourceCulture); } } - internal static string NotSupportedOnDataSourceBatch { + /// + /// Looks up a localized string similar to RootCertificate cannot be used in conjunction with UserCertificateValidationCallback; when registering a validation callback, perform whatever validation you require in that callback.. + /// + internal static string CannotUseSslRootCertificateWithUserCallback { get { - return ResourceManager.GetString("NotSupportedOnDataSourceBatch", resourceCulture); + return ResourceManager.GetString("CannotUseSslRootCertificateWithUserCallback", resourceCulture); } } - internal static string CannotSetBothPasswordProviderAndPassword { + /// + /// Looks up a localized string similar to SslMode.{0} cannot be used in conjunction with UserCertificateValidationCallback; when registering a validation callback, perform whatever validation you require in that callback.. + /// + internal static string CannotUseSslVerifyWithUserCallback { get { - return ResourceManager.GetString("CannotSetBothPasswordProviderAndPassword", resourceCulture); + return ResourceManager.GetString("CannotUseSslVerifyWithUserCallback", resourceCulture); } } - internal static string PasswordProviderMissing { + /// + /// Looks up a localized string similar to ValidationRootCertificateCallback cannot be used in conjunction with UserCertificateValidationCallback; when registering a validation callback, perform whatever validation you require in that callback.. 
+ /// + internal static string CannotUseValidationRootCertificateCallbackWithUserCallback { get { - return ResourceManager.GetString("PasswordProviderMissing", resourceCulture); + return ResourceManager.GetString("CannotUseValidationRootCertificateCallbackWithUserCallback", resourceCulture); } } - internal static string ArgumentMustBePositive { + /// + /// Looks up a localized string similar to NpgsqlSlimDataSourceBuilder is being used, and encryption hasn't been enabled, call EnableEncryption() on NpgsqlSlimDataSourceBuilder to enable it.. + /// + internal static string EncryptionDisabled { get { - return ResourceManager.GetString("ArgumentMustBePositive", resourceCulture); + return ResourceManager.GetString("EncryptionDisabled", resourceCulture); } } - internal static string CannotSpecifyTargetSessionAttributes { + /// + /// Looks up a localized string similar to Full-text search isn't enabled; please call {0} on {1} to enable full-text search.. + /// + internal static string FullTextSearchNotEnabled { get { - return ResourceManager.GetString("CannotSpecifyTargetSessionAttributes", resourceCulture); + return ResourceManager.GetString("FullTextSearchNotEnabled", resourceCulture); } } - internal static string CannotReadIntervalWithMonthsAsTimeSpan { + /// + /// Looks up a localized string similar to No multirange type could be found in the database for subtype {0}.. + /// + internal static string NoMultirangeTypeFound { get { - return ResourceManager.GetString("CannotReadIntervalWithMonthsAsTimeSpan", resourceCulture); + return ResourceManager.GetString("NoMultirangeTypeFound", resourceCulture); } } - internal static string PositionalParameterAfterNamed { + /// + /// Looks up a localized string similar to Connection and transaction access is not supported on batches created from DbDataSource.. 
+ /// + internal static string NotSupportedOnDataSourceBatch { get { - return ResourceManager.GetString("PositionalParameterAfterNamed", resourceCulture); + return ResourceManager.GetString("NotSupportedOnDataSourceBatch", resourceCulture); } } - internal static string CannotReadInfinityValue { + /// + /// Looks up a localized string similar to Connection and transaction access is not supported on commands created from DbDataSource.. + /// + internal static string NotSupportedOnDataSourceCommand { get { - return ResourceManager.GetString("CannotReadInfinityValue", resourceCulture); + return ResourceManager.GetString("NotSupportedOnDataSourceCommand", resourceCulture); } } - internal static string SyncAndAsyncConnectionInitializersRequired { + /// + /// Looks up a localized string similar to The right type of password provider (sync or async) was not found.. + /// + internal static string PasswordProviderMissing { get { - return ResourceManager.GetString("SyncAndAsyncConnectionInitializersRequired", resourceCulture); + return ResourceManager.GetString("PasswordProviderMissing", resourceCulture); } } - internal static string CannotUseValidationRootCertificateCallbackWithUserCallback { + /// + /// Looks up a localized string similar to When using CommandType.StoredProcedure, all positional parameters must come before named parameters.. + /// + internal static string PositionalParameterAfterNamed { get { - return ResourceManager.GetString("CannotUseValidationRootCertificateCallbackWithUserCallback", resourceCulture); + return ResourceManager.GetString("PositionalParameterAfterNamed", resourceCulture); } } + /// + /// Looks up a localized string similar to Records aren't enabled; please call {0} on {1} to enable records.. 
+ /// internal static string RecordsNotEnabled { get { return ResourceManager.GetString("RecordsNotEnabled", resourceCulture); } } - internal static string FullTextSearchNotEnabled { + /// + /// Looks up a localized string similar to Both sync and async connection initializers must be provided.. + /// + internal static string SyncAndAsyncConnectionInitializersRequired { get { - return ResourceManager.GetString("FullTextSearchNotEnabled", resourceCulture); + return ResourceManager.GetString("SyncAndAsyncConnectionInitializersRequired", resourceCulture); } } } diff --git a/src/Npgsql/Properties/NpgsqlStrings.resx b/src/Npgsql/Properties/NpgsqlStrings.resx index 27282cdb1b..8df8e0b335 100644 --- a/src/Npgsql/Properties/NpgsqlStrings.resx +++ b/src/Npgsql/Properties/NpgsqlStrings.resx @@ -24,12 +24,6 @@ RootCertificate cannot be used in conjunction with UserCertificateValidationCallback; when registering a validation callback, perform whatever validation you require in that callback. - - To validate server certificates, please use VerifyFull or VerifyCA instead of Require. To disable validation, explicitly set 'Trust Server Certificate' to true. See https://www.npgsql.org/doc/release-notes/6.0.html for more details. - - - TrustServerCertificate=true is not supported with SslMode={0} - NpgsqlSlimDataSourceBuilder is being used, and encryption hasn't been enabled, call EnableEncryption() on NpgsqlSlimDataSourceBuilder to enable it. 
diff --git a/test/Npgsql.Tests/SecurityTests.cs b/test/Npgsql.Tests/SecurityTests.cs index 56f8f69efa..dee4a71a08 100644 --- a/test/Npgsql.Tests/SecurityTests.cs +++ b/test/Npgsql.Tests/SecurityTests.cs @@ -16,7 +16,6 @@ public void Basic_ssl() using var dataSource = CreateDataSource(csb => { csb.SslMode = SslMode.Require; - csb.TrustServerCertificate = true; }); using var conn = dataSource.OpenConnection(); Assert.That(conn.IsSecure, Is.True); @@ -31,7 +30,6 @@ public void Default_user_uses_md5_password() using var dataSource = CreateDataSource(csb => { csb.SslMode = SslMode.Require; - csb.TrustServerCertificate = true; }); using var conn = dataSource.OpenConnection(); Assert.That(conn.IsScram, Is.False); @@ -60,7 +58,6 @@ public void No_ssl_renegotiation() using var dataSource = CreateDataSource(csb => { csb.SslMode = SslMode.Require; - csb.TrustServerCertificate = true; }); using var conn = dataSource.OpenConnection(); Assert.That(conn.ExecuteScalar("SHOW ssl_renegotiation_limit"), Is.EqualTo("0")); @@ -154,7 +151,6 @@ public void Bug1718() using var dataSource = CreateDataSource(csb => { csb.SslMode = SslMode.Require; - csb.TrustServerCertificate = true; }); using var conn = dataSource.OpenConnection(); using var cmd = CreateSleepCommand(conn, 10000); @@ -175,7 +171,6 @@ public void ScramPlus() csb.SslMode = SslMode.Require; csb.Username = "npgsql_tests_scram"; csb.Password = "npgsql_tests_scram"; - csb.TrustServerCertificate = true; }); using var conn = dataSource.OpenConnection(); // scram-sha-256-plus only works beginning from PostgreSQL 11 @@ -207,7 +202,6 @@ public void ScramPlus_channel_binding([Values] ChannelBinding channelBinding) csb.SslMode = SslMode.Require; csb.Username = "npgsql_tests_scram"; csb.Password = "npgsql_tests_scram"; - csb.TrustServerCertificate = true; csb.ChannelBinding = channelBinding; }); // scram-sha-256-plus only works beginning from PostgreSQL 11 @@ -260,29 +254,6 @@ public async Task Connect_with_only_ssl_allowed_user([Values] 
bool multiplexing, } } - [Test] - public void SslMode_Require_throws_without_TSC() - { - using var dataSource = CreateDataSource(csb => csb.SslMode = SslMode.Require); - var ex = Assert.ThrowsAsync(async () => await dataSource.OpenConnectionAsync())!; - Assert.That(ex.Message, Is.EqualTo(NpgsqlStrings.CannotUseSslModeRequireWithoutTrustServerCertificate)); - } - - [Test] - public async Task SslMode_Require_with_callback_without_TSC() - { - await using var dataSource = CreateDataSource(csb => - { - csb.SslMode = SslMode.Require; - csb.TrustServerCertificate = false; - csb.Pooling = false; - }); - await using var connection = dataSource.CreateConnection(); - connection.UserCertificateValidationCallback = (_, _, _, _) => true; - - await connection.OpenAsync(); - } - [Test] public async Task Connect_with_only_non_ssl_allowed_user([Values] bool multiplexing, [Values] bool keepAlive) { @@ -399,7 +370,6 @@ public async Task Bug4305_Secure([Values] bool async) csb.Username = "npgsql_tests_ssl"; csb.Password = "npgsql_tests_ssl"; csb.MaxPoolSize = 1; - csb.TrustServerCertificate = true; }); NpgsqlConnection conn = default!; From 60941cb03c3381de9ad29574181de26b57bf04be Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Fri, 4 Aug 2023 00:36:49 +0200 Subject: [PATCH 174/761] Correct OTel metrics units (#5185) --- src/Npgsql/MetricsReporter.cs | 52 +++++++++++++++++++++++++---------- 1 file changed, 38 insertions(+), 14 deletions(-) diff --git a/src/Npgsql/MetricsReporter.cs b/src/Npgsql/MetricsReporter.cs index 120f99e496..5cbaf9a9c0 100644 --- a/src/Npgsql/MetricsReporter.cs +++ b/src/Npgsql/MetricsReporter.cs @@ -47,31 +47,55 @@ static MetricsReporter() { Meter = new("Npgsql", Version); - // TODO: Add units CommandsExecuting = - Meter.CreateUpDownCounter("db.client.commands.executing", "The number of currently executing database commands."); + Meter.CreateUpDownCounter( + "db.client.commands.executing", + unit: "{command}", + description: "The number of currently executing 
database commands."); + CommandsFailed - = Meter.CreateCounter("db.client.commands.failed", "The number of database commands which have failed."); + = Meter.CreateCounter( + "db.client.commands.failed", + unit: "{command}", + description: "The number of database commands which have failed."); + CommandDuration - = Meter.CreateHistogram("db.client.commands.duration", "ms", "The duration of database commands, in milliseconds."); + = Meter.CreateHistogram( + "db.client.commands.duration", + unit: "s", + description: "The duration of database commands, in seconds."); - BytesWritten = Meter.CreateCounter("db.client.commands.bytes_written", "The number of bytes written."); - BytesRead = Meter.CreateCounter("db.client.commands.bytes_read", "The number of bytes read."); + BytesWritten = Meter.CreateCounter( + "db.client.commands.bytes_written", + unit: "By", + description: "The number of bytes written."); + + BytesRead = Meter.CreateCounter( + "db.client.commands.bytes_read", + unit: "By", + description: "The number of bytes read."); PendingConnectionRequests = Meter.CreateUpDownCounter( "db.client.connections.pending_requests", + unit: "{request}", "The number of pending requests for an open connection, cumulative for the entire pool."); + ConnectionTimeouts = Meter.CreateUpDownCounter( "db.client.connections.timeouts", - "The number of connection timeouts that have occurred trying to obtain a connection from the pool."); + unit: "{timeout}", + description: "The number of connection timeouts that have occurred trying to obtain a connection from the pool."); ConnectionCreateTime - = Meter.CreateHistogram("db.client.connections.create_time", "ms", "The time it took to create a new connection."); + = Meter.CreateHistogram( + "db.client.connections.create_time", + "s", + "The time it took to create a new connection."); // Observable metrics; these are for values we already track internally (and efficiently) inside the connection pool implementation. 
Meter.CreateObservableUpDownCounter( "db.client.connections.usage", GetConnectionUsage, - "The number of connections that are currently in state described by the state attribute."); + unit: "{connection}", + description: "The number of connections that are currently in state described by the state attribute."); // It's a bit ridiculous to manage "max connections" as an observable counter, given that it never changes for a given pool. // However, we can't simply report it once at startup, since clients who connect later wouldn't have it. And since reporting it @@ -79,13 +103,13 @@ static MetricsReporter() Meter.CreateObservableUpDownCounter( "db.client.connections.max", GetMaxConnections, - "The maximum number of open connections allowed."); + unit: "{connection}", + description: "The maximum number of open connections allowed."); Meter.CreateObservableUpDownCounter( "db.client.commands.prepared_ratio", GetPreparedCommandsRatio, - "%", - "The ratio of prepared command executions."); + description: "The ratio of prepared command executions."); } public MetricsReporter(NpgsqlDataSource dataSource) @@ -119,7 +143,7 @@ internal void ReportCommandStop(long startTimestamp) #else var duration = new TimeSpan((long)((Stopwatch.GetTimestamp() - startTimestamp) * StopWatchTickFrequency)); #endif - CommandDuration.Record(duration.TotalMilliseconds, _poolNameTag); + CommandDuration.Record(duration.TotalSeconds, _poolNameTag); } } @@ -139,7 +163,7 @@ internal void ReportPendingConnectionRequestStop() => PendingConnectionRequests.Add(-1, _poolNameTag); internal void ReportConnectionCreateTime(TimeSpan duration) - => ConnectionCreateTime.Record(duration.TotalMilliseconds, _poolNameTag); + => ConnectionCreateTime.Record(duration.TotalSeconds, _poolNameTag); static IEnumerable> GetConnectionUsage() { From 0104f4c2f344c5234365df198d6e093eb815fa29 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 4 Aug 2023 23:08:40 +0200 
Subject: [PATCH 175/761] Bump BenchmarkDotNet from 0.13.6 to 0.13.7 (#5189) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 4bdb731496..bae3a6faf0 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -32,7 +32,7 @@ - + From 902a24eedae22b89c74a1dbf335a68d4cab83c14 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 4 Aug 2023 23:25:50 +0200 Subject: [PATCH 176/761] Bump Microsoft.NET.Test.Sdk from 17.6.3 to 17.7.0 (#5188) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index bae3a6faf0..c0012b61dc 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -23,7 +23,7 @@ - + From 46bbdf054d85bd219916e880b7c9cf1e62b91408 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emmanuel=20Andr=C3=A9?= <2341261+manandre@users.noreply.github.com> Date: Sat, 5 Aug 2023 15:44:07 +0200 Subject: [PATCH 177/761] Upgrade to .NET 8 preview 6 (#5190) --- .devcontainer/docker-compose.yml | 2 +- .github/workflows/build.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/native-aot.yml | 2 +- .github/workflows/rich-code-nav.yml | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.devcontainer/docker-compose.yml b/.devcontainer/docker-compose.yml index d65b99a1a0..ae4cd58f96 100644 --- a/.devcontainer/docker-compose.yml +++ b/.devcontainer/docker-compose.yml @@ -3,7 +3,7 @@ version: '3' services: npgsql-dev: # Source for tags: https://mcr.microsoft.com/v2/dotnet/sdk/tags/list - image: mcr.microsoft.com/dotnet/sdk:8.0.100-preview.4 + image: mcr.microsoft.com/dotnet/sdk:8.0.100-preview.6 volumes: - ..:/workspace:cached tty: true diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 3a7e11ab06..c94bc31dc2 100644 --- a/.github/workflows/build.yml +++ 
b/.github/workflows/build.yml @@ -15,7 +15,7 @@ concurrency: cancel-in-progress: true env: - dotnet_sdk_version: '8.0.100-preview.4.23260.5' + dotnet_sdk_version: '8.0.100-preview.6.23330.14' postgis_version: 3 DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true # Windows comes with PG pre-installed, and defines the PGPASSWORD environment variable. Remove it as it interferes diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index b0c232efd1..25c9147bed 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -32,7 +32,7 @@ concurrency: cancel-in-progress: true env: - dotnet_sdk_version: '8.0.100-preview.4.23260.5' + dotnet_sdk_version: '8.0.100-preview.6.23330.14' DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true jobs: diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index 2dd007cfb0..19197ac2f7 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -15,7 +15,7 @@ concurrency: cancel-in-progress: true env: - dotnet_sdk_version: '8.0.100-preview.5.23266.8' + dotnet_sdk_version: '8.0.100-preview.6.23330.14' DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true nuget_config: | diff --git a/.github/workflows/rich-code-nav.yml b/.github/workflows/rich-code-nav.yml index 4f65f04596..ff19cb984b 100644 --- a/.github/workflows/rich-code-nav.yml +++ b/.github/workflows/rich-code-nav.yml @@ -9,7 +9,7 @@ on: - '*' env: - dotnet_sdk_version: '8.0.100-preview.4.23260.5' + dotnet_sdk_version: '8.0.100-preview.6.23330.14' DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true jobs: From ef9db1ffe9e432c1562d855b46dfac3514726b1b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Aug 2023 22:05:33 +0000 Subject: [PATCH 178/761] Bump BenchmarkDotNet.Diagnostics.Windows from 0.13.6 to 0.13.7 (#5193) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props 
b/Directory.Packages.props index c0012b61dc..2a07d697a7 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -34,7 +34,7 @@ - + From d9617b78ab4dfd0905d11c9d7c061ecbdcf101a0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 17 Aug 2023 10:23:26 +0200 Subject: [PATCH 179/761] Bump Microsoft.NET.Test.Sdk from 17.7.0 to 17.7.1 (#5198) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 2a07d697a7..2114b8a5a0 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -23,7 +23,7 @@ - + From 22200328cb3b1422df8438f9925a540cbd2430a1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 19 Aug 2023 00:20:17 +0200 Subject: [PATCH 180/761] Bump Microsoft.CodeAnalysis.CSharp from 4.6.0 to 4.7.0 (#5202) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 2114b8a5a0..1876be638d 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -7,7 +7,7 @@ - + From c2fc02a858176f2b5eab7a2c2336ff5ab4748ad0 Mon Sep 17 00:00:00 2001 From: HansM Date: Sun, 20 Aug 2023 00:53:49 +0200 Subject: [PATCH 181/761] Changed code style in readme. 
(#4175) From 251220d7cf2de2fc6fee4108915247b1640c9e83 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 25 Aug 2023 00:14:26 +0200 Subject: [PATCH 182/761] Bump GitHubActionsTestLogger from 2.3.2 to 2.3.3 (#5211) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 1876be638d..1953a0bcc6 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -27,7 +27,7 @@ - + From 5f851735ddc41c0336e6eedc463a7cc1623304f0 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Fri, 25 Aug 2023 16:17:58 +0200 Subject: [PATCH 183/761] Replace single thread sync context with task scheduling alternative (#5196) * Replace single thread sync context with task scheduling alternative * Bring back test * Add remaining configure await to write path * Address feedback --- .../NpgsqlConnector.FrontendMessages.cs | 52 +++---- src/Npgsql/Internal/NpgsqlWriteBuffer.cs | 22 +-- src/Npgsql/NpgsqlCommand.cs | 147 ++++++++++-------- src/Npgsql/PoolingDataSource.cs | 30 ++-- src/Npgsql/Util/TaskSchedulerAwaitable.cs | 38 +++++ test/Npgsql.Tests/BugTests.cs | 4 +- .../SingleThreadSynchronizationContext.cs | 7 +- 7 files changed, 169 insertions(+), 131 deletions(-) create mode 100644 src/Npgsql/Util/TaskSchedulerAwaitable.cs rename {src/Npgsql => test/Npgsql.Tests/Support}/SingleThreadSynchronizationContext.cs (97%) diff --git a/src/Npgsql/Internal/NpgsqlConnector.FrontendMessages.cs b/src/Npgsql/Internal/NpgsqlConnector.FrontendMessages.cs index c38f39575a..c2c6c23976 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.FrontendMessages.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.FrontendMessages.cs @@ -28,7 +28,7 @@ internal Task WriteDescribe(StatementOrPortal statementOrPortal, string name, bo async Task FlushAndWrite(int len, StatementOrPortal statementOrPortal, string name, bool async, CancellationToken cancellationToken) { - await 
Flush(async, cancellationToken); + await Flush(async, cancellationToken).ConfigureAwait(false); Debug.Assert(len <= WriteBuffer.WriteSpaceLeft, $"Message of type {GetType().Name} has length {len} which is bigger than the buffer ({WriteBuffer.WriteSpaceLeft})"); Write(len, statementOrPortal, name); } @@ -55,7 +55,7 @@ internal Task WriteSync(bool async, CancellationToken cancellationToken = defaul async Task FlushAndWrite(bool async, CancellationToken cancellationToken) { - await Flush(async, cancellationToken); + await Flush(async, cancellationToken).ConfigureAwait(false); Debug.Assert(len <= WriteBuffer.WriteSpaceLeft, $"Message of type {GetType().Name} has length {len} which is bigger than the buffer ({WriteBuffer.WriteSpaceLeft})"); Write(); } @@ -84,7 +84,7 @@ internal Task WriteExecute(int maxRows, bool async, CancellationToken cancellati async Task FlushAndWrite(int maxRows, bool async, CancellationToken cancellationToken) { - await Flush(async, cancellationToken); + await Flush(async, cancellationToken).ConfigureAwait(false); Debug.Assert(10 <= WriteBuffer.WriteSpaceLeft, $"Message of type {GetType().Name} has length 10 which is bigger than the buffer ({WriteBuffer.WriteSpaceLeft})"); Write(maxRows); } @@ -114,7 +114,7 @@ internal async Task WriteParse(string sql, string statementName, List= headerLength, "Write buffer too small for Bind header"); - await Flush(async, cancellationToken); + await Flush(async, cancellationToken).ConfigureAwait(false); } var formatCodesSum = 0; @@ -202,7 +202,7 @@ internal async Task WriteBind( if (formatCodeListLength == 1) { if (WriteBuffer.WriteSpaceLeft < 2) - await Flush(async, cancellationToken); + await Flush(async, cancellationToken).ConfigureAwait(false); WriteBuffer.WriteInt16((short)FormatCode.Binary); } else if (formatCodeListLength > 1) @@ -210,13 +210,13 @@ internal async Task WriteBind( for (var paramIndex = 0; paramIndex < parameters.Count; paramIndex++) { if (WriteBuffer.WriteSpaceLeft < 2) - await Flush(async, 
cancellationToken); + await Flush(async, cancellationToken).ConfigureAwait(false); WriteBuffer.WriteInt16((short)parameters[paramIndex].FormatCode); } } if (WriteBuffer.WriteSpaceLeft < 2) - await Flush(async, cancellationToken); + await Flush(async, cancellationToken).ConfigureAwait(false); WriteBuffer.WriteUInt16((ushort)parameters.Count); @@ -224,13 +224,13 @@ internal async Task WriteBind( { var param = parameters[paramIndex]; param.LengthCache?.Rewind(); - await param.WriteWithLength(WriteBuffer, async, cancellationToken); + await param.WriteWithLength(WriteBuffer, async, cancellationToken).ConfigureAwait(false); } if (unknownResultTypeList != null) { if (WriteBuffer.WriteSpaceLeft < 2 + unknownResultTypeList.Length * 2) - await Flush(async, cancellationToken); + await Flush(async, cancellationToken).ConfigureAwait(false); WriteBuffer.WriteInt16(unknownResultTypeList.Length); foreach (var t in unknownResultTypeList) WriteBuffer.WriteInt16(t ? 0 : 1); @@ -238,7 +238,7 @@ internal async Task WriteBind( else { if (WriteBuffer.WriteSpaceLeft < 4) - await Flush(async, cancellationToken); + await Flush(async, cancellationToken).ConfigureAwait(false); WriteBuffer.WriteInt16(1); WriteBuffer.WriteInt16(allResultTypesAreUnknown ? 
0 : 1); } @@ -259,7 +259,7 @@ internal Task WriteClose(StatementOrPortal type, string name, bool async, Cancel async Task FlushAndWrite(int len, StatementOrPortal type, string name, bool async, CancellationToken cancellationToken) { - await Flush(async, cancellationToken); + await Flush(async, cancellationToken).ConfigureAwait(false); Debug.Assert(len <= WriteBuffer.WriteSpaceLeft, $"Message of type {GetType().Name} has length {len} which is bigger than the buffer ({WriteBuffer.WriteSpaceLeft})"); Write(len, type, name); } @@ -280,7 +280,7 @@ internal async Task WriteQuery(string sql, bool async, CancellationToken cancell var queryByteLen = TextEncoding.GetByteCount(sql); if (WriteBuffer.WriteSpaceLeft < 1 + 4) - await Flush(async, cancellationToken); + await Flush(async, cancellationToken).ConfigureAwait(false); WriteBuffer.WriteByte(FrontendMessageCode.Query); WriteBuffer.WriteInt32( @@ -288,9 +288,9 @@ internal async Task WriteQuery(string sql, bool async, CancellationToken cancell queryByteLen + // Query byte length sizeof(byte)); // Null terminator - await WriteBuffer.WriteString(sql, queryByteLen, async, cancellationToken); + await WriteBuffer.WriteString(sql, queryByteLen, async, cancellationToken).ConfigureAwait(false); if (WriteBuffer.WriteSpaceLeft < 1) - await Flush(async, cancellationToken); + await Flush(async, cancellationToken).ConfigureAwait(false); WriteBuffer.WriteByte(0); // Null terminator } @@ -302,7 +302,7 @@ internal async Task WriteCopyDone(bool async, CancellationToken cancellationToke sizeof(int); // Length if (WriteBuffer.WriteSpaceLeft < len) - await Flush(async, cancellationToken); + await Flush(async, cancellationToken).ConfigureAwait(false); WriteBuffer.WriteByte(FrontendMessageCode.CopyDone); WriteBuffer.WriteInt32(len - 1); @@ -317,7 +317,7 @@ internal async Task WriteCopyFail(bool async, CancellationToken cancellationToke sizeof(byte); // Error message is always empty (only a null terminator) if (WriteBuffer.WriteSpaceLeft < len) - 
await Flush(async, cancellationToken); + await Flush(async, cancellationToken).ConfigureAwait(false); WriteBuffer.WriteByte(FrontendMessageCode.CopyFail); WriteBuffer.WriteInt32(len - 1); @@ -403,7 +403,7 @@ internal void WriteStartup(Dictionary parameters) internal async Task WritePassword(byte[] payload, int offset, int count, bool async, CancellationToken cancellationToken = default) { if (WriteBuffer.WriteSpaceLeft < sizeof(byte) + sizeof(int)) - await WriteBuffer.Flush(async, cancellationToken); + await WriteBuffer.Flush(async, cancellationToken).ConfigureAwait(false); WriteBuffer.WriteByte(FrontendMessageCode.Password); WriteBuffer.WriteInt32(sizeof(int) + count); @@ -414,8 +414,8 @@ internal async Task WritePassword(byte[] payload, int offset, int count, bool as return; } - await WriteBuffer.Flush(async, cancellationToken); - await WriteBuffer.DirectWrite(new ReadOnlyMemory(payload, offset, count), async, cancellationToken); + await WriteBuffer.Flush(async, cancellationToken).ConfigureAwait(false); + await WriteBuffer.DirectWrite(new ReadOnlyMemory(payload, offset, count), async, cancellationToken).ConfigureAwait(false); } internal async Task WriteSASLInitialResponse(string mechanism, byte[] initialResponse, bool async, CancellationToken cancellationToken = default) @@ -427,7 +427,7 @@ internal async Task WriteSASLInitialResponse(string mechanism, byte[] initialRes (initialResponse?.Length ?? 
0); // Initial response payload if (WriteBuffer.WriteSpaceLeft < len) - await WriteBuffer.Flush(async, cancellationToken); + await WriteBuffer.Flush(async, cancellationToken).ConfigureAwait(false); WriteBuffer.WriteByte(FrontendMessageCode.Password); WriteBuffer.WriteInt32(len - 1); @@ -457,7 +457,7 @@ internal Task WritePregenerated(byte[] data, bool async = false, CancellationTok async Task FlushAndWrite(byte[] data, bool async, CancellationToken cancellationToken) { - await Flush(async, cancellationToken); + await Flush(async, cancellationToken).ConfigureAwait(false); Debug.Assert(data.Length <= WriteBuffer.WriteSpaceLeft, $"Pregenerated message has length {data.Length} which is bigger than the buffer ({WriteBuffer.WriteSpaceLeft})"); WriteBuffer.WriteBytes(data, 0, data.Length); } @@ -466,4 +466,4 @@ async Task FlushAndWrite(byte[] data, bool async, CancellationToken cancellation internal void Flush() => WriteBuffer.Flush(false).GetAwaiter().GetResult(); internal Task Flush(bool async, CancellationToken cancellationToken = default) => WriteBuffer.Flush(async, cancellationToken); -} \ No newline at end of file +} diff --git a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs index 468c42cd75..451e7d5263 100644 --- a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs @@ -136,8 +136,8 @@ public async Task Flush(bool async, CancellationToken cancellationToken = defaul { if (async) { - await Underlying.WriteAsync(Buffer, 0, WritePosition, finalCt); - await Underlying.FlushAsync(finalCt); + await Underlying.WriteAsync(Buffer, 0, WritePosition, finalCt).ConfigureAwait(false); + await Underlying.FlushAsync(finalCt).ConfigureAwait(false); if (Timeout > TimeSpan.Zero) _timeoutCts.Stop(); } @@ -218,7 +218,7 @@ internal void DirectWrite(ReadOnlySpan buffer) internal async Task DirectWrite(ReadOnlyMemory memory, bool async, CancellationToken cancellationToken = default) { - await Flush(async, 
cancellationToken); + await Flush(async, cancellationToken).ConfigureAwait(false); if (_copyMode) { @@ -230,7 +230,7 @@ internal async Task DirectWrite(ReadOnlyMemory memory, bool async, Cancell WriteInt32(memory.Length + 4); WritePosition = 5; _copyMode = false; - await Flush(async, cancellationToken); + await Flush(async, cancellationToken).ConfigureAwait(false); _copyMode = true; WriteCopyDataHeader(); // And ready the buffer after the direct write completes } @@ -240,7 +240,7 @@ internal async Task DirectWrite(ReadOnlyMemory memory, bool async, Cancell try { if (async) - await Underlying.WriteAsync(memory, cancellationToken); + await Underlying.WriteAsync(memory, cancellationToken).ConfigureAwait(false); else Underlying.Write(memory.Span); } @@ -360,7 +360,7 @@ static async Task WriteStringLong(NpgsqlWriteBuffer buffer, bool async, string s { // String can fit entirely in an empty buffer. Flush and retry rather than // going into the partial writing flow below (which requires ToCharArray()) - await buffer.Flush(async, cancellationToken); + await buffer.Flush(async, cancellationToken).ConfigureAwait(false); buffer.WriteString(s, charLen); } else @@ -371,7 +371,7 @@ static async Task WriteStringLong(NpgsqlWriteBuffer buffer, bool async, string s buffer.WriteStringChunked(s, charPos, charLen - charPos, true, out var charsUsed, out var completed); if (completed) break; - await buffer.Flush(async, cancellationToken); + await buffer.Flush(async, cancellationToken).ConfigureAwait(false); charPos += charsUsed; } } @@ -462,7 +462,7 @@ static async Task WriteBytesLong(NpgsqlWriteBuffer buffer, bool async, ReadOnlyM { // value can fit entirely in an empty buffer. 
Flush and retry rather than // going into the partial writing flow below - await buffer.Flush(async, cancellationToken); + await buffer.Flush(async, cancellationToken).ConfigureAwait(false); buffer.WriteBytes(bytes); } else @@ -471,7 +471,7 @@ static async Task WriteBytesLong(NpgsqlWriteBuffer buffer, bool async, ReadOnlyM do { if (buffer.WriteSpaceLeft == 0) - await buffer.Flush(async, cancellationToken); + await buffer.Flush(async, cancellationToken).ConfigureAwait(false); var writeLen = Math.Min(remaining, buffer.WriteSpaceLeft); var offset = bytes.Length - remaining; buffer.WriteBytes(bytes.Slice(offset, writeLen)); @@ -487,11 +487,11 @@ public async Task WriteStreamRaw(Stream stream, int count, bool async, Cancellat while (count > 0) { if (WriteSpaceLeft == 0) - await Flush(async, cancellationToken); + await Flush(async, cancellationToken).ConfigureAwait(false); try { var read = async - ? await stream.ReadAsync(Buffer, WritePosition, Math.Min(WriteSpaceLeft, count), cancellationToken) + ? 
await stream.ReadAsync(Buffer, WritePosition, Math.Min(WriteSpaceLeft, count), cancellationToken).ConfigureAwait(false) : stream.Read(Buffer, WritePosition, Math.Min(WriteSpaceLeft, count)); if (read == 0) throw new EndOfStreamException(); diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index b3a4d268bf..9480201a06 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -84,7 +84,8 @@ public class NpgsqlCommand : DbCommand, ICloneable, IComponent static readonly List EmptyParameters = new(); - static readonly SingleThreadSynchronizationContext SingleThreadSynchronizationContext = new("NpgsqlRemainingAsyncSendWorker"); + static readonly TaskScheduler ConstrainedConcurrencyScheduler = + new ConcurrentExclusiveSchedulerPair(TaskScheduler.Default, Math.Max(1, Environment.ProcessorCount / 2)).ConcurrentScheduler; #endregion Fields @@ -694,14 +695,14 @@ static async Task PrepareLong(NpgsqlCommand command, bool async, NpgsqlConnector if (pStatement.StatementBeingReplaced != null) { - Expect(await connector.ReadMessage(async), connector); + Expect(await connector.ReadMessage(async).ConfigureAwait(false), connector); pStatement.StatementBeingReplaced.CompleteUnprepare(); pStatement.StatementBeingReplaced = null; } - Expect(await connector.ReadMessage(async), connector); - Expect(await connector.ReadMessage(async), connector); - var msg = await connector.ReadMessage(async); + Expect(await connector.ReadMessage(async).ConfigureAwait(false), connector); + Expect(await connector.ReadMessage(async).ConfigureAwait(false), connector); + var msg = await connector.ReadMessage(async).ConfigureAwait(false); switch (msg.Code) { case BackendMessageCode.RowDescription: @@ -724,10 +725,10 @@ static async Task PrepareLong(NpgsqlCommand command, bool async, NpgsqlConnector isFirst = false; } - Expect(await connector.ReadMessage(async), connector); + Expect(await connector.ReadMessage(async).ConfigureAwait(false), connector); if (async) - await 
sendTask; + await sendTask.ConfigureAwait(false); else sendTask.GetAwaiter().GetResult(); } @@ -796,7 +797,7 @@ async Task Unprepare(bool async, CancellationToken cancellationToken = default) { if (batchCommand.PreparedStatement?.State == PreparedState.BeingUnprepared) { - Expect(await connector.ReadMessage(async), connector); + Expect(await connector.ReadMessage(async).ConfigureAwait(false), connector); var pStatement = batchCommand.PreparedStatement; pStatement.CompleteUnprepare(); @@ -808,10 +809,10 @@ async Task Unprepare(bool async, CancellationToken cancellationToken = default) } } - Expect(await connector.ReadMessage(async), connector); + Expect(await connector.ReadMessage(async).ConfigureAwait(false), connector); if (async) - await sendTask; + await sendTask.ConfigureAwait(false); else sendTask.GetAwaiter().GetResult(); } @@ -988,10 +989,12 @@ async Task WriteExecute(NpgsqlConnector connector, bool async, bool flush, Cance { NpgsqlBatchCommand? batchCommand = null; + var syncCaller = !async; for (var i = 0; i < InternalBatchCommands.Count; i++) { // The following is only for deadlock avoidance when doing sync I/O (so never in multiplexing) - ForceAsyncIfNecessary(ref async, i); + if (syncCaller && ShouldSchedule(ref async, i)) + await new TaskSchedulerAwaitable(ConstrainedConcurrencyScheduler); batchCommand = InternalBatchCommands[i]; var pStatement = batchCommand.PreparedStatement; @@ -1005,16 +1008,16 @@ async Task WriteExecute(NpgsqlConnector connector, bool async, bool flush, Cance // We may have a prepared statement that replaces an existing statement - close the latter first. 
if (pStatement?.StatementBeingReplaced != null) - await connector.WriteClose(StatementOrPortal.Statement, pStatement.StatementBeingReplaced.Name!, async, cancellationToken); + await connector.WriteClose(StatementOrPortal.Statement, pStatement.StatementBeingReplaced.Name!, async, cancellationToken).ConfigureAwait(false); - await connector.WriteParse(batchCommand.FinalCommandText, batchCommand.StatementName, batchCommand.PositionalParameters, async, cancellationToken); + await connector.WriteParse(batchCommand.FinalCommandText, batchCommand.StatementName, batchCommand.PositionalParameters, async, cancellationToken).ConfigureAwait(false); await connector.WriteBind( batchCommand.PositionalParameters, string.Empty, batchCommand.StatementName, AllResultTypesAreUnknown, i == 0 ? UnknownResultTypeList : null, - async, cancellationToken); + async, cancellationToken).ConfigureAwait(false); - await connector.WriteDescribe(StatementOrPortal.Portal, string.Empty, async, cancellationToken); + await connector.WriteDescribe(StatementOrPortal.Portal, string.Empty, async, cancellationToken).ConfigureAwait(false); } else { @@ -1022,13 +1025,13 @@ await connector.WriteBind( await connector.WriteBind( batchCommand.PositionalParameters, string.Empty, batchCommand.StatementName, AllResultTypesAreUnknown, i == 0 ? UnknownResultTypeList : null, - async, cancellationToken); + async, cancellationToken).ConfigureAwait(false); } - await connector.WriteExecute(0, async, cancellationToken); + await connector.WriteExecute(0, async, cancellationToken).ConfigureAwait(false); if (batchCommand.AppendErrorBarrier ?? EnableErrorBarriers) - await connector.WriteSync(async, cancellationToken); + await connector.WriteSync(async, cancellationToken).ConfigureAwait(false); if (pStatement != null) pStatement.LastUsed = DateTime.UtcNow; @@ -1036,35 +1039,38 @@ await connector.WriteBind( if (batchCommand is null || !(batchCommand.AppendErrorBarrier ?? 
EnableErrorBarriers)) { - await connector.WriteSync(async, cancellationToken); + await connector.WriteSync(async, cancellationToken).ConfigureAwait(false); } if (flush) - await connector.Flush(async, cancellationToken); + await connector.Flush(async, cancellationToken).ConfigureAwait(false); } async Task WriteExecuteSchemaOnly(NpgsqlConnector connector, bool async, bool flush, CancellationToken cancellationToken) { var wroteSomething = false; + var syncCaller = !async; for (var i = 0; i < InternalBatchCommands.Count; i++) { - ForceAsyncIfNecessary(ref async, i); + if (syncCaller && ShouldSchedule(ref async, i)) + await new TaskSchedulerAwaitable(ConstrainedConcurrencyScheduler); var batchCommand = InternalBatchCommands[i]; if (batchCommand.PreparedStatement?.State == PreparedState.Prepared) - continue; // Prepared, we already have the RowDescription + continue; // Prepared, we already have the RowDescription - await connector.WriteParse(batchCommand.FinalCommandText!, batchCommand.StatementName, batchCommand.PositionalParameters, async, cancellationToken); - await connector.WriteDescribe(StatementOrPortal.Statement, batchCommand.StatementName, async, cancellationToken); + await connector.WriteParse(batchCommand.FinalCommandText!, batchCommand.StatementName, + batchCommand.PositionalParameters, async, cancellationToken).ConfigureAwait(false); + await connector.WriteDescribe(StatementOrPortal.Statement, batchCommand.StatementName, async, cancellationToken).ConfigureAwait(false); wroteSomething = true; } if (wroteSomething) { - await connector.WriteSync(async, cancellationToken); + await connector.WriteSync(async, cancellationToken).ConfigureAwait(false); if (flush) - await connector.Flush(async, cancellationToken); + await connector.Flush(async, cancellationToken).ConfigureAwait(false); } } } @@ -1073,27 +1079,31 @@ async Task SendDeriveParameters(NpgsqlConnector connector, bool async, Cancellat { BeginSend(connector); + var syncCaller = !async; for (var i = 0; i < 
InternalBatchCommands.Count; i++) { - ForceAsyncIfNecessary(ref async, i); + if (syncCaller && ShouldSchedule(ref async, i)) + await new TaskSchedulerAwaitable(ConstrainedConcurrencyScheduler); var batchCommand = InternalBatchCommands[i]; - await connector.WriteParse(batchCommand.FinalCommandText!, string.Empty, EmptyParameters, async, cancellationToken); - await connector.WriteDescribe(StatementOrPortal.Statement, string.Empty, async, cancellationToken); + await connector.WriteParse(batchCommand.FinalCommandText!, string.Empty, EmptyParameters, async, cancellationToken).ConfigureAwait(false); + await connector.WriteDescribe(StatementOrPortal.Statement, string.Empty, async, cancellationToken).ConfigureAwait(false); } - await connector.WriteSync(async, cancellationToken); - await connector.Flush(async, cancellationToken); + await connector.WriteSync(async, cancellationToken).ConfigureAwait(false); + await connector.Flush(async, cancellationToken).ConfigureAwait(false); } async Task SendPrepare(NpgsqlConnector connector, bool async, CancellationToken cancellationToken = default) { BeginSend(connector); + var syncCaller = !async; for (var i = 0; i < InternalBatchCommands.Count; i++) { - ForceAsyncIfNecessary(ref async, i); + if (syncCaller && ShouldSchedule(ref async, i)) + await new TaskSchedulerAwaitable(ConstrainedConcurrencyScheduler); var batchCommand = InternalBatchCommands[i]; var pStatement = batchCommand.PreparedStatement; @@ -1106,31 +1116,32 @@ async Task SendPrepare(NpgsqlConnector connector, bool async, CancellationToken // We may have a prepared statement that replaces an existing statement - close the latter first. 
var statementToClose = pStatement!.StatementBeingReplaced; if (statementToClose != null) - await connector.WriteClose(StatementOrPortal.Statement, statementToClose.Name!, async, cancellationToken); + await connector.WriteClose(StatementOrPortal.Statement, statementToClose.Name!, async, cancellationToken).ConfigureAwait(false); - await connector.WriteParse(batchCommand.FinalCommandText!, pStatement.Name!, batchCommand.PositionalParameters, async, cancellationToken); - await connector.WriteDescribe(StatementOrPortal.Statement, pStatement.Name!, async, cancellationToken); + await connector.WriteParse(batchCommand.FinalCommandText!, pStatement.Name!, batchCommand.PositionalParameters, async, + cancellationToken).ConfigureAwait(false); + await connector.WriteDescribe(StatementOrPortal.Statement, pStatement.Name!, async, cancellationToken).ConfigureAwait(false); } - await connector.WriteSync(async, cancellationToken); - await connector.Flush(async, cancellationToken); + await connector.WriteSync(async, cancellationToken).ConfigureAwait(false); + await connector.Flush(async, cancellationToken).ConfigureAwait(false); } [MethodImpl(MethodImplOptions.AggressiveInlining)] - void ForceAsyncIfNecessary(ref bool async, int numberOfStatementInBatch) + bool ShouldSchedule(ref bool async, int indexOfStatementInBatch) { - if (!async && numberOfStatementInBatch > 0) - { - // We're synchronously sending the non-first statement in a batch - switch to async writing. - // See long comment in Execute() above. - - // TODO: we can simply do all batch writing asynchronously, instead of starting with the 2nd statement. - // For now, writing the first statement synchronously gives us a better chance of handle and bubbling up errors correctly - // (see sendTask.IsFaulted in Execute()). Once #1323 is done, that shouldn't be needed any more and entire batches should - // be written asynchronously. 
- async = true; - SynchronizationContext.SetSynchronizationContext(SingleThreadSynchronizationContext); - } + if (indexOfStatementInBatch <= 0) + return false; + + // We're synchronously sending the non-first statement in a batch - switch to async writing. + // See long comment in Execute() above. + + // TODO: we can simply do all batch writing asynchronously, instead of starting with the 2nd statement. + // For now, writing the first statement synchronously gives us a better chance of handling and bubbling up errors correctly + // (see sendTask.IsFaulted in Execute()). Once #1323 is done, that shouldn't be needed any more and entire batches should + // be written asynchronously. + async = true; + return TaskScheduler.Current != ConstrainedConcurrencyScheduler; } async Task SendClose(NpgsqlConnector connector, bool async, CancellationToken cancellationToken = default) @@ -1138,17 +1149,19 @@ async Task SendClose(NpgsqlConnector connector, bool async, CancellationToken ca BeginSend(connector); var i = 0; + var syncCaller = !async; foreach (var batchCommand in InternalBatchCommands.Where(s => s.IsPrepared)) { - ForceAsyncIfNecessary(ref async, i); + if (syncCaller && ShouldSchedule(ref async, i)) + await new TaskSchedulerAwaitable(ConstrainedConcurrencyScheduler); - await connector.WriteClose(StatementOrPortal.Statement, batchCommand.StatementName, async, cancellationToken); + await connector.WriteClose(StatementOrPortal.Statement, batchCommand.StatementName, async, cancellationToken).ConfigureAwait(false); batchCommand.PreparedStatement!.State = PreparedState.BeingUnprepared; i++; } - await connector.WriteSync(async, cancellationToken); - await connector.Flush(async, cancellationToken); + await connector.WriteSync(async, cancellationToken).ConfigureAwait(false); + await connector.Flush(async, cancellationToken).ConfigureAwait(false); } #endregion @@ -1177,17 +1190,17 @@ public override Task ExecuteNonQueryAsync(CancellationToken cancellationTok 
[MethodImpl(MethodImplOptions.AggressiveInlining)] async Task ExecuteNonQuery(bool async, CancellationToken cancellationToken) { - var reader = await ExecuteReader(CommandBehavior.Default, async, cancellationToken); + var reader = await ExecuteReader(CommandBehavior.Default, async, cancellationToken).ConfigureAwait(false); try { - while (async ? await reader.NextResultAsync(cancellationToken) : reader.NextResult()) ; + while (async ? await reader.NextResultAsync(cancellationToken).ConfigureAwait(false) : reader.NextResult()) ; return reader.RecordsAffected; } finally { if (async) - await reader.DisposeAsync(); + await reader.DisposeAsync().ConfigureAwait(false); else reader.Dispose(); } @@ -1226,19 +1239,19 @@ async Task ExecuteNonQuery(bool async, CancellationToken cancellationToken) if (IsWrappedByBatch || !Parameters.HasOutputParameters) behavior |= CommandBehavior.SequentialAccess; - var reader = await ExecuteReader(behavior, async, cancellationToken); + var reader = await ExecuteReader(behavior, async, cancellationToken).ConfigureAwait(false); try { - var read = async ? await reader.ReadAsync(cancellationToken) : reader.Read(); + var read = async ? await reader.ReadAsync(cancellationToken).ConfigureAwait(false) : reader.Read(); var value = read && reader.FieldCount != 0 ? reader.GetValue(0) : null; // We read the whole result set to trigger any errors - while (async ? await reader.NextResultAsync(cancellationToken) : reader.NextResult()) ; + while (async ? await reader.NextResultAsync(cancellationToken).ConfigureAwait(false) : reader.NextResult()) ; return value; } finally { if (async) - await reader.DisposeAsync(); + await reader.DisposeAsync().ConfigureAwait(false); else reader.Dispose(); } @@ -1264,7 +1277,7 @@ protected override DbDataReader ExecuteDbDataReader(CommandBehavior behavior) /// /// A task representing the asynchronous operation. 
protected override async Task ExecuteDbDataReaderAsync(CommandBehavior behavior, CancellationToken cancellationToken) - => await ExecuteReaderAsync(behavior, cancellationToken); + => await ExecuteReaderAsync(behavior, cancellationToken).ConfigureAwait(false); /// /// Executes the against the @@ -1470,7 +1483,7 @@ internal virtual async ValueTask ExecuteReader(CommandBehavior reader.Init(this, behavior, InternalBatchCommands, startTimestamp, sendTask); connector.CurrentReader = reader; if (async) - await reader.NextResultAsync(cancellationToken); + await reader.NextResultAsync(cancellationToken).ConfigureAwait(false); else reader.NextResult(); @@ -1515,14 +1528,14 @@ internal virtual async ValueTask ExecuteReader(CommandBehavior ExecutionCompletion.Reset(); try { - await dataSource.MultiplexCommandWriter.WriteAsync(this, cancellationToken); + await dataSource.MultiplexCommandWriter.WriteAsync(this, cancellationToken).ConfigureAwait(false); } catch (ChannelClosedException ex) { Debug.Assert(ex.InnerException is not null); throw ex.InnerException; } - connector = await new ValueTask(ExecutionCompletion, ExecutionCompletion.Version); + connector = await new ValueTask(ExecutionCompletion, ExecutionCompletion.Version).ConfigureAwait(false); // TODO: Overload of StartBindingScope? 
conn.Connector = connector; connector.Connection = conn; @@ -1531,7 +1544,7 @@ internal virtual async ValueTask ExecuteReader(CommandBehavior var reader = connector.DataReader; reader.Init(this, behavior, InternalBatchCommands); connector.CurrentReader = reader; - await reader.NextResultAsync(cancellationToken); + await reader.NextResultAsync(cancellationToken).ConfigureAwait(false); return reader; } @@ -1540,7 +1553,7 @@ internal virtual async ValueTask ExecuteReader(CommandBehavior { var reader = connector?.CurrentReader; if (e is not NpgsqlOperationInProgressException && reader is not null) - await reader.Cleanup(async); + await reader.Cleanup(async).ConfigureAwait(false); TraceSetException(e); diff --git a/src/Npgsql/PoolingDataSource.cs b/src/Npgsql/PoolingDataSource.cs index 64e32b3fc0..52f8adb2f4 100644 --- a/src/Npgsql/PoolingDataSource.cs +++ b/src/Npgsql/PoolingDataSource.cs @@ -60,7 +60,8 @@ class PoolingDataSource : NpgsqlDataSource volatile int _isClearing; - static readonly SingleThreadSynchronizationContext SingleThreadSynchronizationContext = new("NpgsqlRemainingAsyncSendWorker"); + static readonly ConcurrentExclusiveSchedulerPair ConstrainedConcurrencyScheduler = + new(TaskScheduler.Default, Math.Max(1, Environment.ProcessorCount / 2)); #endregion @@ -148,26 +149,13 @@ async ValueTask RentAsync( { try { - if (async) - { - connector = await _idleConnectorReader.ReadAsync(finalToken); - if (CheckIdleConnector(connector)) - return connector; - } - else - { - // Channels don't have a sync API. To avoid sync-over-async issues, we use a special single- - // thread synchronization context which ensures that callbacks are executed on a dedicated - // thread. - // Note that AsTask isn't safe here for getting the result, since it still causes some continuation code - // to get executed on the TP (which can cause deadlocks). 
- using (SingleThreadSynchronizationContext.Enter()) - using (var mre = new ManualResetEventSlim()) - { - _idleConnectorReader.WaitToReadAsync(finalToken).GetAwaiter().OnCompleted(() => mre.Set()); - mre.Wait(finalToken); - } - } + var task = _idleConnectorReader.ReadAsync(finalToken); + if (!async && !task.IsCompleted) + await new TaskSchedulerAwaitable(ConstrainedConcurrencyScheduler.ConcurrentScheduler); + + connector = await task; + if (CheckIdleConnector(connector)) + return connector; } catch (OperationCanceledException) { diff --git a/src/Npgsql/Util/TaskSchedulerAwaitable.cs b/src/Npgsql/Util/TaskSchedulerAwaitable.cs new file mode 100644 index 0000000000..be16d8fa55 --- /dev/null +++ b/src/Npgsql/Util/TaskSchedulerAwaitable.cs @@ -0,0 +1,38 @@ +using System; +using System.Diagnostics; +using System.Runtime.CompilerServices; +using System.Threading; +using System.Threading.Tasks; + +namespace Npgsql.Util; + +readonly struct TaskSchedulerAwaitable : ICriticalNotifyCompletion +{ + readonly TaskScheduler _scheduler; + public TaskSchedulerAwaitable(TaskScheduler scheduler) => _scheduler = scheduler; + + public void GetResult() {} + public bool IsCompleted => false; + + public void OnCompleted(Action continuation) + { + var task = Task.Factory.StartNew(continuation, CancellationToken.None, + TaskCreationOptions.DenyChildAttach, + scheduler: _scheduler); + + // Exceptions should never happen as the continuation should be the async statemachine. + // It normally does its own error handling through the returned task unless it's an async void returning method. + // In which case we should absolutely let it bubble up to TaskScheduler.UnobservedTaskException. 
+ OnFaulted(task); + + [Conditional("DEBUG")] + static void OnFaulted(Task task) + { + task.ContinueWith(t => Debug.Fail("Task scheduler task threw an unobserved exception"), TaskContinuationOptions.OnlyOnFaulted); + } + } + + public void UnsafeOnCompleted(Action continuation) => OnCompleted(continuation); + + public TaskSchedulerAwaitable GetAwaiter() => this; +} diff --git a/test/Npgsql.Tests/BugTests.cs b/test/Npgsql.Tests/BugTests.cs index 66cd34e65c..d702f8d0b2 100644 --- a/test/Npgsql.Tests/BugTests.cs +++ b/test/Npgsql.Tests/BugTests.cs @@ -1284,9 +1284,9 @@ public async Task Bug3649() [Test] [IssueLink("https://github.com/npgsql/npgsql/issues/3839")] - public async Task SingleThreadedSynchronizationContext_deadlock() + public async Task UIThreadSynchronizationContext_deadlock() { - var syncContext = new SingleThreadSynchronizationContext(nameof(SingleThreadedSynchronizationContext_deadlock)); + var syncContext = new SingleThreadSynchronizationContext(nameof(UIThreadSynchronizationContext_deadlock)); using (var _ = syncContext.Enter()) { // We have to Yield, so the current thread is changed to the one used by SingleThreadSynchronizationContext diff --git a/src/Npgsql/SingleThreadSynchronizationContext.cs b/test/Npgsql.Tests/Support/SingleThreadSynchronizationContext.cs similarity index 97% rename from src/Npgsql/SingleThreadSynchronizationContext.cs rename to test/Npgsql.Tests/Support/SingleThreadSynchronizationContext.cs index 99318be975..a7fedad3d6 100644 --- a/src/Npgsql/SingleThreadSynchronizationContext.cs +++ b/test/Npgsql.Tests/Support/SingleThreadSynchronizationContext.cs @@ -1,10 +1,9 @@ -using System; +using System; using System.Collections.Concurrent; using System.Diagnostics; using System.Threading; -using Microsoft.Extensions.Logging; -namespace Npgsql; +namespace Npgsql.Tests.Support; sealed class SingleThreadSynchronizationContext : SynchronizationContext, IDisposable { @@ -118,4 +117,4 @@ internal Disposable(SynchronizationContext 
synchronizationContext) public void Dispose() => SetSynchronizationContext(_synchronizationContext); } -} \ No newline at end of file +} From 39539e5a27156ce1428fbd9d62fd2cab9556dd05 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Fri, 25 Aug 2023 18:10:14 +0300 Subject: [PATCH 184/761] Remove ef 5-6 compability workarounds (#5215) --- src/Npgsql/NpgsqlCommand.cs | 14 +----------- src/Npgsql/NpgsqlDataReader.cs | 12 +--------- src/Npgsql/NpgsqlFactory.cs | 41 ++-------------------------------- 3 files changed, 4 insertions(+), 63 deletions(-) diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index 9480201a06..f5723546b2 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -347,18 +347,6 @@ public bool[]? UnknownResultTypeList #endregion - #region Result Types Management - - /// - /// Marks result types to be used when using GetValue on a data reader, on a column-by-column basis. - /// Used for Entity Framework 5-6 compability. - /// Only primitive numerical types and DateTimeOffset are supported. - /// Set the whole array or just a value to null to use default type. - /// - internal Type[]? 
ObjectResultTypes { get; set; } - - #endregion - #region State management volatile int _state; @@ -1814,7 +1802,7 @@ public virtual NpgsqlCommand Clone() { var clone = new NpgsqlCommand(CommandText, InternalConnection, Transaction) { - CommandTimeout = CommandTimeout, CommandType = CommandType, DesignTimeVisible = DesignTimeVisible, _allResultTypesAreUnknown = _allResultTypesAreUnknown, _unknownResultTypeList = _unknownResultTypeList, ObjectResultTypes = ObjectResultTypes + CommandTimeout = CommandTimeout, CommandType = CommandType, DesignTimeVisible = DesignTimeVisible, _allResultTypesAreUnknown = _allResultTypesAreUnknown, _unknownResultTypeList = _unknownResultTypeList }; _parameters.CloneTo(clone._parameters); return clone; diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index f59d8e9de1..04ad1aff28 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -1822,15 +1822,6 @@ public override object GetValue(int ordinal) PosInColumn += ColumnLen; } - // Used for Entity Framework <= 6 compability - var objectResultType = Command.ObjectResultTypes?[ordinal]; - if (objectResultType != null) - { - result = objectResultType == typeof(DateTimeOffset) - ? new DateTimeOffset((DateTime)result) - : Convert.ChangeType(result, objectResultType)!; - } - return result; } @@ -1941,8 +1932,7 @@ public override int GetOrdinal(string name) /// The zero-based column ordinal. /// The data type of the specified column. public override Type GetFieldType(int ordinal) - => Command.ObjectResultTypes?[ordinal] - ?? GetField(ordinal).FieldType; + => GetField(ordinal).FieldType; /// /// Returns an that can be used to iterate through the rows in the data reader. 
diff --git a/src/Npgsql/NpgsqlFactory.cs b/src/Npgsql/NpgsqlFactory.cs index 5efe10e94e..6e45cde7b0 100644 --- a/src/Npgsql/NpgsqlFactory.cs +++ b/src/Npgsql/NpgsqlFactory.cs @@ -85,44 +85,7 @@ public override DbDataSource CreateDataSource(string connectionString) /// /// An object that specifies the type of service object to get. /// A service object of type serviceType, or null if there is no service object of type serviceType. - [RequiresUnreferencedCode("Legacy EF5 method, not trimming-safe.")] - public object? GetService(Type serviceType) - { - if (serviceType == null) - throw new ArgumentNullException(nameof(serviceType)); - - // In legacy Entity Framework, this is the entry point for obtaining Npgsql's - // implementation of DbProviderServices. We use reflection for all types to - // avoid any dependencies on EF stuff in this project. EF6 (and of course EF Core) do not use this method. - - if (serviceType.FullName != "System.Data.Common.DbProviderServices") - return null; - - // User has requested a legacy EF DbProviderServices implementation. Check our cache first. - if (_legacyEntityFrameworkServices != null) - return _legacyEntityFrameworkServices; - - // First time, attempt to find the EntityFramework5.Npgsql assembly and load the type via reflection - var assemblyName = typeof(NpgsqlFactory).GetTypeInfo().Assembly.GetName(); - assemblyName.Name = "EntityFramework5.Npgsql"; - Assembly npgsqlEfAssembly; - try { - npgsqlEfAssembly = Assembly.Load(new AssemblyName(assemblyName.FullName)); - } catch { - return null; - } - - Type? npgsqlServicesType; - if ((npgsqlServicesType = npgsqlEfAssembly.GetType("Npgsql.NpgsqlServices")) == null || - npgsqlServicesType.GetProperty("Instance") == null) - throw new Exception("EntityFramework5.Npgsql assembly does not seem to contain the correct type!"); - - return _legacyEntityFrameworkServices = npgsqlServicesType - .GetProperty("Instance", BindingFlags.Public | BindingFlags.Static)! 
- .GetMethod!.Invoke(null, new object[0]); - } - - static object? _legacyEntityFrameworkServices; + public object? GetService(Type serviceType) => null; #endregion -} \ No newline at end of file +} From 9ce3ba0a35255cc1a419f0096d51ba0eb48a8f60 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Fri, 25 Aug 2023 18:10:29 +0300 Subject: [PATCH 185/761] Fix potential lost messages/protocol desync after resetting oversize buffer (#5219) Fixes #5218 --- src/Npgsql/Internal/NpgsqlConnector.cs | 14 +++++++ test/Npgsql.Tests/CommandTests.cs | 56 ++++++++++++++++++++++++++ 2 files changed, 70 insertions(+) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index e9cc5b76ec..e4cab9073b 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -2322,6 +2322,20 @@ void ResetReadBuffer() { if (_origReadBuffer != null) { + Debug.Assert(_origReadBuffer.ReadBytesLeft == 0); + Debug.Assert(_origReadBuffer.ReadPosition == 0); + if (ReadBuffer.ReadBytesLeft > 0) + { + // There is still something in the buffer which we haven't read yet + // In most cases it's ParameterStatus which can be sent asynchronously + // If in some extreme case we have too much data left in the buffer to store in the original buffer + // we just leave the oversize buffer as is and will try again on next reset + if (ReadBuffer.ReadBytesLeft > _origReadBuffer.Size) + return; + + ReadBuffer.CopyTo(_origReadBuffer); + } + ReadBuffer.Dispose(); ReadBuffer = _origReadBuffer; _origReadBuffer = null; diff --git a/test/Npgsql.Tests/CommandTests.cs b/test/Npgsql.Tests/CommandTests.cs index 554080f16a..68c559509f 100644 --- a/test/Npgsql.Tests/CommandTests.cs +++ b/test/Npgsql.Tests/CommandTests.cs @@ -1512,6 +1512,62 @@ SELECT generate_series(1, 1000000) AS "i" Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(42)); } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5218")] + [Description("Make sure we do not lose unread 
messages after resetting oversize buffer")] + public async Task Oversize_buffer_lost_messages() + { + if (IsMultiplexing) + return; + + var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + { + NoResetOnClose = true + }; + await using var mock = PgPostmasterMock.Start(csb.ConnectionString); + await using var dataSource = CreateDataSource(mock.ConnectionString); + await using var connection = await dataSource.OpenConnectionAsync(); + var connector = connection.Connector!; + + var server = await mock.WaitForServerConnection(); + await server + .WriteParseComplete() + .WriteBindComplete() + .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Text)) + .WriteDataRowWithFlush(Encoding.ASCII.GetBytes(new string('a', connection.Settings.ReadBufferSize * 2))); + // Just to make sure we have enough space + await server.FlushAsync(); + await server + .WriteDataRow(Encoding.ASCII.GetBytes("abc")) + .WriteCommandComplete() + .WriteReadyForQuery() + .WriteParameterStatus("SomeKey", "SomeValue") + .FlushAsync(); + + await using var cmd = connection.CreateCommand(); + cmd.CommandText = "SELECT 1"; + await using (await cmd.ExecuteReaderAsync()) { } + + await connection.CloseAsync(); + await connection.OpenAsync(); + + Assert.AreSame(connector, connection.Connector); + // We'll get new value after the next query reads ParameterStatus from the buffer + Assert.That(connection.PostgresParameters, Does.Not.ContainKey("SomeKey").WithValue("SomeValue")); + + await server + .WriteParseComplete() + .WriteBindComplete() + .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Text)) + .WriteDataRow(Encoding.ASCII.GetBytes("abc")) + .WriteCommandComplete() + .WriteReadyForQuery() + .FlushAsync(); + + await cmd.ExecuteNonQueryAsync(); + + Assert.That(connection.PostgresParameters, Contains.Key("SomeKey").WithValue("SomeValue")); + } + #region Logging [Test] From 820cfcddf95dd7736affa53fafd4f4e04b473e04 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Fri, 25 Aug 2023 
18:11:21 +0300 Subject: [PATCH 186/761] Fix seek for ColumnStream (#5224) Fixes #5223 --- .../Internal/NpgsqlReadBuffer.Stream.cs | 19 ++++---- test/Npgsql.Tests/ReaderTests.cs | 44 +++++++++++++++++++ 2 files changed, 55 insertions(+), 8 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlReadBuffer.Stream.cs b/src/Npgsql/Internal/NpgsqlReadBuffer.Stream.cs index 8734db3ffb..cd38bcad0f 100644 --- a/src/Npgsql/Internal/NpgsqlReadBuffer.Stream.cs +++ b/src/Npgsql/Internal/NpgsqlReadBuffer.Stream.cs @@ -65,7 +65,7 @@ public override long Position { if (value < 0) throw new ArgumentOutOfRangeException(nameof(value), "Non - negative number required."); - Seek(_start + value, SeekOrigin.Begin); + Seek(value, SeekOrigin.Begin); } } @@ -87,8 +87,9 @@ public override long Seek(long offset, SeekOrigin origin) var tempPosition = unchecked(_start + (int)offset); if (offset < 0 || tempPosition < _start) throw new IOException(seekBeforeBegin); - _buf.ReadPosition = _start; - return tempPosition; + _buf.ReadPosition = tempPosition; + _read = (int)offset; + return _read; } case SeekOrigin.Current: { @@ -96,15 +97,17 @@ public override long Seek(long offset, SeekOrigin origin) if (unchecked(_buf.ReadPosition + offset) < _start || tempPosition < _start) throw new IOException(seekBeforeBegin); _buf.ReadPosition = tempPosition; - return tempPosition; + _read += (int)offset; + return _read; } case SeekOrigin.End: { - var tempPosition = unchecked(_len + (int)offset); - if (unchecked(_len + offset) < _start || tempPosition < _start) + var tempPosition = unchecked(_start + _len + (int)offset); + if (unchecked(_start + _len + offset) < _start || tempPosition < _start) throw new IOException(seekBeforeBegin); _buf.ReadPosition = tempPosition; - return tempPosition; + _read = _len + (int)offset; + return _read; } default: throw new ArgumentOutOfRangeException(nameof(origin), "Invalid seek origin."); @@ -238,4 +241,4 @@ static void ValidateArguments(byte[] buffer, int offset, int count) if 
(buffer.Length - offset < count) throw new ArgumentException("Offset and length were out of bounds for the array or count is greater than the number of elements from index to the end of the source collection."); } -} \ No newline at end of file +} diff --git a/test/Npgsql.Tests/ReaderTests.cs b/test/Npgsql.Tests/ReaderTests.cs index 1abd46909e..8f47f53aab 100644 --- a/test/Npgsql.Tests/ReaderTests.cs +++ b/test/Npgsql.Tests/ReaderTests.cs @@ -1468,6 +1468,50 @@ public async Task GetStream_in_middle_of_column_throws([Values] bool async) Assert.That(() => reader.GetStream(0), Throws.Exception.TypeOf()); } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5223")] + public async Task GetStream_seek() + { + // Sequential doesn't allow to seek + if (IsSequential) + return; + + await using var conn = await OpenConnectionAsync(); + await using var cmd = conn.CreateCommand(); + cmd.CommandText = "SELECT 'abcdefgh'"; + await using var reader = await cmd.ExecuteReaderAsync(); + await reader.ReadAsync(); + + var buffer = new byte[4]; + + await using var stream = reader.GetStream(0); + Assert.IsTrue(stream.CanSeek); + + var seekPosition = stream.Seek(-1, SeekOrigin.End); + Assert.That(seekPosition, Is.EqualTo(stream.Length - 1)); + var read = stream.Read(buffer); + Assert.That(read, Is.EqualTo(1)); + Assert.That(Encoding.ASCII.GetString(buffer, 0, 1), Is.EqualTo("h")); + read = stream.Read(buffer); + Assert.That(read, Is.EqualTo(0)); + + seekPosition = stream.Seek(2, SeekOrigin.Begin); + Assert.That(seekPosition, Is.EqualTo(2)); + read = stream.Read(buffer); + Assert.That(read, Is.EqualTo(buffer.Length)); + Assert.That(Encoding.ASCII.GetString(buffer), Is.EqualTo("cdef")); + + seekPosition = stream.Seek(-3, SeekOrigin.Current); + Assert.That(seekPosition, Is.EqualTo(3)); + read = stream.Read(buffer); + Assert.That(read, Is.EqualTo(buffer.Length)); + Assert.That(Encoding.ASCII.GetString(buffer), Is.EqualTo("defg")); + + stream.Position = 1; + read = 
stream.Read(buffer); + Assert.That(read, Is.EqualTo(buffer.Length)); + Assert.That(Encoding.ASCII.GetString(buffer), Is.EqualTo("bcde")); + } + #endregion GetBytes / GetStream #region GetChars / GetTextReader From edb4d5a3ed513139d2603ea538a7369f55290f4e Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Fri, 25 Aug 2023 18:12:08 +0300 Subject: [PATCH 187/761] Reduce the size of NpgsqlDataReader.NextResult async state machine (#5217) --- src/Npgsql/NpgsqlDataReader.cs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 04ad1aff28..3e6385eb5f 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -436,15 +436,15 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo } else // Non-prepared/preparing flow { - var pStatement = statement.PreparedStatement; - if (pStatement != null) + preparedStatement = statement.PreparedStatement; + if (preparedStatement != null) { - Debug.Assert(!pStatement.IsPrepared); - if (pStatement.StatementBeingReplaced != null) + Debug.Assert(!preparedStatement.IsPrepared); + if (preparedStatement.StatementBeingReplaced != null) { Expect(await Connector.ReadMessage(async), Connector); - pStatement.StatementBeingReplaced.CompleteUnprepare(); - pStatement.StatementBeingReplaced = null; + preparedStatement.StatementBeingReplaced.CompleteUnprepare(); + preparedStatement.StatementBeingReplaced = null; } } @@ -452,7 +452,7 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo if (statement.IsPreparing) { - pStatement!.State = PreparedState.Prepared; + preparedStatement!.State = PreparedState.Prepared; Connector.PreparedStatementManager.NumPrepared++; statement.IsPreparing = false; } @@ -466,7 +466,7 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo // RowDescription messages are cached on the connector, but if we're auto-preparing, we need to // 
clone our own copy which will last beyond the lifetime of this invocation. - BackendMessageCode.RowDescription => pStatement == null + BackendMessageCode.RowDescription => preparedStatement == null ? (RowDescriptionMessage)msg : ((RowDescriptionMessage)msg).Clone(), From d8c70e561740b701909d6aa08d069452e971f864 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Fri, 25 Aug 2023 18:12:30 +0300 Subject: [PATCH 188/761] Fix possible nre while writing to RawCopyStream after break (#5213) Fixes #5209 --- src/Npgsql/NpgsqlRawCopyStream.cs | 50 ++++++++--------------- test/Npgsql.Tests/CopyTests.cs | 24 +++++++++++ test/Npgsql.Tests/Support/PgServerMock.cs | 6 +-- 3 files changed, 44 insertions(+), 36 deletions(-) diff --git a/src/Npgsql/NpgsqlRawCopyStream.cs b/src/Npgsql/NpgsqlRawCopyStream.cs index fc503d0746..77153f830c 100644 --- a/src/Npgsql/NpgsqlRawCopyStream.cs +++ b/src/Npgsql/NpgsqlRawCopyStream.cs @@ -143,25 +143,17 @@ public override void Write(ReadOnlySpan buffer) return; } - try - { - // Value is too big, flush. - Flush(); - - if (buffer.Length <= _writeBuf.WriteSpaceLeft) - { - _writeBuf.WriteBytes(buffer); - return; - } + // Value is too big, flush. + Flush(); - // Value is too big even after a flush - bypass the buffer and write directly. - _writeBuf.DirectWrite(buffer); - } - catch (Exception e) + if (buffer.Length <= _writeBuf.WriteSpaceLeft) { - _connector.Break(e); - throw; + _writeBuf.WriteBytes(buffer); + return; } + + // Value is too big even after a flush - bypass the buffer and write directly. + _writeBuf.DirectWrite(buffer); } #if NETSTANDARD2_0 @@ -188,25 +180,17 @@ async ValueTask WriteAsyncInternal(ReadOnlyMemory buffer, CancellationToke return; } - try - { - // Value is too big, flush. - await FlushAsync(true, cancellationToken); - - if (buffer.Length <= _writeBuf.WriteSpaceLeft) - { - _writeBuf.WriteBytes(buffer.Span); - return; - } + // Value is too big, flush. 
+ await FlushAsync(true, cancellationToken); - // Value is too big even after a flush - bypass the buffer and write directly. - await _writeBuf.DirectWrite(buffer, true, cancellationToken); - } - catch (Exception e) + if (buffer.Length <= _writeBuf.WriteSpaceLeft) { - _connector.Break(e); - throw; + _writeBuf.WriteBytes(buffer.Span); + return; } + + // Value is too big even after a flush - bypass the buffer and write directly. + await _writeBuf.DirectWrite(buffer, true, cancellationToken); } } @@ -580,4 +564,4 @@ public ValueTask DisposeAsync() Dispose(); return default; } -} \ No newline at end of file +} diff --git a/test/Npgsql.Tests/CopyTests.cs b/test/Npgsql.Tests/CopyTests.cs index 04c372f90c..41fabe6ddc 100644 --- a/test/Npgsql.Tests/CopyTests.cs +++ b/test/Npgsql.Tests/CopyTests.cs @@ -8,6 +8,7 @@ using System.Threading; using System.Threading.Tasks; using Npgsql.Internal; +using Npgsql.Tests.Support; using NpgsqlTypes; using NUnit.Framework; using static Npgsql.Tests.TestUtil; @@ -1127,6 +1128,29 @@ public async Task Copy_to_is_not_supported_in_regular_command_execution() Assert.That(() => conn.ExecuteNonQuery($@"COPY {table} (foo) TO stdin"), Throws.Exception.TypeOf()); } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5209")] + [Platform(Exclude = "MacOsX", Reason = "Write might not throw an exception")] + public async Task RawBinaryCopy_write_nre([Values] bool async) + { + await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); + + var server = await postmasterMock.WaitForServerConnection(); + await server + .WriteCopyInResponse(isBinary: true) + .FlushAsync(); + + await using var stream = await conn.BeginRawBinaryCopyAsync("COPY SomeTable (field_text, field_int4) FROM STDIN"); + server.Close(); + var value = Encoding.UTF8.GetBytes(new string('a', conn.Settings.WriteBufferSize * 
2)); + if (async) + Assert.ThrowsAsync(async () => await stream.WriteAsync(value)); + else + Assert.Throws(() => stream.Write(value)); + Assert.That(conn.FullState, Is.EqualTo(ConnectionState.Broken)); + } + #endregion #region Utils diff --git a/test/Npgsql.Tests/Support/PgServerMock.cs b/test/Npgsql.Tests/Support/PgServerMock.cs index 639124be5c..6a83cc0248 100644 --- a/test/Npgsql.Tests/Support/PgServerMock.cs +++ b/test/Npgsql.Tests/Support/PgServerMock.cs @@ -328,12 +328,12 @@ internal PgServerMock WriteBackendKeyData(int processId, int secret) internal PgServerMock WriteCancellationResponse() => WriteErrorResponse(PostgresErrorCodes.QueryCanceled, "Cancellation", "Query cancelled"); - internal PgServerMock WriteCopyInResponse() + internal PgServerMock WriteCopyInResponse(bool isBinary = false) { CheckDisposed(); _writeBuffer.WriteByte((byte)BackendMessageCode.CopyInResponse); _writeBuffer.WriteInt32(5); - _writeBuffer.WriteByte(0); + _writeBuffer.WriteByte(isBinary ? (byte)1 : (byte)0); _writeBuffer.WriteInt16(1); _writeBuffer.WriteInt16(0); return this; @@ -381,4 +381,4 @@ public void Dispose() _disposed = true; } -} \ No newline at end of file +} From 8c0e93b32fbffbb7802cf31a925195c2128629a5 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Fri, 25 Aug 2023 18:35:44 +0300 Subject: [PATCH 189/761] Fix possible protocol desync with SchemaOnly and auto prepare (#5221) Fixes #5220 --- src/Npgsql/NpgsqlDataReader.cs | 15 ++++++++------- test/Npgsql.Tests/AutoPrepareTests.cs | 5 ++++- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 3e6385eb5f..db30da551c 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -730,6 +730,12 @@ async Task NextResultSchemaOnly(bool async, bool isConsuming = false, Canc default: throw Connector.UnexpectedMessageReceived(msg.Code); } + + if (_statements.Skip(StatementIndex + 1).All(x => x.IsPrepared)) + { + // There are no 
more queries, we're done. Read to the RFQ. + Expect(await Connector.ReadMessage(async), Connector); + } } // Found a resultset @@ -737,13 +743,8 @@ async Task NextResultSchemaOnly(bool async, bool isConsuming = false, Canc return true; } - // There are no more queries, we're done. Read to the RFQ. - if (!_statements.All(s => s.IsPrepared)) - { - Expect(await Connector.ReadMessage(async), Connector); - RowDescription = null; - State = ReaderState.Consumed; - } + RowDescription = null; + State = ReaderState.Consumed; return false; } diff --git a/test/Npgsql.Tests/AutoPrepareTests.cs b/test/Npgsql.Tests/AutoPrepareTests.cs index 2ce7171fa0..14d6997230 100644 --- a/test/Npgsql.Tests/AutoPrepareTests.cs +++ b/test/Npgsql.Tests/AutoPrepareTests.cs @@ -514,7 +514,7 @@ await conn.ExecuteNonQueryAsync( Assert.That(await conn.ExecuteScalarAsync("SELECT 3"), Is.EqualTo(3)); } - [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4404")] + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4404"), IssueLink("https://github.com/npgsql/npgsql/issues/5220")] public async Task SchemaOnly() { await using var dataSource = CreateDataSource(csb => @@ -529,6 +529,9 @@ public async Task SchemaOnly() { await using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SchemaOnly); } + + // Make sure there is no protocol desync due to #5220 + await cmd.ExecuteScalarAsync(); } [Test] From 01aaadff89484148fc67fec4d5e2cbbe720ba43a Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Fri, 25 Aug 2023 19:17:44 +0300 Subject: [PATCH 190/761] Stop reading whole result set with ExecuteScalar (#5216) --- src/Npgsql/NpgsqlCommand.cs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index f5723546b2..1ea97b4f73 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -1231,10 +1231,7 @@ async Task ExecuteNonQuery(bool async, CancellationToken cancellationToken) try { var read = async ? 
await reader.ReadAsync(cancellationToken).ConfigureAwait(false) : reader.Read(); - var value = read && reader.FieldCount != 0 ? reader.GetValue(0) : null; - // We read the whole result set to trigger any errors - while (async ? await reader.NextResultAsync(cancellationToken).ConfigureAwait(false) : reader.NextResult()) ; - return value; + return read && reader.FieldCount != 0 ? reader.GetValue(0) : null; } finally { From 0ac21a30cb286931bac5503d15a720120bef4f9b Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Sat, 26 Aug 2023 11:11:27 +0300 Subject: [PATCH 191/761] Fix possible race condition with unprepare (#5228) Fixes #5227 --- src/Npgsql/NpgsqlCommand.cs | 17 +++-------------- 1 file changed, 3 insertions(+), 14 deletions(-) diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index 1ea97b4f73..26d822aaf4 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -777,9 +777,8 @@ async Task Unprepare(bool async, CancellationToken cancellationToken = default) using (connector.StartUserAction(cancellationToken)) { - var sendTask = SendClose(connector, async, cancellationToken); - if (sendTask.IsFaulted) - sendTask.GetAwaiter().GetResult(); + // Just wait for SendClose to complete since each statement takes no more than 20 bytes + await SendClose(connector, async, cancellationToken).ConfigureAwait(false); foreach (var batchCommand in InternalBatchCommands) { @@ -798,11 +797,6 @@ async Task Unprepare(bool async, CancellationToken cancellationToken = default) } Expect(await connector.ReadMessage(async).ConfigureAwait(false), connector); - - if (async) - await sendTask.ConfigureAwait(false); - else - sendTask.GetAwaiter().GetResult(); } } @@ -1136,16 +1130,11 @@ async Task SendClose(NpgsqlConnector connector, bool async, CancellationToken ca { BeginSend(connector); - var i = 0; - var syncCaller = !async; foreach (var batchCommand in InternalBatchCommands.Where(s => s.IsPrepared)) { - if (syncCaller && ShouldSchedule(ref async, 
i)) - await new TaskSchedulerAwaitable(ConstrainedConcurrencyScheduler); - + // No need to force async here since each statement takes no more than 20 bytes await connector.WriteClose(StatementOrPortal.Statement, batchCommand.StatementName, async, cancellationToken).ConfigureAwait(false); batchCommand.PreparedStatement!.State = PreparedState.BeingUnprepared; - i++; } await connector.WriteSync(async, cancellationToken).ConfigureAwait(false); From 255c5ba5bb8d7e2d1211d5ff90c25601585f7ffa Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Sun, 27 Aug 2023 13:37:37 +0300 Subject: [PATCH 192/761] Fix DeriveParametersForQuery not waiting for sendTask to complete (#5230) Fixes #5229 --- src/Npgsql/NpgsqlCommand.cs | 181 +++++++++++++++++++++--------------- 1 file changed, 105 insertions(+), 76 deletions(-) diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index 26d822aaf4..290c3d8d35 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -536,58 +536,74 @@ void DeriveParametersForQuery(NpgsqlConnector connector) if (sendTask.IsFaulted) sendTask.GetAwaiter().GetResult(); - foreach (var batchCommand in InternalBatchCommands) + try { - Expect( - connector.ReadMessage(async: false).GetAwaiter().GetResult(), connector); - var paramTypeOIDs = Expect( - connector.ReadMessage(async: false).GetAwaiter().GetResult(), connector).TypeOIDs; - - if (batchCommand.PositionalParameters.Count != paramTypeOIDs.Count) + foreach (var batchCommand in InternalBatchCommands) { - connector.SkipUntil(BackendMessageCode.ReadyForQuery); - Parameters.Clear(); - throw new NpgsqlException("There was a mismatch in the number of derived parameters between the Npgsql SQL parser and the PostgreSQL parser. 
Please report this as bug to the Npgsql developers (https://github.com/npgsql/npgsql/issues)."); - } + Expect( + connector.ReadMessage(async: false).GetAwaiter().GetResult(), connector); + var paramTypeOIDs = Expect( + connector.ReadMessage(async: false).GetAwaiter().GetResult(), connector).TypeOIDs; - for (var i = 0; i < paramTypeOIDs.Count; i++) - { - try + if (batchCommand.PositionalParameters.Count != paramTypeOIDs.Count) { - var param = batchCommand.PositionalParameters[i]; - var paramOid = paramTypeOIDs[i]; + connector.SkipUntil(BackendMessageCode.ReadyForQuery); + Parameters.Clear(); + throw new NpgsqlException( + "There was a mismatch in the number of derived parameters between the Npgsql SQL parser and the PostgreSQL parser. Please report this as bug to the Npgsql developers (https://github.com/npgsql/npgsql/issues)."); + } - var (npgsqlDbType, postgresType) = connector.TypeMapper.GetTypeInfoByOid(paramOid); + for (var i = 0; i < paramTypeOIDs.Count; i++) + { + try + { + var param = batchCommand.PositionalParameters[i]; + var paramOid = paramTypeOIDs[i]; - if (param.NpgsqlDbType != NpgsqlDbType.Unknown && param.NpgsqlDbType != npgsqlDbType) - throw new NpgsqlException("The backend parser inferred different types for parameters with the same name. Please try explicit casting within your SQL statement or batch or use different placeholder names."); + var (npgsqlDbType, postgresType) = connector.TypeMapper.GetTypeInfoByOid(paramOid); - param.DataTypeName = postgresType.DisplayName; - param.PostgresType = postgresType; - if (npgsqlDbType.HasValue) - param.NpgsqlDbType = npgsqlDbType.Value; + if (param.NpgsqlDbType != NpgsqlDbType.Unknown && param.NpgsqlDbType != npgsqlDbType) + throw new NpgsqlException( + "The backend parser inferred different types for parameters with the same name. 
Please try explicit casting within your SQL statement or batch or use different placeholder names."); + + param.DataTypeName = postgresType.DisplayName; + param.PostgresType = postgresType; + if (npgsqlDbType.HasValue) + param.NpgsqlDbType = npgsqlDbType.Value; + } + catch + { + connector.SkipUntil(BackendMessageCode.ReadyForQuery); + Parameters.Clear(); + throw; + } } - catch + + var msg = connector.ReadMessage(async: false).GetAwaiter().GetResult(); + switch (msg.Code) { - connector.SkipUntil(BackendMessageCode.ReadyForQuery); - Parameters.Clear(); - throw; + case BackendMessageCode.RowDescription: + case BackendMessageCode.NoData: + break; + default: + throw connector.UnexpectedMessageReceived(msg.Code); } } - var msg = connector.ReadMessage(async: false).GetAwaiter().GetResult(); - switch (msg.Code) + Expect(connector.ReadMessage(async: false).GetAwaiter().GetResult(), connector); + } + finally + { + try { - case BackendMessageCode.RowDescription: - case BackendMessageCode.NoData: - break; - default: - throw connector.UnexpectedMessageReceived(msg.Code); + // Make sure sendTask is complete so we don't race against asynchronous flush + sendTask.GetAwaiter().GetResult(); + } + catch + { + // ignored } } - - Expect(connector.ReadMessage(async: false).GetAwaiter().GetResult(), connector); - sendTask.GetAwaiter().GetResult(); } } @@ -672,53 +688,66 @@ static async Task PrepareLong(NpgsqlCommand command, bool async, NpgsqlConnector if (sendTask.IsFaulted) sendTask.GetAwaiter().GetResult(); - // Loop over statements, skipping those that are already prepared (because they were persisted) - var isFirst = true; - foreach (var batchCommand in command.InternalBatchCommands) + try { - if (!batchCommand.IsPreparing) - continue; + // Loop over statements, skipping those that are already prepared (because they were persisted) + var isFirst = true; + foreach (var batchCommand in command.InternalBatchCommands) + { + if (!batchCommand.IsPreparing) + continue; - var pStatement = 
batchCommand.PreparedStatement!; + var pStatement = batchCommand.PreparedStatement!; - if (pStatement.StatementBeingReplaced != null) - { - Expect(await connector.ReadMessage(async).ConfigureAwait(false), connector); - pStatement.StatementBeingReplaced.CompleteUnprepare(); - pStatement.StatementBeingReplaced = null; + if (pStatement.StatementBeingReplaced != null) + { + Expect(await connector.ReadMessage(async).ConfigureAwait(false), connector); + pStatement.StatementBeingReplaced.CompleteUnprepare(); + pStatement.StatementBeingReplaced = null; + } + + Expect(await connector.ReadMessage(async).ConfigureAwait(false), connector); + Expect(await connector.ReadMessage(async).ConfigureAwait(false), connector); + var msg = await connector.ReadMessage(async).ConfigureAwait(false); + switch (msg.Code) + { + case BackendMessageCode.RowDescription: + // Clone the RowDescription for use with the prepared statement (the one we have is reused + // by the connection) + var description = ((RowDescriptionMessage)msg).Clone(); + command.FixupRowDescription(description, isFirst); + batchCommand.Description = description; + break; + case BackendMessageCode.NoData: + batchCommand.Description = null; + break; + default: + throw connector.UnexpectedMessageReceived(msg.Code); + } + + pStatement.State = PreparedState.Prepared; + connector.PreparedStatementManager.NumPrepared++; + batchCommand.IsPreparing = false; + isFirst = false; } - Expect(await connector.ReadMessage(async).ConfigureAwait(false), connector); - Expect(await connector.ReadMessage(async).ConfigureAwait(false), connector); - var msg = await connector.ReadMessage(async).ConfigureAwait(false); - switch (msg.Code) + Expect(await connector.ReadMessage(async).ConfigureAwait(false), connector); + } + finally + { + try { - case BackendMessageCode.RowDescription: - // Clone the RowDescription for use with the prepared statement (the one we have is reused - // by the connection) - var description = 
((RowDescriptionMessage)msg).Clone(); - command.FixupRowDescription(description, isFirst); - batchCommand.Description = description; - break; - case BackendMessageCode.NoData: - batchCommand.Description = null; - break; - default: - throw connector.UnexpectedMessageReceived(msg.Code); + // Make sure sendTask is complete so we don't race against asynchronous flush + if (async) + await sendTask.ConfigureAwait(false); + else + sendTask.GetAwaiter().GetResult(); + } + catch + { + // ignored } - - pStatement.State = PreparedState.Prepared; - connector.PreparedStatementManager.NumPrepared++; - batchCommand.IsPreparing = false; - isFirst = false; } - - Expect(await connector.ReadMessage(async).ConfigureAwait(false), connector); - - if (async) - await sendTask.ConfigureAwait(false); - else - sendTask.GetAwaiter().GetResult(); } LogMessages.CommandPreparedExplicitly(connector.CommandLogger, connector.Id); From 274a1d174b44640d727f57751921541f335d6ffb Mon Sep 17 00:00:00 2001 From: Brar Piening Date: Sun, 27 Aug 2023 17:26:40 +0200 Subject: [PATCH 193/761] Fix a few warnings for .NET Standard in the EncodingExtensions shim (#5232) --- src/Npgsql/Shims/EncodingExtensions.cs | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/Npgsql/Shims/EncodingExtensions.cs b/src/Npgsql/Shims/EncodingExtensions.cs index 792e225af7..ae5622ce9d 100644 --- a/src/Npgsql/Shims/EncodingExtensions.cs +++ b/src/Npgsql/Shims/EncodingExtensions.cs @@ -1,7 +1,10 @@ +// ReSharper disable RedundantUsingDirective using System.Buffers; using System.Collections.Generic; using System.Runtime.InteropServices; +// ReSharper restore RedundantUsingDirective +// ReSharper disable once CheckNamespace namespace System.Text; static class EncodingExtensions @@ -105,9 +108,9 @@ public static int GetChars(this Encoding encoding, in ReadOnlySequence byt // If the incoming sequence is multi-segment, create a stateful Decoder // and use it as the workhorse. 
On the final iteration we'll pass flush=true. - ReadOnlySequence remainingBytes = bytes; - int originalCharsLength = chars.Length; - Decoder decoder = encoding.GetDecoder(); + var remainingBytes = bytes; + var originalCharsLength = chars.Length; + var decoder = encoding.GetDecoder(); bool isFinalSegment; do @@ -116,7 +119,7 @@ public static int GetChars(this Encoding encoding, in ReadOnlySequence byt var next = remainingBytes.GetPosition(firstSpan.Length); isFinalSegment = remainingBytes.IsSingleSegment; - int charsWrittenJustNow = decoder.GetChars(firstSpan, chars, flush: isFinalSegment); + var charsWrittenJustNow = decoder.GetChars(firstSpan, chars, flush: isFinalSegment); chars = chars.Slice(charsWrittenJustNow); remainingBytes = remainingBytes.Slice(next); } while (!isFinalSegment); From 20d0b6c8a82b4d4d5d948b82e517bd05dcc742c4 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Tue, 29 Aug 2023 08:43:05 +0300 Subject: [PATCH 194/761] Make CreateBatchCommand return NpgsqlBatchCommand (#5241) Closes #5238 --- src/Npgsql/NpgsqlBatch.cs | 7 +++++-- src/Npgsql/PublicAPI.Unshipped.txt | 1 + 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/src/Npgsql/NpgsqlBatch.cs b/src/Npgsql/NpgsqlBatch.cs index 06e15b4988..2c011f1d26 100644 --- a/src/Npgsql/NpgsqlBatch.cs +++ b/src/Npgsql/NpgsqlBatch.cs @@ -122,7 +122,10 @@ private protected NpgsqlBatch(NpgsqlDataSourceCommand command) } /// - protected override DbBatchCommand CreateDbBatchCommand() + protected override DbBatchCommand CreateDbBatchCommand() => CreateBatchCommand(); + + /// + public new NpgsqlBatchCommand CreateBatchCommand() => new NpgsqlBatchCommand(); /// @@ -197,4 +200,4 @@ internal static NpgsqlBatch CreateCachedBatch(NpgsqlConnection connection) batch.Command.IsCacheable = true; return batch; } -} \ No newline at end of file +} diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index badf54239b..aa795b81ff 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ 
b/src/Npgsql/PublicAPI.Unshipped.txt @@ -4,6 +4,7 @@ Npgsql.ChannelBinding Npgsql.ChannelBinding.Disable = 0 -> Npgsql.ChannelBinding Npgsql.ChannelBinding.Prefer = 1 -> Npgsql.ChannelBinding Npgsql.ChannelBinding.Require = 2 -> Npgsql.ChannelBinding +Npgsql.NpgsqlBatch.CreateBatchCommand() -> Npgsql.NpgsqlBatchCommand! Npgsql.NpgsqlConnectionStringBuilder.ChannelBinding.get -> Npgsql.ChannelBinding Npgsql.NpgsqlConnectionStringBuilder.ChannelBinding.set -> void Npgsql.NpgsqlSlimDataSourceBuilder.EnableFullTextSearch() -> Npgsql.NpgsqlSlimDataSourceBuilder! From 3a1155d5bf225c0b4405da2f2e136c3d4ee9a6ec Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Tue, 29 Aug 2023 12:47:35 +0300 Subject: [PATCH 195/761] Fix NRE while disposing NpgsqlBatch (#5240) Fixes #5239 --- src/Npgsql/NpgsqlCommand.cs | 3 ++- test/Npgsql.Tests/BatchTests.cs | 35 +++++++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+), 1 deletion(-) diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index 290c3d8d35..6f83b8f0ad 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -1646,7 +1646,8 @@ internal void Reset() // TODO: Statements isn't cleared/recycled, leaving this for now, since it'll be replaced by the new batching API _commandText = string.Empty; CommandType = CommandType.Text; - _parameters.Clear(); + // Can be null if it's owned by batch + _parameters?.Clear(); _timeout = null; _allResultTypesAreUnknown = false; EnableErrorBarriers = false; diff --git a/test/Npgsql.Tests/BatchTests.cs b/test/Npgsql.Tests/BatchTests.cs index 2e7e3666fa..2983285f85 100644 --- a/test/Npgsql.Tests/BatchTests.cs +++ b/test/Npgsql.Tests/BatchTests.cs @@ -753,6 +753,41 @@ public async Task Batch_with_auto_prepare_reuse() } } +#if NET6_0_OR_GREATER // no batch reuse until 6.0 + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5239")] + public async Task Batch_dispose_reuse() + { + await using var conn = await OpenConnectionAsync(); + NpgsqlBatch 
firstBatch; + await using (var batch = conn.CreateBatch()) + { + firstBatch = batch; + + batch.BatchCommands.Add(new NpgsqlBatchCommand("SELECT 1")); + Assert.That(await batch.ExecuteScalarAsync(), Is.EqualTo(1)); + } + + await using (var batch = conn.CreateBatch()) + { + Assert.That(batch, Is.SameAs(firstBatch)); + + batch.BatchCommands.Add(new NpgsqlBatchCommand("SELECT 2")); + Assert.That(await batch.ExecuteScalarAsync(), Is.EqualTo(2)); + } + + await conn.CloseAsync(); + await conn.OpenAsync(); + + await using (var batch = conn.CreateBatch()) + { + Assert.That(batch, Is.SameAs(firstBatch)); + + batch.BatchCommands.Add(new NpgsqlBatchCommand("SELECT 3")); + Assert.That(await batch.ExecuteScalarAsync(), Is.EqualTo(3)); + } + } +#endif + #endregion Miscellaneous #region Logging From 54e02e6b70555aaad281633ee32a5bf15d3c719f Mon Sep 17 00:00:00 2001 From: Kevin Jones Date: Tue, 29 Aug 2023 13:04:50 -0400 Subject: [PATCH 196/761] Improve HMAC performance (#5237) --- src/Npgsql/Internal/NpgsqlConnector.Auth.cs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs index d0d3e135e8..d5ea9af5e1 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs @@ -258,10 +258,16 @@ static byte[] Xor(byte[] buffer1, byte[] buffer2) return buffer1; } - static byte[] HMAC(byte[] data, string key) + static byte[] HMAC(byte[] key, string data) { - using var hmacsha256 = new HMACSHA256(data); - return hmacsha256.ComputeHash(Encoding.UTF8.GetBytes(key)); + byte[] dataBytes = Encoding.UTF8.GetBytes(data); +#if NET7_0_OR_GREATER + return HMACSHA256.HashData(key, dataBytes); +#else + using var ih = IncrementalHash.CreateHMAC(HashAlgorithmName.SHA256, key); + ih.AppendData(dataBytes); + return ih.GetHashAndReset(); +#endif } async Task AuthenticateMD5(string username, byte[] salt, bool async, CancellationToken cancellationToken = 
default) From 58742f190f10e7c1aa2762427aaea5208c8ce4ae Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 30 Aug 2023 00:43:59 +0200 Subject: [PATCH 197/761] Bump Microsoft.NET.Test.Sdk from 17.7.1 to 17.7.2 (#5243) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 1953a0bcc6..364cfba718 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -23,7 +23,7 @@ - + From bf7a0afcdc05c8706bb6f483970da3b4a7430797 Mon Sep 17 00:00:00 2001 From: Liam Laverty Date: Wed, 30 Aug 2023 12:57:06 +0100 Subject: [PATCH 198/761] Updates Npgsql Dependency Injection readme.md to include a lambda config example (#5244) Adds an on-page example of how to use lamda configs in the AddNpgsqlDataSource. --- src/Npgsql.DependencyInjection/README.md | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/src/Npgsql.DependencyInjection/README.md b/src/Npgsql.DependencyInjection/README.md index 16b87419f0..fc5063bea6 100644 --- a/src/Npgsql.DependencyInjection/README.md +++ b/src/Npgsql.DependencyInjection/README.md @@ -42,6 +42,16 @@ app.MapGet("/", async (NpgsqlDataSource dataSource) => }); ``` -Finally, the `AddNpgsqlDataSource` method also accepts a lambda parameter allowing you to configure aspects of Npgsql beyond the connection string. +Finally, the `AddNpgsqlDataSource` method also accepts a lambda parameter allowing you to configure aspects of Npgsql beyond the connection string, e.g. to configure `UseLoggerFactory` and `UseNetTopologySuite`: + +```csharp +var builder = WebApplication.CreateBuilder(args); + +builder.Services.AddNpgsqlDataSource( + "Host=pg_server;Username=test;Password=test;Database=test", + builder => builder + .UseLoggerFactory(loggerFactory) + .UseNetTopologySuite()); +``` For more information, [see the Npgsql documentation](https://www.npgsql.org/doc/index.html). 
From 265fcfb5ce5c9b51732371b946a98ef041ce7d08 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Sep 2023 23:31:03 +0200 Subject: [PATCH 199/761] Bump Scriban.Signed from 5.7.0 to 5.8.0 (#5248) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 364cfba718..fe8995c70d 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -9,7 +9,7 @@ - + From 728730afd72eb1a322a88cb9ea96db96710140fd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Sep 2023 23:47:55 +0200 Subject: [PATCH 200/761] Bump actions/checkout from 3 to 4 (#5250) --- .github/workflows/build.yml | 8 ++++---- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/native-aot.yml | 4 ++-- .github/workflows/rich-code-nav.yml | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index c94bc31dc2..9cd8f49577 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -58,7 +58,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: NuGet Cache uses: actions/cache@v3 @@ -141,7 +141,7 @@ jobs: sudo -u postgres psql -c "CREATE USER npgsql_tests_scram SUPERUSER PASSWORD 'npgsql_tests_scram'" # Uncomment the following to SSH into the agent running the build (https://github.com/mxschmitt/action-tmate) - #- uses: actions/checkout@v3 + #- uses: actions/checkout@v4 #- name: Setup tmate session # uses: mxschmitt/action-tmate@v3 @@ -340,7 +340,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: NuGet Cache uses: actions/cache@v3 @@ -382,7 +382,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup .NET Core SDK uses: actions/setup-dotnet@v3.2.0 diff 
--git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 25c9147bed..a4b0f44bbb 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -53,7 +53,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index 19197ac2f7..6b58f5f7cd 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -52,7 +52,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: NuGet Cache uses: actions/cache@v3 @@ -80,7 +80,7 @@ jobs: shell: bash # Uncomment the following to SSH into the agent running the build (https://github.com/mxschmitt/action-tmate) - #- uses: actions/checkout@v3 + #- uses: actions/checkout@v4 #- name: Setup tmate session # uses: mxschmitt/action-tmate@v3 diff --git a/.github/workflows/rich-code-nav.yml b/.github/workflows/rich-code-nav.yml index ff19cb984b..bc2db9b271 100644 --- a/.github/workflows/rich-code-nav.yml +++ b/.github/workflows/rich-code-nav.yml @@ -18,7 +18,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: NuGet Cache uses: actions/cache@v3 From 82777baf713284c0708c7ad905ba68b45020198f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 7 Sep 2023 11:09:44 +0200 Subject: [PATCH 201/761] Bump Scriban.Signed from 5.8.0 to 5.9.0 (#5252) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index fe8995c70d..4e7884dd80 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -9,7 +9,7 @@ - + From bc215685495b9bc86a9030b7548c4474f9daa1b0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 8 Sep 2023 13:15:10 +0200 Subject: [PATCH 202/761] Bump OpenTelemetry.API from 1.5.1 to 1.6.0 (#5254) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 4e7884dd80..9787409770 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -4,7 +4,7 @@ - + From 410a974d186ea283ebccd6c8bf2b948d13d604c6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 9 Sep 2023 00:45:23 +0200 Subject: [PATCH 203/761] Bump BenchmarkDotNet from 0.13.7 to 0.13.8 (#5257) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 9787409770..92a5862d21 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -32,7 +32,7 @@ - + From 62228e63d082594e3dfefad431391cea3d3a2c7c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 9 Sep 2023 00:45:54 +0200 Subject: [PATCH 204/761] Bump BenchmarkDotNet.Diagnostics.Windows from 0.13.7 to 0.13.8 (#5256) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 92a5862d21..41220a98ac 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -34,7 +34,7 @@ - + From 6353c2a1b0181a1a7bb1a036a58f6bc94234ba48 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Thu, 14 Sep 2023 13:32:41 +0300 Subject: [PATCH 205/761] Disable Connect_with_only_non_ssl_allowed_user test on windows (#5261) Because it's flaky due to Postgresql bug --- src/Npgsql/Internal/NpgsqlConnector.cs | 4 +--- test/Npgsql.Tests/SecurityTests.cs | 1 + 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs 
b/src/Npgsql/Internal/NpgsqlConnector.cs index e4cab9073b..4fb25fa761 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -587,7 +587,7 @@ static async Task OpenCore( conn.WriteStartupMessage(username); await conn.Flush(async, cancellationToken); - var cancellationRegistration = conn.StartCancellableOperation(cancellationToken, attemptPgCancellation: false); + using var cancellationRegistration = conn.StartCancellableOperation(cancellationToken, attemptPgCancellation: false); try { await conn.Authenticate(username, timeout, async, cancellationToken); @@ -614,8 +614,6 @@ await OpenCore( return; } - using var _ = cancellationRegistration; - // We treat BackendKeyData as optional because some PostgreSQL-like database // don't send it (CockroachDB, CrateDB) var msg = await conn.ReadMessage(async); diff --git a/test/Npgsql.Tests/SecurityTests.cs b/test/Npgsql.Tests/SecurityTests.cs index dee4a71a08..9aa2ee7d50 100644 --- a/test/Npgsql.Tests/SecurityTests.cs +++ b/test/Npgsql.Tests/SecurityTests.cs @@ -255,6 +255,7 @@ public async Task Connect_with_only_ssl_allowed_user([Values] bool multiplexing, } [Test] + [Platform(Exclude = "Win", Reason = "Postgresql doesn't close connection correctly on windows which might result in missing error message")] public async Task Connect_with_only_non_ssl_allowed_user([Values] bool multiplexing, [Values] bool keepAlive) { if (multiplexing && keepAlive) From 2d2245327faf3d39d570f2372a86bf9eadf0c8dd Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Thu, 14 Sep 2023 13:51:03 +0300 Subject: [PATCH 206/761] =?UTF-8?q?Fix=20completing=20TransactionScope=20w?= =?UTF-8?q?ith=20distributed=20transaction=20and=20undi=E2=80=A6=20(#5260)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes #5246 --- src/Npgsql/VolatileResourceManager.cs | 8 ++++++-- .../Npgsql.Tests/DistributedTransactionTests.cs | 17 +++++++++++++++++ 2 files changed, 23 insertions(+), 2 
deletions(-) diff --git a/src/Npgsql/VolatileResourceManager.cs b/src/Npgsql/VolatileResourceManager.cs index 84c28868e3..70afea0557 100644 --- a/src/Npgsql/VolatileResourceManager.cs +++ b/src/Npgsql/VolatileResourceManager.cs @@ -121,7 +121,11 @@ public void Commit(Enlistment enlistment) // if the user continues to use their connection after disposing the scope, and the MSDTC // requests a commit at that exact time. // To avoid this, we open a new connection for performing the 2nd phase. - using var conn2 = (NpgsqlConnection)((ICloneable)_connector.Connection).Clone(); + var settings = _connector.Connection.Settings.Clone(); + // Set Enlist to false because we might be in TransactionScope and we can't prepare transaction while being in an open transaction + // see #5246 + settings.Enlist = false; + using var conn2 = _connector.Connection.CloneWith(settings.ConnectionString); conn2.Open(); var connector = conn2.Connector!; @@ -301,4 +305,4 @@ static System.Data.IsolationLevel ConvertIsolationLevel(IsolationLevel isolation IsolationLevel.Snapshot => System.Data.IsolationLevel.Snapshot, _ => System.Data.IsolationLevel.Unspecified }; -} \ No newline at end of file +} diff --git a/test/Npgsql.Tests/DistributedTransactionTests.cs b/test/Npgsql.Tests/DistributedTransactionTests.cs index 93e350cc11..e55d6e7bd9 100644 --- a/test/Npgsql.Tests/DistributedTransactionTests.cs +++ b/test/Npgsql.Tests/DistributedTransactionTests.cs @@ -377,6 +377,23 @@ public void Connection_reuse_race_chaining_transaction([Values(false, true)] boo } } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5246")] + public void Transaction_complete_with_undisposed_connections() + { + using var deleteOuter = new TransactionScope(); + using (var delImidiate = new TransactionScope(TransactionScopeOption.RequiresNew)) + { + var deleteNow = EnlistOnDataSource.OpenConnection(); + deleteNow.ExecuteNonQuery("SELECT 'del_now'"); + var deleteNow2 = EnlistOnDataSource.OpenConnection(); + 
deleteNow2.ExecuteNonQuery("SELECT 'del_now2'"); + delImidiate.Complete(); + } + var deleteConn = EnlistOnDataSource.OpenConnection(); + deleteConn.ExecuteNonQuery("SELECT 'delete, this should commit last'"); + deleteOuter.Complete(); + } + #region Utilities // MSDTC is asynchronous, i.e. Commit/Rollback may return before the transaction has actually completed in the database; From fcebb3a6d9f153c043184d5a9f63d183b42ac927 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Thu, 14 Sep 2023 18:44:53 +0300 Subject: [PATCH 207/761] Test against PG16 in CI (#5262) --- .github/workflows/build.yml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 9cd8f49577..065f6a2924 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -30,27 +30,27 @@ jobs: fail-fast: false matrix: os: [ubuntu-22.04, windows-2022] - pg_major: [15, 14, 13, 12, 11] + pg_major: [16, 15, 14, 13, 12, 11] config: [Release] test_tfm: [net8.0] include: - os: ubuntu-22.04 - pg_major: 15 + pg_major: 16 config: Debug test_tfm: net8.0 - os: ubuntu-22.04 - pg_major: 15 + pg_major: 16 config: Release test_tfm: netcoreapp3.1 - os: macos-12 pg_major: 14 config: Release test_tfm: net8.0 - - os: ubuntu-22.04 - pg_major: 16 - config: Release - test_tfm: net8.0 - pg_prerelease: 'PG Prerelease' +# - os: ubuntu-22.04 +# pg_major: 17 +# config: Release +# test_tfm: net8.0 +# pg_prerelease: 'PG Prerelease' outputs: is_release: ${{ steps.analyze_tag.outputs.is_release }} From 6672f39ea33fc9a522574e70b27c45fd22455d9a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 19 Sep 2023 10:22:41 +0200 Subject: [PATCH 208/761] Bump xunit from 2.5.0 to 2.5.1 (#5267) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 41220a98ac..2620a142a8 100644 --- 
a/Directory.Packages.props +++ b/Directory.Packages.props @@ -25,7 +25,7 @@ - + From 4ffbff1ed89088912662dc0ae45b7a4ab355d405 Mon Sep 17 00:00:00 2001 From: Erik Desjardins <59450623+erikdesj@users.noreply.github.com> Date: Fri, 22 Sep 2023 11:34:37 -0400 Subject: [PATCH 209/761] PruneIdleConnectors: consider broken and lifetime-exceeded connections as pruned (#5184) Fixes #5180 --- src/Npgsql/PoolingDataSource.cs | 5 ++-- test/Npgsql.Tests/PoolTests.cs | 44 +++++++++++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 3 deletions(-) diff --git a/src/Npgsql/PoolingDataSource.cs b/src/Npgsql/PoolingDataSource.cs index 52f8adb2f4..60b288520c 100644 --- a/src/Npgsql/PoolingDataSource.cs +++ b/src/Npgsql/PoolingDataSource.cs @@ -444,10 +444,9 @@ static void PruneIdleConnectors(object? state) connector != null) { if (pool.CheckIdleConnector(connector)) - { pool.CloseConnector(connector); - toPrune--; - } + + toPrune--; } } diff --git a/test/Npgsql.Tests/PoolTests.cs b/test/Npgsql.Tests/PoolTests.cs index eda0bbedf7..6e5d7f8326 100644 --- a/test/Npgsql.Tests/PoolTests.cs +++ b/test/Npgsql.Tests/PoolTests.cs @@ -203,6 +203,50 @@ public async Task Prune_idle_connectors(int minPoolSize, int connectionIdleLifeT AssertPoolState(dataSource, open: Math.Max(1, minPoolSize), idle: Math.Max(0, minPoolSize - 1)); } + [Test] + [Explicit("Timing-based")] + public async Task Prune_counts_max_lifetime_exceeded() + { + await using var dataSource = CreateDataSource(csb => + { + csb.MinPoolSize = 0; + // Idle lifetime 2 seconds, 2 samples + csb.ConnectionIdleLifetime = 2; + csb.ConnectionPruningInterval = 1; + csb.ConnectionLifetime = 5; + }); + + // conn1 will exceed max lifetime + await using var conn1 = await dataSource.OpenConnectionAsync(); + + // make conn1 4 seconds older than the others, so it exceeds max lifetime + Thread.Sleep(4000); + + await using var conn2 = await dataSource.OpenConnectionAsync(); + await using var conn3 = await dataSource.OpenConnectionAsync(); + + 
await conn1.CloseAsync(); + await conn2.CloseAsync(); + AssertPoolState(dataSource, open: 3, idle: 2); + + // wait for 1 sample + Thread.Sleep(1000); + // ConnectionIdleLifetime not yet reached. + AssertPoolState(dataSource, open: 3, idle: 2); + + // close conn3, so we can see if too many connectors get pruned + await conn3.CloseAsync(); + + // wait for last sample + a bit more time for reliability + Thread.Sleep(1500); + + // ConnectionIdleLifetime reached + // - conn1 should have been closed due to max lifetime (but this should count as pruning) + // - conn2 or conn3 should have been closed due to idle pruning + // - conn3 or conn2 should remain + AssertPoolState(dataSource, open: 1, idle: 1); + } + [Test, Description("Makes sure that when a waiting async open is is given a connection, the continuation is executed in the TP rather than on the closing thread")] public async Task Close_releases_waiter_on_another_thread() { From 0b7aecbe0ed4f3e52994aade7ad7f132801a7def Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Mon, 25 Sep 2023 23:08:44 +0200 Subject: [PATCH 210/761] Handler rework (#5123) * Add legacy datetime support * No unsupported resolver exceptions during introspection * Fixes and a little lie about array types to appease the tests * Add money converter * Add hstore converter * Improve range converter * Rework buffer requirements * Add json and jsonb textual reading/writing * Naming * Implement NTS support Some resolution tweaks still needed * Fix extension check * Fix resolution Arrays are still missing * Fix datatypename normalization from NpgsqlDbType * Remove dead code * Add arrays * More ns2.0 fixes again * Check binary size difference * Move abstract up and impl down * Small fixes * Fix datatable crap * Add more text types * Multiranges * Small fixes * Improve handling of sequential access cursor * Bounded column reads * Small fixes and TextReader support * Final changes on bounded reads Also some bug fixes due to bounded reads correctly failing 
due to unhandled data! * Implement GetChars over TextReader * Implement GetBytes * More test fixes * Handle cancellation retry in Read/ReadAsync buffer stream methods * Netstandard fixes * Another iteration of bounded reads, automatically consume data for column reads ending in an exception Without paying for a try finally or other expensive constructs * Have nested data reader use nested reads as well * Improve nested reader invalidation mechanism * Clarify rereading rules for sequential mode * Add back byte reading fast path * Implement ltree * Change removed property * Colocate sync/async reading * Small fixes after doing a review * Complete some converter resolver todos * Remove test for a mapping that doesn't exist * Add timestamp(tz) range mappings * Add back jagged validation * Breaking change, remove support for non generic list and derived List types * Add byte reading/writing of text and json * Remove roundtripping restrictions on certain bytea mappings * Add jsonpath support * Revert an invalid optimization * Add missed array mappings * Add stream writing support to bytea * Cache parameter size * Add multirange resolver * Small configurability additions to PgTypeInfo And loosen boxing info restriction * Centralize type equality logic * Stream fix * Rationalize unboxing behavior, also opens up ability for arrays to be constrained by IList instead of object * Change array converters to constrain to IList * Add ts and tstzmultirange * Allow null return from converter resolver * Create specialized versions for all the different datetime resolver kinds * And remove the composing resolvers * Add accurate type names in datetime resolver exception * Don't lose stacktrace on break if possible * Add bitstring string writing * Resolve aot warning * Handle a trim warning * Suppress DbDataReader.GetFieldType dynamically accessed members attribute Odbc and OleDb do the same * Remove dead code, add comments and other small iterations * Bring back default name 
translator * Add unmapped enums * Add valuetuple and tuple records * Bring truncate behavior back as a compat feature And cleanup naming, some todos * Rename method * Cleanup * Package version bumps * Add STJ poco and known types support pg arrays over them is next * Revert nonsense * Fix typo * Improve introspection mode * Make nested writes consistent with reads * Add composite type support * Netstandard fixes * Add STJ arrays * Move field description info cache to reader Shared prepared statements would otherwise thrash each others caches. * Fixes * Add missed configure awaits WriteBuffer will be in a separate commit with the sync over async writes change * Make datatypename have an implicit conversion to string * Fix nullable resolvers * Improve jsondocument sizing somewhat until unknown sizing support is done * Add GeoJSON support * Small improvement * Add Json.NET support * Add comment * Make PgConverterInfo and Bind internal * Implement boxed converter support in composites * Clean up PgTypeInfo api surface * Move delegating GetFieldValue calls to non-GVM GetFieldValueCore * Fix benchmark project errors * Properly respect async flag on nested read disposal (and ConfigureAwait the IAsyncDisposable) * Remove an ensure overload * Add back some friendly errors on missing pg types * Fix cache bleed * Make type info cache code read better * Make multirange mappings conditional on db support * Actually revert AttemptPostgresCancellation and friends after dispose to help replication * Catch race during extension create * Assert null cache * Remove dead reference * Root out more invalid or missing reader state transitions * Fix writer ifdef issue * Fix double converter netstandard code * Fix json type info resolver being null * Disable noda time tests for now * Segregate tests simulating errors to prevent unrelated test from failing under multiplexing mode * Always reset read started * Readability improvements * Centralize parameter info resets * Only reset binding 
info for size changes * Improve PgTypeInfo api design * Try to prevent more multiplexing issues * Go over some TODOs * Add windows exemption to flaky test * Reduce jsonb text converter bloat * Rename * Shorten BufferData{Async} to Buffer{Async} * Improve nullable handling of sizes * Reduce the amount of types we reference * Create a dgml file to accompany the mstat * Reduce bloat * Small clarifications in abstractions * Move ImmutableDictionary mapping for hstore to extra conversions * And move BigInteger too * Properly remove all traces of ImmutableDictionary from hstore * Only reference if not trimmed * Fix typo * Move out of array and match name instead * Typo * Bump sdk version to preview 7 * Break away the last bits of PGUtil * Remove unused namespaces * Drop extension method * Make ValueMetadata readonly * Fix todo * Current fix * Move unconsumed read error to endread * Sync binary exporter to patterns used in db data reader * Enable all plugin tests and fix GeoJson errors * Fix infinite range bug * Fix some exporter and replication value bugs Also make Skip take an int as it's a saner size bound * Fix remaining nodatime plugin issues * Restructure range resolver to be more priority based * Add transaction for multiplexing to another test * Don't make json types default for reading * Add net6.0 tfm to json.net to work around init prop issues * Allow type predicates to deny default matches Only if the requirement would allow these to match otherwise, 'true' results are ignored for MatchRequirement.All * Fix remaining issues with Json.Net What a kludge... 
* Fix legacy infinite errors on NodaTime * Fix some incorrect null type predicate results * Reflect removal of silly read default from STJ poco resolver * Simplify infinity conversions again * Check pg dimension bound during array write * Make range and multirange internal and add static factory instead * Actually check length in all cases * Fix returned multirange type * Normalize [] to _ in the constructor * Fix compilation issue * Remove incorrect xmldoc * Improve composite type info error message * Move out some classes * Rename * Speed up representational type resolution * Add missed representational type resolutions * Bring back dev build tfm for Json.Net * Small naming and error message improvements * Make ColumnStream rely on cumulative position for consume during dispose * Improve PgReader Init/Commit StartRead/EndRead code * Remove _readStarted entirely now we have EndRead doing the consumed checks * More readability improvements * Merge version byte prefixed text converter Thanks @roji for the idea * Merge Hstore Read and ReadInto * Move some exceptions to resources * Fast path other read bytes methods * Monomorphize byte array converter * Streamline field read infra * Simplify cleanup * Fix boxing for default converters * Speed up GetInfo and GetFieldValueCore * Add info cache set and load for prepared statements * Speed up reader start/stop init/commit * Speed up primitive reads * Improve message * Remove unused fields * Small speedup * Improve encapsulation of NpgsqlParameter * More parameter streamlining * Small readability improvements * Improve MultiWriteState state clearing behavior * Centralize async helper read logic * Add some missed buffer checks * Remove redundant code * Map streaming onto SizeKind.Unknown instead of Size.Zero to make BufferRequirements properly monoidal * Clarify bufferRequirements' applicability and fix nullable to adhere to it * Reader naming and structure improvements * Fix seek to column in GetFieldValueAsync * Address 
nits * Make sure boxing infos use GetResolutionAsObject * Rework parameter ValueType code * Reset type info on Value changes for typeof(object) generic parameters * Address more nits * Upgrade to RC1 * Fix a default type fallback issue in the cache lookup * Add array type checks With various fixes Closes #5137 * Fix some more mappings and tests * Fix the remaining array mapping issues * Fix ensure issue and move PgWriter property to a method * Use full version * Remove nightly feed * Add pragma for new ref readonly warnings Which we can ignore due to consuming it directly in Unsafe.As, which does not write to its ref However overloads cannot be made based on refness so we're stuck with Unsafe.As being ref... * Fix nodatime array/multirange mapping tests * Fix small bug in Json.NET synthetic mappings * Make multirange asserts conditional * Fix debug build issues * Change PgReader 'not exactly consumed' exception and behavior * Replace debug only exception * Tidy up all dynamic mapping code * Implement unmapped ranges * Small tweaks * Don't rely on 'current' data inside GetBytes/GetData * Fix SequentialAccess IsDbNull (resumable) + Get... (some non-resumable op) * Use DateTimeKind.Unspecified and assert on it in tests * Address nits * Use new big endian support in Guid * Add constants for states in BinaryExporter * Add support for unmapped multiranges * Don't try to get an object converter for type = null * Rename TypeCatalog to DatabaseInfo * Use IsAtStart in PgBufferedConverter * Add missed constant uses * Actually implement IDisposable... 
* Address feedback * Add resolvers check in GlobalTypeMapper (#15) * Address feedback * Breaking change, move refcursor and jsonpath to DbType.Object * Rework NotSupportedException back to InvalidCastException to align to ADO.NET convention * Make GetPostgresTypeXYZ/GetPgType overloads of GetPostgresType * Message tweak * Improve GetConcreteResolution * Make jsonpath test conditional --------- Co-authored-by: Shay Rojansky --- .devcontainer/docker-compose.yml | 2 +- .github/workflows/build.yml | 3 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/native-aot.yml | 67 +- .github/workflows/rich-code-nav.yml | 2 +- Directory.Packages.props | 53 +- Npgsql.sln | 11 - .../{Internal => }/CrsMap.WellKnown.cs | 6 +- src/Npgsql.GeoJSON/CrsMap.cs | 59 ++ src/Npgsql.GeoJSON/CrsMapExtensions.cs | 50 + .../Internal/{CrsMap.cs => CrsMapBuilder.cs} | 54 -- .../Internal/GeoJSONConverter.cs | 748 +++++++++++++++ src/Npgsql.GeoJSON/Internal/GeoJSONHandler.cs | 722 --------------- .../Internal/GeoJSONTypeHandlerResolver.cs | 80 -- .../GeoJSONTypeHandlerResolverFactory.cs | 21 - .../Internal/GeoJSONTypeInfoResolver.cs | 76 ++ .../Internal/GeoJsonTypeMappingResolver.cs | 28 - src/Npgsql.GeoJSON/NpgsqlGeoJSONExtensions.cs | 20 +- src/Npgsql.GeoJSON/PublicAPI.Unshipped.txt | 6 +- .../Internal/JsonNetJsonConverter.cs | 121 +++ .../Internal/JsonNetJsonHandler.cs | 64 -- .../Internal/JsonNetPocoTypeInfoResolver.cs | 105 +++ .../Internal/JsonNetTypeHandlerResolver.cs | 48 - .../JsonNetTypeHandlerResolverFactory.cs | 43 - .../Internal/JsonNetTypeInfoResolver.cs | 67 ++ .../Internal/JsonNetTypeMappingResolver.cs | 29 - src/Npgsql.Json.NET/Npgsql.Json.NET.csproj | 3 +- .../NpgsqlJsonNetExtensions.cs | 9 +- .../Properties/AssemblyInfo.cs | 5 - .../Internal/NetTopologySuiteConverter.cs | 81 ++ .../Internal/NetTopologySuiteHandler.cs | 168 ---- .../NetTopologySuiteTypeHandlerResolver.cs | 55 -- ...TopologySuiteTypeHandlerResolverFactory.cs | 33 - 
.../NetTopologySuiteTypeInfoResolver.cs | 115 +++ .../NetTopologySuiteTypeMappingResolver.cs | 36 - .../Npgsql.NetTopologySuite.csproj | 2 +- .../NpgsqlNetTopologySuiteExtensions.cs | 6 +- src/Npgsql.NodaTime/Internal/DateHandler.cs | 91 -- .../Internal/DateIntervalConverter.cs | 49 + .../Internal/DateMultirangeHandler.cs | 120 --- .../Internal/DateRangeHandler.cs | 69 -- .../Internal/DurationConverter.cs | 42 + .../Internal/IntervalConverter.cs | 57 ++ .../Internal/IntervalHandler.cs | 106 --- .../Internal/LegacyConverters.cs | 78 ++ .../Internal/LegacyTimestampHandler.cs | 64 -- .../Internal/LegacyTimestampTzHandler.cs | 121 --- .../Internal/LocalDateConverter.cs | 52 ++ .../Internal/LocalTimeConverter.cs | 20 + .../Internal/NodaTimeTypeHandlerResolver.cs | 155 ---- .../NodaTimeTypeHandlerResolverFactory.cs | 15 - .../Internal/NodaTimeTypeInfoResolver.cs | 265 ++++++ .../Internal/NodaTimeTypeMappingResolver.cs | 99 -- src/Npgsql.NodaTime/Internal/NodaTimeUtils.cs | 36 +- .../Internal/OffsetTimeConverter.cs | 23 + .../Internal/PeriodConverter.cs | 46 + src/Npgsql.NodaTime/Internal/TimeHandler.cs | 53 -- src/Npgsql.NodaTime/Internal/TimeTzHandler.cs | 41 - .../Internal/TimestampConverters.cs | 106 +++ .../Internal/TimestampHandler.cs | 88 -- .../Internal/TimestampTzHandler.cs | 126 --- .../Internal/TimestampTzMultirangeHandler.cs | 202 ----- .../Internal/TimestampTzRangeHandler.cs | 105 --- .../NpgsqlNodaTimeExtensions.cs | 4 +- .../Properties/AssemblyInfo.cs | 2 +- .../NpgsqlNodaTimeStrings.Designer.cs | 6 + .../Properties/NpgsqlNodaTimeStrings.resx | 5 +- .../Npgsql.SourceGenerators.csproj | 1 - ...lConnectionStringBuilderSourceGenerator.cs | 1 - .../TypeHandler.snbtxt | 36 - .../TypeHandlerSourceGenerator.cs | 129 --- .../BackendMessages/AuthenticationMessages.cs | 5 +- src/Npgsql/BackendMessages/CopyMessages.cs | 9 +- .../BackendMessages/RowDescriptionMessage.cs | 178 ++-- src/Npgsql/Internal/AdoSerializerHelpers.cs | 58 ++ 
src/Npgsql/Internal/BufferRequirements.cs | 43 + .../Composites/Metadata/CompositeBuilder.cs | 109 +++ .../Composites/Metadata/CompositeFieldInfo.cs | 192 ++++ .../Composites/Metadata/CompositeInfo.cs | 74 ++ .../ReflectionCompositeInfoFactory.cs | 296 ++++++ .../Internal/Converters/ArrayConverter.cs | 675 ++++++++++++++ .../Internal/Converters/AsyncHelpers.cs | 114 +++ .../Converters/BitStringConverters.cs | 249 +++++ .../Internal/Converters/CastingConverter.cs | 83 ++ .../Internal/Converters/CompositeConverter.cs | 185 ++++ .../Internal/Converters/EnumConverter.cs | 68 ++ .../FullTextSearch/TsQueryConverter.cs | 227 +++++ .../FullTextSearch/TsVectorConverter.cs | 107 +++ .../Converters/Geometric/BoxConverter.cs | 26 + .../Converters/Geometric/CircleConverter.cs | 23 + .../Converters/Geometric/LineConverter.cs | 23 + .../Geometric/LineSegmentConverter.cs | 24 + .../Converters/Geometric/PathConverter.cs | 68 ++ .../Converters/Geometric/PointConverter.cs | 22 + .../Converters/Geometric/PolygonConverter.cs | 55 ++ .../Internal/Converters/HstoreConverter.cs | 159 ++++ .../Internal/InternalCharConverter.cs | 43 + .../Converters/Internal/PgLsnConverter.cs | 15 + .../Converters/Internal/TidConverter.cs | 19 + .../Converters/Internal/UInt32Converter.cs | 13 + .../Converters/Internal/UInt64Converter.cs | 13 + .../Converters/Internal/VoidConverter.cs | 13 + .../Internal/Converters/MoneyConverter.cs | 74 ++ .../Converters/MultirangeConverter.cs | 142 +++ .../Networking/IPAddressConverter.cs | 23 + .../Converters/Networking/MacaddrConverter.cs | 40 + .../Networking/NpgsqlCidrConverter.cs | 22 + .../Networking/NpgsqlInetConverter.cs | 73 ++ .../Internal/Converters/NullableConverter.cs | 60 ++ .../Converters/ObjectArrayRecordConverter.cs | 79 ++ .../PolymorphicConverterResolver.cs | 68 ++ .../Converters/Primitive/BoolConverter.cs | 13 + .../Converters/Primitive/ByteaConverters.cs | 124 +++ .../Converters/Primitive/DoubleConverter.cs | 43 + 
.../Converters/Primitive/GuidUuidConverter.cs | 70 ++ .../Converters/Primitive/Int2Converter.cs | 70 ++ .../Converters/Primitive/Int4Converter.cs | 71 ++ .../Converters/Primitive/Int8Converter.cs | 72 ++ .../Converters/Primitive/NumericConverters.cs | 262 ++++++ .../Internal/Converters/Primitive/PgMoney.cs | 104 +++ .../Converters/Primitive/PgNumeric.cs | 462 ++++++++++ .../Converters/Primitive/RealConverter.cs | 43 + .../Converters/Primitive/TextConverters.cs | 355 ++++++++ .../Internal/Converters/RangeConverter.cs | 216 +++++ .../Converters/SystemTextJsonConverter.cs | 205 +++++ .../Converters/Temporal/DateConverters.cs | 103 +++ .../Temporal/DateTimeConverterResolver.cs | 143 +++ .../Converters/Temporal/DateTimeConverters.cs | 53 ++ .../Converters/Temporal/IntervalConverters.cs | 58 ++ .../Temporal/LegacyDateTimeConverter.cs | 62 ++ .../Converters/Temporal/PgTimestamp.cs | 43 + .../Converters/Temporal/TimeConverters.cs | 52 ++ .../VersionPrefixedTextConverter.cs | 107 +++ src/Npgsql/Internal/DataFormat.cs | 29 + .../Internal/DynamicTypeInfoResolver.cs | 132 +++ src/Npgsql/Internal/IPgTypeInfoResolver.cs | 19 + src/Npgsql/Internal/NpgsqlConnector.Auth.cs | 8 +- .../NpgsqlConnector.FrontendMessages.cs | 42 +- src/Npgsql/Internal/NpgsqlConnector.cs | 75 +- src/Npgsql/Internal/NpgsqlDatabaseInfo.cs | 49 +- .../Internal/NpgsqlReadBuffer.Stream.cs | 76 +- src/Npgsql/Internal/NpgsqlReadBuffer.cs | 294 ++++-- .../Internal/NpgsqlWriteBuffer.Stream.cs | 122 --- src/Npgsql/Internal/NpgsqlWriteBuffer.cs | 77 +- src/Npgsql/Internal/PgBufferedConverter.cs | 52 ++ .../Internal/PgComposingConverterResolver.cs | 68 ++ src/Npgsql/Internal/PgConverter.cs | 205 +++++ src/Npgsql/Internal/PgConverterResolver.cs | 109 +++ src/Npgsql/Internal/PgReader.cs | 723 +++++++++++++++ src/Npgsql/Internal/PgSerializerOptions.cs | 146 +++ src/Npgsql/Internal/PgStreamingConverter.cs | 87 ++ src/Npgsql/Internal/PgTypeInfo.cs | 362 ++++++++ src/Npgsql/Internal/PgWriter.cs | 571 ++++++++++++ 
src/Npgsql/Internal/Postgres/DataTypeName.cs | 234 +++++ src/Npgsql/Internal/Postgres/DataTypeNames.cs | 79 ++ src/Npgsql/Internal/Postgres/Field.cs | 16 + src/Npgsql/Internal/Postgres/Oid.cs | 19 + src/Npgsql/Internal/Postgres/PgTypeId.cs | 44 + .../Internal/Resolvers/AdoTypeInfoResolver.cs | 491 ++++++++++ .../Resolvers/ExtraConversionsResolver.cs | 235 +++++ .../FullTextSearchTypeInfoResolver.cs | 81 ++ .../Resolvers/GeometricTypeInfoResolver.cs | 51 ++ .../Resolvers/LTreeTypeInfoResolver.cs | 51 ++ .../Resolvers/NetworkTypeInfoResolver.cs | 74 ++ .../Resolvers/RangeTypeInfoResolver.cs | 437 +++++++++ .../Resolvers/RecordTypeInfoResolvers.cs | 137 +++ .../SystemTextJsonPocoTypeInfoResolver.cs | 123 +++ .../SystemTextJsonTypeInfoResolvers.cs | 70 ++ .../Resolvers/UnmappedEnumTypeInfoResolver.cs | 51 ++ .../UnmappedMultirangeTypeInfoResolver.cs | 59 ++ .../UnmappedRangeTypeInfoResolver.cs | 59 ++ .../Resolvers/UnsupportedTypeInfoResolver.cs | 34 + src/Npgsql/Internal/Size.cs | 70 ++ .../Internal/TypeHandlers/ArrayHandler.cs | 610 ------------- .../Internal/TypeHandlers/BitStringHandler.cs | 271 ------ .../Internal/TypeHandlers/BoolHandler.cs | 32 - .../Internal/TypeHandlers/ByteaHandler.cs | 148 --- .../CompositeHandlers/ByReference.cs | 10 - .../CompositeConstructorHandler.cs | 62 -- .../CompositeConstructorHandler`.cs | 66 -- .../CompositeHandlers/CompositeHandler.cs | 282 ------ .../CompositeMemberHandler.cs | 28 - .../CompositeMemberHandlerOfClass.cs | 105 --- .../CompositeMemberHandlerOfStruct.cs | 109 --- .../CompositeParameterHandler.cs | 36 - .../CompositeParameterHandler`.cs | 21 - .../CompositeHandlers/ICompositeHandler.cs | 11 - .../CompositeHandlers/IsValueType.cs | 6 - .../DateTimeHandlers/DateHandler.cs | 131 --- .../DateTimeHandlers/DateTimeUtils.cs | 63 -- .../DateTimeHandlers/IntervalHandler.cs | 70 -- .../DateTimeHandlers/TimeHandler.cs | 52 -- .../DateTimeHandlers/TimeTzHandler.cs | 53 -- .../DateTimeHandlers/TimestampHandler.cs | 62 -- 
.../DateTimeHandlers/TimestampTzHandler.cs | 143 --- .../Internal/TypeHandlers/EnumHandler.cs | 74 -- .../FullTextSearchHandlers/TsQueryHandler.cs | 291 ------ .../FullTextSearchHandlers/TsVectorHandler.cs | 97 -- .../GeometricHandlers/BoxHandler.cs | 41 - .../GeometricHandlers/CircleHandler.cs | 37 - .../GeometricHandlers/LineHandler.cs | 37 - .../GeometricHandlers/LineSegmentHandler.cs | 38 - .../GeometricHandlers/PathHandler.cs | 74 -- .../GeometricHandlers/PointHandler.cs | 36 - .../GeometricHandlers/PolygonHandler.cs | 65 -- .../Internal/TypeHandlers/HstoreHandler.cs | 178 ---- .../InternalTypeHandlers/Int2VectorHandler.cs | 18 - .../InternalCharHandler.cs | 87 -- .../InternalTypeHandlers/OIDVectorHandler.cs | 18 - .../InternalTypeHandlers/PgLsnHandler.cs | 31 - .../InternalTypeHandlers/TidHandler.cs | 39 - .../Internal/TypeHandlers/JsonPathHandler.cs | 74 -- .../Internal/TypeHandlers/JsonTextHandler.cs | 209 ----- .../LTreeHandlers/LQueryHandler.cs | 90 -- .../LTreeHandlers/LTreeHandler.cs | 90 -- .../LTreeHandlers/LTxtQueryHandler.cs | 93 -- .../TypeHandlers/MultirangeHandler.cs | 211 ----- .../NetworkHandlers/CidrHandler.cs | 50 - .../NetworkHandlers/InetHandler.cs | 133 --- .../NetworkHandlers/MacaddrHandler.cs | 52 -- .../NumericHandlers/DecimalRaw.cs | 150 --- .../NumericHandlers/DoubleHandler.cs | 32 - .../NumericHandlers/Int16Handler.cs | 109 --- .../NumericHandlers/Int32Handler.cs | 96 -- .../NumericHandlers/Int64Handler.cs | 92 -- .../NumericHandlers/MoneyHandler.cs | 52 -- .../NumericHandlers/NumericHandler.cs | 434 --------- .../NumericHandlers/SingleHandler.cs | 45 - .../NumericHandlers/UInt32Handler.cs | 31 - .../NumericHandlers/UInt64Handler.cs | 29 - .../Internal/TypeHandlers/RangeHandler.cs | 187 ---- .../Internal/TypeHandlers/RecordHandler.cs | 104 --- .../TypeHandlers/SystemTextJsonHandler.cs | 209 ----- .../Internal/TypeHandlers/TextHandler.cs | 317 ------- .../TypeHandlers/UnknownTypeHandler.cs | 95 -- 
.../TypeHandlers/UnmappedEnumHandler.cs | 149 --- .../TypeHandlers/UnsupportedHandler.cs | 48 - .../Internal/TypeHandlers/UuidHandler.cs | 76 -- .../Internal/TypeHandlers/VoidHandler.cs | 41 - .../TypeHandling/INpgsqlSimpleTypeHandler.cs | 47 - .../TypeHandling/INpgsqlTypeHandler.cs | 75 -- .../TypeHandling/ITextReaderHandler.cs | 13 - .../TypeHandling/NpgsqlLengthCache.cs | 65 -- .../TypeHandling/NpgsqlSimpleTypeHandler.cs | 84 -- .../TypeHandling/NpgsqlTypeHandler.cs | 273 ------ .../TypeHandling/NpgsqlTypeHandler`.cs | 78 -- .../Internal/TypeHandling/NullableHandler.cs | 54 -- .../TypeHandling/TypeHandlerResolver.cs | 37 - .../TypeHandlerResolverFactory.cs | 12 - .../Internal/TypeHandling/TypeMappingInfo.cs | 22 - src/Npgsql/Internal/TypeInfoCache.cs | 169 ++++ src/Npgsql/Internal/TypeInfoMapping.cs | 668 ++++++++++++++ src/Npgsql/Internal/TypeInfoResolverChain.cs | 25 + .../Internal/TypeMapping/IUserTypeMapping.cs | 13 - src/Npgsql/Internal/TypeMapping/TypeMapper.cs | 539 ----------- .../TypeMapping/TypeMappingResolver.cs | 25 - .../TypeMapping/UserCompositeTypeMappings.cs | 24 - .../TypeMapping/UserEnumTypeMappings.cs | 46 - src/Npgsql/Internal/ValueMetadata.cs | 9 + src/Npgsql/MultiplexingDataSource.cs | 4 +- .../NpgsqlSnakeCaseNameTranslator.cs | 2 + src/Npgsql/Npgsql.csproj | 5 +- src/Npgsql/NpgsqlBatchCommandCollection.cs | 3 +- src/Npgsql/NpgsqlBinaryExporter.cs | 221 +++-- src/Npgsql/NpgsqlBinaryImporter.cs | 57 +- src/Npgsql/NpgsqlCommand.cs | 42 +- src/Npgsql/NpgsqlConnection.cs | 2 +- src/Npgsql/NpgsqlConnectionStringBuilder.cs | 1 - src/Npgsql/NpgsqlDataReader.cs | 853 ++++++++---------- src/Npgsql/NpgsqlDataSource.cs | 61 +- src/Npgsql/NpgsqlDataSourceBuilder.cs | 103 ++- src/Npgsql/NpgsqlDataSourceConfiguration.cs | 5 +- src/Npgsql/NpgsqlLargeObjectManager.cs | 1 - src/Npgsql/NpgsqlNestedDataReader.cs | 189 ++-- src/Npgsql/NpgsqlParameter.cs | 362 ++++++-- src/Npgsql/NpgsqlParameterCollection.cs | 18 +- src/Npgsql/NpgsqlParameter`.cs | 72 +- 
src/Npgsql/NpgsqlSchema.cs | 181 ++-- src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 236 +++-- src/Npgsql/NpgsqlTypes/NpgsqlDbType.cs | 420 ++++++++- src/Npgsql/NpgsqlTypes/NpgsqlInterval.cs | 4 - src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs | 16 +- src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs | 120 +-- src/Npgsql/PoolManager.cs | 2 - src/Npgsql/PoolingDataSource.cs | 4 +- src/Npgsql/PostgresDatabaseInfo.cs | 11 +- src/Npgsql/PostgresMinimalDatabaseInfo.cs | 22 +- src/Npgsql/PostgresTypes/PostgresArrayType.cs | 11 +- src/Npgsql/PostgresTypes/PostgresBaseType.cs | 29 +- .../PostgresTypes/PostgresMultirangeType.cs | 2 +- src/Npgsql/PostgresTypes/PostgresType.cs | 49 +- .../PostgresTypes/PostgresUnknownType.cs | 4 +- src/Npgsql/PreparedStatement.cs | 19 +- src/Npgsql/PreparedTextReader.cs | 14 +- src/Npgsql/Properties/AssemblyInfo.cs | 2 +- .../Properties/NpgsqlStrings.Designer.cs | 183 ++-- src/Npgsql/Properties/NpgsqlStrings.resx | 23 +- src/Npgsql/PublicAPI.Unshipped.txt | 54 +- src/Npgsql/Replication/PgDateTime.cs | 16 + .../PgOutput/Messages/DefaultUpdateMessage.cs | 1 - .../PgOutput/Messages/FullDeleteMessage.cs | 1 - .../PgOutput/Messages/FullUpdateMessage.cs | 1 - .../PgOutput/Messages/IndexUpdateMessage.cs | 1 - .../PgOutput/Messages/InsertMessage.cs | 1 - .../PgOutput/Messages/KeyDeleteMessage.cs | 1 - .../Messages/PgOutputReplicationMessage.cs | 5 +- .../PgOutput/Messages/RelationMessage.cs | 3 +- .../PgOutput/PgOutputAsyncEnumerable.cs | 26 +- .../PgOutput/ReadonlyArrayBuffer.cs | 1 - .../Replication/PgOutput/ReplicationValue.cs | 164 ++-- .../Replication/PgOutput/TupleEnumerator.cs | 9 +- .../Replication/ReplicationConnection.cs | 20 +- src/Npgsql/Schema/DbColumnSchemaGenerator.cs | 32 +- .../Shims/ConcurrentDictionaryExtensions.cs | 3 - src/Npgsql/Shims/MemoryExtensions.cs | 18 + .../Shims/ReadOnlySequenceExtensions.cs | 13 + .../Shims/ReadOnlySpanOfCharExtensions.cs | 2 - src/Npgsql/Shims/ReferenceEqualityComparer.cs | 48 + src/Npgsql/Shims/StreamExtensions.cs | 32 +- 
src/Npgsql/Shims/UnreachableException.cs | 41 + src/Npgsql/ThrowHelper.cs | 12 + .../TypeMapping/BuiltInTypeHandlerResolver.cs | 449 --------- .../BuiltInTypeHandlerResolverFactory.cs | 13 - .../TypeMapping/BuiltInTypeMappingResolver.cs | 237 ----- src/Npgsql/TypeMapping/DefaultPgTypes.cs | 191 ++++ .../FullTextSearchTypeHandlerResolver.cs | 34 - ...ullTextSearchTypeHandlerResolverFactory.cs | 15 - .../FullTextSearchTypeMappingResolver.cs | 41 - src/Npgsql/TypeMapping/GlobalTypeMapper.cs | 702 +++----------- src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs | 10 +- src/Npgsql/TypeMapping/PostgresTypeOIDs.cs | 112 --- .../TypeMapping/RangeTypeHandlerResolver.cs | 178 ---- .../RangeTypeHandlerResolverFactory.cs | 15 - .../TypeMapping/RangeTypeMappingResolver.cs | 118 --- .../TypeMapping/RecordTypeHandlerResolver.cs | 29 - .../RecordTypeHandlerResolverFactory.cs | 12 - .../SystemTextJsonTypeHandlerResolver.cs | 60 -- ...ystemTextJsonTypeHandlerResolverFactory.cs | 45 - .../SystemTextJsonTypeMappingResolver.cs | 39 - src/Npgsql/TypeMapping/UserTypeMapper.cs | 216 +++++ src/Npgsql/UnpooledDataSource.cs | 3 +- src/Npgsql/Util/NpgsqlTimeout.cs | 57 ++ src/Npgsql/Util/PGUtil.cs | 228 ----- .../Util/ResettableCancellationTokenSource.cs | 9 +- src/Npgsql/Util/Statics.cs | 92 ++ src/Npgsql/Util/StrongBox.cs | 41 + src/Npgsql/Util/SubReadStream.cs | 227 +++++ src/Npgsql/VolatileResourceManager.cs | 1 - src/Shared/CodeAnalysis.cs | 88 +- test/Npgsql.Benchmarks/Prepare.cs | 3 +- test/Npgsql.Benchmarks/ReadArray.cs | 6 - test/Npgsql.Benchmarks/ResolveHandler.cs | 30 +- .../TypeHandlers/Composite.cs | 7 +- .../Npgsql.Benchmarks/TypeHandlers/Numeric.cs | 18 +- test/Npgsql.Benchmarks/TypeHandlers/Text.cs | 6 +- .../TypeHandlers/TypeHandlerBenchmarks.cs | 68 +- test/Npgsql.Benchmarks/TypeHandlers/Uuid.cs | 6 +- .../Npgsql.NativeAotTests.csproj | 5 +- .../LegacyNodaTimeTests.cs | 104 --- .../NodaTimeSetupFixture.cs | 18 - .../Npgsql.NodaTime.Tests.csproj | 13 - 
.../Npgsql.PluginTests/LegacyNodaTimeTests.cs | 106 +++ .../NetTopologySuiteTests.cs | 131 ++- .../NodaTimeInfinityTests.cs | 82 +- .../NodaTimeTests.cs | 280 ++++-- .../Npgsql.PluginTests.csproj | 5 + .../NpgsqlDataReaderTests.cs | 1 - test/Npgsql.Specification.Tests/Utility.cs | 1 - test/Npgsql.Tests/AuthenticationTests.cs | 10 +- test/Npgsql.Tests/BatchTests.cs | 1 - test/Npgsql.Tests/BugTests.cs | 21 +- test/Npgsql.Tests/CommandParameterTests.cs | 207 +++++ test/Npgsql.Tests/CommandTests.cs | 287 ++---- test/Npgsql.Tests/ConnectionTests.cs | 3 +- test/Npgsql.Tests/CopyTests.cs | 129 +-- test/Npgsql.Tests/FunctionTests.cs | 1 - test/Npgsql.Tests/GlobalTypeMapperTests.cs | 85 ++ test/Npgsql.Tests/MultipleHostsTests.cs | 2 +- test/Npgsql.Tests/NotificationTests.cs | 1 - test/Npgsql.Tests/NpgsqlParameterTests.cs | 5 +- test/Npgsql.Tests/PoolTests.cs | 2 - test/Npgsql.Tests/PostgresTypeTests.cs | 3 +- test/Npgsql.Tests/ReadBufferTests.cs | 27 +- test/Npgsql.Tests/ReaderNewSchemaTests.cs | 3 +- test/Npgsql.Tests/ReaderTests.cs | 118 +-- .../Replication/CommonReplicationTests.cs | 1 - .../Replication/PgOutputReplicationTests.cs | 1 - .../TestDecodingReplicationTests.cs | 3 +- test/Npgsql.Tests/SchemaTests.cs | 8 + test/Npgsql.Tests/SecurityTests.cs | 35 +- test/Npgsql.Tests/SqlQueryParserTests.cs | 3 +- test/Npgsql.Tests/Support/AssemblySetUp.cs | 4 +- .../Support/MultiplexingTestBase.cs | 2 +- test/Npgsql.Tests/Support/PgPostmasterMock.cs | 5 +- test/Npgsql.Tests/Support/PgServerMock.cs | 28 +- test/Npgsql.Tests/Support/TestBase.cs | 166 +++- test/Npgsql.Tests/TestUtil.cs | 38 +- test/Npgsql.Tests/TypeMapperTests.cs | 106 +-- test/Npgsql.Tests/Types/ArrayTests.cs | 92 +- test/Npgsql.Tests/Types/BitStringTests.cs | 16 +- test/Npgsql.Tests/Types/ByteaTests.cs | 53 +- .../Types/CompositeHandlerTests.Read.cs | 12 +- .../Types/CompositeHandlerTests.Write.cs | 16 +- test/Npgsql.Tests/Types/CompositeTests.cs | 35 +- .../Types/DateTimeInfinityTests.cs | 12 +- 
test/Npgsql.Tests/Types/DateTimeTests.cs | 134 ++- .../Npgsql.Tests/Types/FullTextSearchTests.cs | 17 +- test/Npgsql.Tests/Types/GeometricTypeTests.cs | 17 +- test/Npgsql.Tests/Types/HstoreTests.cs | 9 +- test/Npgsql.Tests/Types/JsonPathTests.cs | 17 +- test/Npgsql.Tests/Types/JsonTests.cs | 191 +++- test/Npgsql.Tests/Types/LTreeTests.cs | 28 +- .../Npgsql.Tests/Types/LegacyDateTimeTests.cs | 27 +- test/Npgsql.Tests/Types/MiscTypeTests.cs | 115 +-- test/Npgsql.Tests/Types/MoneyTests.cs | 3 +- test/Npgsql.Tests/Types/MultirangeTests.cs | 288 +++--- test/Npgsql.Tests/Types/NetworkTypeTests.cs | 48 +- test/Npgsql.Tests/Types/NumericTypeTests.cs | 4 +- test/Npgsql.Tests/Types/RangeTests.cs | 224 ++--- test/Npgsql.Tests/Types/RecordTests.cs | 109 +++ test/Npgsql.Tests/Types/TextTests.cs | 1 + test/Npgsql.Tests/TypesTests.cs | 7 - test/Npgsql.Tests/WriteBufferTests.cs | 13 +- 423 files changed, 20463 insertions(+), 17022 deletions(-) rename src/Npgsql.GeoJSON/{Internal => }/CrsMap.WellKnown.cs (99%) create mode 100644 src/Npgsql.GeoJSON/CrsMap.cs create mode 100644 src/Npgsql.GeoJSON/CrsMapExtensions.cs rename src/Npgsql.GeoJSON/Internal/{CrsMap.cs => CrsMapBuilder.cs} (52%) create mode 100644 src/Npgsql.GeoJSON/Internal/GeoJSONConverter.cs delete mode 100644 src/Npgsql.GeoJSON/Internal/GeoJSONHandler.cs delete mode 100644 src/Npgsql.GeoJSON/Internal/GeoJSONTypeHandlerResolver.cs delete mode 100644 src/Npgsql.GeoJSON/Internal/GeoJSONTypeHandlerResolverFactory.cs create mode 100644 src/Npgsql.GeoJSON/Internal/GeoJSONTypeInfoResolver.cs delete mode 100644 src/Npgsql.GeoJSON/Internal/GeoJsonTypeMappingResolver.cs create mode 100644 src/Npgsql.Json.NET/Internal/JsonNetJsonConverter.cs delete mode 100644 src/Npgsql.Json.NET/Internal/JsonNetJsonHandler.cs create mode 100644 src/Npgsql.Json.NET/Internal/JsonNetPocoTypeInfoResolver.cs delete mode 100644 src/Npgsql.Json.NET/Internal/JsonNetTypeHandlerResolver.cs delete mode 100644 
src/Npgsql.Json.NET/Internal/JsonNetTypeHandlerResolverFactory.cs create mode 100644 src/Npgsql.Json.NET/Internal/JsonNetTypeInfoResolver.cs delete mode 100644 src/Npgsql.Json.NET/Internal/JsonNetTypeMappingResolver.cs delete mode 100644 src/Npgsql.LegacyPostgis/Properties/AssemblyInfo.cs create mode 100644 src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteConverter.cs delete mode 100644 src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteHandler.cs delete mode 100644 src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeHandlerResolver.cs delete mode 100644 src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeHandlerResolverFactory.cs create mode 100644 src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeInfoResolver.cs delete mode 100644 src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeMappingResolver.cs delete mode 100644 src/Npgsql.NodaTime/Internal/DateHandler.cs create mode 100644 src/Npgsql.NodaTime/Internal/DateIntervalConverter.cs delete mode 100644 src/Npgsql.NodaTime/Internal/DateMultirangeHandler.cs delete mode 100644 src/Npgsql.NodaTime/Internal/DateRangeHandler.cs create mode 100644 src/Npgsql.NodaTime/Internal/DurationConverter.cs create mode 100644 src/Npgsql.NodaTime/Internal/IntervalConverter.cs delete mode 100644 src/Npgsql.NodaTime/Internal/IntervalHandler.cs create mode 100644 src/Npgsql.NodaTime/Internal/LegacyConverters.cs delete mode 100644 src/Npgsql.NodaTime/Internal/LegacyTimestampHandler.cs delete mode 100644 src/Npgsql.NodaTime/Internal/LegacyTimestampTzHandler.cs create mode 100644 src/Npgsql.NodaTime/Internal/LocalDateConverter.cs create mode 100644 src/Npgsql.NodaTime/Internal/LocalTimeConverter.cs delete mode 100644 src/Npgsql.NodaTime/Internal/NodaTimeTypeHandlerResolver.cs delete mode 100644 src/Npgsql.NodaTime/Internal/NodaTimeTypeHandlerResolverFactory.cs create mode 100644 src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolver.cs delete mode 100644 
src/Npgsql.NodaTime/Internal/NodaTimeTypeMappingResolver.cs create mode 100644 src/Npgsql.NodaTime/Internal/OffsetTimeConverter.cs create mode 100644 src/Npgsql.NodaTime/Internal/PeriodConverter.cs delete mode 100644 src/Npgsql.NodaTime/Internal/TimeHandler.cs delete mode 100644 src/Npgsql.NodaTime/Internal/TimeTzHandler.cs create mode 100644 src/Npgsql.NodaTime/Internal/TimestampConverters.cs delete mode 100644 src/Npgsql.NodaTime/Internal/TimestampHandler.cs delete mode 100644 src/Npgsql.NodaTime/Internal/TimestampTzHandler.cs delete mode 100644 src/Npgsql.NodaTime/Internal/TimestampTzMultirangeHandler.cs delete mode 100644 src/Npgsql.NodaTime/Internal/TimestampTzRangeHandler.cs delete mode 100644 src/Npgsql.SourceGenerators/TypeHandler.snbtxt delete mode 100644 src/Npgsql.SourceGenerators/TypeHandlerSourceGenerator.cs create mode 100644 src/Npgsql/Internal/AdoSerializerHelpers.cs create mode 100644 src/Npgsql/Internal/BufferRequirements.cs create mode 100644 src/Npgsql/Internal/Composites/Metadata/CompositeBuilder.cs create mode 100644 src/Npgsql/Internal/Composites/Metadata/CompositeFieldInfo.cs create mode 100644 src/Npgsql/Internal/Composites/Metadata/CompositeInfo.cs create mode 100644 src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs create mode 100644 src/Npgsql/Internal/Converters/ArrayConverter.cs create mode 100644 src/Npgsql/Internal/Converters/AsyncHelpers.cs create mode 100644 src/Npgsql/Internal/Converters/BitStringConverters.cs create mode 100644 src/Npgsql/Internal/Converters/CastingConverter.cs create mode 100644 src/Npgsql/Internal/Converters/CompositeConverter.cs create mode 100644 src/Npgsql/Internal/Converters/EnumConverter.cs create mode 100644 src/Npgsql/Internal/Converters/FullTextSearch/TsQueryConverter.cs create mode 100644 src/Npgsql/Internal/Converters/FullTextSearch/TsVectorConverter.cs create mode 100644 src/Npgsql/Internal/Converters/Geometric/BoxConverter.cs create mode 100644 
src/Npgsql/Internal/Converters/Geometric/CircleConverter.cs create mode 100644 src/Npgsql/Internal/Converters/Geometric/LineConverter.cs create mode 100644 src/Npgsql/Internal/Converters/Geometric/LineSegmentConverter.cs create mode 100644 src/Npgsql/Internal/Converters/Geometric/PathConverter.cs create mode 100644 src/Npgsql/Internal/Converters/Geometric/PointConverter.cs create mode 100644 src/Npgsql/Internal/Converters/Geometric/PolygonConverter.cs create mode 100644 src/Npgsql/Internal/Converters/HstoreConverter.cs create mode 100644 src/Npgsql/Internal/Converters/Internal/InternalCharConverter.cs create mode 100644 src/Npgsql/Internal/Converters/Internal/PgLsnConverter.cs create mode 100644 src/Npgsql/Internal/Converters/Internal/TidConverter.cs create mode 100644 src/Npgsql/Internal/Converters/Internal/UInt32Converter.cs create mode 100644 src/Npgsql/Internal/Converters/Internal/UInt64Converter.cs create mode 100644 src/Npgsql/Internal/Converters/Internal/VoidConverter.cs create mode 100644 src/Npgsql/Internal/Converters/MoneyConverter.cs create mode 100644 src/Npgsql/Internal/Converters/MultirangeConverter.cs create mode 100644 src/Npgsql/Internal/Converters/Networking/IPAddressConverter.cs create mode 100644 src/Npgsql/Internal/Converters/Networking/MacaddrConverter.cs create mode 100644 src/Npgsql/Internal/Converters/Networking/NpgsqlCidrConverter.cs create mode 100644 src/Npgsql/Internal/Converters/Networking/NpgsqlInetConverter.cs create mode 100644 src/Npgsql/Internal/Converters/NullableConverter.cs create mode 100644 src/Npgsql/Internal/Converters/ObjectArrayRecordConverter.cs create mode 100644 src/Npgsql/Internal/Converters/PolymorphicConverterResolver.cs create mode 100644 src/Npgsql/Internal/Converters/Primitive/BoolConverter.cs create mode 100644 src/Npgsql/Internal/Converters/Primitive/ByteaConverters.cs create mode 100644 src/Npgsql/Internal/Converters/Primitive/DoubleConverter.cs create mode 100644 
src/Npgsql/Internal/Converters/Primitive/GuidUuidConverter.cs create mode 100644 src/Npgsql/Internal/Converters/Primitive/Int2Converter.cs create mode 100644 src/Npgsql/Internal/Converters/Primitive/Int4Converter.cs create mode 100644 src/Npgsql/Internal/Converters/Primitive/Int8Converter.cs create mode 100644 src/Npgsql/Internal/Converters/Primitive/NumericConverters.cs create mode 100644 src/Npgsql/Internal/Converters/Primitive/PgMoney.cs create mode 100644 src/Npgsql/Internal/Converters/Primitive/PgNumeric.cs create mode 100644 src/Npgsql/Internal/Converters/Primitive/RealConverter.cs create mode 100644 src/Npgsql/Internal/Converters/Primitive/TextConverters.cs create mode 100644 src/Npgsql/Internal/Converters/RangeConverter.cs create mode 100644 src/Npgsql/Internal/Converters/SystemTextJsonConverter.cs create mode 100644 src/Npgsql/Internal/Converters/Temporal/DateConverters.cs create mode 100644 src/Npgsql/Internal/Converters/Temporal/DateTimeConverterResolver.cs create mode 100644 src/Npgsql/Internal/Converters/Temporal/DateTimeConverters.cs create mode 100644 src/Npgsql/Internal/Converters/Temporal/IntervalConverters.cs create mode 100644 src/Npgsql/Internal/Converters/Temporal/LegacyDateTimeConverter.cs create mode 100644 src/Npgsql/Internal/Converters/Temporal/PgTimestamp.cs create mode 100644 src/Npgsql/Internal/Converters/Temporal/TimeConverters.cs create mode 100644 src/Npgsql/Internal/Converters/VersionPrefixedTextConverter.cs create mode 100644 src/Npgsql/Internal/DataFormat.cs create mode 100644 src/Npgsql/Internal/DynamicTypeInfoResolver.cs create mode 100644 src/Npgsql/Internal/IPgTypeInfoResolver.cs delete mode 100644 src/Npgsql/Internal/NpgsqlWriteBuffer.Stream.cs create mode 100644 src/Npgsql/Internal/PgBufferedConverter.cs create mode 100644 src/Npgsql/Internal/PgComposingConverterResolver.cs create mode 100644 src/Npgsql/Internal/PgConverter.cs create mode 100644 src/Npgsql/Internal/PgConverterResolver.cs create mode 100644 
src/Npgsql/Internal/PgReader.cs create mode 100644 src/Npgsql/Internal/PgSerializerOptions.cs create mode 100644 src/Npgsql/Internal/PgStreamingConverter.cs create mode 100644 src/Npgsql/Internal/PgTypeInfo.cs create mode 100644 src/Npgsql/Internal/PgWriter.cs create mode 100644 src/Npgsql/Internal/Postgres/DataTypeName.cs create mode 100644 src/Npgsql/Internal/Postgres/DataTypeNames.cs create mode 100644 src/Npgsql/Internal/Postgres/Field.cs create mode 100644 src/Npgsql/Internal/Postgres/Oid.cs create mode 100644 src/Npgsql/Internal/Postgres/PgTypeId.cs create mode 100644 src/Npgsql/Internal/Resolvers/AdoTypeInfoResolver.cs create mode 100644 src/Npgsql/Internal/Resolvers/ExtraConversionsResolver.cs create mode 100644 src/Npgsql/Internal/Resolvers/FullTextSearchTypeInfoResolver.cs create mode 100644 src/Npgsql/Internal/Resolvers/GeometricTypeInfoResolver.cs create mode 100644 src/Npgsql/Internal/Resolvers/LTreeTypeInfoResolver.cs create mode 100644 src/Npgsql/Internal/Resolvers/NetworkTypeInfoResolver.cs create mode 100644 src/Npgsql/Internal/Resolvers/RangeTypeInfoResolver.cs create mode 100644 src/Npgsql/Internal/Resolvers/RecordTypeInfoResolvers.cs create mode 100644 src/Npgsql/Internal/Resolvers/SystemTextJsonPocoTypeInfoResolver.cs create mode 100644 src/Npgsql/Internal/Resolvers/SystemTextJsonTypeInfoResolvers.cs create mode 100644 src/Npgsql/Internal/Resolvers/UnmappedEnumTypeInfoResolver.cs create mode 100644 src/Npgsql/Internal/Resolvers/UnmappedMultirangeTypeInfoResolver.cs create mode 100644 src/Npgsql/Internal/Resolvers/UnmappedRangeTypeInfoResolver.cs create mode 100644 src/Npgsql/Internal/Resolvers/UnsupportedTypeInfoResolver.cs create mode 100644 src/Npgsql/Internal/Size.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/ArrayHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/BitStringHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/BoolHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/ByteaHandler.cs 
delete mode 100644 src/Npgsql/Internal/TypeHandlers/CompositeHandlers/ByReference.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeConstructorHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeConstructorHandler`.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeMemberHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeMemberHandlerOfClass.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeMemberHandlerOfStruct.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeParameterHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeParameterHandler`.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/CompositeHandlers/ICompositeHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/CompositeHandlers/IsValueType.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/DateHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/DateTimeUtils.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/IntervalHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/TimeHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/TimeTzHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/TimestampHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/TimestampTzHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/EnumHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/FullTextSearchHandlers/TsQueryHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/FullTextSearchHandlers/TsVectorHandler.cs delete mode 100644 
src/Npgsql/Internal/TypeHandlers/GeometricHandlers/BoxHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/GeometricHandlers/CircleHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/GeometricHandlers/LineHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/GeometricHandlers/LineSegmentHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/GeometricHandlers/PathHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/GeometricHandlers/PointHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/GeometricHandlers/PolygonHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/HstoreHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/Int2VectorHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/InternalCharHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/OIDVectorHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/PgLsnHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/TidHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/JsonPathHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/JsonTextHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/LTreeHandlers/LQueryHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/LTreeHandlers/LTreeHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/LTreeHandlers/LTxtQueryHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/MultirangeHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/NetworkHandlers/CidrHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/NetworkHandlers/InetHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/NetworkHandlers/MacaddrHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/NumericHandlers/DecimalRaw.cs delete mode 100644 
src/Npgsql/Internal/TypeHandlers/NumericHandlers/DoubleHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/NumericHandlers/Int16Handler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/NumericHandlers/Int32Handler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/NumericHandlers/Int64Handler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/NumericHandlers/MoneyHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/NumericHandlers/NumericHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/NumericHandlers/SingleHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/NumericHandlers/UInt32Handler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/NumericHandlers/UInt64Handler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/RangeHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/RecordHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/SystemTextJsonHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/TextHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/UnknownTypeHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/UnmappedEnumHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/UnsupportedHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/UuidHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandlers/VoidHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandling/INpgsqlSimpleTypeHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandling/INpgsqlTypeHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandling/ITextReaderHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandling/NpgsqlLengthCache.cs delete mode 100644 src/Npgsql/Internal/TypeHandling/NpgsqlSimpleTypeHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandling/NpgsqlTypeHandler.cs delete mode 100644 src/Npgsql/Internal/TypeHandling/NpgsqlTypeHandler`.cs delete mode 100644 src/Npgsql/Internal/TypeHandling/NullableHandler.cs delete mode 100644 
src/Npgsql/Internal/TypeHandling/TypeHandlerResolver.cs delete mode 100644 src/Npgsql/Internal/TypeHandling/TypeHandlerResolverFactory.cs delete mode 100644 src/Npgsql/Internal/TypeHandling/TypeMappingInfo.cs create mode 100644 src/Npgsql/Internal/TypeInfoCache.cs create mode 100644 src/Npgsql/Internal/TypeInfoMapping.cs create mode 100644 src/Npgsql/Internal/TypeInfoResolverChain.cs delete mode 100644 src/Npgsql/Internal/TypeMapping/IUserTypeMapping.cs delete mode 100644 src/Npgsql/Internal/TypeMapping/TypeMapper.cs delete mode 100644 src/Npgsql/Internal/TypeMapping/TypeMappingResolver.cs delete mode 100644 src/Npgsql/Internal/TypeMapping/UserCompositeTypeMappings.cs delete mode 100644 src/Npgsql/Internal/TypeMapping/UserEnumTypeMappings.cs create mode 100644 src/Npgsql/Internal/ValueMetadata.cs create mode 100644 src/Npgsql/Replication/PgDateTime.cs create mode 100644 src/Npgsql/Shims/MemoryExtensions.cs create mode 100644 src/Npgsql/Shims/ReadOnlySequenceExtensions.cs create mode 100644 src/Npgsql/Shims/ReferenceEqualityComparer.cs create mode 100644 src/Npgsql/Shims/UnreachableException.cs delete mode 100644 src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs delete mode 100644 src/Npgsql/TypeMapping/BuiltInTypeHandlerResolverFactory.cs delete mode 100644 src/Npgsql/TypeMapping/BuiltInTypeMappingResolver.cs create mode 100644 src/Npgsql/TypeMapping/DefaultPgTypes.cs delete mode 100644 src/Npgsql/TypeMapping/FullTextSearchTypeHandlerResolver.cs delete mode 100644 src/Npgsql/TypeMapping/FullTextSearchTypeHandlerResolverFactory.cs delete mode 100644 src/Npgsql/TypeMapping/FullTextSearchTypeMappingResolver.cs delete mode 100644 src/Npgsql/TypeMapping/PostgresTypeOIDs.cs delete mode 100644 src/Npgsql/TypeMapping/RangeTypeHandlerResolver.cs delete mode 100644 src/Npgsql/TypeMapping/RangeTypeHandlerResolverFactory.cs delete mode 100644 src/Npgsql/TypeMapping/RangeTypeMappingResolver.cs delete mode 100644 src/Npgsql/TypeMapping/RecordTypeHandlerResolver.cs delete mode 
100644 src/Npgsql/TypeMapping/RecordTypeHandlerResolverFactory.cs delete mode 100644 src/Npgsql/TypeMapping/SystemTextJsonTypeHandlerResolver.cs delete mode 100644 src/Npgsql/TypeMapping/SystemTextJsonTypeHandlerResolverFactory.cs delete mode 100644 src/Npgsql/TypeMapping/SystemTextJsonTypeMappingResolver.cs create mode 100644 src/Npgsql/TypeMapping/UserTypeMapper.cs create mode 100644 src/Npgsql/Util/NpgsqlTimeout.cs delete mode 100644 src/Npgsql/Util/PGUtil.cs create mode 100644 src/Npgsql/Util/Statics.cs create mode 100644 src/Npgsql/Util/StrongBox.cs create mode 100644 src/Npgsql/Util/SubReadStream.cs delete mode 100644 test/Npgsql.NodaTime.Tests/LegacyNodaTimeTests.cs delete mode 100644 test/Npgsql.NodaTime.Tests/NodaTimeSetupFixture.cs delete mode 100644 test/Npgsql.NodaTime.Tests/Npgsql.NodaTime.Tests.csproj create mode 100644 test/Npgsql.PluginTests/LegacyNodaTimeTests.cs rename test/{Npgsql.NodaTime.Tests => Npgsql.PluginTests}/NodaTimeInfinityTests.cs (78%) rename test/{Npgsql.NodaTime.Tests => Npgsql.PluginTests}/NodaTimeTests.cs (64%) create mode 100644 test/Npgsql.Tests/CommandParameterTests.cs create mode 100644 test/Npgsql.Tests/GlobalTypeMapperTests.cs create mode 100644 test/Npgsql.Tests/Types/RecordTests.cs diff --git a/.devcontainer/docker-compose.yml b/.devcontainer/docker-compose.yml index ae4cd58f96..84913d6fdc 100644 --- a/.devcontainer/docker-compose.yml +++ b/.devcontainer/docker-compose.yml @@ -3,7 +3,7 @@ version: '3' services: npgsql-dev: # Source for tags: https://mcr.microsoft.com/v2/dotnet/sdk/tags/list - image: mcr.microsoft.com/dotnet/sdk:8.0.100-preview.6 + image: mcr.microsoft.com/dotnet/sdk:8.0.100-preview.7 volumes: - ..:/workspace:cached tty: true diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 065f6a2924..66e0ff5bd6 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -15,7 +15,7 @@ concurrency: cancel-in-progress: true env: - dotnet_sdk_version: 
'8.0.100-preview.6.23330.14' + dotnet_sdk_version: '8.0.100-rc.1.23463.5' postgis_version: 3 DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true # Windows comes with PG pre-installed, and defines the PGPASSWORD environment variable. Remove it as it interferes @@ -315,7 +315,6 @@ jobs: run: | if [ -z "${{ matrix.pg_prerelease }}" ]; then dotnet test -c ${{ matrix.config }} -f ${{ matrix.test_tfm }} test/Npgsql.PluginTests --logger "GitHubActions;report-warnings=false" - dotnet test -c ${{ matrix.config }} -f ${{ matrix.test_tfm }} test/Npgsql.NodaTime.Tests --logger "GitHubActions;report-warnings=false" fi shell: bash diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index a4b0f44bbb..93c46a180c 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -32,7 +32,7 @@ concurrency: cancel-in-progress: true env: - dotnet_sdk_version: '8.0.100-preview.6.23330.14' + dotnet_sdk_version: '8.0.100-rc.1.23463.5' DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true jobs: diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index 6b58f5f7cd..1c6ecce4ba 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -15,31 +15,32 @@ concurrency: cancel-in-progress: true env: - dotnet_sdk_version: '8.0.100-preview.6.23330.14' + dotnet_sdk_version: '8.0.100-rc.1.23463.5' DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true - nuget_config: | - - - - - - - - - - - - - - - - - - - - - - + # Uncomment and edit the following to use nightly/preview builds +# nuget_config: | +# +# +# +# +# +# +# +# +# +# +# +# +# +# +# +# +# +# +# +# +# +# jobs: build: runs-on: ${{ matrix.os }} @@ -68,8 +69,8 @@ jobs: dotnet-version: | ${{ env.dotnet_sdk_version }} - - name: Setup nuget config - run: echo "$nuget_config" > NuGet.config +# - name: Setup nuget config +# run: echo "$nuget_config" > NuGet.config - name: Setup Native AOT prerequisites run: sudo apt-get install clang zlib1g-dev @@ -108,6 +109,20 @@ 
jobs: path: "test/Npgsql.NativeAotTests/obj/Release/net8.0/linux-x64/native/Npgsql.NativeAotTests.mstat" retention-days: 3 + - name: Upload codedgen dgml + uses: actions/upload-artifact@v3.1.2 + with: + name: npgsql.codegen.dgml.xml + path: "test/Npgsql.NativeAotTests/obj/Release/net8.0/linux-x64/native/Npgsql.NativeAotTests.codegen.dgml.xml" + retention-days: 3 + + - name: Upload scan dgml + uses: actions/upload-artifact@v3.1.2 + with: + name: npgsql.scan.dgml.xml + path: "test/Npgsql.NativeAotTests/obj/Release/net8.0/linux-x64/native/Npgsql.NativeAotTests.scan.dgml.xml" + retention-days: 3 + - name: Assert binary size run: | size="$(ls -l test/Npgsql.NativeAotTests/bin/Release/net8.0/linux-x64/native/Npgsql.NativeAotTests | cut -d ' ' -f 5)" diff --git a/.github/workflows/rich-code-nav.yml b/.github/workflows/rich-code-nav.yml index bc2db9b271..e47a8a2adb 100644 --- a/.github/workflows/rich-code-nav.yml +++ b/.github/workflows/rich-code-nav.yml @@ -9,7 +9,7 @@ on: - '*' env: - dotnet_sdk_version: '8.0.100-preview.6.23330.14' + dotnet_sdk_version: '8.0.100-rc.1.23463.5' DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true jobs: diff --git a/Directory.Packages.props b/Directory.Packages.props index 2620a142a8..4d1e52c2bc 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -1,27 +1,40 @@ + + 8.0.0-rc.1.23419.4 + $(SystemVersion) + + - - - - + + - - - - + + + + + + + - + + + + + + + + - - + + @@ -29,7 +42,7 @@ - + @@ -39,20 +52,4 @@ - - - - - - - - - - - - - - - - diff --git a/Npgsql.sln b/Npgsql.sln index 007681d5bb..80ef02c3a8 100644 --- a/Npgsql.sln +++ b/Npgsql.sln @@ -37,8 +37,6 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Npgsql.SourceGenerators", "src\Npgsql.SourceGenerators\Npgsql.SourceGenerators.csproj", "{63026A19-60B8-4906-81CB-216F30E8094B}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Npgsql.NodaTime.Tests", 
"test\Npgsql.NodaTime.Tests\Npgsql.NodaTime.Tests.csproj", "{C00D2EB1-5719-4372-9E1C-5ED05DC23A00}" -EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Npgsql.OpenTelemetry", "src\Npgsql.OpenTelemetry\Npgsql.OpenTelemetry.csproj", "{DA29F063-1828-47D8-B051-800AF7C9A0BE}" EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Github", "Github", "{BA7B6F53-D24D-45AC-927A-266857EA8D1E}" @@ -144,14 +142,6 @@ Global {63026A19-60B8-4906-81CB-216F30E8094B}.Release|Any CPU.Build.0 = Release|Any CPU {63026A19-60B8-4906-81CB-216F30E8094B}.Release|x86.ActiveCfg = Release|Any CPU {63026A19-60B8-4906-81CB-216F30E8094B}.Release|x86.Build.0 = Release|Any CPU - {C00D2EB1-5719-4372-9E1C-5ED05DC23A00}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {C00D2EB1-5719-4372-9E1C-5ED05DC23A00}.Debug|Any CPU.Build.0 = Debug|Any CPU - {C00D2EB1-5719-4372-9E1C-5ED05DC23A00}.Debug|x86.ActiveCfg = Debug|Any CPU - {C00D2EB1-5719-4372-9E1C-5ED05DC23A00}.Debug|x86.Build.0 = Debug|Any CPU - {C00D2EB1-5719-4372-9E1C-5ED05DC23A00}.Release|Any CPU.ActiveCfg = Release|Any CPU - {C00D2EB1-5719-4372-9E1C-5ED05DC23A00}.Release|Any CPU.Build.0 = Release|Any CPU - {C00D2EB1-5719-4372-9E1C-5ED05DC23A00}.Release|x86.ActiveCfg = Release|Any CPU - {C00D2EB1-5719-4372-9E1C-5ED05DC23A00}.Release|x86.Build.0 = Release|Any CPU {DA29F063-1828-47D8-B051-800AF7C9A0BE}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {DA29F063-1828-47D8-B051-800AF7C9A0BE}.Debug|Any CPU.Build.0 = Debug|Any CPU {DA29F063-1828-47D8-B051-800AF7C9A0BE}.Debug|x86.ActiveCfg = Debug|Any CPU @@ -199,7 +189,6 @@ Global {F7C53EBD-0075-474F-A083-419257D04080} = {8537E50E-CF7F-49CB-B4EF-3E2A1B11F050} {A77E5FAF-D775-4AB4-8846-8965C2104E60} = {ED612DB1-AB32-4603-95E7-891BACA71C39} {63026A19-60B8-4906-81CB-216F30E8094B} = {8537E50E-CF7F-49CB-B4EF-3E2A1B11F050} - {C00D2EB1-5719-4372-9E1C-5ED05DC23A00} = {ED612DB1-AB32-4603-95E7-891BACA71C39} {DA29F063-1828-47D8-B051-800AF7C9A0BE} = {8537E50E-CF7F-49CB-B4EF-3E2A1B11F050} 
{BA7B6F53-D24D-45AC-927A-266857EA8D1E} = {004A2E0F-D34A-44D4-8DF0-D2BC63B57073} {B58E12EB-E43D-4D77-894E-5157D2269836} = {8537E50E-CF7F-49CB-B4EF-3E2A1B11F050} diff --git a/src/Npgsql.GeoJSON/Internal/CrsMap.WellKnown.cs b/src/Npgsql.GeoJSON/CrsMap.WellKnown.cs similarity index 99% rename from src/Npgsql.GeoJSON/Internal/CrsMap.WellKnown.cs rename to src/Npgsql.GeoJSON/CrsMap.WellKnown.cs index 9d733830ea..14da2f893e 100644 --- a/src/Npgsql.GeoJSON/Internal/CrsMap.WellKnown.cs +++ b/src/Npgsql.GeoJSON/CrsMap.WellKnown.cs @@ -1,6 +1,6 @@ -namespace Npgsql.GeoJSON.Internal; +namespace Npgsql.GeoJSON; -readonly partial struct CrsMap +public partial class CrsMap { /// /// These entries came from spatial_res_sys. They are used to elide memory allocations @@ -586,4 +586,4 @@ readonly partial struct CrsMap new(32766, 32766, "EPSG"), new(900913, 900913, "spatialreferencing.org"), }; -} \ No newline at end of file +} diff --git a/src/Npgsql.GeoJSON/CrsMap.cs b/src/Npgsql.GeoJSON/CrsMap.cs new file mode 100644 index 0000000000..dd556d9b33 --- /dev/null +++ b/src/Npgsql.GeoJSON/CrsMap.cs @@ -0,0 +1,59 @@ + +namespace Npgsql.GeoJSON; + +/// +/// A map of entries that map the authority to the inclusive range of SRID. +/// +public partial class CrsMap +{ + readonly CrsMapEntry[]? _overriden; + + internal CrsMap(CrsMapEntry[]? overriden) + => _overriden = overriden; + + internal string? GetAuthority(int srid) + => GetAuthority(_overriden, srid) ?? GetAuthority(WellKnown, srid); + + static string? GetAuthority(CrsMapEntry[]? entries, int srid) + { + if (entries == null) + return null; + + var left = 0; + var right = entries.Length; + while (left <= right) + { + var middle = left + (right - left) / 2; + var entry = entries[middle]; + + if (srid < entry.MinSrid) + right = middle - 1; + else + if (srid > entry.MaxSrid) + left = middle + 1; + else + return entry.Authority; + } + + return null; + } +} + +/// +/// An entry which maps the authority to the inclusive range of SRID. 
+/// +readonly struct CrsMapEntry +{ + internal readonly int MinSrid; + internal readonly int MaxSrid; + internal readonly string? Authority; + + internal CrsMapEntry(int minSrid, int maxSrid, string? authority) + { + MinSrid = minSrid; + MaxSrid = maxSrid; + Authority = authority != null + ? string.IsInterned(authority) ?? authority + : null; + } +} diff --git a/src/Npgsql.GeoJSON/CrsMapExtensions.cs b/src/Npgsql.GeoJSON/CrsMapExtensions.cs new file mode 100644 index 0000000000..329b7d9265 --- /dev/null +++ b/src/Npgsql.GeoJSON/CrsMapExtensions.cs @@ -0,0 +1,50 @@ +using System.Threading.Tasks; +using Npgsql.GeoJSON.Internal; + +namespace Npgsql.GeoJSON; + +/// +/// Extensions for getting a CrsMap from a database. +/// +public static class CrsMapExtensions +{ + /// + /// Gets the full crs details from the database. + /// + /// + public static async Task GetCrsMapAsync(this NpgsqlDataSource dataSource) + { + var builder = new CrsMapBuilder(); + using var cmd = GetCsrCommand(dataSource); + await using var reader = await cmd.ExecuteReaderAsync(); + + while (await reader.ReadAsync()) + builder.Add(new CrsMapEntry(reader.GetInt32(0), reader.GetInt32(1), reader.GetString(2))); + + return builder.Build(); + } + + /// + /// Gets the full crs details from the database. 
+ /// + /// + public static CrsMap GetCrsMap(this NpgsqlDataSource dataSource) + { + var builder = new CrsMapBuilder(); + using var cmd = GetCsrCommand(dataSource); + using var reader = cmd.ExecuteReader(); + + while (reader.Read()) + builder.Add(new CrsMapEntry(reader.GetInt32(0), reader.GetInt32(1), reader.GetString(2))); + + return builder.Build(); + } + + static NpgsqlCommand GetCsrCommand(NpgsqlDataSource dataSource) + => dataSource.CreateCommand(""" + SELECT min(srid), max(srid), auth_name + FROM(SELECT srid, auth_name, srid - rank() OVER(PARTITION BY auth_name ORDER BY srid) AS range FROM spatial_ref_sys) AS s + GROUP BY range, auth_name + ORDER BY 1; + """); +} diff --git a/src/Npgsql.GeoJSON/Internal/CrsMap.cs b/src/Npgsql.GeoJSON/Internal/CrsMapBuilder.cs similarity index 52% rename from src/Npgsql.GeoJSON/Internal/CrsMap.cs rename to src/Npgsql.GeoJSON/Internal/CrsMapBuilder.cs index aa7dc58e2d..44829761c9 100644 --- a/src/Npgsql.GeoJSON/Internal/CrsMap.cs +++ b/src/Npgsql.GeoJSON/Internal/CrsMapBuilder.cs @@ -2,25 +2,6 @@ namespace Npgsql.GeoJSON.Internal; -/// -/// An entry which maps the authority to the inclusive range of SRID. -/// -readonly struct CrsMapEntry -{ - internal readonly int MinSrid; - internal readonly int MaxSrid; - internal readonly string? Authority; - - internal CrsMapEntry(int minSrid, int maxSrid, string? authority) - { - MinSrid = minSrid; - MaxSrid = maxSrid; - Authority = authority != null - ? string.IsInterned(authority) ?? authority - : null; - } -} - struct CrsMapBuilder { CrsMapEntry[] _overrides; @@ -71,38 +52,3 @@ internal CrsMap Build() return new CrsMap(_overrides); } } - -readonly partial struct CrsMap -{ - readonly CrsMapEntry[]? _overriden; - - internal CrsMap(CrsMapEntry[]? overriden) - => _overriden = overriden; - - internal string? GetAuthority(int srid) - => GetAuthority(_overriden, srid) ?? GetAuthority(WellKnown, srid); - - static string? GetAuthority(CrsMapEntry[]? 
entries, int srid) - { - if (entries == null) - return null; - - var left = 0; - var right = entries.Length; - while (left <= right) - { - var middle = left + (right - left) / 2; - var entry = entries[middle]; - - if (srid < entry.MinSrid) - right = middle - 1; - else - if (srid > entry.MaxSrid) - left = middle + 1; - else - return entry.Authority; - } - - return null; - } -} \ No newline at end of file diff --git a/src/Npgsql.GeoJSON/Internal/GeoJSONConverter.cs b/src/Npgsql.GeoJSON/Internal/GeoJSONConverter.cs new file mode 100644 index 0000000000..2f6ece1fd8 --- /dev/null +++ b/src/Npgsql.GeoJSON/Internal/GeoJSONConverter.cs @@ -0,0 +1,748 @@ +using System; +using System.Buffers.Binary; +using System.Collections.Concurrent; +using System.Collections.ObjectModel; +using System.Runtime.CompilerServices; +using System.Threading; +using System.Threading.Tasks; +using GeoJSON.Net; +using GeoJSON.Net.CoordinateReferenceSystem; +using GeoJSON.Net.Geometry; +using Npgsql.Internal; + +namespace Npgsql.GeoJSON.Internal; + +sealed class GeoJSONConverter : PgStreamingConverter where T : IGeoJSONObject +{ + readonly ConcurrentDictionary _cachedCrs = new(); + readonly GeoJSONOptions _options; + readonly Func _getCrs; + + public GeoJSONConverter(GeoJSONOptions options, CrsMap crsMap) + { + _options = options; + _getCrs = GetCrs( + crsMap, + _cachedCrs, + crsType: _options & (GeoJSONOptions.ShortCRS | GeoJSONOptions.LongCRS) + ); + } + + bool BoundingBox => (_options & GeoJSONOptions.BoundingBox) != 0; + + public override T Read(PgReader reader) + => (T)GeoJSONConverter.Read(async: false, reader, BoundingBox ? new BoundingBoxBuilder() : null, _getCrs, CancellationToken.None).GetAwaiter().GetResult(); + + public override async ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => (T)await GeoJSONConverter.Read(async: true, reader, BoundingBox ? 
new BoundingBoxBuilder() : null, _getCrs, cancellationToken).ConfigureAwait(false); + + public override Size GetSize(SizeContext context, T value, ref object? writeState) + => GeoJSONConverter.GetSize(context, value, ref writeState); + + public override void Write(PgWriter writer, T value) + => GeoJSONConverter.Write(async: false, writer, value, CancellationToken.None).GetAwaiter().GetResult(); + + public override ValueTask WriteAsync(PgWriter writer, T value, CancellationToken cancellationToken = default) + => GeoJSONConverter.Write(async: true, writer, value, CancellationToken.None); + + static Func GetCrs(CrsMap crsMap, ConcurrentDictionary cachedCrs, GeoJSONOptions crsType) + => srid => + { + if (crsType == GeoJSONOptions.None) + return null; + +#if NETSTANDARD2_0 + return cachedCrs.GetOrAdd(srid, srid => + { + var authority = crsMap.GetAuthority(srid); + + return authority is null + ? throw new InvalidOperationException($"SRID {srid} unknown in spatial_ref_sys table") + : new NamedCRS(crsType == GeoJSONOptions.LongCRS + ? "urn:ogc:def:crs:" + authority + "::" + srid + : authority + ":" + srid); + }); +#else + return cachedCrs.GetOrAdd(srid, static (srid, state) => + { + var (crsMap, crsType) = state; + var authority = crsMap.GetAuthority(srid); + + return authority is null + ? throw new InvalidOperationException($"SRID {srid} unknown in spatial_ref_sys table") + : new NamedCRS(crsType == GeoJSONOptions.LongCRS + ? "urn:ogc:def:crs:" + authority + "::" + srid + : authority + ":" + srid); + }, (crsMap, crsType)); +#endif + }; +} + +static class GeoJSONConverter +{ + public static async ValueTask Read(bool async, PgReader reader, BoundingBoxBuilder? 
boundingBox, Func getCrs, CancellationToken cancellationToken) + { + var geometry = await Core(async, reader, boundingBox, getCrs, cancellationToken).ConfigureAwait(false); + geometry.BoundingBoxes = boundingBox?.Build(); + return geometry; + + static async ValueTask Core(bool async, PgReader reader, BoundingBoxBuilder? boundingbox, Func getCrs, CancellationToken cancellationToken) + { + if (reader.ShouldBuffer(SizeOfHeader)) + await reader.BufferData(async, SizeOfHeader, cancellationToken).ConfigureAwait(false); + + var littleEndian = reader.ReadByte() > 0; + var type = (EwkbGeometryType)ReadUInt32(littleEndian); + + GeoJSONObject geometry; + NamedCRS? crs = null; + + if (HasSrid(type)) + { + if (reader.ShouldBuffer(sizeof(int))) + await reader.BufferData(async, sizeof(int), cancellationToken).ConfigureAwait(false); + crs = getCrs(ReadInt32(littleEndian)); + } + + switch (type & EwkbGeometryType.BaseType) + { + case EwkbGeometryType.Point: + { + if (SizeOfPoint(type) is var size && reader.ShouldBuffer(size)) + await reader.BufferData(async, size, cancellationToken).ConfigureAwait(false); + + var position = ReadPosition(reader, type, littleEndian); + boundingbox?.Accumulate(position); + geometry = new Point(position); + break; + } + + case EwkbGeometryType.LineString: + { + if (reader.ShouldBuffer(SizeOfLength)) + await reader.BufferData(async, SizeOfLength, cancellationToken).ConfigureAwait(false); + + var coordinates = new Position[ReadInt32(littleEndian)]; + for (var i = 0; i < coordinates.Length; ++i) + { + if (SizeOfPoint(type) is var size && reader.ShouldBuffer(size)) + await reader.BufferData(async, size, cancellationToken).ConfigureAwait(false); + + var position = ReadPosition(reader, type, littleEndian); + boundingbox?.Accumulate(position); + coordinates[i] = position; + } + geometry = new LineString(coordinates); + break; + } + + case EwkbGeometryType.Polygon: + { + if (reader.ShouldBuffer(SizeOfLength)) + await reader.BufferData(async, SizeOfLength, 
cancellationToken).ConfigureAwait(false); + + var lines = new LineString[ReadInt32(littleEndian)]; + for (var i = 0; i < lines.Length; ++i) + { + if (reader.ShouldBuffer(SizeOfLength)) + await reader.BufferData(async, SizeOfLength, cancellationToken).ConfigureAwait(false); + + var coordinates = new Position[ReadInt32(littleEndian)]; + for (var j = 0; j < coordinates.Length; ++j) + { + if (SizeOfPoint(type) is var size && reader.ShouldBuffer(size)) + await reader.BufferData(async, size, cancellationToken).ConfigureAwait(false); + + var position = ReadPosition(reader, type, littleEndian); + boundingbox?.Accumulate(position); + coordinates[j] = position; + } + lines[i] = new LineString(coordinates); + } + geometry = new Polygon(lines); + break; + } + + case EwkbGeometryType.MultiPoint: + { + if (reader.ShouldBuffer(SizeOfLength)) + await reader.BufferData(async, SizeOfLength, cancellationToken).ConfigureAwait(false); + + var points = new Point[ReadInt32(littleEndian)]; + for (var i = 0; i < points.Length; ++i) + { + if (SizeOfHeader + SizeOfPoint(type) is var size && reader.ShouldBuffer(size)) + await reader.BufferData(async, size, cancellationToken).ConfigureAwait(false); + + if (async) + await reader.ConsumeAsync(SizeOfHeader, cancellationToken).ConfigureAwait(false); + else + reader.Consume(SizeOfHeader); + + var position = ReadPosition(reader, type, littleEndian); + boundingbox?.Accumulate(position); + points[i] = new Point(position); + } + geometry = new MultiPoint(points); + break; + } + + case EwkbGeometryType.MultiLineString: + { + if (reader.ShouldBuffer(SizeOfLength)) + await reader.BufferData(async, SizeOfLength, cancellationToken).ConfigureAwait(false); + + var lines = new LineString[ReadInt32(littleEndian)]; + for (var i = 0; i < lines.Length; ++i) + { + if (reader.ShouldBuffer(SizeOfHeaderWithLength)) + await reader.BufferData(async, SizeOfHeaderWithLength, cancellationToken).ConfigureAwait(false); + + if (async) + await reader.ConsumeAsync(SizeOfHeader, 
cancellationToken).ConfigureAwait(false); + else + reader.Consume(SizeOfHeader); + + var coordinates = new Position[ReadInt32(littleEndian)]; + for (var j = 0; j < coordinates.Length; ++j) + { + if (SizeOfPoint(type) is var size && reader.ShouldBuffer(size)) + await reader.BufferData(async, size, cancellationToken).ConfigureAwait(false); + var position = ReadPosition(reader, type, littleEndian); + boundingbox?.Accumulate(position); + coordinates[j] = position; + } + lines[i] = new LineString(coordinates); + } + geometry = new MultiLineString(lines); + break; + } + + case EwkbGeometryType.MultiPolygon: + { + if (reader.ShouldBuffer(SizeOfLength)) + await reader.BufferData(async, SizeOfLength, cancellationToken).ConfigureAwait(false); + + var polygons = new Polygon[ReadInt32(littleEndian)]; + for (var i = 0; i < polygons.Length; ++i) + { + if (reader.ShouldBuffer(SizeOfHeaderWithLength)) + await reader.BufferData(async, SizeOfHeaderWithLength, cancellationToken).ConfigureAwait(false); + + if (async) + await reader.ConsumeAsync(SizeOfHeader, cancellationToken).ConfigureAwait(false); + else + reader.Consume(SizeOfHeader); + + var lines = new LineString[ReadInt32(littleEndian)]; + for (var j = 0; j < lines.Length; ++j) + { + if (reader.ShouldBuffer(SizeOfLength)) + await reader.BufferData(async, SizeOfLength, cancellationToken).ConfigureAwait(false); + var coordinates = new Position[ReadInt32(littleEndian)]; + for (var k = 0; k < coordinates.Length; ++k) + { + if (SizeOfPoint(type) is var size && reader.ShouldBuffer(size)) + await reader.BufferData(async, size, cancellationToken).ConfigureAwait(false); + var position = ReadPosition(reader, type, littleEndian); + boundingbox?.Accumulate(position); + coordinates[k] = position; + } + lines[j] = new LineString(coordinates); + } + polygons[i] = new Polygon(lines); + } + geometry = new MultiPolygon(polygons); + break; + } + + case EwkbGeometryType.GeometryCollection: + { + if (reader.ShouldBuffer(SizeOfLength)) + await 
reader.BufferData(async, SizeOfLength, cancellationToken).ConfigureAwait(false); + + var elements = new IGeometryObject[ReadInt32(littleEndian)]; + for (var i = 0; i < elements.Length; ++i) + elements[i] = (IGeometryObject)await Core(async, reader, boundingbox, getCrs, cancellationToken).ConfigureAwait(false); + geometry = new GeometryCollection(elements); + break; + } + + default: + throw UnknownPostGisType(); + } + + geometry.CRS = crs; + return geometry; + + int ReadInt32(bool littleEndian) + => littleEndian ? BinaryPrimitives.ReverseEndianness(reader.ReadInt32()) : reader.ReadInt32(); + uint ReadUInt32(bool littleEndian) + => littleEndian ? BinaryPrimitives.ReverseEndianness(reader.ReadUInt32()) : reader.ReadUInt32(); + } + + static Position ReadPosition(PgReader reader, EwkbGeometryType type, bool littleEndian) + { + var position = new Position( + longitude: ReadDouble(littleEndian), + latitude: ReadDouble(littleEndian), + altitude: HasZ(type) ? reader.ReadDouble() : null); + if (HasM(type)) ReadDouble(littleEndian); + return position; + + double ReadDouble(bool littleEndian) + => littleEndian + // Netstandard is missing ReverseEndianness apis for double. + ? Unsafe.As(ref Unsafe.AsRef( + BinaryPrimitives.ReverseEndianness(Unsafe.As(ref Unsafe.AsRef(reader.ReadDouble()))))) + : reader.ReadDouble(); + } + } + + public static Size GetSize(SizeContext context, IGeoJSONObject value, ref object? 
writeState) + => value.Type switch + { + GeoJSONObjectType.Point => GetSize((Point)value), + GeoJSONObjectType.LineString => GetSize((LineString)value), + GeoJSONObjectType.Polygon => GetSize((Polygon)value), + GeoJSONObjectType.MultiPoint => GetSize((MultiPoint)value), + GeoJSONObjectType.MultiLineString => GetSize((MultiLineString)value), + GeoJSONObjectType.MultiPolygon => GetSize((MultiPolygon)value), + GeoJSONObjectType.GeometryCollection => GetSize(context, (GeometryCollection)value, ref writeState), + _ => throw UnknownPostGisType() + }; + + static bool NotValid(ReadOnlyCollection coordinates, out bool hasZ) + { + if (coordinates.Count == 0) + hasZ = false; + else + { + hasZ = HasZ(coordinates[0]); + for (var i = 1; i < coordinates.Count; ++i) + if (HasZ(coordinates[i]) != hasZ) return true; + } + return false; + } + + static Size GetSize(Point value) + { + var length = Size.Create(SizeOfHeader + SizeOfPoint(HasZ(value.Coordinates))); + if (GetSrid(value.CRS) != 0) + length = length.Combine(sizeof(int)); + + return length; + } + + static Size GetSize(LineString value) + { + var coordinates = value.Coordinates; + if (NotValid(coordinates, out var hasZ)) + throw AllOrNoneCoordiantesMustHaveZ(nameof(LineString)); + + var length = Size.Create(SizeOfHeaderWithLength + coordinates.Count * SizeOfPoint(hasZ)); + if (GetSrid(value.CRS) != 0) + length = length.Combine(sizeof(int)); + + return length; + } + + static Size GetSize(Polygon value) + { + var lines = value.Coordinates; + var length = Size.Create(SizeOfHeaderWithLength + SizeOfLength * lines.Count); + if (GetSrid(value.CRS) != 0) + length = length.Combine(sizeof(int)); + + var hasZ = false; + for (var i = 0; i < lines.Count; ++i) + { + var coordinates = lines[i].Coordinates; + if (NotValid(coordinates, out var lineHasZ)) + throw AllOrNoneCoordiantesMustHaveZ(nameof(Polygon)); + + if (hasZ != lineHasZ) + { + if (i == 0) hasZ = lineHasZ; + else throw AllOrNoneCoordiantesMustHaveZ(nameof(LineString)); + } + + 
length = length.Combine(coordinates.Count * SizeOfPoint(hasZ)); + } + + return length; + } + + static Size GetSize(MultiPoint value) + { + var length = Size.Create(SizeOfHeaderWithLength); + if (GetSrid(value.CRS) != 0) + length = length.Combine(sizeof(int)); + + var coordinates = value.Coordinates; + foreach (var t in coordinates) + length = length.Combine(GetSize(t)); + + return length; + } + + static Size GetSize(MultiLineString value) + { + var length = Size.Create(SizeOfHeaderWithLength); + if (GetSrid(value.CRS) != 0) + length = length.Combine(sizeof(int)); + + var coordinates = value.Coordinates; + foreach (var t in coordinates) + length = length.Combine(GetSize(t)); + + return length; + } + + static Size GetSize(MultiPolygon value) + { + var length = Size.Create(SizeOfHeaderWithLength); + if (GetSrid(value.CRS) != 0) + length = length.Combine(sizeof(int)); + + var coordinates = value.Coordinates; + foreach (var t in coordinates) + length = length.Combine(GetSize(t)); + + return length; + } + + static Size GetSize(SizeContext context, GeometryCollection value, ref object? 
writeState) + { + var length = Size.Create(SizeOfHeaderWithLength); + if (GetSrid(value.CRS) != 0) + length = length.Combine(sizeof(int)); + + var geometries = value.Geometries; + foreach (var t in geometries) + length = length.Combine(GetSize(context, (IGeoJSONObject)t, ref writeState)); + + return length; + } + + public static ValueTask Write(bool async, PgWriter writer, IGeoJSONObject value, CancellationToken cancellationToken = default) + => value.Type switch + { + GeoJSONObjectType.Point => Write(async, writer, (Point)value, cancellationToken), + GeoJSONObjectType.LineString => Write(async, writer, (LineString)value, cancellationToken), + GeoJSONObjectType.Polygon => Write(async, writer, (Polygon)value, cancellationToken), + GeoJSONObjectType.MultiPoint => Write(async, writer, (MultiPoint)value, cancellationToken), + GeoJSONObjectType.MultiLineString => Write(async, writer, (MultiLineString)value, cancellationToken), + GeoJSONObjectType.MultiPolygon => Write(async, writer, (MultiPolygon)value, cancellationToken), + GeoJSONObjectType.GeometryCollection => Write(async, writer, (GeometryCollection)value, cancellationToken), + _ => throw UnknownPostGisType() + }; + + static async ValueTask Write(bool async, PgWriter writer, Point value, CancellationToken cancellationToken) + { + var type = EwkbGeometryType.Point; + var size = SizeOfHeader; + var srid = GetSrid(value.CRS); + if (srid != 0) + { + size += sizeof(int); + type |= EwkbGeometryType.HasSrid; + } + + if (writer.ShouldFlush(size)) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + writer.WriteByte(0); // Most significant byte first + writer.WriteInt32((int)type); + + if (srid != 0) + writer.WriteInt32(srid); + + await WritePosition(async, writer, value.Coordinates, cancellationToken).ConfigureAwait(false); + } + + static async ValueTask Write(bool async, PgWriter writer, LineString value, CancellationToken cancellationToken) + { + var type = EwkbGeometryType.LineString; + var size = 
SizeOfHeaderWithLength; + var srid = GetSrid(value.CRS); + if (srid != 0) + { + size += sizeof(int); + type |= EwkbGeometryType.HasSrid; + } + + if (writer.ShouldFlush(size)) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + var coordinates = value.Coordinates; + + writer.WriteByte(0); // Most significant byte first + writer.WriteInt32((int)type); + writer.WriteInt32(coordinates.Count); + + if (srid != 0) + writer.WriteInt32(srid); + + foreach (var t in coordinates) + await WritePosition(async, writer, t, cancellationToken).ConfigureAwait(false); + } + + static async ValueTask Write(bool async, PgWriter writer, Polygon value, CancellationToken cancellationToken) + { + var type = EwkbGeometryType.Polygon; + var size = SizeOfHeaderWithLength; + var srid = GetSrid(value.CRS); + if (srid != 0) + { + size += sizeof(int); + type |= EwkbGeometryType.HasSrid; + } + + if (writer.ShouldFlush(size)) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + var lines = value.Coordinates; + + writer.WriteByte(0); // Most significant byte first + writer.WriteInt32((int)type); + writer.WriteInt32(lines.Count); + + if (srid != 0) + writer.WriteInt32(srid); + + foreach (var t in lines) + { + if (writer.ShouldFlush(SizeOfLength)) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + var coordinates = t.Coordinates; + writer.WriteInt32(coordinates.Count); + foreach (var t1 in coordinates) + await WritePosition(async, writer, t1, cancellationToken).ConfigureAwait(false); + } + } + + static async ValueTask Write(bool async, PgWriter writer, MultiPoint value, CancellationToken cancellationToken) + { + var type = EwkbGeometryType.MultiPoint; + var size = SizeOfHeaderWithLength; + var srid = GetSrid(value.CRS); + if (srid != 0) + { + size += sizeof(int); + type |= EwkbGeometryType.HasSrid; + } + + if (writer.ShouldFlush(size)) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + var coordinates = 
value.Coordinates; + + writer.WriteByte(0); // Most significant byte first + writer.WriteInt32((int)type); + writer.WriteInt32(coordinates.Count); + + if (srid != 0) + writer.WriteInt32(srid); + + foreach (var t in coordinates) + await Write(async, writer, t, cancellationToken).ConfigureAwait(false); + } + + static async ValueTask Write(bool async, PgWriter writer, MultiLineString value, CancellationToken cancellationToken) + { + var type = EwkbGeometryType.MultiLineString; + var size = SizeOfHeaderWithLength; + var srid = GetSrid(value.CRS); + if (srid != 0) + { + size += sizeof(int); + type |= EwkbGeometryType.HasSrid; + } + + if (writer.ShouldFlush(size)) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + var coordinates = value.Coordinates; + + writer.WriteByte(0); // Most significant byte first + writer.WriteInt32((int)type); + writer.WriteInt32(coordinates.Count); + + if (srid != 0) + writer.WriteInt32(srid); + + foreach (var t in coordinates) + await Write(async, writer, t, cancellationToken).ConfigureAwait(false); + } + + static async ValueTask Write(bool async, PgWriter writer, MultiPolygon value, CancellationToken cancellationToken) + { + var type = EwkbGeometryType.MultiPolygon; + var size = SizeOfHeaderWithLength; + var srid = GetSrid(value.CRS); + if (srid != 0) + { + size += sizeof(int); + type |= EwkbGeometryType.HasSrid; + } + + if (writer.ShouldFlush(size)) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + var coordinates = value.Coordinates; + + writer.WriteByte(0); // Most significant byte first + writer.WriteInt32((int)type); + writer.WriteInt32(coordinates.Count); + + if (srid != 0) + writer.WriteInt32(srid); + foreach (var t in coordinates) + await Write(async, writer, t, cancellationToken).ConfigureAwait(false); + } + + static async ValueTask Write(bool async, PgWriter writer, GeometryCollection value, CancellationToken cancellationToken) + { + var type = EwkbGeometryType.GeometryCollection; + 
var size = SizeOfHeaderWithLength; + var srid = GetSrid(value.CRS); + if (srid != 0) + { + size += sizeof(int); + type |= EwkbGeometryType.HasSrid; + } + + if (writer.ShouldFlush(size)) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + var geometries = value.Geometries; + + writer.WriteByte(0); // Most significant byte first + writer.WriteInt32((int)type); + writer.WriteInt32(geometries.Count); + + if (srid != 0) + writer.WriteInt32(srid); + + foreach (var t in geometries) + await Write(async, writer, (IGeoJSONObject)t, cancellationToken).ConfigureAwait(false); + } + + static async ValueTask WritePosition(bool async, PgWriter writer, IPosition coordinate, CancellationToken cancellationToken) + { + var altitude = coordinate.Altitude; + if (SizeOfPoint(altitude.HasValue) is var size && writer.ShouldFlush(size)) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + writer.WriteDouble(coordinate.Longitude); + writer.WriteDouble(coordinate.Latitude); + if (altitude.HasValue) + writer.WriteDouble(altitude.Value); + } + + static ValueTask BufferData(this PgReader reader, bool async, int byteCount, CancellationToken cancellationToken) + { + if (async) + return reader.BufferAsync(byteCount, cancellationToken); + + reader.Buffer(byteCount); + return new(); + } + + static ValueTask Flush(this PgWriter writer, bool async, CancellationToken cancellationToken) + { + if (async) + return writer.FlushAsync(cancellationToken); + + writer.Flush(); + return new(); + } + + static bool HasSrid(EwkbGeometryType type) + => (type & EwkbGeometryType.HasSrid) != 0; + + static bool HasZ(EwkbGeometryType type) + => (type & EwkbGeometryType.HasZ) != 0; + + static bool HasM(EwkbGeometryType type) + => (type & EwkbGeometryType.HasM) != 0; + + static bool HasZ(IPosition coordinates) + => coordinates.Altitude.HasValue; + + const int SizeOfLength = sizeof(int); + const int SizeOfHeader = sizeof(byte) + sizeof(EwkbGeometryType); + const int 
SizeOfHeaderWithLength = SizeOfHeader + SizeOfLength; + const int SizeOfPoint2D = 2 * sizeof(double); + const int SizeOfPoint3D = 3 * sizeof(double); + + static int SizeOfPoint(bool hasZ) + => hasZ ? SizeOfPoint3D : SizeOfPoint2D; + + static int SizeOfPoint(EwkbGeometryType type) + { + var size = SizeOfPoint2D; + if (HasZ(type)) + size += sizeof(double); + if (HasM(type)) + size += sizeof(double); + return size; + } + + static Exception UnknownPostGisType() + => throw new InvalidOperationException("Invalid PostGIS type"); + + static Exception AllOrNoneCoordiantesMustHaveZ(string typeName) + => new ArgumentException($"The Z coordinate must be specified for all or none elements of {typeName}"); + + static int GetSrid(ICRSObject crs) + { + if (crs is null or UnspecifiedCRS) + return 0; + + var namedCrs = crs as NamedCRS; + if (namedCrs == null) + throw new NotSupportedException("The LinkedCRS class isn't supported"); + + if (namedCrs.Properties.TryGetValue("name", out var value) && value != null) + { + var name = value.ToString()!; + if (string.Equals(name, "urn:ogc:def:crs:OGC::CRS84", StringComparison.Ordinal)) + return 4326; + + var index = name.LastIndexOf(':'); + if (index != -1 && int.TryParse(name.Substring(index + 1), out var srid)) + return srid; + + throw new FormatException("The specified CRS isn't properly named"); + } + + return 0; + } +} + +/// +/// Represents the identifier of the Well Known Binary representation of a geographical feature specified by the OGC. 
+/// http://portal.opengeospatial.org/files/?artifact_id=13227 Chapter 6.3.2.7 +/// +[Flags] +enum EwkbGeometryType : uint +{ + // Types + Point = 1, + LineString = 2, + Polygon = 3, + MultiPoint = 4, + MultiLineString = 5, + MultiPolygon = 6, + GeometryCollection = 7, + + // Masks + BaseType = Point | LineString | Polygon | MultiPoint | MultiLineString | MultiPolygon | GeometryCollection, + + // Flags + HasSrid = 0x20000000, + HasM = 0x40000000, + HasZ = 0x80000000 +} diff --git a/src/Npgsql.GeoJSON/Internal/GeoJSONHandler.cs b/src/Npgsql.GeoJSON/Internal/GeoJSONHandler.cs deleted file mode 100644 index ba040ed79d..0000000000 --- a/src/Npgsql.GeoJSON/Internal/GeoJSONHandler.cs +++ /dev/null @@ -1,722 +0,0 @@ -using System; -using System.Collections.Concurrent; -using System.Collections.ObjectModel; -using System.Threading; -using System.Threading.Tasks; -using GeoJSON.Net; -using GeoJSON.Net.CoordinateReferenceSystem; -using GeoJSON.Net.Geometry; -using Npgsql.BackendMessages; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.GeoJSON.Internal; - -sealed partial class GeoJsonHandler : NpgsqlTypeHandler, - INpgsqlTypeHandler, INpgsqlTypeHandler, - INpgsqlTypeHandler, INpgsqlTypeHandler, - INpgsqlTypeHandler, INpgsqlTypeHandler, - INpgsqlTypeHandler, - INpgsqlTypeHandler, - INpgsqlTypeHandler -{ - readonly GeoJSONOptions _options; - readonly CrsMap _crsMap; - readonly ConcurrentDictionary _cachedCrs = new(); - - internal GeoJsonHandler(PostgresType postgresType, GeoJSONOptions options, CrsMap crsMap) - : base(postgresType) - { - _options = options; - _crsMap = crsMap; - } - - GeoJSONOptions CrsType => _options & (GeoJSONOptions.ShortCRS | GeoJSONOptions.LongCRS); - - bool BoundingBox => (_options & GeoJSONOptions.BoundingBox) != 0; - - static bool HasSrid(EwkbGeometryType type) - => (type & EwkbGeometryType.HasSrid) != 0; - - static bool HasZ(EwkbGeometryType type) - => (type & EwkbGeometryType.HasZ) != 0; - 
- static bool HasM(EwkbGeometryType type) - => (type & EwkbGeometryType.HasM) != 0; - - static bool HasZ(IPosition coordinates) - => coordinates.Altitude.HasValue; - - const int SizeOfLength = sizeof(int); - const int SizeOfHeader = sizeof(byte) + sizeof(EwkbGeometryType); - const int SizeOfHeaderWithLength = SizeOfHeader + SizeOfLength; - const int SizeOfPoint2D = 2 * sizeof(double); - const int SizeOfPoint3D = 3 * sizeof(double); - - static int SizeOfPoint(bool hasZ) - => hasZ ? SizeOfPoint3D : SizeOfPoint2D; - - static int SizeOfPoint(EwkbGeometryType type) - { - var size = SizeOfPoint2D; - if (HasZ(type)) - size += sizeof(double); - if (HasM(type)) - size += sizeof(double); - return size; - } - - #region Throw - - static Exception UnknownPostGisType() - => throw new InvalidOperationException("Invalid PostGIS type"); - - static Exception AllOrNoneCoordiantesMustHaveZ(NpgsqlParameter? parameter, string typeName) - => parameter is null - ? new ArgumentException($"The Z coordinate must be specified for all or none elements of {typeName}") - : new ArgumentException($"The Z coordinate must be specified for all or none elements of {typeName} in the {parameter.ParameterName} parameter", parameter.ParameterName); - - #endregion - - #region Read - - public override ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - => ReadGeometry(buf, async); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => (Point)await ReadGeometry(buf, async); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => (LineString)await ReadGeometry(buf, async); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription) - => (Polygon)await ReadGeometry(buf, async); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => (MultiPoint)await ReadGeometry(buf, async); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => (MultiLineString)await ReadGeometry(buf, async); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => (MultiPolygon)await ReadGeometry(buf, async); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => (GeometryCollection)await ReadGeometry(buf, async); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => await ReadGeometry(buf, async); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => (IGeometryObject)await ReadGeometry(buf, async); - - async ValueTask ReadGeometry(NpgsqlReadBuffer buf, bool async) - { - var boundingBox = BoundingBox ? new BoundingBoxBuilder() : null; - var geometry = await ReadGeometryCore(buf, async, boundingBox); - - geometry.BoundingBoxes = boundingBox?.Build(); - return geometry; - } - - async ValueTask ReadGeometryCore(NpgsqlReadBuffer buf, bool async, BoundingBoxBuilder? boundingBox) - { - await buf.Ensure(SizeOfHeader, async); - var littleEndian = buf.ReadByte() > 0; - var type = (EwkbGeometryType)buf.ReadUInt32(littleEndian); - - GeoJSONObject geometry; - NamedCRS? 
crs = null; - - if (HasSrid(type)) - { - await buf.Ensure(4, async); - crs = GetCrs(buf.ReadInt32(littleEndian)); - } - - switch (type & EwkbGeometryType.BaseType) - { - case EwkbGeometryType.Point: - { - await buf.Ensure(SizeOfPoint(type), async); - var position = ReadPosition(buf, type, littleEndian); - boundingBox?.Accumulate(position); - geometry = new Point(position); - break; - } - - case EwkbGeometryType.LineString: - { - await buf.Ensure(SizeOfLength, async); - var coordinates = new Position[buf.ReadInt32(littleEndian)]; - for (var i = 0; i < coordinates.Length; ++i) - { - await buf.Ensure(SizeOfPoint(type), async); - var position = ReadPosition(buf, type, littleEndian); - boundingBox?.Accumulate(position); - coordinates[i] = position; - } - geometry = new LineString(coordinates); - break; - } - - case EwkbGeometryType.Polygon: - { - await buf.Ensure(SizeOfLength, async); - var lines = new LineString[buf.ReadInt32(littleEndian)]; - for (var i = 0; i < lines.Length; ++i) - { - await buf.Ensure(SizeOfLength, async); - var coordinates = new Position[buf.ReadInt32(littleEndian)]; - for (var j = 0; j < coordinates.Length; ++j) - { - await buf.Ensure(SizeOfPoint(type), async); - var position = ReadPosition(buf, type, littleEndian); - boundingBox?.Accumulate(position); - coordinates[j] = position; - } - lines[i] = new LineString(coordinates); - } - geometry = new Polygon(lines); - break; - } - - case EwkbGeometryType.MultiPoint: - { - await buf.Ensure(SizeOfLength, async); - var points = new Point[buf.ReadInt32(littleEndian)]; - for (var i = 0; i < points.Length; ++i) - { - await buf.Ensure(SizeOfHeader + SizeOfPoint(type), async); - await buf.Skip(SizeOfHeader, async); - var position = ReadPosition(buf, type, littleEndian); - boundingBox?.Accumulate(position); - points[i] = new Point(position); - } - geometry = new MultiPoint(points); - break; - } - - case EwkbGeometryType.MultiLineString: - { - await buf.Ensure(SizeOfLength, async); - var lines = new 
LineString[buf.ReadInt32(littleEndian)]; - for (var i = 0; i < lines.Length; ++i) - { - await buf.Ensure(SizeOfHeaderWithLength, async); - await buf.Skip(SizeOfHeader, async); - var coordinates = new Position[buf.ReadInt32(littleEndian)]; - for (var j = 0; j < coordinates.Length; ++j) - { - await buf.Ensure(SizeOfPoint(type), async); - var position = ReadPosition(buf, type, littleEndian); - boundingBox?.Accumulate(position); - coordinates[j] = position; - } - lines[i] = new LineString(coordinates); - } - geometry = new MultiLineString(lines); - break; - } - - case EwkbGeometryType.MultiPolygon: - { - await buf.Ensure(SizeOfLength, async); - var polygons = new Polygon[buf.ReadInt32(littleEndian)]; - for (var i = 0; i < polygons.Length; ++i) - { - await buf.Ensure(SizeOfHeaderWithLength, async); - await buf.Skip(SizeOfHeader, async); - var lines = new LineString[buf.ReadInt32(littleEndian)]; - for (var j = 0; j < lines.Length; ++j) - { - await buf.Ensure(SizeOfLength, async); - var coordinates = new Position[buf.ReadInt32(littleEndian)]; - for (var k = 0; k < coordinates.Length; ++k) - { - await buf.Ensure(SizeOfPoint(type), async); - var position = ReadPosition(buf, type, littleEndian); - boundingBox?.Accumulate(position); - coordinates[k] = position; - } - lines[j] = new LineString(coordinates); - } - polygons[i] = new Polygon(lines); - } - geometry = new MultiPolygon(polygons); - break; - } - - case EwkbGeometryType.GeometryCollection: - { - await buf.Ensure(SizeOfLength, async); - var elements = new IGeometryObject[buf.ReadInt32(littleEndian)]; - for (var i = 0; i < elements.Length; ++i) - elements[i] = (IGeometryObject)await ReadGeometryCore(buf, async, boundingBox); - geometry = new GeometryCollection(elements); - break; - } - - default: - throw UnknownPostGisType(); - } - - geometry.CRS = crs; - return geometry; - } - - static Position ReadPosition(NpgsqlReadBuffer buf, EwkbGeometryType type, bool littleEndian) - { - var position = new Position( - longitude: 
buf.ReadDouble(littleEndian), - latitude: buf.ReadDouble(littleEndian), - altitude: HasZ(type) ? buf.ReadDouble() : (double?)null); - if (HasM(type)) buf.ReadDouble(littleEndian); - return position; - } - - #endregion - - #region Write - - public override int ValidateAndGetLength(GeoJSONObject value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => value.Type switch - { - GeoJSONObjectType.Point => ValidateAndGetLength((Point)value, ref lengthCache, parameter), - GeoJSONObjectType.LineString => ValidateAndGetLength((LineString)value, ref lengthCache, parameter), - GeoJSONObjectType.Polygon => ValidateAndGetLength((Polygon)value, ref lengthCache, parameter), - GeoJSONObjectType.MultiPoint => ValidateAndGetLength((MultiPoint)value, ref lengthCache, parameter), - GeoJSONObjectType.MultiLineString => ValidateAndGetLength((MultiLineString)value, ref lengthCache, parameter), - GeoJSONObjectType.MultiPolygon => ValidateAndGetLength((MultiPolygon)value, ref lengthCache, parameter), - GeoJSONObjectType.GeometryCollection => ValidateAndGetLength((GeometryCollection)value, ref lengthCache, parameter), - _ => throw UnknownPostGisType() - }; - - public int ValidateAndGetLength(Point value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - { - var length = SizeOfHeader + SizeOfPoint(HasZ(value.Coordinates)); - if (GetSrid(value.CRS) != 0) - length += sizeof(int); - - return length; - } - - public int ValidateAndGetLength(LineString value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - { - var coordinates = value.Coordinates; - if (NotValid(coordinates, out var hasZ)) - throw AllOrNoneCoordiantesMustHaveZ(parameter, nameof(LineString)); - - var length = SizeOfHeaderWithLength + coordinates.Count * SizeOfPoint(hasZ); - if (GetSrid(value.CRS) != 0) - length += sizeof(int); - - return length; - } - - public int ValidateAndGetLength(Polygon value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - { - var lines = value.Coordinates; - var length = SizeOfHeaderWithLength + SizeOfLength * lines.Count; - if (GetSrid(value.CRS) != 0) - length += sizeof(int); - - var hasZ = false; - for (var i = 0; i < lines.Count; ++i) - { - var coordinates = lines[i].Coordinates; - if (NotValid(coordinates, out var lineHasZ)) - throw AllOrNoneCoordiantesMustHaveZ(parameter, nameof(Polygon)); - - if (hasZ != lineHasZ) - { - if (i == 0) hasZ = lineHasZ; - else throw AllOrNoneCoordiantesMustHaveZ(parameter, nameof(LineString)); - } - - length += coordinates.Count * SizeOfPoint(hasZ); - } - - return length; - } - - static bool NotValid(ReadOnlyCollection coordinates, out bool hasZ) - { - if (coordinates.Count == 0) - hasZ = false; - else - { - hasZ = HasZ(coordinates[0]); - for (var i = 1; i < coordinates.Count; ++i) - if (HasZ(coordinates[i]) != hasZ) return true; - } - return false; - } - - public int ValidateAndGetLength(MultiPoint value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - { - var length = SizeOfHeaderWithLength; - if (GetSrid(value.CRS) != 0) - length += sizeof(int); - - var coordinates = value.Coordinates; - for (var i = 0; i < coordinates.Count; ++i) - length += ValidateAndGetLength(coordinates[i], ref lengthCache, parameter); - - return length; - } - - public int ValidateAndGetLength(MultiLineString value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - { - var length = SizeOfHeaderWithLength; - if (GetSrid(value.CRS) != 0) - length += sizeof(int); - - var coordinates = value.Coordinates; - for (var i = 0; i < coordinates.Count; ++i) - length += ValidateAndGetLength(coordinates[i], ref lengthCache, parameter); - - return length; - } - - public int ValidateAndGetLength(MultiPolygon value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - { - var length = SizeOfHeaderWithLength; - if (GetSrid(value.CRS) != 0) - length += sizeof(int); - - var coordinates = value.Coordinates; - for (var i = 0; i < coordinates.Count; ++i) - length += ValidateAndGetLength(coordinates[i], ref lengthCache, parameter); - - return length; - } - - public int ValidateAndGetLength(GeometryCollection value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - { - var length = SizeOfHeaderWithLength; - if (GetSrid(value.CRS) != 0) - length += sizeof(int); - - var geometries = value.Geometries; - for (var i = 0; i < geometries.Count; ++i) - length += ValidateAndGetLength((GeoJSONObject)geometries[i], ref lengthCache, parameter); - - return length; - } - - int INpgsqlTypeHandler.ValidateAndGetLength(IGeoJSONObject value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength((GeoJSONObject)value, ref lengthCache, parameter); - - int INpgsqlTypeHandler.ValidateAndGetLength(IGeometryObject value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength((GeoJSONObject)value, ref lengthCache, parameter); - - public override Task Write(GeoJSONObject value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) - => value.Type switch - { - GeoJSONObjectType.Point => Write((Point)value, buf, lengthCache, parameter, async, cancellationToken), - GeoJSONObjectType.LineString => Write((LineString)value, buf, lengthCache, parameter, async, cancellationToken), - GeoJSONObjectType.Polygon => Write((Polygon)value, buf, lengthCache, parameter, async, cancellationToken), - GeoJSONObjectType.MultiPoint => Write((MultiPoint)value, buf, lengthCache, parameter, async, cancellationToken), - GeoJSONObjectType.MultiLineString => Write((MultiLineString)value, buf, lengthCache, parameter, async, cancellationToken), - GeoJSONObjectType.MultiPolygon => Write((MultiPolygon)value, buf, lengthCache, parameter, async, cancellationToken), - GeoJSONObjectType.GeometryCollection => Write((GeometryCollection)value, buf, lengthCache, parameter, async, cancellationToken), - _ => throw UnknownPostGisType() - }; - - public async Task Write(Point value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - var type = EwkbGeometryType.Point; - var size = SizeOfHeader; - var srid = GetSrid(value.CRS); - if (srid != 0) - { - size += sizeof(int); - type |= EwkbGeometryType.HasSrid; - } - - if (buf.WriteSpaceLeft < size) - await buf.Flush(async, cancellationToken); - - buf.WriteByte(0); // Most significant byte first - buf.WriteInt32((int)type); - - if (srid != 0) - buf.WriteInt32(srid); - - await WritePosition(value.Coordinates, buf, async, cancellationToken); - } - - public async Task Write(LineString value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) - { - var type = EwkbGeometryType.LineString; - var size = SizeOfHeaderWithLength; - var srid = GetSrid(value.CRS); - if (srid != 0) - { - size += sizeof(int); - type |= EwkbGeometryType.HasSrid; - } - - if (buf.WriteSpaceLeft < size) - await buf.Flush(async, cancellationToken); - - var coordinates = value.Coordinates; - - buf.WriteByte(0); // Most significant byte first - buf.WriteInt32((int)type); - buf.WriteInt32(coordinates.Count); - - if (srid != 0) - buf.WriteInt32(srid); - - for (var i = 0; i < coordinates.Count; ++i) - await WritePosition(coordinates[i], buf, async, cancellationToken); - } - - public async Task Write(Polygon value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - var type = EwkbGeometryType.Polygon; - var size = SizeOfHeaderWithLength; - var srid = GetSrid(value.CRS); - if (srid != 0) - { - size += sizeof(int); - type |= EwkbGeometryType.HasSrid; - } - - if (buf.WriteSpaceLeft < size) - await buf.Flush(async, cancellationToken); - - var lines = value.Coordinates; - - buf.WriteByte(0); // Most significant byte first - buf.WriteInt32((int)type); - buf.WriteInt32(lines.Count); - - if (srid != 0) - buf.WriteInt32(srid); - - for (var i = 0; i < lines.Count; ++i) - { - if (buf.WriteSpaceLeft < SizeOfLength) - await buf.Flush(async, cancellationToken); - var coordinates = lines[i].Coordinates; - buf.WriteInt32(coordinates.Count); - for (var j = 0; j < coordinates.Count; ++j) - await WritePosition(coordinates[j], buf, async, cancellationToken); - } - } - - public async Task Write(MultiPoint value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) - { - var type = EwkbGeometryType.MultiPoint; - var size = SizeOfHeaderWithLength; - var srid = GetSrid(value.CRS); - if (srid != 0) - { - size += sizeof(int); - type |= EwkbGeometryType.HasSrid; - } - - if (buf.WriteSpaceLeft < size) - await buf.Flush(async, cancellationToken); - - var coordinates = value.Coordinates; - - buf.WriteByte(0); // Most significant byte first - buf.WriteInt32((int)type); - buf.WriteInt32(coordinates.Count); - - if (srid != 0) - buf.WriteInt32(srid); - - for (var i = 0; i < coordinates.Count; ++i) - await Write(coordinates[i], buf, lengthCache, parameter, async, cancellationToken); - } - - public async Task Write(MultiLineString value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - var type = EwkbGeometryType.MultiLineString; - var size = SizeOfHeaderWithLength; - var srid = GetSrid(value.CRS); - if (srid != 0) - { - size += sizeof(int); - type |= EwkbGeometryType.HasSrid; - } - - if (buf.WriteSpaceLeft < size) - await buf.Flush(async, cancellationToken); - - var coordinates = value.Coordinates; - - buf.WriteByte(0); // Most significant byte first - buf.WriteInt32((int)type); - buf.WriteInt32(coordinates.Count); - - if (srid != 0) - buf.WriteInt32(srid); - - for (var i = 0; i < coordinates.Count; ++i) - await Write(coordinates[i], buf, lengthCache, parameter, async, cancellationToken); - } - - public async Task Write(MultiPolygon value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) - { - var type = EwkbGeometryType.MultiPolygon; - var size = SizeOfHeaderWithLength; - var srid = GetSrid(value.CRS); - if (srid != 0) - { - size += sizeof(int); - type |= EwkbGeometryType.HasSrid; - } - - if (buf.WriteSpaceLeft < size) - await buf.Flush(async, cancellationToken); - - var coordinates = value.Coordinates; - - buf.WriteByte(0); // Most significant byte first - buf.WriteInt32((int)type); - buf.WriteInt32(coordinates.Count); - - if (srid != 0) - buf.WriteInt32(srid); - for (var i = 0; i < coordinates.Count; ++i) - await Write(coordinates[i], buf, lengthCache, parameter, async, cancellationToken); - } - - public async Task Write(GeometryCollection value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - var type = EwkbGeometryType.GeometryCollection; - var size = SizeOfHeaderWithLength; - var srid = GetSrid(value.CRS); - if (srid != 0) - { - size += sizeof(int); - type |= EwkbGeometryType.HasSrid; - } - - if (buf.WriteSpaceLeft < size) - await buf.Flush(async, cancellationToken); - - var geometries = value.Geometries; - - buf.WriteByte(0); // Most significant byte first - buf.WriteInt32((int)type); - buf.WriteInt32(geometries.Count); - - if (srid != 0) - buf.WriteInt32(srid); - - for (var i = 0; i < geometries.Count; ++i) - await Write((GeoJSONObject) geometries[i], buf, lengthCache, parameter, async, cancellationToken); - } - - Task INpgsqlTypeHandler.Write(IGeoJSONObject value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken) - => Write((GeoJSONObject)value, buf, lengthCache, parameter, async, cancellationToken); - - Task INpgsqlTypeHandler.Write(IGeometryObject value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken) - => Write((GeoJSONObject)value, buf, lengthCache, parameter, async, cancellationToken); - - static async Task WritePosition(IPosition coordinate, NpgsqlWriteBuffer buf, bool async, CancellationToken cancellationToken = default) - { - var altitude = coordinate.Altitude; - if (buf.WriteSpaceLeft < SizeOfPoint(altitude.HasValue)) - await buf.Flush(async, cancellationToken); - buf.WriteDouble(coordinate.Longitude); - buf.WriteDouble(coordinate.Latitude); - if (altitude.HasValue) - buf.WriteDouble(altitude.Value); - } - - #endregion - - #region Crs - - NamedCRS? GetCrs(int srid) - { - var crsType = CrsType; - if (crsType == GeoJSONOptions.None) - return null; - -#if NETSTANDARD2_0 - return _cachedCrs.GetOrAdd(srid, srid => - { - var authority = _crsMap.GetAuthority(srid); - - return authority is null - ? throw new InvalidOperationException($"SRID {srid} unknown in spatial_ref_sys table") - : new NamedCRS(crsType == GeoJSONOptions.LongCRS - ? "urn:ogc:def:crs:" + authority + "::" + srid - : authority + ":" + srid); - }); -#else - return _cachedCrs.GetOrAdd(srid, static (srid, me) => - { - var authority = me._crsMap.GetAuthority(srid); - - return authority is null - ? throw new InvalidOperationException($"SRID {srid} unknown in spatial_ref_sys table") - : new NamedCRS(me.CrsType == GeoJSONOptions.LongCRS - ? 
"urn:ogc:def:crs:" + authority + "::" + srid - : authority + ":" + srid); - }, this); -#endif - } - - static int GetSrid(ICRSObject crs) - { - if (crs == null || crs is UnspecifiedCRS) - return 0; - - var namedCrs = crs as NamedCRS; - if (namedCrs == null) - throw new NotSupportedException("The LinkedCRS class isn't supported"); - - if (namedCrs.Properties.TryGetValue("name", out var value) && value != null) - { - var name = value.ToString()!; - if (string.Equals(name, "urn:ogc:def:crs:OGC::CRS84", StringComparison.Ordinal)) - return 4326; - - var index = name.LastIndexOf(':'); - if (index != -1 && int.TryParse(name.Substring(index + 1), out var srid)) - return srid; - - throw new FormatException("The specified CRS isn't properly named"); - } - - return 0; - } - - #endregion -} - -/// -/// Represents the identifier of the Well Known Binary representation of a geographical feature specified by the OGC. -/// http://portal.opengeospatial.org/files/?artifact_id=13227 Chapter 6.3.2.7 -/// -[Flags] -enum EwkbGeometryType : uint -{ - // Types - Point = 1, - LineString = 2, - Polygon = 3, - MultiPoint = 4, - MultiLineString = 5, - MultiPolygon = 6, - GeometryCollection = 7, - - // Masks - BaseType = Point | LineString | Polygon | MultiPoint | MultiLineString | MultiPolygon | GeometryCollection, - - // Flags - HasSrid = 0x20000000, - HasM = 0x40000000, - HasZ = 0x80000000 -} diff --git a/src/Npgsql.GeoJSON/Internal/GeoJSONTypeHandlerResolver.cs b/src/Npgsql.GeoJSON/Internal/GeoJSONTypeHandlerResolver.cs deleted file mode 100644 index a937c1d62b..0000000000 --- a/src/Npgsql.GeoJSON/Internal/GeoJSONTypeHandlerResolver.cs +++ /dev/null @@ -1,80 +0,0 @@ -using System; -using System.Collections.Concurrent; -using System.Collections.Generic; -using System.Data; -using GeoJSON.Net; -using GeoJSON.Net.Geometry; -using Newtonsoft.Json; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using Npgsql.TypeMapping; -using NpgsqlTypes; - -namespace 
Npgsql.GeoJSON.Internal; - -public class GeoJSONTypeHandlerResolver : TypeHandlerResolver -{ - readonly NpgsqlDatabaseInfo _databaseInfo; - readonly GeoJsonHandler? _geometryHandler, _geographyHandler; - readonly bool _geographyAsDefault; - - static readonly ConcurrentDictionary CRSMaps = new(); - - internal GeoJSONTypeHandlerResolver(NpgsqlConnector connector, GeoJSONOptions options, bool geographyAsDefault) - { - _databaseInfo = connector.DatabaseInfo; - _geographyAsDefault = geographyAsDefault; - - var crsMap = (options & (GeoJSONOptions.ShortCRS | GeoJSONOptions.LongCRS)) == GeoJSONOptions.None - ? default : CRSMaps.GetOrAdd(connector.Settings.ConnectionString, _ => - { - var builder = new CrsMapBuilder(); - using var cmd = connector.CreateCommand( - "SELECT min(srid), max(srid), auth_name " + - "FROM(SELECT srid, auth_name, srid - rank() OVER(ORDER BY srid) AS range " + - "FROM spatial_ref_sys) AS s GROUP BY range, auth_name ORDER BY 1;"); - cmd.AllResultTypesAreUnknown = true; - using var reader = cmd.ExecuteReader(); - - while (reader.Read()) - { - builder.Add(new CrsMapEntry( - int.Parse(reader.GetString(0)), - int.Parse(reader.GetString(1)), - reader.GetString(2))); - } - - return builder.Build(); - }); - - var (pgGeometryType, pgGeographyType) = (PgType("geometry"), PgType("geography")); - - if (pgGeometryType is not null) - _geometryHandler = new GeoJsonHandler(pgGeometryType, options, crsMap); - if (pgGeographyType is not null) - _geographyHandler = new GeoJsonHandler(pgGeographyType, options, crsMap); - } - - public override NpgsqlTypeHandler? ResolveByDataTypeName(string typeName) - => typeName switch - { - "geometry" => _geometryHandler, - "geography" => _geographyHandler, - _ => null - }; - - public override NpgsqlTypeHandler? ResolveByClrType(Type type) - => ClrTypeToDataTypeName(type, _geographyAsDefault) is { } dataTypeName && ResolveByDataTypeName(dataTypeName) is { } handler - ? handler - : null; - - internal static string? 
ClrTypeToDataTypeName(Type type, bool geographyAsDefault) - => type.BaseType != typeof(GeoJSONObject) - ? null - : geographyAsDefault - ? "geography" - : "geometry"; - - PostgresType? PgType(string pgTypeName) => _databaseInfo.TryGetPostgresTypeByName(pgTypeName, out var pgType) ? pgType : null; -} \ No newline at end of file diff --git a/src/Npgsql.GeoJSON/Internal/GeoJSONTypeHandlerResolverFactory.cs b/src/Npgsql.GeoJSON/Internal/GeoJSONTypeHandlerResolverFactory.cs deleted file mode 100644 index aae2c9102a..0000000000 --- a/src/Npgsql.GeoJSON/Internal/GeoJSONTypeHandlerResolverFactory.cs +++ /dev/null @@ -1,21 +0,0 @@ -using System; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.Internal.TypeMapping; -using Npgsql.TypeMapping; - -namespace Npgsql.GeoJSON.Internal; - -public class GeoJSONTypeHandlerResolverFactory : TypeHandlerResolverFactory -{ - readonly GeoJSONOptions _options; - readonly bool _geographyAsDefault; - - public GeoJSONTypeHandlerResolverFactory(GeoJSONOptions options, bool geographyAsDefault) - => (_options, _geographyAsDefault) = (options, geographyAsDefault); - - public override TypeHandlerResolver Create(TypeMapper typeMapper, NpgsqlConnector connector) - => new GeoJSONTypeHandlerResolver(connector, _options, _geographyAsDefault); - - public override TypeMappingResolver CreateMappingResolver() => new GeoJsonTypeMappingResolver(_geographyAsDefault); -} diff --git a/src/Npgsql.GeoJSON/Internal/GeoJSONTypeInfoResolver.cs b/src/Npgsql.GeoJSON/Internal/GeoJSONTypeInfoResolver.cs new file mode 100644 index 0000000000..5ea3b8c9e3 --- /dev/null +++ b/src/Npgsql.GeoJSON/Internal/GeoJSONTypeInfoResolver.cs @@ -0,0 +1,76 @@ +using System; +using GeoJSON.Net; +using GeoJSON.Net.Geometry; +using Npgsql.Internal; +using Npgsql.Internal.Postgres; + +namespace Npgsql.GeoJSON.Internal; + +sealed class GeoJSONTypeInfoResolver : IPgTypeInfoResolver +{ + TypeInfoMappingCollection Mappings { get; } + + internal 
GeoJSONTypeInfoResolver(GeoJSONOptions options, bool geographyAsDefault, CrsMap? crsMap = null) + { + Mappings = new TypeInfoMappingCollection(); + AddInfos(Mappings, options, geographyAsDefault, crsMap); + // TODO opt-in arrays + AddArrayInfos(Mappings); + } + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static void AddInfos(TypeInfoMappingCollection mappings, GeoJSONOptions geoJsonOptions, bool geographyAsDefault, CrsMap? crsMap) + { + crsMap ??= new CrsMap(CrsMap.WellKnown); + + var geometryMatchRequirement = !geographyAsDefault ? MatchRequirement.Single : MatchRequirement.DataTypeName; + var geographyMatchRequirement = geographyAsDefault ? MatchRequirement.Single : MatchRequirement.DataTypeName; + + foreach (var dataTypeName in new[] { "geometry", "geography" }) + { + var matchRequirement = dataTypeName == "geometry" ? geometryMatchRequirement : geographyMatchRequirement; + + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new GeoJSONConverter(geoJsonOptions, crsMap)), + matchRequirement); + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new GeoJSONConverter(geoJsonOptions, crsMap)), + matchRequirement); + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new GeoJSONConverter(geoJsonOptions, crsMap)), + matchRequirement); + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new GeoJSONConverter(geoJsonOptions, crsMap)), + matchRequirement); + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new GeoJSONConverter(geoJsonOptions, crsMap)), + matchRequirement); + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new GeoJSONConverter(geoJsonOptions, crsMap)), + matchRequirement); + mappings.AddType(dataTypeName, + (options, mapping, _) => 
mapping.CreateInfo(options, new GeoJSONConverter(geoJsonOptions, crsMap)), + matchRequirement); + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new GeoJSONConverter(geoJsonOptions, crsMap)), + matchRequirement); + } + } + + static void AddArrayInfos(TypeInfoMappingCollection mappings) + { + foreach (var dataTypeName in new[] { "geometry", "geography" }) + { + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + } + } +} diff --git a/src/Npgsql.GeoJSON/Internal/GeoJsonTypeMappingResolver.cs b/src/Npgsql.GeoJSON/Internal/GeoJsonTypeMappingResolver.cs deleted file mode 100644 index 137606538b..0000000000 --- a/src/Npgsql.GeoJSON/Internal/GeoJsonTypeMappingResolver.cs +++ /dev/null @@ -1,28 +0,0 @@ -using System; -using Npgsql.Internal.TypeHandling; -using Npgsql.Internal.TypeMapping; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.GeoJSON.Internal; - -public class GeoJsonTypeMappingResolver : TypeMappingResolver -{ - readonly bool _geographyAsDefault; - - public GeoJsonTypeMappingResolver(bool geographyAsDefault) => _geographyAsDefault = geographyAsDefault; - - public override string? GetDataTypeNameByClrType(Type type) - => GeoJSONTypeHandlerResolver.ClrTypeToDataTypeName(type, _geographyAsDefault); - - public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) - => DoGetMappingByDataTypeName(dataTypeName); - - static TypeMappingInfo? 
DoGetMappingByDataTypeName(string dataTypeName) - => dataTypeName switch - { - "geometry" => new(NpgsqlDbType.Geometry, "geometry"), - "geography" => new(NpgsqlDbType.Geography, "geography"), - _ => null - }; -} diff --git a/src/Npgsql.GeoJSON/NpgsqlGeoJSONExtensions.cs b/src/Npgsql.GeoJSON/NpgsqlGeoJSONExtensions.cs index c59b0b21c7..6817094caa 100644 --- a/src/Npgsql.GeoJSON/NpgsqlGeoJSONExtensions.cs +++ b/src/Npgsql.GeoJSON/NpgsqlGeoJSONExtensions.cs @@ -1,4 +1,5 @@ -using Npgsql.GeoJSON.Internal; +using Npgsql.GeoJSON; +using Npgsql.GeoJSON.Internal; using Npgsql.TypeMapping; // ReSharper disable once CheckNamespace @@ -17,7 +18,20 @@ public static class NpgsqlGeoJSONExtensions /// Specifies that the geography type is used for mapping by default. public static INpgsqlTypeMapper UseGeoJson(this INpgsqlTypeMapper mapper, GeoJSONOptions options = GeoJSONOptions.None, bool geographyAsDefault = false) { - mapper.AddTypeResolverFactory(new GeoJSONTypeHandlerResolverFactory(options, geographyAsDefault)); + mapper.AddTypeInfoResolver(new GeoJSONTypeInfoResolver(options, geographyAsDefault, crsMap: null)); return mapper; } -} \ No newline at end of file + + /// + /// Sets up GeoJSON mappings for the PostGIS types. + /// + /// The type mapper to set up (global or connection-specific) + /// A custom crs map that might contain more or less entries than the default well-known crs map. + /// Options to use when constructing objects. + /// Specifies that the geography type is used for mapping by default. 
+ public static INpgsqlTypeMapper UseGeoJson(this INpgsqlTypeMapper mapper, CrsMap crsMap, GeoJSONOptions options = GeoJSONOptions.None, bool geographyAsDefault = false) + { + mapper.AddTypeInfoResolver(new GeoJSONTypeInfoResolver(options, geographyAsDefault, crsMap)); + return mapper; + } +} diff --git a/src/Npgsql.GeoJSON/PublicAPI.Unshipped.txt b/src/Npgsql.GeoJSON/PublicAPI.Unshipped.txt index ab058de62d..be72efeb37 100644 --- a/src/Npgsql.GeoJSON/PublicAPI.Unshipped.txt +++ b/src/Npgsql.GeoJSON/PublicAPI.Unshipped.txt @@ -1 +1,5 @@ -#nullable enable +Npgsql.GeoJSON.CrsMap +Npgsql.GeoJSON.CrsMapExtensions +static Npgsql.GeoJSON.CrsMapExtensions.GetCrsMap(this Npgsql.NpgsqlDataSource! dataSource) -> Npgsql.GeoJSON.CrsMap! +static Npgsql.GeoJSON.CrsMapExtensions.GetCrsMapAsync(this Npgsql.NpgsqlDataSource! dataSource) -> System.Threading.Tasks.Task! +static Npgsql.NpgsqlGeoJSONExtensions.UseGeoJson(this Npgsql.TypeMapping.INpgsqlTypeMapper! mapper, Npgsql.GeoJSON.CrsMap! crsMap, Npgsql.GeoJSONOptions options = Npgsql.GeoJSONOptions.None, bool geographyAsDefault = false) -> Npgsql.TypeMapping.INpgsqlTypeMapper! 
\ No newline at end of file diff --git a/src/Npgsql.Json.NET/Internal/JsonNetJsonConverter.cs b/src/Npgsql.Json.NET/Internal/JsonNetJsonConverter.cs new file mode 100644 index 0000000000..42b7c88e0d --- /dev/null +++ b/src/Npgsql.Json.NET/Internal/JsonNetJsonConverter.cs @@ -0,0 +1,121 @@ +using System; +using System.Globalization; +using System.IO; +using System.Text; +using System.Threading; +using System.Threading.Tasks; +using Newtonsoft.Json; +using Npgsql.Internal; +using JsonSerializer = Newtonsoft.Json.JsonSerializer; + +namespace Npgsql.Json.NET.Internal; + +sealed class JsonNetJsonConverter : PgStreamingConverter +{ + readonly bool _jsonb; + readonly Encoding _textEncoding; + readonly JsonSerializerSettings _settings; + + public JsonNetJsonConverter(bool jsonb, Encoding textEncoding, JsonSerializerSettings settings) + { + _jsonb = jsonb; + _textEncoding = textEncoding; + _settings = settings; + } + + public override T? Read(PgReader reader) + => (T?)JsonNetJsonConverter.Read(async: false, _jsonb, reader, typeof(T), _settings, _textEncoding, CancellationToken.None).GetAwaiter().GetResult(); + public override async ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => (T?)await JsonNetJsonConverter.Read(async: true, _jsonb, reader, typeof(T), _settings, _textEncoding, cancellationToken).ConfigureAwait(false); + + public override Size GetSize(SizeContext context, T? value, ref object? writeState) + => JsonNetJsonConverter.GetSize(_jsonb, context, typeof(T), _settings, _textEncoding, value, ref writeState); + + public override void Write(PgWriter writer, T? value) + => JsonNetJsonConverter.Write(_jsonb, async: false, writer, CancellationToken.None).GetAwaiter().GetResult(); + + public override ValueTask WriteAsync(PgWriter writer, T? value, CancellationToken cancellationToken = default) + => JsonNetJsonConverter.Write(_jsonb, async: true, writer, cancellationToken); +} + +// Split out to avoid unneccesary code duplication. 
+static class JsonNetJsonConverter +{ + public const byte JsonbProtocolVersion = 1; + + public static async ValueTask Read(bool async, bool jsonb, PgReader reader, Type type, JsonSerializerSettings settings, Encoding encoding, CancellationToken cancellationToken) + { + if (jsonb) + { + if (reader.ShouldBuffer(sizeof(byte))) + { + if (async) + await reader.BufferAsync(sizeof(byte), cancellationToken).ConfigureAwait(false); + else + reader.Buffer(sizeof(byte)); + } + var version = reader.ReadByte(); + if (version != JsonbProtocolVersion) + throw new InvalidCastException($"Unknown jsonb wire format version {version}"); + } + + using var stream = reader.GetStream(); + var mem = new MemoryStream(); + if (async) + await stream.CopyToAsync(mem, Math.Min((int)mem.Length, 81920), cancellationToken).ConfigureAwait(false); + else + stream.CopyTo(mem); + mem.Position = 0; + var jsonSerializer = JsonSerializer.CreateDefault(settings); + using var textReader = new JsonTextReader(new StreamReader(mem, encoding)); + return jsonSerializer.Deserialize(textReader, type); + } + + public static Size GetSize(bool jsonb, SizeContext context, Type type, JsonSerializerSettings settings, Encoding encoding, object? value, ref object? writeState) + { + var jsonSerializer = JsonSerializer.CreateDefault(settings); + var sb = new StringBuilder(256); + var sw = new StringWriter(sb, CultureInfo.InvariantCulture); + using (var jsonWriter = new JsonTextWriter(sw)) + { + jsonWriter.Formatting = jsonSerializer.Formatting; + + jsonSerializer.Serialize(jsonWriter, value, type); + } + + var str = sw.ToString(); + var bytes = encoding.GetBytes(str); + writeState = bytes; + return bytes.Length + (jsonb ? 
sizeof(byte) : 0); + } + + public static async ValueTask Write(bool jsonb, bool async, PgWriter writer, CancellationToken cancellationToken) + { + if (jsonb) + { + if (writer.ShouldFlush(sizeof(byte))) + { + if (async) + await writer.FlushAsync(cancellationToken).ConfigureAwait(false); + else + writer.Flush(); + } + writer.WriteByte(JsonbProtocolVersion); + } + + ArraySegment buffer; + switch (writer.Current.WriteState) + { + case byte[] bytes: + buffer = new ArraySegment(bytes); + break; + default: + throw new InvalidCastException($"Invalid state {writer.Current.WriteState?.GetType().FullName}."); + } + + if (async) + await writer.WriteBytesAsync(buffer.AsMemory(), cancellationToken).ConfigureAwait(false); + else + writer.WriteBytes(buffer.AsSpan()); + } +} diff --git a/src/Npgsql.Json.NET/Internal/JsonNetJsonHandler.cs b/src/Npgsql.Json.NET/Internal/JsonNetJsonHandler.cs deleted file mode 100644 index cbf8ca3ae2..0000000000 --- a/src/Npgsql.Json.NET/Internal/JsonNetJsonHandler.cs +++ /dev/null @@ -1,64 +0,0 @@ -using System; -using System.Diagnostics.CodeAnalysis; -using System.Threading; -using System.Threading.Tasks; -using Newtonsoft.Json; -using Npgsql.BackendMessages; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandlers; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Json.NET.Internal; - -class JsonNetJsonHandler : JsonTextHandler -{ - readonly JsonSerializerSettings _settings; - - public JsonNetJsonHandler(PostgresType postgresType, NpgsqlConnector connector, bool isJsonb, JsonSerializerSettings settings) - : base(postgresType, connector.TextEncoding, isJsonb) => _settings = settings; - - protected override async ValueTask ReadCustom(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - { - if (IsSupportedAsText()) - return await base.ReadCustom(buf, len, async, fieldDescription); - - // JSON.NET returns null if no JSON content was found. 
This means null may get returned even if T is a non-nullable reference - // type (for value types, an exception will be thrown). - return JsonConvert.DeserializeObject(await base.Read(buf, len, async, fieldDescription), _settings)!; - } - - protected override int ValidateAndGetLengthCustom([DisallowNull] TAny value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - { - if (IsSupportedAsText()) - return base.ValidateAndGetLengthCustom(value, ref lengthCache, parameter); - - var serialized = JsonConvert.SerializeObject(value, _settings); - if (parameter != null) - parameter.ConvertedValue = serialized; - return base.ValidateAndGetLengthCustom(serialized, ref lengthCache, parameter); - } - - protected override Task WriteWithLengthCustom([DisallowNull] TAny value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (IsSupportedAsText()) - return base.WriteWithLengthCustom(value, buf, lengthCache, parameter, async, cancellationToken); - - // User POCO, read serialized representation from the validation phase - var serialized = parameter?.ConvertedValue != null - ? (string)parameter.ConvertedValue - : JsonConvert.SerializeObject(value, _settings); - return base.WriteWithLengthCustom(serialized, buf, lengthCache, parameter, async, cancellationToken); - } - - public override int ValidateObjectAndGetLength(object value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => IsSupported(value.GetType()) - ? base.ValidateObjectAndGetLength(value, ref lengthCache, parameter) - : ValidateAndGetLengthCustom(value, ref lengthCache, parameter); - - public override Task WriteObjectWithLength(object? value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => value is null or DBNull || IsSupported(value.GetType()) - ? 
base.WriteObjectWithLength(value, buf, lengthCache, parameter, async, cancellationToken) - : WriteWithLengthCustom(value, buf, lengthCache, parameter, async, cancellationToken); -} \ No newline at end of file diff --git a/src/Npgsql.Json.NET/Internal/JsonNetPocoTypeInfoResolver.cs b/src/Npgsql.Json.NET/Internal/JsonNetPocoTypeInfoResolver.cs new file mode 100644 index 0000000000..a9d54d863f --- /dev/null +++ b/src/Npgsql.Json.NET/Internal/JsonNetPocoTypeInfoResolver.cs @@ -0,0 +1,105 @@ +using System; +using System.Diagnostics.CodeAnalysis; +using System.Text; +using Newtonsoft.Json; +using Npgsql.Internal; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Json.NET.Internal; + +[RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] +[RequiresDynamicCode("Serializing arbitary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] +class JsonNetPocoTypeInfoResolver : DynamicTypeInfoResolver, IPgTypeInfoResolver +{ + protected TypeInfoMappingCollection Mappings { get; } = new(); + protected JsonSerializerSettings _serializerSettings; + + const string JsonDataTypeName = "pg_catalog.json"; + const string JsonbDataTypeName = "pg_catalog.jsonb"; + + public JsonNetPocoTypeInfoResolver(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerSettings? serializerSettings = null) + { + // Capture default settings during construction. + _serializerSettings = serializerSettings ??= JsonConvert.DefaultSettings?.Invoke() ?? new JsonSerializerSettings(); + + AddMappings(Mappings, jsonbClrTypes ?? Array.Empty(), jsonClrTypes ?? 
Array.Empty(), serializerSettings); + } + + void AddMappings(TypeInfoMappingCollection mappings, Type[] jsonbClrTypes, Type[] jsonClrTypes, JsonSerializerSettings serializerSettings) + { + AddUserMappings(jsonb: true, jsonbClrTypes); + AddUserMappings(jsonb: false, jsonClrTypes); + + void AddUserMappings(bool jsonb, Type[] clrTypes) + { + var dynamicMappings = CreateCollection(); + var dataTypeName = jsonb ? JsonbDataTypeName : JsonDataTypeName; + foreach (var jsonType in clrTypes) + { + dynamicMappings.AddMapping(jsonType, dataTypeName, + factory: (options, mapping, _) => mapping.CreateInfo(options, + CreateConverter(mapping.Type, jsonb, options.TextEncoding, serializerSettings))); + } + mappings.AddRange(dynamicMappings.ToTypeInfoMappingCollection()); + } + } + + protected void AddArrayInfos(TypeInfoMappingCollection mappings, TypeInfoMappingCollection baseMappings) + { + if (baseMappings.Items.Count == 0) + return; + + var dynamicMappings = CreateCollection(baseMappings); + foreach (var mapping in baseMappings.Items) + dynamicMappings.AddArrayMapping(mapping.Type, mapping.DataTypeName); + mappings.AddRange(dynamicMappings.ToTypeInfoMappingCollection()); + } + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options) ?? base.GetTypeInfo(type, dataTypeName, options); + + protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) + { + // Match all types except null, object and text types as long as DataTypeName (json/jsonb) is present. 
+ if (type is null || type == typeof(object) || Array.IndexOf(PgSerializerOptions.WellKnownTextTypes, type) != -1 + || dataTypeName != JsonbDataTypeName && dataTypeName != JsonDataTypeName) + return null; + + return CreateCollection().AddMapping(type, dataTypeName, (options, mapping, _) => + { + var jsonb = dataTypeName == JsonbDataTypeName; + return mapping.CreateInfo(options, + CreateConverter(mapping.Type, jsonb, options.TextEncoding, _serializerSettings)); + }); + } + + static PgConverter CreateConverter(Type valueType, bool jsonb, Encoding textEncoding, JsonSerializerSettings settings) + => (PgConverter)Activator.CreateInstance( + typeof(JsonNetJsonConverter<>).MakeGenericType(valueType), + jsonb, + textEncoding, + settings + )!; +} + +[RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] +[RequiresDynamicCode("Serializing arbitary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] +sealed class JsonNetPocoArrayTypeInfoResolver : JsonNetPocoTypeInfoResolver, IPgTypeInfoResolver +{ + new TypeInfoMappingCollection Mappings { get; } + + public JsonNetPocoArrayTypeInfoResolver(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerSettings? serializerSettings = null) + : base(jsonbClrTypes, jsonClrTypes, serializerSettings) + { + Mappings = new TypeInfoMappingCollection(base.Mappings); + AddArrayInfos(Mappings, base.Mappings); + } + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options) ?? base.GetTypeInfo(type, dataTypeName, options); + + protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) + => type is not null && IsArrayLikeType(type, out var elementType) && IsArrayDataTypeName(dataTypeName, options, out var elementDataTypeName) + ? 
base.GetMappings(elementType, elementDataTypeName, options)?.AddArrayMapping(elementType, elementDataTypeName) + : null; +} diff --git a/src/Npgsql.Json.NET/Internal/JsonNetTypeHandlerResolver.cs b/src/Npgsql.Json.NET/Internal/JsonNetTypeHandlerResolver.cs deleted file mode 100644 index 04bb63bdf1..0000000000 --- a/src/Npgsql.Json.NET/Internal/JsonNetTypeHandlerResolver.cs +++ /dev/null @@ -1,48 +0,0 @@ -using System; -using System.Collections.Generic; -using Newtonsoft.Json; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.Json.NET.Internal; - -public class JsonNetTypeHandlerResolver : TypeHandlerResolver -{ - readonly NpgsqlDatabaseInfo _databaseInfo; - readonly JsonNetJsonHandler _jsonNetJsonbHandler; - readonly JsonNetJsonHandler _jsonNetJsonHandler; - readonly Dictionary _dataTypeNamesByClrType; - - internal JsonNetTypeHandlerResolver( - NpgsqlConnector connector, - Dictionary dataTypeNamesByClrType, - JsonSerializerSettings settings) - { - _databaseInfo = connector.DatabaseInfo; - - _jsonNetJsonbHandler = new JsonNetJsonHandler(PgType("jsonb"), connector, isJsonb: true, settings); - _jsonNetJsonHandler = new JsonNetJsonHandler(PgType("json"), connector, isJsonb: false, settings); - - _dataTypeNamesByClrType = dataTypeNamesByClrType; - } - - public override NpgsqlTypeHandler? ResolveByDataTypeName(string typeName) - => typeName switch - { - "jsonb" => _jsonNetJsonbHandler, - "json" => _jsonNetJsonHandler, - _ => null - }; - - public override NpgsqlTypeHandler? ResolveByClrType(Type type) - => ClrTypeToDataTypeName(type, _dataTypeNamesByClrType) is { } dataTypeName && ResolveByDataTypeName(dataTypeName) is { } handler - ? handler - : null; - - internal static string? ClrTypeToDataTypeName(Type type, Dictionary clrTypes) - => clrTypes.TryGetValue(type, out var dataTypeName) ? 
dataTypeName : null; - - PostgresType PgType(string pgTypeName) => _databaseInfo.GetPostgresTypeByName(pgTypeName); -} \ No newline at end of file diff --git a/src/Npgsql.Json.NET/Internal/JsonNetTypeHandlerResolverFactory.cs b/src/Npgsql.Json.NET/Internal/JsonNetTypeHandlerResolverFactory.cs deleted file mode 100644 index 739efc6d2c..0000000000 --- a/src/Npgsql.Json.NET/Internal/JsonNetTypeHandlerResolverFactory.cs +++ /dev/null @@ -1,43 +0,0 @@ -using System; -using System.Collections.Generic; -using Newtonsoft.Json; -using Newtonsoft.Json.Linq; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.Internal.TypeMapping; -using Npgsql.TypeMapping; - -namespace Npgsql.Json.NET.Internal; - -public class JsonNetTypeHandlerResolverFactory : TypeHandlerResolverFactory -{ - readonly JsonSerializerSettings _settings; - readonly Dictionary _byType; - - public JsonNetTypeHandlerResolverFactory( - Type[]? jsonbClrTypes, - Type[]? jsonClrTypes, - JsonSerializerSettings? settings) - { - _settings = settings ?? 
new JsonSerializerSettings(); - - _byType = new() - { - { typeof(JObject), "jsonb" }, - { typeof(JArray), "jsonb" } - }; - - if (jsonbClrTypes is not null) - foreach (var type in jsonbClrTypes) - _byType[type] = "jsonb"; - - if (jsonClrTypes is not null) - foreach (var type in jsonClrTypes) - _byType[type] = "json"; - } - - public override TypeHandlerResolver Create(TypeMapper typeMapper, NpgsqlConnector connector) - => new JsonNetTypeHandlerResolver(connector, _byType, _settings); - - public override TypeMappingResolver CreateMappingResolver() => new JsonNetTypeMappingResolver(_byType); -} diff --git a/src/Npgsql.Json.NET/Internal/JsonNetTypeInfoResolver.cs b/src/Npgsql.Json.NET/Internal/JsonNetTypeInfoResolver.cs new file mode 100644 index 0000000000..7954c4bb2f --- /dev/null +++ b/src/Npgsql.Json.NET/Internal/JsonNetTypeInfoResolver.cs @@ -0,0 +1,67 @@ +using System; +using System.Diagnostics.CodeAnalysis; +using System.Reflection; +using System.Text; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Npgsql.Internal; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Json.NET.Internal; + +class JsonNetTypeInfoResolver : IPgTypeInfoResolver +{ + protected TypeInfoMappingCollection Mappings { get; } = new(); + + public JsonNetTypeInfoResolver(JsonSerializerSettings? settings = null) + => AddTypeInfos(Mappings, settings); + + static void AddTypeInfos(TypeInfoMappingCollection mappings, JsonSerializerSettings? settings = null) + { + // Capture default settings during construction. + settings ??= JsonConvert.DefaultSettings?.Invoke() ?? new JsonSerializerSettings(); + + // Jsonb is the first default for JToken etc. 
+ foreach (var dataTypeName in new[] { "jsonb", "json" }) + { + var jsonb = dataTypeName == "jsonb"; + mappings.AddType(dataTypeName, (options, mapping, _) => + mapping.CreateInfo(options, new JsonNetJsonConverter(jsonb, options.TextEncoding, settings)), + isDefault: true); + mappings.AddType(dataTypeName, (options, mapping, _) => + mapping.CreateInfo(options, new JsonNetJsonConverter(jsonb, options.TextEncoding, settings))); + mappings.AddType(dataTypeName, (options, mapping, _) => + mapping.CreateInfo(options, new JsonNetJsonConverter(jsonb, options.TextEncoding, settings))); + mappings.AddType(dataTypeName, (options, mapping, _) => + mapping.CreateInfo(options, new JsonNetJsonConverter(jsonb, options.TextEncoding, settings))); + } + } + + protected static void AddArrayInfos(TypeInfoMappingCollection mappings) + { + foreach (var dataTypeName in new[] { "jsonb", "json" }) + { + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + } + } + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); +} + +sealed class JsonNetArrayTypeInfoResolver : JsonNetTypeInfoResolver, IPgTypeInfoResolver +{ + new TypeInfoMappingCollection Mappings { get; } + + public JsonNetArrayTypeInfoResolver(JsonSerializerSettings? settings = null) : base(settings) + { + Mappings = new TypeInfoMappingCollection(base.Mappings); + AddArrayInfos(Mappings); + } + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); +} diff --git a/src/Npgsql.Json.NET/Internal/JsonNetTypeMappingResolver.cs b/src/Npgsql.Json.NET/Internal/JsonNetTypeMappingResolver.cs deleted file mode 100644 index 119882f37e..0000000000 --- a/src/Npgsql.Json.NET/Internal/JsonNetTypeMappingResolver.cs +++ /dev/null @@ -1,29 +0,0 @@ -using System; -using System.Collections.Generic; -using Npgsql.Internal.TypeHandling; -using Npgsql.Internal.TypeMapping; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.Json.NET.Internal; - -public class JsonNetTypeMappingResolver : TypeMappingResolver -{ - readonly Dictionary _byType; - - public JsonNetTypeMappingResolver(Dictionary byType) => _byType = byType; - - public override string? GetDataTypeNameByClrType(Type type) - => JsonNetTypeHandlerResolver.ClrTypeToDataTypeName(type, _byType); - - public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) - => DoGetMappingByDataTypeName(dataTypeName); - - static TypeMappingInfo? DoGetMappingByDataTypeName(string dataTypeName) - => dataTypeName switch - { - "jsonb" => new(NpgsqlDbType.Jsonb, "jsonb"), - "json" => new(NpgsqlDbType.Json, "json"), - _ => null - }; -} diff --git a/src/Npgsql.Json.NET/Npgsql.Json.NET.csproj b/src/Npgsql.Json.NET/Npgsql.Json.NET.csproj index abd1a4ea6d..baff7e6af6 100644 --- a/src/Npgsql.Json.NET/Npgsql.Json.NET.csproj +++ b/src/Npgsql.Json.NET/Npgsql.Json.NET.csproj @@ -3,8 +3,9 @@ Shay Rojansky Json.NET plugin for Npgsql, allowing transparent serialization/deserialization of JSON objects directly to and from the database. 
npgsql;postgresql;json;postgres;ado;ado.net;database;sql - netstandard2.0 + netstandard2.0;net6.0 net8.0 + enable diff --git a/src/Npgsql.Json.NET/NpgsqlJsonNetExtensions.cs b/src/Npgsql.Json.NET/NpgsqlJsonNetExtensions.cs index bd3b7b41f8..9cb70d86f1 100644 --- a/src/Npgsql.Json.NET/NpgsqlJsonNetExtensions.cs +++ b/src/Npgsql.Json.NET/NpgsqlJsonNetExtensions.cs @@ -29,7 +29,12 @@ public static INpgsqlTypeMapper UseJsonNet( Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null) { - mapper.AddTypeResolverFactory(new JsonNetTypeHandlerResolverFactory(jsonbClrTypes, jsonClrTypes, settings)); + // TODO opt-in of arrays. + // Reverse order + mapper.AddTypeInfoResolver(new JsonNetPocoArrayTypeInfoResolver(jsonbClrTypes, jsonClrTypes, settings)); + mapper.AddTypeInfoResolver(new JsonNetArrayTypeInfoResolver(settings)); + mapper.AddTypeInfoResolver(new JsonNetPocoTypeInfoResolver(jsonbClrTypes, jsonClrTypes, settings)); + mapper.AddTypeInfoResolver(new JsonNetTypeInfoResolver(settings)); return mapper; } -} \ No newline at end of file +} diff --git a/src/Npgsql.LegacyPostgis/Properties/AssemblyInfo.cs b/src/Npgsql.LegacyPostgis/Properties/AssemblyInfo.cs deleted file mode 100644 index 1a340b1a15..0000000000 --- a/src/Npgsql.LegacyPostgis/Properties/AssemblyInfo.cs +++ /dev/null @@ -1,5 +0,0 @@ -using System.Runtime.CompilerServices; - -#if NET5_0_OR_GREATER -[module: SkipLocalsInit] -#endif diff --git a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteConverter.cs b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteConverter.cs new file mode 100644 index 0000000000..467356164e --- /dev/null +++ b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteConverter.cs @@ -0,0 +1,81 @@ +using System; +using System.IO; +using System.Threading; +using System.Threading.Tasks; +using NetTopologySuite.Geometries; +using NetTopologySuite.IO; +using Npgsql.Internal; + +namespace Npgsql.NetTopologySuite.Internal; + +sealed class NetTopologySuiteConverter : PgStreamingConverter 
+ where T : Geometry +{ + readonly PostGisReader _reader; + readonly PostGisWriter _writer; + + internal NetTopologySuiteConverter(PostGisReader reader, PostGisWriter writer) + => (_reader, _writer) = (reader, writer); + + public override T Read(PgReader reader) + => (T)_reader.Read(reader.GetStream()); + + // PostGisReader/PostGisWriter doesn't support async + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => new(Read(reader)); + + public override Size GetSize(SizeContext context, T value, ref object? writeState) + { + var lengthStream = new LengthStream(); + lengthStream.SetLength(0); + _writer.Write(value, lengthStream); + return (int)lengthStream.Length; + } + +#pragma warning disable CA2252 // GetStream() is a "preview" feature + public override void Write(PgWriter writer, T value) + => _writer.Write(value, writer.GetStream()); +#pragma warning restore CA2252 + + // PostGisReader/PostGisWriter doesn't support async + public override ValueTask WriteAsync(PgWriter writer, T value, CancellationToken cancellationToken = default) + { + Write(writer, value); + return default; + } + + sealed class LengthStream : Stream + { + long _length; + + public override bool CanRead => false; + + public override bool CanSeek => false; + + public override bool CanWrite => true; + + public override long Length => _length; + + public override long Position + { + get => _length; + set => throw new NotSupportedException(); + } + + public override void Flush() + { + } + + public override int Read(byte[] buffer, int offset, int count) + => throw new NotSupportedException(); + + public override long Seek(long offset, SeekOrigin origin) + => throw new NotSupportedException(); + + public override void SetLength(long value) + => _length = value; + + public override void Write(byte[] buffer, int offset, int count) + => _length += count; + } +} diff --git a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteHandler.cs 
b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteHandler.cs deleted file mode 100644 index f75be9f4a7..0000000000 --- a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteHandler.cs +++ /dev/null @@ -1,168 +0,0 @@ -using System; -using System.IO; -using System.Threading; -using System.Threading.Tasks; -using NetTopologySuite.Geometries; -using NetTopologySuite.IO; -using Npgsql.BackendMessages; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.NetTopologySuite.Internal; - -partial class NetTopologySuiteHandler : NpgsqlTypeHandler, - INpgsqlTypeHandler, - INpgsqlTypeHandler, - INpgsqlTypeHandler, - INpgsqlTypeHandler, - INpgsqlTypeHandler, - INpgsqlTypeHandler, - INpgsqlTypeHandler -{ - readonly PostGisReader _reader; - readonly PostGisWriter _writer; - - internal NetTopologySuiteHandler(PostgresType postgresType, PostGisReader reader, PostGisWriter writer) - : base(postgresType) - { - _reader = reader; - _writer = writer; - } - - #region Read - - public override ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - => ReadCore(buf, len); - - ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadCore(buf, len); - - ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadCore(buf, len); - - ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadCore(buf, len); - - ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadCore(buf, len); - - ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadCore(buf, len); - - ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription) - => ReadCore(buf, len); - - ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadCore(buf, len); - - ValueTask ReadCore(NpgsqlReadBuffer buf, int len) - where T : Geometry - => new((T)_reader.Read(buf.GetStream(len, false))); - - #endregion - - #region ValidateAndGetLength - - public override int ValidateAndGetLength(Geometry value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthCore(value); - - int INpgsqlTypeHandler.ValidateAndGetLength(Point value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength(value, ref lengthCache, parameter); - - int INpgsqlTypeHandler.ValidateAndGetLength(LineString value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength(value, ref lengthCache, parameter); - - int INpgsqlTypeHandler.ValidateAndGetLength(Polygon value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength(value, ref lengthCache, parameter); - - int INpgsqlTypeHandler.ValidateAndGetLength(MultiPoint value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength(value, ref lengthCache, parameter); - - int INpgsqlTypeHandler.ValidateAndGetLength(MultiLineString value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength(value, ref lengthCache, parameter); - - int INpgsqlTypeHandler.ValidateAndGetLength(MultiPolygon value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength(value, ref lengthCache, parameter); - - int INpgsqlTypeHandler.ValidateAndGetLength(GeometryCollection value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - => ValidateAndGetLength(value, ref lengthCache, parameter); - - int ValidateAndGetLengthCore(Geometry value) - { - var lengthStream = new LengthStream(); - lengthStream.SetLength(0); - _writer.Write(value, lengthStream); - return (int)lengthStream.Length; - } - - sealed class LengthStream : Stream - { - long _length; - - public override bool CanRead => false; - - public override bool CanSeek => false; - - public override bool CanWrite => true; - - public override long Length => _length; - - public override long Position - { - get => _length; - set => throw new NotSupportedException(); - } - - public override void Flush() - { } - - public override int Read(byte[] buffer, int offset, int count) - => throw new NotSupportedException(); - - public override long Seek(long offset, SeekOrigin origin) - => throw new NotSupportedException(); - - public override void SetLength(long value) - => _length = value; - - public override void Write(byte[] buffer, int offset, int count) - => _length += count; - } - - #endregion - - #region Write - - public override Task Write(Geometry value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => WriteCore(value, buf); - - Task INpgsqlTypeHandler.Write(Point value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken) - => WriteCore(value, buf); - - Task INpgsqlTypeHandler.Write(LineString value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken) - => WriteCore(value, buf); - - Task INpgsqlTypeHandler.Write(Polygon value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken) - => WriteCore(value, buf); - - Task INpgsqlTypeHandler.Write(MultiPoint value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? 
lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToke) - => WriteCore(value, buf); - - Task INpgsqlTypeHandler.Write(MultiLineString value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken) - => WriteCore(value, buf); - - Task INpgsqlTypeHandler.Write(MultiPolygon value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken) - => WriteCore(value, buf); - - Task INpgsqlTypeHandler.Write(GeometryCollection value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken) - => WriteCore(value, buf); - - Task WriteCore(Geometry value, NpgsqlWriteBuffer buf) - { - _writer.Write(value, buf.GetStream()); - return Task.CompletedTask; - } - - #endregion -} \ No newline at end of file diff --git a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeHandlerResolver.cs b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeHandlerResolver.cs deleted file mode 100644 index 8f270ac90f..0000000000 --- a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeHandlerResolver.cs +++ /dev/null @@ -1,55 +0,0 @@ -using System; -using System.Data; -using NetTopologySuite.Geometries; -using NetTopologySuite.IO; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using Npgsql.TypeMapping; -using NpgsqlTypes; - -namespace Npgsql.NetTopologySuite.Internal; - -public class NetTopologySuiteTypeHandlerResolver : TypeHandlerResolver -{ - readonly NpgsqlDatabaseInfo _databaseInfo; - readonly bool _geographyAsDefault; - - readonly NetTopologySuiteHandler? 
_geometryHandler, _geographyHandler; - - internal NetTopologySuiteTypeHandlerResolver( - NpgsqlConnector connector, - CoordinateSequenceFactory coordinateSequenceFactory, - PrecisionModel precisionModel, - Ordinates handleOrdinates, - bool geographyAsDefault) - { - _databaseInfo = connector.DatabaseInfo; - _geographyAsDefault = geographyAsDefault; - - var (pgGeometryType, pgGeographyType) = (PgType("geometry"), PgType("geography")); - - var reader = new PostGisReader(coordinateSequenceFactory, precisionModel, handleOrdinates); - var writer = new PostGisWriter(); - - if (pgGeometryType is not null) - _geometryHandler = new NetTopologySuiteHandler(pgGeometryType, reader, writer); - if (pgGeographyType is not null) - _geographyHandler = new NetTopologySuiteHandler(pgGeographyType, reader, writer); - } - - public override NpgsqlTypeHandler? ResolveByDataTypeName(string typeName) - => typeName switch - { - "geometry" => _geometryHandler, - "geography" => _geographyHandler, - _ => null - }; - - public override NpgsqlTypeHandler? ResolveByClrType(Type type) - => NetTopologySuiteTypeMappingResolver.ClrTypeToDataTypeName(type, _geographyAsDefault) is { } dataTypeName && ResolveByDataTypeName(dataTypeName) is { } handler - ? handler - : null; - - PostgresType? PgType(string pgTypeName) => _databaseInfo.TryGetPostgresTypeByName(pgTypeName, out var pgType) ? 
pgType : null; -} \ No newline at end of file diff --git a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeHandlerResolverFactory.cs b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeHandlerResolverFactory.cs deleted file mode 100644 index 1aed03a058..0000000000 --- a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeHandlerResolverFactory.cs +++ /dev/null @@ -1,33 +0,0 @@ -using NetTopologySuite; -using NetTopologySuite.Geometries; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.Internal.TypeMapping; - -namespace Npgsql.NetTopologySuite.Internal; - -public class NetTopologySuiteTypeHandlerResolverFactory : TypeHandlerResolverFactory -{ - readonly CoordinateSequenceFactory _coordinateSequenceFactory; - readonly PrecisionModel _precisionModel; - readonly Ordinates _handleOrdinates; - readonly bool _geographyAsDefault; - - public NetTopologySuiteTypeHandlerResolverFactory( - CoordinateSequenceFactory? coordinateSequenceFactory, - PrecisionModel? precisionModel, - Ordinates handleOrdinates, - bool geographyAsDefault) - { - _coordinateSequenceFactory = coordinateSequenceFactory ?? NtsGeometryServices.Instance.DefaultCoordinateSequenceFactory;; - _precisionModel = precisionModel ?? NtsGeometryServices.Instance.DefaultPrecisionModel; - _handleOrdinates = handleOrdinates == Ordinates.None ? 
_coordinateSequenceFactory.Ordinates : handleOrdinates; - _geographyAsDefault = geographyAsDefault; - } - - public override TypeHandlerResolver Create(TypeMapper typeMapper, NpgsqlConnector connector) - => new NetTopologySuiteTypeHandlerResolver(connector, _coordinateSequenceFactory, _precisionModel, _handleOrdinates, - _geographyAsDefault); - - public override TypeMappingResolver CreateMappingResolver() => new NetTopologySuiteTypeMappingResolver(_geographyAsDefault); -} diff --git a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeInfoResolver.cs b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeInfoResolver.cs new file mode 100644 index 0000000000..a934bd2f86 --- /dev/null +++ b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeInfoResolver.cs @@ -0,0 +1,115 @@ +using System; +using NetTopologySuite; +using NetTopologySuite.Geometries; +using NetTopologySuite.IO; +using Npgsql.Internal; +using Npgsql.Internal.Postgres; + +namespace Npgsql.NetTopologySuite.Internal; + +sealed class NetTopologySuiteTypeInfoResolver : IPgTypeInfoResolver +{ + TypeInfoMappingCollection Mappings { get; } + + public NetTopologySuiteTypeInfoResolver( + CoordinateSequenceFactory? coordinateSequenceFactory, + PrecisionModel? precisionModel, + Ordinates handleOrdinates, + bool geographyAsDefault) + { + coordinateSequenceFactory ??= NtsGeometryServices.Instance.DefaultCoordinateSequenceFactory; + precisionModel ??= NtsGeometryServices.Instance.DefaultPrecisionModel; + handleOrdinates = handleOrdinates == Ordinates.None ? coordinateSequenceFactory.Ordinates : handleOrdinates; + + var reader = new PostGisReader(coordinateSequenceFactory, precisionModel, handleOrdinates); + var writer = new PostGisWriter(); + + Mappings = new TypeInfoMappingCollection(); + AddInfos(Mappings, reader, writer, geographyAsDefault); + // TODO: Opt-in only + AddArrayInfos(Mappings); + } + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static void AddInfos(TypeInfoMappingCollection mappings, PostGisReader reader, PostGisWriter writer, bool geographyAsDefault) + { + // geometry + mappings.AddType("geometry", + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), + matchRequirement: !geographyAsDefault ? MatchRequirement.Single : MatchRequirement.DataTypeName); + + mappings.AddType("geometry", + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), + matchRequirement: !geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); + mappings.AddType("geometry", + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), + matchRequirement: !geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); + mappings.AddType("geometry", + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), + matchRequirement: !geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); + mappings.AddType("geometry", + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), + matchRequirement: !geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); + mappings.AddType("geometry", + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), + matchRequirement: !geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); + mappings.AddType("geometry", + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), + matchRequirement: !geographyAsDefault ? 
MatchRequirement.All : MatchRequirement.DataTypeName); + mappings.AddType("geometry", + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), + matchRequirement: !geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); + + // geography + mappings.AddType("geography", + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), + matchRequirement: geographyAsDefault ? MatchRequirement.Single : MatchRequirement.DataTypeName); + + mappings.AddType("geography", + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), + matchRequirement: geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); + mappings.AddType("geography", + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), + matchRequirement: geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); + mappings.AddType("geography", + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), + matchRequirement: geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); + mappings.AddType("geography", + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), + matchRequirement: geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); + mappings.AddType("geography", + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), + matchRequirement: geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); + mappings.AddType("geography", + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), + matchRequirement: geographyAsDefault ? 
MatchRequirement.All : MatchRequirement.DataTypeName); + mappings.AddType("geography", + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), + matchRequirement: geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); + } + + static void AddArrayInfos(TypeInfoMappingCollection mappings) + { + // geometry + mappings.AddArrayType("geometry"); + mappings.AddArrayType("geometry"); + mappings.AddArrayType("geometry"); + mappings.AddArrayType("geometry"); + mappings.AddArrayType("geometry"); + mappings.AddArrayType("geometry"); + mappings.AddArrayType("geometry"); + mappings.AddArrayType("geometry"); + + // geography + mappings.AddArrayType("geography"); + mappings.AddArrayType("geography"); + mappings.AddArrayType("geography"); + mappings.AddArrayType("geography"); + mappings.AddArrayType("geography"); + mappings.AddArrayType("geography"); + mappings.AddArrayType("geography"); + mappings.AddArrayType("geography"); + } +} diff --git a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeMappingResolver.cs b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeMappingResolver.cs deleted file mode 100644 index f087d6c55e..0000000000 --- a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeMappingResolver.cs +++ /dev/null @@ -1,36 +0,0 @@ -using System; -using NetTopologySuite.Geometries; -using Npgsql.Internal.TypeHandling; -using Npgsql.Internal.TypeMapping; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.NetTopologySuite.Internal; - -public class NetTopologySuiteTypeMappingResolver : TypeMappingResolver -{ - readonly bool _geographyAsDefault; - - public NetTopologySuiteTypeMappingResolver(bool geographyAsDefault) => _geographyAsDefault = geographyAsDefault; - - public override string? GetDataTypeNameByClrType(Type type) - => ClrTypeToDataTypeName(type, _geographyAsDefault); - - public override TypeMappingInfo? 
GetMappingByDataTypeName(string dataTypeName) - => DoGetMappingByDataTypeName(dataTypeName); - - internal static string? ClrTypeToDataTypeName(Type type, bool geographyAsDefault) - => type != typeof(Geometry) && type.BaseType != typeof(Geometry) && type.BaseType != typeof(GeometryCollection) - ? null - : geographyAsDefault - ? "geography" - : "geometry"; - - static TypeMappingInfo? DoGetMappingByDataTypeName(string dataTypeName) - => dataTypeName switch - { - "geometry" => new(NpgsqlDbType.Geometry, "geometry"), - "geography" => new(NpgsqlDbType.Geography, "geography"), - _ => null - }; -} diff --git a/src/Npgsql.NetTopologySuite/Npgsql.NetTopologySuite.csproj b/src/Npgsql.NetTopologySuite/Npgsql.NetTopologySuite.csproj index e09653ac97..c36aec8652 100644 --- a/src/Npgsql.NetTopologySuite/Npgsql.NetTopologySuite.csproj +++ b/src/Npgsql.NetTopologySuite/Npgsql.NetTopologySuite.csproj @@ -24,6 +24,6 @@ - + diff --git a/src/Npgsql.NetTopologySuite/NpgsqlNetTopologySuiteExtensions.cs b/src/Npgsql.NetTopologySuite/NpgsqlNetTopologySuiteExtensions.cs index a867fea349..1408709236 100644 --- a/src/Npgsql.NetTopologySuite/NpgsqlNetTopologySuiteExtensions.cs +++ b/src/Npgsql.NetTopologySuite/NpgsqlNetTopologySuiteExtensions.cs @@ -27,9 +27,7 @@ public static INpgsqlTypeMapper UseNetTopologySuite( Ordinates handleOrdinates = Ordinates.None, bool geographyAsDefault = false) { - mapper.AddTypeResolverFactory( - new NetTopologySuiteTypeHandlerResolverFactory( - coordinateSequenceFactory, precisionModel, handleOrdinates, geographyAsDefault)); + mapper.AddTypeInfoResolver(new NetTopologySuiteTypeInfoResolver(coordinateSequenceFactory, precisionModel, handleOrdinates, geographyAsDefault)); return mapper; } -} \ No newline at end of file +} diff --git a/src/Npgsql.NodaTime/Internal/DateHandler.cs b/src/Npgsql.NodaTime/Internal/DateHandler.cs deleted file mode 100644 index 9ae07b040a..0000000000 --- a/src/Npgsql.NodaTime/Internal/DateHandler.cs +++ /dev/null @@ -1,91 +0,0 @@ -using 
System; -using NodaTime; -using Npgsql.BackendMessages; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.NodaTime.Properties; -using Npgsql.PostgresTypes; -using static Npgsql.NodaTime.Internal.NodaTimeUtils; -using BclDateHandler = Npgsql.Internal.TypeHandlers.DateTimeHandlers.DateHandler; - -namespace Npgsql.NodaTime.Internal; - -sealed partial class DateHandler : NpgsqlSimpleTypeHandler, - INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler -#if NET6_0_OR_GREATER - , INpgsqlSimpleTypeHandler -#endif -{ - readonly BclDateHandler _bclHandler; - - internal DateHandler(PostgresType postgresType) - : base(postgresType) - => _bclHandler = new BclDateHandler(postgresType); - - public override LocalDate Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => buf.ReadInt32() switch - { - int.MaxValue => DisableDateTimeInfinityConversions - ? throw new InvalidCastException(NpgsqlNodaTimeStrings.CannotReadInfinityValue) - : LocalDate.MaxIsoValue, - int.MinValue => DisableDateTimeInfinityConversions - ? throw new InvalidCastException(NpgsqlNodaTimeStrings.CannotReadInfinityValue) - : LocalDate.MinIsoValue, - var value => new LocalDate().PlusDays(value + 730119) - }; - - public override int ValidateAndGetLength(LocalDate value, NpgsqlParameter? parameter) - => 4; - - public override void Write(LocalDate value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - { - if (!DisableDateTimeInfinityConversions) - { - if (value == LocalDate.MaxIsoValue) - { - buf.WriteInt32(int.MaxValue); - return; - } - if (value == LocalDate.MinIsoValue) - { - buf.WriteInt32(int.MinValue); - return; - } - } - - var totalDaysSinceEra = Period.Between(default, value, PeriodUnits.Days).Days; - buf.WriteInt32(totalDaysSinceEra - 730119); - } - - DateTime INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? 
fieldDescription) - => _bclHandler.Read(buf, len, fieldDescription); - - int INpgsqlSimpleTypeHandler.ValidateAndGetLength(DateTime value, NpgsqlParameter? parameter) - => _bclHandler.ValidateAndGetLength(value, parameter); - - void INpgsqlSimpleTypeHandler.Write(DateTime value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => _bclHandler.Write(value, buf, parameter); - - int INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => _bclHandler.Read(buf, len, fieldDescription); - - int INpgsqlSimpleTypeHandler.ValidateAndGetLength(int value, NpgsqlParameter? parameter) - => _bclHandler.ValidateAndGetLength(value, parameter); - - void INpgsqlSimpleTypeHandler.Write(int value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => _bclHandler.Write(value, buf, parameter); - -#if NET6_0_OR_GREATER - DateOnly INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => _bclHandler.Read(buf, len, fieldDescription); - - public int ValidateAndGetLength(DateOnly value, NpgsqlParameter? parameter) - => _bclHandler.ValidateAndGetLength(value, parameter); - - public void Write(DateOnly value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) - => _bclHandler.Write(value, buf, parameter); -#endif - - public override NpgsqlTypeHandler CreateRangeHandler(PostgresType pgRangeType) - => new DateRangeHandler(pgRangeType, this); -} \ No newline at end of file diff --git a/src/Npgsql.NodaTime/Internal/DateIntervalConverter.cs b/src/Npgsql.NodaTime/Internal/DateIntervalConverter.cs new file mode 100644 index 0000000000..5e25d8bfcc --- /dev/null +++ b/src/Npgsql.NodaTime/Internal/DateIntervalConverter.cs @@ -0,0 +1,49 @@ +using System.Threading; +using System.Threading.Tasks; +using NodaTime; +using Npgsql.Internal; +using NpgsqlTypes; + +namespace Npgsql.NodaTime.Internal; + +public class DateIntervalConverter : PgStreamingConverter +{ + readonly bool _dateTimeInfinityConversions; + readonly PgConverter> _rangeConverter; + + public DateIntervalConverter(PgConverter> rangeConverter, bool dateTimeInfinityConversions) + { + _rangeConverter = rangeConverter; + _dateTimeInfinityConversions = dateTimeInfinityConversions; + } + + public override DateInterval Read(PgReader reader) + => Read(async: false, reader, CancellationToken.None).GetAwaiter().GetResult(); + + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => Read(async: true, reader, cancellationToken); + + async ValueTask Read(bool async, PgReader reader, CancellationToken cancellationToken) + { + var range = async + ? await _rangeConverter.ReadAsync(reader, cancellationToken).ConfigureAwait(false) + // ReSharper disable once MethodHasAsyncOverloadWithCancellation + : _rangeConverter.Read(reader); + + var upperBound = range.UpperBound; + + if (upperBound != LocalDate.MaxIsoValue || !_dateTimeInfinityConversions) + upperBound -= Period.FromDays(1); + + return new(range.LowerBound, upperBound); + } + + public override Size GetSize(SizeContext context, DateInterval value, ref object? 
writeState) + => _rangeConverter.GetSize(context, new NpgsqlRange(value.Start, value.End), ref writeState); + + public override void Write(PgWriter writer, DateInterval value) + => _rangeConverter.Write(writer, new NpgsqlRange(value.Start, value.End)); + + public override ValueTask WriteAsync(PgWriter writer, DateInterval value, CancellationToken cancellationToken = default) + => _rangeConverter.WriteAsync(writer, new NpgsqlRange(value.Start, value.End), cancellationToken); +} diff --git a/src/Npgsql.NodaTime/Internal/DateMultirangeHandler.cs b/src/Npgsql.NodaTime/Internal/DateMultirangeHandler.cs deleted file mode 100644 index 167b8eb310..0000000000 --- a/src/Npgsql.NodaTime/Internal/DateMultirangeHandler.cs +++ /dev/null @@ -1,120 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Threading; -using System.Threading.Tasks; -using NodaTime; -using Npgsql.BackendMessages; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandlers; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.NodaTime.Internal; - -public partial class DateMultirangeHandler : MultirangeHandler, - INpgsqlTypeHandler, INpgsqlTypeHandler> -{ - readonly INpgsqlTypeHandler _dateIntervalHandler; - - public DateMultirangeHandler(PostgresMultirangeType multirangePostgresType, DateRangeHandler rangeHandler) - : base(multirangePostgresType, rangeHandler) - => _dateIntervalHandler = rangeHandler; - - public override Type GetFieldType(FieldDescription? fieldDescription = null) => typeof(DateInterval[]); - - public override async ValueTask ReadAsObject(NpgsqlReadBuffer buf, int len, bool async, - FieldDescription? fieldDescription = null) - => (await Read(buf, len, async, fieldDescription))!; - - async ValueTask INpgsqlTypeHandler.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription) - { - await buf.Ensure(4, async); - var numRanges = buf.ReadInt32(); - var multirange = new DateInterval[numRanges]; - - for (var i = 0; i < multirange.Length; i++) - { - await buf.Ensure(4, async); - var rangeLen = buf.ReadInt32(); - multirange[i] = await _dateIntervalHandler.Read(buf, rangeLen, async, fieldDescription); - } - - return multirange; - } - - async ValueTask> INpgsqlTypeHandler>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - { - await buf.Ensure(4, async); - var numRanges = buf.ReadInt32(); - var multirange = new List(numRanges); - - for (var i = 0; i < numRanges; i++) - { - await buf.Ensure(4, async); - var rangeLen = buf.ReadInt32(); - multirange.Add(await _dateIntervalHandler.Read(buf, rangeLen, async, fieldDescription)); - } - - return multirange; - } - - public int ValidateAndGetLength(DateInterval[] value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthCore(value, ref lengthCache); - - public int ValidateAndGetLength(List value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthCore(value, ref lengthCache); - - int ValidateAndGetLengthCore(IList value, ref NpgsqlLengthCache? lengthCache) - { - lengthCache ??= new NpgsqlLengthCache(1); - if (lengthCache.IsPopulated) - return lengthCache.Get(); - - var sum = 4 + 4 * value.Count; - for (var i = 0; i < value.Count; i++) - sum += _dateIntervalHandler.ValidateAndGetLength(value[i], ref lengthCache, parameter: null); - - return lengthCache!.Set(sum); - } - - public async Task Write( - DateInterval[] value, - NpgsqlWriteBuffer buf, - NpgsqlLengthCache? lengthCache, - NpgsqlParameter? 
parameter, - bool async, - CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 4) - await buf.Flush(async, cancellationToken); - - buf.WriteInt32(value.Length); - - for (var i = 0; i < value.Length; i++) - await RangeHandler.WriteWithLength(value[i], buf, lengthCache, parameter: null, async, cancellationToken); - } - - public async Task Write( - List value, - NpgsqlWriteBuffer buf, - NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, - bool async, - CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 4) - await buf.Flush(async, cancellationToken); - - buf.WriteInt32(value.Count); - - for (var i = 0; i < value.Count; i++) - { - var interval = value[i]; - await RangeHandler.WriteWithLength( - new NpgsqlRange(interval.Start, interval.End), buf, lengthCache, parameter: null, async, cancellationToken); - } - } -} \ No newline at end of file diff --git a/src/Npgsql.NodaTime/Internal/DateRangeHandler.cs b/src/Npgsql.NodaTime/Internal/DateRangeHandler.cs deleted file mode 100644 index 601a0cfb45..0000000000 --- a/src/Npgsql.NodaTime/Internal/DateRangeHandler.cs +++ /dev/null @@ -1,69 +0,0 @@ -using System; -using System.Threading; -using System.Threading.Tasks; -using NodaTime; -using Npgsql.BackendMessages; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandlers; -using Npgsql.Internal.TypeHandling; -using Npgsql.NodaTime.Properties; -using Npgsql.PostgresTypes; -using NpgsqlTypes; -using static Npgsql.NodaTime.Internal.NodaTimeUtils; - -namespace Npgsql.NodaTime.Internal; - -public partial class DateRangeHandler : RangeHandler, INpgsqlTypeHandler -#if NET6_0_OR_GREATER - , INpgsqlTypeHandler> -#endif -{ - public DateRangeHandler(PostgresType rangePostgresType, NpgsqlTypeHandler subtypeHandler) - : base(rangePostgresType, subtypeHandler) - { - } - - public override Type GetFieldType(FieldDescription? 
fieldDescription = null) => typeof(DateInterval); - - public override async ValueTask ReadAsObject(NpgsqlReadBuffer buf, int len, bool async, - FieldDescription? fieldDescription = null) - => (await Read(buf, len, async, fieldDescription))!; - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - { - var range = await Read(buf, len, async, fieldDescription); - - var upperBound = range.UpperBound; - - if (DisableDateTimeInfinityConversions || upperBound != LocalDate.MaxIsoValue) - upperBound -= Period.FromDays(1); - - return new(range.LowerBound, upperBound); - } - - public int ValidateAndGetLength(DateInterval value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthRange(new NpgsqlRange(value.Start, value.End), ref lengthCache, parameter); - - public Task Write( - DateInterval value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, - CancellationToken cancellationToken = default) - => WriteRange(new NpgsqlRange(value.Start, value.End), buf, lengthCache, parameter, async, cancellationToken); - -#if NET6_0_OR_GREATER - ValueTask> INpgsqlTypeHandler>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadRange(buf, len, async, fieldDescription); - - public int ValidateAndGetLength(NpgsqlRange value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthRange(value, ref lengthCache, parameter); - - public Task Write( - NpgsqlRange value, - NpgsqlWriteBuffer buf, - NpgsqlLengthCache? lengthCache, - NpgsqlParameter? 
parameter, - bool async, - CancellationToken cancellationToken = default) - => WriteRange(value, buf, lengthCache, parameter, async, cancellationToken); -#endif -} \ No newline at end of file diff --git a/src/Npgsql.NodaTime/Internal/DurationConverter.cs b/src/Npgsql.NodaTime/Internal/DurationConverter.cs new file mode 100644 index 0000000000..940ef29464 --- /dev/null +++ b/src/Npgsql.NodaTime/Internal/DurationConverter.cs @@ -0,0 +1,42 @@ +using System; +using NodaTime; +using Npgsql.Internal; +using Npgsql.NodaTime.Properties; + +namespace Npgsql.NodaTime.Internal; + +sealed class DurationConverter : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long) + sizeof(int) + sizeof(int)); + return format is DataFormat.Binary; + } + + protected override Duration ReadCore(PgReader reader) + { + var microsecondsInDay = reader.ReadInt64(); + var days = reader.ReadInt32(); + var totalMonths = reader.ReadInt32(); + + if (totalMonths != 0) + throw new InvalidCastException(NpgsqlNodaTimeStrings.CannotReadIntervalWithMonthsAsDuration); + + return Duration.FromDays(days) + Duration.FromNanoseconds(microsecondsInDay * 1000); + } + + protected override void WriteCore(PgWriter writer, Duration value) + { + const long microsecondsPerSecond = 1_000_000; + + // Note that the end result must be long + // see #3438 + var microsecondsInDay = + (((value.Hours * NodaConstants.MinutesPerHour + value.Minutes) * NodaConstants.SecondsPerMinute + value.Seconds) * + microsecondsPerSecond + value.SubsecondNanoseconds / 1000); // Take the microseconds, discard the nanosecond remainder + + writer.WriteInt64(microsecondsInDay); + writer.WriteInt32(value.Days); // days + writer.WriteInt32(0); // months + } +} diff --git a/src/Npgsql.NodaTime/Internal/IntervalConverter.cs b/src/Npgsql.NodaTime/Internal/IntervalConverter.cs new file mode 100644 index 
0000000000..3ca9ca9ab0 --- /dev/null +++ b/src/Npgsql.NodaTime/Internal/IntervalConverter.cs @@ -0,0 +1,57 @@ +using System.Threading; +using System.Threading.Tasks; +using NodaTime; +using Npgsql.Internal; +using NpgsqlTypes; + +namespace Npgsql.NodaTime.Internal; + +public class IntervalConverter : PgStreamingConverter +{ + readonly PgConverter> _rangeConverter; + + public IntervalConverter(PgConverter> rangeConverter) + => _rangeConverter = rangeConverter; + + public override Interval Read(PgReader reader) + => Read(async: false, reader, CancellationToken.None).GetAwaiter().GetResult(); + + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => Read(async: true, reader, cancellationToken); + + async ValueTask Read(bool async, PgReader reader, CancellationToken cancellationToken) + { + var range = async + ? await _rangeConverter.ReadAsync(reader, cancellationToken).ConfigureAwait(false) + // ReSharper disable once MethodHasAsyncOverloadWithCancellation + : _rangeConverter.Read(reader); + + // NodaTime Interval includes the start instant and excludes the end instant. + Instant? start = range.LowerBoundInfinite + ? null + : range.LowerBoundIsInclusive + ? range.LowerBound + : range.LowerBound + Duration.Epsilon; + Instant? end = range.UpperBoundInfinite + ? null + : range.UpperBoundIsInclusive + ? range.UpperBound + Duration.Epsilon + : range.UpperBound; + + return new(start, end); + } + + public override Size GetSize(SizeContext context, Interval value, ref object? 
writeState) + => _rangeConverter.GetSize(context, IntervalToNpgsqlRange(value), ref writeState); + + public override void Write(PgWriter writer, Interval value) + => _rangeConverter.Write(writer, IntervalToNpgsqlRange(value)); + + public override ValueTask WriteAsync(PgWriter writer, Interval value, CancellationToken cancellationToken = default) + => _rangeConverter.WriteAsync(writer, IntervalToNpgsqlRange(value), cancellationToken); + + static NpgsqlRange IntervalToNpgsqlRange(Interval interval) + => new( + interval.HasStart ? interval.Start : default, true, !interval.HasStart, + interval.HasEnd ? interval.End : default, false, !interval.HasEnd); +} diff --git a/src/Npgsql.NodaTime/Internal/IntervalHandler.cs b/src/Npgsql.NodaTime/Internal/IntervalHandler.cs deleted file mode 100644 index 4e9305a20b..0000000000 --- a/src/Npgsql.NodaTime/Internal/IntervalHandler.cs +++ /dev/null @@ -1,106 +0,0 @@ -using System; -using NodaTime; -using Npgsql.BackendMessages; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; -using BclIntervalHandler = Npgsql.Internal.TypeHandlers.DateTimeHandlers.IntervalHandler; - -namespace Npgsql.NodaTime.Internal; - -sealed partial class IntervalHandler : - NpgsqlSimpleTypeHandler, - INpgsqlSimpleTypeHandler, - INpgsqlSimpleTypeHandler, - INpgsqlSimpleTypeHandler -{ - readonly BclIntervalHandler _bclHandler; - - internal IntervalHandler(PostgresType postgresType) - : base(postgresType) - => _bclHandler = new BclIntervalHandler(postgresType); - - public override Period Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - { - var microsecondsInDay = buf.ReadInt64(); - var days = buf.ReadInt32(); - var totalMonths = buf.ReadInt32(); - - // NodaTime will normalize most things (i.e. nanoseconds to milliseconds, seconds...) - // but it will not normalize months to years. 
- var months = totalMonths % 12; - var years = totalMonths / 12; - - return new PeriodBuilder - { - Nanoseconds = microsecondsInDay * 1000, - Days = days, - Months = months, - Years = years - }.Build().Normalize(); - } - - public override int ValidateAndGetLength(Period value, NpgsqlParameter? parameter) - => 16; - - public override void Write(Period value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - { - // Note that the end result must be long - // see #3438 - var microsecondsInDay = - (((value.Hours * NodaConstants.MinutesPerHour + value.Minutes) * NodaConstants.SecondsPerMinute + value.Seconds) * NodaConstants.MillisecondsPerSecond + value.Milliseconds) * 1000 + - value.Nanoseconds / 1000; // Take the microseconds, discard the nanosecond remainder - - buf.WriteInt64(microsecondsInDay); - buf.WriteInt32(value.Weeks * 7 + value.Days); // days - buf.WriteInt32(value.Years * 12 + value.Months); // months - } - - Duration INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - { - var microsecondsInDay = buf.ReadInt64(); - var days = buf.ReadInt32(); - var totalMonths = buf.ReadInt32(); - - if (totalMonths != 0) - throw new NpgsqlException("Cannot read PostgreSQL interval with non-zero months to NodaTime Duration. Try reading as a NodaTime Period instead."); - - return Duration.FromDays(days) + Duration.FromNanoseconds(microsecondsInDay * 1000); - } - - public int ValidateAndGetLength(Duration value, NpgsqlParameter? parameter) => 16; - - public void Write(Duration value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) - { - const long microsecondsPerSecond = 1_000_000; - - // Note that the end result must be long - // see #3438 - var microsecondsInDay = - (((value.Hours * NodaConstants.MinutesPerHour + value.Minutes) * NodaConstants.SecondsPerMinute + value.Seconds) * - microsecondsPerSecond + value.SubsecondNanoseconds / 1000); // Take the microseconds, discard the nanosecond remainder - - buf.WriteInt64(microsecondsInDay); - buf.WriteInt32(value.Days); // days - buf.WriteInt32(0); // months - } - - TimeSpan INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => _bclHandler.Read(buf, len, fieldDescription); - - int INpgsqlSimpleTypeHandler.ValidateAndGetLength(TimeSpan value, NpgsqlParameter? parameter) - => ((INpgsqlSimpleTypeHandler)_bclHandler).ValidateAndGetLength(value, parameter); - - void INpgsqlSimpleTypeHandler.Write(TimeSpan value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => ((INpgsqlSimpleTypeHandler)_bclHandler).Write(value, buf, parameter); - - NpgsqlInterval INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => _bclHandler.Read(buf, len, fieldDescription); - - int INpgsqlSimpleTypeHandler.ValidateAndGetLength(NpgsqlInterval value, NpgsqlParameter? parameter) - => ((INpgsqlSimpleTypeHandler)_bclHandler).ValidateAndGetLength(value, parameter); - - void INpgsqlSimpleTypeHandler.Write(NpgsqlInterval value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) - => ((INpgsqlSimpleTypeHandler)_bclHandler).Write(value, buf, parameter); -} \ No newline at end of file diff --git a/src/Npgsql.NodaTime/Internal/LegacyConverters.cs b/src/Npgsql.NodaTime/Internal/LegacyConverters.cs new file mode 100644 index 0000000000..54393a4821 --- /dev/null +++ b/src/Npgsql.NodaTime/Internal/LegacyConverters.cs @@ -0,0 +1,78 @@ +using System; +using NodaTime; +using Npgsql.Internal; +using static Npgsql.NodaTime.Internal.NodaTimeUtils; + +namespace Npgsql.NodaTime.Internal; + +sealed class LegacyTimestampTzZonedDateTimeConverter : PgBufferedConverter +{ + readonly DateTimeZone _dateTimeZone; + readonly bool _dateTimeInfinityConversions; + + public LegacyTimestampTzZonedDateTimeConverter(DateTimeZone dateTimeZone, bool dateTimeInfinityConversions) + { + _dateTimeZone = dateTimeZone; + _dateTimeInfinityConversions = dateTimeInfinityConversions; + } + + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long)); + return format is DataFormat.Binary; + } + + protected override ZonedDateTime ReadCore(PgReader reader) + { + var instant = DecodeInstant(reader.ReadInt64(), _dateTimeInfinityConversions); + if (_dateTimeInfinityConversions && (instant == Instant.MaxValue || instant == Instant.MinValue)) + throw new InvalidCastException("Infinity values not supported for timestamp with time zone"); + + return instant.InZone(_dateTimeZone); + } + + protected override void WriteCore(PgWriter writer, ZonedDateTime value) + { + var instant = value.ToInstant(); + if (_dateTimeInfinityConversions && (instant == Instant.MaxValue || instant == Instant.MinValue)) + throw new ArgumentException("Infinity values not supported for timestamp with time zone"); + + writer.WriteInt64(EncodeInstant(instant, _dateTimeInfinityConversions)); + } +} + +sealed class LegacyTimestampTzOffsetDateTimeConverter : PgBufferedConverter +{ + readonly bool 
_dateTimeInfinityConversions; + readonly DateTimeZone _dateTimeZone; + + public LegacyTimestampTzOffsetDateTimeConverter(DateTimeZone dateTimeZone, bool dateTimeInfinityConversions) + { + _dateTimeInfinityConversions = dateTimeInfinityConversions; + _dateTimeZone = dateTimeZone; + } + + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long)); + return format is DataFormat.Binary; + } + + protected override OffsetDateTime ReadCore(PgReader reader) + { + var instant = DecodeInstant(reader.ReadInt64(), _dateTimeInfinityConversions); + if (_dateTimeInfinityConversions && (instant == Instant.MaxValue || instant == Instant.MinValue)) + throw new InvalidCastException("Infinity values not supported for timestamp with time zone"); + + return instant.InZone(_dateTimeZone).ToOffsetDateTime(); + } + + protected override void WriteCore(PgWriter writer, OffsetDateTime value) + { + var instant = value.ToInstant(); + if (_dateTimeInfinityConversions && (instant == Instant.MaxValue || instant == Instant.MinValue)) + throw new ArgumentException("Infinity values not supported for timestamp with time zone"); + + writer.WriteInt64(EncodeInstant(instant, true)); + } +} diff --git a/src/Npgsql.NodaTime/Internal/LegacyTimestampHandler.cs b/src/Npgsql.NodaTime/Internal/LegacyTimestampHandler.cs deleted file mode 100644 index ee2ba1a130..0000000000 --- a/src/Npgsql.NodaTime/Internal/LegacyTimestampHandler.cs +++ /dev/null @@ -1,64 +0,0 @@ -using System; -using System.Diagnostics; -using NodaTime; -using Npgsql.BackendMessages; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using BclTimestampHandler = Npgsql.Internal.TypeHandlers.DateTimeHandlers.TimestampHandler; - -namespace Npgsql.NodaTime.Internal; - -sealed partial class LegacyTimestampHandler : NpgsqlSimpleTypeHandler, - INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler, 
INpgsqlSimpleTypeHandler -{ - readonly BclTimestampHandler _bclHandler; - - internal LegacyTimestampHandler(PostgresType postgresType) - : base(postgresType) - => _bclHandler = new BclTimestampHandler(postgresType); - - #region Read - - public override Instant Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => TimestampTzHandler.ReadInstant(buf); - - LocalDateTime INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => TimestampHandler.ReadLocalDateTime(buf); - - DateTime INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => _bclHandler.Read(buf, len, fieldDescription); - - long INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => ((INpgsqlSimpleTypeHandler)_bclHandler).Read(buf, len, fieldDescription); - - #endregion Read - - #region Write - - public override int ValidateAndGetLength(Instant value, NpgsqlParameter? parameter) - => 8; - - int INpgsqlSimpleTypeHandler.ValidateAndGetLength(LocalDateTime value, NpgsqlParameter? parameter) - => 8; - - public override void Write(Instant value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => TimestampTzHandler.WriteInstant(value, buf); - - void INpgsqlSimpleTypeHandler.Write(LocalDateTime value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => TimestampHandler.WriteLocalDateTime(value, buf); - - int INpgsqlSimpleTypeHandler.ValidateAndGetLength(DateTime value, NpgsqlParameter? parameter) - => ((INpgsqlSimpleTypeHandler)_bclHandler).ValidateAndGetLength(value, parameter); - - public int ValidateAndGetLength(long value, NpgsqlParameter? parameter) - => ((INpgsqlSimpleTypeHandler)_bclHandler).ValidateAndGetLength(value, parameter); - - void INpgsqlSimpleTypeHandler.Write(DateTime value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) - => ((INpgsqlSimpleTypeHandler)_bclHandler).Write(value, buf, parameter); - - void INpgsqlSimpleTypeHandler.Write(long value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => ((INpgsqlSimpleTypeHandler)_bclHandler).Write(value, buf, parameter); - - #endregion Write -} \ No newline at end of file diff --git a/src/Npgsql.NodaTime/Internal/LegacyTimestampTzHandler.cs b/src/Npgsql.NodaTime/Internal/LegacyTimestampTzHandler.cs deleted file mode 100644 index c299193343..0000000000 --- a/src/Npgsql.NodaTime/Internal/LegacyTimestampTzHandler.cs +++ /dev/null @@ -1,121 +0,0 @@ -using System; -using NodaTime; -using NodaTime.TimeZones; -using Npgsql.BackendMessages; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using BclTimestampTzHandler = Npgsql.Internal.TypeHandlers.DateTimeHandlers.TimestampTzHandler; -using static Npgsql.NodaTime.Internal.NodaTimeUtils; - -namespace Npgsql.NodaTime.Internal; - -sealed partial class LegacyTimestampTzHandler : NpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler, - INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler, - INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler -{ - readonly IDateTimeZoneProvider _dateTimeZoneProvider; - readonly TimestampTzHandler _wrappedHandler; - - public LegacyTimestampTzHandler(PostgresType postgresType) - : base(postgresType) - { - _dateTimeZoneProvider = DateTimeZoneProviders.Tzdb; - _wrappedHandler = new TimestampTzHandler(postgresType); - } - - #region Read - - public override Instant Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => _wrappedHandler.Read(buf, len, fieldDescription); - - ZonedDateTime INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? 
fieldDescription) - { - try - { - var instant = Read(buf, len, fieldDescription); - - if (!DisableDateTimeInfinityConversions && (instant == Instant.MaxValue || instant == Instant.MinValue)) - throw new InvalidCastException("Infinity values not supported for timestamp with time zone"); - - return instant.InZone(_dateTimeZoneProvider[buf.Connection.Timezone]); - } - catch (Exception e) when ( - string.Equals(buf.Connection.Timezone, "localtime", StringComparison.OrdinalIgnoreCase) && - (e is TimeZoneNotFoundException || e is DateTimeZoneNotFoundException)) - { - throw new TimeZoneNotFoundException( - "The special PostgreSQL timezone 'localtime' is not supported when reading values of type 'timestamp with time zone'. " + - "Please specify a real timezone in 'postgresql.conf' on the server, or set the 'PGTZ' environment variable on the client.", - e); - } - } - - OffsetDateTime INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => ((INpgsqlSimpleTypeHandler)this).Read(buf, len, fieldDescription).ToOffsetDateTime(); - - DateTimeOffset INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => _wrappedHandler.Read(buf, len, fieldDescription); - - DateTime INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => _wrappedHandler.Read(buf, len, fieldDescription); - - long INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => _wrappedHandler.Read(buf, len, fieldDescription); - - #endregion Read - - #region Write - - public override int ValidateAndGetLength(Instant value, NpgsqlParameter? parameter) - => 8; - - int INpgsqlSimpleTypeHandler.ValidateAndGetLength(ZonedDateTime value, NpgsqlParameter? parameter) - => 8; - - public int ValidateAndGetLength(OffsetDateTime value, NpgsqlParameter? parameter) - => 8; - - public override void Write(Instant value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) - => _wrappedHandler.Write(value, buf, parameter); - - void INpgsqlSimpleTypeHandler.Write(ZonedDateTime value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - { - var instant = value.ToInstant(); - - if (!DisableDateTimeInfinityConversions && (instant == Instant.MaxValue || instant == Instant.MinValue)) - throw new InvalidCastException("Infinity values not supported for timestamp with time zone"); - - _wrappedHandler.Write(instant, buf, parameter); - } - - public void Write(OffsetDateTime value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - { - var instant = value.ToInstant(); - - if (!DisableDateTimeInfinityConversions && (instant == Instant.MaxValue || instant == Instant.MinValue)) - throw new InvalidCastException("Infinity values not supported for timestamp with time zone"); - - _wrappedHandler.Write(instant, buf, parameter); - } - - int INpgsqlSimpleTypeHandler.ValidateAndGetLength(DateTimeOffset value, NpgsqlParameter? parameter) - => ((INpgsqlSimpleTypeHandler)_wrappedHandler).ValidateAndGetLength(value, parameter); - - void INpgsqlSimpleTypeHandler.Write(DateTimeOffset value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => ((INpgsqlSimpleTypeHandler)_wrappedHandler).Write(value, buf, parameter); - - int INpgsqlSimpleTypeHandler.ValidateAndGetLength(DateTime value, NpgsqlParameter? parameter) - => ((INpgsqlSimpleTypeHandler)_wrappedHandler).ValidateAndGetLength(value, parameter); - - void INpgsqlSimpleTypeHandler.Write(DateTime value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => ((INpgsqlSimpleTypeHandler)_wrappedHandler).Write(value, buf, parameter); - - int INpgsqlSimpleTypeHandler.ValidateAndGetLength(long value, NpgsqlParameter? parameter) - => ((INpgsqlSimpleTypeHandler)_wrappedHandler).ValidateAndGetLength(value, parameter); - - void INpgsqlSimpleTypeHandler.Write(long value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) - => ((INpgsqlSimpleTypeHandler)_wrappedHandler).Write(value, buf, parameter); - - #endregion Write -} \ No newline at end of file diff --git a/src/Npgsql.NodaTime/Internal/LocalDateConverter.cs b/src/Npgsql.NodaTime/Internal/LocalDateConverter.cs new file mode 100644 index 0000000000..e6be7fe69b --- /dev/null +++ b/src/Npgsql.NodaTime/Internal/LocalDateConverter.cs @@ -0,0 +1,52 @@ +using System; +using NodaTime; +using Npgsql.Internal; +using Npgsql.NodaTime.Properties; + +namespace Npgsql.NodaTime.Internal; + +sealed class LocalDateConverter : PgBufferedConverter +{ + readonly bool _dateTimeInfinityConversions; + + public LocalDateConverter(bool dateTimeInfinityConversions) + => _dateTimeInfinityConversions = dateTimeInfinityConversions; + + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(int)); + return format is DataFormat.Binary; + } + + protected override LocalDate ReadCore(PgReader reader) + => reader.ReadInt32() switch + { + int.MaxValue => _dateTimeInfinityConversions + ? LocalDate.MaxIsoValue + : throw new InvalidCastException(NpgsqlNodaTimeStrings.CannotReadInfinityValue), + int.MinValue => _dateTimeInfinityConversions + ? 
LocalDate.MinIsoValue + : throw new InvalidCastException(NpgsqlNodaTimeStrings.CannotReadInfinityValue), + var value => new LocalDate().PlusDays(value + 730119) + }; + + protected override void WriteCore(PgWriter writer, LocalDate value) + { + if (_dateTimeInfinityConversions) + { + if (value == LocalDate.MaxIsoValue) + { + writer.WriteInt32(int.MaxValue); + return; + } + if (value == LocalDate.MinIsoValue) + { + writer.WriteInt32(int.MinValue); + return; + } + } + + var totalDaysSinceEra = Period.Between(default, value, PeriodUnits.Days).Days; + writer.WriteInt32(totalDaysSinceEra - 730119); + } +} diff --git a/src/Npgsql.NodaTime/Internal/LocalTimeConverter.cs b/src/Npgsql.NodaTime/Internal/LocalTimeConverter.cs new file mode 100644 index 0000000000..5849f45dfc --- /dev/null +++ b/src/Npgsql.NodaTime/Internal/LocalTimeConverter.cs @@ -0,0 +1,20 @@ +using NodaTime; +using Npgsql.Internal; + +namespace Npgsql.NodaTime.Internal; + +sealed class LocalTimeConverter : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long)); + return format is DataFormat.Binary; + } + + // PostgreSQL time resolution == 1 microsecond == 10 ticks + protected override LocalTime ReadCore(PgReader reader) + => LocalTime.FromTicksSinceMidnight(reader.ReadInt64() * 10); + + protected override void WriteCore(PgWriter writer, LocalTime value) + => writer.WriteInt64(value.TickOfDay / 10); +} diff --git a/src/Npgsql.NodaTime/Internal/NodaTimeTypeHandlerResolver.cs b/src/Npgsql.NodaTime/Internal/NodaTimeTypeHandlerResolver.cs deleted file mode 100644 index c0b1cc60c6..0000000000 --- a/src/Npgsql.NodaTime/Internal/NodaTimeTypeHandlerResolver.cs +++ /dev/null @@ -1,155 +0,0 @@ -using System; -using NodaTime; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandlers; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; -using 
static Npgsql.NodaTime.Internal.NodaTimeUtils; - -namespace Npgsql.NodaTime.Internal; - -public class NodaTimeTypeHandlerResolver : TypeHandlerResolver -{ - readonly NpgsqlDatabaseInfo _databaseInfo; - - readonly NpgsqlTypeHandler _timestampHandler; - readonly NpgsqlTypeHandler _timestampTzHandler; - readonly DateHandler _dateHandler; - readonly TimeHandler _timeHandler; - readonly TimeTzHandler _timeTzHandler; - readonly IntervalHandler _intervalHandler; - - TimestampTzRangeHandler? _timestampTzRangeHandler; - DateRangeHandler? _dateRangeHandler; - DateMultirangeHandler? _dateMultirangeHandler; - TimestampTzMultirangeHandler? _timestampTzMultirangeHandler; - - NpgsqlTypeHandler? _timestampTzRangeArray; - NpgsqlTypeHandler? _dateRangeArray; - - readonly ArrayNullabilityMode _arrayNullabilityMode; - - internal NodaTimeTypeHandlerResolver(NpgsqlConnector connector) - { - _databaseInfo = connector.DatabaseInfo; - - _timestampHandler = LegacyTimestampBehavior - ? new LegacyTimestampHandler(PgType("timestamp without time zone")) - : new TimestampHandler(PgType("timestamp without time zone")); - _timestampTzHandler = LegacyTimestampBehavior - ? new LegacyTimestampTzHandler(PgType("timestamp with time zone")) - : new TimestampTzHandler(PgType("timestamp with time zone")); - _dateHandler = new DateHandler(PgType("date")); - _timeHandler = new TimeHandler(PgType("time without time zone")); - _timeTzHandler = new TimeTzHandler(PgType("time with time zone")); - _intervalHandler = new IntervalHandler(PgType("interval")); - - // Note that the range handlers are absent on some pseudo-PostgreSQL databases (e.g. CockroachDB), and multirange types - // were only introduced in PG14. So we resolve these lazily. - - _arrayNullabilityMode = connector.Settings.ArrayNullabilityMode; - } - - public override NpgsqlTypeHandler? 
ResolveByDataTypeName(string typeName) - => typeName switch - { - "timestamp" or "timestamp without time zone" => _timestampHandler, - "timestamptz" or "timestamp with time zone" => _timestampTzHandler, - "date" => _dateHandler, - "time without time zone" => _timeHandler, - "time with time zone" => _timeTzHandler, - "interval" => _intervalHandler, - - "tstzrange" => TsTzRange(), - "daterange" => DateRange(), - "tstzmultirange" => TsTzMultirange(), - "datemultirange" => DateMultirange(), - - "tstzrange[]" => TsTzRangeArray(), - "daterange[]" => DateRangeArray(), - - _ => null - }; - - public override NpgsqlTypeHandler? ResolveByClrType(Type type) - => NodaTimeTypeMappingResolver.ClrTypeToDataTypeName(type) is { } dataTypeName && ResolveByDataTypeName(dataTypeName) is { } handler - ? handler - : null; - - public override NpgsqlTypeHandler? ResolveByNpgsqlDbType(NpgsqlDbType npgsqlDbType) - => npgsqlDbType switch - { - NpgsqlDbType.TimestampTzRange => TsTzRange(), - NpgsqlDbType.DateRange => DateRange(), - NpgsqlDbType.TimestampTzMultirange => TsTzMultirange(), - NpgsqlDbType.DateMultirange => DateMultirange(), - NpgsqlDbType.TimestampTzRange | NpgsqlDbType.Array => TsTzRangeArray(), - NpgsqlDbType.DateRange | NpgsqlDbType.Array => TsTzRangeArray(), - _ => null - }; - - public override NpgsqlTypeHandler? ResolveValueTypeGenerically(T value) - { - // This method only ever gets called for value types, and relies on the JIT specializing the method for T by eliding all the - // type checks below. - - if (typeof(T) == typeof(Instant)) - return LegacyTimestampBehavior ? 
_timestampHandler : _timestampTzHandler; - - if (typeof(T) == typeof(LocalDateTime)) - return _timestampHandler; - if (typeof(T) == typeof(ZonedDateTime)) - return _timestampTzHandler; - if (typeof(T) == typeof(OffsetDateTime)) - return _timestampTzHandler; - if (typeof(T) == typeof(LocalDate)) - return _dateHandler; - if (typeof(T) == typeof(LocalTime)) - return _timeHandler; - if (typeof(T) == typeof(OffsetTime)) - return _timeTzHandler; - if (typeof(T) == typeof(Period)) - return _intervalHandler; - if (typeof(T) == typeof(Duration)) - return _intervalHandler; - - if (typeof(T) == typeof(Interval)) - return _timestampTzRangeHandler; - if (typeof(T) == typeof(NpgsqlRange)) - return _timestampTzRangeHandler; - if (typeof(T) == typeof(NpgsqlRange)) - return _timestampTzRangeHandler; - if (typeof(T) == typeof(NpgsqlRange)) - return _timestampTzRangeHandler; - - // Note that DateInterval is a reference type, so not included in this method - if (typeof(T) == typeof(NpgsqlRange)) - return _dateRangeHandler; - - return null; - } - - PostgresType PgType(string pgTypeName) => _databaseInfo.GetPostgresTypeByName(pgTypeName); - - TimestampTzRangeHandler TsTzRange() - => _timestampTzRangeHandler ??= new TimestampTzRangeHandler(PgType("tstzrange"), _timestampTzHandler); - - DateRangeHandler DateRange() - => _dateRangeHandler ??= new DateRangeHandler(PgType("daterange"), _dateHandler); - - NpgsqlTypeHandler TsTzMultirange() - => _timestampTzMultirangeHandler ??= - new TimestampTzMultirangeHandler((PostgresMultirangeType)PgType("tstzmultirange"), TsTzRange()); - - NpgsqlTypeHandler DateMultirange() - => _dateMultirangeHandler ??= new DateMultirangeHandler((PostgresMultirangeType)PgType("datemultirange"), DateRange()); - - NpgsqlTypeHandler TsTzRangeArray() - => _timestampTzRangeArray ??= - new ArrayHandler((PostgresArrayType)PgType("tstzrange[]"), TsTzRange(), _arrayNullabilityMode); - - NpgsqlTypeHandler DateRangeArray() - => _dateRangeArray ??= - new 
ArrayHandler((PostgresArrayType)PgType("daterange[]"), DateRange(), _arrayNullabilityMode); -} diff --git a/src/Npgsql.NodaTime/Internal/NodaTimeTypeHandlerResolverFactory.cs b/src/Npgsql.NodaTime/Internal/NodaTimeTypeHandlerResolverFactory.cs deleted file mode 100644 index d1034e7f5e..0000000000 --- a/src/Npgsql.NodaTime/Internal/NodaTimeTypeHandlerResolverFactory.cs +++ /dev/null @@ -1,15 +0,0 @@ -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.Internal.TypeMapping; - -namespace Npgsql.NodaTime.Internal; - -public class NodaTimeTypeHandlerResolverFactory : TypeHandlerResolverFactory -{ - public override TypeHandlerResolver Create(TypeMapper typeMapper, NpgsqlConnector connector) - => new NodaTimeTypeHandlerResolver(connector); - - public override TypeMappingResolver CreateMappingResolver() => new NodaTimeTypeMappingResolver(); - - public override TypeMappingResolver CreateGlobalMappingResolver() => new NodaTimeTypeMappingResolver(); -} diff --git a/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolver.cs b/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolver.cs new file mode 100644 index 0000000000..66dcfc35dc --- /dev/null +++ b/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolver.cs @@ -0,0 +1,265 @@ +using System; +using System.Collections.Generic; +using NodaTime; +using Npgsql.Internal; +using Npgsql.Internal.Postgres; +using NpgsqlTypes; +using static Npgsql.NodaTime.Internal.NodaTimeUtils; +using static Npgsql.Internal.PgConverterFactory; + +namespace Npgsql.NodaTime.Internal; + +sealed class NodaTimeTypeInfoResolver : IPgTypeInfoResolver +{ + static DataTypeName TimestampTzDataTypeName => new("pg_catalog.timestamptz"); + static DataTypeName TimestampDataTypeName => new("pg_catalog.timestamp"); + static DataTypeName DateDataTypeName => new("pg_catalog.date"); + static DataTypeName TimeDataTypeName => new("pg_catalog.time"); + static DataTypeName TimeTzDataTypeName => new("pg_catalog.timetz"); + static DataTypeName 
IntervalDataTypeName => new("pg_catalog.interval"); + + static DataTypeName DateRangeDataTypeName => new("pg_catalog.daterange"); + static DataTypeName DateMultirangeDataTypeName => new("pg_catalog.datemultirange"); + static DataTypeName TimestampTzRangeDataTypeName => new("pg_catalog.tstzrange"); + static DataTypeName TimestampTzMultirangeDataTypeName => new("pg_catalog.tstzmultirange"); + static DataTypeName TimestampRangeDataTypeName => new("pg_catalog.tsrange"); + static DataTypeName TimestampMultirangeDataTypeName => new("pg_catalog.tsmultirange"); + + TypeInfoMappingCollection Mappings { get; } + + public NodaTimeTypeInfoResolver() + { + Mappings = new TypeInfoMappingCollection(); + AddInfos(Mappings); + // TODO: Opt-in only + AddArrayInfos(Mappings); + } + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static void AddInfos(TypeInfoMappingCollection mappings) + { + // timestamp and timestamptz, legacy and non-legacy modes + if (LegacyTimestampBehavior) + { + // timestamptz + mappings.AddStructType(new DataTypeName("pg_catalog.timestamptz"), + static (options, mapping, _) => + mapping.CreateInfo(options, new InstantConverter(options.EnableDateTimeInfinityConversions)), isDefault: false); + mappings.AddStructType(new DataTypeName("pg_catalog.timestamptz"), + static (options, mapping, _) => + mapping.CreateInfo(options, new LegacyTimestampTzZonedDateTimeConverter( + DateTimeZoneProviders.Tzdb[options.TimeZone], options.EnableDateTimeInfinityConversions))); + mappings.AddStructType(new DataTypeName("pg_catalog.timestamptz"), + static (options, mapping, _) => + mapping.CreateInfo(options, new LegacyTimestampTzOffsetDateTimeConverter( + DateTimeZoneProviders.Tzdb[options.TimeZone], options.EnableDateTimeInfinityConversions))); + + // timestamp + mappings.AddStructType(TimestampDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, new 
InstantConverter(options.EnableDateTimeInfinityConversions)), + isDefault: true); + mappings.AddStructType(TimestampDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, new LocalDateTimeConverter(options.EnableDateTimeInfinityConversions)), + isDefault: false); + } + else + { + // timestamptz + mappings.AddStructType(TimestampTzDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, new InstantConverter(options.EnableDateTimeInfinityConversions)), isDefault: true); + mappings.AddStructType(new DataTypeName("pg_catalog.timestamptz"), + static (options, mapping, _) => + mapping.CreateInfo(options, new ZonedDateTimeConverter(options.EnableDateTimeInfinityConversions))); + mappings.AddStructType(new DataTypeName("pg_catalog.timestamptz"), + static (options, mapping, _) => + mapping.CreateInfo(options, new OffsetDateTimeConverter(options.EnableDateTimeInfinityConversions))); + + // timestamp + mappings.AddStructType(TimestampDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, new LocalDateTimeConverter(options.EnableDateTimeInfinityConversions)), isDefault: true); + } + + // date + mappings.AddStructType(DateDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, new LocalDateConverter(options.EnableDateTimeInfinityConversions)), isDefault: true); + + // time + mappings.AddStructType(TimeDataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new LocalTimeConverter()), isDefault: true); + + // timetz + mappings.AddStructType(TimeTzDataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new OffsetTimeConverter()), isDefault: true); + + // interval + mappings.AddType(IntervalDataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new PeriodConverter()), isDefault: true); + mappings.AddStructType(IntervalDataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new DurationConverter())); + + // 
tstzrange + mappings.AddStructType(TimestampTzRangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, new IntervalConverter(CreateRangeConverter(new InstantConverter(options.EnableDateTimeInfinityConversions), options))), isDefault: true); + mappings.AddStructType>(TimestampTzRangeDataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, + CreateRangeConverter(new InstantConverter(options.EnableDateTimeInfinityConversions), options))); + mappings.AddStructType>(TimestampTzRangeDataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, + CreateRangeConverter(new ZonedDateTimeConverter(options.EnableDateTimeInfinityConversions), options))); + mappings.AddStructType>(TimestampTzRangeDataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, + CreateRangeConverter(new OffsetDateTimeConverter(options.EnableDateTimeInfinityConversions), options))); + + // tstzmultirange + mappings.AddType(TimestampTzMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateArrayMultirangeConverter(new IntervalConverter( + CreateRangeConverter(new InstantConverter(options.EnableDateTimeInfinityConversions), options)), options)), + isDefault: true); + mappings.AddType>(TimestampTzMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateListMultirangeConverter(new IntervalConverter( + CreateRangeConverter(new InstantConverter(options.EnableDateTimeInfinityConversions), options)), options))); + mappings.AddType[]>(TimestampTzMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateArrayMultirangeConverter(CreateRangeConverter(new InstantConverter(options.EnableDateTimeInfinityConversions), options), options))); + mappings.AddType>>(TimestampTzMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateListMultirangeConverter(CreateRangeConverter(new 
InstantConverter(options.EnableDateTimeInfinityConversions), options), options))); + mappings.AddType[]>(TimestampTzMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateArrayMultirangeConverter(CreateRangeConverter(new ZonedDateTimeConverter(options.EnableDateTimeInfinityConversions), options), options))); + mappings.AddType>>(TimestampTzMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateListMultirangeConverter(CreateRangeConverter(new ZonedDateTimeConverter(options.EnableDateTimeInfinityConversions), options), options))); + mappings.AddType[]>(TimestampTzMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateArrayMultirangeConverter(CreateRangeConverter(new OffsetDateTimeConverter(options.EnableDateTimeInfinityConversions), options), options))); + mappings.AddType>>(TimestampTzMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateListMultirangeConverter(CreateRangeConverter(new OffsetDateTimeConverter(options.EnableDateTimeInfinityConversions), options), options))); + + // tsrange + mappings.AddStructType>(TimestampRangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateRangeConverter(new LocalDateTimeConverter(options.EnableDateTimeInfinityConversions), options)), + isDefault: true); + + // tsmultirange + mappings.AddType[]>(TimestampMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateArrayMultirangeConverter(CreateRangeConverter(new LocalDateTimeConverter(options.EnableDateTimeInfinityConversions), options), options)), + isDefault: true); + mappings.AddType>>(TimestampMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateListMultirangeConverter(CreateRangeConverter(new LocalDateTimeConverter(options.EnableDateTimeInfinityConversions), options), options))); + + // daterange + 
mappings.AddType(DateRangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, new DateIntervalConverter( + CreateRangeConverter(new LocalDateConverter(options.EnableDateTimeInfinityConversions), options), + options.EnableDateTimeInfinityConversions)), isDefault: true); + mappings.AddStructType>(DateRangeDataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, + CreateRangeConverter(new LocalDateConverter(options.EnableDateTimeInfinityConversions), options))); + + // datemultirange + mappings.AddType(DateMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateArrayMultirangeConverter(new DateIntervalConverter( + CreateRangeConverter(new LocalDateConverter(options.EnableDateTimeInfinityConversions), options), + options.EnableDateTimeInfinityConversions), options)), + isDefault: true); + mappings.AddType>(DateMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateListMultirangeConverter(new DateIntervalConverter( + CreateRangeConverter(new LocalDateConverter(options.EnableDateTimeInfinityConversions), options), + options.EnableDateTimeInfinityConversions), options))); + mappings.AddType[]>(DateMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateArrayMultirangeConverter(CreateRangeConverter(new LocalDateConverter(options.EnableDateTimeInfinityConversions), options), options))); + mappings.AddType>>(DateMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateListMultirangeConverter(CreateRangeConverter(new LocalDateConverter(options.EnableDateTimeInfinityConversions), options), options))); + } + + static void AddArrayInfos(TypeInfoMappingCollection mappings) + { + // timestamptz + mappings.AddStructArrayType(TimestampTzDataTypeName); + mappings.AddStructArrayType(TimestampTzDataTypeName); + mappings.AddStructArrayType(TimestampTzDataTypeName); + + // timestamp + if 
(LegacyTimestampBehavior) + { + mappings.AddStructArrayType(TimestampDataTypeName); + + mappings.AddStructType(TimestampDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, new InstantConverter(options.EnableDateTimeInfinityConversions)), + isDefault: true); + mappings.AddStructType(TimestampDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, new LocalDateTimeConverter(options.EnableDateTimeInfinityConversions)), + isDefault: false); + } + else + { + mappings.AddStructType(TimestampDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, new LocalDateTimeConverter(options.EnableDateTimeInfinityConversions)), + isDefault: true); + } + mappings.AddStructArrayType(TimestampDataTypeName); + + // other + mappings.AddStructArrayType(DateDataTypeName); + mappings.AddStructArrayType(TimeDataTypeName); + mappings.AddStructArrayType(TimeTzDataTypeName); + mappings.AddArrayType(IntervalDataTypeName); + mappings.AddStructArrayType(IntervalDataTypeName); + + // tstzrange + mappings.AddStructArrayType(TimestampTzRangeDataTypeName); + mappings.AddStructArrayType>(TimestampTzRangeDataTypeName); + mappings.AddStructArrayType>(TimestampTzRangeDataTypeName); + mappings.AddStructArrayType>(TimestampTzRangeDataTypeName); + + // tstzmultirange + mappings.AddArrayType(TimestampTzMultirangeDataTypeName); + mappings.AddArrayType>(TimestampTzMultirangeDataTypeName); + mappings.AddArrayType[]>(TimestampTzMultirangeDataTypeName); + mappings.AddArrayType>>(TimestampTzMultirangeDataTypeName); + mappings.AddArrayType[]>(TimestampTzMultirangeDataTypeName); + mappings.AddArrayType>>(TimestampTzMultirangeDataTypeName); + mappings.AddArrayType[]>(TimestampTzMultirangeDataTypeName); + mappings.AddArrayType>>(TimestampTzMultirangeDataTypeName); + + // tsrange + mappings.AddStructArrayType>(TimestampRangeDataTypeName); + + // tsmultirange + mappings.AddArrayType[]>(TimestampMultirangeDataTypeName); + 
mappings.AddArrayType>>(TimestampMultirangeDataTypeName); + + // daterange + mappings.AddArrayType(DateRangeDataTypeName); + mappings.AddStructArrayType>(DateRangeDataTypeName); + + // datemultirange + mappings.AddArrayType(DateMultirangeDataTypeName); + mappings.AddArrayType>(DateMultirangeDataTypeName); + mappings.AddArrayType[]>(DateMultirangeDataTypeName); + mappings.AddArrayType>>(DateMultirangeDataTypeName); + } +} diff --git a/src/Npgsql.NodaTime/Internal/NodaTimeTypeMappingResolver.cs b/src/Npgsql.NodaTime/Internal/NodaTimeTypeMappingResolver.cs deleted file mode 100644 index dd5f271050..0000000000 --- a/src/Npgsql.NodaTime/Internal/NodaTimeTypeMappingResolver.cs +++ /dev/null @@ -1,99 +0,0 @@ -using System; -using System.Collections.Generic; -using NodaTime; -using Npgsql.Internal.TypeHandling; -using Npgsql.Internal.TypeMapping; -using Npgsql.PostgresTypes; -using NpgsqlTypes; -using static Npgsql.NodaTime.Internal.NodaTimeUtils; - -namespace Npgsql.NodaTime.Internal; - -public class NodaTimeTypeMappingResolver : TypeMappingResolver -{ - public override string? GetDataTypeNameByClrType(Type type) - => ClrTypeToDataTypeName(type); - - public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) - => DoGetMappingByDataTypeName(dataTypeName); - - static TypeMappingInfo? 
DoGetMappingByDataTypeName(string dataTypeName) - => dataTypeName switch - { - "timestamp" or "timestamp without time zone" => new(NpgsqlDbType.Timestamp, "timestamp without time zone"), - "timestamptz" or "timestamp with time zone" => new(NpgsqlDbType.TimestampTz, "timestamp with time zone"), - "date" => new(NpgsqlDbType.Date, "date"), - "time without time zone" => new(NpgsqlDbType.Time, "time without time zone"), - "time with time zone" => new(NpgsqlDbType.TimeTz, "time with time zone"), - "interval" => new(NpgsqlDbType.Interval, "interval"), - - "tsrange" => new(NpgsqlDbType.TimestampRange, "tsrange"), - "tstzrange" => new(NpgsqlDbType.TimestampTzRange, "tstzrange"), - "daterange" => new(NpgsqlDbType.DateRange, "daterange"), - - "tsmultirange" => new(NpgsqlDbType.TimestampMultirange, "tsmultirange"), - "tstzmultirange" => new(NpgsqlDbType.TimestampTzMultirange, "tstzmultirange"), - "datemultirange" => new(NpgsqlDbType.DateMultirange, "datemultirange"), - - _ => null - }; - - internal static string? ClrTypeToDataTypeName(Type type) - { - if (type == typeof(Instant)) - return LegacyTimestampBehavior ? 
"timestamp without time zone" : "timestamp with time zone"; - - if (type == typeof(LocalDateTime)) - return "timestamp without time zone"; - if (type == typeof(ZonedDateTime) || type == typeof(OffsetDateTime)) - return "timestamp with time zone"; - if (type == typeof(LocalDate)) - return "date"; - if (type == typeof(LocalTime)) - return "time without time zone"; - if (type == typeof(OffsetTime)) - return "time with time zone"; - if (type == typeof(Period) || type == typeof(Duration)) - return "interval"; - - // Ranges - if (type == typeof(NpgsqlRange)) - return "tsrange"; - - if (type == typeof(Interval) || - type == typeof(NpgsqlRange) || - type == typeof(NpgsqlRange) || - type == typeof(NpgsqlRange)) - { - return "tstzrange"; - } - - if (type == typeof(DateInterval) || type == typeof(NpgsqlRange)) - return "daterange"; - - // Multiranges - if (type == typeof(NpgsqlRange[]) || type == typeof(List>)) - return "tsmultirange"; - - if (type == typeof(Interval[]) || - type == typeof(List) || - type == typeof(NpgsqlRange[]) || - type == typeof(List>) || - type == typeof(NpgsqlRange[]) || - type == typeof(List>) || - type == typeof(NpgsqlRange[]) || - type == typeof(List>)) - { - return "tstzmultirange"; - } - if (type == typeof(DateInterval[]) || - type == typeof(List) || - type == typeof(NpgsqlRange[]) || - type == typeof(List>)) - { - return "datemultirange"; - } - - return null; - } -} diff --git a/src/Npgsql.NodaTime/Internal/NodaTimeUtils.cs b/src/Npgsql.NodaTime/Internal/NodaTimeUtils.cs index ff37bdd196..1cf433759a 100644 --- a/src/Npgsql.NodaTime/Internal/NodaTimeUtils.cs +++ b/src/Npgsql.NodaTime/Internal/NodaTimeUtils.cs @@ -1,5 +1,6 @@ using System; using NodaTime; +using Npgsql.NodaTime.Properties; namespace Npgsql.NodaTime.Internal; @@ -7,17 +8,11 @@ static class NodaTimeUtils { #if DEBUG internal static bool LegacyTimestampBehavior; - internal static bool DisableDateTimeInfinityConversions; #else internal static readonly bool LegacyTimestampBehavior; - 
internal static readonly bool DisableDateTimeInfinityConversions; #endif - static NodaTimeUtils() - { - LegacyTimestampBehavior = AppContext.TryGetSwitch("Npgsql.EnableLegacyTimestampBehavior", out var enabled) && enabled; - DisableDateTimeInfinityConversions = AppContext.TryGetSwitch("Npgsql.DisableDateTimeInfinityConversions", out enabled) && enabled; - } + static NodaTimeUtils() => LegacyTimestampBehavior = AppContext.TryGetSwitch("Npgsql.EnableLegacyTimestampBehavior", out var enabled) && enabled; static readonly Instant Instant2000 = Instant.FromUtc(2000, 1, 1, 0, 0, 0); static readonly Duration Plus292Years = Duration.FromDays(292 * 365); @@ -27,17 +22,36 @@ static NodaTimeUtils() /// Decodes a PostgreSQL timestamp/timestamptz into a NodaTime Instant. /// /// The number of microseconds from 2000-01-01T00:00:00. + /// Whether infinity date/time conversions are enabled. /// /// Unfortunately NodaTime doesn't have Duration.FromMicroseconds(), so we decompose into milliseconds and nanoseconds. /// - internal static Instant DecodeInstant(long value) - => Instant2000 + Duration.FromMilliseconds(value / 1000) + Duration.FromNanoseconds(value % 1000 * 1000); + internal static Instant DecodeInstant(long value, bool dateTimeInfinityConversions) + => value switch + { + long.MaxValue => dateTimeInfinityConversions + ? Instant.MaxValue + : throw new InvalidCastException(NpgsqlNodaTimeStrings.CannotReadInfinityValue), + long.MinValue => dateTimeInfinityConversions + ? Instant.MinValue + : throw new InvalidCastException(NpgsqlNodaTimeStrings.CannotReadInfinityValue), + _ => Instant2000 + Duration.FromMilliseconds(value / 1000) + Duration.FromNanoseconds(value % 1000 * 1000) + }; /// /// Encodes a NodaTime Instant to a PostgreSQL timestamp/timestamptz. 
/// - internal static long EncodeInstant(Instant instant) + internal static long EncodeInstant(Instant instant, bool dateTimeInfinityConversions) { + if (dateTimeInfinityConversions) + { + if (instant == Instant.MaxValue) + return long.MaxValue; + + if (instant == Instant.MinValue) + return long.MinValue; + } + // We need to write the number of microseconds from 2000-01-01T00:00:00. var since2000 = instant - Instant2000; @@ -46,4 +60,4 @@ internal static long EncodeInstant(Instant instant) ? since2000.ToInt64Nanoseconds() / 1000 : (long)(since2000.ToBigIntegerNanoseconds() / 1000); } -} \ No newline at end of file +} diff --git a/src/Npgsql.NodaTime/Internal/OffsetTimeConverter.cs b/src/Npgsql.NodaTime/Internal/OffsetTimeConverter.cs new file mode 100644 index 0000000000..7c5499c2f8 --- /dev/null +++ b/src/Npgsql.NodaTime/Internal/OffsetTimeConverter.cs @@ -0,0 +1,23 @@ +using NodaTime; +using Npgsql.Internal; + +namespace Npgsql.NodaTime.Internal; + +sealed class OffsetTimeConverter : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long) + sizeof(int)); + return format is DataFormat.Binary; + } + + // Adjust from 1 microsecond to 100ns. Time zone (in seconds) is inverted. 
+ protected override OffsetTime ReadCore(PgReader reader) + => new(LocalTime.FromTicksSinceMidnight(reader.ReadInt64() * 10), Offset.FromSeconds(-reader.ReadInt32())); + + protected override void WriteCore(PgWriter writer, OffsetTime value) + { + writer.WriteInt64(value.TickOfDay / 10); + writer.WriteInt32(-(int)(value.Offset.Ticks / NodaConstants.TicksPerSecond)); + } +} diff --git a/src/Npgsql.NodaTime/Internal/PeriodConverter.cs b/src/Npgsql.NodaTime/Internal/PeriodConverter.cs new file mode 100644 index 0000000000..4dbde48dbc --- /dev/null +++ b/src/Npgsql.NodaTime/Internal/PeriodConverter.cs @@ -0,0 +1,46 @@ +using NodaTime; +using Npgsql.Internal; + +namespace Npgsql.NodaTime.Internal; + +sealed class PeriodConverter : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long) + sizeof(int) + sizeof(int)); + return format is DataFormat.Binary; + } + + protected override Period ReadCore(PgReader reader) + { + var microsecondsInDay = reader.ReadInt64(); + var days = reader.ReadInt32(); + var totalMonths = reader.ReadInt32(); + + // NodaTime will normalize most things (i.e. nanoseconds to milliseconds, seconds...) + // but it will not normalize months to years. 
+ var months = totalMonths % 12; + var years = totalMonths / 12; + + return new PeriodBuilder + { + Nanoseconds = microsecondsInDay * 1000, + Days = days, + Months = months, + Years = years + }.Build().Normalize(); + } + + protected override void WriteCore(PgWriter writer, Period value) + { + // Note that the end result must be long + // see #3438 + var microsecondsInDay = + (((value.Hours * NodaConstants.MinutesPerHour + value.Minutes) * NodaConstants.SecondsPerMinute + value.Seconds) * NodaConstants.MillisecondsPerSecond + value.Milliseconds) * 1000 + + value.Nanoseconds / 1000; // Take the microseconds, discard the nanosecond remainder + + writer.WriteInt64(microsecondsInDay); + writer.WriteInt32(value.Weeks * 7 + value.Days); // days + writer.WriteInt32(value.Years * 12 + value.Months); // months + } +} diff --git a/src/Npgsql.NodaTime/Internal/TimeHandler.cs b/src/Npgsql.NodaTime/Internal/TimeHandler.cs deleted file mode 100644 index 5171745764..0000000000 --- a/src/Npgsql.NodaTime/Internal/TimeHandler.cs +++ /dev/null @@ -1,53 +0,0 @@ -using System; -using System.Threading; -using System.Threading.Tasks; -using NodaTime; -using Npgsql.BackendMessages; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using BclTimeHandler = Npgsql.Internal.TypeHandlers.DateTimeHandlers.TimeHandler; - -namespace Npgsql.NodaTime.Internal; - -sealed partial class TimeHandler : NpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler -#if NET6_0_OR_GREATER - , INpgsqlSimpleTypeHandler -#endif -{ - readonly BclTimeHandler _bclHandler; - - internal TimeHandler(PostgresType postgresType) - : base(postgresType) - => _bclHandler = new BclTimeHandler(postgresType); - - // PostgreSQL time resolution == 1 microsecond == 10 ticks - public override LocalTime Read(NpgsqlReadBuffer buf, int len, FieldDescription? 
fieldDescription = null) - => LocalTime.FromTicksSinceMidnight(buf.ReadInt64() * 10); - - public override int ValidateAndGetLength(LocalTime value, NpgsqlParameter? parameter) - => 8; - - public override void Write(LocalTime value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => buf.WriteInt64(value.TickOfDay / 10); - - TimeSpan INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => _bclHandler.Read(buf, len, fieldDescription); - - int INpgsqlSimpleTypeHandler.ValidateAndGetLength(TimeSpan value, NpgsqlParameter? parameter) - => _bclHandler.ValidateAndGetLength(value, parameter); - - void INpgsqlSimpleTypeHandler.Write(TimeSpan value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => _bclHandler.Write(value, buf, parameter); - -#if NET6_0_OR_GREATER - TimeOnly INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => _bclHandler.Read(buf, len, fieldDescription); - - public int ValidateAndGetLength(TimeOnly value, NpgsqlParameter? parameter) - => _bclHandler.ValidateAndGetLength(value, parameter); - - public void Write(TimeOnly value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) - => _bclHandler.Write(value, buf, parameter); -#endif -} \ No newline at end of file diff --git a/src/Npgsql.NodaTime/Internal/TimeTzHandler.cs b/src/Npgsql.NodaTime/Internal/TimeTzHandler.cs deleted file mode 100644 index d8ace650dc..0000000000 --- a/src/Npgsql.NodaTime/Internal/TimeTzHandler.cs +++ /dev/null @@ -1,41 +0,0 @@ -using System; -using NodaTime; -using Npgsql.BackendMessages; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using BclTimeTzHandler = Npgsql.Internal.TypeHandlers.DateTimeHandlers.TimeTzHandler; - -namespace Npgsql.NodaTime.Internal; - -sealed partial class TimeTzHandler : NpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler -{ - readonly BclTimeTzHandler _bclHandler; - - internal TimeTzHandler(PostgresType postgresType) - : base(postgresType) - => _bclHandler = new BclTimeTzHandler(postgresType); - - // Adjust from 1 microsecond to 100ns. Time zone (in seconds) is inverted. - public override OffsetTime Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => new( - LocalTime.FromTicksSinceMidnight(buf.ReadInt64() * 10), - Offset.FromSeconds(-buf.ReadInt32())); - - public override int ValidateAndGetLength(OffsetTime value, NpgsqlParameter? parameter) => 12; - - public override void Write(OffsetTime value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - { - buf.WriteInt64(value.TickOfDay / 10); - buf.WriteInt32(-(int)(value.Offset.Ticks / NodaConstants.TicksPerSecond)); - } - - DateTimeOffset INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => _bclHandler.Read(buf, len, fieldDescription); - - int INpgsqlSimpleTypeHandler.ValidateAndGetLength(DateTimeOffset value, NpgsqlParameter? parameter) - => _bclHandler.ValidateAndGetLength(value, parameter); - - void INpgsqlSimpleTypeHandler.Write(DateTimeOffset value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) - => _bclHandler.Write(value, buf, parameter); -} \ No newline at end of file diff --git a/src/Npgsql.NodaTime/Internal/TimestampConverters.cs b/src/Npgsql.NodaTime/Internal/TimestampConverters.cs new file mode 100644 index 0000000000..6808503638 --- /dev/null +++ b/src/Npgsql.NodaTime/Internal/TimestampConverters.cs @@ -0,0 +1,106 @@ +using System; +using NodaTime; +using Npgsql.Internal; +using static Npgsql.NodaTime.Internal.NodaTimeUtils; + +namespace Npgsql.NodaTime.Internal; + +sealed class InstantConverter : PgBufferedConverter +{ + readonly bool _dateTimeInfinityConversions; + + public InstantConverter(bool dateTimeInfinityConversions) + => _dateTimeInfinityConversions = dateTimeInfinityConversions; + + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long)); + return format is DataFormat.Binary; + } + + protected override Instant ReadCore(PgReader reader) + => DecodeInstant(reader.ReadInt64(), _dateTimeInfinityConversions); + + protected override void WriteCore(PgWriter writer, Instant value) + => writer.WriteInt64(EncodeInstant(value, _dateTimeInfinityConversions)); +} + +sealed class ZonedDateTimeConverter : PgBufferedConverter +{ + readonly bool _dateTimeInfinityConversions; + + public ZonedDateTimeConverter(bool dateTimeInfinityConversions) + => _dateTimeInfinityConversions = dateTimeInfinityConversions; + + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long)); + return format is DataFormat.Binary; + } + + protected override ZonedDateTime ReadCore(PgReader reader) + => DecodeInstant(reader.ReadInt64(), _dateTimeInfinityConversions).InUtc(); + + protected override void WriteCore(PgWriter writer, ZonedDateTime value) + { + if (value.Zone != DateTimeZone.Utc && !LegacyTimestampBehavior) + { + throw new 
ArgumentException( + $"Cannot write ZonedDateTime with Zone={value.Zone} to PostgreSQL type 'timestamp with time zone', " + + "only UTC is supported. " + + "See the Npgsql.EnableLegacyTimestampBehavior AppContext switch to enable legacy behavior."); + } + + writer.WriteInt64(EncodeInstant(value.ToInstant(), _dateTimeInfinityConversions)); + } +} + +sealed class OffsetDateTimeConverter : PgBufferedConverter +{ + readonly bool _dateTimeInfinityConversions; + + public OffsetDateTimeConverter(bool dateTimeInfinityConversions) + => _dateTimeInfinityConversions = dateTimeInfinityConversions; + + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long)); + return format is DataFormat.Binary; + } + + protected override OffsetDateTime ReadCore(PgReader reader) + => DecodeInstant(reader.ReadInt64(), _dateTimeInfinityConversions).WithOffset(Offset.Zero); + + protected override void WriteCore(PgWriter writer, OffsetDateTime value) + { + if (value.Offset != Offset.Zero && !LegacyTimestampBehavior) + { + throw new ArgumentException( + $"Cannot write OffsetDateTime with Offset={value.Offset} to PostgreSQL type 'timestamp with time zone', " + + "only offset 0 (UTC) is supported. 
" + + "See the Npgsql.EnableLegacyTimestampBehavior AppContext switch to enable legacy behavior."); + } + + writer.WriteInt64(EncodeInstant(value.ToInstant(), _dateTimeInfinityConversions)); + } +} + +sealed class LocalDateTimeConverter : PgBufferedConverter +{ + readonly bool _dateTimeInfinityConversions; + + public LocalDateTimeConverter(bool dateTimeInfinityConversions) + => _dateTimeInfinityConversions = dateTimeInfinityConversions; + + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long)); + return format is DataFormat.Binary; + } + + protected override LocalDateTime ReadCore(PgReader reader) + => DecodeInstant(reader.ReadInt64(), _dateTimeInfinityConversions).InUtc().LocalDateTime; + + protected override void WriteCore(PgWriter writer, LocalDateTime value) + => writer.WriteInt64(EncodeInstant(value.InUtc().ToInstant(), _dateTimeInfinityConversions)); +} diff --git a/src/Npgsql.NodaTime/Internal/TimestampHandler.cs b/src/Npgsql.NodaTime/Internal/TimestampHandler.cs deleted file mode 100644 index 15c254e3d0..0000000000 --- a/src/Npgsql.NodaTime/Internal/TimestampHandler.cs +++ /dev/null @@ -1,88 +0,0 @@ -using System; -using NodaTime; -using Npgsql.BackendMessages; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.NodaTime.Properties; -using Npgsql.PostgresTypes; -using BclTimestampHandler = Npgsql.Internal.TypeHandlers.DateTimeHandlers.TimestampHandler; -using static Npgsql.NodaTime.Internal.NodaTimeUtils; - -namespace Npgsql.NodaTime.Internal; - -sealed partial class TimestampHandler : NpgsqlSimpleTypeHandler, - INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler -{ - readonly BclTimestampHandler _bclHandler; - - internal TimestampHandler(PostgresType postgresType) - : base(postgresType) - => _bclHandler = new BclTimestampHandler(postgresType); - - #region Read - - public override LocalDateTime Read(NpgsqlReadBuffer buf, 
int len, FieldDescription? fieldDescription) - => ReadLocalDateTime(buf); - - internal static LocalDateTime ReadLocalDateTime(NpgsqlReadBuffer buf) - => buf.ReadInt64() switch - { - long.MaxValue => DisableDateTimeInfinityConversions - ? throw new InvalidCastException(NpgsqlNodaTimeStrings.CannotReadInfinityValue) - : LocalDateTime.MaxIsoValue, - long.MinValue => DisableDateTimeInfinityConversions - ? throw new InvalidCastException(NpgsqlNodaTimeStrings.CannotReadInfinityValue) - : LocalDateTime.MinIsoValue, - var value => DecodeInstant(value).InUtc().LocalDateTime - }; - - DateTime INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => _bclHandler.Read(buf, len, fieldDescription); - - long INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => ((INpgsqlSimpleTypeHandler)_bclHandler).Read(buf, len, fieldDescription); - - #endregion Read - - #region Write - - public override int ValidateAndGetLength(LocalDateTime value, NpgsqlParameter? parameter) - => 8; - - public override void Write(LocalDateTime value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => WriteLocalDateTime(value, buf); - - internal static void WriteLocalDateTime(LocalDateTime value, NpgsqlWriteBuffer buf) - { - if (!DisableDateTimeInfinityConversions) - { - if (value == LocalDateTime.MaxIsoValue) - { - buf.WriteInt64(long.MaxValue); - return; - } - - if (value == LocalDateTime.MinIsoValue) - { - buf.WriteInt64(long.MinValue); - return; - } - } - - buf.WriteInt64(EncodeInstant(value.InUtc().ToInstant())); - } - - public int ValidateAndGetLength(DateTime value, NpgsqlParameter? parameter) - => ((INpgsqlSimpleTypeHandler)_bclHandler).ValidateAndGetLength(value, parameter); - - public int ValidateAndGetLength(long value, NpgsqlParameter? 
parameter) - => ((INpgsqlSimpleTypeHandler)_bclHandler).ValidateAndGetLength(value, parameter); - - void INpgsqlSimpleTypeHandler.Write(DateTime value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => ((INpgsqlSimpleTypeHandler)_bclHandler).Write(value, buf, parameter); - - void INpgsqlSimpleTypeHandler.Write(long value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => ((INpgsqlSimpleTypeHandler)_bclHandler).Write(value, buf, parameter); - - #endregion Write -} \ No newline at end of file diff --git a/src/Npgsql.NodaTime/Internal/TimestampTzHandler.cs b/src/Npgsql.NodaTime/Internal/TimestampTzHandler.cs deleted file mode 100644 index fa1924656a..0000000000 --- a/src/Npgsql.NodaTime/Internal/TimestampTzHandler.cs +++ /dev/null @@ -1,126 +0,0 @@ -using System; -using NodaTime; -using Npgsql.BackendMessages; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.NodaTime.Properties; -using Npgsql.PostgresTypes; -using BclTimestampTzHandler = Npgsql.Internal.TypeHandlers.DateTimeHandlers.TimestampTzHandler; -using static Npgsql.NodaTime.Internal.NodaTimeUtils; - -namespace Npgsql.NodaTime.Internal; - -sealed partial class TimestampTzHandler : NpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler, - INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler, - INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler -{ - readonly BclTimestampTzHandler _bclHandler; - - public TimestampTzHandler(PostgresType postgresType) - : base(postgresType) - => _bclHandler = new BclTimestampTzHandler(postgresType); - - #region Read - - public override Instant Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => ReadInstant(buf); - - internal static Instant ReadInstant(NpgsqlReadBuffer buf) - => buf.ReadInt64() switch - { - long.MaxValue => DisableDateTimeInfinityConversions - ? throw new InvalidCastException(NpgsqlNodaTimeStrings.CannotReadInfinityValue) - : Instant.MaxValue, - long.MinValue => DisableDateTimeInfinityConversions - ? 
throw new InvalidCastException(NpgsqlNodaTimeStrings.CannotReadInfinityValue) - : Instant.MinValue, - var value => DecodeInstant(value) - }; - - ZonedDateTime INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => Read(buf, len, fieldDescription).InUtc(); - - OffsetDateTime INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => Read(buf, len, fieldDescription).WithOffset(Offset.Zero); - - DateTimeOffset INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => _bclHandler.Read(buf, len, fieldDescription); - - DateTime INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => _bclHandler.Read(buf, len, fieldDescription); - - long INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => ((INpgsqlSimpleTypeHandler)_bclHandler).Read(buf, len, fieldDescription); - - #endregion Read - - #region Write - - public override int ValidateAndGetLength(Instant value, NpgsqlParameter? parameter) - => 8; - - int INpgsqlSimpleTypeHandler.ValidateAndGetLength(ZonedDateTime value, NpgsqlParameter? parameter) - => value.Zone == DateTimeZone.Utc || LegacyTimestampBehavior - ? 8 - : throw new InvalidCastException( - $"Cannot write ZonedDateTime with Zone={value.Zone} to PostgreSQL type 'timestamp with time zone', " + - "only UTC is supported. " + - "See the Npgsql.EnableLegacyTimestampBehavior AppContext switch to enable legacy behavior."); - - public int ValidateAndGetLength(OffsetDateTime value, NpgsqlParameter? parameter) - => value.Offset == Offset.Zero || LegacyTimestampBehavior - ? 8 - : throw new InvalidCastException( - $"Cannot write OffsetDateTime with Offset={value.Offset} to PostgreSQL type 'timestamp with time zone', " + - "only offset 0 (UTC) is supported. 
" + - "See the Npgsql.EnableLegacyTimestampBehavior AppContext switch to enable legacy behavior."); - - public override void Write(Instant value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => WriteInstant(value, buf); - - internal static void WriteInstant(Instant value, NpgsqlWriteBuffer buf) - { - if (!DisableDateTimeInfinityConversions) - { - if (value == Instant.MaxValue) - { - buf.WriteInt64(long.MaxValue); - return; - } - - if (value == Instant.MinValue) - { - buf.WriteInt64(long.MinValue); - return; - } - } - - buf.WriteInt64(EncodeInstant(value)); - } - - void INpgsqlSimpleTypeHandler.Write(ZonedDateTime value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => Write(value.ToInstant(), buf, parameter); - - public void Write(OffsetDateTime value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => Write(value.ToInstant(), buf, parameter); - - int INpgsqlSimpleTypeHandler.ValidateAndGetLength(DateTimeOffset value, NpgsqlParameter? parameter) - => _bclHandler.ValidateAndGetLength(value, parameter); - - void INpgsqlSimpleTypeHandler.Write(DateTimeOffset value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => _bclHandler.Write(value, buf, parameter); - - int INpgsqlSimpleTypeHandler.ValidateAndGetLength(DateTime value, NpgsqlParameter? parameter) - => ((INpgsqlSimpleTypeHandler)_bclHandler).ValidateAndGetLength(value, parameter); - - public int ValidateAndGetLength(long value, NpgsqlParameter? parameter) - => ((INpgsqlSimpleTypeHandler)_bclHandler).ValidateAndGetLength(value, parameter); - - void INpgsqlSimpleTypeHandler.Write(DateTime value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => _bclHandler.Write(value, buf, parameter); - - void INpgsqlSimpleTypeHandler.Write(long value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) - => ((INpgsqlSimpleTypeHandler)_bclHandler).Write(value, buf, parameter); - - #endregion Write -} \ No newline at end of file diff --git a/src/Npgsql.NodaTime/Internal/TimestampTzMultirangeHandler.cs b/src/Npgsql.NodaTime/Internal/TimestampTzMultirangeHandler.cs deleted file mode 100644 index a13bb091b2..0000000000 --- a/src/Npgsql.NodaTime/Internal/TimestampTzMultirangeHandler.cs +++ /dev/null @@ -1,202 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Threading; -using System.Threading.Tasks; -using NodaTime; -using Npgsql.BackendMessages; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandlers; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.NodaTime.Internal; - -public partial class TimestampTzMultirangeHandler : MultirangeHandler, - INpgsqlTypeHandler, INpgsqlTypeHandler>, - INpgsqlTypeHandler[]>, INpgsqlTypeHandler>>, - INpgsqlTypeHandler[]>, INpgsqlTypeHandler>>, - INpgsqlTypeHandler[]>, INpgsqlTypeHandler>>, - INpgsqlTypeHandler[]>, INpgsqlTypeHandler>> -{ - readonly INpgsqlTypeHandler _intervalHandler; - - public override Type GetFieldType(FieldDescription? fieldDescription = null) => typeof(Interval[]); - - public TimestampTzMultirangeHandler(PostgresMultirangeType pgMultirangeType, TimestampTzRangeHandler rangeHandler) - : base(pgMultirangeType, rangeHandler) - => _intervalHandler = rangeHandler; - - public override async ValueTask ReadAsObject(NpgsqlReadBuffer buf, int len, bool async, - FieldDescription? fieldDescription = null) - => (await Read(buf, len, async, fieldDescription))!; - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, - FieldDescription? 
fieldDescription) - { - await buf.Ensure(4, async); - var numRanges = buf.ReadInt32(); - var multirange = new Interval[numRanges]; - - for (var i = 0; i < multirange.Length; i++) - { - await buf.Ensure(4, async); - var rangeLen = buf.ReadInt32(); - multirange[i] = await _intervalHandler.Read(buf, rangeLen, async, fieldDescription); - } - - return multirange; - } - - async ValueTask> INpgsqlTypeHandler>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - { - await buf.Ensure(4, async); - var numRanges = buf.ReadInt32(); - var multirange = new List(numRanges); - - for (var i = 0; i < numRanges; i++) - { - await buf.Ensure(4, async); - var rangeLen = buf.ReadInt32(); - multirange.Add(await _intervalHandler.Read(buf, rangeLen, async, fieldDescription)); - } - - return multirange; - } - - public int ValidateAndGetLength(List value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthCore(value, ref lengthCache); - - public int ValidateAndGetLength(Interval[] value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthCore(value, ref lengthCache); - - int ValidateAndGetLengthCore(IList value, ref NpgsqlLengthCache? lengthCache) - { - lengthCache ??= new NpgsqlLengthCache(1); - if (lengthCache.IsPopulated) - return lengthCache.Get(); - - var sum = 4 + 4 * value.Count; - for (var i = 0; i < value.Count; i++) - sum += _intervalHandler.ValidateAndGetLength(value[i], ref lengthCache, parameter: null); - - return lengthCache!.Set(sum); - } - - public async Task Write(Interval[] value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, - NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 4) - await buf.Flush(async, cancellationToken); - - buf.WriteInt32(value.Length); - - for (var i = 0; i < value.Length; i++) - await RangeHandler.WriteWithLength(value[i], buf, lengthCache, parameter: null, async, cancellationToken); - } - - public async Task Write(List value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 4) - await buf.Flush(async, cancellationToken); - - buf.WriteInt32(value.Count); - - for (var i = 0; i < value.Count; i++) - await RangeHandler.WriteWithLength(value[i], buf, lengthCache, parameter: null, async, cancellationToken); - } - - #region Boilerplate - - ValueTask[]> INpgsqlTypeHandler[]>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadMultirangeArray(buf, len, async, fieldDescription); - - ValueTask>> INpgsqlTypeHandler>>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadMultirangeList(buf, len, async, fieldDescription); - - ValueTask[]> INpgsqlTypeHandler[]>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadMultirangeArray(buf, len, async, fieldDescription); - - ValueTask>> INpgsqlTypeHandler>>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadMultirangeList(buf, len, async, fieldDescription); - - ValueTask[]> INpgsqlTypeHandler[]>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadMultirangeArray(buf, len, async, fieldDescription); - - ValueTask>> INpgsqlTypeHandler>>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription) - => ReadMultirangeList(buf, len, async, fieldDescription); - - ValueTask[]> INpgsqlTypeHandler[]>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadMultirangeArray(buf, len, async, fieldDescription); - - ValueTask>> INpgsqlTypeHandler>>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadMultirangeList(buf, len, async, fieldDescription); - - public int ValidateAndGetLength(NpgsqlRange[] value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthMultirange(value, ref lengthCache, parameter); - - public int ValidateAndGetLength(List> value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthMultirange(value, ref lengthCache, parameter); - - public int ValidateAndGetLength(NpgsqlRange[] value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthMultirange(value, ref lengthCache, parameter); - - public int ValidateAndGetLength(List> value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthMultirange(value, ref lengthCache, parameter); - - public int ValidateAndGetLength(NpgsqlRange[] value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthMultirange(value, ref lengthCache, parameter); - - public int ValidateAndGetLength(List> value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthMultirange(value, ref lengthCache, parameter); - - public int ValidateAndGetLength(NpgsqlRange[] value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthMultirange(value, ref lengthCache, parameter); - - public int ValidateAndGetLength(List> value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - => ValidateAndGetLengthMultirange(value, ref lengthCache, parameter); - - public Task Write(NpgsqlRange[] value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => WriteMultirange(value, buf, lengthCache, parameter, async, cancellationToken); - - public Task Write(List> value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => WriteMultirange(value, buf, lengthCache, parameter, async, cancellationToken); - - public Task Write(NpgsqlRange[] value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => WriteMultirange(value, buf, lengthCache, parameter, async, cancellationToken); - - public Task Write(List> value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => WriteMultirange(value, buf, lengthCache, parameter, async, cancellationToken); - - public Task Write(NpgsqlRange[] value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => WriteMultirange(value, buf, lengthCache, parameter, async, cancellationToken); - - public Task Write(List> value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => WriteMultirange(value, buf, lengthCache, parameter, async, cancellationToken); - - public Task Write(NpgsqlRange[] value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, - NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) - => WriteMultirange(value, buf, lengthCache, parameter, async, cancellationToken); - - public Task Write(List> value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => WriteMultirange(value, buf, lengthCache, parameter, async, cancellationToken); - - #endregion Boilerplate -} \ No newline at end of file diff --git a/src/Npgsql.NodaTime/Internal/TimestampTzRangeHandler.cs b/src/Npgsql.NodaTime/Internal/TimestampTzRangeHandler.cs deleted file mode 100644 index 8205cc17ef..0000000000 --- a/src/Npgsql.NodaTime/Internal/TimestampTzRangeHandler.cs +++ /dev/null @@ -1,105 +0,0 @@ -using System; -using System.Threading; -using System.Threading.Tasks; -using NodaTime; -using Npgsql.BackendMessages; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandlers; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.NodaTime.Internal; - -public partial class TimestampTzRangeHandler : RangeHandler, - INpgsqlTypeHandler, INpgsqlTypeHandler>, INpgsqlTypeHandler>, - INpgsqlTypeHandler>, INpgsqlTypeHandler> -{ - public override Type GetFieldType(FieldDescription? fieldDescription = null) => typeof(Interval); - - public TimestampTzRangeHandler(PostgresType rangePostgresType, NpgsqlTypeHandler subtypeHandler) - : base(rangePostgresType, subtypeHandler) - { - } - - public override async ValueTask ReadAsObject(NpgsqlReadBuffer buf, int len, bool async, - FieldDescription? fieldDescription = null) - => (await Read(buf, len, async, fieldDescription))!; - - // internal Interval ConvertRangetoInterval(NpgsqlRange range) - async ValueTask INpgsqlTypeHandler.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription) - { - var range = await Read(buf, len, async, fieldDescription); - - // NodaTime Interval includes the start instant and excludes the end instant. - Instant? start = range.LowerBoundInfinite - ? null - : range.LowerBoundIsInclusive - ? range.LowerBound - : range.LowerBound + Duration.Epsilon; - Instant? end = range.UpperBoundInfinite - ? null - : range.UpperBoundIsInclusive - ? range.UpperBound + Duration.Epsilon - : range.UpperBound; - return new(start, end); - } - - public int ValidateAndGetLength(Interval value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthRange( - new NpgsqlRange(value.HasStart ? value.Start : default, true, !value.HasStart, value.HasEnd ? value.End : default, false, !value.HasEnd), ref lengthCache, parameter); - - public Task Write(Interval value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => WriteRange(new NpgsqlRange(value.HasStart ? value.Start : default, true, !value.HasStart, value.HasEnd ? value.End : default, false, !value.HasEnd), - buf, lengthCache, parameter, async, cancellationToken); - - #region Boilerplate - - ValueTask> INpgsqlTypeHandler>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadRange(buf, len, async, fieldDescription); - - ValueTask> INpgsqlTypeHandler>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadRange(buf, len, async, fieldDescription); - - ValueTask> INpgsqlTypeHandler>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadRange(buf, len, async, fieldDescription); - - ValueTask> INpgsqlTypeHandler>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadRange(buf, len, async, fieldDescription); - - public int ValidateAndGetLength(NpgsqlRange value, ref NpgsqlLengthCache? 
lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthRange(value, ref lengthCache, parameter); - - public int ValidateAndGetLength(NpgsqlRange value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthRange(value, ref lengthCache, parameter); - - public int ValidateAndGetLength(NpgsqlRange value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthRange(value, ref lengthCache, parameter); - - public int ValidateAndGetLength(NpgsqlRange value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthRange(value, ref lengthCache, parameter); - - public Task Write(NpgsqlRange value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => WriteRange(value, buf, lengthCache, parameter, async, cancellationToken); - - public Task Write(NpgsqlRange value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => WriteRange(value, buf, lengthCache, parameter, async, cancellationToken); - - public Task Write(NpgsqlRange value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => WriteRange(value, buf, lengthCache, parameter, async, cancellationToken); - - public Task Write(NpgsqlRange value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, - NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) - => WriteRange(value, buf, lengthCache, parameter, async, cancellationToken); - - #endregion Boilerplate -} \ No newline at end of file diff --git a/src/Npgsql.NodaTime/NpgsqlNodaTimeExtensions.cs b/src/Npgsql.NodaTime/NpgsqlNodaTimeExtensions.cs index 9fe67ec485..030f1ec1be 100644 --- a/src/Npgsql.NodaTime/NpgsqlNodaTimeExtensions.cs +++ b/src/Npgsql.NodaTime/NpgsqlNodaTimeExtensions.cs @@ -15,7 +15,7 @@ public static class NpgsqlNodaTimeExtensions /// The type mapper to set up (global or connection-specific) public static INpgsqlTypeMapper UseNodaTime(this INpgsqlTypeMapper mapper) { - mapper.AddTypeResolverFactory(new NodaTimeTypeHandlerResolverFactory()); + mapper.AddTypeInfoResolver(new NodaTimeTypeInfoResolver()); return mapper; } -} \ No newline at end of file +} diff --git a/src/Npgsql.NodaTime/Properties/AssemblyInfo.cs b/src/Npgsql.NodaTime/Properties/AssemblyInfo.cs index cf71b6d0b6..a03d5a93d6 100644 --- a/src/Npgsql.NodaTime/Properties/AssemblyInfo.cs +++ b/src/Npgsql.NodaTime/Properties/AssemblyInfo.cs @@ -4,7 +4,7 @@ [module: SkipLocalsInit] #endif -[assembly: InternalsVisibleTo("Npgsql.NodaTime.Tests, PublicKey=" + +[assembly: InternalsVisibleTo("Npgsql.PluginTests, PublicKey=" + "0024000004800000940000000602000000240000525341310004000001000100" + "2b3c590b2a4e3d347e6878dc0ff4d21eb056a50420250c6617044330701d35c9" + "8078a5df97a62d83c9a2db2d072523a8fc491398254c6b89329b8c1dcef43a1e" + diff --git a/src/Npgsql.NodaTime/Properties/NpgsqlNodaTimeStrings.Designer.cs b/src/Npgsql.NodaTime/Properties/NpgsqlNodaTimeStrings.Designer.cs index e47b9140b5..bc6511ea9a 100644 --- a/src/Npgsql.NodaTime/Properties/NpgsqlNodaTimeStrings.Designer.cs +++ b/src/Npgsql.NodaTime/Properties/NpgsqlNodaTimeStrings.Designer.cs @@ -50,5 +50,11 @@ internal static string CannotReadInfinityValue { return ResourceManager.GetString("CannotReadInfinityValue", resourceCulture); } } + + internal static string 
CannotReadIntervalWithMonthsAsDuration { + get { + return ResourceManager.GetString("CannotReadIntervalWithMonthsAsDuration", resourceCulture); + } + } } } diff --git a/src/Npgsql.NodaTime/Properties/NpgsqlNodaTimeStrings.resx b/src/Npgsql.NodaTime/Properties/NpgsqlNodaTimeStrings.resx index d05d0c3a62..d3329f2a80 100644 --- a/src/Npgsql.NodaTime/Properties/NpgsqlNodaTimeStrings.resx +++ b/src/Npgsql.NodaTime/Properties/NpgsqlNodaTimeStrings.resx @@ -21,4 +21,7 @@ Cannot read infinity value since Npgsql.DisableDateTimeInfinityConversions is enabled. - \ No newline at end of file + + Cannot read PostgreSQL interval with non-zero months to NodaTime Duration. Try reading as a NodaTime Period instead. + + diff --git a/src/Npgsql.SourceGenerators/Npgsql.SourceGenerators.csproj b/src/Npgsql.SourceGenerators/Npgsql.SourceGenerators.csproj index 434936cefe..bc0f37e9bb 100644 --- a/src/Npgsql.SourceGenerators/Npgsql.SourceGenerators.csproj +++ b/src/Npgsql.SourceGenerators/Npgsql.SourceGenerators.csproj @@ -27,7 +27,6 @@ - diff --git a/src/Npgsql.SourceGenerators/NpgsqlConnectionStringBuilderSourceGenerator.cs b/src/Npgsql.SourceGenerators/NpgsqlConnectionStringBuilderSourceGenerator.cs index f7008610b0..a25495a40a 100644 --- a/src/Npgsql.SourceGenerators/NpgsqlConnectionStringBuilderSourceGenerator.cs +++ b/src/Npgsql.SourceGenerators/NpgsqlConnectionStringBuilderSourceGenerator.cs @@ -1,4 +1,3 @@ -using System; using System.Collections.Generic; using System.Linq; using System.Text; diff --git a/src/Npgsql.SourceGenerators/TypeHandler.snbtxt b/src/Npgsql.SourceGenerators/TypeHandler.snbtxt deleted file mode 100644 index 041c948881..0000000000 --- a/src/Npgsql.SourceGenerators/TypeHandler.snbtxt +++ /dev/null @@ -1,36 +0,0 @@ -{{ for using in usings }} -using {{ using }}; -{{ end }} - -#nullable enable -#pragma warning disable CS1591 // Missing XML comment for publicly visible type or member -#pragma warning disable RS0016 // Add public types and members to the declared API 
-#pragma warning disable 618 // Member is obsolete - -namespace {{ namespace }} -{ - partial class {{ type_name }} - { - public override int ValidateObjectAndGetLength(object value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => value switch - { - {{ for interface in interfaces }} - {{ interface.handled_type }} converted => (({{ interface.name }})this).ValidateAndGetLength(converted, {{ is_simple ? "" : "ref lengthCache, " }}parameter), - {{ end }} - - _ => throw new InvalidCastException($"Can't write CLR type {value.GetType()} with handler type {{ type_name }}") - }; - - public override Task WriteObjectWithLength(object? value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => value switch - { - {{ for interface in interfaces }} - {{ interface.handled_type }} converted => WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), - {{ end }} - - DBNull => WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken), - null => WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken), - _ => throw new InvalidCastException($"Can't write CLR type {value.GetType()} with handler type {{ type_name }}") - }; - } -} diff --git a/src/Npgsql.SourceGenerators/TypeHandlerSourceGenerator.cs b/src/Npgsql.SourceGenerators/TypeHandlerSourceGenerator.cs deleted file mode 100644 index d36cc41988..0000000000 --- a/src/Npgsql.SourceGenerators/TypeHandlerSourceGenerator.cs +++ /dev/null @@ -1,129 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Linq; -using System.Text; -using Microsoft.CodeAnalysis; -using Microsoft.CodeAnalysis.CSharp; -using Microsoft.CodeAnalysis.CSharp.Syntax; -using Microsoft.CodeAnalysis.Text; -using Scriban; - -namespace Npgsql.SourceGenerators; - -[Generator] -sealed class TypeHandlerSourceGenerator : ISourceGenerator -{ - public void 
Initialize(GeneratorInitializationContext context) - => context.RegisterForSyntaxNotifications(() => new MySyntaxReceiver()); - - public void Execute(GeneratorExecutionContext context) - { - var compilation = context.Compilation; - - var (simpleTypeHandlerInterfaceSymbol, typeHandlerInterfaceSymbol) = ( - compilation.GetTypeByMetadataName("Npgsql.Internal.TypeHandling.INpgsqlSimpleTypeHandler`1"), - compilation.GetTypeByMetadataName("Npgsql.Internal.TypeHandling.INpgsqlTypeHandler`1")); - - if (simpleTypeHandlerInterfaceSymbol is null || typeHandlerInterfaceSymbol is null) - throw new Exception("Could not find INpgsqlSimpleTypeHandler or INpgsqlTypeHandler"); - - var template = Template.Parse(EmbeddedResource.GetContent("TypeHandler.snbtxt"), "TypeHandler.snbtxt"); - - foreach (var cds in ((MySyntaxReceiver)context.SyntaxReceiver!).TypeHandlerCandidates) - { - var semanticModel = compilation.GetSemanticModel(cds.SyntaxTree); - if (semanticModel.GetDeclaredSymbol(cds) is not INamedTypeSymbol typeSymbol) - continue; - - if (typeSymbol.AllInterfaces.Any(i => - i.OriginalDefinition.Equals(simpleTypeHandlerInterfaceSymbol, SymbolEqualityComparer.Default))) - { - AugmentTypeHandler(template, typeSymbol, cds, isSimple: true); - continue; - } - - if (typeSymbol.AllInterfaces.Any(i => - i.OriginalDefinition.Equals(typeHandlerInterfaceSymbol, SymbolEqualityComparer.Default))) - { - AugmentTypeHandler(template, typeSymbol, cds, isSimple: false); - } - } - - void AugmentTypeHandler( - Template template, - INamedTypeSymbol typeSymbol, - ClassDeclarationSyntax classDeclarationSyntax, - bool isSimple) - { - var usings = new HashSet( - new[] - { - "System", - "System.Threading", - "System.Threading.Tasks", - "Npgsql.Internal" - }.Concat(classDeclarationSyntax.SyntaxTree.GetCompilationUnitRoot().Usings - .Where(u => u.Name is not null && u.Alias is null && u.StaticKeyword.IsKind(SyntaxKind.None)) - .Select(u => u.Name!.ToString()))); - - var interfaces = typeSymbol.AllInterfaces - 
.Where(i => i.OriginalDefinition.Equals(isSimple ? simpleTypeHandlerInterfaceSymbol : typeHandlerInterfaceSymbol, - SymbolEqualityComparer.Default)) - // Hacky: we want to emit switch arms for abstract types after concrete ones, since otherwise the compiled complains about - // unreachable arms - .OrderBy(i => i.TypeArguments[0].IsAbstract); - - var output = template.Render(new - { - Usings = usings, - TypeName = FormatTypeName(typeSymbol), - Namespace = typeSymbol.ContainingNamespace.ToDisplayString(), - IsSimple = isSimple, - Interfaces = interfaces.Select(i => new - { - Name = FormatTypeName(i), - HandledType = FormatTypeName(i.TypeArguments[0]), - }) - }); - - context.AddSource(typeSymbol.Name + ".Generated.cs", SourceText.From(output, Encoding.UTF8)); - } - - static string FormatTypeName(ITypeSymbol typeSymbol) - { - if (typeSymbol is INamedTypeSymbol namedTypeSymbol) - { - return namedTypeSymbol.IsGenericType - ? new StringBuilder(namedTypeSymbol.Name) - .Append('<') - .Append(string.Join(",", namedTypeSymbol.TypeArguments.Select(FormatTypeName))) - .Append('>') - .ToString() - : namedTypeSymbol.Name; - } - - if (typeSymbol.TypeKind == TypeKind.Array) - { - return $"{FormatTypeName(((IArrayTypeSymbol)typeSymbol).ElementType)}[]"; - // return "int"; - } - - return typeSymbol.ToString(); - } - } - - sealed class MySyntaxReceiver : ISyntaxReceiver - { - public List TypeHandlerCandidates { get; } = new(); - - public void OnVisitSyntaxNode(SyntaxNode syntaxNode) - { - if (syntaxNode is ClassDeclarationSyntax cds && - cds.BaseList is not null && - cds.Modifiers.Any(SyntaxKind.PartialKeyword)) - { - TypeHandlerCandidates.Add(cds); - } - } - } -} diff --git a/src/Npgsql/BackendMessages/AuthenticationMessages.cs b/src/Npgsql/BackendMessages/AuthenticationMessages.cs index 31a6c06e24..b6320e87b8 100644 --- a/src/Npgsql/BackendMessages/AuthenticationMessages.cs +++ b/src/Npgsql/BackendMessages/AuthenticationMessages.cs @@ -2,7 +2,6 @@ using System.Collections.Generic; 
using Microsoft.Extensions.Logging; using Npgsql.Internal; -using Npgsql.Util; namespace Npgsql.BackendMessages; @@ -136,7 +135,7 @@ sealed class AuthenticationSCRAMServerFirstMessage internal static AuthenticationSCRAMServerFirstMessage Load(byte[] bytes, ILogger connectionLogger) { - var data = PGUtil.UTF8Encoding.GetString(bytes); + var data = NpgsqlWriteBuffer.UTF8Encoding.GetString(bytes); string? nonce = null, salt = null; var iteration = -1; @@ -188,7 +187,7 @@ sealed class AuthenticationSCRAMServerFinalMessage internal static AuthenticationSCRAMServerFinalMessage Load(byte[] bytes, ILogger connectionLogger) { - var data = PGUtil.UTF8Encoding.GetString(bytes); + var data = NpgsqlWriteBuffer.UTF8Encoding.GetString(bytes); string? serverSignature = null; foreach (var part in data.Split(',')) diff --git a/src/Npgsql/BackendMessages/CopyMessages.cs b/src/Npgsql/BackendMessages/CopyMessages.cs index 67ee5da526..1aa8aec0c2 100644 --- a/src/Npgsql/BackendMessages/CopyMessages.cs +++ b/src/Npgsql/BackendMessages/CopyMessages.cs @@ -1,7 +1,6 @@ using System; using System.Collections.Generic; using Npgsql.Internal; -using Npgsql.Util; namespace Npgsql.BackendMessages; @@ -11,11 +10,11 @@ abstract class CopyResponseMessageBase : IBackendMessage internal bool IsBinary { get; private set; } internal short NumColumns { get; private set; } - internal List ColumnFormatCodes { get; } + internal List ColumnFormatCodes { get; } internal CopyResponseMessageBase() { - ColumnFormatCodes = new List(); + ColumnFormatCodes = new List(); } internal void Load(NpgsqlReadBuffer buf) @@ -32,7 +31,7 @@ internal void Load(NpgsqlReadBuffer buf) NumColumns = buf.ReadInt16(); for (var i = 0; i < NumColumns; i++) - ColumnFormatCodes.Add((FormatCode)buf.ReadInt16()); + ColumnFormatCodes.Add(DataFormatUtils.Create(buf.ReadInt16())); } } @@ -91,4 +90,4 @@ sealed class CopyDoneMessage : IBackendMessage public BackendMessageCode Code => BackendMessageCode.CopyDone; internal static readonly 
CopyDoneMessage Instance = new(); CopyDoneMessage() { } -} \ No newline at end of file +} diff --git a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs index b2a9a6f111..a963d165c9 100644 --- a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs +++ b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs @@ -3,14 +3,12 @@ using System.Collections.Generic; using System.Diagnostics; using System.Globalization; +using System.Runtime.CompilerServices; +using System.Threading; using Npgsql.Internal; -using Npgsql.Internal.TypeHandlers; -using Npgsql.Internal.TypeHandling; -using Npgsql.Internal.TypeMapping; +using Npgsql.Internal.Postgres; using Npgsql.PostgresTypes; using Npgsql.Replication.PgOutput.Messages; -using Npgsql.TypeMapping; -using Npgsql.Util; namespace Npgsql.BackendMessages; @@ -22,12 +20,15 @@ namespace Npgsql.BackendMessages; /// sealed class RowDescriptionMessage : IBackendMessage, IReadOnlyList { + readonly bool _connectorOwned; FieldDescription?[] _fields; readonly Dictionary _nameIndex; Dictionary? _insensitiveIndex; + PgConverterInfo[]? 
_lastConverterInfoCache; - internal RowDescriptionMessage(int numFields = 10) + internal RowDescriptionMessage(bool connectorOwned, int numFields = 10) { + _connectorOwned = connectorOwned; _fields = new FieldDescription[numFields]; _nameIndex = new Dictionary(); } @@ -43,7 +44,7 @@ internal RowDescriptionMessage(int numFields = 10) _insensitiveIndex = new Dictionary(source._insensitiveIndex); } - internal RowDescriptionMessage Load(NpgsqlReadBuffer buf, TypeMapper typeMapper) + internal RowDescriptionMessage Load(NpgsqlReadBuffer buf, PgSerializerOptions options) { _nameIndex.Clear(); _insensitiveIndex?.Clear(); @@ -61,14 +62,14 @@ internal RowDescriptionMessage Load(NpgsqlReadBuffer buf, TypeMapper typeMapper) var field = _fields[i] ??= new(); field.Populate( - typeMapper, + options, name: buf.ReadNullTerminatedString(), tableOID: buf.ReadUInt32(), columnAttributeNumber: buf.ReadInt16(), oid: buf.ReadUInt32(), typeSize: buf.ReadInt16(), typeModifier: buf.ReadInt32(), - formatCode: (FormatCode)buf.ReadInt16() + dataFormat: DataFormatUtils.Create(buf.ReadInt16()) ); _nameIndex.TryAdd(field.Name, i); @@ -78,9 +79,9 @@ internal RowDescriptionMessage Load(NpgsqlReadBuffer buf, TypeMapper typeMapper) } internal static RowDescriptionMessage CreateForReplication( - TypeMapper typeMapper, uint tableOID, FormatCode formatCode, IReadOnlyList columns) + PgSerializerOptions options, uint tableOID, DataFormat dataFormat, IReadOnlyList columns) { - var msg = new RowDescriptionMessage(columns.Count); + var msg = new RowDescriptionMessage(false, columns.Count); var numFields = msg.Count = columns.Count; for (var i = 0; i < numFields; ++i) @@ -89,14 +90,14 @@ internal static RowDescriptionMessage CreateForReplication( var column = columns[i]; field.Populate( - typeMapper, - name: column.ColumnName, - tableOID: tableOID, + options, + name: column.ColumnName, + tableOID: tableOID, columnAttributeNumber: checked((short)i), - oid: column.DataTypeId, - typeSize: 0, // TODO: Confirm we 
don't have this in replication - typeModifier: column.TypeModifier, - formatCode: formatCode + oid: column.DataTypeId, + typeSize: 0, // TODO: Confirm we don't have this in replication + typeModifier: column.TypeModifier, + dataFormat: dataFormat ); if (!msg._nameIndex.ContainsKey(field.Name)) @@ -108,6 +109,7 @@ internal static RowDescriptionMessage CreateForReplication( public FieldDescription this[int index] { + [MethodImpl(MethodImplOptions.AggressiveInlining)] get { Debug.Assert(index < Count); @@ -117,6 +119,20 @@ public FieldDescription this[int index] } } + internal void SetConverterInfoCache(ReadOnlySpan values) + { + if (_connectorOwned || _lastConverterInfoCache is not null) + return; + Interlocked.CompareExchange(ref _lastConverterInfoCache, values.ToArray(), null); + } + + internal void LoadConverterInfoCache(PgConverterInfo[] values) + { + if (_lastConverterInfoCache is not { } cache) + return; + cache.CopyTo(values.AsSpan()); + } + public int Count { get; private set; } public IEnumerator GetEnumerator() => new Enumerator(this); @@ -164,7 +180,7 @@ sealed class InsensitiveComparer : IEqualityComparer public static readonly InsensitiveComparer Instance = new(); static readonly CompareInfo CompareInfo = CultureInfo.InvariantCulture.CompareInfo; - InsensitiveComparer() {} + InsensitiveComparer() { } // We should really have CompareOptions.IgnoreKanaType here, but see // https://github.com/dotnet/corefx/issues/12518#issuecomment-389658716 @@ -204,7 +220,7 @@ public bool MoveNext() } public void Reset() => _pos = -1; - public void Dispose() {} + public void Dispose() { } } } @@ -215,14 +231,14 @@ public void Dispose() {} public sealed class FieldDescription { #pragma warning disable CS8618 // Lazy-initialized type - internal FieldDescription() {} + internal FieldDescription() { } internal FieldDescription(uint oid) - : this("?", 0, 0, oid, 0, 0, FormatCode.Binary) {} + : this("?", 0, 0, oid, 0, 0, DataFormat.Binary) { } internal FieldDescription( string 
name, uint tableOID, short columnAttributeNumber, - uint oid, short typeSize, int typeModifier, FormatCode formatCode) + uint oid, short typeSize, int typeModifier, DataFormat dataFormat) { Name = name; TableOID = tableOID; @@ -230,38 +246,41 @@ internal FieldDescription( TypeOID = oid; TypeSize = typeSize; TypeModifier = typeModifier; - FormatCode = formatCode; + DataFormat = dataFormat; } #pragma warning restore CS8618 internal FieldDescription(FieldDescription source) { - _typeMapper = source._typeMapper; + _serializerOptions = source._serializerOptions; Name = source.Name; TableOID = source.TableOID; ColumnAttributeNumber = source.ColumnAttributeNumber; TypeOID = source.TypeOID; TypeSize = source.TypeSize; TypeModifier = source.TypeModifier; - FormatCode = source.FormatCode; - Handler = source.Handler; + DataFormat = source.DataFormat; + PostgresType = source.PostgresType; + Field = source.Field; + _objectOrDefaultInfo = source._objectOrDefaultInfo; } internal void Populate( - TypeMapper typeMapper, string name, uint tableOID, short columnAttributeNumber, - uint oid, short typeSize, int typeModifier, FormatCode formatCode + PgSerializerOptions serializerOptions, string name, uint tableOID, short columnAttributeNumber, + uint oid, short typeSize, int typeModifier, DataFormat dataFormat ) { - _typeMapper = typeMapper; + _serializerOptions = serializerOptions; Name = name; TableOID = tableOID; ColumnAttributeNumber = columnAttributeNumber; TypeOID = oid; TypeSize = typeSize; TypeModifier = typeModifier; - FormatCode = formatCode; - - ResolveHandler(); + DataFormat = dataFormat; + PostgresType = _serializerOptions.DatabaseInfo.FindPostgresType((Oid)TypeOID)?.GetRepresentationalType() ?? UnknownBackendType.Instance; + Field = new(Name, _serializerOptions.ToCanonicalTypeId(PostgresType), TypeModifier); + _objectOrDefaultInfo = default; } /// @@ -296,43 +315,94 @@ internal void Populate( /// /// The format code being used for the field. 
- /// Currently will be zero (text) or one (binary). + /// Currently will be text or binary. /// In a RowDescription returned from the statement variant of Describe, the format code is not yet known and will always be zero. /// - internal FormatCode FormatCode { get; set; } + internal DataFormat DataFormat { get; set; } - internal string TypeDisplayName => PostgresType.GetDisplayNameWithFacets(TypeModifier); + internal Field Field { get; private set; } - /// - /// The Npgsql type handler assigned to handle this field. - /// Returns for fields with format text. - /// - internal NpgsqlTypeHandler Handler { get; private set; } + internal string TypeDisplayName => PostgresType.GetDisplayNameWithFacets(TypeModifier); - internal PostgresType PostgresType - => _typeMapper.DatabaseInfo.ByOID.TryGetValue(TypeOID, out var postgresType) - ? postgresType - : UnknownBackendType.Instance; + internal PostgresType PostgresType { get; private set; } - internal Type FieldType => Handler.GetFieldType(this); + internal Type FieldType => ObjectOrDefaultInfo.TypeToConvert; - internal void ResolveHandler() - => Handler = IsBinaryFormat ? _typeMapper.ResolveByOID(TypeOID) : _typeMapper.UnrecognizedTypeHandler; + PgConverterInfo _objectOrDefaultInfo; + internal PgConverterInfo ObjectOrDefaultInfo + { + get + { + if (!_objectOrDefaultInfo.IsDefault) + return _objectOrDefaultInfo; - TypeMapper _typeMapper; + ref var info = ref _objectOrDefaultInfo; + GetInfo(null, ref _objectOrDefaultInfo); + return info; + } + } - internal bool IsBinaryFormat => FormatCode == FormatCode.Binary; - internal bool IsTextFormat => FormatCode == FormatCode.Text; + PgSerializerOptions _serializerOptions; internal FieldDescription Clone() { - var field = new FieldDescription(this); - field.ResolveHandler(); + var field = new FieldDescription(this); return field; } + internal void GetInfo(Type? 
type, ref PgConverterInfo lastConverterInfo) + { + Debug.Assert(lastConverterInfo.IsDefault || ( + ReferenceEquals(_serializerOptions, lastConverterInfo.TypeInfo.Options) && + lastConverterInfo.TypeInfo.PgTypeId == _serializerOptions.ToCanonicalTypeId(PostgresType)), "Cache is bleeding over"); + + if (!lastConverterInfo.IsDefault && lastConverterInfo.TypeToConvert == type) + return; + + // Have to check for null as it's a sentinel value used by ObjectOrDefaultTypeInfo init itself. + if (type is not null && ObjectOrDefaultInfo is var odfInfo) + { + if (typeof(object) == type) + { + lastConverterInfo = odfInfo with { AsObject = true }; + return; + } + if (odfInfo.TypeToConvert == type) + { + lastConverterInfo = odfInfo; + return; + } + } + + GetInfoSlow(out lastConverterInfo); + + [MethodImpl(MethodImplOptions.NoInlining)] + void GetInfoSlow(out PgConverterInfo lastConverterInfo) + { + PgConverterInfo converterInfo; + var typeInfo = AdoSerializerHelpers.GetTypeInfoForReading(type ?? typeof(object), PostgresType, _serializerOptions); + switch (DataFormat) + { + case DataFormat.Binary: + // If we don't support binary we'll just throw. + converterInfo = typeInfo.Bind(Field, DataFormat); + break; + default: + // For text we'll fall back to any available text converter for the expected clr type or throw. + if (!typeInfo.TryBind(Field, DataFormat, out converterInfo)) + { + typeInfo = AdoSerializerHelpers.GetTypeInfoForReading(type ?? typeof(string), _serializerOptions.UnknownPgType, _serializerOptions); + converterInfo = typeInfo.Bind(Field, DataFormat); + } + break; + } + + lastConverterInfo = converterInfo; + } + } + /// /// Returns a string that represents the current object. /// - public override string ToString() => Name + (Handler == null ? 
"" : $"({Handler.PgDisplayName})"); + public override string ToString() => Name + $"({PostgresType.DisplayName})"; } diff --git a/src/Npgsql/Internal/AdoSerializerHelpers.cs b/src/Npgsql/Internal/AdoSerializerHelpers.cs new file mode 100644 index 0000000000..f9e63e7a40 --- /dev/null +++ b/src/Npgsql/Internal/AdoSerializerHelpers.cs @@ -0,0 +1,58 @@ +using System; +using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; +using Npgsql.Internal.Postgres; +using Npgsql.PostgresTypes; +using NpgsqlTypes; + +namespace Npgsql.Internal; + +static class AdoSerializerHelpers +{ + public static PgTypeInfo GetTypeInfoForReading(Type type, PostgresType postgresType, PgSerializerOptions options) + { + PgTypeInfo? typeInfo = null; + Exception? inner = null; + try + { + typeInfo = type == typeof(object) ? options.GetObjectOrDefaultTypeInfo(postgresType) : options.GetTypeInfo(type, postgresType); + } + catch (Exception ex) + { + inner = ex; + } + return typeInfo ?? ThrowReadingNotSupported(type, postgresType.DisplayName, inner); + + // InvalidCastException thrown to align with ADO.NET convention. + [DoesNotReturn] + static PgTypeInfo ThrowReadingNotSupported(Type? type, string displayName, Exception? inner = null) + => throw new InvalidCastException($"Reading{(type is null ? "" : $" as '{type.FullName}'")} is not supported for fields having DataTypeName '{displayName}'", inner); + } + + public static PgTypeInfo GetTypeInfoForWriting(Type? type, PgTypeId? pgTypeId, PgSerializerOptions options, NpgsqlDbType? npgsqlDbType = null) + { + Debug.Assert(type != typeof(object), "Parameters of type object are not supported."); + + PgTypeInfo? typeInfo = null; + Exception? inner = null; + try + { + typeInfo = type is null ? options.GetDefaultTypeInfo(pgTypeId!.Value) : options.GetTypeInfo(type, pgTypeId); + } + catch (Exception ex) + { + inner = ex; + } + return typeInfo ?? ThrowWritingNotSupported(type, + pgTypeString: + pgTypeId is null ? "no NpgsqlDbType or DataTypeName. 
Try setting one of these values to the expected database type." : + npgsqlDbType is null + ? $"DataTypeName '{options.DatabaseInfo.FindPostgresType(pgTypeId.GetValueOrDefault())?.DisplayName ?? "unknown"}'" + : $"NpgsqlDbType '{npgsqlDbType}'", inner); + + // InvalidCastException thrown to align with ADO.NET convention. + [DoesNotReturn] + static PgTypeInfo ThrowWritingNotSupported(Type? type, string pgTypeString, Exception? inner = null) + => throw new InvalidCastException($"Writing{(type is null ? "" : $" values of '{type.FullName}'")} is not supported for parameters having {pgTypeString}.", inner); + } +} diff --git a/src/Npgsql/Internal/BufferRequirements.cs b/src/Npgsql/Internal/BufferRequirements.cs new file mode 100644 index 0000000000..cd32c0cbd1 --- /dev/null +++ b/src/Npgsql/Internal/BufferRequirements.cs @@ -0,0 +1,43 @@ +using System; + +namespace Npgsql.Internal; + +public readonly struct BufferRequirements : IEquatable +{ + readonly Size _read; + readonly Size _write; + + BufferRequirements(Size read, Size write) + { + _read = read; + _write = write; + } + + public Size Read => _read; + public Size Write => _write; + + /// Streaming + public static BufferRequirements None => new(Size.Unknown, Size.Unknown); + /// Entire value should be buffered + public static BufferRequirements Value => new(Size.CreateUpperBound(int.MaxValue), Size.CreateUpperBound(int.MaxValue)); + /// Fixed size value should be buffered + public static BufferRequirements CreateFixedSize(int byteCount) => new(byteCount, byteCount); + /// Custom requirements + public static BufferRequirements Create(Size value) => new(value, value); + public static BufferRequirements Create(Size read, Size write) => new(read, write); + + public BufferRequirements Combine(Size read, Size write) + => new(_read.Combine(read), _write.Combine(write)); + + public BufferRequirements Combine(BufferRequirements other) + => Combine(other._read, other._write); + + public BufferRequirements Combine(int 
byteCount) + => Combine(CreateFixedSize(byteCount)); + + public bool Equals(BufferRequirements other) => _read.Equals(other._read) && _write.Equals(other._write); + public override bool Equals(object? obj) => obj is BufferRequirements other && Equals(other); + public override int GetHashCode() => HashCode.Combine(_read, _write); + public static bool operator ==(BufferRequirements left, BufferRequirements right) => left.Equals(right); + public static bool operator !=(BufferRequirements left, BufferRequirements right) => !left.Equals(right); +} diff --git a/src/Npgsql/Internal/Composites/Metadata/CompositeBuilder.cs b/src/Npgsql/Internal/Composites/Metadata/CompositeBuilder.cs new file mode 100644 index 0000000000..c51c0dafa0 --- /dev/null +++ b/src/Npgsql/Internal/Composites/Metadata/CompositeBuilder.cs @@ -0,0 +1,109 @@ +using System; +using System.Buffers; +using Npgsql.Util; + +namespace Npgsql.Internal.Composites; + +abstract class CompositeBuilder +{ + protected StrongBox[] _tempBoxes; + protected int _currentField; + + protected CompositeBuilder(StrongBox[] tempBoxes) => _tempBoxes = tempBoxes; + + protected abstract void Construct(); + protected abstract void SetField(TValue value); + + public void AddValue(TValue value) + { + var tempBoxes = _tempBoxes; + var currentField = _currentField; + if (currentField >= tempBoxes.Length) + { + if (currentField == tempBoxes.Length) + Construct(); + SetField(value); + } + else + { + ((StrongBox)tempBoxes[currentField]).TypedValue = value; + if (currentField + 1 == tempBoxes.Length) + Construct(); + } + + _currentField++; + } +} + +sealed class CompositeBuilder : CompositeBuilder, IDisposable +{ + readonly CompositeInfo _compositeInfo; + T _instance = default!; + object? 
_boxedInstance; + + public CompositeBuilder(CompositeInfo compositeInfo) + : base(compositeInfo.CreateTempBoxes()) + => _compositeInfo = compositeInfo; + + public T Complete() + { + if (_currentField < _compositeInfo.Fields.Count) + throw new InvalidOperationException($"Missing values, expected: {_compositeInfo.Fields.Count} got: {_currentField}"); + + return (T)(_boxedInstance ?? _instance!); + } + + public void Reset() + { + _instance = default!; + _boxedInstance = null; + _currentField = 0; + foreach (var box in _tempBoxes) + box.Clear(); + } + + public void Dispose() => Reset(); + + protected override void Construct() + { + var tempBoxes = _tempBoxes; + if (_currentField < tempBoxes.Length - 1) + throw new InvalidOperationException($"Missing values, expected: {tempBoxes.Length} got: {_currentField + 1}"); + + var fields = _compositeInfo.Fields; + var args = ArrayPool.Shared.Rent(_compositeInfo.ConstructorParameters); + for (var i = 0; i < tempBoxes.Length; i++) + { + var field = fields[i]; + if (field.ConstructorParameterIndex is { } argIndex) + args[argIndex] = tempBoxes[i]; + } + _instance = _compositeInfo.Constructor(args)!; + ArrayPool.Shared.Return(args); + + if (tempBoxes.Length == _compositeInfo.Fields.Count) + return; + + // We're expecting or already have stored more fields, so box the instance once here. 
+ _boxedInstance = _instance; + for (var i = 0; i < tempBoxes.Length; i++) + { + var field = _compositeInfo.Fields[i]; + if (field.ConstructorParameterIndex is null) + field.Set(_boxedInstance, tempBoxes[i]); + } + } + + protected override void SetField(TValue value) + { + if (_boxedInstance is null) + ThrowHelper.ThrowInvalidOperationException("Not constructed yet, or no more fields were expected."); + + var currentField = _currentField; + var fields = _compositeInfo.Fields; + if (currentField > fields.Count - 1) + ThrowHelper.ThrowIndexOutOfRangeException($"Cannot set field {value} at position {currentField} - all fields have already been set"); + + ((CompositeFieldInfo)fields[currentField]).Set(_boxedInstance, value); + } +} diff --git a/src/Npgsql/Internal/Composites/Metadata/CompositeFieldInfo.cs b/src/Npgsql/Internal/Composites/Metadata/CompositeFieldInfo.cs new file mode 100644 index 0000000000..765399bf76 --- /dev/null +++ b/src/Npgsql/Internal/Composites/Metadata/CompositeFieldInfo.cs @@ -0,0 +1,192 @@ +using System; +using System.Runtime.CompilerServices; +using System.Threading; +using System.Threading.Tasks; +using Npgsql.Internal.Postgres; +using Npgsql.Util; + +namespace Npgsql.Internal.Composites; + +abstract class CompositeFieldInfo +{ + protected PgConverter Converter { get; } + protected BufferRequirements _binaryBufferRequirements; + + private protected CompositeFieldInfo(string name, PgConverterResolution resolution) + { + Name = name; + Converter = resolution.Converter; + PgTypeId = resolution.PgTypeId; + + if (!Converter.CanConvert(DataFormat.Binary, out _binaryBufferRequirements)) + throw new InvalidOperationException("Converter must support binary format to participate in composite types."); + } + + protected PgConverter GetConverter() => (PgConverter)Converter; + + protected ValueTask ReadAsObject(bool async, CompositeBuilder builder, PgReader reader, CancellationToken cancellationToken) + { + if (async) + { + var task = 
Converter.ReadAsObjectAsync(reader, cancellationToken); + if (!task.IsCompletedSuccessfully) + return Core(builder, task); + + AddValue(builder, task.Result); + } + else + AddValue(builder, Converter.ReadAsObject(reader)); + return new(); +#if NET6_0_OR_GREATER + [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder))] +#endif + async ValueTask Core(CompositeBuilder builder, ValueTask task) + { + builder.AddValue(await task.ConfigureAwait(false)); + } + } + + protected ValueTask WriteAsObject(bool async, PgWriter writer, object value, CancellationToken cancellationToken) + { + if (async) + return Converter.WriteAsObjectAsync(writer, value, cancellationToken); + + Converter.WriteAsObject(writer, value); + return new(); + } + + public string Name { get; } + public PgTypeId PgTypeId { get; } + public Size BinaryReadRequirement => _binaryBufferRequirements.Read; + public Size BinaryWriteRequirement => _binaryBufferRequirements.Write; + + public abstract Type Type { get; } + + protected abstract void AddValue(CompositeBuilder builder, object value); + + public abstract StrongBox CreateBox(); + public abstract void Set(object instance, StrongBox value); + public abstract int? ConstructorParameterIndex { get; } + public abstract bool IsDbNullable { get; } + + public abstract void ReadDbNull(CompositeBuilder builder); + public abstract ValueTask Read(bool async, CompositeBuilder builder, PgReader reader, CancellationToken cancellationToken = default); + public abstract bool IsDbNull(object instance); + public abstract Size? GetSizeOrDbNull(DataFormat format, object instance, ref object? writeState); + public abstract ValueTask Write(bool async, PgWriter writer, object instance, CancellationToken cancellationToken); +} + +sealed class CompositeFieldInfo : CompositeFieldInfo +{ + readonly Action? 
_setter; + readonly int _parameterIndex; + readonly Func _getter; + readonly bool _asObject; + + CompositeFieldInfo(string name, PgConverterResolution resolution, Func getter) + : base(name, resolution) + { + var typeToConvert = resolution.Converter.TypeToConvert; + if (!typeToConvert.IsAssignableFrom(typeof(T))) + throw new InvalidOperationException($"Converter type '{typeToConvert}' must be assignable from field type '{typeof(T)}'."); + + _getter = getter; + _asObject = typeToConvert != typeof(T); + } + + public CompositeFieldInfo(string name, PgConverterResolution resolution, Func getter, int parameterIndex) + : this(name, resolution, getter) + => _parameterIndex = parameterIndex; + + public CompositeFieldInfo(string name, PgConverterResolution resolution, Func getter, Action setter) + : this(name, resolution, getter) + => _setter = setter; + + public override Type Type => typeof(T); + + public override int? ConstructorParameterIndex => _setter is not null ? null : _parameterIndex; + + public T Get(object instance) => _getter(instance); + + public override StrongBox CreateBox() => new Util.StrongBox(); + + public void Set(object instance, T value) + { + if (_setter is null) + throw new InvalidOperationException("Not a composite field for a clr field."); + + _setter(instance, value); + } + + public override void Set(object instance, StrongBox value) + { + if (_setter is null) + throw new InvalidOperationException("Not a composite field for a clr field."); + + _setter(instance, ((Util.StrongBox)value).TypedValue!); + } + + public override void ReadDbNull(CompositeBuilder builder) + { + if (default(T) != null) + throw new InvalidCastException($"Type {typeof(T).FullName} does not have null as a possible value."); + + builder.AddValue((T?)default); + } + + protected override void AddValue(CompositeBuilder builder, object value) => builder.AddValue((T)value); + + public override ValueTask Read(bool async, CompositeBuilder builder, PgReader reader, CancellationToken 
cancellationToken = default) + { + if (_asObject) + return ReadAsObject(async, builder, reader, cancellationToken); + + if (async) + { + var task = GetConverter().ReadAsync(reader, cancellationToken); + if (!task.IsCompletedSuccessfully) + return Core(builder, task); + + builder.AddValue(task.Result); + } + else + builder.AddValue(GetConverter().Read(reader)); + return new(); +#if NET6_0_OR_GREATER + [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder))] +#endif + async ValueTask Core(CompositeBuilder builder, ValueTask task) + { + builder.AddValue(await task.ConfigureAwait(false)); + } + } + + public override bool IsDbNullable => Converter.IsDbNullable; + + public override bool IsDbNull(object instance) + { + var value = _getter(instance); + return _asObject ? Converter.IsDbNullAsObject(value) : GetConverter().IsDbNull(value); + } + + public override Size? GetSizeOrDbNull(DataFormat format, object instance, ref object? writeState) + { + var value = _getter(instance); + return _asObject + ? 
Converter.GetSizeOrDbNullAsObject(format, _binaryBufferRequirements.Write, value, ref writeState) + : GetConverter().GetSizeOrDbNull(format, _binaryBufferRequirements.Write, value, ref writeState); + } + + public override ValueTask Write(bool async, PgWriter writer, object instance, CancellationToken cancellationToken) + { + var value = _getter(instance); + if (_asObject) + return WriteAsObject(async, writer, value!, cancellationToken); + + if (async) + return GetConverter().WriteAsync(writer, value!, cancellationToken); + + GetConverter().Write(writer, value!); + return new(); + } +} diff --git a/src/Npgsql/Internal/Composites/Metadata/CompositeInfo.cs b/src/Npgsql/Internal/Composites/Metadata/CompositeInfo.cs new file mode 100644 index 0000000000..95a2c316a1 --- /dev/null +++ b/src/Npgsql/Internal/Composites/Metadata/CompositeInfo.cs @@ -0,0 +1,74 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics; +using Npgsql.Util; + +namespace Npgsql.Internal.Composites; + +sealed class CompositeInfo +{ + readonly int _lastConstructorFieldIndex; + readonly CompositeFieldInfo[] _fields; + + public CompositeInfo(CompositeFieldInfo[] fields, int? constructorParameters, Func? constructor) + { + _lastConstructorFieldIndex = -1; + for (var i = fields.Length - 1; i >= 0; i--) + if (fields[i].ConstructorParameterIndex is not null) + { + _lastConstructorFieldIndex = i; + break; + } + + var parameterSum = 0; + for(var i = constructorParameters - 1 ?? 
0; i > 0; i--) + parameterSum += i; + + var argumentsSum = 0; + if (parameterSum > 0) + { + foreach (var field in fields) + if (field.ConstructorParameterIndex is { } index) + argumentsSum += index; + } + + if (parameterSum != argumentsSum) + throw new InvalidOperationException($"Missing composite fields to map to the required {constructorParameters} constructor parameters."); + + _fields = fields; + if (constructor is null) + Constructor = _ => Activator.CreateInstance(); + else + { + var arguments = new CompositeFieldInfo[constructorParameters.GetValueOrDefault()]; + foreach (var field in fields) + { + if (field.ConstructorParameterIndex is { } index) + arguments[index] = field; + } + Constructor = constructor; + } + + ConstructorParameters = constructorParameters ?? 0; + } + + public IReadOnlyList Fields => _fields; + + public int ConstructorParameters { get; } + public Func Constructor { get; } + + /// + /// Create temporary storage for all values that come before the constructor parameters can be saturated. + /// + /// + public StrongBox[] CreateTempBoxes() + { + var valueCache = _lastConstructorFieldIndex + 1 is 0 ? 
Array.Empty() : new StrongBox[_lastConstructorFieldIndex + 1]; + var fields = _fields; + + for (var i = 0; i < valueCache.Length; i++) + valueCache[i] = fields[i].CreateBox(); + + return valueCache; + } +} diff --git a/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs b/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs new file mode 100644 index 0000000000..1fe217f5dc --- /dev/null +++ b/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs @@ -0,0 +1,296 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Linq; +using System.Linq.Expressions; +using System.Reflection; +using Npgsql.PostgresTypes; +using Npgsql.Util; +using NpgsqlTypes; + +namespace Npgsql.Internal.Composites; + +static class ReflectionCompositeInfoFactory +{ + public static CompositeInfo CreateCompositeInfo(PostgresCompositeType pgType, INpgsqlNameTranslator nameTranslator, PgSerializerOptions options) + { + var pgFields = pgType.Fields; + var propertyMap = MapProperties(pgFields, nameTranslator); + var fieldMap = MapFields(pgFields, nameTranslator); + + var duplicates = propertyMap.Keys.Intersect(fieldMap.Keys).ToArray(); + if (duplicates.Length > 0) + throw new AmbiguousMatchException($"Property {propertyMap[duplicates[0]].Name} and field {fieldMap[duplicates[0]].Name} map to the same '{pgFields[duplicates[0]].Name}' composite field name."); + + var (constructorInfo, parameterFieldMap) = MapBestMatchingConstructor(pgFields, nameTranslator); + var constructorParameters = constructorInfo?.GetParameters() ?? 
Array.Empty(); + var compositeFields = new CompositeFieldInfo?[pgFields.Count]; + for (var i = 0; i < parameterFieldMap.Length; i++) + { + var fieldIndex = parameterFieldMap[i]; + var pgField = pgFields[fieldIndex]; + var parameter = constructorParameters[i]; + PgTypeInfo pgTypeInfo; + Delegate getter; + if (propertyMap.TryGetValue(fieldIndex, out var property) && property.GetMethod is not null) + { + if (property.PropertyType != parameter.ParameterType) + throw new InvalidOperationException($"Could not find a matching getter for constructor parameter {parameter.Name} and type {parameter.ParameterType} mapped to composite field {pgFields[fieldIndex].Name}."); + + pgTypeInfo = options.GetTypeInfo(property.PropertyType, pgField.Type.GetRepresentationalType()) ?? throw NotSupportedField(pgType, pgField, isField: false, property.Name, property.PropertyType); + getter = CreateGetter(property); + } + else if (fieldMap.TryGetValue(fieldIndex, out var field)) + { + if (field.FieldType != parameter.ParameterType) + throw new InvalidOperationException($"Could not find a matching getter for constructor parameter {parameter.Name} and type {parameter.ParameterType} mapped to composite field {pgFields[fieldIndex].Name}."); + + pgTypeInfo = options.GetTypeInfo(field.FieldType, pgField.Type.GetRepresentationalType()) ?? throw NotSupportedField(pgType, pgField, isField: true, field.Name, field.FieldType); + getter = CreateGetter(field); + } + else + throw new InvalidOperationException($"Cannot find property or field for composite field {pgFields[fieldIndex].Name}."); + + compositeFields[fieldIndex] = CreateCompositeFieldInfo(pgField.Name, pgTypeInfo.Type, MapResolution(pgField, pgTypeInfo.GetConcreteResolution()), getter, i); + } + + for (var fieldIndex = 0; fieldIndex < pgFields.Count; fieldIndex++) + { + // Handled by constructor. 
+ if (compositeFields[fieldIndex] is not null) + continue; + + var pgField = pgFields[fieldIndex]; + PgTypeInfo pgTypeInfo; + Delegate getter; + Delegate setter; + if (propertyMap.TryGetValue(fieldIndex, out var property)) + { + pgTypeInfo = options.GetTypeInfo(property.PropertyType, pgField.Type.GetRepresentationalType()) + ?? throw NotSupportedField(pgType, pgField, isField: false, property.Name, property.PropertyType); + getter = CreateGetter(property); + setter = CreateSetter(property); + } + else if (fieldMap.TryGetValue(fieldIndex, out var field)) + { + pgTypeInfo = options.GetTypeInfo(field.FieldType, pgField.Type.GetRepresentationalType()) + ?? throw NotSupportedField(pgType, pgField, isField: true, field.Name, field.FieldType); + getter = CreateGetter(field); + setter = CreateSetter(field); + } + else + throw new InvalidOperationException($"Cannot find property or field for composite field '{pgFields[fieldIndex].Name}'."); + + compositeFields[fieldIndex] = CreateCompositeFieldInfo(pgField.Name, pgTypeInfo.Type, MapResolution(pgField, pgTypeInfo.GetConcreteResolution()), getter, setter); + } + + Debug.Assert(compositeFields.All(x => x is not null)); + + var constructor = constructorInfo is null ? null : CreateStrongBoxConstructor(constructorInfo); + return new CompositeInfo(compositeFields!, constructorInfo is null ? null : constructorParameters.Length, constructor); + + // We have to map the pg type back to the composite field type, as we've resolved based on the representational pg type. + PgConverterResolution MapResolution(PostgresCompositeType.Field field, PgConverterResolution resolution) + => new(resolution.Converter, options.ToCanonicalTypeId(field.Type)); + + static NotSupportedException NotSupportedField(PostgresCompositeType composite, PostgresCompositeType.Field field, bool isField, string name, Type type) + => new($"No resolution could be found for ('{type.FullName}', '{field.Type.FullName}'). Mapping: CLR {(isField ? 
"field" : "property")} '{type.Name}.{name}' <-> Composite field '{composite.Name}.{field.Name}'"); + } + + static Delegate CreateGetter(FieldInfo info) + { + var instance = Expression.Parameter(typeof(object), "instance"); + return Expression + .Lambda(typeof(Func<,>).MakeGenericType(typeof(object), info.FieldType), + Expression.Field(UnboxAny(instance, typeof(T)), info), + instance) + .Compile(); + } + + static Delegate CreateSetter(FieldInfo info) + { + var instance = Expression.Parameter(typeof(object), "instance"); + var value = Expression.Parameter(info.FieldType, "value"); + + return Expression + .Lambda(typeof(Action<,>).MakeGenericType(typeof(object), info.FieldType), + Expression.Assign(Expression.Field(UnboxAny(instance, typeof(T)), info), value), instance, value) + .Compile(); + } + + static Delegate CreateGetter(PropertyInfo info) + { + var invalidOpExceptionMessageConstructor = typeof(InvalidOperationException).GetConstructor(new []{ typeof(string) })!; + var instance = Expression.Parameter(typeof(object), "instance"); + var body = info.GetMethod is null || !info.GetMethod.IsPublic + ? (Expression)Expression.Throw(Expression.New(invalidOpExceptionMessageConstructor, + Expression.Constant($"No (public) getter for '{info}' on type {typeof(T)}")), info.PropertyType) + : Expression.Property(UnboxAny(instance, typeof(T)), info); + + return Expression + .Lambda(typeof(Func<,>).MakeGenericType(typeof(object), info.PropertyType), body, instance) + .Compile(); + } + + static Delegate CreateSetter(PropertyInfo info) + { + var instance = Expression.Parameter(typeof(object), "instance"); + var value = Expression.Parameter(info.PropertyType, "value"); + + var invalidOpExceptionMessageConstructor = typeof(InvalidOperationException).GetConstructor(new []{ typeof(string) })!; + var body = info.SetMethod is null || !info.SetMethod.IsPublic + ? 
(Expression)Expression.Throw(Expression.New(invalidOpExceptionMessageConstructor, + Expression.Constant($"No (public) getter for '{info}' on type {typeof(T)}")), info.PropertyType) + : Expression.Call(UnboxAny(instance, typeof(T)), info.SetMethod, value); + + return Expression + .Lambda(typeof(Action<,>).MakeGenericType(typeof(object), info.PropertyType), body, instance, value) + .Compile(); + } + + static Expression UnboxAny(Expression expression, Type type) + => type.IsValueType ? Expression.Unbox(expression, type) : Expression.Convert(expression, type, null); + + static Func CreateStrongBoxConstructor(ConstructorInfo constructorInfo) + { + var values = Expression.Parameter(typeof(StrongBox[]), "values"); + + var parameters = constructorInfo.GetParameters(); + var parameterCount = Expression.Constant(parameters.Length); + var argumentExceptionNameMessageConstructor = typeof(ArgumentException).GetConstructor(new []{ typeof(string), typeof(string) })!; + return Expression + .Lambda>( + Expression.Block( + Expression.IfThen( + Expression.LessThan(Expression.Property(values, "Length"), parameterCount), + + Expression.Throw(Expression.New(argumentExceptionNameMessageConstructor, + Expression.Constant("Passed fewer arguments than there are constructor parameters."), Expression.Constant(values.Name))) + ), + Expression.New(constructorInfo, parameters.Select((parameter, i) => + Expression.Property( + UnboxAny( + Expression.ArrayIndex(values, Expression.Constant(i)), + typeof(StrongBox<>).MakeGenericType(parameter.ParameterType) + ), + "TypedValue" + ) + )) + ), values) + .Compile(); + } + static CompositeFieldInfo CreateCompositeFieldInfo(string name, Type type, PgConverterResolution converterResolution, Delegate getter, int constructorParameterIndex) + => (CompositeFieldInfo)Activator.CreateInstance( + typeof(CompositeFieldInfo<>).MakeGenericType(type), name, converterResolution, getter, constructorParameterIndex)!; + + static CompositeFieldInfo 
CreateCompositeFieldInfo(string name, Type type, PgConverterResolution converterResolution, Delegate getter, Delegate setter) + => (CompositeFieldInfo)Activator.CreateInstance( + typeof(CompositeFieldInfo<>).MakeGenericType(type), name, converterResolution, getter, setter)!; + + static Dictionary MapProperties(IReadOnlyList fields, INpgsqlNameTranslator nameTranslator) + { + var properties = typeof(T).GetProperties(BindingFlags.Public | BindingFlags.Instance); + var propertiesAndNames = properties.Select(x => + { + var attr = x.GetCustomAttribute(); + var name = attr?.PgName ?? nameTranslator.TranslateMemberName(x.Name); + return new KeyValuePair(name, x); + }).ToArray(); + + var duplicates = propertiesAndNames.Except(propertiesAndNames.Distinct()).ToArray(); + if (duplicates.Length > 0) + throw new AmbiguousMatchException($"Multiple properties are mapped to the '{duplicates[0].Key}' field."); + + var propertiesMap = propertiesAndNames.ToDictionary(x => x.Key, x => x.Value); + var result = new Dictionary(); + for (var i = 0; i < fields.Count; i++) + { + var field = fields[i]; + if (!propertiesMap.TryGetValue(field.Name, out var value)) + continue; + + result[i] = value; + } + + return result; + } + + static Dictionary MapFields(IReadOnlyList fields, INpgsqlNameTranslator nameTranslator) + { + var clrFields = typeof(T).GetFields(BindingFlags.Public | BindingFlags.Instance); + var clrFieldsAndNames = clrFields.Select(x => + { + var attr = x.GetCustomAttribute(); + var name = attr?.PgName ?? 
nameTranslator.TranslateMemberName(x.Name); + return new KeyValuePair(name, x); + }).ToArray(); + + var duplicates = clrFieldsAndNames.Except(clrFieldsAndNames.Distinct()).ToArray(); + if (duplicates.Length > 0) + throw new AmbiguousMatchException($"Multiple properties are mapped to the '{duplicates[0].Key}' field."); + + var clrFieldsMap = clrFieldsAndNames.ToDictionary(x => x.Key, x => x.Value); + var result = new Dictionary(); + for (var i = 0; i < fields.Count; i++) + { + var field = fields[i]; + if (!clrFieldsMap.TryGetValue(field.Name, out var value)) + continue; + + result[i] = value; + } + + return result; + } + + static (ConstructorInfo? ConstructorInfo, int[] ParameterFieldMap) MapBestMatchingConstructor(IReadOnlyList fields, INpgsqlNameTranslator nameTranslator) + { + ConstructorInfo? clrDefaultConstructor = null; + foreach (var constructor in typeof(T).GetConstructors().OrderByDescending(x => x.GetParameters().Length)) + { + var parameters = constructor.GetParameters(); + if (parameters.Length != fields.Count) + { + if (parameters.Length == 0) + clrDefaultConstructor = constructor; + + continue; + } + + var parametersMapped = 0; + var parametersMap = new int[parameters.Length]; + + for (var i = 0; i < parameters.Length; i++) + { + var clrParameter = parameters[i]; + var attr = clrParameter.GetCustomAttribute(); + var name = attr?.PgName ?? (clrParameter.Name is { } clrName ? 
nameTranslator.TranslateMemberName(clrName) : null); + if (name is null) + break; + + for (var pgFieldIndex = 0; pgFieldIndex < fields.Count; pgFieldIndex++) + { + var pgField = fields[pgFieldIndex]; + if (pgField.Name != name) + continue; + + parametersMapped++; + parametersMap[i] = pgFieldIndex; + break; + } + } + + var duplicates = parametersMap.Except(parametersMap.Distinct()).ToArray(); + if (duplicates.Length > 0) + throw new AmbiguousMatchException($"Multiple constructor parameters are mapped to the '{fields[duplicates[0]].Name}' field."); + + if (parametersMapped == parameters.Length) + return (constructor, parametersMap); + } + + if (clrDefaultConstructor is null && !typeof(T).IsValueType) + throw new InvalidOperationException($"No parameterless constructor defined for type '{typeof(T)}'."); + + return (clrDefaultConstructor, Array.Empty()); + } +} diff --git a/src/Npgsql/Internal/Converters/ArrayConverter.cs b/src/Npgsql/Internal/Converters/ArrayConverter.cs new file mode 100644 index 0000000000..2801714cc5 --- /dev/null +++ b/src/Npgsql/Internal/Converters/ArrayConverter.cs @@ -0,0 +1,675 @@ +using System; +using System.Buffers; +using System.Collections; +using System.Collections.Generic; +using System.Collections.Concurrent; +using System.Diagnostics; +using System.Runtime.CompilerServices; +using System.Threading; +using System.Threading.Tasks; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Internal.Converters; + +interface IElementOperations +{ + object CreateCollection(int[] lengths); + int GetCollectionCount(object collection, out int[]? lengths); + Size? GetSizeOrDbNull(SizeContext context, object collection, int[] indices, ref object? 
writeState); + ValueTask Read(bool async, PgReader reader, bool isDbNull, object collection, int[] indices, CancellationToken cancellationToken = default); + ValueTask Write(bool async, PgWriter writer, object collection, int[] indices, CancellationToken cancellationToken = default); +} + +readonly struct PgArrayConverter +{ + internal const string ReadNonNullableCollectionWithNullsExceptionMessage = "Cannot read a non-nullable collection of elements because the returned array contains nulls. Call GetFieldValue with a nullable collection type instead."; + + readonly IElementOperations _elemOps; + readonly int? _expectedDimensions; + readonly BufferRequirements _bufferRequirements; + public bool ElemTypeDbNullable { get; } + readonly int _pgLowerBound; + readonly PgTypeId _elemTypeId; + + public PgArrayConverter(IElementOperations elemOps, bool elemTypeDbNullable, int? expectedDimensions, BufferRequirements bufferRequirements, PgTypeId elemTypeId, int pgLowerBound = 1) + { + _elemTypeId = elemTypeId; + ElemTypeDbNullable = elemTypeDbNullable; + _pgLowerBound = pgLowerBound; + _elemOps = elemOps; + _expectedDimensions = expectedDimensions; + _bufferRequirements = bufferRequirements; + } + + bool IsDbNull(object values, int[] indices) + { + object? state = null; + return _elemOps.GetSizeOrDbNull(new(DataFormat.Binary, _bufferRequirements.Write), values, indices, ref state) is null; + } + + Size GetElemsSize(object values, (Size, object?)[] elemStates, out bool anyElementState, DataFormat format, int count, int[] indices, int[]? lengths = null) + { + Debug.Assert(elemStates.Length >= count); + var totalSize = Size.Zero; + var context = new SizeContext(format, _bufferRequirements.Write); + anyElementState = false; + var lastLength = lengths?[lengths.Length - 1] ?? 
count; + ref var lastIndex = ref indices[indices.Length - 1]; + var i = 0; + do + { + ref var elemItem = ref elemStates[i++]; + var elemState = (object?)null; + var size = _elemOps.GetSizeOrDbNull(context, values, indices, ref elemState); + anyElementState = anyElementState || elemState is not null; + elemItem = (size ?? -1, elemState); + totalSize = totalSize.Combine(size ?? 0); + } + // We can immediately continue if we didn't reach the end of the last dimension. + while (++lastIndex < lastLength || (indices.Length > 1 && CarryIndices(lengths!, indices))); + + return totalSize; + } + + Size GetFixedElemsSize(Size elemSize, object values, int count, int[] indices, int[]? lengths = null) + { + var nulls = 0; + var lastLength = lengths?[lengths.Length - 1] ?? count; + ref var lastIndex = ref indices[indices.Length - 1]; + if (ElemTypeDbNullable) + do + { + if (IsDbNull(values, indices)) + nulls++; + } + // We can immediately continue if we didn't reach the end of the last dimension. + while (++lastIndex < lastLength || (indices.Length > 1 && CarryIndices(lengths!, indices))); + + return (count - nulls) * elemSize.Value; + } + + int GetFormatSize(int count, int dimensions) + => sizeof(int) + // Dimensions + sizeof(int) + // Flags + sizeof(int) + // Element OID + dimensions * (sizeof(int) + sizeof(int)) + // Dimensions * (array length and lower bound) + sizeof(int) * count; // Element length integers + + public Size GetSize(SizeContext context, object values, ref object? writeState) + { + var count = _elemOps.GetCollectionCount(values, out var lengths); + var dimensions = lengths?.Length ?? 
1; + if (dimensions > 8) + throw new ArgumentException(nameof(values), "Postgres arrays can have at most 8 dimensions."); + + var formatSize = Size.Create(GetFormatSize(count, dimensions)); + if (count is 0) + return formatSize; + + Size elemsSize; + var indices = new int[dimensions]; + if (_bufferRequirements.Write is { Kind: SizeKind.Exact } req) + { + elemsSize = GetFixedElemsSize(req, values, count, indices, lengths); + writeState = new WriteState { Count = count, Indices = indices, Lengths = lengths, ArrayPool = null, Data = default, AnyWriteState = false }; + } + else + { + var arrayPool = ArrayPool<(Size, object?)>.Shared; + var data = ArrayPool<(Size, object?)>.Shared.Rent(count); + elemsSize = GetElemsSize(values, data, out var elemStateDisposable, context.Format, count, indices, lengths); + writeState = new WriteState + { Count = count, Indices = indices, Lengths = lengths, + ArrayPool = arrayPool, Data = new(data, 0, count), AnyWriteState = elemStateDisposable }; + } + + return formatSize.Combine(elemsSize); + } + + sealed class WriteState : MultiWriteState + { + public required int Count { get; init; } + public required int[] Indices { get; init; } + public required int[]? Lengths { get; init; } + } + + public async ValueTask Read(bool async, PgReader reader, CancellationToken cancellationToken = default) + { + var dimensions = reader.ReadInt32(); + var containsNulls = reader.ReadInt32() is 1; + _ = reader.ReadUInt32(); // Element OID. + + if (dimensions is not 0 && _expectedDimensions is not null && dimensions != _expectedDimensions) + ThrowHelper.ThrowInvalidCastException( + $"Cannot read an array value with {dimensions} dimension{(dimensions == 1 ? "" : "s")} into a " + + $"collection type with {_expectedDimensions} dimension{(_expectedDimensions == 1 ? "" : "s")}. 
" + + $"Call GetValue or a version of GetFieldValue with the commas being the expected amount of dimensions."); + + if (containsNulls && !ElemTypeDbNullable) + ThrowHelper.ThrowInvalidCastException(ReadNonNullableCollectionWithNullsExceptionMessage); + + // Make sure we can read length + lower bound N dimension times. + if (reader.ShouldBuffer((sizeof(int) + sizeof(int)) * dimensions)) + await reader.Buffer(async, (sizeof(int) + sizeof(int)) * dimensions, cancellationToken).ConfigureAwait(false); + + var dimLengths = new int[_expectedDimensions ?? dimensions]; + var lastDimLength = 0; + for (var i = 0; i < dimensions; i++) + { + lastDimLength = reader.ReadInt32(); + reader.ReadInt32(); // Lower bound + if (dimLengths.Length is 0) + break; + dimLengths[i] = lastDimLength; + } + + var collection = _elemOps.CreateCollection(dimLengths); + Debug.Assert(dimensions <= 1 || collection is Array a && a.Rank == dimensions); + + if (dimensions is 0 || lastDimLength is 0) + return collection; + + int[] indices; + // Reuse array for dim <= 1 + if (dimensions == 1) + { + dimLengths[0] = 0; + indices = dimLengths; + } + else + indices = new int[dimensions]; + do + { + if (reader.ShouldBuffer(sizeof(int))) + await reader.Buffer(async, sizeof(int), cancellationToken).ConfigureAwait(false); + + var length = reader.ReadInt32(); + var isDbNull = length == -1; + if (!isDbNull) + { + var scope = await reader.BeginNestedRead(async, length, _bufferRequirements.Read, cancellationToken).ConfigureAwait(false); + try + { + await _elemOps.Read(async, reader, isDbNull, collection, indices, cancellationToken).ConfigureAwait(false); + } + finally + { + if (async) + await scope.DisposeAsync().ConfigureAwait(false); + else + scope.Dispose(); + } + } + else + await _elemOps.Read(async, reader, isDbNull, collection, indices, cancellationToken).ConfigureAwait(false); + } + // We can immediately continue if we didn't reach the end of the last dimension. 
+ while (++indices[indices.Length - 1] < lastDimLength || (dimensions > 1 && CarryIndices(dimLengths, indices))); + + return collection; + } + + static bool CarryIndices(int[] lengths, int[] indices) + { + Debug.Assert(lengths.Length > 1); + + // Find the first dimension from the end that isn't at or past its length, increment it and bring all previous dimensions to zero. + for (var dim = indices.Length - 1; dim >= 0; dim--) + { + if (indices[dim] >= lengths[dim] - 1) + continue; + + indices.AsSpan().Slice(dim + 1).Clear(); + indices[dim]++; + return true; + } + + // We're done if we can't find any dimension that isn't at its length. + return false; + } + + public async ValueTask Write(bool async, PgWriter writer, object values, CancellationToken cancellationToken) + { + var (count, dims, state) = writer.Current.WriteState switch + { + WriteState writeState => (writeState.Count, writeState.Lengths?.Length ?? 1 , writeState), + null => (0, values is Array a ? a.Rank : 1, null), + _ => throw new InvalidCastException($"Invalid write state, expected {typeof(WriteState).FullName}.") + }; + + if (writer.ShouldFlush(GetFormatSize(count, dims))) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + writer.WriteInt32(dims); // Dimensions + writer.WriteInt32(0); // Flags (not really used) + writer.WriteAsOid(_elemTypeId); + for (var dim = 0; dim < dims; dim++) + { + writer.WriteInt32(state?.Lengths?[dim] ?? count); + writer.WriteInt32(_pgLowerBound); // Lower bound + } + + // We can stop here for empty collections. + if (state is null) + return; + + var elemTypeDbNullable = ElemTypeDbNullable; + var elemData = state.Data.Array; + + var indices = state.Indices; + Array.Clear(indices, 0 , indices.Length); + var lastLength = state.Lengths?[state.Lengths.Length - 1] ?? 
state.Count; + var i = state.Data.Offset; + do + { + if (writer.ShouldFlush(sizeof(int))) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + var elem = elemData?[i++]; + var size = elem?.Size ?? (elemTypeDbNullable && IsDbNull(values, indices) ? -1 : _bufferRequirements.Write); + if (size.Kind is SizeKind.Unknown) + throw new NotImplementedException(); + + var length = size.Value; + writer.WriteInt32(length); + if (length != -1) + { + using var _ = await writer.BeginNestedWrite(async, _bufferRequirements.Write, length, elem?.WriteState, cancellationToken).ConfigureAwait(false); + await _elemOps.Write(async, writer, values, indices, cancellationToken).ConfigureAwait(false); + } + } + // We can immediately continue if we didn't reach the end of the last dimension. + while (++indices[indices.Length - 1] < lastLength || (indices.Length > 1 && CarryIndices(state.Lengths!, indices))); + } +} + +// Class constraint exists to make Unsafe.As, ValueTask> safe, don't remove unless that unsafe cast is also removed. +abstract class ArrayConverter : PgStreamingConverter where T : class +{ + protected PgConverterResolution ElemResolution { get; } + protected Type ElemTypeToConvert { get; } + + readonly PgArrayConverter _pgArrayConverter; + + private protected ArrayConverter(int? 
expectedDimensions, PgConverterResolution elemResolution, int pgLowerBound = 1) + { + if (!elemResolution.Converter.CanConvert(DataFormat.Binary, out var bufferRequirements)) + throw new NotSupportedException("Element converter has to support the binary format to be compatible."); + + ElemResolution = elemResolution; + ElemTypeToConvert = elemResolution.Converter.TypeToConvert; + _pgArrayConverter = new((IElementOperations)this, elemResolution.Converter.IsDbNullable, expectedDimensions, + bufferRequirements, elemResolution.PgTypeId, pgLowerBound); + } + + public override T Read(PgReader reader) => (T)_pgArrayConverter.Read(async: false, reader).Result; + + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) +#pragma warning disable CS9193 + => Unsafe.As, ValueTask>(ref Unsafe.AsRef(_pgArrayConverter.Read(async: true, reader, cancellationToken))); +#pragma warning restore + + public override Size GetSize(SizeContext context, T values, ref object? writeState) + => _pgArrayConverter.GetSize(context, values, ref writeState); + + public override void Write(PgWriter writer, T values) + => _pgArrayConverter.Write(async: false, writer, values, CancellationToken.None).GetAwaiter().GetResult(); + + public override ValueTask WriteAsync(PgWriter writer, T values, CancellationToken cancellationToken = default) + => _pgArrayConverter.Write(async: true, writer, values, cancellationToken); + + // Using a function pointer here is safe against assembly unloading as the instance reference that the static pointer method lives on is passed along. + // As such the instance cannot be collected by the gc which means the entire assembly is prevented from unloading until we're done. + // The alternatives are: + // 1. Add a virtual method and make AwaitTask call into it (bloating the vtable of all derived types). + // 2. 
Using a delegate, meaning we add a static field + an alloc per T + metadata, slightly slower dispatch perf so overall strictly worse as well. +#if NET6_0_OR_GREATER + [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder))] +#endif + private protected static async ValueTask AwaitTask(Task task, Continuation continuation, object collection, int[] indices) + { + await task.ConfigureAwait(false); + continuation.Invoke(task, collection, indices); + // Guarantee the type stays loaded until the function pointer call is done. + GC.KeepAlive(continuation.Handle); + } + + // Split out into a struct as unsafe and async don't mix, while we do want a nicely typed function pointer signature to prevent mistakes. + protected readonly unsafe struct Continuation + { + public object Handle { get; } + readonly delegate* _continuation; + + /// A reference to the type that houses the static method points to. + /// The continuation + public Continuation(object handle, delegate* continuation) + { + Handle = handle; + _continuation = continuation; + } + + public void Invoke(Task task, object collection, int[] indices) => _continuation(task, collection, indices); + } + + protected static int[]? GetLengths(Array array) + { + if (array.Rank == 1) + return null; + + var lengths = new int[array.Rank]; + for (var i = 0; i < lengths.Length; i++) + lengths[i] = array.GetLength(i); + + return lengths; + } +} + +sealed class ArrayBasedArrayConverter : ArrayConverter, IElementOperations where T : class, IList +{ + readonly PgConverter _elemConverter; + + public ArrayBasedArrayConverter(PgConverterResolution elemResolution, Type? effectiveType = null, int pgLowerBound = 1) + : base( + expectedDimensions: effectiveType is null ? 1 : effectiveType.IsArray ? effectiveType.GetArrayRank() : null, + elemResolution, pgLowerBound) + => _elemConverter = elemResolution.GetConverter(); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + static TElement? 
GetValue(object collection, int[] indices) + { + switch (indices.Length) + { + case 1: + Debug.Assert(collection is TElement?[]); + return Unsafe.As(collection)[indices[0]]; + default: + Debug.Assert(collection is Array); + return (TElement?)Unsafe.As(collection).GetValue(indices); + } + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + static void SetValue(object collection, int[] indices, TElement? value) + { + switch (indices.Length) + { + case 1: + Debug.Assert(collection is TElement?[]); + Unsafe.As(collection)[indices[0]] = value; + break; + default: + Debug.Assert(collection is Array); + Unsafe.As(collection).SetValue(value, indices); + break; + } + } + + object IElementOperations.CreateCollection(int[] lengths) + => lengths.Length switch + { + 0 => Array.Empty(), + 1 when lengths[0] == 0 => Array.Empty(), + 1 => new TElement?[lengths[0]], + 2 => new TElement?[lengths[0],lengths[1]], + 3 => new TElement?[lengths[0],lengths[1], lengths[2]], + 4 => new TElement?[lengths[0],lengths[1], lengths[2], lengths[3]], + 5 => new TElement?[lengths[0],lengths[1], lengths[2], lengths[3], lengths[4]], + 6 => new TElement?[lengths[0],lengths[1], lengths[2], lengths[3], lengths[4], lengths[5]], + 7 => new TElement?[lengths[0],lengths[1], lengths[2], lengths[3], lengths[4], lengths[5], lengths[6]], + 8 => new TElement?[lengths[0],lengths[1], lengths[2], lengths[3], lengths[4], lengths[5], lengths[6], lengths[7]], + _ => throw new InvalidOperationException("Postgres arrays can have at most 8 dimensions.") + }; + + int IElementOperations.GetCollectionCount(object collection, out int[]? lengths) + { + Debug.Assert(collection is Array); + var array = Unsafe.As(collection); + lengths = GetLengths(array); + return array.Length; + } + + Size? IElementOperations.GetSizeOrDbNull(SizeContext context, object collection, int[] indices, ref object? 
writeState) + => _elemConverter.GetSizeOrDbNull(context.Format, context.BufferRequirement, GetValue(collection, indices), ref writeState); + + unsafe ValueTask IElementOperations.Read(bool async, PgReader reader, bool isDbNull, object collection, int[] indices, CancellationToken cancellationToken) + { + TElement? result; + if (isDbNull) + result = default; + else if (!async) + result = _elemConverter.Read(reader); + else + { + var task = _elemConverter.ReadAsync(reader, cancellationToken); + if (!task.IsCompletedSuccessfully) + return AwaitTask(task.AsTask(), new(this, &SetResult), collection, indices); + + result = task.Result; + } + + SetValue(collection, indices, result); + return new(); + + // Using .Result on ValueTask is equivalent to GetAwaiter().GetResult(), this removes TaskAwaiter rooting. + static void SetResult(Task task, object collection, int[] indices) + { + Debug.Assert(task is Task); + SetValue(collection, indices, new ValueTask(Unsafe.As>(task)).Result); + } + } + + ValueTask IElementOperations.Write(bool async, PgWriter writer, object collection, int[] indices, CancellationToken cancellationToken) + { + if (async) + return _elemConverter.WriteAsync(writer, GetValue(collection, indices)!, cancellationToken); + + _elemConverter.Write(writer, GetValue(collection, indices)!); + return new(); + } +} + +sealed class ListBasedArrayConverter : ArrayConverter, IElementOperations where T : class, IList +{ + readonly PgConverter _elemConverter; + + public ListBasedArrayConverter(PgConverterResolution elemResolution, int pgLowerBound = 1) + : base(expectedDimensions: 1, elemResolution, pgLowerBound) + => _elemConverter = elemResolution.GetConverter(); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + static TElement? 
GetValue(object collection, int index) + { + Debug.Assert(collection is List); + return Unsafe.As>(collection)[index]; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + static void SetValue(object collection, int index, TElement? value) + { + Debug.Assert(collection is List); + var list = Unsafe.As>(collection); + list.Insert(index, value); + } + + object IElementOperations.CreateCollection(int[] lengths) + => new List(lengths.Length is 0 ? 0 : lengths[0]); + + int IElementOperations.GetCollectionCount(object collection, out int[]? lengths) + { + Debug.Assert(collection is List); + lengths = null; + return Unsafe.As>(collection).Count; + } + + Size? IElementOperations.GetSizeOrDbNull(SizeContext context, object collection, int[] indices, ref object? writeState) + => _elemConverter.GetSizeOrDbNull(context.Format, context.BufferRequirement, GetValue(collection, indices[0]), ref writeState); + + unsafe ValueTask IElementOperations.Read(bool async, PgReader reader, bool isDbNull, object collection, int[] indices, CancellationToken cancellationToken) + { + Debug.Assert(indices.Length is 1); + TElement? result; + if (isDbNull) + result = default; + else if (!async) + result = _elemConverter.Read(reader); + else + { + var task = _elemConverter.ReadAsync(reader, cancellationToken); + if (!task.IsCompletedSuccessfully) + return AwaitTask(task.AsTask(), new(this, &SetResult), collection, indices); + + result = task.Result; + } + + SetValue(collection, indices[0], result); + return new(); + + // Using .Result on ValueTask is equivalent to GetAwaiter().GetResult(), this removes TaskAwaiter rooting. 
+ static void SetResult(Task task, object collection, int[] indices) + { + Debug.Assert(task is Task); + SetValue(collection, indices[0], new ValueTask(Unsafe.As>(task)).Result); + } + } + + ValueTask IElementOperations.Write(bool async, PgWriter writer, object collection, int[] indices, CancellationToken cancellationToken) + { + Debug.Assert(indices.Length is 1); + if (async) + return _elemConverter.WriteAsync(writer, GetValue(collection, indices[0])!, cancellationToken); + + _elemConverter.Write(writer, GetValue(collection, indices[0])!); + return new(); + } +} + +sealed class ArrayConverterResolver : PgComposingConverterResolver where T : class, IList +{ + readonly Type _effectiveType; + + public ArrayConverterResolver(PgResolverTypeInfo elementTypeInfo, Type effectiveType) + : base(elementTypeInfo.PgTypeId is { } id ? elementTypeInfo.Options.GetArrayTypeId(id) : null, elementTypeInfo) + => _effectiveType = effectiveType; + + PgSerializerOptions Options => EffectiveTypeInfo.Options; + + protected override PgTypeId GetEffectivePgTypeId(PgTypeId pgTypeId) => Options.GetArrayElementTypeId(pgTypeId); + protected override PgTypeId GetPgTypeId(PgTypeId effectivePgTypeId) => Options.GetArrayTypeId(effectivePgTypeId); + + protected override PgConverter CreateConverter(PgConverterResolution effectiveResolution) + => typeof(T).IsConstructedGenericType && typeof(T).GetGenericTypeDefinition() == typeof(List<>) + ? new ListBasedArrayConverter(effectiveResolution) + : new ArrayBasedArrayConverter(effectiveResolution, _effectiveType); + + protected override PgConverterResolution? GetEffectiveResolution(T? values, PgTypeId? expectedEffectivePgTypeId) + { + PgConverterResolution? resolution = null; + if (values is null) + { + resolution = EffectiveTypeInfo.GetDefaultResolution(expectedEffectivePgTypeId); + } + else + { + switch (values) + { + case TElement[] array: + foreach (var value in array) + { + var result = EffectiveTypeInfo.GetResolution(value, resolution?.PgTypeId ?? 
expectedEffectivePgTypeId); + resolution ??= result; + } + break; + case IList list: + foreach (var value in list) + { + var result = EffectiveTypeInfo.GetResolution(value, resolution?.PgTypeId ?? expectedEffectivePgTypeId); + resolution ??= result; + } + break; + default: + foreach (var value in values) + { + var result = EffectiveTypeInfo.GetResolutionAsObject(value, resolution?.PgTypeId ?? expectedEffectivePgTypeId); + resolution ??= result; + } + break; + } + } + + return resolution; + } +} + +// T is Array as we only know what type it will be after reading 'contains nulls'. +sealed class PolymorphicArrayConverter : PgStreamingConverter +{ + readonly PgConverter _structElementCollectionConverter; + readonly PgConverter _nullableElementCollectionConverter; + + public PolymorphicArrayConverter(PgConverter structElementCollectionConverter, PgConverter nullableElementCollectionConverter) + { + _structElementCollectionConverter = structElementCollectionConverter; + _nullableElementCollectionConverter = nullableElementCollectionConverter; + } + + public override TBase Read(PgReader reader) + { + _ = reader.ReadInt32(); + var containsNulls = reader.ReadInt32() is 1; + reader.Rewind(sizeof(int) + sizeof(int)); + return containsNulls + ? _nullableElementCollectionConverter.Read(reader) + : _structElementCollectionConverter.Read(reader); + } + + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + { + _ = reader.ReadInt32(); + var containsNulls = reader.ReadInt32() is 1; + reader.Rewind(sizeof(int) + sizeof(int)); + return containsNulls + ? _nullableElementCollectionConverter.ReadAsync(reader, cancellationToken) + : _structElementCollectionConverter.ReadAsync(reader, cancellationToken); + } + + public override Size GetSize(SizeContext context, TBase value, ref object? 
writeState) + => throw new NotSupportedException("Polymorphic writing is not supported"); + + public override void Write(PgWriter writer, TBase value) + => throw new NotSupportedException("Polymorphic writing is not supported"); + + public override ValueTask WriteAsync(PgWriter writer, TBase value, CancellationToken cancellationToken = default) + => throw new NotSupportedException("Polymorphic writing is not supported"); +} + +sealed class PolymorphicArrayConverterResolver : PolymorphicConverterResolver +{ + readonly PgResolverTypeInfo _effectiveInfo; + readonly PgResolverTypeInfo _effectiveNullableInfo; + readonly ConcurrentDictionary _converterCache = new(ReferenceEqualityComparer.Instance); + + public PolymorphicArrayConverterResolver(PgResolverTypeInfo effectiveInfo, PgResolverTypeInfo effectiveNullableInfo) + : base(effectiveInfo.PgTypeId!.Value) + { + if (effectiveInfo.PgTypeId is null || effectiveNullableInfo.PgTypeId is null) + throw new InvalidOperationException("Cannot accept undecided infos"); + + _effectiveInfo = effectiveInfo; + _effectiveNullableInfo = effectiveNullableInfo; + } + + protected override PgConverter Get(Field? maybeField) + { + var structResolution = maybeField is { } field + ? _effectiveInfo.GetResolution(field) + : _effectiveInfo.GetDefaultResolution(PgTypeId); + var nullableResolution = maybeField is { } field2 + ? 
_effectiveNullableInfo.GetResolution(field2) + : _effectiveNullableInfo.GetDefaultResolution(PgTypeId); + + (PgConverter StructConverter, PgConverter NullableConverter) state = (structResolution.Converter, nullableResolution.Converter); + return _converterCache.GetOrAdd(structResolution.Converter, + static (_, state) => new PolymorphicArrayConverter((PgConverter)state.StructConverter, (PgConverter)state.NullableConverter), + state); + } +} diff --git a/src/Npgsql/Internal/Converters/AsyncHelpers.cs b/src/Npgsql/Internal/Converters/AsyncHelpers.cs new file mode 100644 index 0000000000..339378fdd7 --- /dev/null +++ b/src/Npgsql/Internal/Converters/AsyncHelpers.cs @@ -0,0 +1,114 @@ +using System; +using System.Diagnostics; +using System.Runtime.CompilerServices; +using System.Threading; +using System.Threading.Tasks; + +namespace Npgsql.Internal.Converters; + +// Using a function pointer here is safe against assembly unloading as the instance reference that the static pointer method lives on is passed along. +// As such the instance cannot be collected by the gc which means the entire assembly is prevented from unloading until we're done. +static class AsyncHelpers +{ + static async void AwaitTask(Task task, CompletionSource tcs, Continuation continuation) + { + try + { + await task.ConfigureAwait(false); + continuation.Invoke(task, tcs); + } + catch (Exception ex) + { + tcs.SetException(ex); + } + // Guarantee the type stays loaded until the function pointer call is done. 
+ GC.KeepAlive(continuation.Handle); + } + + abstract class CompletionSource + { + public abstract void SetException(Exception exception); + } + + sealed class CompletionSource : CompletionSource + { +#if NETSTANDARD + AsyncValueTaskMethodBuilder _amb = AsyncValueTaskMethodBuilder.Create(); +#else + PoolingAsyncValueTaskMethodBuilder _amb = PoolingAsyncValueTaskMethodBuilder.Create(); +#endif + public ValueTask Task => _amb.Task; + + public void SetResult(T value) + => _amb.SetResult(value); + + public override void SetException(Exception exception) + => _amb.SetException(exception); + } + + // Split out into a struct as unsafe and async don't mix, while we do want a nicely typed function pointer signature to prevent mistakes. + readonly unsafe struct Continuation + { + public object Handle { get; } + readonly delegate* _continuation; + + /// A reference to the type that houses the static method points to. + /// The continuation + public Continuation(object handle, delegate* continuation) + { + Handle = handle; + _continuation = continuation; + } + + public void Invoke(Task task, CompletionSource tcs) => _continuation(task, tcs); + } + + public static unsafe ValueTask ComposingReadAsync(this PgConverter instance, PgConverter effectiveConverter, PgReader reader, CancellationToken cancellationToken) + { + if (!typeof(T).IsValueType && !typeof(TEffective).IsValueType) +#pragma warning disable CS9193 + return Unsafe.As, ValueTask>(ref Unsafe.AsRef(effectiveConverter.ReadAsync(reader, cancellationToken))); +#pragma warning restore + // Easy if we have all the data. + var task = effectiveConverter.ReadAsync(reader, cancellationToken); + if (task.IsCompletedSuccessfully) + return new((T)(object)task.Result!); + + // Otherwise we do one additional allocation, this allow us to share state machine codegen for all Ts. 
+ var source = new CompletionSource(); + AwaitTask(task.AsTask(), source, new(instance, &UnboxAndComplete)); + return source.Task; + + static void UnboxAndComplete(Task task, CompletionSource completionSource) + { + Debug.Assert(task is Task); + Debug.Assert(completionSource is CompletionSource); + Unsafe.As>(completionSource).SetResult(new ValueTask(Unsafe.As>(task)).Result); + } + } + + public static unsafe ValueTask ComposingReadAsObjectAsync(this PgConverter instance, PgConverter effectiveConverter, PgReader reader, CancellationToken cancellationToken) + { + if (!typeof(T).IsValueType) +#pragma warning disable CS9193 + return Unsafe.As, ValueTask>(ref Unsafe.AsRef(effectiveConverter.ReadAsObjectAsync(reader, cancellationToken))); +#pragma warning restore + + // Easy if we have all the data. + var task = effectiveConverter.ReadAsObjectAsync(reader, cancellationToken); + if (task.IsCompletedSuccessfully) + return new((T)task.Result); + + // Otherwise we do one additional allocation, this allow us to share state machine codegen for all Ts. 
+ var source = new CompletionSource(); + AwaitTask(task.AsTask(), source, new(instance, &UnboxAndComplete)); + return source.Task; + + static void UnboxAndComplete(Task task, CompletionSource completionSource) + { + Debug.Assert(task is Task); + Debug.Assert(completionSource is CompletionSource); + Unsafe.As>(completionSource).SetResult((T)new ValueTask(Unsafe.As>(task)).Result); + } + } +} diff --git a/src/Npgsql/Internal/Converters/BitStringConverters.cs b/src/Npgsql/Internal/Converters/BitStringConverters.cs new file mode 100644 index 0000000000..b7597f96d9 --- /dev/null +++ b/src/Npgsql/Internal/Converters/BitStringConverters.cs @@ -0,0 +1,249 @@ +using System; +using System.Buffers; +using System.Collections; +using System.Collections.Specialized; +using System.Diagnostics; +using System.Text; +using System.Threading; +using System.Threading.Tasks; +using Npgsql.Internal.Postgres; +using static Npgsql.Internal.Converters.BitStringHelpers; + +namespace Npgsql.Internal.Converters; + +static class BitStringHelpers +{ + public static int GetByteLengthFromBits(int n) + { + const int BitShiftPerByte = 3; + Debug.Assert(n >= 0); + // Due to sign extension, we don't need to special case for n == 0, since ((n - 1) >> 3) + 1 = 0 + // This doesn't hold true for ((n - 1) / 8) + 1, which equals 1. 
+ return (int)((uint)(n - 1 + (1 << BitShiftPerByte)) >> BitShiftPerByte); + } + + // http://graphics.stanford.edu/~seander/bithacks.html#ReverseByteWith64Bits + public static byte ReverseBits(byte b) => (byte)(((b * 0x80200802UL) & 0x0884422110UL) * 0x0101010101UL >> 32); +} + +sealed class BitArrayBitStringConverter : PgStreamingConverter +{ + public override BitArray Read(PgReader reader) + { + if (reader.ShouldBuffer(sizeof(int))) + reader.Buffer(sizeof(int)); + + var bits = reader.ReadInt32(); + var bytes = new byte[GetByteLengthFromBits(bits)]; + reader.ReadBytes(bytes); + return ReadValue(bytes, bits); + } + public override async ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + { + if (reader.ShouldBuffer(sizeof(int))) + await reader.BufferAsync(sizeof(int), cancellationToken).ConfigureAwait(false); + + var bits = reader.ReadInt32(); + var bytes = new byte[GetByteLengthFromBits(bits)]; + await reader.ReadBytesAsync(bytes, cancellationToken).ConfigureAwait(false); + return ReadValue(bytes, bits); + } + + internal static BitArray ReadValue(byte[] bytes, int bits) + { + for (var i = 0; i < bytes.Length; i++) + { + ref var b = ref bytes[i]; + b = ReverseBits(b); + } + + return new(bytes) { Length = bits }; + } + + public override Size GetSize(SizeContext context, BitArray value, ref object? 
writeState) + => sizeof(int) + GetByteLengthFromBits(value.Length); + + public override void Write(PgWriter writer, BitArray value) + => Write(async: false, writer, value, CancellationToken.None).GetAwaiter().GetResult(); + public override ValueTask WriteAsync(PgWriter writer, BitArray value, CancellationToken cancellationToken = default) + => Write(async: true, writer, value, cancellationToken); + + async ValueTask Write(bool async, PgWriter writer, BitArray value, CancellationToken cancellationToken = default) + { + var byteCount = writer.Current.Size.Value - sizeof(int); + var array = ArrayPool.Shared.Rent(byteCount); + for (var pos = 0; pos < byteCount; pos++) + { + var bitPos = pos*8; + var bits = Math.Min(8, value.Length - bitPos); + var b = 0; + for (var i = 0; i < bits; i++) + b += (value[bitPos + i] ? 1 : 0) << (8 - i - 1); + array[pos] = (byte)b; + } + + if (writer.ShouldFlush(sizeof(int))) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + writer.WriteInt32(value.Length); + if (async) + await writer.WriteBytesAsync(new ReadOnlyMemory(array, 0, byteCount), cancellationToken).ConfigureAwait(false); + else + writer.WriteBytes(new ReadOnlySpan(array, 0, byteCount)); + + ArrayPool.Shared.Return(array); + } +} + +sealed class BitVector32BitStringConverter : PgBufferedConverter +{ + static int MaxSize => sizeof(int) + sizeof(int); + + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.Create(Size.CreateUpperBound(MaxSize)); + return format is DataFormat.Binary; + } + + protected override BitVector32 ReadCore(PgReader reader) + { + if (reader.CurrentRemaining > sizeof(int) + sizeof(int)) + throw new InvalidCastException("Can't read a BIT(N) with more than 32 bits to BitVector32, only up to BIT(32)."); + + var bits = reader.ReadInt32(); + return GetByteLengthFromBits(bits) switch + { + 4 => new(reader.ReadInt32()), + 3 => new((reader.ReadInt16() << 8) + 
reader.ReadByte()), + 2 => new(reader.ReadInt16() << 16), + 1 => new(reader.ReadByte() << 24), + _ => new(0) + }; + } + + public override Size GetSize(SizeContext context, BitVector32 value, ref object? writeState) + => value.Data is 0 ? 4 : MaxSize; + + protected override void WriteCore(PgWriter writer, BitVector32 value) + { + if (value.Data == 0) + writer.WriteInt32(0); + else + { + writer.WriteInt32(32); + writer.WriteInt32(value.Data); + } + } +} + +sealed class BoolBitStringConverter : PgBufferedConverter +{ + static int MaxSize => sizeof(int) + sizeof(byte); + + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.Create(read: Size.CreateUpperBound(MaxSize), write: MaxSize); + return format is DataFormat.Binary; + } + + protected override bool ReadCore(PgReader reader) + { + var bits = reader.ReadInt32(); + return bits switch + { + > 1 => throw new InvalidCastException("Can't read a BIT(N) type to bool, only BIT(1)."), + // We make an accommodation for varbit with no data. + 0 => false, + _ => (reader.ReadByte() & 128) is not 0 + }; + } + + public override Size GetSize(SizeContext context, bool value, ref object? writeState) => MaxSize; + protected override void WriteCore(PgWriter writer, bool value) + { + writer.WriteInt32(1); + writer.WriteByte(value ? 
(byte)128 : (byte)0); + } +} + +sealed class StringBitStringConverter : PgStreamingConverter +{ + public override string Read(PgReader reader) + => Read(async: false, reader, CancellationToken.None).GetAwaiter().GetResult(); + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => Read(async: true, reader, cancellationToken); + + async ValueTask Read(bool async, PgReader reader, CancellationToken cancellationToken) + { + if (reader.ShouldBuffer(sizeof(int))) + await reader.Buffer(async, sizeof(int), cancellationToken).ConfigureAwait(false); + + var bits = reader.ReadInt32(); + var bytes = new byte[GetByteLengthFromBits(bits)]; + if (async) + await reader.ReadBytesAsync(bytes, cancellationToken).ConfigureAwait(false); + else + reader.ReadBytes(bytes); + + var bitArray = BitArrayBitStringConverter.ReadValue(bytes, bits); + var sb = new StringBuilder(bits); + for (var i = 0; i < bitArray.Count; i++) + sb.Append(bitArray[i] ? '1' : '0'); + + return sb.ToString(); + } + + public override Size GetSize(SizeContext context, string value, ref object? 
writeState) + { + if (value.AsSpan().IndexOfAnyExcept('0', '1') is not -1 and var index) + throw new ArgumentException($"Invalid bitstring character '{value[index]}' at index: {index}", nameof(value)); + + return sizeof(int) + GetByteLengthFromBits(value.Length); + } + + public override void Write(PgWriter writer, string value) + => Write(async: false, writer, value, CancellationToken.None).GetAwaiter().GetResult(); + public override ValueTask WriteAsync(PgWriter writer, string value, CancellationToken cancellationToken = default) + => Write(async: true, writer, value, cancellationToken); + + async ValueTask Write(bool async, PgWriter writer, string value, CancellationToken cancellationToken) + { + var byteCount = writer.Current.Size.Value - sizeof(int); + var array = ArrayPool.Shared.Rent(byteCount); + for (var pos = 0; pos < byteCount; pos++) + { + var bitPos = pos*8; + var bits = Math.Min(8, value.Length - bitPos); + var b = 0; + for (var i = 0; i < bits; i++) + b += (value[bitPos + i] == '1' ? 1 : 0) << (8 - i - 1); + array[pos] = (byte)b; + } + + if (writer.ShouldFlush(sizeof(int))) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + writer.WriteInt32(value.Length); + if (async) + await writer.WriteBytesAsync(new ReadOnlyMemory(array, 0, byteCount), cancellationToken).ConfigureAwait(false); + else + writer.WriteBytes(new ReadOnlySpan(array, 0, byteCount)); + + ArrayPool.Shared.Return(array); + } +} + +/// Note that for BIT(1), this resolver will return a bool by default, to align with SqlClient +/// (see discussion https://github.com/npgsql/npgsql/pull/362#issuecomment-59622101). +sealed class PolymorphicBitStringConverterResolver : PolymorphicConverterResolver +{ + BoolBitStringConverter? _boolConverter; + BitArrayBitStringConverter? _bitArrayConverter; + + public PolymorphicBitStringConverterResolver(PgTypeId bitString) : base(bitString) { } + + protected override PgConverter Get(Field? field) + => field?.TypeModifier is 1 + ? 
_boolConverter ??= new BoolBitStringConverter() + : _bitArrayConverter ??= new BitArrayBitStringConverter(); +} diff --git a/src/Npgsql/Internal/Converters/CastingConverter.cs b/src/Npgsql/Internal/Converters/CastingConverter.cs new file mode 100644 index 0000000000..f721d8d08e --- /dev/null +++ b/src/Npgsql/Internal/Converters/CastingConverter.cs @@ -0,0 +1,83 @@ +using System; +using System.Threading; +using System.Threading.Tasks; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Internal.Converters; + +/// A converter to map strongly typed apis onto boxed converter results to produce a strongly typed converter over T. +sealed class CastingConverter : PgConverter +{ + readonly PgConverter _effectiveConverter; + public CastingConverter(PgConverter effectiveConverter) + : base(effectiveConverter.DbNullPredicateKind is DbNullPredicate.Custom) + => _effectiveConverter = effectiveConverter; + + protected override bool IsDbNullValue(T? value) => _effectiveConverter.IsDbNullAsObject(value); + + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + => _effectiveConverter.CanConvert(format, out bufferRequirements); + + public override T Read(PgReader reader) => (T)_effectiveConverter.ReadAsObject(reader); + + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => this.ComposingReadAsObjectAsync(_effectiveConverter, reader, cancellationToken); + + public override Size GetSize(SizeContext context, T value, ref object? 
writeState) + => _effectiveConverter.GetSizeAsObject(context, value!, ref writeState); + + public override void Write(PgWriter writer, T value) + => _effectiveConverter.WriteAsObject(writer, value!); + + public override ValueTask WriteAsync(PgWriter writer, T value, CancellationToken cancellationToken = default) + => _effectiveConverter.WriteAsObjectAsync(writer, value!, cancellationToken); + + internal override ValueTask ReadAsObject(bool async, PgReader reader, CancellationToken cancellationToken) + => async + ? _effectiveConverter.ReadAsObjectAsync(reader, cancellationToken) + : new(_effectiveConverter.ReadAsObject(reader)); + + internal override ValueTask WriteAsObject(bool async, PgWriter writer, object value, CancellationToken cancellationToken) + { + if (async) + return _effectiveConverter.WriteAsObjectAsync(writer, value, cancellationToken); + + _effectiveConverter.WriteAsObject(writer, value); + return new(); + } +} + +// Given there aren't many instantiations of converter resolvers (and it's fairly involved to write a fast one) we use the composing base class. +sealed class CastingConverterResolver : PgComposingConverterResolver +{ + public CastingConverterResolver(PgResolverTypeInfo effectiveResolverTypeInfo) + : base(effectiveResolverTypeInfo.PgTypeId, effectiveResolverTypeInfo) { } + + protected override PgTypeId GetEffectivePgTypeId(PgTypeId pgTypeId) => pgTypeId; + protected override PgTypeId GetPgTypeId(PgTypeId effectivePgTypeId) => effectivePgTypeId; + + protected override PgConverter CreateConverter(PgConverterResolution effectiveResolution) + => new CastingConverter(effectiveResolution.Converter); + + protected override PgConverterResolution? GetEffectiveResolution(T? value, PgTypeId? 
static class CastingTypeInfoExtensions
{
    // Rewraps a boxing PgTypeInfo so values flow through the converter's declared CLR type
    // instead of object, by wrapping the converter (or resolver) in a casting shim built
    // reflectively for the concrete type.
    internal static PgTypeInfo ToNonBoxing(this PgTypeInfo typeInfo)
    {
        if (!typeInfo.IsBoxing)
            return typeInfo;

        var type = typeInfo.Type;
        if (typeInfo is PgResolverTypeInfo resolverTypeInfo)
            return new PgResolverTypeInfo(typeInfo.Options,
                (PgConverterResolver)Activator.CreateInstance(typeof(CastingConverterResolver<>).MakeGenericType(type),
                    resolverTypeInfo)!, typeInfo.PgTypeId);

        var resolution = typeInfo.GetConcreteResolution();
        return new PgTypeInfo(typeInfo.Options,
            (PgConverter)Activator.CreateInstance(typeof(CastingConverter<>).MakeGenericType(type), resolution.Converter)!, resolution.PgTypeId);
    }
}

/// <summary>
/// Reads and writes PostgreSQL composite (row) values as CLR type <typeparamref name="T"/>.
/// Binary wire format: int32 field count, then per field a uint32 oid, an int32 length
/// (-1 for NULL) and the field payload.
/// </summary>
sealed class CompositeConverter<T> : PgStreamingConverter<T> where T : notnull
{
    readonly CompositeInfo<T> _composite;
    readonly BufferRequirements _bufferRequirements;

    public CompositeConverter(CompositeInfo<T> composite)
    {
        _composite = composite;

        // Start with the fixed header: field count plus per-field oid and length prefix.
        var req = BufferRequirements.CreateFixedSize(sizeof(int) + _composite.Fields.Count * (sizeof(uint) + sizeof(int)));
        foreach (var field in _composite.Fields)
        {
            var readReq = field.BinaryReadRequirement;
            var writeReq = field.BinaryWriteRequirement;

            // A nullable field may be absent entirely, so we cannot depend on its buffer size
            // being fixed; demote both requirements to upper bounds.
            if (field.IsDbNullable)
            {
                readReq = readReq.Combine(Size.CreateUpperBound(0));
                // BUGFIX: previously combined readReq here, clobbering the write requirement
                // with the read-side one.
                writeReq = writeReq.Combine(Size.CreateUpperBound(0));
            }

            req = req.Combine(
                // If a read is Unknown (streaming) we can map it to zero as we just want a minimum buffered size.
                readReq is { Kind: SizeKind.Unknown } ? Size.Zero : readReq,
                // For writes Unknown means our size is dependent on the value so we can't ignore it.
                writeReq);
        }

        // We have to put a limit on the requirements we report otherwise smaller buffer sizes won't work.
        req = BufferRequirements.Create(Limit(req.Read), Limit(req.Write));

        _bufferRequirements = req;

        // Clamps an exact/upper-bound requirement to maxByteCount; Unknown stays Unknown.
        Size Limit(Size requirement)
        {
            const int maxByteCount = 1024;
            return requirement switch
            {
                { Kind: SizeKind.UpperBound } => Size.CreateUpperBound(Math.Min(maxByteCount, requirement.Value)),
                { Kind: SizeKind.Exact } => Size.Create(Math.Min(maxByteCount, requirement.Value)),
                _ => Size.Unknown
            };
        }
    }

    public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements)
    {
        bufferRequirements = _bufferRequirements;
        return format is DataFormat.Binary;
    }

    public override T Read(PgReader reader)
        => Read(async: false, reader, CancellationToken.None).GetAwaiter().GetResult();

    public override ValueTask<T> ReadAsync(PgReader reader, CancellationToken cancellationToken = default)
        => Read(async: true, reader, cancellationToken);

    // Shared sync/async implementation; `async` selects buffered sync reads vs awaited ones.
    async ValueTask<T> Read(bool async, PgReader reader, CancellationToken cancellationToken)
    {
        // TODO we can make a nice thread-static cache for this.
        using var builder = new CompositeBuilder<T>(_composite);

        // BUGFIX: buffer the header before reading it; previously ReadInt32 ran before the
        // ShouldBuffer check and could read past the buffered data.
        if (reader.ShouldBuffer(sizeof(int)))
            await reader.Buffer(async, sizeof(int), cancellationToken).ConfigureAwait(false);
        var count = reader.ReadInt32();
        if (count != _composite.Fields.Count)
            throw new InvalidOperationException("Cannot read composite type with mismatched number of fields");

        foreach (var field in _composite.Fields)
        {
            if (reader.ShouldBuffer(sizeof(uint) + sizeof(int)))
                await reader.Buffer(async, sizeof(uint) + sizeof(int), cancellationToken).ConfigureAwait(false);

            var oid = reader.ReadUInt32();
            var length = reader.ReadInt32();

            // We're only requiring the PgTypeIds to be oids if this converter is actually used during execution.
            // As a result we can still introspect in the global mapper and create all the info with portable ids.
            if (oid != field.PgTypeId.Oid)
                // We could remove this requirement by storing a dictionary of CompositeInfos keyed by backend.
                throw new InvalidCastException(
                    $"Cannot read oid {oid} into composite field {field.Name} with oid {field.PgTypeId}. " +
                    $"This could be caused by a DDL change after this DataSource loaded its types, or a difference between column order of table composites between backends make sure these line up identically.");

            if (length is -1)
                field.ReadDbNull(builder);
            else
            {
                // Scope the nested read to the declared field length so over/under-reads are caught.
                var scope = await reader.BeginNestedRead(async, length, field.BinaryReadRequirement, cancellationToken).ConfigureAwait(false);
                try
                {
                    await field.Read(async, builder, reader, cancellationToken).ConfigureAwait(false);
                }
                finally
                {
                    if (async)
                        await scope.DisposeAsync().ConfigureAwait(false);
                    else
                        scope.Dispose();
                }
            }
        }

        return builder.Complete();
    }

    public override Size GetSize(SizeContext context, T value, ref object? writeState)
    {
        // Precompute every field's size (and any nested write state) once, so Write doesn't
        // have to redo the work; the rented array is returned by WriteState disposal.
        var arrayPool = ArrayPool<(Size Size, object? WriteState)>.Shared;
        var data = arrayPool.Rent(_composite.Fields.Count);

        var totalSize = Size.Create(sizeof(int) + _composite.Fields.Count * (sizeof(uint) + sizeof(int)));
        var boxedValue = (object)value;
        var anyWriteState = false;
        for (var i = 0; i < _composite.Fields.Count; i++)
        {
            var field = _composite.Fields[i];
            object? fieldState = null;
            var fieldSize = field.GetSizeOrDbNull(context.Format, boxedValue, ref fieldState);
            anyWriteState = anyWriteState || fieldState is not null;
            // -1 marks a NULL field; its payload contributes nothing to the total.
            data[i] = (fieldSize ?? -1, fieldState);
            totalSize = totalSize.Combine(fieldSize ?? 0);
        }

        writeState = new WriteState
        {
            ArrayPool = arrayPool,
            BoxedInstance = boxedValue,
            Data = new(data, 0, _composite.Fields.Count),
            AnyWriteState = anyWriteState
        };
        return totalSize;
    }

    public override void Write(PgWriter writer, T value)
        => Write(async: false, writer, value, CancellationToken.None).GetAwaiter().GetResult();

    public override ValueTask WriteAsync(PgWriter writer, T value, CancellationToken cancellationToken = default)
        => Write(async: true, writer, value, cancellationToken);

    async ValueTask Write(bool async, PgWriter writer, T value, CancellationToken cancellationToken)
    {
        if (writer.Current.WriteState is not null and not WriteState)
            throw new InvalidCastException($"Invalid write state, expected {typeof(WriteState).FullName}.");

        if (writer.ShouldFlush(sizeof(int)))
            await writer.Flush(async, cancellationToken).ConfigureAwait(false);

        writer.WriteInt32(_composite.Fields.Count);

        var writeState = writer.Current.WriteState as WriteState;
        var boxedInstance = writeState?.BoxedInstance ?? value!;
        var data = writeState?.Data.Array;
        for (var i = 0; i < _composite.Fields.Count; i++)
        {
            if (writer.ShouldFlush(sizeof(uint) + sizeof(int)))
                await writer.Flush(async, cancellationToken).ConfigureAwait(false);

            var field = _composite.Fields[i];
            writer.WriteAsOid(field.PgTypeId);

            // NOTE(review): the no-write-state fallback uses BinaryReadRequirement as the field
            // length — confirm this shouldn't be BinaryWriteRequirement (it only works when
            // both are the same fixed size).
            var (size, fieldState) = data?[i] ?? (field.IsDbNull(boxedInstance) ? -1 : field.BinaryReadRequirement, null);

            var length = size.Value;
            writer.WriteInt32(length);
            if (length != -1)
            {
                using var _ = await writer.BeginNestedWrite(async, _bufferRequirements.Write, length, fieldState, cancellationToken).ConfigureAwait(false);
                await field.Write(async, writer, boxedInstance, cancellationToken).ConfigureAwait(false);
            }
        }
    }

    // Carries the precomputed per-field sizes plus the boxed value between GetSize and Write.
    sealed class WriteState : MultiWriteState
    {
        public required object BoxedInstance { get; init; }
    }
}
value!; + var data = writeState?.Data.Array; + for (var i = 0; i < _composite.Fields.Count; i++) + { + if (writer.ShouldFlush(sizeof(uint) + sizeof(int))) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + var field = _composite.Fields[i]; + writer.WriteAsOid(field.PgTypeId); + + var (size, fieldState) = data?[i] ?? (field.IsDbNull(boxedInstance) ? -1 : field.BinaryReadRequirement, null); + + var length = size.Value; + writer.WriteInt32(length); + if (length != -1) + { + using var _ = await writer.BeginNestedWrite(async, _bufferRequirements.Write, length, fieldState, cancellationToken).ConfigureAwait(false); + await field.Write(async, writer, boxedInstance, cancellationToken).ConfigureAwait(false); + } + } + } + + sealed class WriteState : MultiWriteState + { + public required object BoxedInstance { get; init; } + } +} diff --git a/src/Npgsql/Internal/Converters/EnumConverter.cs b/src/Npgsql/Internal/Converters/EnumConverter.cs new file mode 100644 index 0000000000..12f85992f0 --- /dev/null +++ b/src/Npgsql/Internal/Converters/EnumConverter.cs @@ -0,0 +1,68 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; +using System.Text; + +namespace Npgsql.Internal.Converters; + +[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors)] +sealed class EnumConverter : PgBufferedConverter where TEnum : struct, Enum +{ + readonly Dictionary _enumToLabel; + readonly Dictionary _labelToEnum; + readonly Encoding _encoding; + + // Unmapped enums + public EnumConverter(Dictionary enumToLabel, Dictionary labelToEnum, Encoding encoding) + { + _enumToLabel = new(enumToLabel.Count); + foreach (var kv in enumToLabel) + _enumToLabel.Add((TEnum)kv.Key, kv.Value); + + _labelToEnum = new(labelToEnum.Count); + foreach (var kv in labelToEnum) + _labelToEnum.Add(kv.Key, (TEnum)kv.Value); + + _encoding = encoding; + } + + public EnumConverter(Dictionary enumToLabel, Dictionary labelToEnum, Encoding encoding) + 
{ + _enumToLabel = enumToLabel; + _labelToEnum = labelToEnum; + _encoding = encoding; + } + + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.Value; + return format is DataFormat.Binary or DataFormat.Text; + } + + public override Size GetSize(SizeContext context, TEnum value, ref object? writeState) + { + if (!_enumToLabel.TryGetValue(value, out var str)) + throw new InvalidCastException($"Can't write value {value} as enum {typeof(TEnum)}"); + + return _encoding.GetByteCount(str); + } + + protected override TEnum ReadCore(PgReader reader) + { + var str = _encoding.GetString(reader.ReadBytes(reader.CurrentRemaining)); + var success = _labelToEnum.TryGetValue(str, out var value); + + if (!success) + throw new InvalidCastException($"Received enum value '{str}' from database which wasn't found on enum {typeof(TEnum)}"); + + return value; + } + + protected override void WriteCore(PgWriter writer, TEnum value) + { + if (!_enumToLabel.TryGetValue(value, out var str)) + throw new InvalidCastException($"Can't write value {value} as enum {typeof(TEnum)}"); + + writer.WriteBytes(new ReadOnlySpan(_encoding.GetBytes(str))); + } +} diff --git a/src/Npgsql/Internal/Converters/FullTextSearch/TsQueryConverter.cs b/src/Npgsql/Internal/Converters/FullTextSearch/TsQueryConverter.cs new file mode 100644 index 0000000000..220cc88894 --- /dev/null +++ b/src/Npgsql/Internal/Converters/FullTextSearch/TsQueryConverter.cs @@ -0,0 +1,227 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Text; +using System.Threading; +using System.Threading.Tasks; +using NpgsqlTypes; +using static NpgsqlTypes.NpgsqlTsQuery.NodeKind; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class TsQueryConverter : PgStreamingConverter + where T : NpgsqlTsQuery +{ + readonly Encoding _encoding; + + public TsQueryConverter(Encoding encoding) + 
/// <summary>
/// Reads and writes <c>tsquery</c> values in the PostgreSQL binary format: an int32 token
/// count followed by tokens in prefix (operator-before-operand) order. Token type 1 is a
/// lexeme (weight byte, prefix byte, NUL-terminated text); token type 2 is an operator
/// (kind byte, plus an int16 distance for phrase operators).
/// </summary>
sealed class TsQueryConverter<T> : PgStreamingConverter<T>
    where T : NpgsqlTsQuery
{
    readonly Encoding _encoding;

    public TsQueryConverter(Encoding encoding)
        => _encoding = encoding;

    public override T Read(PgReader reader)
        => (T)Read(async: false, reader, CancellationToken.None).GetAwaiter().GetResult();

    public override async ValueTask<T> ReadAsync(PgReader reader, CancellationToken cancellationToken = default)
        => (T)await Read(async: true, reader, cancellationToken).ConfigureAwait(false);

    // Shared sync/async implementation; `async` selects buffered sync reads vs awaited ones.
    async ValueTask<NpgsqlTsQuery> Read(bool async, PgReader reader, CancellationToken cancellationToken)
    {
        if (reader.ShouldBuffer(sizeof(int)))
            await reader.Buffer(async, sizeof(int), cancellationToken).ConfigureAwait(false);
        var numTokens = reader.ReadInt32();
        if (numTokens == 0)
            return new NpgsqlTsQueryEmpty();

        // Tokens arrive in prefix order; rebuild the tree with a stack of pending parent
        // slots awaiting a child node: Location 0 = Not's child, 1 = binary-op left,
        // 2 = binary-op right.
        NpgsqlTsQuery? value = null;
        var nodes = new Stack<(NpgsqlTsQuery Node, int Location)>();

        for (var i = 0; i < numTokens; i++)
        {
            if (reader.ShouldBuffer(sizeof(byte)))
                await reader.Buffer(async, sizeof(byte), cancellationToken).ConfigureAwait(false);

            switch (reader.ReadByte())
            {
            case 1: // lexeme: weight byte, prefix byte, NUL-terminated text
                if (reader.ShouldBuffer(sizeof(byte) + sizeof(byte)))
                    await reader.Buffer(async, sizeof(byte) + sizeof(byte), cancellationToken).ConfigureAwait(false);
                var weight = (NpgsqlTsQueryLexeme.Weight)reader.ReadByte();
                var prefix = reader.ReadByte() != 0;

                var str = async
                    ? await reader.ReadNullTerminatedStringAsync(_encoding, cancellationToken).ConfigureAwait(false)
                    : reader.ReadNullTerminatedString(_encoding);
                InsertInTree(new NpgsqlTsQueryLexeme(str, weight, prefix), nodes, ref value);
                continue;

            case 2: // operator: kind byte (plus int16 distance when kind is Phrase)
                if (reader.ShouldBuffer(sizeof(byte)))
                    await reader.Buffer(async, sizeof(byte), cancellationToken).ConfigureAwait(false);
                var kind = (NpgsqlTsQuery.NodeKind)reader.ReadByte();

                NpgsqlTsQuery node;
                switch (kind)
                {
                case Not:
                    // Unary: placeholder child, filled by the next token via the pushed slot.
                    node = new NpgsqlTsQueryNot(null!);
                    InsertInTree(node, nodes, ref value);
                    nodes.Push((node, 0));
                    continue;

                case And:
                    node = new NpgsqlTsQueryAnd(null!, null!);
                    break;
                case Or:
                    node = new NpgsqlTsQueryOr(null!, null!);
                    break;
                case Phrase:
                    if (reader.ShouldBuffer(sizeof(short)))
                        await reader.Buffer(async, sizeof(short), cancellationToken).ConfigureAwait(false);
                    node = new NpgsqlTsQueryFollowedBy(null!, reader.ReadInt16(), null!);
                    break;
                default:
                    throw new UnreachableException(
                        $"Internal Npgsql bug: unexpected value {kind} of enum {nameof(NpgsqlTsQuery.NodeKind)}. Please file a bug.");
                }

                InsertInTree(node, nodes, ref value);

                // Binary operator: expect left (slot 1), then right (slot 2). The right slot is
                // pushed first so the next token pops the left slot.
                nodes.Push((node, 1));
                nodes.Push((node, 2));
                continue;

            case var tokenType:
                throw new UnreachableException(
                    $"Internal Npgsql bug: unexpected token type {tokenType} when reading tsquery. Please file a bug.");
            }
        }

        // All parent slots must have been consumed after the declared token count.
        if (nodes.Count != 0)
            throw new UnreachableException("Internal Npgsql bug, please report.");

        return value!;

        // Attaches `node` either as the root (empty stack) or into the topmost pending slot.
        static void InsertInTree(NpgsqlTsQuery node, Stack<(NpgsqlTsQuery Node, int Location)> nodes, ref NpgsqlTsQuery? value)
        {
            if (nodes.Count == 0)
                value = node;
            else
            {
                var parent = nodes.Pop();
                switch (parent.Location)
                {
                case 0:
                    ((NpgsqlTsQueryNot)parent.Node).Child = node;
                    break;
                case 1:
                    ((NpgsqlTsQueryBinOp)parent.Node).Left = node;
                    break;
                case 2:
                    ((NpgsqlTsQueryBinOp)parent.Node).Right = node;
                    break;
                default:
                    throw new UnreachableException("Internal Npgsql bug, please report.");
                }
            }
        }
    }

    // 4-byte token count header, plus the recursive per-node length for non-empty queries.
    public override Size GetSize(SizeContext context, T value, ref object? writeState)
        => value.Kind is Empty
            ? 4
            : 4 + GetNodeLength(value);

    int GetNodeLength(NpgsqlTsQuery node)
        => node.Kind switch
        {
            // 1 type byte + weight byte + prefix byte + text + NUL terminator.
            Lexeme when _encoding.GetByteCount(((NpgsqlTsQueryLexeme)node).Text) is var strLen
                => strLen > 2046
                    ? throw new InvalidCastException("Lexeme text too long. Must be at most 2046 encoded bytes.")
                    : 4 + strLen,
            // 1 type byte + 1 kind byte per operator node.
            And or Or => 2 + GetNodeLength(((NpgsqlTsQueryBinOp)node).Left) + GetNodeLength(((NpgsqlTsQueryBinOp)node).Right),
            Not => 2 + GetNodeLength(((NpgsqlTsQueryNot)node).Child),
            Empty => throw new InvalidOperationException("Empty tsquery nodes must be top-level"),

            // 2 additional bytes for uint16 phrase operator "distance" field.
            Phrase => 4 + GetNodeLength(((NpgsqlTsQueryBinOp)node).Left) + GetNodeLength(((NpgsqlTsQueryBinOp)node).Right),

            _ => throw new UnreachableException(
                $"Internal Npgsql bug: unexpected value {node.Kind} of enum {nameof(NpgsqlTsQuery.NodeKind)}. Please file a bug.")
        };

    public override void Write(PgWriter writer, T value)
        => Write(async: false, writer, value, CancellationToken.None).GetAwaiter().GetResult();

    public override ValueTask WriteAsync(PgWriter writer, T value, CancellationToken cancellationToken = default)
        => Write(async: true, writer, value, cancellationToken);

    async ValueTask Write(bool async, PgWriter writer, NpgsqlTsQuery value, CancellationToken cancellationToken)
    {
        var numTokens = GetTokenCount(value);

        if (writer.ShouldFlush(sizeof(int)))
            await writer.Flush(async, cancellationToken).ConfigureAwait(false);
        writer.WriteInt32(numTokens);

        if (numTokens is 0)
            return;

        await WriteCore(value).ConfigureAwait(false);

        // Serializes one node (and its subtree) in prefix order.
        async Task WriteCore(NpgsqlTsQuery node)
        {
            if (writer.ShouldFlush(sizeof(byte)))
                await writer.Flush(async, cancellationToken).ConfigureAwait(false);
            writer.WriteByte(node.Kind is Lexeme ? (byte)1 : (byte)2);

            if (node.Kind is Lexeme)
            {
                var lexemeNode = (NpgsqlTsQueryLexeme)node;

                if (writer.ShouldFlush(sizeof(byte) + sizeof(byte)))
                    await writer.Flush(async, cancellationToken).ConfigureAwait(false);

                writer.WriteByte((byte)lexemeNode.Weights);
                writer.WriteByte(lexemeNode.IsPrefixSearch ? (byte)1 : (byte)0);

                if (async)
                    await writer.WriteCharsAsync(lexemeNode.Text.AsMemory(), _encoding, cancellationToken).ConfigureAwait(false);
                else
                    writer.WriteChars(lexemeNode.Text.AsMemory().Span, _encoding);

                if (writer.ShouldFlush(sizeof(byte)))
                    await writer.Flush(async, cancellationToken).ConfigureAwait(false);

                // NUL terminator for the lexeme text.
                writer.WriteByte(0);
                return;
            }

            writer.WriteByte((byte)node.Kind);

            switch (node.Kind)
            {
            case Not:
                await WriteCore(((NpgsqlTsQueryNot)node).Child).ConfigureAwait(false);
                return;
            case Phrase:
                // NOTE(review): the int16 distance is written without a preceding ShouldFlush
                // check — presumably covered by buffer slack from the kind byte; confirm.
                writer.WriteInt16(((NpgsqlTsQueryFollowedBy)node).Distance);
                break;
            }

            // Right operand is emitted before left — matches the operand order the backend
            // expects on the wire for binary operators.
            await WriteCore(((NpgsqlTsQueryBinOp)node).Right).ConfigureAwait(false);
            await WriteCore(((NpgsqlTsQueryBinOp)node).Left).ConfigureAwait(false);
        }
    }

    // Counts the tokens a tree will serialize to (one per node; zero for the empty query).
    int GetTokenCount(NpgsqlTsQuery node)
        => node.Kind switch
        {
            Lexeme => 1,
            And or Or or Phrase => 1 + GetTokenCount(((NpgsqlTsQueryBinOp)node).Left) + GetTokenCount(((NpgsqlTsQueryBinOp)node).Right),
            Not => 1 + GetTokenCount(((NpgsqlTsQueryNot)node).Child),
            Empty => 0,

            _ => throw new UnreachableException(
                $"Internal Npgsql bug: unexpected value {node.Kind} of enum {nameof(NpgsqlTsQuery.NodeKind)}. Please file a bug.")
        };
}
/// <summary>
/// Reads and writes <c>tsvector</c> values: an int32 lexeme count, then per lexeme a
/// NUL-terminated string, an int16 position count and that many int16 word-entry positions.
/// </summary>
sealed class TsVectorConverter : PgStreamingConverter<NpgsqlTsVector>
{
    readonly Encoding _encoding;

    public TsVectorConverter(Encoding encoding)
        => _encoding = encoding;

    public override NpgsqlTsVector Read(PgReader reader)
        => Read(async: false, reader, CancellationToken.None).GetAwaiter().GetResult();

    public override ValueTask<NpgsqlTsVector> ReadAsync(PgReader reader, CancellationToken cancellationToken = default)
        => Read(async: true, reader, cancellationToken);

    // Shared sync/async implementation; `async` selects buffered sync reads vs awaited ones.
    async ValueTask<NpgsqlTsVector> Read(bool async, PgReader reader, CancellationToken cancellationToken)
    {
        if (reader.ShouldBuffer(sizeof(int)))
            await reader.Buffer(async, sizeof(int), cancellationToken).ConfigureAwait(false);

        var numLexemes = reader.ReadInt32();
        var lexemes = new List<NpgsqlTsVector.Lexeme>(numLexemes);

        for (var i = 0; i < numLexemes; i++)
        {
            var lexemeString = async
                ? await reader.ReadNullTerminatedStringAsync(_encoding, cancellationToken).ConfigureAwait(false)
                : reader.ReadNullTerminatedString(_encoding);

            if (reader.ShouldBuffer(sizeof(short)))
                await reader.Buffer(async, sizeof(short), cancellationToken).ConfigureAwait(false);
            var numPositions = reader.ReadInt16();

            if (numPositions == 0)
            {
                lexemes.Add(new NpgsqlTsVector.Lexeme(lexemeString, wordEntryPositions: null, noCopy: true));
                continue;
            }

            // There can only be a maximum of 256 positions, so we just buffer them all
            // (256 * sizeof(short) = 512 bytes).
            if (numPositions > 256)
                throw new NpgsqlException($"Got {numPositions} lexeme positions when reading tsvector");

            if (reader.ShouldBuffer(numPositions * sizeof(short)))
                await reader.Buffer(async, numPositions * sizeof(short), cancellationToken).ConfigureAwait(false);

            var positions = new List<NpgsqlTsVector.Lexeme.WordEntryPos>(numPositions);

            for (var j = 0; j < numPositions; j++)
            {
                var wordEntryPos = reader.ReadInt16();
                positions.Add(new NpgsqlTsVector.Lexeme.WordEntryPos(wordEntryPos));
            }

            lexemes.Add(new NpgsqlTsVector.Lexeme(lexemeString, positions, noCopy: true));
        }

        return new NpgsqlTsVector(lexemes, noCheck: true);
    }

    // 4-byte count header + per lexeme: text bytes + NUL + int16 position count + 2 bytes per position.
    public override Size GetSize(SizeContext context, NpgsqlTsVector value, ref object? writeState)
        => 4 + value.Sum(l => _encoding.GetByteCount(l.Text) + 1 + 2 + l.Count * 2);

    public override void Write(PgWriter writer, NpgsqlTsVector value)
        => Write(async: false, writer, value, CancellationToken.None).GetAwaiter().GetResult();

    public override ValueTask WriteAsync(PgWriter writer, NpgsqlTsVector value, CancellationToken cancellationToken = default)
        => Write(async: true, writer, value, cancellationToken);

    async ValueTask Write(bool async, PgWriter writer, NpgsqlTsVector value, CancellationToken cancellationToken)
    {
        if (writer.ShouldFlush(sizeof(int)))
            // BUGFIX: was `writer.FlushAsync(cancellationToken)`, which ignored the `async`
            // discriminator and forced an async flush even on the synchronous Write path.
            await writer.Flush(async, cancellationToken).ConfigureAwait(false);
        writer.WriteInt32(value.Count);

        foreach (var lexeme in value)
        {
            if (async)
                await writer.WriteCharsAsync(lexeme.Text.AsMemory(), _encoding, cancellationToken).ConfigureAwait(false);
            else
                writer.WriteChars(lexeme.Text.AsMemory().Span, _encoding);

            if (writer.ShouldFlush(sizeof(byte) + sizeof(short)))
                await writer.Flush(async, cancellationToken).ConfigureAwait(false);

            // NUL terminator for the lexeme text, then the position count.
            writer.WriteByte(0);
            writer.WriteInt16((short)lexeme.Count);

            for (var i = 0; i < lexeme.Count; i++)
            {
                if (writer.ShouldFlush(sizeof(short)))
                    await writer.Flush(async, cancellationToken).ConfigureAwait(false);

                writer.WriteInt16(lexeme[i].Value);
            }
        }
    }
}
/// <summary>Reads/writes <c>box</c> as four doubles: upper-right (x, y) then lower-left (x, y).</summary>
sealed class BoxConverter : PgBufferedConverter<NpgsqlBox>
{
    public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements)
    {
        bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(double) * 4);
        return format is DataFormat.Binary;
    }

    protected override NpgsqlBox ReadCore(PgReader reader)
        => new(
            new NpgsqlPoint(reader.ReadDouble(), reader.ReadDouble()),
            new NpgsqlPoint(reader.ReadDouble(), reader.ReadDouble()));

    protected override void WriteCore(PgWriter writer, NpgsqlBox value)
    {
        // High corner first (Right, Top), then low corner (Left, Bottom).
        writer.WriteDouble(value.Right);
        writer.WriteDouble(value.Top);
        writer.WriteDouble(value.Left);
        writer.WriteDouble(value.Bottom);
    }
}

/// <summary>Reads/writes <c>circle</c> as three doubles: center x, center y, radius.</summary>
sealed class CircleConverter : PgBufferedConverter<NpgsqlCircle>
{
    public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements)
    {
        bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(double) * 3);
        return format is DataFormat.Binary;
    }

    protected override NpgsqlCircle ReadCore(PgReader reader)
        => new(reader.ReadDouble(), reader.ReadDouble(), reader.ReadDouble());

    protected override void WriteCore(PgWriter writer, NpgsqlCircle value)
    {
        writer.WriteDouble(value.X);
        writer.WriteDouble(value.Y);
        writer.WriteDouble(value.Radius);
    }
}

/// <summary>Reads/writes <c>line</c> as three doubles: the A, B, C coefficients of Ax + By + C = 0.</summary>
sealed class LineConverter : PgBufferedConverter<NpgsqlLine>
{
    public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements)
    {
        bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(double) * 3);
        return format is DataFormat.Binary;
    }

    protected override NpgsqlLine ReadCore(PgReader reader)
        => new(reader.ReadDouble(), reader.ReadDouble(), reader.ReadDouble());

    protected override void WriteCore(PgWriter writer, NpgsqlLine value)
    {
        writer.WriteDouble(value.A);
        writer.WriteDouble(value.B);
        writer.WriteDouble(value.C);
    }
}

/// <summary>Reads/writes <c>lseg</c> as four doubles: start (x, y) then end (x, y).</summary>
sealed class LineSegmentConverter : PgBufferedConverter<NpgsqlLSeg>
{
    public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements)
    {
        bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(double) * 4);
        return format is DataFormat.Binary;
    }

    protected override NpgsqlLSeg ReadCore(PgReader reader)
        => new(reader.ReadDouble(), reader.ReadDouble(), reader.ReadDouble(), reader.ReadDouble());

    protected override void WriteCore(PgWriter writer, NpgsqlLSeg value)
    {
        writer.WriteDouble(value.Start.X);
        writer.WriteDouble(value.Start.Y);
        writer.WriteDouble(value.End.X);
        writer.WriteDouble(value.End.Y);
    }
}

/// <summary>
/// Reads/writes <c>path</c>: an open/closed byte (1 = closed, 0 = open), an int32 point count,
/// then each point as two doubles.
/// </summary>
sealed class PathConverter : PgStreamingConverter<NpgsqlPath>
{
    public override NpgsqlPath Read(PgReader reader)
        => Read(async: false, reader, CancellationToken.None).GetAwaiter().GetResult();

    public override ValueTask<NpgsqlPath> ReadAsync(PgReader reader, CancellationToken cancellationToken = default)
        => Read(async: true, reader, cancellationToken);

    async ValueTask<NpgsqlPath> Read(bool async, PgReader reader, CancellationToken cancellationToken)
    {
        if (reader.ShouldBuffer(sizeof(byte) + sizeof(int)))
            await reader.Buffer(async, sizeof(byte) + sizeof(int), cancellationToken).ConfigureAwait(false);

        var open = reader.ReadByte() switch
        {
            1 => false,
            0 => true,
            _ => throw new UnreachableException("Error decoding binary geometric path: bad open byte")
        };

        var numPoints = reader.ReadInt32();
        var result = new NpgsqlPath(numPoints, open);

        for (var i = 0; i < numPoints; i++)
        {
            if (reader.ShouldBuffer(sizeof(double) * 2))
                // BUGFIX: buffered sizeof(byte) + sizeof(int) (5 bytes) — a copy-paste from the
                // header read — while each point needs two doubles (16 bytes).
                await reader.Buffer(async, sizeof(double) * 2, cancellationToken).ConfigureAwait(false);

            result.Add(new NpgsqlPoint(reader.ReadDouble(), reader.ReadDouble()));
        }

        return result;
    }

    // 1 open byte + 4-byte count + 16 bytes per point.
    public override Size GetSize(SizeContext context, NpgsqlPath value, ref object? writeState)
        => 5 + value.Count * sizeof(double) * 2;

    public override void Write(PgWriter writer, NpgsqlPath value)
        => Write(async: false, writer, value, CancellationToken.None).GetAwaiter().GetResult();

    public override ValueTask WriteAsync(PgWriter writer, NpgsqlPath value, CancellationToken cancellationToken = default)
        => Write(async: true, writer, value, cancellationToken);

    async ValueTask Write(bool async, PgWriter writer, NpgsqlPath value, CancellationToken cancellationToken)
    {
        if (writer.ShouldFlush(sizeof(byte) + sizeof(int)))
            await writer.Flush(async, cancellationToken).ConfigureAwait(false);

        writer.WriteByte((byte)(value.Open ? 0 : 1));
        writer.WriteInt32(value.Count);

        foreach (var p in value)
        {
            if (writer.ShouldFlush(sizeof(double) * 2))
                await writer.Flush(async, cancellationToken).ConfigureAwait(false);
            writer.WriteDouble(p.X);
            writer.WriteDouble(p.Y);
        }
    }
}

/// <summary>Reads/writes <c>point</c> as two doubles: x then y.</summary>
sealed class PointConverter : PgBufferedConverter<NpgsqlPoint>
{
    public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements)
    {
        bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(double) * 2);
        return format is DataFormat.Binary;
    }

    protected override NpgsqlPoint ReadCore(PgReader reader)
        => new(reader.ReadDouble(), reader.ReadDouble());

    protected override void WriteCore(PgWriter writer, NpgsqlPoint value)
    {
        writer.WriteDouble(value.X);
        writer.WriteDouble(value.Y);
    }
}

/// <summary>Reads/writes <c>polygon</c>: an int32 point count, then each point as two doubles.</summary>
sealed class PolygonConverter : PgStreamingConverter<NpgsqlPolygon>
{
    public override NpgsqlPolygon Read(PgReader reader)
        => Read(async: false, reader, CancellationToken.None).GetAwaiter().GetResult();

    public override ValueTask<NpgsqlPolygon> ReadAsync(PgReader reader, CancellationToken cancellationToken = default)
        => Read(async: true, reader, cancellationToken);

    async ValueTask<NpgsqlPolygon> Read(bool async, PgReader reader, CancellationToken cancellationToken)
    {
        if (reader.ShouldBuffer(sizeof(int)))
            await reader.Buffer(async, sizeof(int), cancellationToken).ConfigureAwait(false);
        var numPoints = reader.ReadInt32();
        var result = new NpgsqlPolygon(numPoints);
        for (var i = 0; i < numPoints; i++)
        {
            if (reader.ShouldBuffer(sizeof(double) * 2))
                await reader.Buffer(async, sizeof(double) * 2, cancellationToken).ConfigureAwait(false);
            result.Add(new NpgsqlPoint(reader.ReadDouble(), reader.ReadDouble()));
        }

        return result;
    }

    // 4-byte count + 16 bytes per point.
    public override Size GetSize(SizeContext context, NpgsqlPolygon value, ref object? writeState)
        => 4 + value.Count * sizeof(double) * 2;

    public override void Write(PgWriter writer, NpgsqlPolygon value)
        => Write(async: false, writer, value, CancellationToken.None).GetAwaiter().GetResult();

    public override ValueTask WriteAsync(PgWriter writer, NpgsqlPolygon value, CancellationToken cancellationToken = default)
        => Write(async: true, writer, value, cancellationToken);

    async ValueTask Write(bool async, PgWriter writer, NpgsqlPolygon value, CancellationToken cancellationToken)
    {
        if (writer.ShouldFlush(sizeof(int)))
            await writer.Flush(async, cancellationToken).ConfigureAwait(false);
        writer.WriteInt32(value.Count);

        foreach (var p in value)
        {
            if (writer.ShouldFlush(sizeof(double) * 2))
                await writer.Flush(async, cancellationToken).ConfigureAwait(false);
            writer.WriteDouble(p.X);
            writer.WriteDouble(p.Y);
        }
    }
}
/// <summary>
/// Reads and writes <c>hstore</c> values: an int32 pair count, then per pair an int32 key
/// length + key bytes and an int32 value length (-1 for NULL) + value bytes.
/// <paramref name="convert"/> adapts the intermediate collection to <typeparamref name="T"/>
/// when T isn't (I)Dictionary.
/// </summary>
sealed class HstoreConverter<T> : PgStreamingConverter<T> where T : ICollection<KeyValuePair<string, string?>>
{
    readonly Encoding _encoding;
    readonly Func<ICollection<KeyValuePair<string, string?>>, T>? _convert;

    public HstoreConverter(Encoding encoding, Func<ICollection<KeyValuePair<string, string?>>, T>? convert = null)
    {
        _encoding = encoding;
        _convert = convert;
    }

    public override T Read(PgReader reader)
        // BUGFIX/consistency: use GetAwaiter().GetResult() instead of .Result so exceptions
        // surface directly rather than wrapped in an AggregateException (matches all sibling converters).
        => Read(async: false, reader, CancellationToken.None).GetAwaiter().GetResult();

    public override ValueTask<T> ReadAsync(PgReader reader, CancellationToken cancellationToken = default)
        => Read(async: true, reader, cancellationToken);

    public override Size GetSize(SizeContext context, T value, ref object? writeState)
    {
        // Number of lengths (count, key length, value length).
        var totalSize = sizeof(int) + value.Count * (sizeof(int) + sizeof(int));
        if (value.Count is 0)
            return totalSize;

        // Cache the per-key/per-value encoded byte counts so Write doesn't recompute them.
        var arrayPool = ArrayPool<(Size Size, object? WriteState)>.Shared;
        var data = arrayPool.Rent(value.Count * 2);

        var i = 0;
        foreach (var kv in value)
        {
            if (kv.Key is null)
                throw new ArgumentException("Hstore doesn't support null keys", nameof(value));

            var keySize = _encoding.GetByteCount(kv.Key);
            var valueSize = kv.Value is null ? -1 : _encoding.GetByteCount(kv.Value);
            totalSize += keySize + (valueSize is -1 ? 0 : valueSize);
            data[i] = (keySize, null);
            data[i + 1] = (valueSize, null);
            i += 2;
        }
        writeState = new WriteState
        {
            ArrayPool = arrayPool,
            Data = new(data, 0, value.Count * 2),
            AnyWriteState = false
        };
        return totalSize;
    }

    public override void Write(PgWriter writer, T value)
        => Write(async: false, writer, value, CancellationToken.None).GetAwaiter().GetResult();

    public override ValueTask WriteAsync(PgWriter writer, T value, CancellationToken cancellationToken = default)
        => Write(async: true, writer, value, cancellationToken);

    async ValueTask<T> Read(bool async, PgReader reader, CancellationToken cancellationToken)
    {
        if (reader.ShouldBuffer(sizeof(int)))
            await reader.Buffer(async, sizeof(int), cancellationToken).ConfigureAwait(false);

        var count = reader.ReadInt32();

        // Materialize directly into a Dictionary when T is (I)Dictionary; otherwise collect
        // into a list and run the converter delegate.
        var result = typeof(T) == typeof(Dictionary<string, string?>) || typeof(T) == typeof(IDictionary<string, string?>)
            ? (ICollection<KeyValuePair<string, string?>>)new Dictionary<string, string?>(count)
            : new List<KeyValuePair<string, string?>>(count);

        for (var i = 0; i < count; i++)
        {
            if (reader.ShouldBuffer(sizeof(int)))
                await reader.Buffer(async, sizeof(int), cancellationToken).ConfigureAwait(false);
            var keySize = reader.ReadInt32();
            var key = _encoding.GetString(async
                ? await reader.ReadBytesAsync(keySize, cancellationToken).ConfigureAwait(false)
                : reader.ReadBytes(keySize)
            );

            if (reader.ShouldBuffer(sizeof(int)))
                await reader.Buffer(async, sizeof(int), cancellationToken).ConfigureAwait(false);
            var valueSize = reader.ReadInt32();
            string? value = null;
            if (valueSize is not -1)
                value = _encoding.GetString(async
                    ? await reader.ReadBytesAsync(valueSize, cancellationToken).ConfigureAwait(false)
                    : reader.ReadBytes(valueSize)
                );

            result.Add(new(key, value));
        }

        if (typeof(T) == typeof(Dictionary<string, string?>) || typeof(T) == typeof(IDictionary<string, string?>))
            return (T)result;

        return _convert is null ? throw new NotSupportedException() : _convert(result);
    }

    async ValueTask Write(bool async, PgWriter writer, T value, CancellationToken cancellationToken)
    {
        if (writer.Current.WriteState is not WriteState && value.Count is not 0)
            throw new InvalidCastException($"Invalid write state, expected {typeof(WriteState).FullName}.");

        // Number of lengths (count, key length, value length).
        if (writer.ShouldFlush(sizeof(int)))
            await writer.Flush(async, cancellationToken).ConfigureAwait(false);
        writer.WriteInt32(value.Count);

        if (value.Count is 0 || writer.Current.WriteState is not WriteState writeState)
            return;

        var data = writeState.Data;
        var i = data.Offset;
        foreach (var kv in value)
        {
            if (writer.ShouldFlush(sizeof(int)))
                await writer.Flush(async, cancellationToken).ConfigureAwait(false);

            var (size, _) = data.Array![i];
            if (size.Kind is SizeKind.Unknown)
                throw new NotImplementedException();

            var length = size.Value;
            writer.WriteInt32(length);
            if (async)
                await writer.WriteCharsAsync(kv.Key.AsMemory(), _encoding, cancellationToken).ConfigureAwait(false);
            else
                writer.WriteChars(kv.Key.AsSpan(), _encoding);

            if (writer.ShouldFlush(sizeof(int)))
                await writer.Flush(async, cancellationToken).ConfigureAwait(false);

            var (valueSize, _) = data.Array![i + 1];
            if (valueSize.Kind is SizeKind.Unknown)
                throw new NotImplementedException();

            var valueLength = valueSize.Value;
            writer.WriteInt32(valueLength);
            if (valueLength is not -1)
            {
                // valueLength != -1 implies kv.Value is non-null (see GetSize).
                if (async)
                    await writer.WriteCharsAsync(kv.Value.AsMemory(), _encoding, cancellationToken).ConfigureAwait(false);
                else
                    // BUGFIX: the sync path wrote kv.Key again instead of kv.Value, corrupting
                    // every hstore value written synchronously.
                    writer.WriteChars(kv.Value.AsSpan(), _encoding);
            }
            i += 2;
        }
    }

    // Holds the rented size array between GetSize and Write.
    sealed class WriteState : MultiWriteState
    {
    }
}
b/src/Npgsql/Internal/Converters/Internal/InternalCharConverter.cs @@ -0,0 +1,43 @@ +using System; +using System.Numerics; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class InternalCharConverter : PgBufferedConverter +#if NET7_0_OR_GREATER + where T : INumberBase +#endif +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(byte)); + return format is DataFormat.Binary; + } + +#if NET7_0_OR_GREATER + protected override T ReadCore(PgReader reader) => T.CreateChecked(reader.ReadByte()); + protected override void WriteCore(PgWriter writer, T value) => writer.WriteByte(byte.CreateChecked(value)); +#else + protected override T ReadCore(PgReader reader) + { + var value = reader.ReadByte(); + if (typeof(byte) == typeof(T)) + return (T)(object)value; + if (typeof(char) == typeof(T)) + return (T)(object)(char)value; + + throw new NotSupportedException(); + } + + protected override void WriteCore(PgWriter writer, T value) + { + if (typeof(byte) == typeof(T)) + writer.WriteByte((byte)(object)value!); + else if (typeof(char) == typeof(T)) + writer.WriteByte(checked((byte)(char)(object)value!)); + else + throw new NotSupportedException(); + } +#endif +} diff --git a/src/Npgsql/Internal/Converters/Internal/PgLsnConverter.cs b/src/Npgsql/Internal/Converters/Internal/PgLsnConverter.cs new file mode 100644 index 0000000000..96730c857a --- /dev/null +++ b/src/Npgsql/Internal/Converters/Internal/PgLsnConverter.cs @@ -0,0 +1,15 @@ +using NpgsqlTypes; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class PgLsnConverter : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(ulong)); + return format is DataFormat.Binary; + } + protected override 
NpgsqlLogSequenceNumber ReadCore(PgReader reader) => new(reader.ReadUInt64()); + protected override void WriteCore(PgWriter writer, NpgsqlLogSequenceNumber value) => writer.WriteUInt64((ulong)value); +} diff --git a/src/Npgsql/Internal/Converters/Internal/TidConverter.cs b/src/Npgsql/Internal/Converters/Internal/TidConverter.cs new file mode 100644 index 0000000000..747d98fe17 --- /dev/null +++ b/src/Npgsql/Internal/Converters/Internal/TidConverter.cs @@ -0,0 +1,19 @@ +using NpgsqlTypes; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class TidConverter : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(uint) + sizeof(ushort)); + return format is DataFormat.Binary; + } + protected override NpgsqlTid ReadCore(PgReader reader) => new(reader.ReadUInt32(), reader.ReadUInt16()); + protected override void WriteCore(PgWriter writer, NpgsqlTid value) + { + writer.WriteUInt32(value.BlockNumber); + writer.WriteUInt16(value.OffsetNumber); + } +} diff --git a/src/Npgsql/Internal/Converters/Internal/UInt32Converter.cs b/src/Npgsql/Internal/Converters/Internal/UInt32Converter.cs new file mode 100644 index 0000000000..92061b1fd2 --- /dev/null +++ b/src/Npgsql/Internal/Converters/Internal/UInt32Converter.cs @@ -0,0 +1,13 @@ +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class UInt32Converter : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(uint)); + return format is DataFormat.Binary; + } + protected override uint ReadCore(PgReader reader) => reader.ReadUInt32(); + protected override void WriteCore(PgWriter writer, uint value) => writer.WriteUInt32(value); +} diff --git 
a/src/Npgsql/Internal/Converters/Internal/UInt64Converter.cs b/src/Npgsql/Internal/Converters/Internal/UInt64Converter.cs new file mode 100644 index 0000000000..fcf5e3695a --- /dev/null +++ b/src/Npgsql/Internal/Converters/Internal/UInt64Converter.cs @@ -0,0 +1,13 @@ +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class UInt64Converter : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(ulong)); + return format is DataFormat.Binary; + } + protected override ulong ReadCore(PgReader reader) => reader.ReadUInt64(); + protected override void WriteCore(PgWriter writer, ulong value) => writer.WriteUInt64(value); +} diff --git a/src/Npgsql/Internal/Converters/Internal/VoidConverter.cs b/src/Npgsql/Internal/Converters/Internal/VoidConverter.cs new file mode 100644 index 0000000000..45b48df5b5 --- /dev/null +++ b/src/Npgsql/Internal/Converters/Internal/VoidConverter.cs @@ -0,0 +1,13 @@ +using System; + +namespace Npgsql.Internal.Converters.Internal; + +// Void is not a value so we read it as a null reference, not a DBNull. +sealed class VoidConverter : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + => CanConvertBufferedDefault(DataFormat.Binary, out bufferRequirements); // Text is identical + + protected override object? ReadCore(PgReader reader) => null; + protected override void WriteCore(PgWriter writer, object? 
value) => throw new NotSupportedException(); +} diff --git a/src/Npgsql/Internal/Converters/MoneyConverter.cs b/src/Npgsql/Internal/Converters/MoneyConverter.cs new file mode 100644 index 0000000000..8443acedc3 --- /dev/null +++ b/src/Npgsql/Internal/Converters/MoneyConverter.cs @@ -0,0 +1,74 @@ +using System; +using System.Numerics; + +namespace Npgsql.Internal.Converters; + +sealed class MoneyConverter : PgBufferedConverter +#if NET7_0_OR_GREATER + where T : INumberBase +#endif +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long)); + return format is DataFormat.Binary; + } + protected override T ReadCore(PgReader reader) => ConvertTo(new PgMoney(reader.ReadInt64())); + protected override void WriteCore(PgWriter writer, T value) => writer.WriteInt64(ConvertFrom(value).GetValue()); + + static PgMoney ConvertFrom(T value) + { +#if !NET7_0_OR_GREATER + if (typeof(short) == typeof(T)) + return new PgMoney((decimal)(short)(object)value!); + if (typeof(int) == typeof(T)) + return new PgMoney((decimal)(int)(object)value!); + if (typeof(long) == typeof(T)) + return new PgMoney((decimal)(long)(object)value!); + + if (typeof(byte) == typeof(T)) + return new PgMoney((decimal)(byte)(object)value!); + if (typeof(sbyte) == typeof(T)) + return new PgMoney((decimal)(sbyte)(object)value!); + + if (typeof(float) == typeof(T)) + return new PgMoney((decimal)(float)(object)value!); + if (typeof(double) == typeof(T)) + return new PgMoney((decimal)(double)(object)value!); + if (typeof(decimal) == typeof(T)) + return new PgMoney((decimal)(object)value!); + + throw new NotSupportedException(); +#else + return new PgMoney(decimal.CreateChecked(value)); +#endif + } + + static T ConvertTo(PgMoney money) + { +#if !NET7_0_OR_GREATER + if (typeof(short) == typeof(T)) + return (T)(object)(short)money.ToDecimal(); + if (typeof(int) == typeof(T)) + return 
(T)(object)(int)money.ToDecimal(); + if (typeof(long) == typeof(T)) + return (T)(object)(long)money.ToDecimal(); + + if (typeof(byte) == typeof(T)) + return (T)(object)(byte)money.ToDecimal(); + if (typeof(sbyte) == typeof(T)) + return (T)(object)(sbyte)money.ToDecimal(); + + if (typeof(float) == typeof(T)) + return (T)(object)(float)money.ToDecimal(); + if (typeof(double) == typeof(T)) + return (T)(object)(double)money.ToDecimal(); + if (typeof(decimal) == typeof(T)) + return (T)(object)money.ToDecimal(); + + throw new NotSupportedException(); +#else + return T.CreateChecked(money.ToDecimal()); +#endif + } +} diff --git a/src/Npgsql/Internal/Converters/MultirangeConverter.cs b/src/Npgsql/Internal/Converters/MultirangeConverter.cs new file mode 100644 index 0000000000..524901977b --- /dev/null +++ b/src/Npgsql/Internal/Converters/MultirangeConverter.cs @@ -0,0 +1,142 @@ +using System; +using System.Buffers; +using System.Collections.Generic; +using System.Diagnostics; +using System.Threading; +using System.Threading.Tasks; + +namespace Npgsql.Internal.Converters; + +sealed class MultirangeConverter : PgStreamingConverter + where T : IList + where TRange : notnull +{ + readonly PgConverter _rangeConverter; + readonly BufferRequirements _rangeRequirements; + + static MultirangeConverter() + => Debug.Assert(typeof(T).IsArray || typeof(T).IsGenericType && typeof(T).GetGenericTypeDefinition() == typeof(List<>)); + + public MultirangeConverter(PgConverter rangeConverter) + { + if (!rangeConverter.CanConvert(DataFormat.Binary, out var bufferRequirements)) + throw new NotSupportedException("Range subtype converter has to support the binary format to be compatible."); + _rangeRequirements = bufferRequirements; + _rangeConverter = rangeConverter; + } + + public override T Read(PgReader reader) + => Read(async: false, reader, CancellationToken.None).GetAwaiter().GetResult(); + + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = 
default) + => Read(async: true, reader, cancellationToken); + + public async ValueTask Read(bool async, PgReader reader, CancellationToken cancellationToken) + { + if (reader.ShouldBuffer(sizeof(int))) + await reader.Buffer(async, sizeof(int), cancellationToken).ConfigureAwait(false); + var numRanges = reader.ReadInt32(); + var multirange = (T)(object)(typeof(T).IsArray ? new TRange[numRanges] : new List()); + + for (var i = 0; i < numRanges; i++) + { + if (reader.ShouldBuffer(sizeof(int))) + await reader.Buffer(async, sizeof(int), cancellationToken).ConfigureAwait(false); + var length = reader.ReadInt32(); + Debug.Assert(length != -1); + + var scope = await reader.BeginNestedRead(async, length, _rangeRequirements.Read, cancellationToken).ConfigureAwait(false); + try + { + var range = async + ? await _rangeConverter.ReadAsync(reader, cancellationToken).ConfigureAwait(false) + // ReSharper disable once MethodHasAsyncOverloadWithCancellation + : _rangeConverter.Read(reader); + + if (typeof(T).IsArray) + multirange[i] = range; + else + multirange.Add(range); + } + finally + { + if (async) + await scope.DisposeAsync().ConfigureAwait(false); + else + scope.Dispose(); + } + } + + return multirange; + } + + public override Size GetSize(SizeContext context, T value, ref object? writeState) + { + var arrayPool = ArrayPool<(Size Size, object? WriteState)>.Shared; + var data = arrayPool.Rent(value.Count); + + var totalSize = Size.Create(sizeof(int) + sizeof(int) * value.Count); + var anyWriteState = false; + for (var i = 0; i < value.Count; i++) + { + object? innerState = null; + var rangeSize = _rangeConverter.GetSizeOrDbNull(context.Format, _rangeRequirements.Write, value[i], ref innerState); + anyWriteState = anyWriteState || innerState is not null; + // Ranges should never be NULL. 
+ Debug.Assert(rangeSize.HasValue); + data[i] = new(rangeSize.Value, innerState); + totalSize = totalSize.Combine(rangeSize.Value); + } + + writeState = new WriteState + { + ArrayPool = arrayPool, + Data = new(data, 0, value.Count), + AnyWriteState = anyWriteState + }; + return totalSize; + } + + public override void Write(PgWriter writer, T value) + => Write(async: false, writer, value, CancellationToken.None).GetAwaiter().GetResult(); + + public override ValueTask WriteAsync(PgWriter writer, T value, CancellationToken cancellationToken = default) + => Write(async: true, writer, value, cancellationToken); + + async ValueTask Write(bool async, PgWriter writer, T value, CancellationToken cancellationToken) + { + if (writer.Current.WriteState is not WriteState writeState) + throw new InvalidCastException($"Invalid state {writer.Current.WriteState?.GetType().FullName}."); + + if (writer.ShouldFlush(sizeof(int))) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + writer.WriteInt32(value.Count); + + var data = writeState.Data.Array!; + for (var i = 0; i < value.Count; i++) + { + if (writer.ShouldFlush(sizeof(int))) // Length + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + var (size, state) = data[i]; + if (size.Kind is SizeKind.Unknown) + throw new NotImplementedException(); + + var length = size.Value; + writer.WriteInt32(length); + if (length != -1) + { + using var _ = await writer.BeginNestedWrite(async, _rangeRequirements.Write, length, state, cancellationToken).ConfigureAwait(false); + if (async) + await _rangeConverter.WriteAsync(writer, value[i], cancellationToken).ConfigureAwait(false); + else + // ReSharper disable once MethodHasAsyncOverloadWithCancellation + _rangeConverter.Write(writer, value[i]); + } + } + } + + sealed class WriteState : MultiWriteState + { + } +} diff --git a/src/Npgsql/Internal/Converters/Networking/IPAddressConverter.cs b/src/Npgsql/Internal/Converters/Networking/IPAddressConverter.cs new 
file mode 100644 index 0000000000..9050f36f16 --- /dev/null +++ b/src/Npgsql/Internal/Converters/Networking/IPAddressConverter.cs @@ -0,0 +1,23 @@ +using System.Net; +using System.Net.Sockets; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class IPAddressConverter : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + => CanConvertBufferedDefault(format, out bufferRequirements); + + public override Size GetSize(SizeContext context, IPAddress value, ref object? writeState) + => NpgsqlInetConverter.GetSizeImpl(context, value, ref writeState); + + protected override IPAddress ReadCore(PgReader reader) + => NpgsqlInetConverter.ReadImpl(reader, shouldBeCidr: false).Address; + + protected override void WriteCore(PgWriter writer, IPAddress value) + => NpgsqlInetConverter.WriteImpl( + writer, + (value, (byte)(value.AddressFamily == AddressFamily.InterNetwork ? 32 : 128)), + isCidr: false); +} diff --git a/src/Npgsql/Internal/Converters/Networking/MacaddrConverter.cs b/src/Npgsql/Internal/Converters/Networking/MacaddrConverter.cs new file mode 100644 index 0000000000..dd8aac78bc --- /dev/null +++ b/src/Npgsql/Internal/Converters/Networking/MacaddrConverter.cs @@ -0,0 +1,40 @@ +using System; +using System.Diagnostics; +using System.Net.NetworkInformation; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class MacaddrConverter : PgBufferedConverter +{ + readonly bool _macaddr8; + + public MacaddrConverter(bool macaddr8) => _macaddr8 = macaddr8; + + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = _macaddr8 ? BufferRequirements.Create(Size.CreateUpperBound(8)) : BufferRequirements.CreateFixedSize(6); + return format is DataFormat.Binary; + } + + public override Size GetSize(SizeContext context, PhysicalAddress value, ref object? 
writeState) + => value.GetAddressBytes().Length; + + protected override PhysicalAddress ReadCore(PgReader reader) + { + var len = reader.CurrentRemaining; + Debug.Assert(len is 6 or 8); + + var bytes = new byte[len]; + reader.Read(bytes); + return new PhysicalAddress(bytes); + } + + protected override void WriteCore(PgWriter writer, PhysicalAddress value) + { + var bytes = value.GetAddressBytes(); + if (!_macaddr8 && bytes.Length is not 6) + throw new ArgumentException("A macaddr value must be 6 bytes long."); + writer.WriteBytes(bytes); + } +} diff --git a/src/Npgsql/Internal/Converters/Networking/NpgsqlCidrConverter.cs b/src/Npgsql/Internal/Converters/Networking/NpgsqlCidrConverter.cs new file mode 100644 index 0000000000..249ec9a68f --- /dev/null +++ b/src/Npgsql/Internal/Converters/Networking/NpgsqlCidrConverter.cs @@ -0,0 +1,22 @@ +using NpgsqlTypes; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class NpgsqlCidrConverter : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + => CanConvertBufferedDefault(format, out bufferRequirements); + + public override Size GetSize(SizeContext context, NpgsqlCidr value, ref object? 
writeState) + => NpgsqlInetConverter.GetSizeImpl(context, value.Address, ref writeState); + + protected override NpgsqlCidr ReadCore(PgReader reader) + { + var (ip, netmask) = NpgsqlInetConverter.ReadImpl(reader, shouldBeCidr: true); + return new(ip, netmask); + } + + protected override void WriteCore(PgWriter writer, NpgsqlCidr value) + => NpgsqlInetConverter.WriteImpl(writer, (value.Address, value.Netmask), isCidr: false); +} diff --git a/src/Npgsql/Internal/Converters/Networking/NpgsqlInetConverter.cs b/src/Npgsql/Internal/Converters/Networking/NpgsqlInetConverter.cs new file mode 100644 index 0000000000..f3af04e80a --- /dev/null +++ b/src/Npgsql/Internal/Converters/Networking/NpgsqlInetConverter.cs @@ -0,0 +1,73 @@ +using System; +using System.Diagnostics; +using System.Net; +using System.Net.Sockets; +using NpgsqlTypes; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class NpgsqlInetConverter : PgBufferedConverter +{ + const byte IPv4 = 2; + const byte IPv6 = 3; + + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + => CanConvertBufferedDefault(format, out bufferRequirements); + + public override Size GetSize(SizeContext context, NpgsqlInet value, ref object? writeState) + => GetSizeImpl(context, value.Address, ref writeState); + + internal static Size GetSizeImpl(SizeContext context, IPAddress ipAddress, ref object? 
writeState) + => ipAddress.AddressFamily switch + { + AddressFamily.InterNetwork => 8, + AddressFamily.InterNetworkV6 => 20, + _ => throw new InvalidCastException( + $"Can't handle IPAddress with AddressFamily {ipAddress.AddressFamily}, only InterNetwork or InterNetworkV6!") + }; + + protected override NpgsqlInet ReadCore(PgReader reader) + { + var (ip, netmask) = ReadImpl(reader, shouldBeCidr: false); + return new(ip, netmask); + } + + internal static (IPAddress Address, byte Netmask) ReadImpl(PgReader reader, bool shouldBeCidr) + { + _ = reader.ReadByte(); // addressFamily + var mask = reader.ReadByte(); // mask + + var isCidr = reader.ReadByte() == 1; + Debug.Assert(isCidr == shouldBeCidr); + + var numBytes = reader.ReadByte(); + Span bytes = stackalloc byte[numBytes]; + reader.Read(bytes); +#if NETSTANDARD2_0 + return (new IPAddress(bytes.ToArray()), mask); +#else + return (new IPAddress(bytes), mask); +#endif + } + + protected override void WriteCore(PgWriter writer, NpgsqlInet value) + => WriteImpl(writer, (value.Address, value.Netmask), isCidr: false); + + internal static void WriteImpl(PgWriter writer, (IPAddress Address, byte Netmask) value, bool isCidr) + { + writer.WriteByte(value.Address.AddressFamily switch + { + AddressFamily.InterNetwork => IPv4, + AddressFamily.InterNetworkV6 => IPv6, + _ => throw new InvalidCastException( + $"Can't handle IPAddress with AddressFamily {value.Address.AddressFamily}, only InterNetwork or InterNetworkV6!") + }); + + writer.WriteByte(value.Netmask); + writer.WriteByte((byte)(isCidr ? 
1 : 0)); // Ignored on server side + var bytes = value.Address.GetAddressBytes(); + writer.WriteByte((byte)bytes.Length); + writer.WriteBytes(bytes); + } +} diff --git a/src/Npgsql/Internal/Converters/NullableConverter.cs b/src/Npgsql/Internal/Converters/NullableConverter.cs new file mode 100644 index 0000000000..b3f8a8a0b2 --- /dev/null +++ b/src/Npgsql/Internal/Converters/NullableConverter.cs @@ -0,0 +1,60 @@ +using System.Diagnostics.CodeAnalysis; +using System.Threading; +using System.Threading.Tasks; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Internal.Converters; + +// NULL writing is always responsibility of the caller writing the length, so there is not much we do here. +/// Special value converter to be able to use struct converters as System.Nullable converters, it delegates all behavior to the effective converter. +sealed class NullableConverter : PgConverter where T : struct +{ + readonly PgConverter _effectiveConverter; + public NullableConverter(PgConverter effectiveConverter) + : base(effectiveConverter.DbNullPredicateKind is DbNullPredicate.Custom) + => _effectiveConverter = effectiveConverter; + + protected override bool IsDbNullValue(T? value) + => value is null || _effectiveConverter.IsDbNull(value.GetValueOrDefault()); + + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + => _effectiveConverter.CanConvert(format, out bufferRequirements); + + public override T? Read(PgReader reader) + => _effectiveConverter.Read(reader); + + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => this.ComposingReadAsync(_effectiveConverter, reader, cancellationToken); + + public override Size GetSize(SizeContext context, [DisallowNull]T? value, ref object? writeState) + => _effectiveConverter.GetSize(context, value.GetValueOrDefault(), ref writeState); + + public override void Write(PgWriter writer, T? 
value) + => _effectiveConverter.Write(writer, value.GetValueOrDefault()); + + public override ValueTask WriteAsync(PgWriter writer, T? value, CancellationToken cancellationToken = default) + => _effectiveConverter.WriteAsync(writer, value.GetValueOrDefault(), cancellationToken); + + internal override ValueTask ReadAsObject(bool async, PgReader reader, CancellationToken cancellationToken) + => _effectiveConverter.ReadAsObject(async, reader, cancellationToken); + + internal override ValueTask WriteAsObject(bool async, PgWriter writer, object value, CancellationToken cancellationToken) + => _effectiveConverter.WriteAsObject(async, writer, value, cancellationToken); +} + +sealed class NullableConverterResolver : PgComposingConverterResolver where T : struct +{ + public NullableConverterResolver(PgResolverTypeInfo effectiveTypeInfo) + : base(effectiveTypeInfo.PgTypeId, effectiveTypeInfo) { } + + protected override PgTypeId GetEffectivePgTypeId(PgTypeId pgTypeId) => pgTypeId; + protected override PgTypeId GetPgTypeId(PgTypeId effectivePgTypeId) => effectivePgTypeId; + + protected override PgConverter CreateConverter(PgConverterResolution effectiveResolution) + => new NullableConverter(effectiveResolution.GetConverter()); + + protected override PgConverterResolution? GetEffectiveResolution(T? value, PgTypeId? expectedEffectivePgTypeId) + => value is null + ? 
EffectiveTypeInfo.GetDefaultResolution(expectedEffectivePgTypeId) + : EffectiveTypeInfo.GetResolution(value.GetValueOrDefault(), expectedEffectivePgTypeId); +} diff --git a/src/Npgsql/Internal/Converters/ObjectArrayRecordConverter.cs b/src/Npgsql/Internal/Converters/ObjectArrayRecordConverter.cs new file mode 100644 index 0000000000..9b028c02cc --- /dev/null +++ b/src/Npgsql/Internal/Converters/ObjectArrayRecordConverter.cs @@ -0,0 +1,79 @@ +using System; +using System.Threading; +using System.Threading.Tasks; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Internal.Converters; + +sealed class ObjectArrayRecordConverter : PgStreamingConverter +{ + readonly PgSerializerOptions _serializerOptions; + readonly Func? _factory; + + public ObjectArrayRecordConverter(PgSerializerOptions serializerOptions, Func? factory = null) + { + _serializerOptions = serializerOptions; + _factory = factory; + } + + public override T Read(PgReader reader) + => Read(async: false, reader, CancellationToken.None).GetAwaiter().GetResult(); + + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => Read(async: true, reader, cancellationToken); + + async ValueTask Read(bool async, PgReader reader, CancellationToken cancellationToken) + { + if (reader.ShouldBuffer(sizeof(int))) + await reader.Buffer(async, sizeof(int), cancellationToken).ConfigureAwait(false); + var fieldCount = reader.ReadInt32(); + var result = new object[fieldCount]; + for (var i = 0; i < fieldCount; i++) + { + if (reader.ShouldBuffer(sizeof(uint) + sizeof(int))) + await reader.Buffer(async, sizeof(uint) + sizeof(int), cancellationToken).ConfigureAwait(false); + + var typeOid = reader.ReadUInt32(); + var length = reader.ReadInt32(); + + // Note that we leave .NET nulls in the object array rather than DBNull. + if (length == -1) + continue; + + var postgresType = + _serializerOptions.DatabaseInfo.GetPostgresType(typeOid).GetRepresentationalType() + ?? 
throw new NotSupportedException($"Reading isn't supported for record field {i} (unknown type OID {typeOid}"); + + var typeInfo = _serializerOptions.GetObjectOrDefaultTypeInfo(postgresType) + ?? throw new NotSupportedException( + $"Reading isn't supported for record field {i} (PG type '{postgresType.DisplayName}'"); + var resolution = typeInfo.GetConcreteResolution(); + if (typeInfo.GetBufferRequirements(resolution.Converter, DataFormat.Binary) is not { } bufferRequirements) + throw new NotSupportedException($"Resolved record field converter '{resolution.Converter.GetType()}' has to support the binary format to be compatible."); + + var scope = await reader.BeginNestedRead(async, length, bufferRequirements.Read, cancellationToken).ConfigureAwait(false); + try + { + result[i] = await resolution.Converter.ReadAsObject(async, reader, cancellationToken).ConfigureAwait(false); + } + finally + { + if (async) + await scope.DisposeAsync().ConfigureAwait(false); + else + scope.Dispose(); + } + } + + return _factory is null ? (T)(object)result : _factory(result); + } + + public override Size GetSize(SizeContext context, T value, ref object? 
writeState) + => throw new NotSupportedException(); + + public override void Write(PgWriter writer, T value) + => throw new NotSupportedException(); + + public override ValueTask WriteAsync(PgWriter writer, T value, CancellationToken cancellationToken = default) + => throw new NotSupportedException(); +} diff --git a/src/Npgsql/Internal/Converters/PolymorphicConverterResolver.cs b/src/Npgsql/Internal/Converters/PolymorphicConverterResolver.cs new file mode 100644 index 0000000000..7c78e34a24 --- /dev/null +++ b/src/Npgsql/Internal/Converters/PolymorphicConverterResolver.cs @@ -0,0 +1,68 @@ +using System; +using System.Collections.Concurrent; +using System.Collections.Generic; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Internal.Converters; + +abstract class PolymorphicConverterResolver : PgConverterResolver +{ + protected PolymorphicConverterResolver(PgTypeId pgTypeId) => PgTypeId = pgTypeId; + + protected PgTypeId PgTypeId { get; } + + protected abstract PgConverter Get(Field? field); + + public sealed override PgConverterResolution GetDefault(PgTypeId? pgTypeId) + { + if (pgTypeId is not null && pgTypeId != PgTypeId) + throw CreateUnsupportedPgTypeIdException(pgTypeId.Value); + + return new(Get(null), PgTypeId); + } + + public sealed override PgConverterResolution? Get(TBase? value, PgTypeId? expectedPgTypeId) + => new(Get(null), PgTypeId); + + public sealed override PgConverterResolution Get(Field field) + { + if (field.PgTypeId != PgTypeId) + throw CreateUnsupportedPgTypeIdException(field.PgTypeId); + + var converter = Get(field); + return new(converter, PgTypeId); + } +} + +// Many ways to achieve strongly typed composition on top of a polymorphic element type. +// Including pushing construction through a GVM visitor pattern on the element handler, +// manual reimplementation of the element logic in the array resolver, and other ways. +// This one however is by far the most lightweight on both the implementation duplication and code bloat axes. 
+sealed class ArrayPolymorphicConverterResolver : PolymorphicConverterResolver +{ + readonly PgResolverTypeInfo _elemTypeInfo; + readonly Func _elemToArrayConverterFactory; + readonly PgTypeId _elemPgTypeId; + readonly ConcurrentDictionary _converterCache = new(ReferenceEqualityComparer.Instance); + + public ArrayPolymorphicConverterResolver(PgTypeId pgTypeId, PgResolverTypeInfo elemTypeInfo, Func elemToArrayConverterFactory) + : base(pgTypeId) + { + if (elemTypeInfo.PgTypeId is null) + throw new ArgumentException("elemTypeInfo.PgTypeId must be non-null.", nameof(elemTypeInfo)); + + _elemTypeInfo = elemTypeInfo; + _elemToArrayConverterFactory = elemToArrayConverterFactory; + _elemPgTypeId = elemTypeInfo.PgTypeId!.Value; + } + + protected override PgConverter Get(Field? maybeField) + { + var elemResolution = maybeField is { } field + ? _elemTypeInfo.GetResolution(field with { PgTypeId = _elemPgTypeId }) + : _elemTypeInfo.GetDefaultResolution(_elemPgTypeId); + + (Func Factory, PgConverterResolution Resolution) state = (_elemToArrayConverterFactory, elemResolution); + return _converterCache.GetOrAdd(elemResolution.Converter, static (_, state) => state.Factory(state.Resolution), state); + } +} diff --git a/src/Npgsql/Internal/Converters/Primitive/BoolConverter.cs b/src/Npgsql/Internal/Converters/Primitive/BoolConverter.cs new file mode 100644 index 0000000000..196877ad0e --- /dev/null +++ b/src/Npgsql/Internal/Converters/Primitive/BoolConverter.cs @@ -0,0 +1,13 @@ +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class BoolConverter : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(byte)); + return format is DataFormat.Binary; + } + protected override bool ReadCore(PgReader reader) => reader.ReadByte() is not 0; + protected override void WriteCore(PgWriter writer, bool value) => 
writer.WriteByte((byte)(value ? 1 : 0)); +} diff --git a/src/Npgsql/Internal/Converters/Primitive/ByteaConverters.cs b/src/Npgsql/Internal/Converters/Primitive/ByteaConverters.cs new file mode 100644 index 0000000000..1d2b1ce531 --- /dev/null +++ b/src/Npgsql/Internal/Converters/Primitive/ByteaConverters.cs @@ -0,0 +1,124 @@ +using System; +using System.Diagnostics; +using System.IO; +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; +using System.Threading; +using System.Threading.Tasks; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +abstract class ByteaConverters : PgStreamingConverter +{ + public override T Read(PgReader reader) + => Read(async: false, reader, CancellationToken.None).Result; + + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => Read(async: true, reader, cancellationToken); + + public override Size GetSize(SizeContext context, T value, ref object? writeState) + => ConvertTo(value).Length; + + public override void Write(PgWriter writer, T value) + => writer.WriteBytes(ConvertTo(value).Span); + + public override ValueTask WriteAsync(PgWriter writer, T value, CancellationToken cancellationToken = default) + => writer.WriteBytesAsync(ConvertTo(value), cancellationToken); + +#if NET6_0_OR_GREATER + [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder<>))] +#endif + async ValueTask Read(bool async, PgReader reader, CancellationToken cancellationToken) + { + var bytes = new byte[reader.CurrentRemaining]; + if (async) + await reader.ReadBytesAsync(bytes, cancellationToken).ConfigureAwait(false); + else + reader.ReadBytes(bytes); + + return ConvertFrom(new(bytes)); + } + + protected abstract Memory ConvertTo(T value); + protected abstract T ConvertFrom(Memory value); +} + +sealed class ArraySegmentByteaConverter : ByteaConverters> +{ + protected override Memory ConvertTo(ArraySegment value) => value; + protected override 
ArraySegment ConvertFrom(Memory value) + => MemoryMarshal.TryGetArray(value, out var segment) + ? segment + : throw new UnreachableException("Expected array-backed memory"); +} + +sealed class ArrayByteaConverter : PgStreamingConverter +{ + public override byte[] Read(PgReader reader) + { + var bytes = new byte[reader.CurrentRemaining]; + reader.ReadBytes(bytes); + return bytes; + } + + public override async ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + { + var bytes = new byte[reader.CurrentRemaining]; + await reader.ReadBytesAsync(bytes, cancellationToken).ConfigureAwait(false); + return bytes; + } + + public override Size GetSize(SizeContext context, byte[] value, ref object? writeState) + => value.Length; + + public override void Write(PgWriter writer, byte[] value) + => writer.WriteBytes(value); + + public override ValueTask WriteAsync(PgWriter writer, byte[] value, CancellationToken cancellationToken = default) + => writer.WriteBytesAsync(value, cancellationToken); +} + +sealed class ReadOnlyMemoryByteaConverter : ByteaConverters> +{ + protected override Memory ConvertTo(ReadOnlyMemory value) => MemoryMarshal.AsMemory(value); + protected override ReadOnlyMemory ConvertFrom(Memory value) => value; +} + +sealed class MemoryByteaConverter : ByteaConverters> +{ + protected override Memory ConvertTo(Memory value) => value; + protected override Memory ConvertFrom(Memory value) => value; +} + +sealed class StreamByteaConverter : PgStreamingConverter +{ + public override Stream Read(PgReader reader) + => throw new NotSupportedException("Handled by generic stream support in NpgsqlDataReader"); + + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => throw new NotSupportedException("Handled by generic stream support in NpgsqlDataReader"); + + public override Size GetSize(SizeContext context, Stream value, ref object? 
writeState) + { + var memoryStream = new MemoryStream(value.CanSeek ? (int)(value.Length - value.Position) : 0); + value.CopyTo(memoryStream); + writeState = memoryStream; + return checked((int)memoryStream.Length); + } + + public override void Write(PgWriter writer, Stream value) + { + if (!((MemoryStream)writer.Current.WriteState!).TryGetBuffer(out var segment)) + throw new InvalidOperationException(); + writer.WriteBytes(segment.AsSpan()); + } + + public override ValueTask WriteAsync(PgWriter writer, Stream value, CancellationToken cancellationToken = default) + { + if (!((MemoryStream)writer.Current.WriteState!).TryGetBuffer(out var segment)) + throw new InvalidOperationException(); + + return writer.WriteBytesAsync(segment.AsMemory(), cancellationToken); + } +} diff --git a/src/Npgsql/Internal/Converters/Primitive/DoubleConverter.cs b/src/Npgsql/Internal/Converters/Primitive/DoubleConverter.cs new file mode 100644 index 0000000000..74a56d06ae --- /dev/null +++ b/src/Npgsql/Internal/Converters/Primitive/DoubleConverter.cs @@ -0,0 +1,43 @@ +using System; +using System.Numerics; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class DoubleConverter : PgBufferedConverter +#if NET7_0_OR_GREATER + where T : INumberBase +#endif +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(double)); + return format is DataFormat.Binary; + } + +#if NET7_0_OR_GREATER + protected override T ReadCore(PgReader reader) => T.CreateChecked(reader.ReadDouble()); + protected override void WriteCore(PgWriter writer, T value) => writer.WriteDouble(double.CreateChecked(value)); +#else + protected override T ReadCore(PgReader reader) + { + var value = reader.ReadDouble(); + if (typeof(float) == typeof(T)) + return (T)(object)value; + if (typeof(double) == typeof(T)) + return (T)(object)value; + + throw new NotSupportedException(); + } 
+ + protected override void WriteCore(PgWriter writer, T value) + { + if (typeof(float) == typeof(T)) + writer.WriteDouble((float)(object)value!); + else if (typeof(double) == typeof(T)) + writer.WriteDouble((double)(object)value!); + else + throw new NotSupportedException(); + } +#endif +} diff --git a/src/Npgsql/Internal/Converters/Primitive/GuidUuidConverter.cs b/src/Npgsql/Internal/Converters/Primitive/GuidUuidConverter.cs new file mode 100644 index 0000000000..596deedfce --- /dev/null +++ b/src/Npgsql/Internal/Converters/Primitive/GuidUuidConverter.cs @@ -0,0 +1,70 @@ +using System; +using System.Buffers.Binary; +using System.Runtime.InteropServices; + +namespace Npgsql.Internal.Converters; + +sealed class GuidUuidConverter : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(16 * sizeof(byte)); + return format is DataFormat.Binary; + } + protected override Guid ReadCore(PgReader reader) + { +#if NET8_0_OR_GREATER + return new Guid(reader.ReadBytes(16).FirstSpan, bigEndian: true); +#else + return new GuidRaw + { + Data1 = reader.ReadInt32(), + Data2 = reader.ReadInt16(), + Data3 = reader.ReadInt16(), + Data4 = BitConverter.IsLittleEndian ? BinaryPrimitives.ReverseEndianness(reader.ReadInt64()) : reader.ReadInt64() + }.Value; +#endif + } + + protected override void WriteCore(PgWriter writer, Guid value) + { +#if NET8_0_OR_GREATER + Span bytes = stackalloc byte[16]; + value.TryWriteBytes(bytes, bigEndian: true, out _); + writer.WriteBytes(bytes); +#else + var raw = new GuidRaw(value); + + writer.WriteInt32(raw.Data1); + writer.WriteInt16(raw.Data2); + writer.WriteInt16(raw.Data3); + writer.WriteInt64(BitConverter.IsLittleEndian ? BinaryPrimitives.ReverseEndianness(raw.Data4) : raw.Data4); +#endif + } + +#if !NET8_0_OR_GREATER + // The following table shows .NET GUID vs Postgres UUID (RFC 4122) layouts. 
+ // + // Note that the first fields are converted from/to native endianness (handled by the Read* + // and Write* methods), while the last field is always read/written in big-endian format. + // + // We're reverting endianness on little endian systems to get it into big endian format. + // + // | Bits | Bytes | Name | Endianness (GUID) | Endianness (RFC 4122) | + // | ---- | ----- | ----- | ----------------- | --------------------- | + // | 32 | 4 | Data1 | Native | Big | + // | 16 | 2 | Data2 | Native | Big | + // | 16 | 2 | Data3 | Native | Big | + // | 64 | 8 | Data4 | Big | Big | + [StructLayout(LayoutKind.Explicit)] + struct GuidRaw + { + [FieldOffset(0)] public Guid Value; + [FieldOffset(0)] public int Data1; + [FieldOffset(4)] public short Data2; + [FieldOffset(6)] public short Data3; + [FieldOffset(8)] public long Data4; + public GuidRaw(Guid value) : this() => Value = value; + } +#endif +} diff --git a/src/Npgsql/Internal/Converters/Primitive/Int2Converter.cs b/src/Npgsql/Internal/Converters/Primitive/Int2Converter.cs new file mode 100644 index 0000000000..e54658d925 --- /dev/null +++ b/src/Npgsql/Internal/Converters/Primitive/Int2Converter.cs @@ -0,0 +1,70 @@ +using System; +using System.Numerics; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class Int2Converter : PgBufferedConverter +#if NET7_0_OR_GREATER + where T : INumberBase +#endif +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(short)); + return format is DataFormat.Binary; + } +#if NET7_0_OR_GREATER + protected override T ReadCore(PgReader reader) => T.CreateChecked(reader.ReadInt16()); + protected override void WriteCore(PgWriter writer, T value) => writer.WriteInt16(short.CreateChecked(value)); +#else + protected override T ReadCore(PgReader reader) + { + var value = reader.ReadInt16(); + if (typeof(short) == typeof(T)) + return 
(T)(object)value; + if (typeof(int) == typeof(T)) + return (T)(object)(int)value; + if (typeof(long) == typeof(T)) + return (T)(object)(long)value; + + if (typeof(byte) == typeof(T)) + return (T)(object)checked((byte)value); + if (typeof(sbyte) == typeof(T)) + return (T)(object)checked((sbyte)value); + + if (typeof(float) == typeof(T)) + return (T)(object)(float)value; + if (typeof(double) == typeof(T)) + return (T)(object)(double)value; + if (typeof(decimal) == typeof(T)) + return (T)(object)(decimal)value; + + throw new NotSupportedException(); + } + + protected override void WriteCore(PgWriter writer, T value) + { + if (typeof(short) == typeof(T)) + writer.WriteInt16((short)(object)value!); + else if (typeof(int) == typeof(T)) + writer.WriteInt16(checked((short)(int)(object)value!)); + else if (typeof(long) == typeof(T)) + writer.WriteInt16(checked((short)(long)(object)value!)); + + else if (typeof(byte) == typeof(T)) + writer.WriteInt16((byte)(object)value!); + else if (typeof(sbyte) == typeof(T)) + writer.WriteInt16((sbyte)(object)value!); + + else if (typeof(float) == typeof(T)) + writer.WriteInt16(checked((short)(float)(object)value!)); + else if (typeof(double) == typeof(T)) + writer.WriteInt16(checked((short)(double)(object)value!)); + else if (typeof(decimal) == typeof(T)) + writer.WriteInt16((short)(decimal)(object)value!); + else + throw new NotSupportedException(); + } +#endif +} diff --git a/src/Npgsql/Internal/Converters/Primitive/Int4Converter.cs b/src/Npgsql/Internal/Converters/Primitive/Int4Converter.cs new file mode 100644 index 0000000000..1831ca9b1e --- /dev/null +++ b/src/Npgsql/Internal/Converters/Primitive/Int4Converter.cs @@ -0,0 +1,71 @@ +using System; +using System.Numerics; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class Int4Converter : PgBufferedConverter +#if NET7_0_OR_GREATER + where T : INumberBase +#endif +{ + public override bool CanConvert(DataFormat format, out BufferRequirements 
bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(int)); + return format is DataFormat.Binary; + } + +#if NET7_0_OR_GREATER + protected override T ReadCore(PgReader reader) => T.CreateChecked(reader.ReadInt32()); + protected override void WriteCore(PgWriter writer, T value) => writer.WriteInt32(int.CreateChecked(value)); +#else + protected override T ReadCore(PgReader reader) + { + var value = reader.ReadInt32(); + if (typeof(short) == typeof(T)) + return (T)(object)checked((short)value); + if (typeof(int) == typeof(T)) + return (T)(object)value; + if (typeof(long) == typeof(T)) + return (T)(object)(long)value; + + if (typeof(byte) == typeof(T)) + return (T)(object)checked((byte)value); + if (typeof(sbyte) == typeof(T)) + return (T)(object)checked((sbyte)value); + + if (typeof(float) == typeof(T)) + return (T)(object)(float)value; + if (typeof(double) == typeof(T)) + return (T)(object)(double)value; + if (typeof(decimal) == typeof(T)) + return (T)(object)(decimal)value; + + throw new NotSupportedException(); + } + + protected override void WriteCore(PgWriter writer, T value) + { + if (typeof(short) == typeof(T)) + writer.WriteInt32((short)(object)value!); + else if (typeof(int) == typeof(T)) + writer.WriteInt32((int)(object)value!); + else if (typeof(long) == typeof(T)) + writer.WriteInt32(checked((int)(long)(object)value!)); + + else if (typeof(byte) == typeof(T)) + writer.WriteInt32((byte)(object)value!); + else if (typeof(sbyte) == typeof(T)) + writer.WriteInt32((sbyte)(object)value!); + + else if (typeof(float) == typeof(T)) + writer.WriteInt32(checked((int)(float)(object)value!)); + else if (typeof(double) == typeof(T)) + writer.WriteInt32(checked((int)(double)(object)value!)); + else if (typeof(decimal) == typeof(T)) + writer.WriteInt32((int)(decimal)(object)value!); + else + throw new NotSupportedException(); + } +#endif +} diff --git a/src/Npgsql/Internal/Converters/Primitive/Int8Converter.cs 
b/src/Npgsql/Internal/Converters/Primitive/Int8Converter.cs new file mode 100644 index 0000000000..b422816244 --- /dev/null +++ b/src/Npgsql/Internal/Converters/Primitive/Int8Converter.cs @@ -0,0 +1,72 @@ +using System; +using System.Numerics; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class Int8Converter : PgBufferedConverter +#if NET7_0_OR_GREATER + where T : INumberBase +#endif +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long)); + return format is DataFormat.Binary; + } + +#if NET7_0_OR_GREATER + protected override T ReadCore(PgReader reader) => T.CreateChecked(reader.ReadInt64()); + protected override void WriteCore(PgWriter writer, T value) => writer.WriteInt64(long.CreateChecked(value)); +#else + protected override T ReadCore(PgReader reader) + { + var value = reader.ReadInt64(); + if (typeof(long) == typeof(T)) + return (T)(object)value; + + if (typeof(short) == typeof(T)) + return (T)(object)checked((short)value); + if (typeof(int) == typeof(T)) + return (T)(object)checked((int)value); + + if (typeof(byte) == typeof(T)) + return (T)(object)checked((byte)value); + if (typeof(sbyte) == typeof(T)) + return (T)(object)checked((sbyte)value); + + if (typeof(float) == typeof(T)) + return (T)(object)(float)value; + if (typeof(double) == typeof(T)) + return (T)(object)(double)value; + if (typeof(decimal) == typeof(T)) + return (T)(object)(decimal)value; + + throw new NotSupportedException(); + } + + protected override void WriteCore(PgWriter writer, T value) + { + if (typeof(short) == typeof(T)) + writer.WriteInt64((short)(object)value!); + else if (typeof(int) == typeof(T)) + writer.WriteInt64((int)(object)value!); + else if (typeof(long) == typeof(T)) + writer.WriteInt64((long)(object)value!); + + else if (typeof(byte) == typeof(T)) + writer.WriteInt64((byte)(object)value!); + else if 
(typeof(sbyte) == typeof(T)) + writer.WriteInt64((sbyte)(object)value!); + + else if (typeof(float) == typeof(T)) + writer.WriteInt64(checked((long)(float)(object)value!)); + else if (typeof(double) == typeof(T)) + writer.WriteInt64(checked((long)(double)(object)value!)); + else if (typeof(decimal) == typeof(T)) + writer.WriteInt64((long)(decimal)(object)value!); + else + throw new NotSupportedException(); + } +#endif +} diff --git a/src/Npgsql/Internal/Converters/Primitive/NumericConverters.cs b/src/Npgsql/Internal/Converters/Primitive/NumericConverters.cs new file mode 100644 index 0000000000..c43e90a1f7 --- /dev/null +++ b/src/Npgsql/Internal/Converters/Primitive/NumericConverters.cs @@ -0,0 +1,262 @@ +using System; +using System.Buffers; +using System.Numerics; +using System.Threading; +using System.Threading.Tasks; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class BigIntegerNumericConverter : PgStreamingConverter +{ + const int StackAllocByteThreshold = 64 * sizeof(uint); + + public override BigInteger Read(PgReader reader) + { + var digitCount = reader.ReadInt16(); + short[]? digitsFromPool = null; + var digits = (digitCount <= StackAllocByteThreshold / sizeof(short) + ? stackalloc short[StackAllocByteThreshold / sizeof(short)] + : (digitsFromPool = ArrayPool.Shared.Rent(digitCount)).AsSpan()).Slice(0, digitCount); + + var value = ConvertTo(NumericConverter.Read(reader, digits)); + + if (digitsFromPool is not null) + ArrayPool.Shared.Return(digitsFromPool); + + return value; + } + + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + { + // If we don't need a read and can read buffered we delegate to our sync read method which won't do IO in such a case. 
+ if (!reader.ShouldBuffer(reader.CurrentRemaining)) + Read(reader); + + return AsyncCore(reader, cancellationToken); + + static async ValueTask AsyncCore(PgReader reader, CancellationToken cancellationToken) + { + await reader.BufferAsync(PgNumeric.GetByteCount(0), cancellationToken).ConfigureAwait(false); + var digitCount = reader.ReadInt16(); + var digits = new ArraySegment(ArrayPool.Shared.Rent(digitCount), 0, digitCount); + var value = ConvertTo(await NumericConverter.ReadAsync(reader, digits, cancellationToken).ConfigureAwait(false)); + + ArrayPool.Shared.Return(digits.Array!); + + return value; + } + } + + public override Size GetSize(SizeContext context, BigInteger value, ref object? writeState) => + PgNumeric.GetByteCount(PgNumeric.GetDigitCount(value)); + + public override void Write(PgWriter writer, BigInteger value) + { + // We don't know how many digits we need so we allocate a decent chunk of stack for the builder to use. + // If it's not enough for the builder will do a heap allocation (for decimal it's always enough). + Span destination = stackalloc short[StackAllocByteThreshold / sizeof(short)]; + var numeric = ConvertFrom(value, destination); + NumericConverter.Write(writer, numeric); + } + + public override ValueTask WriteAsync(PgWriter writer, BigInteger value, CancellationToken cancellationToken = default) + { + if (writer.ShouldFlush(writer.Current.Size)) + return AsyncCore(writer, value, cancellationToken); + + // If we don't need a flush and can write buffered we delegate to our sync write method which won't flush in such a case. 
+ Write(writer, value); + return new(); + + static async ValueTask AsyncCore(PgWriter writer, BigInteger value, CancellationToken cancellationToken) + { + await writer.FlushAsync(cancellationToken).ConfigureAwait(false); + var numeric = ConvertFrom(value, Array.Empty()).Build(); + await NumericConverter.WriteAsync(writer, numeric, cancellationToken).ConfigureAwait(false); + } + } + + static PgNumeric.Builder ConvertFrom(BigInteger value, Span destination) => new(value, destination); + static BigInteger ConvertTo(in PgNumeric.Builder numeric) => numeric.ToBigInteger(); + static BigInteger ConvertTo(in PgNumeric numeric) => numeric.ToBigInteger(); +} + +sealed class DecimalNumericConverter : PgBufferedConverter +#if NET7_0_OR_GREATER + where T : INumberBase +#else + where T : notnull +#endif +{ + const int StackAllocByteThreshold = 64 * sizeof(uint); + + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + // This upper bound would already cause an overflow exception in the builder, no need to do + 1. + bufferRequirements = BufferRequirements.Create(Size.CreateUpperBound(NumericConverter.DecimalBasedMaxByteCount)); + return format is DataFormat.Binary; + } + + protected override T ReadCore(PgReader reader) + { + var digitCount = reader.ReadInt16(); + var digits = stackalloc short[StackAllocByteThreshold / sizeof(short)].Slice(0, digitCount);; + var value = ConvertTo(NumericConverter.Read(reader, digits)); + return value; + } + + public override Size GetSize(SizeContext context, T value, ref object? 
writeState) => + PgNumeric.GetByteCount(default(T) switch + { + _ when typeof(decimal) == typeof(T) => PgNumeric.GetDigitCount((decimal)(object)value), + _ when typeof(short) == typeof(T) => PgNumeric.GetDigitCount((decimal)(short)(object)value), + _ when typeof(int) == typeof(T) => PgNumeric.GetDigitCount((decimal)(int)(object)value), + _ when typeof(long) == typeof(T) => PgNumeric.GetDigitCount((decimal)(long)(object)value), + _ when typeof(byte) == typeof(T) => PgNumeric.GetDigitCount((decimal)(byte)(object)value), + _ when typeof(sbyte) == typeof(T) => PgNumeric.GetDigitCount((decimal)(sbyte)(object)value), + _ when typeof(float) == typeof(T) => PgNumeric.GetDigitCount((decimal)(float)(object)value), + _ when typeof(double) == typeof(T) => PgNumeric.GetDigitCount((decimal)(double)(object)value), + _ => throw new NotSupportedException() + }); + + protected override void WriteCore(PgWriter writer, T value) + { + // We don't know how many digits we need so we allocate enough for the builder to use. 
+ Span destination = stackalloc short[PgNumeric.Builder.MaxDecimalNumericDigits]; + var numeric = ConvertFrom(value, destination); + NumericConverter.Write(writer, numeric); + } + + static PgNumeric.Builder ConvertFrom(T value, Span destination) + { +#if !NET7_0_OR_GREATER + if (typeof(short) == typeof(T)) + return new PgNumeric.Builder((decimal)(short)(object)value!, destination); + if (typeof(int) == typeof(T)) + return new PgNumeric.Builder((decimal)(int)(object)value!, destination); + if (typeof(long) == typeof(T)) + return new PgNumeric.Builder((decimal)(long)(object)value!, destination); + + if (typeof(byte) == typeof(T)) + return new PgNumeric.Builder((decimal)(byte)(object)value!, destination); + if (typeof(sbyte) == typeof(T)) + return new PgNumeric.Builder((decimal)(sbyte)(object)value!, destination); + + if (typeof(float) == typeof(T)) + return new PgNumeric.Builder((decimal)(float)(object)value!, destination); + if (typeof(double) == typeof(T)) + return new PgNumeric.Builder((decimal)(double)(object)value!, destination); + if (typeof(decimal) == typeof(T)) + return new PgNumeric.Builder((decimal)(object)value!, destination); + + throw new NotSupportedException(); +#else + return new PgNumeric.Builder(decimal.CreateChecked(value), destination); +#endif + } + + static T ConvertTo(in PgNumeric.Builder numeric) + { +#if !NET7_0_OR_GREATER + if (typeof(short) == typeof(T)) + return (T)(object)(short)numeric.ToDecimal(); + if (typeof(int) == typeof(T)) + return (T)(object)(int)numeric.ToDecimal(); + if (typeof(long) == typeof(T)) + return (T)(object)(long)numeric.ToDecimal(); + + if (typeof(byte) == typeof(T)) + return (T)(object)(byte)numeric.ToDecimal(); + if (typeof(sbyte) == typeof(T)) + return (T)(object)(sbyte)numeric.ToDecimal(); + + if (typeof(float) == typeof(T)) + return (T)(object)(float)numeric.ToDecimal(); + if (typeof(double) == typeof(T)) + return (T)(object)(double)numeric.ToDecimal(); + if (typeof(decimal) == typeof(T)) + return 
(T)(object)numeric.ToDecimal(); + + throw new NotSupportedException(); +#else + return T.CreateChecked(numeric.ToDecimal()); +#endif + } +} + +static class NumericConverter +{ + public static int DecimalBasedMaxByteCount = PgNumeric.GetByteCount(PgNumeric.Builder.MaxDecimalNumericDigits); + + public static PgNumeric.Builder Read(PgReader reader, Span digits) + { + var remainingStructureSize = PgNumeric.GetByteCount(0) - sizeof(short); + if (reader.ShouldBuffer(remainingStructureSize)) + reader.Buffer(remainingStructureSize); + var weight = reader.ReadInt16(); + var sign = reader.ReadInt16(); + var scale = reader.ReadInt16(); + foreach (ref var digit in digits) + { + if (reader.ShouldBuffer(sizeof(short))) + reader.Buffer(sizeof(short)); + digit = reader.ReadInt16(); + } + + return new PgNumeric.Builder(digits, weight, sign, scale); + } + + public static async ValueTask ReadAsync(PgReader reader, ArraySegment digits, CancellationToken cancellationToken) + { + var remainingStructureSize = PgNumeric.GetByteCount(0) - sizeof(short); + if (reader.ShouldBuffer(remainingStructureSize)) + await reader.BufferAsync(remainingStructureSize, cancellationToken).ConfigureAwait(false); + var weight = reader.ReadInt16(); + var sign = reader.ReadInt16(); + var scale = reader.ReadInt16(); + var array = digits.Array!; + for (var i = digits.Offset; i < array.Length; i++) + { + if (reader.ShouldBuffer(sizeof(short))) + await reader.BufferAsync(sizeof(short), cancellationToken).ConfigureAwait(false); + array[i] = reader.ReadInt16(); + } + + return new PgNumeric.Builder(digits, weight, sign, scale).Build(); + } + + public static void Write(PgWriter writer, PgNumeric.Builder numeric) + { + if (writer.ShouldFlush(PgNumeric.GetByteCount(0))) + writer.Flush(); + writer.WriteInt16((short)numeric.Digits.Length); + writer.WriteInt16(numeric.Weight); + writer.WriteInt16(numeric.Sign); + writer.WriteInt16(numeric.Scale); + + foreach (var digit in numeric.Digits) + { + if 
(writer.ShouldFlush(sizeof(short))) + writer.Flush(); + writer.WriteInt16(digit); + } + } + + public static async ValueTask WriteAsync(PgWriter writer, PgNumeric numeric, CancellationToken cancellationToken) + { + if (writer.ShouldFlush(PgNumeric.GetByteCount(0))) + await writer.FlushAsync(cancellationToken).ConfigureAwait(false); + writer.WriteInt16((short)numeric.Digits.Count); + writer.WriteInt16(numeric.Weight); + writer.WriteInt16(numeric.Sign); + writer.WriteInt16(numeric.Scale); + + foreach (var digit in numeric.Digits) + { + if (writer.ShouldFlush(sizeof(short))) + await writer.FlushAsync(cancellationToken).ConfigureAwait(false); + writer.WriteInt16(digit); + } + } +} diff --git a/src/Npgsql/Internal/Converters/Primitive/PgMoney.cs b/src/Npgsql/Internal/Converters/Primitive/PgMoney.cs new file mode 100644 index 0000000000..495e2a8aba --- /dev/null +++ b/src/Npgsql/Internal/Converters/Primitive/PgMoney.cs @@ -0,0 +1,104 @@ +using System; +using System.Diagnostics; +using System.Runtime.InteropServices; + +namespace Npgsql.Internal.Converters; + +readonly struct PgMoney +{ + const int DecimalBits = 4; + const int MoneyScale = 2; + readonly long _value; + + public PgMoney(long value) => _value = value; + + public PgMoney(decimal value) + { + if (value is < -92233720368547758.08M or > 92233720368547758.07M) + throw new OverflowException($"The supplied value '{value}' is outside the range for a PostgreSQL money value."); + + // No-op if scale was already 2 or less. + value = decimal.Round(value, MoneyScale, MidpointRounding.AwayFromZero); + + Span bits = stackalloc uint[DecimalBits]; + GetDecimalBits(value, bits, out var scale); + + var money = (long)bits[1] << 32 | bits[0]; + if (value < 0) + money = -money; + + // If we were less than scale 2, multiply. 
+ _value = (MoneyScale - scale) switch + { + 1 => money * 10, + 2 => money * 100, + _ => money + }; + } + + public long GetValue() => _value; + + public decimal ToDecimal() + { + var result = new decimal(_value); + var scaleFactor = new decimal(1, 0, 0, false, MoneyScale); + result *= scaleFactor; + return result; + } + + static void GetDecimalBits(decimal value, Span destination, out short scale) + { + Debug.Assert(destination.Length >= DecimalBits); + +#if NETSTANDARD + var raw = new DecimalRaw(value); + destination[0] = raw.Low; + destination[1] = raw.Mid; + destination[2] = raw.High; + destination[3] = (uint)raw.Flags; + scale = raw.Scale; +#else + decimal.GetBits(value, MemoryMarshal.Cast(destination)); +#endif +#if NET7_0_OR_GREATER + scale = value.Scale; +#else + scale = (byte)(destination[3] >> 16); +#endif + } + +#if NETSTANDARD + // Zero-alloc access to the decimal bits on netstandard. + [StructLayout(LayoutKind.Explicit)] + readonly struct DecimalRaw + { + const int ScaleMask = 0x00FF0000; + const int ScaleShift = 16; + + // Do not change the order in which these fields are declared. It + // should be same as in the System.Decimal.DecCalc struct. + [FieldOffset(0)] + readonly decimal _value; + [FieldOffset(0)] + readonly int _flags; + [FieldOffset(4)] + readonly uint _high; + [FieldOffset(8)] + readonly ulong _low64; + + // Convenience aliased fields but their usage needs to take endianness into account. + [FieldOffset(8)] + readonly uint _low; + [FieldOffset(12)] + readonly uint _mid; + + public DecimalRaw(decimal value) : this() => _value = value; + + public uint High => _high; + public uint Mid => BitConverter.IsLittleEndian ? _mid : _low; + public uint Low => BitConverter.IsLittleEndian ? 
_low : _mid; + public int Flags => _flags; + public short Scale => (short)((_flags & ScaleMask) >> ScaleShift); + } +#endif +} diff --git a/src/Npgsql/Internal/Converters/Primitive/PgNumeric.cs b/src/Npgsql/Internal/Converters/Primitive/PgNumeric.cs new file mode 100644 index 0000000000..fad0fd50a9 --- /dev/null +++ b/src/Npgsql/Internal/Converters/Primitive/PgNumeric.cs @@ -0,0 +1,462 @@ +using System; +using System.Buffers; +using System.Buffers.Binary; +using System.Diagnostics; +using System.Numerics; +using System.Runtime.InteropServices; +using static Npgsql.Internal.Converters.PgNumeric.Builder; + +namespace Npgsql.Internal.Converters; + +readonly struct PgNumeric +{ + // numeric digit count + weight + sign + scale + const int StructureByteCount = 4 * sizeof(short); + const int DecimalBits = 4; + const int StackAllocByteThreshold = 64 * sizeof(uint); + + readonly ushort _sign; + + public PgNumeric(ArraySegment digits, short weight, short sign, short scale) + { + Digits = digits; + Weight = weight; + _sign = (ushort)sign; + Scale = scale; + } + + /// Big endian array of numeric digits + public ArraySegment Digits { get; } + public short Weight { get; } + public short Sign => (short)_sign; + public short Scale { get; } + + public int GetByteCount() => GetByteCount(Digits.Count); + public static int GetByteCount(int digitCount) => StructureByteCount + digitCount * sizeof(short); + + static void GetDecimalBits(decimal value, Span destination, out short scale) + { + Debug.Assert(destination.Length >= DecimalBits); + +#if NETSTANDARD + var raw = new DecimalRaw(value); + destination[0] = raw.Low; + destination[1] = raw.Mid; + destination[2] = raw.High; + destination[3] = (uint)raw.Flags; + scale = raw.Scale; +#else + decimal.GetBits(value, MemoryMarshal.Cast(destination)); +#endif +#if NET7_0_OR_GREATER + scale = value.Scale; +#else + scale = (byte)(destination[3] >> 16); +#endif + } + + public static int GetDigitCount(decimal value) + { + Span bits = stackalloc 
uint[DecimalBits]; + GetDecimalBits(value, bits, out var scale); + bits = bits.Slice(0, DecimalBits - 1); + return GetDigitCountCore(bits, scale); + } + + public static int GetDigitCount(BigInteger value) + { +# if NETSTANDARD2_0 + var bits = value.ToByteArray().AsSpan(); + // Detect the presence of a padding byte and slice it away (as we don't have isUnsigned: true overloads on ns2.0). + if (value.Sign == 1 && bits.Length > 2 && (bits[bits.Length - 2] & 0x80) != 0 && bits[bits.Length - 1] == 0) + bits = bits.Slice(0, bits.Length - 1); + var uintRoundedByteCount = (bits.Length + (sizeof(uint) - 1)) / sizeof(uint) * sizeof(uint); +# else + var absValue = BigInteger.Abs(value); // isUnsigned: true fails for negative values. + var uintRoundedByteCount = (absValue.GetByteCount(isUnsigned: true) + (sizeof(uint) - 1)) / sizeof(uint) * sizeof(uint); +#endif + byte[]? uintRoundedBitsFromPool = null; + var uintRoundedBits = (uintRoundedByteCount <= StackAllocByteThreshold + ? stackalloc byte[StackAllocByteThreshold] + : uintRoundedBitsFromPool = ArrayPool.Shared.Rent(uintRoundedByteCount) + ).Slice(0, uintRoundedByteCount); + // Fill the last uint worth of bytes as it may only be partially written to. 
+ uintRoundedBits.Slice(uintRoundedBits.Length - sizeof(uint)).Fill(0); + +#if NETSTANDARD2_0 + bits.CopyTo(uintRoundedBits); +#else + var success = absValue.TryWriteBytes(uintRoundedBits, out _, isUnsigned: true); + Debug.Assert(success); +#endif + var uintBits = MemoryMarshal.Cast(uintRoundedBits); + if (!BitConverter.IsLittleEndian) + for (var i = 0; i < uintBits.Length; i++) + uintBits[i] = BinaryPrimitives.ReverseEndianness(uintBits[i]); + + var size = GetDigitCountCore(uintBits, scale: 0); + + if (uintRoundedBitsFromPool is not null) + ArrayPool.Shared.Return(uintRoundedBitsFromPool); + + return size; + } + + public decimal ToDecimal() => Builder.ToDecimal(Scale, Weight, _sign, Digits); + public BigInteger ToBigInteger() => Builder.ToBigInteger(Weight, _sign, Digits); + + public readonly ref struct Builder + { + const ushort SignPositive = 0x0000; + const ushort SignNegative = 0x4000; + const ushort SignNan = 0xC000; + const ushort SignPinf = 0xD000; + const ushort SignNinf = 0xF000; + + const uint NumericBase = 10000; + const int NumericBaseLog10 = 4; // log10(10000) + + internal const int MaxDecimalNumericDigits = 8; + + // Fast access for 10^n where n is 0-9 + static ReadOnlySpan UIntPowers10 => new uint[] { + 1, + 10, + 100, + 1000, + 10000, + 100000, + 1000000, + 10000000, + 100000000, + 1000000000 + }; + + const int MaxUInt32Scale = 9; + const int MaxUInt16Scale = 4; + + public short Weight { get; } + + readonly ushort _sign; + public short Sign => (short)_sign; + + public short Scale { get; } + public Span Digits { get; } + readonly short[]? 
_digitsArray; + + public Builder(Span digits, short weight, short sign, short scale) + { + Digits = digits; + Weight = weight; + _sign = (ushort)sign; + Scale = scale; + } + + public Builder(short[] digits, short weight, short sign, short scale) + { + Digits = _digitsArray = digits; + Weight = weight; + _sign = (ushort)sign; + Scale = scale; + } + + [Conditional("DEBUG")] + static void AssertInvariants() + { + Debug.Assert(UIntPowers10.Length >= NumericBaseLog10); + Debug.Assert(NumericBase < short.MaxValue); + } + + static void Create(ref short[]? digitsArray, ref Span destination, scoped Span bits, short scale, out short weight, out int digitCount) + { + AssertInvariants(); + digitCount = 0; + var digitWeight = -scale / NumericBaseLog10 - 1; + + var bitsUpperBound = (bits.Length * (MaxUInt32Scale + 1) + MaxUInt16Scale - 1) / MaxUInt16Scale + 1; + if (bitsUpperBound > destination.Length) + destination = digitsArray = new short[bitsUpperBound]; + + // When the given scale does not sit on a numeric digit boundary we divide once by the remainder power of 10 instead of the base. + // As a result the quotient is aligned to a digit boundary, we must then scale up the remainder by the missed power of 10 to compensate. + var scaleRemainder = scale % NumericBaseLog10; + if (scaleRemainder > 0 && DivideInPlace(bits, UIntPowers10[scaleRemainder], out var remainder) && remainder != 0) + { + remainder *= UIntPowers10[NumericBaseLog10 - scaleRemainder]; + digitWeight--; + destination[destination.Length - 1 - digitCount++] = (short)remainder; + } + while (DivideInPlace(bits, NumericBase, out remainder)) + { + // Initial zero remainders are skipped as these present trailing zero digits, which should not be stored. + if (digitCount == 0 && remainder == 0) + digitWeight++; + else + // We store the results starting from the end so the final digits end up in big endian. 
+ destination[destination.Length - 1 - digitCount++] = (short)remainder; + } + + weight = (short)(digitWeight + digitCount); + + } + + public Builder(decimal value, Span destination) + { + Span bits = stackalloc uint[DecimalBits]; + GetDecimalBits(value, bits, out var scale); + bits = bits.Slice(0, DecimalBits - 1); + + Create(ref _digitsArray, ref destination, bits, scale, out var weight, out var digitCount); + Digits = destination.Slice(destination.Length - digitCount); + Weight = weight; + _sign = value < 0 ? SignNegative : SignPositive; + Scale = scale; + } + + /// + /// + /// + /// + /// If the destination ends up being too small the builder allocates instead + public Builder(BigInteger value, Span destination) + { +# if NETSTANDARD2_0 + var bits = value.ToByteArray().AsSpan(); + // Detect the presence of a padding byte and slice it away (as we don't have isUnsigned: true overloads on ns2.0). + if (value.Sign == 1 && bits.Length > 2 && (bits[bits.Length - 2] & 0x80) != 0 && bits[bits.Length - 1] == 0) + bits = bits.Slice(0, bits.Length - 1); + var uintRoundedByteCount = (bits.Length + (sizeof(uint) - 1)) / sizeof(uint) * sizeof(uint); +# else + var absValue = BigInteger.Abs(value); // isUnsigned: true fails for negative values. + var uintRoundedByteCount = (absValue.GetByteCount(isUnsigned: true) + (sizeof(uint) - 1)) / sizeof(uint) * sizeof(uint); +#endif + byte[]? uintRoundedBitsFromPool = null; + var uintRoundedBits = (uintRoundedByteCount <= StackAllocByteThreshold + ? stackalloc byte[StackAllocByteThreshold] + : uintRoundedBitsFromPool = ArrayPool.Shared.Rent(uintRoundedByteCount) + ).Slice(0, uintRoundedByteCount); + // Fill the last uint worth of bytes as it may only be partially written to. 
+ uintRoundedBits.Slice(uintRoundedBits.Length - sizeof(uint)).Fill(0); + +#if NETSTANDARD2_0 + bits.CopyTo(uintRoundedBits); +#else + var success = absValue.TryWriteBytes(uintRoundedBits, out _, isUnsigned: true); + Debug.Assert(success); +#endif + var uintBits = MemoryMarshal.Cast(uintRoundedBits); + + // Our calculations are all done in little endian, meaning the least significant *uint* is first, just like in BigInteger. + // The bytes comprising every individual uint should still be converted to big endian though. + // As a result an array of bytes like [ 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8 ] should become [ 0x4, 0x3, 0x2, 0x1, 0x8, 0x7, 0x6, 0x5 ]. + if (!BitConverter.IsLittleEndian) + for (var i = 0; i < uintBits.Length; i++) + uintBits[i] = BinaryPrimitives.ReverseEndianness(uintBits[i]); + + Create(ref _digitsArray, ref destination, uintBits, scale: 0, out var weight, out var digitCount); + Digits = destination.Slice(destination.Length - digitCount); + Weight = weight; + _sign = value < 0 ? SignNegative : SignPositive; + Scale = 0; + + if (uintRoundedBitsFromPool is not null) + ArrayPool.Shared.Return(uintRoundedBitsFromPool); + } + + public PgNumeric Build() + { + var digitsArray = _digitsArray is not null + ? 
new ArraySegment(_digitsArray, _digitsArray.Length - Digits.Length, Digits.Length) + : new ArraySegment(Digits.ToArray()); + + return new(digitsArray, Weight, Sign, Scale); + } + + public decimal ToDecimal() => ToDecimal(Scale, Weight, _sign, Digits); + public BigInteger ToBigInteger() => ToBigInteger(Weight, _sign, Digits); + + int DigitCount => Digits.Length; + + /// + /// + /// + /// + /// + /// + /// Whether the input consists of any non zero bits + static bool DivideInPlace(Span left, uint right, out uint remainder) + => Divide(left, right, left, out remainder); + + /// Adapted from BigInteger, to allow us to operate directly on stack allocated bits + static bool Divide(ReadOnlySpan left, uint right, Span quotient, out uint remainder) + { + Debug.Assert(quotient.Length == left.Length); + + // Executes the division for one big and one 32-bit integer. + // Thus, we've similar code than below, but there is no loop for + // processing the 32-bit integer, since it's a single element. + + var carry = 0UL; + + var nonZeroInput = false; + for (var i = left.Length - 1; i >= 0; i--) + { + var value = (carry << 32) | left[i]; + nonZeroInput = nonZeroInput || value != 0; + var digit = value / right; + quotient[i] = (uint)digit; + carry = value - digit * right; + } + remainder = (uint)carry; + + return nonZeroInput; + } + + internal static int GetDigitCountCore(Span bits, int scale) + { + AssertInvariants(); + // When a fractional result is expected we must send two numeric digits. + // When the given scale does not sit on a numeric digit boundary- + // we divide once by the remaining power of 10 instead of the full base to align things. + var baseLogRemainder = scale % NumericBaseLog10; + var den = baseLogRemainder > 0 ? UIntPowers10[baseLogRemainder] : NumericBase; + var digits = 0; + while (DivideInPlace(bits, den, out var remainder)) + { + den = NumericBase; + // Initial zero remainders are skipped as these present trailing zero digits, which should not be transmitted. 
+ if (digits != 0 || remainder != 0) + digits++; + } + + return digits; + } + + internal static decimal ToDecimal(short scale, short weight, ushort sign, Span digits) + { + const int MaxUIntScale = 9; + const int MaxDecimalScale = 28; + + var digitCount = digits.Length; + if (digitCount > MaxDecimalNumericDigits) + throw new OverflowException("Numeric value does not fit in a System.Decimal"); + + if (Math.Abs(scale) > MaxDecimalScale) + throw new OverflowException("Numeric value does not fit in a System.Decimal"); + + if (digitCount == 0) + return sign switch + { + SignPositive or SignNegative => decimal.Zero, + SignNan => throw new InvalidCastException("Numeric NaN not supported by System.Decimal"), + SignPinf => throw new InvalidCastException("Numeric Infinity not supported by System.Decimal"), + SignNinf => throw new InvalidCastException("Numeric -Infinity not supported by System.Decimal"), + _ => throw new ArgumentOutOfRangeException() + }; + + var numericBase = new decimal(NumericBase); + var result = decimal.Zero; + for (var i = 0; i < digitCount - 1; i++) + { + result *= numericBase; + result += digits[i]; + } + + var digitScale = (weight + 1 - digitCount) * NumericBaseLog10; + var scaleDifference = scale < 0 ? digitScale : digitScale + scale; + + var digit = digits[digitCount - 1]; + if (digitCount == MaxDecimalNumericDigits) + { + // On the max group we adjust the base based on the scale difference, to prevent overflow for valid values. + var pow = UIntPowers10[-scaleDifference]; + result *= numericBase / pow; + result += new decimal(digit / pow); + } + else + { + result *= numericBase; + result += digit; + + if (scaleDifference < 0) + result /= UIntPowers10[-scaleDifference]; + else + while (scaleDifference > 0) + { + var scaleChunk = Math.Min(MaxUIntScale, scaleDifference); + result *= UIntPowers10[scaleChunk]; + scaleDifference -= scaleChunk; + } + } + + var scaleFactor = new decimal(1, 0, 0, false, (byte)(scale > 0 ? 
scale : 0)); + result *= scaleFactor; + return sign == SignNegative ? -result : result; + } + + internal static BigInteger ToBigInteger(short weight, ushort sign, Span digits) + { + var digitCount = digits.Length; + if (digitCount == 0) + return sign switch + { + SignPositive or SignNegative => BigInteger.Zero, + SignNan => throw new InvalidCastException("Numeric NaN not supported by BigInteger"), + SignPinf => throw new InvalidCastException("Numeric Infinity not supported by BigInteger"), + SignNinf => throw new InvalidCastException("Numeric -Infinity not supported by BigInteger"), + _ => throw new ArgumentOutOfRangeException() + }; + + var digitWeight = weight + 1 - digitCount; + if (digitWeight < 0) + throw new InvalidCastException("Numeric value with non-zero fractional digits not supported by BigInteger"); + + var numericBase = new BigInteger(NumericBase); + var result = BigInteger.Zero; + foreach (var digit in digits) + { + result *= numericBase; + result += new BigInteger(digit); + } + + var exponentCorrection = BigInteger.Pow(numericBase, digitWeight); + result *= exponentCorrection; + return sign == SignNegative ? -result : result; + } + } + +#if NETSTANDARD + // Zero-alloc access to the decimal bits on netstandard. + [StructLayout(LayoutKind.Explicit)] + readonly struct DecimalRaw + { + const int ScaleMask = 0x00FF0000; + const int ScaleShift = 16; + + // Do not change the order in which these fields are declared. It + // should be same as in the System.Decimal.DecCalc struct. + [FieldOffset(0)] + readonly decimal _value; + [FieldOffset(0)] + readonly int _flags; + [FieldOffset(4)] + readonly uint _high; + [FieldOffset(8)] + readonly ulong _low64; + + // Convenience aliased fields but their usage needs to take endianness into account. 
+ [FieldOffset(8)] + readonly uint _low; + [FieldOffset(12)] + readonly uint _mid; + + public DecimalRaw(decimal value) : this() => _value = value; + + public uint High => _high; + public uint Mid => BitConverter.IsLittleEndian ? _mid : _low; + public uint Low => BitConverter.IsLittleEndian ? _low : _mid; + public int Flags => _flags; + public short Scale => (short)((_flags & ScaleMask) >> ScaleShift); + } +#endif +} diff --git a/src/Npgsql/Internal/Converters/Primitive/RealConverter.cs b/src/Npgsql/Internal/Converters/Primitive/RealConverter.cs new file mode 100644 index 0000000000..b47e641aa5 --- /dev/null +++ b/src/Npgsql/Internal/Converters/Primitive/RealConverter.cs @@ -0,0 +1,43 @@ +using System; +using System.Numerics; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class RealConverter : PgBufferedConverter +#if NET7_0_OR_GREATER + where T : INumberBase +#endif +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(float)); + return format is DataFormat.Binary; + } + +#if NET7_0_OR_GREATER + protected override T ReadCore(PgReader reader) => T.CreateChecked(reader.ReadFloat()); + protected override void WriteCore(PgWriter writer, T value) => writer.WriteFloat(float.CreateChecked(value)); +#else + protected override T ReadCore(PgReader reader) + { + var value = reader.ReadFloat(); + if (typeof(float) == typeof(T)) + return (T)(object)value; + if (typeof(double) == typeof(T)) + return (T)(object)(double)value; + + throw new NotSupportedException(); + } + + protected override void WriteCore(PgWriter writer, T value) + { + if (typeof(float) == typeof(T)) + writer.WriteFloat((float)(object)value!); + else if (typeof(double) == typeof(T)) + writer.WriteFloat((float)(double)(object)value!); + else + throw new NotSupportedException(); + } +#endif +} diff --git 
a/src/Npgsql/Internal/Converters/Primitive/TextConverters.cs b/src/Npgsql/Internal/Converters/Primitive/TextConverters.cs new file mode 100644 index 0000000000..8fc04f1360 --- /dev/null +++ b/src/Npgsql/Internal/Converters/Primitive/TextConverters.cs @@ -0,0 +1,355 @@ +using System; +using System.Buffers; +using System.Collections.Generic; +using System.Diagnostics; +using System.IO; +using System.Runtime.CompilerServices; +using System.Text; +using System.Threading; +using System.Threading.Tasks; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +abstract class StringBasedTextConverter : PgStreamingConverter +{ + readonly Encoding _encoding; + protected StringBasedTextConverter(Encoding encoding) => _encoding = encoding; + + public override T Read(PgReader reader) + => Read(async: false, reader, _encoding).GetAwaiter().GetResult(); + + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => Read(async: true, reader, _encoding, cancellationToken); + + public override Size GetSize(SizeContext context, T value, ref object? writeState) + => TextConverter.GetSize(ref context, ConvertTo(value), _encoding); + + public override void Write(PgWriter writer, T value) + => writer.WriteChars(ConvertTo(value).Span, _encoding); + + public override ValueTask WriteAsync(PgWriter writer, T value, CancellationToken cancellationToken = default) + => writer.WriteCharsAsync(ConvertTo(value), _encoding, cancellationToken); + + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.None; + return format is DataFormat.Binary or DataFormat.Text; + } + + protected abstract ReadOnlyMemory ConvertTo(T value); + protected abstract T ConvertFrom(string value); + + ValueTask Read(bool async, PgReader reader, Encoding encoding, CancellationToken cancellationToken = default) + { + return async + ? 
ReadAsync(reader, encoding, cancellationToken) + : new(ConvertFrom(encoding.GetString(reader.ReadBytes(reader.CurrentRemaining)))); + +#if NET6_0_OR_GREATER + [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder<>))] +#endif + async ValueTask ReadAsync(PgReader reader, Encoding encoding, CancellationToken cancellationToken) + => ConvertFrom(encoding.GetString(await reader.ReadBytesAsync(reader.CurrentRemaining, cancellationToken).ConfigureAwait(false))); + } +} + +sealed class ReadOnlyMemoryTextConverter : StringBasedTextConverter> +{ + public ReadOnlyMemoryTextConverter(Encoding encoding) : base(encoding) { } + protected override ReadOnlyMemory ConvertTo(ReadOnlyMemory value) => value; + protected override ReadOnlyMemory ConvertFrom(string value) => value.AsMemory(); +} + +sealed class StringTextConverter : StringBasedTextConverter +{ + public StringTextConverter(Encoding encoding) : base(encoding) { } + protected override ReadOnlyMemory ConvertTo(string value) => value.AsMemory(); + protected override string ConvertFrom(string value) => value; +} + +abstract class ArrayBasedTextConverter : PgStreamingConverter +{ + readonly Encoding _encoding; + protected ArrayBasedTextConverter(Encoding encoding) => _encoding = encoding; + + public override T Read(PgReader reader) + => Read(async: false, reader, _encoding).GetAwaiter().GetResult(); + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => Read(async: true, reader, _encoding); + + public override Size GetSize(SizeContext context, T value, ref object? 
writeState) + => TextConverter.GetSize(ref context, ConvertTo(value), _encoding); + + public override void Write(PgWriter writer, T value) + => writer.WriteChars(ConvertTo(value).AsSpan(), _encoding); + + public override ValueTask WriteAsync(PgWriter writer, T value, CancellationToken cancellationToken = default) + => writer.WriteCharsAsync(ConvertTo(value), _encoding, cancellationToken); + + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.None; + return format is DataFormat.Binary or DataFormat.Text; + } + + protected abstract ArraySegment ConvertTo(T value); + protected abstract T ConvertFrom(ArraySegment value); + + ValueTask Read(bool async, PgReader reader, Encoding encoding) + { + return async ? ReadAsync(reader, encoding) : new(ConvertFrom(GetSegment(reader.ReadBytes(reader.CurrentRemaining), encoding))); + +#if NET6_0_OR_GREATER + [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder<>))] +#endif + async ValueTask ReadAsync(PgReader reader, Encoding encoding) + => ConvertFrom(GetSegment(await reader.ReadBytesAsync(reader.CurrentRemaining).ConfigureAwait(false), encoding)); + + static ArraySegment GetSegment(ReadOnlySequence bytes, Encoding encoding) + { + var array = TextConverter.GetChars(encoding, bytes); + return new(array, 0, array.Length); + } + } +} + +sealed class CharArraySegmentTextConverter : ArrayBasedTextConverter> +{ + public CharArraySegmentTextConverter(Encoding encoding) : base(encoding) { } + protected override ArraySegment ConvertTo(ArraySegment value) => value; + protected override ArraySegment ConvertFrom(ArraySegment value) => value; +} + +sealed class CharArrayTextConverter : ArrayBasedTextConverter +{ + public CharArrayTextConverter(Encoding encoding) : base(encoding) { } + protected override ArraySegment ConvertTo(char[] value) => new(value, 0, value.Length); + protected override char[] ConvertFrom(ArraySegment value) + { + if 
(value.Array?.Length == value.Count) + return value.Array!; + + var array = new char[value.Count]; + Array.Copy(value.Array!, value.Offset, array, 0, value.Count); + return array; + } +} + +sealed class CharTextConverter : PgBufferedConverter +{ + readonly Encoding _encoding; + readonly Size _oneCharMaxByteCount; + + public CharTextConverter(Encoding encoding) + { + _encoding = encoding; + _oneCharMaxByteCount = Size.CreateUpperBound(encoding.GetMaxByteCount(1)); + } + + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.Create(_oneCharMaxByteCount); + return format is DataFormat.Binary or DataFormat.Text; + } + + protected override char ReadCore(PgReader reader) + { + var byteSeq = reader.ReadBytes(Math.Min(_oneCharMaxByteCount.Value, reader.CurrentRemaining)); + Debug.Assert(byteSeq.IsSingleSegment); + var bytes = byteSeq.GetFirstSpan(); + + var chars = _encoding.GetCharCount(bytes); + if (chars < 1) + throw new NpgsqlException("Could not read char - string was empty"); + + Span destination = stackalloc char[chars]; + _encoding.GetChars(bytes, destination); + return destination[0]; + } + + public override Size GetSize(SizeContext context, char value, ref object? 
writeState) + { + Span spanValue = stackalloc char[] { value }; + return _encoding.GetByteCount(spanValue); + } + + protected override void WriteCore(PgWriter writer, char value) + { + Span spanValue = stackalloc char[] { value }; + writer.WriteChars(spanValue, _encoding); + } +} + +sealed class TextReaderTextConverter : PgStreamingConverter +{ + readonly Encoding _encoding; + public TextReaderTextConverter(Encoding encoding) => _encoding = encoding; + + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.None; + return format is DataFormat.Binary or DataFormat.Text; + } + + public override TextReader Read(PgReader reader) + => reader.GetTextReader(_encoding); + + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => reader.GetTextReaderAsync(_encoding, cancellationToken); + + public override Size GetSize(SizeContext context, TextReader value, ref object? writeState) => throw new NotImplementedException(); + public override void Write(PgWriter writer, TextReader value) => throw new NotImplementedException(); + public override ValueTask WriteAsync(PgWriter writer, TextReader value, CancellationToken cancellationToken = default) => throw new NotImplementedException(); +} + + +readonly struct GetChars +{ + public int Read { get; } + public GetChars(int read) => Read = read; +} + +sealed class GetCharsTextConverter : PgStreamingConverter, IResumableRead +{ + readonly Encoding _encoding; + public GetCharsTextConverter(Encoding encoding) => _encoding = encoding; + + public override GetChars Read(PgReader reader) + => reader.IsCharsRead + ? ResumableRead(reader) + : throw new NotSupportedException(); + + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => throw new NotSupportedException(); + + public override Size GetSize(SizeContext context, GetChars value, ref object? 
writeState) => throw new NotSupportedException(); + public override void Write(PgWriter writer, GetChars value) => throw new NotSupportedException(); + public override ValueTask WriteAsync(PgWriter writer, GetChars value, CancellationToken cancellationToken = default) => throw new NotSupportedException(); + + GetChars ResumableRead(PgReader reader) + { + reader.GetCharsReadInfo(_encoding, out var charsRead, out var textReader, out var charsOffset, out var buffer); + if (charsOffset < charsRead || (buffer is null && charsRead > 0)) + { + // With variable length encodings, moving backwards based on bytes means we have to start over. + reader.ResetCharsRead(out charsRead); + } + + // First seek towards the charsOffset. + // If buffer is null read the entire thing and report the length, see sql client remarks. + // https://learn.microsoft.com/en-us/dotnet/api/system.data.sqlclient.sqldatareader.getchars + int read; + if (buffer is null) + { + read = ConsumeChars(textReader, null); + } + else + { + var consumed = ConsumeChars(textReader, charsOffset - charsRead); + Debug.Assert(consumed == charsOffset - charsRead); + read = textReader.ReadBlock(buffer.GetValueOrDefault().Array!, buffer.GetValueOrDefault().Offset, buffer.GetValueOrDefault().Count); + } + + return new(read); + + static int ConsumeChars(TextReader reader, int? count) + { + if (count is 0) + return 0; + + const int maxStackAlloc = 512; +#if NETSTANDARD + var tempCharBuf = new char[maxStackAlloc]; +#else + Span tempCharBuf = stackalloc char[maxStackAlloc]; +#endif + var totalRead = 0; + var fin = false; + while (!fin) + { + var toRead = count is null ? maxStackAlloc : Math.Min(maxStackAlloc, count.Value - totalRead); +#if NETSTANDARD + var read = reader.ReadBlock(tempCharBuf, 0, toRead); +#else + var read = reader.ReadBlock(tempCharBuf.Slice(0, toRead)); +#endif + totalRead += read; + if (count is not null && read is 0) + throw new EndOfStreamException(); + + fin = count is null ? 
read is 0 : totalRead >= count; + } + return totalRead; + } + } + + bool IResumableRead.Supported => true; +} + +// Moved out for code size/sharing. +static class TextConverter +{ + public static Size GetSize(ref SizeContext context, ReadOnlyMemory value, Encoding encoding) + => encoding.GetByteCount(value.Span); + + // Adapted version of GetString(ROSeq) removing the intermediate string allocation to make a contiguous char array. + public static char[] GetChars(Encoding encoding, ReadOnlySequence bytes) + { + if (bytes.IsSingleSegment) + { + // If the incoming sequence is single-segment, one-shot this. + var firstSpan = bytes.First.Span; + var chars = new char[encoding.GetCharCount(firstSpan)]; + encoding.GetChars(bytes.First.Span, chars); + return chars; + } + else + { + // If the incoming sequence is multi-segment, create a stateful Decoder + // and use it as the workhorse. On the final iteration we'll pass flush=true. + + var decoder = encoding.GetDecoder(); + + // Maintain a list of all the segments we'll need to concat together. + // These will be released back to the pool at the end of the method. 
+ + var listOfSegments = new List<(char[], int)>(); + var totalCharCount = 0; + + var remainingBytes = bytes; + bool isFinalSegment; + + do + { + var firstSpan = remainingBytes.First.Span; + var next = remainingBytes.GetPosition(firstSpan.Length); + isFinalSegment = remainingBytes.IsSingleSegment; + + var charCountThisIteration = decoder.GetCharCount(firstSpan, flush: isFinalSegment); // could throw ArgumentException if overflow would occur + var rentedArray = ArrayPool.Shared.Rent(charCountThisIteration); + var actualCharsWrittenThisIteration = decoder.GetChars(firstSpan, rentedArray, flush: isFinalSegment); + listOfSegments.Add((rentedArray, actualCharsWrittenThisIteration)); + + totalCharCount += actualCharsWrittenThisIteration; + if (totalCharCount < 0) + throw new OutOfMemoryException(); + + remainingBytes = remainingBytes.Slice(next); + } while (!isFinalSegment); + + // Now build up the string to return, then release all of our scratch buffers + // back to the shared pool. + var chars = new char[totalCharCount]; + var span = chars.AsSpan(); + foreach (var (array, length) in listOfSegments) + { + array.AsSpan(0, length).CopyTo(span); + ArrayPool.Shared.Return(array); + span = span.Slice(length); + } + + return chars; + } + } +} diff --git a/src/Npgsql/Internal/Converters/RangeConverter.cs b/src/Npgsql/Internal/Converters/RangeConverter.cs new file mode 100644 index 0000000000..c378d830f7 --- /dev/null +++ b/src/Npgsql/Internal/Converters/RangeConverter.cs @@ -0,0 +1,216 @@ +using System; +using System.Diagnostics; +using System.Threading; +using System.Threading.Tasks; +using NpgsqlTypes; + +namespace Npgsql.Internal.Converters; + +sealed class RangeConverter : PgStreamingConverter> +{ + readonly PgConverter _subtypeConverter; + readonly BufferRequirements _subtypeRequirements; + + public RangeConverter(PgConverter subtypeConverter) + { + if (!subtypeConverter.CanConvert(DataFormat.Binary, out var bufferRequirements)) + throw new NotSupportedException("Range 
subtype converter has to support the binary format to be compatible."); + _subtypeRequirements = bufferRequirements; + _subtypeConverter = subtypeConverter; + } + + public override NpgsqlRange Read(PgReader reader) + => Read(async: false, reader, CancellationToken.None).GetAwaiter().GetResult(); + + public override ValueTask> ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => Read(async: true, reader, cancellationToken); + + async ValueTask> Read(bool async, PgReader reader, CancellationToken cancellationToken) + { + if (reader.ShouldBuffer(sizeof(byte))) + await reader.Buffer(async, sizeof(byte), cancellationToken).ConfigureAwait(false); + + var flags = (RangeFlags)reader.ReadByte(); + if ((flags & RangeFlags.Empty) != 0) + return NpgsqlRange.Empty; + + var lowerBound = default(TSubtype); + var upperBound = default(TSubtype); + + var converter = _subtypeConverter; + if ((flags & RangeFlags.LowerBoundInfinite) == 0) + { + if (reader.ShouldBuffer(sizeof(int))) + await reader.Buffer(async, sizeof(int), cancellationToken).ConfigureAwait(false); + var length = reader.ReadInt32(); + + // Note that we leave the CLR default for nulls + if (length != -1) + { + var scope = await reader.BeginNestedRead(async, length, _subtypeRequirements.Read, cancellationToken).ConfigureAwait(false); + try + { + lowerBound = async + ? 
await converter.ReadAsync(reader, cancellationToken).ConfigureAwait(false) + // ReSharper disable once MethodHasAsyncOverloadWithCancellation + : converter.Read(reader); + } + finally + { + if (async) + await scope.DisposeAsync().ConfigureAwait(false); + else + scope.Dispose(); + } + } + } + + if ((flags & RangeFlags.UpperBoundInfinite) == 0) + { + if (reader.ShouldBuffer(sizeof(int))) + await reader.Buffer(async, sizeof(int), cancellationToken).ConfigureAwait(false); + var length = reader.ReadInt32(); + + // Note that we leave the CLR default for nulls + if (length != -1) + { + var scope = await reader.BeginNestedRead(async, length, _subtypeRequirements.Read, cancellationToken).ConfigureAwait(false); + try + { + upperBound = async + ? await converter.ReadAsync(reader, cancellationToken).ConfigureAwait(false) + // ReSharper disable once MethodHasAsyncOverloadWithCancellation + : converter.Read(reader); + } + finally + { + if (async) + await scope.DisposeAsync().ConfigureAwait(false); + else + scope.Dispose(); + } + } + } + + return new NpgsqlRange(lowerBound, upperBound, flags); + } + + public override Size GetSize(SizeContext context, NpgsqlRange value, ref object? writeState) + { + var totalSize = Size.Create(1); + if (value.IsEmpty) + return totalSize; // Just flags. + + WriteState? 
state = null; + if (!value.LowerBoundInfinite) + { + totalSize = totalSize.Combine(sizeof(int)); + var subTypeState = (object?)null; + if (_subtypeConverter.GetSizeOrDbNull(context.Format, _subtypeRequirements.Write, value.LowerBound, ref subTypeState) is { } size) + { + totalSize = totalSize.Combine(size); + (state ??= new WriteState()).LowerBoundSize = size; + state.LowerBoundWriteState = subTypeState; + } + else if (state is not null) + state.LowerBoundSize = -1; + } + + if (!value.UpperBoundInfinite) + { + totalSize = totalSize.Combine(sizeof(int)); + var subTypeState = (object?)null; + if (_subtypeConverter.GetSizeOrDbNull(context.Format, _subtypeRequirements.Write, value.UpperBound, ref subTypeState) is { } size) + { + totalSize = totalSize.Combine(size); + (state ??= new WriteState()).UpperBoundSize = size; + state.UpperBoundWriteState = subTypeState; + } + else if (state is not null) + state.UpperBoundSize = -1; + } + + writeState = state; + return totalSize; + } + + public override void Write(PgWriter writer, NpgsqlRange value) + => Write(async: false, writer, value, CancellationToken.None).GetAwaiter().GetResult(); + + public override ValueTask WriteAsync(PgWriter writer, NpgsqlRange value, CancellationToken cancellationToken = default) + => Write(async: true, writer, value, cancellationToken); + + async ValueTask Write(bool async, PgWriter writer, NpgsqlRange value, CancellationToken cancellationToken) + { + var writeState = writer.Current.WriteState as WriteState; + var lowerBoundSize = writeState?.LowerBoundSize ?? -1; + var upperBoundSize = writeState?.UpperBoundSize ?? -1; + + var flags = value.Flags; + if (!value.IsEmpty) + { + // Normalize nulls to infinite, as pg does. 
+ if (lowerBoundSize == -1 && !value.LowerBoundInfinite) + flags = (flags & ~RangeFlags.LowerBoundInclusive) | RangeFlags.LowerBoundInfinite; + + if (upperBoundSize == -1 && !value.UpperBoundInfinite) + flags = (flags & ~RangeFlags.UpperBoundInclusive) | RangeFlags.UpperBoundInfinite; + } + + if (writer.ShouldFlush(sizeof(byte))) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + writer.WriteByte((byte)flags); + var lowerBoundInfinite = flags.HasFlag(RangeFlags.LowerBoundInfinite); + var upperBoundInfinite = flags.HasFlag(RangeFlags.UpperBoundInfinite); + if (value.IsEmpty || (lowerBoundInfinite && upperBoundInfinite)) + return; + + // Always need write state from this point. + if (writeState is null) + throw new InvalidCastException($"Invalid write state, expected {typeof(WriteState).FullName}."); + + if (!lowerBoundInfinite) + { + Debug.Assert(lowerBoundSize.Value != -1); + if (lowerBoundSize.Kind is SizeKind.Unknown) + throw new NotImplementedException(); + + var byteCount = lowerBoundSize.Value; // Never -1 so it's a byteCount. + if (writer.ShouldFlush(sizeof(int))) // Length + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + writer.WriteInt32(byteCount); + using var _ = await writer.BeginNestedWrite(async, _subtypeRequirements.Write, byteCount, + writeState.LowerBoundWriteState, cancellationToken).ConfigureAwait(false); + if (async) + await _subtypeConverter.WriteAsync(writer, value.LowerBound!, cancellationToken).ConfigureAwait(false); + else + _subtypeConverter.Write(writer, value.LowerBound!); + } + + if (!upperBoundInfinite) + { + Debug.Assert(upperBoundSize.Value != -1); + if (upperBoundSize.Kind is SizeKind.Unknown) + throw new NotImplementedException(); + + var byteCount = upperBoundSize.Value; // Never -1 so it's a byteCount. 
// File (from patch): src/Npgsql/Internal/Converters/SystemTextJsonConverter.cs
// NOTE(review): reconstructed from a whitespace-mangled diff in which generic type
// arguments (<...>) were stripped; the re-inserted generics are inferred and should be
// verified against the upstream file.
using System;
using System.Buffers;
using System.Diagnostics;
using System.Diagnostics.CodeAnalysis;
using System.IO;
using System.Text;
using System.Text.Json;
using System.Text.Json.Serialization.Metadata;
using System.Threading;
using System.Threading.Tasks;

namespace Npgsql.Internal.Converters;

/// <summary>
/// Reads and writes PostgreSQL json/jsonb values using System.Text.Json.
/// </summary>
/// <remarks>
/// <typeparamref name="T"/> is the CLR type handled by this converter instance while
/// <typeparamref name="TBase"/> is the statically mapped base type; when they differ,
/// serialization goes through the base type's contract. jsonb values carry a one-byte
/// protocol version prefix on the wire (see <see cref="SystemTextJsonConverter.JsonbProtocolVersion"/>).
/// </remarks>
sealed class SystemTextJsonConverter<T, TBase> : PgStreamingConverter<T?> where T : TBase?
{
    readonly bool _jsonb;                    // true => 'jsonb' (version-prefixed); false => 'json'
    readonly Encoding _textEncoding;         // connection text encoding; UTF-8 enables the streaming fast path
    readonly JsonTypeInfo _jsonTypeInfo;     // contract used for (de)serialization
    readonly JsonTypeInfo? _objectTypeInfo;  // only set when TBase == object (runtime polymorphism)

    public SystemTextJsonConverter(bool jsonb, Encoding textEncoding, JsonSerializerOptions serializerOptions)
    {
        // We do GetTypeInfo calls directly so we need a resolver.
        if (serializerOptions.TypeInfoResolver is null)
            serializerOptions.TypeInfoResolver = new DefaultJsonTypeInfoResolver();

        _jsonb = jsonb;
        _textEncoding = textEncoding;
        // When a dedicated base type is mapped, (de)serialize through its contract so derived
        // instances are handled consistently.
        _jsonTypeInfo = typeof(TBase) != typeof(object) && typeof(T) != typeof(TBase)
            ? (JsonTypeInfo)serializerOptions.GetTypeInfo(typeof(TBase))
            : (JsonTypeInfo)serializerOptions.GetTypeInfo(typeof(T));
        // Unspecified polymorphism, let STJ handle it.
        _objectTypeInfo = typeof(TBase) == typeof(object)
            ? (JsonTypeInfo)serializerOptions.GetTypeInfo(typeof(object))
            : null;
    }

    public override T? Read(PgReader reader)
        => Read(async: false, reader, CancellationToken.None).GetAwaiter().GetResult();

    public override ValueTask<T?> ReadAsync(PgReader reader, CancellationToken cancellationToken = default)
        => Read(async: true, reader, cancellationToken);

    async ValueTask<T?> Read(bool async, PgReader reader, CancellationToken cancellationToken)
    {
        // Make sure the jsonb version byte is buffered before TryReadStream consumes it.
        if (_jsonb && reader.ShouldBuffer(sizeof(byte)))
            await reader.Buffer(async, sizeof(byte), cancellationToken).ConfigureAwait(false);

        // We always fall back to buffers on older targets due to the absence of transcoding stream.
        if (SystemTextJsonConverter.TryReadStream(_jsonb, _textEncoding, reader, out var byteCount, out var stream))
        {
            using var _ = stream;
            if (_jsonTypeInfo is JsonTypeInfo<T> typeInfoOfT)
                return async
                    ? await JsonSerializer.DeserializeAsync(stream, typeInfoOfT, cancellationToken).ConfigureAwait(false)
                    : JsonSerializer.Deserialize(stream, typeInfoOfT);

            return (T?)(async
                ? await JsonSerializer.DeserializeAsync(stream, (JsonTypeInfo)_jsonTypeInfo, cancellationToken).ConfigureAwait(false)
                : JsonSerializer.Deserialize(stream, (JsonTypeInfo)_jsonTypeInfo));
        }
        else
        {
            // Non-UTF8 data without a transcoding stream: rent buffers, transcode to chars, deserialize.
            var (rentedChars, rentedBytes) = await SystemTextJsonConverter.ReadRentedBuffer(async, _textEncoding, byteCount, reader, cancellationToken).ConfigureAwait(false);
            var result = _jsonTypeInfo is JsonTypeInfo<T> typeInfoOfT
                ? JsonSerializer.Deserialize(rentedChars.AsSpan(), typeInfoOfT)
                : (T?)JsonSerializer.Deserialize(rentedChars.AsSpan(), (JsonTypeInfo)_jsonTypeInfo);

            ArrayPool<char>.Shared.Return(rentedChars.Array!);
            if (rentedBytes is not null)
                ArrayPool<byte>.Shared.Return(rentedBytes);

            return result;
        }
    }

    public override Size GetSize(SizeContext context, T? value, ref object? writeState)
    {
        // Serialization happens here; Write/WriteAsync below only copy the bytes that this
        // method stores in the write state.
        var capacity = 0;
        // JsonDocument knows its own size; presize the stream to avoid growth copies.
        if (typeof(T) == typeof(JsonDocument))
            capacity = ((JsonDocument?)(object?)value)?.RootElement.GetRawText().Length ?? 0;
        var stream = new MemoryStream(capacity);

        // Mirroring ASP.NET Core serialization strategy https://github.com/dotnet/aspnetcore/issues/47548
        if (_objectTypeInfo is null)
            JsonSerializer.Serialize(stream, value, (JsonTypeInfo)_jsonTypeInfo);
        else
            JsonSerializer.Serialize(stream, value, _objectTypeInfo);

        return SystemTextJsonConverter.GetSizeCore(_jsonb, stream, _textEncoding, ref writeState);
    }

    public override void Write(PgWriter writer, T? value)
        => SystemTextJsonConverter.Write(_jsonb, async: false, writer, CancellationToken.None).GetAwaiter().GetResult();

    public override ValueTask WriteAsync(PgWriter writer, T? value, CancellationToken cancellationToken = default)
        => SystemTextJsonConverter.Write(_jsonb, async: true, writer, cancellationToken);
}

// Split out to avoid unnecessary code duplication across generic instantiations.
static class SystemTextJsonConverter
{
    public const byte JsonbProtocolVersion = 1;
    // We pick a value that is the largest multiple of 4096 that is still smaller than the large object heap threshold (85K).
    const int StreamingThreshold = 81920;

    /// <summary>
    /// Consumes the jsonb version byte (when applicable) and, where possible, exposes the
    /// remaining payload as a UTF-8 stream (transcoding if the connection encoding differs).
    /// Returns false when the caller must fall back to rented buffers.
    /// </summary>
    public static bool TryReadStream(bool jsonb, Encoding encoding, PgReader reader, out int byteCount, [NotNullWhen(true)]out Stream? stream)
    {
        if (jsonb)
        {
            var version = reader.ReadByte();
            if (version != JsonbProtocolVersion)
                throw new InvalidCastException($"Unknown jsonb wire format version {version}");
        }

        var isUtf8 = encoding.CodePage == Encoding.UTF8.CodePage;
        byteCount = reader.CurrentRemaining;
        // We always fall back to buffers on older targets
        if (isUtf8
#if !NETSTANDARD
            || byteCount >= StreamingThreshold
#endif
            )
        {
            stream =
#if !NETSTANDARD
                !isUtf8
                    ? Encoding.CreateTranscodingStream(reader.GetStream(), encoding, Encoding.UTF8)
                    : reader.GetStream();
#else
                reader.GetStream();
            Debug.Assert(isUtf8);
#endif
        }
        else
            stream = null;

        return stream is not null;
    }

    /// <summary>
    /// Buffered fallback: reads the payload (zero-copy when already buffered), transcodes it
    /// to a rented char array. Caller is responsible for returning both rented arrays.
    /// </summary>
    public static async ValueTask<(ArraySegment<char> RentedChars, byte[]? RentedBytes)> ReadRentedBuffer(bool async, Encoding encoding, int byteCount, PgReader reader, CancellationToken cancellationToken)
    {
        // Never utf8, but we may still be able to save a copy.
        byte[]? rentedBuffer = null;
        if (!reader.TryReadBytes(byteCount, out ReadOnlyMemory<byte> buffer))
        {
            rentedBuffer = ArrayPool<byte>.Shared.Rent(byteCount);
            if (async)
                await reader.ReadBytesAsync(rentedBuffer.AsMemory(0, byteCount), cancellationToken).ConfigureAwait(false);
            else
                reader.ReadBytes(rentedBuffer.AsSpan(0, byteCount));
            buffer = rentedBuffer.AsMemory(0, byteCount);
        }

        var charCount = encoding.GetCharCount(buffer.Span);
        var chars = ArrayPool<char>.Shared.Rent(charCount);
        encoding.GetChars(buffer.Span, chars);

        return (new(chars, 0, charCount), rentedBuffer);
    }

    /// <summary>
    /// Computes the wire size of the serialized payload, storing either the UTF-8 stream or
    /// the transcoded byte array in the write state for <see cref="Write"/> to flush.
    /// </summary>
    public static Size GetSizeCore(bool jsonb, MemoryStream stream, Encoding encoding, ref object? writeState)
    {
        if (encoding.CodePage == Encoding.UTF8.CodePage)
        {
            writeState = stream;
            return (int)stream.Length + (jsonb ? sizeof(byte) : 0);
        }

        if (!stream.TryGetBuffer(out var buffer))
            throw new InvalidOperationException();

        // Non-UTF8 connection encoding: transcode UTF-8 -> chars -> target encoding.
        var bytes = encoding.GetBytes(Encoding.UTF8.GetChars(buffer.Array!, buffer.Offset, buffer.Count));
        writeState = bytes;
        return bytes.Length + (jsonb ? sizeof(byte) : 0);
    }

    /// <summary>Writes the version prefix (jsonb only) and the bytes captured during GetSize.</summary>
    public static async ValueTask Write(bool jsonb, bool async, PgWriter writer, CancellationToken cancellationToken)
    {
        if (jsonb)
        {
            if (writer.ShouldFlush(sizeof(byte)))
                await writer.Flush(async, cancellationToken).ConfigureAwait(false);
            writer.WriteByte(JsonbProtocolVersion);
        }

        ArraySegment<byte> buffer;
        switch (writer.Current.WriteState)
        {
        case MemoryStream stream:
            if (!stream.TryGetBuffer(out buffer))
                throw new InvalidOperationException();
            break;
        case byte[] bytes:
            buffer = new ArraySegment<byte>(bytes);
            break;
        default:
            throw new InvalidCastException($"Invalid state {writer.Current.WriteState?.GetType().FullName}.");
        }

        if (async)
            await writer.WriteBytesAsync(buffer.AsMemory(), cancellationToken).ConfigureAwait(false);
        else
            writer.WriteBytes(buffer.AsSpan());
    }
}
// File (from patch): src/Npgsql/Internal/Converters/Temporal/DateConverters.cs
using System;
using Npgsql.Properties;

// ReSharper disable once CheckNamespace
namespace Npgsql.Internal.Converters;

/// <summary>
/// Converts <see cref="DateTime"/> to/from the PostgreSQL 'date' binary format:
/// an int32 day count relative to 2000-01-01, with int.MaxValue/int.MinValue as the
/// wire encodings of 'infinity'/'-infinity'.
/// </summary>
sealed class DateTimeDateConverter : PgBufferedConverter<DateTime>
{
    // When enabled, 'infinity'/'-infinity' map to DateTime.MaxValue/MinValue.
    readonly bool _dateTimeInfinityConversions;

    // PostgreSQL date epoch.
    static readonly DateTime BaseValue = new(2000, 1, 1, 0, 0, 0);

    public DateTimeDateConverter(bool dateTimeInfinityConversions)
        => _dateTimeInfinityConversions = dateTimeInfinityConversions;

    public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements)
    {
        bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(int));
        return format is DataFormat.Binary;
    }

    protected override DateTime ReadCore(PgReader reader)
        => reader.ReadInt32() switch
        {
            int.MaxValue => _dateTimeInfinityConversions
                ? DateTime.MaxValue
                : throw new InvalidCastException(NpgsqlStrings.CannotReadInfinityValue),
            int.MinValue => _dateTimeInfinityConversions
                ? DateTime.MinValue
                : throw new InvalidCastException(NpgsqlStrings.CannotReadInfinityValue),
            var value => BaseValue + TimeSpan.FromDays(value)
        };

    protected override void WriteCore(PgWriter writer, DateTime value)
    {
        if (_dateTimeInfinityConversions)
        {
            if (value == DateTime.MaxValue)
            {
                writer.WriteInt32(int.MaxValue);
                return;
            }

            if (value == DateTime.MinValue)
            {
                writer.WriteInt32(int.MinValue);
                return;
            }
        }

        // Any time-of-day component is truncated; only the date part is representable.
        writer.WriteInt32((value.Date - BaseValue).Days);
    }
}

#if NET6_0_OR_GREATER
/// <summary>
/// Converts <see cref="DateOnly"/> to/from the PostgreSQL 'date' binary format
/// (int32 days since 2000-01-01); same infinity handling as the DateTime converter above.
/// </summary>
sealed class DateOnlyDateConverter : PgBufferedConverter<DateOnly>
{
    readonly bool _dateTimeInfinityConversions;

    // PostgreSQL date epoch.
    static readonly DateOnly BaseValue = new(2000, 1, 1);

    public DateOnlyDateConverter(bool dateTimeInfinityConversions)
        => _dateTimeInfinityConversions = dateTimeInfinityConversions;

    public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements)
    {
        bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(int));
        return format is DataFormat.Binary;
    }

    protected override DateOnly ReadCore(PgReader reader)
        => reader.ReadInt32() switch
        {
            int.MaxValue => _dateTimeInfinityConversions
                ? DateOnly.MaxValue
                : throw new InvalidCastException(NpgsqlStrings.CannotReadInfinityValue),
            int.MinValue => _dateTimeInfinityConversions
                ? DateOnly.MinValue
                : throw new InvalidCastException(NpgsqlStrings.CannotReadInfinityValue),
            var value => BaseValue.AddDays(value)
        };

    protected override void WriteCore(PgWriter writer, DateOnly value)
    {
        if (_dateTimeInfinityConversions)
        {
            if (value == DateOnly.MaxValue)
            {
                writer.WriteInt32(int.MaxValue);
                return;
            }

            if (value == DateOnly.MinValue)
            {
                writer.WriteInt32(int.MinValue);
                return;
            }
        }

        writer.WriteInt32(value.DayNumber - BaseValue.DayNumber);
    }
}
#endif
// File (from patch): src/Npgsql/Internal/Converters/Temporal/DateTimeConverterResolver.cs
// NOTE(review): generic type arguments were stripped by the mangled diff and have been
// re-inferred below; verify against the upstream file.
using System;
using System.Collections.Generic;
using Npgsql.Internal.Postgres;
using Npgsql.Properties;
using NpgsqlTypes;

// ReSharper disable once CheckNamespace
namespace Npgsql.Internal.Converters;

/// <summary>
/// Resolves between 'timestamp with time zone' and 'timestamp without time zone' converters
/// based on <see cref="DateTimeKind"/>: Utc maps to timestamptz while Local/Unspecified map
/// to timestamp; mismatches throw instead of silently converting.
/// </summary>
sealed class DateTimeConverterResolver<T> : PgConverterResolver<T>
{
    readonly PgSerializerOptions _options;
    // Maps a value of T (a DateTime, a range or a multirange) to a resolution; supplied by
    // the factory methods on the non-generic class below.
    readonly Func<DateTimeConverterResolver<T>, T?, PgTypeId?, PgConverterResolution?> _resolver;
    readonly Func<PgTypeId, PgConverter<T>> _factory;
    readonly PgTypeId _timestampTz;
    PgConverter<T>? _timestampTzConverter;  // created lazily, cached per resolver
    readonly PgTypeId _timestamp;
    PgConverter<T>? _timestampConverter;    // created lazily, cached per resolver
    readonly bool _dateTimeInfinityConversions;

    internal DateTimeConverterResolver(PgSerializerOptions options, Func<DateTimeConverterResolver<T>, T?, PgTypeId?, PgConverterResolution?> resolver, Func<PgTypeId, PgConverter<T>> factory, PgTypeId timestampTz, PgTypeId timestamp, bool dateTimeInfinityConversions)
    {
        _options = options;
        _resolver = resolver;
        _factory = factory;
        _timestampTz = timestampTz;
        _timestamp = timestamp;
        _dateTimeInfinityConversions = dateTimeInfinityConversions;
    }

    public override PgConverterResolution GetDefault(PgTypeId? pgTypeId)
    {
        if (pgTypeId == _timestampTz)
            return new(_timestampTzConverter ??= _factory(_timestampTz), _timestampTz);
        // A null type id defaults to 'timestamp'.
        if (pgTypeId is null || pgTypeId == _timestamp)
            return new(_timestampConverter ??= _factory(_timestamp), _timestamp);

        throw CreateUnsupportedPgTypeIdException(pgTypeId.Value);
    }

    /// <summary>
    /// Resolves a converter for a single <see cref="DateTime"/> value, enforcing the
    /// Kind/type-id matching rules above. When <paramref name="validateOnly"/> is true the
    /// checks still run but no resolution is produced (used for second and later bounds).
    /// </summary>
    public PgConverterResolution? Get(DateTime value, PgTypeId? expectedPgTypeId, bool validateOnly = false)
    {
        if (value.Kind is DateTimeKind.Utc)
        {
            // We coalesce with expectedPgTypeId to throw on unknown type ids.
            return expectedPgTypeId == _timestamp
                ? throw new ArgumentException(
                    string.Format(NpgsqlStrings.TimestampNoDateTimeUtc, _options.GetDataTypeName(_timestamp).DisplayName, _options.GetDataTypeName(_timestampTz).DisplayName), nameof(value))
                : validateOnly ? null : GetDefault(expectedPgTypeId ?? _timestampTz);
        }

        // For timestamptz types we'll accept unspecified MinValue/MaxValue as well.
        if (expectedPgTypeId == _timestampTz
            && !(_dateTimeInfinityConversions && (value == DateTime.MinValue || value == DateTime.MaxValue)))
        {
            throw new ArgumentException(
                string.Format(NpgsqlStrings.TimestampTzNoDateTimeUnspecified, value.Kind, _options.GetDataTypeName(_timestampTz).DisplayName), nameof(value));
        }

        // We coalesce with expectedPgTypeId to throw on unknown type ids.
        return GetDefault(expectedPgTypeId ?? _timestamp);
    }

    public override PgConverterResolution? Get(T? value, PgTypeId? expectedPgTypeId)
        => _resolver(this, value, expectedPgTypeId);
}

/// <summary>Factory methods creating DateTime-kind-aware resolvers for scalar, range and multirange types.</summary>
static class DateTimeConverterResolver
{
    public static DateTimeConverterResolver<DateTime> CreateResolver(PgSerializerOptions options, PgTypeId timestampTz, PgTypeId timestamp, bool dateTimeInfinityConversions)
        => new(options, static (resolver, value, expectedPgTypeId) => resolver.Get(value, expectedPgTypeId), pgTypeId =>
        {
            if (pgTypeId == timestampTz)
                return new DateTimeConverter(dateTimeInfinityConversions, DateTimeKind.Utc);
            if (pgTypeId == timestamp)
                return new DateTimeConverter(dateTimeInfinityConversions, DateTimeKind.Unspecified);

            throw new NotSupportedException();
        }, timestampTz, timestamp, dateTimeInfinityConversions);

    public static DateTimeConverterResolver<NpgsqlRange<DateTime>> CreateRangeResolver(PgSerializerOptions options, PgTypeId timestampTz, PgTypeId timestamp, bool dateTimeInfinityConversions)
        => new(options, static (resolver, value, expectedPgTypeId) =>
        {
            // Resolve both sides to make sure we end up with consistent PgTypeIds.
            PgConverterResolution? resolution = null;
            if (!value.LowerBoundInfinite)
                resolution = resolver.Get(value.LowerBound, expectedPgTypeId);

            if (!value.UpperBoundInfinite)
            {
                var result = resolver.Get(value.UpperBound, resolution?.PgTypeId ?? expectedPgTypeId, validateOnly: resolution is not null);
                resolution ??= result;
            }

            return resolution;
        }, pgTypeId =>
        {
            if (pgTypeId == timestampTz)
                return new RangeConverter<DateTime>(new DateTimeConverter(dateTimeInfinityConversions, DateTimeKind.Utc));
            if (pgTypeId == timestamp)
                return new RangeConverter<DateTime>(new DateTimeConverter(dateTimeInfinityConversions, DateTimeKind.Unspecified));

            throw new NotSupportedException();
        }, timestampTz, timestamp, dateTimeInfinityConversions);

    public static DateTimeConverterResolver<T> CreateMultirangeResolver<T, TElement>(PgSerializerOptions options, PgTypeId timestampTz, PgTypeId timestamp, bool dateTimeInfinityConversions)
        where T : IList<TElement> where TElement : notnull
    {
        if (typeof(TElement) != typeof(NpgsqlRange<DateTime>))
            ThrowHelper.ThrowNotSupportedException("Unsupported element type");

        return new DateTimeConverterResolver<T>(options, static (resolver, value, expectedPgTypeId) =>
        {
            PgConverterResolution? resolution = null;
            if (value is null)
                return null;

            // Resolve every bound of every element so all elements agree on one type id.
            foreach (var element in (IList<NpgsqlRange<DateTime>>)value)
            {
                PgConverterResolution? result;
                if (!element.LowerBoundInfinite)
                {
                    result = resolver.Get(element.LowerBound, resolution?.PgTypeId ?? expectedPgTypeId, validateOnly: resolution is not null);
                    resolution ??= result;
                }
                if (!element.UpperBoundInfinite)
                {
                    result = resolver.Get(element.UpperBound, resolution?.PgTypeId ?? expectedPgTypeId, validateOnly: resolution is not null);
                    resolution ??= result;
                }
            }
            return resolution;
        }, pgTypeId =>
        {
            // NOTE(review): the converter generic arguments and the intermediate cast were
            // reconstructed from the mangled diff — verify against upstream.
            if (pgTypeId == timestampTz)
                return new MultirangeConverter<T, TElement>((PgConverter<TElement>)(object)new RangeConverter<DateTime>(new DateTimeConverter(dateTimeInfinityConversions, DateTimeKind.Utc)));
            if (pgTypeId == timestamp)
                return new MultirangeConverter<T, TElement>((PgConverter<TElement>)(object)new RangeConverter<DateTime>(new DateTimeConverter(dateTimeInfinityConversions, DateTimeKind.Unspecified)));

            throw new NotSupportedException();
        }, timestampTz, timestamp, dateTimeInfinityConversions);
    }
}
// File (from patch): src/Npgsql/Internal/Converters/Temporal/DateTimeConverters.cs
using System;

// ReSharper disable once CheckNamespace
namespace Npgsql.Internal.Converters;

/// <summary>
/// Converts <see cref="DateTime"/> to/from PostgreSQL timestamp/timestamptz binary format
/// (int64 microseconds since 2000-01-01), tagging read values with the configured
/// <see cref="DateTimeKind"/>.
/// </summary>
sealed class DateTimeConverter : PgBufferedConverter<DateTime>
{
    readonly bool _dateTimeInfinityConversions;  // map the long.Min/MaxValue sentinels to DateTime.Min/MaxValue
    readonly DateTimeKind _kind;                 // Utc for timestamptz, Unspecified for timestamp

    public DateTimeConverter(bool dateTimeInfinityConversions, DateTimeKind kind)
    {
        _dateTimeInfinityConversions = dateTimeInfinityConversions;
        _kind = kind;
    }

    public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements)
    {
        bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long));
        return format is DataFormat.Binary;
    }

    protected override DateTime ReadCore(PgReader reader)
        => PgTimestamp.Decode(reader.ReadInt64(), _kind, _dateTimeInfinityConversions);

    protected override void WriteCore(PgWriter writer, DateTime value)
        => writer.WriteInt64(PgTimestamp.Encode(value, _dateTimeInfinityConversions));
}

/// <summary>
/// Converts <see cref="DateTimeOffset"/> to/from PostgreSQL 'timestamp with time zone';
/// only offset-zero (UTC) values can be written.
/// </summary>
sealed class DateTimeOffsetConverter : PgBufferedConverter<DateTimeOffset>
{
    readonly bool _dateTimeInfinityConversions;

    public DateTimeOffsetConverter(bool dateTimeInfinityConversions)
        => _dateTimeInfinityConversions = dateTimeInfinityConversions;

    public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements)
    {
        bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long));
        return format is DataFormat.Binary;
    }

    protected override DateTimeOffset ReadCore(PgReader reader)
        => PgTimestamp.Decode(reader.ReadInt64(), DateTimeKind.Utc, _dateTimeInfinityConversions);

    protected override void WriteCore(PgWriter writer, DateTimeOffset value)
    {
        // A non-zero offset would be silently lost on the wire; reject it instead.
        if (value.Offset != TimeSpan.Zero)
            throw new ArgumentException($"Cannot write DateTimeOffset with Offset={value.Offset} to PostgreSQL type 'timestamp with time zone', only offset 0 (UTC) is supported. ", nameof(value));

        // Offset is zero here, so DateTime equals UtcDateTime.
        writer.WriteInt64(PgTimestamp.Encode(value.DateTime, _dateTimeInfinityConversions));
    }
}
// File (from patch): src/Npgsql/Internal/Converters/Temporal/IntervalConverters.cs
using System;
using NpgsqlTypes;

// ReSharper disable once CheckNamespace
namespace Npgsql.Internal.Converters;

/// <summary>
/// Converts <see cref="TimeSpan"/> to/from the PostgreSQL 'interval' binary format
/// (int64 microseconds, int32 days, int32 months). Intervals with a month component
/// cannot be represented as TimeSpan and are rejected.
/// </summary>
sealed class TimeSpanIntervalConverter : PgBufferedConverter<TimeSpan>
{
    public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements)
    {
        bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long) + sizeof(int) + sizeof(int));
        return format is DataFormat.Binary;
    }

    protected override TimeSpan ReadCore(PgReader reader)
    {
        var microseconds = reader.ReadInt64();
        var days = reader.ReadInt32();
        var months = reader.ReadInt32();

        // Fix: this used to check 'months > 0', silently dropping *negative* month
        // components (e.g. interval '-1 month') even though the message below promises to
        // reject any non-zero months. Reject all non-zero month values.
        return months != 0
            ? throw new InvalidCastException(
                "Cannot read interval values with non-zero months as TimeSpan, since that type doesn't support months. Consider using NodaTime Period which better corresponds to PostgreSQL interval, or read the value as NpgsqlInterval, or transform the interval to not contain months or years in PostgreSQL before reading it.")
            : new(microseconds * 10 + days * TimeSpan.TicksPerDay);  // 1 microsecond == 10 ticks
    }

    protected override void WriteCore(PgWriter writer, TimeSpan value)
    {
        // Split into whole days plus the remaining time-of-day, encoded as microseconds.
        var ticksInDay = value.Ticks - TimeSpan.TicksPerDay * value.Days;
        writer.WriteInt64(ticksInDay / 10);
        writer.WriteInt32(value.Days);
        writer.WriteInt32(0); // TimeSpan cannot carry a month component.
    }
}

/// <summary>
/// Converts <see cref="NpgsqlInterval"/> to/from 'interval', preserving all three wire
/// components (months, days, microseconds) without interpretation.
/// </summary>
sealed class NpgsqlIntervalConverter : PgBufferedConverter<NpgsqlInterval>
{
    public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements)
    {
        bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long) + sizeof(int) + sizeof(int));
        return format is DataFormat.Binary;
    }

    protected override NpgsqlInterval ReadCore(PgReader reader)
    {
        var time = reader.ReadInt64();   // microseconds, mirrored by NpgsqlInterval.Time below
        var day = reader.ReadInt32();
        var month = reader.ReadInt32();
        return new NpgsqlInterval(month, day, time);
    }

    protected override void WriteCore(PgWriter writer, NpgsqlInterval value)
    {
        writer.WriteInt64(value.Time);
        writer.WriteInt32(value.Days);
        writer.WriteInt32(value.Months);
    }
}
// File (from patch): src/Npgsql/Internal/Converters/Temporal/LegacyDateTimeConverter.cs
using System;

namespace Npgsql.Internal.Converters;

/// <summary>
/// <see cref="DateTime"/> converter implementing the legacy timestamp behavior:
/// for 'timestamp with time zone' (<c>_timestamp == false</c>) values are converted to
/// local time on read and local values are converted to UTC on write.
/// </summary>
sealed class LegacyDateTimeConverter : PgBufferedConverter<DateTime>
{
    readonly bool _dateTimeInfinityConversions;
    readonly bool _timestamp;  // true => 'timestamp', false => 'timestamp with time zone'

    public LegacyDateTimeConverter(bool dateTimeInfinityConversions, bool timestamp)
    {
        _dateTimeInfinityConversions = dateTimeInfinityConversions;
        _timestamp = timestamp;
    }

    public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements)
    {
        bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long));
        return format is DataFormat.Binary;
    }

    protected override DateTime ReadCore(PgReader reader)
    {
        var dateTime = PgTimestamp.Decode(reader.ReadInt64(), DateTimeKind.Utc, _dateTimeInfinityConversions);
        // Legacy behavior: timestamptz comes back as local time, except for the infinity
        // sentinels which must round-trip untouched.
        return !_timestamp && (!_dateTimeInfinityConversions || dateTime != DateTime.MaxValue && dateTime != DateTime.MinValue)
            ? dateTime.ToLocalTime()
            : dateTime;
    }

    protected override void WriteCore(PgWriter writer, DateTime value)
    {
        // Legacy behavior: local values are converted to UTC when writing timestamptz.
        if (!_timestamp && value.Kind is DateTimeKind.Local)
            value = value.ToUniversalTime();

        writer.WriteInt64(PgTimestamp.Encode(value, _dateTimeInfinityConversions));
    }
}

/// <summary>
/// <see cref="DateTimeOffset"/> converter implementing the legacy timestamptz behavior:
/// reads yield local-offset values, writes encode the UTC instant.
/// </summary>
sealed class LegacyDateTimeOffsetConverter : PgBufferedConverter<DateTimeOffset>
{
    readonly bool _dateTimeInfinityConversions;

    public LegacyDateTimeOffsetConverter(bool dateTimeInfinityConversions)
        => _dateTimeInfinityConversions = dateTimeInfinityConversions;

    public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements)
    {
        bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long));
        return format is DataFormat.Binary;
    }

    protected override DateTimeOffset ReadCore(PgReader reader)
    {
        var dateTime = PgTimestamp.Decode(reader.ReadInt64(), DateTimeKind.Utc, _dateTimeInfinityConversions);
        // Infinity sentinels are passed through without local-time conversion.
        return !_dateTimeInfinityConversions || dateTime != DateTime.MaxValue && dateTime != DateTime.MinValue
            ? dateTime.ToLocalTime()
            : dateTime;
    }

    protected override void WriteCore(PgWriter writer, DateTimeOffset value)
        => writer.WriteInt64(PgTimestamp.Encode(value.UtcDateTime, _dateTimeInfinityConversions));
}
// File (from patch): src/Npgsql/Internal/Converters/Temporal/PgTimestamp.cs
using System;

namespace Npgsql.Internal.Converters;

/// <summary>
/// Conversions between PostgreSQL timestamp wire values (int64 microseconds since
/// 2000-01-01) and <see cref="DateTime"/> ticks, with optional mapping of the
/// long.Min/MaxValue sentinels to '-infinity'/'infinity'.
/// </summary>
static class PgTimestamp
{
    // DateTime.Ticks of 2000-01-01 00:00:00, the PostgreSQL timestamp epoch.
    const long PostgresTimestampOffsetTicks = 630822816000000000L;

    internal static long Encode(DateTime value, bool dateTimeInfinityConversions)
    {
        var ticks = value.Ticks;

        if (dateTimeInfinityConversions)
        {
            if (ticks == DateTime.MaxValue.Ticks)
                return long.MaxValue;
            if (ticks == DateTime.MinValue.Ticks)
                return long.MinValue;
        }

        // Truncate (never round) the sub-microsecond ticks: rounding up DateTime.MaxValue
        // would yield a value that could not be retrieved back from the database.
        return (ticks - PostgresTimestampOffsetTicks) / 10;
    }

    internal static DateTime Decode(long value, DateTimeKind kind, bool dateTimeInfinityConversions)
    {
        // Handle the infinity sentinels up front; they never hit the range check below.
        if (value == long.MaxValue || value == long.MinValue)
        {
            if (!dateTimeInfinityConversions)
                throw new InvalidCastException("Cannot read infinity value since DisableDateTimeInfinityConversions is true.");
            return value == long.MaxValue ? DateTime.MaxValue : DateTime.MinValue;
        }

        try
        {
            return new(value * 10 + PostgresTimestampOffsetTicks, kind);
        }
        catch (ArgumentOutOfRangeException e)
        {
            throw new InvalidCastException("Out of range of DateTime (year must be between 1 and 9999).", e);
        }
    }
}
// File (from patch): src/Npgsql/Internal/Converters/Temporal/TimeConverters.cs
using System;

// ReSharper disable once CheckNamespace
namespace Npgsql.Internal.Converters;

/// <summary>Converts <see cref="TimeSpan"/> to/from PostgreSQL 'time' (int64 microseconds since midnight).</summary>
sealed class TimeSpanTimeConverter : PgBufferedConverter<TimeSpan>
{
    public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements)
    {
        bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long));
        return format is DataFormat.Binary;
    }
    // 1 microsecond == 10 ticks; sub-microsecond ticks are truncated on write.
    protected override TimeSpan ReadCore(PgReader reader) => new(reader.ReadInt64() * 10);
    protected override void WriteCore(PgWriter writer, TimeSpan value) => writer.WriteInt64(value.Ticks / 10);
}

#if NET6_0_OR_GREATER
/// <summary>Converts <see cref="TimeOnly"/> to/from PostgreSQL 'time' (int64 microseconds since midnight).</summary>
sealed class TimeOnlyTimeConverter : PgBufferedConverter<TimeOnly>
{
    public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements)
    {
        bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long));
        return format is DataFormat.Binary;
    }
    protected override TimeOnly ReadCore(PgReader reader) => new(reader.ReadInt64() * 10);
    protected override void WriteCore(PgWriter writer, TimeOnly value) => writer.WriteInt64(value.Ticks / 10);
}
#endif

/// <summary>Converts <see cref="DateTimeOffset"/> to/from PostgreSQL 'timetz'.</summary>
sealed class DateTimeOffsetTimeTzConverter : PgBufferedConverter<DateTimeOffset>
{
    // Binary Format: int64 expressing microseconds, int32 expressing timezone in seconds, negative
    public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements)
    {
        bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long) + sizeof(int));
        return format is DataFormat.Binary;
    }

    protected override DateTimeOffset ReadCore(PgReader reader)
    {
        // Adjust from 1 microsecond to 100ns. Time zone (in seconds) is inverted.
        var ticks = reader.ReadInt64() * 10;
        var offset = new TimeSpan(0, 0, -reader.ReadInt32());
        // The extra day keeps the UTC-adjusted instant non-negative for large positive
        // offsets (DateTimeOffset requires ticks - offset >= 0); the resulting value sits
        // on 0001-01-02 and only its time-of-day/offset are meaningful.
        return new DateTimeOffset(ticks + TimeSpan.TicksPerDay, offset);
    }

    protected override void WriteCore(PgWriter writer, DateTimeOffset value)
    {
        // NOTE(review): this writes value.Ticks (date component included), so a value read by
        // ReadCore above would be written back with an extra day of microseconds — confirm
        // against upstream whether the time-of-day should be isolated here.
        writer.WriteInt64(value.Ticks / 10);
        writer.WriteInt32(-(int)(value.Offset.Ticks / TimeSpan.TicksPerSecond));
    }
}
// File (from patch): src/Npgsql/Internal/Converters/VersionPrefixedTextConverter.cs
// NOTE(review): generic type arguments were stripped by the mangled diff and re-inferred;
// verify against the upstream file.
using System;
using System.Diagnostics.CodeAnalysis;
using System.Threading;
using System.Threading.Tasks;

namespace Npgsql.Internal.Converters;

/// <summary>
/// Wraps a text converter, adding a one-byte version prefix when the binary data format is
/// used (e.g. the jsonb version byte) while passing text-format data straight through.
/// The wrapped converter must have identical buffer requirements for both formats.
/// </summary>
sealed class VersionPrefixedTextConverter<T> : PgStreamingConverter<T>, IResumableRead
{
    readonly byte _versionPrefix;
    readonly PgConverter<T> _textConverter;
    BufferRequirements _innerRequirements;  // the wrapped converter's requirements, captured by CanConvert

    public VersionPrefixedTextConverter(byte versionPrefix, PgConverter<T> textConverter)
        : base(textConverter.DbNullPredicateKind is DbNullPredicate.Custom)
    {
        _versionPrefix = versionPrefix;
        _textConverter = textConverter;
    }

    protected override bool IsDbNullValue(T? value) => _textConverter.IsDbNull(value);

    public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements)
        => VersionPrefixedTextConverter.CanConvert(_textConverter, format, out _innerRequirements, out bufferRequirements);

    // Fix: previously used '.Result', which wraps any exception in an AggregateException;
    // GetAwaiter().GetResult() propagates the original exception and matches every other
    // sync-over-async bridge in these converters.
    public override T Read(PgReader reader)
        => Read(async: false, reader, CancellationToken.None).GetAwaiter().GetResult();

    public override ValueTask<T> ReadAsync(PgReader reader, CancellationToken cancellationToken = default)
        => Read(async: true, reader, cancellationToken);

    // Binary adds one byte for the version prefix; text is passed through unchanged.
    public override Size GetSize(SizeContext context, [DisallowNull]T value, ref object? writeState)
        => _textConverter.GetSize(context, value, ref writeState).Combine(context.Format is DataFormat.Binary ? sizeof(byte) : 0);

    public override void Write(PgWriter writer, [DisallowNull]T value)
        => Write(async: false, writer, value, CancellationToken.None).GetAwaiter().GetResult();

    public override ValueTask WriteAsync(PgWriter writer, [DisallowNull]T value, CancellationToken cancellationToken = default)
        => Write(async: true, writer, value, cancellationToken);

    async ValueTask<T> Read(bool async, PgReader reader, CancellationToken cancellationToken)
    {
        await VersionPrefixedTextConverter.ReadVersion(async, _versionPrefix, reader, _innerRequirements.Read, cancellationToken).ConfigureAwait(false);
        return async ? await _textConverter.ReadAsync(reader, cancellationToken).ConfigureAwait(false) : _textConverter.Read(reader);
    }

    async ValueTask Write(bool async, PgWriter writer, [DisallowNull]T value, CancellationToken cancellationToken)
    {
        await VersionPrefixedTextConverter.WriteVersion(async, _versionPrefix, writer, cancellationToken).ConfigureAwait(false);
        if (async)
            await _textConverter.WriteAsync(writer, value, cancellationToken).ConfigureAwait(false);
        else
            _textConverter.Write(writer, value);
    }

    // Resumable only when the wrapped converter is.
    bool IResumableRead.Supported => _textConverter is IResumableRead { Supported: true };
}

/// <summary>Non-generic helpers shared across instantiations of the converter above.</summary>
static class VersionPrefixedTextConverter
{
    public static async ValueTask WriteVersion(bool async, byte version, PgWriter writer, CancellationToken cancellationToken)
    {
        // The version prefix only exists in the binary wire representation.
        if (writer.Current.Format is not DataFormat.Binary)
            return;

        if (writer.ShouldFlush(sizeof(byte)))
            await writer.Flush(async, cancellationToken).ConfigureAwait(false);
        writer.WriteByte(version);
    }

    public static async ValueTask ReadVersion(bool async, byte expectedVersion, PgReader reader, Size textConverterReadRequirement, CancellationToken cancellationToken)
    {
        if (reader.Current.Format is not DataFormat.Binary)
            return;

        // On a resumed read the version byte was already consumed.
        if (!reader.IsResumed)
        {
            if (reader.ShouldBuffer(sizeof(byte)))
                await reader.Buffer(async, sizeof(byte), cancellationToken).ConfigureAwait(false);

            var actualVersion = reader.ReadByte();
            if (actualVersion != expectedVersion)
                throw new InvalidCastException($"Unknown wire format version: {actualVersion}");
        }

        // No need for a nested read, all text converters will read CurrentRemaining bytes.
        // We only need to buffer data if we're binary, otherwise the caller would have had to do so
        // as we directly expose the underlying text converter requirements for the text data format.
        await reader.Buffer(async, textConverterReadRequirement, cancellationToken).ConfigureAwait(false);
    }

    public static bool CanConvert(PgConverter textConverter, DataFormat format, out BufferRequirements textConverterRequirements, out BufferRequirements bufferRequirements)
    {
        var success = textConverter.CanConvert(format, out textConverterRequirements);
        if (!success)
        {
            bufferRequirements = default;
            return false;
        }

        // The wrapped converter must behave identically for both formats, as we surface its
        // requirements for text while adding the version byte for binary.
        if (textConverter.CanConvert(format is DataFormat.Binary ? DataFormat.Text : DataFormat.Binary, out var otherRequirements) && otherRequirements != textConverterRequirements)
            throw new InvalidOperationException("Text converter should have identical requirements for text and binary formats.");

        bufferRequirements = format is DataFormat.Binary ? textConverterRequirements.Combine(sizeof(byte)) : textConverterRequirements;

        return success;
    }
}
// File (from patch): src/Npgsql/Internal/DataFormat.cs
using System;
using System.Diagnostics;

namespace Npgsql.Internal;

/// <summary>The two wire formats a PostgreSQL value can be transferred in.</summary>
public enum DataFormat : byte
{
    Binary,
    Text
}

/// <summary>Maps between <see cref="DataFormat"/> and the protocol's format codes (0 = text, 1 = binary).</summary>
static class DataFormatUtils
{
    public static DataFormat Create(short formatCode)
        => formatCode switch
        {
            0 => DataFormat.Text,
            1 => DataFormat.Binary,
            // Fix: the message previously ended with a comma instead of a period.
            _ => throw new ArgumentOutOfRangeException(nameof(formatCode), formatCode, "Unknown postgres format code, please file a bug.")
        };

    public static short ToFormatCode(this DataFormat dataFormat)
        => dataFormat switch
        {
            DataFormat.Text => 0,
            DataFormat.Binary => 1,
            // Both enum members are covered above; anything else indicates memory corruption.
            _ => throw new UnreachableException()
        };
}
System.Diagnostics.CodeAnalysis; +using System.Reflection; +using Npgsql.Internal.Postgres; +using Npgsql.PostgresTypes; + +namespace Npgsql.Internal; + +[RequiresUnreferencedCode("A dynamic type info resolver may perform reflection on types that were trimmed if not referenced directly.")] +[RequiresDynamicCode("A dynamic type info resolver may need to construct a generic converter for a statically unknown type.")] +public abstract class DynamicTypeInfoResolver : IPgTypeInfoResolver +{ + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + { + if (dataTypeName is null) + return null; + + var context = GetMappings(type, dataTypeName.GetValueOrDefault(), options); + return context?.Find(type, dataTypeName.GetValueOrDefault(), options); + } + + protected DynamicMappingCollection CreateCollection(TypeInfoMappingCollection? baseCollection = null) => new(baseCollection); + + protected static bool IsTypeOrNullableOfType(Type type, Func predicate, out Type matchedType) + { + matchedType = Nullable.GetUnderlyingType(type) ?? type; + return predicate(matchedType); + } + + protected static bool IsArrayLikeType(Type type, [NotNullWhen(true)]out Type? elementType) => TypeInfoMappingCollection.IsArrayLikeType(type, out elementType); + + protected static bool IsArrayDataTypeName(DataTypeName dataTypeName, PgSerializerOptions options, out DataTypeName elementDataTypeName) + { + if (options.DatabaseInfo.GetPostgresType(dataTypeName) is PostgresArrayType arrayType) + { + elementDataTypeName = arrayType.Element.DataTypeName; + return true; + } + + elementDataTypeName = default; + return false; + } + + protected abstract DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options); + + protected class DynamicMappingCollection + { + TypeInfoMappingCollection? 
_mappings; + + static readonly MethodInfo AddTypeMethodInfo = typeof(TypeInfoMappingCollection).GetMethod(nameof(TypeInfoMappingCollection.AddType), + new[] { typeof(string), typeof(TypeInfoFactory), typeof(Func) }) ?? throw new NullReferenceException(); + + static readonly MethodInfo AddArrayTypeMethodInfo = typeof(TypeInfoMappingCollection) + .GetMethod(nameof(TypeInfoMappingCollection.AddArrayType), new[] { typeof(string) }) ?? throw new NullReferenceException(); + + static readonly MethodInfo AddStructTypeMethodInfo = typeof(TypeInfoMappingCollection).GetMethod(nameof(TypeInfoMappingCollection.AddStructType), + new[] { typeof(string), typeof(TypeInfoFactory), typeof(Func) }) ?? throw new NullReferenceException(); + + static readonly MethodInfo AddStructArrayTypeMethodInfo = typeof(TypeInfoMappingCollection) + .GetMethod(nameof(TypeInfoMappingCollection.AddStructArrayType), new[] { typeof(string) }) ?? throw new NullReferenceException(); + + static readonly MethodInfo AddResolverTypeMethodInfo = typeof(TypeInfoMappingCollection).GetMethod( + nameof(TypeInfoMappingCollection.AddResolverType), + new[] { typeof(string), typeof(TypeInfoFactory), typeof(Func) }) ?? throw new NullReferenceException(); + + static readonly MethodInfo AddResolverArrayTypeMethodInfo = typeof(TypeInfoMappingCollection) + .GetMethod(nameof(TypeInfoMappingCollection.AddResolverArrayType), new[] { typeof(string) }) ?? throw new NullReferenceException(); + + static readonly MethodInfo AddResolverStructTypeMethodInfo = typeof(TypeInfoMappingCollection).GetMethod( + nameof(TypeInfoMappingCollection.AddResolverStructType), + new[] { typeof(string), typeof(TypeInfoFactory), typeof(Func) }) ?? throw new NullReferenceException(); + + static readonly MethodInfo AddResolverStructArrayTypeMethodInfo = typeof(TypeInfoMappingCollection) + .GetMethod(nameof(TypeInfoMappingCollection.AddResolverStructArrayType), new[] { typeof(string) }) ?? 
throw new NullReferenceException(); + + internal DynamicMappingCollection(TypeInfoMappingCollection? baseCollection = null) + { + if (baseCollection is not null) + _mappings = new(baseCollection); + } + + public DynamicMappingCollection AddMapping(Type type, string dataTypeName, TypeInfoFactory factory, Func? configureMapping = null) + { + if (type.IsValueType && Nullable.GetUnderlyingType(type) is not null) + throw new NotSupportedException("Mapping nullable types is not supported, map its underlying type instead to get both."); + + (type.IsValueType ? AddStructTypeMethodInfo : AddTypeMethodInfo) + .MakeGenericMethod(type).Invoke(_mappings ??= new(), new object?[] + { + dataTypeName, + factory, + configureMapping + }); + return this; + } + + public DynamicMappingCollection AddArrayMapping(Type elementType, string dataTypeName) + { + (elementType.IsValueType ? AddStructArrayTypeMethodInfo : AddArrayTypeMethodInfo) + .MakeGenericMethod(elementType).Invoke(_mappings ??= new(), new object?[] { dataTypeName }); + return this; + } + + public DynamicMappingCollection AddResolverMapping(Type type, string dataTypeName, TypeInfoFactory factory, Func? configureMapping = null) + { + if (type.IsValueType && Nullable.GetUnderlyingType(type) is not null) + throw new NotSupportedException("Mapping nullable types is not supported"); + + (type.IsValueType ? AddResolverStructTypeMethodInfo : AddResolverTypeMethodInfo) + .MakeGenericMethod(type).Invoke(_mappings ??= new(), new object?[] + { + dataTypeName, + factory, + configureMapping + }); + return this; + } + + public DynamicMappingCollection AddResolverArrayMapping(Type elementType, string dataTypeName) + { + (elementType.IsValueType ? AddResolverStructArrayTypeMethodInfo : AddResolverArrayTypeMethodInfo) + .MakeGenericMethod(elementType).Invoke(_mappings ??= new(), new object?[] { dataTypeName }); + return this; + } + + internal PgTypeInfo? Find(Type? 
type, DataTypeName dataTypeName, PgSerializerOptions options) + => _mappings?.Find(type, dataTypeName, options); + + public TypeInfoMappingCollection ToTypeInfoMappingCollection() + => new(_mappings?.Items ?? Array.Empty()); + } +} diff --git a/src/Npgsql/Internal/IPgTypeInfoResolver.cs b/src/Npgsql/Internal/IPgTypeInfoResolver.cs new file mode 100644 index 0000000000..62955446eb --- /dev/null +++ b/src/Npgsql/Internal/IPgTypeInfoResolver.cs @@ -0,0 +1,19 @@ +using System; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Internal; + +/// +/// An Npgsql resolver for type info. Used by Npgsql to read and write values to PostgreSQL. +/// +public interface IPgTypeInfoResolver +{ + /// + /// Resolve a type info for a given type and data type name, at least one value will be non-null. + /// + /// The clr type being requested. + /// The postgres type being requested. + /// Used for configuration state and Npgsql type info or PostgreSQL type catalog lookups. + /// A result, or null if there was no match. + PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options); +} diff --git a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs index d5ea9af5e1..25847da65e 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs @@ -121,7 +121,7 @@ async Task AuthenticateSASL(List mechanisms, string username, bool async // Assumption: the write buffer is big enough to contain all our outgoing messages var clientNonce = GetNonce(); - await WriteSASLInitialResponse(mechanism, PGUtil.UTF8Encoding.GetBytes($"{cbindFlag},,n=*,r={clientNonce}"), async, cancellationToken); + await WriteSASLInitialResponse(mechanism, NpgsqlWriteBuffer.UTF8Encoding.GetBytes($"{cbindFlag},,n=*,r={clientNonce}"), async, cancellationToken); await Flush(async, cancellationToken); var saslContinueMsg = Expect(await ReadMessage(async), this); @@ -280,8 +280,8 @@ async Task AuthenticateMD5(string username, byte[] salt, bool async, Cancellatio using (var md5 = MD5.Create()) { // First phase - var passwordBytes = PGUtil.UTF8Encoding.GetBytes(passwd); - var usernameBytes = PGUtil.UTF8Encoding.GetBytes(username); + var passwordBytes = NpgsqlWriteBuffer.UTF8Encoding.GetBytes(passwd); + var usernameBytes = NpgsqlWriteBuffer.UTF8Encoding.GetBytes(username); var cryptBuf = new byte[passwordBytes.Length + usernameBytes.Length]; passwordBytes.CopyTo(cryptBuf, 0); usernameBytes.CopyTo(cryptBuf, passwordBytes.Length); @@ -293,7 +293,7 @@ async Task AuthenticateMD5(string username, byte[] salt, bool async, Cancellatio var prehash = sb.ToString(); - var prehashbytes = PGUtil.UTF8Encoding.GetBytes(prehash); + var prehashbytes = NpgsqlWriteBuffer.UTF8Encoding.GetBytes(prehash); cryptBuf = new byte[prehashbytes.Length + 4]; Array.Copy(salt, 0, cryptBuf, prehashbytes.Length, 4); diff --git a/src/Npgsql/Internal/NpgsqlConnector.FrontendMessages.cs b/src/Npgsql/Internal/NpgsqlConnector.FrontendMessages.cs index c2c6c23976..91a492ae5b 100644 --- 
a/src/Npgsql/Internal/NpgsqlConnector.FrontendMessages.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.FrontendMessages.cs @@ -4,8 +4,6 @@ using System.Linq; using System.Threading; using System.Threading.Tasks; -using Npgsql.Util; -// ReSharper disable VariableHidesOuterVariable namespace Npgsql.Internal; @@ -141,7 +139,7 @@ internal async Task WriteParse(string sql, string statementName, List 0 ? size.Value : 0; + formatCodesSum += format.ToFormatCode(); } var formatCodeListLength = formatCodesSum == 0 ? 0 : formatCodesSum == parameters.Count ? 1 : parameters.Count; @@ -201,30 +199,38 @@ internal async Task WriteBind( // 0 length implicitly means all-text, 1 means all-binary, >1 means mix-and-match if (formatCodeListLength == 1) { - if (WriteBuffer.WriteSpaceLeft < 2) + if (WriteBuffer.WriteSpaceLeft < sizeof(short)) await Flush(async, cancellationToken).ConfigureAwait(false); - WriteBuffer.WriteInt16((short)FormatCode.Binary); + WriteBuffer.WriteInt16(DataFormat.Binary.ToFormatCode()); } else if (formatCodeListLength > 1) { for (var paramIndex = 0; paramIndex < parameters.Count; paramIndex++) { - if (WriteBuffer.WriteSpaceLeft < 2) + if (WriteBuffer.WriteSpaceLeft < sizeof(short)) await Flush(async, cancellationToken).ConfigureAwait(false); - WriteBuffer.WriteInt16((short)parameters[paramIndex].FormatCode); + WriteBuffer.WriteInt16(parameters[paramIndex].Format.ToFormatCode()); } } - if (WriteBuffer.WriteSpaceLeft < 2) + if (WriteBuffer.WriteSpaceLeft < sizeof(ushort)) await Flush(async, cancellationToken).ConfigureAwait(false); WriteBuffer.WriteUInt16((ushort)parameters.Count); - for (var paramIndex = 0; paramIndex < parameters.Count; paramIndex++) + var writer = WriteBuffer.GetWriter(DatabaseInfo, async ? 
FlushMode.NonBlocking : FlushMode.Blocking); + try { - var param = parameters[paramIndex]; - param.LengthCache?.Rewind(); - await param.WriteWithLength(WriteBuffer, async, cancellationToken).ConfigureAwait(false); + for (var paramIndex = 0; paramIndex < parameters.Count; paramIndex++) + { + var param = parameters[paramIndex]; + await param.Write(async, writer, cancellationToken).ConfigureAwait(false); + } + } + catch(Exception ex) + { + Break(ex); + throw; } if (unknownResultTypeList != null) @@ -375,8 +381,8 @@ internal void WriteStartup(Dictionary parameters) sizeof(byte); // Trailing zero byte foreach (var kvp in parameters) - len += PGUtil.UTF8Encoding.GetByteCount(kvp.Key) + 1 + - PGUtil.UTF8Encoding.GetByteCount(kvp.Value) + 1; + len += NpgsqlWriteBuffer.UTF8Encoding.GetByteCount(kvp.Key) + 1 + + NpgsqlWriteBuffer.UTF8Encoding.GetByteCount(kvp.Value) + 1; // Should really never happen, just in case if (len > WriteBuffer.Size) @@ -422,7 +428,7 @@ internal async Task WriteSASLInitialResponse(string mechanism, byte[] initialRes { var len = sizeof(byte) + // Message code sizeof(int) + // Length - PGUtil.UTF8Encoding.GetByteCount(mechanism) + sizeof(byte) + // Mechanism plus null terminator + NpgsqlWriteBuffer.UTF8Encoding.GetByteCount(mechanism) + sizeof(byte) + // Mechanism plus null terminator sizeof(int) + // Initial response length (initialResponse?.Length ?? 
0); // Initial response payload diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 4fb25fa761..d7e359b6af 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -18,12 +18,10 @@ using System.Threading.Channels; using System.Threading.Tasks; using Npgsql.BackendMessages; -using Npgsql.TypeMapping; using Npgsql.Util; using static Npgsql.Util.Statics; using System.Transactions; using Microsoft.Extensions.Logging; -using Npgsql.Internal.TypeMapping; using Npgsql.Properties; namespace Npgsql.Internal; @@ -115,13 +113,13 @@ internal string InferredUserName /// internal int Id => BackendProcessId; + internal PgSerializerOptions SerializerOptions { get; set; } = default!; + /// /// Information about PostgreSQL and PostgreSQL-like databases (e.g. type definitions, capabilities...). /// public NpgsqlDatabaseInfo DatabaseInfo { get; internal set; } = default!; - internal TypeMapper TypeMapper { get; set; } = default!; - /// /// The current transaction status for this connector. /// @@ -182,6 +180,9 @@ internal string InferredUserName /// volatile Exception? _breakReason; + // Used by replication to change our cancellation behaviour on ColumnStreams. + internal bool LongRunningConnection { get; set; } + /// /// /// Used by the pool to indicate that I/O is currently in progress on this connector, so that another write @@ -319,7 +320,7 @@ internal bool PostgresCancellationPerformed readonly ReadyForQueryMessage _readyForQueryMessage = new(); readonly ParameterDescriptionMessage _parameterDescriptionMessage = new(); readonly DataRowMessage _dataRowMessage = new(); - readonly RowDescriptionMessage _rowDescriptionMessage = new(); + readonly RowDescriptionMessage _rowDescriptionMessage = new(connectorOwned: true); // Since COPY is rarely used, allocate these lazily CopyInResponseMessage? 
_copyInResponseMessage; @@ -500,9 +501,9 @@ internal async Task Open(NpgsqlTimeout timeout, bool async, CancellationToken ca await DataSource.Bootstrap(this, timeout, forceReload: false, async, cancellationToken); - Debug.Assert(DataSource.TypeMapper is not null); + Debug.Assert(DataSource.SerializerOptions is not null); Debug.Assert(DataSource.DatabaseInfo is not null); - TypeMapper = DataSource.TypeMapper; + SerializerOptions = DataSource.SerializerOptions; DatabaseInfo = DataSource.DatabaseInfo; if (Settings.Pooling && !Settings.Multiplexing && !Settings.NoResetOnClose && DatabaseInfo.SupportsDiscard) @@ -770,8 +771,8 @@ async Task RawOpen(SslMode sslMode, NpgsqlTimeout timeout, bool async, Cancellat if (Settings.Encoding == "UTF8") { - TextEncoding = PGUtil.UTF8Encoding; - RelaxedTextEncoding = PGUtil.RelaxedUTF8Encoding; + TextEncoding = NpgsqlWriteBuffer.UTF8Encoding; + RelaxedTextEncoding = NpgsqlWriteBuffer.RelaxedUTF8Encoding; } else { @@ -1242,7 +1243,7 @@ async Task MultiplexingReadLoop() // TODO: the exception we have here is sometimes just the result of the write loop breaking // the connector, so it doesn't represent the actual root cause. - pendingCommand.ExecutionCompletion.SetException(_breakReason!); + pendingCommand.ExecutionCompletion.SetException(new NpgsqlException("A previous command on this connection caused an error requiring all pending commands on this connection to be aborted", _breakReason!)); } } catch (ChannelClosedException) @@ -1303,7 +1304,7 @@ internal ValueTask ReadMessage( return ReadMessageLong(async, dataRowLoadingMode, readingNotifications: false)!; } - PGUtil.ValidateBackendMessageCode(messageCode); + ValidateBackendMessageCode(messageCode); var len = ReadBuffer.ReadInt32() - 4; // Transmitted length includes itself if (len > ReadBuffer.ReadBytesLeft) { @@ -1337,7 +1338,8 @@ internal ValueTask ReadMessage( { // Prepended queries should never fail. // If they do, we're not even going to attempt to salvage the connector. 
- throw Break(e); + Break(e); + throw; } } @@ -1351,7 +1353,7 @@ internal ValueTask ReadMessage( { await ReadBuffer.Ensure(5, async, readingNotifications); var messageCode = (BackendMessageCode)ReadBuffer.ReadByte(); - PGUtil.ValidateBackendMessageCode(messageCode); + ValidateBackendMessageCode(messageCode); var len = ReadBuffer.ReadInt32() - 4; // Transmitted length includes itself if ((messageCode == BackendMessageCode.DataRow && @@ -1432,6 +1434,12 @@ internal ValueTask ReadMessage( } Debug.Assert(msg != null, "Message is null for code: " + messageCode); + + // Reset flushed bytes after any RFQ or in between potentially long running operations. + // Just in case we'll hit that 15 exbibyte limit of a signed long... + if (messageCode is BackendMessageCode.ReadyForQuery or BackendMessageCode.CopyData or BackendMessageCode.NotificationResponse) + ReadBuffer.ResetFlushedBytes(); + return msg; } } @@ -1464,7 +1472,7 @@ internal ValueTask ReadMessage( switch (code) { case BackendMessageCode.RowDescription: - return _rowDescriptionMessage.Load(buf, TypeMapper); + return _rowDescriptionMessage.Load(buf, SerializerOptions); case BackendMessageCode.DataRow: return _dataRowMessage.Load(len); case BackendMessageCode.CommandComplete: @@ -1891,15 +1899,44 @@ internal CancellationTokenRegistration StartCancellableOperation( /// PostgreSQL cancellation will be skipped and client-socket cancellation will occur immediately. 
/// [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal CancellationTokenRegistration StartNestedCancellableOperation( + internal NestedCancellableScope StartNestedCancellableOperation( CancellationToken cancellationToken = default, bool attemptPgCancellation = true) { + var currentUserCancellationToken = UserCancellationToken; UserCancellationToken = cancellationToken; + var currentAttemptPostgresCancellation = AttemptPostgresCancellation; AttemptPostgresCancellation = attemptPgCancellation; - return _cancellationTokenRegistration = - cancellationToken.Register(static c => ((NpgsqlConnector)c!).PerformUserCancellation(), this); + var registration = cancellationToken.Register(static c => ((NpgsqlConnector)c!).PerformUserCancellation(), this); + + return new(this, registration, currentUserCancellationToken, currentAttemptPostgresCancellation); + } + + internal readonly struct NestedCancellableScope : IDisposable + { + readonly NpgsqlConnector _connector; + readonly CancellationTokenRegistration _registration; + readonly CancellationToken _previousCancellationToken; + readonly bool _previousAttemptPostgresCancellation; + + public NestedCancellableScope(NpgsqlConnector connector, CancellationTokenRegistration registration, CancellationToken previousCancellationToken, bool previousAttemptPostgresCancellation) + { + _connector = connector; + _registration = registration; + _previousCancellationToken = previousCancellationToken; + _previousAttemptPostgresCancellation = previousAttemptPostgresCancellation; + } + + public void Dispose() + { + if (_connector is null) + return; + + _connector.UserCancellationToken = _previousCancellationToken; + _connector.AttemptPostgresCancellation = _previousAttemptPostgresCancellation; + _registration.Dispose(); + } } #endregion Cancel @@ -2318,6 +2355,7 @@ internal async Task Reset(bool async) [MethodImpl(MethodImplOptions.AggressiveInlining)] void ResetReadBuffer() { + LongRunningConnection = false; if (_origReadBuffer != 
null) { Debug.Assert(_origReadBuffer.ReadBytesLeft == 0); @@ -2615,7 +2653,8 @@ internal async Task Wait(bool async, int timeout, CancellationToken cancel { // We're somewhere in the middle of a reading keepalive messages // Breaking the connection, as we've lost protocol sync - throw Break(e); + Break(e); + throw; } if (msg == null) diff --git a/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs b/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs index 09417eef21..f3c8ea52a3 100644 --- a/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs +++ b/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs @@ -1,9 +1,9 @@ using System; -using System.Collections.Concurrent; using System.Collections.Generic; using System.Diagnostics.CodeAnalysis; using System.Linq; using System.Threading.Tasks; +using Npgsql.Internal.Postgres; using Npgsql.PostgresTypes; using Npgsql.Util; @@ -17,8 +17,7 @@ public abstract class NpgsqlDatabaseInfo { #region Fields - static volatile INpgsqlDatabaseInfoFactory[] Factories = new INpgsqlDatabaseInfoFactory[] - { + static volatile INpgsqlDatabaseInfoFactory[] Factories = { new PostgresMinimalDatabaseInfoFactory(), new PostgresDatabaseInfoFactory() }; @@ -138,7 +137,7 @@ public abstract class NpgsqlDatabaseInfo internal Dictionary ByOID { get; } = new(); /// - /// Indexes backend types by their PostgreSQL name, including namespace (e.g. pg_catalog.int4). + /// Indexes backend types by their PostgreSQL internal name, including namespace (e.g. pg_catalog.int4). /// Only used for enums and composites. /// internal Dictionary ByFullName { get; } = new(); @@ -179,10 +178,22 @@ private protected NpgsqlDatabaseInfo(string host, int port, string databaseName, Version = ParseServerVersion(serverVersion); } - public PostgresType GetPostgresTypeByName(string pgName) + internal PostgresType GetPostgresType(Oid oid) => GetPostgresType(oid.Value); + + public PostgresType GetPostgresType(uint oid) + => ByOID.TryGetValue(oid, out var pgType) + ? 
pgType + : throw new ArgumentException($"A PostgreSQL type with the oid '{oid}' was not found in the current database info"); + + internal PostgresType GetPostgresType(DataTypeName dataTypeName) + => ByFullName.TryGetValue(dataTypeName.Value, out var value) + ? value + : throw new ArgumentException($"A PostgreSQL type with the name '{dataTypeName}' was not found in the current database info"); + + public PostgresType GetPostgresType(string pgName) => TryGetPostgresTypeByName(pgName, out var pgType) ? pgType - : throw new ArgumentException($"A PostgreSQL type with the name '{pgName}' was not found in the database"); + : throw new ArgumentException($"A PostgreSQL type with the name '{pgName}' was not found in the current database info"); public bool TryGetPostgresTypeByName(string pgName, [NotNullWhen(true)] out PostgresType? pgType) { @@ -217,10 +228,10 @@ internal void ProcessTypes() foreach (var type in GetTypes()) { ByOID[type.OID] = type; - ByFullName[type.FullName] = type; + ByFullName[type.DataTypeName.Value] = type; // If more than one type exists with the same partial name, we place a null value. // This allows us to detect this case later and force the user to use full names only. - ByName[type.Name] = ByName.ContainsKey(type.Name) + ByName[type.InternalName] = ByName.ContainsKey(type.InternalName) ? null : type; @@ -326,4 +337,24 @@ internal static void ResetFactories() }; #endregion Factory management -} \ No newline at end of file + + internal Oid GetOid(PgTypeId pgTypeId, bool validate = false) + => pgTypeId.IsOid + ? validate ? GetPostgresType(pgTypeId.Oid).OID : pgTypeId.Oid + : GetPostgresType(pgTypeId.DataTypeName).OID; + + internal DataTypeName GetDataTypeName(PgTypeId pgTypeId, bool validate = false) + => pgTypeId.IsDataTypeName + ? validate ? 
GetPostgresType(pgTypeId.DataTypeName).DataTypeName : pgTypeId.DataTypeName + : GetPostgresType(pgTypeId.Oid).DataTypeName; + + internal PostgresType GetPostgresType(PgTypeId pgTypeId) + => pgTypeId.IsOid + ? GetPostgresType(pgTypeId.Oid.Value) + : GetPostgresType(pgTypeId.DataTypeName.Value); + + internal PostgresType? FindPostgresType(PgTypeId pgTypeId) + => pgTypeId.IsOid + ? ByOID.TryGetValue(pgTypeId.Oid.Value, out var pgType) ? pgType : null + : TryGetPostgresTypeByName(pgTypeId.DataTypeName.Value, out pgType) ? pgType : null; +} diff --git a/src/Npgsql/Internal/NpgsqlReadBuffer.Stream.cs b/src/Npgsql/Internal/NpgsqlReadBuffer.Stream.cs index cd38bcad0f..e99b77fa1b 100644 --- a/src/Npgsql/Internal/NpgsqlReadBuffer.Stream.cs +++ b/src/Npgsql/Internal/NpgsqlReadBuffer.Stream.cs @@ -6,33 +6,44 @@ namespace Npgsql.Internal; -public sealed partial class NpgsqlReadBuffer +sealed partial class NpgsqlReadBuffer { internal sealed class ColumnStream : Stream +#if NETSTANDARD2_0 + , IAsyncDisposable +#endif { readonly NpgsqlConnector _connector; readonly NpgsqlReadBuffer _buf; - int _start, _len, _read; + long _startPos; + int _start; + int _read; bool _canSeek; - readonly bool _startCancellableOperations; + bool _commandScoped; + /// Does not throw ODE. 
+ internal int CurrentLength { get; private set; } internal bool IsDisposed { get; private set; } - internal ColumnStream(NpgsqlConnector connector, bool startCancellableOperations = true) + internal ColumnStream(NpgsqlConnector connector) { _connector = connector; _buf = connector.ReadBuffer; - _startCancellableOperations = startCancellableOperations; IsDisposed = true; } - internal void Init(int len, bool canSeek) + internal void Init(int len, bool canSeek, bool commandScoped) { Debug.Assert(!canSeek || _buf.ReadBytesLeft >= len, "Seekable stream constructed but not all data is in buffer (sequential)"); - _start = _buf.ReadPosition; - _len = len; - _read = 0; + _startPos = _buf.CumulativeReadPosition; + _canSeek = canSeek; + _start = canSeek ? _buf.ReadPosition : 0; + + CurrentLength = len; + _read = 0; + + _commandScoped = commandScoped; IsDisposed = false; } @@ -47,7 +58,7 @@ public override long Length get { CheckDisposed(); - return _len; + return CurrentLength; } } @@ -102,11 +113,11 @@ public override long Seek(long offset, SeekOrigin origin) } case SeekOrigin.End: { - var tempPosition = unchecked(_start + _len + (int)offset); - if (unchecked(_start + _len + offset) < _start || tempPosition < _start) + var tempPosition = unchecked(_start + CurrentLength + (int)offset); + if (unchecked(_start + CurrentLength + offset) < _start || tempPosition < _start) throw new IOException(seekBeforeBegin); _buf.ReadPosition = tempPosition; - _read = _len + (int)offset; + _read = CurrentLength + (int)offset; return _read; } default: @@ -140,9 +151,7 @@ public override int Read(byte[] buffer, int offset, int count) public override Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) { ValidateArguments(buffer, offset, count); - - using (NoSynchronizationContextScope.Enter()) - return ReadAsync(new Memory(buffer, offset, count), cancellationToken).AsTask(); + return ReadAsync(new Memory(buffer, offset, count), cancellationToken).AsTask(); 
} #if NETSTANDARD2_0 @@ -153,12 +162,12 @@ public override int Read(Span span) { CheckDisposed(); - var count = Math.Min(span.Length, _len - _read); + var count = Math.Min(span.Length, CurrentLength - _read); if (count == 0) return 0; - var read = _buf.Read(span.Slice(0, count)); + var read = _buf.Read(_commandScoped, span.Slice(0, count)); _read += read; return read; @@ -172,20 +181,16 @@ public override ValueTask ReadAsync(Memory buffer, CancellationToken { CheckDisposed(); - var count = Math.Min(buffer.Length, _len - _read); - - if (count == 0) - return new ValueTask(0); - - using (NoSynchronizationContextScope.Enter()) - return ReadLong(this, buffer.Slice(0, count), cancellationToken); + var count = Math.Min(buffer.Length, CurrentLength - _read); + return count == 0 ? new ValueTask(0) : ReadLong(this, buffer.Slice(0, count), cancellationToken); static async ValueTask ReadLong(ColumnStream stream, Memory buffer, CancellationToken cancellationToken = default) { - using var registration = stream._startCancellableOperations + using var registration = cancellationToken.CanBeCanceled ? 
stream._connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false) : default; - var read = await stream._buf.ReadAsync(buffer, cancellationToken); + + var read = await stream._buf.ReadAsync(stream._commandScoped, buffer, cancellationToken).ConfigureAwait(false); stream._read += read; return read; } @@ -208,24 +213,21 @@ public ValueTask DisposeAsync() #else public override ValueTask DisposeAsync() #endif - { - using (NoSynchronizationContextScope.Enter()) - return DisposeAsync(disposing: true, async: true); - } + => DisposeAsync(disposing: true, async: true); async ValueTask DisposeAsync(bool disposing, bool async) { if (IsDisposed || !disposing) return; - var leftToSkip = _len - _read; - if (leftToSkip > 0) + if (!_connector.IsBroken) { - if (async) - await _buf.Skip(leftToSkip, async); - else - _buf.Skip(leftToSkip, async).GetAwaiter().GetResult(); + var pos = _buf.CumulativeReadPosition - _startPos; + var remaining = checked((int)(CurrentLength - pos)); + if (remaining > 0) + await _buf.Skip(remaining, async).ConfigureAwait(false); } + IsDisposed = true; } } diff --git a/src/Npgsql/Internal/NpgsqlReadBuffer.cs b/src/Npgsql/Internal/NpgsqlReadBuffer.cs index f854f27476..cb28028815 100644 --- a/src/Npgsql/Internal/NpgsqlReadBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlReadBuffer.cs @@ -2,7 +2,6 @@ using System.Buffers; using System.Buffers.Binary; using System.Diagnostics; -using System.Diagnostics.CodeAnalysis; using System.IO; using System.Net.Sockets; using System.Runtime.CompilerServices; @@ -12,15 +11,13 @@ using Npgsql.Util; using static System.Threading.Timeout; -#pragma warning disable CS1591 // Missing XML comment for publicly visible type or member - namespace Npgsql.Internal; /// /// A buffer used by Npgsql to read data from the socket efficiently. /// Provides methods which decode different values types and tracks the current position. 
/// -public sealed partial class NpgsqlReadBuffer : IDisposable +sealed partial class NpgsqlReadBuffer : IDisposable { #region Fields and Properties @@ -74,13 +71,15 @@ internal TimeSpan Timeout internal int ReadPosition { get; set; } internal int ReadBytesLeft => FilledBytes - ReadPosition; + internal PgReader PgReader { get; } + + long _flushedBytes; // this will always fit at least one message. + internal long CumulativeReadPosition => unchecked(_flushedBytes + ReadPosition); internal readonly byte[] Buffer; internal int FilledBytes; - ColumnStream? _columnStream; - - PreparedTextReader? _preparedTextReader; + internal ReadOnlySpan Span => Buffer.AsSpan(ReadPosition, ReadBytesLeft); readonly bool _usePool; bool _disposed; @@ -120,20 +119,163 @@ internal NpgsqlReadBuffer( TextEncoding = textEncoding; RelaxedTextEncoding = relaxedTextEncoding; + PgReader = new PgReader(this); } #endregion #region I/O - internal void Ensure(int count) => Ensure(count, false).GetAwaiter().GetResult(); - public Task Ensure(int count, bool async) => Ensure(count, async, readingNotifications: false); public Task EnsureAsync(int count) => Ensure(count, async: true, readingNotifications: false); + // Can't share due to Span vs Memory difference (can't make a memory out of a span). + int ReadWithTimeout(Span buffer) + { + while (true) + { + try + { + var read = Underlying.Read(buffer); + _flushedBytes = unchecked(_flushedBytes + read); + NpgsqlEventSource.Log.BytesRead(read); + return read; + } + catch (Exception ex) + { + var connector = Connector; + switch (ex) + { + // Note that mono throws SocketException with the wrong error (see #1330) + case IOException e when (e.InnerException as SocketException)?.SocketErrorCode == + (Type.GetType("Mono.Runtime") == null ? SocketError.TimedOut : SocketError.WouldBlock): + { + var isStreamBroken = false; +#if NETSTANDARD2_0 + // SslStream on .NET Framework treats any IOException (including timeouts) as fatal and may + // return garbage if reused. 
To prevent this, we flow down and break the connection immediately. + // See #4305. + isStreamBroken = connector.IsSecure && ex is IOException; +#endif + + if (!isStreamBroken) + { + // If we should attempt PostgreSQL cancellation, do it the first time we get a timeout. + // TODO: As an optimization, we can still attempt to send a cancellation request, but after + // that immediately break the connection + if (connector.AttemptPostgresCancellation && + !connector.PostgresCancellationPerformed && + connector.PerformPostgresCancellation()) + { + // Note that if the cancellation timeout is negative, we flow down and break the + // connection immediately. + var cancellationTimeout = connector.Settings.CancellationTimeout; + if (cancellationTimeout >= 0) + { + if (cancellationTimeout > 0) + Timeout = TimeSpan.FromMilliseconds(cancellationTimeout); + + continue; + } + } + } + + // If we're here, the PostgreSQL cancellation either failed or skipped entirely. + // Break the connection, bubbling up the correct exception type (cancellation or timeout) + throw connector.Break(CreateCancelException(connector)); + } + default: + throw connector.Break(new NpgsqlException("Exception while reading from stream", ex)); + } + } + } + } + + async ValueTask ReadWithTimeoutAsync(Memory buffer, CancellationToken cancellationToken) + { + var finalCt = Timeout != TimeSpan.Zero + ? 
Cts.Start(cancellationToken) + : Cts.Reset(); + + while (true) + { + try + { + var read = await Underlying.ReadAsync(buffer, finalCt).ConfigureAwait(false); + _flushedBytes = unchecked(_flushedBytes + read); + Cts.Stop(); + NpgsqlEventSource.Log.BytesRead(read); + return read; + } + catch (Exception ex) + { + var connector = Connector; + Cts.Stop(); + switch (ex) + { + // Read timeout + case OperationCanceledException: + // Note that mono throws SocketException with the wrong error (see #1330) + case IOException e when (e.InnerException as SocketException)?.SocketErrorCode == + (Type.GetType("Mono.Runtime") == null ? SocketError.TimedOut : SocketError.WouldBlock): + { + Debug.Assert(ex is OperationCanceledException); + var isStreamBroken = false; +#if NETSTANDARD2_0 + // SslStream on .NET Framework treats any IOException (including timeouts) as fatal and may + // return garbage if reused. To prevent this, we flow down and break the connection immediately. + // See #4305. + isStreamBroken = connector.IsSecure && ex is IOException; +#endif + + if (!isStreamBroken) + { + // If we should attempt PostgreSQL cancellation, do it the first time we get a timeout. + // TODO: As an optimization, we can still attempt to send a cancellation request, but after + // that immediately break the connection + if (connector.AttemptPostgresCancellation && + !connector.PostgresCancellationPerformed && + connector.PerformPostgresCancellation()) + { + // Note that if the cancellation timeout is negative, we flow down and break the + // connection immediately. + var cancellationTimeout = connector.Settings.CancellationTimeout; + if (cancellationTimeout >= 0) + { + if (cancellationTimeout > 0) + Timeout = TimeSpan.FromMilliseconds(cancellationTimeout); + + finalCt = Cts.Start(cancellationToken); + continue; + } + } + } + + // If we're here, the PostgreSQL cancellation either failed or skipped entirely. 
+ // Break the connection, bubbling up the correct exception type (cancellation or timeout) + throw connector.Break(CreateCancelException(connector)); + } + default: + throw connector.Break(new NpgsqlException("Exception while reading from stream", ex)); + } + } + } + } + + static Exception CreateCancelException(NpgsqlConnector connector) + => !connector.UserCancellationRequested + ? NpgsqlTimeoutException() + : connector.PostgresCancellationPerformed + ? new OperationCanceledException("Query was cancelled", TimeoutException(), connector.UserCancellationToken) + : new OperationCanceledException("Query was cancelled", connector.UserCancellationToken); + + static Exception NpgsqlTimeoutException() => new NpgsqlException("Exception while reading from stream", TimeoutException()); + + static Exception TimeoutException() => new TimeoutException("Timeout during reading attempt"); + /// /// Ensures that bytes are available in the buffer, and if /// not, reads from the socket until enough is available. @@ -154,12 +296,13 @@ static async Task EnsureLong( if (buffer.ReadPosition == buffer.FilledBytes) { - buffer.Clear(); + buffer.ResetPosition(); } else if (count > buffer.Size - buffer.FilledBytes) { Array.Copy(buffer.Buffer, buffer.ReadPosition, buffer.Buffer, 0, buffer.ReadBytesLeft); buffer.FilledBytes = buffer.ReadBytesLeft; + buffer._flushedBytes = unchecked(buffer._flushedBytes + buffer.ReadPosition); buffer.ReadPosition = 0; } @@ -174,7 +317,7 @@ static async Task EnsureLong( { var toRead = buffer.Size - buffer.FilledBytes; var read = async - ? await buffer.Underlying.ReadAsync(buffer.Buffer.AsMemory(buffer.FilledBytes, toRead), finalCt) + ? 
await buffer.Underlying.ReadAsync(buffer.Buffer.AsMemory(buffer.FilledBytes, toRead), finalCt).ConfigureAwait(false) : buffer.Underlying.Read(buffer.Buffer, buffer.FilledBytes, toRead); if (read == 0) @@ -287,23 +430,23 @@ internal NpgsqlReadBuffer AllocateOversize(int count) if (_underlyingSocket != null) tempBuf.Timeout = Timeout; CopyTo(tempBuf); - Clear(); + ResetPosition(); return tempBuf; } /// /// Does not perform any I/O - assuming that the bytes to be skipped are in the memory buffer. /// - internal void Skip(long len) + internal void Skip(int len) { Debug.Assert(ReadBytesLeft >= len); - ReadPosition += (int)len; + ReadPosition += len; } /// /// Skip a given number of bytes. /// - public async Task Skip(long len, bool async) + public async Task Skip(int len, bool async) { Debug.Assert(len >= 0); @@ -312,15 +455,15 @@ public async Task Skip(long len, bool async) len -= ReadBytesLeft; while (len > Size) { - Clear(); - await Ensure(Size, async); + ResetPosition(); + await Ensure(Size, async).ConfigureAwait(false); len -= Size; } - Clear(); - await Ensure((int)len, async); + ResetPosition(); + await Ensure(len, async).ConfigureAwait(false); } - ReadPosition += (int)len; + ReadPosition += len; } #endregion @@ -428,19 +571,14 @@ public double ReadDouble(bool littleEndian) } [MethodImpl(MethodImplOptions.AggressiveInlining)] - T Read() + unsafe T Read() where T : unmanaged { - if (Unsafe.SizeOf() > ReadBytesLeft) - ThrowNotSpaceLeft(); - + Debug.Assert(sizeof(T) <= ReadBytesLeft, "There is not enough space left in the buffer."); var result = Unsafe.ReadUnaligned(ref Buffer[ReadPosition]); - ReadPosition += Unsafe.SizeOf(); + ReadPosition += sizeof(T); return result; } - static void ThrowNotSpaceLeft() - => ThrowHelper.ThrowInvalidOperationException("There is not enough space left in the buffer."); - public string ReadString(int byteLen) { Debug.Assert(byteLen <= ReadBytesLeft); @@ -449,14 +587,6 @@ public string ReadString(int byteLen) return result; } - public 
char[] ReadChars(int byteLen) - { - Debug.Assert(byteLen <= ReadBytesLeft); - var result = TextEncoding.GetChars(Buffer, ReadPosition, byteLen); - ReadPosition += byteLen; - return result; - } - public void ReadBytes(Span output) { Debug.Assert(output.Length <= ReadBytesLeft); @@ -467,14 +597,6 @@ public void ReadBytes(Span output) public void ReadBytes(byte[] output, int outputOffset, int len) => ReadBytes(new Span(output, outputOffset, len)); - public ReadOnlySpan ReadSpan(int len) - { - Debug.Assert(len <= ReadBytesLeft); - var span = new ReadOnlySpan(Buffer, ReadPosition, len); - ReadPosition += len; - return span; - } - public ReadOnlyMemory ReadMemory(int len) { Debug.Assert(len <= ReadBytesLeft); @@ -487,26 +609,31 @@ public ReadOnlyMemory ReadMemory(int len) #region Read Complex - public int Read(Span output) + public int Read(bool commandScoped, Span output) { var readFromBuffer = Math.Min(ReadBytesLeft, output.Length); if (readFromBuffer > 0) { - new Span(Buffer, ReadPosition, readFromBuffer).CopyTo(output); + Buffer.AsSpan(ReadPosition, readFromBuffer).CopyTo(output); ReadPosition += readFromBuffer; return readFromBuffer; } - if (output.Length == 0) - return 0; + // Only reset if we'll be able to read data, this is to support zero-byte reads. 
+ if (output.Length > 0) + { + Debug.Assert(ReadBytesLeft == 0); + ResetPosition(); + } + + if (commandScoped) + return ReadWithTimeout(output); - Debug.Assert(ReadBytesLeft == 0); - Clear(); try { var read = Underlying.Read(output); - if (read == 0) - throw new EndOfStreamException(); + _flushedBytes = unchecked(_flushedBytes + read); + NpgsqlEventSource.Log.BytesRead(read); return read; } catch (Exception e) @@ -515,30 +642,35 @@ public int Read(Span output) } } - public ValueTask ReadAsync(Memory output, CancellationToken cancellationToken = default) + public ValueTask ReadAsync(bool commandScoped, Memory output, CancellationToken cancellationToken = default) { var readFromBuffer = Math.Min(ReadBytesLeft, output.Length); if (readFromBuffer > 0) { - new Span(Buffer, ReadPosition, readFromBuffer).CopyTo(output.Span); + Buffer.AsSpan(ReadPosition, readFromBuffer).CopyTo(output.Span); ReadPosition += readFromBuffer; return new ValueTask(readFromBuffer); } - if (output.Length == 0) - return new ValueTask(0); + return ReadAsyncLong(this, commandScoped, output, cancellationToken); - return ReadAsyncLong(this, output, cancellationToken); - - static async ValueTask ReadAsyncLong(NpgsqlReadBuffer buffer, Memory output, CancellationToken cancellationToken) + static async ValueTask ReadAsyncLong(NpgsqlReadBuffer buffer, bool commandScoped, Memory output, CancellationToken cancellationToken) { - Debug.Assert(buffer.ReadBytesLeft == 0); - buffer.Clear(); + // Only reset if we'll be able to read data, this is to support zero-byte reads. 
+ if (output.Length > 0) + { + Debug.Assert(buffer.ReadBytesLeft == 0); + buffer.ResetPosition(); + } + + if (commandScoped) + return await buffer.ReadWithTimeoutAsync(output, cancellationToken).ConfigureAwait(false); + try { - var read = await buffer.Underlying.ReadAsync(output, cancellationToken); - if (read == 0) - throw new EndOfStreamException(); + var read = await buffer.Underlying.ReadAsync(output, cancellationToken).ConfigureAwait(false); + buffer._flushedBytes = unchecked(buffer._flushedBytes + read); + NpgsqlEventSource.Log.BytesRead(read); return read; } catch (Exception e) @@ -548,22 +680,13 @@ static async ValueTask ReadAsyncLong(NpgsqlReadBuffer buffer, Memory } } - public Stream GetStream(int len, bool canSeek) + ColumnStream? _lastStream; + public ColumnStream CreateStream(int len, bool canSeek) { - if (_columnStream == null) - _columnStream = new ColumnStream(Connector); - - _columnStream.Init(len, canSeek); - return _columnStream; - } - - public TextReader GetPreparedTextReader(string str, Stream stream) - { - if (_preparedTextReader is not { IsDisposed: true }) - _preparedTextReader = new PreparedTextReader(); - - _preparedTextReader.Init(str, (ColumnStream)stream); - return _preparedTextReader; + if (_lastStream is not { IsDisposed: true }) + _lastStream = new ColumnStream(Connector); + _lastStream.Init(len, canSeek, !Connector.LongRunningConnection); + return _lastStream; } /// @@ -588,9 +711,9 @@ public ValueTask ReadNullTerminatedString(bool async, CancellationToken /// Seeks the first null terminator (\0) and returns the string up to it. Reads additional data from the network if a null /// terminator isn't found in the buffered data. 
/// - ValueTask ReadNullTerminatedString(Encoding encoding, bool async, CancellationToken cancellationToken = default) + public ValueTask ReadNullTerminatedString(Encoding encoding, bool async, CancellationToken cancellationToken = default) { - var index = Buffer.AsSpan(ReadPosition, FilledBytes - ReadPosition).IndexOf((byte)0); + var index = Span.IndexOf((byte)0); if (index >= 0) { var result = new ValueTask(encoding.GetString(Buffer, ReadPosition, index)); @@ -614,7 +737,7 @@ async ValueTask ReadLong(Encoding encoding, bool async) do { - await ReadMore(async); + await ReadMore(async).ConfigureAwait(false); Debug.Assert(ReadPosition == 0); foundTerminator = false; @@ -655,7 +778,7 @@ async ValueTask ReadLong(Encoding encoding, bool async) public ReadOnlySpan GetNullTerminatedBytes() { - var i = Buffer.AsSpan(ReadPosition).IndexOf((byte)0); + var i = Span.IndexOf((byte)0); Debug.Assert(i >= 0); var result = new ReadOnlySpan(Buffer, ReadPosition, i); ReadPosition += i + 1; @@ -682,12 +805,15 @@ public void Dispose() #region Misc - internal void Clear() + void ResetPosition() { + _flushedBytes = unchecked(_flushedBytes + FilledBytes); ReadPosition = 0; FilledBytes = 0; } + internal void ResetFlushedBytes() => _flushedBytes = 0; + internal void CopyTo(NpgsqlReadBuffer other) { Debug.Assert(other.Size - other.FilledBytes >= ReadBytesLeft); diff --git a/src/Npgsql/Internal/NpgsqlWriteBuffer.Stream.cs b/src/Npgsql/Internal/NpgsqlWriteBuffer.Stream.cs deleted file mode 100644 index 428fb0ec30..0000000000 --- a/src/Npgsql/Internal/NpgsqlWriteBuffer.Stream.cs +++ /dev/null @@ -1,122 +0,0 @@ -using System; -using System.IO; -using System.Threading; -using System.Threading.Tasks; - -namespace Npgsql.Internal; - -public sealed partial class NpgsqlWriteBuffer -{ - sealed class ParameterStream : Stream - { - readonly NpgsqlWriteBuffer _buf; - bool _disposed; - - internal ParameterStream(NpgsqlWriteBuffer buf) - => _buf = buf; - - internal void Init() - => _disposed = false; - - 
public override bool CanRead => false; - - public override bool CanWrite => true; - - public override bool CanSeek => false; - - public override long Length => throw new NotSupportedException(); - - public override void SetLength(long value) - => throw new NotSupportedException(); - - public override long Position - { - get => throw new NotSupportedException(); - set => throw new NotSupportedException(); - } - - public override long Seek(long offset, SeekOrigin origin) - => throw new NotSupportedException(); - - public override void Flush() - => CheckDisposed(); - - public override Task FlushAsync(CancellationToken cancellationToken = default) - { - CheckDisposed(); - return cancellationToken.IsCancellationRequested - ? Task.FromCanceled(cancellationToken) : Task.CompletedTask; - } - - public override int Read(byte[] buffer, int offset, int count) - => throw new NotSupportedException(); - - public override Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken = default) - => throw new NotSupportedException(); - - public override void Write(byte[] buffer, int offset, int count) - => Write(buffer, offset, count, false); - - public override Task WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return Write(buffer, offset, count, true, cancellationToken); - } - - Task Write(byte[] buffer, int offset, int count, bool async, CancellationToken cancellationToken = default) - { - CheckDisposed(); - - if (buffer == null) - throw new ArgumentNullException(nameof(buffer)); - if (offset < 0) - throw new ArgumentNullException(nameof(offset)); - if (count < 0) - throw new ArgumentNullException(nameof(count)); - if (buffer.Length - offset < count) - throw new ArgumentException("Offset and length were out of bounds for the array or count is greater than the number of elements from index to the end of the source collection."); - if 
(cancellationToken.IsCancellationRequested) - return Task.FromCanceled(cancellationToken); - - while (count > 0) - { - var left = _buf.WriteSpaceLeft; - if (left == 0) - return WriteLong(buffer, offset, count, async, cancellationToken); - - var slice = Math.Min(count, left); - _buf.WriteBytes(buffer, offset, slice); - offset += slice; - count -= slice; - } - - return Task.CompletedTask; - } - - async Task WriteLong(byte[] buffer, int offset, int count, bool async, CancellationToken cancellationToken = default) - { - while (count > 0) - { - var left = _buf.WriteSpaceLeft; - if (left == 0) - { - await _buf.Flush(async, cancellationToken); - continue; - } - var slice = Math.Min(count, left); - _buf.WriteBytes(buffer, offset, slice); - offset += slice; - count -= slice; - } - } - - void CheckDisposed() - { - if (_disposed) - ThrowHelper.ThrowObjectDisposedException(nameof(ParameterStream)); - } - - protected override void Dispose(bool disposing) - => _disposed = true; - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs index 451e7d5263..94775ec3ad 100644 --- a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs @@ -1,5 +1,4 @@ using System; -using System.Buffers; using System.Buffers.Binary; using System.Diagnostics; using System.IO; @@ -19,10 +18,13 @@ namespace Npgsql.Internal; /// A buffer used by Npgsql to write data to the socket efficiently. /// Provides methods which encode different values types and tracks the current position. 
/// -public sealed partial class NpgsqlWriteBuffer : IDisposable +sealed class NpgsqlWriteBuffer : IDisposable { #region Fields and Properties + internal static readonly UTF8Encoding UTF8Encoding = new(false, true); + internal static readonly UTF8Encoding RelaxedUTF8Encoding = new(false, false); + internal readonly NpgsqlConnector Connector; internal Stream Underlying { private get; set; } @@ -67,14 +69,23 @@ internal TimeSpan Timeout public int WriteSpaceLeft => Size - WritePosition; + internal PgWriter GetWriter(NpgsqlDatabaseInfo typeCatalog, FlushMode? flushMode = null) + { + // Make sure we'll refetch from the write buffer. + _pgWriter.Reset(); + var writer = _pgWriter.Init(typeCatalog); + if (flushMode is not null) + writer.WithFlushMode(flushMode.GetValueOrDefault()); + return writer; + } + internal readonly byte[] Buffer; readonly Encoder _textEncoder; internal int WritePosition; - ParameterStream? _parameterStream; - bool _disposed; + readonly PgWriter _pgWriter; /// /// The minimum buffer size possible. @@ -106,6 +117,7 @@ internal NpgsqlWriteBuffer( TextEncoding = textEncoding; _textEncoder = TextEncoding.GetEncoder(); + _pgWriter = new PgWriter(new NpgsqlBufferWriter(this)); } #endregion @@ -378,60 +390,12 @@ static async Task WriteStringLong(NpgsqlWriteBuffer buffer, bool async, string s } } - internal Task WriteChars(char[] chars, int offset, int charLen, int byteLen, bool async, CancellationToken cancellationToken = default) - { - if (byteLen <= WriteSpaceLeft) - { - WriteChars(chars, offset, charLen); - return Task.CompletedTask; - } - return WriteCharsLong(this, async, chars, offset, charLen, byteLen, cancellationToken); - - static async Task WriteCharsLong(NpgsqlWriteBuffer buffer, bool async, char[] chars, int offset, int charLen, int byteLen, CancellationToken cancellationToken) - { - Debug.Assert(byteLen > buffer.WriteSpaceLeft); - if (byteLen <= buffer.Size) - { - // String can fit entirely in an empty buffer. 
Flush and retry rather than - // going into the partial writing flow below (which requires ToCharArray()) - await buffer.Flush(async, cancellationToken); - buffer.WriteChars(chars, offset, charLen); - } - else - { - var charPos = 0; - - while (true) - { - buffer.WriteStringChunked(chars, charPos + offset, charLen - charPos, true, out var charsUsed, out var completed); - if (completed) - break; - await buffer.Flush(async, cancellationToken); - charPos += charsUsed; - } - } - } - } - public void WriteString(string s, int len = 0) { Debug.Assert(TextEncoding.GetByteCount(s) <= WriteSpaceLeft); WritePosition += TextEncoding.GetBytes(s, 0, len == 0 ? s.Length : len, Buffer, WritePosition); } - internal void WriteChars(char[] chars, int offset, int len) - { - var charCount = len == 0 ? chars.Length : len; - Debug.Assert(TextEncoding.GetByteCount(chars, 0, charCount) <= WriteSpaceLeft); - WritePosition += TextEncoding.GetBytes(chars, offset, charCount, Buffer, WritePosition); - } - - internal void WriteChars(ReadOnlySpan chars) - { - Debug.Assert(TextEncoding.GetByteCount(chars) <= WriteSpaceLeft); - WritePosition += TextEncoding.GetBytes(chars, Buffer.AsSpan(WritePosition)); - } - public void WriteBytes(ReadOnlySpan buf) { Debug.Assert(buf.Length <= WriteSpaceLeft); @@ -518,15 +482,6 @@ public void WriteNullTerminatedString(string s) #region Write Complex - public Stream GetStream() - { - if (_parameterStream == null) - _parameterStream = new ParameterStream(this); - - _parameterStream.Init(); - return _parameterStream; - } - internal void WriteStringChunked(char[] chars, int charIndex, int charCount, bool flush, out int charsUsed, out bool completed) { diff --git a/src/Npgsql/Internal/PgBufferedConverter.cs b/src/Npgsql/Internal/PgBufferedConverter.cs new file mode 100644 index 0000000000..7faf7bb0c4 --- /dev/null +++ b/src/Npgsql/Internal/PgBufferedConverter.cs @@ -0,0 +1,52 @@ +using System; +using System.Diagnostics.CodeAnalysis; +using System.Threading; +using 
System.Threading.Tasks; + +namespace Npgsql.Internal; + +public abstract class PgBufferedConverter : PgConverter +{ + protected PgBufferedConverter(bool customDbNullPredicate = false) : base(customDbNullPredicate) { } + + protected abstract T ReadCore(PgReader reader); + protected abstract void WriteCore(PgWriter writer, T value); + + public override Size GetSize(SizeContext context, T value, ref object? writeState) + => throw new NotSupportedException(); + + public sealed override T Read(PgReader reader) + { + // We check IsAtStart first to speed up primitive reads. + if (!reader.IsAtStart && reader.ShouldBufferCurrent()) + ThrowIORequired(); + + return ReadCore(reader); + } + + public sealed override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => new(Read(reader)); + + internal sealed override ValueTask ReadAsObject(bool async, PgReader reader, CancellationToken cancellationToken) + => new(Read(reader)!); + + public sealed override void Write(PgWriter writer, T value) + { + if (!writer.BufferingWrite && writer.ShouldFlush(writer.CurrentBufferRequirement)) + ThrowIORequired(); + + WriteCore(writer, value); + } + + public sealed override ValueTask WriteAsync(PgWriter writer, [DisallowNull] T value, CancellationToken cancellationToken = default) + { + Write(writer, value); + return new(); + } + + internal sealed override ValueTask WriteAsObject(bool async, PgWriter writer, object value, CancellationToken cancellationToken) + { + Write(writer, (T)value); + return new(); + } +} diff --git a/src/Npgsql/Internal/PgComposingConverterResolver.cs b/src/Npgsql/Internal/PgComposingConverterResolver.cs new file mode 100644 index 0000000000..543ef8bdbd --- /dev/null +++ b/src/Npgsql/Internal/PgComposingConverterResolver.cs @@ -0,0 +1,68 @@ +using System; +using System.Collections.Concurrent; +using System.Collections.Generic; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Internal; + +abstract class PgComposingConverterResolver : 
PgConverterResolver +{ + readonly PgTypeId? _pgTypeId; + public PgResolverTypeInfo EffectiveTypeInfo { get; } + readonly ConcurrentDictionary _converters = new(ReferenceEqualityComparer.Instance); + + protected PgComposingConverterResolver(PgTypeId? pgTypeId, PgResolverTypeInfo effectiveTypeInfo) + { + if (pgTypeId is null && effectiveTypeInfo.PgTypeId is not null) + throw new ArgumentNullException(nameof(pgTypeId), $"Cannot be null if {nameof(effectiveTypeInfo)}.{nameof(PgTypeInfo.PgTypeId)} is not null."); + + _pgTypeId = pgTypeId; + EffectiveTypeInfo = effectiveTypeInfo; + } + + protected abstract PgTypeId GetEffectivePgTypeId(PgTypeId pgTypeId); + protected abstract PgTypeId GetPgTypeId(PgTypeId effectivePgTypeId); + protected abstract PgConverter CreateConverter(PgConverterResolution effectiveResolution); + protected abstract PgConverterResolution? GetEffectiveResolution(T? value, PgTypeId? expectedEffectivePgTypeId); + + public override PgConverterResolution GetDefault(PgTypeId? pgTypeId) + { + PgTypeId? effectivePgTypeId = pgTypeId is not null ? GetEffectiveTypeId(pgTypeId.GetValueOrDefault()) : null; + var effectiveResolution = EffectiveTypeInfo.GetDefaultResolution(effectivePgTypeId); + return new(GetOrAdd(effectiveResolution), pgTypeId ?? _pgTypeId ?? GetPgTypeId(effectiveResolution.PgTypeId)); + } + + public override PgConverterResolution? Get(T? value, PgTypeId? expectedPgTypeId) + { + PgTypeId? expectedEffectiveId = expectedPgTypeId is not null ? GetEffectiveTypeId(expectedPgTypeId.GetValueOrDefault()) : null; + if (GetEffectiveResolution(value, expectedEffectiveId) is { } resolution) + return new PgConverterResolution(GetOrAdd(resolution), expectedPgTypeId ?? _pgTypeId ?? 
GetPgTypeId(resolution.PgTypeId)); + + return null; + } + + public override PgConverterResolution Get(Field field) + { + var effectiveResolution = EffectiveTypeInfo.GetResolution(field with { PgTypeId = GetEffectiveTypeId(field.PgTypeId) }); + return new PgConverterResolution(GetOrAdd(effectiveResolution), field.PgTypeId); + } + + PgTypeId GetEffectiveTypeId(PgTypeId pgTypeId) + { + if (_pgTypeId == pgTypeId) + return EffectiveTypeInfo.PgTypeId.GetValueOrDefault(); + + // We have an undecided type info which is asked to resolve for a specific type id + // we'll unfortunately have to look up the effective id, this is rare though. + return GetEffectivePgTypeId(pgTypeId); + } + + PgConverter GetOrAdd(PgConverterResolution effectiveResolution) + { + (PgComposingConverterResolver Instance, PgConverterResolution EffectiveResolution) state = (this, effectiveResolution); + return (PgConverter)_converters.GetOrAdd( + effectiveResolution.Converter, + static (_, state) => state.Instance.CreateConverter(state.EffectiveResolution), + state); + } +} diff --git a/src/Npgsql/Internal/PgConverter.cs b/src/Npgsql/Internal/PgConverter.cs new file mode 100644 index 0000000000..e136e9a904 --- /dev/null +++ b/src/Npgsql/Internal/PgConverter.cs @@ -0,0 +1,205 @@ +using System; +using System.Buffers; +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Threading; +using System.Threading.Tasks; + +namespace Npgsql.Internal; + +public abstract class PgConverter +{ + internal DbNullPredicate DbNullPredicateKind { get; } + public bool IsDbNullable => DbNullPredicateKind is not DbNullPredicate.None; + + private protected PgConverter(Type type, bool isNullDefaultValue, bool customDbNullPredicate = false) + => DbNullPredicateKind = customDbNullPredicate ? DbNullPredicate.Custom : InferDbNullPredicate(type, isNullDefaultValue); + + /// + /// Whether this converter can handle the given format and with which buffer requirements. + /// + /// The data format. 
+ /// Returns the buffer requirements. + /// Returns true if the given data format is supported. + /// The buffer requirements should not cover database NULL reads or writes, these are handled by the caller. + public abstract bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements); + + internal abstract Type TypeToConvert { get; } + + internal bool IsDbNullAsObject([NotNullWhen(false)] object? value) + => DbNullPredicateKind switch + { + DbNullPredicate.Null => value is null, + DbNullPredicate.None => false, + DbNullPredicate.PolymorphicNull => value is null or DBNull, + // We do the null check to keep the NotNullWhen(false) invariant. + _ => IsDbNullValueAsObject(value) || (value is null && ThrowInvalidNullValue()) + }; + + private protected abstract bool IsDbNullValueAsObject(object? value); + + internal abstract Size GetSizeAsObject(SizeContext context, object value, ref object? writeState); + + internal object ReadAsObject(PgReader reader) + => ReadAsObject(async: false, reader, CancellationToken.None).GetAwaiter().GetResult(); + internal ValueTask ReadAsObjectAsync(PgReader reader, CancellationToken cancellationToken = default) + => ReadAsObject(async: true, reader, cancellationToken); + + // Shared sync/async abstract to reduce virtual method table size overhead and code size for each NpgsqlConverter instantiation. + internal abstract ValueTask ReadAsObject(bool async, PgReader reader, CancellationToken cancellationToken); + + internal void WriteAsObject(PgWriter writer, object value) + => WriteAsObject(async: false, writer, value, CancellationToken.None).GetAwaiter().GetResult(); + internal ValueTask WriteAsObjectAsync(PgWriter writer, object value, CancellationToken cancellationToken = default) + => WriteAsObject(async: true, writer, value, cancellationToken); + + // Shared sync/async abstract to reduce virtual method table size overhead and code size for each NpgsqlConverter instantiation. 
+ internal abstract ValueTask WriteAsObject(bool async, PgWriter writer, object value, CancellationToken cancellationToken); + + static DbNullPredicate InferDbNullPredicate(Type type, bool isNullDefaultValue) + => type == typeof(object) || type == typeof(DBNull) + ? DbNullPredicate.PolymorphicNull + : isNullDefaultValue + ? DbNullPredicate.Null + : DbNullPredicate.None; + + internal enum DbNullPredicate : byte + { + /// Never DbNull (struct types) + None, + /// DbNull when *user code* + Custom, + /// DbNull when value is null + Null, + /// DbNull when value is null or DBNull + PolymorphicNull + } + + [DoesNotReturn] + private protected static void ThrowIORequired() + => throw new InvalidOperationException("Buffer requirements for format not respected, expected no IO to be required."); + + private protected static bool ThrowInvalidNullValue() + => throw new ArgumentNullException("value", "Null value given for non-nullable type converter"); + + protected bool CanConvertBufferedDefault(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.Value; + return format is DataFormat.Binary; + } +} + +public abstract class PgConverter : PgConverter +{ + private protected PgConverter(bool customDbNullPredicate) + : base(typeof(T), default(T) is null, customDbNullPredicate) { } + + protected virtual bool IsDbNullValue(T? value) => throw new NotSupportedException(); + + // Object null semantics as follows, if T is a struct (so excluding nullable) report false for null values, don't throw on the cast. + // As a result this creates symmetry with IsDbNull when we're dealing with a struct T, as it cannot be passed null at all. + private protected override bool IsDbNullValueAsObject(object? value) + => (default(T) is null || value is not null) && IsDbNullValue(Downcast(value)); + + public bool IsDbNull([NotNullWhen(false)] T? 
value) + { + return DbNullPredicateKind switch + { + DbNullPredicate.Null => value is null, + DbNullPredicate.None => false, + DbNullPredicate.PolymorphicNull => value is null or DBNull, + // We do the null check to keep the NotNullWhen(false) invariant. + DbNullPredicate.Custom => IsDbNullValue(value) || (value is null && ThrowInvalidNullValue()), + _ => ThrowOutOfRange() + }; + + bool ThrowOutOfRange() => throw new ArgumentOutOfRangeException(nameof(DbNullPredicateKind), "Unknown case", DbNullPredicateKind.ToString()); + } + + public abstract T Read(PgReader reader); + public abstract ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default); + + public abstract Size GetSize(SizeContext context, [DisallowNull]T value, ref object? writeState); + public abstract void Write(PgWriter writer, [DisallowNull] T value); + public abstract ValueTask WriteAsync(PgWriter writer, [DisallowNull] T value, CancellationToken cancellationToken = default); + + internal sealed override Type TypeToConvert => typeof(T); + + internal sealed override Size GetSizeAsObject(SizeContext context, object value, ref object? writeState) + => GetSize(context, Downcast(value), ref writeState); + + [MethodImpl(MethodImplOptions.NoInlining)] + [return: NotNullIfNotNull(nameof(value))] + static T? Downcast(object? value) => (T?)value; +} + +static class PgConverterExtensions +{ + public static Size? GetSizeOrDbNull(this PgConverter converter, DataFormat format, Size writeRequirement, T? value, ref object? writeState) + { + if (converter.IsDbNull(value)) + return null; + + if (writeRequirement is { Kind: SizeKind.Exact, Value: var byteCount }) + return byteCount; + var size = converter.GetSize(new(format, writeRequirement), value, ref writeState); + if (size.Kind is SizeKind.UpperBound) + throw new InvalidOperationException("SizeKind.UpperBound is not a valid return value for GetSize."); + return size; + } + + public static Size? 
GetSizeOrDbNullAsObject(this PgConverter converter, DataFormat format, Size writeRequirement, object? value, ref object? writeState) + { + if (converter.IsDbNullAsObject(value)) + return null; + + if (writeRequirement is { Kind: SizeKind.Exact, Value: var byteCount }) + return byteCount; + var size = converter.GetSizeAsObject(new(format, writeRequirement), value, ref writeState); + if (size.Kind is SizeKind.UpperBound) + throw new InvalidOperationException("SizeKind.UpperBound is not a valid return value for GetSize."); + return size; + } +} + +interface IResumableRead +{ + bool Supported { get; } +} + +public readonly struct SizeContext +{ + [SetsRequiredMembers] + public SizeContext(DataFormat format, Size bufferRequirement) + { + Format = format; + BufferRequirement = bufferRequirement; + } + + public DataFormat Format { get; } + public required Size BufferRequirement { get; init; } +} + +class MultiWriteState : IDisposable +{ + public required ArrayPool<(Size Size, object? WriteState)>? ArrayPool { get; init; } + public required ArraySegment<(Size Size, object? WriteState)> Data { get; init; } + public required bool AnyWriteState { get; init; } + + public void Dispose() + { + if (Data.Array is not { } array) + return; + + if (AnyWriteState) + { + for (var i = Data.Offset; i < array.Length; i++) + if (array[i].WriteState is IDisposable disposable) + disposable.Dispose(); + + Array.Clear(Data.Array, Data.Offset, Data.Count); + } + + ArrayPool?.Return(Data.Array); + } +} diff --git a/src/Npgsql/Internal/PgConverterResolver.cs b/src/Npgsql/Internal/PgConverterResolver.cs new file mode 100644 index 0000000000..baee09d58e --- /dev/null +++ b/src/Npgsql/Internal/PgConverterResolver.cs @@ -0,0 +1,109 @@ +using System; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Internal; + +public abstract class PgConverterResolver +{ + private protected PgConverterResolver() { } + + /// + /// Gets the appropriate converter solely based on PgTypeId. 
+ /// + /// + /// The converter resolution. + /// + /// Implementations should not return new instances of the possible converters that can be returned, instead its expected these are cached once used. + /// Array or other collection converters depend on this to cache their own converter - which wraps the element converter - with the cache key being the element converter reference. + /// + public abstract PgConverterResolution GetDefault(PgTypeId? pgTypeId); + + /// + /// Gets the appropriate converter to read with based on the given field info. + /// + /// + /// The converter resolution. + /// + /// Implementations should not return new instances of the possible converters that can be returned, instead its expected these are cached once used. + /// Array or other collection converters depend on this to cache their own converter - which wraps the element converter - with the cache key being the element converter reference. + /// + public virtual PgConverterResolution Get(Field field) => GetDefault(field.PgTypeId); + + internal abstract Type TypeToConvert { get; } + + internal abstract PgConverterResolution? GetAsObjectInternal(PgTypeInfo typeInfo, object? value, PgTypeId? expectedPgTypeId); + + internal PgConverterResolution GetDefaultInternal(bool validate, bool expectPortableTypeIds, PgTypeId? pgTypeId) + { + var resolution = GetDefault(pgTypeId); + if (validate) + Validate(nameof(GetDefault), resolution, TypeToConvert, pgTypeId, expectPortableTypeIds); + return resolution; + } + + internal PgConverterResolution GetInternal(PgTypeInfo typeInfo, Field field) + { + var resolution = Get(field); + if (typeInfo.ValidateResolution) + Validate(nameof(Get), resolution, TypeToConvert, field.PgTypeId, typeInfo.Options.PortableTypeIds); + return resolution; + } + + private protected static void Validate(string methodName, PgConverterResolution resolution, Type expectedTypeToConvert, PgTypeId? 
expectedPgTypeId, bool expectPortableTypeIds) + { + if (resolution.Converter is null) + throw new InvalidOperationException($"'{methodName}' returned a null {nameof(PgConverterResolution.Converter)} unexpectedly."); + + // We allow object resolvers to return any converter, this is to help: + // - Composing resolvers being able to use converter type identity (instead of everything being CastingConverter). + // - Reduce indirection by allowing disparate type converters to be returned directly. + // As a consequence any object typed resolver info is always a boxing one, to reduce the chances invalid casts to PgConverter are attempted. + if (expectedTypeToConvert != typeof(object) && resolution.Converter.TypeToConvert != expectedTypeToConvert) + throw new InvalidOperationException($"'{methodName}' returned a {nameof(PgConverterResolution.Converter)} of type {resolution.Converter.TypeToConvert} instead of {expectedTypeToConvert} unexpectedly."); + + if (expectPortableTypeIds && resolution.PgTypeId.IsOid || !expectPortableTypeIds && resolution.PgTypeId.IsDataTypeName) + throw new InvalidOperationException($"{methodName}' returned a resolution with a {nameof(PgConverterResolution.PgTypeId)} that was not in canonical form."); + + if (expectedPgTypeId is not null && resolution.PgTypeId != expectedPgTypeId) + throw new InvalidOperationException( + $"'{methodName}' returned a different {nameof(PgConverterResolution.PgTypeId)} than was passed in as expected." + + $" If such a mismatch occurs an exception should be thrown instead."); + } + + protected ArgumentOutOfRangeException CreateUnsupportedPgTypeIdException(PgTypeId pgTypeId) + => new(nameof(pgTypeId), pgTypeId, "Unsupported PgTypeId."); +} + +public abstract class PgConverterResolver : PgConverterResolver +{ + /// + /// Gets the appropriate converter to write with based on the given value. + /// + /// + /// + /// The converter resolution. 
+ /// + /// Implementations should not return new instances of the possible converters that can be returned, instead its expected these are + /// cached once used. Array or other collection converters depend on this to cache their own converter - which wraps the element + /// converter - with the cache key being the element converter reference. + /// + public abstract PgConverterResolution? Get(T? value, PgTypeId? expectedPgTypeId); + + internal sealed override Type TypeToConvert => typeof(T); + + internal PgConverterResolution? GetInternal(PgTypeInfo typeInfo, T? value, PgTypeId? expectedPgTypeId) + { + var resolution = Get(value, expectedPgTypeId); + if (typeInfo.ValidateResolution && resolution is not null) + Validate(nameof(Get), resolution.GetValueOrDefault(), TypeToConvert, expectedPgTypeId, typeInfo.Options.PortableTypeIds); + return resolution; + } + + internal sealed override PgConverterResolution? GetAsObjectInternal(PgTypeInfo typeInfo, object? value, PgTypeId? expectedPgTypeId) + { + var resolution = Get(value is null ? default : (T)value, expectedPgTypeId); + if (typeInfo.ValidateResolution && resolution is not null) + Validate(nameof(Get), resolution.GetValueOrDefault(), TypeToConvert, expectedPgTypeId, typeInfo.Options.PortableTypeIds); + return resolution; + } +} diff --git a/src/Npgsql/Internal/PgReader.cs b/src/Npgsql/Internal/PgReader.cs new file mode 100644 index 0000000000..f1f448bc65 --- /dev/null +++ b/src/Npgsql/Internal/PgReader.cs @@ -0,0 +1,723 @@ +using System; +using System.Buffers; +using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; +using System.IO; +using System.Runtime.CompilerServices; +using System.Text; +using System.Threading; +using System.Threading.Tasks; + +namespace Npgsql.Internal; + +public class PgReader +{ + readonly NpgsqlReadBuffer _buffer; + + bool _resumable; + + byte[]? _pooledArray; + NpgsqlReadBuffer.ColumnStream? _userActiveStream; + PreparedTextReader? 
_preparedTextReader; + + long _fieldStartPos; + Size _fieldBufferRequirement; + DataFormat _fieldFormat; + int _fieldSize; + + // This position is relative to _fieldStartPos, which is why it can be an int. + int _currentStartPos; + Size _currentBufferRequirement; + int _currentSize; + + // GetChars Internal state + TextReader? _charsReadReader; + int _charsRead; + + // GetChars User state + int? _charsReadOffset; + ArraySegment? _charsReadBuffer; + + bool _requiresCleanup; + + internal PgReader(NpgsqlReadBuffer buffer) + { + _buffer = buffer; + _fieldStartPos = -1; + _currentSize = -1; + } + + internal long FieldStartPos => _fieldStartPos; + internal int FieldSize => _fieldSize; + internal bool Initialized => _fieldStartPos is not -1; + internal int FieldOffset => (int)(_buffer.CumulativeReadPosition - _fieldStartPos); + internal int FieldRemaining => FieldSize - FieldOffset; + + bool HasCurrent => _currentSize >= 0; + int CurrentSize => HasCurrent ? _currentSize : _fieldSize; + + public ValueMetadata Current => new() { Size = CurrentSize, Format = _fieldFormat, BufferRequirement = CurrentBufferRequirement }; + public int CurrentRemaining => HasCurrent ? _currentSize - CurrentOffset : FieldRemaining; + + Size CurrentBufferRequirement => HasCurrent ? 
_currentBufferRequirement : _fieldBufferRequirement; + int CurrentOffset => FieldOffset - _currentStartPos; + + int BufferSize => _buffer.Size; + int BufferBytesRemaining => _buffer.ReadBytesLeft; + + internal bool IsAtStart => FieldOffset is 0; + internal bool Resumable => _resumable; + public bool IsResumed => Resumable && CurrentSize != CurrentRemaining; + + ArrayPool ArrayPool => ArrayPool.Shared; + + [MemberNotNullWhen(true, nameof(_charsReadReader))] + internal bool IsCharsRead => _charsReadOffset is not null; + + // Here for testing purposes + internal void BreakConnection() => throw _buffer.Connector.Break(new Exception("Broken")); + + internal void Revert(int size, int startPos, Size bufferRequirement) + { + if (startPos > FieldOffset) + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(startPos), "Can't revert forwardly"); + + _currentStartPos = startPos; + _currentBufferRequirement = bufferRequirement; + _currentSize = size; + } + + [Conditional("DEBUG")] + void CheckBounds(int count) + { + if (count > FieldRemaining) + ThrowHelper.ThrowInvalidOperationException("Attempt to read past the end of the field."); + } + + public byte ReadByte() + { + CheckBounds(sizeof(byte)); + var result = _buffer.ReadByte(); + return result; + } + + public short ReadInt16() + { + CheckBounds(sizeof(short)); + var result = _buffer.ReadInt16(); + return result; + } + + public int ReadInt32() + { + CheckBounds(sizeof(int)); + var result = _buffer.ReadInt32(); + return result; + } + + public long ReadInt64() + { + CheckBounds(sizeof(long)); + var result = _buffer.ReadInt64(); + return result; + } + + public ushort ReadUInt16() + { + CheckBounds(sizeof(ushort)); + var result = _buffer.ReadUInt16(); + return result; + } + + public uint ReadUInt32() + { + CheckBounds(sizeof(uint)); + var result = _buffer.ReadUInt32(); + return result; + } + + public ulong ReadUInt64() + { + CheckBounds(sizeof(ulong)); + var result = _buffer.ReadUInt64(); + return result; + } + + public float 
ReadFloat() + { + CheckBounds(sizeof(float)); + var result = _buffer.ReadSingle(); + return result; + } + + public double ReadDouble() + { + CheckBounds(sizeof(double)); + var result = _buffer.ReadDouble(); + return result; + } + + public void Read(Span destination) + { + CheckBounds(destination.Length); + _buffer.ReadBytes(destination); + } + + public async ValueTask ReadNullTerminatedStringAsync(Encoding encoding, CancellationToken cancellationToken = default) + { + var result = await _buffer.ReadNullTerminatedString(encoding, async: true, cancellationToken).ConfigureAwait(false); + // Can only check after the fact. + CheckBounds(0); + return result; + } + + public string ReadNullTerminatedString(Encoding encoding) + { + var result = _buffer.ReadNullTerminatedString(encoding, async: false, CancellationToken.None).GetAwaiter().GetResult(); + CheckBounds(0); + return result; + } + public Stream GetStream(int? length = null) => GetColumnStream(false, length); + + internal Stream GetStream(bool canSeek, int? length = null) => GetColumnStream(canSeek, length); + + NpgsqlReadBuffer.ColumnStream GetColumnStream(bool canSeek = false, int? length = null) + { + if (length > CurrentRemaining) + throw new ArgumentOutOfRangeException(nameof(length), "Length is larger than the current remaining value size"); + + _requiresCleanup = true; + // This will cause any previously handed out StreamReaders etc to throw, as intended. 
+ if (_userActiveStream is not null) + DisposeUserActiveStream(async: false).GetAwaiter().GetResult(); + + length ??= CurrentRemaining; + CheckBounds(length.GetValueOrDefault()); + return _userActiveStream = _buffer.CreateStream(length.GetValueOrDefault(), canSeek && length <= BufferBytesRemaining); + } + + public TextReader GetTextReader(Encoding encoding) + => GetTextReader(async: false, encoding, CancellationToken.None).GetAwaiter().GetResult(); + + public ValueTask GetTextReaderAsync(Encoding encoding, CancellationToken cancellationToken) + => GetTextReader(async: true, encoding, cancellationToken); + + async ValueTask GetTextReader(bool async, Encoding encoding, CancellationToken cancellationToken) + { + // We don't want to add a ton of memory pressure for large strings. + const int maxPreparedSize = 1024 * 64; + + _requiresCleanup = true; + if (CurrentRemaining > BufferBytesRemaining || CurrentRemaining > maxPreparedSize) + return new StreamReader(GetColumnStream(), encoding, detectEncodingFromByteOrderMarks: false); + + if (_preparedTextReader is { IsDisposed: false }) + { + _preparedTextReader.Dispose(); + _preparedTextReader = null; + } + + _preparedTextReader ??= new PreparedTextReader(); + _preparedTextReader.Init( + encoding.GetString(async + ? 
await ReadBytesAsync(CurrentRemaining, cancellationToken).ConfigureAwait(false) + : ReadBytes(CurrentRemaining)), GetColumnStream(canSeek: false, 0)); + return _preparedTextReader; + } + + public ValueTask ReadBytesAsync(Memory buffer, CancellationToken cancellationToken = default) + { + var count = buffer.Length; + CheckBounds(count); + if (BufferBytesRemaining >= count) + { + _buffer.Buffer.AsSpan(_buffer.ReadPosition, count).CopyTo(buffer.Span); + _buffer.ReadPosition += count; + return new(); + } + + return Slow(); + + async ValueTask Slow() + { + var stream = _buffer.CreateStream(count, canSeek: false); + await using var _ = stream.ConfigureAwait(false); + await stream.ReadExactlyAsync(buffer, cancellationToken).ConfigureAwait(false); + } + } + + public void ReadBytes(Span buffer) + { + var count = buffer.Length; + CheckBounds(count); + if (BufferBytesRemaining >= count) + { + _buffer.Buffer.AsSpan(_buffer.ReadPosition, count).CopyTo(buffer); + _buffer.ReadPosition += count; + return; + } + + Slow(buffer); + + void Slow(Span buffer) + { + using var stream = _buffer.CreateStream(count, canSeek: false); + stream.ReadExactly(buffer); + } + } + + public bool TryReadBytes(int count, out ReadOnlySpan bytes) + { + CheckBounds(count); + if (BufferBytesRemaining >= count) + { + bytes = new ReadOnlySpan(_buffer.Buffer, _buffer.ReadPosition, count); + _buffer.ReadPosition += count; + return true; + } + bytes = default; + return false; + } + + public bool TryReadBytes(int count, out ReadOnlyMemory bytes) + { + CheckBounds(count); + if (BufferBytesRemaining >= count) + { + bytes = new ReadOnlyMemory(_buffer.Buffer, _buffer.ReadPosition, count); + _buffer.ReadPosition += count; + return true; + } + bytes = default; + return false; + } + + /// ReadBytes without memory management, the next read invalidates the underlying buffer(s), only use this for intermediate transformations. 
+ public ReadOnlySequence ReadBytes(int count) + { + CheckBounds(count); + if (BufferBytesRemaining >= count) + { + var result = new ReadOnlySequence(_buffer.Buffer, _buffer.ReadPosition, count); + _buffer.ReadPosition += count; + return result; + } + + var array = RentArray(count); + ReadBytes(array.AsSpan(0, count)); + return new(array, 0, count); + } + + /// ReadBytesAsync without memory management, the next read invalidates the underlying buffer(s), only use this for intermediate transformations. + public async ValueTask> ReadBytesAsync(int count, CancellationToken cancellationToken = default) + { + CheckBounds(count); + if (BufferBytesRemaining >= count) + { + var result = new ReadOnlySequence(_buffer.Buffer, _buffer.ReadPosition, count); + _buffer.ReadPosition += count; + return result; + } + + var array = RentArray(count); + await ReadBytesAsync(array.AsMemory(0, count), cancellationToken).ConfigureAwait(false); + return new(array, 0, count); + } + + public void Rewind(int count) + { + // Shut down any streaming going on on the column + DisposeUserActiveStream(async: false).GetAwaiter().GetResult(); + + if (_buffer.ReadPosition < count) + throw new ArgumentOutOfRangeException("Cannot rewind further than the buffer start"); + + if (CurrentOffset < count) + throw new ArgumentOutOfRangeException("Cannot rewind further than the current field offset"); + + _buffer.ReadPosition -= count; + } + + /// + /// + /// + /// + /// The stream length, if any + async ValueTask DisposeUserActiveStream(bool async) + { + if (_userActiveStream is { IsDisposed: false }) + { + if (async) + await _userActiveStream.DisposeAsync().ConfigureAwait(false); + else + _userActiveStream.Dispose(); + } + + _userActiveStream = null; + } + + internal bool GetCharsReadInfo(Encoding encoding, out int charsRead, out TextReader reader, out int charsOffset, out ArraySegment? 
buffer) + { + if (!IsCharsRead) + throw new InvalidOperationException("No active chars read"); + + if (_charsReadReader is null) + { + charsRead = 0; + reader = _charsReadReader = GetTextReader(encoding); + charsOffset = _charsReadOffset ??= 0; + buffer = _charsReadBuffer; + return true; + } + + charsRead = _charsRead; + reader = _charsReadReader; + charsOffset = _charsReadOffset!.Value; + buffer = _charsReadBuffer; + + return false; + } + + internal void ResetCharsRead(out int charsRead) + { + if (!IsCharsRead) + throw new InvalidOperationException("No active chars read"); + + switch (_charsReadReader) + { + case PreparedTextReader reader: + reader.Restart(); + break; + case StreamReader reader: + reader.BaseStream.Seek(0, SeekOrigin.Begin); + reader.DiscardBufferedData(); + break; + } + _charsRead = charsRead = 0; + } + + internal void AdvanceCharsRead(int charsRead) + { + _charsRead += charsRead; + _charsReadOffset = null; + _charsReadBuffer = null; + } + + internal void InitCharsRead(int dataOffset, ArraySegment? buffer, out int? charsRead) + { + if (!Resumable) + throw new InvalidOperationException("Wasn't initialized as resumed"); + + charsRead = _charsReadReader is null ? 
null : _charsRead; + _charsReadOffset = dataOffset; + _charsReadBuffer = buffer; + } + + internal PgReader Init(int fieldLength, DataFormat format, bool resumable = false) + { + if (resumable) + { + if (Resumable) + { + Debug.Assert(Initialized); + return this; + } + _resumable = true; + } + else if (Initialized) + { + if (!IsAtStart) + ThrowHelper.ThrowInvalidOperationException("Cannot be initialized to be non-resumable until a commit is issued."); + _resumable = false; + } + + // Debug.Assert(!Initialized || Resumable, "Reader wasn't properly committed before next init"); + Debug.Assert(!_requiresCleanup, "Reader wasn't properly committed before next init"); + + _fieldStartPos = _buffer.CumulativeReadPosition; + _fieldFormat = format; + _fieldSize = fieldLength; + return this; + } + + internal void StartRead(Size bufferRequirement) + { + Debug.Assert(FieldSize >= 0); + _fieldBufferRequirement = bufferRequirement; + if (ShouldBuffer(bufferRequirement)) + Buffer(bufferRequirement); + } + + internal ValueTask StartReadAsync(Size bufferRequirement, CancellationToken cancellationToken) + { + Debug.Assert(FieldSize >= 0); + _fieldBufferRequirement = bufferRequirement; + return ShouldBuffer(bufferRequirement) ? BufferAsync(bufferRequirement, cancellationToken) : new(); + } + + internal void EndRead() + { + if (_resumable) + return; + + // If it was upper bound we should consume. + if (_fieldBufferRequirement is { Kind: SizeKind.UpperBound }) + { + Consume(FieldRemaining); + return; + } + + if (FieldOffset != FieldSize) + ThrowNotConsumedExactly(); + } + + internal ValueTask EndReadAsync() + { + if (_resumable) + return new(); + + // If it was upper bound we should consume. 
+ if (_fieldBufferRequirement is { Kind: SizeKind.UpperBound }) + return ConsumeAsync(FieldRemaining); + + if (FieldOffset != FieldSize) + ThrowNotConsumedExactly(); + return new(); + } + + internal async ValueTask BeginNestedRead(bool async, int size, Size bufferRequirement, CancellationToken cancellationToken = default) + { + if (size > CurrentRemaining) + throw new ArgumentOutOfRangeException(nameof(size), "Cannot begin a read for a larger size than the current remaining size."); + + if (size < 0) + throw new ArgumentOutOfRangeException(nameof(size), "Cannot be negative"); + + var previousSize = CurrentSize; + var previousStartPos = _currentStartPos; + var previousBufferRequirement = CurrentBufferRequirement; + _currentSize = size; + _currentBufferRequirement = bufferRequirement; + _currentStartPos = FieldOffset; + + await Buffer(async, bufferRequirement, cancellationToken).ConfigureAwait(false); + return new NestedReadScope(async, this, previousSize, previousStartPos, previousBufferRequirement); + } + + public NestedReadScope BeginNestedRead(int size, Size bufferRequirement) + => BeginNestedRead(async: false, size, bufferRequirement, CancellationToken.None).GetAwaiter().GetResult(); + + public ValueTask BeginNestedReadAsync(int size, Size bufferRequirement, CancellationToken cancellationToken = default) + => BeginNestedRead(async: true, size, bufferRequirement, cancellationToken); + + internal void Seek(int offset) + { + if (CurrentOffset > offset) + Rewind(CurrentOffset - offset); + else if (CurrentOffset < offset) + Consume(offset - CurrentOffset); + } + + internal async ValueTask Consume(bool async, int? count = null, CancellationToken cancellationToken = default) + { + if (count <= 0 || FieldSize < 0 || FieldRemaining == 0) + return; + + var remaining = count ?? CurrentRemaining; + CheckBounds(remaining); + + var origOffset = FieldOffset; + // A breaking exception unwind from a nested scope should not try to consume its remaining data. 
+ if (!_buffer.Connector.IsBroken) + await _buffer.Skip(remaining, async).ConfigureAwait(false); + + Debug.Assert(FieldRemaining == FieldSize - origOffset - remaining); + } + + public void Consume(int? count = null) => Consume(async: false, count).GetAwaiter().GetResult(); + public ValueTask ConsumeAsync(int? count = null, CancellationToken cancellationToken = default) => Consume(async: true, count, cancellationToken); + + internal void ThrowIfStreamActive() + { + if (_userActiveStream is { IsDisposed: false}) + ThrowHelper.ThrowInvalidOperationException("A stream is already open for this reader"); + } + + internal bool CommitHasIO(bool resuming) => Initialized && !resuming && FieldRemaining > 0; + internal ValueTask Commit(bool async, bool resuming) + { + if (!Initialized) + return new(); + + if (resuming) + { + if (!Resumable) + ThrowHelper.ThrowInvalidOperationException("Cannot resume a non-resumable read."); + return new(); + } + + // We don't rely on CurrentRemaining, just to make sure we consume fully in the event of a nested scope not being disposed. + // Also shut down any streaming, pooled arrays etc. + if (_requiresCleanup || FieldRemaining > 0) + return Slow(async); + + _fieldSize = default; + _fieldStartPos = -1; + _resumable = false; + _fieldFormat = default; + if (_currentSize is not -1) + { + _currentStartPos = 0; + _currentBufferRequirement = default; + _currentSize = -1; + } + Debug.Assert(!Initialized); + return new(); + + async ValueTask Slow(bool async) + { + // Shut down any streaming and pooling going on on the column. 
+ if (_requiresCleanup) + { + if (_userActiveStream is { IsDisposed: false }) + await DisposeUserActiveStream(async).ConfigureAwait(false); + + if (_pooledArray is not null) + { + ArrayPool.Return(_pooledArray); + _pooledArray = null; + } + + if (_charsReadReader is not null) + { + _charsReadReader.Dispose(); + _charsReadReader = null; + _charsRead = default; + } + _requiresCleanup = false; + } + + await Consume(async, count: FieldRemaining).ConfigureAwait(false); + _fieldSize = default; + _fieldStartPos = -1; + _resumable = false; + _fieldFormat = default; + _currentStartPos = 0; + _currentBufferRequirement = default; + _currentSize = -1; + Debug.Assert(!Initialized); + } + } + + byte[] RentArray(int count) + { + _requiresCleanup = true; + var pooledArray = _pooledArray; + var array = _pooledArray = ArrayPool.Rent(count); + if (pooledArray is not null) + ArrayPool.Return(pooledArray); + return array; + } + + int GetBufferRequirementByteCount(Size bufferRequirement) + => bufferRequirement is { Kind: SizeKind.UpperBound } + ? 
Math.Min(CurrentRemaining, bufferRequirement.Value) + : bufferRequirement.GetValueOrDefault(); + + internal bool ShouldBufferCurrent() => ShouldBuffer(CurrentBufferRequirement); + + public bool ShouldBuffer(Size bufferRequirement) + => ShouldBuffer(GetBufferRequirementByteCount(bufferRequirement)); + public bool ShouldBuffer(int byteCount) + { + return BufferBytesRemaining < byteCount && ShouldBufferSlow(); + + [MethodImpl(MethodImplOptions.NoInlining)] + bool ShouldBufferSlow() + { + if (byteCount > BufferSize) + ThrowArgumentOutOfRange(); + if (byteCount > CurrentRemaining) + ThrowArgumentOutOfRangeOfValue(); + + return true; + } + + static void ThrowArgumentOutOfRange() + => throw new ArgumentOutOfRangeException(nameof(byteCount), + "Buffer requirement is larger than the buffer size, this can never succeed by buffering data but requires a larger buffer size instead."); + static void ThrowArgumentOutOfRangeOfValue() + => throw new ArgumentOutOfRangeException(nameof(byteCount), + "Buffer requirement is larger than the remaining length of the value, make sure the value is always at least this size or use an upper bound requirement instead."); + } + + public void Buffer(Size bufferRequirement) + => Buffer(GetBufferRequirementByteCount(bufferRequirement)); + public void Buffer(int byteCount) => _buffer.Ensure(byteCount, async: false).GetAwaiter().GetResult(); + + public ValueTask BufferAsync(Size bufferRequirement, CancellationToken cancellationToken) + => BufferAsync(GetBufferRequirementByteCount(bufferRequirement), cancellationToken); + public ValueTask BufferAsync(int byteCount, CancellationToken cancellationToken) => new(_buffer.EnsureAsync(byteCount)); + + internal ValueTask Buffer(bool async, Size bufferRequirement, CancellationToken cancellationToken) + => Buffer(async, GetBufferRequirementByteCount(bufferRequirement), cancellationToken); + internal ValueTask Buffer(bool async, int byteCount, CancellationToken cancellationToken) + { + if (async) + return 
BufferAsync(byteCount, cancellationToken); + + Buffer(byteCount); + return new(); + } + + void ThrowNotConsumedExactly() => + throw _buffer.Connector.Break( + new InvalidOperationException( + FieldOffset < FieldSize + ? $"The read on this field has not consumed all of its bytes (pos: {FieldOffset}, len: {FieldSize})" + : $"The read on this field has consumed all of its bytes and read into the subsequent bytes (pos: {FieldOffset}, len: {FieldSize})")); +} + +public readonly struct NestedReadScope : IDisposable, IAsyncDisposable +{ + readonly PgReader _reader; + readonly int _previousSize; + readonly int _previousStartPos; + readonly Size _previousBufferRequirement; + readonly bool _async; + + internal NestedReadScope(bool async, PgReader reader, int previousSize, int previousStartPos, Size previousBufferRequirement) + { + _async = async; + _reader = reader; + _previousSize = previousSize; + _previousStartPos = previousStartPos; + _previousBufferRequirement = previousBufferRequirement; + } + + public void Dispose() + { + if (_async) + throw new InvalidOperationException("Cannot synchronously dispose async scopes, call DisposeAsync instead."); + DisposeAsync().GetAwaiter().GetResult(); + } + + public ValueTask DisposeAsync() + { + if (_reader.CurrentRemaining > 0) + { + if (_async) + return AsyncCore(_reader, _previousSize, _previousStartPos, _previousBufferRequirement); + + _reader.Consume(); + } + _reader.Revert(_previousSize, _previousStartPos, _previousBufferRequirement); + return new(); + + static async ValueTask AsyncCore(PgReader reader, int previousSize, int previousStartPos, Size previousBufferRequirement) + { + await reader.ConsumeAsync().ConfigureAwait(false); + reader.Revert(previousSize, previousStartPos, previousBufferRequirement); + } + } +} diff --git a/src/Npgsql/Internal/PgSerializerOptions.cs b/src/Npgsql/Internal/PgSerializerOptions.cs new file mode 100644 index 0000000000..5ee9077458 --- /dev/null +++ b/src/Npgsql/Internal/PgSerializerOptions.cs 
@@ -0,0 +1,146 @@ +using System; +using System.Runtime.CompilerServices; +using System.Text; +using Npgsql.Internal.Postgres; +using Npgsql.NameTranslation; +using Npgsql.PostgresTypes; + +namespace Npgsql.Internal; + +public sealed class PgSerializerOptions +{ + /// + /// Used by GetSchema to be able to attempt to resolve all type catalog types without exceptions. + /// + [field: ThreadStatic] + internal static bool IntrospectionCaller { get; set; } + + readonly Func? _timeZoneProvider; + readonly object _typeInfoCache; + + internal PgSerializerOptions(NpgsqlDatabaseInfo databaseInfo, Func? timeZoneProvider = null) + { + _timeZoneProvider = timeZoneProvider; + DatabaseInfo = databaseInfo; + UnknownPgType = databaseInfo.GetPostgresType("unknown"); + _typeInfoCache = PortableTypeIds ? new TypeInfoCache(this) : new TypeInfoCache(this); + } + + // Represents the 'unknown' type, which can be used for reading and writing arbitrary text values. + public PostgresType UnknownPgType { get; } + + // Used purely for type mapping, where we don't have a full set of types but resolvers might know enough. + readonly bool _introspectionInstance; + internal bool IntrospectionMode + { + get => _introspectionInstance || IntrospectionCaller; + init => _introspectionInstance = value; + } + + /// Whether options should return a portable identifier (data type name) to prevent any generated id (oid) confusion across backends, this comes with a perf penalty. + internal bool PortableTypeIds { get; init; } + internal NpgsqlDatabaseInfo DatabaseInfo { get; } + + public string TimeZone => _timeZoneProvider?.Invoke() ?? 
throw new NotSupportedException("TimeZone was not configured."); + public Encoding TextEncoding { get; init; } = Encoding.UTF8; + public required IPgTypeInfoResolver TypeInfoResolver { get; init; } + public bool EnableDateTimeInfinityConversions { get; init; } = true; + + public ArrayNullabilityMode ArrayNullabilityMode { get; init; } = ArrayNullabilityMode.Never; + public INpgsqlNameTranslator DefaultNameTranslator { get; init; } = NpgsqlSnakeCaseNameTranslator.Instance; + + public static Type[] WellKnownTextTypes { get; } = { + typeof(string), typeof(char[]), typeof(byte[]), + typeof(ArraySegment), typeof(ArraySegment?), + typeof(char), typeof(char?) + }; + + // We don't verify the kind of pgTypeId we get, it'll throw if it's incorrect. + // It's up to the caller to call GetCanonicalTypeId if they want to use an oid instead of a DataTypeName. + // This also makes it easier to realize it should be a cached value if infos for different CLR types are requested for the same + // pgTypeId. Effectively it should be 'impossible' to get the wrong kind via any PgConverterOptions api which is what this is mainly + // for. + PgTypeInfo? GetTypeInfoCore(Type? type, PgTypeId? pgTypeId, bool defaultTypeFallback) + => PortableTypeIds + ? Unsafe.As>(_typeInfoCache).GetOrAddInfo(type, pgTypeId?.DataTypeName, defaultTypeFallback) + : Unsafe.As>(_typeInfoCache).GetOrAddInfo(type, pgTypeId?.Oid, defaultTypeFallback); + + public PgTypeInfo? GetDefaultTypeInfo(PostgresType pgType) + => GetTypeInfoCore(null, ToCanonicalTypeId(pgType), false); + + public PgTypeInfo? GetDefaultTypeInfo(PgTypeId pgTypeId) + => GetTypeInfoCore(null, pgTypeId, false); + + public PgTypeInfo? GetTypeInfo(Type type, PostgresType pgType) + => GetTypeInfoCore(type, ToCanonicalTypeId(pgType), false); + + public PgTypeInfo? GetTypeInfo(Type type, PgTypeId? pgTypeId = null) + => GetTypeInfoCore(type, pgTypeId, false); + + public PgTypeInfo? 
GetObjectOrDefaultTypeInfo(PostgresType pgType) + => GetTypeInfoCore(typeof(object), ToCanonicalTypeId(pgType), true); + + public PgTypeInfo? GetObjectOrDefaultTypeInfo(PgTypeId pgTypeId) + => GetTypeInfoCore(typeof(object), pgTypeId, true); + + // If a given type id is in the opposite form than what was expected it will be mapped according to the requirement. + internal PgTypeId GetCanonicalTypeId(PgTypeId pgTypeId) + => PortableTypeIds ? DatabaseInfo.GetDataTypeName(pgTypeId) : DatabaseInfo.GetOid(pgTypeId); + + // If a given type id is in the opposite form than what was expected it will be mapped according to the requirement. + internal PgTypeId ToCanonicalTypeId(PostgresType pgType) + => PortableTypeIds ? pgType.DataTypeName : (Oid)pgType.OID; + + public PgTypeId GetArrayTypeId(PgTypeId elementTypeId) + { + // Static affordance to help the global type mapper. + if (PortableTypeIds && elementTypeId.IsDataTypeName) + return elementTypeId.DataTypeName.ToArrayName(); + + return ToCanonicalTypeId(DatabaseInfo.GetPostgresType(elementTypeId).Array + ?? throw new NotSupportedException("Cannot resolve array type id")); + } + + public PgTypeId GetArrayElementTypeId(PgTypeId arrayTypeId) + { + // Static affordance to help the global type mapper. + if (PortableTypeIds && arrayTypeId.IsDataTypeName && arrayTypeId.DataTypeName.UnqualifiedNameSpan.StartsWith("_".AsSpan(), StringComparison.Ordinal)) + return new DataTypeName(arrayTypeId.DataTypeName.Schema + arrayTypeId.DataTypeName.UnqualifiedNameSpan.Slice(1).ToString()); + + return ToCanonicalTypeId((DatabaseInfo.GetPostgresType(arrayTypeId) as PostgresArrayType)?.Element + ?? throw new NotSupportedException("Cannot resolve array element type id")); + } + + public PgTypeId GetRangeTypeId(PgTypeId subtypeTypeId) => + ToCanonicalTypeId(DatabaseInfo.GetPostgresType(subtypeTypeId).Range + ?? 
throw new NotSupportedException("Cannot resolve range type id")); + + public PgTypeId GetRangeSubtypeTypeId(PgTypeId rangeTypeId) => + ToCanonicalTypeId((DatabaseInfo.GetPostgresType(rangeTypeId) as PostgresRangeType)?.Subtype + ?? throw new NotSupportedException("Cannot resolve range subtype type id")); + + public PgTypeId GetMultirangeTypeId(PgTypeId rangeTypeId) => + ToCanonicalTypeId((DatabaseInfo.GetPostgresType(rangeTypeId) as PostgresRangeType)?.Multirange + ?? throw new NotSupportedException("Cannot resolve multirange type id")); + + public PgTypeId GetMultirangeElementTypeId(PgTypeId multirangeTypeId) => + ToCanonicalTypeId((DatabaseInfo.GetPostgresType(multirangeTypeId) as PostgresMultirangeType)?.Subrange + ?? throw new NotSupportedException("Cannot resolve multirange element type id")); + + public bool TryGetDataTypeName(PgTypeId pgTypeId, out DataTypeName dataTypeName) + { + if (DatabaseInfo.FindPostgresType(pgTypeId) is { } pgType) + { + dataTypeName = pgType.DataTypeName; + return true; + } + + dataTypeName = default; + return false; + } + + public DataTypeName GetDataTypeName(PgTypeId pgTypeId) + => !TryGetDataTypeName(pgTypeId, out var name) + ? 
throw new ArgumentException("Unknown type id", nameof(pgTypeId)) + : name; +} diff --git a/src/Npgsql/Internal/PgStreamingConverter.cs b/src/Npgsql/Internal/PgStreamingConverter.cs new file mode 100644 index 0000000000..09176f82d9 --- /dev/null +++ b/src/Npgsql/Internal/PgStreamingConverter.cs @@ -0,0 +1,87 @@ +using System; +using System.Diagnostics; +using System.Runtime.CompilerServices; +using System.Threading; +using System.Threading.Tasks; + +namespace Npgsql.Internal; + +public abstract class PgStreamingConverter : PgConverter +{ + protected PgStreamingConverter(bool customDbNullPredicate = false) : base(customDbNullPredicate) { } + + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.None; + return format is DataFormat.Binary; + } + + internal sealed override unsafe ValueTask ReadAsObject( + bool async, PgReader reader, CancellationToken cancellationToken) + { + if (!async) + return new(Read(reader)!); + + var task = ReadAsync(reader, cancellationToken); + return task.IsCompletedSuccessfully + ? new(task.Result!) + : PgStreamingConverterHelpers.AwaitTask(task.AsTask(), new(this, &BoxResult)); + + static object BoxResult(Task task) + { + Debug.Assert(task is Task); + return new ValueTask(Unsafe.As>(task)).Result; + } + } + + internal sealed override ValueTask WriteAsObject(bool async, PgWriter writer, object value, CancellationToken cancellationToken) + { + if (async) + return WriteAsync(writer, (T)value, cancellationToken); + + Write(writer, (T)value); + return new(); + } +} + +// Using a function pointer here is safe against assembly unloading as the instance reference that the static pointer method lives on is +// passed along. As such the instance cannot be collected by the gc which means the entire assembly is prevented from unloading until we're +// done. +// The alternatives are: +// 1. 
Add a virtual method and make AwaitTask call into it (bloating the vtable of all derived types). +// 2. Using a delegate, meaning we add a static field + an alloc per T + metadata, slightly slower dispatch perf so overall strictly worse +// as well. +static class PgStreamingConverterHelpers +{ + // Split out from the generic class to amortize the huge size penalty per async state machine, which would otherwise be per + // instantiation. +#if NET6_0_OR_GREATER + [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder<>))] +#endif + public static async ValueTask AwaitTask(Task task, Continuation continuation) + { + await task.ConfigureAwait(false); + var result = continuation.Invoke(task); + // Guarantee the type stays loaded until the function pointer call is done. + GC.KeepAlive(continuation.Handle); + return result; + } + + // Split out into a struct as unsafe and async don't mix, while we do want a nicely typed function pointer signature to prevent + // mistakes. + public readonly unsafe struct Continuation + { + public object Handle { get; } + readonly delegate* _continuation; + + /// A reference to the type that houses the static method points to. + /// The continuation + public Continuation(object handle, delegate* continuation) + { + Handle = handle; + _continuation = continuation; + } + + public object Invoke(Task task) => _continuation(task); + } +} diff --git a/src/Npgsql/Internal/PgTypeInfo.cs b/src/Npgsql/Internal/PgTypeInfo.cs new file mode 100644 index 0000000000..8b0dc22c2d --- /dev/null +++ b/src/Npgsql/Internal/PgTypeInfo.cs @@ -0,0 +1,362 @@ +using System; +using System.Diagnostics.CodeAnalysis; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Internal; + +public class PgTypeInfo +{ + readonly bool _canBinaryConvert; + readonly BufferRequirements _binaryBufferRequirements; + + readonly bool _canTextConvert; + readonly BufferRequirements _textBufferRequirements; + + PgTypeInfo(PgSerializerOptions options, Type type, Type? 
unboxedType) + { + if (unboxedType is not null && !type.IsAssignableFrom(unboxedType)) + throw new ArgumentException("A value of unboxed type is not assignable to converter type", nameof(unboxedType)); + + Options = options; + IsBoxing = unboxedType is not null; + Type = unboxedType ?? type; + SupportsWriting = true; + } + + public PgTypeInfo(PgSerializerOptions options, PgConverter converter, PgTypeId pgTypeId, Type? unboxedType = null) + : this(options, converter.TypeToConvert, unboxedType) + { + Converter = converter; + PgTypeId = options.GetCanonicalTypeId(pgTypeId); + _canBinaryConvert = converter.CanConvert(DataFormat.Binary, out _binaryBufferRequirements); + _canTextConvert = converter.CanConvert(DataFormat.Text, out _textBufferRequirements); + } + + private protected PgTypeInfo(PgSerializerOptions options, Type type, PgConverterResolution? resolution, Type? unboxedType = null) + : this(options, type, unboxedType) + { + if (resolution is { } res) + { + // Resolutions should always be in canonical form already. + if (options.PortableTypeIds && res.PgTypeId.IsOid || !options.PortableTypeIds && res.PgTypeId.IsDataTypeName) + throw new ArgumentException("Given type id is not in canonical form. Make sure ConverterResolver implementations close over canonical ids, e.g. by calling options.GetCanonicalTypeId(pgTypeId) on the constructor arguments.", nameof(PgTypeId)); + + PgTypeId = res.PgTypeId; + Converter = res.Converter; + _canBinaryConvert = res.Converter.CanConvert(DataFormat.Binary, out _binaryBufferRequirements); + _canTextConvert = res.Converter.CanConvert(DataFormat.Text, out _textBufferRequirements); + } + } + + bool HasCachedInfo(PgConverter converter) => ReferenceEquals(Converter, converter); + + public Type Type { get; } + public PgSerializerOptions Options { get; } + + public bool SupportsWriting { get; init; } + public DataFormat? 
PreferredFormat { get; init; } + + // Doubles as the storage for the converter coming from a default resolution (used to confirm whether we can use cached info). + PgConverter? Converter { get; } + [MemberNotNullWhen(false, nameof(Converter))] + [MemberNotNullWhen(false, nameof(PgTypeId))] + internal bool IsResolverInfo => GetType() == typeof(PgResolverTypeInfo); + + // TODO pull validate from options + internal exempt for perf? + internal bool ValidateResolution => true; + + // Used for internal converters to save on binary bloat. + internal bool IsBoxing { get; } + + public PgTypeId? PgTypeId { get; } + + // Having it here so we can easily extend any behavior. + internal void DisposeWriteState(object writeState) + { + if (writeState is IDisposable disposable) + disposable.Dispose(); + } + + internal bool TryBind(Field field, DataFormat format, out PgConverterInfo info) + { + switch (this) + { + case { IsResolverInfo: false }: + // Type lies when IsBoxing is true. + var typeToConvert = IsBoxing ? typeof(object) : Type; + if (!CachedCanConvert(format, out var bufferRequirements)) + { + info = default; + return false; + } + info = CreateConverterInfo(bufferRequirements, isRead: true, Converter, typeToConvert); + return true; + case PgResolverTypeInfo resolverInfo: + var resolution = resolverInfo.GetResolution(field); + if (!HasCachedInfo(resolution.Converter) + ? !CachedCanConvert(format, out bufferRequirements) + : !resolution.Converter.CanConvert(format, out bufferRequirements)) + { + info = default; + return false; + } + info = CreateConverterInfo(bufferRequirements, isRead: true, resolution.Converter, resolution.Converter.TypeToConvert); + return true; + default: + throw new NotSupportedException("Should not happen, please file a bug."); + } + } + + // Bind for reading. 
+ internal PgConverterInfo Bind(Field field, DataFormat format) + { + if (!TryBind(field, format, out var info)) + ThrowHelper.ThrowInvalidOperationException($"Resolved converter does not support {format} format."); + + return info; + } + + public PgConverterResolution GetResolution(T? value) + { + // Other cases, to keep binary bloat minimal. + if (this is not PgResolverTypeInfo resolverInfo) + return GetObjectResolution(null); + var resolution = resolverInfo.GetResolution(value, null); + return resolution ?? resolverInfo.GetDefaultResolution(null); + } + + // Note: this api is not called GetResolutionAsObject as the semantics are extended, DBNull is a NULL value for all object values. + public PgConverterResolution GetObjectResolution(object? value) + { + switch (this) + { + case { IsResolverInfo: false }: + return new(Converter, PgTypeId.GetValueOrDefault()); + case PgResolverTypeInfo resolverInfo: + PgConverterResolution? resolution = null; + if (value is not DBNull) + resolution = resolverInfo.GetResolutionAsObject(value, null); + return resolution ?? resolverInfo.GetDefaultResolution(null); + default: + return ThrowNotSupported(); + } + + static PgConverterResolution ThrowNotSupported() + => throw new NotSupportedException("Should not happen, please file a bug."); + } + + /// Throws if the type info is undecided in its PgTypeId. 
+ internal PgConverterResolution GetConcreteResolution() + { + var pgTypeId = PgTypeId; + if (pgTypeId is null) + ThrowHelper.ThrowInvalidOperationException("PgTypeId is null."); + + return this switch + { + { IsResolverInfo: false } => new(Converter, pgTypeId.GetValueOrDefault()), + PgResolverTypeInfo resolverInfo => resolverInfo.GetDefaultResolution(null), + _ => ThrowNotSupported() + }; + + static PgConverterResolution ThrowNotSupported() + => throw new NotSupportedException("Should not happen, please file a bug."); + } + + PgConverterInfo CreateConverterInfo(BufferRequirements bufferRequirements, bool isRead, PgConverter converter, Type typeToConvert) + => new() + { + TypeInfo = this, + Converter = converter, + AsObject = Type != typeToConvert, + BufferRequirement = isRead ? bufferRequirements.Read : bufferRequirements.Write + }; + + bool CachedCanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + if (format is DataFormat.Binary) + { + bufferRequirements = _binaryBufferRequirements; + return _canBinaryConvert; + } + + bufferRequirements = _textBufferRequirements; + return _canTextConvert; + } + + public BufferRequirements? GetBufferRequirements(PgConverter converter, DataFormat format) + { + var success = HasCachedInfo(converter) + ? CachedCanConvert(format, out var bufferRequirements) + : converter.CanConvert(format, out bufferRequirements); + + return success ? bufferRequirements : null; + } + + // Bind for writing. + /// When result is null, the value was interpreted to be a SQL NULL. + internal PgConverterInfo? Bind(PgConverter converter, T? value, out Size size, out object? writeState, out DataFormat format, DataFormat? formatPreference = null) + { + // Basically exists to catch cases like object[] resolving a polymorphic read converter, better to fail during binding than writing. 
+ if (!SupportsWriting) + ThrowHelper.ThrowNotSupportedException($"Writing {Type} is not supported for this type info."); + + format = ResolveFormat(converter, out var bufferRequirements, formatPreference ?? PreferredFormat); + if (converter.IsDbNull(value)) + { + writeState = null; + size = default; + return null; + } + writeState = null; + var context = new SizeContext(format, bufferRequirements.Write); + size = bufferRequirements.Write is { Kind: SizeKind.Exact } req ? req : converter.GetSize(context, value, ref writeState); + + if (size is { Kind: SizeKind.Unknown}) + ThrowHelper.ThrowNotSupportedException($"Returning {nameof(Size.Unknown)} from {nameof(PgConverter.GetSize)} is not supported yet."); + + return new() + { + TypeInfo = this, + Converter = converter, + AsObject = IsBoxing, + BufferRequirement = bufferRequirements.Write, + }; + } + + // Bind for writing. + // Note: this api is not called BindAsObject as the semantics are extended, DBNull is a NULL value for all object values. + /// When result is null or DBNull, the value was interpreted to be a SQL NULL. + internal PgConverterInfo? BindObject(PgConverter converter, object? value, out Size size, out object? writeState, out DataFormat format, DataFormat? formatPreference = null) + { + // Basically exists to catch cases like object[] resolving a polymorphic read converter, better to fail during binding than writing. + if (!SupportsWriting) + throw new NotSupportedException($"Writing {Type} is not supported for this type info."); + + format = ResolveFormat(converter, out var bufferRequirements, formatPreference ?? PreferredFormat); + + // Given SQL values are effectively a union of T | NULL we support DBNull.Value to signify a NULL value for all types except DBNull in this api. 
+ if (value is DBNull && Type != typeof(DBNull) || converter.IsDbNullAsObject(value)) + { + writeState = null; + size = default; + return null; + } + writeState = null; + var context = new SizeContext(format, bufferRequirements.Write); + size = bufferRequirements.Write is { Kind: SizeKind.Exact } req ? req : converter.GetSizeAsObject(context, value, ref writeState); + + if (size is { Kind: SizeKind.Unknown}) + ThrowHelper.ThrowNotSupportedException($"Returning {nameof(Size.Unknown)} from {nameof(PgConverter.GetSizeAsObject)} is not supported yet."); + + return new() + { + TypeInfo = this, + Converter = converter, + AsObject = Type != typeof(object), + BufferRequirement = bufferRequirements.Write, + }; + } + + // If we don't have a converter stored we must ask the retrieved one. + DataFormat ResolveFormat(PgConverter converter, out BufferRequirements bufferRequirements, DataFormat? formatPreference = null) + { + switch (formatPreference) + { + // The common case, no preference means we default to binary if supported. + case null or DataFormat.Binary when HasCachedInfo(converter) ? CachedCanConvert(DataFormat.Binary, out bufferRequirements) : converter.CanConvert(DataFormat.Binary, out bufferRequirements): + return DataFormat.Binary; + // In this case we either prefer text or we have no preference and our converter doesn't support binary. + case null or DataFormat.Text: + var canTextConvert = HasCachedInfo(converter) ? CachedCanConvert(DataFormat.Text, out bufferRequirements) : converter.CanConvert(DataFormat.Text, out bufferRequirements); + if (!canTextConvert) + { + if (formatPreference is null) + throw new InvalidOperationException("Converter doesn't support any data format."); + // Rerun without preference. 
+ return ResolveFormat(converter, out bufferRequirements); + } + return DataFormat.Text; + default: + throw new ArgumentOutOfRangeException(); + } + } +} + +public sealed class PgResolverTypeInfo : PgTypeInfo +{ + internal readonly PgConverterResolver _converterResolver; + + public PgResolverTypeInfo(PgSerializerOptions options, PgConverterResolver converterResolver, PgTypeId? pgTypeId, Type? unboxedType = null) + : base(options, + converterResolver.TypeToConvert, + pgTypeId is { } typeId ? ResolveDefaultId(options, converterResolver, typeId) : null, + // We always mark resolvers with type object as boxing, as they may freely return converters for any type (see PgConverterResolver.Validate). + unboxedType ?? (converterResolver.TypeToConvert == typeof(object) ? typeof(object) : null)) + => _converterResolver = converterResolver; + + // We'll always validate the default resolution, the info will be re-used so there is no real downside. + static PgConverterResolution ResolveDefaultId(PgSerializerOptions options, PgConverterResolver converterResolver, PgTypeId typeId) + => converterResolver.GetDefaultInternal(validate: true, options.PortableTypeIds, options.GetCanonicalTypeId(typeId)); + + public PgConverterResolution? GetResolution(T? value, PgTypeId? expectedPgTypeId) + { + return _converterResolver is PgConverterResolver resolverT + ? resolverT.GetInternal(this, value, expectedPgTypeId ?? PgTypeId) + : ThrowNotSupportedType(typeof(T)); + + PgConverterResolution ThrowNotSupportedType(Type? type) + => throw new NotSupportedException(IsBoxing + ? "TypeInfo only supports boxing conversions, call GetResolutionAsObject instead." + : $"TypeInfo is not of type {type}"); + } + + public PgConverterResolution? GetResolutionAsObject(object? value, PgTypeId? expectedPgTypeId) + => _converterResolver.GetAsObjectInternal(this, value, expectedPgTypeId ?? 
PgTypeId); + + public PgConverterResolution GetResolution(Field field) + => _converterResolver.GetInternal(this, field); + + public PgConverterResolution GetDefaultResolution(PgTypeId? pgTypeId) + => _converterResolver.GetDefaultInternal(ValidateResolution, Options.PortableTypeIds, pgTypeId ?? PgTypeId); +} + +public readonly struct PgConverterResolution +{ + public PgConverterResolution(PgConverter converter, PgTypeId pgTypeId) + { + Converter = converter; + PgTypeId = pgTypeId; + } + + public PgConverter Converter { get; } + public PgTypeId PgTypeId { get; } + + public PgConverter GetConverter() => (PgConverter)Converter; +} + +readonly struct PgConverterInfo +{ + public bool IsDefault => TypeInfo is null; + + public Type TypeToConvert + { + get + { + // Object typed resolvers can return any type of converter, so we check the type of the converter instead. + // We cannot do this in general as we should respect the 'unboxed type' of infos, which can differ from the converter type. + if (TypeInfo.IsResolverInfo && TypeInfo.Type == typeof(object)) + return Converter.TypeToConvert; + + return TypeInfo.Type; + } + } + + public required PgTypeInfo TypeInfo { get; init; } + public required PgConverter Converter { get; init; } + public required Size BufferRequirement { get; init; } + // Whether Converter.TypeToConvert matches the PgTypeInfo.Type, if it doesn't object apis and a downcast should be used. 
+ public required bool AsObject { get; init; } + + public PgConverter GetConverter() => (PgConverter)Converter; +} diff --git a/src/Npgsql/Internal/PgWriter.cs b/src/Npgsql/Internal/PgWriter.cs new file mode 100644 index 0000000000..c1e2983e17 --- /dev/null +++ b/src/Npgsql/Internal/PgWriter.cs @@ -0,0 +1,571 @@ +using System; +using System.Buffers; +using System.Buffers.Binary; +using System.Diagnostics; +using System.IO; +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; +using System.Text; +using System.Threading; +using System.Threading.Tasks; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Internal; + +enum FlushMode +{ + None, + Blocking, + NonBlocking +} + +// A streaming alternative to a System.IO.Stream, instead based on the preferable IBufferWriter. +interface IStreamingWriter: IBufferWriter +{ + void Flush(TimeSpan timeout = default); + ValueTask FlushAsync(CancellationToken cancellationToken = default); +} + +sealed class NpgsqlBufferWriter : IStreamingWriter +{ + readonly NpgsqlWriteBuffer _buffer; + int? 
_lastBufferSize; + public NpgsqlBufferWriter(NpgsqlWriteBuffer buffer) => _buffer = buffer; + + public void Advance(int count) + { + if (_lastBufferSize < count || _buffer.WriteSpaceLeft < count) + throw new InvalidOperationException("Cannot advance past the end of the current buffer."); + _lastBufferSize = null; + _buffer.WritePosition += count; + } + + public Memory GetMemory(int sizeHint = 0) + { + if (sizeHint > _buffer.WriteSpaceLeft) + throw new OutOfMemoryException("Not enough space left in buffer."); + + var bufferSize = _buffer.WriteSpaceLeft; + _lastBufferSize = bufferSize; + return _buffer.Buffer.AsMemory(_buffer.WritePosition, bufferSize); + } + + public Span GetSpan(int sizeHint = 0) + { + if (sizeHint > _buffer.WriteSpaceLeft) + throw new OutOfMemoryException("Not enough space left in buffer."); + + var bufferSize = _buffer.WriteSpaceLeft; + _lastBufferSize = bufferSize; + return _buffer.Buffer.AsSpan(_buffer.WritePosition, bufferSize); + } + + public void Flush(TimeSpan timeout = default) + { + if (timeout == TimeSpan.Zero) + _buffer.Flush(); + else + { + TimeSpan? originalTimeout = null; + try + { + if (timeout != TimeSpan.Zero) + { + originalTimeout = _buffer.Timeout; + _buffer.Timeout = timeout; + } + _buffer.Flush(); + } + finally + { + if (originalTimeout is { } value) + _buffer.Timeout = value; + } + } + } + + public ValueTask FlushAsync(CancellationToken cancellationToken = default) + => new(_buffer.Flush(async: true, cancellationToken)); +} + +public sealed class PgWriter +{ + readonly IBufferWriter _writer; + + byte[]? _buffer; + int _offset; + int _pos; + int _length; + + int _totalBytesWritten; + + ValueMetadata _current; + NpgsqlDatabaseInfo? 
_typeCatalog; + + internal PgWriter(IBufferWriter writer) => _writer = writer; + + internal PgWriter Init(NpgsqlDatabaseInfo typeCatalog) + { + if (_typeCatalog is not null) + throw new InvalidOperationException("Invalid concurrent use or PgWriter was not reset properly."); + + _typeCatalog = typeCatalog; + return this; + } + + internal void Reset() + { + if (_pos != _offset) + throw new InvalidOperationException("PgWriter still has uncommitted bytes."); + + _typeCatalog = null; + FlushMode = FlushMode.None; + _totalBytesWritten = 0; + ResetBuffer(); + } + + void ResetBuffer() + { + _buffer = null; + _pos = 0; + _offset = 0; + _length = 0; + } + + internal FlushMode FlushMode { get; private set; } + + internal PgWriter Refresh() + { + if (_buffer is not null) + ResetBuffer(); + return this; + } + + internal PgWriter WithFlushMode(FlushMode mode) + { + FlushMode = mode; + return this; + } + + // TODO if we're working on a normal buffer writer we should use normal Ensure (so commit and get another buffer) semantics. + void Ensure(int count = 1) + { + if (_buffer is null) + SetBuffer(); + + if (count > _length - _pos) + ThrowOutOfRange(); + + void ThrowOutOfRange() => throw new ArgumentOutOfRangeException(nameof(count), "Coud not ensure enough space in buffer."); + [MethodImpl(MethodImplOptions.NoInlining)] + void SetBuffer() + { + // GetMemory will check whether count is larger than the max buffer size. + var mem = _writer.GetMemory(count); + if (!MemoryMarshal.TryGetArray(mem, out var segment)) + throw new NotSupportedException("Only array backed writers are supported."); + + _buffer = segment.Array!; + _offset = segment.Offset; + _pos = segment.Offset; + _length = segment.Offset + segment.Count; + } + } + + Span Span => _buffer.AsSpan(_pos, _length - _pos); + + int Remaining + { + get + { + if (_buffer is null) + Ensure(count: 0); + return _length - _pos; + } + } + + void Advance(int count) => _pos += count; + + internal void Commit(int? 
expectedByteCount = null) + { + _totalBytesWritten += _pos - _offset; + _writer.Advance(_pos - _offset); + _offset = _pos; + + if (expectedByteCount is not null) + { + var totalBytesWritten = _totalBytesWritten; + _totalBytesWritten = 0; + if (totalBytesWritten != expectedByteCount) + throw new InvalidOperationException($"Bytes written ({totalBytesWritten}) and expected byte count ({expectedByteCount}) don't match."); + } + } + + internal ValueTask BeginWrite(bool async, ValueMetadata current, CancellationToken cancellationToken) + { + _current = current; + if (ShouldFlush(current.BufferRequirement)) + return Flush(async, cancellationToken); + + return new(); + } + + public ValueMetadata Current => _current; + internal Size CurrentBufferRequirement => _current.BufferRequirement; + + // When we don't know the size during writing we're using the writer buffer as a sizing mechanism. + internal bool BufferingWrite => Current.Size.Kind is SizeKind.Unknown; + + // This method lives here to remove the chances oids will be cached on converters inadvertently when data type names should be used. + // Such a mapping (for instance for array element oids) should be done per operation to ensure it is done in the context of a specific backend. 
+ public void WriteAsOid(PgTypeId pgTypeId) + { + var oid = _typeCatalog!.GetOid(pgTypeId); + WriteUInt32((uint)oid); + } + + public void WriteByte(byte value) + { + Ensure(sizeof(byte)); + Span[0] = value; + Advance(sizeof(byte)); + } + + public void WriteInt16(short value) + { + Ensure(sizeof(short)); + BinaryPrimitives.WriteInt16BigEndian(Span, value); + Advance(sizeof(short)); + } + + public void WriteInt32(int value) + { + Ensure(sizeof(int)); + BinaryPrimitives.WriteInt32BigEndian(Span, value); + Advance(sizeof(int)); + } + + public void WriteInt64(long value) + { + Ensure(sizeof(long)); + BinaryPrimitives.WriteInt64BigEndian(Span, value); + Advance(sizeof(long)); + } + + public void WriteUInt16(ushort value) + { + Ensure(sizeof(ushort)); + BinaryPrimitives.WriteUInt16BigEndian(Span, value); + Advance(sizeof(ushort)); + } + + public void WriteUInt32(uint value) + { + Ensure(sizeof(uint)); + BinaryPrimitives.WriteUInt32BigEndian(Span, value); + Advance(sizeof(uint)); + } + + public void WriteUInt64(ulong value) + { + Ensure(sizeof(ulong)); + BinaryPrimitives.WriteUInt64BigEndian(Span, value); + Advance(sizeof(ulong)); + } + + public void WriteFloat(float value) + { +#if NET5_0_OR_GREATER + Ensure(sizeof(float)); + BinaryPrimitives.WriteSingleBigEndian(Span, value); + Advance(sizeof(float)); +#else + WriteUInt32(Unsafe.As(ref value)); +#endif + } + + public void WriteDouble(double value) + { +#if NET5_0_OR_GREATER + Ensure(sizeof(double)); + BinaryPrimitives.WriteDoubleBigEndian(Span, value); + Advance(sizeof(double)); +#else + WriteUInt64(Unsafe.As(ref value)); +#endif + } + + public void WriteChars(ReadOnlySpan data, Encoding encoding) + { + // If we have more chars than bytes remaining we can immediately go to the slow path. + if (data.Length <= Remaining) + { + // If not, it's worth a shot to see if we can convert in one go. 
+ var encodedLength = encoding.GetByteCount(data); + if (!ShouldFlush(encodedLength)) + { + var count = encoding.GetBytes(data, Span); + Advance(count); + return; + } + } + Core(data, encoding); + + void Core(ReadOnlySpan data, Encoding encoding) + { + var encoder = encoding.GetEncoder(); + var minBufferSize = encoding.GetMaxByteCount(1); + + bool completed; + do + { + if (ShouldFlush(minBufferSize)) + Flush(); + Ensure(minBufferSize); + encoder.Convert(data, Span, flush: data.Length <= Span.Length, out var charsUsed, out var bytesUsed, out completed); + data = data.Slice(charsUsed); + Advance(bytesUsed); + } while (!completed); + } + } + + public ValueTask WriteCharsAsync(ReadOnlyMemory data, Encoding encoding, CancellationToken cancellationToken = default) + { + var dataSpan = data.Span; + // If we have more chars than bytes remaining we can immediately go to the slow path. + if (data.Length <= Remaining) + { + // If not, it's worth a shot to see if we can convert in one go. + var encodedLength = encoding.GetByteCount(dataSpan); + if (!ShouldFlush(encodedLength)) + { + var count = encoding.GetBytes(dataSpan, Span); + Advance(count); + return new(); + } + } + + return Core(data, encoding, cancellationToken); + + async ValueTask Core(ReadOnlyMemory data, Encoding encoding, CancellationToken cancellationToken) + { + var encoder = encoding.GetEncoder(); + var minBufferSize = encoding.GetMaxByteCount(1); + + bool completed; + do + { + if (ShouldFlush(minBufferSize)) + await FlushAsync(cancellationToken).ConfigureAwait(false); + Ensure(minBufferSize); + encoder.Convert(data.Span, Span, flush: data.Length <= Span.Length, out var charsUsed, out var bytesUsed, out completed); + data = data.Slice(charsUsed); + Advance(bytesUsed); + } while (!completed); + } + } + + public void WriteBytes(ReadOnlySpan buffer) + { + while (!buffer.IsEmpty) + { + var write = Math.Min(buffer.Length, Remaining); + buffer.Slice(0, write).CopyTo(Span); + Advance(write); + buffer = 
buffer.Slice(write); + if (Remaining is 0) + Flush(); + } + } + + public ValueTask WriteBytesAsync(ReadOnlyMemory buffer, CancellationToken cancellationToken = default) + { + if (buffer.Length <= Remaining) + { + buffer.Span.CopyTo(Span); + Advance(buffer.Length); + return new(); + } + + return Core(buffer, cancellationToken); + + async ValueTask Core(ReadOnlyMemory buffer, CancellationToken cancellationToken) + { + while (!buffer.IsEmpty) + { + var write = Math.Min(buffer.Length, Remaining); + buffer.Span.Slice(0, write).CopyTo(Span); + Advance(write); + buffer = buffer.Slice(write); + if (Remaining is 0) + await FlushAsync(cancellationToken).ConfigureAwait(false); + } + } + } + + public Stream GetStream() + => new PgWriterStream(this); + + public bool ShouldFlush(Size bufferRequirement) + => ShouldFlush(bufferRequirement is { Kind: SizeKind.UpperBound } + ? Math.Min(Current.Size.Value, bufferRequirement.Value) + : bufferRequirement.GetValueOrDefault()); + + public bool ShouldFlush(int byteCount) => Remaining < byteCount && FlushMode is not FlushMode.None; + + public void Flush(TimeSpan timeout = default) + { + switch (FlushMode) + { + case FlushMode.None: + return; + case FlushMode.NonBlocking: + throw new NotSupportedException($"Cannot call {nameof(Flush)} on a non-blocking {nameof(PgWriter)}, you might need to override {nameof(PgConverter.WriteAsync)} on {nameof(PgConverter)} if you want to call flush."); + } + + if (_writer is not IStreamingWriter writer) + throw new NotSupportedException($"Cannot call {nameof(Flush)} on a buffered {nameof(PgWriter)}, {nameof(FlushMode)}.{nameof(FlushMode.None)} should be used to prevent this."); + + Commit(); + ResetBuffer(); + writer.Flush(timeout); + } + + public ValueTask FlushAsync(CancellationToken cancellationToken = default) + { + switch (FlushMode) + { + case FlushMode.None: + return new(); + case FlushMode.Blocking: + throw new NotSupportedException($"Cannot call {nameof(FlushAsync)} on a blocking {nameof(PgWriter)}, 
call Flush instead."); + } + + if (_writer is not IStreamingWriter writer) + throw new NotSupportedException($"Cannot call {nameof(FlushAsync)} on a buffered {nameof(PgWriter)}, {nameof(FlushMode)}.{nameof(FlushMode.None)} should be used to prevent this."); + + Commit(); + ResetBuffer(); + return writer.FlushAsync(cancellationToken); + } + + internal ValueTask Flush(bool async, CancellationToken cancellationToken = default) + { + if (async) + return FlushAsync(cancellationToken); + + Flush(); + return new(); + } + + internal ValueTask BeginNestedWrite(bool async, Size bufferRequirement, int byteCount, object? state, CancellationToken cancellationToken) + { + Debug.Assert(bufferRequirement != -1); + if (ShouldFlush(bufferRequirement)) + return Core(async, bufferRequirement, byteCount, state, cancellationToken); + + _current = new() { Format = _current.Format, Size = byteCount, BufferRequirement = bufferRequirement, WriteState = state }; + + return new(new NestedWriteScope()); +#if NET6_0_OR_GREATER + [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder<>))] +#endif + async ValueTask Core(bool async, Size bufferRequirement, int byteCount, object? state, CancellationToken cancellationToken) + { + await Flush(async, cancellationToken).ConfigureAwait(false); + _current = new() { Format = _current.Format, Size = byteCount, BufferRequirement = bufferRequirement, WriteState = state }; + return new(); + } + } + + public NestedWriteScope BeginNestedWrite(Size bufferRequirement, int byteCount, object? state) + => BeginNestedWrite(async: false, bufferRequirement, byteCount, state, CancellationToken.None).GetAwaiter().GetResult(); + + public ValueTask BeginNestedWriteAsync(Size bufferRequirement, int byteCount, object? 
state, CancellationToken cancellationToken = default) + => BeginNestedWrite(async: true, bufferRequirement, byteCount, state, cancellationToken); + + sealed class PgWriterStream : Stream + { + readonly PgWriter _writer; + + internal PgWriterStream(PgWriter writer) + => _writer = writer; + + public override void Write(byte[] buffer, int offset, int count) + => Write(async: false, buffer: buffer, offset: offset, count: count, CancellationToken.None).GetAwaiter().GetResult(); + + public override Task WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + => Write(async: true, buffer: buffer, offset: offset, count: count, cancellationToken: cancellationToken); + + Task Write(bool async, byte[] buffer, int offset, int count, CancellationToken cancellationToken) + { + if (buffer is null) + throw new ArgumentNullException(nameof(buffer)); + if (offset < 0) + throw new ArgumentNullException(nameof(offset)); + if (count < 0) + throw new ArgumentNullException(nameof(count)); + if (buffer.Length - offset < count) + throw new ArgumentException("Offset and length were out of bounds for the array or count is greater than the number of elements from index to the end of the source collection."); + + if (async) + { + if (cancellationToken.IsCancellationRequested) + return Task.FromCanceled(cancellationToken); + + return _writer.WriteBytesAsync(buffer, cancellationToken).AsTask(); + } + + _writer.WriteBytes(new Span(buffer, offset, count)); + return Task.CompletedTask; + } + +#if !NETSTANDARD2_0 + public override void Write(ReadOnlySpan buffer) => _writer.WriteBytes(buffer); + + public override ValueTask WriteAsync(ReadOnlyMemory buffer, CancellationToken cancellationToken = default) + { + if (cancellationToken.IsCancellationRequested) + return new(Task.FromCanceled(cancellationToken)); + + return _writer.WriteBytesAsync(buffer, cancellationToken); + } +#endif + + public override void Flush() + => _writer.Flush(); + + public override Task 
FlushAsync(CancellationToken cancellationToken) + => _writer.FlushAsync(cancellationToken).AsTask(); + + public override bool CanRead => false; + public override bool CanWrite => true; + public override bool CanSeek => false; + + public override int Read(byte[] buffer, int offset, int count) + => throw new NotSupportedException(); + + public override Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + => throw new NotSupportedException(); + + public override long Length => throw new NotSupportedException(); + public override void SetLength(long value) + => throw new NotSupportedException(); + + public override long Position + { + get => throw new NotSupportedException(); + set => throw new NotSupportedException(); + } + public override long Seek(long offset, SeekOrigin origin) + => throw new NotSupportedException(); + } +} + +// No-op for now. +public struct NestedWriteScope : IDisposable +{ + public void Dispose() + { + } +} diff --git a/src/Npgsql/Internal/Postgres/DataTypeName.cs b/src/Npgsql/Internal/Postgres/DataTypeName.cs new file mode 100644 index 0000000000..2384ec723d --- /dev/null +++ b/src/Npgsql/Internal/Postgres/DataTypeName.cs @@ -0,0 +1,234 @@ +using System; +using System.Diagnostics; + +namespace Npgsql.Internal.Postgres; + +/// +/// Represents the fully-qualified name of a PostgreSQL type. +/// +[DebuggerDisplay("{DisplayName,nq}")] +public readonly struct DataTypeName : IEquatable +{ + /// + /// The maximum length of names in an unmodified PostgreSQL installation. + /// + /// + /// We need to respect this to get to valid names when deriving them (for multirange/arrays etc). + /// This does not include the namespace. + /// + const int NAMEDATALEN = 64 - 1; // Minus null terminator. 
+ + readonly string _value; + + DataTypeName(string fullyQualifiedDataTypeName, bool validated) + { + if (!validated) + { + var schemaEndIndex = fullyQualifiedDataTypeName.IndexOf('.'); + if (schemaEndIndex == -1) + throw new ArgumentException("Given value does not contain a schema.", nameof(fullyQualifiedDataTypeName)); + + // Friendly array syntax is the only fully qualified name quirk that's allowed by postgres (see FromDisplayName). + if (fullyQualifiedDataTypeName.AsSpan(schemaEndIndex).EndsWith("[]".AsSpan())) + fullyQualifiedDataTypeName = NormalizeName(fullyQualifiedDataTypeName); + + var typeNameLength = fullyQualifiedDataTypeName.Length - schemaEndIndex + 1; + if (typeNameLength > NAMEDATALEN) + throw new ArgumentException( + $"Name is too long and would be truncated to: {fullyQualifiedDataTypeName.Substring(0, fullyQualifiedDataTypeName.Length - typeNameLength + NAMEDATALEN)}"); + } + + _value = fullyQualifiedDataTypeName; + } + + public DataTypeName(string fullyQualifiedDataTypeName) + : this(fullyQualifiedDataTypeName, validated: false) { } + + internal static DataTypeName ValidatedName(string fullyQualifiedDataTypeName) + => new(fullyQualifiedDataTypeName, validated: true); + + // Includes schema unless it's pg_catalog. + public string DisplayName => + Value.StartsWith("pg_catalog", StringComparison.Ordinal) + ? UnqualifiedDisplayName + : Schema + "." + UnqualifiedDisplayName; + + public string UnqualifiedDisplayName => ToDisplayName(UnqualifiedNameSpan); + + public string Schema => Value.Substring(0, _value.IndexOf('.')); + internal ReadOnlySpan UnqualifiedNameSpan => Value.AsSpan().Slice(_value.IndexOf('.') + 1); + public string UnqualifiedName => Value.Substring(_value.IndexOf('.') + 1); + public string Value => _value is null ? 
ThrowDefaultException() : _value; + + static string ThrowDefaultException() => + throw new InvalidOperationException($"This operation cannot be performed on a default instance of {nameof(DataTypeName)}."); + + public static implicit operator string(DataTypeName value) => value.Value; + + public bool IsDefault => _value is null; + + public bool IsArray => UnqualifiedNameSpan.StartsWith("_".AsSpan(), StringComparison.Ordinal); + + internal static DataTypeName CreateFullyQualifiedName(string dataTypeName) + => dataTypeName.IndexOf('.') != -1 ? new(dataTypeName) : new("pg_catalog." + dataTypeName); + + // Static transform as defined by https://www.postgresql.org/docs/current/sql-createtype.html#SQL-CREATETYPE-ARRAY + // We don't have to deal with [] as we're always starting from a normalized fully qualified name. + public DataTypeName ToArrayName() + { + var unqualifiedNameSpan = UnqualifiedNameSpan; + if (unqualifiedNameSpan.StartsWith("_".AsSpan(), StringComparison.Ordinal)) + return this; + + var unqualifiedName = unqualifiedNameSpan.ToString(); + if (unqualifiedName.Length + "_".Length > NAMEDATALEN) + unqualifiedName = unqualifiedName.Substring(0, NAMEDATALEN - "_".Length); + + return new(Schema + "._" + unqualifiedName); + } + + // Static transform as defined by https://www.postgresql.org/docs/current/sql-createtype.html#SQL-CREATETYPE-RANGE + // Manual testing on PG confirmed it's only the first occurence of 'range' that gets replaced. + public DataTypeName ToDefaultMultirangeName() + { + var unqualifiedNameSpan = UnqualifiedNameSpan; + if (UnqualifiedNameSpan.IndexOf("multirange".AsSpan(), StringComparison.Ordinal) != -1) + return this; + + var unqualifiedName = unqualifiedNameSpan.ToString(); + var rangeIndex = unqualifiedName.IndexOf("range", StringComparison.Ordinal); + if (rangeIndex != -1) + { + var str = unqualifiedName.Substring(0, rangeIndex) + "multirange" + unqualifiedName.Substring(rangeIndex + "range".Length); + + return new($"{Schema}." 
+ (unqualifiedName.Length + "multi".Length > NAMEDATALEN + ? str.Substring(0, NAMEDATALEN - "multi".Length) + : str)); + } + + return new($"{Schema}." + (unqualifiedName.Length + "multi".Length > NAMEDATALEN + ? unqualifiedName.Substring(0, NAMEDATALEN - "_multirange".Length) + "_multirange" + : unqualifiedName + "_multirange")); + } + + // Create a DataTypeName from a broader range of valid names. + // including SQL aliases like 'timestamp without time zone', trailing facet info etc. + public static DataTypeName FromDisplayName(string displayName, string? schema = null) + { + var displayNameSpan = displayName.AsSpan().Trim(); + + // If we have a schema we're done, Postgres doesn't do display name conversions on fully qualified names. + // There is one exception and that's array syntax, which is always resolvable in both ways, while we want the canonical name. + var schemaEndIndex = displayNameSpan.IndexOf('.'); + if (schemaEndIndex is not -1 && + !displayNameSpan.Slice(schemaEndIndex).StartsWith("_".AsSpan(), StringComparison.Ordinal) && + !displayNameSpan.EndsWith("[]".AsSpan(), StringComparison.Ordinal)) + return new(displayName); + + // First we strip the schema to get the type name. + if (schemaEndIndex is not -1) + { + schema = displayNameSpan.Slice(0, schemaEndIndex).ToString(); + displayNameSpan = displayNameSpan.Slice(schemaEndIndex + 1); + } + + // Then we strip either of the two valid array representations to get the base type name (with or without facets). + var isArray = false; + if (displayNameSpan.StartsWith("_".AsSpan())) + { + isArray = true; + displayNameSpan = displayNameSpan.Slice(1); + } + else if (displayNameSpan.EndsWith("[]".AsSpan())) + { + isArray = true; + displayNameSpan = displayNameSpan.Slice(0, displayNameSpan.Length - 2); + } + + string mapped; + if (schemaEndIndex is -1) + { + // Finally we strip the facet info. 
+ var parenIndex = displayNameSpan.IndexOf('('); + if (parenIndex > -1) + displayNameSpan = displayNameSpan.Slice(0, parenIndex); + + // Map any aliases to the internal type name. + mapped = displayNameSpan.ToString() switch + { + "boolean" => "bool", + "character" => "bpchar", + "decimal" => "numeric", + "real" => "float4", + "double precision" => "float8", + "smallint" => "int2", + "integer" => "int4", + "bigint" => "int8", + "time without time zone" => "time", + "timestamp without time zone" => "timestamp", + "time with time zone" => "timetz", + "timestamp with time zone" => "timestamptz", + "bit varying" => "varbit", + "character varying" => "varchar", + var value => value + }; + } + else + { + // If we had a schema originally we stop here, see comment at schemaEndIndex. + mapped = displayNameSpan.ToString(); + } + + return new((schema ?? "pg_catalog") + "." + (isArray ? "_" : "") + mapped); + } + + // The type names stored in a DataTypeName are usually the actual typname from the pg_type column. + // There are some canonical aliases defined in the SQL standard which we take into account. + // Additionally array types have a '_' prefix while for readability their element type should be postfixed with '[]'. + // See the table for all the aliases https://www.postgresql.org/docs/current/static/datatype.html#DATATYPE-TABLE + // Alternatively some of the source lives at https://github.com/postgres/postgres/blob/c8e1ba736b2b9e8c98d37a5b77c4ed31baf94147/src/backend/utils/adt/format_type.c#L186 + static string ToDisplayName(ReadOnlySpan unqualifiedName) + { + var isArray = unqualifiedName.IndexOf('_') == 0; + var baseTypeName = isArray ? 
unqualifiedName.Slice(1).ToString() : unqualifiedName.ToString(); + + var mappedBaseType = baseTypeName switch + { + "bool" => "boolean", + "bpchar" => "character", + "decimal" => "numeric", + "float4" => "real", + "float8" => "double precision", + "int2" => "smallint", + "int4" => "integer", + "int8" => "bigint", + "time" => "time without time zone", + "timestamp" => "timestamp without time zone", + "timetz" => "time with time zone", + "timestamptz" => "timestamp with time zone", + "varbit" => "bit varying", + "varchar" => "character varying", + _ => baseTypeName + }; + + if (isArray) + return mappedBaseType + "[]"; + + return mappedBaseType; + } + + internal static bool IsFullyQualified(ReadOnlySpan dataTypeName) => dataTypeName.Contains(".".AsSpan(), StringComparison.Ordinal); + + internal static string NormalizeName(string dataTypeName) + { + var fqName = FromDisplayName(dataTypeName); + return IsFullyQualified(dataTypeName.AsSpan()) ? fqName.Value : fqName.UnqualifiedName; + } + + public override string ToString() => Value; + public bool Equals(DataTypeName other) => !IsDefault && !other.IsDefault && _value == other._value; + public override bool Equals(object? obj) => obj is DataTypeName other && Equals(other); + public override int GetHashCode() => _value.GetHashCode(); + public static bool operator ==(DataTypeName left, DataTypeName right) => left.Equals(right); + public static bool operator !=(DataTypeName left, DataTypeName right) => !left.Equals(right); +} diff --git a/src/Npgsql/Internal/Postgres/DataTypeNames.cs b/src/Npgsql/Internal/Postgres/DataTypeNames.cs new file mode 100644 index 0000000000..275bcb9937 --- /dev/null +++ b/src/Npgsql/Internal/Postgres/DataTypeNames.cs @@ -0,0 +1,79 @@ +using static Npgsql.Internal.Postgres.DataTypeName; + +namespace Npgsql.Internal.Postgres; + +/// +/// Well-known PostgreSQL data type names. 
+/// +static class DataTypeNames +{ + // Note: The names are fully qualified in source so the strings are constants and instances will be interned after the first call. + // Uses an internal constructor bypassing the public DataTypeName constructor validation, as we don't want to store all these names on + // fields either. + public static DataTypeName Int2 => ValidatedName("pg_catalog.int2"); + public static DataTypeName Int4 => ValidatedName("pg_catalog.int4"); + public static DataTypeName Int4Range => ValidatedName("pg_catalog.int4range"); + public static DataTypeName Int4Multirange => ValidatedName("pg_catalog.int4multirange"); + public static DataTypeName Int8 => ValidatedName("pg_catalog.int8"); + public static DataTypeName Int8Range => ValidatedName("pg_catalog.int8range"); + public static DataTypeName Int8Multirange => ValidatedName("pg_catalog.int8multirange"); + public static DataTypeName Float4 => ValidatedName("pg_catalog.float4"); + public static DataTypeName Float8 => ValidatedName("pg_catalog.float8"); + public static DataTypeName Numeric => ValidatedName("pg_catalog.numeric"); + public static DataTypeName NumRange => ValidatedName("pg_catalog.numrange"); + public static DataTypeName NumMultirange => ValidatedName("pg_catalog.nummultirange"); + public static DataTypeName Money => ValidatedName("pg_catalog.money"); + public static DataTypeName Bool => ValidatedName("pg_catalog.bool"); + public static DataTypeName Box => ValidatedName("pg_catalog.box"); + public static DataTypeName Circle => ValidatedName("pg_catalog.circle"); + public static DataTypeName Line => ValidatedName("pg_catalog.line"); + public static DataTypeName LSeg => ValidatedName("pg_catalog.lseg"); + public static DataTypeName Path => ValidatedName("pg_catalog.path"); + public static DataTypeName Point => ValidatedName("pg_catalog.point"); + public static DataTypeName Polygon => ValidatedName("pg_catalog.polygon"); + public static DataTypeName Bpchar => 
ValidatedName("pg_catalog.bpchar"); + public static DataTypeName Text => ValidatedName("pg_catalog.text"); + public static DataTypeName Varchar => ValidatedName("pg_catalog.varchar"); + public static DataTypeName Char => ValidatedName("pg_catalog.char"); + public static DataTypeName Name => ValidatedName("pg_catalog.name"); + public static DataTypeName Bytea => ValidatedName("pg_catalog.bytea"); + public static DataTypeName Date => ValidatedName("pg_catalog.date"); + public static DataTypeName DateRange => ValidatedName("pg_catalog.daterange"); + public static DataTypeName DateMultirange => ValidatedName("pg_catalog.datemultirange"); + public static DataTypeName Time => ValidatedName("pg_catalog.time"); + public static DataTypeName Timestamp => ValidatedName("pg_catalog.timestamp"); + public static DataTypeName TsRange => ValidatedName("pg_catalog.tsrange"); + public static DataTypeName TsMultirange => ValidatedName("pg_catalog.tsmultirange"); + public static DataTypeName TimestampTz => ValidatedName("pg_catalog.timestamptz"); + public static DataTypeName TsTzRange => ValidatedName("pg_catalog.tstzrange"); + public static DataTypeName TsTzMultirange => ValidatedName("pg_catalog.tstzmultirange"); + public static DataTypeName Interval => ValidatedName("pg_catalog.interval"); + public static DataTypeName TimeTz => ValidatedName("pg_catalog.timetz"); + public static DataTypeName Inet => ValidatedName("pg_catalog.inet"); + public static DataTypeName Cidr => ValidatedName("pg_catalog.cidr"); + public static DataTypeName MacAddr => ValidatedName("pg_catalog.macaddr"); + public static DataTypeName MacAddr8 => ValidatedName("pg_catalog.macaddr8"); + public static DataTypeName Bit => ValidatedName("pg_catalog.bit"); + public static DataTypeName Varbit => ValidatedName("pg_catalog.varbit"); + public static DataTypeName TsVector => ValidatedName("pg_catalog.tsvector"); + public static DataTypeName TsQuery => ValidatedName("pg_catalog.tsquery"); + public static DataTypeName 
RegConfig => ValidatedName("pg_catalog.regconfig"); + public static DataTypeName Uuid => ValidatedName("pg_catalog.uuid"); + public static DataTypeName Xml => ValidatedName("pg_catalog.xml"); + public static DataTypeName Json => ValidatedName("pg_catalog.json"); + public static DataTypeName Jsonb => ValidatedName("pg_catalog.jsonb"); + public static DataTypeName Jsonpath => ValidatedName("pg_catalog.jsonpath"); + public static DataTypeName Record => ValidatedName("pg_catalog.record"); + public static DataTypeName RefCursor => ValidatedName("pg_catalog.refcursor"); + public static DataTypeName OidVector => ValidatedName("pg_catalog.oidvector"); + public static DataTypeName Int2Vector => ValidatedName("pg_catalog.int2vector"); + public static DataTypeName Oid => ValidatedName("pg_catalog.oid"); + public static DataTypeName Xid => ValidatedName("pg_catalog.xid"); + public static DataTypeName Xid8 => ValidatedName("pg_catalog.xid8"); + public static DataTypeName Cid => ValidatedName("pg_catalog.cid"); + public static DataTypeName RegType => ValidatedName("pg_catalog.regtype"); + public static DataTypeName Tid => ValidatedName("pg_catalog.tid"); + public static DataTypeName PgLsn => ValidatedName("pg_catalog.pg_lsn"); + public static DataTypeName Unknown => ValidatedName("pg_catalog.unknown"); + public static DataTypeName Void => ValidatedName("pg_catalog.void"); +} diff --git a/src/Npgsql/Internal/Postgres/Field.cs b/src/Npgsql/Internal/Postgres/Field.cs new file mode 100644 index 0000000000..f6a261c103 --- /dev/null +++ b/src/Npgsql/Internal/Postgres/Field.cs @@ -0,0 +1,16 @@ +namespace Npgsql.Internal.Postgres; + +/// Base field type shared between tables and composites. 
+public readonly struct Field +{ + public Field(string name, PgTypeId pgTypeId, int typeModifier) + { + Name = name; + PgTypeId = pgTypeId; + TypeModifier = typeModifier; + } + + public string Name { get; init; } + public PgTypeId PgTypeId { get; init; } + public int TypeModifier { get; init; } +} diff --git a/src/Npgsql/Internal/Postgres/Oid.cs b/src/Npgsql/Internal/Postgres/Oid.cs new file mode 100644 index 0000000000..ac9577609d --- /dev/null +++ b/src/Npgsql/Internal/Postgres/Oid.cs @@ -0,0 +1,19 @@ +using System; + +namespace Npgsql.Internal.Postgres; + +public readonly struct Oid: IEquatable +{ + public Oid(uint value) => Value = value; + + public static explicit operator uint(Oid oid) => oid.Value; + public static implicit operator Oid(uint oid) => new(oid); + public uint Value { get; init; } + + public override string ToString() => Value.ToString(); + public bool Equals(Oid other) => Value == other.Value; + public override bool Equals(object? obj) => obj is Oid other && Equals(other); + public override int GetHashCode() => (int)Value; + public static bool operator ==(Oid left, Oid right) => left.Equals(right); + public static bool operator !=(Oid left, Oid right) => !left.Equals(right); +} diff --git a/src/Npgsql/Internal/Postgres/PgTypeId.cs b/src/Npgsql/Internal/Postgres/PgTypeId.cs new file mode 100644 index 0000000000..e363969a47 --- /dev/null +++ b/src/Npgsql/Internal/Postgres/PgTypeId.cs @@ -0,0 +1,44 @@ +using System; +using System.Diagnostics.CodeAnalysis; + +namespace Npgsql.Internal.Postgres; + +/// +/// A discriminated union of and . 
+/// +public readonly struct PgTypeId: IEquatable +{ + readonly DataTypeName _dataTypeName; + readonly Oid _oid; + + public PgTypeId(DataTypeName name) => _dataTypeName = name; + public PgTypeId(Oid oid) => _oid = oid; + + [MemberNotNullWhen(true, nameof(_dataTypeName))] + public bool IsDataTypeName => !_dataTypeName.IsDefault; + public bool IsOid => _dataTypeName.IsDefault; + + public DataTypeName DataTypeName + => IsDataTypeName ? _dataTypeName : throw new InvalidOperationException("This value does not describe a DataTypeName."); + + public Oid Oid + => IsOid ? _oid : throw new InvalidOperationException("This value does not describe an Oid."); + + public static implicit operator PgTypeId(DataTypeName name) => new(name); + public static implicit operator PgTypeId(Oid id) => new(id); + + public override string ToString() => IsOid ? _oid.ToString() : _dataTypeName.Value; + + public bool Equals(PgTypeId other) + => (this, other) switch + { + ({ IsOid: true }, { IsOid: true }) => _oid == other._oid, + ({ IsDataTypeName: true }, { IsDataTypeName: true }) => _dataTypeName.Equals(other._dataTypeName), + _ => false + }; + + public override bool Equals(object? obj) => obj is PgTypeId other && Equals(other); + public override int GetHashCode() => IsOid ? 
_oid.GetHashCode() : _dataTypeName.GetHashCode(); + public static bool operator ==(PgTypeId left, PgTypeId right) => left.Equals(right); + public static bool operator !=(PgTypeId left, PgTypeId right) => !left.Equals(right); +} diff --git a/src/Npgsql/Internal/Resolvers/AdoTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/AdoTypeInfoResolver.cs new file mode 100644 index 0000000000..0f2c077aad --- /dev/null +++ b/src/Npgsql/Internal/Resolvers/AdoTypeInfoResolver.cs @@ -0,0 +1,491 @@ +using System; +using System.Collections; +using System.Collections.Generic; +using System.Collections.Specialized; +using System.Diagnostics; +using System.IO; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Converters.Internal; +using Npgsql.Internal.Postgres; +using Npgsql.PostgresTypes; +using Npgsql.Util; +using NpgsqlTypes; + +namespace Npgsql.Internal.Resolvers; + +// Baseline types that are always supported. +class AdoTypeInfoResolver : IPgTypeInfoResolver +{ + public AdoTypeInfoResolver() + { + Mappings = new TypeInfoMappingCollection(); + AddInfos(Mappings); + } + + public static AdoTypeInfoResolver Instance { get; } = new(); + + protected TypeInfoMappingCollection Mappings { get; } + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + { + var info = Mappings.Find(type, dataTypeName, options); + if (info is null && dataTypeName is not null) + info = GetEnumTypeInfo(type, dataTypeName.GetValueOrDefault(), options); + return info; + } + + protected static PgTypeInfo? GetEnumTypeInfo(Type? 
type, DataTypeName dataTypeName, PgSerializerOptions options) + { + if (type is not null && type != typeof(string)) + return null; + + if (options.DatabaseInfo.GetPostgresType(dataTypeName) is not PostgresEnumType) + return null; + + return new PgTypeInfo(options, new StringTextConverter(options.TextEncoding), dataTypeName); + } + + static void AddInfos(TypeInfoMappingCollection mappings) + { + // Bool + mappings.AddStructType(DataTypeNames.Bool, + static (options, mapping, _) => mapping.CreateInfo(options, new BoolConverter()), isDefault: true); + + // Numeric + mappings.AddStructType(DataTypeNames.Int2, + static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Int4, + static (options, mapping, _) => mapping.CreateInfo(options, new Int4Converter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Int8, + static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Float4, + static (options, mapping, _) => mapping.CreateInfo(options, new RealConverter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Float8, + static (options, mapping, _) => mapping.CreateInfo(options, new DoubleConverter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Numeric, + static (options, mapping, _) => mapping.CreateInfo(options, new DecimalNumericConverter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Money, + static (options, mapping, _) => mapping.CreateInfo(options, new MoneyConverter()), MatchRequirement.DataTypeName); + + // Text + mappings.AddType(DataTypeNames.Text, + static (options, mapping, _) => mapping.CreateInfo(options, new StringTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text), isDefault: true); + mappings.AddStructType(DataTypeNames.Text, + static (options, mapping, _) => mapping.CreateInfo(options, new CharTextConverter(options.TextEncoding), 
preferredFormat: DataFormat.Text)); + // Uses the bytea converters, as neither type has a header. + mappings.AddType(DataTypeNames.Text, + static (options, mapping, _) => mapping.CreateInfo(options, new ArrayByteaConverter()), + MatchRequirement.DataTypeName); + mappings.AddStructType>(DataTypeNames.Text, + static (options, mapping, _) => mapping.CreateInfo(options, new ReadOnlyMemoryByteaConverter()), + MatchRequirement.DataTypeName); + //Special mappings, these have no corresponding array mapping. + mappings.AddType(DataTypeNames.Text, + static (options, mapping, _) => mapping.CreateInfo(options, new TextReaderTextConverter(options.TextEncoding), supportsWriting: false, preferredFormat: DataFormat.Text), + MatchRequirement.DataTypeName); + mappings.AddStructType(DataTypeNames.Text, + static (options, mapping, _) => mapping.CreateInfo(options, new GetCharsTextConverter(options.TextEncoding), supportsWriting: false, preferredFormat: DataFormat.Text), + MatchRequirement.DataTypeName); + + // Alternative text types + foreach(var dataTypeName in new[] { "citext", DataTypeNames.Varchar, + DataTypeNames.Bpchar, DataTypeNames.Json, + DataTypeNames.Xml, DataTypeNames.Name, DataTypeNames.RefCursor }) + { + mappings.AddType(dataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new StringTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text), + MatchRequirement.DataTypeName); + mappings.AddStructType(dataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new CharTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text), + MatchRequirement.DataTypeName); + // Uses the bytea converters, as neither type has a header. 
+ mappings.AddType(dataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new ArrayByteaConverter()), + MatchRequirement.DataTypeName); + mappings.AddStructType>(dataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new ReadOnlyMemoryByteaConverter()), + MatchRequirement.DataTypeName); + //Special mappings, these have no corresponding array mapping. + mappings.AddType(dataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new TextReaderTextConverter(options.TextEncoding), supportsWriting: false, preferredFormat: DataFormat.Text), + MatchRequirement.DataTypeName); + mappings.AddStructType(dataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new GetCharsTextConverter(options.TextEncoding), supportsWriting: false, preferredFormat: DataFormat.Text), + MatchRequirement.DataTypeName); + } + + // Jsonb + const byte jsonbVersion = 1; + mappings.AddType(DataTypeNames.Jsonb, + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new StringTextConverter(options.TextEncoding))), isDefault: true); + mappings.AddStructType(DataTypeNames.Jsonb, + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new CharTextConverter(options.TextEncoding)))); + mappings.AddType(DataTypeNames.Jsonb, + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new ArrayByteaConverter())), + MatchRequirement.DataTypeName); + mappings.AddStructType>(DataTypeNames.Jsonb, + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter>(jsonbVersion, new ReadOnlyMemoryByteaConverter())), + MatchRequirement.DataTypeName); + //Special mappings, these have no corresponding array mapping. 
+ mappings.AddType(DataTypeNames.Jsonb, + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new TextReaderTextConverter(options.TextEncoding)), supportsWriting: false, preferredFormat: DataFormat.Text), + MatchRequirement.DataTypeName); + mappings.AddStructType(DataTypeNames.Jsonb, + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new GetCharsTextConverter(options.TextEncoding)), supportsWriting: false, preferredFormat: DataFormat.Text), + MatchRequirement.DataTypeName); + + // Jsonpath + const byte jsonpathVersion = 1; + mappings.AddType(DataTypeNames.Jsonpath, + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonpathVersion, new StringTextConverter(options.TextEncoding))), isDefault: true); + //Special mappings, these have no corresponding array mapping. + mappings.AddType(DataTypeNames.Jsonpath, + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonpathVersion, new TextReaderTextConverter(options.TextEncoding)), supportsWriting: false, preferredFormat: DataFormat.Text), + MatchRequirement.DataTypeName); + mappings.AddStructType(DataTypeNames.Jsonpath, + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonpathVersion, new GetCharsTextConverter(options.TextEncoding)), supportsWriting: false, preferredFormat: DataFormat.Text), + MatchRequirement.DataTypeName); + + // Bytea + mappings.AddType(DataTypeNames.Bytea, + static (options, mapping, _) => mapping.CreateInfo(options, new ArrayByteaConverter()), isDefault: true); + mappings.AddStructType>(DataTypeNames.Bytea, + static (options, mapping, _) => mapping.CreateInfo(options, new ReadOnlyMemoryByteaConverter())); + mappings.AddType(DataTypeNames.Bytea, + static (options, mapping, _) => new PgTypeInfo(options, new StreamByteaConverter(), new 
DataTypeName(mapping.DataTypeName), unboxedType: mapping.Type), + mapping => mapping with { TypeMatchPredicate = type => typeof(Stream).IsAssignableFrom(type) }); + + // Varbit + mappings.AddType(DataTypeNames.Varbit, + static (options, mapping, _) => mapping.CreateInfo(options, + new PolymorphicBitStringConverterResolver(options.GetCanonicalTypeId(DataTypeNames.Varbit)), supportsWriting: false)); + mappings.AddType(DataTypeNames.Varbit, + static (options, mapping, _) => mapping.CreateInfo(options, new BitArrayBitStringConverter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Varbit, + static (options, mapping, _) => mapping.CreateInfo(options, new BoolBitStringConverter())); + mappings.AddStructType(DataTypeNames.Varbit, + static (options, mapping, _) => mapping.CreateInfo(options, new BitVector32BitStringConverter())); + + // Bit + mappings.AddType(DataTypeNames.Bit, + static (options, mapping, _) => mapping.CreateInfo(options, + new PolymorphicBitStringConverterResolver(options.GetCanonicalTypeId(DataTypeNames.Bit)), supportsWriting: false)); + mappings.AddType(DataTypeNames.Bit, + static (options, mapping, _) => mapping.CreateInfo(options, new BitArrayBitStringConverter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Bit, + static (options, mapping, _) => mapping.CreateInfo(options, new BoolBitStringConverter())); + mappings.AddStructType(DataTypeNames.Bit, + static (options, mapping, _) => mapping.CreateInfo(options, new BitVector32BitStringConverter())); + + // Timestamp + if (Statics.LegacyTimestampBehavior) + { + mappings.AddStructType(DataTypeNames.Timestamp, + static (options, mapping, _) => mapping.CreateInfo(options, + new LegacyDateTimeConverter(options.EnableDateTimeInfinityConversions, timestamp: true)), isDefault: true); + } + else + { + mappings.AddResolverStructType(DataTypeNames.Timestamp, + static (options, mapping, dataTypeNameMatch) => mapping.CreateInfo(options, + DateTimeConverterResolver.CreateResolver(options, 
options.GetCanonicalTypeId(DataTypeNames.TimestampTz), options.GetCanonicalTypeId(DataTypeNames.Timestamp), + options.EnableDateTimeInfinityConversions), dataTypeNameMatch), isDefault: true); + } + mappings.AddStructType(DataTypeNames.Timestamp, + static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); + + // TimestampTz + if (Statics.LegacyTimestampBehavior) + { + mappings.AddStructType(DataTypeNames.TimestampTz, + static (options, mapping, _) => mapping.CreateInfo(options, + new LegacyDateTimeConverter(options.EnableDateTimeInfinityConversions, timestamp: false)), matchRequirement: MatchRequirement.DataTypeName); + mappings.AddStructType(DataTypeNames.TimestampTz, + static (options, mapping, _) => mapping.CreateInfo(options, new LegacyDateTimeOffsetConverter(options.EnableDateTimeInfinityConversions))); + } + else + { + mappings.AddResolverStructType(DataTypeNames.TimestampTz, + static (options, mapping, dataTypeNameMatch) => mapping.CreateInfo(options, + DateTimeConverterResolver.CreateResolver(options, options.GetCanonicalTypeId(DataTypeNames.TimestampTz), options.GetCanonicalTypeId(DataTypeNames.Timestamp), + options.EnableDateTimeInfinityConversions), dataTypeNameMatch), isDefault: true); + mappings.AddStructType(DataTypeNames.TimestampTz, + static (options, mapping, _) => mapping.CreateInfo(options, new DateTimeOffsetConverter(options.EnableDateTimeInfinityConversions))); + } + mappings.AddStructType(DataTypeNames.TimestampTz, + static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); + + // Date + mappings.AddStructType(DataTypeNames.Date, + static (options, mapping, _) => + mapping.CreateInfo(options, new DateTimeDateConverter(options.EnableDateTimeInfinityConversions)), + MatchRequirement.DataTypeName); + mappings.AddStructType(DataTypeNames.Date, + static (options, mapping, _) => mapping.CreateInfo(options, new Int4Converter())); +#if NET6_0_OR_GREATER + mappings.AddStructType(DataTypeNames.Date, + 
static (options, mapping, _) => mapping.CreateInfo(options, new DateOnlyDateConverter(options.EnableDateTimeInfinityConversions))); +#endif + + // Time + mappings.AddStructType(DataTypeNames.Time, + static (options, mapping, _) => mapping.CreateInfo(options, new TimeSpanTimeConverter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Time, + static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); +#if NET6_0_OR_GREATER + mappings.AddStructType(DataTypeNames.Time, + static (options, mapping, _) => mapping.CreateInfo(options, new TimeOnlyTimeConverter())); +#endif + + // TimeTz + mappings.AddStructType(DataTypeNames.TimeTz, + static (options, mapping, _) => mapping.CreateInfo(options, new DateTimeOffsetTimeTzConverter()), + MatchRequirement.DataTypeName); + + // Interval + mappings.AddStructType(DataTypeNames.Interval, + static (options, mapping, _) => mapping.CreateInfo(options, new TimeSpanIntervalConverter()), + MatchRequirement.DataTypeName); + mappings.AddStructType(DataTypeNames.Interval, + static (options, mapping, _) => mapping.CreateInfo(options, new NpgsqlIntervalConverter())); + + // Uuid + mappings.AddStructType(DataTypeNames.Uuid, + static (options, mapping, _) => mapping.CreateInfo(options, new GuidUuidConverter()), isDefault: true); + + // Hstore + mappings.AddType>("hstore", + static (options, mapping, _) => mapping.CreateInfo(options, new HstoreConverter>(options.TextEncoding)), isDefault: true); + mappings.AddType>("hstore", + static (options, mapping, _) => mapping.CreateInfo(options, new HstoreConverter>(options.TextEncoding))); + + // Unknown + mappings.AddType(DataTypeNames.Unknown, + static (options, mapping, _) => mapping.CreateInfo(options, new StringTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text), + MatchRequirement.DataTypeName); + + // Void + mappings.AddType(DataTypeNames.Void, + static (options, mapping, _) => mapping.CreateInfo(options, new VoidConverter(), supportsWriting: 
false), + MatchRequirement.DataTypeName); + + // UInt internal types + foreach (var dataTypeName in new[] { DataTypeNames.Oid, DataTypeNames.Xid, DataTypeNames.Cid, DataTypeNames.RegType, DataTypeNames.RegConfig }) + { + mappings.AddStructType(dataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new UInt32Converter()), + MatchRequirement.DataTypeName); + } + + // Char + mappings.AddStructType(DataTypeNames.Char, + static (options, mapping, _) => mapping.CreateInfo(options, new InternalCharConverter()), + MatchRequirement.DataTypeName); + mappings.AddStructType(DataTypeNames.Char, + static (options, mapping, _) => mapping.CreateInfo(options, new InternalCharConverter())); + + // Xid8 + mappings.AddStructType(DataTypeNames.Xid8, + static (options, mapping, _) => mapping.CreateInfo(options, new UInt64Converter()), + MatchRequirement.DataTypeName); + + // Oidvector + mappings.AddType( + DataTypeNames.OidVector, + static (options, mapping, _) => mapping.CreateInfo(options, + new ArrayBasedArrayConverter(new(new UInt32Converter(), new PgTypeId(DataTypeNames.Oid)), pgLowerBound: 0)), + MatchRequirement.DataTypeName); + + // Int2vector + mappings.AddType( + DataTypeNames.Int2Vector, + static (options, mapping, _) => mapping.CreateInfo(options, + new ArrayBasedArrayConverter(new(new Int2Converter(), new PgTypeId(DataTypeNames.Int2)), pgLowerBound: 0)), + MatchRequirement.DataTypeName); + + // Tid + mappings.AddStructType(DataTypeNames.Tid, + static (options, mapping, _) => mapping.CreateInfo(options, new TidConverter()), + MatchRequirement.DataTypeName); + + // PgLsn + mappings.AddStructType(DataTypeNames.PgLsn, + static (options, mapping, _) => mapping.CreateInfo(options, new PgLsnConverter()), + MatchRequirement.DataTypeName); + mappings.AddStructType(DataTypeNames.PgLsn, + static (options, mapping, _) => mapping.CreateInfo(options, new UInt64Converter())); + } + + protected static void AddArrayInfos(TypeInfoMappingCollection mappings) + { + // Bool 
+ mappings.AddStructArrayType(DataTypeNames.Bool); + + // Numeric + mappings.AddStructArrayType(DataTypeNames.Int2); + mappings.AddStructArrayType(DataTypeNames.Int4); + mappings.AddStructArrayType(DataTypeNames.Int8); + mappings.AddStructArrayType(DataTypeNames.Float4); + mappings.AddStructArrayType(DataTypeNames.Float8); + mappings.AddStructArrayType(DataTypeNames.Numeric); + mappings.AddStructArrayType(DataTypeNames.Money); + + // Text + mappings.AddArrayType(DataTypeNames.Text); + mappings.AddStructArrayType(DataTypeNames.Text); + mappings.AddArrayType(DataTypeNames.Text); + mappings.AddStructArrayType>(DataTypeNames.Text); + + // Alternative text types + foreach(var dataTypeName in new[] { "citext", DataTypeNames.Varchar, + DataTypeNames.Bpchar, DataTypeNames.Json, + DataTypeNames.Xml, DataTypeNames.Name, DataTypeNames.RefCursor }) + { + mappings.AddArrayType(dataTypeName); + mappings.AddStructArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddStructArrayType>(dataTypeName); + } + + // Jsonb + mappings.AddArrayType(DataTypeNames.Jsonb); + mappings.AddStructArrayType(DataTypeNames.Jsonb); + mappings.AddArrayType(DataTypeNames.Jsonb); + mappings.AddStructArrayType>(DataTypeNames.Jsonb); + + // Jsonpath + mappings.AddArrayType(DataTypeNames.Jsonpath); + + // Bytea + mappings.AddArrayType(DataTypeNames.Bytea); + mappings.AddStructArrayType>(DataTypeNames.Bytea); + + // Varbit + // Object mapping first. 
+ mappings.AddPolymorphicResolverArrayType(DataTypeNames.Varbit, static options => resolution => resolution.Converter switch + { + BoolBitStringConverter => PgConverterFactory.CreatePolymorphicArrayConverter( + () => new ArrayBasedArrayConverter(resolution, typeof(Array)), + () => new ArrayBasedArrayConverter(new(new NullableConverter(resolution.GetConverter()), resolution.PgTypeId), typeof(Array)), + options), + BitArrayBitStringConverter => new ArrayBasedArrayConverter(resolution, typeof(Array)), + _ => throw new NotSupportedException() + }); + mappings.AddArrayType(DataTypeNames.Varbit); + mappings.AddStructArrayType(DataTypeNames.Varbit); + mappings.AddStructArrayType(DataTypeNames.Varbit); + + // Bit + // Object mapping first. + mappings.AddPolymorphicResolverArrayType(DataTypeNames.Bit, static options => resolution => resolution.Converter switch + { + BoolBitStringConverter => PgConverterFactory.CreatePolymorphicArrayConverter( + () => new ArrayBasedArrayConverter(resolution, typeof(Array)), + () => new ArrayBasedArrayConverter(new(new NullableConverter(resolution.GetConverter()), resolution.PgTypeId), typeof(Array)), + options), + BitArrayBitStringConverter => new ArrayBasedArrayConverter(resolution, typeof(Array)), + _ => throw new NotSupportedException() + }); + mappings.AddArrayType(DataTypeNames.Bit); + mappings.AddStructArrayType(DataTypeNames.Bit); + mappings.AddStructArrayType(DataTypeNames.Bit); + + // Timestamp + if (Statics.LegacyTimestampBehavior) + mappings.AddStructArrayType(DataTypeNames.Timestamp); + else + mappings.AddResolverStructArrayType(DataTypeNames.Timestamp); + mappings.AddStructArrayType(DataTypeNames.Timestamp); + + // TimestampTz + if (Statics.LegacyTimestampBehavior) + mappings.AddStructArrayType(DataTypeNames.TimestampTz); + else + mappings.AddResolverStructArrayType(DataTypeNames.TimestampTz); + mappings.AddStructArrayType(DataTypeNames.TimestampTz); + mappings.AddStructArrayType(DataTypeNames.TimestampTz); + + // Date + 
mappings.AddStructArrayType(DataTypeNames.Date); + mappings.AddStructArrayType(DataTypeNames.Date); +#if NET6_0_OR_GREATER + mappings.AddStructArrayType(DataTypeNames.Date); +#endif + + // Time + mappings.AddStructArrayType(DataTypeNames.Time); + mappings.AddStructArrayType(DataTypeNames.Time); +#if NET6_0_OR_GREATER + mappings.AddStructArrayType(DataTypeNames.Time); +#endif + + // TimeTz + mappings.AddStructArrayType(DataTypeNames.TimeTz); + + // Interval + mappings.AddStructArrayType(DataTypeNames.Interval); + mappings.AddStructArrayType(DataTypeNames.Interval); + + // Uuid + mappings.AddStructArrayType(DataTypeNames.Uuid); + + // Hstore + mappings.AddArrayType>("hstore"); + mappings.AddArrayType>("hstore"); + + // UInt internal types + foreach (var dataTypeName in new[] { DataTypeNames.Oid, DataTypeNames.Xid, DataTypeNames.Cid, DataTypeNames.RegType, (string)DataTypeNames.RegConfig }) + { + mappings.AddStructArrayType(dataTypeName); + } + + // Char + mappings.AddStructArrayType(DataTypeNames.Char); + mappings.AddStructArrayType(DataTypeNames.Char); + + // Xid8 + mappings.AddStructArrayType(DataTypeNames.Xid8); + + // Oidvector + mappings.AddArrayType(DataTypeNames.OidVector); + + // Int2vector + mappings.AddArrayType(DataTypeNames.Int2Vector); + } +} + +sealed class AdoArrayTypeInfoResolver : AdoTypeInfoResolver, IPgTypeInfoResolver +{ + new TypeInfoMappingCollection Mappings { get; } + + public AdoArrayTypeInfoResolver() + { + Mappings = new TypeInfoMappingCollection(base.Mappings); + var elementTypeCount = Mappings.Items.Count; + AddArrayInfos(Mappings); + // Make sure we have at least one mapping for each element type. + Debug.Assert(Mappings.Items.Count >= elementTypeCount * 2); + } + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + { + var info = Mappings.Find(type, dataTypeName, options); + if (info is null && dataTypeName is not null) + info = GetEnumArrayTypeInfo(type, dataTypeName.GetValueOrDefault(), options); + return info; + } + + static PgTypeInfo? GetEnumArrayTypeInfo(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) + { + if (type is not null && type != typeof(object) && (!TypeInfoMappingCollection.IsArrayLikeType(type, out var elementType) || elementType != typeof(string))) + return null; + + if (options.DatabaseInfo.GetPostgresType(dataTypeName) is not PostgresArrayType { Element: PostgresEnumType enumType }) + return null; + + var mappings = new TypeInfoMappingCollection(); + mappings.AddType(enumType.DataTypeName, (options, mapping, _) => mapping.CreateInfo(options, new StringTextConverter(options.TextEncoding)), MatchRequirement.DataTypeName); + mappings.AddArrayType(enumType.DataTypeName); + return mappings.Find(type, dataTypeName, options); + } +} diff --git a/src/Npgsql/Internal/Resolvers/ExtraConversionsResolver.cs b/src/Npgsql/Internal/Resolvers/ExtraConversionsResolver.cs new file mode 100644 index 0000000000..5f642daf80 --- /dev/null +++ b/src/Npgsql/Internal/Resolvers/ExtraConversionsResolver.cs @@ -0,0 +1,235 @@ +using System; +using System.Collections.Immutable; +using System.Numerics; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Internal.Resolvers; + +class ExtraConversionsResolver : IPgTypeInfoResolver +{ + public ExtraConversionsResolver() => AddInfos(Mappings); + + protected TypeInfoMappingCollection Mappings { get; } = new(); + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static void AddInfos(TypeInfoMappingCollection mappings) + { + // Int2 + mappings.AddStructType(DataTypeNames.Int2, + static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter())); + mappings.AddStructType(DataTypeNames.Int2, + static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter())); + mappings.AddStructType(DataTypeNames.Int2, + static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter())); + mappings.AddStructType(DataTypeNames.Int2, + static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter())); + mappings.AddStructType(DataTypeNames.Int2, + static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter())); + mappings.AddStructType(DataTypeNames.Int2, + static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter())); + mappings.AddStructType(DataTypeNames.Int2, + static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter())); + + // Int4 + mappings.AddStructType(DataTypeNames.Int4, + static (options, mapping, _) => mapping.CreateInfo(options, new Int4Converter())); + mappings.AddStructType(DataTypeNames.Int4, + static (options, mapping, _) => mapping.CreateInfo(options, new Int4Converter())); + mappings.AddStructType(DataTypeNames.Int4, + static (options, mapping, _) => mapping.CreateInfo(options, new Int4Converter())); + mappings.AddStructType(DataTypeNames.Int4, + static (options, mapping, _) => mapping.CreateInfo(options, new Int4Converter())); + mappings.AddStructType(DataTypeNames.Int4, + static (options, mapping, _) => mapping.CreateInfo(options, new Int4Converter())); + mappings.AddStructType(DataTypeNames.Int4, + static (options, mapping, _) => mapping.CreateInfo(options, new Int4Converter())); + mappings.AddStructType(DataTypeNames.Int4, + static (options, mapping, _) => mapping.CreateInfo(options, new Int4Converter())); 
+ + // Int8 + mappings.AddStructType(DataTypeNames.Int8, + static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); + mappings.AddStructType(DataTypeNames.Int8, + static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); + mappings.AddStructType(DataTypeNames.Int8, + static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); + mappings.AddStructType(DataTypeNames.Int8, + static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); + mappings.AddStructType(DataTypeNames.Int8, + static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); + mappings.AddStructType(DataTypeNames.Int8, + static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); + mappings.AddStructType(DataTypeNames.Int8, + static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); + + // Float4 + mappings.AddStructType(DataTypeNames.Float4, + static (options, mapping, _) => mapping.CreateInfo(options, new RealConverter())); + + // Float8 + mappings.AddStructType(DataTypeNames.Float8, + static (options, mapping, _) => mapping.CreateInfo(options, new DoubleConverter())); + + // Numeric + mappings.AddStructType(DataTypeNames.Numeric, + static (options, mapping, _) => mapping.CreateInfo(options, new DecimalNumericConverter())); + mappings.AddStructType(DataTypeNames.Numeric, + static (options, mapping, _) => mapping.CreateInfo(options, new DecimalNumericConverter())); + mappings.AddStructType(DataTypeNames.Numeric, + static (options, mapping, _) => mapping.CreateInfo(options, new DecimalNumericConverter())); + mappings.AddStructType(DataTypeNames.Numeric, + static (options, mapping, _) => mapping.CreateInfo(options, new DecimalNumericConverter())); + mappings.AddStructType(DataTypeNames.Numeric, + static (options, mapping, _) => mapping.CreateInfo(options, new DecimalNumericConverter())); + mappings.AddStructType(DataTypeNames.Numeric, + 
static (options, mapping, _) => mapping.CreateInfo(options, new DecimalNumericConverter())); + mappings.AddStructType(DataTypeNames.Numeric, + static (options, mapping, _) => mapping.CreateInfo(options, new BigIntegerNumericConverter())); + + // Bytea + mappings.AddStructType>(DataTypeNames.Bytea, + static (options, mapping, _) => mapping.CreateInfo(options, new ArraySegmentByteaConverter())); + mappings.AddStructType>(DataTypeNames.Bytea, + static (options, mapping, _) => mapping.CreateInfo(options, new MemoryByteaConverter())); + + // Varbit + mappings.AddType(DataTypeNames.Varbit, + static (options, mapping, _) => mapping.CreateInfo(options, new StringBitStringConverter())); + + // Bit + mappings.AddType(DataTypeNames.Bit, + static (options, mapping, _) => mapping.CreateInfo(options, new StringBitStringConverter())); + + // Text + mappings.AddType(DataTypeNames.Text, + static (options, mapping, _) => mapping.CreateInfo(options, new CharArrayTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text)); + mappings.AddStructType>(DataTypeNames.Text, + static (options, mapping, _) => mapping.CreateInfo(options, new ReadOnlyMemoryTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text)); + mappings.AddStructType>(DataTypeNames.Text, + static (options, mapping, _) => mapping.CreateInfo(options, new CharArraySegmentTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text)); + + // Alternative text types + foreach(var dataTypeName in new[] { "citext", DataTypeNames.Varchar, + DataTypeNames.Bpchar, DataTypeNames.Json, + DataTypeNames.Xml, DataTypeNames.Name, DataTypeNames.RefCursor }) + { + mappings.AddType(dataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new CharArrayTextConverter(options.TextEncoding), + preferredFormat: DataFormat.Text)); + mappings.AddStructType>(dataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new ReadOnlyMemoryTextConverter(options.TextEncoding), + 
preferredFormat: DataFormat.Text)); + mappings.AddStructType>(dataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new CharArraySegmentTextConverter(options.TextEncoding), + preferredFormat: DataFormat.Text)); + } + + // Jsonb + const byte jsonbVersion = 1; + mappings.AddType(DataTypeNames.Jsonb, + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new CharArrayTextConverter(options.TextEncoding)))); + mappings.AddStructType>(DataTypeNames.Jsonb, + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter>(jsonbVersion, new ReadOnlyMemoryTextConverter(options.TextEncoding)))); + mappings.AddStructType>(DataTypeNames.Jsonb, + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter>(jsonbVersion, new CharArraySegmentTextConverter(options.TextEncoding)))); + + // Hstore + mappings.AddType>("hstore", + static (options, mapping, _) => mapping.CreateInfo(options, new HstoreConverter>(options.TextEncoding, result => result.ToImmutableDictionary()))); + } + + protected static void AddArrayInfos(TypeInfoMappingCollection mappings) + { + // Int2 + mappings.AddStructArrayType(DataTypeNames.Int2); + mappings.AddStructArrayType(DataTypeNames.Int2); + mappings.AddStructArrayType(DataTypeNames.Int2); + mappings.AddStructArrayType(DataTypeNames.Int2); + mappings.AddStructArrayType(DataTypeNames.Int2); + mappings.AddStructArrayType(DataTypeNames.Int2); + mappings.AddStructArrayType(DataTypeNames.Int2); + + // Int4 + mappings.AddStructArrayType(DataTypeNames.Int4); + mappings.AddStructArrayType(DataTypeNames.Int4); + mappings.AddStructArrayType(DataTypeNames.Int4); + mappings.AddStructArrayType(DataTypeNames.Int4); + mappings.AddStructArrayType(DataTypeNames.Int4); + mappings.AddStructArrayType(DataTypeNames.Int4); + mappings.AddStructArrayType(DataTypeNames.Int4); + + // Int8 + mappings.AddStructArrayType(DataTypeNames.Int8); + 
mappings.AddStructArrayType(DataTypeNames.Int8); + mappings.AddStructArrayType(DataTypeNames.Int8); + mappings.AddStructArrayType(DataTypeNames.Int8); + mappings.AddStructArrayType(DataTypeNames.Int8); + mappings.AddStructArrayType(DataTypeNames.Int8); + mappings.AddStructArrayType(DataTypeNames.Int8); + + // Float4 + mappings.AddStructArrayType(DataTypeNames.Float4); + + // Float8 + mappings.AddStructArrayType(DataTypeNames.Float8); + + // Numeric + mappings.AddStructArrayType(DataTypeNames.Numeric); + mappings.AddStructArrayType(DataTypeNames.Numeric); + mappings.AddStructArrayType(DataTypeNames.Numeric); + mappings.AddStructArrayType(DataTypeNames.Numeric); + mappings.AddStructArrayType(DataTypeNames.Numeric); + mappings.AddStructArrayType(DataTypeNames.Numeric); + mappings.AddStructArrayType(DataTypeNames.Numeric); + + // Bytea + mappings.AddStructArrayType>(DataTypeNames.Bytea); + mappings.AddStructArrayType>(DataTypeNames.Bytea); + + // Varbit + mappings.AddArrayType(DataTypeNames.Varbit); + + // Bit + mappings.AddArrayType(DataTypeNames.Bit); + + // Text + mappings.AddArrayType(DataTypeNames.Text); + mappings.AddStructArrayType>(DataTypeNames.Text); + mappings.AddStructArrayType>(DataTypeNames.Text); + + // Alternative text types + foreach(var dataTypeName in new[] { "citext", DataTypeNames.Varchar, + DataTypeNames.Bpchar, DataTypeNames.Json, + DataTypeNames.Xml, DataTypeNames.Name, DataTypeNames.RefCursor }) + { + mappings.AddArrayType(dataTypeName); + mappings.AddStructArrayType>(dataTypeName); + mappings.AddStructArrayType>(dataTypeName); + } + + // Jsonb + mappings.AddArrayType(DataTypeNames.Jsonb); + mappings.AddStructArrayType>(DataTypeNames.Jsonb); + mappings.AddStructArrayType>(DataTypeNames.Jsonb); + + // Hstore + mappings.AddArrayType>("hstore"); + } +} + +sealed class ExtraConversionsArrayTypeInfoResolver : ExtraConversionsResolver, IPgTypeInfoResolver +{ + public ExtraConversionsArrayTypeInfoResolver() + { + Mappings = new 
TypeInfoMappingCollection(base.Mappings.Items); + AddArrayInfos(Mappings); + } + + new TypeInfoMappingCollection Mappings { get; } + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); +} diff --git a/src/Npgsql/Internal/Resolvers/FullTextSearchTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/FullTextSearchTypeInfoResolver.cs new file mode 100644 index 0000000000..f3b3a90d79 --- /dev/null +++ b/src/Npgsql/Internal/Resolvers/FullTextSearchTypeInfoResolver.cs @@ -0,0 +1,81 @@ +using System; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; +using Npgsql.Properties; +using NpgsqlTypes; + +namespace Npgsql.Internal.Resolvers; + +sealed class FullTextSearchTypeInfoResolver : IPgTypeInfoResolver +{ + TypeInfoMappingCollection Mappings { get; } + + public FullTextSearchTypeInfoResolver() + { + Mappings = new TypeInfoMappingCollection(); + AddInfos(Mappings); + // TODO: Opt-in only + AddArrayInfos(Mappings); + } + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static void AddInfos(TypeInfoMappingCollection mappings) + { + // tsvector + mappings.AddType(DataTypeNames.TsVector, + static (options, mapping, _) => mapping.CreateInfo(options, new TsVectorConverter(options.TextEncoding)), isDefault: true); + + // tsquery + mappings.AddType(DataTypeNames.TsQuery, + static (options, mapping, _) => mapping.CreateInfo(options, new TsQueryConverter(options.TextEncoding)), isDefault: true); + mappings.AddType(DataTypeNames.TsQuery, + static (options, mapping, _) => mapping.CreateInfo(options, new TsQueryConverter(options.TextEncoding))); + mappings.AddType(DataTypeNames.TsQuery, + static (options, mapping, _) => mapping.CreateInfo(options, new TsQueryConverter(options.TextEncoding))); + mappings.AddType(DataTypeNames.TsQuery, + static (options, mapping, _) => mapping.CreateInfo(options, new TsQueryConverter(options.TextEncoding))); + mappings.AddType(DataTypeNames.TsQuery, + static (options, mapping, _) => mapping.CreateInfo(options, new TsQueryConverter(options.TextEncoding))); + mappings.AddType(DataTypeNames.TsQuery, + static (options, mapping, _) => mapping.CreateInfo(options, new TsQueryConverter(options.TextEncoding))); + mappings.AddType(DataTypeNames.TsQuery, + static (options, mapping, _) => mapping.CreateInfo(options, new TsQueryConverter(options.TextEncoding))); + } + + static void AddArrayInfos(TypeInfoMappingCollection mappings) + { + // tsvector + mappings.AddArrayType(DataTypeNames.TsVector); + + // tsquery + mappings.AddArrayType(DataTypeNames.TsQuery); + mappings.AddArrayType(DataTypeNames.TsQuery); + mappings.AddArrayType(DataTypeNames.TsQuery); + mappings.AddArrayType(DataTypeNames.TsQuery); + mappings.AddArrayType(DataTypeNames.TsQuery); + mappings.AddArrayType(DataTypeNames.TsQuery); + mappings.AddArrayType(DataTypeNames.TsQuery); + } + + public static void CheckUnsupported(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + { + if (type != typeof(object) && (dataTypeName == DataTypeNames.TsQuery || dataTypeName == DataTypeNames.TsVector)) + throw new NotSupportedException( + string.Format(NpgsqlStrings.FullTextSearchNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableFullTextSearch), typeof(TBuilder).Name)); + + if (type is null) + return; + + if (TypeInfoMappingCollection.IsArrayLikeType(type, out var elementType)) + type = elementType; + + if (type is { IsConstructedGenericType: true } && type.GetGenericTypeDefinition() == typeof(Nullable<>)) + type = type.GetGenericArguments()[0]; + + if (type == typeof(NpgsqlTsVector) || typeof(NpgsqlTsQuery).IsAssignableFrom(type)) + throw new NotSupportedException( + string.Format(NpgsqlStrings.FullTextSearchNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableFullTextSearch), typeof(TBuilder).Name)); + } +} diff --git a/src/Npgsql/Internal/Resolvers/GeometricTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/GeometricTypeInfoResolver.cs new file mode 100644 index 0000000000..6c24e1dcf9 --- /dev/null +++ b/src/Npgsql/Internal/Resolvers/GeometricTypeInfoResolver.cs @@ -0,0 +1,51 @@ +using System; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; +using NpgsqlTypes; + +namespace Npgsql.Internal.Resolvers; + +sealed class GeometricTypeInfoResolver : IPgTypeInfoResolver +{ + TypeInfoMappingCollection Mappings { get; } + + public GeometricTypeInfoResolver() + { + Mappings = new TypeInfoMappingCollection(); + AddInfos(Mappings); + // TODO: Opt-in only + AddArrayInfos(Mappings); + } + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static void AddInfos(TypeInfoMappingCollection mappings) + { + mappings.AddStructType(DataTypeNames.Point, + static (options, mapping, _) => mapping.CreateInfo(options, new PointConverter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Box, + static (options, mapping, _) => mapping.CreateInfo(options, new BoxConverter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Polygon, + static (options, mapping, _) => mapping.CreateInfo(options, new PolygonConverter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Line, + static (options, mapping, _) => mapping.CreateInfo(options, new LineConverter()), isDefault: true); + mappings.AddStructType(DataTypeNames.LSeg, + static (options, mapping, _) => mapping.CreateInfo(options, new LineSegmentConverter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Path, + static (options, mapping, _) => mapping.CreateInfo(options, new PathConverter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Circle, + static (options, mapping, _) => mapping.CreateInfo(options, new CircleConverter()), isDefault: true); + } + + static void AddArrayInfos(TypeInfoMappingCollection mappings) + { + mappings.AddStructArrayType(DataTypeNames.Point); + mappings.AddStructArrayType(DataTypeNames.Box); + mappings.AddStructArrayType(DataTypeNames.Polygon); + mappings.AddStructArrayType(DataTypeNames.Line); + mappings.AddStructArrayType(DataTypeNames.LSeg); + mappings.AddStructArrayType(DataTypeNames.Path); + mappings.AddStructArrayType(DataTypeNames.Circle); + } +} diff --git a/src/Npgsql/Internal/Resolvers/LTreeTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/LTreeTypeInfoResolver.cs new file mode 100644 index 0000000000..129f73eecd --- /dev/null +++ b/src/Npgsql/Internal/Resolvers/LTreeTypeInfoResolver.cs @@ -0,0 +1,51 @@ +using System; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; 
+using Npgsql.Properties; + +namespace Npgsql.Internal.Resolvers; + +sealed class LTreeTypeInfoResolver : IPgTypeInfoResolver +{ + const byte LTreeVersion = 1; + TypeInfoMappingCollection Mappings { get; } + + public LTreeTypeInfoResolver() + { + Mappings = new TypeInfoMappingCollection(); + AddInfos(Mappings); + // TODO: Opt-in only + AddArrayInfos(Mappings); + } + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static void AddInfos(TypeInfoMappingCollection mappings) + { + mappings.AddType("ltree", + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(LTreeVersion, new StringTextConverter(options.TextEncoding))), + MatchRequirement.DataTypeName); + mappings.AddType("lquery", + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(LTreeVersion, new StringTextConverter(options.TextEncoding))), + MatchRequirement.DataTypeName); + mappings.AddType("ltxtquery", + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(LTreeVersion, new StringTextConverter(options.TextEncoding))), + MatchRequirement.DataTypeName); + } + + static void AddArrayInfos(TypeInfoMappingCollection mappings) + { + mappings.AddArrayType("ltree"); + mappings.AddArrayType("lquery"); + mappings.AddArrayType("ltxtquery"); + } + + public static void CheckUnsupported(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + { + if (type != typeof(object) && dataTypeName is { UnqualifiedName: "ltree" or "lquery" or "ltxtquery" }) + throw new NotSupportedException( + string.Format(NpgsqlStrings.LTreeNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableLTree), + typeof(TBuilder).Name)); + } +} diff --git a/src/Npgsql/Internal/Resolvers/NetworkTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/NetworkTypeInfoResolver.cs new file mode 100644 index 0000000000..49ef7e8a5f --- /dev/null +++ b/src/Npgsql/Internal/Resolvers/NetworkTypeInfoResolver.cs @@ -0,0 +1,74 @@ +using System; +using System.Diagnostics.CodeAnalysis; +using System.Net; +using System.Net.NetworkInformation; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; +using NpgsqlTypes; + +namespace Npgsql.Internal.Resolvers; + +sealed class NetworkTypeInfoResolver : IPgTypeInfoResolver +{ + TypeInfoMappingCollection Mappings { get; } + + public NetworkTypeInfoResolver() + { + Mappings = new TypeInfoMappingCollection(); + AddInfos(Mappings); + // TODO: Opt-in only + AddArrayInfos(Mappings); + } + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static void AddInfos(TypeInfoMappingCollection mappings) + { + // macaddr + mappings.AddType(DataTypeNames.MacAddr, + static (options, mapping, _) => mapping.CreateInfo(options, new MacaddrConverter(macaddr8: false)), isDefault: true); + mappings.AddType(DataTypeNames.MacAddr8, + static (options, mapping, _) => mapping.CreateInfo(options, new MacaddrConverter(macaddr8: true)), + mapping => mapping with { MatchRequirement = MatchRequirement.DataTypeName }); + + // inet + // This is one of the rare mappings that force us to use reflection for a lack of any alternative. + // There are certain IPAddress values like Loopback or Any that return a *private* derived type (see https://github.com/dotnet/runtime/issues/27870). 
+ // However we still need to be able to resolve an exactly typed converter for those values. + // We do so by wrapping our converter in a casting converter constructed over the derived type. + // Finally we add a custom predicate to be able to match any type which values are assignable to IPAddress. + mappings.AddType(DataTypeNames.Inet, + [UnconditionalSuppressMessage("AOT", "IL3050", Justification = "MakeGenericType is safe because the target will only ever be a reference type.")] + static (options, resolvedMapping, _) => + { + var derivedType = resolvedMapping.Type != typeof(IPAddress); + PgConverter converter = new IPAddressConverter(); + if (derivedType) + // There is not much more we can do, the deriving type IPAddress+ReadOnlyIPAddress isn't public. + converter = (PgConverter)Activator.CreateInstance(typeof(CastingConverter<>).MakeGenericType(resolvedMapping.Type), converter)!; + + return resolvedMapping.CreateInfo(options, converter); + }, mapping => mapping with { MatchRequirement = MatchRequirement.Single, TypeMatchPredicate = type => type is null || typeof(IPAddress).IsAssignableFrom(type) }); + mappings.AddStructType(DataTypeNames.Inet, + static (options, mapping, _) => mapping.CreateInfo(options, new NpgsqlInetConverter())); + + // cidr + mappings.AddStructType(DataTypeNames.Cidr, + static (options, mapping, _) => mapping.CreateInfo(options, new NpgsqlCidrConverter()), isDefault: true); + } + + static void AddArrayInfos(TypeInfoMappingCollection mappings) + { + // macaddr + mappings.AddArrayType(DataTypeNames.MacAddr); + mappings.AddArrayType(DataTypeNames.MacAddr8); + + // inet + mappings.AddArrayType(DataTypeNames.Inet); + mappings.AddStructArrayType(DataTypeNames.Inet); + + // cidr + mappings.AddStructArrayType(DataTypeNames.Cidr); + } +} diff --git a/src/Npgsql/Internal/Resolvers/RangeTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/RangeTypeInfoResolver.cs new file mode 100644 index 0000000000..57fc75e978 --- /dev/null +++ 
b/src/Npgsql/Internal/Resolvers/RangeTypeInfoResolver.cs @@ -0,0 +1,437 @@ +using System; +using System.Collections.Generic; +using System.Numerics; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; +using Npgsql.Properties; +using Npgsql.TypeMapping; +using Npgsql.Util; +using NpgsqlTypes; +using static Npgsql.Internal.PgConverterFactory; + +namespace Npgsql.Internal.Resolvers; + +// TODO improve the ability to switch on server capability. +class RangeTypeInfoResolver : IPgTypeInfoResolver +{ + protected TypeInfoMappingCollection Mappings { get; } + protected TypeInfoMappingCollection MappingsWithMultiRanges { get; } + + public RangeTypeInfoResolver() + { + Mappings = new TypeInfoMappingCollection(); + AddInfos(Mappings, supportsMultiRange: false); + MappingsWithMultiRanges = new TypeInfoMappingCollection(); + AddInfos(MappingsWithMultiRanges, supportsMultiRange: true); + } + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => (options.DatabaseInfo.SupportsMultirangeTypes ? 
MappingsWithMultiRanges : Mappings).Find(type, dataTypeName, options); + + static void AddInfos(TypeInfoMappingCollection mappings, bool supportsMultiRange) + { + // numeric ranges + mappings.AddStructType>(DataTypeNames.Int4Range, + static (options, mapping, _) => mapping.CreateInfo(options, CreateRangeConverter(new Int4Converter(), options)), + isDefault: true); + mappings.AddStructType>(DataTypeNames.Int8Range, + static (options, mapping, _) => mapping.CreateInfo(options, CreateRangeConverter(new Int8Converter(), options)), + isDefault: true); + mappings.AddStructType>(DataTypeNames.NumRange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateRangeConverter(new DecimalNumericConverter(), options)), + isDefault: true); + mappings.AddStructType>(DataTypeNames.NumRange, + static (options, mapping, _) => mapping.CreateInfo(options, CreateRangeConverter(new BigIntegerNumericConverter(), options))); + + // tsrange + if (Statics.LegacyTimestampBehavior) + { + mappings.AddStructType>(DataTypeNames.TsRange, + static (options, mapping, _) => mapping.CreateInfo(options, + CreateRangeConverter(new LegacyDateTimeConverter(options.EnableDateTimeInfinityConversions, timestamp: true), options)), + isDefault: true); + } + else + { + mappings.AddResolverStructType>(DataTypeNames.TsRange, + static (options, mapping, dataTypeNameMatch) => mapping.CreateInfo(options, + DateTimeConverterResolver.CreateRangeResolver(options, + options.GetCanonicalTypeId(DataTypeNames.TsTzRange), + options.GetCanonicalTypeId(DataTypeNames.TsRange), + options.EnableDateTimeInfinityConversions), dataTypeNameMatch), + isDefault: true); + } + mappings.AddStructType>(DataTypeNames.TsRange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateRangeConverter(new Int8Converter(), options))); + + // tstzrange + if (Statics.LegacyTimestampBehavior) + { + mappings.AddStructType>(DataTypeNames.TsTzRange, + static (options, mapping, _) => mapping.CreateInfo(options, + 
CreateRangeConverter(new LegacyDateTimeConverter(options.EnableDateTimeInfinityConversions, timestamp: false), options)), + isDefault: true); + mappings.AddStructType>(DataTypeNames.TsTzRange, + static (options, mapping, _) => mapping.CreateInfo(options, + CreateRangeConverter(new LegacyDateTimeOffsetConverter(options.EnableDateTimeInfinityConversions), options))); + } + else + { + mappings.AddResolverStructType>(DataTypeNames.TsTzRange, + static (options, mapping, dataTypeNameMatch) => mapping.CreateInfo(options, + DateTimeConverterResolver.CreateRangeResolver(options, + options.GetCanonicalTypeId(DataTypeNames.TsTzRange), + options.GetCanonicalTypeId(DataTypeNames.TsRange), + options.EnableDateTimeInfinityConversions), dataTypeNameMatch), + isDefault: true); + mappings.AddStructType>(DataTypeNames.TsTzRange, + static (options, mapping, _) => mapping.CreateInfo(options, + CreateRangeConverter(new DateTimeOffsetConverter(options.EnableDateTimeInfinityConversions), options))); + } + mappings.AddStructType>(DataTypeNames.TsTzRange, + static (options, mapping, _) => mapping.CreateInfo(options, CreateRangeConverter(new Int8Converter(), options))); + + // daterange + mappings.AddStructType>(DataTypeNames.DateRange, + static (options, mapping, _) => mapping.CreateInfo(options, + CreateRangeConverter(new DateTimeDateConverter(options.EnableDateTimeInfinityConversions), options)), + isDefault: true); + mappings.AddStructType>(DataTypeNames.DateRange, + static (options, mapping, _) => mapping.CreateInfo(options, CreateRangeConverter(new Int4Converter(), options))); +#if NET6_0_OR_GREATER + mappings.AddStructType>(DataTypeNames.DateRange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateRangeConverter(new DateOnlyDateConverter(options.EnableDateTimeInfinityConversions), options))); +#endif + + if (supportsMultiRange) + { + // int4multirange + mappings.AddType[]>(DataTypeNames.Int4Multirange, + static (options, mapping, _) => + mapping.CreateInfo(options, 
CreateArrayMultirangeConverter(CreateRangeConverter(new Int4Converter(), options), options)), + isDefault: true); + mappings.AddType>>(DataTypeNames.Int4Multirange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateListMultirangeConverter(CreateRangeConverter(new Int4Converter(), options), options))); + + // int8multirange + mappings.AddType[]>(DataTypeNames.Int8Multirange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateArrayMultirangeConverter(CreateRangeConverter(new Int8Converter(), options), options)), + isDefault: true); + mappings.AddType>>(DataTypeNames.Int8Multirange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateListMultirangeConverter(CreateRangeConverter(new Int8Converter(), options), options))); + + // nummultirange + mappings.AddType[]>(DataTypeNames.NumMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateArrayMultirangeConverter(CreateRangeConverter(new DecimalNumericConverter(), options), options)), + isDefault: true); + mappings.AddType>>(DataTypeNames.NumMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateListMultirangeConverter(CreateRangeConverter(new DecimalNumericConverter(), options), options))); + + // tsmultirange + if (Statics.LegacyTimestampBehavior) + { + mappings.AddType[]>(DataTypeNames.TsMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateArrayMultirangeConverter( + CreateRangeConverter(new LegacyDateTimeConverter(options.EnableDateTimeInfinityConversions, timestamp: true), options), options)), + isDefault: true); + mappings.AddType>>(DataTypeNames.TsMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateListMultirangeConverter( + CreateRangeConverter(new LegacyDateTimeConverter(options.EnableDateTimeInfinityConversions, timestamp: true), options), options))); + } + else + { + mappings.AddType[]>(DataTypeNames.TsMultirange, + static (options, mapping, 
dataTypeNameMatch) => mapping.CreateInfo(options, + DateTimeConverterResolver.CreateMultirangeResolver[], NpgsqlRange>(options, + options.GetCanonicalTypeId(DataTypeNames.TsTzMultirange), + options.GetCanonicalTypeId(DataTypeNames.TsMultirange), + options.EnableDateTimeInfinityConversions), dataTypeNameMatch), + isDefault: true); + mappings.AddType>>(DataTypeNames.TsMultirange, + static (options, mapping, dataTypeNameMatch) => mapping.CreateInfo(options, + DateTimeConverterResolver.CreateMultirangeResolver>, NpgsqlRange>(options, + options.GetCanonicalTypeId(DataTypeNames.TsTzMultirange), + options.GetCanonicalTypeId(DataTypeNames.TsMultirange), + options.EnableDateTimeInfinityConversions), dataTypeNameMatch)); + } + mappings.AddType[]>(DataTypeNames.TsMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateArrayMultirangeConverter(CreateRangeConverter(new Int8Converter(), options), options))); + mappings.AddType>>(DataTypeNames.TsMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateListMultirangeConverter(CreateRangeConverter(new Int8Converter(), options), options))); + + // tstzmultirange + if (Statics.LegacyTimestampBehavior) + { + mappings.AddType[]>(DataTypeNames.TsTzMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateArrayMultirangeConverter( + CreateRangeConverter(new LegacyDateTimeConverter(options.EnableDateTimeInfinityConversions, timestamp: false), options), options)), + isDefault: true); + mappings.AddType>>(DataTypeNames.TsTzMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateListMultirangeConverter( + CreateRangeConverter(new LegacyDateTimeConverter(options.EnableDateTimeInfinityConversions, timestamp: false), options), options))); + mappings.AddType[]>(DataTypeNames.TsTzMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateArrayMultirangeConverter( + CreateRangeConverter(new 
LegacyDateTimeOffsetConverter(options.EnableDateTimeInfinityConversions), options), options)), + isDefault: true); + mappings.AddType>>(DataTypeNames.TsTzMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateListMultirangeConverter( + CreateRangeConverter(new LegacyDateTimeOffsetConverter(options.EnableDateTimeInfinityConversions), options), options))); + } + else + { + mappings.AddType[]>(DataTypeNames.TsTzMultirange, + static (options, mapping, dataTypeNameMatch) => mapping.CreateInfo(options, + DateTimeConverterResolver.CreateMultirangeResolver[], NpgsqlRange>(options, + options.GetCanonicalTypeId(DataTypeNames.TsTzMultirange), + options.GetCanonicalTypeId(DataTypeNames.TsMultirange), + options.EnableDateTimeInfinityConversions), dataTypeNameMatch), + isDefault: true); + mappings.AddType>>(DataTypeNames.TsTzMultirange, + static (options, mapping, dataTypeNameMatch) => mapping.CreateInfo(options, + DateTimeConverterResolver.CreateMultirangeResolver>, NpgsqlRange>(options, + options.GetCanonicalTypeId(DataTypeNames.TsTzMultirange), + options.GetCanonicalTypeId(DataTypeNames.TsMultirange), + options.EnableDateTimeInfinityConversions), dataTypeNameMatch)); + mappings.AddType[]>(DataTypeNames.TsTzMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateArrayMultirangeConverter( + CreateRangeConverter(new DateTimeOffsetConverter(options.EnableDateTimeInfinityConversions), options), options)), + isDefault: true); + mappings.AddType>>(DataTypeNames.TsTzMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateListMultirangeConverter( + CreateRangeConverter(new DateTimeOffsetConverter(options.EnableDateTimeInfinityConversions), options), options))); + } + mappings.AddType[]>(DataTypeNames.TsTzMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateArrayMultirangeConverter(CreateRangeConverter(new Int8Converter(), options), options))); + 
mappings.AddType>>(DataTypeNames.TsTzMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateListMultirangeConverter(CreateRangeConverter(new Int8Converter(), options), options))); + + // datemultirange + mappings.AddType[]>(DataTypeNames.DateMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateArrayMultirangeConverter( + CreateRangeConverter(new DateTimeDateConverter(options.EnableDateTimeInfinityConversions), options), options)), + isDefault: true); + mappings.AddType>>(DataTypeNames.DateMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateListMultirangeConverter( + CreateRangeConverter(new DateTimeDateConverter(options.EnableDateTimeInfinityConversions), options), options))); +#if NET6_0_OR_GREATER + mappings.AddType[]>(DataTypeNames.DateMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateArrayMultirangeConverter( + CreateRangeConverter(new DateOnlyDateConverter(options.EnableDateTimeInfinityConversions), options), options)), + isDefault: true); + mappings.AddType>>(DataTypeNames.DateMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateListMultirangeConverter( + CreateRangeConverter(new DateOnlyDateConverter(options.EnableDateTimeInfinityConversions), options), options))); +#endif + } + } + + protected static void AddArrayInfos(TypeInfoMappingCollection mappings, bool supportsMultiRange) + { + // numeric ranges + mappings.AddStructArrayType>(DataTypeNames.Int4Range); + mappings.AddStructArrayType>(DataTypeNames.Int8Range); + mappings.AddStructArrayType>(DataTypeNames.NumRange); + mappings.AddStructArrayType>(DataTypeNames.NumRange); + + // tsrange + if (Statics.LegacyTimestampBehavior) + mappings.AddStructArrayType>(DataTypeNames.TsRange); + else + mappings.AddResolverStructArrayType>(DataTypeNames.TsRange); + mappings.AddStructArrayType>(DataTypeNames.TsRange); + + // tstzrange + if (Statics.LegacyTimestampBehavior) + { 
+ mappings.AddStructArrayType>(DataTypeNames.TsTzRange); + mappings.AddStructArrayType>(DataTypeNames.TsTzRange); + } + else + { + mappings.AddResolverStructArrayType>(DataTypeNames.TsTzRange); + mappings.AddStructArrayType>(DataTypeNames.TsTzRange); + } + mappings.AddStructArrayType>(DataTypeNames.TsTzRange); + + // daterange + mappings.AddStructArrayType>(DataTypeNames.DateRange); + mappings.AddStructArrayType>(DataTypeNames.DateRange); +#if NET6_0_OR_GREATER + mappings.AddStructArrayType>(DataTypeNames.DateRange); +#endif + + if (supportsMultiRange) + { + // int4multirange + mappings.AddArrayType[]>(DataTypeNames.Int4Multirange); + mappings.AddArrayType>>(DataTypeNames.Int4Multirange); + + // int8multirange + mappings.AddArrayType[]>(DataTypeNames.Int8Multirange); + mappings.AddArrayType>>(DataTypeNames.Int8Multirange); + + // nummultirange + mappings.AddArrayType[]>(DataTypeNames.NumMultirange); + mappings.AddArrayType>>(DataTypeNames.NumMultirange); + + // tsmultirange + if (Statics.LegacyTimestampBehavior) + { + mappings.AddArrayType[]>(DataTypeNames.TsMultirange); + mappings.AddArrayType>>(DataTypeNames.TsMultirange); + } + else + { + mappings.AddResolverArrayType[]>(DataTypeNames.TsMultirange); + mappings.AddResolverArrayType>>(DataTypeNames.TsMultirange); + } + mappings.AddArrayType[]>(DataTypeNames.TsMultirange); + mappings.AddArrayType>>(DataTypeNames.TsMultirange); + + // tstzmultirange + if (Statics.LegacyTimestampBehavior) + { + mappings.AddArrayType[]>(DataTypeNames.TsTzMultirange); + mappings.AddArrayType>>(DataTypeNames.TsTzMultirange); + mappings.AddArrayType[]>(DataTypeNames.TsTzMultirange); + mappings.AddArrayType>>(DataTypeNames.TsTzMultirange); + } + else + { + mappings.AddResolverArrayType[]>(DataTypeNames.TsTzMultirange); + mappings.AddResolverArrayType>>(DataTypeNames.TsTzMultirange); + mappings.AddArrayType[]>(DataTypeNames.TsTzMultirange); + mappings.AddArrayType>>(DataTypeNames.TsTzMultirange); + } + 
mappings.AddArrayType[]>(DataTypeNames.TsTzMultirange); + mappings.AddArrayType>>(DataTypeNames.TsTzMultirange); + + // datemultirange + mappings.AddArrayType[]>(DataTypeNames.DateMultirange); + mappings.AddArrayType>>(DataTypeNames.DateMultirange); +#if NET6_0_OR_GREATER + mappings.AddArrayType[]>(DataTypeNames.DateMultirange); + mappings.AddArrayType>>(DataTypeNames.DateMultirange); +#endif + } + } + + public static void ThrowIfUnsupported(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + { + var kind = CheckUnsupported(type, dataTypeName, options); + switch (kind) + { + case PgTypeKind.Range when kind.Value.HasFlag(PgTypeKind.Array): + throw new NotSupportedException( + string.Format(NpgsqlStrings.RangeArraysNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableArrays), typeof(TBuilder).Name)); + case PgTypeKind.Range: + throw new NotSupportedException( + string.Format(NpgsqlStrings.RangesNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableRanges), typeof(TBuilder).Name)); + case PgTypeKind.Multirange when kind.Value.HasFlag(PgTypeKind.Array): + throw new NotSupportedException( + string.Format(NpgsqlStrings.MultirangeArraysNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableArrays), typeof(TBuilder).Name)); + case PgTypeKind.Multirange: + throw new NotSupportedException( + string.Format(NpgsqlStrings.MultirangesNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableMultiranges), typeof(TBuilder).Name)); + default: + return; + } + } + + public static PgTypeKind? CheckUnsupported(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + { + // Only trigger on well known data type names. + var npgsqlDbType = dataTypeName?.ToNpgsqlDbType(); + if (type != typeof(object)) + { + if (npgsqlDbType?.HasFlag(NpgsqlDbType.Range) != true && npgsqlDbType?.HasFlag(NpgsqlDbType.Multirange) != true) + return null; + + if (npgsqlDbType.Value.HasFlag(NpgsqlDbType.Range)) + return dataTypeName?.IsArray == true + ? 
PgTypeKind.Array | PgTypeKind.Range + : PgTypeKind.Range; + + return dataTypeName?.IsArray == true + ? PgTypeKind.Array | PgTypeKind.Multirange + : PgTypeKind.Multirange; + } + + if (type == typeof(object)) + return null; + + var isArray = false; + if (TypeInfoMappingCollection.IsArrayLikeType(type, out var elementType)) + { + type = elementType; + isArray = true; + } + + if (type is { IsConstructedGenericType: true } && type.GetGenericTypeDefinition() == typeof(Nullable<>)) + type = type.GetGenericArguments()[0]; + + if (type is { IsConstructedGenericType: true } && type.GetGenericTypeDefinition() == typeof(NpgsqlRange<>)) + { + type = type.GetGenericArguments()[0]; + var matchingArguments = + new[] + { + typeof(int), typeof(long), typeof(decimal), typeof(DateTime), +# if NET6_0_OR_GREATER + typeof(DateOnly) +#endif + }; + + // If we don't know more than the clr type, default to a Multirange kind over Array as they share the same types. + foreach (var argument in matchingArguments) + if (argument == type) + return isArray ? PgTypeKind.Multirange : PgTypeKind.Range; + + if (type.AssemblyQualifiedName == "System.Numerics.BigInteger,System.Runtime.Numerics") + return isArray ? PgTypeKind.Multirange : PgTypeKind.Range; + } + + return null; + } +} + +sealed class RangeArrayTypeInfoResolver : RangeTypeInfoResolver, IPgTypeInfoResolver +{ + new TypeInfoMappingCollection Mappings { get; } + new TypeInfoMappingCollection MappingsWithMultiRanges { get; } + + public RangeArrayTypeInfoResolver() + { + Mappings = new TypeInfoMappingCollection(base.Mappings); + AddArrayInfos(Mappings, supportsMultiRange: false); + MappingsWithMultiRanges = new TypeInfoMappingCollection(base.MappingsWithMultiRanges); + AddArrayInfos(MappingsWithMultiRanges, supportsMultiRange: true); + } + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => (options.DatabaseInfo.SupportsMultirangeTypes ? 
MappingsWithMultiRanges : Mappings).Find(type, dataTypeName, options); +} diff --git a/src/Npgsql/Internal/Resolvers/RecordTypeInfoResolvers.cs b/src/Npgsql/Internal/Resolvers/RecordTypeInfoResolvers.cs new file mode 100644 index 0000000000..f51aeac322 --- /dev/null +++ b/src/Npgsql/Internal/Resolvers/RecordTypeInfoResolvers.cs @@ -0,0 +1,137 @@ +using System; +using System.Diagnostics.CodeAnalysis; +using System.Reflection; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; +using Npgsql.Properties; + +namespace Npgsql.Internal.Resolvers; + +class RecordTypeInfoResolver : IPgTypeInfoResolver +{ + protected TypeInfoMappingCollection Mappings { get; } = new(); + public RecordTypeInfoResolver() => AddInfos(Mappings); + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static void AddInfos(TypeInfoMappingCollection mappings) + => mappings.AddType(DataTypeNames.Record, static (options, mapping, _) => + mapping.CreateInfo(options, new ObjectArrayRecordConverter(options), supportsWriting: false), + MatchRequirement.DataTypeName); + + protected static void AddArrayInfos(TypeInfoMappingCollection mappings) + => mappings.AddArrayType(DataTypeNames.Record); + + public static void CheckUnsupported(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + { + if (type != typeof(object) && dataTypeName == DataTypeNames.Record) + { + throw new NotSupportedException( + string.Format(NpgsqlStrings.RecordsNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableRecords), typeof(TBuilder).Name)); + } + } +} + +sealed class RecordArrayTypeInfoResolver : RecordTypeInfoResolver, IPgTypeInfoResolver +{ + public RecordArrayTypeInfoResolver() + { + Mappings = new TypeInfoMappingCollection(base.Mappings.Items); + AddArrayInfos(Mappings); + } + + new TypeInfoMappingCollection Mappings { get; } + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); +} + +[RequiresUnreferencedCode("Tupled record resolver may perform reflection on trimmed tuple types.")] +[RequiresDynamicCode("Tupled records need to construct a generic converter for a statically unknown (value)tuple type.")] +class TupledRecordTypeInfoResolver : IPgTypeInfoResolver +{ + protected TypeInfoMappingCollection Mappings { get; } = new(); + public TupledRecordTypeInfoResolver() => AddInfos(Mappings); + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + // Stand-in type, type match predicate does the actual work. + static void AddInfos(TypeInfoMappingCollection mappings) + { + mappings.AddType>(DataTypeNames.Record, Factory, + mapping => mapping with + { + MatchRequirement = MatchRequirement.DataTypeName, + TypeMatchPredicate = type => type is null || (type is { IsConstructedGenericType: true, FullName: not null } + && type.FullName.StartsWith("System.Tuple", StringComparison.Ordinal)) + }); + + mappings.AddStructType>(DataTypeNames.Record, Factory, + mapping => mapping with + { + MatchRequirement = MatchRequirement.DataTypeName, + TypeMatchPredicate = type => type is null || (type is { IsConstructedGenericType: true, FullName: not null } + && type.FullName.StartsWith("System.ValueTuple", StringComparison.Ordinal)) + }); + } + + protected static void AddArrayInfos(TypeInfoMappingCollection mappings) + { + mappings.AddArrayType>(DataTypeNames.Record); + mappings.AddStructArrayType>(DataTypeNames.Record); + } + + static readonly TypeInfoFactory Factory = static (options, mapping, _) => + { + var constructors = mapping.Type.GetConstructors(); + ConstructorInfo? 
constructor = null; + if (constructors.Length is 1) + constructor = constructors[0]; + else + { + var args = mapping.Type.GenericTypeArguments.Length; + foreach (var ctor in constructors) + if (ctor.GetParameters().Length == args) + { + constructor = ctor; + break; + } + } + + if (constructor is null) + throw new InvalidOperationException($"Couldn't find a suitable constructor for record type: {mapping.Type.FullName}"); + + var factory = typeof(TupledRecordTypeInfoResolver).GetMethod(nameof(CreateFactory), BindingFlags.Static | BindingFlags.NonPublic)! + .MakeGenericMethod(mapping.Type) + .Invoke(null, new object[] { constructor, constructor.GetParameters().Length }); + + var converterType = typeof(ObjectArrayRecordConverter<>).MakeGenericType(mapping.Type); + var converter = (PgConverter)Activator.CreateInstance(converterType, options, factory)!; + return mapping.CreateInfo(options, converter, supportsWriting: false); + }; + + static Func CreateFactory(ConstructorInfo constructor, int constructorParameters) => array => + { + if (array.Length != constructorParameters) + throw new InvalidCastException($"Cannot read record type with {array.Length} fields as {typeof(T)}"); + return (T)constructor.Invoke(array); + }; +} + +[RequiresUnreferencedCode("Tupled record resolver may perform reflection on trimmed tuple types.")] +[RequiresDynamicCode("Tupled records need to construct a generic converter for a statically unknown (value)tuple type.")] +sealed class TupledRecordArrayTypeInfoResolver : TupledRecordTypeInfoResolver, IPgTypeInfoResolver +{ + public TupledRecordArrayTypeInfoResolver() + { + Mappings = new TypeInfoMappingCollection(base.Mappings.Items); + AddArrayInfos(Mappings); + } + + new TypeInfoMappingCollection Mappings { get; } + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); +} diff --git a/src/Npgsql/Internal/Resolvers/SystemTextJsonPocoTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/SystemTextJsonPocoTypeInfoResolver.cs new file mode 100644 index 0000000000..e513b29a86 --- /dev/null +++ b/src/Npgsql/Internal/Resolvers/SystemTextJsonPocoTypeInfoResolver.cs @@ -0,0 +1,123 @@ +using System; +using System.Diagnostics.CodeAnalysis; +using System.Text; +using System.Text.Json; +using System.Text.Json.Serialization.Metadata; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Internal.Resolvers; + +[RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] +[RequiresDynamicCode("Serializing arbitary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] +class SystemTextJsonPocoTypeInfoResolver : DynamicTypeInfoResolver, IPgTypeInfoResolver +{ + protected TypeInfoMappingCollection Mappings { get; } = new(); + protected JsonSerializerOptions _serializerOptions; + + public SystemTextJsonPocoTypeInfoResolver(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerOptions? serializerOptions = null) + { +#if NET7_0_OR_GREATER + _serializerOptions = serializerOptions ??= JsonSerializerOptions.Default; +#else + _serializerOptions = serializerOptions ??= new JsonSerializerOptions(); +#endif + + AddMappings(Mappings, jsonbClrTypes ?? Array.Empty(), jsonClrTypes ?? Array.Empty(), serializerOptions); + } + + void AddMappings(TypeInfoMappingCollection mappings, Type[] jsonbClrTypes, Type[] jsonClrTypes, JsonSerializerOptions serializerOptions) + { + // We do GetTypeInfo calls directly so we need a resolver. 
+ serializerOptions.TypeInfoResolver ??= new DefaultJsonTypeInfoResolver(); + + AddUserMappings(jsonb: true, jsonbClrTypes); + AddUserMappings(jsonb: false, jsonClrTypes); + + void AddUserMappings(bool jsonb, Type[] clrTypes) + { + var dynamicMappings = CreateCollection(); + var dataTypeName = (string)(jsonb ? DataTypeNames.Jsonb : DataTypeNames.Json); + foreach (var jsonType in clrTypes) + { + var jsonTypeInfo = serializerOptions.GetTypeInfo(jsonType); + dynamicMappings.AddMapping(jsonTypeInfo.Type, dataTypeName, + factory: (options, mapping, _) => mapping.CreateInfo(options, + CreateSystemTextJsonConverter(mapping.Type, jsonb, options.TextEncoding, serializerOptions, jsonType))); + + if (!jsonType.IsValueType && jsonTypeInfo.PolymorphismOptions is not null) + { + foreach (var derived in jsonTypeInfo.PolymorphismOptions.DerivedTypes) + dynamicMappings.AddMapping(derived.DerivedType, dataTypeName, + factory: (options, mapping, _) => mapping.CreateInfo(options, + CreateSystemTextJsonConverter(mapping.Type, jsonb, options.TextEncoding, serializerOptions, jsonType))); + } + } + mappings.AddRange(dynamicMappings.ToTypeInfoMappingCollection()); + } + } + + protected void AddArrayInfos(TypeInfoMappingCollection mappings, TypeInfoMappingCollection baseMappings) + { + if (baseMappings.Items.Count == 0) + return; + + var dynamicMappings = CreateCollection(baseMappings); + foreach (var mapping in baseMappings.Items) + dynamicMappings.AddArrayMapping(mapping.Type, mapping.DataTypeName); + mappings.AddRange(dynamicMappings.ToTypeInfoMappingCollection()); + } + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options) ?? base.GetTypeInfo(type, dataTypeName, options); + + protected override DynamicMappingCollection? GetMappings(Type? 
type, DataTypeName dataTypeName, PgSerializerOptions options) + { + // Match all types except null, object and text types as long as DataTypeName (json/jsonb) is present. + if (type is null || type == typeof(object) || Array.IndexOf(PgSerializerOptions.WellKnownTextTypes, type) != -1 + || dataTypeName != DataTypeNames.Jsonb && dataTypeName != DataTypeNames.Json) + return null; + + return CreateCollection().AddMapping(type, dataTypeName, (options, mapping, _) => + { + var jsonb = dataTypeName == DataTypeNames.Jsonb; + + // For jsonb we can't properly support polymorphic serialization unless we do quite some additional work + // so we default to mapping.Type instead (exact types will never serialize their "$type" fields, essentially disabling the feature). + var baseType = jsonb ? mapping.Type : typeof(object); + + return mapping.CreateInfo(options, + CreateSystemTextJsonConverter(mapping.Type, jsonb, options.TextEncoding, _serializerOptions, baseType)); + }); + } + + static PgConverter CreateSystemTextJsonConverter(Type valueType, bool jsonb, Encoding textEncoding, JsonSerializerOptions serializerOptions, Type baseType) + => (PgConverter)Activator.CreateInstance( + typeof(SystemTextJsonConverter<,>).MakeGenericType(valueType, baseType), + jsonb, + textEncoding, + serializerOptions + )!; +} + +[RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] +[RequiresDynamicCode("Serializing arbitary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] +sealed class SystemTextJsonPocoArrayTypeInfoResolver : SystemTextJsonPocoTypeInfoResolver, IPgTypeInfoResolver +{ + new TypeInfoMappingCollection Mappings { get; } + + public SystemTextJsonPocoArrayTypeInfoResolver(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerOptions? 
serializerOptions = null) + : base(jsonbClrTypes, jsonClrTypes, serializerOptions) + { + Mappings = new TypeInfoMappingCollection(base.Mappings); + AddArrayInfos(Mappings, base.Mappings); + } + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options) ?? base.GetTypeInfo(type, dataTypeName, options); + + protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) + => type is not null && IsArrayLikeType(type, out var elementType) && IsArrayDataTypeName(dataTypeName, options, out var elementDataTypeName) + ? base.GetMappings(elementType, elementDataTypeName, options)?.AddArrayMapping(elementType, elementDataTypeName) + : null; +} diff --git a/src/Npgsql/Internal/Resolvers/SystemTextJsonTypeInfoResolvers.cs b/src/Npgsql/Internal/Resolvers/SystemTextJsonTypeInfoResolvers.cs new file mode 100644 index 0000000000..5650906ecb --- /dev/null +++ b/src/Npgsql/Internal/Resolvers/SystemTextJsonTypeInfoResolvers.cs @@ -0,0 +1,70 @@ +using System; +using System.Text.Json; +using System.Text.Json.Nodes; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Internal.Resolvers; + +class SystemTextJsonTypeInfoResolver : IPgTypeInfoResolver +{ + protected TypeInfoMappingCollection Mappings { get; } = new(); + + public SystemTextJsonTypeInfoResolver(JsonSerializerOptions? serializerOptions = null) + => AddTypeInfos(Mappings, serializerOptions); + + static void AddTypeInfos(TypeInfoMappingCollection mappings, JsonSerializerOptions? 
serializerOptions = null) + { +#if NET7_0_OR_GREATER + serializerOptions ??= JsonSerializerOptions.Default; +#else + serializerOptions ??= new JsonSerializerOptions(); +#endif + + // Jsonb is the first default for JsonDocument + foreach (var dataTypeName in new[] { DataTypeNames.Jsonb, DataTypeNames.Json }) + { + var jsonb = dataTypeName == DataTypeNames.Jsonb; + mappings.AddType(dataTypeName, (options, mapping, _) => + mapping.CreateInfo(options, new SystemTextJsonConverter(jsonb, options.TextEncoding, serializerOptions)), + isDefault: true); + mappings.AddType(dataTypeName, (options, mapping, _) => + mapping.CreateInfo(options, new SystemTextJsonConverter(jsonb, options.TextEncoding, serializerOptions))); + mappings.AddType(dataTypeName, (options, mapping, _) => + mapping.CreateInfo(options, new SystemTextJsonConverter(jsonb, options.TextEncoding, serializerOptions))); + mappings.AddType(dataTypeName, (options, mapping, _) => + mapping.CreateInfo(options, new SystemTextJsonConverter(jsonb, options.TextEncoding, serializerOptions))); + mappings.AddType(dataTypeName, (options, mapping, _) => + mapping.CreateInfo(options, new SystemTextJsonConverter(jsonb, options.TextEncoding, serializerOptions))); + } + } + + protected static void AddArrayInfos(TypeInfoMappingCollection mappings) + { + foreach (var dataTypeName in new[] { DataTypeNames.Jsonb, DataTypeNames.Json }) + { + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + } + } + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); +} + +sealed class SystemTextJsonArrayTypeInfoResolver : SystemTextJsonTypeInfoResolver, IPgTypeInfoResolver +{ + new TypeInfoMappingCollection Mappings { get; } + + public SystemTextJsonArrayTypeInfoResolver(JsonSerializerOptions? 
serializerOptions = null) : base(serializerOptions) + { + Mappings = new TypeInfoMappingCollection(base.Mappings); + AddArrayInfos(Mappings); + } + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); +} diff --git a/src/Npgsql/Internal/Resolvers/UnmappedEnumTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/UnmappedEnumTypeInfoResolver.cs new file mode 100644 index 0000000000..b6ab437255 --- /dev/null +++ b/src/Npgsql/Internal/Resolvers/UnmappedEnumTypeInfoResolver.cs @@ -0,0 +1,51 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; +using System.Linq; +using System.Reflection; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; +using Npgsql.PostgresTypes; +using NpgsqlTypes; + +namespace Npgsql.Internal.Resolvers; + +[RequiresUnreferencedCode("Unmapped enum resolver may perform reflection on types with fields that were trimmed if not referenced directly.")] +[RequiresDynamicCode("Unmapped enums need to construct a generic converter for a statically unknown enum type.")] +class UnmappedEnumTypeInfoResolver : DynamicTypeInfoResolver +{ + protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) + { + if (type is null || !IsTypeOrNullableOfType(type, static type => type.IsEnum, out var matchedType) || options.DatabaseInfo.GetPostgresType(dataTypeName) is not PostgresEnumType) + return null; + + return CreateCollection().AddMapping(matchedType, dataTypeName, static (options, mapping, _) => + { + var enumToLabel = new Dictionary(); + var labelToEnum = new Dictionary(); + foreach (var field in mapping.Type.GetFields(BindingFlags.Static | BindingFlags.Public)) + { + var attribute = (PgNameAttribute?)field.GetCustomAttributes(typeof(PgNameAttribute), false).FirstOrDefault(); + var enumName = attribute?.PgName ?? 
options.DefaultNameTranslator.TranslateMemberName(field.Name); + var enumValue = (Enum)field.GetValue(null)!; + + enumToLabel[enumValue] = enumName; + labelToEnum[enumName] = enumValue; + } + + return mapping.CreateInfo(options, (PgConverter)Activator.CreateInstance(typeof(EnumConverter<>).MakeGenericType(mapping.Type), + enumToLabel, labelToEnum, + options.TextEncoding)!); + }); + } +} + +[RequiresUnreferencedCode("Unmapped enum resolver may perform reflection on types with fields that were trimmed if not referenced directly.")] +[RequiresDynamicCode("Unmapped enums need to construct a generic converter for a statically unknown enum type")] +sealed class UnmappedEnumArrayTypeInfoResolver : UnmappedEnumTypeInfoResolver +{ + protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) + => type is not null && IsArrayLikeType(type, out var elementType) && IsArrayDataTypeName(dataTypeName, options, out var elementDataTypeName) + ? 
base.GetMappings(elementType, elementDataTypeName, options)?.AddArrayMapping(elementType, elementDataTypeName) + : null; +} diff --git a/src/Npgsql/Internal/Resolvers/UnmappedMultirangeTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/UnmappedMultirangeTypeInfoResolver.cs new file mode 100644 index 0000000000..d18b1421db --- /dev/null +++ b/src/Npgsql/Internal/Resolvers/UnmappedMultirangeTypeInfoResolver.cs @@ -0,0 +1,59 @@ +using System; +using System.Diagnostics.CodeAnalysis; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; +using Npgsql.PostgresTypes; +using NpgsqlTypes; + +namespace Npgsql.Internal.Resolvers; + +[RequiresUnreferencedCode("A dynamic type info resolver may perform reflection on types that were trimmed if not referenced directly.")] +[RequiresDynamicCode("A dynamic type info resolver may need to construct a generic converter for a statically unknown type.")] +class UnmappedMultirangeTypeInfoResolver : DynamicTypeInfoResolver +{ + protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) + { + Type? elementType = null; + if (type is not null && !IsArrayLikeType(type, out elementType) + || elementType is not null && !IsTypeOrNullableOfType(elementType, + static type => type.IsConstructedGenericType && type.GetGenericTypeDefinition() == typeof(NpgsqlRange<>), out _) + || options.DatabaseInfo.GetPostgresType(dataTypeName) is not PostgresMultirangeType multirangeType) + return null; + + var subInfo = + elementType is null + ? options.GetDefaultTypeInfo(multirangeType.Subrange) + : options.GetTypeInfo(elementType, multirangeType.Subrange); + + // We have no generic MultirangeConverterResolver so we would not know how to compose a range mapping for such infos. 
+ // See https://github.com/npgsql/npgsql/issues/5268 + if (subInfo is not { IsResolverInfo: false }) + return null; + + subInfo = subInfo.ToNonBoxing(); + + type ??= subInfo.Type.MakeArrayType(); + + return CreateCollection().AddMapping(type, dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, + (PgConverter)Activator.CreateInstance(typeof(MultirangeConverter<,>).MakeGenericType(type, subInfo.Type), subInfo.GetConcreteResolution().Converter)!, + preferredFormat: subInfo.PreferredFormat, supportsWriting: subInfo.SupportsWriting), + mapping => mapping with { MatchRequirement = MatchRequirement.Single }); + } +} + +[RequiresUnreferencedCode("A dynamic type info resolver may perform reflection on types that were trimmed if not referenced directly.")] +[RequiresDynamicCode("A dynamic type info resolver may need to construct a generic converter for a statically unknown type.")] +sealed class UnmappedMultirangeArrayTypeInfoResolver : UnmappedMultirangeTypeInfoResolver +{ + protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) + { + Type? elementType = null; + if (!((type is null || IsArrayLikeType(type, out elementType)) && IsArrayDataTypeName(dataTypeName, options, out var elementDataTypeName))) + return null; + + var mappings = base.GetMappings(elementType, elementDataTypeName, options); + elementType ??= mappings?.Find(null, elementDataTypeName, options)?.Type; // Try to get the default mapping. + return elementType is null ? 
null : mappings?.AddArrayMapping(elementType, elementDataTypeName); + } +} diff --git a/src/Npgsql/Internal/Resolvers/UnmappedRangeTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/UnmappedRangeTypeInfoResolver.cs new file mode 100644 index 0000000000..9e9ba0fb7d --- /dev/null +++ b/src/Npgsql/Internal/Resolvers/UnmappedRangeTypeInfoResolver.cs @@ -0,0 +1,59 @@ +using System; +using System.Diagnostics.CodeAnalysis; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; +using Npgsql.PostgresTypes; +using NpgsqlTypes; + +namespace Npgsql.Internal.Resolvers; + +[RequiresUnreferencedCode("A dynamic type info resolver may perform reflection on types that were trimmed if not referenced directly.")] +[RequiresDynamicCode("A dynamic type info resolver may need to construct a generic converter for a statically unknown type.")] +class UnmappedRangeTypeInfoResolver : DynamicTypeInfoResolver +{ + protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) + { + var matchedType = type; + if (type is not null && !IsTypeOrNullableOfType(type, + static type => type.IsConstructedGenericType && type.GetGenericTypeDefinition() == typeof(NpgsqlRange<>), out matchedType) + || options.DatabaseInfo.GetPostgresType(dataTypeName) is not PostgresRangeType rangeType) + return null; + + var subInfo = + matchedType is null + ? options.GetDefaultTypeInfo(rangeType.Subtype) + // Input matchedType here as we don't want an NpgsqlRange over Nullable (it has its own nullability tracking, for better or worse) + : options.GetTypeInfo(matchedType.GetGenericArguments()[0], rangeType.Subtype); + + // We have no generic RangeConverterResolver so we would not know how to compose a range mapping for such infos. 
+ // See https://github.com/npgsql/npgsql/issues/5268 + if (subInfo is not { IsResolverInfo: false }) + return null; + + subInfo = subInfo.ToNonBoxing(); + + matchedType ??= typeof(NpgsqlRange<>).MakeGenericType(subInfo.Type); + + return CreateCollection().AddMapping(matchedType, dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, + (PgConverter)Activator.CreateInstance(typeof(RangeConverter<>).MakeGenericType(subInfo.Type), subInfo.GetConcreteResolution().Converter)!, + preferredFormat: subInfo.PreferredFormat, supportsWriting: subInfo.SupportsWriting), + mapping => mapping with { MatchRequirement = MatchRequirement.Single }); + } +} + +[RequiresUnreferencedCode("A dynamic type info resolver may perform reflection on types that were trimmed if not referenced directly.")] +[RequiresDynamicCode("A dynamic type info resolver may need to construct a generic converter for a statically unknown type.")] +sealed class UnmappedRangeArrayTypeInfoResolver : UnmappedRangeTypeInfoResolver +{ + protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) + { + Type? elementType = null; + if (!((type is null || IsArrayLikeType(type, out elementType)) && IsArrayDataTypeName(dataTypeName, options, out var elementDataTypeName))) + return null; + + var mappings = base.GetMappings(elementType, elementDataTypeName, options); + elementType ??= mappings?.Find(null, elementDataTypeName, options)?.Type; // Try to get the default mapping. + return elementType is null ? 
null : mappings?.AddArrayMapping(elementType, elementDataTypeName); + } +} diff --git a/src/Npgsql/Internal/Resolvers/UnsupportedTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/UnsupportedTypeInfoResolver.cs new file mode 100644 index 0000000000..845816f7a7 --- /dev/null +++ b/src/Npgsql/Internal/Resolvers/UnsupportedTypeInfoResolver.cs @@ -0,0 +1,34 @@ +using System; +using System.Collections; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Internal.Resolvers; + +sealed class UnsupportedTypeInfoResolver : IPgTypeInfoResolver +{ + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + { + if (options.IntrospectionMode) + return null; + + RecordTypeInfoResolver.CheckUnsupported(type, dataTypeName, options); + RangeTypeInfoResolver.ThrowIfUnsupported(type, dataTypeName, options); + FullTextSearchTypeInfoResolver.CheckUnsupported(type, dataTypeName, options); + LTreeTypeInfoResolver.CheckUnsupported(type, dataTypeName, options); + + if (type is null) + return null; + + if (TypeInfoMappingCollection.IsArrayLikeType(type, out var elementType) && TypeInfoMappingCollection.IsArrayLikeType(elementType, out _)) + throw new NotSupportedException("Writing is not supported for jagged collections, use a multidimensional array instead."); + + if (typeof(IEnumerable).IsAssignableFrom(type) && !typeof(IList).IsAssignableFrom(type) && type != typeof(string) && (dataTypeName is null || dataTypeName.Value.IsArray)) + throw new NotSupportedException("Writing is not supported for IEnumerable parameters, use an array or List instead."); + + // TODO bring back json help message. + // $"Can't write CLR type {value.GetType()}. " + + // "You may need to use the System.Text.Json or Json.NET plugins, see the docs for more information." 
+ + return null; + } +} diff --git a/src/Npgsql/Internal/Size.cs b/src/Npgsql/Internal/Size.cs new file mode 100644 index 0000000000..f239453015 --- /dev/null +++ b/src/Npgsql/Internal/Size.cs @@ -0,0 +1,70 @@ +using System; +using System.Diagnostics; + +namespace Npgsql.Internal; + +public enum SizeKind : byte +{ + Unknown = 0, + Exact, + UpperBound +} + +[DebuggerDisplay("{DebuggerDisplay,nq}")] +public readonly struct Size : IEquatable +{ + readonly int _value; + readonly SizeKind _kind; + + Size(SizeKind kind, int value) + { + _value = value; + _kind = kind; + } + + public int Value + { + get + { + if (_kind is SizeKind.Unknown) + ThrowHelper.ThrowInvalidOperationException("Cannot get value from default or Unknown kind"); + return _value; + } + } + + internal int GetValueOrDefault() => _value; + + public SizeKind Kind => _kind; + + public static Size Create(int byteCount) => new(SizeKind.Exact, byteCount); + public static Size CreateUpperBound(int byteCount) => new(SizeKind.UpperBound, byteCount); + public static Size Unknown { get; } = new(SizeKind.Unknown, 0); + public static Size Zero { get; } = new(SizeKind.Exact, 0); + + public Size Combine(Size result) + { + if (_kind is SizeKind.Unknown || result._kind is SizeKind.Unknown) + return Unknown; + + if (_kind is SizeKind.UpperBound || result._kind is SizeKind.UpperBound) + return CreateUpperBound((int)Math.Min((long)(_value + result._value), int.MaxValue)); + + return Create((int)Math.Min((long)(_value + result._value), int.MaxValue)); + } + + public static implicit operator Size(int value) => Create(value); + + string DebuggerDisplay + => _kind switch + { + SizeKind.Exact or SizeKind.UpperBound => $"{_value} ({_kind})", + SizeKind.Unknown => "Unknown", + _ => throw new ArgumentOutOfRangeException() + }; + + public bool Equals(Size other) => _value == other._value && _kind == other.Kind; + public override bool Equals(object? 
obj) => obj is Size other && Equals(other); + public override int GetHashCode() => HashCode.Combine(_value, (int)_kind); + public static bool operator ==(Size left, Size right) => left.Equals(right); + public static bool operator !=(Size left, Size right) => !left.Equals(right); +} diff --git a/src/Npgsql/Internal/TypeHandlers/ArrayHandler.cs b/src/Npgsql/Internal/TypeHandlers/ArrayHandler.cs deleted file mode 100644 index bc1f100322..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/ArrayHandler.cs +++ /dev/null @@ -1,610 +0,0 @@ -using System; -using System.Collections; -using System.Collections.Concurrent; -using System.Collections.Generic; -using System.Diagnostics; -using System.Diagnostics.CodeAnalysis; -using System.Runtime.CompilerServices; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers; - -/// -/// Non-generic base class for all type handlers which handle PostgreSQL arrays. -/// -/// -/// https://www.postgresql.org/docs/current/static/arrays.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. 
-/// -public class ArrayHandler : NpgsqlTypeHandler -{ - readonly Type _defaultArrayType; - readonly ConcurrentDictionary _concreteHandlers = new(); - protected int LowerBound { get; } - protected NpgsqlTypeHandler ElementHandler { get; } - protected ArrayNullabilityMode ArrayNullabilityMode { get; } - - public ArrayHandler(PostgresType arrayPostgresType, NpgsqlTypeHandler elementHandler, ArrayNullabilityMode arrayNullabilityMode, int lowerBound = 1) : base(arrayPostgresType) - { - LowerBound = lowerBound; - ElementHandler = elementHandler; - ArrayNullabilityMode = arrayNullabilityMode; - _defaultArrayType = elementHandler.GetFieldType().MakeArrayType(); - } - - public override Type GetFieldType(FieldDescription? fieldDescription = null) => typeof(Array); - - /// - public override NpgsqlTypeHandler CreateArrayHandler(PostgresArrayType pgArrayType, ArrayNullabilityMode arrayNullabilityMode) - => throw new NotSupportedException(); - - /// - public override NpgsqlTypeHandler CreateRangeHandler(PostgresType pgRangeType) - => throw new NotSupportedException(); - - /// - public override NpgsqlTypeHandler CreateMultirangeHandler(PostgresMultirangeType pgMultirangeType) - => throw new NotSupportedException(); - - ArrayHandlerCore CreateHandler(Type elementType) - => (ArrayHandlerCore)Activator.CreateInstance(typeof(ArrayHandlerCore<>).MakeGenericType(elementType), ElementHandler, ArrayNullabilityMode, LowerBound)!; - - /// - protected internal override async ValueTask ReadCustom(NpgsqlReadBuffer buf, int len, bool async, - FieldDescription? fieldDescription) - { - return (TArray)await ReadGenericAsObject(buf, async, fieldDescription); - - // Sync helper to keep the code size cost of ReadCustom low. - ValueTask ReadGenericAsObject(NpgsqlReadBuffer buf, bool async, FieldDescription? 
fieldDescription) - { - if (ArrayTypeInfo.IsArray) - return GetOrAddHandler().ReadArray(buf, async, ArrayTypeInfo.ArrayRank); - - if (ListTypeInfo.IsList) - return GetOrAddHandler().ReadList(buf, async); - - throw new InvalidCastException(fieldDescription == null - ? $"Can't cast database type to {typeof(TArray).Name}" - : $"Can't cast database type {fieldDescription.Handler.PgDisplayName} to {typeof(TArray).Name}" - ); - } - } - - /// - public override ValueTask ReadAsObject(NpgsqlReadBuffer buf, int len, bool async, - FieldDescription? fieldDescription = null) - => ReadAsObject(ElementHandler.GetFieldType(), buf, len, async, fieldDescription); - - protected async ValueTask ReadAsObject(Type elementType, NpgsqlReadBuffer buf, int len, bool async, - FieldDescription? fieldDescription = null) - { - if (!elementType.IsValueType || ArrayNullabilityMode is ArrayNullabilityMode.Never) - return await GetOrAddObjectHandler(elementType).ReadArrayAsObject(buf, async); - - if (ArrayNullabilityMode is ArrayNullabilityMode.Always) - return await GetOrAddObjectHandler(typeof(Nullable<>).MakeGenericType(elementType)).ReadArrayAsObject(buf, async); - - // We need to peek at the data to call into the right handler. - await buf.Ensure(sizeof(int) * 2, async); - var origPos = buf.ReadPosition; - var _ = buf.ReadInt32(); - var containsNulls = buf.ReadInt32() == 1; - buf.ReadPosition = origPos; - - return containsNulls - ? await GetOrAddObjectHandler(typeof(Nullable<>).MakeGenericType(elementType)).ReadArrayAsObject(buf, async) - : await GetOrAddObjectHandler(elementType).ReadArrayAsObject(buf, async); - } - - ArrayHandlerCore GetOrAddObjectHandler(Type elementType) - { - var arrayType = - elementType == ElementHandler.GetFieldType() - ? 
_defaultArrayType - : elementType.MakeArrayType(); - - return _concreteHandlers.GetOrAdd(arrayType, - static (t, instance) => instance.CreateHandler(t.GetElementType()!), this); - } - - /// - public override int ValidateObjectAndGetLength(object value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => GetOrAddObjectHandler(ElementHandler.GetFieldType()).ValidateAndGetElementLength(value, ref lengthCache); - - /// - protected internal override int ValidateAndGetLengthCustom([DisallowNull] TArray value, ref NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter) - => GetOrAddHandler().ValidateAndGetElementLength(value, ref lengthCache); - - /// - public override Task WriteObjectWithLength(object? value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, bool async, - CancellationToken cancellationToken = default) - { - if (value is null or DBNull) - { - buf.WriteInt32(-1); - return Task.CompletedTask; - } - return GetOrAddObjectHandler(ElementHandler.GetFieldType()).WriteElementWithLength(value, buf, lengthCache, async, cancellationToken); - } - - protected override Task WriteWithLengthCustom([DisallowNull]TArray value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, - CancellationToken cancellationToken) - => GetOrAddHandler().WriteElementWithLength(value, buf, lengthCache, async, cancellationToken); - - private protected ArrayHandlerCore GetOrAddHandler() - => _concreteHandlers.GetOrAdd(typeof(TArray), static (_, instance) => - { - if (ArrayTypeInfo.IsArray) - return instance.CreateHandler(ArrayTypeInfo.ElementType); - - if (ListTypeInfo.IsList) - return instance.CreateHandler(ListTypeInfo.ElementType); - - return null!; - }, this); - - static class ArrayTypeInfo - { - // ReSharper disable StaticMemberInGenericType - public static readonly Type? ElementType = typeof(TArray).IsArray ? 
typeof(TArray).GetElementType() : null; - public static readonly int ArrayRank = ElementType is not null ? typeof(TArray).GetArrayRank() : 0; - // ReSharper restore StaticMemberInGenericType - - [MemberNotNullWhen(true, nameof(ElementType))] - public static bool IsArray => ElementType is not null; - } - - static class ListTypeInfo - { - // ReSharper disable StaticMemberInGenericType - public static readonly Type? ElementType = typeof(TList).IsGenericType && typeof(TList).GetGenericTypeDefinition() == typeof(List<>) ? typeof(TList).GetGenericArguments()[0] : null; - // ReSharper restore StaticMemberInGenericType - - [MemberNotNullWhen(true, nameof(ElementType))] - public static bool IsList => ElementType is not null; - } -} - -abstract class ArrayHandlerCore -{ - internal const string ReadNonNullableCollectionWithNullsExceptionMessage = - "Cannot read a non-nullable collection of elements because the returned array contains nulls. " + - "Call GetFieldValue with a nullable array instead."; - - readonly int _lowerBound; - public ArrayNullabilityMode ArrayNullabilityMode { get; } - - protected ArrayHandlerCore(ArrayNullabilityMode arrayNullabilityMode, int lowerBound = 1) - { - ArrayNullabilityMode = arrayNullabilityMode; - _lowerBound = lowerBound; - } - - public ValueTask ReadArray(NpgsqlReadBuffer buf, bool async, int expectedDimensions = 0) - => ReadArray(buf, async, expectedDimensions, readAsObject: false); - - public ValueTask ReadArrayAsObject(NpgsqlReadBuffer buf, bool async, int expectedDimensions = 0) - => ReadArray(buf, async, expectedDimensions, readAsObject: true); - - protected abstract Type ElementType { get; } - protected abstract bool IsNonNullable { get; } - protected abstract bool IsGenericCollection(object value, out int count); - protected abstract NpgsqlTypeHandler ElementHandler { get; } - protected abstract object CreateCollection(bool isArray, int capacity); - protected abstract ValueTask ReadElement(bool isArray, object values, int index, 
NpgsqlReadBuffer buf, int length, bool async, - FieldDescription? fieldDescription = null); - protected abstract ValueTask ReadElement(Array array, int[] indices, NpgsqlReadBuffer buf, int length, bool async, - FieldDescription? fieldDescription = null); - protected abstract int ValidateAndGetElementLength(bool isArray, object values, int index, ref NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter); - protected abstract ValueTask WriteElementWithLength(bool isArray, object values, int index, NpgsqlWriteBuffer buf, - NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken); - - /// - /// Reads an array of element type from the given buffer . - /// - async ValueTask ReadArray(NpgsqlReadBuffer buf, bool async, int expectedDimensions, bool readAsObject) - { - await buf.Ensure(12, async); - var dimensions = buf.ReadInt32(); - var containsNulls = buf.ReadInt32() == 1; - buf.ReadUInt32(); // Element OID. Ignored. - - var nullableElementType = IsNonNullable - ? typeof(Nullable<>).MakeGenericType(ElementType) - : ElementType; - - var returnType = readAsObject - ? ArrayNullabilityMode switch - { - ArrayNullabilityMode.Never => IsNonNullable && containsNulls - ? throw new InvalidOperationException(ReadNonNullableCollectionWithNullsExceptionMessage) - : ElementType, - ArrayNullabilityMode.Always => nullableElementType, - ArrayNullabilityMode.PerInstance => containsNulls - ? nullableElementType - : ElementType, - _ => throw new ArgumentOutOfRangeException() - } - : IsNonNullable && containsNulls - ? throw new InvalidOperationException(ReadNonNullableCollectionWithNullsExceptionMessage) - : ElementType; - - if (dimensions == 0) - return expectedDimensions > 1 - ? 
Array.CreateInstance(returnType, new int[expectedDimensions]) - : CreateCollection(isArray: true, 0); - - if (expectedDimensions > 0 && dimensions != expectedDimensions) - throw new InvalidOperationException($"Cannot read an array with {expectedDimensions} dimension(s) from an array with {dimensions} dimension(s)"); - - if (dimensions == 1 && returnType == ElementType) - { - await buf.Ensure(8, async); - var arrayLength = buf.ReadInt32(); - - buf.ReadInt32(); // Lower bound - - var oneDimensional = CreateCollection(isArray: true, arrayLength); - for (var i = 0; i < arrayLength; i++) - { - await buf.Ensure(4, async); - var len = buf.ReadInt32(); - await ReadElement(isArray: true, oneDimensional, i, buf, len, async); - } - return oneDimensional; - } - - var dimLengths = new int[dimensions]; - await buf.Ensure(dimensions * 8, async); - - for (var i = 0; i < dimLengths.Length; i++) - { - dimLengths[i] = buf.ReadInt32(); - buf.ReadInt32(); // Lower bound - } - - var result = Array.CreateInstance(returnType, dimLengths); - - // Either multidimensional arrays or arrays of nullable value types requested as object - // We can't avoid boxing here - var indices = new int[dimensions]; - while (true) - { - await buf.Ensure(4, async); - var len = buf.ReadInt32(); - if (len == -1) - result.SetValue(null, indices); - else - await ReadElement(result, indices, buf, len, async); - - // TODO: Overly complicated/inefficient... - indices[dimensions - 1]++; - for (var dim = dimensions - 1; dim >= 0; dim--) - { - if (indices[dim] <= result.GetUpperBound(dim)) - continue; - - if (dim == 0) - return result; - - for (var j = dim; j < dimensions; j++) - indices[j] = result.GetLowerBound(j); - indices[dim - 1]++; - } - } - } - - /// - /// Reads a generic list containing elements from the given buffer . 
- /// - public async ValueTask ReadList(NpgsqlReadBuffer buf, bool async) - { - await buf.Ensure(12, async); - var dimensions = buf.ReadInt32(); - var containsNulls = buf.ReadInt32() == 1; - buf.ReadUInt32(); // Element OID. Ignored. - - if (dimensions == 0) - return CreateCollection(isArray: false, 0); - if (dimensions > 1) - throw new NotSupportedException($"Can't read multidimensional array as List<{ElementType.Name}>"); - - if (containsNulls && IsNonNullable) - throw new InvalidOperationException(ReadNonNullableCollectionWithNullsExceptionMessage); - - await buf.Ensure(8, async); - var length = buf.ReadInt32(); - buf.ReadInt32(); // We don't care about the lower bounds - - var list = CreateCollection(isArray: false, length); - for (var i = 0; i < length; i++) - { - var len = buf.ReadInt32(); - await ReadElement(isArray: false, list, i, buf, len, async); - } - return list; - } - - // Handle single-dimensional arrays and generic IList - public int ValidateAndGetElementLength(object value, int count, ref NpgsqlLengthCache lengthCache) - { - // Leave empty slot for the entire array length, and go ahead an populate the element slots - var pos = lengthCache.Position; - var len = - 4 + // dimensions - 4 + // has_nulls (unused) - 4 + // type OID - 1 * 8 + // number of dimensions (1) * (length + lower bound) - 4 * count; // sum of element lengths - - lengthCache.Set(0); - var elemLengthCache = lengthCache; - - var isArray = value is Array; - for (var i = 0; i < count; i++) - { - try - { - len += ValidateAndGetElementLength(isArray, value, i, ref elemLengthCache, null); - } - catch (Exception e) - { - throw MixedTypesOrJaggedArrayException(e); - } - } - - lengthCache.Lengths[pos] = len; - return len; - } - - // Take care of multi-dimensional arrays and non-generic IList, we have no choice but to box/unbox - public int ValidateAndGetLengthAsObject(ICollection value, ref NpgsqlLengthCache lengthCache) - { - var dimensions = (value as Array)?.Rank ?? 
1; - - // Leave empty slot for the entire array length, and go ahead an populate the element slots - var pos = lengthCache.Position; - var len = - 4 + // dimensions - 4 + // has_nulls (unused) - 4 + // type OID - dimensions * 8 + // number of dimensions * (length + lower bound) - 4 * value.Count; // sum of element lengths - - lengthCache.Set(0); - var elemLengthCache = lengthCache; - - var elementHandler = ElementHandler; - foreach (var element in value) - { - if (element is null) - continue; - - try - { - len += elementHandler.ValidateObjectAndGetLength(element, ref elemLengthCache, null); - } - catch (Exception e) - { - throw MixedTypesOrJaggedArrayException(e); - } - } - - lengthCache.Lengths[pos] = len; - return len; - } - - public async Task WriteAsObject(ICollection value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, bool async, CancellationToken cancellationToken = default) - { - var asArray = value as Array; - var dimensions = asArray?.Rank ?? 1; - - var len = - 4 + // ndim - 4 + // has_nulls - 4 + // element_oid - dimensions * 8; // dim (4) + lBound (4) - - if (buf.WriteSpaceLeft < len) - { - await buf.Flush(async, cancellationToken); - Debug.Assert(buf.WriteSpaceLeft >= len, "Buffer too small for header"); - } - - var elementHandler = ElementHandler; - buf.WriteInt32(dimensions); - buf.WriteInt32(1); // HasNulls=1. Not actually used by the backend. - buf.WriteUInt32(elementHandler.PostgresType.OID); - if (asArray != null) - { - for (var i = 0; i < dimensions; i++) - { - buf.WriteInt32(asArray.GetLength(i)); - buf.WriteInt32(_lowerBound); // We don't map .NET lower bounds to PG - } - } - else - { - buf.WriteInt32(value.Count); - buf.WriteInt32(_lowerBound); // We don't map .NET lower bounds to PG - } - - foreach (var element in value) - await elementHandler.WriteObjectWithLength(element, buf, lengthCache, null, async, cancellationToken); - } - - public async Task Write(object value, int count, NpgsqlWriteBuffer buf, NpgsqlLengthCache? 
lengthCache, bool async, CancellationToken cancellationToken = default) - { - var len = - 4 + // dimensions - 4 + // has_nulls (unused) - 4 + // type OID - 1 * 8; // number of dimensions (1) * (length + lower bound) - if (buf.WriteSpaceLeft < len) - { - await buf.Flush(async, cancellationToken); - Debug.Assert(buf.WriteSpaceLeft >= len, "Buffer too small for header"); - } - - var elementHandler = ElementHandler; - buf.WriteInt32(1); - buf.WriteInt32(1); // has_nulls = 1. Not actually used by the backend. - buf.WriteUInt32(elementHandler.PostgresType.OID); - buf.WriteInt32(count); - buf.WriteInt32(_lowerBound); // We don't map .NET lower bounds to PG - - var isArray = value is Array; - for (var i = 0; i < count; i++) - await WriteElementWithLength(isArray, value, i, buf, lengthCache, null, async, cancellationToken); - } - - static Exception MixedTypesOrJaggedArrayException(Exception innerException) - => new("While trying to write an array, one of its elements failed validation. " + - "You may be trying to mix types in a non-generic IList, or to write a jagged array.", innerException); - - public int ValidateAndGetElementLength(object value, ref NpgsqlLengthCache? lengthCache) - { - lengthCache ??= new NpgsqlLengthCache(1); - if (lengthCache.IsPopulated) - return lengthCache.Get(); - - return value switch - { - _ when IsGenericCollection(value, out var count) => ValidateAndGetElementLength(value, count, ref lengthCache), - ICollection nonGeneric => ValidateAndGetLengthAsObject(nonGeneric, ref lengthCache), - _ => throw CantWriteTypeException(value.GetType()) - }; - } - - public Task WriteElementWithLength(object value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? 
lengthCache, bool async, CancellationToken cancellationToken) - { - buf.WriteInt32(ValidateAndGetElementLength(value, ref lengthCache)); - return value switch - { - _ when IsGenericCollection(value, out var count) => Write(value, count, buf, lengthCache, async, cancellationToken), - ICollection nonGeneric => WriteAsObject(nonGeneric, buf, lengthCache, async, cancellationToken), - _ => throw CantWriteTypeException(value.GetType()) - }; - } - - InvalidCastException CantWriteTypeException(Type type) - => new($"Can't write type '{type}' as an array of {ElementType}"); -} - -sealed class ArrayHandlerCore : ArrayHandlerCore -{ - readonly NpgsqlTypeHandler _elementHandler; - - public ArrayHandlerCore(NpgsqlTypeHandler nonNullableElementHandler, ArrayNullabilityMode arrayNullabilityMode, int lowerBound = 1) - : base(arrayNullabilityMode, lowerBound) - => _elementHandler = nonNullableElementHandler; - - protected override Type ElementType => typeof(TElement); - protected override bool IsNonNullable => typeof(TElement).IsValueType && default(TElement) is not null; - - protected override bool IsGenericCollection(object value, out int count) - { - if (value is ICollection collection) - { - count = collection.Count; - return true; - } - - count = 0; - return false; - } - - protected override NpgsqlTypeHandler ElementHandler => _elementHandler; - - protected override object CreateCollection(bool isArray, int capacity) => isArray switch - { - true => capacity is 0 ? Array.Empty() : new TElement[capacity], - false => new List() - }; - - protected override ValueTask ReadElement(bool isArray, object values, int index, NpgsqlReadBuffer buf, int length, bool async, FieldDescription? fieldDescription = null) - { - // We want a generic mutation so we unfortunately need the null check on this side. - if (length == -1) - { - SetResult(isArray, values, index, (TElement?)(object?)null); - return new ValueTask(); - } - - var task = - NullableHandler.Exists - ? 
NullableHandler.ReadAsync(_elementHandler, buf, length, async, fieldDescription) - : _elementHandler.Read(buf, length, async, fieldDescription); - - if (!task.IsCompletedSuccessfully) - return Core(isArray, values, index, task); - - SetResult(isArray, values, index, task.GetAwaiter().GetResult()); - return new ValueTask(); - - static async ValueTask Core(bool isArray, object values, int index, ValueTask task) - => SetResult(isArray, values, index, await task); - - static void SetResult(bool isArray, object values, int index, TElement? result) - { - Debug.Assert(isArray ? values is TElement?[] : values is List); - if (isArray) - Unsafe.As(ref values)[index] = result; - else - Unsafe.As>(ref values).Add(result); - } - } - - protected override async ValueTask ReadElement(Array array, int[] indices, NpgsqlReadBuffer buf, int length, bool async, FieldDescription? fieldDescription = null) - { - // Null check is handled in ArrayHandlerOps to reduce code size. - var result = - NullableHandler.Exists - ? await NullableHandler.ReadAsync(_elementHandler, buf, length, async, fieldDescription) - : await _elementHandler.Read(buf, length, async, fieldDescription); - - array.SetValue(result, indices); - } - - protected override int ValidateAndGetElementLength(bool isArray, object values, int index, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - { - Debug.Assert(isArray ? values is TElement?[] : values is List); - var element = - isArray - ? Unsafe.As(ref values)[index] - : Unsafe.As>(ref values)[index]; - - return element is null - ? 0 - : NullableHandler.Exists - ? NullableHandler.ValidateAndGetLength(_elementHandler, element, ref lengthCache, parameter) - : _elementHandler.ValidateAndGetLength(element, ref lengthCache, parameter); - } - - protected override async ValueTask WriteElementWithLength(bool isArray, object values, int index, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, - NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken) - { - Debug.Assert(isArray ? values is TElement?[] : values is List); - var element = - isArray - ? Unsafe.As(ref values)[index] - : Unsafe.As>(ref values)[index]; - - if (NullableHandler.Exists) - await NullableHandler.WriteAsync(_elementHandler, element!, buf, lengthCache, parameter, async, cancellationToken); - else - await _elementHandler.WriteWithLength(element!, buf, lengthCache, parameter, async, cancellationToken); - } -} diff --git a/src/Npgsql/Internal/TypeHandlers/BitStringHandler.cs b/src/Npgsql/Internal/TypeHandlers/BitStringHandler.cs deleted file mode 100644 index b448463343..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/BitStringHandler.cs +++ /dev/null @@ -1,271 +0,0 @@ -using System; -using System.Collections; -using System.Collections.Specialized; -using System.Diagnostics; -using System.Linq; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers; - -/// -/// A type handler for the PostgreSQL bit string data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-bit.html. -/// -/// Note that for BIT(1), this handler will return a bool by default, to align with SQLClient -/// (see discussion https://github.com/npgsql/npgsql/pull/362#issuecomment-59622101). -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class BitStringHandler : NpgsqlTypeHandler, - INpgsqlTypeHandler, INpgsqlTypeHandler, INpgsqlTypeHandler -{ - public BitStringHandler(PostgresType pgType) : base(pgType) {} - - public override Type GetFieldType(FieldDescription? 
fieldDescription = null) - => fieldDescription != null && fieldDescription.TypeModifier == 1 ? typeof(bool) : typeof(BitArray); - - // BitString requires a special array handler which returns bool or BitArray - /// - public override NpgsqlTypeHandler CreateArrayHandler(PostgresArrayType pgArrayType, ArrayNullabilityMode arrayNullabilityMode) - => new BitStringArrayHandler(pgArrayType, this, arrayNullabilityMode); - - #region Read - - /// - public override async ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - { - await buf.Ensure(4, async); - var numBits = buf.ReadInt32(); - var result = new BitArray(numBits); - var bytesLeft = len - 4; // Remove leading number of bits - if (bytesLeft == 0) - return result; - - var bitNo = 0; - while (true) - { - var iterationEndPos = bytesLeft > buf.ReadBytesLeft - ? bytesLeft - buf.ReadBytesLeft - : 1; - - for (; bytesLeft > iterationEndPos; bytesLeft--) - { - // ReSharper disable ShiftExpressionRealShiftCountIsZero - var chunk = buf.ReadByte(); - result[bitNo++] = (chunk & (1 << 7)) != 0; - result[bitNo++] = (chunk & (1 << 6)) != 0; - result[bitNo++] = (chunk & (1 << 5)) != 0; - result[bitNo++] = (chunk & (1 << 4)) != 0; - result[bitNo++] = (chunk & (1 << 3)) != 0; - result[bitNo++] = (chunk & (1 << 2)) != 0; - result[bitNo++] = (chunk & (1 << 1)) != 0; - result[bitNo++] = (chunk & (1 << 0)) != 0; - } - - if (bytesLeft == 1) - break; - - Debug.Assert(buf.ReadBytesLeft == 0); - await buf.Ensure(Math.Min(bytesLeft, buf.Size), async); - } - - if (bitNo < result.Length) - { - var remainder = result.Length - bitNo; - await buf.Ensure(1, async); - var lastChunk = buf.ReadByte(); - for (var i = 7; i >= 8 - remainder; i--) - result[bitNo++] = (lastChunk & (1 << i)) != 0; - } - - return result; - } - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription) - { - if (len > 4 + 4) - throw new InvalidCastException("Can't read PostgreSQL bitstring with more than 32 bits into BitVector32"); - - await buf.Ensure(4 + 4, async); - - var numBits = buf.ReadInt32(); - return numBits == 0 - ? new BitVector32(0) - : new BitVector32(buf.ReadInt32()); - } - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - { - await buf.Ensure(5, async); - var bitLen = buf.ReadInt32(); - if (bitLen != 1) - throw new InvalidCastException("Can't convert a BIT(N) type to bool, only BIT(1)"); - var b = buf.ReadByte(); - return (b & 128) != 0; - } - - ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => throw new NotSupportedException("Only writing string to PostgreSQL bitstring is supported, no reading."); - - public override async ValueTask ReadAsObject(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - => fieldDescription?.TypeModifier == 1 - ? await Read(buf, len, async, fieldDescription) - : await Read(buf, len, async, fieldDescription); - - #endregion - - #region Write - - /// - public override int ValidateAndGetLength(BitArray value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => 4 + (value.Length + 7) / 8; - - /// - public int ValidateAndGetLength(BitVector32 value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => value.Data == 0 ? 4 : 8; - - /// - public int ValidateAndGetLength(bool value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => 5; - - /// - public int ValidateAndGetLength(string value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - { - if (value.Any(c => c != '0' && c != '1')) - throw new FormatException("Cannot interpret as ASCII BitString: " + value); - return 4 + (value.Length + 7) / 8; - } - - /// - public override async Task Write(BitArray value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - // Initial bitlength byte - if (buf.WriteSpaceLeft < 4) - await buf.Flush(async, cancellationToken); - buf.WriteInt32(value.Length); - - var byteLen = (value.Length + 7) / 8; - var pos = 0; - while (true) - { - var endPos = pos + Math.Min(byteLen - pos, buf.WriteSpaceLeft); - for (; pos < endPos; pos++) - { - var bitPos = pos*8; - var b = 0; - for (var i = 0; i < Math.Min(8, value.Length - bitPos); i++) - b += (value[bitPos + i] ? 1 : 0) << (8 - i - 1); - buf.WriteByte((byte)b); - } - - if (pos == byteLen) - return; - await buf.Flush(async, cancellationToken); - } - } - - /// - public async Task Write(BitVector32 value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 8) - await buf.Flush(async, cancellationToken); - - if (value.Data == 0) - buf.WriteInt32(0); - else - { - buf.WriteInt32(32); - buf.WriteInt32(value.Data); - } - } - - /// - public async Task Write(bool value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 5) - await buf.Flush(async, cancellationToken); - buf.WriteInt32(1); - buf.WriteByte(value ? (byte)0x80 : (byte)0); - } - - /// - public async Task Write(string value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) - { - // Initial bitlength byte - if (buf.WriteSpaceLeft < 4) - await buf.Flush(async, cancellationToken); - buf.WriteInt32(value.Length); - - var pos = 0; - var byteLen = (value.Length + 7) / 8; - var bytePos = 0; - - while (true) - { - var endBytePos = bytePos + Math.Min(byteLen - bytePos - 1, buf.WriteSpaceLeft); - - for (; bytePos < endBytePos; bytePos++) - { - var b = 0; - b += (value[pos++] - '0') << 7; - b += (value[pos++] - '0') << 6; - b += (value[pos++] - '0') << 5; - b += (value[pos++] - '0') << 4; - b += (value[pos++] - '0') << 3; - b += (value[pos++] - '0') << 2; - b += (value[pos++] - '0') << 1; - b += (value[pos++] - '0'); - buf.WriteByte((byte)b); - } - - if (bytePos >= byteLen - 1) - break; - await buf.Flush(async, cancellationToken); - } - - if (pos < value.Length) - { - if (buf.WriteSpaceLeft < 1) - await buf.Flush(async, cancellationToken); - - var remainder = value.Length - pos; - var lastChunk = 0; - for (var i = 7; i >= 8 - remainder; i--) - lastChunk += (value[pos++] - '0') << i; - buf.WriteByte((byte)lastChunk); - } - } - - #endregion -} - -/// -/// A special handler for arrays of bit strings. -/// Differs from the standard array handlers in that it returns arrays of bool for BIT(1) and arrays -/// of BitArray otherwise (just like the scalar BitStringHandler does). -/// -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. 
-/// -public class BitStringArrayHandler : ArrayHandler -{ - /// - public BitStringArrayHandler(PostgresType postgresType, BitStringHandler elementHandler, ArrayNullabilityMode arrayNullabilityMode) - : base(postgresType, elementHandler, arrayNullabilityMode) - { } - - public override ValueTask ReadAsObject(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - => fieldDescription?.TypeModifier == 1 - ? base.ReadAsObject(typeof(bool), buf, len, async, fieldDescription) - : base.ReadAsObject(buf, len, async, fieldDescription); -} diff --git a/src/Npgsql/Internal/TypeHandlers/BoolHandler.cs b/src/Npgsql/Internal/TypeHandlers/BoolHandler.cs deleted file mode 100644 index c33004c701..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/BoolHandler.cs +++ /dev/null @@ -1,32 +0,0 @@ -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers; - -/// -/// A type handler for the PostgreSQL bool data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-boolean.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class BoolHandler : NpgsqlSimpleTypeHandler -{ - public BoolHandler(PostgresType pgType) : base(pgType) {} - - /// - public override bool Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => buf.ReadByte() != 0; - - /// - public override int ValidateAndGetLength(bool value, NpgsqlParameter? parameter) - => 1; - - /// - public override void Write(bool value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => buf.WriteByte(value ? 
(byte)1 : (byte)0); -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/ByteaHandler.cs b/src/Npgsql/Internal/TypeHandlers/ByteaHandler.cs deleted file mode 100644 index 785250989e..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/ByteaHandler.cs +++ /dev/null @@ -1,148 +0,0 @@ -using System; -using System.IO; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers; - -/// -/// A type handler for the PostgreSQL bytea data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-binary.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class ByteaHandler : NpgsqlTypeHandler, INpgsqlTypeHandler>, INpgsqlTypeHandler, INpgsqlTypeHandler>, INpgsqlTypeHandler> -{ - public ByteaHandler(PostgresType pgType) : base(pgType) {} - - /// - public override async ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - { - var bytes = new byte[len]; - var pos = 0; - while (true) - { - var toRead = Math.Min(len - pos, buf.ReadBytesLeft); - buf.ReadBytes(bytes, pos, toRead); - pos += toRead; - if (pos == len) - break; - await buf.ReadMore(async); - } - return bytes; - } - - ValueTask> INpgsqlTypeHandler>.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => throw new NotSupportedException("Only writing ArraySegment to PostgreSQL bytea is supported, no reading."); - - ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription) - => throw new NotSupportedException("Reading a PostgreSQL bytea as a Stream is unsupported, use NpgsqlDataReader.GetStream() instead.."); - - int ValidateAndGetLength(int bufferLen, NpgsqlParameter? parameter) - => parameter == null || parameter.Size <= 0 || parameter.Size >= bufferLen - ? bufferLen - : parameter.Size; - - int ValidateAndGetLength(Stream stream, NpgsqlParameter? parameter) - { - if (parameter != null && parameter.Size > 0) - return parameter.Size; - - if (!stream.CanSeek) - throw new NpgsqlException("Cannot write a stream of bytes. Either provide a positive size, or a seekable stream."); - - try - { - return (int)(stream.Length - stream.Position); - } - catch (Exception ex) - { - throw new NpgsqlException("The remaining bytes in the provided Stream exceed the maximum length. The vaule may be truncated by setting NpgsqlParameter.Size.", ex); - } - } - - /// - public override int ValidateAndGetLength(byte[] value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength(value.Length, parameter); - - /// - public int ValidateAndGetLength(ArraySegment value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength(value.Count, parameter); - - /// - public int ValidateAndGetLength(Stream value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength(value, parameter); - - /// - public override Task Write(byte[] value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => Write(value, buf, 0, ValidateAndGetLength(value.Length, parameter), async, cancellationToken); - - /// - public Task Write(ArraySegment value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => value.Array is null ? 
Task.CompletedTask : Write(value.Array, buf, value.Offset, ValidateAndGetLength(value.Count, parameter), async, cancellationToken); - - /// - public Task Write(Stream value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => Write(value, buf, ValidateAndGetLength(value, parameter), async, cancellationToken); - - async Task Write(byte[] value, NpgsqlWriteBuffer buf, int offset, int count, bool async, CancellationToken cancellationToken = default) - { - // The entire segment fits in our buffer, copy it as usual. - if (count <= buf.WriteSpaceLeft) - { - buf.WriteBytes(value, offset, count); - return; - } - - // The segment is larger than our buffer. Flush whatever is currently in the buffer and - // write the array directly to the socket. - await buf.Flush(async, cancellationToken); - await buf.DirectWrite(new ReadOnlyMemory(value, offset, count), async, cancellationToken); - } - - Task Write(Stream value, NpgsqlWriteBuffer buf, int count, bool async, CancellationToken cancellationToken = default) - => buf.WriteStreamRaw(value, count, async, cancellationToken); - - /// - public int ValidateAndGetLength(Memory value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength(value.Length, parameter); - - /// - public int ValidateAndGetLength(ReadOnlyMemory value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength(value.Length, parameter); - - /// - public async Task Write(ReadOnlyMemory value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (parameter != null && parameter.Size > 0 && parameter.Size < value.Length) - value = value.Slice(0, parameter.Size); - - // The entire segment fits in our buffer, copy it into the buffer as usual. 
- if (value.Length <= buf.WriteSpaceLeft) - { - buf.WriteBytes(value.Span); - return; - } - - // The segment is larger than our buffer. Perform a direct write, flushing whatever is currently in the buffer - // and then writing the array directly to the socket. - await buf.DirectWrite(value, async, cancellationToken); - } - - /// - public Task Write(Memory value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => Write((ReadOnlyMemory)value, buf, lengthCache, parameter, async, cancellationToken); - - ValueTask> INpgsqlTypeHandler>.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescriptioncancellationToken) - => throw new NotSupportedException("Only writing ReadOnlyMemory to PostgreSQL bytea is supported, no reading."); - - ValueTask> INpgsqlTypeHandler>.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => throw new NotSupportedException("Only writing Memory to PostgreSQL bytea is supported, no reading."); -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/ByReference.cs b/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/ByReference.cs deleted file mode 100644 index e5f02bddbe..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/ByReference.cs +++ /dev/null @@ -1,10 +0,0 @@ - -// Only used for value types, but can't constrain because MappedCompositeHandler isn't constrained -#nullable disable - -namespace Npgsql.Internal.TypeHandlers.CompositeHandlers; - -sealed class ByReference -{ - public T Value; -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeConstructorHandler.cs b/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeConstructorHandler.cs deleted file mode 100644 index b1b633748b..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeConstructorHandler.cs +++ 
/dev/null @@ -1,62 +0,0 @@ -using System; -using System.Reflection; -using System.Threading.Tasks; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.CompositeHandlers; - -class CompositeConstructorHandler -{ - public PostgresType PostgresType { get; } - public ConstructorInfo ConstructorInfo { get; } - public CompositeParameterHandler[] Handlers { get; } - - protected CompositeConstructorHandler(PostgresType postgresType, ConstructorInfo constructorInfo, CompositeParameterHandler[] handlers) - { - PostgresType = postgresType; - ConstructorInfo = constructorInfo; - Handlers = handlers; - } - - public virtual async ValueTask Read(NpgsqlReadBuffer buffer, bool async) - { - await buffer.Ensure(sizeof(int), async); - - var fieldCount = buffer.ReadInt32(); - if (fieldCount != Handlers.Length) - throw new InvalidOperationException($"pg_attributes contains {Handlers.Length} fields for type {PostgresType.DisplayName}, but {fieldCount} fields were received."); - - var args = new object?[Handlers.Length]; - foreach (var handler in Handlers) - args[handler.ParameterPosition] = await handler.Read(buffer, async); - - return (TComposite)ConstructorInfo.Invoke(args); - } - - public static CompositeConstructorHandler Create(PostgresType postgresType, ConstructorInfo constructorInfo, CompositeParameterHandler[] parameterHandlers) - { - const int maxGenericParameters = 8; - - if (parameterHandlers.Length > maxGenericParameters) - return new CompositeConstructorHandler(postgresType, constructorInfo, parameterHandlers); - - var parameterTypes = new Type[1 + maxGenericParameters]; - foreach (var parameterHandler in parameterHandlers) - parameterTypes[1 + parameterHandler.ParameterPosition] = parameterHandler.ParameterType; - - for (var parameterIndex = 1; parameterIndex < parameterTypes.Length; parameterIndex++) - parameterTypes[parameterIndex] ??= typeof(Unused); - - parameterTypes[0] = typeof(TComposite); - return 
(CompositeConstructorHandler)Activator.CreateInstance( - typeof(CompositeConstructorHandler<,,,,,,,,>).MakeGenericType(parameterTypes), - BindingFlags.Instance | BindingFlags.Public, - binder: null, - args: new object[] { postgresType, constructorInfo, parameterHandlers }, - culture: null)!; - } - - readonly struct Unused - { - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeConstructorHandler`.cs b/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeConstructorHandler`.cs deleted file mode 100644 index b7d8a7b7b0..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeConstructorHandler`.cs +++ /dev/null @@ -1,66 +0,0 @@ -using System; -using System.Linq; -using System.Linq.Expressions; -using System.Reflection; -using System.Threading.Tasks; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.CompositeHandlers; - -sealed class CompositeConstructorHandler : CompositeConstructorHandler -{ - delegate TComposite CompositeConstructor(in Arguments args); - - readonly CompositeConstructor _constructor; - - public CompositeConstructorHandler(PostgresType postgresType, ConstructorInfo constructorInfo, CompositeParameterHandler[] parameterHandlers) - : base(postgresType, constructorInfo, parameterHandlers) - { - var parameter = Expression.Parameter(typeof(Arguments).MakeByRefType()); - var fields = Enumerable - .Range(1, parameterHandlers.Length) - .Select(i => Expression.Field(parameter, "Argument" + i)); - - _constructor = Expression - .Lambda(Expression.New(constructorInfo, fields), parameter) - .Compile(); - } - - public override async ValueTask Read(NpgsqlReadBuffer buffer, bool async) - { - await buffer.Ensure(sizeof(int), async); - - var fieldCount = buffer.ReadInt32(); - if (fieldCount != Handlers.Length) - throw new InvalidOperationException($"pg_attributes contains {Handlers.Length} fields for type {PostgresType.DisplayName}, but {fieldCount} fields were 
received."); - - var args = default(Arguments); - - foreach (var handler in Handlers) - switch (handler.ParameterPosition) - { - case 0: args.Argument1 = await handler.Read(buffer, async); break; - case 1: args.Argument2 = await handler.Read(buffer, async); break; - case 2: args.Argument3 = await handler.Read(buffer, async); break; - case 3: args.Argument4 = await handler.Read(buffer, async); break; - case 4: args.Argument5 = await handler.Read(buffer, async); break; - case 5: args.Argument6 = await handler.Read(buffer, async); break; - case 6: args.Argument7 = await handler.Read(buffer, async); break; - case 7: args.Argument8 = await handler.Read(buffer, async); break; - } - - return _constructor(args); - } - - struct Arguments - { - public T1 Argument1; - public T2 Argument2; - public T3 Argument3; - public T4 Argument4; - public T5 Argument5; - public T6 Argument6; - public T7 Argument7; - public T8 Argument8; - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeHandler.cs b/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeHandler.cs deleted file mode 100644 index 5079b24b1d..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeHandler.cs +++ /dev/null @@ -1,282 +0,0 @@ -using System; -using System.Diagnostics.CodeAnalysis; -using System.Linq; -using System.Linq.Expressions; -using System.Reflection; -using System.Runtime.CompilerServices; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.Internal.TypeMapping; -using Npgsql.PostgresTypes; -using Npgsql.TypeMapping; -using NpgsqlTypes; - -#region Trimming warning suppressions - -[module: UnconditionalSuppressMessage( - "Composite type mapping currently isn't trimming-safe, and warnings are generated at the MapComposite level.", - "IL2046", Scope = "type", Target = "Npgsql.Internal.TypeHandlers.CompositeHandlers.CompositeHandler")] 
-[module: UnconditionalSuppressMessage( - "Composite type mapping currently isn't trimming-safe, and warnings are generated at the MapComposite level.", - "IL2080", Scope = "type", Target = "Npgsql.Internal.TypeHandlers.CompositeHandlers.CompositeHandler")] -[module: UnconditionalSuppressMessage( - "Composite type mapping currently isn't trimming-safe, and warnings are generated at the MapComposite level.", - "IL2026", Scope = "type", Target = "Npgsql.Internal.TypeHandlers.CompositeHandlers.CompositeHandler")] -[module: UnconditionalSuppressMessage( - "Composite type mapping currently isn't trimming-safe, and warnings are generated at the MapComposite level.", - "IL2090", Scope = "type", Target = "Npgsql.Internal.TypeHandlers.CompositeHandlers.CompositeHandler")] -[module: UnconditionalSuppressMessage( - "Composite type mapping currently isn't trimming-safe, and warnings are generated at the MapComposite level.", - "IL2087", Scope = "type", Target = "Npgsql.Internal.TypeHandlers.CompositeHandlers.CompositeHandler")] -[module: UnconditionalSuppressMessage( - "Composite type mapping currently isn't trimming-safe, and warnings are generated at the MapComposite level.", - "IL2055", Scope = "type", Target = "Npgsql.Internal.TypeHandlers.CompositeHandlers.CompositeHandler")] -[module: UnconditionalSuppressMessage( - "Composite type mapping currently isn't trimming-safe, and warnings are generated at the MapComposite level.", - "IL2077", Scope = "type", Target = "Npgsql.Internal.TypeHandlers.CompositeHandlers.CompositeHandler")] - -#endregion - -namespace Npgsql.Internal.TypeHandlers.CompositeHandlers; - -sealed partial class CompositeHandler : NpgsqlTypeHandler, ICompositeHandler -{ - readonly TypeMapper _typeMapper; - readonly INpgsqlNameTranslator _nameTranslator; - - Func? _constructor; - CompositeConstructorHandler? 
_constructorHandler; - CompositeMemberHandler[] _memberHandlers = null!; - - public Type CompositeType => typeof(T); - - public CompositeHandler(PostgresCompositeType postgresType, TypeMapper typeMapper, INpgsqlNameTranslator nameTranslator) - : base(postgresType) - { - _typeMapper = typeMapper; - _nameTranslator = nameTranslator; - } - - public override ValueTask Read(NpgsqlReadBuffer buffer, int length, bool async, FieldDescription? fieldDescription = null) - { - Initialize(); - - return _constructorHandler is null - ? ReadUsingMemberHandlers(buffer, async) - : _constructorHandler.Read(buffer, async); - - async ValueTask ReadUsingMemberHandlers(NpgsqlReadBuffer buffer, bool async) - { - await buffer.Ensure(sizeof(int), async); - - var fieldCount = buffer.ReadInt32(); - if (fieldCount != _memberHandlers.Length) - throw new InvalidOperationException($"pg_attributes contains {_memberHandlers.Length} fields for type {PgDisplayName}, but {fieldCount} fields were received."); - - if (IsValueType.Value) - { - var composite = new ByReference { Value = _constructor!() }; - foreach (var member in _memberHandlers) - await member.Read(composite, buffer, async); - - return composite.Value; - } - else - { - var composite = _constructor!(); - foreach (var member in _memberHandlers) - await member.Read(composite, buffer, async); - - return composite; - } - } - } - - public override async Task Write(T value, NpgsqlWriteBuffer buffer, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - Initialize(); - - if (buffer.WriteSpaceLeft < sizeof(int)) - await buffer.Flush(async, cancellationToken); - - buffer.WriteInt32(_memberHandlers.Length); - - foreach (var member in _memberHandlers) - await member.Write(value, buffer, lengthCache, async, cancellationToken); - } - - public override int ValidateAndGetLength(T value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - { - Initialize(); - - lengthCache ??= new NpgsqlLengthCache(1); - if (lengthCache.IsPopulated) - return lengthCache.Get(); - - // Leave empty slot for the entire composite type, and go ahead an populate the element slots - var position = lengthCache.Position; - lengthCache.Set(0); - - // number of fields + (type oid + field length) * member count - var length = sizeof(int) + sizeof(int) * 2 * _memberHandlers.Length; - foreach (var member in _memberHandlers) - length += member.ValidateAndGetLength(value, ref lengthCache); - - return lengthCache.Lengths[position] = length; - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - void Initialize() - { - if (_memberHandlers is null) - InitializeCore(); - - void InitializeCore() - { - var pgType = (PostgresCompositeType)PostgresType; - - _memberHandlers = CreateMemberHandlers(pgType, _typeMapper, _nameTranslator); - _constructorHandler = CreateConstructorHandler(pgType, _typeMapper, _nameTranslator); - _constructor = _constructorHandler is null - ? Expression - .Lambda>(Expression.New(typeof(T))) - .Compile() - : null; - } - } - - static CompositeConstructorHandler? CreateConstructorHandler(PostgresCompositeType pgType, TypeMapper typeMapper, INpgsqlNameTranslator nameTranslator) - { - var pgFields = pgType.Fields; - var clrType = typeof(T); - - ConstructorInfo? clrDefaultConstructor = null; - - foreach (var clrConstructor in clrType.GetConstructors()) - { - var clrParameters = clrConstructor.GetParameters(); - if (clrParameters.Length != pgFields.Count) - { - if (clrParameters.Length == 0) - clrDefaultConstructor = clrConstructor; - - continue; - } - - var clrParameterHandlerCount = 0; - var clrParametersMapped = new ParameterInfo[pgFields.Count]; - - foreach (var clrParameter in clrParameters) - { - var attr = clrParameter.GetCustomAttribute(); - var name = attr?.PgName ?? (clrParameter.Name is string clrName ? 
nameTranslator.TranslateMemberName(clrName) : null); - if (name is null) - break; - - for (var pgFieldIndex = pgFields.Count - 1; pgFieldIndex >= 0; --pgFieldIndex) - { - var pgField = pgFields[pgFieldIndex]; - if (pgField.Name != name) - continue; - - if (clrParametersMapped[pgFieldIndex] != null) - throw new AmbiguousMatchException($"Multiple constructor parameters are mapped to the '{pgField.Name}' field."); - - clrParameterHandlerCount++; - clrParametersMapped[pgFieldIndex] = clrParameter; - - break; - } - } - - if (clrParameterHandlerCount < pgFields.Count) - continue; - - var clrParameterHandlers = new CompositeParameterHandler[pgFields.Count]; - for (var pgFieldIndex = 0; pgFieldIndex < pgFields.Count; ++pgFieldIndex) - { - var pgField = pgFields[pgFieldIndex]; - - if (!typeMapper.TryResolveByOID(pgField.Type.OID, out var handler)) - throw new NpgsqlException($"PostgreSQL composite type {pgType.DisplayName} has field {pgField.Type.DisplayName} with an unknown type (OID = {pgField.Type.OID})."); - - var clrParameter = clrParametersMapped[pgFieldIndex]; - var clrParameterHandlerType = typeof(CompositeParameterHandler<>) - .MakeGenericType(clrParameter.ParameterType); - - clrParameterHandlers[pgFieldIndex] = (CompositeParameterHandler)Activator.CreateInstance( - clrParameterHandlerType, - BindingFlags.Instance | BindingFlags.Public, - binder: null, - args: new object[] { handler, clrParameter }, - culture: null)!; - } - - return CompositeConstructorHandler.Create(pgType, clrConstructor, clrParameterHandlers); - } - - if (clrDefaultConstructor is null && !clrType.IsValueType) - throw new InvalidOperationException($"No parameterless constructor defined for type '{clrType}'."); - - return null; - } - - static CompositeMemberHandler[] CreateMemberHandlers(PostgresCompositeType pgType, TypeMapper typeMapper, INpgsqlNameTranslator nameTranslator) - { - var pgFields = pgType.Fields; - - var clrType = typeof(T); - var clrMemberHandlers = new 
CompositeMemberHandler[pgFields.Count]; - var clrMemberHandlerCount = 0; - var clrMemberHandlerType = IsValueType.Value - ? typeof(CompositeStructMemberHandler<,>) - : typeof(CompositeClassMemberHandler<,>); - - foreach (var clrProperty in clrType.GetProperties(BindingFlags.Instance | BindingFlags.Public)) - CreateMemberHandler(clrProperty, clrProperty.PropertyType); - - foreach (var clrField in clrType.GetFields(BindingFlags.Instance | BindingFlags.Public)) - CreateMemberHandler(clrField, clrField.FieldType); - - if (clrMemberHandlerCount != pgFields.Count) - { - var notMappedFields = string.Join(", ", clrMemberHandlers - .Select((member, memberIndex) => member == null ? $"'{pgFields[memberIndex].Name}'" : null) - .Where(member => member != null)); - throw new InvalidOperationException($"PostgreSQL composite type {pgType.DisplayName} contains fields {notMappedFields} which could not match any on CLR type {clrType.Name}"); - } - - return clrMemberHandlers; - - void CreateMemberHandler(MemberInfo clrMember, Type clrMemberType) - { - var attr = clrMember.GetCustomAttribute(); - var name = attr?.PgName ?? 
nameTranslator.TranslateMemberName(clrMember.Name); - - for (var pgFieldIndex = pgFields.Count - 1; pgFieldIndex >= 0; --pgFieldIndex) - { - var pgField = pgFields[pgFieldIndex]; - if (pgField.Name != name) - continue; - - if (clrMemberHandlers[pgFieldIndex] != null) - throw new AmbiguousMatchException($"Multiple class members are mapped to the '{pgField.Name}' field."); - - if (!typeMapper.TryResolveByOID(pgField.Type.OID, out var handler)) - throw new NpgsqlException($"PostgreSQL composite type {pgType.DisplayName} has field {pgField.Type.DisplayName} with an unknown type (OID = {pgField.Type.OID})."); - - clrMemberHandlerCount++; - clrMemberHandlers[pgFieldIndex] = (CompositeMemberHandler)Activator.CreateInstance( - clrMemberHandlerType.MakeGenericType(clrType, clrMemberType), - BindingFlags.Instance | BindingFlags.Public, - binder: null, - args: new object[] { clrMember, pgField.Type, handler }, - culture: null)!; - - break; - } - } - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeMemberHandler.cs b/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeMemberHandler.cs deleted file mode 100644 index 48d57e9c82..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeMemberHandler.cs +++ /dev/null @@ -1,28 +0,0 @@ -using System.Diagnostics.CodeAnalysis; -using System.Reflection; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.CompositeHandlers; - -abstract class CompositeMemberHandler -{ - public MemberInfo MemberInfo { get; } - public PostgresType PostgresType { get; } - - protected CompositeMemberHandler(MemberInfo memberInfo, PostgresType postgresType) - { - MemberInfo = memberInfo; - PostgresType = postgresType; - } - - public abstract ValueTask Read(TComposite composite, NpgsqlReadBuffer buffer, bool async); - - public abstract ValueTask Read(ByReference 
composite, NpgsqlReadBuffer buffer, bool async); - - public abstract Task Write(TComposite composite, NpgsqlWriteBuffer buffer, NpgsqlLengthCache? lengthCache, bool async, CancellationToken cancellationToken = default); - - public abstract int ValidateAndGetLength(TComposite composite, [NotNullIfNotNull("lengthCache")] ref NpgsqlLengthCache? lengthCache); -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeMemberHandlerOfClass.cs b/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeMemberHandlerOfClass.cs deleted file mode 100644 index 0593e4d67e..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeMemberHandlerOfClass.cs +++ /dev/null @@ -1,105 +0,0 @@ -using System; -using System.Diagnostics; -using System.Linq.Expressions; -using System.Reflection; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.CompositeHandlers; - -sealed class CompositeClassMemberHandler : CompositeMemberHandler - where TComposite : class -{ - delegate TMember GetMember(TComposite composite); - delegate void SetMember(TComposite composite, TMember value); - - readonly GetMember? _get; - readonly SetMember? 
_set; - readonly NpgsqlTypeHandler _handler; - - public CompositeClassMemberHandler(FieldInfo fieldInfo, PostgresType postgresType, NpgsqlTypeHandler handler) - : base(fieldInfo, postgresType) - { - var composite = Expression.Parameter(typeof(TComposite), "composite"); - var value = Expression.Parameter(typeof(TMember), "value"); - - _get = Expression - .Lambda(Expression.Field(composite, fieldInfo), composite) - .Compile(); - _set = Expression - .Lambda(Expression.Assign(Expression.Field(composite, fieldInfo), value), composite, value) - .Compile(); - _handler = handler; - } - - public CompositeClassMemberHandler(PropertyInfo propertyInfo, PostgresType postgresType, NpgsqlTypeHandler handler) - : base(propertyInfo, postgresType) - { - var getMethod = propertyInfo.GetGetMethod(); - if (getMethod != null) - _get = (GetMember)Delegate.CreateDelegate(typeof(GetMember), getMethod); - - var setMethod = propertyInfo.GetSetMethod(); - if (setMethod != null) - _set = (SetMember)Delegate.CreateDelegate(typeof(SetMember), setMethod); - - Debug.Assert(setMethod != null || getMethod != null); - - _handler = handler; - } - - public override async ValueTask Read(TComposite composite, NpgsqlReadBuffer buffer, bool async) - { - if (_set == null) - ThrowHelper.ThrowInvalidOperationException_NoPropertySetter(typeof(TComposite), MemberInfo); - - await buffer.Ensure(sizeof(uint) + sizeof(int), async); - - var oid = buffer.ReadUInt32(); - Debug.Assert(oid == PostgresType.OID); - - var length = buffer.ReadInt32(); - if (length == -1) - return; - - var value = NullableHandler.Exists - ? await NullableHandler.ReadAsync(_handler, buffer, length, async) - : await _handler.Read(buffer, length, async); - - _set(composite, value); - } - - public override ValueTask Read(ByReference composite, NpgsqlReadBuffer buffer, bool async) - => throw new NotSupportedException(); - - public override async Task Write(TComposite composite, NpgsqlWriteBuffer buffer, NpgsqlLengthCache? 
lengthCache, bool async, CancellationToken cancellationToken = default) - { - if (_get == null) - ThrowHelper.ThrowInvalidOperationException_NoPropertyGetter(typeof(TComposite), MemberInfo); - - if (buffer.WriteSpaceLeft < sizeof(int)) - await buffer.Flush(async, cancellationToken); - - buffer.WriteUInt32(PostgresType.OID); - if (NullableHandler.Exists) - await NullableHandler.WriteAsync(_handler, _get(composite), buffer, lengthCache, null, async, cancellationToken); - else - await _handler.WriteWithLength(_get(composite), buffer, lengthCache, null, async, cancellationToken); - } - - public override int ValidateAndGetLength(TComposite composite, ref NpgsqlLengthCache? lengthCache) - { - if (_get == null) - ThrowHelper.ThrowInvalidOperationException_NoPropertyGetter(typeof(TComposite), MemberInfo); - - var value = _get(composite); - if (value is null) - return 0; - - return NullableHandler.Exists - ? NullableHandler.ValidateAndGetLength(_handler, value, ref lengthCache, null) - : _handler.ValidateAndGetLength(value, ref lengthCache, null); - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeMemberHandlerOfStruct.cs b/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeMemberHandlerOfStruct.cs deleted file mode 100644 index 2fa1d48ca3..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeMemberHandlerOfStruct.cs +++ /dev/null @@ -1,109 +0,0 @@ -using System; -using System.Diagnostics; -using System.Linq.Expressions; -using System.Reflection; -using System.Runtime.CompilerServices; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.CompositeHandlers; - -sealed class CompositeStructMemberHandler : CompositeMemberHandler - where TComposite : struct -{ - delegate TMember GetMember(ref TComposite composite); - delegate void SetMember(ref TComposite composite, TMember value); 
- - readonly GetMember? _get; - readonly SetMember? _set; - readonly NpgsqlTypeHandler _handler; - - public CompositeStructMemberHandler(FieldInfo fieldInfo, PostgresType postgresType, NpgsqlTypeHandler handler) - : base(fieldInfo, postgresType) - { - var composite = Expression.Parameter(typeof(TComposite).MakeByRefType(), "composite"); - var value = Expression.Parameter(typeof(TMember), "value"); - - _get = Expression - .Lambda(Expression.Field(composite, fieldInfo), composite) - .Compile(); - _set = Expression - .Lambda(Expression.Assign(Expression.Field(composite, fieldInfo), value), composite, value) - .Compile(); - _handler = handler; - } - - public CompositeStructMemberHandler(PropertyInfo propertyInfo, PostgresType postgresType, NpgsqlTypeHandler handler) - : base(propertyInfo, postgresType) - { - var getMethod = propertyInfo.GetGetMethod(); - if (getMethod != null) - _get = (GetMember)Delegate.CreateDelegate(typeof(GetMember), getMethod); - - var setMethod = propertyInfo.GetSetMethod(); - if (setMethod != null) - _set = (SetMember)Delegate.CreateDelegate(typeof(SetMember), setMethod); - - Debug.Assert(setMethod != null || getMethod != null); - - _handler = handler; - } - - public override ValueTask Read(TComposite composite, NpgsqlReadBuffer buffer, bool async) - => throw new NotSupportedException(); - - public override async ValueTask Read(ByReference composite, NpgsqlReadBuffer buffer, bool async) - { - if (_set == null) - ThrowHelper.ThrowInvalidOperationException_NoPropertySetter(typeof(TComposite), MemberInfo); - - await buffer.Ensure(sizeof(uint) + sizeof(int), async); - - var oid = buffer.ReadUInt32(); - Debug.Assert(oid == PostgresType.OID); - - var length = buffer.ReadInt32(); - if (length == -1) - return; - - var value = NullableHandler.Exists - ? 
await NullableHandler.ReadAsync(_handler, buffer, length, async) - : await _handler.Read(buffer, length, async); - - Set(composite, value); - } - - public override async Task Write(TComposite composite, NpgsqlWriteBuffer buffer, NpgsqlLengthCache? lengthCache, bool async, CancellationToken cancellationToken = default) - { - if (_get == null) - ThrowHelper.ThrowInvalidOperationException_NoPropertyGetter(typeof(TComposite), MemberInfo); - - if (buffer.WriteSpaceLeft < sizeof(int)) - await buffer.Flush(async, cancellationToken); - - buffer.WriteUInt32(PostgresType.OID); - await (NullableHandler.Exists - ? NullableHandler.WriteAsync(_handler, _get(ref composite), buffer, lengthCache, null, async, cancellationToken) - : _handler.WriteWithLength(_get(ref composite), buffer, lengthCache, null, async, cancellationToken)); - } - - public override int ValidateAndGetLength(TComposite composite, ref NpgsqlLengthCache? lengthCache) - { - if (_get == null) - ThrowHelper.ThrowInvalidOperationException_NoPropertyGetter(typeof(TComposite), MemberInfo); - - var value = _get(ref composite); - if (value is null) - return 0; - - return NullableHandler.Exists - ? 
NullableHandler.ValidateAndGetLength(_handler, value, ref lengthCache, null) - : _handler.ValidateAndGetLength(value, ref lengthCache, null); - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - void Set(ByReference composite, TMember value) - => _set!(ref composite.Value, value); -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeParameterHandler.cs b/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeParameterHandler.cs deleted file mode 100644 index f99de18bba..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeParameterHandler.cs +++ /dev/null @@ -1,36 +0,0 @@ -using System; -using System.Reflection; -using System.Threading.Tasks; -using Npgsql.Internal.TypeHandling; - -namespace Npgsql.Internal.TypeHandlers.CompositeHandlers; - -abstract class CompositeParameterHandler -{ - public NpgsqlTypeHandler Handler { get; } - public Type ParameterType { get; } - public int ParameterPosition { get; } - - public CompositeParameterHandler(NpgsqlTypeHandler handler, ParameterInfo parameterInfo) - { - Handler = handler; - ParameterType = parameterInfo.ParameterType; - ParameterPosition = parameterInfo.Position; - } - - public async ValueTask Read(NpgsqlReadBuffer buffer, bool async) - { - await buffer.Ensure(sizeof(uint) + sizeof(int), async); - - var oid = buffer.ReadUInt32(); - var length = buffer.ReadInt32(); - if (length == -1) - return default!; - - return NullableHandler.Exists - ? 
await NullableHandler.ReadAsync(Handler, buffer, length, async) - : await Handler.Read(buffer, length, async); - } - - public abstract ValueTask Read(NpgsqlReadBuffer buffer, bool async); -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeParameterHandler`.cs b/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeParameterHandler`.cs deleted file mode 100644 index 6c2d9dab8d..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/CompositeParameterHandler`.cs +++ /dev/null @@ -1,21 +0,0 @@ -using System.Reflection; -using System.Threading.Tasks; -using Npgsql.Internal.TypeHandling; - -namespace Npgsql.Internal.TypeHandlers.CompositeHandlers; - -sealed class CompositeParameterHandler : CompositeParameterHandler -{ - public CompositeParameterHandler(NpgsqlTypeHandler handler, ParameterInfo parameterInfo) - : base(handler, parameterInfo) { } - - public override ValueTask Read(NpgsqlReadBuffer buffer, bool async) - { - var task = Read(buffer, async); - return task.IsCompleted - ? new ValueTask(task.Result) - : AwaitTask(task); - - static async ValueTask AwaitTask(ValueTask task) => await task; - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/ICompositeHandler.cs b/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/ICompositeHandler.cs deleted file mode 100644 index 5bb186233b..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/ICompositeHandler.cs +++ /dev/null @@ -1,11 +0,0 @@ -using System; - -namespace Npgsql.Internal.TypeHandlers.CompositeHandlers; - -interface ICompositeHandler -{ - /// - /// The CLR type mapped to the PostgreSQL composite type. 
- /// - Type CompositeType { get; } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/IsValueType.cs b/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/IsValueType.cs deleted file mode 100644 index 360cae915d..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/CompositeHandlers/IsValueType.cs +++ /dev/null @@ -1,6 +0,0 @@ -namespace Npgsql.Internal.TypeHandlers.CompositeHandlers; - -static class IsValueType -{ - public static readonly bool Value = typeof(T).IsValueType; -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/DateHandler.cs b/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/DateHandler.cs deleted file mode 100644 index 0831306a67..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/DateHandler.cs +++ /dev/null @@ -1,131 +0,0 @@ -using System; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using Npgsql.Properties; -using NpgsqlTypes; -using static Npgsql.Util.Statics; - -namespace Npgsql.Internal.TypeHandlers.DateTimeHandlers; - -/// -/// A type handler for the PostgreSQL date data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-datetime.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class DateHandler : NpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler -#if NET6_0_OR_GREATER - , INpgsqlSimpleTypeHandler -#endif -{ - static readonly DateTime BaseValueDateTime = new(2000, 1, 1, 0, 0, 0); - - /// - /// Constructs a - /// - public DateHandler(PostgresType postgresType) : base(postgresType) {} - - #region Read - - /// - public override DateTime Read(NpgsqlReadBuffer buf, int len, FieldDescription? 
fieldDescription = null) - => buf.ReadInt32() switch - { - int.MaxValue => DisableDateTimeInfinityConversions - ? throw new InvalidCastException(NpgsqlStrings.CannotReadInfinityValue) - : DateTime.MaxValue, - int.MinValue => DisableDateTimeInfinityConversions - ? throw new InvalidCastException(NpgsqlStrings.CannotReadInfinityValue) - : DateTime.MinValue, - var value => BaseValueDateTime + TimeSpan.FromDays(value) - }; - - int INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => buf.ReadInt32(); - - #endregion Read - - #region Write - - /// - public override int ValidateAndGetLength(DateTime value, NpgsqlParameter? parameter) => 4; - - /// - public int ValidateAndGetLength(int value, NpgsqlParameter? parameter) => 4; - - /// - public override void Write(DateTime value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - { - if (!DisableDateTimeInfinityConversions) - { - if (value == DateTime.MaxValue) - { - buf.WriteInt32(int.MaxValue); - return; - } - - if (value == DateTime.MinValue) - { - buf.WriteInt32(int.MinValue); - return; - } - } - - buf.WriteInt32((value.Date - BaseValueDateTime).Days); - } - - /// - public void Write(int value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => buf.WriteInt32(value); - - #endregion Write - -#if NET6_0_OR_GREATER - static readonly DateOnly BaseValueDateOnly = new(2000, 1, 1); - - DateOnly INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => buf.ReadInt32() switch - { - int.MaxValue => DisableDateTimeInfinityConversions - ? throw new InvalidCastException(NpgsqlStrings.CannotReadInfinityValue) - : DateOnly.MaxValue, - int.MinValue => DisableDateTimeInfinityConversions - ? throw new InvalidCastException(NpgsqlStrings.CannotReadInfinityValue) - : DateOnly.MinValue, - var value => BaseValueDateOnly.AddDays(value) - }; - - public int ValidateAndGetLength(DateOnly value, NpgsqlParameter? 
parameter) => 4; - - public void Write(DateOnly value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - { - if (!DisableDateTimeInfinityConversions) - { - if (value == DateOnly.MaxValue) - { - buf.WriteInt32(int.MaxValue); - return; - } - - if (value == DateOnly.MinValue) - { - buf.WriteInt32(int.MinValue); - return; - } - } - - buf.WriteInt32(value.DayNumber - BaseValueDateOnly.DayNumber); - } - - public override NpgsqlTypeHandler CreateRangeHandler(PostgresType pgRangeType) - => new RangeHandler(pgRangeType, this); - - public override NpgsqlTypeHandler CreateMultirangeHandler(PostgresMultirangeType pgRangeType) - => new MultirangeHandler(pgRangeType, new RangeHandler(pgRangeType, this)); -#endif -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/DateTimeUtils.cs b/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/DateTimeUtils.cs deleted file mode 100644 index 8b702aad12..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/DateTimeUtils.cs +++ /dev/null @@ -1,63 +0,0 @@ -using System; -using System.Runtime.CompilerServices; -using Npgsql.Properties; -using static Npgsql.Util.Statics; - -namespace Npgsql.Internal.TypeHandlers.DateTimeHandlers; - -static class DateTimeUtils -{ - const long PostgresTimestampOffsetTicks = 630822816000000000L; - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal static DateTime DecodeTimestamp(long value, DateTimeKind kind) - => new(value * 10 + PostgresTimestampOffsetTicks, kind); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal static long EncodeTimestamp(DateTime value) - // Rounding here would cause problems because we would round up DateTime.MaxValue - // which would make it impossible to retrieve it back from the database, so we just drop the additional precision - => (value.Ticks - PostgresTimestampOffsetTicks) / 10; - - internal static DateTime ReadDateTime(NpgsqlReadBuffer buf, DateTimeKind kind) - { - try - { - return buf.ReadInt64() 
switch - { - long.MaxValue => DisableDateTimeInfinityConversions - ? throw new InvalidCastException(NpgsqlStrings.CannotReadInfinityValue) - : DateTime.MaxValue, - long.MinValue => DisableDateTimeInfinityConversions - ? throw new InvalidCastException(NpgsqlStrings.CannotReadInfinityValue) - : DateTime.MinValue, - var value => DecodeTimestamp(value, kind) - }; - } - catch (ArgumentOutOfRangeException e) - { - throw new InvalidCastException("Out of the range of DateTime (year must be between 1 and 9999)", e); - } - } - - internal static void WriteTimestamp(DateTime value, NpgsqlWriteBuffer buf) - { - if (!DisableDateTimeInfinityConversions) - { - if (value == DateTime.MaxValue) - { - buf.WriteInt64(long.MaxValue); - return; - } - - if (value == DateTime.MinValue) - { - buf.WriteInt64(long.MinValue); - return; - } - } - - var postgresTimestamp = EncodeTimestamp(value); - buf.WriteInt64(postgresTimestamp); - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/IntervalHandler.cs b/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/IntervalHandler.cs deleted file mode 100644 index 9cce23e486..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/IntervalHandler.cs +++ /dev/null @@ -1,70 +0,0 @@ -using System; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using Npgsql.Properties; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandlers.DateTimeHandlers; - -/// -/// A type handler for the PostgreSQL date interval type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-datetime.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. 
-/// -public partial class IntervalHandler : NpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler -{ - /// - /// Constructs an - /// - public IntervalHandler(PostgresType postgresType) : base(postgresType) {} - - /// - public override TimeSpan Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - { - var microseconds = buf.ReadInt64(); - var days = buf.ReadInt32(); - var months = buf.ReadInt32(); - - if (months > 0) - throw new InvalidCastException(NpgsqlStrings.CannotReadIntervalWithMonthsAsTimeSpan); - - return new(microseconds * 10 + days * TimeSpan.TicksPerDay); - } - - NpgsqlInterval INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - { - var ticks = buf.ReadInt64(); - var day = buf.ReadInt32(); - var month = buf.ReadInt32(); - return new NpgsqlInterval(month, day, ticks); - } - - /// - public override int ValidateAndGetLength(TimeSpan value, NpgsqlParameter? parameter) => 16; - - /// - public int ValidateAndGetLength(NpgsqlInterval value, NpgsqlParameter? parameter) => 16; - - /// - public override void Write(TimeSpan value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - { - var ticksInDay = value.Ticks - TimeSpan.TicksPerDay * value.Days; - - buf.WriteInt64(ticksInDay / 10); - buf.WriteInt32(value.Days); - buf.WriteInt32(0); - } - - public void Write(NpgsqlInterval value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) - { - buf.WriteInt64(value.Time); - buf.WriteInt32(value.Days); - buf.WriteInt32(value.Months); - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/TimeHandler.cs b/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/TimeHandler.cs deleted file mode 100644 index f4ec3b689b..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/TimeHandler.cs +++ /dev/null @@ -1,52 +0,0 @@ -using System; -using System.Data; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using Npgsql.TypeMapping; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandlers.DateTimeHandlers; - -/// -/// A type handler for the PostgreSQL time data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-datetime.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class TimeHandler : NpgsqlSimpleTypeHandler -#if NET6_0_OR_GREATER - , INpgsqlSimpleTypeHandler -#endif -{ - /// - /// Constructs a . - /// - public TimeHandler(PostgresType postgresType) : base(postgresType) {} - - // PostgreSQL time resolution == 1 microsecond == 10 ticks - /// - public override TimeSpan Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => new(buf.ReadInt64() * 10); - - /// - public override int ValidateAndGetLength(TimeSpan value, NpgsqlParameter? parameter) => 8; - - /// - public override void Write(TimeSpan value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => buf.WriteInt64(value.Ticks / 10); - -#if NET6_0_OR_GREATER - TimeOnly INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? 
fieldDescription) - => new(buf.ReadInt64() * 10); - - public int ValidateAndGetLength(TimeOnly value, NpgsqlParameter? parameter) => 8; - - public void Write(TimeOnly value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => buf.WriteInt64(value.Ticks / 10); -#endif -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/TimeTzHandler.cs b/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/TimeTzHandler.cs deleted file mode 100644 index 464c4abd01..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/TimeTzHandler.cs +++ /dev/null @@ -1,53 +0,0 @@ -using System; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.DateTimeHandlers; - -/// -/// A type handler for the PostgreSQL timetz data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-datetime.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class TimeTzHandler : NpgsqlSimpleTypeHandler -{ - // Binary Format: int64 expressing microseconds, int32 expressing timezone in seconds, negative - - /// - /// Constructs an . - /// - public TimeTzHandler(PostgresType postgresType) : base(postgresType) {} - - #region Read - - /// - public override DateTimeOffset Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - { - // Adjust from 1 microsecond to 100ns. Time zone (in seconds) is inverted. - var ticks = buf.ReadInt64() * 10; - var offset = new TimeSpan(0, 0, -buf.ReadInt32()); - return new DateTimeOffset(ticks + TimeSpan.TicksPerDay, offset); - } - - #endregion Read - - #region Write - - /// - public override int ValidateAndGetLength(DateTimeOffset value, NpgsqlParameter? 
parameter) => 12; - - /// - public override void Write(DateTimeOffset value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - { - buf.WriteInt64(value.TimeOfDay.Ticks / 10); - buf.WriteInt32(-(int)(value.Offset.Ticks / TimeSpan.TicksPerSecond)); - } - - #endregion Write -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/TimestampHandler.cs b/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/TimestampHandler.cs deleted file mode 100644 index 1887318b44..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/TimestampHandler.cs +++ /dev/null @@ -1,62 +0,0 @@ -using System; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using static Npgsql.Util.Statics; -using static Npgsql.Internal.TypeHandlers.DateTimeHandlers.DateTimeUtils; - -namespace Npgsql.Internal.TypeHandlers.DateTimeHandlers; - -/// -/// A type handler for the PostgreSQL timestamp data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-datetime.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class TimestampHandler : NpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler -{ - /// - /// Constructs a . - /// - public TimestampHandler(PostgresType postgresType) : base(postgresType) {} - - #region Read - - /// - public override DateTime Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => ReadDateTime(buf, DateTimeKind.Unspecified); - - long INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => buf.ReadInt64(); - - #endregion Read - - #region Write - - /// - public override int ValidateAndGetLength(DateTime value, NpgsqlParameter? 
parameter) - => value.Kind != DateTimeKind.Utc || LegacyTimestampBehavior - ? 8 - : throw new InvalidCastException( - "Cannot write DateTime with Kind=UTC to PostgreSQL type 'timestamp without time zone', " + - "consider using 'timestamp with time zone'. " + - "Note that it's not possible to mix DateTimes with different Kinds in an array/range. " + - "See the Npgsql.EnableLegacyTimestampBehavior AppContext switch to enable legacy behavior."); - - /// - public int ValidateAndGetLength(long value, NpgsqlParameter? parameter) => 8; - - /// - public override void Write(DateTime value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => WriteTimestamp(value, buf); - - /// - public void Write(long value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => buf.WriteInt64(value); - - #endregion Write -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/TimestampTzHandler.cs b/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/TimestampTzHandler.cs deleted file mode 100644 index 66b3397ecb..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/DateTimeHandlers/TimestampTzHandler.cs +++ /dev/null @@ -1,143 +0,0 @@ -using System; -using System.Diagnostics; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using Npgsql.Properties; -using NpgsqlTypes; -using static Npgsql.Util.Statics; -using static Npgsql.Internal.TypeHandlers.DateTimeHandlers.DateTimeUtils; - -namespace Npgsql.Internal.TypeHandlers.DateTimeHandlers; - -/// -/// A type handler for the PostgreSQL timestamptz data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-datetime.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. 
-/// -public partial class TimestampTzHandler : NpgsqlSimpleTypeHandler, - INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler -{ - /// - /// Constructs an . - /// - public TimestampTzHandler(PostgresType postgresType) : base(postgresType) {} - - /// - public override NpgsqlTypeHandler CreateRangeHandler(PostgresType pgRangeType) - => new RangeHandler(pgRangeType, this); - - #region Read - - /// - public override DateTime Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - { - var dateTime = ReadDateTime(buf, DateTimeKind.Utc); - return LegacyTimestampBehavior && (DisableDateTimeInfinityConversions || dateTime != DateTime.MaxValue && dateTime != DateTime.MinValue) - ? dateTime.ToLocalTime() - : dateTime; - } - - DateTimeOffset INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - { - try - { - var value = buf.ReadInt64(); - switch (value) - { - case long.MaxValue: - return DisableDateTimeInfinityConversions - ? throw new InvalidCastException(NpgsqlStrings.CannotReadInfinityValue) - : DateTimeOffset.MaxValue; - case long.MinValue: - return DisableDateTimeInfinityConversions - ? throw new InvalidCastException(NpgsqlStrings.CannotReadInfinityValue) - : DateTimeOffset.MinValue; - default: - var dateTime = DecodeTimestamp(value, DateTimeKind.Utc); - return LegacyTimestampBehavior ? dateTime.ToLocalTime() : dateTime; - } - } - catch (ArgumentOutOfRangeException e) - { - throw new InvalidCastException("Out of the range of DateTime (year must be between 1 and 9999)", e); - } - } - - long INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => buf.ReadInt64(); - - #endregion Read - - #region Write - - /// - public override int ValidateAndGetLength(DateTime value, NpgsqlParameter? parameter) - => value.Kind == DateTimeKind.Utc || - value == DateTime.MinValue || // Allowed since this is default(DateTime) - sent without any timezone conversion. 
- value == DateTime.MaxValue && !DisableDateTimeInfinityConversions || - LegacyTimestampBehavior - ? 8 - : throw new InvalidCastException( - $"Cannot write DateTime with Kind={value.Kind} to PostgreSQL type 'timestamp with time zone', only UTC is supported. " + - "Note that it's not possible to mix DateTimes with different Kinds in an array/range. " + - "See the Npgsql.EnableLegacyTimestampBehavior AppContext switch to enable legacy behavior."); - - /// - public int ValidateAndGetLength(DateTimeOffset value, NpgsqlParameter? parameter) - => value.Offset == TimeSpan.Zero || LegacyTimestampBehavior - ? 8 - : throw new InvalidCastException( - $"Cannot write DateTimeOffset with Offset={value.Offset} to PostgreSQL type 'timestamp with time zone', " + - "only offset 0 (UTC) is supported. " + - "Note that it's not possible to mix DateTimes with different Kinds in an array/range. " + - "See the Npgsql.EnableLegacyTimestampBehavior AppContext switch to enable legacy behavior."); - - /// - public int ValidateAndGetLength(long value, NpgsqlParameter? parameter) => 8; - - /// - public override void Write(DateTime value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - { - if (LegacyTimestampBehavior) - { - switch (value.Kind) - { - case DateTimeKind.Unspecified: - case DateTimeKind.Utc: - break; - case DateTimeKind.Local: - value = value.ToUniversalTime(); - break; - default: - throw new InvalidOperationException($"Internal Npgsql bug: unexpected value {value.Kind} of enum {nameof(DateTimeKind)}. Please file a bug."); - } - } - else - Debug.Assert(value.Kind == DateTimeKind.Utc || value == DateTime.MinValue || value == DateTime.MaxValue); - - WriteTimestamp(value, buf); - } - - /// - public void Write(DateTimeOffset value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) - { - if (LegacyTimestampBehavior) - value = value.ToUniversalTime(); - - Debug.Assert(value.Offset == TimeSpan.Zero); - - WriteTimestamp(value.DateTime, buf); - } - - /// - public void Write(long value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => buf.WriteInt64(value); - - #endregion Write -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/EnumHandler.cs b/src/Npgsql/Internal/TypeHandlers/EnumHandler.cs deleted file mode 100644 index 2604563790..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/EnumHandler.cs +++ /dev/null @@ -1,74 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Diagnostics; -using System.Linq; -using System.Reflection; -using System.Text; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandlers; - -/// -/// Interface implemented by all concrete handlers which handle enums -/// -interface IEnumHandler -{ - /// - /// The CLR enum type mapped to the PostgreSQL enum - /// - Type EnumType { get; } -} - -sealed partial class EnumHandler : NpgsqlSimpleTypeHandler, IEnumHandler where TEnum : struct, Enum -{ - readonly Dictionary _enumToLabel; - readonly Dictionary _labelToEnum; - - public Type EnumType => typeof(TEnum); - - #region Construction - - internal EnumHandler(PostgresEnumType postgresType, Dictionary enumToLabel, Dictionary labelToEnum) - : base(postgresType) - { - Debug.Assert(typeof(TEnum).GetTypeInfo().IsEnum, "EnumHandler instantiated for non-enum type"); - _enumToLabel = enumToLabel; - _labelToEnum = labelToEnum; - } - - #endregion - - #region Read - - public override TEnum Read(NpgsqlReadBuffer buf, int len, FieldDescription? 
fieldDescription = null) - { - var str = buf.ReadString(len); - var success = _labelToEnum.TryGetValue(str, out var value); - - if (!success) - throw new InvalidCastException($"Received enum value '{str}' from database which wasn't found on enum {typeof(TEnum)}"); - - return value; - } - - #endregion - - #region Write - - public override int ValidateAndGetLength(TEnum value, NpgsqlParameter? parameter) - => _enumToLabel.TryGetValue(value, out var str) - ? Encoding.UTF8.GetByteCount(str) - : throw new InvalidCastException($"Can't write value {value} as enum {typeof(TEnum)}"); - - public override void Write(TEnum value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - { - if (!_enumToLabel.TryGetValue(value, out var str)) - throw new InvalidCastException($"Can't write value {value} as enum {typeof(TEnum)}"); - buf.WriteString(str); - } - - #endregion -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/FullTextSearchHandlers/TsQueryHandler.cs b/src/Npgsql/Internal/TypeHandlers/FullTextSearchHandlers/TsQueryHandler.cs deleted file mode 100644 index 1fefb0f598..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/FullTextSearchHandlers/TsQueryHandler.cs +++ /dev/null @@ -1,291 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Text; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -// TODO: Need to work on the nullability here -#nullable disable -#pragma warning disable CS8632 -#pragma warning disable RS0041 - -namespace Npgsql.Internal.TypeHandlers.FullTextSearchHandlers; - -/// -/// A type handler for the PostgreSQL tsquery data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-textsearch.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. 
However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class TsQueryHandler : NpgsqlTypeHandler, - INpgsqlTypeHandler, INpgsqlTypeHandler, - INpgsqlTypeHandler, INpgsqlTypeHandler, - INpgsqlTypeHandler, INpgsqlTypeHandler -{ - // 1 (type) + 1 (weight) + 1 (is prefix search) + 2046 (max str len) + 1 (null terminator) - const int MaxSingleTokenBytes = 2050; - - public TsQueryHandler(PostgresType pgType) : base(pgType) {} - - #region Read - - /// - public override async ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - { - await buf.Ensure(4, async); - var numTokens = buf.ReadInt32(); - if (numTokens == 0) - return new NpgsqlTsQueryEmpty(); - - NpgsqlTsQuery? value = null; - var nodes = new Stack>(); - len -= 4; - - for (var tokenPos = 0; tokenPos < numTokens; tokenPos++) - { - await buf.Ensure(Math.Min(len, MaxSingleTokenBytes), async); - var readPos = buf.ReadPosition; - - var isOper = buf.ReadByte() == 2; - if (isOper) - { - var operKind = (NpgsqlTsQuery.NodeKind)buf.ReadByte(); - if (operKind == NpgsqlTsQuery.NodeKind.Not) - { - var node = new NpgsqlTsQueryNot(null); - InsertInTree(node, nodes, ref value); - nodes.Push(new Tuple(node, 0)); - } - else - { - var node = operKind switch - { - NpgsqlTsQuery.NodeKind.And => (NpgsqlTsQuery)new NpgsqlTsQueryAnd(null, null), - NpgsqlTsQuery.NodeKind.Or => new NpgsqlTsQueryOr(null, null), - NpgsqlTsQuery.NodeKind.Phrase => new NpgsqlTsQueryFollowedBy(null, buf.ReadInt16(), null), - _ => throw new InvalidOperationException($"Internal Npgsql bug: unexpected value {operKind} of enum {nameof(NpgsqlTsQuery.NodeKind)}. 
Please file a bug.") - }; - - InsertInTree(node, nodes, ref value); - - nodes.Push(new Tuple(node, 1)); - nodes.Push(new Tuple(node, 2)); - } - } - else - { - var weight = (NpgsqlTsQueryLexeme.Weight)buf.ReadByte(); - var prefix = buf.ReadByte() != 0; - var str = buf.ReadNullTerminatedString(); - InsertInTree(new NpgsqlTsQueryLexeme(str, weight, prefix), nodes, ref value); - } - - len -= buf.ReadPosition - readPos; - } - - if (nodes.Count != 0) - throw new InvalidOperationException("Internal Npgsql bug, please report."); - - return value!; - - static void InsertInTree(NpgsqlTsQuery node, Stack> nodes, ref NpgsqlTsQuery? value) - { - if (nodes.Count == 0) - value = node; - else - { - var parent = nodes.Pop(); - if (parent.Item2 == 0) - ((NpgsqlTsQueryNot)parent.Item1).Child = node; - else if (parent.Item2 == 1) - ((NpgsqlTsQueryBinOp)parent.Item1).Left = node; - else - ((NpgsqlTsQueryBinOp)parent.Item1).Right = node; - } - } - } - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => (NpgsqlTsQueryEmpty)await Read(buf, len, async, fieldDescription); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => (NpgsqlTsQueryLexeme)await Read(buf, len, async, fieldDescription); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => (NpgsqlTsQueryNot)await Read(buf, len, async, fieldDescription); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => (NpgsqlTsQueryAnd)await Read(buf, len, async, fieldDescription); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription) - => (NpgsqlTsQueryOr)await Read(buf, len, async, fieldDescription); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => (NpgsqlTsQueryFollowedBy)await Read(buf, len, async, fieldDescription); - - #endregion Read - - #region Write - - /// - public override int ValidateAndGetLength(NpgsqlTsQuery value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => value.Kind == NpgsqlTsQuery.NodeKind.Empty - ? 4 - : 4 + GetNodeLength(value); - - int GetNodeLength(NpgsqlTsQuery node) - { - // TODO: Figure out the nullability strategy here - switch (node.Kind) - { - case NpgsqlTsQuery.NodeKind.Lexeme: - var strLen = Encoding.UTF8.GetByteCount(((NpgsqlTsQueryLexeme)node).Text); - if (strLen > 2046) - throw new InvalidCastException("Lexeme text too long. Must be at most 2046 bytes in UTF8."); - return 4 + strLen; - case NpgsqlTsQuery.NodeKind.And: - case NpgsqlTsQuery.NodeKind.Or: - return 2 + GetNodeLength(((NpgsqlTsQueryBinOp)node).Left) + GetNodeLength(((NpgsqlTsQueryBinOp)node).Right); - case NpgsqlTsQuery.NodeKind.Phrase: - // 2 additional bytes for uint16 phrase operator "distance" field. - return 4 + GetNodeLength(((NpgsqlTsQueryBinOp)node).Left) + GetNodeLength(((NpgsqlTsQueryBinOp)node).Right); - case NpgsqlTsQuery.NodeKind.Not: - return 2 + GetNodeLength(((NpgsqlTsQueryNot)node).Child); - case NpgsqlTsQuery.NodeKind.Empty: - throw new InvalidOperationException("Empty tsquery nodes must be top-level"); - default: - throw new InvalidOperationException("Illegal node kind: " + node.Kind); - } - } - - /// - public override async Task Write(NpgsqlTsQuery query, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) - { - var numTokens = GetTokenCount(query); - - if (buf.WriteSpaceLeft < 4) - await buf.Flush(async, cancellationToken); - buf.WriteInt32(numTokens); - - if (numTokens == 0) - return; - - await WriteCore(query, buf, async, cancellationToken); - - static async Task WriteCore(NpgsqlTsQuery node, NpgsqlWriteBuffer buf, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 4) - await buf.Flush(async, cancellationToken); - - buf.WriteByte(node.Kind == NpgsqlTsQuery.NodeKind.Lexeme ? (byte)1 : (byte)2); - - if (node.Kind == NpgsqlTsQuery.NodeKind.Lexeme) - { - if (buf.WriteSpaceLeft < MaxSingleTokenBytes) - await buf.Flush(async, cancellationToken); - - var lexemeNode = (NpgsqlTsQueryLexeme)node; - buf.WriteByte((byte)lexemeNode.Weights); - buf.WriteByte(lexemeNode.IsPrefixSearch ? (byte)1 : (byte)0); - buf.WriteString(lexemeNode.Text); - buf.WriteByte(0); - return; - } - - buf.WriteByte((byte)node.Kind); - if (node.Kind == NpgsqlTsQuery.NodeKind.Not) - { - await WriteCore(((NpgsqlTsQueryNot)node).Child, buf, async, cancellationToken); - return; - } - - if (node.Kind == NpgsqlTsQuery.NodeKind.Phrase) - buf.WriteInt16(((NpgsqlTsQueryFollowedBy)node).Distance); - - await WriteCore(((NpgsqlTsQueryBinOp)node).Right, buf, async, cancellationToken); - await WriteCore(((NpgsqlTsQueryBinOp)node).Left, buf, async, cancellationToken); - } - } - - int GetTokenCount(NpgsqlTsQuery node) - { - switch (node.Kind) - { - case NpgsqlTsQuery.NodeKind.Lexeme: - return 1; - case NpgsqlTsQuery.NodeKind.And: - case NpgsqlTsQuery.NodeKind.Or: - case NpgsqlTsQuery.NodeKind.Phrase: - return 1 + GetTokenCount(((NpgsqlTsQueryBinOp)node).Left) + GetTokenCount(((NpgsqlTsQueryBinOp)node).Right); - case NpgsqlTsQuery.NodeKind.Not: - return 1 + GetTokenCount(((NpgsqlTsQueryNot)node).Child); - case NpgsqlTsQuery.NodeKind.Empty: - return 0; - } - return -1; - } - - /// - public int 
ValidateAndGetLength(NpgsqlTsQueryOr value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength((NpgsqlTsQuery)value, ref lengthCache, parameter); - - /// - public int ValidateAndGetLength(NpgsqlTsQueryAnd value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength((NpgsqlTsQuery)value, ref lengthCache, parameter); - - /// - public int ValidateAndGetLength(NpgsqlTsQueryNot value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength((NpgsqlTsQuery)value, ref lengthCache, parameter); - - /// - public int ValidateAndGetLength(NpgsqlTsQueryLexeme value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength((NpgsqlTsQuery)value, ref lengthCache, parameter); - - /// - public int ValidateAndGetLength(NpgsqlTsQueryEmpty value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength((NpgsqlTsQuery)value, ref lengthCache, parameter); - - /// - public int ValidateAndGetLength(NpgsqlTsQueryFollowedBy value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength((NpgsqlTsQuery)value, ref lengthCache, parameter); - - /// - public Task Write(NpgsqlTsQueryOr value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => Write((NpgsqlTsQuery)value, buf, lengthCache, parameter, async, cancellationToken); - - /// - public Task Write(NpgsqlTsQueryAnd value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => Write((NpgsqlTsQuery)value, buf, lengthCache, parameter, async, cancellationToken); - - /// - public Task Write(NpgsqlTsQueryNot value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) - => Write((NpgsqlTsQuery)value, buf, lengthCache, parameter, async, cancellationToken); - - /// - public Task Write(NpgsqlTsQueryLexeme value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => Write((NpgsqlTsQuery)value, buf, lengthCache, parameter, async, cancellationToken); - - /// - public Task Write(NpgsqlTsQueryEmpty value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => Write((NpgsqlTsQuery)value, buf, lengthCache, parameter, async, cancellationToken); - - /// - public Task Write( - NpgsqlTsQueryFollowedBy value, - NpgsqlWriteBuffer buf, - NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, - bool async, - CancellationToken cancellationToken = default) - => Write((NpgsqlTsQuery)value, buf, lengthCache, parameter, async, cancellationToken); - - #endregion Write -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/FullTextSearchHandlers/TsVectorHandler.cs b/src/Npgsql/Internal/TypeHandlers/FullTextSearchHandlers/TsVectorHandler.cs deleted file mode 100644 index 141e566fd1..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/FullTextSearchHandlers/TsVectorHandler.cs +++ /dev/null @@ -1,97 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Linq; -using System.Text; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandlers.FullTextSearchHandlers; - -/// -/// A type handler for the PostgreSQL tsvector data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-textsearch.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. 
However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class TsVectorHandler : NpgsqlTypeHandler -{ - // 2561 = 2046 (max length lexeme string) + (1) null terminator + - // 2 (num_pos) + sizeof(int16) * 256 (max_num_pos (positions/wegihts)) - const int MaxSingleLexemeBytes = 2561; - - public TsVectorHandler(PostgresType pgType) : base(pgType) {} - - #region Read - - /// - public override async ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - { - await buf.Ensure(4, async); - var numLexemes = buf.ReadInt32(); - len -= 4; - - var lexemes = new List(); - for (var lexemePos = 0; lexemePos < numLexemes; lexemePos++) - { - await buf.Ensure(Math.Min(len, MaxSingleLexemeBytes), async); - var posBefore = buf.ReadPosition; - - List? positions = null; - - var lexemeString = buf.ReadNullTerminatedString(); - int numPositions = buf.ReadInt16(); - for (var i = 0; i < numPositions; i++) - { - var wordEntryPos = buf.ReadInt16(); - if (positions == null) - positions = new List(); - positions.Add(new NpgsqlTsVector.Lexeme.WordEntryPos(wordEntryPos)); - } - - lexemes.Add(new NpgsqlTsVector.Lexeme(lexemeString, positions, true)); - - len -= buf.ReadPosition - posBefore; - } - - return new NpgsqlTsVector(lexemes, true); - } - - #endregion Read - - #region Write - - // TODO: Implement length cache - /// - public override int ValidateAndGetLength(NpgsqlTsVector value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => 4 + value.Sum(l => Encoding.UTF8.GetByteCount(l.Text) + 1 + 2 + l.Count * 2); - - /// - public override async Task Write(NpgsqlTsVector vector, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 4) - await buf.Flush(async, cancellationToken); - buf.WriteInt32(vector.Count); - - foreach (var lexeme in vector) - { - if (buf.WriteSpaceLeft < MaxSingleLexemeBytes) - await buf.Flush(async, cancellationToken); - - buf.WriteString(lexeme.Text); - buf.WriteByte(0); - buf.WriteInt16(lexeme.Count); - for (var i = 0; i < lexeme.Count; i++) - buf.WriteInt16(lexeme[i].Value); - } - } - - #endregion Write -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/BoxHandler.cs b/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/BoxHandler.cs deleted file mode 100644 index 6ff333f47e..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/BoxHandler.cs +++ /dev/null @@ -1,41 +0,0 @@ -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandlers.GeometricHandlers; - -/// -/// A type handler for the PostgreSQL box data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-geometric.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class BoxHandler : NpgsqlSimpleTypeHandler -{ - public BoxHandler(PostgresType pgType) : base(pgType) {} - - /// - public override NpgsqlBox Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => new( - new NpgsqlPoint(buf.ReadDouble(), buf.ReadDouble()), - new NpgsqlPoint(buf.ReadDouble(), buf.ReadDouble()) - ); - - /// - public override int ValidateAndGetLength(NpgsqlBox value, NpgsqlParameter? parameter) - => 32; - - /// - public override void Write(NpgsqlBox value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) - { - buf.WriteDouble(value.Right); - buf.WriteDouble(value.Top); - buf.WriteDouble(value.Left); - buf.WriteDouble(value.Bottom); - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/CircleHandler.cs b/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/CircleHandler.cs deleted file mode 100644 index b450177cd3..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/CircleHandler.cs +++ /dev/null @@ -1,37 +0,0 @@ -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandlers.GeometricHandlers; - -/// -/// A type handler for the PostgreSQL circle data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-geometric.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class CircleHandler : NpgsqlSimpleTypeHandler -{ - public CircleHandler(PostgresType pgType) : base(pgType) {} - - /// - public override NpgsqlCircle Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => new(buf.ReadDouble(), buf.ReadDouble(), buf.ReadDouble()); - - /// - public override int ValidateAndGetLength(NpgsqlCircle value, NpgsqlParameter? parameter) - => 24; - - /// - public override void Write(NpgsqlCircle value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) - { - buf.WriteDouble(value.X); - buf.WriteDouble(value.Y); - buf.WriteDouble(value.Radius); - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/LineHandler.cs b/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/LineHandler.cs deleted file mode 100644 index 8b16b68a67..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/LineHandler.cs +++ /dev/null @@ -1,37 +0,0 @@ -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandlers.GeometricHandlers; - -/// -/// A type handler for the PostgreSQL line data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-geometric.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class LineHandler : NpgsqlSimpleTypeHandler -{ - public LineHandler(PostgresType pgType) : base(pgType) {} - - /// - public override NpgsqlLine Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => new(buf.ReadDouble(), buf.ReadDouble(), buf.ReadDouble()); - - /// - public override int ValidateAndGetLength(NpgsqlLine value, NpgsqlParameter? parameter) - => 24; - - /// - public override void Write(NpgsqlLine value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) - { - buf.WriteDouble(value.A); - buf.WriteDouble(value.B); - buf.WriteDouble(value.C); - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/LineSegmentHandler.cs b/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/LineSegmentHandler.cs deleted file mode 100644 index f34083602f..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/LineSegmentHandler.cs +++ /dev/null @@ -1,38 +0,0 @@ -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandlers.GeometricHandlers; - -/// -/// A type handler for the PostgreSQL lseg data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-geometric.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class LineSegmentHandler : NpgsqlSimpleTypeHandler -{ - public LineSegmentHandler(PostgresType pgType) : base(pgType) {} - - /// - public override NpgsqlLSeg Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => new(buf.ReadDouble(), buf.ReadDouble(), buf.ReadDouble(), buf.ReadDouble()); - - /// - public override int ValidateAndGetLength(NpgsqlLSeg value, NpgsqlParameter? parameter) - => 32; - - /// - public override void Write(NpgsqlLSeg value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) - { - buf.WriteDouble(value.Start.X); - buf.WriteDouble(value.Start.Y); - buf.WriteDouble(value.End.X); - buf.WriteDouble(value.End.Y); - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/PathHandler.cs b/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/PathHandler.cs deleted file mode 100644 index 4b7aa4c8b5..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/PathHandler.cs +++ /dev/null @@ -1,74 +0,0 @@ -using System; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandlers.GeometricHandlers; - -/// -/// A type handler for the PostgreSQL path data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-geometric.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class PathHandler : NpgsqlTypeHandler -{ - public PathHandler(PostgresType pgType) : base(pgType) {} - - #region Read - - /// - public override async ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - { - await buf.Ensure(5, async); - var open = buf.ReadByte() switch - { - 1 => false, - 0 => true, - _ => throw new Exception("Error decoding binary geometric path: bad open byte") - }; - - var numPoints = buf.ReadInt32(); - var result = new NpgsqlPath(numPoints, open); - for (var i = 0; i < numPoints; i++) - { - await buf.Ensure(16, async); - result.Add(new NpgsqlPoint(buf.ReadDouble(), buf.ReadDouble())); - } - return result; - } - - #endregion - - #region Write - - /// - public override int ValidateAndGetLength(NpgsqlPath value, ref NpgsqlLengthCache? 
lengthCache, NpgsqlParameter? parameter) - => 5 + value.Count * 16; - - /// - public override async Task Write(NpgsqlPath value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 5) - await buf.Flush(async, cancellationToken); - buf.WriteByte((byte)(value.Open ? 0 : 1)); - buf.WriteInt32(value.Count); - - foreach (var p in value) - { - if (buf.WriteSpaceLeft < 16) - await buf.Flush(async, cancellationToken); - buf.WriteDouble(p.X); - buf.WriteDouble(p.Y); - } - } - - #endregion -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/PointHandler.cs b/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/PointHandler.cs deleted file mode 100644 index d02bd67ec8..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/PointHandler.cs +++ /dev/null @@ -1,36 +0,0 @@ -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandlers.GeometricHandlers; - -/// -/// A type handler for the PostgreSQL point data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-geometric.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class PointHandler : NpgsqlSimpleTypeHandler -{ - public PointHandler(PostgresType pgType) : base(pgType) {} - - /// - public override NpgsqlPoint Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => new(buf.ReadDouble(), buf.ReadDouble()); - - /// - public override int ValidateAndGetLength(NpgsqlPoint value, NpgsqlParameter? 
parameter) - => 16; - - /// - public override void Write(NpgsqlPoint value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - { - buf.WriteDouble(value.X); - buf.WriteDouble(value.Y); - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/PolygonHandler.cs b/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/PolygonHandler.cs deleted file mode 100644 index 004bd3ebbc..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/GeometricHandlers/PolygonHandler.cs +++ /dev/null @@ -1,65 +0,0 @@ -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandlers.GeometricHandlers; - -/// -/// A type handler for the PostgreSQL polygon data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-geometric.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class PolygonHandler : NpgsqlTypeHandler -{ - public PolygonHandler(PostgresType pgType) : base(pgType) {} - - #region Read - - /// - public override async ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - { - await buf.Ensure(4, async); - var numPoints = buf.ReadInt32(); - var result = new NpgsqlPolygon(numPoints); - for (var i = 0; i < numPoints; i++) - { - await buf.Ensure(16, async); - result.Add(new NpgsqlPoint(buf.ReadDouble(), buf.ReadDouble())); - } - return result; - } - - #endregion - - #region Write - - /// - public override int ValidateAndGetLength(NpgsqlPolygon value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - => 4 + value.Count * 16; - - /// - public override async Task Write(NpgsqlPolygon value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 4) - await buf.Flush(async, cancellationToken); - buf.WriteInt32(value.Count); - - foreach (var p in value) - { - if (buf.WriteSpaceLeft < 16) - await buf.Flush(async, cancellationToken); - buf.WriteDouble(p.X); - buf.WriteDouble(p.Y); - } - } - - #endregion -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/HstoreHandler.cs b/src/Npgsql/Internal/TypeHandlers/HstoreHandler.cs deleted file mode 100644 index 0b8613d979..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/HstoreHandler.cs +++ /dev/null @@ -1,178 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Collections.Immutable; -using System.Diagnostics; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers; - -/// -/// A type handler for the PostgreSQL hstore extension data type, which stores sets of key/value pairs within a -/// single PostgreSQL value. -/// -/// -/// See https://www.postgresql.org/docs/current/hstore.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. 
-/// -public class HstoreHandler : - NpgsqlTypeHandler>, - INpgsqlTypeHandler>, - INpgsqlTypeHandler> -{ - /// - /// The text handler to which we delegate encoding/decoding of the actual strings - /// - readonly TextHandler _textHandler; - - internal HstoreHandler(PostgresType postgresType, TextHandler textHandler) - : base(postgresType) - => _textHandler = textHandler; - - #region Write - - /// - public int ValidateAndGetLength(IDictionary value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - { - lengthCache ??= new NpgsqlLengthCache(1); - if (lengthCache.IsPopulated) - return lengthCache.Get(); - - // Leave empty slot for the entire hstore length, and go ahead an populate the individual string slots - var pos = lengthCache.Position; - lengthCache.Set(0); - - var totalLen = 4; // Number of key-value pairs - foreach (var kv in value) - { - totalLen += 8; // Key length + value length - if (kv.Key == null) - throw new FormatException("HSTORE doesn't support null keys"); - totalLen += _textHandler.ValidateAndGetLength(kv.Key, ref lengthCache, null); - if (kv.Value != null) - totalLen += _textHandler.ValidateAndGetLength(kv.Value!, ref lengthCache, null); - } - - return lengthCache.Lengths[pos] = totalLen; - } - - /// - public int ValidateAndGetLength( - ImmutableDictionary value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength((IDictionary)value, ref lengthCache, parameter); - - /// - public override int ValidateAndGetLength(Dictionary value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength(value, ref lengthCache, parameter); - - /// - public override int ValidateObjectAndGetLength(object? value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - => value switch - { - ImmutableDictionary converted => ValidateAndGetLength(converted, ref lengthCache, parameter), - Dictionary converted => ValidateAndGetLength(converted, ref lengthCache, parameter), - IDictionary converted => ValidateAndGetLength(converted, ref lengthCache, parameter), - - DBNull => 0, - null => 0, - _ => throw new InvalidCastException($"Can't write CLR type {value.GetType()} with handler type HstoreHandler") - }; - - /// - public override Task WriteObjectWithLength( - object? value, - NpgsqlWriteBuffer buf, - NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, - bool async, - CancellationToken cancellationToken = default) - => value switch - { - ImmutableDictionary converted => ((INpgsqlTypeHandler>)this).WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), - Dictionary converted => ((INpgsqlTypeHandler>)this).WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), - IDictionary converted => ((INpgsqlTypeHandler>)this).WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), - - DBNull => WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken), - null => WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken), - _ => throw new InvalidCastException($"Can't write CLR type {value.GetType()} with handler type BoolHandler") - }; - - /// - public async Task Write(IDictionary value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 4) - await buf.Flush(async, cancellationToken); - buf.WriteInt32(value.Count); - if (value.Count == 0) - return; - - foreach (var kv in value) - { - await ((INpgsqlTypeHandler)_textHandler).WriteWithLength(kv.Key, buf, lengthCache, parameter, async, cancellationToken); - await ((INpgsqlTypeHandler)_textHandler).WriteWithLength(kv.Value, buf, lengthCache, parameter, async, cancellationToken); - } - } - - /// - public Task Write(ImmutableDictionary value, - NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => Write((IDictionary)value, buf, lengthCache, parameter, async, cancellationToken); - - /// - public override Task Write(Dictionary value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => Write(value, buf, lengthCache, parameter, async, cancellationToken); - - #endregion - - #region Read - - async ValueTask ReadInto(T dictionary, int numElements, NpgsqlReadBuffer buf, bool async) - where T : IDictionary - { - for (var i = 0; i < numElements; i++) - { - await buf.Ensure(4, async); - var keyLen = buf.ReadInt32(); - Debug.Assert(keyLen != -1); - var key = await _textHandler.Read(buf, keyLen, async); - - await buf.Ensure(4, async); - var valueLen = buf.ReadInt32(); - - dictionary[key] = valueLen == -1 - ? null - : await _textHandler.Read(buf, valueLen, async); - } - return dictionary; - } - - /// - public override async ValueTask> Read(NpgsqlReadBuffer buf, int len, bool async, - FieldDescription? fieldDescription = null) - { - await buf.Ensure(4, async); - var numElements = buf.ReadInt32(); - return await ReadInto(new Dictionary(numElements), numElements, buf, async); - } - - ValueTask> INpgsqlTypeHandler>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription) - => new(Read(buf, len, async, fieldDescription).Result); - - async ValueTask> INpgsqlTypeHandler>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - { - await buf.Ensure(4, async); - var numElements = buf.ReadInt32(); - return (await ReadInto(ImmutableDictionary.Empty.ToBuilder(), numElements, buf, async)) - .ToImmutable(); - } - - #endregion -} diff --git a/src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/Int2VectorHandler.cs b/src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/Int2VectorHandler.cs deleted file mode 100644 index 1523b66d69..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/Int2VectorHandler.cs +++ /dev/null @@ -1,18 +0,0 @@ -using Npgsql.Internal.TypeHandlers.NumericHandlers; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.InternalTypeHandlers; - -/// -/// An int2vector is simply a regular array of shorts, with the sole exception that its lower bound must -/// be 0 (we send 1 for regular arrays). 
-/// -sealed class Int2VectorHandler : ArrayHandler -{ - public Int2VectorHandler(PostgresType arrayPostgresType, PostgresType postgresShortType) - : base(arrayPostgresType, new Int16Handler(postgresShortType), ArrayNullabilityMode.Never, 0) { } - - public override NpgsqlTypeHandler CreateArrayHandler(PostgresArrayType pgArrayType, ArrayNullabilityMode arrayNullabilityMode) - => new ArrayHandler(pgArrayType, this, arrayNullabilityMode); -} diff --git a/src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/InternalCharHandler.cs b/src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/InternalCharHandler.cs deleted file mode 100644 index 2131cc16c8..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/InternalCharHandler.cs +++ /dev/null @@ -1,87 +0,0 @@ -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.InternalTypeHandlers; - -/// -/// A type handler for the PostgreSQL "char" type, used only internally. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-character.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class InternalCharHandler : NpgsqlSimpleTypeHandler, - INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler -{ - public InternalCharHandler(PostgresType pgType) : base(pgType) {} - - #region Read - - /// - public override char Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => (char)buf.ReadByte(); - - byte INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => buf.ReadByte(); - - short INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? 
fieldDescription) - => buf.ReadByte(); - - int INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => buf.ReadByte(); - - long INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => buf.ReadByte(); - - #endregion - - #region Write - - /// - public int ValidateAndGetLength(byte value, NpgsqlParameter? parameter) => 1; - - /// - public override int ValidateAndGetLength(char value, NpgsqlParameter? parameter) - { - _ = checked((byte)value); - return 1; - } - - /// - public int ValidateAndGetLength(short value, NpgsqlParameter? parameter) - { - _ = checked((byte)value); - return 1; - } - - /// - public int ValidateAndGetLength(int value, NpgsqlParameter? parameter) - { - _ = checked((byte)value); - return 1; - } - - /// - public int ValidateAndGetLength(long value, NpgsqlParameter? parameter) - { - _ = checked((byte)value); - return 1; - } - - /// - public override void Write(char value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteByte((byte)value); - /// - public void Write(byte value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteByte(value); - /// - public void Write(short value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteByte((byte)value); - /// - public void Write(int value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteByte((byte)value); - /// - public void Write(long value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) => buf.WriteByte((byte)value); - - #endregion -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/OIDVectorHandler.cs b/src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/OIDVectorHandler.cs deleted file mode 100644 index 00b3a57aa1..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/OIDVectorHandler.cs +++ /dev/null @@ -1,18 +0,0 @@ -using Npgsql.Internal.TypeHandlers.NumericHandlers; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.InternalTypeHandlers; - -/// -/// An OIDVector is simply a regular array of uints, with the sole exception that its lower bound must -/// be 0 (we send 1 for regular arrays). -/// -sealed class OIDVectorHandler : ArrayHandler -{ - public OIDVectorHandler(PostgresType oidvectorType, PostgresType oidType) - : base(oidvectorType, new UInt32Handler(oidType), ArrayNullabilityMode.Never, 0) { } - - public override NpgsqlTypeHandler CreateArrayHandler(PostgresArrayType pgArrayType, ArrayNullabilityMode arrayNullabilityMode) - => new ArrayHandler(pgArrayType, this, arrayNullabilityMode); -} diff --git a/src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/PgLsnHandler.cs b/src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/PgLsnHandler.cs deleted file mode 100644 index 75e85ab3e6..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/PgLsnHandler.cs +++ /dev/null @@ -1,31 +0,0 @@ -using System.Diagnostics; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandlers.InternalTypeHandlers; - -sealed partial class PgLsnHandler : NpgsqlSimpleTypeHandler -{ - public PgLsnHandler(PostgresType pgType) : base(pgType) {} - - #region Read - - public override NpgsqlLogSequenceNumber Read(NpgsqlReadBuffer buf, int len, FieldDescription? 
fieldDescription = null) - { - Debug.Assert(len == 8); - return new NpgsqlLogSequenceNumber(buf.ReadUInt64()); - } - - #endregion Read - - #region Write - - public override int ValidateAndGetLength(NpgsqlLogSequenceNumber value, NpgsqlParameter? parameter) => 8; - - public override void Write(NpgsqlLogSequenceNumber value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => buf.WriteUInt64((ulong)value); - - #endregion Write -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/TidHandler.cs b/src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/TidHandler.cs deleted file mode 100644 index 0148fc1071..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/InternalTypeHandlers/TidHandler.cs +++ /dev/null @@ -1,39 +0,0 @@ -using System.Diagnostics; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandlers.InternalTypeHandlers; - -sealed partial class TidHandler : NpgsqlSimpleTypeHandler -{ - public TidHandler(PostgresType pgType) : base(pgType) {} - - #region Read - - public override NpgsqlTid Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - { - Debug.Assert(len == 6); - - var blockNumber = buf.ReadUInt32(); - var offsetNumber = buf.ReadUInt16(); - - return new NpgsqlTid(blockNumber, offsetNumber); - } - - #endregion Read - - #region Write - - public override int ValidateAndGetLength(NpgsqlTid value, NpgsqlParameter? parameter) - => 6; - - public override void Write(NpgsqlTid value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) - { - buf.WriteUInt32(value.BlockNumber); - buf.WriteUInt16(value.OffsetNumber); - } - - #endregion Write -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/JsonPathHandler.cs b/src/Npgsql/Internal/TypeHandlers/JsonPathHandler.cs deleted file mode 100644 index 7b2735fcd3..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/JsonPathHandler.cs +++ /dev/null @@ -1,74 +0,0 @@ -using System; -using System.IO; -using System.Text; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using Npgsql.TypeMapping; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandlers; - -/// -/// A type handler for the PostgreSQL jsonpath data type. -/// -/// -/// See https://www.postgresql.org/docs/current/datatype-json.html#DATATYPE-JSONPATH. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class JsonPathHandler : NpgsqlTypeHandler, ITextReaderHandler -{ - readonly TextHandler _textHandler; - - /// - /// Prepended to the string in the wire encoding - /// - const byte JsonPathVersion = 1; - - /// - protected internal JsonPathHandler(PostgresType postgresType, Encoding encoding) - : base(postgresType) - => _textHandler = new TextHandler(postgresType, encoding); - - /// - public override async ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription = null) - { - await buf.Ensure(1, async); - - var version = buf.ReadByte(); - if (version != JsonPathVersion) - throw new NotSupportedException($"Don't know how to decode JSONPATH with wire format {version}, your connection is now broken"); - - return await _textHandler.Read(buf, len - 1, async, fieldDescription); - } - - /// - public override int ValidateAndGetLength(string value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) => - 1 + _textHandler.ValidateAndGetLength(value, ref lengthCache, parameter); - - /// - public override async Task Write(string value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 1) - await buf.Flush(async, cancellationToken); - - buf.WriteByte(JsonPathVersion); - - await _textHandler.Write(value, buf, lengthCache, parameter, async, cancellationToken); - } - - /// - public TextReader GetTextReader(Stream stream, NpgsqlReadBuffer buffer) - { - var version = stream.ReadByte(); - if (version != JsonPathVersion) - throw new NotSupportedException($"Don't know how to decode JSONPATH with wire format {version}, your connection is now broken"); - - return _textHandler.GetTextReader(stream, buffer); - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/JsonTextHandler.cs b/src/Npgsql/Internal/TypeHandlers/JsonTextHandler.cs deleted file mode 100644 index 2842370336..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/JsonTextHandler.cs +++ /dev/null @@ -1,209 +0,0 @@ -using System; -using System.Diagnostics.CodeAnalysis; -using System.IO; -using System.Text; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers; - -/// -/// A text-only type handler for the PostgreSQL json and jsonb data type. 
This handler does not support serialization/deserialization -/// with System.Text.Json or Json.NET. -/// -/// -/// See https://www.postgresql.org/docs/current/datatype-json.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public class JsonTextHandler : NpgsqlTypeHandler, ITextReaderHandler -{ - protected TextHandler TextHandler { get; } - readonly bool _isJsonb; - readonly int _headerLen; - - internal override bool PreferTextWrite => false; - - /// - /// Prepended to the string in the wire encoding - /// - const byte JsonbProtocolVersion = 1; - - /// - public JsonTextHandler(PostgresType postgresType, Encoding encoding, bool isJsonb) - : base(postgresType) - { - _isJsonb = isJsonb; - _headerLen = isJsonb ? 1 : 0; - TextHandler = new TextHandler(postgresType, encoding); - } - - protected bool IsSupportedAsText() - => typeof(T) == typeof(string) || - typeof(T) == typeof(char[]) || - typeof(T) == typeof(ArraySegment) || - typeof(T) == typeof(char) || - typeof(T) == typeof(byte[]) || - typeof(T) == typeof(ReadOnlyMemory); - - protected bool IsSupported(Type type) - => type == typeof(string) || - type == typeof(char[]) || - type == typeof(ArraySegment) || - type == typeof(char) || - type == typeof(byte[]) || - type == typeof(ReadOnlyMemory); - - protected bool TryValidateAndGetLengthCustom( - [DisallowNull] TAny value, - ref NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, - out int length) - { - if (IsSupportedAsText()) - { - length = TextHandler.ValidateAndGetLength(value, ref lengthCache, parameter) + _headerLen; - return true; - } - - length = 0; - return false; - } - - /// - protected internal override int ValidateAndGetLengthCustom([DisallowNull] TAny value, ref NpgsqlLengthCache? lengthCache, - NpgsqlParameter? 
parameter) - => IsSupportedAsText() - ? TextHandler.ValidateAndGetLength(value, ref lengthCache, parameter) + _headerLen - : throw new InvalidCastException( - $"Can't write CLR type {value.GetType()}. " + - "You may need to use the System.Text.Json or Json.NET plugins, see the docs for more information."); - - protected override async Task WriteWithLengthCustom( - [DisallowNull] TAny value, - NpgsqlWriteBuffer buf, - NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, - bool async, - CancellationToken cancellationToken) - { - var spaceRequired = _isJsonb ? 5 : 4; - - if (buf.WriteSpaceLeft < spaceRequired) - await buf.Flush(async, cancellationToken); - - buf.WriteInt32(ValidateAndGetLength(value, ref lengthCache, parameter)); - - if (_isJsonb) - buf.WriteByte(JsonbProtocolVersion); - - if (typeof(TAny) == typeof(string)) - await TextHandler.Write((string)(object)value, buf, lengthCache, parameter, async, cancellationToken); - else if (typeof(TAny) == typeof(char[])) - await TextHandler.Write((char[])(object)value, buf, lengthCache, parameter, async, cancellationToken); - else if (typeof(TAny) == typeof(ArraySegment)) - await TextHandler.Write((ArraySegment)(object)value, buf, lengthCache, parameter, async, cancellationToken); - else if (typeof(TAny) == typeof(char)) - await TextHandler.Write((char)(object)value, buf, lengthCache, parameter, async, cancellationToken); - else if (typeof(TAny) == typeof(byte[])) - await TextHandler.Write((byte[])(object)value, buf, lengthCache, parameter, async, cancellationToken); - else if (typeof(TAny) == typeof(ReadOnlyMemory)) - await TextHandler.Write((ReadOnlyMemory)(object)value, buf, lengthCache, parameter, async, cancellationToken); - else throw new InvalidCastException( - $"Can't write CLR type {value.GetType()}. " + - "You may need to use the System.Text.Json or Json.NET plugins, see the docs for more information."); - } - - /// - public override int ValidateAndGetLength(string value, ref NpgsqlLengthCache? 
lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthCustom(value, ref lengthCache, parameter); - - /// - public override async Task Write(string value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (_isJsonb) - { - if (buf.WriteSpaceLeft < 1) - await buf.Flush(async, cancellationToken); - buf.WriteByte(JsonbProtocolVersion); - } - - await TextHandler.Write(value, buf, lengthCache, parameter, async, cancellationToken); - } - - /// - public override int ValidateObjectAndGetLength(object value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => value switch - { - string s => ValidateAndGetLength(s, ref lengthCache, parameter), - char[] s => ValidateAndGetLength(s, ref lengthCache, parameter), - ArraySegment s => ValidateAndGetLength(s, ref lengthCache, parameter), - char s => ValidateAndGetLength(s, ref lengthCache, parameter), - byte[] s => ValidateAndGetLength(s, ref lengthCache, parameter), - ReadOnlyMemory s => ValidateAndGetLength(s, ref lengthCache, parameter), - - _ => throw new InvalidCastException( - $"Can't write CLR type {value.GetType()}. " + - "You may need to use the System.Text.Json or Json.NET plugins, see the docs for more information.") - }; - - /// - public override Task WriteObjectWithLength(object? value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) - => value switch - { - null => WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken), - DBNull => WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken), - string s => WriteWithLengthCustom(s, buf, lengthCache, parameter, async, cancellationToken), - char[] s => WriteWithLengthCustom(s, buf, lengthCache, parameter, async, cancellationToken), - ArraySegment s => WriteWithLengthCustom(s, buf, lengthCache, parameter, async, cancellationToken), - char s => WriteWithLengthCustom(s, buf, lengthCache, parameter, async, cancellationToken), - byte[] s => WriteWithLengthCustom(s, buf, lengthCache, parameter, async, cancellationToken), - ReadOnlyMemory s => WriteWithLengthCustom(s, buf, lengthCache, parameter, async, cancellationToken), - - _ => throw new InvalidCastException( - $"Can't write CLR type {value.GetType()}. " + - "You may need to use the System.Text.Json or Json.NET plugins, see the docs for more information.") - }; - - /// - protected internal override async ValueTask ReadCustom(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - { - if (_isJsonb) - { - await buf.Ensure(1, async); - var version = buf.ReadByte(); - if (version != JsonbProtocolVersion) - throw new NotSupportedException($"Don't know how to decode JSONB with wire format {version}, your connection is now broken"); - len--; - } - - if (IsSupportedAsText()) - return await TextHandler.Read(buf, len, async, fieldDescription); - - throw new InvalidCastException( - $"Can't read JSON as CLR type {typeof(T)}. " + - "You may need to use the System.Text.Json or Json.NET plugins, see the docs for more information."); - } - - /// - public override ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription = null) - => ReadCustom(buf, len, async, fieldDescription); - - /// - public TextReader GetTextReader(Stream stream, NpgsqlReadBuffer buffer) - { - if (_isJsonb) - { - var version = stream.ReadByte(); - if (version != JsonbProtocolVersion) - throw new NpgsqlException($"Don't know how to decode jsonb with wire format {version}, your connection is now broken"); - } - - return TextHandler.GetTextReader(stream, buffer); - } -} diff --git a/src/Npgsql/Internal/TypeHandlers/LTreeHandlers/LQueryHandler.cs b/src/Npgsql/Internal/TypeHandlers/LTreeHandlers/LQueryHandler.cs deleted file mode 100644 index 9f73a4fb97..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/LTreeHandlers/LQueryHandler.cs +++ /dev/null @@ -1,90 +0,0 @@ -using System; -using System.IO; -using System.Text; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.LTreeHandlers; - -/// -/// LQuery binary encoding is a simple UTF8 string, but prepended with a version number. -/// -public class LQueryHandler : TextHandler -{ - /// - /// Prepended to the string in the wire encoding - /// - const byte LQueryProtocolVersion = 1; - - internal override bool PreferTextWrite => false; - - protected internal LQueryHandler(PostgresType postgresType, Encoding encoding) - : base(postgresType, encoding) {} - - #region Write - - public override int ValidateAndGetLength(string value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) => - base.ValidateAndGetLength(value, ref lengthCache, parameter) + 1; - - public override int ValidateAndGetLength(char[] value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) => - base.ValidateAndGetLength(value, ref lengthCache, parameter) + 1; - - public override int ValidateAndGetLength(ArraySegment value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) => - base.ValidateAndGetLength(value, ref lengthCache, parameter) + 1; - - public override async Task Write(string value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 1) - await buf.Flush(async, cancellationToken); - - buf.WriteByte(LQueryProtocolVersion); - await base.Write(value, buf, lengthCache, parameter, async, cancellationToken); - } - - public override async Task Write(char[] value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 1) - await buf.Flush(async, cancellationToken); - - buf.WriteByte(LQueryProtocolVersion); - await base.Write(value, buf, lengthCache, parameter, async, cancellationToken); - } - - public override async Task Write(ArraySegment value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 1) - await buf.Flush(async, cancellationToken); - - buf.WriteByte(LQueryProtocolVersion); - await base.Write(value, buf, lengthCache, parameter, async, cancellationToken); - } - - #endregion - - #region Read - - public override async ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription = null) - { - await buf.Ensure(1, async); - - var version = buf.ReadByte(); - if (version != LQueryProtocolVersion) - throw new NotSupportedException($"Don't know how to decode lquery with wire format {version}, your connection is now broken"); - - return await base.Read(buf, len - 1, async, fieldDescription); - } - - #endregion - - public override TextReader GetTextReader(Stream stream, NpgsqlReadBuffer buffer) - { - var version = stream.ReadByte(); - if (version != LQueryProtocolVersion) - throw new NpgsqlException($"Don't know how to decode lquery with wire format {version}, your connection is now broken"); - - return base.GetTextReader(stream, buffer); - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/LTreeHandlers/LTreeHandler.cs b/src/Npgsql/Internal/TypeHandlers/LTreeHandlers/LTreeHandler.cs deleted file mode 100644 index 4f43266d8f..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/LTreeHandlers/LTreeHandler.cs +++ /dev/null @@ -1,90 +0,0 @@ -using System; -using System.IO; -using System.Text; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.LTreeHandlers; - -/// -/// Ltree binary encoding is a simple UTF8 string, but prepended with a version number. -/// -public class LTreeHandler : TextHandler -{ - /// - /// Prepended to the string in the wire encoding - /// - const byte LtreeProtocolVersion = 1; - - internal override bool PreferTextWrite => false; - - protected internal LTreeHandler(PostgresType postgresType, Encoding encoding) - : base(postgresType, encoding) {} - - #region Write - - public override int ValidateAndGetLength(string value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) => - base.ValidateAndGetLength(value, ref lengthCache, parameter) + 1; - - public override int ValidateAndGetLength(char[] value, ref NpgsqlLengthCache? 
lengthCache, NpgsqlParameter? parameter) => - base.ValidateAndGetLength(value, ref lengthCache, parameter) + 1; - - public override int ValidateAndGetLength(ArraySegment value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) => - base.ValidateAndGetLength(value, ref lengthCache, parameter) + 1; - - public override async Task Write(string value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 1) - await buf.Flush(async, cancellationToken); - - buf.WriteByte(LtreeProtocolVersion); - await base.Write(value, buf, lengthCache, parameter, async, cancellationToken); - } - - public override async Task Write(char[] value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 1) - await buf.Flush(async, cancellationToken); - - buf.WriteByte(LtreeProtocolVersion); - await base.Write(value, buf, lengthCache, parameter, async, cancellationToken); - } - - public override async Task Write(ArraySegment value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 1) - await buf.Flush(async, cancellationToken); - - buf.WriteByte(LtreeProtocolVersion); - await base.Write(value, buf, lengthCache, parameter, async, cancellationToken); - } - - #endregion - - #region Read - - public override async ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription = null) - { - await buf.Ensure(1, async); - - var version = buf.ReadByte(); - if (version != LtreeProtocolVersion) - throw new NotSupportedException($"Don't know how to decode ltree with wire format {version}, your connection is now broken"); - - return await base.Read(buf, len - 1, async, fieldDescription); - } - - #endregion - - public override TextReader GetTextReader(Stream stream, NpgsqlReadBuffer buffer) - { - var version = stream.ReadByte(); - if (version != LtreeProtocolVersion) - throw new NpgsqlException($"Don't know how to decode ltree with wire format {version}, your connection is now broken"); - - return base.GetTextReader(stream, buffer); - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/LTreeHandlers/LTxtQueryHandler.cs b/src/Npgsql/Internal/TypeHandlers/LTreeHandlers/LTxtQueryHandler.cs deleted file mode 100644 index dcde2a1d73..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/LTreeHandlers/LTxtQueryHandler.cs +++ /dev/null @@ -1,93 +0,0 @@ -using System; -using System.IO; -using System.Text; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.LTreeHandlers; - -/// -/// LTxtQuery binary encoding is a simple UTF8 string, but prepended with a version number. -/// -public class LTxtQueryHandler : TextHandler -{ - /// - /// Prepended to the string in the wire encoding - /// - const byte LTxtQueryProtocolVersion = 1; - - internal override bool PreferTextWrite => false; - - protected internal LTxtQueryHandler(PostgresType postgresType, Encoding encoding) - : base(postgresType, encoding) {} - - #region Write - - public override int ValidateAndGetLength(string value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) => - base.ValidateAndGetLength(value, ref lengthCache, parameter) + 1; - - - public override int ValidateAndGetLength(char[] value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) => - base.ValidateAndGetLength(value, ref lengthCache, parameter) + 1; - - - public override int ValidateAndGetLength(ArraySegment value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) => - base.ValidateAndGetLength(value, ref lengthCache, parameter) + 1; - - - public override async Task Write(string value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 1) - await buf.Flush(async, cancellationToken); - - buf.WriteByte(LTxtQueryProtocolVersion); - await base.Write(value, buf, lengthCache, parameter, async, cancellationToken); - } - - public override async Task Write(char[] value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 1) - await buf.Flush(async, cancellationToken); - - buf.WriteByte(LTxtQueryProtocolVersion); - await base.Write(value, buf, lengthCache, parameter, async, cancellationToken); - } - - public override async Task Write(ArraySegment value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 1) - await buf.Flush(async, cancellationToken); - - buf.WriteByte(LTxtQueryProtocolVersion); - await base.Write(value, buf, lengthCache, parameter, async, cancellationToken); - } - - #endregion - - #region Read - - public override async ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription = null) - { - await buf.Ensure(1, async); - - var version = buf.ReadByte(); - if (version != LTxtQueryProtocolVersion) - throw new NotSupportedException($"Don't know how to decode ltxtquery with wire format {version}, your connection is now broken"); - - return await base.Read(buf, len - 1, async, fieldDescription); - } - - #endregion - - public override TextReader GetTextReader(Stream stream, NpgsqlReadBuffer buffer) - { - var version = stream.ReadByte(); - if (version != LTxtQueryProtocolVersion) - throw new NpgsqlException($"Don't know how to decode ltxtquery with wire format {version}, your connection is now broken"); - - return base.GetTextReader(stream, buffer); - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/MultirangeHandler.cs b/src/Npgsql/Internal/TypeHandlers/MultirangeHandler.cs deleted file mode 100644 index f0f2c11827..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/MultirangeHandler.cs +++ /dev/null @@ -1,211 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandlers; - -// NOTE: This cannot inherit from NpgsqlTypeHandler[]>, since that triggers infinite generic recursion in Native AOT -public partial class MultirangeHandler : NpgsqlTypeHandler, - INpgsqlTypeHandler[]>, - INpgsqlTypeHandler>> -{ - /// - /// The type handler for the range that this multirange type holds - /// - protected RangeHandler RangeHandler { get; } - - /// - public MultirangeHandler(PostgresMultirangeType pgMultirangeType, RangeHandler rangeHandler) - : base(pgMultirangeType) - => RangeHandler = rangeHandler; - - public ValueTask[]> Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription = null) - => ReadMultirangeArray(buf, len, async, fieldDescription); - - protected async ValueTask[]> ReadMultirangeArray( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - { - await buf.Ensure(4, async); - var numRanges = buf.ReadInt32(); - var multirange = new NpgsqlRange[numRanges]; - - for (var i = 0; i < numRanges; i++) - { - await buf.Ensure(4, async); - var rangeLen = buf.ReadInt32(); - multirange[i] = await RangeHandler.ReadRange(buf, rangeLen, async, fieldDescription); - } - - return multirange; - } - - ValueTask>> INpgsqlTypeHandler>>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadMultirangeList(buf, len, async, fieldDescription); - - protected async ValueTask>> ReadMultirangeList( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - { - await buf.Ensure(4, async); - var numRanges = buf.ReadInt32(); - var multirange = new List>(numRanges); - - for (var i = 0; i < numRanges; i++) - { - await buf.Ensure(4, async); - var rangeLen = buf.ReadInt32(); - multirange.Add(await RangeHandler.ReadRange(buf, rangeLen, async, fieldDescription)); - } - - return multirange; - } - - public override async ValueTask ReadAsObject(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - => await Read(buf, len, async, fieldDescription); - - public int ValidateAndGetLength(NpgsqlRange[] value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthMultirange(value, ref lengthCache, parameter); - - public int ValidateAndGetLength(List> value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthMultirange(value, ref lengthCache, parameter); - - protected int ValidateAndGetLengthMultirange( - IList> value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - { - lengthCache ??= new NpgsqlLengthCache(1); - if (lengthCache.IsPopulated) - return lengthCache.Get(); - - // Leave empty slot for the entire array length, and go ahead an populate the element slots - var pos = lengthCache.Position; - lengthCache.Set(0); - - var sum = 4 + 4 * value.Count; - for (var i = 0; i < value.Count; i++) - sum += RangeHandler.ValidateAndGetLength(value[i], ref lengthCache, parameter); - - lengthCache.Lengths[pos] = sum; - return sum; - } - - public Task Write( - NpgsqlRange[] value, - NpgsqlWriteBuffer buf, - NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, - bool async, - CancellationToken cancellationToken = default) - => WriteMultirange(value, buf, lengthCache, parameter, async, cancellationToken); - - public Task Write( - List> value, - NpgsqlWriteBuffer buf, - NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, - bool async, - CancellationToken cancellationToken = default) - => WriteMultirange(value, buf, lengthCache, parameter, async, cancellationToken); - - public async Task WriteMultirange( - IList> value, - NpgsqlWriteBuffer buf, - NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, - bool async, - CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 4) - await buf.Flush(async, cancellationToken); - - buf.WriteInt32(value.Count); - - for (var i = 0; i < value.Count; i++) - await ((INpgsqlTypeHandler>)RangeHandler).WriteWithLength(value[i], buf, lengthCache, parameter: null, async, cancellationToken); - } - - public override Type GetFieldType(FieldDescription? 
fieldDescription = null) => typeof(NpgsqlRange[]); - - /// - public override NpgsqlTypeHandler CreateArrayHandler(PostgresArrayType pgArrayType, ArrayNullabilityMode arrayNullabilityMode) - => throw new NotSupportedException(); - - /// - public override NpgsqlTypeHandler CreateRangeHandler(PostgresType pgRangeType) - => throw new NotSupportedException(); - - /// - public override NpgsqlTypeHandler CreateMultirangeHandler(PostgresMultirangeType pgMultirangeType) - => throw new NotSupportedException(); -} - -public class MultirangeHandler : MultirangeHandler, - INpgsqlTypeHandler[]>, INpgsqlTypeHandler>> -{ - /// - public MultirangeHandler(PostgresMultirangeType pgMultirangeType, RangeHandler rangeHandler) - : base(pgMultirangeType, rangeHandler) {} - - ValueTask[]> INpgsqlTypeHandler[]>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadMultirangeArray(buf, len, async, fieldDescription); - - ValueTask>> INpgsqlTypeHandler>>.Read( - NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadMultirangeList(buf, len, async, fieldDescription); - - public int ValidateAndGetLength(List> value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthMultirange(value, ref lengthCache, parameter); - - public int ValidateAndGetLength(NpgsqlRange[] value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthMultirange(value, ref lengthCache, parameter); - - public Task Write( - List> value, - NpgsqlWriteBuffer buf, - NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, - bool async, - CancellationToken cancellationToken = default) - => WriteMultirange(value, buf, lengthCache, parameter, async, cancellationToken); - - public Task Write( - NpgsqlRange[] value, - NpgsqlWriteBuffer buf, - NpgsqlLengthCache? lengthCache, - NpgsqlParameter? 
parameter, - bool async, - CancellationToken cancellationToken = default) - => WriteMultirange(value, buf, lengthCache, parameter, async, cancellationToken); - - public override int ValidateObjectAndGetLength(object? value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => value switch - { - NpgsqlRange[] converted => ((INpgsqlTypeHandler[]>)this).ValidateAndGetLength(converted, ref lengthCache, parameter), - NpgsqlRange[] converted => ((INpgsqlTypeHandler[]>)this).ValidateAndGetLength(converted, ref lengthCache, parameter), - List> converted => ((INpgsqlTypeHandler>>)this).ValidateAndGetLength(converted, ref lengthCache, parameter), - List> converted => ((INpgsqlTypeHandler>>)this).ValidateAndGetLength(converted, ref lengthCache, parameter), - - DBNull => 0, - null => 0, - _ => throw new InvalidCastException($"Can't write CLR type {value.GetType()} with handler type RangeHandler") - }; - - public override Task WriteObjectWithLength(object? value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) - => value switch - { - NpgsqlRange[] converted => ((INpgsqlTypeHandler[]>)this).WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), - NpgsqlRange[] converted => ((INpgsqlTypeHandler[]>)this).WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), - List> converted => ((INpgsqlTypeHandler>>)this).WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), - List> converted => ((INpgsqlTypeHandler>>)this).WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), - - DBNull => WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken), - null => WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken), - _ => throw new InvalidCastException($"Can't write CLR type {value.GetType()} with handler type RangeHandler") - }; -} diff --git a/src/Npgsql/Internal/TypeHandlers/NetworkHandlers/CidrHandler.cs b/src/Npgsql/Internal/TypeHandlers/NetworkHandlers/CidrHandler.cs deleted file mode 100644 index 6d5eb29f10..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/NetworkHandlers/CidrHandler.cs +++ /dev/null @@ -1,50 +0,0 @@ -using System.Net; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -#pragma warning disable 618 - -namespace Npgsql.Internal.TypeHandlers.NetworkHandlers; - -/// -/// A type handler for the PostgreSQL cidr data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-net-types.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. 
-/// -public partial class CidrHandler : NpgsqlSimpleTypeHandler<(IPAddress Address, int Subnet)>, INpgsqlSimpleTypeHandler -{ - public CidrHandler(PostgresType pgType) : base(pgType) {} - - /// - public override (IPAddress Address, int Subnet) Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => InetHandler.DoRead(buf, len, fieldDescription, true); - - NpgsqlInet INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - { - var (address, subnet) = Read(buf, len, fieldDescription); - return new NpgsqlInet(address, subnet); - } - - /// - public override int ValidateAndGetLength((IPAddress Address, int Subnet) value, NpgsqlParameter? parameter) - => InetHandler.GetLength(value.Address); - - /// - public int ValidateAndGetLength(NpgsqlInet value, NpgsqlParameter? parameter) - => InetHandler.GetLength(value.Address); - - /// - public override void Write((IPAddress Address, int Subnet) value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => InetHandler.DoWrite(value.Address, value.Subnet, buf, true); - - /// - public void Write(NpgsqlInet value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => InetHandler.DoWrite(value.Address, value.Netmask, buf, true); -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/NetworkHandlers/InetHandler.cs b/src/Npgsql/Internal/TypeHandlers/NetworkHandlers/InetHandler.cs deleted file mode 100644 index ed10be3ef8..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/NetworkHandlers/InetHandler.cs +++ /dev/null @@ -1,133 +0,0 @@ -using System; -using System.Diagnostics; -using System.Net; -using System.Net.Sockets; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -#pragma warning disable 618 - -namespace Npgsql.Internal.TypeHandlers.NetworkHandlers; - -/// -/// A type handler for the PostgreSQL cidr data type. 
-/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-net-types.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class InetHandler : NpgsqlSimpleTypeHandler, - INpgsqlSimpleTypeHandler<(IPAddress Address, int Subnet)>, - INpgsqlSimpleTypeHandler -{ - // ReSharper disable InconsistentNaming - const byte IPv4 = 2; - const byte IPv6 = 3; - // ReSharper restore InconsistentNaming - - public InetHandler(PostgresType pgType) : base(pgType) {} - - #region Read - - /// - public override IPAddress Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => DoRead(buf, len, fieldDescription, false).Address; - -#pragma warning disable CA1801 // Review unused parameters - internal static (IPAddress Address, int Subnet) DoRead( - NpgsqlReadBuffer buf, - int len, - FieldDescription? fieldDescription, - bool isCidrHandler) - { - buf.ReadByte(); // addressFamily - var mask = buf.ReadByte(); - var isCidr = buf.ReadByte() == 1; - Debug.Assert(isCidrHandler == isCidr); - var numBytes = buf.ReadByte(); - var bytes = new byte[numBytes]; - for (var i = 0; i < bytes.Length; i++) - bytes[i] = buf.ReadByte(); - - return (new IPAddress(bytes), mask); - } -#pragma warning restore CA1801 // Review unused parameters - - /// - (IPAddress Address, int Subnet) INpgsqlSimpleTypeHandler<(IPAddress Address, int Subnet)>.Read( - NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => DoRead(buf, len, fieldDescription, false); - - NpgsqlInet INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? 
fieldDescription) - { - var (address, subnet) = DoRead(buf, len, fieldDescription, false); - return new NpgsqlInet(address, subnet); - } - - #endregion Read - - #region Write - - /// - public override int ValidateAndGetLength(IPAddress value, NpgsqlParameter? parameter) - => GetLength(value); - - /// - public int ValidateAndGetLength((IPAddress Address, int Subnet) value, NpgsqlParameter? parameter) - => GetLength(value.Address); - - /// - public int ValidateAndGetLength(NpgsqlInet value, NpgsqlParameter? parameter) - => GetLength(value.Address); - - /// - public override void Write(IPAddress value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => DoWrite(value, -1, buf, false); - - /// - public void Write((IPAddress Address, int Subnet) value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => DoWrite(value.Address, value.Subnet, buf, false); - - /// - public void Write(NpgsqlInet value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => DoWrite(value.Address, value.Netmask, buf, false); - - internal static void DoWrite(IPAddress ip, int mask, NpgsqlWriteBuffer buf, bool isCidrHandler) - { - switch (ip.AddressFamily) { - case AddressFamily.InterNetwork: - buf.WriteByte(IPv4); - if (mask == -1) - mask = 32; - break; - case AddressFamily.InterNetworkV6: - buf.WriteByte(IPv6); - if (mask == -1) - mask = 128; - break; - default: - throw new InvalidCastException($"Can't handle IPAddress with AddressFamily {ip.AddressFamily}, only InterNetwork or InterNetworkV6!"); - } - - buf.WriteByte((byte)mask); - buf.WriteByte((byte)(isCidrHandler ? 
1 : 0)); // Ignored on server side - var bytes = ip.GetAddressBytes(); - buf.WriteByte((byte)bytes.Length); - buf.WriteBytes(bytes, 0, bytes.Length); - } - - internal static int GetLength(IPAddress value) - => value.AddressFamily switch - { - AddressFamily.InterNetwork => 8, - AddressFamily.InterNetworkV6 => 20, - _ => throw new InvalidCastException($"Can't handle IPAddress with AddressFamily {value.AddressFamily}, only InterNetwork or InterNetworkV6!") - }; - - #endregion Write -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/NetworkHandlers/MacaddrHandler.cs b/src/Npgsql/Internal/TypeHandlers/NetworkHandlers/MacaddrHandler.cs deleted file mode 100644 index 26ade3e22b..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/NetworkHandlers/MacaddrHandler.cs +++ /dev/null @@ -1,52 +0,0 @@ -using System.Diagnostics; -using System.Net.NetworkInformation; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.NetworkHandlers; - -/// -/// A type handler for the PostgreSQL macaddr and macaddr8 data types. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-net-types.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class MacaddrHandler : NpgsqlSimpleTypeHandler -{ - public MacaddrHandler(PostgresType pgType) : base(pgType) {} - - #region Read - - /// - public override PhysicalAddress Read(NpgsqlReadBuffer buf, int len, FieldDescription? 
fieldDescription = null) - { - Debug.Assert(len == 6 || len == 8); - - var bytes = new byte[len]; - - buf.ReadBytes(bytes, 0, len); - return new PhysicalAddress(bytes); - } - - #endregion Read - - #region Write - - /// - public override int ValidateAndGetLength(PhysicalAddress value, NpgsqlParameter? parameter) - => value.GetAddressBytes().Length; - - /// - public override void Write(PhysicalAddress value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - { - var bytes = value.GetAddressBytes(); - buf.WriteBytes(bytes, 0, bytes.Length); - } - - #endregion Write -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/DecimalRaw.cs b/src/Npgsql/Internal/TypeHandlers/NumericHandlers/DecimalRaw.cs deleted file mode 100644 index 0115728f33..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/DecimalRaw.cs +++ /dev/null @@ -1,150 +0,0 @@ -using System; -using System.Runtime.InteropServices; - -namespace Npgsql.Internal.TypeHandlers.NumericHandlers; - -[StructLayout(LayoutKind.Explicit)] -struct DecimalRaw -{ - const int SignMask = unchecked((int)0x80000000); - const int ScaleMask = 0x00FF0000; - const int ScaleShift = 16; - - // Fast access for 10^n where n is 0-9 - internal static readonly uint[] Powers10 = - { - 1, - 10, - 100, - 1000, - 10000, - 100000, - 1000000, - 10000000, - 100000000, - 1000000000 - }; - - // The maximum power of 10 that a 32 bit unsigned integer can store - internal static readonly int MaxUInt32Scale = Powers10.Length - 1; - - // Do not change the order in which these fields are declared. It - // should be same as in the System.Decimal struct. 
- [FieldOffset(0)] - decimal _value; - [FieldOffset(0)] - int _flags; - [FieldOffset(4)] - uint _high; - [FieldOffset(8)] - uint _low; - [FieldOffset(12)] - uint _mid; - - public bool Negative => (_flags & SignMask) != 0; - - public int Scale - { - get => (_flags & ScaleMask) >> ScaleShift; - set => _flags = (_flags & SignMask) | ((value << ScaleShift) & ScaleMask); - } - - public uint High => _high; - public uint Mid => _mid; - public uint Low => _low; - public decimal Value => _value; - - public DecimalRaw(decimal value) : this() => _value = value; - - public DecimalRaw(long value) : this() - { - if (value >= 0) - _flags = 0; - else - { - _flags = SignMask; - value = -value; - } - - _low = (uint)value; - _mid = (uint)(value >> 32); - _high = 0; - } - - public static void Negate(ref DecimalRaw value) - => value._flags ^= SignMask; - - public static void Add(ref DecimalRaw value, uint addend) - { - uint integer; - uint sum; - - integer = value._low; - value._low = sum = integer + addend; - - if (sum >= integer && sum >= addend) - return; - - integer = value._mid; - value._mid = sum = integer + 1; - - if (sum >= integer && sum >= 1) - return; - - integer = value._high; - value._high = sum = integer + 1; - - if (sum < integer || sum < 1) - throw new OverflowException("Numeric value does not fit in a System.Decimal"); - } - - public static void Multiply(ref DecimalRaw value, uint multiplier) - { - ulong integer; - uint remainder; - - integer = (ulong)value._low * multiplier; - value._low = (uint)integer; - remainder = (uint)(integer >> 32); - - integer = (ulong)value._mid * multiplier + remainder; - value._mid = (uint)integer; - remainder = (uint)(integer >> 32); - - integer = (ulong)value._high * multiplier + remainder; - value._high = (uint)integer; - remainder = (uint)(integer >> 32); - - if (remainder != 0) - throw new OverflowException("Numeric value does not fit in a System.Decimal"); - } - - public static uint Divide(ref DecimalRaw value, uint divisor) - { - 
ulong integer; - uint remainder = 0; - - if (value._high != 0) - { - integer = value._high; - value._high = (uint)(integer / divisor); - remainder = (uint)(integer % divisor); - } - - if (value._mid != 0 || remainder != 0) - { - integer = ((ulong)remainder << 32) | value._mid; - value._mid = (uint)(integer / divisor); - remainder = (uint)(integer % divisor); - } - - if (value._low != 0 || remainder != 0) - { - integer = ((ulong)remainder << 32) | value._low; - value._low = (uint)(integer / divisor); - remainder = (uint)(integer % divisor); - } - - return remainder; - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/DoubleHandler.cs b/src/Npgsql/Internal/TypeHandlers/NumericHandlers/DoubleHandler.cs deleted file mode 100644 index 33b1bae14c..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/DoubleHandler.cs +++ /dev/null @@ -1,32 +0,0 @@ -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.NumericHandlers; - -/// -/// A type handler for the PostgreSQL double precision data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-numeric.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class DoubleHandler : NpgsqlSimpleTypeHandler -{ - public DoubleHandler(PostgresType pgType) : base(pgType) {} - - /// - public override double Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => buf.ReadDouble(); - - /// - public override int ValidateAndGetLength(double value, NpgsqlParameter? parameter) - => 8; - - /// - public override void Write(double value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) - => buf.WriteDouble(value); -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/Int16Handler.cs b/src/Npgsql/Internal/TypeHandlers/NumericHandlers/Int16Handler.cs deleted file mode 100644 index 30c704e574..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/Int16Handler.cs +++ /dev/null @@ -1,109 +0,0 @@ -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.NumericHandlers; - -/// -/// A type handler for the PostgreSQL smallint data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-numeric.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class Int16Handler : NpgsqlSimpleTypeHandler, - INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler, - INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler -{ - public Int16Handler(PostgresType pgType) : base(pgType) {} - - #region Read - - /// - public override short Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => buf.ReadInt16(); - - byte INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => checked((byte)Read(buf, len, fieldDescription)); - - sbyte INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => checked((sbyte)Read(buf, len, fieldDescription)); - - int INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => Read(buf, len, fieldDescription); - - long INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? 
fieldDescription) - => Read(buf, len, fieldDescription); - - float INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => Read(buf, len, fieldDescription); - - double INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => Read(buf, len, fieldDescription); - - decimal INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => Read(buf, len, fieldDescription); - - #endregion Read - - #region Write - - /// - public override int ValidateAndGetLength(short value, NpgsqlParameter? parameter) => 2; - /// - public int ValidateAndGetLength(byte value, NpgsqlParameter? parameter) => 2; - /// - public int ValidateAndGetLength(sbyte value, NpgsqlParameter? parameter) => 2; - /// - public int ValidateAndGetLength(decimal value, NpgsqlParameter? parameter) => 2; - - /// - public int ValidateAndGetLength(int value, NpgsqlParameter? parameter) - { - _ = checked((short)value); - return 2; - } - - /// - public int ValidateAndGetLength(long value, NpgsqlParameter? parameter) - { - _ = checked((short)value); - return 2; - } - - /// - public int ValidateAndGetLength(float value, NpgsqlParameter? parameter) - { - _ = checked((short)value); - return 2; - } - - /// - public int ValidateAndGetLength(double value, NpgsqlParameter? parameter) - { - _ = checked((short)value); - return 2; - } - - /// - public override void Write(short value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteInt16(value); - /// - public void Write(int value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteInt16((short)value); - /// - public void Write(long value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteInt16((short)value); - /// - public void Write(byte value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteInt16(value); - /// - public void Write(sbyte value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) => buf.WriteInt16(value); - /// - public void Write(decimal value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteInt16((short)value); - /// - public void Write(double value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteInt16((short)value); - /// - public void Write(float value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteInt16((short)value); - - #endregion Write -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/Int32Handler.cs b/src/Npgsql/Internal/TypeHandlers/NumericHandlers/Int32Handler.cs deleted file mode 100644 index 3b778d9a70..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/Int32Handler.cs +++ /dev/null @@ -1,96 +0,0 @@ -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.NumericHandlers; - -/// -/// A type handler for the PostgreSQL integer data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-numeric.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class Int32Handler : NpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler, - INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler, - INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler -{ - public Int32Handler(PostgresType pgType) : base(pgType) {} - - #region Read - - public override int Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => buf.ReadInt32(); - - byte INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? 
fieldDescription) - => checked((byte)Read(buf, len, fieldDescription)); - - short INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => checked((short)Read(buf, len, fieldDescription)); - - long INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => Read(buf, len, fieldDescription); - - float INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => Read(buf, len, fieldDescription); - - double INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => Read(buf, len, fieldDescription); - - decimal INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => Read(buf, len, fieldDescription); - - #endregion Read - - #region Write - - /// - public override int ValidateAndGetLength(int value, NpgsqlParameter? parameter) => 4; - /// - public int ValidateAndGetLength(short value, NpgsqlParameter? parameter) => 4; - /// - public int ValidateAndGetLength(byte value, NpgsqlParameter? parameter) => 4; - /// - public int ValidateAndGetLength(decimal value, NpgsqlParameter? parameter) => 4; - - /// - public int ValidateAndGetLength(long value, NpgsqlParameter? parameter) - { - _ = checked((int)value); - return 4; - } - - /// - public int ValidateAndGetLength(float value, NpgsqlParameter? parameter) - { - _ = checked((int)value); - return 4; - } - - /// - public int ValidateAndGetLength(double value, NpgsqlParameter? parameter) - { - _ = checked((int)value); - return 4; - } - - /// - public override void Write(int value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteInt32(value); - /// - public void Write(short value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteInt32(value); - /// - public void Write(long value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) => buf.WriteInt32((int)value); - /// - public void Write(byte value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteInt32(value); - /// - public void Write(float value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteInt32((int)value); - /// - public void Write(double value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteInt32((int)value); - /// - public void Write(decimal value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteInt32((int)value); - - #endregion Write -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/Int64Handler.cs b/src/Npgsql/Internal/TypeHandlers/NumericHandlers/Int64Handler.cs deleted file mode 100644 index 7a39de1856..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/Int64Handler.cs +++ /dev/null @@ -1,92 +0,0 @@ -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.NumericHandlers; - -/// -/// A type handler for the PostgreSQL bigint data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-numeric.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class Int64Handler : NpgsqlSimpleTypeHandler, - INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler, - INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler -{ - public Int64Handler(PostgresType pgType) : base(pgType) {} - - #region Read - - /// - public override long Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => buf.ReadInt64(); - - byte INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? 
fieldDescription) - => checked((byte)Read(buf, len, fieldDescription)); - - short INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => checked((short)Read(buf, len, fieldDescription)); - - int INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => checked((int)Read(buf, len, fieldDescription)); - - float INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => Read(buf, len, fieldDescription); - - double INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => Read(buf, len, fieldDescription); - - decimal INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => Read(buf, len, fieldDescription); - - #endregion Read - - #region Write - - /// - public override int ValidateAndGetLength(long value, NpgsqlParameter? parameter) => 8; - /// - public int ValidateAndGetLength(int value, NpgsqlParameter? parameter) => 8; - /// - public int ValidateAndGetLength(short value, NpgsqlParameter? parameter) => 8; - /// - public int ValidateAndGetLength(byte value, NpgsqlParameter? parameter) => 8; - /// - public int ValidateAndGetLength(decimal value, NpgsqlParameter? parameter) => 8; - - /// - public int ValidateAndGetLength(float value, NpgsqlParameter? parameter) - { - _ = checked((long)value); - return 8; - } - - /// - public int ValidateAndGetLength(double value, NpgsqlParameter? parameter) - { - _ = checked((long)value); - return 8; - } - - /// - public override void Write(long value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteInt64(value); - /// - public void Write(short value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteInt64(value); - /// - public void Write(int value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) => buf.WriteInt64(value); - /// - public void Write(byte value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteInt64(value); - /// - public void Write(float value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteInt64((long)value); - /// - public void Write(double value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteInt64((long)value); - /// - public void Write(decimal value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteInt64((long)value); - - #endregion Write -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/MoneyHandler.cs b/src/Npgsql/Internal/TypeHandlers/NumericHandlers/MoneyHandler.cs deleted file mode 100644 index ebab3d3fb9..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/MoneyHandler.cs +++ /dev/null @@ -1,52 +0,0 @@ -using System; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.NumericHandlers; - -/// -/// A type handler for the PostgreSQL money data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-money.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class MoneyHandler : NpgsqlSimpleTypeHandler -{ - public MoneyHandler(PostgresType pgType) : base(pgType) {} - - const int MoneyScale = 2; - - /// - public override decimal Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => new DecimalRaw(buf.ReadInt64()) { Scale = MoneyScale }.Value; - - /// - public override int ValidateAndGetLength(decimal value, NpgsqlParameter? parameter) - => value < -92233720368547758.08M || value > 92233720368547758.07M - ? 
throw new OverflowException($"The supplied value ({value}) is outside the range for a PostgreSQL money value.") - : 8; - - /// - public override void Write(decimal value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - { - var raw = new DecimalRaw(value); - - var scaleDifference = MoneyScale - raw.Scale; - if (scaleDifference > 0) - DecimalRaw.Multiply(ref raw, DecimalRaw.Powers10[scaleDifference]); - else - { - value = Math.Round(value, MoneyScale, MidpointRounding.AwayFromZero); - raw = new DecimalRaw(value); - } - - var result = (long)raw.Mid << 32 | raw.Low; - if (raw.Negative) result = -result; - buf.WriteInt64(result); - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/NumericHandler.cs b/src/Npgsql/Internal/TypeHandlers/NumericHandlers/NumericHandler.cs deleted file mode 100644 index 1e624f86f7..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/NumericHandler.cs +++ /dev/null @@ -1,434 +0,0 @@ -using System; -using System.Globalization; -using System.Numerics; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.NumericHandlers; - -/// -/// A type handler for the PostgreSQL numeric data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-numeric.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. 
-/// -public partial class NumericHandler : NpgsqlTypeHandler, - INpgsqlTypeHandler, INpgsqlTypeHandler, INpgsqlTypeHandler, INpgsqlTypeHandler, - INpgsqlTypeHandler, INpgsqlTypeHandler, INpgsqlTypeHandler -{ - public NumericHandler(PostgresType pgType) : base(pgType) {} - - const int MaxDecimalScale = 28; - - const int SignPositive = 0x0000; - const int SignNegative = 0x4000; - const int SignNan = 0xC000; - const int SignPinf = 0xD000; - const int SignNinf = 0xF000; - const int SignSpecialMask = 0xC000; - - const int MaxGroupCount = 8; - const int MaxGroupScale = 4; - - static readonly uint MaxGroupSize = DecimalRaw.Powers10[MaxGroupScale]; - - #region Read - - /// - public override async ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - { - await buf.Ensure(4 * sizeof(short), async); - var result = new DecimalRaw(); - var groups = buf.ReadInt16(); - var weight = buf.ReadInt16() - groups + 1; - var sign = buf.ReadUInt16(); - - if ((sign & SignSpecialMask) == SignSpecialMask) - { - throw sign switch - { - SignNan => new InvalidCastException("Numeric NaN not supported by System.Decimal"), - SignPinf => new InvalidCastException("Numeric Infinity not supported by System.Decimal"), - SignNinf => new InvalidCastException("Numeric -Infinity not supported by System.Decimal"), - _ => new InvalidCastException($"Numeric special value {sign} not supported by System.Decimal") - }; - } - - if (sign == SignNegative) - DecimalRaw.Negate(ref result); - - var scale = buf.ReadInt16(); - if (scale < 0 is var exponential && exponential) - scale = (short)(-scale); - else - result.Scale = scale; - - if (scale > MaxDecimalScale) - throw new OverflowException("Numeric value does not fit in a System.Decimal"); - - var scaleDifference = exponential - ? 
weight * MaxGroupScale - : weight * MaxGroupScale + scale; - - if (groups > MaxGroupCount) - throw new OverflowException("Numeric value does not fit in a System.Decimal"); - - await buf.Ensure(groups * sizeof(ushort), async); - - if (groups == MaxGroupCount) - { - while (groups-- > 1) - { - DecimalRaw.Multiply(ref result, MaxGroupSize); - DecimalRaw.Add(ref result, buf.ReadUInt16()); - } - - var group = buf.ReadUInt16(); - var groupSize = DecimalRaw.Powers10[-scaleDifference]; - if (group % groupSize != 0) - throw new OverflowException("Numeric value does not fit in a System.Decimal"); - - DecimalRaw.Multiply(ref result, MaxGroupSize / groupSize); - DecimalRaw.Add(ref result, group / groupSize); - } - else - { - while (groups-- > 0) - { - DecimalRaw.Multiply(ref result, MaxGroupSize); - DecimalRaw.Add(ref result, buf.ReadUInt16()); - } - - if (scaleDifference < 0) - DecimalRaw.Divide(ref result, DecimalRaw.Powers10[-scaleDifference]); - else - while (scaleDifference > 0) - { - var scaleChunk = Math.Min(DecimalRaw.MaxUInt32Scale, scaleDifference); - DecimalRaw.Multiply(ref result, DecimalRaw.Powers10[scaleChunk]); - scaleDifference -= scaleChunk; - } - } - - return result.Value; - } - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => (byte)await Read(buf, len, async, fieldDescription); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => (short)await Read(buf, len, async, fieldDescription); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => (int)await Read(buf, len, async, fieldDescription); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription) - => (long)await Read(buf, len, async, fieldDescription); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => (float)await Read(buf, len, async, fieldDescription); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => (double)await Read(buf, len, async, fieldDescription); - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - { - await buf.Ensure(4 * sizeof(short), async); - - var groups = (int)buf.ReadUInt16(); - var weightLeft = (int)buf.ReadInt16(); - var weightRight = weightLeft - groups + 1; - var sign = buf.ReadUInt16(); - buf.ReadInt16(); // dscale - - if (groups == 0) - { - return sign switch - { - SignPositive or SignNegative => BigInteger.Zero, - SignNan => throw new InvalidCastException("Numeric NaN not supported by BigInteger"), - SignPinf => throw new InvalidCastException("Numeric Infinity not supported by BigInteger"), - SignNinf => throw new InvalidCastException("Numeric -Infinity not supported by BigInteger"), - _ => throw new InvalidCastException($"Numeric special value {sign} not supported") - }; - } - - if (weightRight < 0) - { - await buf.Skip(groups * sizeof(ushort), async); - throw new InvalidCastException("Numeric value with non-zero fractional digits not supported by BigInteger"); - } - - var digits = new ushort[groups]; - - for (var i = 0; i < groups; i++) - { - await buf.Ensure(sizeof(ushort), async); - digits[i] = buf.ReadUInt16(); - } - - // Calculate powers 10^8, 10^16, 10^32, ... - // We should have the last calculated power to be less than the input - var lenPow = 2; // 2 ushorts fit in one uint, represents 10^8 - var numPowers = 0; - while (lenPow < weightLeft + 1) - { - lenPow <<= 1; - ++numPowers; - } - var factors = numPowers > 0 ? 
new BigInteger[numPowers] : null; - if (numPowers > 0) - { - factors![0] = new BigInteger(100000000U); - for (var i = 1; i < numPowers; i++) - factors[i] = factors[i - 1] * factors[i - 1]; - } - - var result = ToBigIntegerInner(0, weightLeft + 1, digits, factors); - return sign == SignPositive ? result : -result; - - static BigInteger ToBigIntegerInner(int offset, int length, ushort[] digits, BigInteger[]? factors) - { - if (length <= 2) - { - var r = 0U; - for (var i = offset; i < offset + length; i++) - { - r *= 10000U; - r += i < digits.Length ? digits[i] : 0U; - } - return r; - } - else - { - // Split the input into two halves, the lower one should be a power of two in digit length, - // then multiply the higher part with a precomputed power of 10^8 and add the results. - var lenFirstHalf = 2 << 1; // 2 ushorts fit in one uint, skip 1 since we've already covered the base case. - var pos = 0; - while (lenFirstHalf < length) - { - lenFirstHalf <<= 1; - ++pos; - } - var factor = factors![pos]; - lenFirstHalf >>= 1; - var lo = ToBigIntegerInner(offset + length - lenFirstHalf, lenFirstHalf, digits, factors); - var hi = ToBigIntegerInner(offset, length - lenFirstHalf, digits, factors); - return hi * factor + lo; // .NET uses Karatsuba multiplication, so this will be fast. - } - } - } - - #endregion - - #region Write - - /// - public override int ValidateAndGetLength(decimal value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - { - lengthCache ??= new NpgsqlLengthCache(1); - if (lengthCache.IsPopulated) - return lengthCache.Get(); - - var groupCount = 0; - var raw = new DecimalRaw(value); - if (raw.Low != 0 || raw.Mid != 0 || raw.High != 0) - { - uint remainder = default; - var scaleChunk = raw.Scale % MaxGroupScale; - if (scaleChunk > 0) - { - var divisor = DecimalRaw.Powers10[scaleChunk]; - var multiplier = DecimalRaw.Powers10[MaxGroupScale - scaleChunk]; - remainder = DecimalRaw.Divide(ref raw, divisor) * multiplier; - } - - while (remainder == 0) - remainder = DecimalRaw.Divide(ref raw, MaxGroupSize); - - groupCount++; - - while (raw.Low != 0 || raw.Mid != 0 || raw.High != 0) - { - DecimalRaw.Divide(ref raw, MaxGroupSize); - groupCount++; - } - } - - return lengthCache.Set((4 + groupCount) * sizeof(short)); - } - - /// - public int ValidateAndGetLength(short value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength((decimal)value, ref lengthCache, parameter); - /// - public int ValidateAndGetLength(int value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength((decimal)value, ref lengthCache, parameter); - /// - public int ValidateAndGetLength(long value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength((decimal)value, ref lengthCache, parameter); - /// - public int ValidateAndGetLength(float value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength((decimal)value, ref lengthCache, parameter); - /// - public int ValidateAndGetLength(double value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength((decimal)value, ref lengthCache, parameter); - /// - public int ValidateAndGetLength(byte value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - => ValidateAndGetLength((decimal)value, ref lengthCache, parameter); - - public override async Task Write(decimal value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < (4 + MaxGroupCount) * sizeof(short)) - await buf.Flush(async, cancellationToken); - - WriteInner(new DecimalRaw(value), buf); - - static void WriteInner(DecimalRaw raw, NpgsqlWriteBuffer buf) - { - var weight = 0; - var groupCount = 0; - Span groups = stackalloc short[MaxGroupCount]; - groups.Fill(0); // SkipLocalsInit - - if (raw.Low != 0 || raw.Mid != 0 || raw.High != 0) - { - var scale = raw.Scale; - weight = -scale / MaxGroupScale - 1; - - uint remainder; - var scaleChunk = scale % MaxGroupScale; - if (scaleChunk > 0) - { - var divisor = DecimalRaw.Powers10[scaleChunk]; - var multiplier = DecimalRaw.Powers10[MaxGroupScale - scaleChunk]; - remainder = DecimalRaw.Divide(ref raw, divisor) * multiplier; - - if (remainder != 0) - { - weight--; - goto WriteGroups; - } - } - - while ((remainder = DecimalRaw.Divide(ref raw, MaxGroupSize)) == 0) - weight++; - - WriteGroups: - groups[groupCount++] = (short)remainder; - - while (raw.Low != 0 || raw.Mid != 0 || raw.High != 0) - groups[groupCount++] = (short)DecimalRaw.Divide(ref raw, MaxGroupSize); - } - - buf.WriteInt16(groupCount); - buf.WriteInt16(groupCount + weight); - buf.WriteInt16(raw.Negative ? SignNegative : SignPositive); - buf.WriteInt16(raw.Scale); - - while (groupCount > 0) - buf.WriteInt16(groups[--groupCount]); - } - } - - /// - public Task Write(short value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => Write((decimal)value, buf, lengthCache, parameter, async, cancellationToken); - /// - public Task Write(int value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) - => Write((decimal)value, buf, lengthCache, parameter, async, cancellationToken); - /// - public Task Write(long value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => Write((decimal)value, buf, lengthCache, parameter, async, cancellationToken); - /// - public Task Write(byte value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => Write((decimal)value, buf, lengthCache, parameter, async, cancellationToken); - /// - public Task Write(float value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => Write((decimal)value, buf, lengthCache, parameter, async, cancellationToken); - /// - public Task Write(double value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => Write((decimal)value, buf, lengthCache, parameter, async, cancellationToken); - - static ushort[] FromBigInteger(BigInteger value) - { - var str = value.ToString(CultureInfo.InvariantCulture); - if (str == "0") - return new ushort[4]; - - var negative = str[0] == '-'; - var strLen = str.Length; - var numGroups = (strLen - (negative ? 
1 : 0) + 3) / 4; - - if (numGroups > 131072 / 4) - throw new InvalidCastException("Cannot write a BigInteger with more than 131072 digits"); - - var result = new ushort[4 + numGroups]; - - var strPos = strLen - numGroups * 4; - - var firstDigit = 0; - for (var i = 0; i < 4; i++) - { - if (strPos >= 0 && str[strPos] != '-') - firstDigit = firstDigit * 10 + (str[strPos] - '0'); - strPos++; - } - - result[4] = (ushort)firstDigit; - - for (var i = 1; i < numGroups; i++) - { - result[4 + i] = (ushort)((((str[strPos++] - '0') * 10 + (str[strPos++] - '0')) * 10 + (str[strPos++] - '0')) * 10 + - (str[strPos++] - '0')); - - } - - var lastNonZeroDigitPos = numGroups - 1; - while (result[4 + lastNonZeroDigitPos] == 0) - lastNonZeroDigitPos--; - - result[0] = (ushort)(lastNonZeroDigitPos + 1); // number of items in array - result[1] = (ushort)(numGroups - 1); // weight - result[2] = (ushort)(negative ? SignNegative : SignPositive); - result[3] = 0; // dscale - - return result; - } - - public int ValidateAndGetLength(BigInteger value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - { - lengthCache ??= new NpgsqlLengthCache(1); - if (lengthCache.IsPopulated) - return lengthCache.Get(); - - var result = FromBigInteger(value); - if (parameter != null) - parameter.ConvertedValue = result; - - return lengthCache.Set((4 + result[0]) * sizeof(ushort)); - } - - public async Task Write(BigInteger value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, - CancellationToken cancellationToken = default) - { - var result = (ushort[])(parameter?.ConvertedValue ?? 
FromBigInteger(value))!; - var len = 4 + result[0]; - var pos = 0; - while (len-- > 0) - { - if (buf.WriteSpaceLeft < sizeof(ushort)) - await buf.Flush(async, cancellationToken); - buf.WriteUInt16(result[pos++]); - } - } - - #endregion -} diff --git a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/SingleHandler.cs b/src/Npgsql/Internal/TypeHandlers/NumericHandlers/SingleHandler.cs deleted file mode 100644 index 09554db1e9..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/SingleHandler.cs +++ /dev/null @@ -1,45 +0,0 @@ -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.NumericHandlers; - -/// -/// A type handler for the PostgreSQL real data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-numeric.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class SingleHandler : NpgsqlSimpleTypeHandler, INpgsqlSimpleTypeHandler -{ - public SingleHandler(PostgresType pgType) : base(pgType) {} - - #region Read - - /// - public override float Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => buf.ReadSingle(); - - double INpgsqlSimpleTypeHandler.Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription) - => Read(buf, len, fieldDescription); - - #endregion Read - - #region Write - - /// - public int ValidateAndGetLength(double value, NpgsqlParameter? parameter) => 4; - /// - public override int ValidateAndGetLength(float value, NpgsqlParameter? parameter) => 4; - - /// - public void Write(double value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) => buf.WriteSingle((float)value); - /// - public override void Write(float value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => buf.WriteSingle(value); - - #endregion Write -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/UInt32Handler.cs b/src/Npgsql/Internal/TypeHandlers/NumericHandlers/UInt32Handler.cs deleted file mode 100644 index 1ea4633289..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/UInt32Handler.cs +++ /dev/null @@ -1,31 +0,0 @@ -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.NumericHandlers; - -/// -/// A type handler for PostgreSQL unsigned 32-bit data types. This is only used for internal types. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-oid.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class UInt32Handler : NpgsqlSimpleTypeHandler -{ - public UInt32Handler(PostgresType pgType) : base(pgType) {} - - /// - public override uint Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => buf.ReadUInt32(); - - /// - public override int ValidateAndGetLength(uint value, NpgsqlParameter? parameter) => 4; - - /// - public override void Write(uint value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) - => buf.WriteUInt32(value); -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/UInt64Handler.cs b/src/Npgsql/Internal/TypeHandlers/NumericHandlers/UInt64Handler.cs deleted file mode 100644 index db6d00d1db..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/NumericHandlers/UInt64Handler.cs +++ /dev/null @@ -1,29 +0,0 @@ -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers.NumericHandlers; - -/// -/// A type handler for PostgreSQL unsigned 64-bit data types. This is only used for internal types. -/// -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class UInt64Handler : NpgsqlSimpleTypeHandler -{ - public UInt64Handler(PostgresType pgType) : base(pgType) {} - - /// - public override ulong Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => buf.ReadUInt64(); - - /// - public override int ValidateAndGetLength(ulong value, NpgsqlParameter? parameter) => 8; - - /// - public override void Write(ulong value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) - => buf.WriteUInt64(value); -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/RangeHandler.cs b/src/Npgsql/Internal/TypeHandlers/RangeHandler.cs deleted file mode 100644 index 0c108696e1..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/RangeHandler.cs +++ /dev/null @@ -1,187 +0,0 @@ -using System; -using System.Diagnostics.CodeAnalysis; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandlers; - -/// -/// A type handler for PostgreSQL range types. -/// -/// -/// See https://www.postgresql.org/docs/current/static/rangetypes.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -/// The range subtype. -// NOTE: This cannot inherit from NpgsqlTypeHandler>, since that triggers infinite generic recursion in Native AOT -public partial class RangeHandler : NpgsqlTypeHandler, INpgsqlTypeHandler> -{ - /// - /// The type handler for the subtype that this range type holds - /// - protected NpgsqlTypeHandler SubtypeHandler { get; } - - /// - public RangeHandler(PostgresType rangePostgresType, NpgsqlTypeHandler subtypeHandler) - : base(rangePostgresType) - => SubtypeHandler = subtypeHandler; - - public override Type GetFieldType(FieldDescription? 
fieldDescription = null) => typeof(NpgsqlRange); - - /// - public override NpgsqlTypeHandler CreateArrayHandler(PostgresArrayType pgArrayType, ArrayNullabilityMode arrayNullabilityMode) - => new ArrayHandler(pgArrayType, this, arrayNullabilityMode); - - /// - public override NpgsqlTypeHandler CreateRangeHandler(PostgresType pgRangeType) - => throw new NotSupportedException(); - - /// - public override NpgsqlTypeHandler CreateMultirangeHandler(PostgresMultirangeType pgMultirangeType) - => throw new NotSupportedException(); - - #region Read - - /// - public ValueTask> Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - => ReadRange(buf, len, async, fieldDescription); - - protected internal async ValueTask> ReadRange(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - { - await buf.Ensure(1, async); - - var flags = (RangeFlags)buf.ReadByte(); - if ((flags & RangeFlags.Empty) != 0) - return NpgsqlRange.Empty; - - var lowerBound = default(TAnySubtype); - var upperBound = default(TAnySubtype); - - if ((flags & RangeFlags.LowerBoundInfinite) == 0) - lowerBound = await SubtypeHandler.ReadWithLength(buf, async); - - if ((flags & RangeFlags.UpperBoundInfinite) == 0) - upperBound = await SubtypeHandler.ReadWithLength(buf, async); - - return new NpgsqlRange(lowerBound, upperBound, flags); - } - - public override async ValueTask ReadAsObject(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - => await Read(buf, len, async, fieldDescription); - - #endregion - - #region Write - - /// - public int ValidateAndGetLength(NpgsqlRange value, [NotNullIfNotNull("lengthCache")] ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthRange(value, ref lengthCache, parameter); - - protected internal int ValidateAndGetLengthRange(NpgsqlRange value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - { - var totalLen = 1; - var lengthCachePos = lengthCache?.Position ?? 0; - if (!value.IsEmpty) - { - if (!value.LowerBoundInfinite) - { - totalLen += 4; - if (value.LowerBound is not null) - totalLen += SubtypeHandler.ValidateAndGetLength(value.LowerBound, ref lengthCache, null); - } - - if (!value.UpperBoundInfinite) - { - totalLen += 4; - if (value.UpperBound is not null) - totalLen += SubtypeHandler.ValidateAndGetLength(value.UpperBound, ref lengthCache, null); - } - } - - // If we're traversing an already-populated length cache, rewind to first element slot so that - // the elements' handlers can access their length cache values - if (lengthCache != null && lengthCache.IsPopulated) - lengthCache.Position = lengthCachePos; - - return totalLen; - } - - /// - public Task Write(NpgsqlRange value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => WriteRange(value, buf, lengthCache, parameter, async, cancellationToken); - - protected internal async Task WriteRange(NpgsqlRange value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 1) - await buf.Flush(async, cancellationToken); - - buf.WriteByte((byte)value.Flags); - - if (value.IsEmpty) - return; - - if (!value.LowerBoundInfinite) - await SubtypeHandler.WriteWithLength(value.LowerBound, buf, lengthCache, null, async, cancellationToken); - - if (!value.UpperBoundInfinite) - await SubtypeHandler.WriteWithLength(value.UpperBound, buf, lengthCache, null, async, cancellationToken); - } - - #endregion -} - -/// -/// Type handler for PostgreSQL range types. -/// -/// -/// Introduced in PostgreSQL 9.2. -/// https://www.postgresql.org/docs/current/static/rangetypes.html -/// -/// The main range subtype. -/// An alternative range subtype. 
-public class RangeHandler : RangeHandler, INpgsqlTypeHandler> -{ - /// - public RangeHandler(PostgresType rangePostgresType, NpgsqlTypeHandler subtypeHandler) - : base(rangePostgresType, subtypeHandler) {} - - ValueTask> INpgsqlTypeHandler>.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => ReadRange(buf, len, async, fieldDescription); - - /// - public int ValidateAndGetLength(NpgsqlRange value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLengthRange(value, ref lengthCache, parameter); - - /// - public Task Write(NpgsqlRange value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => WriteRange(value, buf, lengthCache, parameter, async, cancellationToken); - - public override int ValidateObjectAndGetLength(object? value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => value switch - { - NpgsqlRange converted => ValidateAndGetLength(converted, ref lengthCache, parameter), - NpgsqlRange converted => ValidateAndGetLength(converted, ref lengthCache, parameter), - - DBNull => 0, - null => 0, - _ => throw new InvalidCastException($"Can't write CLR type {value.GetType()} with handler type RangeHandler") - }; - - public override Task WriteObjectWithLength(object? value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) - => value switch - { - NpgsqlRange converted => ((INpgsqlTypeHandler>)this).WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), - NpgsqlRange converted => ((INpgsqlTypeHandler>)this).WriteWithLength(converted, buf, lengthCache, parameter, async, cancellationToken), - - DBNull => WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken), - null => WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken), - _ => throw new InvalidCastException($"Can't write CLR type {value.GetType()} with handler type RangeHandler") - }; -} diff --git a/src/Npgsql/Internal/TypeHandlers/RecordHandler.cs b/src/Npgsql/Internal/TypeHandlers/RecordHandler.cs deleted file mode 100644 index 9a255e02d3..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/RecordHandler.cs +++ /dev/null @@ -1,104 +0,0 @@ -using System; -using System.Linq; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.Internal.TypeMapping; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers; - -/// -/// Type handler for PostgreSQL record types. Defaults to returning object[], but can also return or . 
-/// -/// -/// https://www.postgresql.org/docs/current/static/datatype-pseudo.html -/// -/// Encoding (identical to composite): -/// A 32-bit integer with the number of columns, then for each column: -/// * An OID indicating the type of the column -/// * The length of the column(32-bit integer), or -1 if null -/// * The column data encoded as binary -/// -sealed partial class RecordHandler : NpgsqlTypeHandler -{ - readonly TypeMapper _typeMapper; - - public RecordHandler(PostgresType postgresType, TypeMapper typeMapper) - : base(postgresType) - => _typeMapper = typeMapper; - - #region Read - - protected internal override async ValueTask ReadCustom( - NpgsqlReadBuffer buf, - int len, - bool async, - FieldDescription? fieldDescription) - { - if (typeof(T) == typeof(object[])) - return (T)(object)await Read(buf, len, async, fieldDescription); - - if (typeof(T).FullName?.StartsWith("System.ValueTuple`", StringComparison.Ordinal) == true || - typeof(T).FullName?.StartsWith("System.Tuple`", StringComparison.Ordinal) == true) - { - var asArray = await Read(buf, len, async, fieldDescription); - if (typeof(T).GenericTypeArguments.Length != asArray.Length) - throw new InvalidCastException($"Cannot read record type with {asArray.Length} fields as {typeof(T)}"); - - var constructor = typeof(T).GetConstructors().Single(c => c.GetParameters().Length == asArray.Length); - return (T)constructor.Invoke(asArray); - } - - return await base.ReadCustom(buf, len, async, fieldDescription); - } - - public override async ValueTask ReadAsObject(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - => await Read(buf, len, async, fieldDescription); - - public override async ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription = null) - { - await buf.Ensure(4, async); - var fieldCount = buf.ReadInt32(); - var result = new object[fieldCount]; - - for (var i = 0; i < fieldCount; i++) - { - await buf.Ensure(8, async); - var typeOID = buf.ReadUInt32(); - var fieldLen = buf.ReadInt32(); - if (fieldLen == -1) // Null field, simply skip it and leave at default - continue; - result[i] = await _typeMapper.ResolveByOID(typeOID).ReadAsObject(buf, fieldLen, async); - } - - return result; - } - - /// - public override NpgsqlTypeHandler CreateRangeHandler(PostgresType pgRangeType) - => throw new NotSupportedException(); - - /// - public override NpgsqlTypeHandler CreateMultirangeHandler(PostgresMultirangeType pgMultirangeType) - => throw new NotSupportedException(); - - #endregion - - #region Write (unsupported) - - public override int ValidateAndGetLength(object[] value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => throw new NotSupportedException("Can't write record types"); - - public override Task Write( - object[] value, - NpgsqlWriteBuffer buf, - NpgsqlLengthCache? lengthCache, - NpgsqlParameter? 
parameter, - bool async, - CancellationToken cancellationToken = default) - => throw new NotSupportedException("Can't write record types"); - - #endregion -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/SystemTextJsonHandler.cs b/src/Npgsql/Internal/TypeHandlers/SystemTextJsonHandler.cs deleted file mode 100644 index 5ba3f03b3e..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/SystemTextJsonHandler.cs +++ /dev/null @@ -1,209 +0,0 @@ -using System; -using System.Diagnostics.CodeAnalysis; -using System.IO; -using System.Text; -using System.Text.Json; -using System.Text.Json.Nodes; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers; - -/// -/// A type handler for the PostgreSQL json and jsonb data type which uses System.Text.Json. -/// -/// -/// See https://www.postgresql.org/docs/current/datatype-json.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public class SystemTextJsonHandler : JsonTextHandler -{ - readonly JsonSerializerOptions _serializerOptions; - readonly bool _isJsonb; - readonly int _headerLen; - - /// - /// Prepended to the string in the wire encoding - /// - const byte JsonbProtocolVersion = 1; - - static readonly JsonSerializerOptions DefaultSerializerOptions = new(); - - /// - public SystemTextJsonHandler(PostgresType postgresType, Encoding encoding, bool isJsonb, JsonSerializerOptions? serializerOptions = null) - : base(postgresType, encoding, isJsonb) - { - _serializerOptions = serializerOptions ?? DefaultSerializerOptions; - _isJsonb = isJsonb; - _headerLen = isJsonb ? 
1 : 0; - } - - /// - protected internal override int ValidateAndGetLengthCustom([DisallowNull] TAny value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - { - if (IsSupportedAsText()) - return base.ValidateAndGetLengthCustom(value, ref lengthCache, parameter); - - if (typeof(TAny) == typeof(JsonDocument)) - { - lengthCache ??= new NpgsqlLengthCache(1); - if (lengthCache.IsPopulated) - return lengthCache.Get(); - - var data = SerializeJsonDocument((JsonDocument)(object)value); - if (parameter != null) - parameter.ConvertedValue = data; - return lengthCache.Set(data.Length + _headerLen); - } - - if (typeof(TAny) == typeof(JsonObject) || typeof(TAny) == typeof(JsonArray)) - { - lengthCache ??= new NpgsqlLengthCache(1); - if (lengthCache.IsPopulated) - return lengthCache.Get(); - - var data = SerializeJsonObject((JsonNode)(object)value); - if (parameter != null) - parameter.ConvertedValue = data; - return lengthCache.Set(data.Length + _headerLen); - } - - // User POCO, need to serialize. At least internally ArrayPool buffers are used... - var s = JsonSerializer.Serialize(value, _serializerOptions); - if (parameter != null) - parameter.ConvertedValue = s; - - return TextHandler.ValidateAndGetLength(s, ref lengthCache, parameter) + _headerLen; - } - - /// - protected override async Task WriteWithLengthCustom([DisallowNull] TAny value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken) - { - if (IsSupportedAsText()) - { - await base.WriteWithLengthCustom(value, buf, lengthCache, parameter, async, cancellationToken); - return; - } - - var spaceRequired = _isJsonb ? 
5 : 4; - - if (buf.WriteSpaceLeft < spaceRequired) - await buf.Flush(async, cancellationToken); - - buf.WriteInt32(ValidateAndGetLength(value, ref lengthCache, parameter)); - - if (_isJsonb) - buf.WriteByte(JsonbProtocolVersion); - - if (typeof(TAny) == typeof(JsonDocument)) - { - var data = parameter?.ConvertedValue != null - ? (byte[])parameter.ConvertedValue - : SerializeJsonDocument((JsonDocument)(object)value); - await buf.WriteBytesRaw(data, async, cancellationToken); - } - else if (typeof(TAny) == typeof(JsonObject) || typeof(TAny) == typeof(JsonArray)) - { - var data = parameter?.ConvertedValue != null - ? (byte[])parameter.ConvertedValue - : SerializeJsonObject((JsonNode)(object)value); - await buf.WriteBytesRaw(data, async, cancellationToken); - } - else - { - // User POCO, read serialized representation from the validation phase - var s = parameter?.ConvertedValue != null - ? (string)parameter.ConvertedValue - : JsonSerializer.Serialize(value, value.GetType(), _serializerOptions); - - await TextHandler.Write(s, buf, lengthCache, parameter, async, cancellationToken); - } - } - - /// - public override int ValidateObjectAndGetLength(object value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => IsSupported(value.GetType()) - ? base.ValidateObjectAndGetLength(value, ref lengthCache, parameter) - : value switch - { - JsonDocument jsonDocument => ValidateAndGetLengthCustom(jsonDocument, ref lengthCache, parameter), - JsonObject jsonObject => ValidateAndGetLengthCustom(jsonObject, ref lengthCache, parameter), - JsonArray jsonArray => ValidateAndGetLengthCustom(jsonArray, ref lengthCache, parameter), - _ => ValidateAndGetLengthCustom(value, ref lengthCache, parameter) - }; - - /// - public override Task WriteObjectWithLength(object? value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => value is null or DBNull || IsSupported(value.GetType()) - ? 
base.WriteObjectWithLength(value, buf, lengthCache, parameter, async, cancellationToken) - : value switch - { - JsonDocument jsonDocument => WriteWithLengthCustom(jsonDocument, buf, lengthCache, parameter, async, cancellationToken), - JsonObject jsonObject => WriteWithLengthCustom(jsonObject, buf, lengthCache, parameter, async, cancellationToken), - JsonArray jsonArray => WriteWithLengthCustom(jsonArray, buf, lengthCache, parameter, async, cancellationToken), - _ => WriteWithLengthCustom(value, buf, lengthCache, parameter, async, cancellationToken), - }; - - /// - protected internal override async ValueTask ReadCustom(NpgsqlReadBuffer buf, int byteLen, bool async, FieldDescription? fieldDescription) - { - if (IsSupportedAsText()) - { - return await base.ReadCustom(buf, byteLen, async, fieldDescription); - } - - if (_isJsonb) - { - await buf.Ensure(1, async); - var version = buf.ReadByte(); - if (version != JsonbProtocolVersion) - throw new NotSupportedException($"Don't know how to decode JSONB with wire format {version}, your connection is now broken"); - byteLen--; - } - - // JsonDocument is a view over its provided buffer, so we can't return one over our internal buffer (see #2811), so we deserialize - // a string and get a JsonDocument from that. #2818 tracks improving this. - if (typeof(T) == typeof(JsonDocument)) - return (T)(object)JsonDocument.Parse(await TextHandler.Read(buf, byteLen, async, fieldDescription)); - - // User POCO - if (buf.ReadBytesLeft >= byteLen) - return JsonSerializer.Deserialize(buf.ReadSpan(byteLen), _serializerOptions)!; - -#if NET6_0_OR_GREATER - return (async - ? 
await JsonSerializer.DeserializeAsync(buf.GetStream(byteLen, canSeek: false), _serializerOptions) - : JsonSerializer.Deserialize(buf.GetStream(byteLen, canSeek: false), _serializerOptions))!; -#else - return JsonSerializer.Deserialize(await TextHandler.Read(buf, byteLen, async, fieldDescription), _serializerOptions)!; -#endif - } - - byte[] SerializeJsonDocument(JsonDocument document) - { - // TODO: Writing is currently really inefficient - please don't criticize :) - // We need to implement one-pass writing to serialize directly to the buffer (or just switch to pipelines). - using var stream = new MemoryStream(); - using var writer = new Utf8JsonWriter(stream); - document.WriteTo(writer); - writer.Flush(); - return stream.ToArray(); - } - - byte[] SerializeJsonObject(JsonNode jsonObject) - { - // TODO: Writing is currently really inefficient - please don't criticize :) - // We need to implement one-pass writing to serialize directly to the buffer (or just switch to pipelines). - using var stream = new MemoryStream(); - using var writer = new Utf8JsonWriter(stream); - jsonObject.WriteTo(writer); - writer.Flush(); - return stream.ToArray(); - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/TextHandler.cs b/src/Npgsql/Internal/TypeHandlers/TextHandler.cs deleted file mode 100644 index a707c83efc..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/TextHandler.cs +++ /dev/null @@ -1,317 +0,0 @@ -using System; -using System.Buffers; -using System.Diagnostics; -using System.Diagnostics.CodeAnalysis; -using System.IO; -using System.Text; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers; - -/// -/// A type handler for PostgreSQL character data types (text, char, varchar, xml...). -/// -/// -/// See https://www.postgresql.org/docs/current/datatype-character.html. 
-/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class TextHandler : NpgsqlTypeHandler, INpgsqlTypeHandler, INpgsqlTypeHandler>, - INpgsqlTypeHandler, INpgsqlTypeHandler, INpgsqlTypeHandler>, ITextReaderHandler -{ - // Text types are handled a bit more efficiently when sent as text than as binary - // see https://github.com/npgsql/npgsql/issues/1210#issuecomment-235641670 - internal override bool PreferTextWrite => true; - - readonly Encoding _encoding; - - /// - protected internal TextHandler(PostgresType postgresType, Encoding encoding) - : base(postgresType) - => _encoding = encoding; - - #region Read - - /// - public override ValueTask Read(NpgsqlReadBuffer buf, int byteLen, bool async, FieldDescription? fieldDescription = null) - { - return buf.ReadBytesLeft >= byteLen - ? new ValueTask(buf.ReadString(byteLen)) - : ReadLong(_encoding, buf, byteLen, async); - - static async ValueTask ReadLong(Encoding encoding, NpgsqlReadBuffer buf, int byteLen, bool async) - { - if (byteLen <= buf.Size) - { - // The string's byte representation can fit in our read buffer, read it. - await buf.Ensure(byteLen, async); - return buf.ReadString(byteLen); - } - - // Bad case: the string's byte representation doesn't fit in our buffer. - // This is rare - will only happen in CommandBehavior.Sequential mode (otherwise the - // entire row is in memory). 
Tweaking the buffer length via the connection string can - - var tempBuf = ArrayPool.Shared.Rent(byteLen); - - try - { - var pos = 0; - while (true) - { - var len = Math.Min(buf.ReadBytesLeft, byteLen - pos); - buf.ReadBytes(tempBuf, pos, len); - pos += len; - if (pos < byteLen) - { - await buf.ReadMore(async); - continue; - } - break; - } - return encoding.GetString(tempBuf, 0, byteLen); - } - finally - { - ArrayPool.Shared.Return(tempBuf); - } - } - } - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int byteLen, bool async, FieldDescription? fieldDescription) - { - if (byteLen <= buf.Size) - { - // The string's byte representation can fit in our read buffer, read it. - await buf.Ensure(byteLen, async); - return buf.ReadChars(byteLen); - } - - var tempBuf = ArrayPool.Shared.Rent(byteLen); - - try - { - var pos = 0; - while (true) - { - var len = Math.Min(buf.ReadBytesLeft, byteLen - pos); - buf.ReadBytes(tempBuf, pos, len); - pos += len; - if (pos < byteLen) - { - await buf.ReadMore(async); - continue; - } - break; - } - return _encoding.GetChars(tempBuf, 0, byteLen); - } - finally - { - ArrayPool.Shared.Return(tempBuf); - } - } - - async ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? 
fieldDescription) - { - // Make sure we have enough bytes in the buffer for a single character - // We can get here a much bigger length in case it's a string - // while we want to read only its first character - var maxBytes = Math.Min(_encoding.GetMaxByteCount(1), len); - await buf.Ensure(maxBytes, async); - - var character = ReadCharCore(); - - // We've been requested to read 'len' bytes, which is why we're going to skip them - // This is important for NpgsqlDataReader with CommandBehavior.SequentialAccess - // which tracks how many bytes it has to skip for the next column - await buf.Skip(len, async); - return character; - - char ReadCharCore() - { - var charSpan = buf.Buffer.AsSpan(buf.ReadPosition, maxBytes); - var chars = _encoding.GetCharCount(charSpan); - if (chars < 1) - throw new NpgsqlException("Could not read char - string was empty"); - - Span destination = stackalloc char[chars]; - _encoding.GetChars(charSpan, destination); - return destination[0]; - } - } - - ValueTask> INpgsqlTypeHandler>.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => throw new NotSupportedException("Only writing ArraySegment to PostgreSQL text is supported, no reading."); - - ValueTask INpgsqlTypeHandler.Read(NpgsqlReadBuffer buf, int byteLen, bool async, FieldDescription? fieldDescription) - { - var bytes = new byte[byteLen]; - if (buf.ReadBytesLeft >= byteLen) - { - buf.ReadBytes(bytes, 0, byteLen); - return new ValueTask(bytes); - } - return ReadLong(buf, bytes, byteLen, async); - - static async ValueTask ReadLong(NpgsqlReadBuffer buf, byte[] bytes, int byteLen, bool async) - { - if (byteLen <= buf.Size) - { - // The bytes can fit in our read buffer, read it. - await buf.Ensure(byteLen, async); - buf.ReadBytes(bytes, 0, byteLen); - return bytes; - } - - // Bad case: the bytes don't fit in our buffer. - // This is rare - will only happen in CommandBehavior.Sequential mode (otherwise the - // entire row is in memory). 
Tweaking the buffer length via the connection string can - // help avoid this. - - var pos = 0; - while (true) - { - var len = Math.Min(buf.ReadBytesLeft, byteLen - pos); - buf.ReadBytes(bytes, pos, len); - pos += len; - if (pos < byteLen) - { - await buf.ReadMore(async); - continue; - } - break; - } - return bytes; - } - } - - ValueTask> INpgsqlTypeHandler>.Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => throw new NotSupportedException("Only writing ReadOnlyMemory to PostgreSQL text is supported, no reading."); - - #endregion - - #region Write - - /// - public override unsafe int ValidateAndGetLength(string value, [NotNullIfNotNull("lengthCache")] ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - { - lengthCache ??= new NpgsqlLengthCache(1); - if (lengthCache.IsPopulated) - return lengthCache.Get(); - - if (parameter == null || parameter.Size <= 0 || parameter.Size >= value.Length) - return lengthCache.Set(_encoding.GetByteCount(value)); - fixed (char* p = value) - return lengthCache.Set(_encoding.GetByteCount(p, parameter.Size)); - } - - /// - public virtual int ValidateAndGetLength(char[] value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - { - lengthCache ??= new NpgsqlLengthCache(1); - if (lengthCache.IsPopulated) - return lengthCache.Get(); - - return lengthCache.Set( - parameter == null || parameter.Size <= 0 || parameter.Size >= value.Length - ? _encoding.GetByteCount(value) - : _encoding.GetByteCount(value, 0, parameter.Size) - ); - } - - /// - public virtual int ValidateAndGetLength(ArraySegment value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - { - lengthCache ??= new NpgsqlLengthCache(1); - if (lengthCache.IsPopulated) - return lengthCache.Get(); - - if (parameter?.Size > 0) - throw new ArgumentException($"Parameter {parameter.ParameterName} is of type ArraySegment and should not have its Size set", parameter.ParameterName); - - return lengthCache.Set(value.Array is null ? 0 : _encoding.GetByteCount(value.Array, value.Offset, value.Count)); - } - - /// - public int ValidateAndGetLength(char value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - { - Span singleCharArray = stackalloc char[1]; - singleCharArray[0] = value; - return _encoding.GetByteCount(singleCharArray); - } - - /// - public int ValidateAndGetLength(byte[] value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => value.Length; - - /// - public int ValidateAndGetLength(ReadOnlyMemory value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => value.Length; - - /// - public override Task Write(string value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => WriteString(value, buf, lengthCache!, parameter, async, cancellationToken); - - /// - public virtual Task Write(char[] value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - var charLen = parameter == null || parameter.Size <= 0 || parameter.Size >= value.Length - ? value.Length - : parameter.Size; - return buf.WriteChars(value, 0, charLen, lengthCache!.GetLast(), async, cancellationToken); - } - - /// - public virtual Task Write(ArraySegment value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => value.Array is null ? 
Task.CompletedTask : buf.WriteChars(value.Array, value.Offset, value.Count, lengthCache!.GetLast(), async, cancellationToken); - - Task WriteString(string str, NpgsqlWriteBuffer buf, NpgsqlLengthCache lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - var charLen = parameter == null || parameter.Size <= 0 || parameter.Size >= str.Length - ? str.Length - : parameter.Size; - return buf.WriteString(str, charLen, lengthCache.GetLast(), async, cancellationToken); - } - - /// - public async Task Write(char value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < _encoding.GetMaxByteCount(1)) - await buf.Flush(async, cancellationToken); - WriteCharCore(value, buf); - - static unsafe void WriteCharCore(char value, NpgsqlWriteBuffer buf) - { - Span singleCharArray = stackalloc char[1]; - singleCharArray[0] = value; - buf.WriteChars(singleCharArray); - } - } - - - public Task Write(byte[] value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, - CancellationToken cancellationToken = default) - => buf.WriteBytesRaw(value, async, cancellationToken); - - public Task Write(ReadOnlyMemory value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, - CancellationToken cancellationToken = default) - => buf.WriteBytesRaw(value, async, cancellationToken); - - #endregion - - /// - public virtual TextReader GetTextReader(Stream stream, NpgsqlReadBuffer buffer) - { - var byteLength = (int)(stream.Length - stream.Position); - return buffer.ReadBytesLeft >= byteLength - ? 
buffer.GetPreparedTextReader(_encoding.GetString(buffer.Buffer, buffer.ReadPosition, byteLength), stream) - : new StreamReader(stream, _encoding); - } -} diff --git a/src/Npgsql/Internal/TypeHandlers/UnknownTypeHandler.cs b/src/Npgsql/Internal/TypeHandlers/UnknownTypeHandler.cs deleted file mode 100644 index 43d0be10fc..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/UnknownTypeHandler.cs +++ /dev/null @@ -1,95 +0,0 @@ -using System; -using System.Diagnostics.CodeAnalysis; -using System.Text; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers; - -/// -/// Handles "conversions" for columns sent by the database with unknown OIDs. -/// This differs from TextHandler in that its a text-only handler (we don't want to receive binary -/// representations of the types registered here). -/// Note that this handler is also used in the very initial query that loads the OID mappings -/// (chicken and egg problem). -/// Also used for sending parameters with unknown types (OID=0) -/// -sealed class UnknownTypeHandler : TextHandler -{ - internal UnknownTypeHandler(Encoding encoding) - : base(UnknownBackendType.Instance, encoding) - { - } - - #region Read - - public override ValueTask Read(NpgsqlReadBuffer buf, int byteLen, bool async, FieldDescription? fieldDescription = null) - { - if (fieldDescription == null) - throw new Exception($"Received an unknown field but {nameof(fieldDescription)} is null (i.e. COPY mode)"); - - if (fieldDescription.IsBinaryFormat) - { - // At least get the name of the PostgreSQL type for the exception - throw new NotSupportedException( - buf.Connector.TypeMapper.DatabaseInfo.ByOID.TryGetValue(fieldDescription.TypeOID, out var pgType) - ? $"The field '{fieldDescription.Name}' has type '{pgType.DisplayName}', which is currently unknown to Npgsql. 
You can retrieve it as a string by marking it as unknown, please see the FAQ." - : $"The field '{fieldDescription.Name}' has a type currently unknown to Npgsql (OID {fieldDescription.TypeOID}). You can retrieve it as a string by marking it as unknown, please see the FAQ." - ); - } - - return base.Read(buf, byteLen, async, fieldDescription); - } - - #endregion Read - - #region Write - - // Allow writing anything that is a string or can be converted to one via the unknown type handler - - protected internal override int ValidateAndGetLengthCustom( - [DisallowNull] TAny value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateObjectAndGetLength(value, ref lengthCache, parameter); - - public override int ValidateObjectAndGetLength(object value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - { - if (value is string asString) - return ValidateAndGetLength(asString, ref lengthCache, parameter); - - if (parameter == null) - throw CreateConversionButNoParamException(value.GetType()); - - var converted = Convert.ToString(value)!; - parameter.ConvertedValue = converted; - - return ValidateAndGetLength(converted, ref lengthCache, parameter); - } - - public override Task WriteObjectWithLength(object? value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (value is null or DBNull) - return base.WriteObjectWithLength(value, buf, lengthCache, parameter, async, cancellationToken); - - var convertedValue = value is string asString - ? 
asString - : (string)parameter!.ConvertedValue!; - - if (buf.WriteSpaceLeft < 4) - return WriteWithLengthLong(value, convertedValue, buf, lengthCache, parameter, async, cancellationToken); - - buf.WriteInt32(ValidateObjectAndGetLength(value, ref lengthCache, parameter)); - return Write(convertedValue, buf, lengthCache, parameter, async, cancellationToken); - - async Task WriteWithLengthLong(object value, string convertedValue, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken) - { - await buf.Flush(async, cancellationToken); - buf.WriteInt32(ValidateObjectAndGetLength(value!, ref lengthCache, parameter)); - await Write(convertedValue, buf, lengthCache, parameter, async, cancellationToken); - } - } - - #endregion Write -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/UnmappedEnumHandler.cs b/src/Npgsql/Internal/TypeHandlers/UnmappedEnumHandler.cs deleted file mode 100644 index d8accea307..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/UnmappedEnumHandler.cs +++ /dev/null @@ -1,149 +0,0 @@ -using System; -using System.Collections.Concurrent; -using System.Collections.Generic; -using System.Diagnostics.CodeAnalysis; -using System.Linq; -using System.Reflection; -using System.Text; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandlers; - -sealed class UnmappedEnumHandler : TextHandler -{ - readonly INpgsqlNameTranslator _nameTranslator; - - // Note that a separate instance of UnmappedEnumHandler is created for each PG enum type, so concurrency isn't "really" needed. - // However, in theory multiple different CLR enums may be used with the same PG enum type, and even if there's only one, we only know - // about it late (after construction), when the user actually reads/writes with one. 
So this handler is fully thread-safe. - readonly ConcurrentDictionary _types = new(); - - internal UnmappedEnumHandler(PostgresEnumType pgType, INpgsqlNameTranslator nameTranslator, Encoding encoding) - : base(pgType, encoding) - => _nameTranslator = nameTranslator; - - #region Read - - protected internal override async ValueTask ReadCustom(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - { - var s = await base.Read(buf, len, async, fieldDescription); - if (typeof(TAny) == typeof(string)) - return (TAny)(object)s; - - var typeRecord = GetTypeRecord(typeof(TAny)); - - if (!typeRecord.LabelToEnum.TryGetValue(s, out var value)) - throw new InvalidCastException($"Received enum value '{s}' from database which wasn't found on enum {typeof(TAny)}"); - - // TODO: Avoid boxing - return (TAny)(object)value; - } - - public override ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - => base.Read(buf, len, async, fieldDescription); - - #endregion - - #region Write - - public override int ValidateObjectAndGetLength(object? value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => value is null || value is DBNull - ? 0 - : ValidateAndGetLength(value, ref lengthCache, parameter); - - protected internal override int ValidateAndGetLengthCustom(TAny value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => ValidateAndGetLength(value!, ref lengthCache, parameter); - - [UnconditionalSuppressMessage("Unmapped enums currently aren't trimming-safe.", "IL2072")] - int ValidateAndGetLength(object value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - { - var type = value.GetType(); - if (type == typeof(string)) - return base.ValidateAndGetLength((string)value, ref lengthCache, parameter); - - var typeRecord = GetTypeRecord(type); - - // TODO: Avoid boxing - return typeRecord.EnumToLabel.TryGetValue((Enum)value, out var str) - ? 
base.ValidateAndGetLength(str, ref lengthCache, parameter) - : throw new InvalidCastException($"Can't write value {value} as enum {type}"); - } - - // TODO: This boxes the enum (again) - protected override Task WriteWithLengthCustom(TAny value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken) - => WriteObjectWithLength(value!, buf, lengthCache, parameter, async, cancellationToken); - - public override Task WriteObjectWithLength(object? value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (value is null || value is DBNull) - return WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken); - - if (buf.WriteSpaceLeft < 4) - return WriteWithLengthLong(value, buf, lengthCache, parameter, async, cancellationToken); - - buf.WriteInt32(ValidateAndGetLength(value, ref lengthCache, parameter)); - return Write(value, buf, lengthCache, parameter, async, cancellationToken); - - async Task WriteWithLengthLong(object value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken) - { - await buf.Flush(async, cancellationToken); - buf.WriteInt32(ValidateAndGetLength(value, ref lengthCache, parameter)); - await Write(value, buf, lengthCache, parameter, async, cancellationToken); - } - } - - [UnconditionalSuppressMessage("Unmapped enums currently aren't trimming-safe.", "IL2072")] - internal Task Write(object value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter, bool async, CancellationToken cancellationToken = default) - { - var type = value.GetType(); - if (type == typeof(string)) - return base.Write((string)value, buf, lengthCache, parameter, async, cancellationToken); - - var typeRecord = GetTypeRecord(type); - - // TODO: Avoid boxing - if (!typeRecord.EnumToLabel.TryGetValue((Enum)value, out var str)) - throw new InvalidCastException($"Can't write value {value} as enum {type}"); - return base.Write(str, buf, lengthCache, parameter, async, cancellationToken); - } - - #endregion - - #region Misc - - TypeRecord GetTypeRecord(Type type) - { -#if NETSTANDARD2_0 - return _types.GetOrAdd(type, t => CreateTypeRecord(t, _nameTranslator)); -#else - return _types.GetOrAdd(type, static (t, translator) => CreateTypeRecord(t, translator), _nameTranslator); -#endif - } - - static TypeRecord CreateTypeRecord(Type type, INpgsqlNameTranslator nameTranslator) - { - var enumToLabel = new Dictionary(); - var labelToEnum = new Dictionary(); - - foreach (var field in type.GetFields(BindingFlags.Static | BindingFlags.Public)) - { - var attribute = (PgNameAttribute?)field.GetCustomAttributes(typeof(PgNameAttribute), false).FirstOrDefault(); - var enumName = attribute?.PgName ?? 
nameTranslator.TranslateMemberName(field.Name); - var enumValue = (Enum)field.GetValue(null)!; - - enumToLabel[enumValue] = enumName; - labelToEnum[enumName] = enumValue; - } - - return new(enumToLabel, labelToEnum); - } - - #endregion - - record struct TypeRecord(Dictionary EnumToLabel, Dictionary LabelToEnum); -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/UnsupportedHandler.cs b/src/Npgsql/Internal/TypeHandlers/UnsupportedHandler.cs deleted file mode 100644 index 2d1f22f893..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/UnsupportedHandler.cs +++ /dev/null @@ -1,48 +0,0 @@ -using System; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers; - -sealed class UnsupportedHandler : NpgsqlTypeHandler -{ - readonly string _exceptionMessage; - - public UnsupportedHandler(PostgresType postgresType, string exceptionMessage) : base(postgresType) - => _exceptionMessage = exceptionMessage; - - public override ValueTask ReadAsObject(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - => throw new NotSupportedException(_exceptionMessage); - - public override int ValidateObjectAndGetLength(object value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => throw new NotSupportedException(_exceptionMessage); - - public override Task WriteObjectWithLength(object? value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, - CancellationToken cancellationToken = default) - => throw new NotSupportedException(_exceptionMessage); - - protected internal override ValueTask ReadCustom(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => throw new NotSupportedException(_exceptionMessage); - - protected override Task WriteWithLengthCustom(TAny value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? 
lengthCache, NpgsqlParameter? parameter, bool async, - CancellationToken cancellationToken) - => throw new NotSupportedException(_exceptionMessage); - - protected internal override int ValidateAndGetLengthCustom(TAny value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => throw new NotSupportedException(_exceptionMessage); - - public override Type GetFieldType(FieldDescription? fieldDescription = null) - => throw new NotSupportedException(_exceptionMessage); - - public override NpgsqlTypeHandler CreateArrayHandler(PostgresArrayType pgArrayType, ArrayNullabilityMode arrayNullabilityMode) - => throw new NotSupportedException(_exceptionMessage); - - public override NpgsqlTypeHandler CreateRangeHandler(PostgresType pgRangeType) - => throw new NotSupportedException(_exceptionMessage); - - public override NpgsqlTypeHandler CreateMultirangeHandler(PostgresMultirangeType pgMultirangeType) - => throw new NotSupportedException(_exceptionMessage); -} diff --git a/src/Npgsql/Internal/TypeHandlers/UuidHandler.cs b/src/Npgsql/Internal/TypeHandlers/UuidHandler.cs deleted file mode 100644 index c70da8060d..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/UuidHandler.cs +++ /dev/null @@ -1,76 +0,0 @@ -using System; -using System.Runtime.InteropServices; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers; - -/// -/// A type handler for the PostgreSQL uuid data type. -/// -/// -/// See https://www.postgresql.org/docs/current/static/datatype-uuid.html. -/// -/// The type handler API allows customizing Npgsql's behavior in powerful ways. However, although it is public, it -/// should be considered somewhat unstable, and may change in breaking ways, including in non-major releases. -/// Use it at your own risk. -/// -public partial class UuidHandler : NpgsqlSimpleTypeHandler -{ - // The following table shows .NET GUID vs Postgres UUID (RFC 4122) layouts. 
- // - // Note that the first fields are converted from/to native endianness (handled by the Read* - // and Write* methods), while the last field is always read/written in big-endian format. - // - // We're passing BitConverter.IsLittleEndian to prevent reversing endianness on little-endian systems. - // - // | Bits | Bytes | Name | Endianness (GUID) | Endianness (RFC 4122) | - // | ---- | ----- | ----- | ----------------- | --------------------- | - // | 32 | 4 | Data1 | Native | Big | - // | 16 | 2 | Data2 | Native | Big | - // | 16 | 2 | Data3 | Native | Big | - // | 64 | 8 | Data4 | Big | Big | - - public UuidHandler(PostgresType pgType) : base(pgType) {} - - /// - public override Guid Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - { - var raw = new GuidRaw - { - Data1 = buf.ReadInt32(), - Data2 = buf.ReadInt16(), - Data3 = buf.ReadInt16(), - Data4 = buf.ReadInt64(BitConverter.IsLittleEndian) - }; - - return raw.Value; - } - - /// - public override int ValidateAndGetLength(Guid value, NpgsqlParameter? parameter) - => 16; - - /// - public override void Write(Guid value, NpgsqlWriteBuffer buf, NpgsqlParameter? 
parameter) - { - var raw = new GuidRaw(value); - - buf.WriteInt32(raw.Data1); - buf.WriteInt16(raw.Data2); - buf.WriteInt16(raw.Data3); - buf.WriteInt64(raw.Data4, BitConverter.IsLittleEndian); - } - - [StructLayout(LayoutKind.Explicit)] - struct GuidRaw - { - [FieldOffset(00)] public Guid Value; - [FieldOffset(00)] public int Data1; - [FieldOffset(04)] public short Data2; - [FieldOffset(06)] public short Data3; - [FieldOffset(08)] public long Data4; - public GuidRaw(Guid value) : this() => Value = value; - } -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandlers/VoidHandler.cs b/src/Npgsql/Internal/TypeHandlers/VoidHandler.cs deleted file mode 100644 index da24b58c75..0000000000 --- a/src/Npgsql/Internal/TypeHandlers/VoidHandler.cs +++ /dev/null @@ -1,41 +0,0 @@ -using System; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandlers; - -/// -/// https://www.postgresql.org/docs/current/static/datatype-boolean.html -/// -sealed class VoidHandler : NpgsqlSimpleTypeHandler -{ - public VoidHandler(PostgresType pgType) : base(pgType) {} - - public override DBNull Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - => DBNull.Value; - - public override int ValidateAndGetLength(DBNull value, NpgsqlParameter? parameter) - => throw new NotSupportedException(); - - public override void Write(DBNull value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) - => throw new NotSupportedException(); - - public override int ValidateObjectAndGetLength(object? value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => value switch - { - DBNull => 0, - null => 0, - _ => throw new InvalidCastException($"Can't write CLR type {value.GetType()} with handler type {nameof(VoidHandler)}") - }; - - public override Task WriteObjectWithLength(object? 
value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => value switch - { - DBNull => WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken), - null => WriteWithLength(DBNull.Value, buf, lengthCache, parameter, async, cancellationToken), - _ => throw new InvalidCastException($"Can't write CLR type {value.GetType()} with handler type {nameof(VoidHandler)}") - }; -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandling/INpgsqlSimpleTypeHandler.cs b/src/Npgsql/Internal/TypeHandling/INpgsqlSimpleTypeHandler.cs deleted file mode 100644 index 9a8ebf8cfa..0000000000 --- a/src/Npgsql/Internal/TypeHandling/INpgsqlSimpleTypeHandler.cs +++ /dev/null @@ -1,47 +0,0 @@ -using System.Diagnostics.CodeAnalysis; -using Npgsql.BackendMessages; - -namespace Npgsql.Internal.TypeHandling; - -/// -/// Type handlers that wish to support reading other types in additional to the main one can -/// implement this interface for all those types. -/// -public interface INpgsqlSimpleTypeHandler -{ - /// - /// Reads a value of type with the given length from the provided buffer, - /// with the assumption that it is entirely present in the provided memory buffer and no I/O will be - /// required. - /// - /// The buffer from which to read. - /// The byte length of the value. The buffer might not contain the full length, requiring I/O to be performed. - /// Additional PostgreSQL information about the type, such as the length in varchar(30). - /// The fully-read value. - T Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null); - - /// - /// Responsible for validating that a value represents a value of the correct and which can be - /// written for PostgreSQL - if the value cannot be written for any reason, an exception should be thrown. - /// Also returns the byte length needed to write the value. 
- /// - /// The value to be written to PostgreSQL - /// - /// The instance where this value resides. Can be used to access additional - /// information relevant to the write process (e.g. ). - /// - /// The number of bytes required to write the value. - int ValidateAndGetLength([DisallowNull] T value, NpgsqlParameter? parameter); - - /// - /// Writes a value to the provided buffer, with the assumption that there is enough space in the buffer - /// (no I/O will occur). The Npgsql core will have taken care of that. - /// - /// The value to write. - /// The buffer to which to write. - /// - /// The instance where this value resides. Can be used to access additional - /// information relevant to the write process (e.g. ). - /// - void Write([DisallowNull] T value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter); -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandling/INpgsqlTypeHandler.cs b/src/Npgsql/Internal/TypeHandling/INpgsqlTypeHandler.cs deleted file mode 100644 index e1a4dc125b..0000000000 --- a/src/Npgsql/Internal/TypeHandling/INpgsqlTypeHandler.cs +++ /dev/null @@ -1,75 +0,0 @@ -using System; -using System.Diagnostics.CodeAnalysis; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; - -namespace Npgsql.Internal.TypeHandling; - -/// -/// Type handlers that wish to support reading other types in additional to the main one can -/// implement this interface for all those types. -/// -public interface INpgsqlTypeHandler -{ - /// - /// Reads a value of type with the given length from the provided buffer, - /// using either sync or async I/O. - /// - /// The buffer from which to read. - /// The byte length of the value. The buffer might not contain the full length, requiring I/O to be performed. - /// If I/O is required to read the full length of the value, whether it should be performed synchronously or asynchronously. 
- /// Additional PostgreSQL information about the type, such as the length in varchar(30). - /// The fully-read value. - ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null); - - /// - /// Responsible for validating that a value represents a value of the correct and which can be - /// written for PostgreSQL - if the value cannot be written for any reason, an exception should be thrown. - /// Also returns the byte length needed to write the value. - /// - /// The value to be written to PostgreSQL - /// A cache where the length calculated during the validation phase can be stored for use at the writing phase. - /// - /// The instance where this value resides. Can be used to access additional - /// information relevant to the write process (e.g. ). - /// - /// The number of bytes required to write the value. - int ValidateAndGetLength([DisallowNull] T value, [NotNullIfNotNull(nameof(lengthCache))]ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter); - - /// - /// Writes a value to the provided buffer. - /// - /// The value to write. - /// The buffer to which to write. - /// A cache where the length calculated during the validation phase can be stored for use at the writing phase. - /// - /// The instance where this value resides. Can be used to access additional - /// information relevant to the write process (e.g. ). - /// - /// - /// If I/O will be necessary (i.e. the buffer is full), determines whether it will be done synchronously or asynchronously. - /// - /// - /// An optional token to cancel the asynchronous operation. The default value is . - /// - Task Write([DisallowNull] T value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default); -} - -static class INpgsqlTypeHandlerExtensions -{ - public static async Task WriteWithLength(this INpgsqlTypeHandler handler, T? value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? 
lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - if (buf.WriteSpaceLeft < 4) - await buf.Flush(async, cancellationToken); - - if (value is null or DBNull) - { - buf.WriteInt32(-1); - return; - } - - buf.WriteInt32(handler.ValidateAndGetLength(value, ref lengthCache, parameter)); - await handler.Write(value, buf, lengthCache, parameter, async, cancellationToken); - } -} diff --git a/src/Npgsql/Internal/TypeHandling/ITextReaderHandler.cs b/src/Npgsql/Internal/TypeHandling/ITextReaderHandler.cs deleted file mode 100644 index b55000fadf..0000000000 --- a/src/Npgsql/Internal/TypeHandling/ITextReaderHandler.cs +++ /dev/null @@ -1,13 +0,0 @@ -using System.Data.Common; -using System.IO; - -namespace Npgsql.Internal.TypeHandling; - -/// -/// Implemented by handlers which support , returns a standard -/// TextReader given a binary Stream. -/// -interface ITextReaderHandler -{ - TextReader GetTextReader(Stream stream, NpgsqlReadBuffer buffer); -} diff --git a/src/Npgsql/Internal/TypeHandling/NpgsqlLengthCache.cs b/src/Npgsql/Internal/TypeHandling/NpgsqlLengthCache.cs deleted file mode 100644 index b36381e9e8..0000000000 --- a/src/Npgsql/Internal/TypeHandling/NpgsqlLengthCache.cs +++ /dev/null @@ -1,65 +0,0 @@ -using System.Collections.Generic; -using System.Diagnostics; - -namespace Npgsql.Internal.TypeHandling; - -/// -/// An array of cached lengths for the parameters sending process. -/// -/// When sending parameters, lengths need to be calculated more than once (once for Bind, once for -/// an array, once for the string within that array). This cache optimizes that. Lengths are added -/// to the cache, and then retrieved in the same order. 
-/// -public sealed class NpgsqlLengthCache -{ - public bool IsPopulated; - public int Position; - public List Lengths; - - public NpgsqlLengthCache() => Lengths = new List(); - - public NpgsqlLengthCache(int capacity) => Lengths = new List(capacity); - - /// - /// Stores a length value in the cache, to be fetched later via . - /// Called at the phase. - /// - /// The length parameter. - public int Set(int len) - { - Debug.Assert(!IsPopulated); - Lengths.Add(len); - Position++; - return len; - } - - /// - /// Retrieves a length value previously stored in the cache via . - /// Called at the writing phase, after validation has already occurred and the length cache is populated. - /// - /// - public int Get() - { - Debug.Assert(IsPopulated); - return Lengths[Position++]; - } - - internal int GetLast() - { - Debug.Assert(IsPopulated); - return Lengths[Position-1]; - } - - internal void Rewind() - { - Position = 0; - IsPopulated = true; - } - - internal void Clear() - { - Lengths.Clear(); - Position = 0; - IsPopulated = false; - } -} diff --git a/src/Npgsql/Internal/TypeHandling/NpgsqlSimpleTypeHandler.cs b/src/Npgsql/Internal/TypeHandling/NpgsqlSimpleTypeHandler.cs deleted file mode 100644 index 5a9bbde2cf..0000000000 --- a/src/Npgsql/Internal/TypeHandling/NpgsqlSimpleTypeHandler.cs +++ /dev/null @@ -1,84 +0,0 @@ -using System; -using System.Data.Common; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandling; - -/// -/// Base class for all simple type handlers, which read and write short, non-arbitrary lengthed -/// values to PostgreSQL. Provides a simpler API to implement when compared to - -/// Npgsql takes care of all I/O before calling into this type, so no I/O needs to be performed by it. -/// -/// -/// The default CLR type that this handler will read and write. For example, calling -/// on a column with this handler will return a value with type . 
-/// Type handlers can support additional types by implementing . -/// -public abstract class NpgsqlSimpleTypeHandler : NpgsqlTypeHandler, INpgsqlSimpleTypeHandler -{ - protected NpgsqlSimpleTypeHandler(PostgresType postgresType) : base(postgresType) {} - - /// - /// Reads a value of type with the given length from the provided buffer, - /// with the assumption that it is entirely present in the provided memory buffer and no I/O will be - /// required. - /// - /// The buffer from which to read. - /// The byte length of the value. The buffer might not contain the full length, requiring I/O to be performed. - /// Additional PostgreSQL information about the type, such as the length in varchar(30). - /// The fully-read value. - public abstract TDefault Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null); - - public sealed override ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - => throw new NotSupportedException(); - - public override async ValueTask ReadAsObject(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - { - await buf.Ensure(len, async); - return Read(buf, len, fieldDescription)!; - } - - #region Write - - /// - /// Responsible for validating that a value represents a value of the correct and which can be - /// written for PostgreSQL - if the value cannot be written for any reason, an exception shold be thrown. - /// Also returns the byte length needed to write the value. - /// - /// The value to be written to PostgreSQL - /// - /// The instance where this value resides. Can be used to access additional - /// information relevant to the write process (e.g. ). - /// - /// The number of bytes required to write the value. - public abstract int ValidateAndGetLength(TDefault value, NpgsqlParameter? parameter); - - /// - /// Writes a value to the provided buffer, with the assumption that there is enough space in the buffer - /// (no I/O will occur). 
The Npgsql core will have taken care of that. - /// - /// The value to write. - /// The buffer to which to write. - /// - /// The instance where this value resides. Can be used to access additional - /// information relevant to the write process (e.g. ). - /// - public abstract void Write(TDefault value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter); - - /// - /// Simple type handlers override instead of this. - /// - public sealed override Task Write(TDefault value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => throw new NotSupportedException(); - - /// - /// Simple type handlers override instead of this. - /// - public sealed override int ValidateAndGetLength(TDefault value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => throw new NotSupportedException(); - - #endregion -} diff --git a/src/Npgsql/Internal/TypeHandling/NpgsqlTypeHandler.cs b/src/Npgsql/Internal/TypeHandling/NpgsqlTypeHandler.cs deleted file mode 100644 index e9cdf8dd4d..0000000000 --- a/src/Npgsql/Internal/TypeHandling/NpgsqlTypeHandler.cs +++ /dev/null @@ -1,273 +0,0 @@ -using System; -using System.Diagnostics; -using System.Diagnostics.CodeAnalysis; -using System.Runtime.CompilerServices; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandlers; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandling; - -/// -/// Base class for all type handlers, which read and write CLR types into their PostgreSQL -/// binary representation. -/// Type handler writers shouldn't inherit from this class, inherit -/// or instead. -/// -public abstract class NpgsqlTypeHandler -{ - protected NpgsqlTypeHandler(PostgresType postgresType) - => PostgresType = postgresType; - - /// - /// The PostgreSQL type handled by this type handler. 
- /// - public PostgresType PostgresType { get; } - - #region Read - - /// - /// Reads a value of type with the given length from the provided buffer, - /// using either sync or async I/O. - /// - /// The buffer from which to read. - /// The byte length of the value. The buffer might not contain the full length, requiring I/O to be performed. - /// If I/O is required to read the full length of the value, whether it should be performed synchronously or asynchronously. - /// Additional PostgreSQL information about the type, such as the length in varchar(30). - /// The fully-read value. - [MethodImpl(MethodImplOptions.AggressiveInlining)] - protected internal async ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - { - switch (this) - { - case INpgsqlSimpleTypeHandler simpleTypeHandler: - await buf.Ensure(len, async); - return simpleTypeHandler.Read(buf, len, fieldDescription); - case INpgsqlTypeHandler typeHandler: - return await typeHandler.Read(buf, len, async, fieldDescription); - default: - return await ReadCustom(buf, len, async, fieldDescription); - } - } - - /// - /// Version of that's called when we know the entire value - /// is already buffered in memory (i.e. in non-sequential mode). - /// - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public TAny Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - { - Debug.Assert(buf.ReadBytesLeft >= len); - - return this switch - { - INpgsqlSimpleTypeHandler simpleTypeHandler => simpleTypeHandler.Read(buf, len, fieldDescription), - INpgsqlTypeHandler typeHandler => typeHandler.Read(buf, len, async: false, fieldDescription).Result, - _ => ReadCustom(buf, len, async: false, fieldDescription).Result - }; - } - - protected internal virtual ValueTask ReadCustom(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription) - => throw new InvalidCastException(fieldDescription == null - ? 
$"Can't cast database type to {typeof(TAny).Name}" - : $"Can't cast database type {fieldDescription.Handler.PgDisplayName} to {typeof(TAny).Name}"); - - /// - /// Reads a column as the type handler's default read type. If it is not already entirely in - /// memory, sync or async I/O will be performed as specified by . - /// - public abstract ValueTask ReadAsObject(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null); - - /// - /// Version of that's called when we know the entire value - /// is already buffered in memory (i.e. in non-sequential mode). - /// - internal object ReadAsObject(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - { - Debug.Assert(buf.ReadBytesLeft >= len); - - return ReadAsObject(buf, len, async: false, fieldDescription).Result; - } - - /// - /// Reads a value from the buffer, assuming our read position is at the value's preceding length. - /// If the length is -1 (null), this method will return the default value. - /// - [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal async ValueTask ReadWithLength(NpgsqlReadBuffer buf, bool async, FieldDescription? fieldDescription = null) - { - await buf.Ensure(4, async); - var len = buf.ReadInt32(); - return len == -1 - ? default! - : NullableHandler.Exists - ? await NullableHandler.ReadAsync(this, buf, len, async, fieldDescription) - : await Read(buf, len, async, fieldDescription); - } - - #endregion - - #region Write - - /// - /// Called to validate and get the length of a value of a generic . - /// and must be handled before calling into this. - /// - [MethodImpl(MethodImplOptions.AggressiveInlining)] - protected internal int ValidateAndGetLength( - [DisallowNull] TAny value, [NotNullIfNotNull(nameof(lengthCache))] ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? 
parameter) - { - Debug.Assert(value is not DBNull); - - return this switch - { - INpgsqlSimpleTypeHandler simpleTypeHandler => simpleTypeHandler.ValidateAndGetLength(value, parameter), - INpgsqlTypeHandler typeHandler => typeHandler.ValidateAndGetLength(value, ref lengthCache, parameter), - _ => ValidateAndGetLengthCustom(value, ref lengthCache, parameter) - }; - } - - protected internal virtual int ValidateAndGetLengthCustom( - [DisallowNull] TAny value, [NotNullIfNotNull(nameof(lengthCache))] ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) => - ValidateAndGetLengthCustomCore(parameter, typeof(TAny), PgDisplayName); - - static int ValidateAndGetLengthCustomCore(NpgsqlParameter? parameter, Type type, string displayName) - { - var parameterName = parameter is null - ? null - : parameter.TrimmedName == string.Empty - ? parameter.Collection is { } paramCollection - ? $"${paramCollection.IndexOf(parameter) + 1}" - : null // in case of COPY operations parameter isn't bound to a collection - : parameter.TrimmedName; - - throw new InvalidCastException(parameterName is null - ? $"Cannot write a value of CLR type '{type}' as database type '{displayName}'." - : $"Cannot write a value of CLR type '{type}' as database type '{displayName}' for parameter '{parameterName}'."); - } - - /// - /// Called to write the value of a generic . - /// - /// - /// In the vast majority of cases writing a parameter to the buffer won't need to perform I/O. - /// - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public async Task WriteWithLength(TAny? value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - { - // TODO: Possibly do a sync path when we don't do I/O (e.g. 
simple type handler, no flush) - if (buf.WriteSpaceLeft < 4) - await buf.Flush(async, cancellationToken); - - if (value is null or DBNull) - { - buf.WriteInt32(-1); - return; - } - - switch (this) - { - case INpgsqlSimpleTypeHandler simpleTypeHandler: - var len = simpleTypeHandler.ValidateAndGetLength(value, parameter); - buf.WriteInt32(len); - if (buf.WriteSpaceLeft < len) - await buf.Flush(async, cancellationToken); - simpleTypeHandler.Write(value, buf, parameter); - return; - case INpgsqlTypeHandler typeHandler: - buf.WriteInt32(typeHandler.ValidateAndGetLength(value, ref lengthCache, parameter)); - await typeHandler.Write(value, buf, lengthCache, parameter, async, cancellationToken); - return; - default: - await WriteWithLengthCustom(value, buf, lengthCache, parameter, async, cancellationToken); - return; - } - } - - /// - /// Typically does not need to be overridden by type handlers, but may be needed in some - /// cases (e.g. . - /// Note that this method assumes it can write 4 bytes of length (already verified by - /// ). - /// - protected virtual Task WriteWithLengthCustom( - [DisallowNull] TAny value, - NpgsqlWriteBuffer buf, - NpgsqlLengthCache? lengthCache, - NpgsqlParameter? parameter, - bool async, - CancellationToken cancellationToken) - => throw new InvalidCastException($"Can't write '{typeof(TAny).Name}' with type handler '{GetType().Name}'"); - - /// - /// Responsible for validating that a value represents a value of the correct and which can be - /// written for PostgreSQL - if the value cannot be written for any reason, an exception shold be thrown. - /// Also returns the byte length needed to write the value. - /// - /// The value to be written to PostgreSQL - /// - /// If the byte length calculation is costly (e.g. for UTF-8 strings), its result can be stored in the - /// length cache to be reused in the writing process, preventing recalculation. - /// - /// - /// The instance where this value resides. 
Can be used to access additional - /// information relevant to the write process (e.g. ). - /// - /// The number of bytes required to write the value. - // Source-generated - public abstract int ValidateObjectAndGetLength(object value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter); - - /// - /// Writes a value to the provided buffer, using either sync or async I/O. - /// - /// The value to write. - /// The buffer to which to write. - /// - /// - /// The instance where this value resides. Can be used to access additional - /// information relevant to the write process (e.g. ). - /// - /// If I/O is required to read the full length of the value, whether it should be performed synchronously or asynchronously. - /// - /// An optional token to cancel the asynchronous operation. The default value is . - /// - // Source-generated - public abstract Task WriteObjectWithLength(object? value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default); - - #endregion Write - - #region Misc - - public abstract Type GetFieldType(FieldDescription? fieldDescription = null); - - internal virtual bool PreferTextWrite => false; - - /// - /// Creates a type handler for arrays of this handler's type. - /// - public abstract NpgsqlTypeHandler CreateArrayHandler(PostgresArrayType pgArrayType, ArrayNullabilityMode arrayNullabilityMode); - - /// - /// Creates a type handler for ranges of this handler's type. - /// - public abstract NpgsqlTypeHandler CreateRangeHandler(PostgresType pgRangeType); - - /// - /// Creates a type handler for multiranges of this handler's type. - /// - public abstract NpgsqlTypeHandler CreateMultirangeHandler(PostgresMultirangeType pgMultirangeType); - - /// - /// Used to create an exception when the provided type can be converted and written, but an - /// instance of is required for caching of the converted value - /// (in . 
- /// - protected Exception CreateConversionButNoParamException(Type clrType) - => new InvalidCastException($"Can't convert .NET type '{clrType}' to PostgreSQL '{PgDisplayName}' within an array"); - - internal string PgDisplayName => PostgresType.DisplayName; - - #endregion Misc -} diff --git a/src/Npgsql/Internal/TypeHandling/NpgsqlTypeHandler`.cs b/src/Npgsql/Internal/TypeHandling/NpgsqlTypeHandler`.cs deleted file mode 100644 index ae1e0eee5c..0000000000 --- a/src/Npgsql/Internal/TypeHandling/NpgsqlTypeHandler`.cs +++ /dev/null @@ -1,78 +0,0 @@ -using System; -using System.Data.Common; -using System.Diagnostics; -using System.Diagnostics.CodeAnalysis; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using Npgsql.Internal.TypeHandlers; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandling; - -/// -/// Base class for all type handlers, which read and write CLR types into their PostgreSQL -/// binary representation. Unless your type is arbitrary-length, consider inheriting from -/// instead. -/// -/// -/// The default CLR type that this handler will read and write. For example, calling -/// on a column with this handler will return a value with type . -/// Type handlers can support additional types by implementing . -/// -public abstract class NpgsqlTypeHandler : NpgsqlTypeHandler, INpgsqlTypeHandler -{ - protected NpgsqlTypeHandler(PostgresType postgresType) : base(postgresType) {} - - #region Read - - /// - /// Reads a value of type with the given length from the provided buffer, - /// using either sync or async I/O. - /// - /// The buffer from which to read. - /// The byte length of the value. The buffer might not contain the full length, requiring I/O to be performed. - /// If I/O is required to read the full length of the value, whether it should be performed synchronously or asynchronously. - /// Additional PostgreSQL information about the type, such as the length in varchar(30). 
- /// The fully-read value. - public abstract ValueTask Read(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null); - - // Since TAny isn't constrained to class? or struct (C# doesn't have a non-nullable constraint that doesn't limit us to either struct or class), - // we must use the bang operator here to tell the compiler that a null value will never returned. - public override async ValueTask ReadAsObject(NpgsqlReadBuffer buf, int len, bool async, FieldDescription? fieldDescription = null) - => (await Read(buf, len, async, fieldDescription))!; - - #endregion Read - - #region Write - - /// - /// Called to validate and get the length of a value of a generic . - /// - public abstract int ValidateAndGetLength(TDefault value, [NotNullIfNotNull(nameof(lengthCache))] ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter); - - /// - /// Called to write the value of a generic . - /// - public abstract Task Write(TDefault value, NpgsqlWriteBuffer buf, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default); - - #endregion Write - - #region Misc - - public override Type GetFieldType(FieldDescription? 
fieldDescription = null) => typeof(TDefault); - - /// - public override NpgsqlTypeHandler CreateArrayHandler(PostgresArrayType pgArrayType, ArrayNullabilityMode arrayNullabilityMode) - => new ArrayHandler(pgArrayType, this, arrayNullabilityMode); - - /// - public override NpgsqlTypeHandler CreateRangeHandler(PostgresType pgRangeType) - => new RangeHandler(pgRangeType, this); - - /// - public override NpgsqlTypeHandler CreateMultirangeHandler(PostgresMultirangeType pgMultirangeType) - => new MultirangeHandler(pgMultirangeType, (RangeHandler)CreateRangeHandler(pgMultirangeType.Subrange)); - - #endregion Misc -} diff --git a/src/Npgsql/Internal/TypeHandling/NullableHandler.cs b/src/Npgsql/Internal/TypeHandling/NullableHandler.cs deleted file mode 100644 index 89fd5a0cb4..0000000000 --- a/src/Npgsql/Internal/TypeHandling/NullableHandler.cs +++ /dev/null @@ -1,54 +0,0 @@ -using System; -using System.Diagnostics; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; - -namespace Npgsql.Internal.TypeHandling; - -abstract class NullableHandler -{ - static NullableHandler? _derivedInstance; - public static bool Exists => default(T) is null && typeof(T).IsValueType; - - static NullableHandler DerivedInstance - { - get - { - Debug.Assert(Exists); - return _derivedInstance ??= (NullableHandler?)Activator.CreateInstance(typeof(NullableHandler<,>).MakeGenericType(typeof(T), typeof(T).GenericTypeArguments[0]))!; - } - } - - public static T Read(NpgsqlTypeHandler handler, NpgsqlReadBuffer buffer, int columnLength, FieldDescription? fieldDescription = null) => - DerivedInstance.ReadImpl(handler, buffer, columnLength, fieldDescription); - public static ValueTask ReadAsync(NpgsqlTypeHandler handler, NpgsqlReadBuffer buffer, int columnLength, bool async, FieldDescription? 
fieldDescription = null) => - DerivedInstance.ReadAsyncImpl(handler, buffer, columnLength, async, fieldDescription); - public static int ValidateAndGetLength(NpgsqlTypeHandler handler, T value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) => - DerivedInstance.ValidateAndGetLengthImpl(handler, value, ref lengthCache, parameter); - public static Task WriteAsync(NpgsqlTypeHandler handler, T value, NpgsqlWriteBuffer buffer, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) => - DerivedInstance.WriteAsyncImpl(handler, value, buffer, lengthCache, parameter, async, cancellationToken); - - protected abstract T ReadImpl(NpgsqlTypeHandler handler, NpgsqlReadBuffer buffer, int columnLength, FieldDescription? fieldDescription = null); - protected abstract ValueTask ReadAsyncImpl(NpgsqlTypeHandler handler, NpgsqlReadBuffer buffer, int columnLen, bool async, FieldDescription? fieldDescription = null); - protected abstract int ValidateAndGetLengthImpl(NpgsqlTypeHandler handler, T value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter); - protected abstract Task WriteAsyncImpl(NpgsqlTypeHandler handler, T value, NpgsqlWriteBuffer buffer, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default); -} - -class NullableHandler : NullableHandler - where TUnderlying : struct -{ - protected override T ReadImpl(NpgsqlTypeHandler handler, NpgsqlReadBuffer buffer, int columnLength, FieldDescription? fieldDescription = null) - => (T)(object)handler.Read(buffer, columnLength, fieldDescription); - - protected override async ValueTask ReadAsyncImpl(NpgsqlTypeHandler handler, NpgsqlReadBuffer buffer, int columnLength, bool async, FieldDescription? 
fieldDescription = null) - => (T)(object)await handler.Read(buffer, columnLength, async, fieldDescription); - - protected override int ValidateAndGetLengthImpl(NpgsqlTypeHandler handler, T value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) => - value != null ? handler.ValidateAndGetLength(((TUnderlying?)(object)value).Value, ref lengthCache, parameter) : 0; - - protected override Task WriteAsyncImpl(NpgsqlTypeHandler handler, T value, NpgsqlWriteBuffer buffer, NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter, bool async, CancellationToken cancellationToken = default) - => value != null - ? handler.WriteWithLength(((TUnderlying?)(object)value).Value, buffer, lengthCache, parameter, async, cancellationToken) - : handler.WriteWithLength(DBNull.Value, buffer, lengthCache, parameter, async, cancellationToken); -} diff --git a/src/Npgsql/Internal/TypeHandling/TypeHandlerResolver.cs b/src/Npgsql/Internal/TypeHandling/TypeHandlerResolver.cs deleted file mode 100644 index feb6a719e7..0000000000 --- a/src/Npgsql/Internal/TypeHandling/TypeHandlerResolver.cs +++ /dev/null @@ -1,37 +0,0 @@ -using System; -using NpgsqlTypes; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeHandling; - -/// -/// An Npgsql resolver for type handlers. Typically used by plugins to alter how Npgsql reads and writes values to PostgreSQL. -/// -public abstract class TypeHandlerResolver -{ - /// - /// Resolves a type handler given a PostgreSQL type name, corresponding to the typname column in the PostgreSQL pg_type catalog table. - /// - /// See . - public abstract NpgsqlTypeHandler? ResolveByDataTypeName(string typeName); - - /// - /// Resolves a type handler for a given NpgsqlDbType. - /// - public virtual NpgsqlTypeHandler? ResolveByNpgsqlDbType(NpgsqlDbType npgsqlDbType) => null; - - /// - /// Resolves a type handler given a .NET CLR type. - /// - public abstract NpgsqlTypeHandler? 
ResolveByClrType(Type type); - - /// - /// Resolves a type handler given a PostgreSQL type. - /// - public virtual NpgsqlTypeHandler? ResolveByPostgresType(PostgresType type) - => ResolveByDataTypeName(type.Name); - - public virtual NpgsqlTypeHandler? ResolveValueDependentValue(object value) => null; - - public virtual NpgsqlTypeHandler? ResolveValueTypeGenerically(T value) => null; -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandling/TypeHandlerResolverFactory.cs b/src/Npgsql/Internal/TypeHandling/TypeHandlerResolverFactory.cs deleted file mode 100644 index c1d5030b75..0000000000 --- a/src/Npgsql/Internal/TypeHandling/TypeHandlerResolverFactory.cs +++ /dev/null @@ -1,12 +0,0 @@ -using Npgsql.Internal.TypeMapping; - -namespace Npgsql.Internal.TypeHandling; - -public abstract class TypeHandlerResolverFactory -{ - public abstract TypeHandlerResolver Create(TypeMapper typeMapper, NpgsqlConnector connector); - - public virtual TypeMappingResolver? CreateMappingResolver() => null; - - public virtual TypeMappingResolver? CreateGlobalMappingResolver() => null; -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeHandling/TypeMappingInfo.cs b/src/Npgsql/Internal/TypeHandling/TypeMappingInfo.cs deleted file mode 100644 index d669739e6f..0000000000 --- a/src/Npgsql/Internal/TypeHandling/TypeMappingInfo.cs +++ /dev/null @@ -1,22 +0,0 @@ -using System; -using System.Data; -using Npgsql.TypeMapping; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeHandling; - -public sealed class TypeMappingInfo -{ - public TypeMappingInfo(NpgsqlDbType? npgsqlDbType, string? dataTypeName, Type clrType) - => (NpgsqlDbType, DataTypeName, ClrTypes) = (npgsqlDbType, dataTypeName, new[] { clrType }); - - public TypeMappingInfo(NpgsqlDbType? npgsqlDbType, string? dataTypeName, params Type[] clrTypes) - => (NpgsqlDbType, DataTypeName, ClrTypes) = (npgsqlDbType, dataTypeName, clrTypes); - - public NpgsqlDbType? 
NpgsqlDbType { get; } - // Note that we can't cache the result due to nullable's assignment not being thread safe - public DbType DbType - => NpgsqlDbType is null ? DbType.Object : GlobalTypeMapper.NpgsqlDbTypeToDbType(NpgsqlDbType.Value); - public string? DataTypeName { get; } - public Type[] ClrTypes { get; } -} diff --git a/src/Npgsql/Internal/TypeInfoCache.cs b/src/Npgsql/Internal/TypeInfoCache.cs new file mode 100644 index 0000000000..2fce6eb585 --- /dev/null +++ b/src/Npgsql/Internal/TypeInfoCache.cs @@ -0,0 +1,169 @@ +using System; +using System.Collections.Concurrent; +using System.Runtime.CompilerServices; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Internal; + +sealed class TypeInfoCache where TPgTypeId : struct +{ + readonly PgSerializerOptions _options; + readonly bool _validatePgTypeIds; + + // Mostly used for parameter writing, 8ns + readonly ConcurrentDictionary _cacheByClrType = new(); + + // Used for reading, occasionally for parameter writing where a db type was given. + // 8ns, about 10ns total to scan an array with 6, 7 different clr types under one pg type + readonly ConcurrentDictionary _cacheByPgTypeId = new(); + + static TypeInfoCache() + { + if (typeof(TPgTypeId) != typeof(Oid) && typeof(TPgTypeId) != typeof(DataTypeName)) + throw new InvalidOperationException("Cannot use this type argument."); + } + + public TypeInfoCache(PgSerializerOptions options, bool validatePgTypeIds = true) + { + _options = options; + _validatePgTypeIds = validatePgTypeIds; + } + + /// + /// + /// + /// + /// + /// + /// When this flag is true, and both type and pgTypeId are non null, a default info for the pgTypeId can be returned if an exact match + /// can't be found. + /// + /// + /// + public PgTypeInfo? GetOrAddInfo(Type? type, TPgTypeId? 
pgTypeId, bool defaultTypeFallback = false) + { + if (pgTypeId is { } id) + { + if (_cacheByPgTypeId.TryGetValue(id, out var infos)) + if (FindMatch(type, infos, defaultTypeFallback) is { } info) + return info; + + return AddEntryById(id, infos, defaultTypeFallback); + } + + if (type is not null) + return _cacheByClrType.TryGetValue(type, out var info) ? info : AddByType(type); + + return null; + + PgTypeInfo? FindMatch(Type? type, (Type? Type, PgTypeInfo? Info)[] infos, bool defaultTypeFallback) + { + PgTypeInfo? defaultInfo = null; + var negativeExactMatch = false; + for (var i = 0; i < infos.Length; i++) + { + ref var item = ref infos[i]; + if (item.Type == type) + { + if (item.Info is not null || !defaultTypeFallback) + return item.Info; + negativeExactMatch = true; + } + + if (defaultTypeFallback && item.Type is null) + defaultInfo = item.Info; + } + + // We can only return default info if we've seen a negative match (type: typeof(object), info: null) + // Otherwise we might return a previously requested default while the resolvers could produce the exact match. + return negativeExactMatch ? defaultInfo : null; + } + + PgTypeInfo? AddByType(Type type) + { + // We don't pass PgTypeId as we're interested in default converters here. + var info = CreateInfo(type, null, _options, defaultTypeFallback: false, _validatePgTypeIds); + + return info is null + ? null + : _cacheByClrType.TryAdd(type, info) // We never remove entries so either of these branches will always succeed. + ? info + : _cacheByClrType[type]; + } + + PgTypeInfo? AddEntryById(TPgTypeId pgTypeId, (Type? Type, PgTypeInfo? Info)[]? infos, bool defaultTypeFallback) + { + // We cache negatives (null info) to allow 'object or default' checks to never hit the resolvers after the first lookup. 
+ var info = CreateInfo(type, pgTypeId, _options, defaultTypeFallback, _validatePgTypeIds); + + var isDefaultInfo = type is null && info is not null; + if (infos is null) + { + // Also add defaults by their info type to save a future resolver lookup + resize. + infos = isDefaultInfo + ? new [] { (type, info), (info!.Type, info) } + : new [] { (type, info) }; + + if (_cacheByPgTypeId.TryAdd(pgTypeId, infos)) + return info; + } + + // We have to update it instead. + while (true) + { + infos = _cacheByPgTypeId[pgTypeId]; + if (FindMatch(type, infos, defaultTypeFallback) is { } racedInfo) + return racedInfo; + + // Also add defaults by their info type to save a future resolver lookup + resize. + var oldInfos = infos; + var hasExactType = false; + if (isDefaultInfo) + { + foreach (var oldInfo in oldInfos) + if (oldInfo.Type == info!.Type) + hasExactType = true; + } + Array.Resize(ref infos, oldInfos.Length + (isDefaultInfo && !hasExactType ? 2 : 1)); + infos[oldInfos.Length] = (type, info); + if (isDefaultInfo && !hasExactType) + infos[oldInfos.Length + 1] = (info!.Type, info); + + if (_cacheByPgTypeId.TryUpdate(pgTypeId, infos, oldInfos)) + return info; + } + } + + static PgTypeInfo? CreateInfo(Type? type, TPgTypeId? typeId, PgSerializerOptions options, bool defaultTypeFallback, bool validatePgTypeIds) + { + var pgTypeId = AsPgTypeId(typeId); + // Validate that we only pass data types that are supported by the backend. + var dataTypeName = pgTypeId is { } id ? 
(DataTypeName?)options.DatabaseInfo.GetDataTypeName(id, validate: validatePgTypeIds) : null; + var info = options.TypeInfoResolver.GetTypeInfo(type, dataTypeName, options); + if (info is null && defaultTypeFallback) + { + type = null; + info = options.TypeInfoResolver.GetTypeInfo(type, dataTypeName, options); + } + + if (info is null) + return null; + + if (pgTypeId is not null && info.PgTypeId != pgTypeId) + throw new InvalidOperationException("A Postgres type was passed but the resolved PgTypeInfo does not have an equal PgTypeId."); + + if (type is not null && !info.IsBoxing && info.Type != type) + throw new InvalidOperationException($"A CLR type '{type}' was passed but the resolved PgTypeInfo does not have an equal Type: {info.Type}."); + + return info; + } + + static PgTypeId? AsPgTypeId(TPgTypeId? pgTypeId) + => pgTypeId switch + { + { } id when typeof(TPgTypeId) == typeof(DataTypeName) => new PgTypeId(Unsafe.As(ref id)), + { } id => new PgTypeId(Unsafe.As(ref id)), + null => null + }; + } +} diff --git a/src/Npgsql/Internal/TypeInfoMapping.cs b/src/Npgsql/Internal/TypeInfoMapping.cs new file mode 100644 index 0000000000..b2dfd58377 --- /dev/null +++ b/src/Npgsql/Internal/TypeInfoMapping.cs @@ -0,0 +1,668 @@ +using System; +using System.Collections; +using System.Collections.Generic; +using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Text; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; +using Npgsql.PostgresTypes; +using NpgsqlTypes; + +namespace Npgsql.Internal; + +/// +/// +/// +/// +/// +/// +/// Signals whether a resolver based TypeInfo can keep its PgTypeId undecided or whether it should follow mapping.DataTypeName. +/// +public delegate PgTypeInfo TypeInfoFactory(PgSerializerOptions options, TypeInfoMapping mapping, bool resolvedDataTypeName); + +public enum MatchRequirement +{ + /// Match when the clr type and datatype name both match. 
+ /// It's also the only requirement that participates in clr type fallback matching. + All, + /// Match when the datatype name or CLR type matches while the other also matches or is absent. + Single, + /// Match when the datatype name matches and the clr type also matches or is absent. + DataTypeName +} + +/// A factory for well-known PgConverters. +public static class PgConverterFactory +{ + public static PgConverter CreateArrayMultirangeConverter(PgConverter rangeConverter, PgSerializerOptions options) where T : notnull + => new MultirangeConverter(rangeConverter); + + public static PgConverter> CreateListMultirangeConverter(PgConverter rangeConverter, PgSerializerOptions options) where T : notnull + => new MultirangeConverter, T>(rangeConverter); + + public static PgConverter> CreateRangeConverter(PgConverter subTypeConverter, PgSerializerOptions options) + => new RangeConverter(subTypeConverter); + + public static PgConverter CreatePolymorphicArrayConverter(Func> arrayConverterFactory, Func> nullableArrayConverterFactory, PgSerializerOptions options) + => options.ArrayNullabilityMode switch + { + ArrayNullabilityMode.Never => arrayConverterFactory(), + ArrayNullabilityMode.Always => nullableArrayConverterFactory(), + ArrayNullabilityMode.PerInstance => new PolymorphicArrayConverter(arrayConverterFactory(), nullableArrayConverterFactory()), + _ => throw new ArgumentOutOfRangeException() + }; +} + +[DebuggerDisplay("{DebuggerDisplay,nq}")] +public readonly struct TypeInfoMapping +{ + public TypeInfoMapping(Type type, string dataTypeName, TypeInfoFactory factory) + { + Type = type; + // For objects it makes no sense to have clr type only matches by default, there are too many implementations. + MatchRequirement = type == typeof(object) ? 
MatchRequirement.DataTypeName : MatchRequirement.All; + DataTypeName = Postgres.DataTypeName.NormalizeName(dataTypeName); + Factory = factory; + } + + public TypeInfoFactory Factory { get; init; } + public Type Type { get; init; } + public string DataTypeName { get; init; } + + public MatchRequirement MatchRequirement { get; init; } + public Func? TypeMatchPredicate { get; init; } + + public bool TypeEquals(Type type) => TypeMatchPredicate?.Invoke(type) ?? Type == type; + public bool DataTypeNameEquals(string dataTypeName) + { + var span = DataTypeName.AsSpan(); + return Postgres.DataTypeName.IsFullyQualified(span) + ? span.Equals(dataTypeName.AsSpan(), StringComparison.Ordinal) + : span.Equals(Postgres.DataTypeName.ValidatedName(dataTypeName).UnqualifiedNameSpan, StringComparison.Ordinal); + } + + string DebuggerDisplay + { + get + { + var builder = new StringBuilder() + .Append(Type.Name) + .Append(" <-> ") + .Append(Postgres.DataTypeName.FromDisplayName(DataTypeName).DisplayName); + + if (MatchRequirement is not MatchRequirement.All) + builder.Append($" ({MatchRequirement.ToString().ToLowerInvariant()})"); + + return builder.ToString(); + } + } +} + +public sealed class TypeInfoMappingCollection +{ + readonly TypeInfoMappingCollection? _baseCollection; + readonly List _items; + + public TypeInfoMappingCollection(int capacity = 0) + => _items = new(capacity); + + public TypeInfoMappingCollection() : this(0) { } + + // Not used for resolving, only for composing (arrays that need to find the element mapping etc). + public TypeInfoMappingCollection(TypeInfoMappingCollection baseCollection) : this(0) + => _baseCollection = baseCollection; + + public TypeInfoMappingCollection(IEnumerable items) + => _items = new(items); + + public IReadOnlyList Items => _items; + + /// Returns the first default converter or the first converter that matches both type and dataTypeName. 
+ /// If just a type was passed and no default was found we return the first converter with a type match. + public PgTypeInfo? Find(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + { + TypeInfoMapping? fallback = null; + foreach (var mapping in _items) + { + var looseTypeMatch = mapping.TypeMatchPredicate is { } pred ? pred(type) : type is null || mapping.Type == type; + var typeMatch = type is not null && looseTypeMatch; + var dataTypeMatch = dataTypeName is not null && mapping.DataTypeNameEquals(dataTypeName.Value.Value); + + switch (mapping.MatchRequirement) + { + case var _ when dataTypeMatch && typeMatch: + case not MatchRequirement.All when dataTypeMatch && looseTypeMatch: + case MatchRequirement.Single when dataTypeName is null && looseTypeMatch: + var resolvedMapping = mapping with + { + Type = type ?? mapping.Type, + // Make sure plugins (which match on unqualified names) and resolvers get the fully qualified name to canonicalize. + DataTypeName = dataTypeName is not null ? dataTypeName.GetValueOrDefault().Value : mapping.DataTypeName + }; + return resolvedMapping.Factory(options, resolvedMapping, dataTypeName is not null); + // DataTypeName is explicitly requiring dataTypeName so it won't be used for a fallback, Single would have matched above already. + case MatchRequirement.All when fallback is null && dataTypeName is null && typeMatch: + fallback = mapping.TypeMatchPredicate is not null ? mapping with { Type = type! } : mapping; + break; + default: + continue; + } + } + + return fallback?.Factory(options, fallback.Value, dataTypeName is not null); + } + + bool TryFindMapping(Type type, string dataTypeName, out TypeInfoMapping value) + { + foreach (var mapping in _baseCollection?._items ?? _items) + { + // During mapping we just use look for the declared type, regardless of TypeMatchPredicate. 
+ if (mapping.Type == type && mapping.DataTypeNameEquals(dataTypeName)) + { + value = mapping; + return true; + } + } + + value = default; + return false; + } + + [MethodImpl(MethodImplOptions.NoInlining)] + TypeInfoMapping FindMapping(Type type, string dataTypeName) + => TryFindMapping(type, dataTypeName, out var info) ? info : throw new InvalidOperationException($"Could not find mapping for {type} <-> {dataTypeName}"); + + // Helper to eliminate generic display class duplication. + static TypeInfoFactory CreateComposedFactory(Type mappingType, TypeInfoMapping innerMapping, Func mapper, bool copyPreferredFormat = false, bool supportsWriting = true) + => (options, mapping, dataTypeNameMatch) => + { + var innerInfo = innerMapping.Factory(options, innerMapping, dataTypeNameMatch); + var converter = mapper(mapping, innerInfo); + var preferredFormat = copyPreferredFormat ? innerInfo.PreferredFormat : null; + var writingSupported = supportsWriting && innerInfo.SupportsWriting && mapping.Type != typeof(object); + var unboxedType = ComputeUnboxedType(defaultType: mappingType, converter.TypeToConvert, mapping.Type); + + return new PgTypeInfo(options, converter, TypeInfoMappingHelpers.ResolveFullyQualifiedName(options, mapping.DataTypeName), unboxedType) + { + PreferredFormat = preferredFormat, + SupportsWriting = writingSupported + }; + }; + + // Helper to eliminate generic display class duplication. + static TypeInfoFactory CreateComposedFactory(Type mappingType, TypeInfoMapping innerMapping, Func mapper, bool copyPreferredFormat = false, bool supportsWriting = true) + => (options, mapping, dataTypeNameMatch) => + { + var innerInfo = (PgResolverTypeInfo)innerMapping.Factory(options, innerMapping, dataTypeNameMatch); + var resolver = mapper(mapping, innerInfo); + var preferredFormat = copyPreferredFormat ? 
innerInfo.PreferredFormat : null; + var writingSupported = supportsWriting && innerInfo.SupportsWriting && mapping.Type != typeof(object); + var unboxedType = ComputeUnboxedType(defaultType: mappingType, resolver.TypeToConvert, mapping.Type); + // We include the data type name if the inner info did so as well. + // This way we can rely on its logic around resolvedDataTypeName, including when it ignores that flag. + PgTypeId? pgTypeId = innerInfo.PgTypeId is not null + ? TypeInfoMappingHelpers.ResolveFullyQualifiedName(options, mapping.DataTypeName) + : null; + return new PgResolverTypeInfo(options, resolver, pgTypeId, unboxedType) + { + PreferredFormat = preferredFormat, + SupportsWriting = writingSupported + }; + }; + + static Type? ComputeUnboxedType(Type defaultType, Type converterType, Type matchedType) + { + // The minimal hierarchy that should hold for things to work is object < converterType < matchedType. + // Though these types could often be seen in a hierarchy: object < converterType < defaultType < matchedType. + // Some caveats with the latter being for instance Array being the matchedType while the defaultType is int[]. + Debug.Assert(converterType.IsAssignableFrom(matchedType) || matchedType == typeof(object)); + Debug.Assert(converterType.IsAssignableFrom(defaultType)); + + // A special case for object matches, where we return a more specific type than was matched. + // This is to report e.g. Array converters as Array when their matched type was object. + if (matchedType == typeof(object)) + return converterType; + + // This is to report e.g. Array converters as int[,,,] when their matched type was such. + if (matchedType != defaultType) + return matchedType; + + // If defaultType does not equal converterType we take defaultType as it's more specific. + // This is to report e.g. Array converters as int[] when their matched type was their default type. + if (defaultType != converterType) + return defaultType; + + // Keep the converter type. 
+ return null; + } + + public void Add(TypeInfoMapping mapping) => _items.Add(mapping); + + public void AddRange(TypeInfoMappingCollection collection) => _items.AddRange(collection._items); + + Func GetDefaultConfigure(bool isDefault) + => GetDefaultConfigure(isDefault ? MatchRequirement.Single : MatchRequirement.All); + Func GetDefaultConfigure(MatchRequirement matchRequirement) + => matchRequirement switch + { + MatchRequirement.All => static mapping => mapping with { MatchRequirement = MatchRequirement.All }, + MatchRequirement.DataTypeName => static mapping => mapping with { MatchRequirement = MatchRequirement.DataTypeName }, + MatchRequirement.Single => static mapping => mapping with { MatchRequirement = MatchRequirement.Single }, + _ => throw new ArgumentOutOfRangeException(nameof(matchRequirement), matchRequirement, null) + }; + + Func GetArrayTypeMatchPredicate(Func elementTypeMatchPredicate) + => type => type is null || (type.IsArray && elementTypeMatchPredicate.Invoke(type.GetElementType()!)); + Func GetListTypeMatchPredicate(Func elementTypeMatchPredicate) + => type => type is null || (type.IsConstructedGenericType && type.GetGenericTypeDefinition() == typeof(List<>) + && elementTypeMatchPredicate(type.GetGenericArguments()[0])); + + public void AddType(string dataTypeName, TypeInfoFactory createInfo, bool isDefault = false) where T : class + => AddType(dataTypeName, createInfo, GetDefaultConfigure(isDefault)); + + public void AddType(string dataTypeName, TypeInfoFactory createInfo, MatchRequirement matchRequirement) where T : class + => AddType(dataTypeName, createInfo, GetDefaultConfigure(matchRequirement)); + + public void AddType(string dataTypeName, TypeInfoFactory createInfo, Func? configure) where T : class + { + var mapping = new TypeInfoMapping(typeof(T), dataTypeName, createInfo); + _items.Add(configure?.Invoke(mapping) ?? mapping); + } + + // Aliased to AddType at this time. 
+ public void AddResolverType(string dataTypeName, TypeInfoFactory createInfo, bool isDefault = false) where T : class + => AddType(dataTypeName, createInfo, GetDefaultConfigure(isDefault)); + + // Aliased to AddType at this time. + public void AddResolverType(string dataTypeName, TypeInfoFactory createInfo, MatchRequirement matchRequirement) where T : class + => AddType(dataTypeName, createInfo, GetDefaultConfigure(matchRequirement)); + + // Aliased to AddType at this time. + public void AddResolverType(string dataTypeName, TypeInfoFactory createInfo, Func? configure) where T : class + => AddType(dataTypeName, createInfo, configure); + + public void AddArrayType(string elementDataTypeName) where TElement : class + => AddArrayType(FindMapping(typeof(TElement), elementDataTypeName)); + + public void AddArrayType(TypeInfoMapping elementMapping) where TElement : class + { + // Always use a predicate to match all dimensions. + var arrayTypeMatchPredicate = GetArrayTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? (static type => type == typeof(TElement))); + var listTypeMatchPredicate = elementMapping.TypeMatchPredicate is not null ? GetListTypeMatchPredicate(elementMapping.TypeMatchPredicate) : null; + + AddArrayType(elementMapping, typeof(TElement[]), CreateArrayBasedConverter, arrayTypeMatchPredicate, suppressObjectMapping: TryFindMapping(typeof(object), elementMapping.DataTypeName, out _)); + AddArrayType(elementMapping, typeof(List), CreateListBasedConverter, listTypeMatchPredicate, suppressObjectMapping: true); + + void AddArrayType(TypeInfoMapping elementMapping, Type type, Func converter, Func? 
typeMatchPredicate = null, bool suppressObjectMapping = false) + { + var arrayDataTypeName = GetArrayDataTypeName(elementMapping.DataTypeName); + var arrayMapping = new TypeInfoMapping(type, arrayDataTypeName, CreateComposedFactory(type, elementMapping, converter)) + { + MatchRequirement = elementMapping.MatchRequirement, + TypeMatchPredicate = typeMatchPredicate + }; + _items.Add(arrayMapping); + suppressObjectMapping = suppressObjectMapping || arrayMapping.TypeEquals(typeof(object)); + if (!suppressObjectMapping && arrayMapping.MatchRequirement is MatchRequirement.DataTypeName or MatchRequirement.Single) + _items.Add(new TypeInfoMapping(typeof(object), arrayDataTypeName, (options, mapping, dataTypeNameMatch) => + { + if (!dataTypeNameMatch) + throw new InvalidOperationException("Should not happen, please file a bug."); + + return arrayMapping.Factory(options, mapping, dataTypeNameMatch); + })); + } + } + + public void AddResolverArrayType(string elementDataTypeName) where TElement : class + => AddResolverArrayType(FindMapping(typeof(TElement), elementDataTypeName)); + + public void AddResolverArrayType(TypeInfoMapping elementMapping) where TElement : class + { + // Always use a predicate to match all dimensions. + var arrayTypeMatchPredicate = GetArrayTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? (static type => type == typeof(TElement))); + var listTypeMatchPredicate = elementMapping.TypeMatchPredicate is not null ? GetListTypeMatchPredicate(elementMapping.TypeMatchPredicate) : null; + + AddResolverArrayType(elementMapping, typeof(TElement[]), CreateArrayBasedConverterResolver, arrayTypeMatchPredicate, suppressObjectMapping: TryFindMapping(typeof(object), elementMapping.DataTypeName, out _)); + AddResolverArrayType(elementMapping, typeof(List), CreateListBasedConverterResolver, listTypeMatchPredicate, suppressObjectMapping: true); + + void AddResolverArrayType(TypeInfoMapping elementMapping, Type type, Func converter, Func? 
typeMatchPredicate = null, bool suppressObjectMapping = false) + { + var arrayDataTypeName = GetArrayDataTypeName(elementMapping.DataTypeName); + var arrayMapping = new TypeInfoMapping(type, arrayDataTypeName, CreateComposedFactory(type, elementMapping, converter)) + { + MatchRequirement = elementMapping.MatchRequirement, + TypeMatchPredicate = typeMatchPredicate + }; + _items.Add(arrayMapping); + suppressObjectMapping = suppressObjectMapping || arrayMapping.TypeEquals(typeof(object)); + if (!suppressObjectMapping && arrayMapping.MatchRequirement is MatchRequirement.DataTypeName or MatchRequirement.Single) + _items.Add(new TypeInfoMapping(typeof(object), arrayDataTypeName, (options, mapping, dataTypeNameMatch) => + { + if (!dataTypeNameMatch) + throw new InvalidOperationException("Should not happen, please file a bug."); + + return arrayMapping.Factory(options, mapping, dataTypeNameMatch); + })); + } + } + + public void AddStructType(string dataTypeName, TypeInfoFactory createInfo, bool isDefault = false) where T : struct + => AddStructType(typeof(T), typeof(T?), dataTypeName, createInfo, + static (_, innerInfo) => new NullableConverter(innerInfo.GetConcreteResolution().GetConverter()), GetDefaultConfigure(isDefault)); + + public void AddStructType(string dataTypeName, TypeInfoFactory createInfo, MatchRequirement matchRequirement) where T : struct + => AddStructType(typeof(T), typeof(T?), dataTypeName, createInfo, + static (_, innerInfo) => new NullableConverter(innerInfo.GetConcreteResolution().GetConverter()), GetDefaultConfigure(matchRequirement)); + + public void AddStructType(string dataTypeName, TypeInfoFactory createInfo, Func? configure) where T : struct + => AddStructType(typeof(T), typeof(T?), dataTypeName, createInfo, + static (_, innerInfo) => new NullableConverter(innerInfo.GetConcreteResolution().GetConverter()), configure); + + // Lives outside to prevent capture of T. 
+ void AddStructType(Type type, Type nullableType, string dataTypeName, TypeInfoFactory createInfo, + Func nullableConverter, Func? configure) + { + var mapping = new TypeInfoMapping(type, dataTypeName, createInfo); + mapping = configure?.Invoke(mapping) ?? mapping; + _items.Add(mapping); + _items.Add(new TypeInfoMapping(nullableType, dataTypeName, + CreateComposedFactory(nullableType, mapping, nullableConverter, copyPreferredFormat: true)) + { + MatchRequirement = mapping.MatchRequirement, + TypeMatchPredicate = mapping.TypeMatchPredicate is not null + ? type => type is null + ? mapping.TypeMatchPredicate(null) + : Nullable.GetUnderlyingType(type) is { } underlying && mapping.TypeMatchPredicate(underlying) + : null + }); + } + + public void AddStructArrayType(string elementDataTypeName) where TElement : struct + => AddStructArrayType(FindMapping(typeof(TElement), elementDataTypeName), FindMapping(typeof(TElement?), elementDataTypeName), null); + + public void AddStructArrayType(string elementDataTypeName, Func configure) where TElement : struct + => AddStructArrayType(FindMapping(typeof(TElement), elementDataTypeName), FindMapping(typeof(TElement?), elementDataTypeName), configure); + + public void AddStructArrayType(TypeInfoMapping elementMapping, TypeInfoMapping nullableElementMapping, + Func? configure) where TElement : struct + { + // Always use a predicate to match all dimensions. + var arrayTypeMatchPredicate = GetArrayTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? (static type => type is null || type == typeof(TElement))); + var nullableArrayTypeMatchPredicate = GetArrayTypeMatchPredicate(nullableElementMapping.TypeMatchPredicate ?? (static type => + type is null || (Nullable.GetUnderlyingType(type) is { } underlying && underlying == typeof(TElement)))); + var listTypeMatchPredicate = elementMapping.TypeMatchPredicate is not null ? 
GetListTypeMatchPredicate(elementMapping.TypeMatchPredicate) : null; + var nullableListTypeMatchPredicate = nullableElementMapping.TypeMatchPredicate is not null ? GetListTypeMatchPredicate(nullableElementMapping.TypeMatchPredicate) : null; + + AddStructArrayType(elementMapping, nullableElementMapping, typeof(TElement[]), typeof(TElement?[]), + CreateArrayBasedConverter, CreateArrayBasedConverter, + arrayTypeMatchPredicate, nullableArrayTypeMatchPredicate, + configure, suppressObjectMapping: TryFindMapping(typeof(object), elementMapping.DataTypeName, out _)); + + // Don't add the object converter for the list based converter. + AddStructArrayType(elementMapping, nullableElementMapping, typeof(List), typeof(List), + CreateListBasedConverter, CreateListBasedConverter, + listTypeMatchPredicate, nullableListTypeMatchPredicate, + configure, suppressObjectMapping: true); + } + + // Lives outside to prevent capture of TElement. + void AddStructArrayType(TypeInfoMapping elementMapping, TypeInfoMapping nullableElementMapping, Type type, Type nullableType, + Func converter, Func nullableConverter, + Func? typeMatchPredicate, Func? nullableTypeMatchPredicate, Func? configure, bool suppressObjectMapping) + { + var arrayDataTypeName = GetArrayDataTypeName(elementMapping.DataTypeName); + + var arrayMapping = new TypeInfoMapping(type, arrayDataTypeName, CreateComposedFactory(type, elementMapping, converter)) + { + MatchRequirement = elementMapping.MatchRequirement, + TypeMatchPredicate = typeMatchPredicate + }; + arrayMapping = configure?.Invoke(arrayMapping) ?? 
arrayMapping; + var nullableArrayMapping = new TypeInfoMapping(nullableType, arrayDataTypeName, CreateComposedFactory(nullableType, nullableElementMapping, nullableConverter)) + { + MatchRequirement = arrayMapping.MatchRequirement, + TypeMatchPredicate = nullableTypeMatchPredicate + }; + + _items.Add(arrayMapping); + _items.Add(nullableArrayMapping); + suppressObjectMapping = suppressObjectMapping || arrayMapping.TypeEquals(typeof(object)); + if (!suppressObjectMapping && arrayMapping.MatchRequirement is MatchRequirement.DataTypeName or MatchRequirement.Single) + _items.Add(new TypeInfoMapping(typeof(object), arrayDataTypeName, (options, mapping, dataTypeNameMatch) => + { + return options.ArrayNullabilityMode switch + { + _ when !dataTypeNameMatch => throw new InvalidOperationException("Should not happen, please file a bug."), + ArrayNullabilityMode.Never => arrayMapping.Factory(options, mapping, dataTypeNameMatch), + ArrayNullabilityMode.Always => nullableArrayMapping.Factory(options, mapping, dataTypeNameMatch), + ArrayNullabilityMode.PerInstance => CreateComposedPerInstance( + arrayMapping.Factory(options, mapping, dataTypeNameMatch), + nullableArrayMapping.Factory(options, mapping, dataTypeNameMatch), + mapping.DataTypeName + ), + _ => throw new ArgumentOutOfRangeException() + }; + }) { MatchRequirement = MatchRequirement.DataTypeName }); + + PgTypeInfo CreateComposedPerInstance(PgTypeInfo innerTypeInfo, PgTypeInfo nullableInnerTypeInfo, string dataTypeName) + { + var converter = + new PolymorphicArrayConverter( + innerTypeInfo.GetConcreteResolution().GetConverter(), + nullableInnerTypeInfo.GetConcreteResolution().GetConverter()); + + return new PgTypeInfo(innerTypeInfo.Options, converter, + innerTypeInfo.Options.GetCanonicalTypeId(new DataTypeName(dataTypeName)), unboxedType: typeof(Array)) { SupportsWriting = false }; + } + } + + public void AddResolverStructType(string dataTypeName, TypeInfoFactory createInfo, bool isDefault = false) where T : struct + => 
AddResolverStructType(typeof(T), typeof(T?), dataTypeName, createInfo, + static (_, innerInfo) => new NullableConverterResolver(innerInfo), GetDefaultConfigure(isDefault)); + + public void AddResolverStructType(string dataTypeName, TypeInfoFactory createInfo, MatchRequirement matchRequirement) where T : struct + => AddResolverStructType(typeof(T), typeof(T?), dataTypeName, createInfo, + static (_, innerInfo) => new NullableConverterResolver(innerInfo), GetDefaultConfigure(matchRequirement)); + + public void AddResolverStructType(string dataTypeName, TypeInfoFactory createInfo, Func? configure) where T : struct + => AddResolverStructType(typeof(T), typeof(T?), dataTypeName, createInfo, + static (_, innerInfo) => new NullableConverterResolver(innerInfo), configure); + + // Lives outside to prevent capture of T. + void AddResolverStructType(Type type, Type nullableType, string dataTypeName, TypeInfoFactory createInfo, + Func nullableConverter, Func? configure) + { + var mapping = new TypeInfoMapping(type, dataTypeName, createInfo); + mapping = configure?.Invoke(mapping) ?? mapping; + _items.Add(mapping); + _items.Add(new TypeInfoMapping(nullableType, dataTypeName, + CreateComposedFactory(nullableType, mapping, nullableConverter, copyPreferredFormat: true)) + { + MatchRequirement = mapping.MatchRequirement, + TypeMatchPredicate = mapping.TypeMatchPredicate is not null + ? type => type is null || (Nullable.GetUnderlyingType(type) is { } underlying && mapping.TypeMatchPredicate(underlying)) + : null + }); + } + + public void AddResolverStructArrayType(string elementDataTypeName) where TElement : struct + => AddResolverStructArrayType(FindMapping(typeof(TElement), elementDataTypeName), FindMapping(typeof(TElement?), elementDataTypeName)); + + public void AddResolverStructArrayType(TypeInfoMapping elementMapping, TypeInfoMapping nullableElementMapping) where TElement : struct + { + // Always use a predicate to match all dimensions. 
+ var arrayTypeMatchPredicate = GetArrayTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? (static type => type is null || type == typeof(TElement))); + var nullableArrayTypeMatchPredicate = GetArrayTypeMatchPredicate(nullableElementMapping.TypeMatchPredicate ?? (static type => + type is null || (Nullable.GetUnderlyingType(type) is { } underlying && underlying == typeof(TElement)))); + var listTypeMatchPredicate = elementMapping.TypeMatchPredicate is not null ? GetListTypeMatchPredicate(elementMapping.TypeMatchPredicate) : null; + + AddResolverStructArrayType(elementMapping, nullableElementMapping, typeof(TElement[]), typeof(TElement?[]), + CreateArrayBasedConverterResolver, + CreateArrayBasedConverterResolver, suppressObjectMapping: TryFindMapping(typeof(object), elementMapping.DataTypeName, out _), arrayTypeMatchPredicate, nullableArrayTypeMatchPredicate); + + // Don't add the object converter for the list based converter. + AddResolverStructArrayType(elementMapping, nullableElementMapping, typeof(List), typeof(List), + CreateListBasedConverterResolver, + CreateListBasedConverterResolver, suppressObjectMapping: true, listTypeMatchPredicate, nullableArrayTypeMatchPredicate); + } + + // Lives outside to prevent capture of TElement. + void AddResolverStructArrayType(TypeInfoMapping elementMapping, TypeInfoMapping nullableElementMapping, Type type, Type nullableType, + Func converter, Func nullableConverter, + bool suppressObjectMapping, Func? typeMatchPredicate, Func? 
nullableTypeMatchPredicate) + { + var arrayDataTypeName = GetArrayDataTypeName(elementMapping.DataTypeName); + + var arrayMapping = new TypeInfoMapping(type, arrayDataTypeName, CreateComposedFactory(type, elementMapping, converter)) + { + MatchRequirement = elementMapping.MatchRequirement, + TypeMatchPredicate = typeMatchPredicate + }; + var nullableArrayMapping = new TypeInfoMapping(nullableType, arrayDataTypeName, CreateComposedFactory(nullableType, nullableElementMapping, nullableConverter)) + { + MatchRequirement = elementMapping.MatchRequirement, + TypeMatchPredicate = nullableTypeMatchPredicate + }; + + _items.Add(arrayMapping); + _items.Add(nullableArrayMapping); + suppressObjectMapping = suppressObjectMapping || arrayMapping.TypeEquals(typeof(object)); + if (!suppressObjectMapping && arrayMapping.MatchRequirement is MatchRequirement.DataTypeName or MatchRequirement.Single) + _items.Add(new TypeInfoMapping(typeof(object), arrayDataTypeName, (options, mapping, dataTypeNameMatch) => options.ArrayNullabilityMode switch + { + _ when !dataTypeNameMatch => throw new InvalidOperationException("Should not happen, please file a bug."), + ArrayNullabilityMode.Never => arrayMapping.Factory(options, mapping, dataTypeNameMatch), + ArrayNullabilityMode.Always => nullableArrayMapping.Factory(options, mapping, dataTypeNameMatch), + ArrayNullabilityMode.PerInstance => CreateComposedPerInstance( + arrayMapping.Factory(options, mapping, dataTypeNameMatch), + nullableArrayMapping.Factory(options, mapping, dataTypeNameMatch), + mapping.DataTypeName + ), + _ => throw new ArgumentOutOfRangeException() + }) { MatchRequirement = MatchRequirement.DataTypeName }); + + PgTypeInfo CreateComposedPerInstance(PgTypeInfo innerTypeInfo, PgTypeInfo nullableInnerTypeInfo, string dataTypeName) + { + var resolver = + new PolymorphicArrayConverterResolver((PgResolverTypeInfo)innerTypeInfo, + (PgResolverTypeInfo)nullableInnerTypeInfo); + + return new PgResolverTypeInfo(innerTypeInfo.Options, 
resolver, + innerTypeInfo.Options.GetCanonicalTypeId(new DataTypeName(dataTypeName))) { SupportsWriting = false }; + } + } + + public void AddPolymorphicResolverArrayType(string elementDataTypeName, Func> elementToArrayConverterFactory) + => AddPolymorphicResolverArrayType(FindMapping(typeof(object), elementDataTypeName), elementToArrayConverterFactory); + + public void AddPolymorphicResolverArrayType(TypeInfoMapping elementMapping, Func> elementToArrayConverterFactory) + { + AddPolymorphicResolverArrayType(elementMapping, typeof(object), + (mapping, elemInfo) => new ArrayPolymorphicConverterResolver( + elemInfo.Options.GetCanonicalTypeId(new DataTypeName(mapping.DataTypeName)), elemInfo, elementToArrayConverterFactory(elemInfo.Options)) + , null); + + void AddPolymorphicResolverArrayType(TypeInfoMapping elementMapping, Type type, Func converter, Func? typeMatchPredicate) + { + var arrayDataTypeName = GetArrayDataTypeName(elementMapping.DataTypeName); + var mapping = new TypeInfoMapping(type, arrayDataTypeName, + CreateComposedFactory(typeof(Array), elementMapping, converter, supportsWriting: false)) + { + MatchRequirement = elementMapping.MatchRequirement, + TypeMatchPredicate = typeMatchPredicate + }; + _items.Add(mapping); + } + } + + /// Returns whether type matches any of the types we register pg arrays as. + public static bool IsArrayLikeType(Type type, [NotNullWhen(true)] out Type? elementType) + { + elementType = type switch + { + { IsArray: true } => type.GetElementType(), + { IsConstructedGenericType: true } when type.GetGenericTypeDefinition() == typeof(List<>) => type.GetGenericArguments()[0], + _ => null + }; + + return elementType is not null; + } + + static string GetArrayDataTypeName(string dataTypeName) + => DataTypeName.IsFullyQualified(dataTypeName.AsSpan()) + ? 
DataTypeName.ValidatedName(dataTypeName).ToArrayName().Value + : "_" + DataTypeName.FromDisplayName(dataTypeName).UnqualifiedName; + + static ArrayBasedArrayConverter CreateArrayBasedConverter(TypeInfoMapping mapping, PgTypeInfo elemInfo) + { + if (!elemInfo.IsBoxing) + return new ArrayBasedArrayConverter(elemInfo.GetConcreteResolution(), mapping.Type); + + ThrowBoxingNotSupported(resolver: false); + return default; + } + + static ListBasedArrayConverter CreateListBasedConverter(TypeInfoMapping mapping, PgTypeInfo elemInfo) + { + if (!elemInfo.IsBoxing) + return new ListBasedArrayConverter(elemInfo.GetConcreteResolution()); + + ThrowBoxingNotSupported(resolver: false); + return default; + } + + static ArrayConverterResolver CreateArrayBasedConverterResolver(TypeInfoMapping mapping, PgResolverTypeInfo elemInfo) + { + if (!elemInfo.IsBoxing) + return new ArrayConverterResolver(elemInfo, mapping.Type); + + ThrowBoxingNotSupported(resolver: true); + return default; + } + + static ArrayConverterResolver CreateListBasedConverterResolver(TypeInfoMapping mapping, PgResolverTypeInfo elemInfo) + { + if (!elemInfo.IsBoxing) + return new ArrayConverterResolver(elemInfo, mapping.Type); + + ThrowBoxingNotSupported(resolver: true); + return default; + } + + [DoesNotReturn] + static void ThrowBoxingNotSupported(bool resolver) + => throw new InvalidOperationException($"Boxing converters are not supported, manually construct a mapping over a casting converter{(resolver ? " resolver" : "")} instead."); +} + +public static class TypeInfoMappingHelpers +{ + internal static PgTypeId ResolveFullyQualifiedName(PgSerializerOptions options, string dataTypeName) + => !DataTypeName.IsFullyQualified(dataTypeName.AsSpan()) + ? 
options.ToCanonicalTypeId(options.DatabaseInfo.GetPostgresType(dataTypeName)) + : new(new DataTypeName(dataTypeName)); + + internal static PostgresType GetPgType(this TypeInfoMapping mapping, PgSerializerOptions options) + => !DataTypeName.IsFullyQualified(mapping.DataTypeName.AsSpan()) + ? options.DatabaseInfo.GetPostgresType(mapping.DataTypeName) + : options.DatabaseInfo.GetPostgresType(new DataTypeName(mapping.DataTypeName)); + + public static PgTypeInfo CreateInfo(this TypeInfoMapping mapping, PgSerializerOptions options, PgConverter converter, DataFormat? preferredFormat = null, bool supportsWriting = true) + => new(options, converter, ResolveFullyQualifiedName(options, mapping.DataTypeName)) + { + PreferredFormat = preferredFormat, + SupportsWriting = supportsWriting + }; + + public static PgResolverTypeInfo CreateInfo(this TypeInfoMapping mapping, PgSerializerOptions options, PgConverterResolver resolver, bool includeDataTypeName = true, DataFormat? preferredFormat = null, bool supportsWriting = true) + { + PgTypeId? pgTypeId = includeDataTypeName ? ResolveFullyQualifiedName(options, mapping.DataTypeName) : null; + return new(options, resolver, pgTypeId) + { + PreferredFormat = preferredFormat, + SupportsWriting = supportsWriting + }; + } +} diff --git a/src/Npgsql/Internal/TypeInfoResolverChain.cs b/src/Npgsql/Internal/TypeInfoResolverChain.cs new file mode 100644 index 0000000000..64e1f86e0d --- /dev/null +++ b/src/Npgsql/Internal/TypeInfoResolverChain.cs @@ -0,0 +1,25 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Internal; + +sealed class TypeInfoResolverChain : IPgTypeInfoResolver +{ + readonly IPgTypeInfoResolver[] _resolvers; + + public TypeInfoResolverChain(IEnumerable resolvers) + => _resolvers = resolvers.ToArray(); + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + { + foreach (var resolver in _resolvers) + { + if (resolver.GetTypeInfo(type, dataTypeName, options) is { } info) + return info; + } + + return null; + } +} diff --git a/src/Npgsql/Internal/TypeMapping/IUserTypeMapping.cs b/src/Npgsql/Internal/TypeMapping/IUserTypeMapping.cs deleted file mode 100644 index aedc14e743..0000000000 --- a/src/Npgsql/Internal/TypeMapping/IUserTypeMapping.cs +++ /dev/null @@ -1,13 +0,0 @@ -using System; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeMapping; - -public interface IUserTypeMapping -{ - public string PgTypeName { get; } - public Type ClrType { get; } - - public NpgsqlTypeHandler CreateHandler(PostgresType pgType, NpgsqlConnector connector); -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeMapping/TypeMapper.cs b/src/Npgsql/Internal/TypeMapping/TypeMapper.cs deleted file mode 100644 index eb6bb75f48..0000000000 --- a/src/Npgsql/Internal/TypeMapping/TypeMapper.cs +++ /dev/null @@ -1,539 +0,0 @@ -using System; -using System.Collections; -using System.Collections.Concurrent; -using System.Collections.Generic; -using System.Diagnostics.CodeAnalysis; -using System.Linq; -using System.Reflection; -using Microsoft.Extensions.Logging; -using Npgsql.Internal.TypeHandlers; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using Npgsql.TypeMapping; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeMapping; - -/// -/// Type mapper used to map types to type handlers. -/// -public sealed class TypeMapper -{ - internal NpgsqlConnector Connector { get; } - readonly object _writeLock = new(); - - NpgsqlDatabaseInfo? 
_databaseInfo; - - internal NpgsqlDatabaseInfo DatabaseInfo - { - get - { - var databaseInfo = _databaseInfo; - if (databaseInfo is null) - ThrowHelper.ThrowInvalidOperationException("Internal error: this type mapper hasn't yet been bound to a database info object"); - return databaseInfo; - } - } - - volatile TypeHandlerResolver[] _handlerResolvers; - volatile TypeMappingResolver[] _mappingResolvers; - internal NpgsqlTypeHandler UnrecognizedTypeHandler { get; } - - readonly ConcurrentDictionary _handlersByOID = new(); - readonly ConcurrentDictionary _handlersByNpgsqlDbType = new(); - readonly ConcurrentDictionary _handlersByClrType = new(); - readonly ConcurrentDictionary _handlersByDataTypeName = new(); - - readonly Dictionary _userTypeMappings = new(); - readonly INpgsqlNameTranslator _defaultNameTranslator; - - readonly ILogger _commandLogger; - - #region Construction - - internal TypeMapper(NpgsqlConnector connector, INpgsqlNameTranslator defaultNameTranslator) - { - Connector = connector; - _defaultNameTranslator = defaultNameTranslator; - UnrecognizedTypeHandler = new UnknownTypeHandler(Connector.TextEncoding); - _handlerResolvers = Array.Empty(); - _mappingResolvers = Array.Empty(); - _commandLogger = connector.LoggingConfiguration.CommandLogger; - } - - #endregion Constructors - - internal void Initialize( - NpgsqlDatabaseInfo databaseInfo, - List resolverFactories, - Dictionary userTypeMappings) - { - _databaseInfo = databaseInfo; - - var handlerResolvers = new TypeHandlerResolver[resolverFactories.Count]; - var mappingResolvers = new List(resolverFactories.Count); - for (var i = 0; i < resolverFactories.Count; i++) - { - handlerResolvers[i] = resolverFactories[i].Create(this, Connector); - var mappingResolver = resolverFactories[i].CreateMappingResolver(); - if (mappingResolver is not null) - mappingResolvers.Add(mappingResolver); - } - - // Add global mapper resolvers in backwards because they're inserted in the beginning - for (var i = 
resolverFactories.Count - 1; i >= 0; i--) - { - var globalMappingResolver = resolverFactories[i].CreateGlobalMappingResolver(); - if (globalMappingResolver is not null) - GlobalTypeMapper.Instance.TryAddMappingResolver(globalMappingResolver); - } - - _handlerResolvers = handlerResolvers; - _mappingResolvers = mappingResolvers.ToArray(); - - foreach (var userTypeMapping in userTypeMappings.Values) - { - if (DatabaseInfo.TryGetPostgresTypeByName(userTypeMapping.PgTypeName, out var pgType)) - { - _handlersByOID[pgType.OID] = - _handlersByDataTypeName[pgType.FullName] = - _handlersByDataTypeName[pgType.Name] = - _handlersByClrType[userTypeMapping.ClrType] = userTypeMapping.CreateHandler(pgType, Connector); - - _userTypeMappings[pgType.OID] = new(npgsqlDbType: null, pgType.Name, userTypeMapping.ClrType); - } - } - } - - #region Type handler lookup - - /// - /// Looks up a type handler by its PostgreSQL type's OID. - /// - /// A PostgreSQL type OID - /// A type handler that can be used to encode and decode values. - public NpgsqlTypeHandler ResolveByOID(uint oid) - => TryResolveByOID(oid, out var result) ? result : UnrecognizedTypeHandler; - - internal bool TryResolveByOID(uint oid, [NotNullWhen(true)] out NpgsqlTypeHandler? handler) - { - if (_handlersByOID.TryGetValue(oid, out handler)) - return true; - - return TryResolveLong(oid, out handler); - - bool TryResolveLong(uint oid, [NotNullWhen(true)] out NpgsqlTypeHandler? handler) - { - if (!DatabaseInfo.ByOID.TryGetValue(oid, out var pgType)) - { - handler = null; - return false; - } - - lock (_writeLock) - { - if ((handler = ResolveByPostgresType(pgType)) is not null) - { - _handlersByOID[oid] = handler; - return true; - } - - if ((handler = ResolveComplexTypeByDataTypeName(pgType.FullName, throwOnError: false)) is not null) - { - _handlersByOID[oid] = handler; - return true; - } - - handler = null; - return false; - } - } - } - - /// - /// Looks up a type handler by NpgsqlDbType. 
- /// - /// Parameter's NpgsqlDbType - /// A type handler that can be used to encode and decode values. - public NpgsqlTypeHandler ResolveByNpgsqlDbType(NpgsqlDbType npgsqlDbType) - { - if (_handlersByNpgsqlDbType.TryGetValue(npgsqlDbType, out var handler)) - return handler; - - return ResolveLong(npgsqlDbType); - - NpgsqlTypeHandler ResolveLong(NpgsqlDbType npgsqlDbType) - { - lock (_writeLock) - { - // First, try to resolve as a base type; translate the NpgsqlDbType to a PG data type name and look that up. - if (GlobalTypeMapper.NpgsqlDbTypeToDataTypeName(npgsqlDbType) is { } dataTypeName) - { - foreach (var resolver in _handlerResolvers) - { - try - { - if (resolver.ResolveByDataTypeName(dataTypeName) is { } handler) - return _handlersByNpgsqlDbType[npgsqlDbType] = handler; - } - catch (Exception e) - { - _commandLogger.LogError(e, - $"Type resolver {resolver.GetType().Name} threw exception while resolving NpgsqlDbType {npgsqlDbType}"); - } - } - } - - // Can't find (or translate) PG data type name by NpgsqlDbType. - // This might happen because of flags (like Array, Range or Multirange). 
- foreach (var resolver in _handlerResolvers) - { - try - { - if (resolver.ResolveByNpgsqlDbType(npgsqlDbType) is { } handler) - return _handlersByNpgsqlDbType[npgsqlDbType] = handler; - } - catch (Exception e) - { - _commandLogger.LogError(e, - $"Type resolver {resolver.GetType().Name} threw exception while resolving NpgsqlDbType {npgsqlDbType}"); - } - } - - if (npgsqlDbType.HasFlag(NpgsqlDbType.Array)) - { - var elementHandler = ResolveByNpgsqlDbType(npgsqlDbType & ~NpgsqlDbType.Array); - - if (elementHandler.PostgresType.Array is not { } pgArrayType) - throw new ArgumentException( - $"No array type could be found in the database for element {elementHandler.PostgresType}"); - - return _handlersByNpgsqlDbType[npgsqlDbType] = - elementHandler.CreateArrayHandler(pgArrayType, Connector.Settings.ArrayNullabilityMode); - } - - throw new NpgsqlException($"The NpgsqlDbType '{npgsqlDbType}' isn't present in your database. " + - "You may need to install an extension or upgrade to a newer version."); - } - } - } - - internal NpgsqlTypeHandler ResolveByDataTypeName(string typeName) - => ResolveByDataTypeNameCore(typeName) ?? ResolveComplexTypeByDataTypeName(typeName, throwOnError: true)!; - - NpgsqlTypeHandler? ResolveByDataTypeNameCore(string typeName) - { - if (_handlersByDataTypeName.TryGetValue(typeName, out var handler)) - return handler; - - return ResolveLong(typeName); - - NpgsqlTypeHandler? ResolveLong(string typeName) - { - lock (_writeLock) - { - foreach (var resolver in _handlerResolvers) - { - try - { - if (resolver.ResolveByDataTypeName(typeName) is { } handler) - return _handlersByDataTypeName[typeName] = handler; - } - catch (Exception e) - { - _commandLogger.LogError(e, $"Type resolver {resolver.GetType().Name} threw exception while resolving data type name {typeName}"); - } - } - - return null; - } - } - } - - NpgsqlTypeHandler? 
ResolveByPostgresType(PostgresType type) - { - if (_handlersByDataTypeName.TryGetValue(type.FullName, out var handler)) - return handler; - - return ResolveLong(type); - - NpgsqlTypeHandler? ResolveLong(PostgresType type) - { - lock (_writeLock) - { - foreach (var resolver in _handlerResolvers) - { - try - { - if (resolver.ResolveByPostgresType(type) is { } handler) - return _handlersByDataTypeName[type.FullName] = handler; - } - catch (Exception e) - { - _commandLogger.LogError(e, $"Type resolver {resolver.GetType().Name} threw exception while resolving data type name {type.FullName}"); - } - } - - return null; - } - } - } - - NpgsqlTypeHandler? ResolveComplexTypeByDataTypeName(string typeName, bool throwOnError) - { - lock (_writeLock) - { - var pgType = DatabaseInfo.GetPostgresTypeByName(typeName); - - switch (pgType) - { - case PostgresArrayType pgArrayType: - { - var elementHandler = ResolveByOID(pgArrayType.Element.OID); - return _handlersByDataTypeName[typeName] = - elementHandler.CreateArrayHandler(pgArrayType, Connector.Settings.ArrayNullabilityMode); - } - - case PostgresEnumType pgEnumType: - { - // A mapped enum would have been registered in _extraHandlersByDataTypeName and bound above - this is unmapped. - return _handlersByDataTypeName[typeName] = - new UnmappedEnumHandler(pgEnumType, _defaultNameTranslator, Connector.TextEncoding); - } - - case PostgresDomainType pgDomainType: - return _handlersByDataTypeName[typeName] = ResolveByOID(pgDomainType.BaseType.OID); - - case PostgresBaseType pgBaseType: - return throwOnError - ? throw new NotSupportedException($"PostgreSQL type '{pgBaseType}' isn't supported by Npgsql") - : null; - - case PostgresCompositeType pgCompositeType: - // We don't support writing unmapped composite types, but we do support reading unmapped composite types. - // So when we're invoked from ResolveOID (which is the read path), we don't want to raise an exception. - return throwOnError - ? 
throw new NotSupportedException( - $"Composite type '{pgCompositeType}' must be mapped with Npgsql before being used, see the docs.") - : null; - -#pragma warning disable CS0618 - case PostgresRangeType: - case PostgresMultirangeType: - return throwOnError - ? throw new NotSupportedException( - $"'{pgType}' is a range type; please call {nameof(NpgsqlSlimDataSourceBuilder.EnableRanges)} on {nameof(NpgsqlSlimDataSourceBuilder)} to enable ranges. " + - "See https://www.npgsql.org/doc/types/ranges.html for more information.") - : null; -#pragma warning restore CS0618 - - default: - throw new ArgumentOutOfRangeException($"Unhandled PostgreSQL type type: {pgType.GetType()}"); - } - } - } - - internal NpgsqlTypeHandler ResolveByValue(T value) - { - if (value is null) - return ResolveByClrType(typeof(T)); - - if (typeof(T).IsValueType) - { - // Attempt to resolve value types generically via the resolver. This is the efficient fast-path, where we don't even need to - // do a dictionary lookup (the JIT elides type checks in generic methods for value types) - NpgsqlTypeHandler? handler; - - foreach (var resolver in _handlerResolvers) - { - try - { - if ((handler = resolver.ResolveValueTypeGenerically(value)) is not null) - return handler; - } - catch (Exception e) - { - _commandLogger.LogError(e, $"Type resolver {resolver.GetType().Name} threw exception while resolving value with type {typeof(T)}"); - } - } - - // There may still be some value types not resolved by the above, e.g. NpgsqlRange - } - - // Value types would have been resolved above, so this is a reference type - no JIT optimizations. - // We go through the regular logic (and there's no boxing). - return ResolveByValue((object)value); - } - - internal NpgsqlTypeHandler ResolveByValue(object value) - { - // We resolve as follows: - // 1. Cached by-type lookup (fast path). This will work for almost all types after the very first resolution. - // 2. Value-dependent type lookup (e.g. 
DateTime by Kind) via the resolvers. This includes complex types (e.g. array/range - // over DateTime), and the results cannot be cached. - // 3. Uncached by-type lookup (for the very first resolution of a given type) - - var type = value.GetType(); - if (_handlersByClrType.TryGetValue(type, out var handler)) - return handler; - - return ResolveLong(value, type); - - NpgsqlTypeHandler ResolveLong(object value, Type type) - { - foreach (var resolver in _handlerResolvers) - { - try - { - if (resolver.ResolveValueDependentValue(value) is { } handler) - return handler; - } - catch (Exception e) - { - _commandLogger.LogError(e, $"Type resolver {resolver.GetType().Name} threw exception while resolving value with type {type}"); - } - } - - // ResolveByClrType either throws, or resolves a handler and caches it in _handlersByClrType (where it would be found above the - // next time we resolve this type) - return ResolveByClrType(type); - } - } - - // TODO: This is needed as a separate method only because of binary COPY, see #3957 - /// - /// Looks up a type handler by CLR Type. - /// - /// Parameter's CLR type - /// A type handler that can be used to encode and decode values. 
- public NpgsqlTypeHandler ResolveByClrType(Type type) - { - if (_handlersByClrType.TryGetValue(type, out var handler)) - return handler; - - return ResolveLong(type); - - NpgsqlTypeHandler ResolveLong(Type type) - { - lock (_writeLock) - { - foreach (var resolver in _handlerResolvers) - { - try - { - if (resolver.ResolveByClrType(type) is { } handler) - return _handlersByClrType[type] = handler; - } - catch (Exception e) - { - _commandLogger.LogError(e, $"Type resolver {resolver.GetType().Name} threw exception while resolving value with type {type}"); - } - } - - // Try to see if it is an array type - var arrayElementType = GetArrayListElementType(type); - if (arrayElementType is not null) - { - if (ResolveByClrType(arrayElementType) is not { } elementHandler) - throw new ArgumentException($"Array type over CLR type {arrayElementType.Name} isn't supported by Npgsql"); - - if (elementHandler.PostgresType.Array is not { } pgArrayType) - throw new ArgumentException( - $"No array type could be found in the database for element {elementHandler.PostgresType}"); - - return _handlersByClrType[type] = - elementHandler.CreateArrayHandler(pgArrayType, Connector.Settings.ArrayNullabilityMode); - } - - if (Nullable.GetUnderlyingType(type) is { } underlyingType && ResolveByClrType(underlyingType) is { } underlyingHandler) - return _handlersByClrType[type] = underlyingHandler; - - if (type.IsEnum) - { - return DatabaseInfo.TryGetPostgresTypeByName(GetPgName(type, _defaultNameTranslator), out var pgType) - && pgType is PostgresEnumType pgEnumType - ? _handlersByClrType[type] = new UnmappedEnumHandler(pgEnumType, _defaultNameTranslator, Connector.TextEncoding) - : throw new NotSupportedException( - $"Could not find a PostgreSQL enum type corresponding to {type.Name}. 
" + - "Consider mapping the enum before usage, refer to the documentation for more details."); - } - - if (typeof(IEnumerable).IsAssignableFrom(type)) - throw new NotSupportedException("IEnumerable parameters are not supported, pass an array or List instead"); - - throw new NotSupportedException($"The CLR type {type} isn't natively supported by Npgsql or your PostgreSQL. " + - $"To use it with a PostgreSQL composite you need to specify {nameof(NpgsqlParameter.DataTypeName)} or to map it, please refer to the documentation."); - } - - static Type? GetArrayListElementType(Type type) - { - var typeInfo = type.GetTypeInfo(); - if (typeInfo.IsArray) - return GetUnderlyingType(type.GetElementType()!); // The use of bang operator is justified here as Type.GetElementType() only returns null for the Array base class which can't be mapped in a useful way. - - var ilist = typeInfo.ImplementedInterfaces.FirstOrDefault(x => x.GetTypeInfo().IsGenericType && x.GetGenericTypeDefinition() == typeof(IList<>)); - if (ilist != null) - return GetUnderlyingType(ilist.GetGenericArguments()[0]); - - if (typeof(IList).IsAssignableFrom(type)) - throw new NotSupportedException("Non-generic IList is a supported parameter, but the NpgsqlDbType parameter must be set on the parameter"); - - return null; - - Type GetUnderlyingType(Type t) - => Nullable.GetUnderlyingType(t) ?? t; - } - } - } - - #endregion Type handler lookup - - internal bool TryGetMapping(PostgresType pgType, [NotNullWhen(true)] out TypeMappingInfo? 
mapping) - { - foreach (var resolver in _mappingResolvers) - if ((mapping = resolver.GetMappingByPostgresType(this, pgType)) is not null) - return true; - - switch (pgType) - { - case PostgresArrayType pgArrayType: - if (TryGetMapping(pgArrayType.Element, out var elementMapping)) - { - mapping = new(elementMapping.NpgsqlDbType | NpgsqlDbType.Array, pgType.DisplayName); - return true; - } - - break; - - case PostgresDomainType pgDomainType: - if (TryGetMapping(pgDomainType.BaseType, out var baseMapping)) - { - mapping = new(baseMapping.NpgsqlDbType, pgType.DisplayName, baseMapping.ClrTypes); - return true; - } - - break; - - case PostgresEnumType or PostgresCompositeType: - return _userTypeMappings.TryGetValue(pgType.OID, out mapping); - } - - mapping = null; - return false; - } - - internal (NpgsqlDbType? npgsqlDbType, PostgresType postgresType) GetTypeInfoByOid(uint oid) - { - if (!DatabaseInfo.ByOID.TryGetValue(oid, out var pgType)) - ThrowHelper.ThrowInvalidOperationException($"Couldn't find PostgreSQL type with OID {oid}"); - - if (TryGetMapping(pgType, out var mapping)) - return (mapping.NpgsqlDbType, pgType); - - return (null, pgType); - } - - static string GetPgName(Type clrType, INpgsqlNameTranslator nameTranslator) - => clrType.GetCustomAttribute()?.PgName - ?? nameTranslator.TranslateTypeName(clrType.Name); -} diff --git a/src/Npgsql/Internal/TypeMapping/TypeMappingResolver.cs b/src/Npgsql/Internal/TypeMapping/TypeMappingResolver.cs deleted file mode 100644 index af426e6f2f..0000000000 --- a/src/Npgsql/Internal/TypeMapping/TypeMappingResolver.cs +++ /dev/null @@ -1,25 +0,0 @@ -using System; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeMapping; - -public abstract class TypeMappingResolver -{ - public abstract string? GetDataTypeNameByClrType(Type clrType); - public virtual string? GetDataTypeNameByValueDependentValue(object value) => null; - public abstract TypeMappingInfo? 
GetMappingByDataTypeName(string dataTypeName); - - /// - /// Gets type mapping information for a given PostgreSQL type. - /// Invoked in scenarios when mapping information is required, rather than a type handler for reading or writing. - /// - public virtual TypeMappingInfo? GetMappingByPostgresType(TypeMapper typeMapper, PostgresType type) - => GetMappingByDataTypeName(type.Name); - - internal TypeMappingInfo? GetMappingByValueDependentValue(object value) - => GetDataTypeNameByValueDependentValue(value) is { } dataTypeName ? GetMappingByDataTypeName(dataTypeName) : null; - - internal TypeMappingInfo? GetMappingByClrType(Type clrType) - => GetDataTypeNameByClrType(clrType) is { } dataTypeName ? GetMappingByDataTypeName(dataTypeName) : null; -} \ No newline at end of file diff --git a/src/Npgsql/Internal/TypeMapping/UserCompositeTypeMappings.cs b/src/Npgsql/Internal/TypeMapping/UserCompositeTypeMappings.cs deleted file mode 100644 index 75d680b200..0000000000 --- a/src/Npgsql/Internal/TypeMapping/UserCompositeTypeMappings.cs +++ /dev/null @@ -1,24 +0,0 @@ -using System; -using Npgsql.Internal.TypeHandlers.CompositeHandlers; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.Internal.TypeMapping; - -public interface IUserCompositeTypeMapping : IUserTypeMapping -{ - INpgsqlNameTranslator NameTranslator { get; } -} - -sealed class UserCompositeTypeMapping : IUserCompositeTypeMapping -{ - public string PgTypeName { get; } - public Type ClrType => typeof(T); - public INpgsqlNameTranslator NameTranslator { get; } - - public UserCompositeTypeMapping(string pgTypeName, INpgsqlNameTranslator nameTranslator) - => (PgTypeName, NameTranslator) = (pgTypeName, nameTranslator); - - public NpgsqlTypeHandler CreateHandler(PostgresType pgType, NpgsqlConnector connector) - => new CompositeHandler((PostgresCompositeType)pgType, connector.TypeMapper, NameTranslator); -} \ No newline at end of file diff --git 
a/src/Npgsql/Internal/TypeMapping/UserEnumTypeMappings.cs b/src/Npgsql/Internal/TypeMapping/UserEnumTypeMappings.cs deleted file mode 100644 index 9c2c3e35d7..0000000000 --- a/src/Npgsql/Internal/TypeMapping/UserEnumTypeMappings.cs +++ /dev/null @@ -1,46 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Linq; -using System.Reflection; -using Npgsql.Internal.TypeHandlers; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.Internal.TypeMapping; - -public interface IUserEnumTypeMapping : IUserTypeMapping -{ - INpgsqlNameTranslator NameTranslator { get; } -} - -sealed class UserEnumTypeMapping : IUserEnumTypeMapping - where TEnum : struct, Enum -{ - public string PgTypeName { get; } - public Type ClrType => typeof(TEnum); - public INpgsqlNameTranslator NameTranslator { get; } - - readonly Dictionary _enumToLabel = new(); - readonly Dictionary _labelToEnum = new(); - - public UserEnumTypeMapping(string pgTypeName, INpgsqlNameTranslator nameTranslator) - { - (PgTypeName, NameTranslator) = (pgTypeName, nameTranslator); - - foreach (var field in typeof(TEnum).GetFields(BindingFlags.Static | BindingFlags.Public)) - { - var attribute = (PgNameAttribute?)field.GetCustomAttributes(typeof(PgNameAttribute), false).FirstOrDefault(); - var enumName = attribute is null - ? 
nameTranslator.TranslateMemberName(field.Name) - : attribute.PgName; - var enumValue = (TEnum)field.GetValue(null)!; - - _enumToLabel[enumValue] = enumName; - _labelToEnum[enumName] = enumValue; - } - } - - public NpgsqlTypeHandler CreateHandler(PostgresType postgresType, NpgsqlConnector connector) - => new EnumHandler((PostgresEnumType)postgresType, _enumToLabel, _labelToEnum); -} \ No newline at end of file diff --git a/src/Npgsql/Internal/ValueMetadata.cs b/src/Npgsql/Internal/ValueMetadata.cs new file mode 100644 index 0000000000..ff041a3060 --- /dev/null +++ b/src/Npgsql/Internal/ValueMetadata.cs @@ -0,0 +1,9 @@ +namespace Npgsql.Internal; + +public readonly struct ValueMetadata +{ + public required DataFormat Format { get; init; } + public required Size BufferRequirement { get; init; } + public required Size Size { get; init; } + public object? WriteState { get; init; } +} diff --git a/src/Npgsql/MultiplexingDataSource.cs b/src/Npgsql/MultiplexingDataSource.cs index 03c8718216..e9e7fa3069 100644 --- a/src/Npgsql/MultiplexingDataSource.cs +++ b/src/Npgsql/MultiplexingDataSource.cs @@ -262,7 +262,7 @@ bool WriteCommand(NpgsqlConnector connector, NpgsqlCommand command, ref Multiple if (t.IsFaulted) { - FailWrite(conn, t.Exception!.UnwrapAggregate()); + FailWrite(conn, t.Exception!.InnerException!); return; } @@ -314,7 +314,7 @@ void Flush(NpgsqlConnector connector, ref MultiplexingStats stats) var conn = (NpgsqlConnector)o!; if (t.IsFaulted) { - FailWrite(conn, t.Exception!.UnwrapAggregate()); + FailWrite(conn, t.Exception!.InnerException!); return; } diff --git a/src/Npgsql/NameTranslation/NpgsqlSnakeCaseNameTranslator.cs b/src/Npgsql/NameTranslation/NpgsqlSnakeCaseNameTranslator.cs index cdb9bb40a8..c4ba594ba7 100644 --- a/src/Npgsql/NameTranslation/NpgsqlSnakeCaseNameTranslator.cs +++ b/src/Npgsql/NameTranslation/NpgsqlSnakeCaseNameTranslator.cs @@ -11,6 +11,8 @@ namespace Npgsql.NameTranslation; /// public sealed class NpgsqlSnakeCaseNameTranslator : 
INpgsqlNameTranslator { + internal static NpgsqlSnakeCaseNameTranslator Instance { get; } = new(); + readonly CultureInfo _culture; /// diff --git a/src/Npgsql/Npgsql.csproj b/src/Npgsql/Npgsql.csproj index 0c1c0600ff..53a0e38377 100644 --- a/src/Npgsql/Npgsql.csproj +++ b/src/Npgsql/Npgsql.csproj @@ -25,14 +25,14 @@ - + - + @@ -55,5 +55,4 @@ NpgsqlStrings.resx - diff --git a/src/Npgsql/NpgsqlBatchCommandCollection.cs b/src/Npgsql/NpgsqlBatchCommandCollection.cs index 58227ac69a..a79afa359b 100644 --- a/src/Npgsql/NpgsqlBatchCommandCollection.cs +++ b/src/Npgsql/NpgsqlBatchCommandCollection.cs @@ -2,7 +2,6 @@ using System.Collections.Generic; using System.Data.Common; using System.Diagnostics.CodeAnalysis; -using System.Runtime.CompilerServices; namespace Npgsql; @@ -111,4 +110,4 @@ static NpgsqlBatchCommand Cast(DbBatchCommand? value) static void ThrowInvalidCastException(DbBatchCommand? value) => throw new InvalidCastException( $"The value \"{value}\" is not of type \"{nameof(NpgsqlBatchCommand)}\" and cannot be used in this batch command collection."); -} \ No newline at end of file +} diff --git a/src/Npgsql/NpgsqlBinaryExporter.cs b/src/Npgsql/NpgsqlBinaryExporter.cs index f772334feb..a7c0e395e9 100644 --- a/src/Npgsql/NpgsqlBinaryExporter.cs +++ b/src/Npgsql/NpgsqlBinaryExporter.cs @@ -6,9 +6,7 @@ using Microsoft.Extensions.Logging; using Npgsql.BackendMessages; using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.Internal.TypeMapping; -using Npgsql.TypeMapping; +using Npgsql.Internal.Postgres; using NpgsqlTypes; using static Npgsql.Util.Statics; @@ -20,23 +18,27 @@ namespace Npgsql; /// public sealed class NpgsqlBinaryExporter : ICancelable { + const int BeforeRow = -2; + const int BeforeColumn = -1; + #region Fields and Properties NpgsqlConnector _connector; NpgsqlReadBuffer _buf; - TypeMapper _typeMapper; bool _isConsumed, _isDisposed; - int _leftToReadInDataMsg, _columnLen; + long _endOfMessagePos; short _column; ulong _rowsExported; 
+ PgReader PgReader => _buf.PgReader; + /// /// The number of columns, as returned from the backend in the CopyInResponse. /// internal int NumColumns { get; private set; } - NpgsqlTypeHandler?[] _typeHandlerCache; + PgConverterInfo[] _columnInfoCache; readonly ILogger _copyLogger; @@ -61,10 +63,8 @@ internal NpgsqlBinaryExporter(NpgsqlConnector connector) { _connector = connector; _buf = connector.ReadBuffer; - _typeMapper = connector.TypeMapper; - _columnLen = int.MinValue; // Mark that the (first) column length hasn't been read yet - _column = -1; - _typeHandlerCache = null!; + _column = BeforeRow; + _columnInfoCache = null!; _copyLogger = connector.LoggingConfiguration.CopyLogger; } @@ -80,7 +80,7 @@ internal async Task Init(string copyToCommand, bool async, CancellationToken can switch (msg.Code) { case BackendMessageCode.CopyOutResponse: - copyOutResponse = (CopyOutResponseMessage) msg; + copyOutResponse = (CopyOutResponseMessage)msg; if (!copyOutResponse.IsBinary) { throw _connector.Break( @@ -98,14 +98,16 @@ internal async Task Init(string copyToCommand, bool async, CancellationToken can } NumColumns = copyOutResponse.NumColumns; - _typeHandlerCache = new NpgsqlTypeHandler[NumColumns]; + _columnInfoCache = new PgConverterInfo[NumColumns]; _rowsExported = 0; + _endOfMessagePos = _buf.CumulativeReadPosition; await ReadHeader(async); } async Task ReadHeader(bool async) { - _leftToReadInDataMsg = Expect(await _connector.ReadMessage(async), _connector).Length; + var msg = await _connector.ReadMessage(async); + _endOfMessagePos = _buf.CumulativeReadPosition + Expect(msg, _connector).Length; var headerLen = NpgsqlRawCopyStream.BinarySignature.Length + 4 + 4; await _buf.Ensure(headerLen, async); @@ -117,7 +119,6 @@ async Task ReadHeader(bool async) throw new NotSupportedException("Unsupported flags in COPY operation (OID inclusion?)"); _buf.ReadInt32(); // Header extensions, currently unused - _leftToReadInDataMsg -= headerLen; } #endregion @@ -148,38 +149,44 @@ 
public ValueTask StartRowAsync(CancellationToken cancellationToken = defaul async ValueTask StartRow(bool async, CancellationToken cancellationToken = default) { + CheckDisposed(); if (_isConsumed) return -1; using var registration = _connector.StartNestedCancellableOperation(cancellationToken); + // Consume and advance any active column. + if (_column >= 0) + await Commit(async, resumableOp: false); + // The very first row (i.e. _column == -1) is included in the header's CopyData message. // Otherwise we need to read in a new CopyData row (the docs specify that there's a CopyData // message per row). if (_column == NumColumns) - _leftToReadInDataMsg = Expect(await _connector.ReadMessage(async), _connector).Length; - else if (_column != -1) + { + var msg = Expect(await _connector.ReadMessage(async), _connector); + _endOfMessagePos = _buf.CumulativeReadPosition + msg.Length; + } + else if (_column != BeforeRow) ThrowHelper.ThrowInvalidOperationException("Already in the middle of a row"); await _buf.Ensure(2, async); - _leftToReadInDataMsg -= 2; var numColumns = _buf.ReadInt16(); if (numColumns == -1) { - Debug.Assert(_leftToReadInDataMsg == 0); Expect(await _connector.ReadMessage(async), _connector); Expect(await _connector.ReadMessage(async), _connector); Expect(await _connector.ReadMessage(async), _connector); - _column = -1; + _column = BeforeRow; _isConsumed = true; return -1; } Debug.Assert(numColumns == NumColumns); - _column = 0; + _column = BeforeColumn; _rowsExported++; return NumColumns; } @@ -194,7 +201,7 @@ async ValueTask StartRow(bool async, CancellationToken cancellationToken = /// specify the type. /// /// The value of the column - public T Read() => Read(false).GetAwaiter().GetResult(); + public T Read() => Read(async: false).GetAwaiter().GetResult(); /// /// Reads the current column, returns its value and moves ahead to the next column. 
@@ -209,22 +216,33 @@ async ValueTask StartRow(bool async, CancellationToken cancellationToken = public ValueTask ReadAsync(CancellationToken cancellationToken = default) { using (NoSynchronizationContextScope.Enter()) - return Read(true, cancellationToken); + return Read(async: true, cancellationToken); } ValueTask Read(bool async, CancellationToken cancellationToken = default) - { - CheckDisposed(); - - if (_column == -1 || _column == NumColumns) - ThrowHelper.ThrowInvalidOperationException("Not reading a row"); + => Read(async, null, cancellationToken); - var type = typeof(T); - var handler = _typeHandlerCache[_column]; - if (handler == null) - handler = _typeHandlerCache[_column] = _typeMapper.ResolveByClrType(type); + PgConverterInfo CreateConverterInfo(Type type, NpgsqlDbType? npgsqlDbType = null) + { + var options = _connector.SerializerOptions; + PgTypeId? pgTypeId = null; + if (npgsqlDbType.HasValue) + { + pgTypeId = npgsqlDbType.Value.ToDataTypeName() is { } name + ? options.GetCanonicalTypeId(name) + // Handle plugin types via lookup. + : GetRepresentationalOrDefault(npgsqlDbType.Value.ToUnqualifiedDataTypeNameOrThrow()); + } + var info = options.GetTypeInfo(type, pgTypeId) + ?? throw new NotSupportedException($"Reading is not supported for type '{type}'{(npgsqlDbType is null ? "" : $" and NpgsqlDbType '{npgsqlDbType}'")}"); + // Binary export has no type info so we only do caller-directed interpretation of data. + return info.Bind(new Field("?", info.PgTypeId!.Value, -1), DataFormat.Binary); - return DoRead(handler, async, cancellationToken); + PgTypeId GetRepresentationalOrDefault(string dataTypeName) + { + var type = options.DatabaseInfo.GetPostgresType(dataTypeName); + return options.ToCanonicalTypeId(type.GetRepresentationalType()); + } } /// @@ -240,7 +258,7 @@ ValueTask Read(bool async, CancellationToken cancellationToken = default) /// /// The .NET type of the column to be read. 
/// The value of the column - public T Read(NpgsqlDbType type) => Read(type, false).GetAwaiter().GetResult(); + public T Read(NpgsqlDbType type) => Read(async: false, type, CancellationToken.None).GetAwaiter().GetResult(); /// /// Reads the current column, returns its value according to and @@ -261,58 +279,76 @@ ValueTask Read(bool async, CancellationToken cancellationToken = default) public ValueTask ReadAsync(NpgsqlDbType type, CancellationToken cancellationToken = default) { using (NoSynchronizationContextScope.Enter()) - return Read(type, true, cancellationToken); + return Read(async: true, type, cancellationToken); } - ValueTask Read(NpgsqlDbType type, bool async, CancellationToken cancellationToken = default) + async ValueTask Read(bool async, NpgsqlDbType? type, CancellationToken cancellationToken) { CheckDisposed(); - if (_column == -1 || _column == NumColumns) + if (_column is BeforeRow) ThrowHelper.ThrowInvalidOperationException("Not reading a row"); - var handler = _typeHandlerCache[_column]; - if (handler == null) - handler = _typeHandlerCache[_column] = _typeMapper.ResolveByNpgsqlDbType(type); + using var registration = _connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); - return DoRead(handler, async, cancellationToken); - } + // Allow one more read if the field is a db null. + // We cannot allow endless rereads otherwise it becomes quite unclear when a column advance happens. + if (PgReader is { Resumable: true, FieldSize: -1 }) + { + await Commit(async, resumableOp: false); + return DbNullOrThrow(); + } - async ValueTask DoRead(NpgsqlTypeHandler handler, bool async, CancellationToken cancellationToken = default) - { - try + // We must commit the current column before reading the next one unless it was an IsNull call. 
+ PgConverterInfo info; + if (!PgReader.Resumable || PgReader.CurrentRemaining != PgReader.FieldSize) { - using var registration = _connector.StartNestedCancellableOperation(cancellationToken); + await Commit(async, resumableOp: false); + info = GetInfo(); - await ReadColumnLenIfNeeded(async); + // We need to get info after potential I/O as we don't know beforehand at what column we're at. + var columnLen = await ReadColumnLenIfNeeded(async, resumableOp: false); + if (_column == NumColumns) + ThrowHelper.ThrowInvalidOperationException("No more columns left in the current row"); - if (_columnLen == -1) - { -#pragma warning disable CS8653 // A default expression introduces a null value when 'T' is a non-nullable reference type. - // When T is a Nullable, we support returning null - if (NullableHandler.Exists) - return default!; -#pragma warning restore CS8653 - throw new InvalidCastException("Column is null"); - } + if (columnLen is -1) + return DbNullOrThrow(); - // If we know the entire column is already in memory, use the code path without async - var result = NullableHandler.Exists - ? _columnLen <= _buf.ReadBytesLeft - ? NullableHandler.Read(handler, _buf, _columnLen) - : await NullableHandler.ReadAsync(handler, _buf, _columnLen, async) - : _columnLen <= _buf.ReadBytesLeft - ? handler.Read(_buf, _columnLen) - : await handler.Read(_buf, _columnLen, async); - - _leftToReadInDataMsg -= _columnLen; - _columnLen = int.MinValue; // Mark that the (next) column length hasn't been read yet - _column++; - return result; } - catch (Exception e) + else + info = GetInfo(); + + T result; + if (async) + { + await PgReader.StartReadAsync(info.BufferRequirement, cancellationToken); + result = info.AsObject + ? 
(T)await info.Converter.ReadAsObjectAsync(PgReader, cancellationToken) + : await info.GetConverter().ReadAsync(PgReader, cancellationToken); + await PgReader.EndReadAsync(); + } + else { - _connector.Break(e); - throw; + PgReader.StartRead(info.BufferRequirement); + result = info.AsObject + ? (T)info.Converter.ReadAsObject(PgReader) + : info.GetConverter().Read(PgReader); + PgReader.EndRead(); + } + + return result; + + PgConverterInfo GetInfo() + { + ref var cachedInfo = ref _columnInfoCache[_column]; + return cachedInfo.IsDefault ? cachedInfo = CreateConverterInfo(typeof(T), type) : cachedInfo; + } + + T DbNullOrThrow() + { + // When T is a Nullable, we support returning null + if (default(T) is null && typeof(T).IsValueType) + return default!; + throw new InvalidCastException("Column is null"); } } @@ -323,8 +359,8 @@ public bool IsNull { get { - ReadColumnLenIfNeeded(false).GetAwaiter().GetResult(); - return _columnLen == -1; + Commit(async: false, resumableOp: true); + return ReadColumnLenIfNeeded(async: false, resumableOp: true).GetAwaiter().GetResult() is -1; } } @@ -348,26 +384,34 @@ async Task Skip(bool async, CancellationToken cancellationToken = default) using var registration = _connector.StartNestedCancellableOperation(cancellationToken); - await ReadColumnLenIfNeeded(async); - if (_columnLen != -1) - await _buf.Skip(_columnLen, async); - - _columnLen = int.MinValue; - _column++; + // We allow IsNull to have been called before skip. 
+ if (PgReader.Initialized && PgReader is not { Resumable: true, FieldSize: -1 }) + await Commit(async, resumableOp: false); + await ReadColumnLenIfNeeded(async, resumableOp: false); + await PgReader.Consume(async, cancellationToken: cancellationToken); } #endregion #region Utilities - async Task ReadColumnLenIfNeeded(bool async) + ValueTask Commit(bool async, bool resumableOp) { - if (_columnLen == int.MinValue) - { - await _buf.Ensure(4, async); - _columnLen = _buf.ReadInt32(); - _leftToReadInDataMsg -= 4; - } + var resuming = PgReader is { Initialized: true, Resumable: true } && resumableOp; + if (!resuming) + _column++; + return PgReader.Commit(async, resuming); + } + + async ValueTask ReadColumnLenIfNeeded(bool async, bool resumableOp) + { + if (PgReader is { Resumable: true, FieldSize: -1 }) + return -1; + + await _buf.Ensure(4, async); + var columnLen = _buf.ReadInt32(); + PgReader.Init(columnLen, DataFormat.Binary, resumableOp); + return PgReader.FieldSize; } void CheckDisposed() @@ -423,8 +467,10 @@ async ValueTask DisposeAsync(bool async) try { using var registration = _connector.StartNestedCancellableOperation(attemptPgCancellation: false); + // Be sure to commit the reader. 
+ await PgReader.Commit(async, resuming: false); // Finish the current CopyData message - _buf.Skip(_leftToReadInDataMsg); + await _buf.Skip(checked((int)(_endOfMessagePos - _buf.CumulativeReadPosition)), async); // Read to the end _connector.SkipUntil(BackendMessageCode.CopyDone); // We intentionally do not pass a CancellationToken since we don't want to cancel cleanup @@ -458,7 +504,6 @@ void Cleanup() _connector = null; } - _typeMapper = null; _buf = null; _isDisposed = true; } diff --git a/src/Npgsql/NpgsqlBinaryImporter.cs b/src/Npgsql/NpgsqlBinaryImporter.cs index be963c1552..a57c071448 100644 --- a/src/Npgsql/NpgsqlBinaryImporter.cs +++ b/src/Npgsql/NpgsqlBinaryImporter.cs @@ -1,6 +1,4 @@ using System; -using System.Diagnostics; -using System.Diagnostics.CodeAnalysis; using System.Threading; using System.Threading.Tasks; using Microsoft.Extensions.Logging; @@ -43,6 +41,7 @@ public sealed class NpgsqlBinaryImporter : ICancelable NpgsqlParameter?[] _params; readonly ILogger _copyLogger; + PgWriter _pgWriter = null!; // Setup in Init /// /// Current timeout @@ -82,7 +81,7 @@ internal async Task Init(string copyFromCommand, bool async, CancellationToken c switch (msg.Code) { case BackendMessageCode.CopyInResponse: - copyInResponse = (CopyInResponseMessage) msg; + copyInResponse = (CopyInResponseMessage)msg; if (!copyInResponse.IsBinary) { throw _connector.Break( @@ -104,6 +103,8 @@ internal async Task Init(string copyFromCommand, bool async, CancellationToken c _rowsImported = 0; _buf.StartCopyMode(); WriteHeader(); + // Only init after header. 
+ _pgWriter = _buf.GetWriter(_connector.DatabaseInfo); } void WriteHeader() @@ -144,6 +145,7 @@ async Task StartRow(bool async, CancellationToken cancellationToken = default) await _buf.Flush(async, cancellationToken); _buf.WriteInt16(NumColumns); + _pgWriter.Refresh(); _column = 0; _rowsImported++; } @@ -239,7 +241,7 @@ Task Write(T value, NpgsqlDbType npgsqlDbType, bool async, CancellationToken if (p == null) { // First row, create the parameter objects - _params[_column] = p = typeof(T) == typeof(object) + _params[_column] = p = typeof(T) == typeof(object) || typeof(T) == typeof(DBNull) ? new NpgsqlParameter() : new NpgsqlParameter(); p.NpgsqlDbType = npgsqlDbType; @@ -309,14 +311,14 @@ async Task Write(T value, NpgsqlParameter param, bool async, CancellationToke if (_column == -1) throw new InvalidOperationException("A row hasn't been started"); - if (value == null || value is DBNull) - { - await WriteNull(async, cancellationToken); - return; - } - - if (typeof(T) == typeof(object)) + if (typeof(T) == typeof(object) || typeof(T) == typeof(DBNull)) { + if (param.GetType() != typeof(NpgsqlParameter)) + { + var newParam = _params[_column] = new NpgsqlParameter(); + newParam.NpgsqlDbType = param.NpgsqlDbType; + param = newParam; + } param.Value = value; } else @@ -329,11 +331,17 @@ async Task Write(T value, NpgsqlParameter param, bool async, CancellationToke } typedParam.TypedValue = value; } - param.ResolveHandler(_connector.TypeMapper); - param.ValidateAndGetLength(); - param.LengthCache?.Rewind(); - await param.WriteWithLength(_buf, async, cancellationToken); - param.LengthCache?.Clear(); + param.ResolveTypeInfo(_connector.SerializerOptions); + param.Bind(out _, out _); + try + { + await param.Write(async, _pgWriter.WithFlushMode(async ? 
FlushMode.NonBlocking : FlushMode.Blocking), cancellationToken); + } + catch (Exception ex) + { + _connector.Break(ex); + throw; + } _column++; } @@ -363,6 +371,7 @@ async Task WriteNull(bool async, CancellationToken cancellationToken = default) await _buf.Flush(async, cancellationToken); _buf.WriteInt32(-1); + _pgWriter.Refresh(); _column++; } @@ -465,8 +474,8 @@ async ValueTask Complete(bool async, CancellationToken cancellationToken /// /// /// Note that if hasn't been invoked before calling this, the import will be cancelled and all changes will - /// be reverted. - /// + /// be reverted. + /// /// public void Dispose() => Close(); @@ -476,8 +485,8 @@ async ValueTask Complete(bool async, CancellationToken cancellationToken /// /// /// Note that if hasn't been invoked before calling this, the import will be cancelled and all changes will - /// be reverted. - /// + /// be reverted. + /// /// public ValueTask DisposeAsync() { @@ -513,8 +522,8 @@ async Task Cancel(bool async, CancellationToken cancellationToken = default) /// /// /// Note that if hasn't been invoked before calling this, the import will be cancelled and all changes will - /// be reverted. - /// + /// be reverted. + /// /// public void Close() => CloseAsync(false).GetAwaiter().GetResult(); @@ -524,8 +533,8 @@ async Task Cancel(bool async, CancellationToken cancellationToken = default) /// /// /// Note that if hasn't been invoked before calling this, the import will be cancelled and all changes will - /// be reverted. - /// + /// be reverted. 
+ /// /// public ValueTask CloseAsync(CancellationToken cancellationToken = default) { diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index 6f83b8f0ad..77c192e601 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -17,6 +17,7 @@ using System.Threading.Channels; using Microsoft.Extensions.Logging; using Npgsql.Internal; +using Npgsql.Internal.Postgres; using Npgsql.Properties; namespace Npgsql; @@ -483,14 +484,14 @@ void DeriveParametersForFunction() throw new InvalidOperationException($"{CommandText} does not exist in pg_proc"); } - var typeMapper = c.InternalConnection!.Connector!.TypeMapper; + var serializerOptions = c.InternalConnection!.Connector!.SerializerOptions; for (var i = 0; i < types.Length; i++) { var param = new NpgsqlParameter(); - var (npgsqlDbType, postgresType) = typeMapper.GetTypeInfoByOid(types[i]); - + var postgresType = serializerOptions.DatabaseInfo.GetPostgresType(types[i]); + var npgsqlDbType = postgresType.DataTypeName.ToNpgsqlDbType(); param.DataTypeName = postgresType.DisplayName; param.PostgresType = postgresType; if (npgsqlDbType.HasValue) @@ -560,8 +561,9 @@ void DeriveParametersForQuery(NpgsqlConnector connector) var param = batchCommand.PositionalParameters[i]; var paramOid = paramTypeOIDs[i]; - var (npgsqlDbType, postgresType) = connector.TypeMapper.GetTypeInfoByOid(paramOid); - + var postgresType = connector.SerializerOptions.DatabaseInfo.GetPostgresType(paramOid); + // We want to keep any domain types visible on the parameter, it will internally do a representational lookup again if necessary. + var npgsqlDbType = postgresType.GetRepresentationalType().DataTypeName.ToNpgsqlDbType(); if (param.NpgsqlDbType != NpgsqlDbType.Unknown && param.NpgsqlDbType != npgsqlDbType) throw new NpgsqlException( "The backend parser inferred different types for parameters with the same name. 
Please try explicit casting within your SQL statement or batch or use different placeholder names."); @@ -649,7 +651,7 @@ Task Prepare(bool async, CancellationToken cancellationToken = default) { foreach (var batchCommand in InternalBatchCommands) { - batchCommand.Parameters.ProcessParameters(connector.TypeMapper, validateValues: false, CommandType); + batchCommand.Parameters.ProcessParameters(connector.SerializerOptions, validateValues: false, CommandType); ProcessRawQuery(connector.SqlQueryParser, connector.UseConformingStrings, batchCommand); needToPrepare = batchCommand.ExplicitPrepare(connector) || needToPrepare; @@ -660,7 +662,7 @@ Task Prepare(bool async, CancellationToken cancellationToken = default) } else { - Parameters.ProcessParameters(connector.TypeMapper, validateValues: false, CommandType); + Parameters.ProcessParameters(connector.SerializerOptions, validateValues: false, CommandType); ProcessRawQuery(connector.SqlQueryParser, connector.UseConformingStrings, batchCommand: null); foreach (var batchCommand in InternalBatchCommands) @@ -1346,7 +1348,6 @@ internal virtual async ValueTask ExecuteReader(CommandBehavior { if (connector is not null) { - var dataSource = connector.DataSource; var logger = connector.CommandLogger; cancellationToken.ThrowIfCancellationRequested(); @@ -1378,7 +1379,7 @@ internal virtual async ValueTask ExecuteReader(CommandBehavior goto case false; } - batchCommand.Parameters.ProcessParameters(dataSource.TypeMapper, validateParameterValues, CommandType); + batchCommand.Parameters.ProcessParameters(connector.SerializerOptions, validateParameterValues, CommandType); } } else @@ -1391,7 +1392,7 @@ internal virtual async ValueTask ExecuteReader(CommandBehavior ResetPreparation(); goto case false; } - Parameters.ProcessParameters(dataSource.TypeMapper, validateParameterValues, CommandType); + Parameters.ProcessParameters(connector.SerializerOptions, validateParameterValues, CommandType); } 
NpgsqlEventSource.Log.CommandStartPrepared(); @@ -1407,7 +1408,7 @@ internal virtual async ValueTask ExecuteReader(CommandBehavior { var batchCommand = InternalBatchCommands[i]; - batchCommand.Parameters.ProcessParameters(dataSource.TypeMapper, validateParameterValues, CommandType); + batchCommand.Parameters.ProcessParameters(connector.SerializerOptions, validateParameterValues, CommandType); ProcessRawQuery(connector.SqlQueryParser, connector.UseConformingStrings, batchCommand); if (connector.Settings.MaxAutoPrepare > 0 && batchCommand.TryAutoPrepare(connector)) @@ -1419,7 +1420,7 @@ internal virtual async ValueTask ExecuteReader(CommandBehavior } else { - Parameters.ProcessParameters(dataSource.TypeMapper, validateParameterValues, CommandType); + Parameters.ProcessParameters(connector.SerializerOptions, validateParameterValues, CommandType); ProcessRawQuery(connector.SqlQueryParser, connector.UseConformingStrings, batchCommand: null); if (connector.Settings.MaxAutoPrepare > 0) @@ -1513,13 +1514,13 @@ internal virtual async ValueTask ExecuteReader(CommandBehavior { foreach (var batchCommand in InternalBatchCommands) { - batchCommand.Parameters.ProcessParameters(dataSource.TypeMapper, validateValues: true, CommandType); + batchCommand.Parameters.ProcessParameters(dataSource.SerializerOptions, validateValues: true, CommandType); ProcessRawQuery(null, standardConformingStrings: true, batchCommand); } } else { - Parameters.ProcessParameters(dataSource.TypeMapper, validateValues: true, CommandType); + Parameters.ProcessParameters(dataSource.SerializerOptions, validateValues: true, CommandType); ProcessRawQuery(null, standardConformingStrings: true, batchCommand: null); } @@ -1733,10 +1734,9 @@ internal void FixupRowDescription(RowDescriptionMessage rowDescription, bool isF for (var i = 0; i < rowDescription.Count; i++) { var field = rowDescription[i]; - field.FormatCode = (UnknownResultTypeList == null || !isFirst ? 
AllResultTypesAreUnknown : UnknownResultTypeList[i]) - ? FormatCode.Text - : FormatCode.Binary; - field.ResolveHandler(); + field.DataFormat = (UnknownResultTypeList == null || !isFirst ? AllResultTypesAreUnknown : UnknownResultTypeList[i]) + ? DataFormat.Text + : DataFormat.Binary; } } @@ -1818,7 +1818,11 @@ public virtual NpgsqlCommand Clone() { var clone = new NpgsqlCommand(CommandText, InternalConnection, Transaction) { - CommandTimeout = CommandTimeout, CommandType = CommandType, DesignTimeVisible = DesignTimeVisible, _allResultTypesAreUnknown = _allResultTypesAreUnknown, _unknownResultTypeList = _unknownResultTypeList + CommandTimeout = CommandTimeout, + CommandType = CommandType, + DesignTimeVisible = DesignTimeVisible, + _allResultTypesAreUnknown = _allResultTypesAreUnknown, + _unknownResultTypeList = _unknownResultTypeList }; _parameters.CloneTo(clone._parameters); return clone; diff --git a/src/Npgsql/NpgsqlConnection.cs b/src/Npgsql/NpgsqlConnection.cs index 627dcb1443..53e2afe5b0 100644 --- a/src/Npgsql/NpgsqlConnection.cs +++ b/src/Npgsql/NpgsqlConnection.cs @@ -884,7 +884,7 @@ async Task CloseAsync(bool async) } } - Debug.Assert(connector.IsReady || connector.IsBroken); + Debug.Assert(connector.IsReady || connector.IsBroken, $"Connector is not ready or broken during close, it's {connector.State}"); Debug.Assert(connector.CurrentReader == null); Debug.Assert(connector.CurrentCopyOperation == null); diff --git a/src/Npgsql/NpgsqlConnectionStringBuilder.cs b/src/Npgsql/NpgsqlConnectionStringBuilder.cs index b927807844..c1a11c34c3 100644 --- a/src/Npgsql/NpgsqlConnectionStringBuilder.cs +++ b/src/Npgsql/NpgsqlConnectionStringBuilder.cs @@ -9,7 +9,6 @@ using System.Linq; using Npgsql.Internal; using Npgsql.Netstandard20; -using Npgsql.Properties; using Npgsql.Replication; namespace Npgsql; diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index db30da551c..cc86de063e 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ 
b/src/Npgsql/NpgsqlDataReader.cs @@ -1,4 +1,5 @@ using System; +using System.Buffers; using System.Collections; using System.Collections.Generic; using System.Collections.ObjectModel; @@ -10,17 +11,14 @@ using System.Linq; using System.Runtime.CompilerServices; using System.Runtime.ExceptionServices; -using System.Text; using System.Threading; using System.Threading.Tasks; using Microsoft.Extensions.Logging; using Npgsql.BackendMessages; using Npgsql.Internal; -using Npgsql.Internal.TypeHandlers; -using Npgsql.Internal.TypeHandling; +using Npgsql.Internal.Converters; using Npgsql.PostgresTypes; using Npgsql.Schema; -using Npgsql.Util; using NpgsqlTypes; using static Npgsql.Util.Statics; @@ -34,6 +32,9 @@ namespace Npgsql; public sealed class NpgsqlDataReader : DbDataReader, IDbColumnSchemaGenerator #pragma warning restore CA1010 { + static readonly Task TrueTask = Task.FromResult(true); + static readonly Task FalseTask = Task.FromResult(false); + internal NpgsqlCommand Command { get; private set; } = default!; internal NpgsqlConnector Connector { get; } NpgsqlConnection? _connection; @@ -52,6 +53,7 @@ public sealed class NpgsqlDataReader : DbDataReader, IDbColumnSchemaGenerator internal ReaderState State = ReaderState.Disposed; internal NpgsqlReadBuffer Buffer = default!; + PgReader PgReader => Buffer.PgReader; /// /// Holds the list of statements being executed by this reader. @@ -81,14 +83,6 @@ public sealed class NpgsqlDataReader : DbDataReader, IDbColumnSchemaGenerator /// int _column; - /// - /// For streaming types (e.g. bytea), holds the byte length of the column. - /// Does not include the length prefix. - /// - internal int ColumnLen; - - internal int PosInColumn; - /// /// The position in the buffer at which the current data row message ends. /// Used only when the row is consumed non-sequentially. 
@@ -102,13 +96,16 @@ public sealed class NpgsqlDataReader : DbDataReader, IDbColumnSchemaGenerator /// bool _canConsumeRowNonSequentially; - int _charPos; - /// /// The RowDescription message for the current resultset being processed /// internal RowDescriptionMessage? RowDescription; + /// + /// Stores the last converter info resolved by column, to speed up repeated reading. + /// + PgConverterInfo[]? ColumnInfoCache { get; set; } + ulong? _recordsAffected; /// @@ -124,17 +121,6 @@ public sealed class NpgsqlDataReader : DbDataReader, IDbColumnSchemaGenerator bool _isSchemaOnly; bool _isSequential; - /// - /// A stream that has been opened on a column. - /// - NpgsqlReadBuffer.ColumnStream? _columnStream; - - /// - /// Used to keep track of every unique row this reader object ever traverses. - /// This is used to detect whether nested DbDataReaders are still valid. - /// - internal ulong UniqueRowId; - internal NpgsqlNestedDataReader? CachedFreeNestedDataReader; long _startTimestamp; @@ -153,6 +139,7 @@ internal void Init( long startTimestamp = 0, Task? sendTask = null) { + Debug.Assert(ColumnInfoCache is null); Command = command; _connection = command.InternalConnection; _behavior = behavior; @@ -179,7 +166,6 @@ public override bool Read() { CheckClosedOrDisposed(); - UniqueRowId++; var fastRead = TryFastRead(); return fastRead.HasValue ? fastRead.Value @@ -197,10 +183,9 @@ public override Task ReadAsync(CancellationToken cancellationToken) { CheckClosedOrDisposed(); - UniqueRowId++; var fastRead = TryFastRead(); if (fastRead.HasValue) - return fastRead.Value ? PGUtil.TrueTask : PGUtil.FalseTask; + return fastRead.Value ? 
TrueTask : FalseTask; using (NoSynchronizationContextScope.Enter()) return Read(true, cancellationToken); @@ -252,8 +237,7 @@ public override Task ReadAsync(CancellationToken cancellationToken) async Task Read(bool async, CancellationToken cancellationToken = default) { - var registration = Connector.StartNestedCancellableOperation(cancellationToken); - + using var registration = Connector.StartNestedCancellableOperation(cancellationToken); try { switch (State) @@ -304,13 +288,11 @@ async Task Read(bool async, CancellationToken cancellationToken = default) } catch { - State = ReaderState.Consumed; + // Break may have progressed the reader already. + if (State is not ReaderState.Closed) + State = ReaderState.Consumed; throw; } - finally - { - registration.Dispose(); - } } ValueTask ReadMessage(bool async) @@ -387,7 +369,11 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo case BackendMessageCode.EmptyQueryResponse: ProcessMessage(completedMsg); - if (_statements[StatementIndex].AppendErrorBarrier ?? Command.EnableErrorBarriers) + var statement = _statements[StatementIndex]; + if (statement.IsPrepared && ColumnInfoCache is not null) + RowDescription!.SetConverterInfoCache(new(ColumnInfoCache, 0, _numColumns)); + + if (statement.AppendErrorBarrier ?? 
Command.EnableErrorBarriers) Expect(await Connector.ReadMessage(async), Connector); break; @@ -402,8 +388,11 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo break; case ReaderState.BetweenResults: + { + if (StatementIndex >= 0 && _statements[StatementIndex].IsPrepared && ColumnInfoCache is not null) + RowDescription!.SetConverterInfoCache(new(ColumnInfoCache, 0, _numColumns)); break; - + } case ReaderState.Consumed: case ReaderState.Closed: case ReaderState.Disposed: @@ -474,7 +463,20 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo }; } - if (RowDescription == null) + if (RowDescription is not null) + { + if (ColumnInfoCache?.Length >= RowDescription.Count) + Array.Clear(ColumnInfoCache, 0, RowDescription.Count); + else + { + if (ColumnInfoCache is { } cache) + ArrayPool.Shared.Return(cache, clearArray: true); + ColumnInfoCache = ArrayPool.Shared.Rent(RowDescription.Count); + } + if (statement.IsPrepared) + RowDescription.LoadConverterInfoCache(ColumnInfoCache); + } + else { // Statement did not generate a resultset (e.g. INSERT) // Read and process its completion message and move on to the next statement @@ -605,7 +607,9 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo } } - State = ReaderState.Consumed; + // Break may have progressed the reader already. 
+ if (State is not ReaderState.Closed) + State = ReaderState.Consumed; throw; } } @@ -648,12 +652,11 @@ void PopulateOutputParameters() p.Value = pending.Dequeue(); } + PgReader.Commit(async: false, resuming: false).GetAwaiter().GetResult(); State = ReaderState.BeforeResult; // Set the state back Buffer.ReadPosition = currentPosition; // Restore position _column = -1; - ColumnLen = -1; - PosInColumn = 0; } /// @@ -739,18 +742,29 @@ async Task NextResultSchemaOnly(bool async, bool isConsuming = false, Canc } // Found a resultset - if (RowDescription != null) + if (RowDescription is not null) + { + if (ColumnInfoCache?.Length >= RowDescription.Count) + Array.Clear(ColumnInfoCache, 0, RowDescription.Count); + else + { + if (ColumnInfoCache is { } cache) + ArrayPool.Shared.Return(cache, clearArray: true); + ColumnInfoCache = ArrayPool.Shared.Rent(RowDescription.Count); + } return true; + } } - RowDescription = null; State = ReaderState.Consumed; - + RowDescription = null; return false; } catch (Exception e) { - State = ReaderState.Consumed; + // Break may have progressed the reader already. + if (State is not ReaderState.Closed) + State = ReaderState.Consumed; // Reference the triggering statement from the exception if (e is PostgresException postgresException && StatementIndex >= 0 && StatementIndex < _statements.Count) @@ -832,12 +846,11 @@ void ProcessDataRowMessage(DataRowMessage msg) // recapture the connector's buffer on each new DataRow. 
// Note that this can happen even in sequential mode, if the row description message is big // (see #2003) - Buffer = Connector.ReadBuffer; + if (!ReferenceEquals(Buffer, Connector.ReadBuffer)) + Buffer = Connector.ReadBuffer; _hasRows = true; _column = -1; - ColumnLen = -1; - PosInColumn = 0; // We assume that the row's number of columns is identical to the description's _numColumns = Buffer.ReadInt16(); @@ -1115,7 +1128,7 @@ internal async Task Close(bool connectionClosing, bool async, bool isDisposing) { await Consume(async); } - catch (Exception ex) when (ex is OperationCanceledException or NpgsqlException { InnerException : TimeoutException }) + catch (Exception ex) when (ex is OperationCanceledException or NpgsqlException { InnerException: TimeoutException }) { // Timeout/cancellation - completely normal, consume has basically completed. } @@ -1187,6 +1200,12 @@ internal async Task Cleanup(bool async, bool connectionClosing = false, bool isD } } + if (ColumnInfoCache is { } cache) + { + ColumnInfoCache = null; + ArrayPool.Shared.Return(cache, clearArray: true); + } + State = ReaderState.Closed; Command.State = CommandState.Idle; Connector.CurrentReader = null; @@ -1238,84 +1257,84 @@ internal async Task Cleanup(bool async, bool connectionClosing = false, bool isD /// /// The zero-based column ordinal. /// The value of the specified column. - public override bool GetBoolean(int ordinal) => GetFieldValue(ordinal); + public override bool GetBoolean(int ordinal) => GetFieldValueCore(ordinal); /// /// Gets the value of the specified column as a byte. /// /// The zero-based column ordinal. /// The value of the specified column. - public override byte GetByte(int ordinal) => GetFieldValue(ordinal); + public override byte GetByte(int ordinal) => GetFieldValueCore(ordinal); /// /// Gets the value of the specified column as a single character. /// /// The zero-based column ordinal. /// The value of the specified column. 
- public override char GetChar(int ordinal) => GetFieldValue(ordinal); + public override char GetChar(int ordinal) => GetFieldValueCore(ordinal); /// /// Gets the value of the specified column as a 16-bit signed integer. /// /// The zero-based column ordinal. /// The value of the specified column. - public override short GetInt16(int ordinal) => GetFieldValue(ordinal); + public override short GetInt16(int ordinal) => GetFieldValueCore(ordinal); /// /// Gets the value of the specified column as a 32-bit signed integer. /// /// The zero-based column ordinal. /// The value of the specified column. - public override int GetInt32(int ordinal) => GetFieldValue(ordinal); + public override int GetInt32(int ordinal) => GetFieldValueCore(ordinal); /// /// Gets the value of the specified column as a 64-bit signed integer. /// /// The zero-based column ordinal. /// The value of the specified column. - public override long GetInt64(int ordinal) => GetFieldValue(ordinal); + public override long GetInt64(int ordinal) => GetFieldValueCore(ordinal); /// /// Gets the value of the specified column as a object. /// /// The zero-based column ordinal. /// The value of the specified column. - public override DateTime GetDateTime(int ordinal) => GetFieldValue(ordinal); + public override DateTime GetDateTime(int ordinal) => GetFieldValueCore(ordinal); /// /// Gets the value of the specified column as an instance of . /// /// The zero-based column ordinal. /// The value of the specified column. - public override string GetString(int ordinal) => GetFieldValue(ordinal); + public override string GetString(int ordinal) => GetFieldValueCore(ordinal); /// /// Gets the value of the specified column as a object. /// /// The zero-based column ordinal. /// The value of the specified column. 
- public override decimal GetDecimal(int ordinal) => GetFieldValue(ordinal); + public override decimal GetDecimal(int ordinal) => GetFieldValueCore(ordinal); /// /// Gets the value of the specified column as a double-precision floating point number. /// /// The zero-based column ordinal. /// The value of the specified column. - public override double GetDouble(int ordinal) => GetFieldValue(ordinal); + public override double GetDouble(int ordinal) => GetFieldValueCore(ordinal); /// /// Gets the value of the specified column as a single-precision floating point number. /// /// The zero-based column ordinal. /// The value of the specified column. - public override float GetFloat(int ordinal) => GetFieldValue(ordinal); + public override float GetFloat(int ordinal) => GetFieldValueCore(ordinal); /// /// Gets the value of the specified column as a globally-unique identifier (GUID). /// /// The zero-based column ordinal. /// The value of the specified column. - public override Guid GetGuid(int ordinal) => GetFieldValue(ordinal); + public override Guid GetGuid(int ordinal) => GetFieldValueCore(ordinal); /// /// Populates an array of objects with the column values of the current row. @@ -1356,7 +1375,7 @@ public override int GetValues(object[] values) /// /// The zero-based column ordinal. /// The value of the specified column. - public TimeSpan GetTimeSpan(int ordinal) => GetFieldValue(ordinal); + public TimeSpan GetTimeSpan(int ordinal) => GetFieldValueCore(ordinal); /// protected override DbDataReader GetDbDataReader(int ordinal) => GetData(ordinal); @@ -1370,30 +1389,33 @@ public override int GetValues(object[] values) /// A data reader. public new NpgsqlNestedDataReader GetData(int ordinal) { + if (_isSequential) + throw new NotSupportedException("GetData() not supported in sequential mode."); + var field = CheckRowAndGetField(ordinal); var type = field.PostgresType; var isArray = type is PostgresArrayType; var elementType = isArray ? 
((PostgresArrayType)type).Element : type; var compositeType = elementType as PostgresCompositeType; - if (elementType.InternalName != "record" && compositeType == null) + if (field.DataFormat is DataFormat.Text || (elementType.InternalName != "record" && compositeType == null)) throw new InvalidCastException("GetData() not supported for type " + field.TypeDisplayName); - SeekToColumn(ordinal, false).GetAwaiter().GetResult(); - if (ColumnLen == -1) + var columnLength = SeekToColumn(async: false, ordinal, field, resumableOp: true).GetAwaiter().GetResult(); + if (columnLength is -1) ThrowHelper.ThrowInvalidCastException_NoValue(field); - if (_isSequential) - throw new NotSupportedException("GetData() not supported in sequential mode."); + if (PgReader.FieldOffset > 0) + PgReader.Rewind(PgReader.FieldOffset); var reader = CachedFreeNestedDataReader; if (reader != null) { CachedFreeNestedDataReader = null; - reader.Init(UniqueRowId, compositeType); + reader.Init(compositeType); } else { - reader = new NpgsqlNestedDataReader(this, null, UniqueRowId, 1, compositeType); + reader = new NpgsqlNestedDataReader(this, null, 1, compositeType); } if (isArray) reader.InitArray(); @@ -1425,34 +1447,22 @@ public override long GetBytes(int ordinal, long dataOffset, byte[]? 
buffer, int throw new IndexOutOfRangeException($"length must be between 0 and {buffer.Length - bufferOffset}"); var field = CheckRowAndGetField(ordinal); - var handler = field.Handler; - if (!(handler is ByteaHandler)) - throw new InvalidCastException("GetBytes() not supported for type " + field.Name); - - SeekToColumn(ordinal, false).GetAwaiter().GetResult(); - if (ColumnLen is -1) + var columnLength = SeekToColumn(async: false, ordinal, field, resumableOp: true).GetAwaiter().GetResult(); + if (columnLength == -1) ThrowHelper.ThrowInvalidCastException_NoValue(field); if (buffer is null) - return ColumnLen; - - var dataOffset2 = (int)dataOffset; - SeekInColumn(dataOffset2, false).GetAwaiter().GetResult(); + return columnLength; - // Attempt to read beyond the end of the column - if (dataOffset2 + length > ColumnLen) - length = Math.Max(ColumnLen - dataOffset2, 0); - - var left = length; - while (left > 0) - { - var read = Buffer.Read(new Span(buffer, bufferOffset, left)); - bufferOffset += read; - left -= read; - } + // Move to offset + if (_isSequential && PgReader.FieldOffset > dataOffset) + ThrowHelper.ThrowInvalidOperationException("Attempt to read a position in the column which has already been read"); - PosInColumn += length; + PgReader.Seek((int)dataOffset); + // At offset, read into buffer. + length = Math.Min(length, PgReader.FieldRemaining); + PgReader.ReadBytes(new Span(buffer, bufferOffset, length)); return length; } @@ -1461,7 +1471,8 @@ public override long GetBytes(int ordinal, long dataOffset, byte[]? buffer, int /// /// The zero-based column ordinal. /// The returned object. - public override Stream GetStream(int ordinal) => GetStream(ordinal, false).Result; + public override Stream GetStream(int ordinal) + => GetFieldValueCore(ordinal); /// /// Retrieves data as a . @@ -1472,31 +1483,7 @@ public override long GetBytes(int ordinal, long dataOffset, byte[]? buffer, int /// /// The returned object. 
public Task GetStreamAsync(int ordinal, CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return GetStream(ordinal, true, cancellationToken).AsTask(); - } - - ValueTask GetStream(int ordinal, bool async, CancellationToken cancellationToken = default) => - GetStreamInternal(CheckRowAndGetField(ordinal), ordinal, async, cancellationToken); - - async ValueTask GetStreamInternal(FieldDescription field, int ordinal, bool async, CancellationToken cancellationToken = default) - { - if (_columnStream is { IsDisposed: false }) - ThrowHelper.ThrowInvalidOperationException("A stream is already open for this reader"); - - using var registration = Connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); - - await SeekToColumn(ordinal, async, cancellationToken); - if (_isSequential) - CheckColumnStart(); - - if (ColumnLen == -1) - ThrowHelper.ThrowInvalidCastException_NoValue(field); - - PosInColumn += ColumnLen; - return _columnStream = (NpgsqlReadBuffer.ColumnStream)Buffer.GetStream(ColumnLen, !_isSequential); - } + => GetFieldValueAsync(ordinal, cancellationToken); #endregion @@ -1520,96 +1507,30 @@ public override long GetChars(int ordinal, long dataOffset, char[]? buffer, int if (buffer != null && (length < 0 || length > buffer.Length - bufferOffset)) throw new IndexOutOfRangeException($"length must be between 0 and {buffer.Length - bufferOffset}"); - var field = CheckRowAndGetField(ordinal); - var handler = field.Handler as TextHandler; - if (handler == null) - throw new InvalidCastException("The GetChars method is not supported for type " + field.Name); + // Check whether we can do resumable reads. 
+ var field = GetInfo(ordinal, typeof(GetChars), out var converter, out var bufferRequirement, out var asObject); + if (converter is not IResumableRead { Supported: true }) + throw new NotSupportedException("The GetChars method is not supported for this column type"); - SeekToColumn(ordinal, false).GetAwaiter().GetResult(); - if (ColumnLen == -1) + var columnLength = SeekToColumn(async: false, ordinal, field, resumableOp: true).GetAwaiter().GetResult(); + if (columnLength == -1) ThrowHelper.ThrowInvalidCastException_NoValue(field); - if (PosInColumn == 0) - _charPos = 0; - - var decoder = Buffer.TextEncoding.GetDecoder(); + dataOffset = buffer is null ? 0 : dataOffset; + PgReader.InitCharsRead(checked((int)dataOffset), + buffer is not null ? new ArraySegment(buffer, bufferOffset, length) : (ArraySegment?)null, + out var previousDataOffset); - if (buffer == null) - { - // Note: Getting the length of a text column means decoding the entire field, - // very inefficient and also consumes the column in sequential mode. But this seems to - // be SqlClient's behavior as well. - var (bytesSkipped, charsSkipped) = SkipChars(decoder, int.MaxValue, ColumnLen - PosInColumn); - Debug.Assert(bytesSkipped == ColumnLen - PosInColumn); - PosInColumn += bytesSkipped; - _charPos += charsSkipped; - return _charPos; - } - - if (PosInColumn == ColumnLen || dataOffset < _charPos) - { - // Either the column has already been read (e.g. GetString()) or a previous GetChars() - // has positioned us in the column *after* the requested read start offset. 
Seek back - // (this will throw for sequential) - SeekInColumn(0, false).GetAwaiter().GetResult(); - _charPos = 0; - } - - if (dataOffset > _charPos) - { - var charsToSkip = (int)dataOffset - _charPos; - var (bytesSkipped, charsSkipped) = SkipChars(decoder, charsToSkip, ColumnLen - PosInColumn); - decoder.Reset(); - PosInColumn += bytesSkipped; - _charPos += charsSkipped; - if (charsSkipped < charsToSkip) // data offset is beyond the column's end - return 0; - } - - // We're now positioned at the start of the segment of characters we need to read. - if (length == 0) - return 0; - - var (bytesRead, charsRead) = DecodeChars(decoder, buffer.AsSpan(bufferOffset, length), ColumnLen - PosInColumn); - - PosInColumn += bytesRead; - _charPos += charsRead; - return charsRead; - } - - (int BytesRead, int CharsRead) DecodeChars(Decoder decoder, Span output, int byteCount) - { - var (bytesRead, charsRead) = (0, 0); - var outputLength = output.Length; - - while (true) - { - Buffer.Ensure(1); // Make sure we have at least some data - var maxBytes = Math.Min(byteCount - bytesRead, Buffer.ReadBytesLeft); - var bytes = Buffer.Buffer.AsSpan(Buffer.ReadPosition, maxBytes); - decoder.Convert(bytes, output, false, out var bytesUsed, out var charsUsed, out _); - Buffer.ReadPosition += bytesUsed; - bytesRead += bytesUsed; - charsRead += charsUsed; - if (charsRead == outputLength || bytesRead == byteCount) - break; - output = output.Slice(charsUsed); - } + if (_isSequential && previousDataOffset > dataOffset) + ThrowHelper.ThrowInvalidOperationException("Attempt to read a position in the column which has already been read"); - return (bytesRead, charsRead); - } - - internal (int BytesSkipped, int CharsSkipped) SkipChars(Decoder decoder, int charCount, int byteCount) - { - Span tempCharBuf = stackalloc char[512]; - var (charsSkipped, bytesSkipped) = (0, 0); - while (charsSkipped < charCount && bytesSkipped < byteCount) - { - var (bytesRead, charsRead) = DecodeChars(decoder, 
tempCharBuf.Slice(0, Math.Min(charCount, tempCharBuf.Length)), byteCount); - bytesSkipped += bytesRead; - charsSkipped += charsRead; - } - return (bytesSkipped, charsSkipped); + PgReader.StartRead(bufferRequirement); + var result = asObject + ? (GetChars)converter.ReadAsObject(PgReader) + : ((PgConverter)converter).Read(PgReader); + PgReader.AdvanceCharsRead(result.Read); + PgReader.EndRead(); + return result.Read; } /// @@ -1618,7 +1539,7 @@ public override long GetChars(int ordinal, long dataOffset, char[]? buffer, int /// The zero-based column ordinal. /// The returned object. public override TextReader GetTextReader(int ordinal) - => GetTextReader(ordinal, false).Result; + => GetFieldValueCore(ordinal); /// /// Retrieves data as a . @@ -1629,25 +1550,7 @@ public override TextReader GetTextReader(int ordinal) /// /// The returned object. public Task GetTextReaderAsync(int ordinal, CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return GetTextReader(ordinal, true, cancellationToken).AsTask(); - } - - async ValueTask GetTextReader(int ordinal, bool async, CancellationToken cancellationToken = default) - { - var field = CheckRowAndGetField(ordinal); - - if (field.Handler is ITextReaderHandler handler) - { - var stream = async - ? 
await GetStreamInternal(field, ordinal, true, cancellationToken) - : GetStreamInternal(field, ordinal, false, CancellationToken.None).Result; - return handler.GetTextReader(stream, Buffer); - } - - throw new InvalidCastException($"The GetTextReader method is not supported for type {field.PostgresType.DisplayName}"); - } + => GetFieldValueAsync(ordinal, cancellationToken); #endregion @@ -1664,18 +1567,40 @@ async ValueTask GetTextReader(int ordinal, bool async, CancellationT /// public override Task GetFieldValueAsync(int ordinal, CancellationToken cancellationToken) { - if (typeof(T) == typeof(Stream)) - return (Task)(object)GetStreamAsync(ordinal, cancellationToken); - - if (typeof(T) == typeof(TextReader)) - return (Task)(object)GetTextReaderAsync(ordinal, cancellationToken); - // In non-sequential, we know that the column is already buffered - no I/O will take place if (!_isSequential) - return Task.FromResult(GetFieldValue(ordinal)); + return Task.FromResult(GetFieldValueCore(ordinal)); using (NoSynchronizationContextScope.Enter()) - return GetFieldValueSequential(ordinal, true, cancellationToken).AsTask(); + return Core(ordinal, cancellationToken).AsTask(); + + async ValueTask Core(int ordinal, CancellationToken cancellationToken) + { + using var registration = Connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); + var isStream = typeof(T) == typeof(Stream); + var field = GetInfo(ordinal, isStream ? null : typeof(T), out var converter, out var bufferRequirement, out var asObject); + + var columnLength = await SeekToColumn(async: true, ordinal, field); + if (columnLength == -1) + return DbNullValueOrThrow(field); + + if (isStream || typeof(T) == typeof(TextReader)) + { + PgReader.ThrowIfStreamActive(); + + // The only statically mapped converter, it always exists. 
+ if (isStream) + return (T)(object)PgReader.GetStream(canSeek: !_isSequential); + } + + Debug.Assert(asObject || converter is PgConverter); + await PgReader.StartReadAsync(bufferRequirement, cancellationToken); + var result = asObject + ? (T)await converter.ReadAsObjectAsync(PgReader, cancellationToken) + : await Unsafe.As>(converter).ReadAsync(PgReader, cancellationToken); + await PgReader.EndReadAsync(); + return result; + } } /// @@ -1684,93 +1609,40 @@ public override Task GetFieldValueAsync(int ordinal, CancellationToken can /// Synchronously gets the value of the specified column as a type. /// The column to be retrieved. /// The column to be retrieved. - public override T GetFieldValue(int ordinal) - { - if (typeof(T) == typeof(Stream)) - return (T)(object)GetStream(ordinal); + public override T GetFieldValue(int ordinal) => GetFieldValueCore(ordinal); - if (typeof(T) == typeof(TextReader)) - return (T)(object)GetTextReader(ordinal); - - if (_isSequential) - return GetFieldValueSequential(ordinal, false).GetAwaiter().GetResult(); - - // In non-sequential, we know that the column is already buffered - no I/O will take place - - var field = CheckRowAndGetField(ordinal); - SeekToColumnNonSequential(ordinal); - - if (ColumnLen == -1) - { - // When T is a Nullable (and only in that case), we support returning null - if (NullableHandler.Exists) - return default!; - - if (typeof(T) == typeof(object)) - return (T)(object)DBNull.Value; - - ThrowHelper.ThrowInvalidCastException_NoValue(field); - } - - // We don't handle exceptions or update PosInColumn - // As with non-sequential reads we always just move to the start/end of the column - return NullableHandler.Exists - ? NullableHandler.Read(field.Handler, Buffer, ColumnLen, field) - : typeof(T) == typeof(object) - ? 
(T)field.Handler.ReadAsObject(Buffer, ColumnLen, field) - : field.Handler.Read(Buffer, ColumnLen, field); - } - - async ValueTask GetFieldValueSequential(int column, bool async, CancellationToken cancellationToken = default) + T GetFieldValueCore(int ordinal) { - using var registration = Connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); - - var field = CheckRowAndGetField(column); - await SeekToColumnSequential(column, async, CancellationToken.None); - CheckColumnStart(); - - if (ColumnLen == -1) - { - // When T is a Nullable (and only in that case), we support returning null - if (NullableHandler.Exists) - return default!; + // The only statically mapped converter, it always exists. + if (typeof(T) == typeof(Stream)) + return GetStream(); - if (typeof(T) == typeof(object)) - return (T)(object)DBNull.Value; + var field = GetInfo(ordinal, typeof(T), out var converter, out var bufferRequirement, out var asObject); - ThrowHelper.ThrowInvalidCastException_NoValue(field); - } + if (typeof(T) == typeof(TextReader)) + PgReader.ThrowIfStreamActive(); + + var columnLength = SeekToColumn(async: false, ordinal, field).GetAwaiter().GetResult(); + if (columnLength == -1) + return DbNullValueOrThrow(field); + + Debug.Assert(asObject || converter is PgConverter); + PgReader.StartRead(bufferRequirement); + var result = asObject + ? (T)converter.ReadAsObject(PgReader) + : Unsafe.As>(converter).Read(PgReader); + PgReader.EndRead(); + return result; - var position = Buffer.ReadPosition; - try - { - return NullableHandler.Exists - ? ColumnLen <= Buffer.ReadBytesLeft - ? NullableHandler.Read(field.Handler, Buffer, ColumnLen, field) - : await NullableHandler.ReadAsync(field.Handler, Buffer, ColumnLen, async, field) - : typeof(T) == typeof(object) - ? ColumnLen <= Buffer.ReadBytesLeft - ? 
(T)field.Handler.ReadAsObject(Buffer, ColumnLen, field) - : (T)await field.Handler.ReadAsObject(Buffer, ColumnLen, async, field) - : ColumnLen <= Buffer.ReadBytesLeft - ? field.Handler.Read(Buffer, ColumnLen, field) - : await field.Handler.Read(Buffer, ColumnLen, async, field); - } - catch + [MethodImpl(MethodImplOptions.NoInlining)] + T GetStream() { - if (Connector.State != ConnectorState.Broken) - { - var writtenBytes = Buffer.ReadPosition - position; - var remainingBytes = ColumnLen - writtenBytes; - if (remainingBytes > 0) - await Buffer.Skip(remainingBytes, async); - } - throw; - } - finally - { - // Important: position must still be updated - PosInColumn += ColumnLen; + var field = GetInfo(ordinal, null, out _, out _, out _); + PgReader.ThrowIfStreamActive(); + var columnLength = SeekToColumn(async: false, ordinal, field).GetAwaiter().GetResult(); + if (columnLength == -1) + return DbNullValueOrThrow(field); + return (T)(object)PgReader.GetStream(canSeek: !_isSequential); } } @@ -1785,43 +1657,14 @@ async ValueTask GetFieldValueSequential(int column, bool async, Cancellati /// The value of the specified column. public override object GetValue(int ordinal) { - var fieldDescription = CheckRowAndGetField(ordinal); - - if (_isSequential) - { - SeekToColumnSequential(ordinal, false).GetAwaiter().GetResult(); - CheckColumnStart(); - } - else - SeekToColumnNonSequential(ordinal); - - if (ColumnLen == -1) + var field = GetInfo(ordinal, null, out var converter, out var bufferRequirement, out _); + var columnLength = SeekToColumn(async: false, ordinal, field).GetAwaiter().GetResult(); + if (columnLength == -1) return DBNull.Value; - object result; - var position = Buffer.ReadPosition; - try - { - result = _isSequential - ? 
fieldDescription.Handler.ReadAsObject(Buffer, ColumnLen, false, fieldDescription).GetAwaiter().GetResult() - : fieldDescription.Handler.ReadAsObject(Buffer, ColumnLen, fieldDescription); - } - catch - { - if (Connector.State != ConnectorState.Broken) - { - var writtenBytes = Buffer.ReadPosition - position; - var remainingBytes = ColumnLen - writtenBytes; - if (remainingBytes > 0) - Buffer.Skip(remainingBytes, false).GetAwaiter().GetResult(); - } - throw; - } - finally - { - // Important: position must still be updated - PosInColumn += ColumnLen; - } + PgReader.StartRead(bufferRequirement); + var result = converter.ReadAsObject(PgReader); + PgReader.EndRead(); return result; } @@ -1843,16 +1686,7 @@ public override object GetValue(int ordinal) /// The zero-based column ordinal. /// true if the specified column is equivalent to ; otherwise false. public override bool IsDBNull(int ordinal) - { - CheckRowAndGetField(ordinal); - - if (_isSequential) - SeekToColumnSequential(ordinal, false).GetAwaiter().GetResult(); - else - SeekToColumnNonSequential(ordinal); - - return ColumnLen == -1; - } + => SeekToColumn(async: false, ordinal, CheckRowAndGetField(ordinal), resumableOp: true).GetAwaiter().GetResult() is -1; /// /// An asynchronous version of , which gets a value that indicates whether the column contains non-existent or missing values. @@ -1865,21 +1699,16 @@ public override bool IsDBNull(int ordinal) /// true if the specified column value is equivalent to otherwise false. public override Task IsDBNullAsync(int ordinal, CancellationToken cancellationToken) { - CheckRowAndGetField(ordinal); - if (!_isSequential) - return IsDBNull(ordinal) ? PGUtil.TrueTask : PGUtil.FalseTask; + return IsDBNull(ordinal) ? 
TrueTask : FalseTask; using (NoSynchronizationContextScope.Enter()) - return IsDBNullAsyncInternal(ordinal, cancellationToken); + return Core(ordinal, cancellationToken); - // ReSharper disable once InconsistentNaming - async Task IsDBNullAsyncInternal(int ordinal, CancellationToken cancellationToken) + async Task Core(int ordinal, CancellationToken cancellationToken) { using var registration = Connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); - - await SeekToColumn(ordinal, true, cancellationToken); - return ColumnLen == -1; + return await SeekToColumn(async: true, ordinal, CheckRowAndGetField(ordinal), resumableOp: true) is -1; } } @@ -1932,6 +1761,7 @@ public override int GetOrdinal(string name) /// /// The zero-based column ordinal. /// The data type of the specified column. + [UnconditionalSuppressMessage("ILLink", "IL2093", Justification = "No members are dynamically accessed by Npgsql via GetFieldType")] public override Type GetFieldType(int ordinal) => GetField(ordinal).FieldType; @@ -2074,111 +1904,138 @@ Task> GetColumnSchema(bool async, Cancellatio #region Seeking - Task SeekToColumn(int column, bool async, CancellationToken cancellationToken = default) - { - if (_isSequential) - return SeekToColumnSequential(column, async, cancellationToken); - SeekToColumnNonSequential(column); - return Task.CompletedTask; - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + ValueTask SeekToColumn(bool async, int ordinal, FieldDescription field, bool resumableOp = false) + => _isSequential + ? 
SeekToColumnSequential(async, ordinal, field, resumableOp) + : new(SeekToColumnNonSequential(ordinal, field, resumableOp)); - void SeekToColumnNonSequential(int column) + int SeekToColumnNonSequential(int ordinal, FieldDescription field, bool resumableOp = false) { - // Shut down any streaming going on on the column - if (_columnStream != null) - { - _columnStream.Dispose(); - _columnStream = null; - } + PgReader.Commit(async: false, _column == ordinal && PgReader.Resumable && resumableOp).GetAwaiter().GetResult(); - for (var lastColumnRead = _columns.Count; column >= lastColumnRead; lastColumnRead++) + for (var lastColumnRead = _columns.Count; ordinal >= lastColumnRead; lastColumnRead++) { - int lastColumnLen; - (Buffer.ReadPosition, lastColumnLen) = _columns[lastColumnRead - 1]; + (Buffer.ReadPosition, var lastColumnLen) = _columns[lastColumnRead - 1]; if (lastColumnLen != -1) Buffer.ReadPosition += lastColumnLen; var len = Buffer.ReadInt32(); _columns.Add((Buffer.ReadPosition, len)); } - (Buffer.ReadPosition, ColumnLen) = _columns[column]; - _column = column; - PosInColumn = 0; + (Buffer.ReadPosition, var columnLength) = _columns[ordinal]; + PgReader.Init(columnLength, field.DataFormat, resumableOp); + _column = ordinal; + + return columnLength; } /// - /// Seeks to the given column. The 4-byte length is read and stored in . + /// Seeks to the given column. The 4-byte length is read and returned. /// - async Task SeekToColumnSequential(int column, bool async, CancellationToken cancellationToken = default) + ValueTask SeekToColumnSequential(bool async, int ordinal, FieldDescription field, bool resumableOp = false) { - if (column < 0 || column >= _numColumns) - throw new IndexOutOfRangeException("Column index out of range"); - - if (column < _column) - throw new InvalidOperationException($"Invalid attempt to read from column ordinal '{column}'. 
With CommandBehavior.SequentialAccess, you may only read from column ordinal '{_column}' or greater."); - - if (column == _column) - return; - - // Need to seek forward - - // Shut down any streaming going on on the column - if (_columnStream != null) + var reread = _column == ordinal; + // Column rereading rules for sequential mode: + // * We never allow rereading if the column didn't get initialized as resumable the previous time + // * If it did get initialized as resumable we only allow rereading when either of the following is true: + // - The op is a resumable one again + // - The op isn't resumable but the field is still entirely unconsumed + if (ordinal < _column || (reread && (!PgReader.Resumable || (!resumableOp && !PgReader.IsAtStart)))) + ThrowHelper.ThrowInvalidOperationException( + $"Invalid attempt to read from column ordinal '{ordinal}'. With CommandBehavior.SequentialAccess, " + + $"you may only read from column ordinal '{_column}' or greater."); + + var committed = false; + if (!PgReader.CommitHasIO(reread)) { - _columnStream.Dispose(); - _columnStream = null; - // Disposing the stream leaves us at the end of the column - PosInColumn = ColumnLen; + PgReader.Commit(async: false, reread).GetAwaiter().GetResult(); + committed = true; + if (TrySeekBuffered(ordinal, out var columnLength)) + { + PgReader.Init(columnLength, field.DataFormat, columnLength is -1 || resumableOp); + return new(columnLength); + } + + // If we couldn't consume the column TrySeekBuffered had to stop at, do so now. + if (columnLength > -1) + { + // Resumable: true causes commit to consume without error. + PgReader.Init(columnLength, field.DataFormat, resumable: true); + committed = false; + } } - // Skip to end of column if needed - // TODO: Simplify by better initializing _columnLen/_posInColumn - var remainingInColumn = ColumnLen == -1 ? 
0 : ColumnLen - PosInColumn; - if (remainingInColumn > 0) - await Buffer.Skip(remainingInColumn, async); + return Core(async, !committed, ordinal, field.DataFormat, resumableOp); - // Skip over unwanted fields - for (; _column < column - 1; _column++) +#if NET6_0_OR_GREATER + [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder<>))] +#endif + async ValueTask Core(bool async, bool commit, int ordinal, DataFormat dataFormat, bool resumableOp) { - await Buffer.Ensure(4, async); - var len = Buffer.ReadInt32(); - if (len != -1) - await Buffer.Skip(len, async); - } + if (commit) + { + Debug.Assert(ordinal != _column); + await PgReader.Commit(async, reread); + } - await Buffer.Ensure(4, async); - ColumnLen = Buffer.ReadInt32(); - PosInColumn = 0; - _column = column; - } + if (ordinal == _column) + { + PgReader.Init(PgReader.FieldSize, field.DataFormat, PgReader.FieldSize is -1 || resumableOp); + return PgReader.FieldSize; + } - Task SeekInColumn(int dataOffset, bool async, CancellationToken cancellationToken = default) - { - if (_isSequential) - return SeekInColumnSequential(dataOffset, async); + // Seek to the requested column + var buffer = Buffer; + for (; _column < ordinal - 1; _column++) + { + await buffer.Ensure(4, async); + var len = buffer.ReadInt32(); + if (len != -1) + await buffer.Skip(len, async); + } - if (dataOffset >= ColumnLen) - ThrowHelper.ThrowArgumentOutOfRange_OutOfColumnBounds(nameof(dataOffset), ColumnLen); + await buffer.Ensure(4, async); + var columnLength = buffer.ReadInt32(); + _column = ordinal; - Buffer.ReadPosition = _columns[_column].Offset + dataOffset; - PosInColumn = dataOffset; - return Task.CompletedTask; + PgReader.Init(columnLength, dataFormat, resumableOp); + return columnLength; + } - async Task SeekInColumnSequential(int dataOffset, bool async) + bool TrySeekBuffered(int ordinal, out int columnLength) { - Debug.Assert(_column > -1); - - if (dataOffset < PosInColumn) - ThrowHelper.ThrowInvalidOperationException("Attempt to 
read a position in the column which has already been read"); + if (ordinal == _column) + { + columnLength = PgReader.FieldSize; + return true; + } - if (dataOffset >= ColumnLen) - ThrowHelper.ThrowArgumentOutOfRange_OutOfColumnBounds(nameof(dataOffset), ColumnLen); + // Skip over unwanted fields + columnLength = -1; + var buffer = Buffer; + for (; _column < ordinal - 1; _column++) + { + if (buffer.ReadBytesLeft < 4) + return false; + columnLength = buffer.ReadInt32(); + if (columnLength > 0) + { + if (buffer.ReadBytesLeft < columnLength) + return false; + buffer.Skip(columnLength); + } + } - if (dataOffset > PosInColumn) + if (buffer.ReadBytesLeft < 4) { - await Buffer.Skip(dataOffset - PosInColumn, async); - PosInColumn = dataOffset; + columnLength = -1; + return false; } + + columnLength = buffer.ReadInt32(); + _column = ordinal; + return true; } } @@ -2190,8 +2047,6 @@ Task ConsumeRow(bool async) { Debug.Assert(State == ReaderState.InResult || State == ReaderState.BeforeResult); - UniqueRowId++; - if (!_canConsumeRowNonSequentially) return ConsumeRowSequential(async); @@ -2201,19 +2056,7 @@ Task ConsumeRow(bool async) async Task ConsumeRowSequential(bool async) { - if (_columnStream != null) - { - _columnStream.Dispose(); - _columnStream = null; - // Disposing the stream leaves us at the end of the column - PosInColumn = ColumnLen; - } - - // TODO: Potential for code-sharing with ReadColumn above, which also skips - // Skip to end of column if needed - var remainingInColumn = ColumnLen == -1 ? 
0 : ColumnLen - PosInColumn; - if (remainingInColumn > 0) - await Buffer.Skip(remainingInColumn, async); + await PgReader.Commit(async, resuming: false); // Skip over the remaining columns in the row for (; _column < _numColumns - 1; _column++) @@ -2230,14 +2073,7 @@ async Task ConsumeRowSequential(bool async) void ConsumeRowNonSequential() { Debug.Assert(State == ReaderState.InResult || State == ReaderState.BeforeResult); - - if (_columnStream is not null) - { - _columnStream.Dispose(); - _columnStream = null; - // Disposing the stream leaves us at the end of the column - PosInColumn = ColumnLen; - } + PgReader.Commit(async: false, resuming: false).GetAwaiter().GetResult(); Buffer.ReadPosition = _dataMsgEnd; } @@ -2264,27 +2100,72 @@ void CheckResultSet() } } - FieldDescription CheckRowAndGetField(int column) + [MethodImpl(MethodImplOptions.NoInlining)] + static T DbNullValueOrThrow(FieldDescription field) { - switch (State) + // When T is a Nullable (and only in that case), we support returning null + if (default(T) is null && typeof(T).IsValueType) + return default!; + + if (typeof(T) == typeof(object)) + return (T)(object)DBNull.Value; + + ThrowHelper.ThrowInvalidCastException_NoValue(field); + return default; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + FieldDescription GetInfo(int ordinal, Type? 
type, out PgConverter converter, out Size bufferRequirement, out bool asObject) + { + var field = CheckRowAndGetField(ordinal); + + if (type is null) { - case ReaderState.InResult: - break; - case ReaderState.Closed: - ThrowHelper.ThrowInvalidOperationException("The reader is closed"); - break; - case ReaderState.Disposed: - ThrowHelper.ThrowObjectDisposedException(nameof(NpgsqlDataReader)); - break; - default: - ThrowHelper.ThrowInvalidOperationException("No row is available"); - break; + converter = field.ObjectOrDefaultInfo.Converter; + bufferRequirement = field.ObjectOrDefaultInfo.BufferRequirement; + asObject = field.ObjectOrDefaultInfo.AsObject; + return field; } - if (column < 0 || column >= RowDescription!.Count) - ThrowColumnOutOfRange(RowDescription!.Count); + ref var info = ref ColumnInfoCache![ordinal]; + field.GetInfo(type, ref info); + converter = info.Converter; + bufferRequirement = info.BufferRequirement; + asObject = info.AsObject; + return field; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + FieldDescription CheckRowAndGetField(int column) + { + var columns = RowDescription; + var state = State; + if (state is ReaderState.InResult && column >= 0 && column < columns!.Count) + return columns[column]; + + return HandleInvalidState(state, columns?.Count ?? 
0); - return RowDescription[column]; + [MethodImpl(MethodImplOptions.NoInlining)] + static FieldDescription HandleInvalidState(ReaderState state, int maxColumns) + { + switch (state) + { + case ReaderState.InResult: + break; + case ReaderState.Closed: + ThrowHelper.ThrowInvalidOperationException("The reader is closed"); + break; + case ReaderState.Disposed: + ThrowHelper.ThrowObjectDisposedException(nameof(NpgsqlDataReader)); + break; + default: + ThrowHelper.ThrowInvalidOperationException("No row is available"); + break; + } + + ThrowColumnOutOfRange(maxColumns); + return default!; + } } /// @@ -2296,17 +2177,11 @@ FieldDescription GetField(int column) if (RowDescription is null) ThrowHelper.ThrowInvalidOperationException("No resultset is currently being traversed"); - if (column < 0 || column >= RowDescription.Count) - ThrowColumnOutOfRange(RowDescription.Count); + var columns = RowDescription; + if (column < 0 || column >= columns.Count) + ThrowColumnOutOfRange(columns.Count); - return RowDescription[column]; - } - - void CheckColumnStart() - { - Debug.Assert(_isSequential); - if (PosInColumn != 0) - ThrowHelper.ThrowInvalidOperationException("Attempt to read a position in the column which has already been read"); + return columns[column]; } void CheckClosedOrDisposed() diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index 510513d0fb..ee3ec18eb5 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -10,8 +10,7 @@ using System.Transactions; using Microsoft.Extensions.Logging; using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.Internal.TypeMapping; +using Npgsql.Internal.Resolvers; using Npgsql.Properties; using Npgsql.Util; @@ -32,11 +31,8 @@ public abstract class NpgsqlDataSource : DbDataSource internal NpgsqlDataSourceConfiguration Configuration { get; } internal NpgsqlLoggingConfiguration LoggingConfiguration { get; } - readonly List _resolverFactories; - readonly Dictionary 
_userTypeMappings; - readonly INpgsqlNameTranslator _defaultNameTranslator; - - internal TypeMapper TypeMapper { get; private set; } = null!; // Initialized at bootstrapping + readonly IPgTypeInfoResolver _resolver; + internal PgSerializerOptions SerializerOptions { get; private set; } = null!; // Initialized at bootstrapping /// /// Information about PostgreSQL and PostgreSQL-like databases (e.g. type definitions, capabilities...). @@ -81,6 +77,8 @@ private protected readonly Dictionary> _pendi /// readonly SemaphoreSlim _setupMappingsSemaphore = new(1); + readonly INpgsqlNameTranslator _defaultNameTranslator; + internal NpgsqlDataSource( NpgsqlConnectionStringBuilder settings, NpgsqlDataSourceConfiguration dataSourceConfig) @@ -100,14 +98,15 @@ internal NpgsqlDataSource( _periodicPasswordProvider, _periodicPasswordSuccessRefreshInterval, _periodicPasswordFailureRefreshInterval, - _resolverFactories, - _userTypeMappings, + var resolverChain, _defaultNameTranslator, ConnectionInitializer, ConnectionInitializerAsync) = dataSourceConfig; _connectionLogger = LoggingConfiguration.ConnectionLogger; + // TODO probably want this on the options so it can devirt unconditionally. 
+ _resolver = new TypeInfoResolverChain(resolverChain); _password = settings.Password; if (_periodicPasswordSuccessRefreshInterval != default) @@ -127,11 +126,11 @@ internal NpgsqlDataSource( MetricsReporter = new MetricsReporter(this); } - /// + /// public new NpgsqlConnection CreateConnection() => NpgsqlConnection.FromDataSource(this); - /// + /// public new NpgsqlConnection OpenConnection() { var connection = CreateConnection(); @@ -152,7 +151,7 @@ internal NpgsqlDataSource( protected override DbConnection OpenDbConnection() => OpenConnection(); - /// + /// public new async ValueTask OpenConnectionAsync(CancellationToken cancellationToken = default) { var connection = CreateConnection(); @@ -233,19 +232,29 @@ internal async Task Bootstrap( return; // The type loading below will need to send queries to the database, and that depends on a type mapper being set up (even if its - // empty). So we set up here, and then later inject the DatabaseInfo. - var typeMapper = new TypeMapper(connector, _defaultNameTranslator); - connector.TypeMapper = typeMapper; + // empty). So we set up a minimal version here, and then later inject the actual DatabaseInfo. 
+ connector.SerializerOptions = + new(PostgresMinimalDatabaseInfo.DefaultTypeCatalog) + { + TextEncoding = connector.TextEncoding, + TypeInfoResolver = AdoTypeInfoResolver.Instance + }; NpgsqlDatabaseInfo databaseInfo; using (connector.StartUserAction(ConnectorState.Executing, cancellationToken)) databaseInfo = await NpgsqlDatabaseInfo.Load(connector, timeout, async); - DatabaseInfo = databaseInfo; - connector.DatabaseInfo = databaseInfo; - typeMapper.Initialize(databaseInfo, _resolverFactories, _userTypeMappings); - TypeMapper = typeMapper; + connector.DatabaseInfo = DatabaseInfo = databaseInfo; + connector.SerializerOptions = SerializerOptions = + new(databaseInfo, CreateTimeZoneProvider(connector.Timezone)) + { + ArrayNullabilityMode = Settings.ArrayNullabilityMode, + EnableDateTimeInfinityConversions = !Statics.DisableDateTimeInfinityConversions, + TextEncoding = connector.TextEncoding, + TypeInfoResolver = _resolver, + DefaultNameTranslator = _defaultNameTranslator + }; _isBootstrapped = true; } @@ -253,6 +262,18 @@ internal async Task Bootstrap( { _setupMappingsSemaphore.Release(); } + + // Func in a static function to make sure we don't capture state that might not stay around, like a connector. + static Func CreateTimeZoneProvider(string postgresTimeZone) + => () => + { + if (string.Equals(postgresTimeZone, "localtime", StringComparison.OrdinalIgnoreCase)) + throw new TimeZoneNotFoundException( + "The special PostgreSQL timezone 'localtime' is not supported when reading values of type 'timestamp with time zone'. 
" + + "Please specify a real timezone in 'postgresql.conf' on the server, or set the 'PGTZ' environment variable on the client."); + + return postgresTimeZone; + }; } #region Password management @@ -478,7 +499,7 @@ sealed class DatabaseStateInfo // While the TimeStamp is not strictly required, it does lower the risk of overwriting the current state with an old value internal readonly DateTime TimeStamp; - public DatabaseStateInfo() : this(default, default, default) {} + public DatabaseStateInfo() : this(default, default, default) { } public DatabaseStateInfo(DatabaseState state, NpgsqlTimeout timeout, DateTime timeStamp) => (State, Timeout, TimeStamp) = (state, timeout, timeStamp); diff --git a/src/Npgsql/NpgsqlDataSourceBuilder.cs b/src/Npgsql/NpgsqlDataSourceBuilder.cs index 356fa48cb3..de87962d5c 100644 --- a/src/Npgsql/NpgsqlDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlDataSourceBuilder.cs @@ -1,12 +1,14 @@ using System; using System.Diagnostics.CodeAnalysis; +using System.Linq; using System.Net.Security; using System.Security.Cryptography.X509Certificates; using System.Text.Json; using System.Threading; using System.Threading.Tasks; using Microsoft.Extensions.Logging; -using Npgsql.Internal.TypeHandling; +using Npgsql.Internal; +using Npgsql.Internal.Resolvers; using Npgsql.TypeMapping; using NpgsqlTypes; @@ -17,6 +19,8 @@ namespace Npgsql; /// public sealed class NpgsqlDataSourceBuilder : INpgsqlTypeMapper { + static UnsupportedTypeInfoResolver UnsupportedTypeInfoResolver { get; } = new(); + readonly NpgsqlSlimDataSourceBuilder _internalBuilder; /// @@ -45,14 +49,80 @@ public INpgsqlNameTranslator DefaultNameTranslator /// public string ConnectionString => _internalBuilder.ConnectionString; + internal static void ResetGlobalMappings(bool overwrite) + => GlobalTypeMapper.Instance.AddGlobalTypeMappingResolvers(new IPgTypeInfoResolver[] + { + overwrite ? 
new AdoTypeInfoResolver() : AdoTypeInfoResolver.Instance, + new ExtraConversionsResolver(), + new SystemTextJsonTypeInfoResolver(), + new SystemTextJsonPocoTypeInfoResolver(), + new RangeTypeInfoResolver(), + new RecordTypeInfoResolver(), + new TupledRecordTypeInfoResolver(), + new FullTextSearchTypeInfoResolver(), + new NetworkTypeInfoResolver(), + new GeometricTypeInfoResolver(), + new LTreeTypeInfoResolver(), + new UnmappedEnumTypeInfoResolver(), + new UnmappedRangeTypeInfoResolver(), + new UnmappedMultirangeTypeInfoResolver(), + // Arrays + new AdoArrayTypeInfoResolver(), + new ExtraConversionsArrayTypeInfoResolver(), + new SystemTextJsonArrayTypeInfoResolver(), + new SystemTextJsonPocoArrayTypeInfoResolver(), + new RangeArrayTypeInfoResolver(), + new RecordArrayTypeInfoResolver(), + new TupledRecordArrayTypeInfoResolver(), + new UnmappedEnumArrayTypeInfoResolver(), + new UnmappedRangeArrayTypeInfoResolver(), + new UnmappedMultirangeArrayTypeInfoResolver(), + }, overwrite); + + static NpgsqlDataSourceBuilder() + => ResetGlobalMappings(overwrite: false); + /// /// Constructs a new , optionally starting out from the given . /// public NpgsqlDataSourceBuilder(string? connectionString = null) { - _internalBuilder = new(connectionString); - + _internalBuilder = new(new NpgsqlConnectionStringBuilder(connectionString)); AddDefaultFeatures(); + + void AddDefaultFeatures() + { + _internalBuilder.EnableEncryption(); + AddTypeInfoResolver(UnsupportedTypeInfoResolver); + // Reverse order arrays. 
+ AddTypeInfoResolver(new UnmappedMultirangeArrayTypeInfoResolver()); + AddTypeInfoResolver(new UnmappedRangeArrayTypeInfoResolver()); + AddTypeInfoResolver(new UnmappedEnumArrayTypeInfoResolver()); + AddTypeInfoResolver(new TupledRecordArrayTypeInfoResolver()); + AddTypeInfoResolver(new RecordArrayTypeInfoResolver()); + AddTypeInfoResolver(new RangeArrayTypeInfoResolver()); + AddTypeInfoResolver(new SystemTextJsonPocoArrayTypeInfoResolver()); + AddTypeInfoResolver(new SystemTextJsonArrayTypeInfoResolver()); + AddTypeInfoResolver(new ExtraConversionsArrayTypeInfoResolver()); + AddTypeInfoResolver(new AdoArrayTypeInfoResolver()); + // Reverse order. + AddTypeInfoResolver(new UnmappedMultirangeTypeInfoResolver()); + AddTypeInfoResolver(new UnmappedRangeTypeInfoResolver()); + AddTypeInfoResolver(new UnmappedEnumTypeInfoResolver()); + AddTypeInfoResolver(new LTreeTypeInfoResolver()); + AddTypeInfoResolver(new GeometricTypeInfoResolver()); + AddTypeInfoResolver(new NetworkTypeInfoResolver()); + AddTypeInfoResolver(new FullTextSearchTypeInfoResolver()); + AddTypeInfoResolver(new TupledRecordTypeInfoResolver()); + AddTypeInfoResolver(new RecordTypeInfoResolver()); + AddTypeInfoResolver(new RangeTypeInfoResolver()); + AddTypeInfoResolver(new SystemTextJsonPocoTypeInfoResolver()); + AddTypeInfoResolver(new SystemTextJsonTypeInfoResolver()); + AddTypeInfoResolver(new ExtraConversionsResolver()); + AddTypeInfoResolver(AdoTypeInfoResolver.Instance); + foreach (var plugin in GlobalTypeMapper.Instance.GetPluginResolvers().Reverse()) + AddTypeInfoResolver(plugin); + } } /// @@ -208,8 +278,12 @@ public NpgsqlDataSourceBuilder UsePeriodicPasswordProvider( #region Type mapping /// - public void AddTypeResolverFactory(TypeHandlerResolverFactory resolverFactory) - => _internalBuilder.AddTypeResolverFactory(resolverFactory); + public void AddTypeInfoResolver(IPgTypeInfoResolver resolver) + => _internalBuilder.AddTypeInfoResolver(resolver); + + /// + void INpgsqlTypeMapper.Reset() + => 
_internalBuilder.ResetTypeMappings(); /// /// Sets up System.Text.Json mappings for the PostgreSQL json and jsonb types. @@ -226,7 +300,8 @@ public NpgsqlDataSourceBuilder UseSystemTextJson( Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null) { - AddTypeResolverFactory(new SystemTextJsonTypeHandlerResolverFactory(jsonbClrTypes, jsonClrTypes, serializerOptions)); + AddTypeInfoResolver(new SystemTextJsonPocoArrayTypeInfoResolver(jsonbClrTypes, jsonClrTypes, serializerOptions)); + AddTypeInfoResolver(new SystemTextJsonPocoTypeInfoResolver(jsonbClrTypes, jsonClrTypes, serializerOptions)); return this; } @@ -269,13 +344,6 @@ public bool UnmapComposite(string? pgName = null, INpgsqlNameTranslator? name public bool UnmapComposite(Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) => _internalBuilder.UnmapComposite(clrType, pgName, nameTranslator); - void INpgsqlTypeMapper.Reset() - { - ((INpgsqlTypeMapper)_internalBuilder).Reset(); - - AddDefaultFeatures(); - } - #endregion Type mapping /// @@ -318,13 +386,4 @@ public NpgsqlDataSource Build() /// public NpgsqlMultiHostDataSource BuildMultiHost() => _internalBuilder.BuildMultiHost(); - - void AddDefaultFeatures() - { - _internalBuilder.EnableEncryption(); - _internalBuilder.AddDefaultTypeResolverFactory(new SystemTextJsonTypeHandlerResolverFactory()); - _internalBuilder.AddDefaultTypeResolverFactory(new RangeTypeHandlerResolverFactory()); - _internalBuilder.AddDefaultTypeResolverFactory(new RecordTypeHandlerResolverFactory()); - _internalBuilder.AddDefaultTypeResolverFactory(new FullTextSearchTypeHandlerResolverFactory()); - } } diff --git a/src/Npgsql/NpgsqlDataSourceConfiguration.cs b/src/Npgsql/NpgsqlDataSourceConfiguration.cs index 40aec62171..749ab7df7b 100644 --- a/src/Npgsql/NpgsqlDataSourceConfiguration.cs +++ b/src/Npgsql/NpgsqlDataSourceConfiguration.cs @@ -5,8 +5,6 @@ using System.Threading; using System.Threading.Tasks; using Npgsql.Internal; -using 
Npgsql.Internal.TypeHandling; -using Npgsql.Internal.TypeMapping; namespace Npgsql; @@ -19,8 +17,7 @@ sealed record NpgsqlDataSourceConfiguration( Func>? PeriodicPasswordProvider, TimeSpan PeriodicPasswordSuccessRefreshInterval, TimeSpan PeriodicPasswordFailureRefreshInterval, - List ResolverFactories, - Dictionary UserTypeMappings, + IEnumerable ResolverChain, INpgsqlNameTranslator DefaultNameTranslator, Action? ConnectionInitializer, Func? ConnectionInitializerAsync); diff --git a/src/Npgsql/NpgsqlLargeObjectManager.cs b/src/Npgsql/NpgsqlLargeObjectManager.cs index 4ec6cb002d..8f9b4cf6ea 100644 --- a/src/Npgsql/NpgsqlLargeObjectManager.cs +++ b/src/Npgsql/NpgsqlLargeObjectManager.cs @@ -1,5 +1,4 @@ using Npgsql.Util; -using System; using System.Data; using System.Text; using System.Threading; diff --git a/src/Npgsql/NpgsqlNestedDataReader.cs b/src/Npgsql/NpgsqlNestedDataReader.cs index 55234f5423..060592e312 100644 --- a/src/Npgsql/NpgsqlNestedDataReader.cs +++ b/src/Npgsql/NpgsqlNestedDataReader.cs @@ -1,8 +1,5 @@ using Npgsql.Internal; -using Npgsql.Internal.TypeHandlers; -using Npgsql.Internal.TypeHandling; using Npgsql.PostgresTypes; -using Npgsql.TypeMapping; using System; using System.Collections; using System.Collections.Generic; @@ -10,7 +7,7 @@ using System.Globalization; using System.IO; using System.Runtime.CompilerServices; -using Npgsql.Internal.TypeMapping; +using Npgsql.Internal.Postgres; namespace Npgsql; @@ -22,7 +19,6 @@ namespace Npgsql; public sealed class NpgsqlNestedDataReader : DbDataReader { readonly NpgsqlDataReader _outermostReader; - ulong _uniqueOutermostReaderRowId; readonly NpgsqlNestedDataReader? _outerNestedReader; NpgsqlNestedDataReader? _cachedFreeNestedDataReader; PostgresCompositeType? 
_compositeType; @@ -33,37 +29,53 @@ public sealed class NpgsqlNestedDataReader : DbDataReader ReaderState _readerState; readonly List _columns = new(); + long _startPos; - readonly struct ColumnInfo + DataFormat Format => DataFormat.Binary; + + struct ColumnInfo { - public readonly uint TypeOid; - public readonly int BufferPos; - public readonly NpgsqlTypeHandler TypeHandler; + readonly DataFormat _format; + public PostgresType PostgresType { get; } + public int BufferPos { get; } + public PgConverterInfo LastConverterInfo { get; private set; } + + public PgTypeInfo ObjectOrDefaultTypeInfo { get; } + public PgConverterInfo ObjectOrDefaultInfo => ObjectOrDefaultTypeInfo.Bind(Field, _format); + + Field Field => new("?", ObjectOrDefaultTypeInfo.Options.PortableTypeIds ? PostgresType.DataTypeName : (Oid)PostgresType.OID, -1); - public ColumnInfo(uint typeOid, int bufferPos, NpgsqlTypeHandler typeHandler) + public ColumnInfo SetConverterInfo(PgTypeInfo typeInfo) + => this with + { + LastConverterInfo = typeInfo.Bind(Field, _format) + }; + + public ColumnInfo(PostgresType postgresType, int bufferPos, PgTypeInfo objectOrDefaultTypeInfo, DataFormat format) { - TypeOid = typeOid; + _format = format; + PostgresType = postgresType; BufferPos = bufferPos; - TypeHandler = typeHandler; + ObjectOrDefaultTypeInfo = objectOrDefaultTypeInfo; } } - NpgsqlReadBuffer Buffer => _outermostReader.Buffer; - TypeMapper TypeMapper => _outermostReader.Connector.TypeMapper; + PgReader PgReader => _outermostReader.Buffer.PgReader; + PgSerializerOptions SerializerOptions => _outermostReader.Connector.SerializerOptions; internal NpgsqlNestedDataReader(NpgsqlDataReader outermostReader, NpgsqlNestedDataReader? outerNestedReader, - ulong uniqueOutermostReaderRowId, int depth, PostgresCompositeType? compositeType) + int depth, PostgresCompositeType? 
compositeType) { _outermostReader = outermostReader; _outerNestedReader = outerNestedReader; - _uniqueOutermostReaderRowId = uniqueOutermostReaderRowId; _depth = depth; _compositeType = compositeType; + _startPos = PgReader.FieldStartPos; } - internal void Init(ulong uniqueOutermostReaderRowId, PostgresCompositeType? compositeType) + internal void Init(PostgresCompositeType? compositeType) { - _uniqueOutermostReaderRowId = uniqueOutermostReaderRowId; + _startPos = PgReader.FieldStartPos; _columns.Clear(); _numRows = 0; _nextRowIndex = 0; @@ -74,9 +86,9 @@ internal void Init(ulong uniqueOutermostReaderRowId, PostgresCompositeType? comp internal void InitArray() { - var dimensions = Buffer.ReadInt32(); - var containsNulls = Buffer.ReadInt32() == 1; - Buffer.ReadUInt32(); // Element OID. Ignored. + var dimensions = PgReader.ReadInt32(); + var containsNulls = PgReader.ReadInt32() == 1; + PgReader.ReadUInt32(); // Element OID. Ignored. if (containsNulls) throw new InvalidOperationException("Record array contains null record"); @@ -87,19 +99,19 @@ internal void InitArray() if (dimensions != 1) throw new InvalidOperationException("Cannot read a multidimensional array with a nested DbDataReader"); - _numRows = Buffer.ReadInt32(); - Buffer.ReadInt32(); // Lower bound + _numRows = PgReader.ReadInt32(); + PgReader.ReadInt32(); // Lower bound if (_numRows > 0) - Buffer.ReadInt32(); // Length of first row + PgReader.ReadInt32(); // Length of first row - _nextRowBufferPos = Buffer.ReadPosition; + _nextRowBufferPos = PgReader.FieldOffset; } internal void InitSingleRow() { _numRows = 1; - _nextRowBufferPos = Buffer.ReadPosition; + _nextRowBufferPos = PgReader.FieldOffset; } /// @@ -141,7 +153,7 @@ public override bool HasRows /// public override bool IsClosed => _readerState == ReaderState.Closed || _readerState == ReaderState.Disposed - || _outermostReader.IsClosed || _uniqueOutermostReaderRowId != _outermostReader.UniqueRowId; + || _outermostReader.IsClosed || 
PgReader.FieldStartPos != _startPos; /// public override int RecordsAffected => -1; @@ -181,26 +193,22 @@ public override long GetBytes(int ordinal, long dataOffset, byte[]? buffer, int if (buffer != null && (length < 0 || length > buffer.Length - bufferOffset)) throw new IndexOutOfRangeException($"length must be between 0 and {buffer.Length - bufferOffset}"); - var field = CheckRowAndColumnAndSeek(ordinal); - var handler = field.Handler; - if (!(handler is ByteaHandler)) - throw new InvalidCastException("GetBytes() not supported for type " + field.Handler.PgDisplayName); + var columnLen = CheckRowAndColumnAndSeek(ordinal, out var column); + if (columnLen is -1) + ThrowHelper.ThrowInvalidCastException_NoValue(); - if (field.Length == -1) - throw new InvalidCastException("field is null"); + if (buffer is null) + return columnLen; - var dataOffset2 = (int)dataOffset; - if (dataOffset2 >= field.Length) - ThrowHelper.ThrowArgumentOutOfRange_OutOfColumnBounds(nameof(dataOffset), field.Length); + using var _ = PgReader.BeginNestedRead(columnLen, Size.Zero); - Buffer.ReadPosition += dataOffset2; + // Move to offset + PgReader.Seek((int)dataOffset); - length = Math.Min(length, field.Length - dataOffset2); - - if (buffer == null) - return length; - - return Buffer.Read(new Span(buffer, bufferOffset, length)); + // At offset, read into buffer. + length = Math.Min(length, PgReader.CurrentRemaining); + PgReader.ReadBytes(new Span(buffer, bufferOffset, length)); + return length; } /// public override long GetChars(int ordinal, long dataOffset, char[]? buffer, int bufferOffset, int length) @@ -217,26 +225,26 @@ public override long GetChars(int ordinal, long dataOffset, char[]? buffer, int /// A data reader. 
public new NpgsqlNestedDataReader GetData(int ordinal) { - var field = CheckRowAndColumnAndSeek(ordinal); - var type = field.Handler.PostgresType; + var valueLength = CheckRowAndColumnAndSeek(ordinal, out var column); + var type = column.PostgresType; var isArray = type is PostgresArrayType; var elementType = isArray ? ((PostgresArrayType)type).Element : type; var compositeType = elementType as PostgresCompositeType; if (elementType.InternalName != "record" && compositeType == null) throw new InvalidCastException("GetData() not supported for type " + type.DisplayName); - if (field.Length == -1) + if (valueLength == -1) throw new InvalidCastException("field is null"); var reader = _cachedFreeNestedDataReader; if (reader != null) { _cachedFreeNestedDataReader = null; - reader.Init(_uniqueOutermostReaderRowId, compositeType); + reader.Init(compositeType); } else { - reader = new NpgsqlNestedDataReader(_outermostReader, this, _uniqueOutermostReaderRowId, _depth + 1, compositeType); + reader = new NpgsqlNestedDataReader(_outermostReader, this, _depth + 1, compositeType); } if (isArray) reader.InitArray(); @@ -249,7 +257,7 @@ public override long GetChars(int ordinal, long dataOffset, char[]? 
buffer, int public override string GetDataTypeName(int ordinal) { var column = CheckRowAndColumn(ordinal); - return column.TypeHandler.PgDisplayName; + return column.PostgresType.DisplayName; } /// @@ -288,16 +296,19 @@ public override int GetOrdinal(string name) public override Type GetFieldType(int ordinal) { var column = CheckRowAndColumn(ordinal); - return column.TypeHandler.GetFieldType(); + return column.ObjectOrDefaultTypeInfo.Type; } /// public override object GetValue(int ordinal) { - var column = CheckRowAndColumnAndSeek(ordinal); - if (column.Length == -1) + var columnLength = CheckRowAndColumnAndSeek(ordinal, out var column); + var info = column.ObjectOrDefaultInfo; + if (columnLength == -1) return DBNull.Value; - return column.Handler.ReadAsObject(Buffer, column.Length); + + using var _ = PgReader.BeginNestedRead(columnLength, info.BufferRequirement); + return info.Converter.ReadAsObject(PgReader); } /// @@ -315,7 +326,7 @@ public override int GetValues(object[] values) /// public override bool IsDBNull(int ordinal) - => CheckRowAndColumnAndSeek(ordinal).Length == -1; + => CheckRowAndColumnAndSeek(ordinal, out _) == -1; /// public override T GetFieldValue(int ordinal) @@ -326,25 +337,25 @@ public override T GetFieldValue(int ordinal) if (typeof(T) == typeof(TextReader)) return (T)(object)GetTextReader(ordinal); - var field = CheckRowAndColumnAndSeek(ordinal); + var columnLength = CheckRowAndColumnAndSeek(ordinal, out var column); + var info = GetOrAddConverterInfo(typeof(T), column, ordinal); - if (field.Length == -1) + if (columnLength == -1) { // When T is a Nullable (and only in that case), we support returning null - if (NullableHandler.Exists) + if (default(T) is null && typeof(T).IsValueType) return default!; if (typeof(T) == typeof(object)) return (T)(object)DBNull.Value; - throw new InvalidCastException("field is null"); + ThrowHelper.ThrowInvalidCastException_NoValue(); } - return NullableHandler.Exists - ? 
NullableHandler.Read(field.Handler, Buffer, field.Length, fieldDescription: null) - : typeof(T) == typeof(object) - ? (T)field.Handler.ReadAsObject(Buffer, field.Length, fieldDescription: null) - : field.Handler.Read(Buffer, field.Length, fieldDescription: null); + using var _ = PgReader.BeginNestedRead(columnLength, info.BufferRequirement); + return info.AsObject + ? (T)info.Converter.ReadAsObject(PgReader)! + : info.GetConverter().Read(PgReader); } /// @@ -352,7 +363,7 @@ public override bool Read() { CheckResultSet(); - Buffer.ReadPosition = _nextRowBufferPos; + PgReader.Seek(_nextRowBufferPos); if (_nextRowIndex == _numRows) { _readerState = ReaderState.AfterRows; @@ -360,27 +371,34 @@ public override bool Read() } if (_nextRowIndex++ != 0) - Buffer.ReadInt32(); // Length of record + PgReader.ReadInt32(); // Length of record - var numColumns = Buffer.ReadInt32(); + var numColumns = PgReader.ReadInt32(); for (var i = 0; i < numColumns; i++) { - var typeOid = Buffer.ReadUInt32(); - var bufferPos = Buffer.ReadPosition; + var typeOid = PgReader.ReadUInt32(); + var bufferPos = PgReader.FieldOffset; if (i >= _columns.Count) - _columns.Add(new ColumnInfo(typeOid, bufferPos, TypeMapper.ResolveByOID(typeOid))); + { + var pgType = SerializerOptions.DatabaseInfo.GetPostgresType(typeOid); + _columns.Add(new ColumnInfo(pgType, bufferPos, AdoSerializerHelpers.GetTypeInfoForReading(typeof(object), pgType, SerializerOptions), Format)); + } else - _columns[i] = new ColumnInfo(typeOid, bufferPos, - _columns[i].TypeOid == typeOid ? _columns[i].TypeHandler : TypeMapper.ResolveByOID(typeOid)); + { + var pgType = _columns[i].PostgresType.OID == typeOid + ? 
_columns[i].PostgresType + : SerializerOptions.DatabaseInfo.GetPostgresType(typeOid); + _columns[i] = new ColumnInfo(pgType, bufferPos, AdoSerializerHelpers.GetTypeInfoForReading(typeof(object), pgType, SerializerOptions), Format); + } - var columnLen = Buffer.ReadInt32(); + var columnLen = PgReader.ReadInt32(); if (columnLen >= 0) - Buffer.Skip(columnLen); + PgReader.Consume(columnLen); } _columns.RemoveRange(numColumns, _columns.Count - numColumns); - _nextRowBufferPos = Buffer.ReadPosition; + _nextRowBufferPos = PgReader.FieldOffset; _readerState = ReaderState.OnRow; return true; @@ -465,12 +483,25 @@ ColumnInfo CheckRowAndColumn(int column) return _columns[column]; } - (NpgsqlTypeHandler Handler, int Length) CheckRowAndColumnAndSeek(int ordinal) + int CheckRowAndColumnAndSeek(int ordinal, out ColumnInfo column) { - var column = CheckRowAndColumn(ordinal); - Buffer.ReadPosition = column.BufferPos; - var len = Buffer.ReadInt32(); - return (column.TypeHandler, len); + column = CheckRowAndColumn(ordinal); + PgReader.Seek(column.BufferPos); + return PgReader.ReadInt32(); + } + + PgConverterInfo GetOrAddConverterInfo(Type type, ColumnInfo column, int ordinal) + { + PgConverterInfo info; + if (!column.LastConverterInfo.IsDefault && column.LastConverterInfo.TypeToConvert == type) + info = column.LastConverterInfo; + else + { + var columnInfo = column.SetConverterInfo(AdoSerializerHelpers.GetTypeInfoForReading(type, column.PostgresType, SerializerOptions)); + _columns[ordinal] = columnInfo; + info = columnInfo.LastConverterInfo; + } + return info; } enum ReaderState diff --git a/src/Npgsql/NpgsqlParameter.cs b/src/Npgsql/NpgsqlParameter.cs index 3be8758799..79e9ed8ccd 100644 --- a/src/Npgsql/NpgsqlParameter.cs +++ b/src/Npgsql/NpgsqlParameter.cs @@ -3,16 +3,15 @@ using System.Data; using System.Data.Common; using System.Diagnostics.CodeAnalysis; +using System.IO; using System.Threading; using System.Threading.Tasks; using Npgsql.Internal; -using 
Npgsql.Internal.TypeHandling; -using Npgsql.Internal.TypeMapping; +using Npgsql.Internal.Postgres; using Npgsql.PostgresTypes; using Npgsql.TypeMapping; using Npgsql.Util; using NpgsqlTypes; -using static Npgsql.Util.Statics; namespace Npgsql; @@ -27,29 +26,27 @@ public class NpgsqlParameter : DbParameter, IDbDataParameter, ICloneable private protected byte _scale; private protected int _size; - // ReSharper disable InconsistentNaming private protected NpgsqlDbType? _npgsqlDbType; private protected string? _dataTypeName; - // ReSharper restore InconsistentNaming - private protected string _name = string.Empty; - private protected object? _value; - private protected string _sourceColumn; + private protected string _name = string.Empty; + object? _value; + private protected bool _useSubStream; + private protected SubReadStream? _subStream; + private protected string _sourceColumn; internal string TrimmedName { get; private protected set; } = PositionalName; - internal const string PositionalName = ""; - - /// - /// Can be used to communicate a value from the validation phase to the writing phase. - /// To be used by type handlers only. - /// - public object? ConvertedValue { get; set; } + internal const string PositionalName = ""; - internal NpgsqlLengthCache? LengthCache { get; set; } + internal PgTypeInfo? TypeInfo { get; private set; } - internal NpgsqlTypeHandler? Handler { get; set; } + internal PgTypeId PgTypeId { get; set; } + internal PgConverter? Converter { get; private set; } - internal FormatCode FormatCode { get; private set; } + internal DataFormat Format { get; private protected set; } + private protected Size? WriteSize { get; set; } + private protected object? 
_writeState; + private protected Size _bufferRequirement; #endregion @@ -250,14 +247,14 @@ public sealed override string ParameterName { if (Collection is not null) Collection.ChangeParameterName(this, value); - else + else ChangeParameterName(value); } } internal void ChangeParameterName(string? value) { - if (value == null) + if (value is null) _name = TrimmedName = PositionalName; else if (value.Length > 0 && (value[0] == ':' || value[0] == '@')) TrimmedName = (_name = value).Substring(1); @@ -278,10 +275,9 @@ public override object? Value get => _value; set { - if (_value == null || value == null || _value.GetType() != value.GetType()) - Handler = null; + if (value is null || _value?.GetType() != value.GetType()) + ResetTypeInfo(); _value = value; - ConvertedValue = null; } } @@ -314,27 +310,25 @@ public sealed override DbType DbType { get { - if (_npgsqlDbType.HasValue) - return GlobalTypeMapper.NpgsqlDbTypeToDbType(_npgsqlDbType.Value); + if (_npgsqlDbType is { } npgsqlDbType) + return npgsqlDbType.ToDbType(); if (_dataTypeName is not null) - return GlobalTypeMapper.NpgsqlDbTypeToDbType(GlobalTypeMapper.DataTypeNameToNpgsqlDbType(_dataTypeName)); + return Internal.Postgres.DataTypeName.FromDisplayName(_dataTypeName).ToNpgsqlDbType()?.ToDbType() ?? DbType.Object; - if (Value is not null) // Infer from value but don't cache - { - return GlobalTypeMapper.Instance.TryResolveMappingByValue(Value, out var mapping) - ? mapping.DbType - : DbType.Object; - } + // Infer from value but don't cache + if (Value is not null) + // We pass ValueType here for the generic derived type, where we should respect T and not the runtime type. + return GlobalTypeMapper.Instance.TryGetDataTypeName(GetValueType(StaticValueType)!, Value)?.ToNpgsqlDbType()?.ToDbType() ?? DbType.Object; return DbType.Object; } set { - Handler = null; + ResetTypeInfo(); _npgsqlDbType = value == DbType.Object ? null - : GlobalTypeMapper.DbTypeToNpgsqlDbType(value) + : value.ToNpgsqlDbType() ?? 
throw new NotSupportedException($"The parameter type DbType.{value} isn't supported by PostgreSQL or Npgsql"); } } @@ -355,14 +349,12 @@ public NpgsqlDbType NpgsqlDbType return _npgsqlDbType.Value; if (_dataTypeName is not null) - return GlobalTypeMapper.DataTypeNameToNpgsqlDbType(_dataTypeName); + return Internal.Postgres.DataTypeName.FromDisplayName(_dataTypeName).ToNpgsqlDbType() ?? NpgsqlDbType.Unknown; - if (Value is not null) // Infer from value - { - return GlobalTypeMapper.Instance.TryResolveMappingByValue(Value, out var mapping) - ? mapping.NpgsqlDbType ?? NpgsqlDbType.Unknown - : throw new NotSupportedException("Can't infer NpgsqlDbType for type " + Value.GetType()); - } + // Infer from value but don't cache + if (Value is not null) + // We pass ValueType here for the generic derived type (NpgsqlParameter) where we should respect T and not the runtime type. + return GlobalTypeMapper.Instance.TryGetDataTypeName(GetValueType(StaticValueType)!, Value)?.ToNpgsqlDbType() ?? NpgsqlDbType.Unknown; return NpgsqlDbType.Unknown; } @@ -373,7 +365,7 @@ public NpgsqlDbType NpgsqlDbType if (value == NpgsqlDbType.Range) throw new ArgumentOutOfRangeException(nameof(value), "Cannot set NpgsqlDbType to just Range, Binary-Or with the element type (e.g. Range of integer is NpgsqlDbType.Range | NpgsqlDbType.Integer)"); - Handler = null; + ResetTypeInfo(); _npgsqlDbType = value; } } @@ -388,22 +380,25 @@ public string? DataTypeName if (_dataTypeName != null) return _dataTypeName; - if (_npgsqlDbType.HasValue) - return GlobalTypeMapper.NpgsqlDbTypeToDataTypeName(_npgsqlDbType.Value); - - if (Value != null) // Infer from value + // Map it to a display name. + if (_npgsqlDbType is { } npgsqlDbType) { - return GlobalTypeMapper.Instance.TryResolveMappingByValue(Value, out var mapping) - ? mapping.DataTypeName - : null; + var unqualifiedName = npgsqlDbType.ToUnqualifiedDataTypeName(); + return unqualifiedName is null ? 
null : Internal.Postgres.DataTypeName.ValidatedName( + "pg_catalog." + unqualifiedName).UnqualifiedDisplayName; } + // Infer from value but don't cache + if (Value is not null) + // We pass ValueType here for the generic derived type, where we should respect T and not the runtime type. + return GlobalTypeMapper.Instance.TryGetDataTypeName(GetValueType(StaticValueType)!, Value)?.DisplayName; + return null; } set { + ResetTypeInfo(); _dataTypeName = value; - Handler = null; } } @@ -431,11 +426,7 @@ public string? DataTypeName public new byte Precision { get => _precision; - set - { - _precision = value; - Handler = null; - } + set => _precision = value; } /// @@ -447,11 +438,7 @@ public string? DataTypeName public new byte Scale { get => _scale; - set - { - _scale = value; - Handler = null; - } + set => _scale = value; } #pragma warning restore CS0109 @@ -466,8 +453,8 @@ public sealed override int Size if (value < -1) throw new ArgumentException($"Invalid parameter Size value '{value}'. The value must be greater than or equal to 0."); + ResetBindingInfo(); _size = value; - Handler = null; } } @@ -506,60 +493,247 @@ public sealed override string SourceColumn #region Internals - internal virtual void ResolveHandler(TypeMapper typeMapper) - { - if (Handler is not null) - return; + private protected virtual Type StaticValueType => typeof(object); - Resolve(typeMapper); + Type? GetValueType(Type staticValueType) => staticValueType != typeof(object) ? staticValueType : Value?.GetType(); - void Resolve(TypeMapper typeMapper) + /// Attempt to resolve a type info based on available (postgres) type information on the parameter. + internal void ResolveTypeInfo(PgSerializerOptions options) + { + var previouslyBound = TypeInfo?.Options == options; + if (!previouslyBound) { - if (_npgsqlDbType.HasValue) - Handler = typeMapper.ResolveByNpgsqlDbType(_npgsqlDbType.Value); + var staticValueType = StaticValueType; + var valueType = GetValueType(StaticValueType); + + string? 
dataTypeName = null; + DataTypeName? builtinDataTypeName = null; + if (_npgsqlDbType is { } npgsqlDbType) + { + dataTypeName = npgsqlDbType.ToUnqualifiedDataTypeNameOrThrow(); + builtinDataTypeName = npgsqlDbType.ToDataTypeName(); + } else if (_dataTypeName is not null) - Handler = typeMapper.ResolveByDataTypeName(_dataTypeName); - else if (_value is not null) - Handler = typeMapper.ResolveByValue(_value); - else - ThrowInvalidOperationException(); + { + dataTypeName = Internal.Postgres.DataTypeName.NormalizeName(_dataTypeName); + // If we can find a match in an NpgsqlDbType we known we're dealing with a fully qualified built-in data type name. + builtinDataTypeName = NpgsqlDbTypeExtensions.ToNpgsqlDbType(dataTypeName)?.ToDataTypeName(); + } + + var pgTypeId = dataTypeName is null + ? (PgTypeId?)null + : TryGetRepresentationalTypeId(builtinDataTypeName ?? dataTypeName, out var id) + ? id + : throw new NotSupportedException(_npgsqlDbType is not null + ? $"The NpgsqlDbType '{_npgsqlDbType}' isn't present in your database. You may need to install an extension or upgrade to a newer version." + : $"The data type name '{builtinDataTypeName ?? dataTypeName}' isn't present in your database. You may need to install an extension or upgrade to a newer version."); + + if (staticValueType == typeof(object)) + { + if (valueType == null && pgTypeId is null) + { + var parameterName = !string.IsNullOrEmpty(ParameterName) ? ParameterName : $"${Collection?.IndexOf(this) + 1}"; + ThrowHelper.ThrowInvalidOperationException( + $"Parameter '{parameterName}' must have either its NpgsqlDbType or its DataTypeName or its Value set."); + return; + } + + // We treat object typed DBNull values as default info. 
+ if (valueType == typeof(DBNull)) + { + valueType = null; + pgTypeId ??= options.ToCanonicalTypeId(options.UnknownPgType); + } + } + + TypeInfo = AdoSerializerHelpers.GetTypeInfoForWriting(valueType, pgTypeId, options, _npgsqlDbType); + } + + // This step isn't part of BindValue because we need to know the PgTypeId beforehand for things like SchemaOnly with null values. + // We never reuse resolutions for resolvers across executions as a mutable value itself may influence the result. + // TODO we could expose a property on a Converter/TypeInfo to indicate whether it's immutable, at that point we can reuse. + if (!previouslyBound || TypeInfo is PgResolverTypeInfo) + { + ResetConverterResolution(); + var resolution = ResolveConverter(TypeInfo!); + Converter = resolution.Converter; + PgTypeId = resolution.PgTypeId; } - void ThrowInvalidOperationException() + bool TryGetRepresentationalTypeId(string dataTypeName, out PgTypeId pgTypeId) { - var parameterName = !string.IsNullOrEmpty(ParameterName) ? ParameterName : $"${Collection?.IndexOf(this) + 1}"; - ThrowHelper.ThrowInvalidOperationException($"Parameter '{parameterName}' must have either its NpgsqlDbType or its DataTypeName or its Value set"); + if (options.DatabaseInfo.TryGetPostgresTypeByName(dataTypeName, out var pgType)) + { + pgTypeId = options.ToCanonicalTypeId(pgType.GetRepresentationalType()); + return true; + } + + pgTypeId = default; + return false; } } - internal void Bind(TypeMapper typeMapper) + // Pull from Value so we also support object typed generic params. + private protected virtual PgConverterResolution ResolveConverter(PgTypeInfo typeInfo) => typeInfo.GetObjectResolution(Value); + + /// Bind the current value to the type info, truncate (if applicable), take its size, and do any final validation before writing. + internal void Bind(out DataFormat format, out Size size) { - ResolveHandler(typeMapper); - FormatCode = Handler!.PreferTextWrite ? 
FormatCode.Text : FormatCode.Binary; + if (TypeInfo is null) + ThrowHelper.ThrowInvalidOperationException($"Missing type info, {nameof(ResolveTypeInfo)} needs to be called before {nameof(Bind)}."); + + if (!TypeInfo.SupportsWriting) + ThrowHelper.ThrowNotSupportedException($"Cannot write values for parameters of type '{TypeInfo.Type}' and postgres type '{TypeInfo.Options.DatabaseInfo.GetDataTypeName(PgTypeId).DisplayName}'."); + + // We might call this twice, once during validation and once during WriteBind, only compute things once. + if (WriteSize is not null) + { + format = Format; + size = WriteSize.Value; + return; + } + + // Handle Size truncate behavior for a predetermined set of types and pg types. + // Doesn't matter if we 'box' Value, all supported types are reference types. + if (_size > 0 && Converter!.TypeToConvert is var type && + (type == typeof(string) || type == typeof(char[]) || type == typeof(byte[]) || type == typeof(Stream)) && + Value is { } value) + { + var dataTypeName = TypeInfo!.Options.GetDataTypeName(PgTypeId); + if (dataTypeName == DataTypeNames.Text || dataTypeName == DataTypeNames.Varchar || dataTypeName == DataTypeNames.Bpchar) + { + if (value is string s && s.Length > _size) + Value = s.Substring(0, _size); + else if (value is char[] chars && chars.Length > _size) + { + var truncated = new char[_size]; + Array.Copy(chars, truncated, _size); + Value = truncated; + } + } + else if (dataTypeName == DataTypeNames.Bytea) + { + if (value is byte[] bytes && bytes.Length > _size) + { + var truncated = new byte[_size]; + Array.Copy(bytes, truncated, _size); + Value = truncated; + } + else if (value is Stream) + _useSubStream = true; + } + } + + BindCore(); + format = Format; + size = WriteSize!.Value; + } + + private protected virtual void BindCore(bool allowNullReference = false) + { + // Pull from Value so we also support object typed generic params. 
+ var value = Value; + if (value is null && !allowNullReference) + ThrowHelper.ThrowInvalidOperationException($"Parameter '{ParameterName}' cannot be null, DBNull.Value should be used instead."); + + if (_useSubStream && value is not null) + value = _subStream = new SubReadStream((Stream)value, _size); + + if (TypeInfo!.BindObject(Converter!, value, out var size, out _writeState, out var dataFormat) is { } info) + { + WriteSize = size; + _bufferRequirement = info.BufferRequirement; + } + else + { + WriteSize = -1; + _bufferRequirement = default; + } + Format = dataFormat; } - internal virtual int ValidateAndGetLength() + internal async ValueTask Write(bool async, PgWriter writer, CancellationToken cancellationToken) { - if (_value is DBNull) - return 0; - if (_value == null) - ThrowHelper.ThrowInvalidCastException("Parameter {0} must be set", ParameterName); - - var lengthCache = LengthCache; - var len = Handler!.ValidateObjectAndGetLength(_value, ref lengthCache, this); - LengthCache = lengthCache; - return len; + if (WriteSize is not { } writeSize) + { + ThrowHelper.ThrowInvalidOperationException("Missing type info or binding info."); + return; + } + + try + { + if (writer.ShouldFlush(sizeof(int))) + await writer.Flush(async, cancellationToken); + + writer.WriteInt32(writeSize.Value); + if (writeSize.Value is -1) + { + writer.Commit(sizeof(int)); + return; + } + + var current = new ValueMetadata + { + Format = Format, + BufferRequirement = _bufferRequirement, + Size = writeSize, + WriteState = _writeState + }; + await writer.BeginWrite(async, current, cancellationToken).ConfigureAwait(false); + await WriteValue(async, writer, cancellationToken); + writer.Commit(writeSize.Value + sizeof(int)); + } + finally + { + ResetBindingInfo(); + } } - internal virtual Task WriteWithLength(NpgsqlWriteBuffer buf, bool async, CancellationToken cancellationToken = default) - => Handler!.WriteObjectWithLength(_value!, buf, LengthCache, this, async, cancellationToken); + private 
protected virtual ValueTask WriteValue(bool async, PgWriter writer, CancellationToken cancellationToken) + { + // Pull from Value so we also support base calls from generic parameters. + var value = (_useSubStream ? _subStream : Value)!; + if (async) + return Converter!.WriteAsObjectAsync(writer, value, cancellationToken); + + Converter!.WriteAsObject(writer, value); + return new(); + } /// public override void ResetDbType() { _npgsqlDbType = null; _dataTypeName = null; - Handler = null; + ResetTypeInfo(); + } + + private protected void ResetTypeInfo() + { + TypeInfo = null; + ResetConverterResolution(); + } + + void ResetConverterResolution() + { + Converter = null; + PgTypeId = default; + ResetBindingInfo(); + } + + void ResetBindingInfo() + { + if (_writeState is not null) + TypeInfo?.DisposeWriteState(_writeState); + if (_useSubStream) + { + _useSubStream = false; + _subStream?.Dispose(); + _subStream = null; + } + WriteSize = null; + Format = default; + _bufferRequirement = default; } internal bool IsInputDirection => Direction == ParameterDirection.InputOutput || Direction == ParameterDirection.Input; @@ -599,4 +773,4 @@ private protected virtual NpgsqlParameter CloneCore() => object ICloneable.Clone() => Clone(); #endregion -} \ No newline at end of file +} diff --git a/src/Npgsql/NpgsqlParameterCollection.cs b/src/Npgsql/NpgsqlParameterCollection.cs index 3f1e139b08..58c0315753 100644 --- a/src/Npgsql/NpgsqlParameterCollection.cs +++ b/src/Npgsql/NpgsqlParameterCollection.cs @@ -5,9 +5,7 @@ using System.Data.Common; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; -using System.Runtime.CompilerServices; -using Npgsql.Internal.TypeMapping; -using Npgsql.TypeMapping; +using Npgsql.Internal; using NpgsqlTypes; namespace Npgsql; @@ -38,7 +36,7 @@ static NpgsqlParameterCollection() /// /// Initializes a new instance of the NpgsqlParameterCollection class. 
/// - internal NpgsqlParameterCollection() {} + internal NpgsqlParameterCollection() { } bool LookupEnabled => InternalList.Count >= LookupThreshold; @@ -681,14 +679,15 @@ internal void CloneTo(NpgsqlParameterCollection other) } } - internal void ProcessParameters(TypeMapper typeMapper, bool validateValues, CommandType commandType) + internal void ProcessParameters(PgSerializerOptions options, bool validateValues, CommandType commandType) { HasOutputParameters = false; PlaceholderType = PlaceholderType.NoParameters; - for (var i = 0; i < InternalList.Count; i++) + var list = InternalList; + for (var i = 0; i < list.Count; i++) { - var p = InternalList[i]; + var p = list[i]; switch (PlaceholderType) { @@ -737,12 +736,11 @@ internal void ProcessParameters(TypeMapper typeMapper, bool validateValues, Comm break; } - p.Bind(typeMapper); + p.ResolveTypeInfo(options); if (validateValues) { - p.LengthCache?.Clear(); - p.ValidateAndGetLength(); + p.Bind(out _, out _); } } } diff --git a/src/Npgsql/NpgsqlParameter`.cs b/src/Npgsql/NpgsqlParameter`.cs index a0487a9aec..18ac5aff45 100644 --- a/src/Npgsql/NpgsqlParameter`.cs +++ b/src/Npgsql/NpgsqlParameter`.cs @@ -1,12 +1,11 @@ using System; using System.Data; +using System.Diagnostics; +using System.Runtime.CompilerServices; using System.Threading; using System.Threading.Tasks; using Npgsql.Internal; -using Npgsql.Internal.TypeMapping; -using Npgsql.TypeMapping; using NpgsqlTypes; -using static Npgsql.Util.Statics; namespace Npgsql; @@ -17,10 +16,21 @@ namespace Npgsql; /// The type of the value that will be stored in the parameter. public sealed class NpgsqlParameter : NpgsqlParameter { + T? _typedValue; + /// /// Gets or sets the strongly-typed value of the parameter. /// - public T? TypedValue { get; set; } + public T? 
TypedValue + { + get => _typedValue; + set + { + if (typeof(T) == typeof(object) && (value is null || _typedValue?.GetType() != value.GetType())) + ResetTypeInfo(); + _typedValue = value; + } + } /// /// Gets or sets the value of the parameter. This delegates to . @@ -31,12 +41,14 @@ public override object? Value set => TypedValue = (T)value!; } + private protected override Type StaticValueType => typeof(T); + #region Constructors /// /// Initializes a new instance of . /// - public NpgsqlParameter() {} + public NpgsqlParameter() { } /// /// Initializes a new instance of with a parameter name and value. @@ -67,33 +79,45 @@ public NpgsqlParameter(string parameterName, DbType dbType) #endregion Constructors - internal override void ResolveHandler(TypeMapper typeMapper) + private protected override PgConverterResolution ResolveConverter(PgTypeInfo typeInfo) + => typeInfo.IsBoxing ? base.ResolveConverter(typeInfo) : typeInfo.GetResolution(TypedValue); + + private protected override void BindCore(bool allowNullReference = false) { - if (Handler is not null) + // If we're object typed we should support DBNull, call into base BindCore. + if (typeof(T) == typeof(object) || TypeInfo!.IsBoxing || _useSubStream) + { + base.BindCore(TypeInfo!.IsBoxing || _useSubStream || allowNullReference); return; + } - // TODO: Better exceptions in case of cast failure etc. 
- if (_npgsqlDbType.HasValue) - Handler = typeMapper.ResolveByNpgsqlDbType(_npgsqlDbType.Value); - else if (_dataTypeName is not null) - Handler = typeMapper.ResolveByDataTypeName(_dataTypeName); + var value = TypedValue; + Debug.Assert(Converter is PgConverter); + if (TypeInfo!.Bind(Unsafe.As>(Converter), value, out var size, out _writeState, out var dataFormat) is { } info) + { + WriteSize = size; + _bufferRequirement = info.BufferRequirement; + } else - Handler = typeMapper.ResolveByValue(TypedValue); + { + WriteSize = -1; + _bufferRequirement = default; + } + Format = dataFormat; } - internal override int ValidateAndGetLength() + private protected override ValueTask WriteValue(bool async, PgWriter writer, CancellationToken cancellationToken) { - if (TypedValue is null or DBNull) - return 0; + if (TypeInfo!.IsBoxing || _useSubStream) + return base.WriteValue(async, writer, cancellationToken); - var lengthCache = LengthCache; - var len = Handler!.ValidateAndGetLength(TypedValue, ref lengthCache, this); - LengthCache = lengthCache; - return len; - } + Debug.Assert(Converter is PgConverter); + if (async) + return Unsafe.As>(Converter!).WriteAsync(writer, TypedValue!, cancellationToken); - internal override Task WriteWithLength(NpgsqlWriteBuffer buf, bool async, CancellationToken cancellationToken = default) - => Handler!.WriteWithLength(TypedValue, buf, LengthCache, this, async, cancellationToken); + Unsafe.As>(Converter!).Write(writer, TypedValue!); + return new(); + } private protected override NpgsqlParameter CloneCore() => // use fields instead of properties @@ -114,4 +138,4 @@ private protected override NpgsqlParameter CloneCore() => TypedValue = TypedValue, SourceColumnNullMapping = SourceColumnNullMapping, }; -} \ No newline at end of file +} diff --git a/src/Npgsql/NpgsqlSchema.cs b/src/Npgsql/NpgsqlSchema.cs index e8d65ecbf1..461ae2e873 100644 --- a/src/Npgsql/NpgsqlSchema.cs +++ b/src/Npgsql/NpgsqlSchema.cs @@ -6,6 +6,7 @@ using System.Text; using 
System.Threading; using System.Threading.Tasks; +using Npgsql.Internal; using Npgsql.PostgresTypes; using NpgsqlTypes; @@ -556,106 +557,110 @@ static DataTable GetDataTypes(NpgsqlConnection conn) // Npgsql-specific table.Columns.Add("OID", typeof(uint)); - // TODO: Support type name restriction - foreach (var baseType in connector.DatabaseInfo.BaseTypes.Cast() - .Concat(connector.DatabaseInfo.EnumTypes) - .Concat(connector.DatabaseInfo.CompositeTypes)) + // TODO: Support type name restriction + try { - if (!connector.TypeMapper.TryGetMapping(baseType, out var mapping)) - continue; + PgSerializerOptions.IntrospectionCaller = true; + foreach (var baseType in connector.DatabaseInfo.BaseTypes.Cast() + .Concat(connector.DatabaseInfo.EnumTypes) + .Concat(connector.DatabaseInfo.CompositeTypes)) + { + if (connector.SerializerOptions.GetDefaultTypeInfo(baseType) is not { } info) + continue; - var row = table.Rows.Add(); + var row = table.Rows.Add(); - PopulateDefaultDataTypeInfo(row, baseType); - PopulateHardcodedDataTypeInfo(row, baseType); + PopulateDefaultDataTypeInfo(row, baseType); + PopulateHardcodedDataTypeInfo(row, baseType); - if (mapping.ClrTypes.Length > 0) - row["DataType"] = mapping.ClrTypes[0].FullName; - if (mapping.NpgsqlDbType.HasValue) - row["ProviderDbType"] = (int)mapping.NpgsqlDbType.Value; - } + row["DataType"] = info.Type.FullName; + if (baseType.DataTypeName.ToNpgsqlDbType() is { } npgsqlDbType) + row["ProviderDbType"] = (int)npgsqlDbType; + } - foreach (var arrayType in connector.DatabaseInfo.ArrayTypes) - { - if (!connector.TypeMapper.TryGetMapping(arrayType.Element, out var elementMapping)) - continue; - - var row = table.Rows.Add(); - - PopulateDefaultDataTypeInfo(row, arrayType.Element); - // Populate hardcoded values based on the element type (e.g. citext[] is case-insensitive). 
- PopulateHardcodedDataTypeInfo(row, arrayType.Element); - - row["TypeName"] = arrayType.DisplayName; - row["OID"] = arrayType.OID; - row["CreateFormat"] += "[]"; - if (elementMapping.ClrTypes.Length > 0) - row["DataType"] = elementMapping.ClrTypes[0].MakeArrayType().FullName; - if (elementMapping.NpgsqlDbType.HasValue) - row["ProviderDbType"] = (int)(elementMapping.NpgsqlDbType.Value | NpgsqlDbType.Array); - } + foreach (var arrayType in connector.DatabaseInfo.ArrayTypes) + { + if (connector.SerializerOptions.GetDefaultTypeInfo(arrayType) is not { } info) + continue; - foreach (var rangeType in connector.DatabaseInfo.RangeTypes) - { - if (!connector.TypeMapper.TryGetMapping(rangeType.Subtype, out var subtypeMapping)) - continue; - - var row = table.Rows.Add(); - - PopulateDefaultDataTypeInfo(row, rangeType.Subtype); - // Populate hardcoded values based on the subtype type (e.g. citext[] is case-insensitive). - PopulateHardcodedDataTypeInfo(row, rangeType.Subtype); - - row["TypeName"] = rangeType.DisplayName; - row["OID"] = rangeType.OID; - row["CreateFormat"] = rangeType.DisplayName.ToUpperInvariant(); - if (subtypeMapping.ClrTypes.Length > 0) - row["DataType"] = typeof(NpgsqlRange<>).MakeGenericType(subtypeMapping.ClrTypes[0]).FullName; - if (subtypeMapping.NpgsqlDbType.HasValue) - row["ProviderDbType"] = (int)(subtypeMapping.NpgsqlDbType.Value | NpgsqlDbType.Range); - } + var row = table.Rows.Add(); - foreach (var multirangeType in connector.DatabaseInfo.MultirangeTypes) - { - var subtypeType = multirangeType.Subrange.Subtype; - if (!connector.TypeMapper.TryGetMapping(subtypeType, out var subtypeMapping)) - continue; - - var row = table.Rows.Add(); - - PopulateDefaultDataTypeInfo(row, subtypeType); - // Populate hardcoded values based on the subtype type (e.g. citext[] is case-insensitive). 
- PopulateHardcodedDataTypeInfo(row, subtypeType); - - row["TypeName"] = multirangeType.DisplayName; - row["OID"] = multirangeType.OID; - row["CreateFormat"] = multirangeType.DisplayName.ToUpperInvariant(); - if (subtypeMapping.ClrTypes.Length > 0) - row["DataType"] = typeof(NpgsqlRange<>).MakeGenericType(subtypeMapping.ClrTypes[0]).FullName; - if (subtypeMapping.NpgsqlDbType.HasValue) - row["ProviderDbType"] = (int)(subtypeMapping.NpgsqlDbType.Value | NpgsqlDbType.Range); - } + PopulateDefaultDataTypeInfo(row, arrayType.Element); + // Populate hardcoded values based on the element type (e.g. citext[] is case-insensitive). + PopulateHardcodedDataTypeInfo(row, arrayType.Element); - foreach (var domainType in connector.DatabaseInfo.DomainTypes) - { - if (!connector.TypeMapper.TryGetMapping(domainType, out var baseMapping)) - continue; + row["TypeName"] = arrayType.DisplayName; + row["OID"] = arrayType.OID; + row["CreateFormat"] += "[]"; + row["DataType"] = info.Type.FullName; + if (arrayType.DataTypeName.ToNpgsqlDbType() is { } npgsqlDbType) + row["ProviderDbType"] = (int)npgsqlDbType; + } + + foreach (var rangeType in connector.DatabaseInfo.RangeTypes) + { + if (connector.SerializerOptions.GetDefaultTypeInfo(rangeType) is not { } info) + continue; - var row = table.Rows.Add(); + var row = table.Rows.Add(); - PopulateDefaultDataTypeInfo(row, domainType.BaseType); - // Populate hardcoded values based on the element type (e.g. citext[] is case-insensitive). - PopulateHardcodedDataTypeInfo(row, domainType.BaseType); - row["TypeName"] = domainType.DisplayName; - row["OID"] = domainType.OID; - // A domain is never the best match, since its underlying base type is - row["IsBestMatch"] = false; + PopulateDefaultDataTypeInfo(row, rangeType.Subtype); + // Populate hardcoded values based on the subtype type (e.g. citext[] is case-insensitive). 
+ PopulateHardcodedDataTypeInfo(row, rangeType.Subtype); - if (baseMapping.ClrTypes.Length > 0) - row["DataType"] = baseMapping.ClrTypes[0].FullName; - if (baseMapping.NpgsqlDbType.HasValue) - row["ProviderDbType"] = (int)baseMapping.NpgsqlDbType.Value; + row["TypeName"] = rangeType.DisplayName; + row["OID"] = rangeType.OID; + row["CreateFormat"] = rangeType.DisplayName.ToUpperInvariant(); + row["DataType"] = info.Type.FullName; + if (rangeType.DataTypeName.ToNpgsqlDbType() is { } npgsqlDbType) + row["ProviderDbType"] = (int)npgsqlDbType; + } + + foreach (var multirangeType in connector.DatabaseInfo.MultirangeTypes) + { + var subtypeType = multirangeType.Subrange.Subtype; + if (connector.SerializerOptions.GetDefaultTypeInfo(multirangeType) is not { } info) + continue; + + var row = table.Rows.Add(); + + PopulateDefaultDataTypeInfo(row, subtypeType); + // Populate hardcoded values based on the subtype type (e.g. citext[] is case-insensitive). + PopulateHardcodedDataTypeInfo(row, subtypeType); + + row["TypeName"] = multirangeType.DisplayName; + row["OID"] = multirangeType.OID; + row["CreateFormat"] = multirangeType.DisplayName.ToUpperInvariant(); + row["DataType"] = info.Type.FullName; + if (multirangeType.DataTypeName.ToNpgsqlDbType() is { } npgsqlDbType) + row["ProviderDbType"] = (int)npgsqlDbType; + } + + foreach (var domainType in connector.DatabaseInfo.DomainTypes) + { + var representationalType = domainType.GetRepresentationalType(); + if (connector.SerializerOptions.GetDefaultTypeInfo(representationalType) is not { } info) + continue; + + var row = table.Rows.Add(); + + PopulateDefaultDataTypeInfo(row, representationalType); + // Populate hardcoded values based on the element type (e.g. citext[] is case-insensitive). 
+ PopulateHardcodedDataTypeInfo(row, representationalType); + row["TypeName"] = domainType.DisplayName; + row["OID"] = domainType.OID; + // A domain is never the best match, since its underlying base type is + row["IsBestMatch"] = false; + + row["DataType"] = info.Type.FullName; + if (representationalType.DataTypeName.ToNpgsqlDbType() is { } npgsqlDbType) + row["ProviderDbType"] = (int)npgsqlDbType; + } + } + finally + { + PgSerializerOptions.IntrospectionCaller = false; } return table; diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index 97a7dd7a34..c074e29dec 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -1,16 +1,15 @@ using System; using System.Collections.Generic; using System.Diagnostics.CodeAnalysis; +using System.Linq; using System.Net.Security; -using System.Reflection; using System.Security.Cryptography.X509Certificates; using System.Text.Json; using System.Threading; using System.Threading.Tasks; using Microsoft.Extensions.Logging; using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.Internal.TypeMapping; +using Npgsql.Internal.Resolvers; using Npgsql.Properties; using Npgsql.TypeMapping; using NpgsqlTypes; @@ -26,6 +25,8 @@ namespace Npgsql; /// public sealed class NpgsqlSlimDataSourceBuilder : INpgsqlTypeMapper { + static UnsupportedTypeInfoResolver UnsupportedTypeInfoResolver { get; } = new(); + ILoggerFactory? _loggerFactory; bool _sensitiveDataLoggingEnabled; @@ -36,11 +37,8 @@ public sealed class NpgsqlSlimDataSourceBuilder : INpgsqlTypeMapper Func>? 
_periodicPasswordProvider; TimeSpan _periodicPasswordSuccessRefreshInterval, _periodicPasswordFailureRefreshInterval; - readonly List _resolverFactories = new(); - readonly Dictionary _userTypeMappings = new(); - - /// - public INpgsqlNameTranslator DefaultNameTranslator { get; set; } = GlobalTypeMapper.Instance.DefaultNameTranslator; + readonly List _resolverChain = new(); + readonly UserTypeMapper _userTypeMapper; Action? _syncConnectionInitializer; Func? _asyncConnectionInitializer; @@ -55,6 +53,12 @@ public sealed class NpgsqlSlimDataSourceBuilder : INpgsqlTypeMapper /// public string ConnectionString => ConnectionStringBuilder.ToString(); + static NpgsqlSlimDataSourceBuilder() + => GlobalTypeMapper.Instance.AddGlobalTypeMappingResolvers(new [] + { + AdoTypeInfoResolver.Instance + }); + /// /// A diagnostics name used by Npgsql when generating tracing, logging and metrics. /// @@ -67,8 +71,19 @@ public sealed class NpgsqlSlimDataSourceBuilder : INpgsqlTypeMapper public NpgsqlSlimDataSourceBuilder(string? connectionString = null) { ConnectionStringBuilder = new NpgsqlConnectionStringBuilder(connectionString); + _userTypeMapper = new(); + // Reverse order + AddTypeInfoResolver(UnsupportedTypeInfoResolver); + AddTypeInfoResolver(new AdoTypeInfoResolver()); + // When used publicly we start off with our slim defaults. 
+ foreach (var plugin in GlobalTypeMapper.Instance.GetPluginResolvers().Reverse()) + AddTypeInfoResolver(plugin); + } - ResetTypeMappings(); + internal NpgsqlSlimDataSourceBuilder(NpgsqlConnectionStringBuilder connectionStringBuilder) + { + ConnectionStringBuilder = connectionStringBuilder; + _userTypeMapper = new(); } /// @@ -237,158 +252,105 @@ public NpgsqlSlimDataSourceBuilder UsePeriodicPasswordProvider( #region Type mapping /// - public void AddTypeResolverFactory(TypeHandlerResolverFactory resolverFactory) - { - var type = resolverFactory.GetType(); - - for (var i = 0; i < _resolverFactories.Count; i++) - { - if (_resolverFactories[i].GetType() == type) - { - _resolverFactories.RemoveAt(i); - break; - } - } - - _resolverFactories.Insert(0, resolverFactory); - } - - internal void AddDefaultTypeResolverFactory(TypeHandlerResolverFactory resolverFactory) - { - // For these "default" resolvers: - // 1. If they were already added in the global type mapper, we don't want to replace them (there may be custom user config, e.g. - // for JSON. - // 2. They can't be at the start, since then they'd override a user-added resolver in global (e.g. the range handler would override - // NodaTime, but NodaTime has special handling for tstzrange, mapping it to Interval in addition to NpgsqlRange). - // 3. They also can't be at the end, since then they'd be overridden by builtin (builtin has limited JSON handler, but we want - // the System.Text.Json handler to take precedence. - // So we (currently) add these at the end, but before the builtin resolver. 
- var type = resolverFactory.GetType(); - - // 1st pass to skip if the resolver already exists from the global type mapper - for (var i = 0; i < _resolverFactories.Count; i++) - if (_resolverFactories[i].GetType() == type) - return; - - for (var i = 0; i < _resolverFactories.Count; i++) - { - if (_resolverFactories[i] is BuiltInTypeHandlerResolverFactory) - { - _resolverFactories.Insert(i, resolverFactory); - return; - } - } - - throw new Exception("No built-in resolver factory found"); - } + public INpgsqlNameTranslator DefaultNameTranslator { get; set; } = GlobalTypeMapper.Instance.DefaultNameTranslator; /// public INpgsqlTypeMapper MapEnum(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) where TEnum : struct, Enum { - if (pgName != null && pgName.Trim() == "") - throw new ArgumentException("pgName can't be empty", nameof(pgName)); - - nameTranslator ??= DefaultNameTranslator; - pgName ??= GetPgName(typeof(TEnum), nameTranslator); - - _userTypeMappings[pgName] = new UserEnumTypeMapping(pgName, nameTranslator); + _userTypeMapper.MapEnum(pgName, nameTranslator); return this; } /// public bool UnmapEnum(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) where TEnum : struct, Enum - { - if (pgName != null && pgName.Trim() == "") - throw new ArgumentException("pgName can't be empty", nameof(pgName)); - - nameTranslator ??= DefaultNameTranslator; - pgName ??= GetPgName(typeof(TEnum), nameTranslator); - - return _userTypeMappings.Remove(pgName); - } + => _userTypeMapper.UnmapEnum(pgName, nameTranslator); /// - [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] + [RequiresUnreferencedCode("Composite type mapping isn't trimming-safe.")] public INpgsqlTypeMapper MapComposite(string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) { - if (pgName != null && pgName.Trim() == "") - throw new ArgumentException("pgName can't be empty", nameof(pgName)); - - nameTranslator ??= DefaultNameTranslator; - pgName ??= GetPgName(typeof(T), nameTranslator); - - _userTypeMappings[pgName] = new UserCompositeTypeMapping(pgName, nameTranslator); + _userTypeMapper.MapComposite(typeof(T), pgName, nameTranslator); return this; } /// - [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] + [RequiresUnreferencedCode("Composite type mapping isn't trimming-safe.")] + public bool UnmapComposite(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + => _userTypeMapper.UnmapComposite(typeof(T), pgName, nameTranslator); + + /// + [RequiresUnreferencedCode("Composite type mapping isn't trimming-safe.")] public INpgsqlTypeMapper MapComposite(Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) { - var openMethod = typeof(NpgsqlSlimDataSourceBuilder).GetMethod(nameof(MapComposite), new[] { typeof(string), typeof(INpgsqlNameTranslator) })!; - var method = openMethod.MakeGenericMethod(clrType); - method.Invoke(this, new object?[] { pgName, nameTranslator }); - + _userTypeMapper.MapComposite(clrType, pgName, nameTranslator); return this; } /// - [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] - public bool UnmapComposite(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) - => UnmapComposite(typeof(T), pgName, nameTranslator); - - /// - [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] + [RequiresUnreferencedCode("Composite type mapping isn't trimming-safe.")] public bool UnmapComposite(Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + => _userTypeMapper.UnmapComposite(clrType, pgName, nameTranslator); + + /// + /// Adds a type info resolver which can add or modify support for PostgreSQL types. 
+ /// Typically used by plugins. + /// + /// The type resolver to be added. + public void AddTypeInfoResolver(IPgTypeInfoResolver resolver) { - if (pgName != null && pgName.Trim() == "") - throw new ArgumentException("pgName can't be empty", nameof(pgName)); + var type = resolver.GetType(); - nameTranslator ??= DefaultNameTranslator; - pgName ??= GetPgName(clrType, nameTranslator); + for (var i = 0; i < _resolverChain.Count; i++) + if (_resolverChain[i].GetType() == type) + { + _resolverChain.RemoveAt(i); + break; + } - return _userTypeMappings.Remove(pgName); + _resolverChain.Insert(0, resolver); } void INpgsqlTypeMapper.Reset() => ResetTypeMappings(); - void ResetTypeMappings() + internal void ResetTypeMappings() { - var globalMapper = GlobalTypeMapper.Instance; - globalMapper.Lock.EnterReadLock(); - try - { - _resolverFactories.Clear(); - foreach (var resolverFactory in globalMapper.HandlerResolverFactories) - _resolverFactories.Add(resolverFactory); - - _userTypeMappings.Clear(); - foreach (var kv in globalMapper.UserTypeMappings) - _userTypeMappings[kv.Key] = kv.Value; - } - finally - { - globalMapper.Lock.ExitReadLock(); - } + _resolverChain.Clear(); + _resolverChain.AddRange(GlobalTypeMapper.Instance.GetPluginResolvers()); } - static string GetPgName(Type clrType, INpgsqlNameTranslator nameTranslator) - => clrType.GetCustomAttribute()?.PgName - ?? nameTranslator.TranslateTypeName(clrType.Name); - #endregion Type mapping #region Optional opt-ins /// - /// Sets up mappings for the PostgreSQL range and multirange types. + /// Sets up mappings for the PostgreSQL array types. + /// + public NpgsqlSlimDataSourceBuilder EnableArrays() + { + AddTypeInfoResolver(new RangeArrayTypeInfoResolver()); + AddTypeInfoResolver(new ExtraConversionsArrayTypeInfoResolver()); + AddTypeInfoResolver(new AdoArrayTypeInfoResolver()); + return this; + } + + /// + /// Sets up mappings for the PostgreSQL range types. 
/// public NpgsqlSlimDataSourceBuilder EnableRanges() { - AddTypeResolverFactory(new RangeTypeHandlerResolverFactory()); + AddTypeInfoResolver(new RangeTypeInfoResolver()); + return this; + } + + /// + /// Sets up mappings for the PostgreSQL multirange types. + /// + public NpgsqlSlimDataSourceBuilder EnableMultiranges() + { + AddTypeInfoResolver(new RangeTypeInfoResolver()); return this; } @@ -407,7 +369,7 @@ public NpgsqlSlimDataSourceBuilder UseSystemTextJson( Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null) { - AddTypeResolverFactory(new SystemTextJsonTypeHandlerResolverFactory(jsonbClrTypes, jsonClrTypes, serializerOptions)); + AddTypeInfoResolver(new SystemTextJsonPocoTypeInfoResolver(jsonbClrTypes, jsonClrTypes, serializerOptions)); return this; } @@ -416,7 +378,7 @@ public NpgsqlSlimDataSourceBuilder UseSystemTextJson( /// public NpgsqlSlimDataSourceBuilder EnableRecords() { - AddTypeResolverFactory(new RecordTypeHandlerResolverFactory()); + AddTypeInfoResolver(new RecordTypeInfoResolver()); return this; } @@ -425,7 +387,25 @@ public NpgsqlSlimDataSourceBuilder EnableRecords() /// public NpgsqlSlimDataSourceBuilder EnableFullTextSearch() { - AddTypeResolverFactory(new FullTextSearchTypeHandlerResolverFactory()); + AddTypeInfoResolver(new FullTextSearchTypeInfoResolver()); + return this; + } + + /// + /// Sets up mappings for the PostgreSQL ltree extension types. + /// + public NpgsqlSlimDataSourceBuilder EnableLTree() + { + AddTypeInfoResolver(new LTreeTypeInfoResolver()); + return this; + } + + /// + /// Sets up mappings for extra conversions from PostgreSQL to .NET types. 
+ /// + public NpgsqlSlimDataSourceBuilder EnableExtraConversions() + { + AddTypeInfoResolver(new ExtraConversionsResolver()); return this; } @@ -536,11 +516,25 @@ _loggerFactory is null _periodicPasswordProvider, _periodicPasswordSuccessRefreshInterval, _periodicPasswordFailureRefreshInterval, - _resolverFactories, - _userTypeMappings, + Resolvers(), DefaultNameTranslator, _syncConnectionInitializer, _asyncConnectionInitializer); + + IEnumerable Resolvers() + { + var resolvers = new List(); + + if (_userTypeMapper.Items.Count > 0) + resolvers.Add(_userTypeMapper.Build()); + + if (GlobalTypeMapper.Instance.GetUserMappingsResolver() is { } globalUserTypeMapper) + resolvers.Add(globalUserTypeMapper); + + resolvers.AddRange(_resolverChain); + + return resolvers; + } } void ValidateMultiHost() diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlDbType.cs b/src/Npgsql/NpgsqlTypes/NpgsqlDbType.cs index 8df0ee874f..b05f623867 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlDbType.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlDbType.cs @@ -1,6 +1,8 @@ using System; +using System.Data; using Npgsql; -using Npgsql.TypeMapping; +using Npgsql.Internal.Postgres; +using static Npgsql.Util.Statics; #pragma warning disable CA1720 @@ -635,6 +637,420 @@ public enum NpgsqlDbType #endregion } +static class NpgsqlDbTypeExtensions +{ + internal static NpgsqlDbType? ToNpgsqlDbType(this DbType dbType) + => dbType switch + { + DbType.AnsiString => NpgsqlDbType.Text, + DbType.Binary => NpgsqlDbType.Bytea, + DbType.Byte => NpgsqlDbType.Smallint, + DbType.Boolean => NpgsqlDbType.Boolean, + DbType.Currency => NpgsqlDbType.Money, + DbType.Date => NpgsqlDbType.Date, + DbType.DateTime => LegacyTimestampBehavior ? 
NpgsqlDbType.Timestamp : NpgsqlDbType.TimestampTz, + DbType.Decimal => NpgsqlDbType.Numeric, + DbType.VarNumeric => NpgsqlDbType.Numeric, + DbType.Double => NpgsqlDbType.Double, + DbType.Guid => NpgsqlDbType.Uuid, + DbType.Int16 => NpgsqlDbType.Smallint, + DbType.Int32 => NpgsqlDbType.Integer, + DbType.Int64 => NpgsqlDbType.Bigint, + DbType.Single => NpgsqlDbType.Real, + DbType.String => NpgsqlDbType.Text, + DbType.Time => NpgsqlDbType.Time, + DbType.AnsiStringFixedLength => NpgsqlDbType.Text, + DbType.StringFixedLength => NpgsqlDbType.Text, + DbType.Xml => NpgsqlDbType.Xml, + DbType.DateTime2 => NpgsqlDbType.Timestamp, + DbType.DateTimeOffset => NpgsqlDbType.TimestampTz, + + DbType.Object => null, + DbType.SByte => null, + DbType.UInt16 => null, + DbType.UInt32 => null, + DbType.UInt64 => null, + + _ => throw new ArgumentOutOfRangeException(nameof(dbType), dbType, null) + }; + + public static DbType ToDbType(this NpgsqlDbType npgsqlDbType) + => npgsqlDbType switch + { + // Numeric types + NpgsqlDbType.Smallint => DbType.Int16, + NpgsqlDbType.Integer => DbType.Int32, + NpgsqlDbType.Bigint => DbType.Int64, + NpgsqlDbType.Real => DbType.Single, + NpgsqlDbType.Double => DbType.Double, + NpgsqlDbType.Numeric => DbType.Decimal, + NpgsqlDbType.Money => DbType.Currency, + + // Text types + NpgsqlDbType.Text => DbType.String, + NpgsqlDbType.Xml => DbType.Xml, + NpgsqlDbType.Varchar => DbType.String, + NpgsqlDbType.Char => DbType.String, + NpgsqlDbType.Name => DbType.String, + NpgsqlDbType.Citext => DbType.String, + NpgsqlDbType.Refcursor => DbType.Object, + NpgsqlDbType.Jsonb => DbType.Object, + NpgsqlDbType.Json => DbType.Object, + NpgsqlDbType.JsonPath => DbType.Object, + + // Date/time types + NpgsqlDbType.Timestamp => LegacyTimestampBehavior ? DbType.DateTime : DbType.DateTime2, + NpgsqlDbType.TimestampTz => LegacyTimestampBehavior ? 
DbType.DateTimeOffset : DbType.DateTime, + NpgsqlDbType.Date => DbType.Date, + NpgsqlDbType.Time => DbType.Time, + + // Misc data types + NpgsqlDbType.Bytea => DbType.Binary, + NpgsqlDbType.Boolean => DbType.Boolean, + NpgsqlDbType.Uuid => DbType.Guid, + + NpgsqlDbType.Unknown => DbType.Object, + + _ => DbType.Object + }; + + /// Can return null when a custom range type is used. + internal static string? ToUnqualifiedDataTypeName(this NpgsqlDbType npgsqlDbType) + => npgsqlDbType switch + { + // Numeric types + NpgsqlDbType.Smallint => "int2", + NpgsqlDbType.Integer => "int4", + NpgsqlDbType.Bigint => "int8", + NpgsqlDbType.Real => "float4", + NpgsqlDbType.Double => "float8", + NpgsqlDbType.Numeric => "numeric", + NpgsqlDbType.Money => "money", + + // Text types + NpgsqlDbType.Text => "text", + NpgsqlDbType.Xml => "xml", + NpgsqlDbType.Varchar => "varchar", + NpgsqlDbType.Char => "bpchar", + NpgsqlDbType.Name => "name", + NpgsqlDbType.Refcursor => "refcursor", + NpgsqlDbType.Jsonb => "jsonb", + NpgsqlDbType.Json => "json", + NpgsqlDbType.JsonPath => "jsonpath", + + // Date/time types + NpgsqlDbType.Timestamp => "timestamp", + NpgsqlDbType.TimestampTz => "timestamptz", + NpgsqlDbType.Date => "date", + NpgsqlDbType.Time => "time", + NpgsqlDbType.TimeTz => "timetz", + NpgsqlDbType.Interval => "interval", + + // Network types + NpgsqlDbType.Cidr => "cidr", + NpgsqlDbType.Inet => "inet", + NpgsqlDbType.MacAddr => "macaddr", + NpgsqlDbType.MacAddr8 => "macaddr8", + + // Full-text search types + NpgsqlDbType.TsQuery => "tsquery", + NpgsqlDbType.TsVector => "tsvector", + + // Geometry types + NpgsqlDbType.Box => "box", + NpgsqlDbType.Circle => "circle", + NpgsqlDbType.Line => "line", + NpgsqlDbType.LSeg => "lseg", + NpgsqlDbType.Path => "path", + NpgsqlDbType.Point => "point", + NpgsqlDbType.Polygon => "polygon", + + + // UInt types + NpgsqlDbType.Oid => "oid", + NpgsqlDbType.Xid => "xid", + NpgsqlDbType.Xid8 => "xid8", + NpgsqlDbType.Cid => "cid", + NpgsqlDbType.Regtype => 
"regtype", + NpgsqlDbType.Regconfig => "regconfig", + + // Misc types + NpgsqlDbType.Boolean => "bool", + NpgsqlDbType.Bytea => "bytea", + NpgsqlDbType.Uuid => "uuid", + NpgsqlDbType.Varbit => "varbit", + NpgsqlDbType.Bit => "bit", + + // Built-in range types + NpgsqlDbType.IntegerRange => "int4range", + NpgsqlDbType.BigIntRange => "int8range", + NpgsqlDbType.NumericRange => "numrange", + NpgsqlDbType.TimestampRange => "tsrange", + NpgsqlDbType.TimestampTzRange => "tstzrange", + NpgsqlDbType.DateRange => "daterange", + + // Built-in multirange types + NpgsqlDbType.IntegerMultirange => "int4multirange", + NpgsqlDbType.BigIntMultirange => "int8multirange", + NpgsqlDbType.NumericMultirange => "nummultirange", + NpgsqlDbType.TimestampMultirange => "tsmultirange", + NpgsqlDbType.TimestampTzMultirange => "tstzmultirange", + NpgsqlDbType.DateMultirange => "datemultirange", + + // Internal types + NpgsqlDbType.Int2Vector => "int2vector", + NpgsqlDbType.Oidvector => "oidvector", + NpgsqlDbType.PgLsn => "pg_lsn", + NpgsqlDbType.Tid => "tid", + NpgsqlDbType.InternalChar => "char", + + // Plugin types + NpgsqlDbType.Citext => "citext", + NpgsqlDbType.LQuery => "lquery", + NpgsqlDbType.LTree => "ltree", + NpgsqlDbType.LTxtQuery => "ltxtquery", + NpgsqlDbType.Hstore => "hstore", + NpgsqlDbType.Geometry => "geometry", + NpgsqlDbType.Geography => "geography", + + NpgsqlDbType.Unknown => "unknown", + + // Unknown cannot be composed + _ when npgsqlDbType.HasFlag(NpgsqlDbType.Array) && (npgsqlDbType & ~NpgsqlDbType.Array) == NpgsqlDbType.Unknown + => "unknown", + _ when npgsqlDbType.HasFlag(NpgsqlDbType.Range) && (npgsqlDbType & ~NpgsqlDbType.Range) == NpgsqlDbType.Unknown + => "unknown", + _ when npgsqlDbType.HasFlag(NpgsqlDbType.Multirange) && (npgsqlDbType & ~NpgsqlDbType.Multirange) == NpgsqlDbType.Unknown + => "unknown", + + _ => npgsqlDbType.HasFlag(NpgsqlDbType.Array) + ? ToUnqualifiedDataTypeName(npgsqlDbType & ~NpgsqlDbType.Array) is { } name ? 
"_" + name : null + : null // e.g. ranges + }; + + internal static string ToUnqualifiedDataTypeNameOrThrow(this NpgsqlDbType npgsqlDbType) + => npgsqlDbType.ToUnqualifiedDataTypeName() ?? throw new ArgumentOutOfRangeException(nameof(npgsqlDbType), npgsqlDbType, "Cannot convert NpgsqlDbType to DataTypeName"); + + /// Can return null when a plugin type or custom range type is used. + internal static DataTypeName? ToDataTypeName(this NpgsqlDbType npgsqlDbType) + => npgsqlDbType switch + { + // Numeric types + NpgsqlDbType.Smallint => DataTypeNames.Int2, + NpgsqlDbType.Integer => DataTypeNames.Int4, + NpgsqlDbType.Bigint => DataTypeNames.Int8, + NpgsqlDbType.Real => DataTypeNames.Float4, + NpgsqlDbType.Double => DataTypeNames.Float8, + NpgsqlDbType.Numeric => DataTypeNames.Numeric, + NpgsqlDbType.Money => DataTypeNames.Money, + + // Text types + NpgsqlDbType.Text => DataTypeNames.Text, + NpgsqlDbType.Xml => DataTypeNames.Xml, + NpgsqlDbType.Varchar => DataTypeNames.Varchar, + NpgsqlDbType.Char => DataTypeNames.Bpchar, + NpgsqlDbType.Name => DataTypeNames.Name, + NpgsqlDbType.Refcursor => DataTypeNames.RefCursor, + NpgsqlDbType.Jsonb => DataTypeNames.Jsonb, + NpgsqlDbType.Json => DataTypeNames.Json, + NpgsqlDbType.JsonPath => DataTypeNames.Jsonpath, + + // Date/time types + NpgsqlDbType.Timestamp => DataTypeNames.Timestamp, + NpgsqlDbType.TimestampTz => DataTypeNames.TimestampTz, + NpgsqlDbType.Date => DataTypeNames.Date, + NpgsqlDbType.Time => DataTypeNames.Time, + NpgsqlDbType.TimeTz => DataTypeNames.TimeTz, + NpgsqlDbType.Interval => DataTypeNames.Interval, + + // Network types + NpgsqlDbType.Cidr => DataTypeNames.Cidr, + NpgsqlDbType.Inet => DataTypeNames.Inet, + NpgsqlDbType.MacAddr => DataTypeNames.MacAddr, + NpgsqlDbType.MacAddr8 => DataTypeNames.MacAddr8, + + // Full-text search types + NpgsqlDbType.TsQuery => DataTypeNames.TsQuery, + NpgsqlDbType.TsVector => DataTypeNames.TsVector, + + // Geometry types + NpgsqlDbType.Box => DataTypeNames.Box, + 
NpgsqlDbType.Circle => DataTypeNames.Circle, + NpgsqlDbType.Line => DataTypeNames.Line, + NpgsqlDbType.LSeg => DataTypeNames.LSeg, + NpgsqlDbType.Path => DataTypeNames.Path, + NpgsqlDbType.Point => DataTypeNames.Point, + NpgsqlDbType.Polygon => DataTypeNames.Polygon, + + // UInt types + NpgsqlDbType.Oid => DataTypeNames.Oid, + NpgsqlDbType.Xid => DataTypeNames.Xid, + NpgsqlDbType.Xid8 => DataTypeNames.Xid8, + NpgsqlDbType.Cid => DataTypeNames.Cid, + NpgsqlDbType.Regtype => DataTypeNames.RegType, + NpgsqlDbType.Regconfig => DataTypeNames.RegConfig, + + // Misc types + NpgsqlDbType.Boolean => DataTypeNames.Bool, + NpgsqlDbType.Bytea => DataTypeNames.Bytea, + NpgsqlDbType.Uuid => DataTypeNames.Uuid, + NpgsqlDbType.Varbit => DataTypeNames.Varbit, + NpgsqlDbType.Bit => DataTypeNames.Bit, + + // Built-in range types + NpgsqlDbType.IntegerRange => DataTypeNames.Int4Range, + NpgsqlDbType.BigIntRange => DataTypeNames.Int8Range, + NpgsqlDbType.NumericRange => DataTypeNames.NumRange, + NpgsqlDbType.TimestampRange => DataTypeNames.TsRange, + NpgsqlDbType.TimestampTzRange => DataTypeNames.TsTzRange, + NpgsqlDbType.DateRange => DataTypeNames.DateRange, + + // Internal types + NpgsqlDbType.Int2Vector => DataTypeNames.Int2Vector, + NpgsqlDbType.Oidvector => DataTypeNames.OidVector, + NpgsqlDbType.PgLsn => DataTypeNames.PgLsn, + NpgsqlDbType.Tid => DataTypeNames.Tid, + NpgsqlDbType.InternalChar => DataTypeNames.Char, + + // Special types + NpgsqlDbType.Unknown => DataTypeNames.Unknown, + + // Unknown cannot be composed + _ when npgsqlDbType.HasFlag(NpgsqlDbType.Array) && (npgsqlDbType & ~NpgsqlDbType.Array) == NpgsqlDbType.Unknown + => DataTypeNames.Unknown, + _ when npgsqlDbType.HasFlag(NpgsqlDbType.Range) && (npgsqlDbType & ~NpgsqlDbType.Range) == NpgsqlDbType.Unknown + => DataTypeNames.Unknown, + _ when npgsqlDbType.HasFlag(NpgsqlDbType.Multirange) && (npgsqlDbType & ~NpgsqlDbType.Multirange) == NpgsqlDbType.Unknown + => DataTypeNames.Unknown, + + // If both multirange and array 
are set we first remove array, so array is added to the outermost datatypename. + _ when npgsqlDbType.HasFlag(NpgsqlDbType.Array) + => ToDataTypeName(npgsqlDbType & ~NpgsqlDbType.Array)?.ToArrayName(), + _ when npgsqlDbType.HasFlag(NpgsqlDbType.Multirange) + => ToDataTypeName((npgsqlDbType | NpgsqlDbType.Range) & ~NpgsqlDbType.Multirange)?.ToDefaultMultirangeName(), + + // Plugin types don't have a stable fully qualified name. + _ => null + }; + + internal static NpgsqlDbType? ToNpgsqlDbType(this DataTypeName dataTypeName) => ToNpgsqlDbType(dataTypeName.UnqualifiedName); + /// Should not be used with display names, first normalize it instead. + internal static NpgsqlDbType? ToNpgsqlDbType(string dataTypeName) + { + var unqualifiedName = dataTypeName; + if (dataTypeName.IndexOf(".", StringComparison.Ordinal) is not -1 and var index) + unqualifiedName = dataTypeName.Substring(0, index); + + return unqualifiedName switch + { + // Numeric types + "int2" => NpgsqlDbType.Smallint, + "int4" => NpgsqlDbType.Integer, + "int8" => NpgsqlDbType.Bigint, + "float4" => NpgsqlDbType.Real, + "float8" => NpgsqlDbType.Double, + "numeric" => NpgsqlDbType.Numeric, + "money" => NpgsqlDbType.Money, + + // Text types + "text" => NpgsqlDbType.Text, + "xml" => NpgsqlDbType.Xml, + "varchar" => NpgsqlDbType.Varchar, + "bpchar" => NpgsqlDbType.Char, + "name" => NpgsqlDbType.Name, + "refcursor" => NpgsqlDbType.Refcursor, + "jsonb" => NpgsqlDbType.Jsonb, + "json" => NpgsqlDbType.Json, + "jsonpath" => NpgsqlDbType.JsonPath, + + // Date/time types + "timestamp" => NpgsqlDbType.Timestamp, + "timestamptz" => NpgsqlDbType.TimestampTz, + "date" => NpgsqlDbType.Date, + "time" => NpgsqlDbType.Time, + "timetz" => NpgsqlDbType.TimeTz, + "interval" => NpgsqlDbType.Interval, + + // Network types + "cidr" => NpgsqlDbType.Cidr, + "inet" => NpgsqlDbType.Inet, + "macaddr" => NpgsqlDbType.MacAddr, + "macaddr8" => NpgsqlDbType.MacAddr8, + + // Full-text search types + "tsquery" => NpgsqlDbType.TsQuery, + 
"tsvector" => NpgsqlDbType.TsVector, + + // Geometry types + "box" => NpgsqlDbType.Box, + "circle" => NpgsqlDbType.Circle, + "line" => NpgsqlDbType.Line, + "lseg" => NpgsqlDbType.LSeg, + "path" => NpgsqlDbType.Path, + "point" => NpgsqlDbType.Point, + "polygon" => NpgsqlDbType.Polygon, + + // UInt types + "oid" => NpgsqlDbType.Oid, + "xid" => NpgsqlDbType.Xid, + "xid8" => NpgsqlDbType.Xid8, + "cid" => NpgsqlDbType.Cid, + "regtype" => NpgsqlDbType.Regtype, + "regconfig" => NpgsqlDbType.Regconfig, + + // Misc types + "bool" => NpgsqlDbType.Boolean, + "bytea" => NpgsqlDbType.Bytea, + "uuid" => NpgsqlDbType.Uuid, + "varbit" => NpgsqlDbType.Varbit, + "bit" => NpgsqlDbType.Bit, + + // Built-in range types + "int4range" => NpgsqlDbType.IntegerRange, + "int8range" => NpgsqlDbType.BigIntRange, + "numrange" => NpgsqlDbType.NumericRange, + "tsrange" => NpgsqlDbType.TimestampRange, + "tstzrange" => NpgsqlDbType.TimestampTzRange, + "daterange" => NpgsqlDbType.DateRange, + + // Built-in multirange types + "int4multirange" => NpgsqlDbType.IntegerMultirange, + "int8multirange" => NpgsqlDbType.BigIntMultirange, + "nummultirange" => NpgsqlDbType.NumericMultirange, + "tsmultirange" => NpgsqlDbType.TimestampMultirange, + "tstzmultirange" => NpgsqlDbType.TimestampTzMultirange, + "datemultirange" => NpgsqlDbType.DateMultirange, + + // Internal types + "int2vector" => NpgsqlDbType.Int2Vector, + "oidvector" => NpgsqlDbType.Oidvector, + "pg_lsn" => NpgsqlDbType.PgLsn, + "tid" => NpgsqlDbType.Tid, + "char" => NpgsqlDbType.InternalChar, + + // Plugin types + "citext" => NpgsqlDbType.Citext, + "lquery" => NpgsqlDbType.LQuery, + "ltree" => NpgsqlDbType.LTree, + "ltxtquery" => NpgsqlDbType.LTxtQuery, + "hstore" => NpgsqlDbType.Hstore, + "geometry" => NpgsqlDbType.Geometry, + "geography" => NpgsqlDbType.Geography, + + _ when unqualifiedName.Contains("unknown") + => !unqualifiedName.StartsWith("_", StringComparison.Ordinal) + ? 
NpgsqlDbType.Unknown + : null, + _ when unqualifiedName.StartsWith("_", StringComparison.Ordinal) + => ToNpgsqlDbType(unqualifiedName.Substring(1)) is { } elementNpgsqlDbType + ? elementNpgsqlDbType | NpgsqlDbType.Array + : null, + // e.g. custom ranges, plugin types etc. + _ => null + }; + } +} + /// /// Represents a built-in PostgreSQL type as it appears in pg_type, including its name and OID. /// Extension types with variable OIDs are not represented. @@ -669,4 +1085,4 @@ internal BuiltInPostgresType( MultirangeName = multirangeName; MultirangeOID = multirangeOID; } -} \ No newline at end of file +} diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlInterval.cs b/src/Npgsql/NpgsqlTypes/NpgsqlInterval.cs index f3c1d49139..f4b51ba4a9 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlInterval.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlInterval.cs @@ -1,8 +1,4 @@ using System; -using System.Collections; -using System.Collections.Generic; -using System.Text; -using Npgsql; // ReSharper disable once CheckNamespace namespace NpgsqlTypes; diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs b/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs index e83b69edad..cd68ecc3c8 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs @@ -89,7 +89,7 @@ public static NpgsqlTsQuery Parse(string value) var pos = 0; var expectingBinOp = false; - var lastFollowedByOpDistance = -1; + short lastFollowedByOpDistance = -1; NextToken: if (pos >= value.Length) @@ -125,7 +125,7 @@ public static NpgsqlTsQuery Parse(string value) { lastFollowedByOpDistance = 1; } - else if (!int.TryParse(followedByOpDistanceString, out lastFollowedByOpDistance) + else if (!short.TryParse(followedByOpDistanceString, out lastFollowedByOpDistance) || lastFollowedByOpDistance < 0) { throw new FormatException("Syntax error in tsquery. 
Malformed distance in 'followed by' operator."); @@ -172,7 +172,7 @@ public static NpgsqlTsQuery Parse(string value) var tsOp = opStack.Pop(); valStack.Push((char)tsOp switch { - '&' => (NpgsqlTsQuery)new NpgsqlTsQueryAnd(left, right), + '&' => new NpgsqlTsQueryAnd(left, right), '|' => new NpgsqlTsQueryOr(left, right), '<' => new NpgsqlTsQueryFollowedBy(left, tsOp.FollowedByDistance, right), _ => throw new FormatException("Syntax error in tsquery") @@ -383,9 +383,9 @@ public override bool Equals(object? obj) readonly struct NpgsqlTsQueryOperator { public readonly char Char; - public readonly int FollowedByDistance; + public readonly short FollowedByDistance; - public NpgsqlTsQueryOperator(char character, int followedByDistance) + public NpgsqlTsQueryOperator(char character, short followedByDistance) { Char = character; FollowedByDistance = followedByDistance; @@ -670,7 +670,7 @@ public sealed class NpgsqlTsQueryFollowedBy : NpgsqlTsQueryBinOp /// /// The distance between the 2 nodes, in lexemes. /// - public int Distance { get; set; } + public short Distance { get; set; } /// /// Creates a "followed by" operator, specifying 2 child nodes and the @@ -681,7 +681,7 @@ public sealed class NpgsqlTsQueryFollowedBy : NpgsqlTsQueryBinOp /// public NpgsqlTsQueryFollowedBy( NpgsqlTsQuery left, - int distance, + short distance, NpgsqlTsQuery right) : base(NodeKind.Phrase, left, right) { @@ -741,4 +741,4 @@ public override bool Equals(NpgsqlTsQuery? other) /// public override int GetHashCode() => Kind.GetHashCode(); -} \ No newline at end of file +} diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs b/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs index 792a215774..c4a69a0c58 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs @@ -405,19 +405,17 @@ public override int GetHashCode() } /// -/// Represents a PostgreSQL inet type, which is a combination of an IPAddress and a -/// subnet mask. 
+/// Represents a PostgreSQL inet type, which is a combination of an IPAddress and a subnet mask. /// /// /// https://www.postgresql.org/docs/current/static/datatype-net-types.html /// -[Obsolete("Use ValueTuple instead")] -public struct NpgsqlInet : IEquatable +public readonly record struct NpgsqlInet { - public IPAddress Address { get; set; } - public int Netmask { get; set; } + public IPAddress Address { get; } + public byte Netmask { get; } - public NpgsqlInet(IPAddress address, int netmask) + public NpgsqlInet(IPAddress address, byte netmask) { if (address.AddressFamily != AddressFamily.InterNetwork && address.AddressFamily != AddressFamily.InterNetworkV6) throw new ArgumentException("Only IPAddress of InterNetwork or InterNetworkV6 address families are accepted", nameof(address)); @@ -427,76 +425,92 @@ public NpgsqlInet(IPAddress address, int netmask) } public NpgsqlInet(IPAddress address) + : this(address, (byte)(address.AddressFamily == AddressFamily.InterNetwork ? 32 : 128)) { - if (address.AddressFamily != AddressFamily.InterNetwork && address.AddressFamily != AddressFamily.InterNetworkV6) - throw new ArgumentException("Only IPAddress of InterNetwork or InterNetworkV6 address families are accepted", nameof(address)); - - Address = address; - Netmask = address.AddressFamily == AddressFamily.InterNetwork ? 
32 : 128; } public NpgsqlInet(string addr) { - if (addr.IndexOf('/') > 0) + switch (addr.Split('/')) { - var addrbits = addr.Split('/'); - if (addrbits.GetUpperBound(0) != 1) - { - throw new FormatException("Invalid number of parts in CIDR specification"); - } + case { Length: 2 } segments: + Address = IPAddress.Parse(segments[0]); + Netmask = byte.Parse(segments[1]); + return; - Address = IPAddress.Parse(addrbits[0]); - Netmask = int.Parse(addrbits[1]); - } - else - { - Address = IPAddress.Parse(addr); + case { Length: 1 } segments: + Address = IPAddress.Parse(segments[0]); Netmask = 32; + return; + + default: + throw new FormatException("Invalid number of parts in CIDR specification"); } } public override string ToString() - { - if ((Address.AddressFamily == AddressFamily.InterNetwork && Netmask == 32) || - (Address.AddressFamily == AddressFamily.InterNetworkV6 && Netmask == 128)) - { - return Address.ToString(); - } + => (Address.AddressFamily == AddressFamily.InterNetwork && Netmask == 32) || + (Address.AddressFamily == AddressFamily.InterNetworkV6 && Netmask == 128) + ? Address.ToString() + : $"{Address}/{Netmask}"; - return $"{Address}/{Netmask}"; - } + public static explicit operator IPAddress(NpgsqlInet inet) + => inet.Address; + + public static explicit operator NpgsqlInet(IPAddress ip) + => new(ip); - // ReSharper disable once InconsistentNaming - public static IPAddress ToIPAddress(NpgsqlInet inet) + public void Deconstruct(out IPAddress address, out byte netmask) { - if (inet.Netmask != 32) - throw new InvalidCastException("Cannot cast CIDR network to address"); - return inet.Address; + address = Address; + netmask = Netmask; } +} - public static explicit operator IPAddress(NpgsqlInet inet) => ToIPAddress(inet); +/// +/// Represents a PostgreSQL cidr type. 
+/// +/// +/// https://www.postgresql.org/docs/current/static/datatype-net-types.html +/// +public readonly record struct NpgsqlCidr +{ + public IPAddress Address { get; } + public byte Netmask { get; } - public static NpgsqlInet ToNpgsqlInet(IPAddress? ip) - => ip is null ? default : new NpgsqlInet(ip); + public NpgsqlCidr(IPAddress address, byte netmask) + { + if (address.AddressFamily != AddressFamily.InterNetwork && address.AddressFamily != AddressFamily.InterNetworkV6) + throw new ArgumentException("Only IPAddress of InterNetwork or InterNetworkV6 address families are accepted", nameof(address)); - public static implicit operator NpgsqlInet(IPAddress ip) => ToNpgsqlInet(ip); + Address = address; + Netmask = netmask; + } - public void Deconstruct(out IPAddress address, out int netmask) + public NpgsqlCidr(string addr) { - address = Address; - netmask = Netmask; + switch (addr.Split('/')) + { + case { Length: 2 } segments: + Address = IPAddress.Parse(segments[0]); + Netmask = byte.Parse(segments[1]); + return; + + case { Length: 1 } segments: + throw new FormatException("Missing netmask"); + default: + throw new FormatException("Invalid number of parts in CIDR specification"); + } } - public bool Equals(NpgsqlInet other) => Address.Equals(other.Address) && Netmask == other.Netmask; - - public override bool Equals(object? 
obj) - => obj is NpgsqlInet inet && Equals(inet); - - public override int GetHashCode() - => HashCode.Combine(Address, Netmask); + public override string ToString() + => $"{Address}/{Netmask}"; - public static bool operator ==(NpgsqlInet x, NpgsqlInet y) => x.Equals(y); - public static bool operator !=(NpgsqlInet x, NpgsqlInet y) => !(x == y); + public void Deconstruct(out IPAddress address, out byte netmask) + { + address = Address; + netmask = Netmask; + } } /// diff --git a/src/Npgsql/PoolManager.cs b/src/Npgsql/PoolManager.cs index adc3c75fa6..d1086b5196 100644 --- a/src/Npgsql/PoolManager.cs +++ b/src/Npgsql/PoolManager.cs @@ -1,7 +1,5 @@ using System; using System.Collections.Concurrent; -using System.Diagnostics.CodeAnalysis; -using System.Threading; namespace Npgsql; diff --git a/src/Npgsql/PoolingDataSource.cs b/src/Npgsql/PoolingDataSource.cs index 60b288520c..0421953f4c 100644 --- a/src/Npgsql/PoolingDataSource.cs +++ b/src/Npgsql/PoolingDataSource.cs @@ -230,9 +230,9 @@ bool CheckIdleConnector([NotNullWhen(true)] NpgsqlConnector? connector) // The connector directly references the data source type mapper into the connector, to protect it against changes by a concurrent // ReloadTypes. We update them here before returning the connector from the pool. 
- Debug.Assert(TypeMapper is not null); + Debug.Assert(SerializerOptions is not null); Debug.Assert(DatabaseInfo is not null); - connector.TypeMapper = TypeMapper; + connector.SerializerOptions = SerializerOptions; connector.DatabaseInfo = DatabaseInfo; Debug.Assert(connector.State == ConnectorState.Ready, diff --git a/src/Npgsql/PostgresDatabaseInfo.cs b/src/Npgsql/PostgresDatabaseInfo.cs index 4d640fb261..a4e7a33462 100644 --- a/src/Npgsql/PostgresDatabaseInfo.cs +++ b/src/Npgsql/PostgresDatabaseInfo.cs @@ -3,14 +3,13 @@ using System.Diagnostics; using System.Globalization; using System.Linq; -using System.Runtime.CompilerServices; using System.Text; using System.Threading.Tasks; using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Logging.Abstractions; using Npgsql.BackendMessages; using Npgsql.Internal; using Npgsql.PostgresTypes; -using Npgsql.TypeMapping; using Npgsql.Util; using static Npgsql.Util.Statics; @@ -80,6 +79,10 @@ internal PostgresDatabaseInfo(NpgsqlConnector conn) : base(conn.Host!, conn.Port, conn.Database!, conn.PostgresParameters["server_version"]) => _connectionLogger = conn.LoggingConfiguration.ConnectionLogger; + private protected PostgresDatabaseInfo(string host, int port, string databaseName, string serverVersion) + : base(host, port, databaseName, serverVersion) + => _connectionLogger = NullLogger.Instance; + /// /// Loads database information from the PostgreSQL database specified by . /// @@ -142,7 +145,7 @@ JOIN pg_namespace AS ns ON (ns.oid = typnamespace) WHERE typtype IN ('b', 'r', 'm', 'e', 'd') OR -- Base, range, multirange, enum, domain (typtype = 'c' AND {(loadTableComposites ? 
"ns.nspname NOT IN ('pg_catalog', 'information_schema', 'pg_toast')" : "relkind='c'")}) OR -- User-defined free-standing composites (not table composites) by default - (typtype = 'p' AND typname IN ('record', 'void')) OR -- Some special supported pseudo-types + (typtype = 'p' AND typname IN ('record', 'void', 'unknown')) OR -- Some special supported pseudo-types (typtype = 'a' AND ( -- Array of... elemtyptype IN ('b', 'r', 'm', 'e', 'd') OR -- Array of base, range, multirange, enum, domain (elemtyptype = 'p' AND elemtypname IN ('record', 'void')) OR -- Arrays of special supported pseudo-types @@ -543,4 +546,4 @@ static string SanitizeForReplicationConnection(string str) static string ReadNonNullableString(NpgsqlReadBuffer buffer) => buffer.ReadString(buffer.ReadInt32()); } -} \ No newline at end of file +} diff --git a/src/Npgsql/PostgresMinimalDatabaseInfo.cs b/src/Npgsql/PostgresMinimalDatabaseInfo.cs index 31b2d24f1d..94b76f541c 100644 --- a/src/Npgsql/PostgresMinimalDatabaseInfo.cs +++ b/src/Npgsql/PostgresMinimalDatabaseInfo.cs @@ -59,4 +59,24 @@ internal PostgresMinimalDatabaseInfo(NpgsqlConnector conn) HasIntegerDateTimes = !conn.PostgresParameters.TryGetValue("integer_datetimes", out var intDateTimes) || intDateTimes == "on"; } -} \ No newline at end of file + + // TODO, split database info and type catalog. + internal PostgresMinimalDatabaseInfo() + : base("minimal", 5432, "minimal", "14") + { + } + + static PostgresMinimalDatabaseInfo? 
_defaultTypeCatalog; + internal static PostgresMinimalDatabaseInfo DefaultTypeCatalog + { + get + { + if (_defaultTypeCatalog is not null) + return _defaultTypeCatalog; + + var catalog = new PostgresMinimalDatabaseInfo(); + catalog.ProcessTypes(); + return _defaultTypeCatalog = catalog; + } + } +} diff --git a/src/Npgsql/PostgresTypes/PostgresArrayType.cs b/src/Npgsql/PostgresTypes/PostgresArrayType.cs index cfeb89c736..7f0b2246d3 100644 --- a/src/Npgsql/PostgresTypes/PostgresArrayType.cs +++ b/src/Npgsql/PostgresTypes/PostgresArrayType.cs @@ -1,6 +1,4 @@ -using System.Diagnostics; - -namespace Npgsql.PostgresTypes; +namespace Npgsql.PostgresTypes; /// /// Represents a PostgreSQL array data type, which can hold several multiple values in a single column. @@ -18,10 +16,9 @@ public class PostgresArrayType : PostgresType /// /// Constructs a representation of a PostgreSQL array data type. /// - protected internal PostgresArrayType(string ns, string internalName, uint oid, PostgresType elementPostgresType) - : base(ns, elementPostgresType.Name + "[]", internalName, oid) + protected internal PostgresArrayType(string ns, string name, uint oid, PostgresType elementPostgresType) + : base(ns, name, oid) { - Debug.Assert(internalName == '_' + elementPostgresType.InternalName); Element = elementPostgresType; Element.Array = this; } @@ -34,4 +31,4 @@ internal override string GetPartialNameWithFacets(int typeModifier) internal override PostgresFacets GetFacets(int typeModifier) => Element.GetFacets(typeModifier); -} \ No newline at end of file +} diff --git a/src/Npgsql/PostgresTypes/PostgresBaseType.cs b/src/Npgsql/PostgresTypes/PostgresBaseType.cs index de9a7bc13e..a7cb0857cc 100644 --- a/src/Npgsql/PostgresTypes/PostgresBaseType.cs +++ b/src/Npgsql/PostgresTypes/PostgresBaseType.cs @@ -7,8 +7,8 @@ namespace Npgsql.PostgresTypes; public class PostgresBaseType : PostgresType { /// - protected internal PostgresBaseType(string ns, string internalName, uint oid) - : base(ns, 
TranslateInternalName(internalName), internalName, oid) + protected internal PostgresBaseType(string ns, string name, uint oid) + : base(ns, name, oid) {} /// @@ -68,27 +68,4 @@ internal override PostgresFacets GetFacets(int typeModifier) return PostgresFacets.None; } } - - // The type names returned by PostgreSQL are internal names (int4 instead of - // integer). We perform translation to the user-facing standard names. - // https://www.postgresql.org/docs/current/static/datatype.html#DATATYPE-TABLE - static string TranslateInternalName(string internalName) - => internalName switch - { - "bool" => "boolean", - "bpchar" => "character", - "decimal" => "numeric", - "float4" => "real", - "float8" => "double precision", - "int2" => "smallint", - "int4" => "integer", - "int8" => "bigint", - "time" => "time without time zone", - "timestamp" => "timestamp without time zone", - "timetz" => "time with time zone", - "timestamptz" => "timestamp with time zone", - "varbit" => "bit varying", - "varchar" => "character varying", - _ => internalName - }; -} \ No newline at end of file +} diff --git a/src/Npgsql/PostgresTypes/PostgresMultirangeType.cs b/src/Npgsql/PostgresTypes/PostgresMultirangeType.cs index 3d35783263..2e57075cb3 100644 --- a/src/Npgsql/PostgresTypes/PostgresMultirangeType.cs +++ b/src/Npgsql/PostgresTypes/PostgresMultirangeType.cs @@ -23,4 +23,4 @@ protected internal PostgresMultirangeType(string ns, string name, uint oid, Post Subrange = rangePostgresType; Subrange.Multirange = this; } -} \ No newline at end of file +} diff --git a/src/Npgsql/PostgresTypes/PostgresType.cs b/src/Npgsql/PostgresTypes/PostgresType.cs index 543cf3dcfd..8cc5fb7b63 100644 --- a/src/Npgsql/PostgresTypes/PostgresType.cs +++ b/src/Npgsql/PostgresTypes/PostgresType.cs @@ -1,5 +1,5 @@ using System; -using System.Linq; +using Npgsql.Internal.Postgres; namespace Npgsql.PostgresTypes; @@ -22,23 +22,11 @@ public abstract class PostgresType /// The data type's namespace (or schema). 
/// The data type's name. /// The data type's OID. - protected PostgresType(string ns, string name, uint oid) - : this(ns, name, name, oid) {} - - /// - /// Constructs a representation of a PostgreSQL data type. - /// - /// The data type's namespace (or schema). - /// The data type's name. - /// The data type's internal name (e.g. _int4 for integer[]). - /// The data type's OID. - protected PostgresType(string ns, string name, string internalName, uint oid) + private protected PostgresType(string ns, string name, uint oid) { - Namespace = ns; - Name = name; - FullName = Namespace + '.' + Name; - InternalName = internalName; + DataTypeName = DataTypeName.FromDisplayName(name, ns); OID = oid; + FullName = Namespace + "." + Name; } #endregion @@ -53,7 +41,7 @@ protected PostgresType(string ns, string name, string internalName, uint oid) /// /// The data type's namespace (or schema). /// - public string Namespace { get; } + public string Namespace => DataTypeName.Schema; /// /// The data type's name. @@ -62,24 +50,26 @@ protected PostgresType(string ns, string name, string internalName, uint oid) /// Note that this is the standard, user-displayable type name (e.g. integer[]) rather than the internal /// PostgreSQL name as it is in pg_type (_int4). See for the latter. /// - public string Name { get; } + public string Name => DataTypeName.UnqualifiedDisplayName; /// /// The full name of the backend type, including its namespace. /// public string FullName { get; } + internal DataTypeName DataTypeName { get; } + /// /// A display name for this backend type, including the namespace unless it is pg_catalog (the namespace /// for all built-in types). /// - public string DisplayName => Namespace == "pg_catalog" ? Name : FullName; + public string DisplayName => DataTypeName.DisplayName; /// /// The data type's internal PostgreSQL name (e.g. _int4 not integer[]). /// See for a more user-friendly name. 
/// - public string InternalName { get; } + public string InternalName => DataTypeName.UnqualifiedName; /// /// If a PostgreSQL array type exists for this type, it will be referenced here. @@ -111,4 +101,21 @@ internal string GetDisplayNameWithFacets(int typeModifier) /// Returns a string that represents the current object. /// public override string ToString() => DisplayName; -} \ No newline at end of file + + PostgresType? _representationalType; + + /// Canonizes (nested) domain types to underlying types, does not handle composites. + internal PostgresType GetRepresentationalType() + { + return _representationalType ??= Core(this) ?? throw new InvalidOperationException("Couldn't map type to representational type"); + + static PostgresType? Core(PostgresType? postgresType) + => (postgresType as PostgresDomainType)?.BaseType ?? postgresType switch + { + PostgresArrayType { Element: PostgresDomainType domain } => Core(domain.BaseType)?.Array, + PostgresMultirangeType { Subrange.Subtype: PostgresDomainType domain } => domain.BaseType.Range?.Multirange, + PostgresRangeType { Subtype: PostgresDomainType domain } => domain.Range, + var type => type + }; + } +} diff --git a/src/Npgsql/PostgresTypes/PostgresUnknownType.cs b/src/Npgsql/PostgresTypes/PostgresUnknownType.cs index a520df9696..bbe952726d 100644 --- a/src/Npgsql/PostgresTypes/PostgresUnknownType.cs +++ b/src/Npgsql/PostgresTypes/PostgresUnknownType.cs @@ -3,7 +3,7 @@ /// /// Represents a PostgreSQL data type that isn't known to Npgsql and cannot be handled. 
/// -public class UnknownBackendType : PostgresType +public sealed class UnknownBackendType : PostgresType { internal static readonly PostgresType Instance = new UnknownBackendType(); @@ -13,4 +13,4 @@ public class UnknownBackendType : PostgresType #pragma warning disable CA2222 // Do not decrease inherited member visibility UnknownBackendType() : base("", "", 0) { } #pragma warning restore CA2222 // Do not decrease inherited member visibility -} \ No newline at end of file +} diff --git a/src/Npgsql/PreparedStatement.cs b/src/Npgsql/PreparedStatement.cs index 015adc5dd3..9186df77c9 100644 --- a/src/Npgsql/PreparedStatement.cs +++ b/src/Npgsql/PreparedStatement.cs @@ -2,6 +2,7 @@ using System.Collections.Generic; using System.Diagnostics; using Npgsql.BackendMessages; +using Npgsql.Internal.Postgres; namespace Npgsql; @@ -46,9 +47,7 @@ sealed class PreparedStatement /// Contains the handler types for a prepared statement's parameters, for overloaded cases (same SQL, different param types) /// Only populated after the statement has been prepared (i.e. null for candidates). /// - internal Type[]? HandlerParamTypes { get; private set; } - - static readonly Type[] EmptyParamTypes = Type.EmptyTypes; + PgTypeId[]? 
ConverterParamTypes { get; set; } internal static PreparedStatement CreateExplicit( PreparedStatementManager manager, @@ -81,22 +80,22 @@ internal void SetParamTypes(List parameters) { if (parameters.Count == 0) { - HandlerParamTypes = EmptyParamTypes; + ConverterParamTypes = Array.Empty(); return; } - HandlerParamTypes = new Type[parameters.Count]; + ConverterParamTypes = new PgTypeId[parameters.Count]; for (var i = 0; i < parameters.Count; i++) - HandlerParamTypes[i] = parameters[i].Handler!.GetType(); + ConverterParamTypes[i] = parameters[i].PgTypeId; } internal bool DoParametersMatch(List parameters) { - if (HandlerParamTypes!.Length != parameters.Count) + if (ConverterParamTypes!.Length != parameters.Count) return false; - for (var i = 0; i < HandlerParamTypes.Length; i++) - if (HandlerParamTypes[i] != parameters[i].Handler!.GetType()) + for (var i = 0; i < ConverterParamTypes.Length; i++) + if (ConverterParamTypes[i] != parameters[i].PgTypeId) return false; return true; @@ -170,4 +169,4 @@ enum PreparedState /// The statement was invalidated because e.g. table schema has changed since preparation. /// Invalidated -} \ No newline at end of file +} diff --git a/src/Npgsql/PreparedTextReader.cs b/src/Npgsql/PreparedTextReader.cs index 8a2cf806d2..8862daa3e7 100644 --- a/src/Npgsql/PreparedTextReader.cs +++ b/src/Npgsql/PreparedTextReader.cs @@ -27,7 +27,7 @@ public void Init(string str, NpgsqlReadBuffer.ColumnStream stream) public override int Peek() { CheckDisposed(); - + return _position < _str.Length ? _str[_position] : -1; @@ -36,7 +36,7 @@ public override int Peek() public override int Read() { CheckDisposed(); - + return _position < _str.Length ? 
_str[_position++] : -1; @@ -82,7 +82,7 @@ public override Task ReadAsync(char[] buffer, int index, int count) public #if !NETSTANDARD2_0 - override + override #endif ValueTask ReadAsync(Memory buffer, CancellationToken cancellationToken = default) => new(Read(buffer.Span)); @@ -91,7 +91,7 @@ public override Task ReadAsync(char[] buffer, int index, int count) public override string ReadToEnd() { CheckDisposed(); - + if (_position == _str.Length) return string.Empty; @@ -108,6 +108,12 @@ void CheckDisposed() ThrowHelper.ThrowObjectDisposedException(nameof(PreparedTextReader)); } + public void Restart() + { + CheckDisposed(); + _position = 0; + } + protected override void Dispose(bool disposing) { base.Dispose(disposing); diff --git a/src/Npgsql/Properties/AssemblyInfo.cs b/src/Npgsql/Properties/AssemblyInfo.cs index e71a69a9dd..666ee3f170 100644 --- a/src/Npgsql/Properties/AssemblyInfo.cs +++ b/src/Npgsql/Properties/AssemblyInfo.cs @@ -33,7 +33,7 @@ "7aa16153bcea2ae9a471145624826f60d7c8e71cd025b554a0177bd935a78096" + "29f0a7afc778ebb4ad033e1bf512c1a9c6ceea26b077bc46cac93800435e77ee")] -[assembly: InternalsVisibleTo("Npgsql.NodaTime.Tests, PublicKey=" + +[assembly: InternalsVisibleTo("Npgsql.PluginTests, PublicKey=" + "0024000004800000940000000602000000240000525341310004000001000100" + "2b3c590b2a4e3d347e6878dc0ff4d21eb056a50420250c6617044330701d35c9" + "8078a5df97a62d83c9a2db2d072523a8fc491398254c6b89329b8c1dcef43a1e" + diff --git a/src/Npgsql/Properties/NpgsqlStrings.Designer.cs b/src/Npgsql/Properties/NpgsqlStrings.Designer.cs index 5f0847543f..707240754c 100644 --- a/src/Npgsql/Properties/NpgsqlStrings.Designer.cs +++ b/src/Npgsql/Properties/NpgsqlStrings.Designer.cs @@ -11,46 +11,32 @@ namespace Npgsql.Properties { using System; - /// - /// A strongly-typed resource class, for looking up localized strings, etc. - /// - // This class was auto-generated by the StronglyTypedResourceBuilder - // class via a tool like ResGen or Visual Studio. 
- // To add or remove a member, edit your .ResX file then rerun ResGen - // with the /str option, or rebuild your VS project. - [global::System.CodeDom.Compiler.GeneratedCodeAttribute("System.Resources.Tools.StronglyTypedResourceBuilder", "4.0.0.0")] - [global::System.Diagnostics.DebuggerNonUserCodeAttribute()] - [global::System.Runtime.CompilerServices.CompilerGeneratedAttribute()] + [System.CodeDom.Compiler.GeneratedCodeAttribute("System.Resources.Tools.StronglyTypedResourceBuilder", "4.0.0.0")] + [System.Diagnostics.DebuggerNonUserCodeAttribute()] + [System.Runtime.CompilerServices.CompilerGeneratedAttribute()] internal class NpgsqlStrings { - private static global::System.Resources.ResourceManager resourceMan; + private static System.Resources.ResourceManager resourceMan; - private static global::System.Globalization.CultureInfo resourceCulture; + private static System.Globalization.CultureInfo resourceCulture; - [global::System.Diagnostics.CodeAnalysis.SuppressMessageAttribute("Microsoft.Performance", "CA1811:AvoidUncalledPrivateCode")] + [System.Diagnostics.CodeAnalysis.SuppressMessageAttribute("Microsoft.Performance", "CA1811:AvoidUncalledPrivateCode")] internal NpgsqlStrings() { } - /// - /// Returns the cached ResourceManager instance used by this class. 
- /// - [global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)] - internal static global::System.Resources.ResourceManager ResourceManager { + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Advanced)] + internal static System.Resources.ResourceManager ResourceManager { get { - if (object.ReferenceEquals(resourceMan, null)) { - global::System.Resources.ResourceManager temp = new global::System.Resources.ResourceManager("Npgsql.Properties.NpgsqlStrings", typeof(NpgsqlStrings).Assembly); + if (object.Equals(null, resourceMan)) { + System.Resources.ResourceManager temp = new System.Resources.ResourceManager("Npgsql.Properties.NpgsqlStrings", typeof(NpgsqlStrings).Assembly); resourceMan = temp; } return resourceMan; } } - /// - /// Overrides the current thread's CurrentUICulture property for all - /// resource lookups using this strongly typed resource class. - /// - [global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)] - internal static global::System.Globalization.CultureInfo Culture { + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Advanced)] + internal static System.Globalization.CultureInfo Culture { get { return resourceCulture; } @@ -59,156 +45,147 @@ internal NpgsqlStrings() { } } - /// - /// Looks up a localized string similar to '{0}' must be positive.. - /// - internal static string ArgumentMustBePositive { + internal static string CannotUseSslVerifyWithUserCallback { get { - return ResourceManager.GetString("ArgumentMustBePositive", resourceCulture); + return ResourceManager.GetString("CannotUseSslVerifyWithUserCallback", resourceCulture); } } - /// - /// Looks up a localized string similar to Cannot read infinity value since Npgsql.DisableDateTimeInfinityConversions is enabled.. 
- /// - internal static string CannotReadInfinityValue { + internal static string CannotUseSslRootCertificateWithUserCallback { get { - return ResourceManager.GetString("CannotReadInfinityValue", resourceCulture); + return ResourceManager.GetString("CannotUseSslRootCertificateWithUserCallback", resourceCulture); } } - /// - /// Looks up a localized string similar to Cannot read interval values with non-zero months as TimeSpan, since that type doesn't support months. Consider using NodaTime Period which better corresponds to PostgreSQL interval, or read the value as NpgsqlInterval, or transform the interval to not contain months or years in PostgreSQL before reading it.. - /// - internal static string CannotReadIntervalWithMonthsAsTimeSpan { + internal static string EncryptionDisabled { get { - return ResourceManager.GetString("CannotReadIntervalWithMonthsAsTimeSpan", resourceCulture); + return ResourceManager.GetString("EncryptionDisabled", resourceCulture); + } + } + + internal static string NoMultirangeTypeFound { + get { + return ResourceManager.GetString("NoMultirangeTypeFound", resourceCulture); + } + } + + internal static string NotSupportedOnDataSourceCommand { + get { + return ResourceManager.GetString("NotSupportedOnDataSourceCommand", resourceCulture); + } + } + + internal static string NotSupportedOnDataSourceBatch { + get { + return ResourceManager.GetString("NotSupportedOnDataSourceBatch", resourceCulture); } } - /// - /// Looks up a localized string similar to When registering a password provider, a password or password file may not be set.. - /// internal static string CannotSetBothPasswordProviderAndPassword { get { return ResourceManager.GetString("CannotSetBothPasswordProviderAndPassword", resourceCulture); } } - /// - /// Looks up a localized string similar to When creating a multi-host data source, TargetSessionAttributes cannot be specified. Create without TargetSessionAttributes, and then obtain DataSource wrappers from it. 
Consult the docs for more information.. - /// + internal static string PasswordProviderMissing { + get { + return ResourceManager.GetString("PasswordProviderMissing", resourceCulture); + } + } + + internal static string ArgumentMustBePositive { + get { + return ResourceManager.GetString("ArgumentMustBePositive", resourceCulture); + } + } + internal static string CannotSpecifyTargetSessionAttributes { get { return ResourceManager.GetString("CannotSpecifyTargetSessionAttributes", resourceCulture); } } - /// - /// Looks up a localized string similar to RootCertificate cannot be used in conjunction with UserCertificateValidationCallback; when registering a validation callback, perform whatever validation you require in that callback.. - /// - internal static string CannotUseSslRootCertificateWithUserCallback { + internal static string CannotReadIntervalWithMonthsAsTimeSpan { get { - return ResourceManager.GetString("CannotUseSslRootCertificateWithUserCallback", resourceCulture); + return ResourceManager.GetString("CannotReadIntervalWithMonthsAsTimeSpan", resourceCulture); } } - /// - /// Looks up a localized string similar to SslMode.{0} cannot be used in conjunction with UserCertificateValidationCallback; when registering a validation callback, perform whatever validation you require in that callback.. 
- /// - internal static string CannotUseSslVerifyWithUserCallback { + internal static string PositionalParameterAfterNamed { get { - return ResourceManager.GetString("CannotUseSslVerifyWithUserCallback", resourceCulture); + return ResourceManager.GetString("PositionalParameterAfterNamed", resourceCulture); + } + } + + internal static string CannotReadInfinityValue { + get { + return ResourceManager.GetString("CannotReadInfinityValue", resourceCulture); + } + } + + internal static string SyncAndAsyncConnectionInitializersRequired { + get { + return ResourceManager.GetString("SyncAndAsyncConnectionInitializersRequired", resourceCulture); } } - /// - /// Looks up a localized string similar to ValidationRootCertificateCallback cannot be used in conjunction with UserCertificateValidationCallback; when registering a validation callback, perform whatever validation you require in that callback.. - /// internal static string CannotUseValidationRootCertificateCallbackWithUserCallback { get { return ResourceManager.GetString("CannotUseValidationRootCertificateCallbackWithUserCallback", resourceCulture); } } - /// - /// Looks up a localized string similar to NpgsqlSlimDataSourceBuilder is being used, and encryption hasn't been enabled, call EnableEncryption() on NpgsqlSlimDataSourceBuilder to enable it.. - /// - internal static string EncryptionDisabled { + internal static string RecordsNotEnabled { get { - return ResourceManager.GetString("EncryptionDisabled", resourceCulture); + return ResourceManager.GetString("RecordsNotEnabled", resourceCulture); } } - /// - /// Looks up a localized string similar to Full-text search isn't enabled; please call {0} on {1} to enable full-text search.. - /// internal static string FullTextSearchNotEnabled { get { return ResourceManager.GetString("FullTextSearchNotEnabled", resourceCulture); } } - /// - /// Looks up a localized string similar to No multirange type could be found in the database for subtype {0}.. 
- /// - internal static string NoMultirangeTypeFound { + internal static string LTreeNotEnabled { get { - return ResourceManager.GetString("NoMultirangeTypeFound", resourceCulture); + return ResourceManager.GetString("LTreeNotEnabled", resourceCulture); } } - /// - /// Looks up a localized string similar to Connection and transaction access is not supported on batches created from DbDataSource.. - /// - internal static string NotSupportedOnDataSourceBatch { + internal static string RangesNotEnabled { get { - return ResourceManager.GetString("NotSupportedOnDataSourceBatch", resourceCulture); + return ResourceManager.GetString("RangesNotEnabled", resourceCulture); } } - /// - /// Looks up a localized string similar to Connection and transaction access is not supported on commands created from DbDataSource.. - /// - internal static string NotSupportedOnDataSourceCommand { + internal static string RangeArraysNotEnabled { get { - return ResourceManager.GetString("NotSupportedOnDataSourceCommand", resourceCulture); + return ResourceManager.GetString("RangeArraysNotEnabled", resourceCulture); } } - /// - /// Looks up a localized string similar to The right type of password provider (sync or async) was not found.. - /// - internal static string PasswordProviderMissing { + internal static string MultirangesNotEnabled { get { - return ResourceManager.GetString("PasswordProviderMissing", resourceCulture); + return ResourceManager.GetString("MultirangesNotEnabled", resourceCulture); } } - /// - /// Looks up a localized string similar to When using CommandType.StoredProcedure, all positional parameters must come before named parameters.. 
- /// - internal static string PositionalParameterAfterNamed { + internal static string MultirangeArraysNotEnabled { get { - return ResourceManager.GetString("PositionalParameterAfterNamed", resourceCulture); + return ResourceManager.GetString("MultirangeArraysNotEnabled", resourceCulture); } } - /// - /// Looks up a localized string similar to Records aren't enabled; please call {0} on {1} to enable records.. - /// - internal static string RecordsNotEnabled { + internal static string TimestampTzNoDateTimeUnspecified { get { - return ResourceManager.GetString("RecordsNotEnabled", resourceCulture); + return ResourceManager.GetString("TimestampTzNoDateTimeUnspecified", resourceCulture); } } - /// - /// Looks up a localized string similar to Both sync and async connection initializers must be provided.. - /// - internal static string SyncAndAsyncConnectionInitializersRequired { + internal static string TimestampNoDateTimeUtc { get { - return ResourceManager.GetString("SyncAndAsyncConnectionInitializersRequired", resourceCulture); + return ResourceManager.GetString("TimestampNoDateTimeUtc", resourceCulture); } } } diff --git a/src/Npgsql/Properties/NpgsqlStrings.resx b/src/Npgsql/Properties/NpgsqlStrings.resx index 8df8e0b335..5ca209070f 100644 --- a/src/Npgsql/Properties/NpgsqlStrings.resx +++ b/src/Npgsql/Properties/NpgsqlStrings.resx @@ -69,4 +69,25 @@ Full-text search isn't enabled; please call {0} on {1} to enable full-text search. - \ No newline at end of file + + Ltree isn't enabled; please call {0} on {1} to enable LTree. + + + Ranges aren't enabled; please call {0} on {1} to enable ranges. + + + Range arrays aren't enabled; please call {0} on {1} to enable arrays for ranges. + + + Multiranges aren't enabled; please call {0} on {1} to enable multiranges. + + + Multirange arrays aren't enabled; please call {0} on {1} to enable arrays for multiranges. + + + Cannot write DateTime with Kind={0} to PostgreSQL type '{1}', only UTC is supported. 
Note that it's not possible to mix DateTimes with different Kinds in an array, range, or multirange. + + + Cannot write DateTime with Kind=UTC to PostgreSQL type '{0}', consider using '{1}'. Note that it's not possible to mix DateTimes with different Kinds in an array, range, or multirange. + + diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index aa795b81ff..37a23693b4 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -7,26 +7,31 @@ Npgsql.ChannelBinding.Require = 2 -> Npgsql.ChannelBinding Npgsql.NpgsqlBatch.CreateBatchCommand() -> Npgsql.NpgsqlBatchCommand! Npgsql.NpgsqlConnectionStringBuilder.ChannelBinding.get -> Npgsql.ChannelBinding Npgsql.NpgsqlConnectionStringBuilder.ChannelBinding.set -> void -Npgsql.NpgsqlSlimDataSourceBuilder.EnableFullTextSearch() -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder.EnableRecords() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlBinaryImporter.WriteRow(params object?[]! values) -> void Npgsql.NpgsqlBinaryImporter.WriteRowAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken), params object?[]! values) -> System.Threading.Tasks.Task! +Npgsql.NpgsqlDataSourceBuilder.AddTypeInfoResolver(Npgsql.Internal.IPgTypeInfoResolver! resolver) -> void Npgsql.NpgsqlDataSourceBuilder.Name.get -> string? Npgsql.NpgsqlDataSourceBuilder.Name.set -> void Npgsql.NpgsqlDataSourceBuilder.UseRootCertificate(System.Security.Cryptography.X509Certificates.X509Certificate2? rootCertificate) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.UseRootCertificateCallback(System.Func? rootCertificateCallback) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.UseSystemTextJson(System.Text.Json.JsonSerializerOptions? serializerOptions = null, System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> Npgsql.NpgsqlDataSourceBuilder! 
Npgsql.NpgsqlSlimDataSourceBuilder -Npgsql.NpgsqlSlimDataSourceBuilder.AddTypeResolverFactory(Npgsql.Internal.TypeHandling.TypeHandlerResolverFactory! resolverFactory) -> void +Npgsql.NpgsqlSlimDataSourceBuilder.AddTypeInfoResolver(Npgsql.Internal.IPgTypeInfoResolver! resolver) -> void Npgsql.NpgsqlSlimDataSourceBuilder.Build() -> Npgsql.NpgsqlDataSource! Npgsql.NpgsqlSlimDataSourceBuilder.BuildMultiHost() -> Npgsql.NpgsqlMultiHostDataSource! Npgsql.NpgsqlSlimDataSourceBuilder.ConnectionString.get -> string! Npgsql.NpgsqlSlimDataSourceBuilder.ConnectionStringBuilder.get -> Npgsql.NpgsqlConnectionStringBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.DefaultNameTranslator.get -> Npgsql.INpgsqlNameTranslator! Npgsql.NpgsqlSlimDataSourceBuilder.DefaultNameTranslator.set -> void +Npgsql.NpgsqlSlimDataSourceBuilder.EnableArrays() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableEncryption() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableExtraConversions() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableFullTextSearch() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableLTree() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableMultiranges() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableParameterLogging(bool parameterLoggingEnabled = true) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableRanges() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableRecords() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.MapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! Npgsql.NpgsqlSlimDataSourceBuilder.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? 
nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! Npgsql.NpgsqlSlimDataSourceBuilder.MapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! @@ -46,10 +51,28 @@ Npgsql.NpgsqlSlimDataSourceBuilder.UseRootCertificate(System.Security.Cryptograp Npgsql.NpgsqlSlimDataSourceBuilder.UseRootCertificateCallback(System.Func? rootCertificateCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UseSystemTextJson(System.Text.Json.JsonSerializerOptions? serializerOptions = null, System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UseUserCertificateValidationCallback(System.Net.Security.RemoteCertificateValidationCallback! userCertificateValidationCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.PostgresTypes.PostgresArrayType.PostgresArrayType(string! ns, string! name, uint oid, Npgsql.PostgresTypes.PostgresType! elementPostgresType) -> void +Npgsql.PostgresTypes.PostgresBaseType.PostgresBaseType(string! ns, string! name, uint oid) -> void Npgsql.Replication.PhysicalReplicationConnection.StartReplication(Npgsql.Replication.PhysicalReplicationSlot? slot, NpgsqlTypes.NpgsqlLogSequenceNumber walLocation, System.Threading.CancellationToken cancellationToken, uint timeline = 0) -> System.Collections.Generic.IAsyncEnumerable! Npgsql.Replication.PhysicalReplicationConnection.StartReplication(NpgsqlTypes.NpgsqlLogSequenceNumber walLocation, System.Threading.CancellationToken cancellationToken, uint timeline = 0) -> System.Collections.Generic.IAsyncEnumerable! Npgsql.Replication.PhysicalReplicationSlot.PhysicalReplicationSlot(string! slotName, NpgsqlTypes.NpgsqlLogSequenceNumber? restartLsn = null, uint? restartTimeline = null) -> void Npgsql.Replication.PhysicalReplicationSlot.RestartTimeline.get -> uint? 
+Npgsql.TypeMapping.INpgsqlTypeMapper.AddTypeInfoResolver(Npgsql.Internal.IPgTypeInfoResolver! resolver) -> void +Npgsql.TypeMapping.UserTypeMapping +Npgsql.TypeMapping.UserTypeMapping.ClrType.get -> System.Type! +Npgsql.TypeMapping.UserTypeMapping.PgTypeName.get -> string! +NpgsqlTypes.NpgsqlCidr +NpgsqlTypes.NpgsqlCidr.Address.get -> System.Net.IPAddress! +NpgsqlTypes.NpgsqlCidr.Deconstruct(out System.Net.IPAddress! address, out byte netmask) -> void +NpgsqlTypes.NpgsqlCidr.Netmask.get -> byte +NpgsqlTypes.NpgsqlCidr.NpgsqlCidr() -> void +NpgsqlTypes.NpgsqlCidr.NpgsqlCidr(string! addr) -> void +NpgsqlTypes.NpgsqlCidr.NpgsqlCidr(System.Net.IPAddress! address, byte netmask) -> void +NpgsqlTypes.NpgsqlInet.Deconstruct(out System.Net.IPAddress! address, out byte netmask) -> void +NpgsqlTypes.NpgsqlInet.NpgsqlInet(System.Net.IPAddress! address, byte netmask) -> void +NpgsqlTypes.NpgsqlInet.Netmask.get -> byte +NpgsqlTypes.NpgsqlTsQueryFollowedBy.Distance.get -> short +NpgsqlTypes.NpgsqlTsQueryFollowedBy.NpgsqlTsQueryFollowedBy(NpgsqlTypes.NpgsqlTsQuery! left, short distance, NpgsqlTypes.NpgsqlTsQuery! right) -> void override Npgsql.NpgsqlBatch.Dispose() -> void *REMOVED*static NpgsqlTypes.NpgsqlBox.Parse(string! s) -> NpgsqlTypes.NpgsqlBox *REMOVED*static NpgsqlTypes.NpgsqlCircle.Parse(string! s) -> NpgsqlTypes.NpgsqlCircle @@ -58,6 +81,14 @@ override Npgsql.NpgsqlBatch.Dispose() -> void *REMOVED*static NpgsqlTypes.NpgsqlPath.Parse(string! s) -> NpgsqlTypes.NpgsqlPath *REMOVED*static NpgsqlTypes.NpgsqlPoint.Parse(string! s) -> NpgsqlTypes.NpgsqlPoint *REMOVED*static NpgsqlTypes.NpgsqlPolygon.Parse(string! s) -> NpgsqlTypes.NpgsqlPolygon +*REMOVED*NpgsqlTypes.NpgsqlInet.Deconstruct(out System.Net.IPAddress! address, out int netmask) -> void +*REMOVED*NpgsqlTypes.NpgsqlInet.NpgsqlInet(System.Net.IPAddress! 
address, int netmask) -> void +*REMOVED*NpgsqlTypes.NpgsqlInet.Address.set -> void +*REMOVED*NpgsqlTypes.NpgsqlInet.Equals(NpgsqlTypes.NpgsqlInet other) -> bool +*REMOVED*NpgsqlTypes.NpgsqlInet.Netmask.get -> int +*REMOVED*NpgsqlTypes.NpgsqlInet.Netmask.set -> void +*REMOVED*NpgsqlTypes.NpgsqlTsQueryFollowedBy.Distance.get -> int +*REMOVED*NpgsqlTypes.NpgsqlTsQueryFollowedBy.NpgsqlTsQueryFollowedBy(NpgsqlTypes.NpgsqlTsQuery! left, int distance, NpgsqlTypes.NpgsqlTsQuery! right) -> void *REMOVED*Npgsql.NpgsqlBinaryImporter.WriteRow(params object![]! values) -> void *REMOVED*Npgsql.NpgsqlBinaryImporter.WriteRowAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken), params object![]! values) -> System.Threading.Tasks.Task! *REMOVED*override Npgsql.NpgsqlDataReader.GetProviderSpecificFieldType(int ordinal) -> System.Type! @@ -66,7 +97,24 @@ override Npgsql.NpgsqlBatch.Dispose() -> void *REMOVED*override Npgsql.NpgsqlNestedDataReader.GetProviderSpecificFieldType(int ordinal) -> System.Type! *REMOVED*override Npgsql.NpgsqlNestedDataReader.GetProviderSpecificValue(int ordinal) -> object! *REMOVED*override Npgsql.NpgsqlNestedDataReader.GetProviderSpecificValues(object![]! values) -> int +*REMOVED*override NpgsqlTypes.NpgsqlInet.Equals(object? obj) -> bool +*REMOVED*override NpgsqlTypes.NpgsqlInet.GetHashCode() -> int *REMOVED*Npgsql.Replication.PhysicalReplicationConnection.StartReplication(Npgsql.Replication.PhysicalReplicationSlot? slot, NpgsqlTypes.NpgsqlLogSequenceNumber walLocation, System.Threading.CancellationToken cancellationToken, ulong timeline = 0) -> System.Collections.Generic.IAsyncEnumerable! *REMOVED*Npgsql.Replication.PhysicalReplicationConnection.StartReplication(NpgsqlTypes.NpgsqlLogSequenceNumber walLocation, System.Threading.CancellationToken cancellationToken, ulong timeline = 0) -> System.Collections.Generic.IAsyncEnumerable! 
*REMOVED*Npgsql.Replication.PhysicalReplicationSlot.PhysicalReplicationSlot(string! slotName, NpgsqlTypes.NpgsqlLogSequenceNumber? restartLsn = null, ulong? restartTimeline = null) -> void *REMOVED*Npgsql.Replication.PhysicalReplicationSlot.RestartTimeline.get -> ulong? +override NpgsqlTypes.NpgsqlCidr.ToString() -> string! +*REMOVED*static NpgsqlTypes.NpgsqlInet.operator !=(NpgsqlTypes.NpgsqlInet x, NpgsqlTypes.NpgsqlInet y) -> bool +*REMOVED*static NpgsqlTypes.NpgsqlInet.operator ==(NpgsqlTypes.NpgsqlInet x, NpgsqlTypes.NpgsqlInet y) -> bool +*REMOVED*static NpgsqlTypes.NpgsqlInet.ToIPAddress(NpgsqlTypes.NpgsqlInet inet) -> System.Net.IPAddress! +*REMOVED*static NpgsqlTypes.NpgsqlInet.ToNpgsqlInet(System.Net.IPAddress? ip) -> NpgsqlTypes.NpgsqlInet +*REMOVED*Npgsql.NpgsqlDataSourceBuilder.AddTypeResolverFactory(Npgsql.Internal.TypeHandling.TypeHandlerResolverFactory! resolverFactory) -> void +static NpgsqlTypes.NpgsqlInet.explicit operator NpgsqlTypes.NpgsqlInet(System.Net.IPAddress! ip) -> NpgsqlTypes.NpgsqlInet +*REMOVED*Npgsql.NpgsqlParameter.ConvertedValue.get -> object? +*REMOVED*Npgsql.NpgsqlParameter.ConvertedValue.set -> void +*REMOVED*Npgsql.PostgresTypes.PostgresArrayType.PostgresArrayType(string! ns, string! internalName, uint oid, Npgsql.PostgresTypes.PostgresType! elementPostgresType) -> void +*REMOVED*Npgsql.PostgresTypes.PostgresBaseType.PostgresBaseType(string! ns, string! internalName, uint oid) -> void +*REMOVED*static NpgsqlTypes.NpgsqlInet.implicit operator NpgsqlTypes.NpgsqlInet(System.Net.IPAddress! ip) -> NpgsqlTypes.NpgsqlInet +*REMOVED*Npgsql.PostgresTypes.PostgresType.PostgresType(string! ns, string! name, string! internalName, uint oid) -> void +*REMOVED*Npgsql.PostgresTypes.PostgresType.PostgresType(string! ns, string! name, uint oid) -> void +*REMOVED*Npgsql.TypeMapping.INpgsqlTypeMapper.AddTypeResolverFactory(Npgsql.Internal.TypeHandling.TypeHandlerResolverFactory! 
resolverFactory) -> void diff --git a/src/Npgsql/Replication/PgDateTime.cs b/src/Npgsql/Replication/PgDateTime.cs new file mode 100644 index 0000000000..aa68bda7f6 --- /dev/null +++ b/src/Npgsql/Replication/PgDateTime.cs @@ -0,0 +1,16 @@ +using System; + +namespace Npgsql.Replication; + +static class PgDateTime +{ + const long PostgresTimestampOffsetTicks = 630822816000000000L; + + public static DateTime DecodeTimestamp(long value, DateTimeKind kind) + => new(value * 10 + PostgresTimestampOffsetTicks, kind); + + public static long EncodeTimestamp(DateTime value) + // Rounding here would cause problems because we would round up DateTime.MaxValue + // which would make it impossible to retrieve it back from the database, so we just drop the additional precision + => (value.Ticks - PostgresTimestampOffsetTicks) / 10; +} diff --git a/src/Npgsql/Replication/PgOutput/Messages/DefaultUpdateMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/DefaultUpdateMessage.cs index 8a9a34741d..6fd36d7ea0 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/DefaultUpdateMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/DefaultUpdateMessage.cs @@ -1,5 +1,4 @@ using System; -using System.Collections.Generic; using System.Threading; using System.Threading.Tasks; using Npgsql.Internal; diff --git a/src/Npgsql/Replication/PgOutput/Messages/FullDeleteMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/FullDeleteMessage.cs index 933b50ac68..a426a2b6ad 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/FullDeleteMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/FullDeleteMessage.cs @@ -1,6 +1,5 @@ using NpgsqlTypes; using System; -using System.Collections.Generic; using System.Threading; using System.Threading.Tasks; using Npgsql.Internal; diff --git a/src/Npgsql/Replication/PgOutput/Messages/FullUpdateMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/FullUpdateMessage.cs index 7da8f77c68..814780cf37 100644 --- 
a/src/Npgsql/Replication/PgOutput/Messages/FullUpdateMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/FullUpdateMessage.cs @@ -1,5 +1,4 @@ using System; -using System.Collections.Generic; using System.Threading; using System.Threading.Tasks; using Npgsql.Internal; diff --git a/src/Npgsql/Replication/PgOutput/Messages/IndexUpdateMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/IndexUpdateMessage.cs index 14f31b1672..021458140d 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/IndexUpdateMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/IndexUpdateMessage.cs @@ -1,5 +1,4 @@ using System; -using System.Collections.Generic; using System.Threading; using System.Threading.Tasks; using Npgsql.Internal; diff --git a/src/Npgsql/Replication/PgOutput/Messages/InsertMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/InsertMessage.cs index d0f67841e9..fe862ead1b 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/InsertMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/InsertMessage.cs @@ -1,6 +1,5 @@ using NpgsqlTypes; using System; -using System.Collections.Generic; using System.Threading; using System.Threading.Tasks; using Npgsql.Internal; diff --git a/src/Npgsql/Replication/PgOutput/Messages/KeyDeleteMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/KeyDeleteMessage.cs index 9905d44753..9b30b3e1df 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/KeyDeleteMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/KeyDeleteMessage.cs @@ -1,6 +1,5 @@ using NpgsqlTypes; using System; -using System.Collections.Generic; using System.Threading; using System.Threading.Tasks; using Npgsql.Internal; diff --git a/src/Npgsql/Replication/PgOutput/Messages/PgOutputReplicationMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/PgOutputReplicationMessage.cs index b93e27fa3c..24de9e201f 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/PgOutputReplicationMessage.cs +++ 
b/src/Npgsql/Replication/PgOutput/Messages/PgOutputReplicationMessage.cs @@ -1,7 +1,4 @@ -using NpgsqlTypes; -using System; - -namespace Npgsql.Replication.PgOutput.Messages; +namespace Npgsql.Replication.PgOutput.Messages; /// /// The base class of all Logical Replication Protocol Messages diff --git a/src/Npgsql/Replication/PgOutput/Messages/RelationMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/RelationMessage.cs index f9be4a1eeb..85d83debb7 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/RelationMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/RelationMessage.cs @@ -1,7 +1,6 @@ using NpgsqlTypes; using System; using System.Collections.Generic; -using System.Collections.Immutable; using Npgsql.BackendMessages; namespace Npgsql.Replication.PgOutput.Messages; @@ -136,4 +135,4 @@ public enum ReplicaIdentitySetting : byte /// IndexWithIndIsReplIdent = (byte)'i' } -} \ No newline at end of file +} diff --git a/src/Npgsql/Replication/PgOutput/PgOutputAsyncEnumerable.cs b/src/Npgsql/Replication/PgOutput/PgOutputAsyncEnumerable.cs index 76d983a6ee..5b53e06bdf 100644 --- a/src/Npgsql/Replication/PgOutput/PgOutputAsyncEnumerable.cs +++ b/src/Npgsql/Replication/PgOutput/PgOutputAsyncEnumerable.cs @@ -5,10 +5,8 @@ using System.Threading.Tasks; using Npgsql.BackendMessages; using Npgsql.Internal; -using Npgsql.Internal.TypeHandlers.DateTimeHandlers; using Npgsql.Replication.Internal; using Npgsql.Replication.PgOutput.Messages; -using Npgsql.Util; using NpgsqlTypes; namespace Npgsql.Replication.PgOutput; @@ -91,7 +89,7 @@ async IAsyncEnumerator StartReplicationInternal(Canc _slot, cancellationToken, _walLocation, _options.GetOptionPairs(), bypassingStream: true); var buf = _connection.Connector!.ReadBuffer; var inStreamingTransaction = false; - var formatCode = _options.Binary ?? false ? FormatCode.Binary : FormatCode.Text; + var dataFormat = _options.Binary ?? false ? 
DataFormat.Binary : DataFormat.Text; await foreach (var xLogData in stream.WithCancellation(cancellationToken)) { @@ -104,7 +102,7 @@ async IAsyncEnumerator StartReplicationInternal(Canc await buf.EnsureAsync(20); yield return _beginMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, transactionFinalLsn: new NpgsqlLogSequenceNumber(buf.ReadUInt64()), - transactionCommitTimestamp: DateTimeUtils.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc), + transactionCommitTimestamp: PgDateTime.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc), transactionXid: buf.ReadUInt32()); continue; } @@ -128,7 +126,7 @@ async IAsyncEnumerator StartReplicationInternal(Canc await buf.EnsureAsync(4); var length = buf.ReadUInt32(); var data = (NpgsqlReadBuffer.ColumnStream)xLogData.Data; - data.Init(checked((int)length), false); + data.Init(checked((int)length), canSeek: false, commandScoped: false); yield return _logicalDecodingMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, transactionXid, flags, messageLsn, prefix, data); continue; @@ -141,7 +139,7 @@ async IAsyncEnumerator StartReplicationInternal(Canc (CommitMessage.CommitFlags)buf.ReadByte(), commitLsn: new NpgsqlLogSequenceNumber(buf.ReadUInt64()), transactionEndLsn: new NpgsqlLogSequenceNumber(buf.ReadUInt64()), - transactionCommitTimestamp: DateTimeUtils.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc)); + transactionCommitTimestamp: PgDateTime.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc)); continue; } case BackendReplicationMessageCode.Origin: @@ -193,7 +191,7 @@ async IAsyncEnumerator StartReplicationInternal(Canc } msg.RowDescription = RowDescriptionMessage.CreateForReplication( - _connection.Connector.TypeMapper, relationId, formatCode, columns); + _connection.Connector.SerializerOptions, relationId, dataFormat, columns); yield return msg; continue; @@ -397,7 +395,7 @@ async IAsyncEnumerator StartReplicationInternal(Canc yield return 
_streamCommitMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, transactionXid: buf.ReadUInt32(), flags: buf.ReadByte(), commitLsn: new NpgsqlLogSequenceNumber(buf.ReadUInt64()), transactionEndLsn: new NpgsqlLogSequenceNumber(buf.ReadUInt64()), - transactionCommitTimestamp: DateTimeUtils.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc)); + transactionCommitTimestamp: PgDateTime.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc)); continue; } case BackendReplicationMessageCode.StreamAbort: @@ -413,7 +411,7 @@ async IAsyncEnumerator StartReplicationInternal(Canc yield return _beginPrepareMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, prepareLsn: new NpgsqlLogSequenceNumber(buf.ReadUInt64()), prepareEndLsn: new NpgsqlLogSequenceNumber(buf.ReadUInt64()), - transactionPrepareTimestamp: DateTimeUtils.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc), + transactionPrepareTimestamp: PgDateTime.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc), transactionXid: buf.ReadUInt32(), transactionGid: buf.ReadNullTerminatedString()); continue; @@ -425,7 +423,7 @@ async IAsyncEnumerator StartReplicationInternal(Canc flags: (PrepareMessage.PrepareFlags)buf.ReadByte(), prepareLsn: new NpgsqlLogSequenceNumber(buf.ReadUInt64()), prepareEndLsn: new NpgsqlLogSequenceNumber(buf.ReadUInt64()), - transactionPrepareTimestamp: DateTimeUtils.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc), + transactionPrepareTimestamp: PgDateTime.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc), transactionXid: buf.ReadUInt32(), transactionGid: buf.ReadNullTerminatedString()); continue; @@ -437,7 +435,7 @@ async IAsyncEnumerator StartReplicationInternal(Canc flags: (CommitPreparedMessage.CommitPreparedFlags)buf.ReadByte(), commitPreparedLsn: new NpgsqlLogSequenceNumber(buf.ReadUInt64()), commitPreparedEndLsn: new NpgsqlLogSequenceNumber(buf.ReadUInt64()), - transactionCommitTimestamp: DateTimeUtils.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc), 
+ transactionCommitTimestamp: PgDateTime.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc), transactionXid: buf.ReadUInt32(), transactionGid: buf.ReadNullTerminatedString()); continue; @@ -449,8 +447,8 @@ async IAsyncEnumerator StartReplicationInternal(Canc flags: (RollbackPreparedMessage.RollbackPreparedFlags)buf.ReadByte(), preparedTransactionEndLsn: new NpgsqlLogSequenceNumber(buf.ReadUInt64()), rollbackPreparedEndLsn: new NpgsqlLogSequenceNumber(buf.ReadUInt64()), - transactionPrepareTimestamp: DateTimeUtils.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc), - transactionRollbackTimestamp: DateTimeUtils.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc), + transactionPrepareTimestamp: PgDateTime.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc), + transactionRollbackTimestamp: PgDateTime.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc), transactionXid: buf.ReadUInt32(), transactionGid: buf.ReadNullTerminatedString()); continue; @@ -462,7 +460,7 @@ async IAsyncEnumerator StartReplicationInternal(Canc flags: (StreamPrepareMessage.StreamPrepareFlags)buf.ReadByte(), prepareLsn: new NpgsqlLogSequenceNumber(buf.ReadUInt64()), prepareEndLsn: new NpgsqlLogSequenceNumber(buf.ReadUInt64()), - transactionPrepareTimestamp: DateTimeUtils.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc), + transactionPrepareTimestamp: PgDateTime.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc), transactionXid: buf.ReadUInt32(), transactionGid: buf.ReadNullTerminatedString()); continue; diff --git a/src/Npgsql/Replication/PgOutput/ReadonlyArrayBuffer.cs b/src/Npgsql/Replication/PgOutput/ReadonlyArrayBuffer.cs index 596dc471fb..df910af4d2 100644 --- a/src/Npgsql/Replication/PgOutput/ReadonlyArrayBuffer.cs +++ b/src/Npgsql/Replication/PgOutput/ReadonlyArrayBuffer.cs @@ -1,7 +1,6 @@ using System; using System.Collections; using System.Collections.Generic; -using Npgsql.Replication.PgOutput.Messages; namespace Npgsql.Replication.PgOutput; diff --git 
a/src/Npgsql/Replication/PgOutput/ReplicationValue.cs b/src/Npgsql/Replication/PgOutput/ReplicationValue.cs index 6ad0cbc6e1..7c5f104f3e 100644 --- a/src/Npgsql/Replication/PgOutput/ReplicationValue.cs +++ b/src/Npgsql/Replication/PgOutput/ReplicationValue.cs @@ -4,9 +4,7 @@ using System.Threading.Tasks; using Npgsql.BackendMessages; using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; using Npgsql.PostgresTypes; -using Npgsql.Replication.PgOutput.Messages; namespace Npgsql.Replication.PgOutput; @@ -27,26 +25,21 @@ public class ReplicationValue /// public TupleDataKind Kind { get; private set; } - bool _columnConsumed; FieldDescription _fieldDescription = null!; + PgConverterInfo _lastInfo; + bool _isConsumed; - /// - /// A stream that has been opened on a column. - /// - readonly NpgsqlReadBuffer.ColumnStream _columnStream; + PgReader PgReader => _readBuffer.PgReader; - internal ReplicationValue(NpgsqlConnector connector) - { - _readBuffer = connector.ReadBuffer; - _columnStream = new NpgsqlReadBuffer.ColumnStream(connector, startCancellableOperations: false); - } + internal ReplicationValue(NpgsqlConnector connector) => _readBuffer = connector.ReadBuffer; internal void Reset(TupleDataKind kind, int length, FieldDescription fieldDescription) { Kind = kind; Length = length; _fieldDescription = fieldDescription; - _columnConsumed = false; + _lastInfo = default; + _isConsumed = false; } // ReSharper disable once InconsistentNaming @@ -93,13 +86,16 @@ public bool IsUnchangedToastedValue /// public ValueTask Get(CancellationToken cancellationToken = default) { - CheckAndMarkConsumed(); + CheckActive(); + + ref var info = ref _lastInfo; + _fieldDescription.GetInfo(typeof(T), ref info); switch (Kind) { case TupleDataKind.Null: // When T is a Nullable (and only in that case), we support returning null - if (NullableHandler.Exists) + if (default(T) is null && typeof(T).IsValueType) return default!; if (typeof(T) == typeof(object)) @@ -114,36 +110,19 @@ public 
ValueTask Get(CancellationToken cancellationToken = default) } using (NoSynchronizationContextScope.Enter()) - return GetCore(cancellationToken); + return GetCore(info, _fieldDescription.DataFormat, _readBuffer, Length, cancellationToken); - async ValueTask GetCore(CancellationToken cancellationToken) + static async ValueTask GetCore(PgConverterInfo info, DataFormat format, NpgsqlReadBuffer buffer, int length, CancellationToken cancellationToken) { - using var tokenRegistration = _readBuffer.ReadBytesLeft < Length - ? _readBuffer.Connector.StartNestedCancellableOperation(cancellationToken) - : default; - - var position = _readBuffer.ReadPosition; - - try - { - return NullableHandler.Exists - ? await NullableHandler.ReadAsync(_fieldDescription.Handler, _readBuffer, Length, async: true, _fieldDescription) - : typeof(T) == typeof(object) - ? (T)await _fieldDescription.Handler.ReadAsObject(_readBuffer, Length, async: true, _fieldDescription) - : await _fieldDescription.Handler.Read(_readBuffer, Length, async: true, _fieldDescription); - } - catch - { - if (_readBuffer.Connector.State != ConnectorState.Broken) - { - var writtenBytes = _readBuffer.ReadPosition - position; - var remainingBytes = Length - writtenBytes; - if (remainingBytes > 0) - _readBuffer.Skip(remainingBytes, false).GetAwaiter().GetResult(); - } - - throw; - } + using var registration = buffer.Connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); + + var reader = buffer.PgReader.Init(length, format); + await reader.StartReadAsync(info.BufferRequirement, cancellationToken); + var result = info.AsObject + ? (T)await info.Converter.ReadAsObjectAsync(reader, cancellationToken) + : await info.GetConverter().ReadAsync(reader, cancellationToken); + await reader.EndReadAsync(); + return result; } } @@ -154,56 +133,38 @@ async ValueTask GetCore(CancellationToken cancellationToken) /// An optional token to cancel the asynchronous operation. The default value is . 
/// /// - public ValueTask Get(CancellationToken cancellationToken = default) + public ValueTask Get(CancellationToken cancellationToken = default) => Get(cancellationToken); + + /// + /// Retrieves data as a . + /// + public Stream GetStream() { - CheckAndMarkConsumed(); + CheckActive(); switch (Kind) { case TupleDataKind.Null: - return new ValueTask(DBNull.Value); + ThrowHelper.ThrowInvalidCastException_NoValue(_fieldDescription); + break; case TupleDataKind.UnchangedToastedValue: - throw new InvalidCastException( - $"Column '{_fieldDescription.Name}' is an unchanged TOASTed value (actual value not sent)."); + throw new InvalidCastException($"Column '{_fieldDescription.Name}' is an unchanged TOASTed value (actual value not sent)."); } - using (NoSynchronizationContextScope.Enter()) - return GetCore(cancellationToken); - - async ValueTask GetCore(CancellationToken cancellationToken) - { - using var tokenRegistration = _readBuffer.ReadBytesLeft < Length - ? _readBuffer.Connector.StartNestedCancellableOperation(cancellationToken) - : default; - - var position = _readBuffer.ReadPosition; - - try - { - return await _fieldDescription.Handler.ReadAsObject(_readBuffer, Length, async: true, _fieldDescription); - } - catch - { - if (_readBuffer.Connector.State != ConnectorState.Broken) - { - var writtenBytes = _readBuffer.ReadPosition - position; - var remainingBytes = Length - writtenBytes; - if (remainingBytes > 0) - _readBuffer.Skip(remainingBytes, false).GetAwaiter().GetResult(); - } - - throw; - } - } + var reader = _readBuffer.PgReader.Init(Length, _fieldDescription.DataFormat); + return reader.GetStream(canSeek: false); } /// - /// Retrieves data as a . + /// Retrieves data as a . 
/// - public Stream GetStream() + public TextReader GetTextReader() { - CheckAndMarkConsumed(); + CheckActive(); + + ref var info = ref _lastInfo; + _fieldDescription.GetInfo(typeof(TextReader), ref info); switch (Kind) { @@ -215,44 +176,29 @@ public Stream GetStream() throw new InvalidCastException($"Column '{_fieldDescription.Name}' is an unchanged TOASTed value (actual value not sent)."); } - _columnStream.Init(Length, canSeek: false); - return _columnStream; + var reader = PgReader.Init(Length, _fieldDescription.DataFormat); + reader.StartRead(info.BufferRequirement); + var result = (TextReader)info.Converter.ReadAsObject(reader); + reader.EndRead(); + return result; } - /// - /// Retrieves data as a . - /// - public TextReader GetTextReader() - => _fieldDescription.Handler is ITextReaderHandler handler - ? handler.GetTextReader(GetStream(), _readBuffer) - : throw new InvalidCastException( - $"The GetTextReader method is not supported for type {_fieldDescription.Handler.PgDisplayName}"); - internal async Task Consume(CancellationToken cancellationToken) { - if (!_columnStream.IsDisposed) - await _columnStream.DisposeAsync(); + if (_isConsumed) + return; - if (!_columnConsumed) - { - if (_readBuffer.ReadBytesLeft < 4) - { - using var tokenRegistration = _readBuffer.Connector.StartNestedCancellableOperation(cancellationToken); - await _readBuffer.Skip(Length, async: true); - } - else - { - await _readBuffer.Skip(Length, async: true); - } - } + if (!PgReader.Initialized) + PgReader.Init(Length, _fieldDescription.DataFormat); + await PgReader.ConsumeAsync(cancellationToken: cancellationToken); + await PgReader.Commit(async: true, resuming: false); - _columnConsumed = true; + _isConsumed = true; } - void CheckAndMarkConsumed() + void CheckActive() { - if (_columnConsumed) + if (PgReader.Initialized) throw new InvalidOperationException("Column has already been consumed"); - _columnConsumed = true; } -} \ No newline at end of file +} diff --git 
a/src/Npgsql/Replication/PgOutput/TupleEnumerator.cs b/src/Npgsql/Replication/PgOutput/TupleEnumerator.cs index 95e5bfe293..dc54a92515 100644 --- a/src/Npgsql/Replication/PgOutput/TupleEnumerator.cs +++ b/src/Npgsql/Replication/PgOutput/TupleEnumerator.cs @@ -4,7 +4,6 @@ using System.Threading.Tasks; using Npgsql.BackendMessages; using Npgsql.Internal; -using Npgsql.Replication.PgOutput.Messages; namespace Npgsql.Replication.PgOutput; @@ -64,11 +63,7 @@ async ValueTask MoveNextCore() break; case TupleDataKind.TextValue: case TupleDataKind.BinaryValue: - if (_readBuffer.ReadBytesLeft < 4) - { - using var tokenRegistration = _readBuffer.Connector.StartNestedCancellableOperation(_cancellationToken); - await _readBuffer.Ensure(4, async: true); - } + await _readBuffer.Ensure(4, async: true); len = _readBuffer.ReadInt32(); break; default: @@ -96,4 +91,4 @@ public async ValueTask DisposeAsync() _tupleEnumerable.State = RowState.Consumed; } -} \ No newline at end of file +} diff --git a/src/Npgsql/Replication/ReplicationConnection.cs b/src/Npgsql/Replication/ReplicationConnection.cs index 903e6b7b28..5b0381afb0 100644 --- a/src/Npgsql/Replication/ReplicationConnection.cs +++ b/src/Npgsql/Replication/ReplicationConnection.cs @@ -11,7 +11,6 @@ using System.Threading.Tasks; using Microsoft.Extensions.Logging; using Npgsql.Internal; -using Npgsql.Internal.TypeHandlers.DateTimeHandlers; using static Npgsql.Util.Statics; using Npgsql.Util; @@ -242,6 +241,8 @@ public async Task Open(CancellationToken cancellationToken = default) SetTimeouts(CommandTimeout, CommandTimeout); + _npgsqlConnection.Connector!.LongRunningConnection = true; + ReplicationLogger = _npgsqlConnection.Connector!.LoggingConfiguration.ReplicationLogger; } @@ -449,7 +450,7 @@ internal async IAsyncEnumerator StartReplicationInternal( _replicationCancellationTokenSource = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken); - using var _ = Connector.StartUserAction( + using var _ = 
connector.StartUserAction( ConnectorState.Replication, _replicationCancellationTokenSource.Token, attemptPgCancellation: _pgCancellationSupported); NpgsqlReadBuffer.ColumnStream? columnStream = null; @@ -474,8 +475,7 @@ internal async IAsyncEnumerator StartReplicationInternal( var buf = connector.ReadBuffer; - // Cancellation is handled at the replication level - we don't want every ReadAsync - columnStream = new NpgsqlReadBuffer.ColumnStream(connector, startCancellableOperations: false); + columnStream = new NpgsqlReadBuffer.ColumnStream(connector); SetTimeouts(_walReceiverTimeout, CommandTimeout); @@ -484,7 +484,7 @@ internal async IAsyncEnumerator StartReplicationInternal( while (true) { - msg = await Connector.ReadMessage(async: true); + msg = await connector.ReadMessage(async: true); Expect(msg, Connector); // We received some message so there's no need to forcibly request feedback @@ -501,7 +501,7 @@ internal async IAsyncEnumerator StartReplicationInternal( await buf.EnsureAsync(24); var startLsn = buf.ReadUInt64(); var endLsn = buf.ReadUInt64(); - var sendTime = DateTimeUtils.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc); + var sendTime = PgDateTime.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc); if (unchecked((ulong)Interlocked.Read(ref _lastReceivedLsn)) < startLsn) Interlocked.Exchange(ref _lastReceivedLsn, unchecked((long)startLsn)); @@ -510,7 +510,7 @@ internal async IAsyncEnumerator StartReplicationInternal( // dataLen = msg.Length - (code = 1 + walStart = 8 + walEnd = 8 + serverClock = 8) var dataLen = messageLength - 25; - columnStream.Init(dataLen, canSeek: false); + columnStream.Init(dataLen, canSeek: false, commandScoped: false); _cachedXLogDataMessage.Populate(new NpgsqlLogSequenceNumber(startLsn), new NpgsqlLogSequenceNumber(endLsn), sendTime, columnStream); @@ -519,7 +519,7 @@ internal async IAsyncEnumerator StartReplicationInternal( // Our consumer may not have read the stream to the end, but it might as well have been us // ourselves 
bypassing the stream and reading directly from the buffer in StartReplication() if (!columnStream.IsDisposed && columnStream.Position < columnStream.Length && !bypassingStream) - await buf.Skip(columnStream.Length - columnStream.Position, true); + await buf.Skip(checked((int)(columnStream.Length - columnStream.Position)), true); continue; } @@ -532,7 +532,7 @@ internal async IAsyncEnumerator StartReplicationInternal( if (ReplicationLogger.IsEnabled(LogLevel.Trace)) { var endLsn = new NpgsqlLogSequenceNumber(end); - var timestamp = DateTimeUtils.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc); + var timestamp = PgDateTime.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc); LogMessages.ReceivedReplicationPrimaryKeepalive(ReplicationLogger, endLsn, timestamp, Connector.Id); } else @@ -679,7 +679,7 @@ async Task SendFeedback(bool waitOnSemaphore = false, bool requestReply = false, buf.WriteInt64(lastReceivedLsn); buf.WriteInt64(lastFlushedLsn); buf.WriteInt64(lastAppliedLsn); - buf.WriteInt64(DateTimeUtils.EncodeTimestamp(timestamp)); + buf.WriteInt64(PgDateTime.EncodeTimestamp(timestamp)); buf.WriteByte(requestReply ? 
(byte)1 : (byte)0); await connector.Flush(async: true, cancellationToken); diff --git a/src/Npgsql/Schema/DbColumnSchemaGenerator.cs b/src/Npgsql/Schema/DbColumnSchemaGenerator.cs index 835cfbe424..88cc775e00 100644 --- a/src/Npgsql/Schema/DbColumnSchemaGenerator.cs +++ b/src/Npgsql/Schema/DbColumnSchemaGenerator.cs @@ -8,9 +8,10 @@ using System.Transactions; using Npgsql.BackendMessages; using Npgsql.Internal; -using Npgsql.Internal.TypeHandlers; -using Npgsql.Internal.TypeHandlers.CompositeHandlers; +using Npgsql.Internal.Postgres; +using Npgsql.PostgresTypes; using Npgsql.Util; +using NpgsqlTypes; namespace Npgsql.Schema; @@ -115,18 +116,18 @@ internal async Task> GetColumnSchema(bool asy .Where(f => f.TableOID != 0) // Only column fields .Select(c => $"(attr.attrelid={c.TableOID} AND attr.attnum={c.ColumnAttributeNumber})") .Join(" OR "); - + if (columnFieldFilter != string.Empty) { var query = oldQueryMode ? GenerateOldColumnsQuery(columnFieldFilter) : GenerateColumnsQuery(_connection.PostgreSqlVersion, columnFieldFilter); - + using var scope = new TransactionScope( TransactionScopeOption.Suppress, async ? TransactionScopeAsyncFlowOption.Enabled : TransactionScopeAsyncFlowOption.Suppress); using var connection = (NpgsqlConnection)((ICloneable)_connection).Clone(); - + await connection.Open(async, cancellationToken); using var cmd = new NpgsqlCommand(query, connection); @@ -135,7 +136,7 @@ internal async Task> GetColumnSchema(bool asy { while (async ? 
await reader.ReadAsync(cancellationToken) : reader.Read()) { - var column = LoadColumnDefinition(reader, _connection.Connector!.TypeMapper.DatabaseInfo, oldQueryMode); + var column = LoadColumnDefinition(reader, _connection.Connector!.DatabaseInfo, oldQueryMode); for (var ordinal = 0; ordinal < numFields; ordinal++) { var field = _rowDescription[ordinal]; @@ -253,19 +254,16 @@ NpgsqlDbColumn SetUpNonColumnField(FieldDescription field) /// void ColumnPostConfig(NpgsqlDbColumn column, int typeModifier) { - var typeMapper = _connection.Connector!.TypeMapper; - - column.NpgsqlDbType = typeMapper.GetTypeInfoByOid(column.TypeOID).npgsqlDbType; - column.DataType = typeMapper.TryResolveByOID(column.TypeOID, out var handler) - ? handler.GetFieldType() - : null; + var serializerOptions = _connection.Connector!.SerializerOptions; - if (column.DataType != null) + column.NpgsqlDbType = column.PostgresType.DataTypeName.ToNpgsqlDbType(); + if (serializerOptions.GetObjectOrDefaultTypeInfo(column.PostgresType) is { } typeInfo) { - column.IsLong = handler is ByteaHandler; + column.DataType = typeInfo.Type; + column.IsLong = column.PostgresType.DataTypeName == DataTypeNames.Bytea; - if (handler is ICompositeHandler) - column.UdtAssemblyQualifiedName = column.DataType.AssemblyQualifiedName; + if (column.PostgresType is PostgresCompositeType) + column.UdtAssemblyQualifiedName = typeInfo.Type.AssemblyQualifiedName; } var facets = column.PostgresType.GetFacets(typeModifier); @@ -276,4 +274,4 @@ void ColumnPostConfig(NpgsqlDbColumn column, int typeModifier) if (facets.Scale != null) column.NumericScale = facets.Scale; } -} \ No newline at end of file +} diff --git a/src/Npgsql/Shims/ConcurrentDictionaryExtensions.cs b/src/Npgsql/Shims/ConcurrentDictionaryExtensions.cs index c752cf2199..02f5c2077c 100644 --- a/src/Npgsql/Shims/ConcurrentDictionaryExtensions.cs +++ b/src/Npgsql/Shims/ConcurrentDictionaryExtensions.cs @@ -1,6 +1,3 @@ -using System; -using System.Collections.Concurrent; - 
namespace System.Collections.Concurrent; #if NETSTANDARD2_0 diff --git a/src/Npgsql/Shims/MemoryExtensions.cs b/src/Npgsql/Shims/MemoryExtensions.cs new file mode 100644 index 0000000000..6247c6a21e --- /dev/null +++ b/src/Npgsql/Shims/MemoryExtensions.cs @@ -0,0 +1,18 @@ +#if !NET7_0_OR_GREATER +namespace System; + +static class MemoryExtensions +{ + public static int IndexOfAnyExcept(this ReadOnlySpan span, T value0, T value1) where T : IEquatable + { + for (var i = 0; i < span.Length; i++) + { + var v = span[i]; + if (!v.Equals(value0) && !v.Equals(value1)) + return i; + } + + return -1; + } +} +#endif diff --git a/src/Npgsql/Shims/ReadOnlySequenceExtensions.cs b/src/Npgsql/Shims/ReadOnlySequenceExtensions.cs new file mode 100644 index 0000000000..0370285a7d --- /dev/null +++ b/src/Npgsql/Shims/ReadOnlySequenceExtensions.cs @@ -0,0 +1,13 @@ +namespace System.Buffers; + +static class ReadOnlySequenceExtensions +{ + public static ReadOnlySpan GetFirstSpan(this ReadOnlySequence sequence) + { +#if NETSTANDARD + return sequence.First.Span; +# else + return sequence.FirstSpan; +#endif + } +} diff --git a/src/Npgsql/Shims/ReadOnlySpanOfCharExtensions.cs b/src/Npgsql/Shims/ReadOnlySpanOfCharExtensions.cs index c805e984a5..11a70c9793 100644 --- a/src/Npgsql/Shims/ReadOnlySpanOfCharExtensions.cs +++ b/src/Npgsql/Shims/ReadOnlySpanOfCharExtensions.cs @@ -1,7 +1,5 @@ using System; -using System.Collections.Generic; using System.Runtime.CompilerServices; -using System.Text; namespace Npgsql.Netstandard20; diff --git a/src/Npgsql/Shims/ReferenceEqualityComparer.cs b/src/Npgsql/Shims/ReferenceEqualityComparer.cs new file mode 100644 index 0000000000..38515ed90f --- /dev/null +++ b/src/Npgsql/Shims/ReferenceEqualityComparer.cs @@ -0,0 +1,48 @@ +using System.Runtime.CompilerServices; + +namespace System.Collections.Generic; + +#if NETSTANDARD +sealed class ReferenceEqualityComparer : IEqualityComparer, IEqualityComparer +{ + ReferenceEqualityComparer() { } + + /// + /// Gets the 
singleton instance. + /// + public static ReferenceEqualityComparer Instance { get; } = new(); + + /// + /// Determines whether two object references refer to the same object instance. + /// + /// The first object to compare. + /// The second object to compare. + /// + /// if both and refer to the same object instance + /// or if both are ; otherwise, . + /// + /// + /// This API is a wrapper around . + /// It is not necessarily equivalent to calling . + /// + public new bool Equals(object? x, object? y) => ReferenceEquals(x, y); + + /// + /// Returns a hash code for the specified object. The returned hash code is based on the object + /// identity, not on the contents of the object. + /// + /// The object for which to retrieve the hash code. + /// A hash code for the identity of . + /// + /// This API is a wrapper around . + /// It is not necessarily equivalent to calling . + /// + public int GetHashCode(object? obj) + { + // Depending on target framework, RuntimeHelpers.GetHashCode might not be annotated + // with the proper nullability attribute. We'll suppress any warning that might + // result. 
+ return RuntimeHelpers.GetHashCode(obj!); + } +} +#endif diff --git a/src/Npgsql/Shims/StreamExtensions.cs b/src/Npgsql/Shims/StreamExtensions.cs index 925061870d..5215b02ce0 100644 --- a/src/Npgsql/Shims/StreamExtensions.cs +++ b/src/Npgsql/Shims/StreamExtensions.cs @@ -1,7 +1,9 @@ -#if NETSTANDARD2_0 +#if NETSTANDARD2_0 || !NET7_0_OR_GREATER using System.Buffers; +using System.Diagnostics; using System.Threading; using System.Threading.Tasks; +using Npgsql; // ReSharper disable once CheckNamespace namespace System.IO @@ -9,6 +11,33 @@ namespace System.IO // Helpers to read/write Span/Memory to Stream before netstandard 2.1 static class StreamExtensions { + public static void ReadExactly(this Stream stream, Span buffer) + { + var totalRead = 0; + while (totalRead < buffer.Length) + { + var read = stream.Read(buffer.Slice(totalRead)); + if (read is 0) + throw new EndOfStreamException(); + + totalRead += read; + } + } + + public static async ValueTask ReadExactlyAsync(this Stream stream, Memory buffer, CancellationToken cancellationToken = default) + { + var totalRead = 0; + while (totalRead < buffer.Length) + { + var read = await stream.ReadAsync(buffer.Slice(totalRead), cancellationToken).ConfigureAwait(false); + if (read is 0) + throw new EndOfStreamException(); + + totalRead += read; + } + } + +#if NETSTANDARD2_0 public static int Read(this Stream stream, Span buffer) { var sharedBuffer = ArrayPool.Shared.Rent(buffer.Length); @@ -66,6 +95,7 @@ public static async ValueTask WriteAsync(this Stream stream, ReadOnlyMemory.Shared.Return(sharedBuffer); } } +#endif } } #endif diff --git a/src/Npgsql/Shims/UnreachableException.cs b/src/Npgsql/Shims/UnreachableException.cs new file mode 100644 index 0000000000..c45f3fd1d8 --- /dev/null +++ b/src/Npgsql/Shims/UnreachableException.cs @@ -0,0 +1,41 @@ +#if !NET7_0_OR_GREATER +using System; + +namespace System.Diagnostics; + +/// +/// Exception thrown when the program executes an instruction that was thought to be 
unreachable. +/// +sealed class UnreachableException : Exception +{ + /// + /// Initializes a new instance of the class with the default error message. + /// + public UnreachableException() + : base("The program executed an instruction that was thought to be unreachable.") + { + } + + /// + /// Initializes a new instance of the + /// class with a specified error message. + /// + /// The error message that explains the reason for the exception. + public UnreachableException(string? message) + : base(message) + { + } + + /// + /// Initializes a new instance of the + /// class with a specified error message and a reference to the inner exception that is the cause of + /// this exception. + /// + /// The error message that explains the reason for the exception. + /// The exception that is the cause of the current exception. + public UnreachableException(string? message, Exception? innerException) + : base(message, innerException) + { + } +} +#endif diff --git a/src/Npgsql/ThrowHelper.cs b/src/Npgsql/ThrowHelper.cs index 57eaa5cc42..d6666bd130 100644 --- a/src/Npgsql/ThrowHelper.cs +++ b/src/Npgsql/ThrowHelper.cs @@ -52,6 +52,14 @@ internal static void ThrowInvalidCastException(string message, object argument) internal static void ThrowInvalidCastException_NoValue(FieldDescription field) => throw new InvalidCastException($"Column '{field.Name}' is null."); + [DoesNotReturn] + internal static void ThrowInvalidCastException(string message) => + throw new InvalidCastException(message); + + [DoesNotReturn] + internal static void ThrowInvalidCastException_NoValue() => + throw new InvalidCastException("Field is null."); + [DoesNotReturn] internal static void ThrowArgumentOutOfRange_OutOfColumnBounds(string paramName, int columnLength) => throw new ArgumentOutOfRangeException(paramName, $"The value is out of bounds from the column data, dataOffset must be between 0 and {columnLength}"); @@ -96,6 +104,10 @@ internal static void ThrowArgumentException(string message, string 
paramName) internal static void ThrowArgumentNullException(string paramName) => throw new ArgumentNullException(paramName); + [DoesNotReturn] + internal static void ThrowArgumentNullException(string message, string paramName) + => throw new ArgumentNullException(paramName, message); + [DoesNotReturn] internal static void ThrowIndexOutOfRangeException(string message) => throw new IndexOutOfRangeException(message); diff --git a/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs b/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs deleted file mode 100644 index fcdbb626d1..0000000000 --- a/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolver.cs +++ /dev/null @@ -1,449 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Collections.Specialized; -using System.IO; -using System.Net; -using System.Net.NetworkInformation; -using System.Numerics; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandlers; -using Npgsql.Internal.TypeHandlers.DateTimeHandlers; -using Npgsql.Internal.TypeHandlers.FullTextSearchHandlers; -using Npgsql.Internal.TypeHandlers.GeometricHandlers; -using Npgsql.Internal.TypeHandlers.InternalTypeHandlers; -using Npgsql.Internal.TypeHandlers.LTreeHandlers; -using Npgsql.Internal.TypeHandlers.NetworkHandlers; -using Npgsql.Internal.TypeHandlers.NumericHandlers; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using Npgsql.Properties; -using NpgsqlTypes; -using static Npgsql.Util.Statics; - -namespace Npgsql.TypeMapping; - -sealed class BuiltInTypeHandlerResolver : TypeHandlerResolver -{ - readonly NpgsqlConnector _connector; - readonly NpgsqlDatabaseInfo _databaseInfo; - - #region Cached handlers - - // Numeric types - readonly Int16Handler _int16Handler; - readonly Int32Handler _int32Handler; - readonly Int64Handler _int64Handler; - SingleHandler? _singleHandler; - readonly DoubleHandler _doubleHandler; - readonly NumericHandler _numericHandler; - MoneyHandler? 
_moneyHandler; - - // Text types - readonly TextHandler _textHandler; - TextHandler? _xmlHandler; - TextHandler? _varcharHandler; - TextHandler? _charHandler; - TextHandler? _nameHandler; - TextHandler? _refcursorHandler; - TextHandler? _citextHandler; - - // Note that old versions of PG - as well as some PG-like databases (Redshift, CockroachDB) don't have json/jsonb, so we create - // these handlers lazily rather than eagerly. - JsonTextHandler? _jsonbHandler; - JsonTextHandler? _jsonHandler; - JsonPathHandler? _jsonPathHandler; - - // Date/time types - readonly TimestampHandler _timestampHandler; - readonly TimestampTzHandler _timestampTzHandler; - readonly DateHandler _dateHandler; - TimeHandler? _timeHandler; - TimeTzHandler? _timeTzHandler; - IntervalHandler? _intervalHandler; - - // Network types - CidrHandler? _cidrHandler; - InetHandler? _inetHandler; - MacaddrHandler? _macaddrHandler; - MacaddrHandler? _macaddr8Handler; - - // Geometry types - BoxHandler? _boxHandler; - CircleHandler? _circleHandler; - LineHandler? _lineHandler; - LineSegmentHandler? _lineSegmentHandler; - PathHandler? _pathHandler; - PointHandler? _pointHandler; - PolygonHandler? _polygonHandler; - - // LTree types - LQueryHandler? _lQueryHandler; - LTreeHandler? _lTreeHandler; - LTxtQueryHandler? _lTxtQueryHandler; - - // UInt types - UInt32Handler? _oidHandler; - UInt32Handler? _xidHandler; - UInt64Handler? _xid8Handler; - UInt32Handler? _cidHandler; - UInt32Handler? _regtypeHandler; - UInt32Handler? _regconfigHandler; - - // Misc types - readonly BoolHandler _boolHandler; - ByteaHandler? _byteaHandler; - UuidHandler? _uuidHandler; - BitStringHandler? _bitVaryingHandler; - BitStringHandler? _bitHandler; - VoidHandler? _voidHandler; - HstoreHandler? _hstoreHandler; - - // Internal types - Int2VectorHandler? _int2VectorHandler; - OIDVectorHandler? _oidVectorHandler; - PgLsnHandler? _pgLsnHandler; - TidHandler? _tidHandler; - InternalCharHandler? 
_internalCharHandler; - - // Special types - UnknownTypeHandler? _unknownHandler; - - // Complex type handlers over timestamp/timestamptz (because DateTime is value-dependent) - NpgsqlTypeHandler? _timestampArrayHandler; - NpgsqlTypeHandler? _timestampTzArrayHandler; - - #endregion Cached handlers - - internal BuiltInTypeHandlerResolver(NpgsqlConnector connector) - { - _connector = connector; - _databaseInfo = connector.DatabaseInfo; - - // Eagerly instantiate some handlers for very common types so we don't need to check later - _int16Handler = new Int16Handler(PgType("smallint")); - _int32Handler = new Int32Handler(PgType("integer")); - _int64Handler = new Int64Handler(PgType("bigint")); - _doubleHandler = new DoubleHandler(PgType("double precision")); - _numericHandler = new NumericHandler(PgType("numeric")); - _textHandler ??= new TextHandler(PgType("text"), _connector.TextEncoding); - _timestampHandler ??= new TimestampHandler(PgType("timestamp without time zone")); - _timestampTzHandler ??= new TimestampTzHandler(PgType("timestamp with time zone")); - _dateHandler ??= new DateHandler(PgType("date")); - _boolHandler ??= new BoolHandler(PgType("boolean")); - } - - public override NpgsqlTypeHandler? 
ResolveByDataTypeName(string typeName) - => typeName switch - { - // Numeric types - "smallint" => _int16Handler, - "integer" or "int" => _int32Handler, - "bigint" => _int64Handler, - "real" => SingleHandler(), - "double precision" => _doubleHandler, - "numeric" or "decimal" => _numericHandler, - "money" => MoneyHandler(), - - // Text types - "text" => _textHandler, - "xml" => XmlHandler(), - "varchar" or "character varying" => VarcharHandler(), - "character" => CharHandler(), - "name" => NameHandler(), - "refcursor" => RefcursorHandler(), - "citext" => CitextHandler(), - "jsonb" => JsonbHandler(), - "json" => JsonHandler(), - "jsonpath" => JsonPathHandler(), - - // Date/time types - "timestamp" or "timestamp without time zone" => _timestampHandler, - "timestamptz" or "timestamp with time zone" => _timestampTzHandler, - "date" => _dateHandler, - "time without time zone" => TimeHandler(), - "time with time zone" => TimeTzHandler(), - "interval" => IntervalHandler(), - - // Network types - "cidr" => CidrHandler(), - "inet" => InetHandler(), - "macaddr" => MacaddrHandler(), - "macaddr8" => Macaddr8Handler(), - - // Geometry types - "box" => BoxHandler(), - "circle" => CircleHandler(), - "line" => LineHandler(), - "lseg" => LineSegmentHandler(), - "path" => PathHandler(), - "point" => PointHandler(), - "polygon" => PolygonHandler(), - - // LTree types - "lquery" => LQueryHandler(), - "ltree" => LTreeHandler(), - "ltxtquery" => LTxtHandler(), - - // UInt types - "oid" => OidHandler(), - "xid" => XidHandler(), - "xid8" => Xid8Handler(), - "cid" => CidHandler(), - "regtype" => RegtypeHandler(), - "regconfig" => RegconfigHandler(), - - // Misc types - "bool" or "boolean" => _boolHandler, - "bytea" => ByteaHandler(), - "uuid" => UuidHandler(), - "bit varying" or "varbit" => BitVaryingHandler(), - "bit" => BitHandler(), - "hstore" => HstoreHandler(), - - // Internal types - "int2vector" => Int2VectorHandler(), - "oidvector" => OidVectorHandler(), - "pg_lsn" => 
PgLsnHandler(), - "tid" => TidHandler(), - "char" => InternalCharHandler(), - "void" => VoidHandler(), - - "unknown" => UnknownHandler(), - - // Types that are unsupported by default when using NpgsqlSlimDataSourceBuilder - "record" => UnsupportedRecordHandler(), - "tsvector" => UnsupportedTsVectorHandler(), - "tsquery" => UnsupportedTsQueryHandler(), - - _ => null - }; - - public override NpgsqlTypeHandler? ResolveByClrType(Type type) - { - if (BuiltInTypeMappingResolver.ClrTypeToDataTypeName(type) is { } dataTypeName) - return ResolveByDataTypeName(dataTypeName); - - if (type.IsSubclassOf(typeof(Stream))) - return ResolveByDataTypeName("bytea"); - - switch (type.FullName) - { - case "NpgsqlTypes.NpgsqlTsVector": - case "NpgsqlTypes.NpgsqlTsQueryLexeme": - case "NpgsqlTypes.NpgsqlTsQueryAnd": - case "NpgsqlTypes.NpgsqlTsQueryOr": - case "NpgsqlTypes.NpgsqlTsQueryNot": - case "NpgsqlTypes.NpgsqlTsQueryEmpty": - case "NpgsqlTypes.NpgsqlTsQueryFollowedBy": - return UnsupportedTsQueryHandler(); - - default: - return null; - } - } - - public override NpgsqlTypeHandler? ResolveValueDependentValue(object value) - { - // In LegacyTimestampBehavior, DateTime isn't value-dependent, and handled above in ClrTypeToDataTypeNameTable like other types - if (LegacyTimestampBehavior) - return null; - - return value switch - { - DateTime dateTime => dateTime.Kind == DateTimeKind.Utc ? _timestampTzHandler : _timestampHandler, - - // For arrays/lists, return timestamp or timestamptz based on the kind of the first DateTime; if the user attempts to - // mix incompatible Kinds, that will fail during validation. For empty arrays it doesn't matter. - IList array => ArrayHandler(array.Count == 0 ? DateTimeKind.Unspecified : array[0].Kind), - - _ => null - }; - - NpgsqlTypeHandler ArrayHandler(DateTimeKind kind) - => kind == DateTimeKind.Utc - ? 
_timestampTzArrayHandler ??= _timestampTzHandler.CreateArrayHandler( - (PostgresArrayType)PgType("timestamp with time zone[]"), _connector.Settings.ArrayNullabilityMode) - : _timestampArrayHandler ??= _timestampHandler.CreateArrayHandler( - (PostgresArrayType)PgType("timestamp without time zone[]"), _connector.Settings.ArrayNullabilityMode); - } - - public override NpgsqlTypeHandler? ResolveValueTypeGenerically(T value) - { - // This method only ever gets called for value types, and relies on the JIT specializing the method for T by eliding all the - // type checks below. - - // Numeric types - if (typeof(T) == typeof(byte)) - return _int16Handler; - if (typeof(T) == typeof(short)) - return _int16Handler; - if (typeof(T) == typeof(int)) - return _int32Handler; - if (typeof(T) == typeof(long)) - return _int64Handler; - if (typeof(T) == typeof(float)) - return SingleHandler(); - if (typeof(T) == typeof(double)) - return _doubleHandler; - if (typeof(T) == typeof(decimal)) - return _numericHandler; - if (typeof(T) == typeof(BigInteger)) - return _numericHandler; - - // Text types - if (typeof(T) == typeof(char)) - return _textHandler; - if (typeof(T) == typeof(ArraySegment)) - return _textHandler; - - // Date/time types - // No resolution for DateTime, since that's value-dependent (Kind) - if (typeof(T) == typeof(DateTimeOffset)) - return _timestampTzHandler; -#if NET6_0_OR_GREATER - if (typeof(T) == typeof(DateOnly)) - return _dateHandler; - if (typeof(T) == typeof(TimeOnly)) - return _timeHandler; -#endif - if (typeof(T) == typeof(TimeSpan)) - return _intervalHandler; - if (typeof(T) == typeof(NpgsqlInterval)) - return _intervalHandler; - - // Network types - if (typeof(T) == typeof(IPAddress)) - return InetHandler(); - if (typeof(T) == typeof(PhysicalAddress)) - return _macaddrHandler; - if (typeof(T) == typeof(TimeSpan)) - return _intervalHandler; - - // Geometry types - if (typeof(T) == typeof(NpgsqlBox)) - return BoxHandler(); - if (typeof(T) == 
typeof(NpgsqlCircle)) - return CircleHandler(); - if (typeof(T) == typeof(NpgsqlLine)) - return LineHandler(); - if (typeof(T) == typeof(NpgsqlLSeg)) - return LineSegmentHandler(); - if (typeof(T) == typeof(NpgsqlPath)) - return PathHandler(); - if (typeof(T) == typeof(NpgsqlPoint)) - return PointHandler(); - if (typeof(T) == typeof(NpgsqlPolygon)) - return PolygonHandler(); - - // Misc types - if (typeof(T) == typeof(bool)) - return _boolHandler; - if (typeof(T) == typeof(Guid)) - return UuidHandler(); - if (typeof(T) == typeof(BitVector32)) - return BitVaryingHandler(); - - // Internal types - if (typeof(T) == typeof(NpgsqlLogSequenceNumber)) - return PgLsnHandler(); - if (typeof(T) == typeof(NpgsqlTid)) - return TidHandler(); - if (typeof(T) == typeof(DBNull)) - return UnknownHandler(); - - return null; - } - - PostgresType PgType(string pgTypeName) => _databaseInfo.GetPostgresTypeByName(pgTypeName); - - #region Handler accessors - - // Numeric types - NpgsqlTypeHandler SingleHandler() => _singleHandler ??= new SingleHandler(PgType("real")); - NpgsqlTypeHandler MoneyHandler() => _moneyHandler ??= new MoneyHandler(PgType("money")); - - // Text types - NpgsqlTypeHandler XmlHandler() => _xmlHandler ??= new TextHandler(PgType("xml"), _connector.TextEncoding); - NpgsqlTypeHandler VarcharHandler() => _varcharHandler ??= new TextHandler(PgType("character varying"), _connector.TextEncoding); - NpgsqlTypeHandler CharHandler() => _charHandler ??= new TextHandler(PgType("character"), _connector.TextEncoding); - NpgsqlTypeHandler NameHandler() => _nameHandler ??= new TextHandler(PgType("name"), _connector.TextEncoding); - NpgsqlTypeHandler RefcursorHandler() => _refcursorHandler ??= new TextHandler(PgType("refcursor"), _connector.TextEncoding); - NpgsqlTypeHandler? CitextHandler() => _citextHandler ??= _databaseInfo.TryGetPostgresTypeByName("citext", out var pgType) - ? 
new TextHandler(pgType, _connector.TextEncoding) - : null; - NpgsqlTypeHandler JsonbHandler() => _jsonbHandler ??= new JsonTextHandler(PgType("jsonb"), _connector.TextEncoding, isJsonb: true); - NpgsqlTypeHandler JsonHandler() => _jsonHandler ??= new JsonTextHandler(PgType("json"), _connector.TextEncoding, isJsonb: false); - NpgsqlTypeHandler JsonPathHandler() => _jsonPathHandler ??= new JsonPathHandler(PgType("jsonpath"), _connector.TextEncoding); - - // Date/time types - NpgsqlTypeHandler TimeHandler() => _timeHandler ??= new TimeHandler(PgType("time without time zone")); - NpgsqlTypeHandler TimeTzHandler() => _timeTzHandler ??= new TimeTzHandler(PgType("time with time zone")); - NpgsqlTypeHandler IntervalHandler() => _intervalHandler ??= new IntervalHandler(PgType("interval")); - - // Network types - NpgsqlTypeHandler CidrHandler() => _cidrHandler ??= new CidrHandler(PgType("cidr")); - NpgsqlTypeHandler InetHandler() => _inetHandler ??= new InetHandler(PgType("inet")); - NpgsqlTypeHandler MacaddrHandler() => _macaddrHandler ??= new MacaddrHandler(PgType("macaddr")); - NpgsqlTypeHandler Macaddr8Handler() => _macaddr8Handler ??= new MacaddrHandler(PgType("macaddr8")); - - // Geometry types - NpgsqlTypeHandler BoxHandler() => _boxHandler ??= new BoxHandler(PgType("box")); - NpgsqlTypeHandler CircleHandler() => _circleHandler ??= new CircleHandler(PgType("circle")); - NpgsqlTypeHandler LineHandler() => _lineHandler ??= new LineHandler(PgType("line")); - NpgsqlTypeHandler LineSegmentHandler() => _lineSegmentHandler ??= new LineSegmentHandler(PgType("lseg")); - NpgsqlTypeHandler PathHandler() => _pathHandler ??= new PathHandler(PgType("path")); - NpgsqlTypeHandler PointHandler() => _pointHandler ??= new PointHandler(PgType("point")); - NpgsqlTypeHandler PolygonHandler() => _polygonHandler ??= new PolygonHandler(PgType("polygon")); - - // LTree types - NpgsqlTypeHandler? 
LQueryHandler() => _lQueryHandler ??= _databaseInfo.TryGetPostgresTypeByName("lquery", out var pgType) - ? new LQueryHandler(pgType, _connector.TextEncoding) - : null; - NpgsqlTypeHandler? LTreeHandler() => _lTreeHandler ??= _databaseInfo.TryGetPostgresTypeByName("ltree", out var pgType) - ? new LTreeHandler(pgType, _connector.TextEncoding) - : null; - NpgsqlTypeHandler? LTxtHandler() => _lTxtQueryHandler ??= _databaseInfo.TryGetPostgresTypeByName("ltxtquery", out var pgType) - ? new LTxtQueryHandler(pgType, _connector.TextEncoding) - : null; - - // UInt types - NpgsqlTypeHandler OidHandler() => _oidHandler ??= new UInt32Handler(PgType("oid")); - NpgsqlTypeHandler XidHandler() => _xidHandler ??= new UInt32Handler(PgType("xid")); - NpgsqlTypeHandler Xid8Handler() => _xid8Handler ??= new UInt64Handler(PgType("xid8")); - NpgsqlTypeHandler CidHandler() => _cidHandler ??= new UInt32Handler(PgType("cid")); - NpgsqlTypeHandler RegtypeHandler() => _regtypeHandler ??= new UInt32Handler(PgType("regtype")); - NpgsqlTypeHandler RegconfigHandler() => _regconfigHandler ??= new UInt32Handler(PgType("regconfig")); - - // Misc types - NpgsqlTypeHandler ByteaHandler() => _byteaHandler ??= new ByteaHandler(PgType("bytea")); - NpgsqlTypeHandler UuidHandler() => _uuidHandler ??= new UuidHandler(PgType("uuid")); - NpgsqlTypeHandler BitVaryingHandler() => _bitVaryingHandler ??= new BitStringHandler(PgType("bit varying")); - NpgsqlTypeHandler BitHandler() => _bitHandler ??= new BitStringHandler(PgType("bit")); - NpgsqlTypeHandler? HstoreHandler() => _hstoreHandler ??= _databaseInfo.TryGetPostgresTypeByName("hstore", out var pgType) - ? 
new HstoreHandler(pgType, _textHandler) - : null; - - // Internal types - NpgsqlTypeHandler Int2VectorHandler() => _int2VectorHandler ??= new Int2VectorHandler(PgType("int2vector"), PgType("smallint")); - NpgsqlTypeHandler OidVectorHandler() => _oidVectorHandler ??= new OIDVectorHandler(PgType("oidvector"), PgType("oid")); - NpgsqlTypeHandler PgLsnHandler() => _pgLsnHandler ??= new PgLsnHandler(PgType("pg_lsn")); - NpgsqlTypeHandler TidHandler() => _tidHandler ??= new TidHandler(PgType("tid")); - NpgsqlTypeHandler InternalCharHandler() => _internalCharHandler ??= new InternalCharHandler(PgType("char")); - NpgsqlTypeHandler VoidHandler() => _voidHandler ??= new VoidHandler(PgType("void")); - - // Types that are unsupported by default when using NpgsqlSlimDataSourceBuilder - NpgsqlTypeHandler UnsupportedRecordHandler() => new UnsupportedHandler(PgType("record"), string.Format( - NpgsqlStrings.RecordsNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableRecords), nameof(NpgsqlSlimDataSourceBuilder))); - - NpgsqlTypeHandler UnsupportedTsVectorHandler() => new UnsupportedHandler(PgType("tsvector"), string.Format( - NpgsqlStrings.FullTextSearchNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableFullTextSearch), - nameof(NpgsqlSlimDataSourceBuilder))); - - NpgsqlTypeHandler UnsupportedTsQueryHandler() => new UnsupportedHandler(PgType("tsquery"), string.Format( - NpgsqlStrings.FullTextSearchNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableFullTextSearch), - nameof(NpgsqlSlimDataSourceBuilder))); - - NpgsqlTypeHandler UnknownHandler() => _unknownHandler ??= new UnknownTypeHandler(_connector.TextEncoding); - - #endregion Handler accessors -} diff --git a/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolverFactory.cs b/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolverFactory.cs deleted file mode 100644 index 2912b97249..0000000000 --- a/src/Npgsql/TypeMapping/BuiltInTypeHandlerResolverFactory.cs +++ /dev/null @@ -1,13 +0,0 @@ -using Npgsql.Internal; -using 
Npgsql.Internal.TypeHandling; -using Npgsql.Internal.TypeMapping; - -namespace Npgsql.TypeMapping; - -sealed class BuiltInTypeHandlerResolverFactory : TypeHandlerResolverFactory -{ - public override TypeHandlerResolver Create(TypeMapper typeMapper, NpgsqlConnector connector) - => new BuiltInTypeHandlerResolver(connector); - - public override TypeMappingResolver CreateMappingResolver() => new BuiltInTypeMappingResolver(); -} \ No newline at end of file diff --git a/src/Npgsql/TypeMapping/BuiltInTypeMappingResolver.cs b/src/Npgsql/TypeMapping/BuiltInTypeMappingResolver.cs deleted file mode 100644 index 8a236a86f6..0000000000 --- a/src/Npgsql/TypeMapping/BuiltInTypeMappingResolver.cs +++ /dev/null @@ -1,237 +0,0 @@ -using System; -using System.Collections; -using System.Collections.Generic; -using System.Collections.Immutable; -using System.Collections.Specialized; -using System.Net; -using System.Net.NetworkInformation; -using System.Numerics; -using Npgsql.Internal.TypeHandling; -using Npgsql.Internal.TypeMapping; -using NpgsqlTypes; -using static Npgsql.Util.Statics; - -namespace Npgsql.TypeMapping; - -sealed class BuiltInTypeMappingResolver : TypeMappingResolver -{ - static readonly Type ReadOnlyIPAddressType = IPAddress.Loopback.GetType(); - - static readonly Dictionary Mappings = new() - { - // Numeric types - { "smallint", new(NpgsqlDbType.Smallint, "smallint", typeof(short), typeof(byte), typeof(sbyte)) }, - { "integer", new(NpgsqlDbType.Integer, "integer", typeof(int)) }, - { "int", new(NpgsqlDbType.Integer, "integer", typeof(int)) }, - { "bigint", new(NpgsqlDbType.Bigint, "bigint", typeof(long)) }, - { "real", new(NpgsqlDbType.Real, "real", typeof(float)) }, - { "double precision", new(NpgsqlDbType.Double, "double precision", typeof(double)) }, - { "numeric", new(NpgsqlDbType.Numeric, "numeric", typeof(decimal), typeof(BigInteger)) }, - { "decimal", new(NpgsqlDbType.Numeric, "numeric", typeof(decimal), typeof(BigInteger)) }, - { "money", 
new(NpgsqlDbType.Money, "money") }, - - // Text types - { "text", new(NpgsqlDbType.Text, "text", typeof(string), typeof(char[]), typeof(char), typeof(ArraySegment)) }, - { "xml", new(NpgsqlDbType.Xml, "xml") }, - { "character varying", new(NpgsqlDbType.Varchar, "character varying") }, - { "varchar", new(NpgsqlDbType.Varchar, "character varying") }, - { "character", new(NpgsqlDbType.Char, "character") }, - { "name", new(NpgsqlDbType.Name, "name") }, - { "refcursor", new(NpgsqlDbType.Refcursor, "refcursor") }, - { "citext", new(NpgsqlDbType.Citext, "citext") }, - { "jsonb", new(NpgsqlDbType.Jsonb, "jsonb") }, - { "json", new(NpgsqlDbType.Json, "json") }, - { "jsonpath", new(NpgsqlDbType.JsonPath, "jsonpath") }, - - // Date/time types - { "timestamp without time zone", new(NpgsqlDbType.Timestamp, "timestamp without time zone", typeof(DateTime)) }, - { "timestamp", new(NpgsqlDbType.Timestamp, "timestamp without time zone", typeof(DateTime)) }, - { "timestamp with time zone", new(NpgsqlDbType.TimestampTz, "timestamp with time zone", typeof(DateTimeOffset)) }, - { "timestamptz", new(NpgsqlDbType.TimestampTz, "timestamp with time zone", typeof(DateTimeOffset)) }, - { "date", new(NpgsqlDbType.Date, "date" -#if NET6_0_OR_GREATER - , typeof(DateOnly) -#endif - ) }, - { "time without time zone", new(NpgsqlDbType.Time, "time without time zone" -#if NET6_0_OR_GREATER - , typeof(TimeOnly) -#endif - ) }, - { "time", new(NpgsqlDbType.Time, "time without time zone" -#if NET6_0_OR_GREATER - , typeof(TimeOnly) -#endif - ) }, - { "time with time zone", new(NpgsqlDbType.TimeTz, "time with time zone") }, - { "timetz", new(NpgsqlDbType.TimeTz, "time with time zone") }, - { "interval", new(NpgsqlDbType.Interval, "interval", typeof(TimeSpan)) }, - - { "timestamp without time zone[]", new(NpgsqlDbType.Array | NpgsqlDbType.Timestamp, "timestamp without time zone[]") }, - { "timestamp with time zone[]", new(NpgsqlDbType.Array | NpgsqlDbType.TimestampTz, "timestamp with time zone[]") }, - - // 
Network types - { "cidr", new(NpgsqlDbType.Cidr, "cidr") }, -#pragma warning disable 618 - { "inet", new(NpgsqlDbType.Inet, "inet", typeof(IPAddress), typeof((IPAddress Address, int Subnet)), typeof(NpgsqlInet), ReadOnlyIPAddressType) }, -#pragma warning restore 618 - { "macaddr", new(NpgsqlDbType.MacAddr, "macaddr", typeof(PhysicalAddress)) }, - { "macaddr8", new(NpgsqlDbType.MacAddr8, "macaddr8") }, - - // Geometry types - { "box", new(NpgsqlDbType.Box, "box", typeof(NpgsqlBox)) }, - { "circle", new(NpgsqlDbType.Circle, "circle", typeof(NpgsqlCircle)) }, - { "line", new(NpgsqlDbType.Line, "line", typeof(NpgsqlLine)) }, - { "lseg", new(NpgsqlDbType.LSeg, "lseg", typeof(NpgsqlLSeg)) }, - { "path", new(NpgsqlDbType.Path, "path", typeof(NpgsqlPath)) }, - { "point", new(NpgsqlDbType.Point, "point", typeof(NpgsqlPoint)) }, - { "polygon", new(NpgsqlDbType.Polygon, "polygon", typeof(NpgsqlPolygon)) }, - - // LTree types - { "lquery", new(NpgsqlDbType.LQuery, "lquery") }, - { "ltree", new(NpgsqlDbType.LTree, "ltree") }, - { "ltxtquery", new(NpgsqlDbType.LTxtQuery, "ltxtquery") }, - - // UInt types - { "oid", new(NpgsqlDbType.Oid, "oid") }, - { "xid", new(NpgsqlDbType.Xid, "xid") }, - { "xid8", new(NpgsqlDbType.Xid8, "xid8") }, - { "cid", new(NpgsqlDbType.Cid, "cid") }, - { "regtype", new(NpgsqlDbType.Regtype, "regtype") }, - { "regconfig", new(NpgsqlDbType.Regconfig, "regconfig") }, - - // Misc types - { "boolean", new(NpgsqlDbType.Boolean, "boolean", typeof(bool)) }, - { "bool", new(NpgsqlDbType.Boolean, "boolean", typeof(bool)) }, - { "bytea", new(NpgsqlDbType.Bytea, "bytea", typeof(byte[]), typeof(ArraySegment) -#if !NETSTANDARD2_0 - , typeof(ReadOnlyMemory), typeof(Memory) -#endif - ) }, - { "uuid", new(NpgsqlDbType.Uuid, "uuid", typeof(Guid)) }, - { "bit varying", new(NpgsqlDbType.Varbit, "bit varying", typeof(BitArray), typeof(BitVector32)) }, - { "varbit", new(NpgsqlDbType.Varbit, "bit varying", typeof(BitArray), typeof(BitVector32)) }, - { "bit", 
new(NpgsqlDbType.Bit, "bit") }, - { "hstore", new(NpgsqlDbType.Hstore, "hstore", typeof(Dictionary), typeof(IDictionary), typeof(ImmutableDictionary)) }, - - // Internal types - { "int2vector", new(NpgsqlDbType.Int2Vector, "int2vector") }, - { "oidvector", new(NpgsqlDbType.Oidvector, "oidvector") }, - { "pg_lsn", new(NpgsqlDbType.PgLsn, "pg_lsn", typeof(NpgsqlLogSequenceNumber)) }, - { "tid", new(NpgsqlDbType.Tid, "tid", typeof(NpgsqlTid)) }, - { "char", new(NpgsqlDbType.InternalChar, "char") }, - - // Special types - { "unknown", new(NpgsqlDbType.Unknown, "unknown") }, - }; - - static readonly Dictionary ClrTypeToDataTypeNameTable; - - static BuiltInTypeMappingResolver() - { - ClrTypeToDataTypeNameTable = new() - { - // Numeric types - { typeof(byte), "smallint" }, - { typeof(short), "smallint" }, - { typeof(int), "integer" }, - { typeof(long), "bigint" }, - { typeof(float), "real" }, - { typeof(double), "double precision" }, - { typeof(decimal), "decimal" }, - { typeof(BigInteger), "decimal" }, - - // Text types - { typeof(string), "text" }, - { typeof(char[]), "text" }, - { typeof(char), "text" }, - { typeof(ArraySegment), "text" }, - - // Date/time types - // The DateTime entry is for LegacyTimestampBehavior mode only. 
In regular mode we resolve through - // ResolveValueDependentValue below - { typeof(DateTime), "timestamp without time zone" }, - { typeof(DateTimeOffset), "timestamp with time zone" }, -#if NET6_0_OR_GREATER - { typeof(DateOnly), "date" }, - { typeof(TimeOnly), "time without time zone" }, -#endif - { typeof(TimeSpan), "interval" }, - { typeof(NpgsqlInterval), "interval" }, - - // Network types - { typeof(IPAddress), "inet" }, - // See ReadOnlyIPAddress below - { typeof((IPAddress Address, int Subnet)), "inet" }, -#pragma warning disable 618 - { typeof(NpgsqlInet), "inet" }, -#pragma warning restore 618 - { typeof(PhysicalAddress), "macaddr" }, - - // Geometry types - { typeof(NpgsqlBox), "box" }, - { typeof(NpgsqlCircle), "circle" }, - { typeof(NpgsqlLine), "line" }, - { typeof(NpgsqlLSeg), "lseg" }, - { typeof(NpgsqlPath), "path" }, - { typeof(NpgsqlPoint), "point" }, - { typeof(NpgsqlPolygon), "polygon" }, - - // Misc types - { typeof(bool), "boolean" }, - { typeof(byte[]), "bytea" }, - { typeof(ArraySegment), "bytea" }, -#if !NETSTANDARD2_0 - { typeof(ReadOnlyMemory), "bytea" }, - { typeof(Memory), "bytea" }, -#endif - { typeof(Guid), "uuid" }, - { typeof(BitArray), "bit varying" }, - { typeof(BitVector32), "bit varying" }, - { typeof(Dictionary), "hstore" }, - { typeof(ImmutableDictionary), "hstore" }, - - // Internal types - { typeof(NpgsqlLogSequenceNumber), "pg_lsn" }, - { typeof(NpgsqlTid), "tid" }, - { typeof(DBNull), "unknown" } - }; - - // Recent versions of .NET Core have an internal ReadOnlyIPAddress type (returned e.g. for IPAddress.Loopback) - // But older versions don't have it - if (ReadOnlyIPAddressType != typeof(IPAddress)) - ClrTypeToDataTypeNameTable[ReadOnlyIPAddressType] = "inet"; - - if (LegacyTimestampBehavior) - ClrTypeToDataTypeNameTable[typeof(DateTime)] = "timestamp without time zone"; - } - - public override string? GetDataTypeNameByClrType(Type clrType) - => ClrTypeToDataTypeName(clrType); - - internal static string? 
ClrTypeToDataTypeName(Type clrType) - => ClrTypeToDataTypeNameTable.TryGetValue(clrType, out var dataTypeName) ? dataTypeName : null; - - public override string? GetDataTypeNameByValueDependentValue(object value) - { - // In LegacyTimestampBehavior, DateTime isn't value-dependent, and handled above in ClrTypeToDataTypeNameTable like other types - if (LegacyTimestampBehavior) - return null; - - return value switch - { - DateTime dateTime => dateTime.Kind == DateTimeKind.Utc ? "timestamp with time zone" : "timestamp without time zone", - - // For arrays/lists, return timestamp or timestamptz based on the kind of the first DateTime; if the user attempts to - // mix incompatible Kinds, that will fail during validation. For empty arrays it doesn't matter. - IList array => array.Count == 0 - ? "timestamp without time zone[]" - : array[0].Kind == DateTimeKind.Utc ? "timestamp with time zone[]" : "timestamp without time zone[]", - - _ => null - }; - } - - public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) - => Mappings.TryGetValue(dataTypeName, out var mapping) ? mapping : null; -} diff --git a/src/Npgsql/TypeMapping/DefaultPgTypes.cs b/src/Npgsql/TypeMapping/DefaultPgTypes.cs new file mode 100644 index 0000000000..015e338a2c --- /dev/null +++ b/src/Npgsql/TypeMapping/DefaultPgTypes.cs @@ -0,0 +1,191 @@ +using System; +using System.Collections.Generic; +using Npgsql.Internal.Postgres; +using static Npgsql.TypeMapping.PgTypeGroup; + +namespace Npgsql.TypeMapping; + +static class DefaultPgTypes +{ + static IEnumerable> GetIdentifiers() + { + var list = new List>(); + foreach (var group in Items) + { + list.Add(new(group.Oid, group.Name)); + list.Add(new(group.ArrayOid, group.ArrayName)); + if (group.TypeKind is PgTypeKind.Range) + { + list.Add(new(group.MultirangeOid!.Value, group.MultirangeName!.Value)); + list.Add(new(group.MultirangeArrayOid!.Value, group.MultirangeArrayName!.Value)); + } + } + + return list; + } + + static Dictionary? 
_oidMap; + public static IReadOnlyDictionary OidMap + { + get + { + if (_oidMap is not null) + return _oidMap; + + var dict = new Dictionary(); + foreach (var element in GetIdentifiers()) + dict.Add(element.Key, element.Value); + + return _oidMap = dict; + } + } + + static Dictionary? _dataTypeNameMap; + public static IReadOnlyDictionary DataTypeNameMap + { + get + { + if (_dataTypeNameMap is not null) + return _dataTypeNameMap; + + var dict = new Dictionary(); + foreach (var element in GetIdentifiers()) + dict.Add(element.Value, element.Key); + + return _dataTypeNameMap = dict; + } + } + + // We could also codegen this from pg_type.dat that lives in the postgres repo. + public static IEnumerable Items + => new[] + { + Create(DataTypeNames.Int2, oid: 21, arrayOid: 1005), + Create(DataTypeNames.Int4, oid: 23, arrayOid: 1007), + Create(DataTypeNames.Int4Range, oid: 3904, arrayOid: 3905, multirangeOid: 4451, multirangeArrayOid: 6150, typeKind: PgTypeKind.Range), + Create(DataTypeNames.Int8, oid: 20, arrayOid: 1016), + Create(DataTypeNames.Int8Range, oid: 3926, arrayOid: 3927, multirangeOid: 4536, multirangeArrayOid: 6157, typeKind: PgTypeKind.Range), + Create(DataTypeNames.Float4, oid: 700, arrayOid: 1021), + Create(DataTypeNames.Float8, oid: 701, arrayOid: 1022), + Create(DataTypeNames.Numeric, oid: 1700, arrayOid: 1231), + Create(DataTypeNames.NumRange, oid: 3906, arrayOid: 3907, multirangeOid: 4532, multirangeArrayOid: 6151, typeKind: PgTypeKind.Range), + Create(DataTypeNames.Money, oid: 790, arrayOid: 791), + Create(DataTypeNames.Bool, oid: 16, arrayOid: 1000), + Create(DataTypeNames.Box, oid: 603, arrayOid: 1020), + Create(DataTypeNames.Circle, oid: 718, arrayOid: 719), + Create(DataTypeNames.Line, oid: 628, arrayOid: 629), + Create(DataTypeNames.LSeg, oid: 601, arrayOid: 1018), + Create(DataTypeNames.Path, oid: 602, arrayOid: 1019), + Create(DataTypeNames.Point, oid: 600, arrayOid: 1017), + Create(DataTypeNames.Polygon, oid: 604, arrayOid: 1027), + 
Create(DataTypeNames.Bpchar, oid: 1042, arrayOid: 1014), + Create(DataTypeNames.Text, oid: 25, arrayOid: 1009), + Create(DataTypeNames.Varchar, oid: 1043, arrayOid: 1015), + Create(DataTypeNames.Name, oid: 19, arrayOid: 1003), + Create(DataTypeNames.Bytea, oid: 17, arrayOid: 1001), + Create(DataTypeNames.Date, oid: 1082, arrayOid: 1182), + Create(DataTypeNames.DateRange, oid: 3912, arrayOid: 3913, multirangeOid: 4535, multirangeArrayOid: 6155, typeKind: PgTypeKind.Range), + Create(DataTypeNames.Time, oid: 1083, arrayOid: 1183), + Create(DataTypeNames.Timestamp, oid: 1114, arrayOid: 1115), + Create(DataTypeNames.TsRange, oid: 3908, arrayOid: 3909, multirangeOid: 4533, multirangeArrayOid: 6152, typeKind: PgTypeKind.Range), + Create(DataTypeNames.TimestampTz, oid: 1184, arrayOid: 1185), + Create(DataTypeNames.TsTzRange, oid: 3910, arrayOid: 3911, multirangeOid: 4534, multirangeArrayOid: 6153, typeKind: PgTypeKind.Range), + Create(DataTypeNames.Interval, oid: 1186, arrayOid: 1187), + Create(DataTypeNames.TimeTz, oid: 1266, arrayOid: 1270), + Create(DataTypeNames.Inet, oid: 869, arrayOid: 1041), + Create(DataTypeNames.Cidr, oid: 650, arrayOid: 651), + Create(DataTypeNames.MacAddr, oid: 829, arrayOid: 1040), + Create(DataTypeNames.MacAddr8, oid: 774, arrayOid: 775), + Create(DataTypeNames.Bit, oid: 1560, arrayOid: 1561), + Create(DataTypeNames.Varbit, oid: 1562, arrayOid: 1563), + Create(DataTypeNames.TsVector, oid: 3614, arrayOid: 3643), + Create(DataTypeNames.TsQuery, oid: 3615, arrayOid: 3645), + Create(DataTypeNames.RegConfig, oid: 3734, arrayOid: 3735), + Create(DataTypeNames.Uuid, oid: 2950, arrayOid: 2951), + Create(DataTypeNames.Xml, oid: 142, arrayOid: 143), + Create(DataTypeNames.Json, oid: 114, arrayOid: 199), + Create(DataTypeNames.Jsonb, oid: 3802, arrayOid: 3807), + Create(DataTypeNames.Jsonpath, oid: 4072, arrayOid: 4073), + Create(DataTypeNames.RefCursor, oid: 1790, arrayOid: 2201), + Create(DataTypeNames.OidVector, oid: 30, arrayOid: 1013), + 
Create(DataTypeNames.Int2Vector, oid: 22, arrayOid: 1006), + Create(DataTypeNames.Oid, oid: 26, arrayOid: 1028), + Create(DataTypeNames.Xid, oid: 28, arrayOid: 1011), + Create(DataTypeNames.Xid8, oid: 5069, arrayOid: 271), + Create(DataTypeNames.Cid, oid: 29, arrayOid: 1012), + Create(DataTypeNames.RegType, oid: 2206, arrayOid: 2211), + Create(DataTypeNames.Tid, oid: 27, arrayOid: 1010), + Create(DataTypeNames.PgLsn, oid: 3220, arrayOid: 3221), + Create(DataTypeNames.Unknown, oid: 705, arrayOid: 0, typeKind: PgTypeKind.Pseudo), + Create(DataTypeNames.Void, oid: 2278, arrayOid: 0, typeKind: PgTypeKind.Pseudo), + }; +} + +enum PgTypeKind +{ + /// A base type. + Base, + /// An enum carying its variants. + Enum, + /// A pseudo type like anyarray. + Pseudo, + // An array carying its element type. + Array, + // A range carying its element type. + Range, + // A multi-range carying its element type. + Multirange, + // A domain carying its underlying type. + Domain, + // A composite carying its constituent fields. + Composite +} + +readonly struct PgTypeGroup +{ + public required PgTypeKind TypeKind { get; init; } + public required DataTypeName Name { get; init; } + public required Oid Oid { get; init; } + public required DataTypeName ArrayName { get; init; } + public required Oid ArrayOid { get; init; } + public DataTypeName? MultirangeName { get; init; } + public Oid? MultirangeOid { get; init; } + public DataTypeName? MultirangeArrayName { get; init; } + public Oid? MultirangeArrayOid { get; init; } + + public static PgTypeGroup Create(DataTypeName name, Oid oid, Oid arrayOid, string? multirangeName = null, Oid? multirangeOid = null, Oid? multirangeArrayOid = null, PgTypeKind typeKind = PgTypeKind.Base) + { + DataTypeName? 
multirangeDataTypeName = null; + if (typeKind is PgTypeKind.Range) + { + if (multirangeOid is null) + throw new ArgumentException("When a range is supplied its multirange oid cannot be omitted."); + if (multirangeArrayOid is null) + throw new ArgumentException("When a range is supplied its multirange array oid cannot be omitted."); + multirangeDataTypeName = multirangeName is not null ? DataTypeName.CreateFullyQualifiedName(multirangeName) : name.ToDefaultMultirangeName(); + } + else + { + if (multirangeName is not null || multirangeOid is not null) + throw new ArgumentException("Only range types can have a multirange oid or name."); + + if (multirangeArrayOid is not null) + throw new ArgumentException("Only range types can have a multirange array oid."); + } + + return new PgTypeGroup + { + TypeKind = typeKind, + Name = name, + Oid = oid, + + ArrayName = name.ToArrayName(), + ArrayOid = arrayOid, + + MultirangeName = multirangeDataTypeName, + MultirangeOid = multirangeOid, + MultirangeArrayName = multirangeDataTypeName?.ToArrayName(), + MultirangeArrayOid = multirangeArrayOid + }; + } +} diff --git a/src/Npgsql/TypeMapping/FullTextSearchTypeHandlerResolver.cs b/src/Npgsql/TypeMapping/FullTextSearchTypeHandlerResolver.cs deleted file mode 100644 index 38db435814..0000000000 --- a/src/Npgsql/TypeMapping/FullTextSearchTypeHandlerResolver.cs +++ /dev/null @@ -1,34 +0,0 @@ -using System; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandlers.FullTextSearchHandlers; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.TypeMapping; - -sealed class FullTextSearchTypeHandlerResolver : TypeHandlerResolver -{ - readonly NpgsqlDatabaseInfo _databaseInfo; - - public FullTextSearchTypeHandlerResolver(NpgsqlConnector connector) - => _databaseInfo = connector.DatabaseInfo; - - TsQueryHandler? _tsQueryHandler; - TsVectorHandler? _tsVectorHandler; - - public override NpgsqlTypeHandler? 
ResolveByDataTypeName(string typeName) => - typeName switch - { - "tsquery" => TsQueryHandler(), - "tsvector" => TsVectorHandler(), - _ => null - }; - - public override NpgsqlTypeHandler? ResolveByClrType(Type type) - => FullTextSearchTypeMappingResolver.ClrTypeToDataTypeName(type) is { } dataTypeName ? ResolveByDataTypeName(dataTypeName) : null; - - NpgsqlTypeHandler TsQueryHandler() => _tsQueryHandler ??= new TsQueryHandler(PgType("tsquery")); - NpgsqlTypeHandler TsVectorHandler() => _tsVectorHandler ??= new TsVectorHandler(PgType("tsvector")); - - PostgresType PgType(string pgTypeName) => _databaseInfo.GetPostgresTypeByName(pgTypeName); -} diff --git a/src/Npgsql/TypeMapping/FullTextSearchTypeHandlerResolverFactory.cs b/src/Npgsql/TypeMapping/FullTextSearchTypeHandlerResolverFactory.cs deleted file mode 100644 index cbfb8a9838..0000000000 --- a/src/Npgsql/TypeMapping/FullTextSearchTypeHandlerResolverFactory.cs +++ /dev/null @@ -1,15 +0,0 @@ -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.Internal.TypeMapping; - -namespace Npgsql.TypeMapping; - -sealed class FullTextSearchTypeHandlerResolverFactory : TypeHandlerResolverFactory -{ - public override TypeHandlerResolver Create(TypeMapper typeMapper, NpgsqlConnector connector) => - new FullTextSearchTypeHandlerResolver(connector); - - public override TypeMappingResolver CreateMappingResolver() => new FullTextSearchTypeMappingResolver(); - - public override TypeMappingResolver CreateGlobalMappingResolver() => new FullTextSearchTypeMappingResolver(); -} diff --git a/src/Npgsql/TypeMapping/FullTextSearchTypeMappingResolver.cs b/src/Npgsql/TypeMapping/FullTextSearchTypeMappingResolver.cs deleted file mode 100644 index 90185578c0..0000000000 --- a/src/Npgsql/TypeMapping/FullTextSearchTypeMappingResolver.cs +++ /dev/null @@ -1,41 +0,0 @@ -using System; -using System.Collections.Generic; -using Npgsql.Internal.TypeHandling; -using Npgsql.Internal.TypeMapping; -using NpgsqlTypes; - -namespace 
Npgsql.TypeMapping; - -sealed class FullTextSearchTypeMappingResolver : TypeMappingResolver -{ - static readonly TypeMappingInfo TsQueryMappingInfo = new(NpgsqlDbType.TsQuery, "tsquery", - typeof(NpgsqlTsQuery), typeof(NpgsqlTsQueryAnd), typeof(NpgsqlTsQueryEmpty), typeof(NpgsqlTsQueryFollowedBy), - typeof(NpgsqlTsQueryLexeme), typeof(NpgsqlTsQueryNot), typeof(NpgsqlTsQueryOr), typeof(NpgsqlTsQueryBinOp)); - - static readonly TypeMappingInfo TsVectorMappingInfo = new(NpgsqlDbType.TsVector, "tsvector", typeof(NpgsqlTsVector)); - - static readonly Dictionary ClrTypeToDataTypeNameTable = new() - { - { typeof(NpgsqlTsVector), "tsvector" }, - { typeof(NpgsqlTsQueryLexeme), "tsquery" }, - { typeof(NpgsqlTsQueryAnd), "tsquery" }, - { typeof(NpgsqlTsQueryOr), "tsquery" }, - { typeof(NpgsqlTsQueryNot), "tsquery" }, - { typeof(NpgsqlTsQueryEmpty), "tsquery" }, - { typeof(NpgsqlTsQueryFollowedBy), "tsquery" }, - }; - - public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) - => dataTypeName switch - { - "tsquery" => TsQueryMappingInfo, - "tsvector" => TsVectorMappingInfo, - _ => null - }; - - public override string? GetDataTypeNameByClrType(Type clrType) - => ClrTypeToDataTypeName(clrType); - - internal static string? ClrTypeToDataTypeName(Type clrType) - => ClrTypeToDataTypeNameTable.TryGetValue(clrType, out var dataTypeName) ? 
dataTypeName : null; -} diff --git a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs index a13abf5ea0..fdaa340bd8 100644 --- a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs +++ b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs @@ -1,654 +1,240 @@ using System; -using System.Collections.Concurrent; using System.Collections.Generic; -using System.Data; using System.Diagnostics.CodeAnalysis; -using System.Linq; -using System.Reflection; using System.Threading; -using Npgsql.Internal.TypeHandling; -using Npgsql.Internal.TypeMapping; -using Npgsql.NameTranslation; -using NpgsqlTypes; -using static Npgsql.Util.Statics; +using Npgsql.Internal; +using Npgsql.Internal.Postgres; namespace Npgsql.TypeMapping; +/// sealed class GlobalTypeMapper : INpgsqlTypeMapper { - public static GlobalTypeMapper Instance { get; } + readonly UserTypeMapper _userTypeMapper = new(); + readonly List _pluginResolvers = new(); + readonly ReaderWriterLockSlim _lock = new(); + IPgTypeInfoResolver[] _typeMappingResolvers = Array.Empty(); - public INpgsqlNameTranslator DefaultNameTranslator { get; set; } = new NpgsqlSnakeCaseNameTranslator(); - - internal List HandlerResolverFactories { get; } = new(); - List MappingResolvers { get; } = new(); - public ConcurrentDictionary UserTypeMappings { get; } = new(); - - readonly ConcurrentDictionary _mappingsByClrType = new(); - - internal ReaderWriterLockSlim Lock { get; } - = new(LockRecursionPolicy.SupportsRecursion); - - static GlobalTypeMapper() - => Instance = new GlobalTypeMapper(); - - GlobalTypeMapper() - => Reset(); - - #region Mapping management - - public INpgsqlTypeMapper MapEnum(string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) - where TEnum : struct, Enum + internal IEnumerable GetPluginResolvers() { - if (pgName != null && pgName.Trim() == "") - throw new ArgumentException("pgName can't be empty", nameof(pgName)); - - nameTranslator ??= DefaultNameTranslator; - pgName ??= GetPgName(typeof(TEnum), nameTranslator); - - Lock.EnterWriteLock(); + var resolvers = new List(); + _lock.EnterReadLock(); try { - UserTypeMappings[pgName] = new UserEnumTypeMapping(pgName, nameTranslator); - RecordChange(); - return this; + resolvers.AddRange(_pluginResolvers); } finally { - Lock.ExitWriteLock(); + _lock.ExitReadLock(); } + + return resolvers; } - public bool UnmapEnum(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) - where TEnum : struct, Enum + internal IPgTypeInfoResolver? GetUserMappingsResolver() { - if (pgName != null && pgName.Trim() == "") - throw new ArgumentException("pgName can't be empty", nameof(pgName)); - - nameTranslator ??= DefaultNameTranslator; - pgName ??= GetPgName(typeof(TEnum), nameTranslator); - - Lock.EnterWriteLock(); + _lock.EnterReadLock(); try { - if (UserTypeMappings.TryRemove(pgName, out _)) - { - RecordChange(); - return true; - } - - return false; + return _userTypeMapper.Items.Count > 0 ? _userTypeMapper.Build() : null; } finally { - Lock.ExitWriteLock(); + _lock.ExitReadLock(); } } - [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] - public INpgsqlTypeMapper MapComposite(string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) + internal void AddGlobalTypeMappingResolvers(IPgTypeInfoResolver[] resolvers, bool overwrite = false) { - if (pgName != null && pgName.Trim() == "") - throw new ArgumentException("pgName can't be empty", nameof(pgName)); - - nameTranslator ??= DefaultNameTranslator; - pgName ??= GetPgName(typeof(T), nameTranslator); - - Lock.EnterWriteLock(); - try - { - UserTypeMappings[pgName] = new UserCompositeTypeMapping(pgName, nameTranslator); - RecordChange(); - return this; - } - finally + // Good enough logic to prevent SlimBuilder overriding the normal Builder. + if (overwrite || resolvers.Length > _typeMappingResolvers.Length) { - Lock.ExitWriteLock(); + _typeMappingResolvers = resolvers; + ResetTypeMappingCache(); } } - [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] - public INpgsqlTypeMapper MapComposite(Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) - { - var openMethod = typeof(GlobalTypeMapper).GetMethod(nameof(MapComposite), new[] { typeof(string), typeof(INpgsqlNameTranslator) })!; - var method = openMethod.MakeGenericMethod(clrType); - method.Invoke(this, new object?[] { pgName, nameTranslator }); + void ResetTypeMappingCache() => _typeMappingOptions = null; - return this; - } - - [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] - public bool UnmapComposite(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) - => UnmapComposite(typeof(T), pgName, nameTranslator); + PgSerializerOptions? _typeMappingOptions; - [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] - public bool UnmapComposite(Type clrType, string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) + PgSerializerOptions TypeMappingOptions { - if (pgName != null && pgName.Trim() == "") - throw new ArgumentException("pgName can't be empty", nameof(pgName)); + get + { + if (_typeMappingOptions is not null) + return _typeMappingOptions; - nameTranslator ??= DefaultNameTranslator; - pgName ??= GetPgName(clrType, nameTranslator); + _lock.EnterReadLock(); + try + { + var resolvers = new List(); + resolvers.Add(_userTypeMapper.Build()); + resolvers.AddRange(_pluginResolvers); + resolvers.AddRange(_typeMappingResolvers); + return _typeMappingOptions = new(PostgresMinimalDatabaseInfo.DefaultTypeCatalog) + { + // This means we don't ever have a missing oid for a datatypename as our canonical format is datatypenames. + PortableTypeIds = true, + // Don't throw if our catalog doesn't know the datatypename. + IntrospectionMode = true, + TypeInfoResolver = new TypeInfoResolverChain(resolvers) + }; + } + finally + { + _lock.ExitReadLock(); + } + } + } - Lock.EnterWriteLock(); - try - { - if (UserTypeMappings.TryRemove(pgName, out _)) + internal DataTypeName? TryGetDataTypeName(Type type, object value) + { + var typeInfo = TypeMappingOptions.GetTypeInfo(type); + DataTypeName? dataTypeName; + if (typeInfo is PgResolverTypeInfo info) + try + { + dataTypeName = info.GetObjectResolution(value).PgTypeId.DataTypeName; + } + catch { - RecordChange(); - return true; + dataTypeName = null; } + else + dataTypeName = typeInfo?.GetConcreteResolution().PgTypeId.DataTypeName; - return false; - } - finally - { - Lock.ExitWriteLock(); - } + return dataTypeName; } - public void AddTypeResolverFactory(TypeHandlerResolverFactory resolverFactory) + internal static GlobalTypeMapper Instance { get; } + + static GlobalTypeMapper() + => Instance = new GlobalTypeMapper(); + + /// + /// Adds a type info resolver which can add or modify support for PostgreSQL types. + /// Typically used by plugins. + /// + /// The type resolver to be added. 
+ public void AddTypeInfoResolver(IPgTypeInfoResolver resolver) { - Lock.EnterWriteLock(); + _lock.EnterWriteLock(); try { - // Since EFCore.PG plugins (and possibly other users) repeatedly call NpgsqlConnection.GlobalTypeMapped.UseNodaTime, - // we replace an existing resolver of the same CLR type. - var type = resolverFactory.GetType(); + var type = resolver.GetType(); - if (HandlerResolverFactories[0].GetType() == type) - HandlerResolverFactories[0] = resolverFactory; - else + // Since EFCore.PG plugins (and possibly other users) repeatedly call NpgsqlConnection.GlobalTypeMapper.UseNodaTime, + // we replace an existing resolver of the same CLR type. + if (_pluginResolvers.Count > 0 && _pluginResolvers[0].GetType() == type) + _pluginResolvers[0] = resolver; + for (var i = 0; i < _pluginResolvers.Count; i++) { - for (var i = 0; i < HandlerResolverFactories.Count; i++) - if (HandlerResolverFactories[i].GetType() == type) - HandlerResolverFactories.RemoveAt(i); - - HandlerResolverFactories.Insert(0, resolverFactory); + if (_pluginResolvers[i].GetType() == type) + { + _pluginResolvers.RemoveAt(i); + break; + } } - var mappingResolver = resolverFactory.CreateMappingResolver(); - if (mappingResolver is not null) - AddMappingResolver(mappingResolver, overwrite: true); - - RecordChange(); + _pluginResolvers.Insert(0, resolver); + ResetTypeMappingCache(); } finally { - Lock.ExitWriteLock(); + _lock.ExitWriteLock(); } } - internal void TryAddMappingResolver(TypeMappingResolver resolver) + /// + public void Reset() { - Lock.EnterWriteLock(); + _lock.EnterWriteLock(); try { - // For global mapper resolvers we don't need to overwrite them in case we add another of the same type - // because they shouldn't have a state. - // The only exception is whenever a user adds a resolver factory to global type mapper specifically. - // In that case we create a local mapper resolver and always overwrite the one we already have - // as it can have settings (e.g. 
json serialization) - if (AddMappingResolver(resolver, overwrite: false)) - RecordChange(); + _pluginResolvers.Clear(); + _userTypeMapper.Items.Clear(); } finally { - Lock.ExitWriteLock(); + _lock.ExitWriteLock(); } } - bool AddMappingResolver(TypeMappingResolver resolver, bool overwrite) + /// + public INpgsqlNameTranslator DefaultNameTranslator { - // Since EFCore.PG plugins (and possibly other users) repeatedly call NpgsqlConnection.GlobalTypeMapped.UseNodaTime, - // we replace an existing resolver of the same CLR type. - var type = resolver.GetType(); + get => _userTypeMapper.DefaultNameTranslator; + set => _userTypeMapper.DefaultNameTranslator = value; + } - if (MappingResolvers[0].GetType() == type) + /// + public INpgsqlTypeMapper MapEnum(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) where TEnum : struct, Enum + { + _lock.EnterWriteLock(); + try { - if (!overwrite) - return false; - MappingResolvers[0] = resolver; + _userTypeMapper.MapEnum(pgName, nameTranslator); + return this; } - else + finally { - for (var i = 0; i < MappingResolvers.Count; i++) - { - if (MappingResolvers[i].GetType() == type) - { - if (!overwrite) - return false; - MappingResolvers.RemoveAt(i); - break; - } - } - - MappingResolvers.Insert(0, resolver); + _lock.ExitWriteLock(); } - - return true; } - public void Reset() + /// + public bool UnmapEnum(string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) where TEnum : struct, Enum { - Lock.EnterWriteLock(); + _lock.EnterWriteLock(); try { - HandlerResolverFactories.Clear(); - HandlerResolverFactories.Add(new BuiltInTypeHandlerResolverFactory()); - - MappingResolvers.Clear(); - MappingResolvers.Add(new BuiltInTypeMappingResolver()); - - UserTypeMappings.Clear(); - - RecordChange(); + return _userTypeMapper.UnmapEnum(pgName, nameTranslator); } finally { - Lock.ExitWriteLock(); + _lock.ExitWriteLock(); } } - internal void RecordChange() - => _mappingsByClrType.Clear(); - - static string GetPgName(Type clrType, INpgsqlNameTranslator nameTranslator) - => clrType.GetCustomAttribute()?.PgName - ?? nameTranslator.TranslateTypeName(clrType.Name); - - #endregion Mapping management + /// + [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] + public INpgsqlTypeMapper MapComposite(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + => MapComposite(typeof(T), pgName, nameTranslator); - #region NpgsqlDbType/DbType inference for NpgsqlParameter + /// + [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] + public bool UnmapComposite(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + => UnmapComposite(typeof(T), pgName, nameTranslator); - [RequiresUnreferencedCode("ToNpgsqlDbType uses interface-based reflection and isn't trimming-safe")] - internal bool TryResolveMappingByValue(object value, [NotNullWhen(true)] out TypeMappingInfo? typeMapping) + /// + [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] + public INpgsqlTypeMapper MapComposite(Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) { - Lock.EnterReadLock(); + _lock.EnterWriteLock(); try { - // We resolve as follows: - // 1. Cached by-type lookup (fast path). This will work for almost all types after the very first resolution. - // 2. Value-dependent type lookup (e.g. 
DateTime by Kind) via the resolvers. This includes complex types (e.g. array/range - // over DateTime), and the results cannot be cached. - // 3. Uncached by-type lookup (for the very first resolution of a given type) - - var type = value.GetType(); - if (_mappingsByClrType.TryGetValue(type, out typeMapping)) - return true; - - foreach (var resolver in MappingResolvers) - if ((typeMapping = resolver.GetMappingByValueDependentValue(value)) is not null) - return true; - - return TryResolveMappingByClrType(type, out typeMapping); + _userTypeMapper.MapComposite(clrType, pgName, nameTranslator); + return this; } finally { - Lock.ExitReadLock(); - } - - bool TryResolveMappingByClrType(Type clrType, [NotNullWhen(true)] out TypeMappingInfo? typeMapping) - { - if (_mappingsByClrType.TryGetValue(clrType, out typeMapping)) - return true; - - foreach (var resolver in MappingResolvers) - { - if ((typeMapping = resolver.GetMappingByClrType(clrType)) is not null) - { - _mappingsByClrType[clrType] = typeMapping; - return true; - } - } - - if (clrType.IsArray) - { - if (TryResolveMappingByClrType(clrType.GetElementType()!, out var elementMapping)) - { - _mappingsByClrType[clrType] = typeMapping = new( - NpgsqlDbType.Array | elementMapping.NpgsqlDbType, - elementMapping.DataTypeName + "[]"); - return true; - } - - typeMapping = null; - return false; - } - - var typeInfo = clrType.GetTypeInfo(); - - var ilist = typeInfo.ImplementedInterfaces.FirstOrDefault(x => - x.GetTypeInfo().IsGenericType && x.GetGenericTypeDefinition() == typeof(IList<>)); - if (ilist != null) - { - if (TryResolveMappingByClrType(ilist.GetGenericArguments()[0], out var elementMapping)) - { - _mappingsByClrType[clrType] = typeMapping = new( - NpgsqlDbType.Array | elementMapping.NpgsqlDbType, - elementMapping.DataTypeName + "[]"); - return true; - } - - typeMapping = null; - return false; - } - - if (typeInfo.IsGenericType && clrType.GetGenericTypeDefinition() == typeof(NpgsqlRange<>)) - { - if 
(TryResolveMappingByClrType(clrType.GetGenericArguments()[0], out var elementMapping)) - { - _mappingsByClrType[clrType] = typeMapping = new( - NpgsqlDbType.Range | elementMapping.NpgsqlDbType, - dataTypeName: null); - return true; - } - - typeMapping = null; - return false; - } - - typeMapping = null; - return false; + _lock.ExitWriteLock(); } } - #endregion NpgsqlDbType/DbType inference for NpgsqlParameter - - #region Static translation tables - - public static string? NpgsqlDbTypeToDataTypeName(NpgsqlDbType npgsqlDbType) - => npgsqlDbType switch - { - // Numeric types - NpgsqlDbType.Smallint => "smallint", - NpgsqlDbType.Integer => "integer", - NpgsqlDbType.Bigint => "bigint", - NpgsqlDbType.Real => "real", - NpgsqlDbType.Double => "double precision", - NpgsqlDbType.Numeric => "numeric", - NpgsqlDbType.Money => "money", - - // Text types - NpgsqlDbType.Text => "text", - NpgsqlDbType.Xml => "xml", - NpgsqlDbType.Varchar => "character varying", - NpgsqlDbType.Char => "character", - NpgsqlDbType.Name => "name", - NpgsqlDbType.Refcursor => "refcursor", - NpgsqlDbType.Citext => "citext", - NpgsqlDbType.Jsonb => "jsonb", - NpgsqlDbType.Json => "json", - NpgsqlDbType.JsonPath => "jsonpath", - - // Date/time types - NpgsqlDbType.Timestamp => "timestamp without time zone", - NpgsqlDbType.TimestampTz => "timestamp with time zone", - NpgsqlDbType.Date => "date", - NpgsqlDbType.Time => "time without time zone", - NpgsqlDbType.TimeTz => "time with time zone", - NpgsqlDbType.Interval => "interval", - - // Network types - NpgsqlDbType.Cidr => "cidr", - NpgsqlDbType.Inet => "inet", - NpgsqlDbType.MacAddr => "macaddr", - NpgsqlDbType.MacAddr8 => "macaddr8", - - // Full-text search types - NpgsqlDbType.TsQuery => "tsquery", - NpgsqlDbType.TsVector => "tsvector", - - // Geometry types - NpgsqlDbType.Box => "box", - NpgsqlDbType.Circle => "circle", - NpgsqlDbType.Line => "line", - NpgsqlDbType.LSeg => "lseg", - NpgsqlDbType.Path => "path", - NpgsqlDbType.Point => "point", - 
NpgsqlDbType.Polygon => "polygon", - - // LTree types - NpgsqlDbType.LQuery => "lquery", - NpgsqlDbType.LTree => "ltree", - NpgsqlDbType.LTxtQuery => "ltxtquery", - - // UInt types - NpgsqlDbType.Oid => "oid", - NpgsqlDbType.Xid => "xid", - NpgsqlDbType.Xid8 => "xid8", - NpgsqlDbType.Cid => "cid", - NpgsqlDbType.Regtype => "regtype", - NpgsqlDbType.Regconfig => "regconfig", - - // Misc types - NpgsqlDbType.Boolean => "boolean", - NpgsqlDbType.Bytea => "bytea", - NpgsqlDbType.Uuid => "uuid", - NpgsqlDbType.Varbit => "bit varying", - NpgsqlDbType.Bit => "bit", - NpgsqlDbType.Hstore => "hstore", - - NpgsqlDbType.Geometry => "geometry", - NpgsqlDbType.Geography => "geography", - - // Built-in range types - NpgsqlDbType.IntegerRange => "int4range", - NpgsqlDbType.BigIntRange => "int8range", - NpgsqlDbType.NumericRange => "numrange", - NpgsqlDbType.TimestampRange => "tsrange", - NpgsqlDbType.TimestampTzRange => "tstzrange", - NpgsqlDbType.DateRange => "daterange", - - // Built-in multirange types - NpgsqlDbType.IntegerMultirange => "int4multirange", - NpgsqlDbType.BigIntMultirange => "int8multirange", - NpgsqlDbType.NumericMultirange => "nummultirange", - NpgsqlDbType.TimestampMultirange => "tsmultirange", - NpgsqlDbType.TimestampTzMultirange => "tstzmultirange", - NpgsqlDbType.DateMultirange => "datemultirange", - - // Internal types - NpgsqlDbType.Int2Vector => "int2vector", - NpgsqlDbType.Oidvector => "oidvector", - NpgsqlDbType.PgLsn => "pg_lsn", - NpgsqlDbType.Tid => "tid", - NpgsqlDbType.InternalChar => "char", - - // Special types - NpgsqlDbType.Unknown => "unknown", - - _ => npgsqlDbType.HasFlag(NpgsqlDbType.Array) - ? NpgsqlDbTypeToDataTypeName(npgsqlDbType & ~NpgsqlDbType.Array) + "[]" - : null // e.g. ranges - }; - - public static NpgsqlDbType DataTypeNameToNpgsqlDbType(string typeName) + /// + [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] + public bool UnmapComposite(Type clrType, string? 
pgName = null, INpgsqlNameTranslator? nameTranslator = null) { - // Strip any facet information (length/precision/scale) - var parenIndex = typeName.IndexOf('('); - if (parenIndex > -1) - typeName = typeName.Substring(0, parenIndex); - - return typeName switch - { - // Numeric types - "smallint" => NpgsqlDbType.Smallint, - "integer" or "int" => NpgsqlDbType.Integer, - "bigint" => NpgsqlDbType.Bigint, - "real" => NpgsqlDbType.Real, - "double precision" => NpgsqlDbType.Double, - "numeric" => NpgsqlDbType.Numeric, - "money" => NpgsqlDbType.Money, - - // Text types - "text" => NpgsqlDbType.Text, - "xml" => NpgsqlDbType.Xml, - "character varying" or "varchar" => NpgsqlDbType.Varchar, - "character" => NpgsqlDbType.Char, - "name" => NpgsqlDbType.Name, - "refcursor" => NpgsqlDbType.Refcursor, - "citext" => NpgsqlDbType.Citext, - "jsonb" => NpgsqlDbType.Jsonb, - "json" => NpgsqlDbType.Json, - "jsonpath" => NpgsqlDbType.JsonPath, - - // Date/time types - "timestamp without time zone" or "timestamp" => NpgsqlDbType.Timestamp, - "timestamp with time zone" or "timestamptz" => NpgsqlDbType.TimestampTz, - "date" => NpgsqlDbType.Date, - "time without time zone" or "timetz" => NpgsqlDbType.Time, - "time with time zone" or "time" => NpgsqlDbType.TimeTz, - "interval" => NpgsqlDbType.Interval, - - // Network types - "cidr" => NpgsqlDbType.Cidr, - "inet" => NpgsqlDbType.Inet, - "macaddr" => NpgsqlDbType.MacAddr, - "macaddr8" => NpgsqlDbType.MacAddr8, - - // Full-text search types - "tsquery" => NpgsqlDbType.TsQuery, - "tsvector" => NpgsqlDbType.TsVector, - - // Geometry types - "box" => NpgsqlDbType.Box, - "circle" => NpgsqlDbType.Circle, - "line" => NpgsqlDbType.Line, - "lseg" => NpgsqlDbType.LSeg, - "path" => NpgsqlDbType.Path, - "point" => NpgsqlDbType.Point, - "polygon" => NpgsqlDbType.Polygon, - - // LTree types - "lquery" => NpgsqlDbType.LQuery, - "ltree" => NpgsqlDbType.LTree, - "ltxtquery" => NpgsqlDbType.LTxtQuery, - - // UInt types - "oid" => NpgsqlDbType.Oid, - "xid" => 
NpgsqlDbType.Xid, - "xid8" => NpgsqlDbType.Xid8, - "cid" => NpgsqlDbType.Cid, - "regtype" => NpgsqlDbType.Regtype, - "regconfig" => NpgsqlDbType.Regconfig, - - // Misc types - "boolean" or "bool" => NpgsqlDbType.Boolean, - "bytea" => NpgsqlDbType.Bytea, - "uuid" => NpgsqlDbType.Uuid, - "bit varying" or "varbit" => NpgsqlDbType.Varbit, - "bit" => NpgsqlDbType.Bit, - "hstore" => NpgsqlDbType.Hstore, - - "geometry" => NpgsqlDbType.Geometry, - "geography" => NpgsqlDbType.Geography, - - // Built-in range types - "int4range" => NpgsqlDbType.IntegerRange, - "int8range" => NpgsqlDbType.BigIntRange, - "numrange" => NpgsqlDbType.NumericRange, - "tsrange" => NpgsqlDbType.TimestampRange, - "tstzrange" => NpgsqlDbType.TimestampTzRange, - "daterange" => NpgsqlDbType.DateRange, - - // Built-in multirange types - "int4multirange" => NpgsqlDbType.IntegerMultirange, - "int8multirange" => NpgsqlDbType.BigIntMultirange, - "nummultirange" => NpgsqlDbType.NumericMultirange, - "tsmultirange" => NpgsqlDbType.TimestampMultirange, - "tstzmultirange" => NpgsqlDbType.TimestampTzMultirange, - "datemultirange" => NpgsqlDbType.DateMultirange, - - // Internal types - "int2vector" => NpgsqlDbType.Int2Vector, - "oidvector" => NpgsqlDbType.Oidvector, - "pg_lsn" => NpgsqlDbType.PgLsn, - "tid" => NpgsqlDbType.Tid, - "char" => NpgsqlDbType.InternalChar, - - _ => typeName.EndsWith("[]", StringComparison.Ordinal) && - DataTypeNameToNpgsqlDbType(typeName.Substring(0, typeName.Length - 2)) is { } elementNpgsqlDbType && - elementNpgsqlDbType != NpgsqlDbType.Unknown - ? elementNpgsqlDbType | NpgsqlDbType.Array - : NpgsqlDbType.Unknown // e.g. ranges - }; - } - - internal static NpgsqlDbType? 
DbTypeToNpgsqlDbType(DbType dbType) - => dbType switch + _lock.EnterWriteLock(); + try { - DbType.AnsiString => NpgsqlDbType.Text, - DbType.Binary => NpgsqlDbType.Bytea, - DbType.Byte => NpgsqlDbType.Smallint, - DbType.Boolean => NpgsqlDbType.Boolean, - DbType.Currency => NpgsqlDbType.Money, - DbType.Date => NpgsqlDbType.Date, - DbType.DateTime => LegacyTimestampBehavior ? NpgsqlDbType.Timestamp : NpgsqlDbType.TimestampTz, - DbType.Decimal => NpgsqlDbType.Numeric, - DbType.VarNumeric => NpgsqlDbType.Numeric, - DbType.Double => NpgsqlDbType.Double, - DbType.Guid => NpgsqlDbType.Uuid, - DbType.Int16 => NpgsqlDbType.Smallint, - DbType.Int32 => NpgsqlDbType.Integer, - DbType.Int64 => NpgsqlDbType.Bigint, - DbType.Single => NpgsqlDbType.Real, - DbType.String => NpgsqlDbType.Text, - DbType.Time => NpgsqlDbType.Time, - DbType.AnsiStringFixedLength => NpgsqlDbType.Text, - DbType.StringFixedLength => NpgsqlDbType.Text, - DbType.Xml => NpgsqlDbType.Xml, - DbType.DateTime2 => NpgsqlDbType.Timestamp, - DbType.DateTimeOffset => NpgsqlDbType.TimestampTz, - - DbType.Object => null, - DbType.SByte => null, - DbType.UInt16 => null, - DbType.UInt32 => null, - DbType.UInt64 => null, - - _ => throw new ArgumentOutOfRangeException(nameof(dbType), dbType, null) - }; - - internal static DbType NpgsqlDbTypeToDbType(NpgsqlDbType npgsqlDbType) - => npgsqlDbType switch + return _userTypeMapper.UnmapComposite(clrType, pgName, nameTranslator); + } + finally { - // Numeric types - NpgsqlDbType.Smallint => DbType.Int16, - NpgsqlDbType.Integer => DbType.Int32, - NpgsqlDbType.Bigint => DbType.Int64, - NpgsqlDbType.Real => DbType.Single, - NpgsqlDbType.Double => DbType.Double, - NpgsqlDbType.Numeric => DbType.Decimal, - NpgsqlDbType.Money => DbType.Currency, - - // Text types - NpgsqlDbType.Text => DbType.String, - NpgsqlDbType.Xml => DbType.Xml, - NpgsqlDbType.Varchar => DbType.String, - NpgsqlDbType.Char => DbType.String, - NpgsqlDbType.Name => DbType.String, - NpgsqlDbType.Refcursor => 
DbType.String, - NpgsqlDbType.Citext => DbType.String, - NpgsqlDbType.Jsonb => DbType.Object, - NpgsqlDbType.Json => DbType.Object, - NpgsqlDbType.JsonPath => DbType.String, - - // Date/time types - NpgsqlDbType.Timestamp => LegacyTimestampBehavior ? DbType.DateTime : DbType.DateTime2, - NpgsqlDbType.TimestampTz => LegacyTimestampBehavior ? DbType.DateTimeOffset : DbType.DateTime, - NpgsqlDbType.Date => DbType.Date, - NpgsqlDbType.Time => DbType.Time, - - // Misc data types - NpgsqlDbType.Bytea => DbType.Binary, - NpgsqlDbType.Boolean => DbType.Boolean, - NpgsqlDbType.Uuid => DbType.Guid, - - NpgsqlDbType.Unknown => DbType.Object, - - _ => DbType.Object - }; - - #endregion Static translation tables -} \ No newline at end of file + _lock.ExitWriteLock(); + } + } +} diff --git a/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs b/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs index 3d0c46dd92..2f4d7ff040 100644 --- a/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs +++ b/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs @@ -1,6 +1,6 @@ using System; using System.Diagnostics.CodeAnalysis; -using Npgsql.Internal.TypeHandling; +using Npgsql.Internal; using Npgsql.NameTranslation; using NpgsqlTypes; @@ -147,14 +147,14 @@ bool UnmapComposite( INpgsqlNameTranslator? nameTranslator = null); /// - /// Adds a type resolver factory, which produces resolvers that can add or modify support for PostgreSQL types. + /// Adds a type info resolver which can add or modify support for PostgreSQL types. /// Typically used by plugins. /// - /// The type resolver factory to be added. - void AddTypeResolverFactory(TypeHandlerResolverFactory resolverFactory); + /// The type resolver to be added. + void AddTypeInfoResolver(IPgTypeInfoResolver resolver); /// /// Resets all mapping changes performed on this type mapper and reverts it to its original, starting state. 
/// void Reset(); -} \ No newline at end of file +} diff --git a/src/Npgsql/TypeMapping/PostgresTypeOIDs.cs b/src/Npgsql/TypeMapping/PostgresTypeOIDs.cs deleted file mode 100644 index e3f0d72c4d..0000000000 --- a/src/Npgsql/TypeMapping/PostgresTypeOIDs.cs +++ /dev/null @@ -1,112 +0,0 @@ -#pragma warning disable RS0016 -#pragma warning disable 1591 - -namespace Npgsql.TypeMapping; - -/// -/// Holds well-known, built-in PostgreSQL type OIDs. -/// -/// -/// Source: -/// -static class PostgresTypeOIDs -{ - // Numeric - public const uint Int8 = 20; - public const uint Float8 = 701; - public const uint Int4 = 23; - public const uint Numeric = 1700; - public const uint Float4 = 700; - public const uint Int2 = 21; - public const uint Money = 790; - - // Boolean - public const uint Bool = 16; - - // Geometric - public const uint Box = 603; - public const uint Circle = 718; - public const uint Line = 628; - public const uint LSeg = 601; - public const uint Path = 602; - public const uint Point = 600; - public const uint Polygon = 604; - - // Character - public const uint BPChar = 1042; - public const uint Text = 25; - public const uint Varchar = 1043; - public const uint Name = 19; - public const uint Char = 18; - - // Binary data - public const uint Bytea = 17; - - // Date/Time - public const uint Date = 1082; - public const uint Time = 1083; - public const uint Timestamp = 1114; - public const uint TimestampTz = 1184; - public const uint Interval = 1186; - public const uint TimeTz = 1266; - public const uint Abstime = 702; - - // Network address - public const uint Inet = 869; - public const uint Cidr = 650; - public const uint Macaddr = 829; - public const uint Macaddr8 = 774; - - // Bit string - public const uint Bit = 1560; - public const uint Varbit = 1562; - - // Text search - public const uint TsVector = 3614; - public const uint TsQuery = 3615; - public const uint Regconfig = 3734; - - // UUID - public const uint Uuid = 2950; - - // XML - public const uint Xml = 
142; - - // JSON - public const uint Json = 114; - public const uint Jsonb = 3802; - public const uint JsonPath = 4072; - - // public - public const uint Refcursor = 1790; - public const uint Oidvector = 30; - public const uint Int2vector = 22; - public const uint Oid = 26; - public const uint Xid = 28; - public const uint Xid8 = 5069; - public const uint Cid = 29; - public const uint Regtype = 2206; - public const uint Tid = 27; - public const uint PgLsn = 3220; - - // Special - public const uint Record = 2249; - public const uint Void = 2278; - public const uint Unknown = 705; - - // Range types - public const uint Int4Range = 3904; - public const uint Int8Range = 3926; - public const uint NumRange = 3906; - public const uint TsRange = 3908; - public const uint TsTzRange = 3910; - public const uint DateRange = 3912; - - // Multirange types - public const uint Int4Multirange = 4451; - public const uint Int8Multirange = 4536; - public const uint NumMultirange = 4532; - public const uint TsMultirange = 4533; - public const uint TsTzMultirange = 4534; - public const uint DateMultirange = 4535; -} \ No newline at end of file diff --git a/src/Npgsql/TypeMapping/RangeTypeHandlerResolver.cs b/src/Npgsql/TypeMapping/RangeTypeHandlerResolver.cs deleted file mode 100644 index ba3f0f44d2..0000000000 --- a/src/Npgsql/TypeMapping/RangeTypeHandlerResolver.cs +++ /dev/null @@ -1,178 +0,0 @@ -using System; -using System.Collections; -using System.Collections.Generic; -using System.Linq; -using System.Reflection; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandlers.DateTimeHandlers; -using Npgsql.Internal.TypeHandling; -using Npgsql.Internal.TypeMapping; -using Npgsql.PostgresTypes; -using Npgsql.Properties; -using Npgsql.Util; -using NpgsqlTypes; -using static Npgsql.Util.Statics; - -namespace Npgsql.TypeMapping; - -sealed class RangeTypeHandlerResolver : TypeHandlerResolver -{ - readonly TypeMapper _typeMapper; - readonly NpgsqlDatabaseInfo _databaseInfo; - - readonly 
TimestampHandler _timestampHandler; - readonly TimestampTzHandler _timestampTzHandler; - - NpgsqlTypeHandler? _timestampRangeHandler; - NpgsqlTypeHandler? _timestampTzRangeHandler; - NpgsqlTypeHandler? _timestampMultirangeHandler; - NpgsqlTypeHandler? _timestampTzMultirangeHandler; - - internal RangeTypeHandlerResolver(TypeMapper typeMapper, NpgsqlConnector connector) - { - _typeMapper = typeMapper; - _databaseInfo = connector.DatabaseInfo; - - _timestampHandler = new TimestampHandler(PgType("timestamp without time zone")); - _timestampTzHandler = new TimestampTzHandler(PgType("timestamp with time zone")); - } - - public override NpgsqlTypeHandler? ResolveByDataTypeName(string typeName) - { - if (!_databaseInfo.TryGetPostgresTypeByName(typeName, out var pgType)) - return null; - - return pgType switch - { - PostgresRangeType pgRangeType - => _typeMapper.ResolveByOID(pgRangeType.Subtype.OID).CreateRangeHandler(pgRangeType), - PostgresMultirangeType pgMultirangeType - => _typeMapper.ResolveByOID(pgMultirangeType.Subrange.Subtype.OID).CreateMultirangeHandler(pgMultirangeType), - _ => null - }; - } - - public override NpgsqlTypeHandler? 
ResolveByNpgsqlDbType(NpgsqlDbType npgsqlDbType) - { - if (npgsqlDbType.HasFlag(NpgsqlDbType.Range)) - { - var subtypeHandler = _typeMapper.ResolveByNpgsqlDbType(npgsqlDbType & ~NpgsqlDbType.Range); - - if (subtypeHandler.PostgresType.Range is not { } pgRangeType) - throw new ArgumentException( - $"No range type could be found in the database for subtype {subtypeHandler.PostgresType}"); - - return subtypeHandler.CreateRangeHandler(pgRangeType); - } - - if (npgsqlDbType.HasFlag(NpgsqlDbType.Multirange)) - { - var subtypeHandler = _typeMapper.ResolveByNpgsqlDbType(npgsqlDbType & ~NpgsqlDbType.Multirange); - - if (subtypeHandler.PostgresType.Range?.Multirange is not { } pgMultirangeType) - throw new ArgumentException(string.Format(NpgsqlStrings.NoMultirangeTypeFound, subtypeHandler.PostgresType)); - - return subtypeHandler.CreateMultirangeHandler(pgMultirangeType); - } - - // Not a range or multirange - return null; - } - - public override NpgsqlTypeHandler? ResolveByClrType(Type type) - { - // Try to see if it is an array type - var arrayElementType = GetArrayListElementType(type); - if (arrayElementType is not null) - { - // With PG14, we map arrays over range types to PG multiranges by default, not to regular arrays over ranges. - if (arrayElementType.IsGenericType && - arrayElementType.GetGenericTypeDefinition() == typeof(NpgsqlRange<>) && - _databaseInfo.Version.IsGreaterOrEqual(14)) - { - var arraySubtypeType = arrayElementType.GetGenericArguments()[0]; - - return _typeMapper.ResolveByClrType(arraySubtypeType) is - { PostgresType : { Range : { Multirange: { } pgMultirangeType } } } arraySubtypeHandler - ? arraySubtypeHandler.CreateMultirangeHandler(pgMultirangeType) - : throw new NotSupportedException($"The CLR range type {type} isn't supported by Npgsql or your PostgreSQL."); - } - } - - // TODO: We can make the following compatible with reflection-free mode by having NpgsqlRange implement some interface, and - // check for that. 
- if (!type.IsGenericType || type.GetGenericTypeDefinition() != typeof(NpgsqlRange<>)) - return null; - - var subtypeType = type.GetGenericArguments()[0]; - - return _typeMapper.ResolveByClrType(subtypeType) is { PostgresType : { Range : { } pgRangeType } } subtypeHandler - ? subtypeHandler.CreateRangeHandler(pgRangeType) - : throw new NotSupportedException($"The CLR range type {type} isn't supported by Npgsql or your PostgreSQL."); - - static Type? GetArrayListElementType(Type type) - { - var typeInfo = type.GetTypeInfo(); - if (typeInfo.IsArray) - return GetUnderlyingType(type.GetElementType()!); // The use of bang operator is justified here as Type.GetElementType() only returns null for the Array base class which can't be mapped in a useful way. - - var ilist = typeInfo.ImplementedInterfaces.FirstOrDefault(x => x.GetTypeInfo().IsGenericType && x.GetGenericTypeDefinition() == typeof(IList<>)); - if (ilist != null) - return GetUnderlyingType(ilist.GetGenericArguments()[0]); - - if (typeof(IList).IsAssignableFrom(type)) - throw new NotSupportedException("Non-generic IList is a supported parameter, but the NpgsqlDbType parameter must be set on the parameter"); - - return null; - - Type GetUnderlyingType(Type t) - => Nullable.GetUnderlyingType(t) ?? t; - } - } - - public override NpgsqlTypeHandler? ResolveValueDependentValue(object value) - { - // In LegacyTimestampBehavior, DateTime isn't value-dependent, and handled above in ClrTypeToDataTypeNameTable like other types - if (LegacyTimestampBehavior) - return null; - - return value switch - { - NpgsqlRange range => RangeHandler(!range.LowerBoundInfinite ? range.LowerBound.Kind : - !range.UpperBoundInfinite ? range.UpperBound.Kind : DateTimeKind.Unspecified), - - NpgsqlRange[] multirange => MultirangeHandler(GetMultirangeKind(multirange)), - List> multirange => MultirangeHandler(GetMultirangeKind(multirange)), - - _ => null - }; - - NpgsqlTypeHandler RangeHandler(DateTimeKind kind) - => kind == DateTimeKind.Utc - ? 
_timestampTzRangeHandler ??= _timestampTzHandler.CreateRangeHandler((PostgresRangeType)PgType("tstzrange")) - : _timestampRangeHandler ??= _timestampHandler.CreateRangeHandler((PostgresRangeType)PgType("tsrange")); - - NpgsqlTypeHandler MultirangeHandler(DateTimeKind kind) - => kind == DateTimeKind.Utc - ? _timestampTzMultirangeHandler ??= _timestampTzHandler.CreateMultirangeHandler((PostgresMultirangeType)PgType("tstzmultirange")) - : _timestampMultirangeHandler ??= _timestampHandler.CreateMultirangeHandler((PostgresMultirangeType)PgType("tsmultirange")); - } - - static DateTimeKind GetRangeKind(NpgsqlRange range) - => !range.LowerBoundInfinite - ? range.LowerBound.Kind - : !range.UpperBoundInfinite - ? range.UpperBound.Kind - : DateTimeKind.Unspecified; - - static DateTimeKind GetMultirangeKind(IList> multirange) - { - for (var i = 0; i < multirange.Count; i++) - if (!multirange[i].IsEmpty) - return GetRangeKind(multirange[i]); - - return DateTimeKind.Unspecified; - } - - PostgresType PgType(string pgTypeName) => _databaseInfo.GetPostgresTypeByName(pgTypeName); -} diff --git a/src/Npgsql/TypeMapping/RangeTypeHandlerResolverFactory.cs b/src/Npgsql/TypeMapping/RangeTypeHandlerResolverFactory.cs deleted file mode 100644 index bc7212eda8..0000000000 --- a/src/Npgsql/TypeMapping/RangeTypeHandlerResolverFactory.cs +++ /dev/null @@ -1,15 +0,0 @@ -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.Internal.TypeMapping; - -namespace Npgsql.TypeMapping; - -sealed class RangeTypeHandlerResolverFactory : TypeHandlerResolverFactory -{ - public override TypeHandlerResolver Create(TypeMapper typeMapper, NpgsqlConnector connector) - => new RangeTypeHandlerResolver(typeMapper, connector); - - public override TypeMappingResolver CreateMappingResolver() => new RangeTypeMappingResolver(); - - public override TypeMappingResolver CreateGlobalMappingResolver() => new RangeTypeMappingResolver(); -} diff --git a/src/Npgsql/TypeMapping/RangeTypeMappingResolver.cs 
b/src/Npgsql/TypeMapping/RangeTypeMappingResolver.cs deleted file mode 100644 index 5061a9780c..0000000000 --- a/src/Npgsql/TypeMapping/RangeTypeMappingResolver.cs +++ /dev/null @@ -1,118 +0,0 @@ -using System; -using System.Collections.Generic; -using Npgsql.Internal.TypeHandling; -using Npgsql.Internal.TypeMapping; -using Npgsql.PostgresTypes; -using NpgsqlTypes; -using static Npgsql.Util.Statics; - -namespace Npgsql.TypeMapping; - -sealed class RangeTypeMappingResolver : TypeMappingResolver -{ - static readonly Dictionary Mappings = new() - { - { "int4range", new(NpgsqlDbType.IntegerRange, "int4range") }, - { "int8range", new(NpgsqlDbType.BigIntRange, "int8range") }, - { "numrange", new(NpgsqlDbType.NumericRange, "numrange") }, - { "daterange", new(NpgsqlDbType.DateRange, "daterange") }, - { "tsrange", new(NpgsqlDbType.TimestampRange, "tsrange") }, - { "tstzrange", new(NpgsqlDbType.TimestampTzRange, "tstzrange") }, - - { "int4multirange", new(NpgsqlDbType.IntegerMultirange, "int4range") }, - { "int8multirange", new(NpgsqlDbType.BigIntMultirange, "int8range") }, - { "nummultirange", new(NpgsqlDbType.NumericMultirange, "numrange") }, - { "datemultirange", new(NpgsqlDbType.DateMultirange, "datemultirange") }, - { "tsmultirange", new(NpgsqlDbType.TimestampMultirange, "tsmultirange") }, - { "tstzmultirange", new(NpgsqlDbType.TimestampTzMultirange, "tstzmultirange") } - }; - - static readonly Dictionary ClrTypeToDataTypeNameTable = new() - { - // Built-in range types - { typeof(NpgsqlRange), "int4range" }, - { typeof(NpgsqlRange), "int8range" }, - { typeof(NpgsqlRange), "numrange" }, -#if NET6_0_OR_GREATER - { typeof(NpgsqlRange), "daterange" }, -#endif - - // Built-in multirange types - { typeof(NpgsqlRange[]), "int4multirange" }, - { typeof(List>), "int4multirange" }, - { typeof(NpgsqlRange[]), "int8multirange" }, - { typeof(List>), "int8multirange" }, - { typeof(NpgsqlRange[]), "nummultirange" }, - { typeof(List>), "nummultirange" }, -#if NET6_0_OR_GREATER - { 
typeof(NpgsqlRange[]), "datemultirange" }, - { typeof(List>), "datemultirange" }, -#endif - }; - - public override string? GetDataTypeNameByClrType(Type clrType) - => ClrTypeToDataTypeNameTable.TryGetValue(clrType, out var dataTypeName) ? dataTypeName : null; - - public override string? GetDataTypeNameByValueDependentValue(object value) - { - // In LegacyTimestampBehavior, DateTime isn't value-dependent, and handled above in ClrTypeToDataTypeNameTable like other types - if (LegacyTimestampBehavior) - return null; - - return value switch - { - NpgsqlRange range => GetRangeKind(range) == DateTimeKind.Utc ? "tstzrange" : "tsrange", - - NpgsqlRange[] multirange => GetMultirangeKind(multirange) == DateTimeKind.Utc ? "tstzmultirange" : "tsmultirange", - - _ => null - }; - } - - public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) - => Mappings.TryGetValue(dataTypeName, out var mapping) ? mapping : null; - - public override TypeMappingInfo? GetMappingByPostgresType(TypeMapper mapper, PostgresType type) - { - switch (type) - { - case PostgresRangeType pgRangeType: - { - if (mapper.TryGetMapping(pgRangeType.Subtype, out var subtypeMapping)) - { - return new(subtypeMapping.NpgsqlDbType | NpgsqlDbType.Range, type.DisplayName); - } - - break; - } - - case PostgresMultirangeType pgMultirangeType: - { - if (mapper.TryGetMapping(pgMultirangeType.Subrange.Subtype, out var subtypeMapping)) - { - return new(subtypeMapping.NpgsqlDbType | NpgsqlDbType.Multirange, type.DisplayName); - } - - break; - } - } - - return null; - } - - static DateTimeKind GetRangeKind(NpgsqlRange range) - => !range.LowerBoundInfinite - ? range.LowerBound.Kind - : !range.UpperBoundInfinite - ? 
range.UpperBound.Kind - : DateTimeKind.Unspecified; - - static DateTimeKind GetMultirangeKind(IList> multirange) - { - for (var i = 0; i < multirange.Count; i++) - if (!multirange[i].IsEmpty) - return GetRangeKind(multirange[i]); - - return DateTimeKind.Unspecified; - } -} diff --git a/src/Npgsql/TypeMapping/RecordTypeHandlerResolver.cs b/src/Npgsql/TypeMapping/RecordTypeHandlerResolver.cs deleted file mode 100644 index df0a44f4e4..0000000000 --- a/src/Npgsql/TypeMapping/RecordTypeHandlerResolver.cs +++ /dev/null @@ -1,29 +0,0 @@ -using System; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandlers; -using Npgsql.Internal.TypeHandling; -using Npgsql.Internal.TypeMapping; -using Npgsql.PostgresTypes; - -namespace Npgsql.TypeMapping; - -sealed class RecordTypeHandlerResolver : TypeHandlerResolver -{ - readonly TypeMapper _typeMapper; - readonly NpgsqlDatabaseInfo _databaseInfo; - - RecordHandler? _recordHandler; - - public RecordTypeHandlerResolver(TypeMapper typeMapper, NpgsqlConnector connector) - { - _typeMapper = typeMapper; - _databaseInfo = connector.DatabaseInfo; - } - - public override NpgsqlTypeHandler? ResolveByDataTypeName(string typeName) - => typeName == "record" ? GetHandler() : null; - - public override NpgsqlTypeHandler? 
ResolveByClrType(Type type) => null; - - NpgsqlTypeHandler GetHandler() => _recordHandler ??= new RecordHandler(_databaseInfo.GetPostgresTypeByName("record"), _typeMapper); -} diff --git a/src/Npgsql/TypeMapping/RecordTypeHandlerResolverFactory.cs b/src/Npgsql/TypeMapping/RecordTypeHandlerResolverFactory.cs deleted file mode 100644 index e308fb03e4..0000000000 --- a/src/Npgsql/TypeMapping/RecordTypeHandlerResolverFactory.cs +++ /dev/null @@ -1,12 +0,0 @@ -using System; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.Internal.TypeMapping; - -namespace Npgsql.TypeMapping; - -sealed class RecordTypeHandlerResolverFactory : TypeHandlerResolverFactory -{ - public override TypeHandlerResolver Create(TypeMapper typeMapper, NpgsqlConnector connector) - => new RecordTypeHandlerResolver(typeMapper, connector); -} diff --git a/src/Npgsql/TypeMapping/SystemTextJsonTypeHandlerResolver.cs b/src/Npgsql/TypeMapping/SystemTextJsonTypeHandlerResolver.cs deleted file mode 100644 index a60f53d9c5..0000000000 --- a/src/Npgsql/TypeMapping/SystemTextJsonTypeHandlerResolver.cs +++ /dev/null @@ -1,60 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Text.Json; -using System.Text.Json.Nodes; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandlers; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; - -namespace Npgsql.TypeMapping; - -sealed class SystemTextJsonTypeHandlerResolver : TypeHandlerResolver -{ - readonly NpgsqlConnector _connector; - readonly NpgsqlDatabaseInfo _databaseInfo; - readonly JsonSerializerOptions _serializerOptions; - readonly Dictionary? _userClrTypes; - - // Note that old versions of PG - as well as some PG-like databases (Redshift, CockroachDB) don't have json/jsonb, so we create - // these handlers lazily rather than eagerly. - SystemTextJsonHandler? _jsonbHandler; - SystemTextJsonHandler? _jsonHandler; - - internal SystemTextJsonTypeHandlerResolver( - NpgsqlConnector connector, - Dictionary? 
userClrTypes, - JsonSerializerOptions serializerOptions) - { - _connector = connector; - _databaseInfo = connector.DatabaseInfo; - _serializerOptions = serializerOptions; - _userClrTypes = userClrTypes; - } - - public override NpgsqlTypeHandler? ResolveByDataTypeName(string typeName) - => typeName switch - { - "jsonb" => JsonbHandler(), - "json" => JsonHandler(), - _ => null - }; - - public override NpgsqlTypeHandler? ResolveByClrType(Type type) - => SystemTextJsonTypeMappingResolver.ClrTypeToDataTypeName(type, _userClrTypes) is { } dataTypeName && - ResolveByDataTypeName(dataTypeName) is { } handler - ? handler - : null; - - public override NpgsqlTypeHandler? ResolveValueTypeGenerically(T value) - => typeof(T) == typeof(JsonDocument) || typeof(T) == typeof(JsonObject) || typeof(T) == typeof(JsonArray) - ? JsonbHandler() - : null; - - NpgsqlTypeHandler JsonbHandler() - => _jsonbHandler ??= new SystemTextJsonHandler(PgType("jsonb"), _connector.TextEncoding, isJsonb: true, _serializerOptions); - NpgsqlTypeHandler JsonHandler() - => _jsonHandler ??= new SystemTextJsonHandler(PgType("json"), _connector.TextEncoding, isJsonb: false, _serializerOptions); - - PostgresType PgType(string pgTypeName) => _databaseInfo.GetPostgresTypeByName(pgTypeName); -} diff --git a/src/Npgsql/TypeMapping/SystemTextJsonTypeHandlerResolverFactory.cs b/src/Npgsql/TypeMapping/SystemTextJsonTypeHandlerResolverFactory.cs deleted file mode 100644 index 26f593933e..0000000000 --- a/src/Npgsql/TypeMapping/SystemTextJsonTypeHandlerResolverFactory.cs +++ /dev/null @@ -1,45 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Text.Json; -using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.Internal.TypeMapping; - -namespace Npgsql.TypeMapping; - -sealed class SystemTextJsonTypeHandlerResolverFactory : TypeHandlerResolverFactory -{ - readonly JsonSerializerOptions _settings; - readonly Dictionary? 
_userClrTypes; - - public SystemTextJsonTypeHandlerResolverFactory( - Type[]? jsonbClrTypes = null, - Type[]? jsonClrTypes = null, - JsonSerializerOptions? settings = null) - { - _settings = settings ?? new JsonSerializerOptions(); - - if (jsonbClrTypes is not null) - { - _userClrTypes ??= new(); - - foreach (var type in jsonbClrTypes) - _userClrTypes[type] = "jsonb"; - } - - if (jsonClrTypes is not null) - { - _userClrTypes ??= new(); - - foreach (var type in jsonClrTypes) - _userClrTypes[type] = "json"; - } - } - - public override TypeHandlerResolver Create(TypeMapper typeMapper, NpgsqlConnector connector) - => new SystemTextJsonTypeHandlerResolver(connector, _userClrTypes, _settings); - - public override TypeMappingResolver CreateMappingResolver() => new SystemTextJsonTypeMappingResolver(_userClrTypes); - - public override TypeMappingResolver CreateGlobalMappingResolver() => new SystemTextJsonTypeMappingResolver(userClrTypes: null); -} diff --git a/src/Npgsql/TypeMapping/SystemTextJsonTypeMappingResolver.cs b/src/Npgsql/TypeMapping/SystemTextJsonTypeMappingResolver.cs deleted file mode 100644 index b76820f718..0000000000 --- a/src/Npgsql/TypeMapping/SystemTextJsonTypeMappingResolver.cs +++ /dev/null @@ -1,39 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Text.Json; -using System.Text.Json.Nodes; -using Npgsql.Internal.TypeHandling; -using Npgsql.Internal.TypeMapping; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.TypeMapping; - -sealed class SystemTextJsonTypeMappingResolver : TypeMappingResolver -{ - readonly Dictionary? _userClrTypes; - - public SystemTextJsonTypeMappingResolver(Dictionary? userClrTypes) => _userClrTypes = userClrTypes; - - public override string? GetDataTypeNameByClrType(Type type) - => ClrTypeToDataTypeName(type, _userClrTypes); - - public override TypeMappingInfo? GetMappingByDataTypeName(string dataTypeName) - => DoGetMappingByDataTypeName(dataTypeName); - - internal static string? 
ClrTypeToDataTypeName(Type type, Dictionary? clrTypes) - => type == typeof(JsonDocument) - || type == typeof(JsonObject) || type == typeof(JsonArray) - ? "jsonb" - : clrTypes is not null && clrTypes.TryGetValue(type, out var dataTypeName) ? dataTypeName : null; - - static TypeMappingInfo? DoGetMappingByDataTypeName(string dataTypeName) - => dataTypeName switch - { - "jsonb" => new(NpgsqlDbType.Jsonb, "jsonb", typeof(JsonDocument) - , typeof(JsonObject), typeof(JsonArray) - ), - "json" => new(NpgsqlDbType.Json, "json"), - _ => null - }; -} diff --git a/src/Npgsql/TypeMapping/UserTypeMapper.cs b/src/Npgsql/TypeMapping/UserTypeMapper.cs new file mode 100644 index 0000000000..8524dfeb14 --- /dev/null +++ b/src/Npgsql/TypeMapping/UserTypeMapper.cs @@ -0,0 +1,216 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; +using System.Reflection; +using Npgsql.Internal; +using Npgsql.Internal.Composites; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; +using Npgsql.NameTranslation; +using Npgsql.PostgresTypes; +using NpgsqlTypes; + +namespace Npgsql.TypeMapping; + +/// +/// The base class for user type mappings. +/// +public abstract class UserTypeMapping +{ + /// + /// The name of the PostgreSQL type that this mapping is for. + /// + public string PgTypeName { get; } + /// + /// The CLR type that this mapping is for. 
+ /// + public Type ClrType { get; } + + internal UserTypeMapping(string pgTypeName, Type type) + => (PgTypeName, ClrType) = (pgTypeName, type); + + internal abstract void Build(TypeInfoMappingCollection mappings); +} + +sealed class UserTypeMapper +{ + readonly List _mappings; + public IList Items => _mappings; + + public INpgsqlNameTranslator DefaultNameTranslator { get; set; } = NpgsqlSnakeCaseNameTranslator.Instance; + + UserTypeMapper(IEnumerable mappings) => _mappings = new List(mappings); + public UserTypeMapper() => _mappings = new(); + + public UserTypeMapper Clone() => new(_mappings) { DefaultNameTranslator = DefaultNameTranslator }; + + public UserTypeMapper MapEnum(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + where TEnum : struct, Enum + { + Unmap(typeof(TEnum), out var resolvedName, pgName, nameTranslator); + Items.Add(new EnumMapping(resolvedName, nameTranslator ?? DefaultNameTranslator)); + return this; + } + + public bool UnmapEnum(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + where TEnum : struct, Enum + => Unmap(typeof(TEnum), out _, pgName, nameTranslator ?? DefaultNameTranslator); + + public UserTypeMapper MapComposite(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) where T : class + { + Unmap(typeof(T), out var resolvedName, pgName, nameTranslator); + Items.Add(new CompositeMapping(resolvedName, nameTranslator ?? DefaultNameTranslator)); + return this; + } + + public UserTypeMapper MapStructComposite(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) where T : struct + { + Unmap(typeof(T), out var resolvedName, pgName, nameTranslator); + Items.Add(new StructCompositeMapping(resolvedName, nameTranslator ?? DefaultNameTranslator)); + return this; + } + + [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] + public UserTypeMapper MapComposite(Type clrType, string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) + { + if (clrType.IsConstructedGenericType && clrType.GetGenericTypeDefinition() == typeof(Nullable<>)) + throw new ArgumentException("Cannot map nullable.", nameof(clrType)); + + var openMethod = typeof(UserTypeMapper).GetMethod( + clrType.IsValueType ? nameof(MapStructComposite) : nameof(MapComposite), + new[] { typeof(string), typeof(INpgsqlNameTranslator) })!; + + var method = openMethod.MakeGenericMethod(clrType); + + method.Invoke(this, new object?[] { pgName, nameTranslator }); + + return this; + } + + public bool UnmapComposite(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) where T : class + => UnmapComposite(typeof(T), pgName, nameTranslator); + + public bool UnmapStructComposite(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) where T : struct + => UnmapComposite(typeof(T), pgName, nameTranslator); + + public bool UnmapComposite(Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + => Unmap(clrType, out _, pgName, nameTranslator); + + bool Unmap(Type type, out string resolvedName, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + { + if (pgName != null && pgName.Trim() == "") + throw new ArgumentException("pgName can't be empty", nameof(pgName)); + + nameTranslator ??= DefaultNameTranslator; + resolvedName = pgName ??= GetPgName(type, nameTranslator); + + UserTypeMapping? toRemove = null; + foreach (var item in _mappings) + if (item.PgTypeName == pgName) + toRemove = item; + + return toRemove is not null && _mappings.Remove(toRemove); + } + + static string GetPgName(Type type, INpgsqlNameTranslator nameTranslator) + => type.GetCustomAttribute()?.PgName + ?? 
nameTranslator.TranslateTypeName(type.Name); + + public IPgTypeInfoResolver Build() + { + var infoMappings = new TypeInfoMappingCollection(); + foreach (var mapping in _mappings) + mapping.Build(infoMappings); + + return new UserMappingResolver(infoMappings); + } + + sealed class UserMappingResolver : IPgTypeInfoResolver + { + readonly TypeInfoMappingCollection _mappings; + public UserMappingResolver(TypeInfoMappingCollection mappings) => _mappings = mappings; + PgTypeInfo? IPgTypeInfoResolver.GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => _mappings.Find(type, dataTypeName, options); + } + + sealed class CompositeMapping : UserTypeMapping where T : class + { + readonly INpgsqlNameTranslator _nameTranslator; + + public CompositeMapping(string pgTypeName, INpgsqlNameTranslator nameTranslator) + : base(pgTypeName, typeof(T)) + => _nameTranslator = nameTranslator; + + internal override void Build(TypeInfoMappingCollection mappings) + { + mappings.AddType(PgTypeName, (options, mapping, _) => + { + var pgType = mapping.GetPgType(options); + if (pgType is not PostgresCompositeType compositeType) + throw new InvalidOperationException("Composite mapping must be to a composite type"); + + return mapping.CreateInfo(options, new CompositeConverter( + ReflectionCompositeInfoFactory.CreateCompositeInfo(compositeType, _nameTranslator, options))); + }, isDefault: true); + // TODO this should be split out so we can enjoy EnableArray trimming. 
+ mappings.AddArrayType(PgTypeName); + } + } + + sealed class StructCompositeMapping : UserTypeMapping where T : struct + { + readonly INpgsqlNameTranslator _nameTranslator; + + public StructCompositeMapping(string pgTypeName, INpgsqlNameTranslator nameTranslator) + : base(pgTypeName, typeof(T)) + => _nameTranslator = nameTranslator; + + internal override void Build(TypeInfoMappingCollection mappings) + { + mappings.AddStructType(PgTypeName, (options, mapping, dataTypeNameMatch) => + { + var pgType = mapping.GetPgType(options); + if (pgType is not PostgresCompositeType compositeType) + throw new InvalidOperationException("Composite mapping must be to a composite type"); + + return mapping.CreateInfo(options, new CompositeConverter( + ReflectionCompositeInfoFactory.CreateCompositeInfo(compositeType, _nameTranslator, options))); + }, isDefault: true); + // TODO this should be split out so we can enjoy EnableArray trimming. + mappings.AddStructArrayType(PgTypeName); + } + } + + sealed class EnumMapping : UserTypeMapping + where TEnum : struct, Enum + { + readonly Dictionary _enumToLabel = new(); + readonly Dictionary _labelToEnum = new(); + + public EnumMapping(string pgTypeName, INpgsqlNameTranslator nameTranslator) + : base(pgTypeName, typeof(TEnum)) + { + foreach (var field in typeof(TEnum).GetFields(BindingFlags.Static | BindingFlags.Public)) + { + var attribute = (PgNameAttribute?)field.GetCustomAttribute(typeof(PgNameAttribute), false); + var enumName = attribute is null + ? 
nameTranslator.TranslateMemberName(field.Name) + : attribute.PgName; + var enumValue = (TEnum)field.GetValue(null)!; + + _enumToLabel[enumValue] = enumName; + _labelToEnum[enumName] = enumValue; + } + } + + internal override void Build(TypeInfoMappingCollection mappings) + { + mappings.AddStructType(PgTypeName, (options, mapping, _) => + mapping.CreateInfo(options, new EnumConverter(_enumToLabel, _labelToEnum, options.TextEncoding), preferredFormat: DataFormat.Text), isDefault: true); + + // TODO this should be split out so we can enjoy EnableArray trimming. + mappings.AddStructArrayType(PgTypeName); + } + } +} + diff --git a/src/Npgsql/UnpooledDataSource.cs b/src/Npgsql/UnpooledDataSource.cs index 8226524635..3e3cf5b019 100644 --- a/src/Npgsql/UnpooledDataSource.cs +++ b/src/Npgsql/UnpooledDataSource.cs @@ -1,7 +1,6 @@ using System.Diagnostics.CodeAnalysis; using System.Threading; using System.Threading.Tasks; -using System.Transactions; using Npgsql.Internal; using Npgsql.Util; @@ -48,4 +47,4 @@ internal override void Return(NpgsqlConnector connector) } internal override void Clear() {} -} \ No newline at end of file +} diff --git a/src/Npgsql/Util/NpgsqlTimeout.cs b/src/Npgsql/Util/NpgsqlTimeout.cs new file mode 100644 index 0000000000..eb4fb06aed --- /dev/null +++ b/src/Npgsql/Util/NpgsqlTimeout.cs @@ -0,0 +1,57 @@ +using System; +using System.Threading; +using Npgsql.Internal; + +namespace Npgsql.Util; + +/// +/// Represents a timeout that will expire at some point. +/// +public readonly struct NpgsqlTimeout +{ + readonly DateTime _expiration; + + internal static readonly NpgsqlTimeout Infinite = new(TimeSpan.Zero); + + internal NpgsqlTimeout(TimeSpan expiration) + => _expiration = expiration > TimeSpan.Zero + ? DateTime.UtcNow + expiration + : expiration == TimeSpan.Zero + ? 
DateTime.MaxValue + : DateTime.MinValue; + + internal void Check() + { + if (HasExpired) + ThrowHelper.ThrowNpgsqlExceptionWithInnerTimeoutException("The operation has timed out"); + } + + internal void CheckAndApply(NpgsqlConnector connector) + { + if (!IsSet) + return; + + var timeLeft = CheckAndGetTimeLeft(); + // Set the remaining timeout on the read and write buffers + connector.ReadBuffer.Timeout = connector.WriteBuffer.Timeout = timeLeft; + + // Note that we set UserTimeout as well, otherwise the read timeout will get overwritten in ReadMessage + // Note also that we must set the read buffer's timeout directly (above), since the SSL handshake + // reads data directly from the buffer, without going through ReadMessage. + connector.UserTimeout = (int) Math.Ceiling(timeLeft.TotalMilliseconds); + } + + internal bool IsSet => _expiration != DateTime.MaxValue; + + internal bool HasExpired => DateTime.UtcNow >= _expiration; + + internal TimeSpan CheckAndGetTimeLeft() + { + if (!IsSet) + return Timeout.InfiniteTimeSpan; + var timeLeft = _expiration - DateTime.UtcNow; + if (timeLeft <= TimeSpan.Zero) + Check(); + return timeLeft; + } +} diff --git a/src/Npgsql/Util/PGUtil.cs b/src/Npgsql/Util/PGUtil.cs deleted file mode 100644 index 1b9aa0ce2a..0000000000 --- a/src/Npgsql/Util/PGUtil.cs +++ /dev/null @@ -1,228 +0,0 @@ -using Npgsql.Internal; -using System; -using System.Collections.Generic; -using System.Diagnostics.CodeAnalysis; -using System.Reflection; -using System.Runtime.CompilerServices; -using System.Text; -using System.Threading; -using System.Threading.Tasks; - -namespace Npgsql.Util; - -static class Statics -{ -#if DEBUG - internal static bool LegacyTimestampBehavior; - internal static bool DisableDateTimeInfinityConversions; -#else - internal static readonly bool LegacyTimestampBehavior; - internal static readonly bool DisableDateTimeInfinityConversions; -#endif - - static Statics() - { - LegacyTimestampBehavior = 
AppContext.TryGetSwitch("Npgsql.EnableLegacyTimestampBehavior", out var enabled) && enabled; - DisableDateTimeInfinityConversions = AppContext.TryGetSwitch("Npgsql.DisableDateTimeInfinityConversions", out enabled) && enabled; - } - - internal static T Expect(IBackendMessage msg, NpgsqlConnector connector) - { - if (msg.GetType() != typeof(T)) - ThrowIfMsgWrongType(msg, connector); - - return (T)msg; - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal static T ExpectAny(IBackendMessage msg, NpgsqlConnector connector) - { - if (msg is T t) - return t; - - ThrowIfMsgWrongType(msg, connector); - return default; - } - - [DoesNotReturn] - static void ThrowIfMsgWrongType(IBackendMessage msg, NpgsqlConnector connector) - => throw connector.Break( - new NpgsqlException($"Received backend message {msg.Code} while expecting {typeof(T).Name}. Please file a bug.")); - - internal static DeferDisposable Defer(Action action) => new(action); - internal static DeferDisposable Defer(Action action, T arg) => new(action, arg); - internal static DeferDisposable Defer(Action action, T1 arg1, T2 arg2) => new(action, arg1, arg2); - // internal static AsyncDeferDisposable DeferAsync(Func func) => new AsyncDeferDisposable(func); - internal static AsyncDeferDisposable DeferAsync(Func func) => new(func); - - internal readonly struct DeferDisposable : IDisposable - { - readonly Action _action; - public DeferDisposable(Action action) => _action = action; - public void Dispose() => _action(); - } - - internal readonly struct DeferDisposable : IDisposable - { - readonly Action _action; - readonly T _arg; - public DeferDisposable(Action action, T arg) - { - _action = action; - _arg = arg; - } - public void Dispose() => _action(_arg); - } - - internal readonly struct DeferDisposable : IDisposable - { - readonly Action _action; - readonly T1 _arg1; - readonly T2 _arg2; - public DeferDisposable(Action action, T1 arg1, T2 arg2) - { - _action = action; - _arg1 = arg1; - _arg2 = arg2; - 
} - public void Dispose() => _action(_arg1, _arg2); - } - - internal readonly struct AsyncDeferDisposable : IAsyncDisposable - { - readonly Func _func; - public AsyncDeferDisposable(Func func) => _func = func; - public async ValueTask DisposeAsync() => await _func(); - } -} - -// ReSharper disable once InconsistentNaming -static class PGUtil -{ - internal static readonly UTF8Encoding UTF8Encoding = new(false, true); - internal static readonly UTF8Encoding RelaxedUTF8Encoding = new(false, false); - - internal static void ValidateBackendMessageCode(BackendMessageCode code) - { - switch (code) - { - case BackendMessageCode.AuthenticationRequest: - case BackendMessageCode.BackendKeyData: - case BackendMessageCode.BindComplete: - case BackendMessageCode.CloseComplete: - case BackendMessageCode.CommandComplete: - case BackendMessageCode.CopyData: - case BackendMessageCode.CopyDone: - case BackendMessageCode.CopyBothResponse: - case BackendMessageCode.CopyInResponse: - case BackendMessageCode.CopyOutResponse: - case BackendMessageCode.DataRow: - case BackendMessageCode.EmptyQueryResponse: - case BackendMessageCode.ErrorResponse: - case BackendMessageCode.FunctionCall: - case BackendMessageCode.FunctionCallResponse: - case BackendMessageCode.NoData: - case BackendMessageCode.NoticeResponse: - case BackendMessageCode.NotificationResponse: - case BackendMessageCode.ParameterDescription: - case BackendMessageCode.ParameterStatus: - case BackendMessageCode.ParseComplete: - case BackendMessageCode.PasswordPacket: - case BackendMessageCode.PortalSuspended: - case BackendMessageCode.ReadyForQuery: - case BackendMessageCode.RowDescription: - return; - default: - ThrowUnknownMessageCode(code); - return; - } - - static void ThrowUnknownMessageCode(BackendMessageCode code) - => ThrowHelper.ThrowNpgsqlException($"Unknown message code: {code}"); - } - - internal static readonly Task TrueTask = Task.FromResult(true); - internal static readonly Task FalseTask = Task.FromResult(false); -} 
- -enum FormatCode : short -{ - Text = 0, - Binary = 1 -} - -static class EnumerableExtensions -{ - internal static string Join(this IEnumerable values, string separator) - => string.Join(separator, values); -} - -static class ExceptionExtensions -{ - internal static Exception UnwrapAggregate(this Exception exception) - => exception is AggregateException agg ? agg.InnerException! : exception; -} - -/// -/// Represents a timeout that will expire at some point. -/// -public readonly struct NpgsqlTimeout -{ - readonly DateTime _expiration; - - internal static readonly NpgsqlTimeout Infinite = new(TimeSpan.Zero); - - internal NpgsqlTimeout(TimeSpan expiration) - => _expiration = expiration > TimeSpan.Zero - ? DateTime.UtcNow + expiration - : expiration == TimeSpan.Zero - ? DateTime.MaxValue - : DateTime.MinValue; - - internal void Check() - { - if (HasExpired) - ThrowHelper.ThrowNpgsqlExceptionWithInnerTimeoutException("The operation has timed out"); - } - - internal void CheckAndApply(NpgsqlConnector connector) - { - if (!IsSet) - return; - - var timeLeft = CheckAndGetTimeLeft(); - // Set the remaining timeout on the read and write buffers - connector.ReadBuffer.Timeout = connector.WriteBuffer.Timeout = timeLeft; - - // Note that we set UserTimeout as well, otherwise the read timeout will get overwritten in ReadMessage - // Note also that we must set the read buffer's timeout directly (above), since the SSL handshake - // reads data directly from the buffer, without going through ReadMessage. 
- connector.UserTimeout = (int) Math.Ceiling(timeLeft.TotalMilliseconds); - } - - internal bool IsSet => _expiration != DateTime.MaxValue; - - internal bool HasExpired => DateTime.UtcNow >= _expiration; - - internal TimeSpan CheckAndGetTimeLeft() - { - if (!IsSet) - return Timeout.InfiniteTimeSpan; - var timeLeft = _expiration - DateTime.UtcNow; - if (timeLeft <= TimeSpan.Zero) - Check(); - return timeLeft; - } -} - -static class MethodInfos -{ - internal static readonly ConstructorInfo InvalidCastExceptionCtor = - typeof(InvalidCastException).GetConstructor(new[] { typeof(string) })!; - - internal static readonly MethodInfo StringFormat = - typeof(string).GetMethod(nameof(string.Format), new[] { typeof(string), typeof(object) })!; - - internal static readonly MethodInfo ObjectGetType = - typeof(object).GetMethod(nameof(GetType), new Type[0])!; -} \ No newline at end of file diff --git a/src/Npgsql/Util/ResettableCancellationTokenSource.cs b/src/Npgsql/Util/ResettableCancellationTokenSource.cs index 9bb507b1cb..0912ceb7b9 100644 --- a/src/Npgsql/Util/ResettableCancellationTokenSource.cs +++ b/src/Npgsql/Util/ResettableCancellationTokenSource.cs @@ -97,11 +97,8 @@ public void RestartTimeoutWithoutReset() /// in order make sure the next call to will not invalidate /// the cancellation token. /// - /// - /// An optional token to cancel the asynchronous operation. The default value is . 
- /// /// The from the wrapped - public CancellationToken Reset(CancellationToken cancellationToken = default) + public CancellationToken Reset() { _registration.Dispose(); lock (lockObject) @@ -124,8 +121,6 @@ public CancellationToken Reset(CancellationToken cancellationToken = default) _cts = new CancellationTokenSource(); } } - if (cancellationToken.CanBeCanceled) - _registration = cancellationToken.Register(cts => ((CancellationTokenSource)cts!).Cancel(), _cts); #if DEBUG _isRunning = false; #endif @@ -230,4 +225,4 @@ public void Dispose() isDisposed = true; } } -} \ No newline at end of file +} diff --git a/src/Npgsql/Util/Statics.cs b/src/Npgsql/Util/Statics.cs new file mode 100644 index 0000000000..b84cea4afd --- /dev/null +++ b/src/Npgsql/Util/Statics.cs @@ -0,0 +1,92 @@ +using Npgsql.Internal; +using System; +using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; + +namespace Npgsql.Util; + +static class Statics +{ +#if DEBUG + internal static bool LegacyTimestampBehavior; + internal static bool DisableDateTimeInfinityConversions; +#else + internal static readonly bool LegacyTimestampBehavior; + internal static readonly bool DisableDateTimeInfinityConversions; +#endif + + static Statics() + { + LegacyTimestampBehavior = AppContext.TryGetSwitch("Npgsql.EnableLegacyTimestampBehavior", out var enabled) && enabled; + DisableDateTimeInfinityConversions = AppContext.TryGetSwitch("Npgsql.DisableDateTimeInfinityConversions", out enabled) && enabled; + } + + internal static T Expect(IBackendMessage msg, NpgsqlConnector connector) + { + if (msg.GetType() != typeof(T)) + ThrowIfMsgWrongType(msg, connector); + + return (T)msg; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal static T ExpectAny(IBackendMessage msg, NpgsqlConnector connector) + { + if (msg is T t) + return t; + + ThrowIfMsgWrongType(msg, connector); + return default; + } + + [DoesNotReturn] + static void 
ThrowIfMsgWrongType(IBackendMessage msg, NpgsqlConnector connector) + => throw connector.Break( + new NpgsqlException($"Received backend message {msg.Code} while expecting {typeof(T).Name}. Please file a bug.")); + + internal static void ValidateBackendMessageCode(BackendMessageCode code) + { + switch (code) + { + case BackendMessageCode.AuthenticationRequest: + case BackendMessageCode.BackendKeyData: + case BackendMessageCode.BindComplete: + case BackendMessageCode.CloseComplete: + case BackendMessageCode.CommandComplete: + case BackendMessageCode.CopyData: + case BackendMessageCode.CopyDone: + case BackendMessageCode.CopyBothResponse: + case BackendMessageCode.CopyInResponse: + case BackendMessageCode.CopyOutResponse: + case BackendMessageCode.DataRow: + case BackendMessageCode.EmptyQueryResponse: + case BackendMessageCode.ErrorResponse: + case BackendMessageCode.FunctionCall: + case BackendMessageCode.FunctionCallResponse: + case BackendMessageCode.NoData: + case BackendMessageCode.NoticeResponse: + case BackendMessageCode.NotificationResponse: + case BackendMessageCode.ParameterDescription: + case BackendMessageCode.ParameterStatus: + case BackendMessageCode.ParseComplete: + case BackendMessageCode.PasswordPacket: + case BackendMessageCode.PortalSuspended: + case BackendMessageCode.ReadyForQuery: + case BackendMessageCode.RowDescription: + return; + default: + ThrowUnknownMessageCode(code); + return; + } + + static void ThrowUnknownMessageCode(BackendMessageCode code) + => ThrowHelper.ThrowNpgsqlException($"Unknown message code: {code}"); + } +} + +static class EnumerableExtensions +{ + internal static string Join(this IEnumerable values, string separator) + => string.Join(separator, values); +} diff --git a/src/Npgsql/Util/StrongBox.cs b/src/Npgsql/Util/StrongBox.cs new file mode 100644 index 0000000000..d72c3140e0 --- /dev/null +++ b/src/Npgsql/Util/StrongBox.cs @@ -0,0 +1,41 @@ +using System.Diagnostics.CodeAnalysis; + +namespace Npgsql.Util; + +abstract 
class StrongBox +{ + private protected StrongBox() { } + public abstract bool HasValue { get; } + public abstract object? Value { get; set; } + public abstract void Clear(); +} + +sealed class StrongBox : StrongBox +{ + bool _hasValue; + + [MaybeNull] T _typedValue; + [MaybeNull] + public T TypedValue { + get => _typedValue; + set + { + _hasValue = true; + _typedValue = value; + } + } + + public override bool HasValue => _hasValue; + + public override object? Value + { + get => TypedValue; + set => TypedValue = (T)value!; + } + + public override void Clear() + { + _hasValue = false; + TypedValue = default!; + } +} diff --git a/src/Npgsql/Util/SubReadStream.cs b/src/Npgsql/Util/SubReadStream.cs new file mode 100644 index 0000000000..6aaee9651a --- /dev/null +++ b/src/Npgsql/Util/SubReadStream.cs @@ -0,0 +1,227 @@ +using System; +using System.Diagnostics; +using System.IO; +using System.Threading; +using System.Threading.Tasks; + +namespace Npgsql.Util; + +// Adapted from https://github.com/dotnet/runtime/blob/83adfae6a6273d8fb4c69554aa3b1cc7cbf01c71/src/libraries/System.IO.Compression/src/System/IO/Compression/ZipCustomStreams.cs#L221 +sealed class SubReadStream : Stream +{ + readonly long _startInSuperStream; + long _positionInSuperStream; + readonly long _endInSuperStream; + readonly Stream _superStream; + readonly bool _canSeek; + bool _isDisposed; + + public SubReadStream(Stream superStream, long maxLength) + { + _startInSuperStream = -1; + _positionInSuperStream = 0; + _endInSuperStream = maxLength; + _superStream = superStream; + _canSeek = false; + _isDisposed = false; + } + + public SubReadStream(Stream superStream, long startPosition, long maxLength) + { + _startInSuperStream = startPosition; + _positionInSuperStream = startPosition; + _endInSuperStream = startPosition + maxLength; + _superStream = superStream; + _canSeek = superStream.CanSeek; + _isDisposed = false; + } + + public override long Length + { + get + { + ThrowIfDisposed(); + + if (!_canSeek) + 
throw new NotSupportedException(); + + return _endInSuperStream - _startInSuperStream; + } + } + + public override long Position + { + get + { + ThrowIfDisposed(); + + if (!_canSeek) + throw new NotSupportedException(); + + return _positionInSuperStream - _startInSuperStream; + } + set + { + ThrowIfDisposed(); + + throw new NotSupportedException(); + } + } + + public override bool CanRead => _superStream.CanRead && !_isDisposed; + + public override bool CanSeek => false; + + public override bool CanWrite => false; + + void ThrowIfDisposed() + { + if (_isDisposed) + throw new ObjectDisposedException(GetType().ToString()); + } + + void ThrowIfCantRead() + { + if (!CanRead) + throw new NotSupportedException(); + } + + public override int Read(byte[] buffer, int offset, int count) + { + // parameter validation sent to _superStream.Read + var origCount = count; + + ThrowIfDisposed(); + ThrowIfCantRead(); + + if (_canSeek && _superStream.Position != _positionInSuperStream) + _superStream.Seek(_positionInSuperStream, SeekOrigin.Begin); + if (_positionInSuperStream > _endInSuperStream - count) + count = (int)(_endInSuperStream - _positionInSuperStream); + + Debug.Assert(count >= 0); + Debug.Assert(count <= origCount); + + var ret = _superStream.Read(buffer, offset, count); + + _positionInSuperStream += ret; + return ret; + } + +#if !NETSTANDARD2_0 + public override int Read(Span destination) +#else + int Read(Span destination) +#endif + { + // parameter validation sent to _superStream.Read + var origCount = destination.Length; + var count = destination.Length; + + ThrowIfDisposed(); + ThrowIfCantRead(); + + if (_canSeek && _superStream.Position != _positionInSuperStream) + _superStream.Seek(_positionInSuperStream, SeekOrigin.Begin); + if (_positionInSuperStream + count > _endInSuperStream) + count = (int)(_endInSuperStream - _positionInSuperStream); + + Debug.Assert(count >= 0); + Debug.Assert(count <= origCount); + + var ret = _superStream.Read(destination.Slice(0, 
count)); + + _positionInSuperStream += ret; + return ret; + } + + public override int ReadByte() + { + Span b = stackalloc byte[1]; + return Read(b) == 1 ? b[0] : -1; + } + + public override Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + { + ValidateBufferArguments(buffer, offset, count); + return ReadAsync(new Memory(buffer, offset, count), cancellationToken).AsTask(); + } + +#if !NETSTANDARD2_0 + public override ValueTask ReadAsync(Memory buffer, CancellationToken cancellationToken = default) +#else + ValueTask ReadAsync(Memory buffer, CancellationToken cancellationToken = default) +#endif + { + ThrowIfDisposed(); + ThrowIfCantRead(); + if (_canSeek && _superStream.Position != _positionInSuperStream) + { + _superStream.Seek(_positionInSuperStream, SeekOrigin.Begin); + } + + if (_positionInSuperStream > _endInSuperStream - buffer.Length) + { + buffer = buffer.Slice(0, (int)(_endInSuperStream - _positionInSuperStream)); + } + + return Core(buffer, cancellationToken); + + async ValueTask Core(Memory buffer, CancellationToken cancellationToken) + { + var ret = await _superStream.ReadAsync(buffer, cancellationToken).ConfigureAwait(false); + _positionInSuperStream += ret; + return ret; + } + } + + public override long Seek(long offset, SeekOrigin origin) + { + ThrowIfDisposed(); + throw new NotSupportedException(); + } + + public override void SetLength(long value) + { + ThrowIfDisposed(); + throw new NotSupportedException(); + } + + public override void Write(byte[] buffer, int offset, int count) + { + ThrowIfDisposed(); + throw new NotSupportedException(); + } + + public override void Flush() + { + ThrowIfDisposed(); + throw new NotSupportedException(); + } + + // Close the stream for reading. 
Note that this does NOT close the superStream (since + // the substream is just 'a chunk' of the super-stream + protected override void Dispose(bool disposing) + { + if (disposing && !_isDisposed) + { + _isDisposed = true; + } + base.Dispose(disposing); + } + +#if NETSTANDARD + void ValidateBufferArguments(byte[]? buffer, int offset, int count) + { + if (buffer is null) + ThrowHelper.ThrowArgumentNullException(nameof(buffer)); + + if (offset < 0) + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(offset), "Offset is less than 0"); + + if ((uint)count > buffer.Length - offset) + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(count), "Count larger than buffer minus offset"); + + } +#endif +} diff --git a/src/Npgsql/VolatileResourceManager.cs b/src/Npgsql/VolatileResourceManager.cs index 70afea0557..816cf15b32 100644 --- a/src/Npgsql/VolatileResourceManager.cs +++ b/src/Npgsql/VolatileResourceManager.cs @@ -1,5 +1,4 @@ using System; -using System.Diagnostics; using System.Threading; using System.Transactions; using Microsoft.Extensions.Logging; diff --git a/src/Shared/CodeAnalysis.cs b/src/Shared/CodeAnalysis.cs index 594d5bb5d0..8e8e3b3d9e 100644 --- a/src/Shared/CodeAnalysis.cs +++ b/src/Shared/CodeAnalysis.cs @@ -5,6 +5,54 @@ namespace System.Diagnostics.CodeAnalysis { +#if !NET7_0_OR_GREATER + /// + /// Indicates that the specified method requires the ability to generate new code at runtime, + /// for example through . + /// + /// + /// This allows tools to understand which methods are unsafe to call when compiling ahead of time. + /// + [AttributeUsage(AttributeTargets.Method | AttributeTargets.Constructor | AttributeTargets.Class, Inherited = false)] + sealed class RequiresDynamicCodeAttribute : Attribute + { + /// + /// Initializes a new instance of the class + /// with the specified message. + /// + /// + /// A message that contains information about the usage of dynamic code. 
+ /// + public RequiresDynamicCodeAttribute(string message) + { + Message = message; + } + + /// + /// Gets a message that contains information about the usage of dynamic code. + /// + public string Message { get; } + + /// + /// Gets or sets an optional URL that contains more information about the method, + /// why it requires dynamic code, and what options a consumer has to deal with it. + /// + public string? Url { get; set; } + } + + [AttributeUsage(AttributeTargets.Constructor, AllowMultiple = false, Inherited = false)] + sealed class SetsRequiredMembersAttribute : Attribute + { + } + [AttributeUsageAttribute(AttributeTargets.Method | AttributeTargets.Property | AttributeTargets.Parameter, AllowMultiple = false, Inherited = false)] + sealed class UnscopedRefAttribute : Attribute + { + /// + /// Initializes a new instance of the class. + /// + public UnscopedRefAttribute() { } + } +#endif #if NETSTANDARD2_0 [AttributeUsageAttribute(AttributeTargets.Field | AttributeTargets.Parameter | AttributeTargets.Property)] sealed class AllowNullAttribute : Attribute @@ -167,9 +215,43 @@ public UnconditionalSuppressMessageAttribute(string category, string checkId) #endif } -#if !NET5_0_OR_GREATER namespace System.Runtime.CompilerServices { - internal static class IsExternalInit {} +#if !NET5_0_OR_GREATER + static class IsExternalInit {} +#endif +#if !NET7_0_OR_GREATER + [AttributeUsage(AttributeTargets.Class | AttributeTargets.Struct | AttributeTargets.Field | AttributeTargets.Property, AllowMultiple = false, Inherited = false)] + sealed class RequiredMemberAttribute : Attribute + { } + + [AttributeUsage(AttributeTargets.All, AllowMultiple = true, Inherited = false)] + sealed class CompilerFeatureRequiredAttribute : Attribute + { + public CompilerFeatureRequiredAttribute(string featureName) + { + FeatureName = featureName; + } + + /// + /// The name of the compiler feature. 
+ /// + public string FeatureName { get; } + + /// + /// If true, the compiler can choose to allow access to the location where this attribute is applied if it does not understand . + /// + public bool IsOptional { get; init; } + + /// + /// The used for the ref structs C# feature. + /// + public const string RefStructs = nameof(RefStructs); + + /// + /// The used for the required members C# feature. + /// + public const string RequiredMembers = nameof(RequiredMembers); + } +#endif } -#endif \ No newline at end of file diff --git a/test/Npgsql.Benchmarks/Prepare.cs b/test/Npgsql.Benchmarks/Prepare.cs index 246b25e491..6b8d9b06bc 100644 --- a/test/Npgsql.Benchmarks/Prepare.cs +++ b/test/Npgsql.Benchmarks/Prepare.cs @@ -1,5 +1,4 @@ -using System.Diagnostics.CodeAnalysis; -using System.Linq; +using System.Linq; using System.Reflection; using System.Text; using BenchmarkDotNet.Attributes; diff --git a/test/Npgsql.Benchmarks/ReadArray.cs b/test/Npgsql.Benchmarks/ReadArray.cs index fecda03f43..e1e5b2d8de 100644 --- a/test/Npgsql.Benchmarks/ReadArray.cs +++ b/test/Npgsql.Benchmarks/ReadArray.cs @@ -1,10 +1,4 @@ using BenchmarkDotNet.Attributes; -using System; -using System.Collections.Generic; -using System.Diagnostics; -using System.IO; -using System.Linq; -using System.Runtime.CompilerServices; namespace Npgsql.Benchmarks; diff --git a/test/Npgsql.Benchmarks/ResolveHandler.cs b/test/Npgsql.Benchmarks/ResolveHandler.cs index 419b3e179c..86e5d20fbb 100644 --- a/test/Npgsql.Benchmarks/ResolveHandler.cs +++ b/test/Npgsql.Benchmarks/ResolveHandler.cs @@ -1,8 +1,6 @@ using BenchmarkDotNet.Attributes; -using Npgsql.Internal.TypeHandling; -using Npgsql.Internal.TypeMapping; -using Npgsql.TypeMapping; -using NpgsqlTypes; +using Npgsql.Internal; +using Npgsql.Internal.Postgres; namespace Npgsql.Benchmarks; @@ -10,7 +8,7 @@ namespace Npgsql.Benchmarks; public class ResolveHandler { NpgsqlDataSource? 
_dataSource; - TypeMapper _typeMapper = null!; + PgSerializerOptions _serializerOptions = null!; [Params(0, 1, 2)] public int NumPlugins { get; set; } @@ -24,29 +22,21 @@ public void Setup() if (NumPlugins > 1) dataSourceBuilder.UseNetTopologySuite(); _dataSource = dataSourceBuilder.Build(); - _typeMapper = _dataSource.TypeMapper; + _serializerOptions = _dataSource.SerializerOptions; } [GlobalCleanup] public void Cleanup() => _dataSource?.Dispose(); [Benchmark] - public NpgsqlTypeHandler ResolveOID() - => _typeMapper.ResolveByOID(23); // int4 + public PgTypeInfo? ResolveDefault() + => _serializerOptions.GetDefaultTypeInfo(new Oid(23)); // int4 [Benchmark] - public NpgsqlTypeHandler ResolveNpgsqlDbType() - => _typeMapper.ResolveByNpgsqlDbType(NpgsqlDbType.Integer); + public PgTypeInfo? ResolveType() + => _serializerOptions.GetTypeInfo(typeof(int)); [Benchmark] - public NpgsqlTypeHandler ResolveDataTypeName() - => _typeMapper.ResolveByDataTypeName("integer"); - - [Benchmark] - public NpgsqlTypeHandler ResolveClrTypeNonGeneric() - => _typeMapper.ResolveByValue((object)8); - - [Benchmark] - public NpgsqlTypeHandler ResolveClrTypeGeneric() - => _typeMapper.ResolveByValue(8); + public PgTypeInfo? 
ResolveBoth() + => _serializerOptions.GetTypeInfo(typeof(int), new Oid(23)); // int4 } diff --git a/test/Npgsql.Benchmarks/TypeHandlers/Composite.cs b/test/Npgsql.Benchmarks/TypeHandlers/Composite.cs index 496b51af6f..52418a7240 100644 --- a/test/Npgsql.Benchmarks/TypeHandlers/Composite.cs +++ b/test/Npgsql.Benchmarks/TypeHandlers/Composite.cs @@ -1,9 +1,4 @@ -using System.Collections.Generic; -using BenchmarkDotNet.Attributes; -using Npgsql.NameTranslation; -using Npgsql.PostgresTypes; -using Npgsql.TypeMapping; -using Npgsql.Util; + /* Disabling for now: unmapped composite support is probably going away, and there's a good chance this * class can be simplified to a certain extent diff --git a/test/Npgsql.Benchmarks/TypeHandlers/Numeric.cs b/test/Npgsql.Benchmarks/TypeHandlers/Numeric.cs index 19e044b0a4..42f5f3936a 100644 --- a/test/Npgsql.Benchmarks/TypeHandlers/Numeric.cs +++ b/test/Npgsql.Benchmarks/TypeHandlers/Numeric.cs @@ -1,43 +1,43 @@ using System.Collections.Generic; using BenchmarkDotNet.Attributes; -using Npgsql.Internal.TypeHandlers.NumericHandlers; +using Npgsql.Internal.Converters; namespace Npgsql.Benchmarks.TypeHandlers; [Config(typeof(Config))] public class Int16 : TypeHandlerBenchmarks { - public Int16() : base(new Int16Handler(GetPostgresType("smallint"))) { } + public Int16() : base(new Int2Converter()) { } } [Config(typeof(Config))] public class Int32 : TypeHandlerBenchmarks { - public Int32() : base(new Int32Handler(GetPostgresType("integer"))) { } + public Int32() : base(new Int4Converter()) { } } [Config(typeof(Config))] public class Int64 : TypeHandlerBenchmarks { - public Int64() : base(new Int64Handler(GetPostgresType("bigint"))) { } + public Int64() : base(new Int8Converter()) { } } [Config(typeof(Config))] public class Single : TypeHandlerBenchmarks { - public Single() : base(new SingleHandler(GetPostgresType("real"))) { } + public Single() : base(new RealConverter()) { } } [Config(typeof(Config))] public class Double : 
TypeHandlerBenchmarks { - public Double() : base(new DoubleHandler(GetPostgresType("double precision"))) { } + public Double() : base(new DoubleConverter()) { } } [Config(typeof(Config))] public class Numeric : TypeHandlerBenchmarks { - public Numeric() : base(new NumericHandler(GetPostgresType("numeric"))) { } + public Numeric() : base(new DecimalNumericConverter()) { } protected override IEnumerable ValuesOverride() => new[] { @@ -62,5 +62,5 @@ protected override IEnumerable ValuesOverride() => new[] [Config(typeof(Config))] public class Money : TypeHandlerBenchmarks { - public Money() : base(new MoneyHandler(GetPostgresType("money"))) { } -} \ No newline at end of file + public Money() : base(new MoneyConverter()) { } +} diff --git a/test/Npgsql.Benchmarks/TypeHandlers/Text.cs b/test/Npgsql.Benchmarks/TypeHandlers/Text.cs index 407a749240..80d5f6ce0c 100644 --- a/test/Npgsql.Benchmarks/TypeHandlers/Text.cs +++ b/test/Npgsql.Benchmarks/TypeHandlers/Text.cs @@ -1,18 +1,18 @@ using BenchmarkDotNet.Attributes; using System.Collections.Generic; using System.Text; -using Npgsql.Internal.TypeHandlers; +using Npgsql.Internal.Converters; namespace Npgsql.Benchmarks.TypeHandlers; [Config(typeof(Config))] public class Text : TypeHandlerBenchmarks { - public Text() : base(new TextHandler(GetPostgresType("text"), Encoding.UTF8)) { } + public Text() : base(new StringTextConverter(Encoding.UTF8)) { } protected override IEnumerable ValuesOverride() { for (var i = 1; i <= 10000; i *= 10) yield return new string('x', i); } -} \ No newline at end of file +} diff --git a/test/Npgsql.Benchmarks/TypeHandlers/TypeHandlerBenchmarks.cs b/test/Npgsql.Benchmarks/TypeHandlers/TypeHandlerBenchmarks.cs index 76cc862378..994839c219 100644 --- a/test/Npgsql.Benchmarks/TypeHandlers/TypeHandlerBenchmarks.cs +++ b/test/Npgsql.Benchmarks/TypeHandlers/TypeHandlerBenchmarks.cs @@ -5,11 +5,8 @@ using System; using System.Collections.Generic; using System.IO; -using System.Text; +using 
System.Threading; using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using Npgsql.Util; #nullable disable @@ -40,27 +37,26 @@ public override void SetLength(long value) { } public override void Write(byte[] buffer, int offset, int count) { } } - readonly EndlessStream _stream; - readonly NpgsqlTypeHandler _handler; - readonly NpgsqlReadBuffer _readBuffer; + readonly PgConverter _converter; + readonly PgReader _reader; + readonly PgWriter _writer; readonly NpgsqlWriteBuffer _writeBuffer; - T _value; - int _elementSize; + readonly NpgsqlReadBuffer _readBuffer; + readonly BufferRequirements _binaryRequirements; - protected TypeHandlerBenchmarks(NpgsqlTypeHandler handler) - { - _stream = new EndlessStream(); - _handler = handler ?? throw new ArgumentNullException(nameof(handler)); - _readBuffer = new NpgsqlReadBuffer(null, _stream, null, NpgsqlReadBuffer.MinimumSize, Encoding.UTF8, PGUtil.RelaxedUTF8Encoding); - _writeBuffer = new NpgsqlWriteBuffer(null, _stream, null, NpgsqlWriteBuffer.MinimumSize, Encoding.UTF8); - } + T _value; + Size _elementSize; - protected static PostgresType GetPostgresType(string pgType) + protected TypeHandlerBenchmarks(PgConverter handler) { - using (var conn = BenchmarkEnvironment.OpenConnection()) - using (var cmd = new NpgsqlCommand($"SELECT NULL::{pgType}", conn)) - using (var reader = cmd.ExecuteReader()) - return reader.GetPostgresType(0); + var stream = new EndlessStream(); + _converter = handler ?? 
throw new ArgumentNullException(nameof(handler)); + _readBuffer = new NpgsqlReadBuffer(null, stream, null, NpgsqlReadBuffer.MinimumSize, NpgsqlWriteBuffer.UTF8Encoding, NpgsqlWriteBuffer.RelaxedUTF8Encoding); + _writeBuffer = new NpgsqlWriteBuffer(null, stream, null, NpgsqlWriteBuffer.MinimumSize, NpgsqlWriteBuffer.UTF8Encoding); + _reader = new PgReader(_readBuffer); + _writer = new PgWriter(new NpgsqlBufferWriter(_writeBuffer)); + _writer.Init(new PostgresMinimalDatabaseInfo()); + _converter.CanConvert(DataFormat.Binary, out _binaryRequirements); } public IEnumerable Values() => ValuesOverride(); @@ -73,17 +69,16 @@ public T Value get => _value; set { - NpgsqlLengthCache cache = null; - _value = value; - _elementSize = _handler.ValidateAndGetLength(value, ref cache, null); - - cache.Rewind(); - - _handler.WriteWithLength(_value, _writeBuffer, cache, null, false); - Buffer.BlockCopy(_writeBuffer.Buffer, 0, _readBuffer.Buffer, 0, _elementSize); - - _readBuffer.FilledBytes = _elementSize; + object state = null; + var size = _elementSize = _converter.GetSizeAsObject(new(DataFormat.Binary, _binaryRequirements.Write), value, ref state); + var current = new ValueMetadata { Format = DataFormat.Binary, BufferRequirement = _binaryRequirements.Write, Size = size, WriteState = state }; + _writer.BeginWrite(async: false, current, CancellationToken.None).GetAwaiter().GetResult(); + _converter.WriteAsObject(_writer, value); + Buffer.BlockCopy(_writeBuffer.Buffer, 0, _readBuffer.Buffer, 0, size.Value); + + _writer.Commit(size.Value); + _readBuffer.FilledBytes = size.Value; _writeBuffer.WritePosition = 0; } } @@ -92,13 +87,18 @@ public T Value public T Read() { _readBuffer.ReadPosition = sizeof(int); - return _handler.Read(_readBuffer, _elementSize); + _reader.StartRead(_binaryRequirements.Read); + var value = ((PgConverter)_converter).Read(_reader); + _reader.EndRead(); + return value; } [Benchmark] public void Write() { _writeBuffer.WritePosition = 0; - 
_handler.WriteWithLength(_value, _writeBuffer, null, null, false); + var current = new ValueMetadata { Format = DataFormat.Binary, BufferRequirement = _binaryRequirements.Write, Size = _elementSize, WriteState = null }; + _writer.BeginWrite(async: false, current, CancellationToken.None).GetAwaiter().GetResult(); + ((PgConverter)_converter).Write(_writer, _value); } -} \ No newline at end of file +} diff --git a/test/Npgsql.Benchmarks/TypeHandlers/Uuid.cs b/test/Npgsql.Benchmarks/TypeHandlers/Uuid.cs index 78d4018dfd..7c229a3b57 100644 --- a/test/Npgsql.Benchmarks/TypeHandlers/Uuid.cs +++ b/test/Npgsql.Benchmarks/TypeHandlers/Uuid.cs @@ -1,11 +1,11 @@ using System; using BenchmarkDotNet.Attributes; -using Npgsql.Internal.TypeHandlers; +using Npgsql.Internal.Converters; namespace Npgsql.Benchmarks.TypeHandlers; [Config(typeof(Config))] public class Uuid : TypeHandlerBenchmarks { - public Uuid() : base(new UuidHandler(GetPostgresType("uuid"))) { } -} \ No newline at end of file + public Uuid() : base(new GuidUuidConverter()) { } +} diff --git a/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj b/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj index 3396a51a92..bc680c3052 100644 --- a/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj +++ b/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj @@ -5,12 +5,13 @@ net8.0 true + true true true true - false - Size + false true + Size diff --git a/test/Npgsql.NodaTime.Tests/LegacyNodaTimeTests.cs b/test/Npgsql.NodaTime.Tests/LegacyNodaTimeTests.cs deleted file mode 100644 index 67c4202ff4..0000000000 --- a/test/Npgsql.NodaTime.Tests/LegacyNodaTimeTests.cs +++ /dev/null @@ -1,104 +0,0 @@ -using System; -using System.Data; -using System.Threading.Tasks; -using NodaTime; -using Npgsql.Tests; -using NpgsqlTypes; -using NUnit.Framework; - -namespace Npgsql.NodaTime.Tests; - -// Since this test suite manipulates TimeZone, it is incompatible with multiplexing -[NonParallelizable] -public class 
LegacyNodaTimeTests : TestBase -{ - [Test] - public Task Timestamp_as_Instant() - => AssertType( - new LocalDateTime(1998, 4, 12, 13, 26, 38, 789).InUtc().ToInstant(), - "1998-04-12 13:26:38.789", - "timestamp without time zone", - NpgsqlDbType.Timestamp, - DbType.DateTime); - - [Test] - public Task Timestamp_as_LocalDateTime() - => AssertType( - new LocalDateTime(1998, 4, 12, 13, 26, 38, 789), - "1998-04-12 13:26:38.789", - "timestamp without time zone", - NpgsqlDbType.Timestamp, - DbType.DateTime, - isDefaultForReading: false); - - [Test] - public Task Timestamptz_as_Instant() - => AssertType( - new LocalDateTime(1998, 4, 12, 13, 26, 38, 789).InUtc().ToInstant(), - "1998-04-12 15:26:38.789+02", - "timestamp with time zone", - NpgsqlDbType.TimestampTz, - DbType.DateTimeOffset, - isDefault: false); - - [Test] - public Task Timestamptz_ZonedDateTime_infinite_values_are_not_supported() - => AssertTypeUnsupported(Instant.MaxValue.InZone(DateTimeZone.Utc), "infinity", "timestamptz"); - - [Test] - public Task Timestamptz_OffsetDateTime_infinite_values_are_not_supported() - => AssertTypeUnsupported(Instant.MaxValue.WithOffset(Offset.Zero), "infinity", "timestamptz"); - - #region Support - - protected override async ValueTask OpenConnectionAsync() - { - var conn = await base.OpenConnectionAsync(); - await conn.ExecuteNonQueryAsync("SET TimeZone='Europe/Berlin'"); - return conn; - } - - protected override NpgsqlConnection OpenConnection() - => throw new NotSupportedException(); - -#pragma warning disable CS1998 // Release code blocks below lack await -#pragma warning disable CS0618 // GlobalTypeMapper is obsolete - [OneTimeSetUp] - public async Task Setup() - { -#if DEBUG - Internal.NodaTimeUtils.LegacyTimestampBehavior = true; - Util.Statics.LegacyTimestampBehavior = true; - - // Clear any previous cached mappings/handlers in case tests were executed before the legacy flag was set. 
- NpgsqlConnection.GlobalTypeMapper.Reset(); - NpgsqlConnection.GlobalTypeMapper.UseNodaTime(); - await using var connection = await OpenConnectionAsync(); - await connection.ReloadTypesAsync(); -#else - Assert.Ignore( - "Legacy NodaTime tests rely on the Npgsql.EnableLegacyTimestampBehavior AppContext switch and can only be run in DEBUG builds"); -#endif - - } - - [OneTimeTearDown] - public async Task Teardown() - { -#if DEBUG - Internal.NodaTimeUtils.LegacyTimestampBehavior = false; - Util.Statics.LegacyTimestampBehavior = false; - - // Clear any previous cached mappings/handlers to not affect test which will run later without the legacy flag - NpgsqlConnection.GlobalTypeMapper.Reset(); - NpgsqlConnection.GlobalTypeMapper.UseNodaTime(); - - await using var connection = await OpenConnectionAsync(); - await connection.ReloadTypesAsync(); -#endif - } -#pragma warning restore CS1998 -#pragma warning restore CS0618 // GlobalTypeMapper is obsolete - - #endregion Support -} diff --git a/test/Npgsql.NodaTime.Tests/NodaTimeSetupFixture.cs b/test/Npgsql.NodaTime.Tests/NodaTimeSetupFixture.cs deleted file mode 100644 index 25ab4f58cd..0000000000 --- a/test/Npgsql.NodaTime.Tests/NodaTimeSetupFixture.cs +++ /dev/null @@ -1,18 +0,0 @@ -using NUnit.Framework; - -namespace Npgsql.NodaTime.Tests; - -// Note that we register NodaTime globally, rather than using the more standard data source mapping. -// We can do this since NUnit runs each test assembly in a different process, so we get isolation and don't interfere with other, -// non-NodaTime tests. This also allows us to test global type inference, which only works with global mappings. 
-[SetUpFixture] -public class NodaTimeSetupFixture -{ -#pragma warning disable CS0618 // GlobalTypeMapper is obsolete - [OneTimeSetUp] - public void OneTimeSetUp() => NpgsqlConnection.GlobalTypeMapper.UseNodaTime(); - - [OneTimeTearDown] - public void OneTimeTearDown() => NpgsqlConnection.GlobalTypeMapper.Reset(); -#pragma warning restore CS0618 // GlobalTypeMapper is obsolete -} diff --git a/test/Npgsql.NodaTime.Tests/Npgsql.NodaTime.Tests.csproj b/test/Npgsql.NodaTime.Tests/Npgsql.NodaTime.Tests.csproj deleted file mode 100644 index bfa9b74079..0000000000 --- a/test/Npgsql.NodaTime.Tests/Npgsql.NodaTime.Tests.csproj +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - diff --git a/test/Npgsql.PluginTests/LegacyNodaTimeTests.cs b/test/Npgsql.PluginTests/LegacyNodaTimeTests.cs new file mode 100644 index 0000000000..6af0afec24 --- /dev/null +++ b/test/Npgsql.PluginTests/LegacyNodaTimeTests.cs @@ -0,0 +1,106 @@ +using System; +using System.Data; +using System.Threading.Tasks; +using NodaTime; +using Npgsql.Tests; +using NpgsqlTypes; +using NUnit.Framework; +using Npgsql.NodaTime.Internal; + +namespace Npgsql.PluginTests; + +[NonParallelizable] // Since this test suite manipulates an AppContext switch +public class LegacyNodaTimeTests : TestBase, IDisposable +{ + const string TimeZone = "Europe/Berlin"; + + [Test] + public async Task Timestamp_as_ZonedDateTime() + { + await AssertType( + new LocalDateTime(1998, 4, 12, 13, 26, 38, 789).InZoneLeniently(DateTimeZoneProviders.Tzdb[TimeZone]), + "1998-04-12 13:26:38.789+02", + "timestamp with time zone", + NpgsqlDbType.TimestampTz, + DbType.DateTimeOffset, + isNpgsqlDbTypeInferredFromClrType: false, isDefault: false); + } + + [Test] + public Task Timestamp_as_Instant() + => AssertType( + new LocalDateTime(1998, 4, 12, 13, 26, 38, 789).InUtc().ToInstant(), + "1998-04-12 13:26:38.789", + "timestamp without time zone", + NpgsqlDbType.Timestamp, + DbType.DateTime, + isNpgsqlDbTypeInferredFromClrType: false); + + [Test] + 
public Task Timestamp_as_LocalDateTime() + => AssertType( + new LocalDateTime(1998, 4, 12, 13, 26, 38, 789), + "1998-04-12 13:26:38.789", + "timestamp without time zone", + NpgsqlDbType.Timestamp, + DbType.DateTime, + isDefaultForReading: false, + isNpgsqlDbTypeInferredFromClrType: false); + + [Test] + public Task Timestamptz_as_Instant() + => AssertType( + new LocalDateTime(1998, 4, 12, 13, 26, 38, 789).InUtc().ToInstant(), + "1998-04-12 15:26:38.789+02", + "timestamp with time zone", + NpgsqlDbType.TimestampTz, + DbType.DateTimeOffset, + isDefault: false, + isNpgsqlDbTypeInferredFromClrType: false); + + [Test] + public async Task Timestamptz_ZonedDateTime_infinite_values_are_not_supported() + { + await AssertTypeUnsupportedRead("infinity", "timestamptz"); + await AssertTypeUnsupportedWrite(Instant.MaxValue.WithOffset(Offset.Zero), "timestamptz"); + } + + [Test] + public async Task Timestamptz_OffsetDateTime_infinite_values_are_not_supported() + { + await AssertTypeUnsupportedRead("infinity", "timestamptz"); + await AssertTypeUnsupportedWrite(Instant.MaxValue.WithOffset(Offset.Zero), "timestamptz"); + } + + #region Support + + protected override NpgsqlDataSource DataSource { get; } + + public LegacyNodaTimeTests() + { +#if DEBUG + NodaTimeUtils.LegacyTimestampBehavior = true; + Util.Statics.LegacyTimestampBehavior = true; + + var builder = CreateDataSourceBuilder(); + builder.UseNodaTime(); + builder.ConnectionStringBuilder.Timezone = TimeZone; + DataSource = builder.Build(); +#else + Assert.Ignore( + "Legacy NodaTime tests rely on the Npgsql.EnableLegacyTimestampBehavior AppContext switch and can only be run in DEBUG builds"); +#endif + } + + public void Dispose() + { +#if DEBUG + NodaTimeUtils.LegacyTimestampBehavior = false; + Util.Statics.LegacyTimestampBehavior = false; + + DataSource.Dispose(); +#endif + } + + #endregion Support +} diff --git a/test/Npgsql.PluginTests/NetTopologySuiteTests.cs b/test/Npgsql.PluginTests/NetTopologySuiteTests.cs index 
20fc9f17a4..2fb33f678d 100644 --- a/test/Npgsql.PluginTests/NetTopologySuiteTests.cs +++ b/test/Npgsql.PluginTests/NetTopologySuiteTests.cs @@ -1,5 +1,4 @@ using System; -using System.Collections; using System.Collections.Concurrent; using System.Linq; using System.Threading.Tasks; @@ -14,26 +13,34 @@ namespace Npgsql.PluginTests; public class NetTopologySuiteTests : TestBase { - public struct TestData + static readonly TestCaseData[] TestCases = { - public Ordinates Ordinates; - public Geometry Geometry; - public string CommandText; - } + new TestCaseData(Ordinates.None, new Point(1d, 2500d), "st_makepoint(1,2500)") + .SetName("Point"), - public static IEnumerable TestCases { - get - { - // Two dimensional data - yield return new TestCaseData(Ordinates.None, new Point(1d, 2500d), "st_makepoint(1,2500)"); + new TestCaseData(Ordinates.None, new MultiPoint(new[] { new Point(new Coordinate(1d, 1d)) }), "st_multi(st_makepoint(1, 1))") + .SetName("MultiPoint"), - yield return new TestCaseData( + new TestCaseData( Ordinates.None, new LineString(new[] { new Coordinate(1d, 1d), new Coordinate(1d, 2500d) }), - "st_makeline(st_makepoint(1,1),st_makepoint(1,2500))" - ); + "st_makeline(st_makepoint(1,1),st_makepoint(1,2500))") + .SetName("LineString"), + + new TestCaseData( + Ordinates.None, + new MultiLineString(new[] + { + new LineString(new[] + { + new Coordinate(1d, 1d), + new Coordinate(1d, 2500d) + }) + }), + "st_multi(st_makeline(st_makepoint(1,1),st_makepoint(1,2500)))") + .SetName("MultiLineString"), - yield return new TestCaseData( + new TestCaseData( Ordinates.None, new Polygon( new LinearRing(new[] @@ -44,29 +51,10 @@ public static IEnumerable TestCases { new Coordinate(1d, 1d) }) ), - "st_makepolygon(st_makeline(ARRAY[st_makepoint(1,1),st_makepoint(2,2),st_makepoint(3,3),st_makepoint(1,1)]))" - ); - - yield return new TestCaseData( - Ordinates.None, - new MultiPoint(new[] { new Point(new Coordinate(1d, 1d)) }), - "st_multi(st_makepoint(1, 1))" - ); + 
"st_makepolygon(st_makeline(ARRAY[st_makepoint(1,1),st_makepoint(2,2),st_makepoint(3,3),st_makepoint(1,1)]))") + .SetName("Polygon"), - yield return new TestCaseData( - Ordinates.None, - new MultiLineString(new[] - { - new LineString(new[] - { - new Coordinate(1d, 1d), - new Coordinate(1d, 2500d) - }) - }), - "st_multi(st_makeline(st_makepoint(1,1),st_makepoint(1,2500)))" - ); - - yield return new TestCaseData( + new TestCaseData( Ordinates.None, new MultiPolygon(new[] { @@ -80,16 +68,13 @@ public static IEnumerable TestCases { }) ) }), - "st_multi(st_makepolygon(st_makeline(ARRAY[st_makepoint(1,1),st_makepoint(2,2),st_makepoint(3,3),st_makepoint(1,1)])))" - ); + "st_multi(st_makepolygon(st_makeline(ARRAY[st_makepoint(1,1),st_makepoint(2,2),st_makepoint(3,3),st_makepoint(1,1)])))") + .SetName("MultiPolygon"), - yield return new TestCaseData( - Ordinates.None, - GeometryCollection.Empty, - "st_geomfromtext('GEOMETRYCOLLECTION EMPTY')" - ); + new TestCaseData(Ordinates.None, GeometryCollection.Empty, "st_geomfromtext('GEOMETRYCOLLECTION EMPTY')") + .SetName("EmptyCollection"), - yield return new TestCaseData( + new TestCaseData( Ordinates.None, new GeometryCollection(new Geometry[] { @@ -107,10 +92,10 @@ public static IEnumerable TestCases { ) }) }), - "st_collect(st_makepoint(1,1),st_multi(st_makepolygon(st_makeline(ARRAY[st_makepoint(1,1),st_makepoint(2,2),st_makepoint(3,3),st_makepoint(1,1)]))))" - ); + "st_collect(st_makepoint(1,1),st_multi(st_makepolygon(st_makeline(ARRAY[st_makepoint(1,1),st_makepoint(2,2),st_makepoint(3,3),st_makepoint(1,1)]))))") + .SetName("Collection"), - yield return new TestCaseData( + new TestCaseData( Ordinates.None, new GeometryCollection(new Geometry[] { @@ -132,26 +117,26 @@ public static IEnumerable TestCases { }) }) }), - "st_collect(st_makepoint(1,1),st_collect(st_makepoint(1,1),st_multi(st_makepolygon(st_makeline(ARRAY[st_makepoint(1,1),st_makepoint(2,2),st_makepoint(3,3),st_makepoint(1,1)])))))" - ); + 
"st_collect(st_makepoint(1,1),st_collect(st_makepoint(1,1),st_multi(st_makepolygon(st_makeline(ARRAY[st_makepoint(1,1),st_makepoint(2,2),st_makepoint(3,3),st_makepoint(1,1)])))))") + .SetName("CollectionNested"), - yield return new TestCaseData(Ordinates.XYZ, new Point(1d, 2d, 3d), "st_makepoint(1,2,3)"); + new TestCaseData(Ordinates.XYZ, new Point(1d, 2d, 3d), "st_makepoint(1,2,3)") + .SetName("PointXYZ"), - yield return new TestCaseData( + new TestCaseData( Ordinates.XYZM, new Point( new DotSpatialAffineCoordinateSequence(new[] { 1d, 2d }, new[] { 3d }, new[] { 4d }), GeometryFactory.Default), - "st_makepoint(1,2,3,4)" - ); - } - } + "st_makepoint(1,2,3,4)") + .SetName("PointXYZM") + }; [Test, TestCaseSource(nameof(TestCases))] public async Task Read(Ordinates ordinates, Geometry geometry, string sqlRepresentation) { - using var conn = await OpenConnectionAsync(); - using var cmd = conn.CreateCommand(); + await using var conn = await OpenConnectionAsync(); + await using var cmd = conn.CreateCommand(); cmd.CommandText = $"SELECT {sqlRepresentation}"; Assert.That(Equals(cmd.ExecuteScalar(), geometry)); } @@ -159,8 +144,8 @@ public async Task Read(Ordinates ordinates, Geometry geometry, string sqlReprese [Test, TestCaseSource(nameof(TestCases))] public async Task Write(Ordinates ordinates, Geometry geometry, string sqlRepresentation) { - using var conn = await OpenConnectionAsync(handleOrdinates: ordinates); - using var cmd = conn.CreateCommand(); + await using var conn = await OpenConnectionAsync(handleOrdinates: ordinates); + await using var cmd = conn.CreateCommand(); cmd.Parameters.AddWithValue("p1", geometry); cmd.CommandText = $"SELECT st_asewkb(@p1) = st_asewkb({sqlRepresentation})"; Assert.That(cmd.ExecuteScalar(), Is.True); @@ -172,7 +157,7 @@ public async Task Array() var point = new Point(new Coordinate(1d, 1d)); await AssertType( - NtsDataSource, + DataSource, new Geometry[] { point }, '{' + GetSqlLiteral(point) + '}', "geometry[]", @@ -183,9 +168,9 @@ 
await AssertType( [Test] public async Task Read_as_concrete_type() { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT st_makepoint(1,1)", conn); - using var reader = cmd.ExecuteReader(); + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT st_makepoint(1,1)", conn); + await using var reader = cmd.ExecuteReader(); reader.Read(); Assert.That(reader.GetFieldValue(0), Is.EqualTo(new Point(new Coordinate(1d, 1d)))); Assert.That(() => reader.GetFieldValue(0), Throws.Exception.TypeOf()); @@ -195,16 +180,16 @@ public async Task Read_as_concrete_type() public async Task Roundtrip_geometry_geography() { var point = new Point(new Coordinate(1d, 1d)); - using var conn = await OpenConnectionAsync(); - conn.ExecuteNonQuery("CREATE TEMP TABLE data (geom GEOMETRY, geog GEOGRAPHY)"); - using (var cmd = new NpgsqlCommand("INSERT INTO data (geom, geog) VALUES (@p, @p)", conn)) + await using var conn = await OpenConnectionAsync(); + await conn.ExecuteNonQueryAsync("CREATE TEMP TABLE data (geom GEOMETRY, geog GEOGRAPHY)"); + await using (var cmd = new NpgsqlCommand("INSERT INTO data (geom, geog) VALUES (@p, @p)", conn)) { cmd.Parameters.AddWithValue("@p", point); cmd.ExecuteNonQuery(); } - using (var cmd = new NpgsqlCommand("SELECT geom, geog FROM data", conn)) - using (var reader = cmd.ExecuteReader()) + await using (var cmd = new NpgsqlCommand("SELECT geom, geog FROM data", conn)) + await using (var reader = cmd.ExecuteReader()) { reader.Read(); Assert.That(reader[0], Is.EqualTo(point)); @@ -215,7 +200,7 @@ public async Task Roundtrip_geometry_geography() [Test, Explicit] public async Task Concurrency_test() { - await using var adminConnection = OpenConnection(); + await using var adminConnection = await OpenConnectionAsync(); var table = await CreateTempTable( adminConnection, "point GEOMETRY, linestring GEOMETRY, polygon GEOMETRY, " + @@ -324,7 +309,7 @@ protected ValueTask 
OpenConnectionAsync(string? connectionStri }); if (handleOrdinates == Ordinates.XY) - NtsDataSource = dataSource; + _xyDataSource ??= dataSource; return dataSource.OpenConnectionAsync(); } @@ -343,6 +328,8 @@ public async Task SetUp() public async Task Teardown() => await Task.WhenAll(NtsDataSources.Values.Select(async ds => await ds.DisposeAsync())); - NpgsqlDataSource NtsDataSource = default!; + protected override NpgsqlDataSource DataSource => _xyDataSource ?? throw new InvalidOperationException(); + NpgsqlDataSource? _xyDataSource; + ConcurrentDictionary NtsDataSources = new(); } diff --git a/test/Npgsql.NodaTime.Tests/NodaTimeInfinityTests.cs b/test/Npgsql.PluginTests/NodaTimeInfinityTests.cs similarity index 78% rename from test/Npgsql.NodaTime.Tests/NodaTimeInfinityTests.cs rename to test/Npgsql.PluginTests/NodaTimeInfinityTests.cs index b719449e1d..59f581e7de 100644 --- a/test/Npgsql.NodaTime.Tests/NodaTimeInfinityTests.cs +++ b/test/Npgsql.PluginTests/NodaTimeInfinityTests.cs @@ -7,32 +7,50 @@ using NUnit.Framework; using static Npgsql.NodaTime.Internal.NodaTimeUtils; -namespace Npgsql.NodaTime.Tests; +namespace Npgsql.PluginTests; [TestFixture(false)] #if DEBUG [TestFixture(true)] -[NonParallelizable] +[NonParallelizable] // Since this test suite manipulates an AppContext switch #endif -public class NodaTimeInfinityTests : TestBase +public class NodaTimeInfinityTests : TestBase, IDisposable { [Test] // #4715 public async Task DateRange_with_upper_bound_infinity() { - if (DisableDateTimeInfinityConversions) + if (Statics.DisableDateTimeInfinityConversions) return; await AssertType( new DateInterval(LocalDate.MinIsoValue, LocalDate.MaxIsoValue), "[-infinity,infinity]", "daterange", - NpgsqlDbType.DateRange); + NpgsqlDbType.DateRange, + isNpgsqlDbTypeInferredFromClrType: false, skipArrayCheck: true); // NpgsqlRange[] is mapped to multirange by default, not array; test separately + + await AssertType( + new [] {new DateInterval(LocalDate.MinIsoValue, 
LocalDate.MaxIsoValue)}, + """{"[-infinity,infinity]"}""", + "daterange[]", + NpgsqlDbType.DateRange | NpgsqlDbType.Array, + isDefault: false, skipArrayCheck: true); + + await using var conn = await OpenConnectionAsync(); + if (conn.PostgreSqlVersion < new Version(14, 0)) + return; + + await AssertType( + new [] {new DateInterval(LocalDate.MinIsoValue, LocalDate.MaxIsoValue)}, + """{[-infinity,infinity]}""", + "datemultirange", + NpgsqlDbType.DateMultirange, isNpgsqlDbTypeInferredFromClrType: false, skipArrayCheck: true); } [Test] public async Task Timestamptz_read_values() { - if (DisableDateTimeInfinityConversions) + if (Statics.DisableDateTimeInfinityConversions) return; await using var conn = await OpenConnectionAsync(); @@ -50,7 +68,7 @@ public async Task Timestamptz_read_values() [Test] public async Task Timestamptz_write_values() { - if (DisableDateTimeInfinityConversions) + if (Statics.DisableDateTimeInfinityConversions) return; await using var conn = await OpenConnectionAsync(); @@ -83,7 +101,7 @@ public async Task Timestamptz_write() Parameters = { new() { Value = Instant.MinValue, NpgsqlDbType = NpgsqlDbType.TimestampTz } } }; - if (DisableDateTimeInfinityConversions) + if (Statics.DisableDateTimeInfinityConversions) { // NodaTime Instant.MinValue is outside the PG timestamp range. Assert.That(async () => await cmd.ExecuteScalarAsync(), @@ -100,7 +118,7 @@ public async Task Timestamptz_write() Parameters = { new() { Value = Instant.MaxValue, NpgsqlDbType = NpgsqlDbType.TimestampTz } } }; - Assert.That(await cmd2.ExecuteScalarAsync(), Is.EqualTo(DisableDateTimeInfinityConversions ? "9999-12-31 23:59:59.999999" : "infinity")); + Assert.That(await cmd2.ExecuteScalarAsync(), Is.EqualTo(Statics.DisableDateTimeInfinityConversions ? 
"9999-12-31 23:59:59.999999" : "infinity")); } [Test] @@ -113,7 +131,7 @@ public async Task Timestamptz_read() await using var reader = await cmd.ExecuteReaderAsync(); await reader.ReadAsync(); - if (DisableDateTimeInfinityConversions) + if (Statics.DisableDateTimeInfinityConversions) { Assert.That(() => reader[0], Throws.Exception.TypeOf()); Assert.That(() => reader[1], Throws.Exception.TypeOf()); @@ -130,14 +148,12 @@ public async Task Timestamp_write() { await using var conn = await OpenConnectionAsync(); - // TODO: Switch to use LocalDateTime.MinMaxValue when available (#4061) - await using var cmd = new NpgsqlCommand("SELECT $1::text", conn) { - Parameters = { new() { Value = LocalDate.MinIsoValue + LocalTime.MinValue, NpgsqlDbType = NpgsqlDbType.Timestamp } } + Parameters = { new() { Value = LocalDateTime.MinIsoValue, NpgsqlDbType = NpgsqlDbType.Timestamp } } }; - if (DisableDateTimeInfinityConversions) + if (Statics.DisableDateTimeInfinityConversions) { // NodaTime LocalDateTime.MinValue is outside the PG timestamp range. Assert.That(async () => await cmd.ExecuteScalarAsync(), @@ -151,10 +167,10 @@ public async Task Timestamp_write() await using var cmd2 = new NpgsqlCommand("SELECT $1::text", conn) { - Parameters = { new() { Value = LocalDate.MaxIsoValue + LocalTime.MaxValue, NpgsqlDbType = NpgsqlDbType.Timestamp } } + Parameters = { new() { Value = LocalDateTime.MaxIsoValue, NpgsqlDbType = NpgsqlDbType.Timestamp } } }; - Assert.That(await cmd2.ExecuteScalarAsync(), Is.EqualTo(DisableDateTimeInfinityConversions + Assert.That(await cmd2.ExecuteScalarAsync(), Is.EqualTo(Statics.DisableDateTimeInfinityConversions ? 
"9999-12-31 23:59:59.999999" : "infinity")); } @@ -169,16 +185,15 @@ public async Task Timestamp_read() await using var reader = await cmd.ExecuteReaderAsync(); await reader.ReadAsync(); - if (DisableDateTimeInfinityConversions) + if (Statics.DisableDateTimeInfinityConversions) { Assert.That(() => reader[0], Throws.Exception.TypeOf()); Assert.That(() => reader[1], Throws.Exception.TypeOf()); } else { - // TODO: Switch to use LocalDateTime.MinMaxValue when available (#4061) - Assert.That(reader[0], Is.EqualTo(LocalDate.MinIsoValue + LocalTime.MinValue)); - Assert.That(reader[1], Is.EqualTo(LocalDate.MaxIsoValue + LocalTime.MaxValue)); + Assert.That(reader[0], Is.EqualTo(LocalDateTime.MinIsoValue)); + Assert.That(reader[1], Is.EqualTo(LocalDateTime.MaxIsoValue)); } } @@ -193,7 +208,7 @@ public async Task Date_write() }; // LocalDate.MinIsoValue is outside of the PostgreSQL date range - if (DisableDateTimeInfinityConversions) + if (Statics.DisableDateTimeInfinityConversions) Assert.That(async () => await cmd.ExecuteScalarAsync(), Throws.Exception.TypeOf() .With.Property(nameof(PostgresException.SqlState)).EqualTo(PostgresErrorCodes.DatetimeFieldOverflow)); @@ -202,7 +217,7 @@ public async Task Date_write() cmd.Parameters[0].Value = LocalDate.MaxIsoValue; - Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(DisableDateTimeInfinityConversions ? "9999-12-31" : "infinity")); + Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(Statics.DisableDateTimeInfinityConversions ? 
"9999-12-31" : "infinity")); } [Test] @@ -215,7 +230,7 @@ public async Task Date_read() await using var reader = await cmd.ExecuteReaderAsync(); await reader.ReadAsync(); - if (DisableDateTimeInfinityConversions) + if (Statics.DisableDateTimeInfinityConversions) { Assert.That(() => reader[0], Throws.Exception.TypeOf()); Assert.That(() => reader[1], Throws.Exception.TypeOf()); @@ -230,7 +245,7 @@ public async Task Date_read() [Test, Description("Makes sure that when ConvertInfinityDateTime is true, infinity values are properly converted")] public async Task DateConvertInfinity() { - if (DisableDateTimeInfinityConversions) + if (Statics.DisableDateTimeInfinityConversions) return; await using var conn = await OpenConnectionAsync(); @@ -266,20 +281,11 @@ public async Task DateConvertInfinity() } } - protected override async ValueTask OpenConnectionAsync() - { - var conn = await base.OpenConnectionAsync(); - await conn.ExecuteNonQueryAsync("SET TimeZone='Europe/Berlin'"); - return conn; - } - - protected override NpgsqlConnection OpenConnection() - => throw new NotSupportedException(); + protected override NpgsqlDataSource DataSource { get; } public NodaTimeInfinityTests(bool disableDateTimeInfinityConversions) { #if DEBUG - DisableDateTimeInfinityConversions = disableDateTimeInfinityConversions; Statics.DisableDateTimeInfinityConversions = disableDateTimeInfinityConversions; #else if (disableDateTimeInfinityConversions) @@ -288,13 +294,19 @@ public NodaTimeInfinityTests(bool disableDateTimeInfinityConversions) "NodaTimeInfinityTests rely on the Npgsql.DisableDateTimeInfinityConversions AppContext switch and can only be run in DEBUG builds"); } #endif + + var builder = CreateDataSourceBuilder(); + builder.UseNodaTime(); + builder.ConnectionStringBuilder.Options = "-c TimeZone=Europe/Berlin"; + DataSource = builder.Build(); } public void Dispose() { #if DEBUG - DisableDateTimeInfinityConversions = false; Statics.DisableDateTimeInfinityConversions = false; #endif + + 
DataSource.Dispose(); } } diff --git a/test/Npgsql.NodaTime.Tests/NodaTimeTests.cs b/test/Npgsql.PluginTests/NodaTimeTests.cs similarity index 64% rename from test/Npgsql.NodaTime.Tests/NodaTimeTests.cs rename to test/Npgsql.PluginTests/NodaTimeTests.cs index 1aa6784261..adccd163cc 100644 --- a/test/Npgsql.NodaTime.Tests/NodaTimeTests.cs +++ b/test/Npgsql.PluginTests/NodaTimeTests.cs @@ -2,6 +2,7 @@ using System.Data; using System.Threading.Tasks; using NodaTime; +using Npgsql.NodaTime.Properties; using Npgsql.Tests; using NpgsqlTypes; using NUnit.Framework; @@ -10,10 +11,9 @@ // ReSharper disable AccessToModifiedClosure // ReSharper disable AccessToDisposedClosure -namespace Npgsql.NodaTime.Tests; +namespace Npgsql.PluginTests; -// Since this test suite manipulates TimeZone, it is incompatible with multiplexing -public class NodaTimeTests : TestBase +public class NodaTimeTests : MultiplexingTestBase, IDisposable { #region Timestamp without time zone @@ -29,7 +29,8 @@ public class NodaTimeTests : TestBase [Test, TestCaseSource(nameof(TimestampValues))] public Task Timestamp_as_LocalDateTime(LocalDateTime localDateTime, string sqlLiteral) - => AssertType(localDateTime, sqlLiteral, "timestamp without time zone", NpgsqlDbType.Timestamp, DbType.DateTime2); + => AssertType(localDateTime, sqlLiteral, "timestamp without time zone", NpgsqlDbType.Timestamp, DbType.DateTime2, + isNpgsqlDbTypeInferredFromClrType: false); [Test] public Task Timestamp_as_unspecified_DateTime() @@ -81,17 +82,41 @@ public Task Timestamp_cannot_use_as_DateTimeOffset() [Test] public Task Timestamp_cannot_write_utc_DateTime() - => AssertTypeUnsupportedWrite(new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), "timestamp without time zone"); + => AssertTypeUnsupportedWrite(new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), "timestamp without time zone"); [Test] - public Task Tsrange_as_NpgsqlRange_of_LocalDateTime() - => AssertType( + public async Task 
Tsrange_as_NpgsqlRange_of_LocalDateTime() + { + await AssertType( new NpgsqlRange( new(1998, 4, 12, 13, 26, 38), new(1998, 4, 12, 15, 26, 38)), - @"[""1998-04-12 13:26:38"",""1998-04-12 15:26:38""]", + """["1998-04-12 13:26:38","1998-04-12 15:26:38"]""", "tsrange", - NpgsqlDbType.TimestampRange); + NpgsqlDbType.TimestampRange, + isNpgsqlDbTypeInferredFromClrType: false, skipArrayCheck: true); // NpgsqlRange[] is mapped to multirange by default, not array; test separately + + await AssertType( + new [] { new NpgsqlRange( + new(1998, 4, 12, 13, 26, 38), + new(1998, 4, 12, 15, 26, 38)), }, + """{"[\"1998-04-12 13:26:38\",\"1998-04-12 15:26:38\"]"}""", + "tsrange[]", + NpgsqlDbType.TimestampRange | NpgsqlDbType.Array, + isDefault: false, skipArrayCheck: true); + + await using var conn = await OpenConnectionAsync(); + if (conn.PostgreSqlVersion < new Version(14, 0)) + return; + + await AssertType( + new [] { new NpgsqlRange( + new(1998, 4, 12, 13, 26, 38), + new(1998, 4, 12, 15, 26, 38)), }, + """{["1998-04-12 13:26:38","1998-04-12 15:26:38"]}""", + "tsmultirange", + NpgsqlDbType.TimestampMultirange, isNpgsqlDbTypeInferredFromClrType: false, skipArrayCheck: true); + } [Test] public async Task Tsmultirange_as_array_of_NpgsqlRange_of_LocalDateTime() @@ -109,9 +134,10 @@ await AssertType( new(1998, 4, 13, 13, 26, 38), new(1998, 4, 13, 15, 26, 38)), }, - @"{[""1998-04-12 13:26:38"",""1998-04-12 15:26:38""],[""1998-04-13 13:26:38"",""1998-04-13 15:26:38""]}", + """{["1998-04-12 13:26:38","1998-04-12 15:26:38"],["1998-04-13 13:26:38","1998-04-13 15:26:38"]}""", "tsmultirange", - NpgsqlDbType.TimestampMultirange); + NpgsqlDbType.TimestampMultirange, + isNpgsqlDbTypeInferredFromClrType: false); } #endregion Timestamp without time zone @@ -132,7 +158,8 @@ await AssertType( [Test, TestCaseSource(nameof(TimestamptzValues))] public Task Timestamptz_as_Instant(Instant instant, string sqlLiteral) - => AssertType(instant, sqlLiteral, "timestamp with time zone", 
NpgsqlDbType.TimestampTz, DbType.DateTime); + => AssertType(instant, sqlLiteral, "timestamp with time zone", NpgsqlDbType.TimestampTz, DbType.DateTime, + isNpgsqlDbTypeInferredFromClrType: false); [Test] public Task Timestamptz_as_ZonedDateTime() @@ -142,6 +169,7 @@ public Task Timestamptz_as_ZonedDateTime() "timestamp with time zone", NpgsqlDbType.TimestampTz, DbType.DateTime, + isNpgsqlDbTypeInferredFromClrType: false, isDefaultForReading: false); [Test] @@ -152,6 +180,7 @@ public Task Timestamptz_as_OffsetDateTime() "timestamp with time zone", NpgsqlDbType.TimestampTz, DbType.DateTime, + isNpgsqlDbTypeInferredFromClrType: false, isDefaultForReading: false); [Test] @@ -190,56 +219,81 @@ public Task Timestamptz_cannot_use_as_LocalDateTime() [Test] public async Task Timestamptz_cannot_write_non_utc_ZonedDateTime() - => await AssertTypeUnsupportedWrite( + => await AssertTypeUnsupportedWrite( new LocalDateTime().InUtc().ToInstant().InZone(DateTimeZoneProviders.Tzdb["Europe/Berlin"]), "timestamp with time zone"); [Test] public async Task Timestamptz_cannot_write_non_utc_OffsetDateTime() - => await AssertTypeUnsupportedWrite(new LocalDateTime().WithOffset(Offset.FromHours(2)), "timestamp with time zone"); + => await AssertTypeUnsupportedWrite(new LocalDateTime().WithOffset(Offset.FromHours(2)), "timestamp with time zone"); [Test] public async Task Timestamptz_cannot_write_non_utc_DateTime() { - await AssertTypeUnsupportedWrite(new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Unspecified), "timestamp with time zone"); - await AssertTypeUnsupportedWrite(new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Local), "timestamp with time zone"); + await AssertTypeUnsupportedWrite(new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Unspecified), "timestamp with time zone"); + await AssertTypeUnsupportedWrite(new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Local), "timestamp with time zone"); } [Test] - public Task Tstzrange_as_Interval() - => AssertType( + public async Task 
Tstzrange_as_Interval() + { + await AssertType( new Interval( new LocalDateTime(1998, 4, 12, 13, 26, 38).InUtc().ToInstant(), new LocalDateTime(1998, 4, 12, 15, 26, 38).InUtc().ToInstant()), - @"[""1998-04-12 15:26:38+02"",""1998-04-12 17:26:38+02"")", + """["1998-04-12 15:26:38+02","1998-04-12 17:26:38+02")""", "tstzrange", - NpgsqlDbType.TimestampTzRange); + NpgsqlDbType.TimestampTzRange, + isNpgsqlDbTypeInferredFromClrType: false, skipArrayCheck: true); // NpgsqlRange[] is mapped to multirange by default, not array; test separately + + await AssertType( + new [] { new Interval( + new LocalDateTime(1998, 4, 12, 13, 26, 38).InUtc().ToInstant(), + new LocalDateTime(1998, 4, 12, 15, 26, 38).InUtc().ToInstant()), }, + """{"[\"1998-04-12 15:26:38+02\",\"1998-04-12 17:26:38+02\")"}""", + "tstzrange[]", + NpgsqlDbType.TimestampTzRange | NpgsqlDbType.Array, + isDefault: false, skipArrayCheck: true); + + await using var conn = await OpenConnectionAsync(); + if (conn.PostgreSqlVersion < new Version(14, 0)) + return; + + await AssertType( + new [] { new Interval( + new LocalDateTime(1998, 4, 12, 13, 26, 38).InUtc().ToInstant(), + new LocalDateTime(1998, 4, 12, 15, 26, 38).InUtc().ToInstant()), }, + """{["1998-04-12 15:26:38+02","1998-04-12 17:26:38+02")}""", + "tstzmultirange", + NpgsqlDbType.TimestampTzMultirange, isNpgsqlDbTypeInferredFromClrType: false, skipArrayCheck: true); + } [Test] public Task Tstzrange_with_no_end_as_Interval() => AssertType( - new Interval( - new LocalDateTime(1998, 4, 12, 13, 26, 38).InUtc().ToInstant(), null), - @"[""1998-04-12 15:26:38+02"",)", + new Interval(new LocalDateTime(1998, 4, 12, 13, 26, 38).InUtc().ToInstant(), null), + """["1998-04-12 15:26:38+02",)""", "tstzrange", - NpgsqlDbType.TimestampTzRange); + NpgsqlDbType.TimestampTzRange, + isNpgsqlDbTypeInferredFromClrType: false, skipArrayCheck: true); [Test] public Task Tstzrange_with_no_start_as_Interval() => AssertType( - new Interval( null, - new LocalDateTime(1998, 4, 12, 13, 26, 
38).InUtc().ToInstant()), - @"(,""1998-04-12 15:26:38+02"")", + new Interval(null, new LocalDateTime(1998, 4, 12, 13, 26, 38).InUtc().ToInstant()), + """(,"1998-04-12 15:26:38+02")""", "tstzrange", - NpgsqlDbType.TimestampTzRange); + NpgsqlDbType.TimestampTzRange, + isNpgsqlDbTypeInferredFromClrType: false, skipArrayCheck: true); [Test] public Task Tstzrange_with_no_start_or_end_as_Interval() => AssertType( new Interval(null, null), - @"(,)", + """(,)""", "tstzrange", - NpgsqlDbType.TimestampTzRange); + NpgsqlDbType.TimestampTzRange, + isNpgsqlDbTypeInferredFromClrType: false, skipArrayCheck: true); [Test] public Task Tstzrange_as_NpgsqlRange_of_Instant() @@ -247,10 +301,11 @@ public Task Tstzrange_as_NpgsqlRange_of_Instant() new NpgsqlRange( new LocalDateTime(1998, 4, 12, 13, 26, 38).InUtc().ToInstant(), new LocalDateTime(1998, 4, 12, 15, 26, 38).InUtc().ToInstant()), - @"[""1998-04-12 15:26:38+02"",""1998-04-12 17:26:38+02""]", + """["1998-04-12 15:26:38+02","1998-04-12 17:26:38+02"]""", "tstzrange", NpgsqlDbType.TimestampTzRange, - isDefaultForReading: false); + isNpgsqlDbTypeInferredFromClrType: false, + isDefaultForReading: false, skipArrayCheck: true); [Test] public Task Tstzrange_as_NpgsqlRange_of_ZonedDateTime() @@ -258,10 +313,11 @@ public Task Tstzrange_as_NpgsqlRange_of_ZonedDateTime() new NpgsqlRange( new LocalDateTime(1998, 4, 12, 13, 26, 38).InUtc(), new LocalDateTime(1998, 4, 12, 15, 26, 38).InUtc()), - @"[""1998-04-12 15:26:38+02"",""1998-04-12 17:26:38+02""]", + """["1998-04-12 15:26:38+02","1998-04-12 17:26:38+02"]""", "tstzrange", NpgsqlDbType.TimestampTzRange, - isDefaultForReading: false); + isNpgsqlDbTypeInferredFromClrType: false, + isDefaultForReading: false, skipArrayCheck: true); [Test] public Task Tstzrange_as_NpgsqlRange_of_OffsetDateTime() @@ -269,10 +325,11 @@ public Task Tstzrange_as_NpgsqlRange_of_OffsetDateTime() new NpgsqlRange( new LocalDateTime(1998, 4, 12, 13, 26, 38).WithOffset(Offset.Zero), new LocalDateTime(1998, 4, 12, 15, 
26, 38).WithOffset(Offset.Zero)), - @"[""1998-04-12 15:26:38+02"",""1998-04-12 17:26:38+02""]", + """["1998-04-12 15:26:38+02","1998-04-12 17:26:38+02"]""", "tstzrange", NpgsqlDbType.TimestampTzRange, - isDefaultForReading: false); + isNpgsqlDbTypeInferredFromClrType: false, + isDefaultForReading: false, skipArrayCheck: true); [Test] public async Task Tstzmultirange_as_array_of_Interval() @@ -290,9 +347,10 @@ await AssertType( new LocalDateTime(1998, 4, 13, 13, 26, 38).InUtc().ToInstant(), new LocalDateTime(1998, 4, 13, 15, 26, 38).InUtc().ToInstant()), }, - @"{[""1998-04-12 15:26:38+02"",""1998-04-12 17:26:38+02""),[""1998-04-13 15:26:38+02"",""1998-04-13 17:26:38+02"")}", + """{["1998-04-12 15:26:38+02","1998-04-12 17:26:38+02"),["1998-04-13 15:26:38+02","1998-04-13 17:26:38+02")}""", "tstzmultirange", - NpgsqlDbType.TimestampTzMultirange); + NpgsqlDbType.TimestampTzMultirange, + isNpgsqlDbTypeInferredFromClrType: false); } [Test] @@ -311,9 +369,10 @@ await AssertType( new LocalDateTime(1998, 4, 13, 13, 26, 38).InUtc().ToInstant(), new LocalDateTime(1998, 4, 13, 15, 26, 38).InUtc().ToInstant()), }, - @"{[""1998-04-12 15:26:38+02"",""1998-04-12 17:26:38+02""],[""1998-04-13 15:26:38+02"",""1998-04-13 17:26:38+02""]}", + """{["1998-04-12 15:26:38+02","1998-04-12 17:26:38+02"],["1998-04-13 15:26:38+02","1998-04-13 17:26:38+02"]}""", "tstzmultirange", NpgsqlDbType.TimestampTzMultirange, + isNpgsqlDbTypeInferredFromClrType: false, isDefaultForReading: false); } @@ -333,9 +392,10 @@ await AssertType( new LocalDateTime(1998, 4, 13, 13, 26, 38).InUtc(), new LocalDateTime(1998, 4, 13, 15, 26, 38).InUtc()), }, - @"{[""1998-04-12 15:26:38+02"",""1998-04-12 17:26:38+02""],[""1998-04-13 15:26:38+02"",""1998-04-13 17:26:38+02""]}", + """{["1998-04-12 15:26:38+02","1998-04-12 17:26:38+02"],["1998-04-13 15:26:38+02","1998-04-13 17:26:38+02"]}""", "tstzmultirange", NpgsqlDbType.TimestampTzMultirange, + isNpgsqlDbTypeInferredFromClrType: false, isDefaultForReading: false); } @@ 
-355,9 +415,10 @@ await AssertType( new LocalDateTime(1998, 4, 13, 13, 26, 38).WithOffset(Offset.Zero), new LocalDateTime(1998, 4, 13, 15, 26, 38).WithOffset(Offset.Zero)), }, - @"{[""1998-04-12 15:26:38+02"",""1998-04-12 17:26:38+02""],[""1998-04-13 15:26:38+02"",""1998-04-13 17:26:38+02""]}", + """{["1998-04-12 15:26:38+02","1998-04-12 17:26:38+02"],["1998-04-13 15:26:38+02","1998-04-13 17:26:38+02"]}""", "tstzmultirange", NpgsqlDbType.TimestampTzMultirange, + isNpgsqlDbTypeInferredFromClrType: false, isDefaultForReading: false); } @@ -385,9 +446,10 @@ await AssertType( null, null) }, - @"{""[\""1998-04-12 15:26:38+02\"",\""1998-04-12 17:26:38+02\"")"",""[\""1998-04-13 15:26:38+02\"",\""1998-04-13 17:26:38+02\"")"",""[\""1998-04-13 15:26:38+02\"",)"",""(,\""1998-04-13 15:26:38+02\"")"",""(,)""}", + """{"[\"1998-04-12 15:26:38+02\",\"1998-04-12 17:26:38+02\")","[\"1998-04-13 15:26:38+02\",\"1998-04-13 17:26:38+02\")","[\"1998-04-13 15:26:38+02\",)","(,\"1998-04-13 15:26:38+02\")","(,)"}""", "tstzrange[]", NpgsqlDbType.TimestampTzRange | NpgsqlDbType.Array, + isNpgsqlDbTypeInferredFromClrType: false, isDefaultForWriting: false); } @@ -406,9 +468,10 @@ await AssertType( new LocalDateTime(1998, 4, 13, 13, 26, 38).InUtc().ToInstant(), new LocalDateTime(1998, 4, 13, 15, 26, 38).InUtc().ToInstant()), }, - @"{""[\""1998-04-12 15:26:38+02\"",\""1998-04-12 17:26:38+02\""]"",""[\""1998-04-13 15:26:38+02\"",\""1998-04-13 17:26:38+02\""]""}", + """{"[\"1998-04-12 15:26:38+02\",\"1998-04-12 17:26:38+02\"]","[\"1998-04-13 15:26:38+02\",\"1998-04-13 17:26:38+02\"]"}""", "tstzrange[]", NpgsqlDbType.TimestampTzRange | NpgsqlDbType.Array, + isNpgsqlDbTypeInferredFromClrType: false, isDefault: false); } @@ -418,7 +481,8 @@ await AssertType( [Test] public Task Date_as_LocalDate() - => AssertType(new LocalDate(2020, 10, 1), "2020-10-01", "date", NpgsqlDbType.Date, DbType.Date); + => AssertType(new LocalDate(2020, 10, 1), "2020-10-01", "date", NpgsqlDbType.Date, DbType.Date, + 
isNpgsqlDbTypeInferredFromClrType: false); [Test] public Task Date_as_DateTime() @@ -429,21 +493,61 @@ public Task Date_as_int() => AssertType(7579, "2020-10-01", "date", NpgsqlDbType.Date, DbType.Date, isDefault: false); [Test] - public Task Daterange_as_DateInterval() - => AssertType( + public async Task Daterange_as_DateInterval() + { + await AssertType( new DateInterval(new(2002, 3, 4), new(2002, 3, 6)), "[2002-03-04,2002-03-07)", "daterange", - NpgsqlDbType.DateRange); + NpgsqlDbType.DateRange, + isNpgsqlDbTypeInferredFromClrType: false, skipArrayCheck: true); // DateInterval[] is mapped to multirange by default, not array; test separately + + await AssertType( + new [] {new DateInterval(new(2002, 3, 4), new(2002, 3, 6))}, + """{"[2002-03-04,2002-03-07)"}""", + "daterange[]", + NpgsqlDbType.DateRange | NpgsqlDbType.Array, + isDefault: false, skipArrayCheck: true); + + await using var conn = await OpenConnectionAsync(); + if (conn.PostgreSqlVersion < new Version(14, 0)) + return; + + await AssertType( + new [] {new DateInterval(new(2002, 3, 4), new(2002, 3, 6))}, + """{[2002-03-04,2002-03-07)}""", + "datemultirange", + NpgsqlDbType.DateMultirange, isNpgsqlDbTypeInferredFromClrType: false, skipArrayCheck: true); + } [Test] - public Task Daterange_as_NpgsqlRange_of_LocalDate() - => AssertType( + public async Task Daterange_as_NpgsqlRange_of_LocalDate() + { + await AssertType( new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false), "[2002-03-04,2002-03-06)", "daterange", NpgsqlDbType.DateRange, - isDefaultForReading: false); + isNpgsqlDbTypeInferredFromClrType: false, + isDefaultForReading: false, skipArrayCheck: true); // NpgsqlRange[] is mapped to multirange by default, not array; test separately + + await AssertType( + new [] { new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false) }, + """{"[2002-03-04,2002-03-06)"}""", + "daterange[]", + NpgsqlDbType.DateRange | NpgsqlDbType.Array, + isDefault: false, skipArrayCheck: true); + + await using var 
conn = await OpenConnectionAsync(); + if (conn.PostgreSqlVersion < new Version(14, 0)) + return; + + await AssertType( + new [] { new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false) }, + """{[2002-03-04,2002-03-06)}""", + "datemultirange", + NpgsqlDbType.DateMultirange, isDefault: false, skipArrayCheck: true); + } [Test] public async Task Datemultirange_as_array_of_DateInterval() @@ -459,7 +563,8 @@ await AssertType( }, "{[2002-03-04,2002-03-06),[2002-03-08,2002-03-11)}", "datemultirange", - NpgsqlDbType.DateMultirange); + NpgsqlDbType.DateMultirange, + isNpgsqlDbTypeInferredFromClrType: false); } [Test] @@ -477,7 +582,8 @@ await AssertType( "{[2002-03-04,2002-03-06),[2002-03-08,2002-03-11)}", "datemultirange", NpgsqlDbType.DateMultirange, - isDefaultForReading: false); + isDefaultForReading: false, + isNpgsqlDbTypeInferredFromClrType: false); } #if NET6_0_OR_GREATER @@ -486,13 +592,32 @@ public Task Date_as_DateOnly() => AssertType(new DateOnly(2020, 10, 1), "2020-10-01", "date", NpgsqlDbType.Date, DbType.Date, isDefaultForReading: false); [Test] - public Task Daterange_as_NpgsqlRange_of_DateOnly() - => AssertType( + public async Task Daterange_as_NpgsqlRange_of_DateOnly() + { + await AssertType( new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false), "[2002-03-04,2002-03-06)", "daterange", NpgsqlDbType.DateRange, - isDefaultForReading: false); + isDefaultForReading: false, skipArrayCheck: true); + + await AssertType( + new [] { new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false) }, + """{"[2002-03-04,2002-03-06)"}""", + "daterange[]", + NpgsqlDbType.DateRange | NpgsqlDbType.Array, + isDefault: false, skipArrayCheck: true); + + await using var conn = await OpenConnectionAsync(); + if (conn.PostgreSqlVersion < new Version(14, 0)) + return; + + await AssertType( + new [] { new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false) }, + """{[2002-03-04,2002-03-06)}""", + "datemultirange", + NpgsqlDbType.DateMultirange, isDefault: 
false, skipArrayCheck: true); + } #endif [Test] @@ -506,7 +631,7 @@ await AssertType( new DateInterval(new(2002, 3, 4), new(2002, 3, 5)), new DateInterval(new(2002, 3, 8), new(2002, 3, 10)) }, - @"{""[2002-03-04,2002-03-06)"",""[2002-03-08,2002-03-11)""}", + """{"[2002-03-04,2002-03-06)","[2002-03-08,2002-03-11)"}""", "daterange[]", NpgsqlDbType.DateRange | NpgsqlDbType.Array, isDefaultForWriting: false); @@ -523,7 +648,7 @@ await AssertType( new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false), new NpgsqlRange(new(2002, 3, 8), true, new(2002, 3, 11), false) }, - @"{""[2002-03-04,2002-03-06)"",""[2002-03-08,2002-03-11)""}", + """{"[2002-03-04,2002-03-06)","[2002-03-08,2002-03-11)"}""", "daterange[]", NpgsqlDbType.DateRange | NpgsqlDbType.Array, isDefault: false); @@ -535,7 +660,8 @@ await AssertType( [Test] public Task Time_as_LocalTime() - => AssertType(new LocalTime(10, 45, 34, 500), "10:45:34.5", "time without time zone", NpgsqlDbType.Time, DbType.Time); + => AssertType(new LocalTime(10, 45, 34, 500), "10:45:34.5", "time without time zone", NpgsqlDbType.Time, DbType.Time, + isNpgsqlDbTypeInferredFromClrType: false); [Test] public Task Time_as_TimeSpan() @@ -569,7 +695,8 @@ public Task TimeTz_as_OffsetTime() new OffsetTime(new LocalTime(1, 2, 3, 4).PlusNanoseconds(5000), Offset.FromHoursAndMinutes(3, 30) + Offset.FromSeconds(5)), "01:02:03.004005+03:30:05", "time with time zone", - NpgsqlDbType.TimeTz); + NpgsqlDbType.TimeTz, + isNpgsqlDbTypeInferredFromClrType: false); [Test] public async Task TimeTz_as_DateTimeOffset() @@ -608,7 +735,8 @@ public Task Interval_as_Period() }.Build().Normalize(), "1 year 2 mons 25 days 05:06:07.008009", "interval", - NpgsqlDbType.Interval); + NpgsqlDbType.Interval, + isNpgsqlDbTypeInferredFromClrType: false); [Test] public Task Interval_as_Duration() @@ -618,24 +746,28 @@ public Task Interval_as_Duration() "5 days 00:04:03.002001", "interval", NpgsqlDbType.Interval, - isDefaultForReading: false); + isDefaultForReading: 
false, + isNpgsqlDbTypeInferredFromClrType: false); [Test] - public Task Interval_as_Duration_with_months_fails() - => AssertTypeUnsupportedRead("2 months", "interval"); + public async Task Interval_as_Duration_with_months_fails() + { + var exception = await AssertTypeUnsupportedRead("2 months", "interval"); + Assert.That(exception.Message, Is.EqualTo(NpgsqlNodaTimeStrings.CannotReadIntervalWithMonthsAsDuration)); + } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/3438")] public async Task Bug3438() { await using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p1, @p2", conn); + await using var cmd = new NpgsqlCommand("SELECT @p1, @p2", conn); var expected = Duration.FromSeconds(2148); cmd.Parameters.Add(new NpgsqlParameter("p1", NpgsqlDbType.Interval) { Value = expected }); cmd.Parameters.AddWithValue("p2", expected); - using var reader = cmd.ExecuteReader(); - reader.Read(); + await using var reader = await cmd.ExecuteReaderAsync(); + await reader.ReadAsync(); for (var i = 0; i < 2; i++) { Assert.That(reader.GetFieldType(i), Is.EqualTo(typeof(Period))); @@ -646,15 +778,19 @@ public async Task Bug3438() #region Support - protected override async ValueTask OpenConnectionAsync() + protected override NpgsqlDataSource DataSource { get; } + + public NodaTimeTests(MultiplexingMode multiplexingMode) + : base(multiplexingMode) { - var conn = await base.OpenConnectionAsync(); - await conn.ExecuteNonQueryAsync("SET TimeZone='Europe/Berlin'"); - return conn; + var builder = CreateDataSourceBuilder(); + builder.UseNodaTime(); + builder.ConnectionStringBuilder.Options = "-c TimeZone=Europe/Berlin"; + DataSource = builder.Build(); } - protected override NpgsqlConnection OpenConnection() - => throw new NotSupportedException(); + public void Dispose() + => DataSource.Dispose(); #endregion Support } diff --git a/test/Npgsql.PluginTests/Npgsql.PluginTests.csproj b/test/Npgsql.PluginTests/Npgsql.PluginTests.csproj index 
b7e0b21a09..30dfb8ea16 100644 --- a/test/Npgsql.PluginTests/Npgsql.PluginTests.csproj +++ b/test/Npgsql.PluginTests/Npgsql.PluginTests.csproj @@ -1,11 +1,16 @@  + + + + + diff --git a/test/Npgsql.Specification.Tests/NpgsqlDataReaderTests.cs b/test/Npgsql.Specification.Tests/NpgsqlDataReaderTests.cs index 45bfb1a197..356d1da966 100644 --- a/test/Npgsql.Specification.Tests/NpgsqlDataReaderTests.cs +++ b/test/Npgsql.Specification.Tests/NpgsqlDataReaderTests.cs @@ -1,5 +1,4 @@ using AdoNet.Specification.Tests; -using Xunit; namespace Npgsql.Specification.Tests; diff --git a/test/Npgsql.Specification.Tests/Utility.cs b/test/Npgsql.Specification.Tests/Utility.cs index 9e91767d55..51bdc18dcd 100644 --- a/test/Npgsql.Specification.Tests/Utility.cs +++ b/test/Npgsql.Specification.Tests/Utility.cs @@ -1,4 +1,3 @@ -using System; using AdoNet.Specification.Tests; namespace Npgsql.Specification.Tests; diff --git a/test/Npgsql.Tests/AuthenticationTests.cs b/test/Npgsql.Tests/AuthenticationTests.cs index b0173e36a2..487bc5457c 100644 --- a/test/Npgsql.Tests/AuthenticationTests.cs +++ b/test/Npgsql.Tests/AuthenticationTests.cs @@ -7,7 +7,6 @@ using Npgsql.Properties; using Npgsql.Tests.Support; using NUnit.Framework; -using static Npgsql.Util.Statics; using static Npgsql.Tests.TestUtil; namespace Npgsql.Tests; @@ -292,6 +291,15 @@ public void Password_source_precedence() Assert.That(() => dataSource4.OpenConnection(), Throws.Nothing); } + + static DeferDisposable Defer(Action action) => new(action); + } + + readonly struct DeferDisposable : IDisposable + { + readonly Action _action; + public DeferDisposable(Action action) => _action = action; + public void Dispose() => _action(); } [Test, Description("Connects with a bad password to ensure the proper error is thrown")] diff --git a/test/Npgsql.Tests/BatchTests.cs b/test/Npgsql.Tests/BatchTests.cs index 2983285f85..96013a9676 100644 --- a/test/Npgsql.Tests/BatchTests.cs +++ b/test/Npgsql.Tests/BatchTests.cs @@ -1,4 +1,3 @@ -using 
Npgsql.Util; using NUnit.Framework; using System; using System.Collections.Generic; diff --git a/test/Npgsql.Tests/BugTests.cs b/test/Npgsql.Tests/BugTests.cs index d702f8d0b2..6dac813475 100644 --- a/test/Npgsql.Tests/BugTests.cs +++ b/test/Npgsql.Tests/BugTests.cs @@ -9,12 +9,15 @@ using System.Threading; using System.Threading.Tasks; using System.Transactions; +using Npgsql.Internal.Postgres; using static Npgsql.Tests.TestUtil; namespace Npgsql.Tests; public class BugTests : TestBase { + static uint ByteaOid => DefaultPgTypes.DataTypeNameMap[DataTypeNames.Bytea].Value; + #region Sequential reader bugs [Test, Description("In sequential access, performing a null check on a non-first field would check the first field")] @@ -71,18 +74,6 @@ public void Many_parameters_with_mixed_FormatCode() .Or.EqualTo(PostgresErrorCodes.TooManyColumns)); // PostgreSQL 14.5, 13.8, 12.12, 11.17 and 10.22 changed the returned error } - [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1238")] - public void Record_with_non_int_field() - { - using var conn = OpenConnection(); - using var cmd = new NpgsqlCommand("SELECT ('one'::TEXT, 2)", conn); - using var reader = cmd.ExecuteReader(); - reader.Read(); - var record = reader.GetFieldValue(0); - Assert.That(record[0], Is.EqualTo("one")); - Assert.That(record[1], Is.EqualTo(2)); - } - [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1450")] public void Bug1450() { @@ -1201,7 +1192,7 @@ LANGUAGE plpgsql AS END; $$;"); - Assert.ThrowsAsync(async () => await connection.ExecuteScalarAsync($"SELECT {func}(0)")); + Assert.ThrowsAsync(async () => await connection.ExecuteScalarAsync($"SELECT {func}(0)")); } [Test] @@ -1370,7 +1361,7 @@ public async Task Bug4099() await server .WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Bytea)) + .WriteRowDescription(new FieldDescription(ByteaOid)) .WriteDataRowWithFlush(data); var otherData = new byte[10]; @@ -1379,7 +1370,7 @@ await 
server .WriteReadyForQuery() .WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Bytea)) + .WriteRowDescription(new FieldDescription(ByteaOid)) .WriteDataRow(otherData) .WriteCommandComplete() .WriteReadyForQuery() diff --git a/test/Npgsql.Tests/CommandParameterTests.cs b/test/Npgsql.Tests/CommandParameterTests.cs new file mode 100644 index 0000000000..aa2cb0ee15 --- /dev/null +++ b/test/Npgsql.Tests/CommandParameterTests.cs @@ -0,0 +1,207 @@ +using System; +using System.Data; +using System.Threading.Tasks; +using NpgsqlTypes; +using NUnit.Framework; + +namespace Npgsql.Tests; + +public class CommandParameterTests : MultiplexingTestBase +{ + [Test] + [TestCase(CommandBehavior.Default)] + [TestCase(CommandBehavior.SequentialAccess)] + public async Task Input_and_output_parameters(CommandBehavior behavior) + { + using var conn = await OpenConnectionAsync(); + using var cmd = new NpgsqlCommand("SELECT @c-1 AS c, @a+2 AS b", conn); + cmd.Parameters.Add(new NpgsqlParameter("a", 3)); + var b = new NpgsqlParameter { ParameterName = "b", Direction = ParameterDirection.Output }; + cmd.Parameters.Add(b); + var c = new NpgsqlParameter { ParameterName = "c", Direction = ParameterDirection.InputOutput, Value = 4 }; + cmd.Parameters.Add(c); + using (await cmd.ExecuteReaderAsync(behavior)) + { + Assert.AreEqual(5, b.Value); + Assert.AreEqual(3, c.Value); + } + } + + [Test] + public async Task Send_NpgsqlDbType_Unknown([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepared)] PrepareOrNot prepare) + { + if (prepare == PrepareOrNot.Prepared && IsMultiplexing) + return; + + using var conn = await OpenConnectionAsync(); + using var cmd = new NpgsqlCommand("SELECT @p::TIMESTAMP", conn); + cmd.CommandText = "SELECT @p::TIMESTAMP"; + cmd.Parameters.Add(new NpgsqlParameter("p", NpgsqlDbType.Unknown) { Value = "2008-1-1" }); + if (prepare == PrepareOrNot.Prepared) + cmd.Prepare(); + using var reader = await cmd.ExecuteReaderAsync(); + 
reader.Read(); + Assert.That(reader.GetValue(0), Is.EqualTo(new DateTime(2008, 1, 1))); + } + + [Test] + public async Task Positional_parameter() + { + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT $1", conn); + cmd.Parameters.Add(new NpgsqlParameter { NpgsqlDbType = NpgsqlDbType.Integer, Value = 8 }); + Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(8)); + } + + [Test] + public async Task Positional_parameters_are_not_supported_with_legacy_batching() + { + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT $1; SELECT $1", conn); + cmd.Parameters.Add(new NpgsqlParameter { NpgsqlDbType = NpgsqlDbType.Integer, Value = 8 }); + Assert.That(async () => await cmd.ExecuteScalarAsync(), Throws.Exception.TypeOf() + .With.Property(nameof(PostgresException.SqlState)).EqualTo(PostgresErrorCodes.SyntaxError)); + } + + [Test] + public async Task Unreferenced_named_parameter_works() + { + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT 1", conn); + cmd.Parameters.AddWithValue("not_used", 8); + Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(1)); + } + + [Test] + public async Task Unreferenced_positional_parameter_works() + { + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT 1", conn); + cmd.Parameters.Add(new NpgsqlParameter { Value = 8 }); + Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(1)); + } + + [Test] + public async Task Mixing_positional_and_named_parameters_is_not_supported() + { + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT $1, @p", conn); + cmd.Parameters.Add(new NpgsqlParameter { Value = 8 }); + cmd.Parameters.Add(new NpgsqlParameter { ParameterName = "p", Value = 9 }); + Assert.That(() => cmd.ExecuteNonQueryAsync(), Throws.Exception.TypeOf()); + } + + [Test] + 
[IssueLink("https://github.com/npgsql/npgsql/issues/4171")] + public async Task Reuse_command_with_different_parameter_placeholder_types() + { + await using var conn = await OpenConnectionAsync(); + await using var cmd = conn.CreateCommand(); + + cmd.CommandText = "SELECT @p1"; + cmd.Parameters.AddWithValue("@p1", 8); + _ = await cmd.ExecuteScalarAsync(); + + cmd.CommandText = "SELECT $1"; + cmd.Parameters[0].ParameterName = null; + _ = await cmd.ExecuteScalarAsync(); + } + + [Test] + public async Task Positional_output_parameters_are_not_supported() + { + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT $1", conn); + cmd.Parameters.Add(new NpgsqlParameter { Value = 8, Direction = ParameterDirection.InputOutput }); + Assert.That(() => cmd.ExecuteNonQueryAsync(), Throws.Exception.TypeOf()); + } + + [Test] + public void Parameters_get_name() + { + var command = new NpgsqlCommand(); + + // Add parameters. + command.Parameters.Add(new NpgsqlParameter(":Parameter1", DbType.Boolean)); + command.Parameters.Add(new NpgsqlParameter(":Parameter2", DbType.Int32)); + command.Parameters.Add(new NpgsqlParameter(":Parameter3", DbType.DateTime)); + command.Parameters.Add(new NpgsqlParameter("Parameter4", DbType.DateTime)); + + var idbPrmtr = command.Parameters["Parameter1"]; + Assert.IsNotNull(idbPrmtr); + command.Parameters[0].Value = 1; + + // Get by indexers. + + Assert.AreEqual(":Parameter1", command.Parameters["Parameter1"].ParameterName); + Assert.AreEqual(":Parameter2", command.Parameters["Parameter2"].ParameterName); + Assert.AreEqual(":Parameter3", command.Parameters["Parameter3"].ParameterName); + Assert.AreEqual("Parameter4", command.Parameters["Parameter4"].ParameterName); //Should this work? 
+ + Assert.AreEqual(":Parameter1", command.Parameters[0].ParameterName); + Assert.AreEqual(":Parameter2", command.Parameters[1].ParameterName); + Assert.AreEqual(":Parameter3", command.Parameters[2].ParameterName); + Assert.AreEqual("Parameter4", command.Parameters[3].ParameterName); + } + + [Test] + public async Task Same_param_multiple_times() + { + using var conn = await OpenConnectionAsync(); + using var cmd = new NpgsqlCommand("SELECT @p1, @p1", conn); + cmd.Parameters.AddWithValue("@p1", 8); + using var reader = await cmd.ExecuteReaderAsync(); + reader.Read(); + Assert.That(reader[0], Is.EqualTo(8)); + Assert.That(reader[1], Is.EqualTo(8)); + } + + [Test] + public async Task Generic_parameter() + { + using var conn = await OpenConnectionAsync(); + using var cmd = new NpgsqlCommand("SELECT @p1, @p2, @p3, @p4", conn); + cmd.Parameters.Add(new NpgsqlParameter("p1", 8)); + cmd.Parameters.Add(new NpgsqlParameter("p2", 8) { NpgsqlDbType = NpgsqlDbType.Integer }); + cmd.Parameters.Add(new NpgsqlParameter("p3", "hello")); + cmd.Parameters.Add(new NpgsqlParameter("p4", new[] { 'f', 'o', 'o' })); + using var reader = await cmd.ExecuteReaderAsync(); + reader.Read(); + Assert.That(reader.GetInt32(0), Is.EqualTo(8)); + Assert.That(reader.GetInt32(1), Is.EqualTo(8)); + Assert.That(reader.GetString(2), Is.EqualTo("hello")); + Assert.That(reader.GetString(3), Is.EqualTo("foo")); + } + + [Test] + [TestCase(false)] + [TestCase(true)] + public async Task Parameter_must_be_set(bool genericParam) + { + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT @p1::TEXT", conn); + cmd.Parameters.Add( + genericParam + ? 
new NpgsqlParameter("p1", null) + : new NpgsqlParameter("p1", null) + ); + + Assert.That(async () => await cmd.ExecuteReaderAsync(), + Throws.Exception + .TypeOf() + .With.Message.EqualTo("Parameter 'p1' must have either its NpgsqlDbType or its DataTypeName or its Value set.")); + } + + [Test] + public async Task Object_generic_param_does_runtime_lookup() + { + await AssertTypeWrite(1, "1", "integer", NpgsqlDbType.Integer, DbType.Int32, DbType.Int32, isDefault: false, + isNpgsqlDbTypeInferredFromClrType: true, skipArrayCheck: true); + await AssertTypeWrite(new[] {1, 1}, "{1,1}", "integer[]", NpgsqlDbType.Integer | NpgsqlDbType.Array, isDefault: false, + isNpgsqlDbTypeInferredFromClrType: true, skipArrayCheck: true); + } + + public CommandParameterTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) + { + } +} diff --git a/test/Npgsql.Tests/CommandTests.cs b/test/Npgsql.Tests/CommandTests.cs index 68c559509f..6133e100c5 100644 --- a/test/Npgsql.Tests/CommandTests.cs +++ b/test/Npgsql.Tests/CommandTests.cs @@ -11,12 +11,16 @@ using System.Text; using System.Threading; using System.Threading.Tasks; +using Npgsql.Internal.Postgres; using static Npgsql.Tests.TestUtil; namespace Npgsql.Tests; public class CommandTests : MultiplexingTestBase { + static uint Int4Oid => DefaultPgTypes.DataTypeNameMap[DataTypeNames.Int4].Value; + static uint TextOid => DefaultPgTypes.DataTypeNameMap[DataTypeNames.Text].Value; + #region Legacy batching [Test] @@ -126,7 +130,7 @@ public async Task Multiple_statements_large_first_command() [NonParallelizable] // Disables sql rewriting public async Task Legacy_batching_is_not_supported_when_EnableSqlParsing_is_disabled() { - using var _ = DisableSqlRewriting(); + using var _ = DisableSqlRewriting(ClearDataSources); using var conn = await OpenConnectionAsync(); using var cmd = new NpgsqlCommand("SELECT 1; SELECT 2", conn); @@ -134,6 +138,30 @@ public async Task Legacy_batching_is_not_supported_when_EnableSqlParsing_is_disa 
.With.Property(nameof(PostgresException.SqlState)).EqualTo(PostgresErrorCodes.SyntaxError)); } + [Test] + [NonParallelizable] // Disables sql rewriting + public async Task Positional_parameters_are_supported_when_EnableSqlParsing_is_disabled() + { + using var _ = DisableSqlRewriting(ClearDataSources); + + using var conn = await OpenConnectionAsync(); + using var cmd = new NpgsqlCommand("SELECT $1", conn); + cmd.Parameters.Add(new NpgsqlParameter { NpgsqlDbType = NpgsqlDbType.Integer, Value = 8 }); + Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(8)); + } + + [Test] + [NonParallelizable] // Disables sql rewriting + public async Task Named_parameters_are_not_supported_when_EnableSqlParsing_is_disabled() + { + using var _ = DisableSqlRewriting(ClearDataSources); + + using var conn = await OpenConnectionAsync(); + using var cmd = new NpgsqlCommand("SELECT @p", conn); + cmd.Parameters.Add(new NpgsqlParameter("p", 8)); + Assert.That(async () => await cmd.ExecuteScalarAsync(), Throws.Exception.TypeOf()); + } + #endregion #region Timeout @@ -402,7 +430,7 @@ public async Task Bug3466([Values(false, true)] bool isBroken) await serverMock .WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Int4)) + .WriteRowDescription(new FieldDescription(Int4Oid)) .WriteDataRow(BitConverter.GetBytes(BinaryPrimitives.ReverseEndianness(1))) .WriteCommandComplete() .WriteReadyForQuery() @@ -537,197 +565,6 @@ public async Task SingleRow([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepa Assert.That(reader.Read(), Is.False); } - #region Parameters - - [Test] - public async Task Positional_parameter() - { - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT $1", conn); - cmd.Parameters.Add(new NpgsqlParameter { NpgsqlDbType = NpgsqlDbType.Integer, Value = 8 }); - Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(8)); - } - - [Test] - public async Task 
Positional_parameters_are_not_supported_with_legacy_batching() - { - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT $1; SELECT $1", conn); - cmd.Parameters.Add(new NpgsqlParameter { NpgsqlDbType = NpgsqlDbType.Integer, Value = 8 }); - Assert.That(async () => await cmd.ExecuteScalarAsync(), Throws.Exception.TypeOf() - .With.Property(nameof(PostgresException.SqlState)).EqualTo(PostgresErrorCodes.SyntaxError)); - } - - [Test] - [NonParallelizable] // Disables sql rewriting - public async Task Positional_parameters_are_supported_when_EnableSqlParsing_is_disabled() - { - using var _ = DisableSqlRewriting(); - - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT $1", conn); - cmd.Parameters.Add(new NpgsqlParameter { NpgsqlDbType = NpgsqlDbType.Integer, Value = 8 }); - Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(8)); - } - - [Test] - [NonParallelizable] // Disables sql rewriting - public async Task Named_parameters_are_not_supported_when_EnableSqlParsing_is_disabled() - { - using var _ = DisableSqlRewriting(); - - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p", conn); - cmd.Parameters.Add(new NpgsqlParameter("p", 8)); - Assert.That(async () => await cmd.ExecuteScalarAsync(), Throws.Exception.TypeOf()); - } - - [Test, Description("Makes sure writing an unset parameter isn't allowed")] - public async Task Parameter_without_Value() - { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p", conn); - cmd.Parameters.Add(new NpgsqlParameter("@p", NpgsqlDbType.Integer)); - Assert.That(() => cmd.ExecuteScalarAsync(), Throws.Exception.TypeOf()); - } - - [Test] - public async Task Unreferenced_named_parameter_works() - { - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT 1", conn); - cmd.Parameters.AddWithValue("not_used", 8); - 
Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(1)); - } - - [Test] - public async Task Unreferenced_positional_parameter_works() - { - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT 1", conn); - cmd.Parameters.Add(new NpgsqlParameter { Value = 8 }); - Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(1)); - } - - [Test] - public async Task Mixing_positional_and_named_parameters_is_not_supported() - { - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT $1, @p", conn); - cmd.Parameters.Add(new NpgsqlParameter { Value = 8 }); - cmd.Parameters.Add(new NpgsqlParameter { ParameterName = "p", Value = 9 }); - Assert.That(() => cmd.ExecuteNonQueryAsync(), Throws.Exception.TypeOf()); - } - - [Test] - [IssueLink("https://github.com/npgsql/npgsql/issues/4171")] - public async Task Cached_command_clears_parameters_placeholder_type() - { - await using var conn = await OpenConnectionAsync(); - - await using (var cmd1 = conn.CreateCommand()) - { - cmd1.CommandText = "SELECT @p1"; - cmd1.Parameters.AddWithValue("@p1", 8); - await using var reader1 = await cmd1.ExecuteReaderAsync(); - reader1.Read(); - Assert.That(reader1[0], Is.EqualTo(8)); - } - - await using (var cmd2 = conn.CreateCommand()) - { - cmd2.CommandText = "SELECT $1"; - cmd2.Parameters.AddWithValue(8); - await using var reader2 = await cmd2.ExecuteReaderAsync(); - reader2.Read(); - Assert.That(reader2[0], Is.EqualTo(8)); - } - } - - [Test] - [IssueLink("https://github.com/npgsql/npgsql/issues/4171")] - public async Task Reuse_command_with_different_parameter_placeholder_types() - { - await using var conn = await OpenConnectionAsync(); - await using var cmd = conn.CreateCommand(); - - cmd.CommandText = "SELECT @p1"; - cmd.Parameters.AddWithValue("@p1", 8); - _ = await cmd.ExecuteScalarAsync(); - - cmd.CommandText = "SELECT $1"; - cmd.Parameters[0].ParameterName = null; - _ = await 
cmd.ExecuteScalarAsync(); - } - - [Test] - public async Task Positional_output_parameters_are_not_supported() - { - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT $1", conn); - cmd.Parameters.Add(new NpgsqlParameter { Value = 8, Direction = ParameterDirection.InputOutput }); - Assert.That(() => cmd.ExecuteNonQueryAsync(), Throws.Exception.TypeOf()); - } - - [Test] - public void Parameters_get_name() - { - var command = new NpgsqlCommand(); - - // Add parameters. - command.Parameters.Add(new NpgsqlParameter(":Parameter1", DbType.Boolean)); - command.Parameters.Add(new NpgsqlParameter(":Parameter2", DbType.Int32)); - command.Parameters.Add(new NpgsqlParameter(":Parameter3", DbType.DateTime)); - command.Parameters.Add(new NpgsqlParameter("Parameter4", DbType.DateTime)); - - var idbPrmtr = command.Parameters["Parameter1"]; - Assert.IsNotNull(idbPrmtr); - command.Parameters[0].Value = 1; - - // Get by indexers. - - Assert.AreEqual(":Parameter1", command.Parameters["Parameter1"].ParameterName); - Assert.AreEqual(":Parameter2", command.Parameters["Parameter2"].ParameterName); - Assert.AreEqual(":Parameter3", command.Parameters["Parameter3"].ParameterName); - Assert.AreEqual("Parameter4", command.Parameters["Parameter4"].ParameterName); //Should this work? 
- - Assert.AreEqual(":Parameter1", command.Parameters[0].ParameterName); - Assert.AreEqual(":Parameter2", command.Parameters[1].ParameterName); - Assert.AreEqual(":Parameter3", command.Parameters[2].ParameterName); - Assert.AreEqual("Parameter4", command.Parameters[3].ParameterName); - } - - [Test] - public async Task Same_param_multiple_times() - { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p1, @p1", conn); - cmd.Parameters.AddWithValue("@p1", 8); - using var reader = await cmd.ExecuteReaderAsync(); - reader.Read(); - Assert.That(reader[0], Is.EqualTo(8)); - Assert.That(reader[1], Is.EqualTo(8)); - } - - [Test] - public async Task Generic_parameter() - { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p1, @p2, @p3, @p4", conn); - cmd.Parameters.Add(new NpgsqlParameter("p1", 8)); - cmd.Parameters.Add(new NpgsqlParameter("p2", 8) { NpgsqlDbType = NpgsqlDbType.Integer }); - cmd.Parameters.Add(new NpgsqlParameter("p3", "hello")); - cmd.Parameters.Add(new NpgsqlParameter("p4", new[] { 'f', 'o', 'o' })); - using var reader = await cmd.ExecuteReaderAsync(); - reader.Read(); - Assert.That(reader.GetInt32(0), Is.EqualTo(8)); - Assert.That(reader.GetInt32(1), Is.EqualTo(8)); - Assert.That(reader.GetString(2), Is.EqualTo("hello")); - Assert.That(reader.GetString(3), Is.EqualTo("foo")); - } - - #endregion Parameters - [Test] public async Task CommandText_not_set() { @@ -834,6 +671,31 @@ public async Task Parameter_and_operator_unclear() Assert.AreEqual(rdr.GetInt32(0), 4); } + [Test] + [IssueLink("https://github.com/npgsql/npgsql/issues/4171")] + public async Task Cached_command_clears_parameters_placeholder_type() + { + await using var conn = await OpenConnectionAsync(); + + await using (var cmd1 = conn.CreateCommand()) + { + cmd1.CommandText = "SELECT @p1"; + cmd1.Parameters.AddWithValue("@p1", 8); + await using var reader1 = await cmd1.ExecuteReaderAsync(); + reader1.Read(); + 
Assert.That(reader1[0], Is.EqualTo(8)); + } + + await using (var cmd2 = conn.CreateCommand()) + { + cmd2.CommandText = "SELECT $1"; + cmd2.Parameters.AddWithValue(8); + await using var reader2 = await cmd2.ExecuteReaderAsync(); + reader2.Read(); + Assert.That(reader2[0], Is.EqualTo(8)); + } + } + [Test] [TestCase(CommandBehavior.Default)] [TestCase(CommandBehavior.SequentialAccess)] @@ -937,41 +799,6 @@ public async Task TableDirect() Assert.That(rdr["name"], Is.EqualTo("foo")); } - [Test] - [TestCase(CommandBehavior.Default)] - [TestCase(CommandBehavior.SequentialAccess)] - public async Task Input_and_output_parameters(CommandBehavior behavior) - { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @c-1 AS c, @a+2 AS b", conn); - cmd.Parameters.Add(new NpgsqlParameter("a", 3)); - var b = new NpgsqlParameter { ParameterName = "b", Direction = ParameterDirection.Output }; - cmd.Parameters.Add(b); - var c = new NpgsqlParameter { ParameterName = "c", Direction = ParameterDirection.InputOutput, Value = 4 }; - cmd.Parameters.Add(c); - using (await cmd.ExecuteReaderAsync(behavior)) - { - Assert.AreEqual(5, b.Value); - Assert.AreEqual(3, c.Value); - } - } - - [Test] - public async Task Send_NpgsqlDbType_Unknown([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepared)] PrepareOrNot prepare) - { - if (prepare == PrepareOrNot.Prepared && IsMultiplexing) - return; - - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p::TIMESTAMP", conn); - cmd.CommandText = "SELECT @p::TIMESTAMP"; - cmd.Parameters.Add(new NpgsqlParameter("p", NpgsqlDbType.Unknown) { Value = "2008-1-1" }); - if (prepare == PrepareOrNot.Prepared) - cmd.Prepare(); - using var reader = await cmd.ExecuteReaderAsync(); - reader.Read(); - Assert.That(reader.GetValue(0), Is.EqualTo(new DateTime(2008, 1, 1))); - } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/503")] public async Task Invalid_UTF8() @@ -1532,7 +1359,7 @@ 
public async Task Oversize_buffer_lost_messages() await server .WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Text)) + .WriteRowDescription(new FieldDescription(TextOid)) .WriteDataRowWithFlush(Encoding.ASCII.GetBytes(new string('a', connection.Settings.ReadBufferSize * 2))); // Just to make sure we have enough space await server.FlushAsync(); @@ -1557,7 +1384,7 @@ await server await server .WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Text)) + .WriteRowDescription(new FieldDescription(TextOid)) .WriteDataRow(Encoding.ASCII.GetBytes("abc")) .WriteCommandComplete() .WriteReadyForQuery() diff --git a/test/Npgsql.Tests/ConnectionTests.cs b/test/Npgsql.Tests/ConnectionTests.cs index 19fb21b693..01ce93b1d2 100644 --- a/test/Npgsql.Tests/ConnectionTests.cs +++ b/test/Npgsql.Tests/ConnectionTests.cs @@ -13,7 +13,6 @@ using System.Threading.Tasks; using Npgsql.Internal; using Npgsql.PostgresTypes; -using Npgsql.Properties; using Npgsql.Util; using NpgsqlTypes; using NUnit.Framework; @@ -1176,7 +1175,7 @@ public async Task NoTypeLoading() }; Assert.That(async () => await cmd.ExecuteScalarAsync(), - Throws.Exception.TypeOf() + Throws.Exception.TypeOf() .With.Message.EqualTo("The NpgsqlDbType 'IntegerMultirange' isn't present in your database. 
You may need to install an extension or upgrade to a newer version.")); } } diff --git a/test/Npgsql.Tests/CopyTests.cs b/test/Npgsql.Tests/CopyTests.cs index 41fabe6ddc..1ab6405956 100644 --- a/test/Npgsql.Tests/CopyTests.cs +++ b/test/Npgsql.Tests/CopyTests.cs @@ -1,7 +1,9 @@ using System; using System.Collections; using System.Collections.Generic; +using System.Collections.Specialized; using System.Data; +using System.Diagnostics; using System.IO; using System.Numerics; using System.Text; @@ -65,10 +67,9 @@ public async Task Raw_binary_roundtrip([Values(false, true)] bool async) const int iterations = 500; var table = await GetTempTableName(conn); - + await conn.ExecuteNonQueryAsync($@"CREATE TABLE {table} (field_text TEXT, field_int2 SMALLINT, field_int4 INTEGER)"); using (var tx = conn.BeginTransaction()) { - await conn.ExecuteNonQueryAsync($@"CREATE TABLE {table} (field_text TEXT, field_int2 SMALLINT, field_int4 INTEGER)"); // Preload some data into the table using (var cmd = @@ -159,14 +160,15 @@ public async Task Cancel_raw_binary_import() using var conn = await OpenConnectionAsync(); var table = await GetTempTableName(conn); await conn.ExecuteNonQueryAsync($@"CREATE TABLE {table} (field_text TEXT, field_int2 SMALLINT, field_int4 INTEGER)"); - - var garbage = new byte[] {1, 2, 3, 4}; - using (var s = conn.BeginRawBinaryCopy($"COPY {table} (field_text, field_int4) FROM STDIN BINARY")) + await using (var tx = await conn.BeginTransactionAsync()) { - s.Write(garbage, 0, garbage.Length); - s.Cancel(); + var garbage = new byte[] {1, 2, 3, 4}; + using (var s = conn.BeginRawBinaryCopy($"COPY {table} (field_text, field_int4) FROM STDIN BINARY")) + { + s.Write(garbage, 0, garbage.Length); + s.Cancel(); + } } - Assert.That(await conn.ExecuteScalarAsync($"SELECT COUNT(*) FROM {table}"), Is.EqualTo(0)); } @@ -294,6 +296,7 @@ public async Task Binary_roundtrip([Values(false, true)] bool async) Assert.That(reader.StartRow(), Is.EqualTo(2)); Assert.That(reader.Read(), 
Is.EqualTo(longString)); Assert.That(reader.IsNull, Is.True); + Assert.That(reader.IsNull, Is.True); reader.Skip(); Assert.That(reader.StartRow(), Is.EqualTo(-1)); @@ -307,13 +310,15 @@ public async Task Cancel_binary_import() { using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "field_text TEXT, field_int2 SMALLINT, field_int4 INTEGER"); - - using (var writer = conn.BeginBinaryImport($"COPY {table} (field_text, field_int4) FROM STDIN BINARY")) + await using (var tx = await conn.BeginTransactionAsync()) { - writer.StartRow(); - writer.Write("Hello"); - writer.Write(8); - // No commit should rollback + using (var writer = conn.BeginBinaryImport($"COPY {table} (field_text, field_int4) FROM STDIN BINARY")) + { + writer.StartRow(); + writer.Write("Hello"); + writer.Write(8); + // No commit should rollback + } } Assert.That(await conn.ExecuteScalarAsync($"SELECT COUNT(*) FROM {table}"), Is.EqualTo(0)); } @@ -525,12 +530,21 @@ public async Task Export_long_string() using (var reader = conn.BeginBinaryExport($"COPY {table} (foo1, foo2, foo3, foo4, foo5) TO STDIN BINARY")) { - for (var row = 0; row < iterations; row++) + int row, col = 0; + for (row = 0; row < iterations; row++) { Assert.That(reader.StartRow(), Is.EqualTo(5)); - for (var col = 0; col < 5; col++) - Assert.That(reader.Read().Length, Is.EqualTo(len)); + for (col = 0; col < 5; col++) + { + var str = reader.Read(); + Assert.That(str.Length, Is.EqualTo(len)); +#if NET6_0_OR_GREATER + Assert.True(str.AsSpan().IndexOfAnyExcept('x') is -1); +#endif + } } + Assert.That(row, Is.EqualTo(100)); + Assert.That(col, Is.EqualTo(5)); } } @@ -541,12 +555,13 @@ public async Task Read_bit_string() var table = await GetTempTableName(conn); await conn.ExecuteNonQueryAsync($@" -CREATE TABLE {table} (bits BIT(3), bitarray BIT(3)[]); -INSERT INTO {table} (bits, bitarray) VALUES (B'101', ARRAY[B'101', B'111'])"); +CREATE TABLE {table} (bits BIT(11), bitvector BIT(11), bitarray BIT(3)[]); +INSERT 
INTO {table} (bits, bitvector, bitarray) VALUES (B'00000001101', B'00000001101', ARRAY[B'101', B'111'])"); - using var reader = conn.BeginBinaryExport($"COPY {table} (bits, bitarray) TO STDIN BINARY"); + using var reader = conn.BeginBinaryExport($"COPY {table} (bits, bitvector, bitarray) TO STDIN BINARY"); reader.StartRow(); - Assert.That(reader.Read(), Is.EqualTo(new BitArray(new[] { true, false, true }))); + Assert.That(reader.Read(), Is.EqualTo(new BitArray(new[] { false, false, false, false, false, false, false, true, true, false, true }))); + Assert.That(reader.Read(), Is.EqualTo(new BitVector32(0b00000001101000000000000000000000))); Assert.That(reader.Read(), Is.EqualTo(new[] { new BitArray(new[] { true, false, true }), @@ -744,12 +759,15 @@ public async Task Write_column_out_of_bounds_throws() public async Task Cancel_raw_binary_export_when_not_consumed_and_then_Dispose() { await using var conn = await OpenConnectionAsync(); - // This must be large enough to cause Postgres to queue up CopyData messages. - var stream = conn.BeginRawBinaryCopy("COPY (select md5(random()::text) as id from generate_series(1, 100000)) TO STDOUT BINARY"); - var buffer = new byte[32]; - await stream.ReadAsync(buffer, 0, buffer.Length); - stream.Cancel(); - Assert.DoesNotThrowAsync(async () => await stream.DisposeAsync()); + await using (var tx = await conn.BeginTransactionAsync()) + { + // This must be large enough to cause Postgres to queue up CopyData messages. 
+ var stream = conn.BeginRawBinaryCopy("COPY (select md5(random()::text) as id from generate_series(1, 100000)) TO STDOUT BINARY"); + var buffer = new byte[32]; + await stream.ReadAsync(buffer, 0, buffer.Length); + stream.Cancel(); + Assert.DoesNotThrowAsync(async () => await stream.DisposeAsync()); + } Assert.That(async () => await conn.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1), "The connection is still OK"); } @@ -757,28 +775,18 @@ public async Task Cancel_raw_binary_export_when_not_consumed_and_then_Dispose() public async Task Cancel_binary_export_when_not_consumed_and_then_Dispose() { await using var conn = await OpenConnectionAsync(); - // This must be large enough to cause Postgres to queue up CopyData messages. - var exporter = conn.BeginBinaryExport("COPY (select md5(random()::text) as id from generate_series(1, 100000)) TO STDOUT BINARY"); - await exporter.StartRowAsync(); - await exporter.ReadAsync(); - exporter.Cancel(); - Assert.DoesNotThrowAsync(async () => await exporter.DisposeAsync()); + await using (var tx = await conn.BeginTransactionAsync()) + { + // This must be large enough to cause Postgres to queue up CopyData messages. + var exporter = conn.BeginBinaryExport("COPY (select md5(random()::text) as id from generate_series(1, 100000)) TO STDOUT BINARY"); + await exporter.StartRowAsync(); + await exporter.ReadAsync(); + exporter.Cancel(); + Assert.DoesNotThrowAsync(async () => await exporter.DisposeAsync()); + } Assert.That(async () => await conn.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1), "The connection is still OK"); } - [Test] - [IssueLink("https://github.com/npgsql/npgsql/issues/4417")] - public async Task Binary_copy_throws_for_nullable() - { - await using var conn = await OpenConnectionAsync(); - var tableName = await CreateTempTable(conn, "house_number integer"); - - await using var writer = await conn.BeginBinaryImportAsync($"COPY {tableName}(house_number) FROM STDIN BINARY"); - int? 
value = 1; - await writer.StartRowAsync(); - Assert.ThrowsAsync(async () => await writer.WriteAsync(value, NpgsqlDbType.Integer)); - } - [Test] [IssueLink("https://github.com/npgsql/npgsql/issues/5110")] public async Task Binary_copy_read_char_column() @@ -836,10 +844,12 @@ public async Task Cancel_text_import() { using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "field_text TEXT, field_int2 SMALLINT, field_int4 INTEGER"); - - var writer = (NpgsqlCopyTextWriter)conn.BeginTextImport($"COPY {table} (field_text, field_int4) FROM STDIN"); - writer.Write("HELLO\t1\n"); - writer.Cancel(); + await using (var tx = await conn.BeginTransactionAsync()) + { + var writer = (NpgsqlCopyTextWriter)conn.BeginTextImport($"COPY {table} (field_text, field_int4) FROM STDIN"); + writer.Write("HELLO\t1\n"); + writer.Cancel(); + } Assert.That(await conn.ExecuteScalarAsync($"SELECT COUNT(*) FROM {table}"), Is.EqualTo(0)); } @@ -944,12 +954,15 @@ public async Task Wrong_format_text_export() public async Task Cancel_text_export_when_not_consumed_and_then_Dispose() { await using var conn = await OpenConnectionAsync(); - // This must be large enough to cause Postgres to queue up CopyData messages. - var reader = (NpgsqlCopyTextReader) conn.BeginTextExport("COPY (select md5(random()::text) as id from generate_series(1, 100000)) TO STDOUT"); - var buffer = new char[32]; - await reader.ReadAsync(buffer, 0, buffer.Length); - reader.Cancel(); - Assert.DoesNotThrow(reader.Dispose); + await using (var tx = await conn.BeginTransactionAsync()) + { + // This must be large enough to cause Postgres to queue up CopyData messages. 
+ var reader = (NpgsqlCopyTextReader) conn.BeginTextExport("COPY (select md5(random()::text) as id from generate_series(1, 100000)) TO STDOUT"); + var buffer = new char[32]; + await reader.ReadAsync(buffer, 0, buffer.Length); + reader.Cancel(); + Assert.DoesNotThrow(reader.Dispose); + } Assert.That(async () => await conn.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1), "The connection is still OK"); } @@ -1029,7 +1042,7 @@ public async Task Write_null_values() { writer.StartRow(); writer.Write(DBNull.Value, NpgsqlDbType.Integer); - writer.Write((string?)null, NpgsqlDbType.Uuid); + writer.Write(null, NpgsqlDbType.Uuid); writer.Write(DBNull.Value); writer.Write((string?)null); var rowsWritten = writer.Complete(); @@ -1054,7 +1067,7 @@ public async Task Write_different_types() { writer.StartRow(); writer.Write(3.0, NpgsqlDbType.Integer); - writer.Write((object)new[] { 1, 2, 3 }); + writer.Write(new[] { 1, 2, 3 }); writer.StartRow(); writer.Write(3, NpgsqlDbType.Integer); writer.Write((object)new List { 4, 5, 6 }); diff --git a/test/Npgsql.Tests/FunctionTests.cs b/test/Npgsql.Tests/FunctionTests.cs index 6ca3c2db6d..37f203b812 100644 --- a/test/Npgsql.Tests/FunctionTests.cs +++ b/test/Npgsql.Tests/FunctionTests.cs @@ -4,7 +4,6 @@ using Npgsql.PostgresTypes; using NpgsqlTypes; using NUnit.Framework; -using static Npgsql.Util.Statics; using static Npgsql.Tests.TestUtil; namespace Npgsql.Tests; diff --git a/test/Npgsql.Tests/GlobalTypeMapperTests.cs b/test/Npgsql.Tests/GlobalTypeMapperTests.cs new file mode 100644 index 0000000000..32c647731c --- /dev/null +++ b/test/Npgsql.Tests/GlobalTypeMapperTests.cs @@ -0,0 +1,85 @@ +using System; +using System.Threading.Tasks; +using Npgsql.Internal; +using Npgsql.Internal.Postgres; +using NUnit.Framework; +using static Npgsql.Tests.TestUtil; + +namespace Npgsql.Tests; + +#pragma warning disable CS0618 // GlobalTypeMapper is obsolete + +[NonParallelizable] +public class GlobalTypeMapperTests : TestBase +{ + [Test] + public async Task 
MapEnum() + { + await using var adminConnection = await OpenConnectionAsync(); + var type = await GetTempTypeName(adminConnection); + NpgsqlConnection.GlobalTypeMapper.MapEnum(type); + + await using var dataSource1 = CreateDataSource(); + + await using (var connection = await dataSource1.OpenConnectionAsync()) + { + await connection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS ENUM ('sad', 'ok', 'happy')"); + await connection.ReloadTypesAsync(); + + await AssertType(connection, Mood.Happy, "happy", type, npgsqlDbType: null); + } + + NpgsqlConnection.GlobalTypeMapper.UnmapEnum(type); + + // Global mapping changes have no effect on already-built data sources + await AssertType(dataSource1, Mood.Happy, "happy", type, npgsqlDbType: null); + + // But they do affect on new data sources + await using var dataSource2 = CreateDataSource(); + await AssertType(dataSource2, "happy", "happy", type, npgsqlDbType: null, isDefault: false); + } + + [Test] + public async Task Reset() + { + await using var adminConnection = await OpenConnectionAsync(); + var type = await GetTempTypeName(adminConnection); + NpgsqlConnection.GlobalTypeMapper.MapEnum(type); + + await using var dataSource1 = CreateDataSource(); + + await using (var connection = await dataSource1.OpenConnectionAsync()) + { + await connection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS ENUM ('sad', 'ok', 'happy')"); + await connection.ReloadTypesAsync(); + } + + // A global mapping change has no effects on data sources which have already been built + NpgsqlConnection.GlobalTypeMapper.Reset(); + + // Global mapping changes have no effect on already-built data sources + await AssertType(dataSource1, Mood.Happy, "happy", type, npgsqlDbType: null); + + // But they do affect on new data sources + await using var dataSource2 = CreateDataSource(); + await AssertType(dataSource2, "happy", "happy", type, npgsqlDbType: null, isDefault: false); + } + + [Test] + public void Reset_and_add_resolver() + { + 
NpgsqlConnection.GlobalTypeMapper.Reset(); + NpgsqlConnection.GlobalTypeMapper.AddTypeInfoResolver(new DummyResolver()); + } + + [TearDown] + public void Teardown() + => NpgsqlConnection.GlobalTypeMapper.Reset(); + + enum Mood { Sad, Ok, Happy } + + class DummyResolver : IPgTypeInfoResolver + { + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) => null; + } +} diff --git a/test/Npgsql.Tests/MultipleHostsTests.cs b/test/Npgsql.Tests/MultipleHostsTests.cs index 2b2c3f5304..5de416672b 100644 --- a/test/Npgsql.Tests/MultipleHostsTests.cs +++ b/test/Npgsql.Tests/MultipleHostsTests.cs @@ -957,7 +957,7 @@ async Task Query(NpgsqlDataSource dataSource) [NonParallelizable] // Disables sql rewriting public async Task Multiple_hosts_with_disabled_sql_rewriting() { - using var _ = DisableSqlRewriting(); + using var _ = DisableSqlRewriting(ClearDataSources); var dataSourceBuilder = new NpgsqlDataSourceBuilder(ConnectionString) { diff --git a/test/Npgsql.Tests/NotificationTests.cs b/test/Npgsql.Tests/NotificationTests.cs index 8f3810a779..9df9aba44d 100644 --- a/test/Npgsql.Tests/NotificationTests.cs +++ b/test/Npgsql.Tests/NotificationTests.cs @@ -3,7 +3,6 @@ using System.Data; using System.Threading; using System.Threading.Tasks; -using Npgsql.Internal; using static Npgsql.Tests.TestUtil; namespace Npgsql.Tests; diff --git a/test/Npgsql.Tests/NpgsqlParameterTests.cs b/test/Npgsql.Tests/NpgsqlParameterTests.cs index fe9f5f96b5..1678b3b37e 100644 --- a/test/Npgsql.Tests/NpgsqlParameterTests.cs +++ b/test/Npgsql.Tests/NpgsqlParameterTests.cs @@ -1,7 +1,6 @@ using NpgsqlTypes; using NUnit.Framework; using System; -using System.Collections.Generic; using System.Data; using System.Data.Common; @@ -109,8 +108,8 @@ public void Cannot_infer_data_type_name_from_NpgsqlDbType_for_unknown_range() [Test] public void Infer_data_type_name_from_ClrType() { - var p = new NpgsqlParameter("p1", new Dictionary()); - Assert.That(p.DataTypeName, 
Is.EqualTo("hstore")); + var p = new NpgsqlParameter("p1", Array.Empty()); + Assert.That(p.DataTypeName, Is.EqualTo("bytea")); } [Test] diff --git a/test/Npgsql.Tests/PoolTests.cs b/test/Npgsql.Tests/PoolTests.cs index 6e5d7f8326..d9024dd0dd 100644 --- a/test/Npgsql.Tests/PoolTests.cs +++ b/test/Npgsql.Tests/PoolTests.cs @@ -1,11 +1,9 @@ using System; -using System.Collections.Generic; using System.Linq; using System.Net.Sockets; using System.Threading; using System.Threading.Tasks; using NUnit.Framework; -using static Npgsql.Tests.TestUtil; namespace Npgsql.Tests; diff --git a/test/Npgsql.Tests/PostgresTypeTests.cs b/test/Npgsql.Tests/PostgresTypeTests.cs index 644d839697..056830cf32 100644 --- a/test/Npgsql.Tests/PostgresTypeTests.cs +++ b/test/Npgsql.Tests/PostgresTypeTests.cs @@ -1,7 +1,6 @@ using System.Linq; using System.Threading.Tasks; using Npgsql.Internal; -using Npgsql.TypeMapping; using NUnit.Framework; namespace Npgsql.Tests; @@ -70,6 +69,6 @@ public async Task Multirange() async Task GetDatabaseInfo() { await using var conn = await OpenConnectionAsync(); - return conn.NpgsqlDataSource.TypeMapper.DatabaseInfo; + return conn.NpgsqlDataSource.DatabaseInfo; } } diff --git a/test/Npgsql.Tests/ReadBufferTests.cs b/test/Npgsql.Tests/ReadBufferTests.cs index 9246479355..b9ace59606 100644 --- a/test/Npgsql.Tests/ReadBufferTests.cs +++ b/test/Npgsql.Tests/ReadBufferTests.cs @@ -1,5 +1,4 @@ using Npgsql.Internal; -using Npgsql.Util; using NUnit.Framework; using System; using System.IO; @@ -17,14 +16,14 @@ public void Skip() for (byte i = 0; i < 50; i++) Writer.WriteByte(i); - ReadBuffer.Ensure(10); + ReadBuffer.Ensure(10, async: false).GetAwaiter().GetResult(); ReadBuffer.Skip(7); Assert.That(ReadBuffer.ReadByte(), Is.EqualTo(7)); ReadBuffer.Skip(10); - ReadBuffer.Ensure(1); + ReadBuffer.Ensure(1, async: false).GetAwaiter().GetResult(); Assert.That(ReadBuffer.ReadByte(), Is.EqualTo(18)); ReadBuffer.Skip(20); - ReadBuffer.Ensure(1); + ReadBuffer.Ensure(1, async: 
false).GetAwaiter().GetResult(); Assert.That(ReadBuffer.ReadByte(), Is.EqualTo(39)); } @@ -36,7 +35,7 @@ public void ReadSingle() Array.Reverse(bytes); Writer.Write(bytes); - ReadBuffer.Ensure(4); + ReadBuffer.Ensure(4, async: false).GetAwaiter().GetResult(); Assert.That(ReadBuffer.ReadSingle(), Is.EqualTo(expected)); } @@ -48,7 +47,7 @@ public void ReadDouble() Array.Reverse(bytes); Writer.Write(bytes); - ReadBuffer.Ensure(8); + ReadBuffer.Ensure(8, async: false).GetAwaiter().GetResult(); Assert.That(ReadBuffer.ReadDouble(), Is.EqualTo(expected)); } @@ -56,12 +55,12 @@ public void ReadDouble() public void ReadNullTerminatedString_buffered_only() { Writer - .Write(PGUtil.UTF8Encoding.GetBytes(new string("foo"))) + .Write(NpgsqlWriteBuffer.UTF8Encoding.GetBytes(new string("foo"))) .WriteByte(0) - .Write(PGUtil.UTF8Encoding.GetBytes(new string("bar"))) + .Write(NpgsqlWriteBuffer.UTF8Encoding.GetBytes(new string("bar"))) .WriteByte(0); - ReadBuffer.Ensure(1); + ReadBuffer.Ensure(1, async: false); Assert.That(ReadBuffer.ReadNullTerminatedString(), Is.EqualTo("foo")); Assert.That(ReadBuffer.ReadNullTerminatedString(), Is.EqualTo("bar")); @@ -70,15 +69,15 @@ public void ReadNullTerminatedString_buffered_only() [Test] public async Task ReadNullTerminatedString_with_io() { - Writer.Write(PGUtil.UTF8Encoding.GetBytes(new string("Chunked "))); - ReadBuffer.Ensure(1); + Writer.Write(NpgsqlWriteBuffer.UTF8Encoding.GetBytes(new string("Chunked "))); + await ReadBuffer.Ensure(1, async: true); var task = ReadBuffer.ReadNullTerminatedString(async: true); Assert.That(!task.IsCompleted); Writer - .Write(PGUtil.UTF8Encoding.GetBytes(new string("string"))) + .Write(NpgsqlWriteBuffer.UTF8Encoding.GetBytes(new string("string"))) .WriteByte(0) - .Write(PGUtil.UTF8Encoding.GetBytes(new string("bar"))) + .Write(NpgsqlWriteBuffer.UTF8Encoding.GetBytes(new string("bar"))) .WriteByte(0); Assert.That(task.IsCompleted); Assert.That(await task, Is.EqualTo("Chunked string")); @@ -90,7 +89,7 @@ 
public async Task ReadNullTerminatedString_with_io() public void SetUp() { var stream = new MockStream(); - ReadBuffer = new NpgsqlReadBuffer(null, stream, null, NpgsqlReadBuffer.DefaultSize, PGUtil.UTF8Encoding, PGUtil.RelaxedUTF8Encoding); + ReadBuffer = new NpgsqlReadBuffer(null, stream, null, NpgsqlReadBuffer.DefaultSize, NpgsqlWriteBuffer.UTF8Encoding, NpgsqlWriteBuffer.RelaxedUTF8Encoding); Writer = stream.Writer; } #pragma warning restore CS8625 diff --git a/test/Npgsql.Tests/ReaderNewSchemaTests.cs b/test/Npgsql.Tests/ReaderNewSchemaTests.cs index d70f772e37..7489bae711 100644 --- a/test/Npgsql.Tests/ReaderNewSchemaTests.cs +++ b/test/Npgsql.Tests/ReaderNewSchemaTests.cs @@ -1,5 +1,4 @@ -using System; -using System.Collections.ObjectModel; +using System.Collections.ObjectModel; using System.Data; using System.Linq; using System.Threading.Tasks; diff --git a/test/Npgsql.Tests/ReaderTests.cs b/test/Npgsql.Tests/ReaderTests.cs index 8f47f53aab..790b1b48e0 100644 --- a/test/Npgsql.Tests/ReaderTests.cs +++ b/test/Npgsql.Tests/ReaderTests.cs @@ -10,8 +10,7 @@ using System.Threading.Tasks; using Npgsql.BackendMessages; using Npgsql.Internal; -using Npgsql.Internal.TypeHandling; -using Npgsql.Internal.TypeMapping; +using Npgsql.Internal.Postgres; using Npgsql.PostgresTypes; using Npgsql.Tests.Support; using Npgsql.TypeMapping; @@ -28,6 +27,24 @@ namespace Npgsql.Tests; [TestFixture(MultiplexingMode.Multiplexing, CommandBehavior.SequentialAccess)] public class ReaderTests : MultiplexingTestBase { + static uint Int4Oid => DefaultPgTypes.DataTypeNameMap[DataTypeNames.Int4].Value; + static uint ByteaOid => DefaultPgTypes.DataTypeNameMap[DataTypeNames.Bytea].Value; + + [Test] + public async Task Resumable_non_consumed_to_non_resumable() + { + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand( "SELECT 'aaaaaaaa', 1", conn); + await using var reader = await cmd.ExecuteReaderAsync(Behavior); + await reader.ReadAsync(); + + await 
reader.IsDBNullAsync(0); // resumable, no consumption + _ = reader.IsDBNull(0); // resumable, no consumption + await using var stream = await reader.GetStreamAsync(0); // non-resumable + if (IsSequential) + Assert.That(() => reader.GetString(0), Throws.Exception.TypeOf()); + } + [Test] public async Task Seek_columns() { @@ -1167,7 +1184,7 @@ public async Task Bug3772() pgMock .WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Int4), new FieldDescription(PostgresTypeOIDs.Bytea)); + .WriteRowDescription(new FieldDescription(Int4Oid), new FieldDescription(ByteaOid)); var intValue = new byte[] { 0, 0, 0, 1 }; var byteValue = new byte[] { 1, 2, 3, 4 }; @@ -1209,13 +1226,19 @@ public async Task Dispose_does_not_swallow_exceptions([Values(true, false)] bool await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); await using var conn = await dataSource.OpenConnectionAsync(); + await using var tx = IsMultiplexing ? 
await conn.BeginTransactionAsync() : null; var pgMock = await postmasterMock.WaitForServerConnection(); + if (IsMultiplexing) + pgMock + .WriteEmptyQueryResponse() + .WriteReadyForQuery(TransactionStatus.InTransactionBlock); + // Write responses for the query, but break the connection before sending CommandComplete/ReadyForQuery await pgMock .WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Int4)) + .WriteRowDescription(new FieldDescription(DefaultPgTypes.DataTypeNameMap[DataTypeNames.Int4].Value)) .WriteDataRow(BitConverter.GetBytes(BinaryPrimitives.ReverseEndianness(1))) .FlushAsync(); @@ -1283,11 +1306,6 @@ public async Task GetBytes() Assert.That(actual, Is.EqualTo(expected)); Assert.That(reader.GetBytes(0, 0, null, 0, 0), Is.EqualTo(expected.Length), "Bad column length"); - Assert.That(() => reader.GetBytes(1, 0, null, 0, 0), Throws.Exception.TypeOf(), - "GetBytes on non-bytea"); - Assert.That(() => reader.GetBytes(1, 0, actual, 0, 1), - Throws.Exception.TypeOf(), - "GetBytes on non-bytea"); Assert.That(reader.GetString(1), Is.EqualTo("foo")); reader.GetBytes(2, 0, actual, 0, 2); // Jump to another column from the middle of the column @@ -1533,7 +1551,8 @@ public async Task GetChars() Assert.That(reader.GetChars(0, 0, actual, 0, 2), Is.EqualTo(2)); Assert.That(actual[0], Is.EqualTo(expected[0])); Assert.That(actual[1], Is.EqualTo(expected[1])); - Assert.That(reader.GetChars(0, 0, null, 0, 0), Is.EqualTo(expected.Length), "Bad column length"); + if (!IsSequential) + Assert.That(reader.GetChars(0, 0, null, 0, 0), Is.EqualTo(expected.Length), "Bad column length"); // Note: Unlike with bytea, finding out the length of the column consumes it (variable-width // UTF8 encoding) Assert.That(reader.GetChars(2, 0, actual, 0, 2), Is.EqualTo(2)); @@ -1728,7 +1747,7 @@ public async Task SafeReadException() { var dataSourceBuilder = CreateDataSourceBuilder(); // Temporarily reroute integer to go to a type handler which 
generates SafeReadExceptions - dataSourceBuilder.AddTypeResolverFactory(new ExplodingTypeHandlerResolverFactory(safe: true)); + dataSourceBuilder.AddTypeInfoResolver(new ExplodingTypeHandlerResolver(safe: true)); await using var dataSource = dataSourceBuilder.Build(); await using var connection = await dataSource.OpenConnectionAsync(); @@ -1745,14 +1764,14 @@ public async Task Non_SafeReadException() { var dataSourceBuilder = CreateDataSourceBuilder(); // Temporarily reroute integer to go to a type handler which generates some exception - dataSourceBuilder.AddTypeResolverFactory(new ExplodingTypeHandlerResolverFactory(safe: false)); + dataSourceBuilder.AddTypeInfoResolver(new ExplodingTypeHandlerResolver(safe: false)); await using var dataSource = dataSourceBuilder.Build(); await using var connection = await dataSource.OpenConnectionAsync(); await using var cmd = new NpgsqlCommand(@"SELECT 1, 'hello'", connection); await using var reader = await cmd.ExecuteReaderAsync(Behavior); await reader.ReadAsync(); - Assert.That(() => reader.GetInt32(0), Throws.Exception.With.Message.EqualTo("Non-safe read exception as requested")); + Assert.That(() => reader.GetInt32(0), Throws.Exception.With.Message.EqualTo("Broken")); Assert.That(connection.FullState, Is.EqualTo(ConnectionState.Broken)); Assert.That(connection.State, Is.EqualTo(ConnectionState.Closed)); } @@ -1774,7 +1793,7 @@ public async Task ReadAsync_cancel_command_soft() await pgMock .WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Int4)) + .WriteRowDescription(new FieldDescription(Int4Oid)) .WriteDataRow(BitConverter.GetBytes(BinaryPrimitives.ReverseEndianness(1))) .FlushAsync(); @@ -1823,7 +1842,7 @@ public async Task ReadAsync_cancel_soft() await pgMock .WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Int4)) + .WriteRowDescription(new FieldDescription(Int4Oid)) 
.WriteDataRow(BitConverter.GetBytes(BinaryPrimitives.ReverseEndianness(1))) .FlushAsync(); @@ -1874,7 +1893,7 @@ public async Task NextResult_cancel_soft() await pgMock .WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Int4)) + .WriteRowDescription(new FieldDescription(Int4Oid)) .WriteDataRow(BitConverter.GetBytes(BinaryPrimitives.ReverseEndianness(1))) .WriteCommandComplete() .FlushAsync(); @@ -1926,7 +1945,7 @@ public async Task ReadAsync_cancel_hard([Values(true, false)] bool passCancelled await pgMock .WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Int4)) + .WriteRowDescription(new FieldDescription(Int4Oid)) .WriteDataRow(BitConverter.GetBytes(BinaryPrimitives.ReverseEndianness(1))) .FlushAsync(); @@ -1970,7 +1989,7 @@ public async Task NextResultAsync_cancel_hard([Values(true, false)] bool passCan await pgMock .WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Int4)) + .WriteRowDescription(new FieldDescription(Int4Oid)) .WriteDataRow(BitConverter.GetBytes(BinaryPrimitives.ReverseEndianness(1))) .WriteCommandComplete() .FlushAsync(); @@ -2018,7 +2037,7 @@ public async Task GetFieldValueAsync_sequential_cancel([Values(true, false)] boo await pgMock .WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Bytea)) + .WriteRowDescription(new FieldDescription(ByteaOid)) .WriteDataRowWithFlush(new byte[10000]); using var cmd = new NpgsqlCommand("SELECT some_bytea FROM some_table", conn); @@ -2056,7 +2075,7 @@ public async Task IsDBNullAsync_sequential_cancel([Values(true, false)] bool pas await pgMock .WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Bytea), new FieldDescription(PostgresTypeOIDs.Int4)) + .WriteRowDescription(new FieldDescription(ByteaOid), new FieldDescription(Int4Oid)) 
.WriteDataRowWithFlush(new byte[10000], new byte[4]); using var cmd = new NpgsqlCommand("SELECT some_bytea, some_int FROM some_table", conn); @@ -2122,7 +2141,7 @@ public async Task GetFieldValueAsync_sequential_timeout() await pgMock .WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Bytea)) + .WriteRowDescription(new FieldDescription(ByteaOid)) .WriteDataRowWithFlush(new byte[10000]); using var cmd = new NpgsqlCommand("SELECT some_bytea FROM some_table", conn); @@ -2162,7 +2181,7 @@ public async Task IsDBNullAsync_sequential_timeout() await pgMock .WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Bytea), new FieldDescription(PostgresTypeOIDs.Int4)) + .WriteRowDescription(new FieldDescription(ByteaOid), new FieldDescription(Int4Oid)) .WriteDataRowWithFlush(new byte[10000], new byte[4]); using var cmd = new NpgsqlCommand("SELECT some_bytea, some_int FROM some_table", conn); @@ -2192,7 +2211,7 @@ public async Task Bug3446() await pgMock .WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Int4)) + .WriteRowDescription(new FieldDescription(Int4Oid)) .WriteDataRow(new byte[4]) .FlushAsync(); @@ -2231,52 +2250,43 @@ public ReaderTests(MultiplexingMode multiplexingMode, CommandBehavior behavior) #region Mock Type Handlers -class ExplodingTypeHandlerResolverFactory : TypeHandlerResolverFactory +class ExplodingTypeHandlerResolver : IPgTypeInfoResolver { readonly bool _safe; - public ExplodingTypeHandlerResolverFactory(bool safe) => _safe = safe; - public override TypeHandlerResolver Create(TypeMapper typeMapper, NpgsqlConnector connector) => new ExplodingTypeHandlerResolver(_safe); + public ExplodingTypeHandlerResolver(bool safe) => _safe = safe; - class ExplodingTypeHandlerResolver : TypeHandlerResolver + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) { - readonly bool _safe; - - public ExplodingTypeHandlerResolver(bool safe) => _safe = safe; + if (dataTypeName == DataTypeNames.Int4 && (type == typeof(int) || type is null)) + return new(options, new ExplodingTypeHandler(_safe), DataTypeNames.Int4); - public override NpgsqlTypeHandler? ResolveByDataTypeName(string typeName) => - typeName == "integer" ? new ExplodingTypeHandler(null!, _safe) : null; - public override NpgsqlTypeHandler? ResolveByClrType(Type type) => null; + return null; } } -class ExplodingTypeHandler : NpgsqlSimpleTypeHandler +class ExplodingTypeHandler : PgBufferedConverter { readonly bool _safe; - internal ExplodingTypeHandler(PostgresType postgresType, bool safe) : base(postgresType) => _safe = safe; + internal ExplodingTypeHandler(bool safe) => _safe = safe; - public override int Read(NpgsqlReadBuffer buf, int len, FieldDescription? fieldDescription = null) - { - buf.ReadInt32(); + public override Size GetSize(SizeContext context, int value, ref object? writeState) + => throw new NotSupportedException(); - throw _safe - ? new Exception("Safe read exception as requested") - : buf.Connector.Break(new Exception("Non-safe read exception as requested")); - } + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + => CanConvertBufferedDefault(format, out bufferRequirements); - public override int ValidateAndGetLength(int value, NpgsqlParameter? parameter) => throw new NotSupportedException(); - public override int ValidateObjectAndGetLength(object? value, ref NpgsqlLengthCache? lengthCache, NpgsqlParameter? parameter) - => throw new NotSupportedException(); - public override void Write(int value, NpgsqlWriteBuffer buf, NpgsqlParameter? parameter) => throw new NotSupportedException(); - - public override Task WriteObjectWithLength( - object? value, - NpgsqlWriteBuffer buf, - NpgsqlLengthCache? lengthCache, - NpgsqlParameter? 
parameter, - bool async, - CancellationToken cancellationToken = default) + protected override void WriteCore(PgWriter writer, int value) => throw new NotSupportedException(); + + protected override int ReadCore(PgReader reader) + { + if (_safe) + throw new Exception("Safe read exception as requested"); + + reader.BreakConnection(); + return default; + } } #endregion diff --git a/test/Npgsql.Tests/Replication/CommonReplicationTests.cs b/test/Npgsql.Tests/Replication/CommonReplicationTests.cs index 9033d14e31..36a11b434a 100644 --- a/test/Npgsql.Tests/Replication/CommonReplicationTests.cs +++ b/test/Npgsql.Tests/Replication/CommonReplicationTests.cs @@ -1,5 +1,4 @@ using System; -using System.Collections.Concurrent; using System.Collections.Generic; using System.IO; using System.Runtime.CompilerServices; diff --git a/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs b/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs index d8fd2ed3a2..8497646f9d 100644 --- a/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs +++ b/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs @@ -33,7 +33,6 @@ namespace Npgsql.Tests.Replication; // [TestFixture(ProtocolVersion.V3, ReplicationDataMode.TextReplicationDataMode, TransactionMode.NonStreamingTransactionMode)] // [TestFixture(ProtocolVersion.V3, ReplicationDataMode.BinaryReplicationDataMode, TransactionMode.DefaultTransactionMode)] // [TestFixture(ProtocolVersion.V3, ReplicationDataMode.BinaryReplicationDataMode, TransactionMode.StreamingTransactionMode)] -[Platform(Exclude = "MacOsX", Reason = "Replication tests are flaky in CI on Mac")] [NonParallelizable] // These tests aren't designed to be parallelizable public class PgOutputReplicationTests : SafeReplicationTestBase { diff --git a/test/Npgsql.Tests/Replication/TestDecodingReplicationTests.cs b/test/Npgsql.Tests/Replication/TestDecodingReplicationTests.cs index 732c1a3e67..5d7c633f6c 100644 --- 
a/test/Npgsql.Tests/Replication/TestDecodingReplicationTests.cs +++ b/test/Npgsql.Tests/Replication/TestDecodingReplicationTests.cs @@ -1,5 +1,4 @@ -using System; -using System.Collections.Generic; +using System.Collections.Generic; using System.Threading; using System.Threading.Tasks; using NUnit.Framework; diff --git a/test/Npgsql.Tests/SchemaTests.cs b/test/Npgsql.Tests/SchemaTests.cs index 5deee67a0d..2a143cccfd 100644 --- a/test/Npgsql.Tests/SchemaTests.cs +++ b/test/Npgsql.Tests/SchemaTests.cs @@ -535,6 +535,14 @@ await conn.ExecuteNonQueryAsync($@" Assert.That(row["data_type"], Is.EqualTo($"{schema}.{enumName}")); } + [Test] + public async Task SlimBuilder_introspection_without_unsupported_type_exceptions() + { + await using var dataSource = new NpgsqlSlimDataSourceBuilder(ConnectionString).Build(); + await using var conn = await dataSource.OpenConnectionAsync(); + Assert.That(() => GetSchema(conn, DbMetaDataCollectionNames.DataTypes), Throws.Nothing); + } + public SchemaTests(SyncOrAsync syncOrAsync) : base(syncOrAsync) { } // ReSharper disable MethodHasAsyncOverload diff --git a/test/Npgsql.Tests/SecurityTests.cs b/test/Npgsql.Tests/SecurityTests.cs index 9aa2ee7d50..8600942969 100644 --- a/test/Npgsql.Tests/SecurityTests.cs +++ b/test/Npgsql.Tests/SecurityTests.cs @@ -1,4 +1,6 @@ using System; +using System.IO; +using System.Runtime.InteropServices; using System.Security.Authentication; using System.Threading; using System.Threading.Tasks; @@ -153,6 +155,7 @@ public void Bug1718() csb.SslMode = SslMode.Require; }); using var conn = dataSource.OpenConnection(); + using var tx = conn.BeginTransaction(); using var cmd = CreateSleepCommand(conn, 10000); var cts = new CancellationTokenSource(1000).Token; Assert.That(async () => await cmd.ExecuteNonQueryAsync(cts), Throws.Exception @@ -276,6 +279,13 @@ public async Task Connect_with_only_non_ssl_allowed_user([Values] bool multiplex await using var conn = await dataSource.OpenConnectionAsync(); 
Assert.IsFalse(conn.IsSecure); } + catch (NpgsqlException ex) when (RuntimeInformation.IsOSPlatform(OSPlatform.Windows) && ex.InnerException is IOException) + { + // Windows server to windows client invites races that can cause the socket to be reset before all data can be read. + // https://www.postgresql.org/message-id/flat/90b34057-4176-7bb0-0dbb-9822a5f6425b%40greiz-reinsdorf.de + // https://www.postgresql.org/message-id/flat/16678-253e48d34dc0c376@postgresql.org + Assert.Ignore(); + } catch (Exception e) when (!IsOnBuildServer) { Console.WriteLine(e); @@ -387,20 +397,23 @@ public async Task Bug4305_Secure([Values] bool async) } await using var __ = conn; - var originalConnector = conn.Connector; - await using var cmd = conn.CreateCommand(); - cmd.CommandText = "select pg_sleep(30)"; - cmd.CommandTimeout = 3; - var ex = async - ? Assert.ThrowsAsync(() => cmd.ExecuteNonQueryAsync())! - : Assert.Throws(() => cmd.ExecuteNonQuery())!; - Assert.That(ex.InnerException, Is.TypeOf()); + await using (var tx = await conn.BeginTransactionAsync()) + { + var originalConnector = conn.Connector; - await conn.CloseAsync(); - await conn.OpenAsync(); + cmd.CommandText = "select pg_sleep(30)"; + cmd.CommandTimeout = 3; + var ex = async + ? Assert.ThrowsAsync(() => cmd.ExecuteNonQueryAsync())! 
+ : Assert.Throws(() => cmd.ExecuteNonQuery())!; + Assert.That(ex.InnerException, Is.TypeOf()); - Assert.AreSame(originalConnector, conn.Connector); + await conn.CloseAsync(); + await conn.OpenAsync(); + + Assert.AreSame(originalConnector, conn.Connector); + } cmd.CommandText = "SELECT 1"; if (async) diff --git a/test/Npgsql.Tests/SqlQueryParserTests.cs b/test/Npgsql.Tests/SqlQueryParserTests.cs index d161823114..1044b707fc 100644 --- a/test/Npgsql.Tests/SqlQueryParserTests.cs +++ b/test/Npgsql.Tests/SqlQueryParserTests.cs @@ -1,5 +1,4 @@ -using System; -using System.Collections.Generic; +using System.Collections.Generic; using System.Data; using System.Linq; using NUnit.Framework; diff --git a/test/Npgsql.Tests/Support/AssemblySetUp.cs b/test/Npgsql.Tests/Support/AssemblySetUp.cs index 851e452acb..f1619ecec4 100644 --- a/test/Npgsql.Tests/Support/AssemblySetUp.cs +++ b/test/Npgsql.Tests/Support/AssemblySetUp.cs @@ -1,7 +1,5 @@ -using Microsoft.Extensions.Logging; -using Npgsql; +using Npgsql; using Npgsql.Tests; -using Npgsql.Tests.Support; using NUnit.Framework; using System; using System.Threading; diff --git a/test/Npgsql.Tests/Support/MultiplexingTestBase.cs b/test/Npgsql.Tests/Support/MultiplexingTestBase.cs index c7483390e0..892dd79f5e 100644 --- a/test/Npgsql.Tests/Support/MultiplexingTestBase.cs +++ b/test/Npgsql.Tests/Support/MultiplexingTestBase.cs @@ -34,4 +34,4 @@ public enum MultiplexingMode { NonMultiplexing, Multiplexing -} \ No newline at end of file +} diff --git a/test/Npgsql.Tests/Support/PgPostmasterMock.cs b/test/Npgsql.Tests/Support/PgPostmasterMock.cs index 7cc33c1877..e45c1a7f28 100644 --- a/test/Npgsql.Tests/Support/PgPostmasterMock.cs +++ b/test/Npgsql.Tests/Support/PgPostmasterMock.cs @@ -7,7 +7,6 @@ using System.Threading.Channels; using System.Threading.Tasks; using Npgsql.Internal; -using Npgsql.Util; namespace Npgsql.Tests.Support; @@ -18,8 +17,8 @@ class PgPostmasterMock : IAsyncDisposable const int CancelRequestCode = 1234 << 16 | 
5678; const int SslRequest = 80877103; - static readonly Encoding Encoding = PGUtil.UTF8Encoding; - static readonly Encoding RelaxedEncoding = PGUtil.RelaxedUTF8Encoding; + static readonly Encoding Encoding = NpgsqlWriteBuffer.UTF8Encoding; + static readonly Encoding RelaxedEncoding = NpgsqlWriteBuffer.RelaxedUTF8Encoding; readonly Socket _socket; readonly List _allServers = new(); diff --git a/test/Npgsql.Tests/Support/PgServerMock.cs b/test/Npgsql.Tests/Support/PgServerMock.cs index 6a83cc0248..0135059d0d 100644 --- a/test/Npgsql.Tests/Support/PgServerMock.cs +++ b/test/Npgsql.Tests/Support/PgServerMock.cs @@ -7,15 +7,19 @@ using System.Threading.Tasks; using Npgsql.BackendMessages; using Npgsql.Internal; +using Npgsql.Internal.Postgres; using Npgsql.TypeMapping; -using Npgsql.Util; using NUnit.Framework; namespace Npgsql.Tests.Support; class PgServerMock : IDisposable { - static readonly Encoding Encoding = PGUtil.UTF8Encoding; + static uint BoolOid => DefaultPgTypes.DataTypeNameMap[DataTypeNames.Bool].Value; + static uint Int4Oid => DefaultPgTypes.DataTypeNameMap[DataTypeNames.Int4].Value; + static uint TextOid => DefaultPgTypes.DataTypeNameMap[DataTypeNames.Text].Value; + + static readonly Encoding Encoding = NpgsqlWriteBuffer.UTF8Encoding; readonly NetworkStream _stream; readonly NpgsqlReadBuffer _readBuffer; @@ -90,12 +94,12 @@ internal Task SendMockState(MockState state) return WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Bool)) + .WriteRowDescription(new FieldDescription(BoolOid)) .WriteDataRow(BitConverter.GetBytes(isStandby)) .WriteCommandComplete() .WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Text)) + .WriteRowDescription(new FieldDescription(TextOid)) .WriteDataRow(Encoding.ASCII.GetBytes(transactionReadOnly)) .WriteCommandComplete() .WriteReadyForQuery() @@ -159,7 +163,7 @@ internal Task FlushAsync() internal Task 
WriteScalarResponseAndFlush(int value) => WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Int4)) + .WriteRowDescription(new FieldDescription(Int4Oid)) .WriteDataRow(BitConverter.GetBytes(BinaryPrimitives.ReverseEndianness(value))) .WriteCommandComplete() .WriteReadyForQuery() @@ -168,7 +172,7 @@ internal Task WriteScalarResponseAndFlush(int value) internal Task WriteScalarResponseAndFlush(bool value) => WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Bool)) + .WriteRowDescription(new FieldDescription(BoolOid)) .WriteDataRow(BitConverter.GetBytes(value)) .WriteCommandComplete() .WriteReadyForQuery() @@ -177,7 +181,7 @@ internal Task WriteScalarResponseAndFlush(bool value) internal Task WriteScalarResponseAndFlush(string value) => WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(PostgresTypeOIDs.Text)) + .WriteRowDescription(new FieldDescription(TextOid)) .WriteDataRow(Encoding.ASCII.GetBytes(value)) .WriteCommandComplete() .WriteReadyForQuery() @@ -219,7 +223,7 @@ internal PgServerMock WriteRowDescription(params FieldDescription[] fields) _writeBuffer.WriteUInt32(field.TypeOID); _writeBuffer.WriteInt16(field.TypeSize); _writeBuffer.WriteInt32(field.TypeModifier); - _writeBuffer.WriteInt16((short)field.FormatCode); + _writeBuffer.WriteInt16(field.DataFormat.ToFormatCode()); } return this; @@ -233,6 +237,14 @@ internal PgServerMock WriteNoData() return this; } + internal PgServerMock WriteEmptyQueryResponse() + { + CheckDisposed(); + _writeBuffer.WriteByte((byte)BackendMessageCode.EmptyQueryResponse); + _writeBuffer.WriteInt32(4); + return this; + } + internal PgServerMock WriteDataRow(params byte[][] columnValues) { CheckDisposed(); diff --git a/test/Npgsql.Tests/Support/TestBase.cs b/test/Npgsql.Tests/Support/TestBase.cs index 126a3575fd..81bac44b3e 100644 --- a/test/Npgsql.Tests/Support/TestBase.cs +++ 
b/test/Npgsql.Tests/Support/TestBase.cs @@ -40,12 +40,13 @@ public async Task AssertType( bool isDefaultForWriting = true, bool? isDefault = null, bool isNpgsqlDbTypeInferredFromClrType = true, - Func? comparer = null) + Func? comparer = null, + bool skipArrayCheck = false) { await using var connection = await OpenConnectionAsync(); return await AssertType( connection, value, sqlLiteral, pgTypeName, npgsqlDbType, dbType, inferredDbType, isDefaultForReading, isDefaultForWriting, - isDefault, isNpgsqlDbTypeInferredFromClrType, comparer); + isDefault, isNpgsqlDbTypeInferredFromClrType, comparer, skipArrayCheck); } public async Task AssertType( @@ -60,12 +61,13 @@ public async Task AssertType( bool isDefaultForWriting = true, bool? isDefault = null, bool isNpgsqlDbTypeInferredFromClrType = true, - Func? comparer = null) + Func? comparer = null, + bool skipArrayCheck = false) { await using var connection = await dataSource.OpenConnectionAsync(); return await AssertType(connection, value, sqlLiteral, pgTypeName, npgsqlDbType, dbType, inferredDbType, isDefaultForReading, - isDefaultForWriting, isDefault, isNpgsqlDbTypeInferredFromClrType, comparer); + isDefaultForWriting, isDefault, isNpgsqlDbTypeInferredFromClrType, comparer, skipArrayCheck); } public async Task AssertType( @@ -80,19 +82,27 @@ public async Task AssertType( bool isDefaultForWriting = true, bool? isDefault = null, bool isNpgsqlDbTypeInferredFromClrType = true, - Func? comparer = null) + Func? 
comparer = null, + bool skipArrayCheck = false) { if (isDefault is not null) isDefaultForReading = isDefaultForWriting = isDefault.Value; - await AssertTypeWrite(connection, () => value, sqlLiteral, pgTypeName, npgsqlDbType, dbType, inferredDbType, isDefaultForWriting, isNpgsqlDbTypeInferredFromClrType); - return await AssertTypeRead(connection, sqlLiteral, pgTypeName, value, isDefaultForReading, comparer); + await AssertTypeWrite(connection, () => value, sqlLiteral, pgTypeName, npgsqlDbType, dbType, inferredDbType, isDefaultForWriting, isNpgsqlDbTypeInferredFromClrType, skipArrayCheck); + return await AssertTypeRead(connection, sqlLiteral, pgTypeName, value, isDefaultForReading, comparer, fieldType: null, skipArrayCheck); } - public async Task AssertTypeRead(string sqlLiteral, string pgTypeName, T expected, bool isDefault = true) + public async Task AssertTypeRead(string sqlLiteral, string pgTypeName, T expected, bool isDefault = true, bool skipArrayCheck = false) { await using var connection = await OpenConnectionAsync(); - return await AssertTypeRead(connection, sqlLiteral, pgTypeName, expected, isDefault); + return await AssertTypeRead(connection, sqlLiteral, pgTypeName, expected, isDefault, comparer: null, fieldType: null, skipArrayCheck); + } + + public async Task AssertTypeRead(NpgsqlDataSource dataSource, string sqlLiteral, string pgTypeName, T expected, + bool isDefault = true, Func? comparer = null, Type? fieldType = null, bool skipArrayCheck = false) + { + await using var connection = await dataSource.OpenConnectionAsync(); + return await AssertTypeRead(connection, sqlLiteral, pgTypeName, expected, isDefault, comparer, fieldType, skipArrayCheck); } public async Task AssertTypeWrite( @@ -104,12 +114,13 @@ public async Task AssertTypeWrite( DbType? dbType = null, DbType? 
inferredDbType = null, bool isDefault = true, - bool isNpgsqlDbTypeInferredFromClrType = true) + bool isNpgsqlDbTypeInferredFromClrType = true, + bool skipArrayCheck = false) { await using var connection = await dataSource.OpenConnectionAsync(); await AssertTypeWrite(connection, () => value, expectedSqlLiteral, pgTypeName, npgsqlDbType, dbType, inferredDbType, isDefault, - isNpgsqlDbTypeInferredFromClrType); + isNpgsqlDbTypeInferredFromClrType, skipArrayCheck); } public Task AssertTypeWrite( @@ -120,9 +131,10 @@ public Task AssertTypeWrite( DbType? dbType = null, DbType? inferredDbType = null, bool isDefault = true, - bool isNpgsqlDbTypeInferredFromClrType = true) + bool isNpgsqlDbTypeInferredFromClrType = true, + bool skipArrayCheck = false) => AssertTypeWrite(() => value, expectedSqlLiteral, pgTypeName, npgsqlDbType, dbType, inferredDbType, isDefault, - isNpgsqlDbTypeInferredFromClrType); + isNpgsqlDbTypeInferredFromClrType, skipArrayCheck); public async Task AssertTypeWrite( Func valueFactory, @@ -132,10 +144,11 @@ public async Task AssertTypeWrite( DbType? dbType = null, DbType? inferredDbType = null, bool isDefault = true, - bool isNpgsqlDbTypeInferredFromClrType = true) + bool isNpgsqlDbTypeInferredFromClrType = true, + bool skipArrayCheck = false) { await using var connection = await OpenConnectionAsync(); - await AssertTypeWrite(connection, valueFactory, expectedSqlLiteral, pgTypeName, npgsqlDbType, dbType, inferredDbType, isDefault, isNpgsqlDbTypeInferredFromClrType); + await AssertTypeWrite(connection, valueFactory, expectedSqlLiteral, pgTypeName, npgsqlDbType, dbType, inferredDbType, isDefault, isNpgsqlDbTypeInferredFromClrType, skipArrayCheck); } internal static async Task AssertTypeRead( @@ -144,7 +157,35 @@ internal static async Task AssertTypeRead( string pgTypeName, T expected, bool isDefault = true, - Func? comparer = null) + Func? comparer = null, + Type? 
fieldType = null, + bool skipArrayCheck = false) + { + var result = await AssertTypeReadCore(connection, sqlLiteral, pgTypeName, expected, isDefault, comparer); + + // Check the corresponding array type as well + if (!skipArrayCheck && !pgTypeName.EndsWith("[]", StringComparison.Ordinal)) + { + await AssertTypeReadCore( + connection, + ArrayLiteral(sqlLiteral), + pgTypeName + "[]", + new[] { expected, expected }, + isDefault, + comparer is null ? null : (array1, array2) => comparer(array1[0], array2[0]) && comparer(array1[1], array2[1])); + } + + return result; + } + + internal static async Task AssertTypeReadCore( + NpgsqlConnection connection, + string sqlLiteral, + string pgTypeName, + T expected, + bool isDefault = true, + Func? comparer = null, + Type? fieldType = null) { if (sqlLiteral.Contains('\'')) sqlLiteral = sqlLiteral.Replace("'", "''"); @@ -166,7 +207,7 @@ internal static async Task AssertTypeRead( if (isDefault) { // For arrays, GetFieldType always returns typeof(Array), since PG arrays can have arbitrary dimensionality - Assert.That(reader.GetFieldType(0), Is.EqualTo(dataTypeName.EndsWith("[]") ? typeof(Array) : typeof(T)), + Assert.That(reader.GetFieldType(0), Is.EqualTo(dataTypeName.EndsWith("[]") ? typeof(Array) : fieldType ?? typeof(T)), $"Got wrong result from GetFieldType when reading '{truncatedSqlLiteral}'"); } @@ -179,6 +220,38 @@ internal static async Task AssertTypeRead( } internal static async Task AssertTypeWrite( + NpgsqlConnection connection, + Func valueFactory, + string expectedSqlLiteral, + string pgTypeName, + NpgsqlDbType? npgsqlDbType, + DbType? dbType = null, + DbType? 
inferredDbType = null, + bool isDefault = true, + bool isNpgsqlDbTypeInferredFromClrType = true, + bool skipArrayCheck = false) + { + await AssertTypeWriteCore( + connection, valueFactory, expectedSqlLiteral, pgTypeName, npgsqlDbType, dbType, inferredDbType, isDefault, + isNpgsqlDbTypeInferredFromClrType); + + // Check the corresponding array type as well + if (!skipArrayCheck && !pgTypeName.EndsWith("[]", StringComparison.Ordinal)) + { + await AssertTypeWriteCore( + connection, + () => new[] { valueFactory(), valueFactory() }, + ArrayLiteral(expectedSqlLiteral), + pgTypeName + "[]", + npgsqlDbType | NpgsqlDbType.Array, + dbType: null, + inferredDbType: null, + isDefault, + isNpgsqlDbTypeInferredFromClrType); + } + } + + internal static async Task AssertTypeWriteCore( NpgsqlConnection connection, Func valueFactory, string expectedSqlLiteral, @@ -198,7 +271,10 @@ internal static async Task AssertTypeWrite( // Strip any facet information (length/precision/scale) var parenIndex = pgTypeName.IndexOf('('); - var pgTypeNameWithoutFacets = parenIndex > -1 ? pgTypeName[..parenIndex] : pgTypeName; + // var pgTypeNameWithoutFacets = parenIndex > -1 ? pgTypeName[..parenIndex] : pgTypeName; + var pgTypeNameWithoutFacets = parenIndex > -1 + ? pgTypeName[..parenIndex] + pgTypeName[(pgTypeName.IndexOf(')') + 1)..] + : pgTypeName; // We test the following scenarios (between 2 and 5 in total): // 1. 
With NpgsqlDbType explicitly set @@ -241,14 +317,14 @@ internal static async Task AssertTypeWrite( // With (non-generic) value only p = new NpgsqlParameter { Value = valueFactory() }; cmd.Parameters.Add(p); - errorIdentifier[++errorIdentifierIndex] = "Value only (non-generic)"; + errorIdentifier[++errorIdentifierIndex] = $"Value only (type {p.Value!.GetType().Name}, non-generic)"; if (isNpgsqlDbTypeInferredFromClrType) CheckInference(); // With (generic) value only p = new NpgsqlParameter { TypedValue = valueFactory() }; cmd.Parameters.Add(p); - errorIdentifier[++errorIdentifierIndex] = "Value only (generic)"; + errorIdentifier[++errorIdentifierIndex] = $"Value only (type {p.Value!.GetType().Name}, generic)"; if (isNpgsqlDbTypeInferredFromClrType) CheckInference(); } @@ -294,6 +370,8 @@ public async Task AssertTypeUnsupportedRead(string sqlLiteral, string pgTypeName dataSource ??= DefaultDataSource; await using var conn = await dataSource.OpenConnectionAsync(); + // Make sure we don't poison the connection with a fault, potentially terminating other perfectly passing tests as well. + await using var tx = dataSource.Settings.Multiplexing ? await conn.BeginTransactionAsync() : null; await using var cmd = new NpgsqlCommand($"SELECT '{sqlLiteral}'::{pgTypeName}", conn); await using var reader = await cmd.ExecuteReaderAsync(); await reader.ReadAsync(); @@ -307,7 +385,7 @@ public Task AssertTypeUnsupportedRead(string sqlLiteral public async Task AssertTypeUnsupportedRead(string sqlLiteral, string pgTypeName, NpgsqlDataSource? dataSource = null) where TException : Exception { - dataSource ??= DefaultDataSource; + dataSource ??= DataSource; await using var conn = await dataSource.OpenConnectionAsync(); await using var cmd = new NpgsqlCommand($"SELECT '{sqlLiteral}'::{pgTypeName}", conn); @@ -323,9 +401,11 @@ public Task AssertTypeUnsupportedWrite(T value, string? public async Task AssertTypeUnsupportedWrite(T value, string? pgTypeName = null, NpgsqlDataSource? 
dataSource = null) where TException : Exception { - dataSource ??= DefaultDataSource; + dataSource ??= DataSource; await using var conn = await dataSource.OpenConnectionAsync(); + // Make sure we don't poison the connection with a fault, potentially terminating other perfectly passing tests as well. + await using var tx = dataSource.Settings.Multiplexing ? await conn.BeginTransactionAsync() : null; await using var cmd = new NpgsqlCommand("SELECT $1", conn) { Parameters = { new() { Value = value } } @@ -352,6 +432,31 @@ public bool Equals(T? x, T? y) public int GetHashCode(T obj) => throw new NotSupportedException(); } + // For array quoting rules, see array_out in https://github.com/postgres/postgres/blob/master/src/backend/utils/adt/arrayfuncs.c + static string ArrayLiteral(string elementLiteral) + { + switch (elementLiteral) + { + case "": + elementLiteral = "\"\""; + break; + case "NULL": + elementLiteral = "\"NULL\""; + break; + default: + // Escape quotes and backslashes, quote for special chars + elementLiteral = elementLiteral.Replace("\\", "\\\\").Replace("\"", "\\\""); + if (elementLiteral.Any(c => c is '{' or '}' or ',' or '"' or '\\' || char.IsWhiteSpace(c))) + { + elementLiteral = '"' + elementLiteral + '"'; + } + + break; + } + + return $"{{{elementLiteral},{elementLiteral}}}"; + } + #endregion Type testing #region Utilities for use by tests @@ -364,16 +469,23 @@ protected virtual NpgsqlDataSourceBuilder CreateDataSourceBuilder() protected virtual NpgsqlDataSource CreateDataSource() => CreateDataSource(ConnectionString); - protected virtual NpgsqlDataSource CreateDataSource(string connectionString) + protected NpgsqlDataSource CreateDataSource(string connectionString) => NpgsqlDataSource.Create(connectionString); - protected virtual NpgsqlDataSource CreateDataSource(Action connectionStringBuilderAction) + protected NpgsqlDataSource CreateDataSource(Action connectionStringBuilderAction) { var connectionStringBuilder = new 
NpgsqlConnectionStringBuilder(ConnectionString); connectionStringBuilderAction(connectionStringBuilder); return NpgsqlDataSource.Create(connectionStringBuilder); } + protected NpgsqlDataSource CreateDataSource(Action configure) + { + var builder = new NpgsqlDataSourceBuilder(ConnectionString); + configure(builder); + return builder.Build(); + } + protected static NpgsqlDataSource GetDataSource(string connectionString) { if (!DataSources.TryGetValue(connectionString, out var dataSource)) @@ -412,8 +524,12 @@ protected virtual NpgsqlDataSource CreateLoggingDataSource( protected NpgsqlDataSource DefaultDataSource => GetDataSource(ConnectionString); + protected virtual NpgsqlDataSource DataSource => DefaultDataSource; + + protected void ClearDataSources() => DataSources.Clear(); + protected virtual NpgsqlConnection CreateConnection() - => DefaultDataSource.CreateConnection(); + => DataSource.CreateConnection(); protected virtual NpgsqlConnection OpenConnection() { diff --git a/test/Npgsql.Tests/TestUtil.cs b/test/Npgsql.Tests/TestUtil.cs index 1fa69cb6e1..35df4e6e4c 100644 --- a/test/Npgsql.Tests/TestUtil.cs +++ b/test/Npgsql.Tests/TestUtil.cs @@ -57,7 +57,7 @@ public static void MinimumPgVersion(NpgsqlDataSource dataSource, string minVersi MinimumPgVersion(connection, minVersion, ignoreText); } - public static void MinimumPgVersion(NpgsqlConnection conn, string minVersion, string? ignoreText = null) + public static bool MinimumPgVersion(NpgsqlConnection conn, string minVersion, string? ignoreText = null) { var min = new Version(minVersion); if (conn.PostgreSqlVersion < min) @@ -66,7 +66,10 @@ public static void MinimumPgVersion(NpgsqlConnection conn, string minVersion, st if (ignoreText != null) msg += ": " + ignoreText; Assert.Ignore(msg); + return false; } + + return true; } public static void MaximumPgVersionExclusive(NpgsqlConnection conn, string maxVersion, string? 
ignoreText = null) @@ -105,16 +108,24 @@ public static Task EnsureExtensionAsync(NpgsqlConnection conn, string extension, static async Task EnsureExtension(NpgsqlConnection conn, string extension, string? minVersion, bool async) { - if (minVersion != null) - MinimumPgVersion(conn, minVersion, $"The extension '{extension}' only works for PostgreSQL {minVersion} and higher."); + if (minVersion != null && !MinimumPgVersion(conn, minVersion, $"The extension '{extension}' only works for PostgreSQL {minVersion} and higher.")) + return; if (conn.PostgreSqlVersion < MinCreateExtensionVersion) Assert.Ignore($"The 'CREATE EXTENSION' command only works for PostgreSQL {MinCreateExtensionVersion} and higher."); - if (async) - await conn.ExecuteNonQueryAsync($"CREATE EXTENSION IF NOT EXISTS {extension}"); - else - conn.ExecuteNonQuery($"CREATE EXTENSION IF NOT EXISTS {extension}"); + try + { + if (async) + await conn.ExecuteNonQueryAsync($"CREATE EXTENSION IF NOT EXISTS {extension}"); + else + conn.ExecuteNonQuery($"CREATE EXTENSION IF NOT EXISTS {extension}"); + } + catch (PostgresException ex) when (ex.ConstraintName == "pg_extension_name_index") + { + // The extension is already installed, but we can race across threads. 
+ // https://stackoverflow.com/questions/63104126/create-extention-if-not-exists-doesnt-really-check-if-extention-does-not-exis + } conn.ReloadTypes(); } @@ -154,6 +165,7 @@ static async Task IgnoreIfFeatureNotSupported(NpgsqlConnection conn, string test public static async Task EnsurePostgis(NpgsqlConnection conn) { + var isPreRelease = IsPgPrerelease(conn); try { await EnsureExtensionAsync(conn, "postgis"); @@ -161,10 +173,14 @@ public static async Task EnsurePostgis(NpgsqlConnection conn) catch (PostgresException e) when (e.SqlState == PostgresErrorCodes.UndefinedFile) { // PostGIS packages aren't available for PostgreSQL prereleases - if (IsPgPrerelease(conn)) + if (isPreRelease) { Assert.Ignore($"PostGIS could not be installed, but PostgreSQL is prerelease ({conn.ServerVersion}), ignoring test suite."); } + else + { + throw; + } } } @@ -360,12 +376,10 @@ internal static IDisposable SetCurrentCulture(CultureInfo culture) return new DeferredExecutionDisposable(() => CultureInfo.CurrentCulture = oldCulture); } - internal static IDisposable DisableSqlRewriting() + internal static IDisposable DisableSqlRewriting(Action clearDataSources) { #if DEBUG - // We clear the pools to make sure we don't accidentally reuse a pool - // Since EnableSqlRewriting is a global change - PoolManager.Reset(); + clearDataSources(); NpgsqlCommand.EnableSqlRewriting = false; return new DeferredExecutionDisposable(() => NpgsqlCommand.EnableSqlRewriting = true); #else diff --git a/test/Npgsql.Tests/TypeMapperTests.cs b/test/Npgsql.Tests/TypeMapperTests.cs index 55db858600..92db0bdea1 100644 --- a/test/Npgsql.Tests/TypeMapperTests.cs +++ b/test/Npgsql.Tests/TypeMapperTests.cs @@ -1,87 +1,15 @@ using Npgsql.Internal; -using Npgsql.Internal.TypeHandlers; -using Npgsql.Internal.TypeHandling; -using Npgsql.PostgresTypes; -using Npgsql.TypeMapping; using NUnit.Framework; using System; using System.Threading.Tasks; -using Npgsql.Internal.TypeMapping; +using Npgsql.Internal.Converters; +using 
Npgsql.Internal.Postgres; using static Npgsql.Tests.TestUtil; namespace Npgsql.Tests; public class TypeMapperTests : TestBase { -#pragma warning disable CS0618 // GlobalTypeMapper is obsolete - [Test, NonParallelizable] - public async Task Global_mapping() - { - await using var adminConnection = await OpenConnectionAsync(); - var type = await GetTempTypeName(adminConnection); - NpgsqlConnection.GlobalTypeMapper.MapEnum(type); - - try - { - await using var dataSource1 = CreateDataSource(); - - await using (var connection = await dataSource1.OpenConnectionAsync()) - { - await connection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS ENUM ('sad', 'ok', 'happy')"); - await connection.ReloadTypesAsync(); - - await AssertType(connection, Mood.Happy, "happy", type, npgsqlDbType: null); - } - - NpgsqlConnection.GlobalTypeMapper.UnmapEnum(type); - - // Global mapping changes have no effect on already-built data sources - await AssertType(dataSource1, Mood.Happy, "happy", type, npgsqlDbType: null); - - // But they do affect on new data sources - await using var dataSource2 = CreateDataSource(); - Assert.ThrowsAsync(() => AssertType(dataSource2, Mood.Happy, "happy", type, npgsqlDbType: null)); - } - finally - { - NpgsqlConnection.GlobalTypeMapper.UnmapEnum(type); - } - } - - [Test, NonParallelizable] - public async Task Global_mapping_reset() - { - await using var adminConnection = await OpenConnectionAsync(); - var type = await GetTempTypeName(adminConnection); - NpgsqlConnection.GlobalTypeMapper.MapEnum(type); - - try - { - await using var dataSource1 = CreateDataSource(); - - await using (var connection = await dataSource1.OpenConnectionAsync()) - { - await connection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS ENUM ('sad', 'ok', 'happy')"); - await connection.ReloadTypesAsync(); - } - - // A global mapping change has no effects on data sources which have already been built - NpgsqlConnection.GlobalTypeMapper.Reset(); - - // Global mapping changes have no effect on 
already-built data sources - await AssertType(dataSource1, Mood.Happy, "happy", type, npgsqlDbType: null); - - // But they do affect on new data sources - await using var dataSource2 = CreateDataSource(); - Assert.ThrowsAsync(() => AssertType(dataSource2, Mood.Happy, "happy", type, npgsqlDbType: null)); - } - finally - { - NpgsqlConnection.GlobalTypeMapper.Reset(); - } - } -#pragma warning restore CS0618 // GlobalTypeMapper is obsolete - [Test] public async Task ReloadTypes_across_connections_in_data_source() { @@ -91,7 +19,7 @@ public async Task ReloadTypes_across_connections_in_data_source() // via the data source. var dataSourceBuilder = CreateDataSourceBuilder(); - dataSourceBuilder.MapEnum(); + dataSourceBuilder.MapEnum(type); await using var dataSource = dataSourceBuilder.Build(); await using var connection1 = await dataSource.OpenConnectionAsync(); await using var connection2 = await dataSource.OpenConnectionAsync(); @@ -101,8 +29,8 @@ public async Task ReloadTypes_across_connections_in_data_source() // The data source type mapper has been replaced and connection1 should have the new mapper, but connection2 should retain the older // type mapper - where there's no mapping - as long as it's still open + Assert.ThrowsAsync(async () => await connection2.ExecuteScalarAsync($"SELECT 'happy'::{type}")); Assert.DoesNotThrowAsync(async () => await connection1.ExecuteScalarAsync($"SELECT 'happy'::{type}")); - Assert.ThrowsAsync(async () => await connection2.ExecuteScalarAsync($"SELECT 'happy'::{type}")); // Close connection2 and reopen to make sure it picks up the new type and mapping from the data source var connId = connection2.ProcessID; @@ -121,7 +49,7 @@ public async Task String_to_citext() await EnsureExtensionAsync(adminConnection, "citext"); var dataSourceBuilder = CreateDataSourceBuilder(); - dataSourceBuilder.AddTypeResolverFactory(new CitextToStringTypeHandlerResolverFactory()); + dataSourceBuilder.AddTypeInfoResolver(new 
CitextToStringTypeHandlerResolverFactory()); await using var dataSource = dataSourceBuilder.Build(); await using var connection = await dataSource.OpenConnectionAsync(); @@ -162,25 +90,15 @@ await conn.ExecuteNonQueryAsync(@$" #region Support - class CitextToStringTypeHandlerResolverFactory : TypeHandlerResolverFactory + class CitextToStringTypeHandlerResolverFactory : IPgTypeInfoResolver { - public override TypeHandlerResolver Create(TypeMapper typeMapper, NpgsqlConnector connector) - => new CitextToStringTypeHandlerResolver(connector); - - class CitextToStringTypeHandlerResolver : TypeHandlerResolver + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) { - readonly NpgsqlConnector _connector; - readonly PostgresType _pgCitextType; - - public CitextToStringTypeHandlerResolver(NpgsqlConnector connector) - { - _connector = connector; - _pgCitextType = connector.DatabaseInfo.GetPostgresTypeByName("citext"); - } - - public override NpgsqlTypeHandler? ResolveByClrType(Type type) - => type == typeof(string) ? new TextHandler(_pgCitextType, _connector.TextEncoding) : null; - public override NpgsqlTypeHandler? 
ResolveByDataTypeName(string typeName) => null; + if (type == typeof(string) || dataTypeName?.UnqualifiedName == "citext") + if (options.DatabaseInfo.TryGetPostgresTypeByName("citext", out var pgType)) + return new(options, new StringTextConverter(options.TextEncoding), options.ToCanonicalTypeId(pgType)); + + return null; } } diff --git a/test/Npgsql.Tests/Types/ArrayTests.cs b/test/Npgsql.Tests/Types/ArrayTests.cs index 5e56c75c50..6c929c07a7 100644 --- a/test/Npgsql.Tests/Types/ArrayTests.cs +++ b/test/Npgsql.Tests/Types/ArrayTests.cs @@ -1,11 +1,10 @@ using System; -using System.Collections; using System.Collections.Generic; using System.Data; using System.Linq; using System.Text; using System.Threading.Tasks; -using Npgsql.Internal.TypeHandlers; +using Npgsql.Internal.Converters; using NpgsqlTypes; using NUnit.Framework; using static Npgsql.Tests.TestUtil; @@ -75,6 +74,18 @@ public async Task Array_resolution() } } + [Test] + public async Task Throws_too_many_dimensions() + { + await using var conn = CreateConnection(); + await conn.OpenAsync(); + await using var cmd = new NpgsqlCommand("SELECT 1", conn); + cmd.Parameters.AddWithValue("p", new int[1, 1, 1, 1, 1, 1, 1, 1, 1]); // 9 dimensions + Assert.That( + () => cmd.ExecuteScalarAsync(), + Throws.Exception.TypeOf().With.Message.EqualTo("values (Parameter 'Postgres arrays can have at most 8 dimensions.')")); + } + [Test] public async Task Bind_int_then_array_of_int() { @@ -150,9 +161,9 @@ public async Task Nullable_ints_cannot_be_read_as_non_nullable() await using var reader = await cmd.ExecuteReaderAsync(); reader.Read(); - Assert.That(() => reader.GetFieldValue(0), Throws.Exception.TypeOf()); - Assert.That(() => reader.GetFieldValue>(0), Throws.Exception.TypeOf()); - Assert.That(() => reader.GetValue(0), Throws.Exception.TypeOf()); + Assert.That(() => reader.GetFieldValue(0), Throws.Exception.TypeOf()); + Assert.That(() => reader.GetFieldValue>(0), Throws.Exception.TypeOf()); + Assert.That(() => 
reader.GetValue(0), Throws.Exception.TypeOf()); } [Test, Description("Checks that PG arrays containing nulls are returned as set via ValueTypeArrayMode.")] @@ -184,9 +195,9 @@ public async Task Value_type_array_nullabilities(ArrayNullabilityMode mode) Assert.That(reader.GetValue(1), Is.EqualTo(new [,]{{1, 2}, {3, 4}})); reader.Read(); Assert.That(reader.GetFieldType(0), Is.EqualTo(typeof(Array))); - Assert.That(() => reader.GetValue(0), Throws.Exception.TypeOf()); + Assert.That(() => reader.GetValue(0), Throws.Exception.TypeOf()); Assert.That(reader.GetFieldType(1), Is.EqualTo(typeof(Array))); - Assert.That(() => reader.GetValue(1), Throws.Exception.TypeOf()); + Assert.That(() => reader.GetValue(1), Throws.Exception.TypeOf()); break; case ArrayNullabilityMode.Always: reader.Read(); @@ -271,8 +282,8 @@ public async Task Wrong_array_dimensions_throws() var reader = await cmd.ExecuteReaderAsync(); reader.Read(); - var ex = Assert.Throws(() => reader.GetFieldValue(0))!; - Assert.That(ex.Message, Is.EqualTo("Cannot read an array with 1 dimension(s) from an array with 2 dimension(s)")); + var ex = Assert.Throws(() => reader.GetFieldValue(0))!; + Assert.That(ex.Message, Does.StartWith("Cannot read an array value with 2 dimensions into a collection type with 1 dimension")); } [Test, Description("Verifies that an attempt to read an Array of value types that contains null values as array of a non-nullable type fails.")] @@ -289,8 +300,8 @@ public async Task Read_null_as_non_nullable_array_throws() Assert.That( () => reader.GetFieldValue(0), - Throws.Exception.TypeOf() - .With.Message.EqualTo(ArrayHandlerCore.ReadNonNullableCollectionWithNullsExceptionMessage)); + Throws.Exception.TypeOf() + .With.Message.EqualTo(PgArrayConverter.ReadNonNullableCollectionWithNullsExceptionMessage)); } @@ -308,8 +319,8 @@ public async Task Read_null_as_non_nullable_list_throws() Assert.That( () => reader.GetFieldValue>(0), - Throws.Exception.TypeOf() - 
.With.Message.EqualTo(ArrayHandlerCore.ReadNonNullableCollectionWithNullsExceptionMessage)); + Throws.Exception.TypeOf() + .With.Message.EqualTo(PgArrayConverter.ReadNonNullableCollectionWithNullsExceptionMessage)); } [Test, Description("Roundtrips a large, one-dimensional array of ints that will be chunked")] @@ -435,19 +446,6 @@ public async Task Array_of_byte_arrays() Assert.That(reader.GetProviderSpecificFieldType(0), Is.EqualTo(typeof(Array))); } - - [Test, Description("Roundtrips a non-generic IList as an array")] - // ReSharper disable once InconsistentNaming - public async Task IList_non_generic() - { - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT @p", conn); - var expected = new ArrayList(new[] { 1, 2, 3 }); - var p = new NpgsqlParameter("p", NpgsqlDbType.Array | NpgsqlDbType.Integer) { Value = expected }; - cmd.Parameters.Add(p); - Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(expected.ToArray())); - } - [Test, Description("Roundtrips a generic List as an array")] // ReSharper disable once InconsistentNaming public async Task IList_generic() @@ -477,11 +475,11 @@ public async Task IList_generic_fails_for_multidimensional_array() await using var reader = await cmd.ExecuteReaderAsync(); reader.Read(); Assert.That(reader.GetValue(0), Is.EqualTo(expected)); - var exception = Assert.Throws(() => + var exception = Assert.Throws(() => { reader.GetFieldValue>(0); })!; - Assert.That(exception.Message, Is.EqualTo("Can't read multidimensional array as List")); + Assert.That(exception.Message, Does.StartWith("Cannot read an array value with 2 dimensions")); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/844")] @@ -490,19 +488,7 @@ public async Task IEnumerable_throws_friendly_exception() await using var conn = await OpenConnectionAsync(); await using var cmd = new NpgsqlCommand("SELECT @p1", conn); cmd.Parameters.AddWithValue("p1", Enumerable.Range(1, 3)); - Assert.That(async () => await 
cmd.ExecuteScalarAsync(), Throws.Exception.TypeOf().With.Message.Contains("array or List")); - } - - [Test, IssueLink("https://github.com/npgsql/npgsql/issues/960")] - public async Task Mixed_element_types() - { - var mixedList = new ArrayList { 1, "yo" }; - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT @p1", conn); - cmd.Parameters.AddWithValue("p1", NpgsqlDbType.Array | NpgsqlDbType.Integer, mixedList); - Assert.That(async () => await cmd.ExecuteNonQueryAsync(), Throws.Exception - .TypeOf() - .With.Message.Contains("mix")); + Assert.That(async () => await cmd.ExecuteScalarAsync(), Throws.Exception.TypeOf().With.Property("InnerException").Message.Contains("array or List")); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/960")] @@ -515,17 +501,8 @@ public async Task Jagged_arrays_not_supported() await using var cmd = new NpgsqlCommand("SELECT @p1", conn); cmd.Parameters.AddWithValue("p1", NpgsqlDbType.Array | NpgsqlDbType.Integer, jagged); Assert.That(async () => await cmd.ExecuteNonQueryAsync(), Throws.Exception - .TypeOf() - .With.Message.Contains("jagged")); - } - - [Test, Description("Checks that ILists are properly serialized as arrays of their underlying types")] - public async Task List_type_resolution() - { - await using var conn = await OpenConnectionAsync(); - await AssertIListRoundtrips(conn, new[] { 1, 2, 3 }); - await AssertIListRoundtrips(conn, new IntList { 1, 2, 3 }); - await AssertIListRoundtrips(conn, new MisleadingIntList() { 1, 2, 3 }); + .TypeOf() + .With.Property("InnerException").Message.Contains("jagged")); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1546")] @@ -618,17 +595,6 @@ public async Task Read_two_empty_arrays() Assert.AreNotSame(reader.GetFieldValue>(0), reader.GetFieldValue>(1)); } - async Task AssertIListRoundtrips(NpgsqlConnection conn, IEnumerable value) - { - await using var cmd = new NpgsqlCommand("SELECT @p", conn); - cmd.Parameters.Add(new 
NpgsqlParameter { ParameterName = "p", Value = value }); - - await using var reader = await cmd.ExecuteReaderAsync(); - reader.Read(); - Assert.That(reader.GetDataTypeName(0), Is.EqualTo("integer[]")); - Assert.That(reader[0], Is.EqualTo(value.ToArray())); - } - class IntList : List { } // ReSharper disable UnusedTypeParameter class MisleadingIntList : List { } diff --git a/test/Npgsql.Tests/Types/BitStringTests.cs b/test/Npgsql.Tests/Types/BitStringTests.cs index 4a22a2a9e6..95c81ffb41 100644 --- a/test/Npgsql.Tests/Types/BitStringTests.cs +++ b/test/Npgsql.Tests/Types/BitStringTests.cs @@ -51,7 +51,7 @@ public Task BitVector32() [Test] public Task BitVector32_too_long() - => AssertTypeUnsupportedRead(new string('0', 34), "bit varying"); + => AssertTypeUnsupportedRead(new string('0', 34), "bit varying"); [Test] public Task Bool() @@ -60,8 +60,8 @@ public Task Bool() [Test] public async Task Bitstring_with_multiple_bits_as_bool_throws() { - await AssertTypeUnsupportedRead("01", "varbit"); - await AssertTypeUnsupportedRead("01", "bit(2)"); + await AssertTypeUnsupportedRead("01", "varbit"); + await AssertTypeUnsupportedRead("01", "bit(2)"); } [Test] @@ -117,16 +117,12 @@ public async Task Array_of_single_bits_and_null() } [Test] - public Task Write_as_string() - => AssertTypeWrite("010101", "010101", "bit varying", NpgsqlDbType.Varbit, isDefault: false); + public Task As_string() + => AssertType("010101", "010101", "bit varying", NpgsqlDbType.Varbit, isDefault: false); [Test] public Task Write_as_string_validation() - => AssertTypeUnsupportedWrite("001q0", "bit varying"); - - [Test] - public Task Read_as_string_is_not_supported() - => AssertTypeUnsupportedRead("010101", "bit varying"); + => AssertTypeUnsupportedWrite("001q0", "bit varying"); public BitStringTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} } diff --git a/test/Npgsql.Tests/Types/ByteaTests.cs b/test/Npgsql.Tests/Types/ByteaTests.cs index f29f6e490b..2db7aca492 100644 --- 
a/test/Npgsql.Tests/Types/ByteaTests.cs +++ b/test/Npgsql.Tests/Types/ByteaTests.cs @@ -35,36 +35,26 @@ public async Task Bytea_long() } [Test] - public Task Write_as_Memory() - => AssertTypeWrite( - new Memory(new byte[] { 1, 2, 3 }), "\\x010203", "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false); - - [Test] - public Task Read_as_Memory_not_supported() - => AssertTypeUnsupportedRead, NotSupportedException>("\\x010203", "bytea"); + public Task AsMemory() + => AssertType( + new Memory(new byte[] { 1, 2, 3 }), "\\x010203", "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false, + comparer: (left, right) => left.Span.SequenceEqual(right.Span)); [Test] - public Task Write_as_ReadOnlyMemory() - => AssertTypeWrite( - new ReadOnlyMemory(new byte[] { 1, 2, 3 }), "\\x010203", "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false); + public Task AsReadOnlyMemory() + => AssertType( + new ReadOnlyMemory(new byte[] { 1, 2, 3 }), "\\x010203", "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false, + comparer: (left, right) => left.Span.SequenceEqual(right.Span)); [Test] - public Task Read_as_ReadOnlyMemory_not_supported() - => AssertTypeUnsupportedRead, NotSupportedException>("\\x010203", "bytea"); - - [Test] - public Task Write_as_ArraySegment() - => AssertTypeWrite( + public Task AsArraySegment() + => AssertType( new ArraySegment(new byte[] { 1, 2, 3 }), "\\x010203", "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false); - [Test] - public Task Read_as_ArraySegment_not_supported() - => AssertTypeUnsupportedRead, NotSupportedException>("\\x010203", "bytea"); - [Test] public Task Write_as_MemoryStream() => AssertTypeWrite( - () => new MemoryStream(new byte[] { 1, 2, 3 }), "\\x010203", "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false); + () => new MemoryStream(new byte[] { 1, 2, 3 }), "\\x010203", "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false, skipArrayCheck: true); [Test] public Task 
Write_as_MemoryStream_truncated() @@ -77,7 +67,7 @@ public Task Write_as_MemoryStream_truncated() }; return AssertTypeWrite( - msFactory, "\\x020304", "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false); + msFactory, "\\x020304", "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false, skipArrayCheck: true); } [Test] @@ -89,7 +79,7 @@ public async Task Write_as_MemoryStream_long() var expectedSql = "\\x" + ToHex(bytes); await AssertTypeWrite( - () => new MemoryStream(bytes), expectedSql, "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false); + () => new MemoryStream(bytes), expectedSql, "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false, skipArrayCheck: true); } [Test] @@ -102,7 +92,7 @@ public async Task Write_as_FileStream() await File.WriteAllBytesAsync(filePath, new byte[] { 1, 2, 3 }); await AssertTypeWrite( - () => FileStreamFactory(filePath, fsList), "\\x010203", "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false); + () => FileStreamFactory(filePath, fsList), "\\x010203", "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false, skipArrayCheck: true); } finally { @@ -138,7 +128,7 @@ public async Task Write_as_FileStream_long() var expectedSql = "\\x" + ToHex(bytes); await AssertTypeWrite( - () => FileStreamFactory(filePath, fsList), expectedSql, "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false); + () => FileStreamFactory(filePath, fsList), expectedSql, "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false, skipArrayCheck: true); } finally { @@ -187,6 +177,7 @@ public async Task Truncate_array() var p = new NpgsqlParameter("p", data) { Size = 4 }; cmd.Parameters.Add(p); Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(new byte[] { 1, 2, 3, 4 })); + Assert.That(p.Value, Is.EqualTo(new byte[] { 1, 2, 3, 4 }), "Truncated parameter value should be persisted on the parameter per DbParameter.Size docs"); // NpgsqlParameter.Size needs to persist when value is changed byte[] data2 = { 11, 12, 13, 
14, 15, 16 }; @@ -194,6 +185,7 @@ public async Task Truncate_array() Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(new byte[] { 11, 12, 13, 14 })); // NpgsqlParameter.Size larger than the value size should mean the value size, as well as 0 and -1 + p.Value = data2; p.Size = data2.Length + 10; Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(data2)); p.Size = 0; @@ -205,7 +197,6 @@ public async Task Truncate_array() } [Test, Description("Tests that bytea stream values are truncated when the NpgsqlParameter's Size is set")] - [NonParallelizable] // The last check will break the connection, which can fail other unrelated queries in multiplexing public async Task Truncate_stream() { await using var conn = await OpenConnectionAsync(); @@ -235,13 +226,9 @@ public async Task Truncate_stream() Assert.That(() => p.Size = -2, Throws.Exception.TypeOf()); - // NpgsqlParameter.Size larger than the value size should throw - p.Size = data2.Length + 10; p.Value = new MemoryStream(data2); - var ex = Assert.ThrowsAsync(async () => await cmd.ExecuteScalarAsync())!; - Assert.That(ex.InnerException, Is.TypeOf()); - if (!IsMultiplexing) - Assert.That(conn.State, Is.EqualTo(ConnectionState.Closed)); + p.Size = data2.Length + 10; + Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(data2)); } [Test] @@ -261,7 +248,7 @@ public async Task Write_as_NonSeekable_stream() p.Value = new NonSeekableStream(data); p.Size = 0; - Assert.ThrowsAsync(async () => await cmd.ExecuteScalarAsync()); + Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(data)); Assert.That(conn.State, Is.EqualTo(ConnectionState.Open)); } diff --git a/test/Npgsql.Tests/Types/CompositeHandlerTests.Read.cs b/test/Npgsql.Tests/Types/CompositeHandlerTests.Read.cs index 9e252e2d1b..bf9cb38241 100644 --- a/test/Npgsql.Tests/Types/CompositeHandlerTests.Read.cs +++ b/test/Npgsql.Tests/Types/CompositeHandlerTests.Read.cs @@ -62,7 +62,8 @@ public Task Read_type_with_two_properties_inverted() => [Test] public Task 
Read_type_with_private_property_throws() => - Read(new TypeWithPrivateProperty(), (execute, expected) => Assert.Throws(() => execute())); + Read(new TypeWithPrivateProperty(), (execute, expected) => + Assert.That(() => execute(), Throws.Exception.TypeOf().With.Property("InnerException").TypeOf())); [Test] public Task Read_type_with_private_getter() => @@ -99,15 +100,18 @@ public Task Read_type_with_more_properties_than_attributes() => [Test] public Task Read_type_with_less_properties_than_attributes_throws() => - Read(new TypeWithLessPropertiesThanAttributes(), (execute, expected) => Assert.Throws(() => execute())); + Read(new TypeWithLessPropertiesThanAttributes(), (execute, expected) => + Assert.That(() => execute(), Throws.Exception.TypeOf().With.Property("InnerException").TypeOf())); [Test] public Task Read_type_with_less_parameters_than_attributes_throws() => - Read(new TypeWithLessParametersThanAttributes(TheAnswer), (execute, expected) => Assert.Throws(() => execute())); + Read(new TypeWithLessParametersThanAttributes(TheAnswer), (execute, expected) => + Assert.That(() => execute(), Throws.Exception.TypeOf().With.Property("InnerException").TypeOf())); [Test] public Task Read_type_with_more_parameters_than_attributes_throws() => - Read(new TypeWithMoreParametersThanAttributes(TheAnswer, HelloSlonik), (execute, expected) => Assert.Throws(() => execute())); + Read(new TypeWithMoreParametersThanAttributes(TheAnswer, HelloSlonik), (execute, expected) => + Assert.That(() => execute(), Throws.Exception.TypeOf().With.Property("InnerException").TypeOf())); [Test] public Task Read_type_with_one_parameter() => diff --git a/test/Npgsql.Tests/Types/CompositeHandlerTests.Write.cs b/test/Npgsql.Tests/Types/CompositeHandlerTests.Write.cs index 938ac9f01a..a251cdd4ed 100644 --- a/test/Npgsql.Tests/Types/CompositeHandlerTests.Write.cs +++ b/test/Npgsql.Tests/Types/CompositeHandlerTests.Write.cs @@ -63,7 +63,9 @@ public Task Write_type_with_two_properties_inverted() [Test] 
public void Write_type_with_private_property_throws() - => Assert.ThrowsAsync(async () => await Write(new TypeWithPrivateProperty())); + => Assert.ThrowsAsync( + Is.TypeOf().With.Property("InnerException").TypeOf(), + async () => await Write(new TypeWithPrivateProperty())); [Test] public void Write_type_with_private_getter_throws() @@ -95,13 +97,19 @@ public Task Write_type_with_more_properties_than_attributes() [Test] public void Write_type_with_less_properties_than_attributes_throws() - => Assert.ThrowsAsync(async () => await Write(new TypeWithLessPropertiesThanAttributes())); + => Assert.ThrowsAsync( + Is.TypeOf().With.Property("InnerException").TypeOf(), + async () => await Write(new TypeWithLessPropertiesThanAttributes())); [Test] public void Write_type_with_less_parameters_than_attributes_throws() - => Assert.ThrowsAsync(async () => await Write(new TypeWithMoreParametersThanAttributes(TheAnswer, HelloSlonik))); + => Assert.ThrowsAsync( + Is.TypeOf().With.Property("InnerException").TypeOf(), + async () => await Write(new TypeWithMoreParametersThanAttributes(TheAnswer, HelloSlonik))); [Test] public void Write_type_with_more_parameters_than_attributes_throws() - => Assert.ThrowsAsync(async () => await Write(new TypeWithLessParametersThanAttributes(TheAnswer))); + => Assert.ThrowsAsync( + Is.TypeOf().With.Property("InnerException").TypeOf(), + async () => await Write(new TypeWithLessParametersThanAttributes(TheAnswer))); } diff --git a/test/Npgsql.Tests/Types/CompositeTests.cs b/test/Npgsql.Tests/Types/CompositeTests.cs index 2795968470..11f7739158 100644 --- a/test/Npgsql.Tests/Types/CompositeTests.cs +++ b/test/Npgsql.Tests/Types/CompositeTests.cs @@ -138,7 +138,8 @@ await AssertType( new SomeCompositeContainer { A = 8, Containee = new() { SomeText = "foo", X = 9 } }, @"(8,""(9,foo)"")", $"{secondSchemaName}.container", - npgsqlDbType: null); + npgsqlDbType: null, + isDefaultForWriting: false); await AssertType( connection, @@ -146,7 +147,7 @@ await AssertType( 
@"(8,""(9,foo)"")", $"{firstSchemaName}.container", npgsqlDbType: null, - isDefaultForWriting: false); + isDefaultForWriting: true); } [Test] @@ -237,6 +238,29 @@ await AssertType( npgsqlDbType: null); } + [Test] + public async Task Composite_containing_array_type() + { + await using var adminConnection = await OpenConnectionAsync(); + var compositeType = await GetTempTypeName(adminConnection); + + await adminConnection.ExecuteNonQueryAsync($@" +CREATE TYPE {compositeType} AS (ints int4[])"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.MapComposite(compositeType); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + await AssertType( + connection, + new SomeCompositeWithArray { Ints = new[] { 1, 2, 3, 4 } }, + @"(""{1,2,3,4}"")", + compositeType, + npgsqlDbType: null, + comparer: (actual, expected) => actual.Ints!.SequenceEqual(expected.Ints!)); + } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/990")] public async Task Table_as_composite([Values] bool enabled) { @@ -254,7 +278,7 @@ public async Task Table_as_composite([Values] bool enabled) await DoAssertion(); else { - Assert.ThrowsAsync(DoAssertion); + Assert.ThrowsAsync(DoAssertion); // Start a transaction specifically for multiplexing (to bind a connector to the connection) await using var tx = await connection.BeginTransactionAsync(); Assert.Null(connection.Connector!.DatabaseInfo.CompositeTypes.SingleOrDefault(c => c.Name.Contains(table))); @@ -402,6 +426,11 @@ struct SomeCompositeStruct public string SomeText { get; set; } } + class SomeCompositeWithArray + { + public int[]? 
Ints { get; set; } + } + record NameTranslationComposite { public int Simple { get; set; } diff --git a/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs b/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs index fc316adfee..bf2e0d0e65 100644 --- a/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs +++ b/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs @@ -22,16 +22,20 @@ public async Task TimestampTz_write() { Parameters = { - new() { Value = DateTime.MinValue, NpgsqlDbType = NpgsqlDbType.TimestampTz }, + new() + { + Value = DisableDateTimeInfinityConversions ? DateTime.MinValue.ToUniversalTime().AddYears(1) : DateTime.MinValue, + NpgsqlDbType = NpgsqlDbType.TimestampTz + }, } }; - Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(DisableDateTimeInfinityConversions ? "0001-01-01 00:00:00" : "-infinity")); + Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(DisableDateTimeInfinityConversions ? "0002-01-01 00:00:00" : "-infinity")); cmd.Parameters[0].Value = DateTime.MaxValue; if (DisableDateTimeInfinityConversions) - Assert.That(async () => await cmd.ExecuteScalarAsync(), Throws.Exception.TypeOf()); + Assert.That(async () => await cmd.ExecuteScalarAsync(), Throws.Exception.TypeOf()); else Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo("infinity")); } @@ -205,6 +209,8 @@ public DateTimeInfinityTests(bool disableDateTimeInfinityConversions) "DateTimeInfinityTests rely on the Npgsql.DisableDateTimeInfinityConversions AppContext switch and can only be run in DEBUG builds"); } #endif + // The switch is baked into the serializer options, so clear the sources on change here. 
+ ClearDataSources(); } public void Dispose() diff --git a/test/Npgsql.Tests/Types/DateTimeTests.cs b/test/Npgsql.Tests/Types/DateTimeTests.cs index f387387dcc..7382891cf5 100644 --- a/test/Npgsql.Tests/Types/DateTimeTests.cs +++ b/test/Npgsql.Tests/Types/DateTimeTests.cs @@ -64,7 +64,21 @@ public Task Daterange_as_NpgsqlRange_of_DateOnly() "[2002-03-04,2002-03-06)", "daterange", NpgsqlDbType.DateRange, - isDefaultForReading: false); + isDefaultForReading: false, + skipArrayCheck: true); // NpgsqlRange[] is mapped to multirange by default, not array; test separately + + [Test] + public Task Daterange_array_as_NpgsqlRange_of_DateOnly_array() + => AssertType( + new[] + { + new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false), + new NpgsqlRange(new(2002, 3, 8), true, new(2002, 3, 9), false) + }, + """{"[2002-03-04,2002-03-06)","[2002-03-08,2002-03-09)"}""", + "daterange[]", + NpgsqlDbType.DateRange | NpgsqlDbType.Array, + isDefault: false); [Test] public async Task Datemultirange_as_array_of_NpgsqlRange_of_DateOnly() @@ -72,7 +86,7 @@ public async Task Datemultirange_as_array_of_NpgsqlRange_of_DateOnly() await using var conn = await OpenConnectionAsync(); MinimumPgVersion(conn, "14.0", "Multirange types were introduced in PostgreSQL 14"); - await AssertType( + await AssertType( new[] { new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false), @@ -150,11 +164,13 @@ public Task TimeTz_before_utc_zero() [Test, TestCaseSource(nameof(TimestampValues))] public Task Timestamp_as_DateTime(DateTime dateTime, string sqlLiteral) - => AssertType(dateTime, sqlLiteral, "timestamp without time zone", NpgsqlDbType.Timestamp, DbType.DateTime2); + => AssertType(dateTime, sqlLiteral, "timestamp without time zone", NpgsqlDbType.Timestamp, DbType.DateTime2, + // Explicitly check kind as well. 
+ comparer: (actual, expected) => actual.Kind == expected.Kind && actual.Equals(expected)); [Test] public Task Timestamp_cannot_write_utc_DateTime() - => AssertTypeUnsupportedWrite(new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), "timestamp without time zone"); + => AssertTypeUnsupportedWrite(new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), "timestamp without time zone"); [Test] public Task Timestamp_as_long() @@ -181,7 +197,25 @@ public Task Tsrange_as_NpgsqlRange_of_DateTime() new(1998, 4, 12, 15, 26, 38, DateTimeKind.Local)), @"[""1998-04-12 13:26:38"",""1998-04-12 15:26:38""]", "tsrange", - NpgsqlDbType.TimestampRange); + NpgsqlDbType.TimestampRange, + skipArrayCheck: true); // NpgsqlRange[] is mapped to multirange by default, not array; test separately + + [Test] + public Task Tsrange_array_as_NpgsqlRange_of_DateTime_array() + => AssertType( + new[] + { + new NpgsqlRange( + new(1998, 4, 12, 13, 26, 38, DateTimeKind.Local), + new(1998, 4, 12, 15, 26, 38, DateTimeKind.Local)), + new NpgsqlRange( + new(1998, 4, 13, 13, 26, 38, DateTimeKind.Local), + new(1998, 4, 13, 15, 26, 38, DateTimeKind.Local)), + }, + """{"[\"1998-04-12 13:26:38\",\"1998-04-12 15:26:38\"]","[\"1998-04-13 13:26:38\",\"1998-04-13 15:26:38\"]"}""", + "tsrange[]", + NpgsqlDbType.TimestampRange | NpgsqlDbType.Array, + isDefault: false); [Test] public async Task Tsmultirange_as_array_of_NpgsqlRange_of_DateTime() @@ -222,7 +256,9 @@ await AssertType( [Test, TestCaseSource(nameof(TimestampTzWriteValues))] public Task Timestamptz_as_DateTime(DateTime dateTime, string sqlLiteral) - => AssertType(dateTime, sqlLiteral, "timestamp with time zone", NpgsqlDbType.TimestampTz, DbType.DateTime); + => AssertType(dateTime, sqlLiteral, "timestamp with time zone", NpgsqlDbType.TimestampTz, DbType.DateTime, + // Explicitly check kind as well. 
+ comparer: (actual, expected) => actual.Kind == expected.Kind && actual.Equals(expected)); [Test] public async Task Timestamptz_infinity_as_DateTime() @@ -236,8 +272,8 @@ await AssertType(DateTime.MaxValue, "infinity", "timestamp with time zone", Npgs [Test] public async Task Timestamptz_cannot_write_non_utc_DateTime() { - await AssertTypeUnsupportedWrite(new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Unspecified), "timestamp with time zone"); - await AssertTypeUnsupportedWrite(new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Local), "timestamp with time zone"); + await AssertTypeUnsupportedWrite(new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Unspecified), "timestamp with time zone"); + await AssertTypeUnsupportedWrite(new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Local), "timestamp with time zone"); } [Test] @@ -267,7 +303,7 @@ public Task Timestamptz_as_DateTimeOffset_utc_with_DbType_DateTimeOffset() [Test] public Task Timestamptz_cannot_write_non_utc_DateTimeOffset() - => AssertTypeUnsupportedWrite(new DateTimeOffset(1998, 4, 12, 13, 26, 38, TimeSpan.FromHours(2))); + => AssertTypeUnsupportedWrite(new DateTimeOffset(1998, 4, 12, 13, 26, 38, TimeSpan.FromHours(2))); [Test] public Task Timestamptz_as_long() @@ -279,6 +315,24 @@ public Task Timestamptz_as_long() DbType.DateTime, isDefault: false); + [Test] + public async Task Timestamptz_array_as_DateTimeOffset_array() + { + var dateTimeOffsets = await AssertType( + new[] + { + new DateTimeOffset(1998, 4, 12, 13, 26, 38, TimeSpan.Zero), + new DateTimeOffset(1999, 4, 12, 13, 26, 38, TimeSpan.Zero) + }, + """{"1998-04-12 15:26:38+02","1999-04-12 15:26:38+02"}""", + "timestamp with time zone[]", + NpgsqlDbType.TimestampTz | NpgsqlDbType.Array, + isDefaultForReading: false); + + Assert.That(dateTimeOffsets[0].Offset, Is.EqualTo(TimeSpan.Zero)); + Assert.That(dateTimeOffsets[1].Offset, Is.EqualTo(TimeSpan.Zero)); + } + [Test] public Task Tstzrange_as_NpgsqlRange_of_DateTime() => AssertType( @@ -287,7 
+341,25 @@ public Task Tstzrange_as_NpgsqlRange_of_DateTime() new DateTime(1998, 4, 12, 15, 26, 38, DateTimeKind.Utc)), @"[""1998-04-12 15:26:38+02"",""1998-04-12 17:26:38+02""]", "tstzrange", - NpgsqlDbType.TimestampTzRange); + NpgsqlDbType.TimestampTzRange, + skipArrayCheck: true); // NpgsqlRange[] is mapped to multirange by default, not array; test separately + + [Test] + public Task Tstzrange_array_as_NpgsqlRange_of_DateTime_array() + => AssertType( + new[] + { + new NpgsqlRange( + new(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), + new(1998, 4, 12, 15, 26, 38, DateTimeKind.Utc)), + new NpgsqlRange( + new(1998, 4, 13, 13, 26, 38, DateTimeKind.Utc), + new(1998, 4, 13, 15, 26, 38, DateTimeKind.Utc)), + }, + """{"[\"1998-04-12 15:26:38+02\",\"1998-04-12 17:26:38+02\"]","[\"1998-04-13 15:26:38+02\",\"1998-04-13 17:26:38+02\"]"}""", + "tstzrange[]", + NpgsqlDbType.TimestampTzRange | NpgsqlDbType.Array, + isDefault: false); [Test] public async Task Tstzmultirange_as_array_of_NpgsqlRange_of_DateTime() @@ -312,7 +384,7 @@ await AssertType( [Test] public Task Cannot_mix_DateTime_Kinds_in_array() - => AssertTypeUnsupportedWrite(new[] + => AssertTypeUnsupportedWrite(new[] { new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Local), @@ -321,7 +393,7 @@ public Task Cannot_mix_DateTime_Kinds_in_array() [Test] public Task Cannot_mix_DateTime_Kinds_in_range() - => AssertTypeUnsupportedWrite(new NpgsqlRange( + => AssertTypeUnsupportedWrite, ArgumentException>(new NpgsqlRange( new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Local))); @@ -331,8 +403,29 @@ public async Task Cannot_mix_DateTime_Kinds_in_multirange() await using var conn = await OpenConnectionAsync(); MinimumPgVersion(conn, "14.0", "Multirange types were introduced in PostgreSQL 14"); - await AssertTypeUnsupportedWrite(new[] + await AssertTypeUnsupportedWrite[], ArgumentException>(new[] { + new 
NpgsqlRange( + new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), + new DateTime(1998, 4, 12, 15, 26, 38, DateTimeKind.Utc)), + new NpgsqlRange( + new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), + new DateTime(1998, 4, 12, 15, 26, 38, DateTimeKind.Utc)), + new NpgsqlRange( + new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), + new DateTime(1998, 4, 12, 15, 26, 38, DateTimeKind.Utc)), + new NpgsqlRange( + new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), + new DateTime(1998, 4, 12, 15, 26, 38, DateTimeKind.Utc)), + new NpgsqlRange( + new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), + new DateTime(1998, 4, 12, 15, 26, 38, DateTimeKind.Utc)), + new NpgsqlRange( + new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), + new DateTime(1998, 4, 12, 15, 26, 38, DateTimeKind.Utc)), + new NpgsqlRange( + new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), + new DateTime(1998, 4, 12, 15, 26, 38, DateTimeKind.Utc)), new NpgsqlRange( new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), new DateTime(1998, 4, 12, 15, 26, 38, DateTimeKind.Utc)), @@ -375,6 +468,19 @@ public void NpgsqlParameterNpgsqlDbType_is_value_dependent_timestamp_or_timestam Assert.AreEqual(NpgsqlDbType.TimestampTz, dtotimestamptz.NpgsqlDbType); } + [Test] + public async Task Array_of_nullable_timestamptz() + => await AssertType( + new DateTime?[] + { + new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), + null + }, + @"{""1998-04-12 15:26:38+02"",NULL}", + "timestamp with time zone[]", + NpgsqlDbType.TimestampTz | NpgsqlDbType.Array, + isDefault: false); + #endregion #region Interval @@ -415,7 +521,7 @@ public Task Interval_as_NpgsqlInterval() [Test] public Task Interval_with_months_cannot_read_as_TimeSpan() - => AssertTypeUnsupportedRead("1 month 2 days", "interval"); + => AssertTypeUnsupportedRead("1 month 2 days", "interval"); #endregion diff --git a/test/Npgsql.Tests/Types/FullTextSearchTests.cs b/test/Npgsql.Tests/Types/FullTextSearchTests.cs index 
ae759a295c..5ffe8b2880 100644 --- a/test/Npgsql.Tests/Types/FullTextSearchTests.cs +++ b/test/Npgsql.Tests/Types/FullTextSearchTests.cs @@ -4,7 +4,6 @@ using Npgsql.Properties; using NpgsqlTypes; using NUnit.Framework; -using NUnit.Framework.Constraints; namespace Npgsql.Tests.Types; @@ -78,15 +77,15 @@ public async Task Full_text_search_supported_only_with_EnableFullTextSearch([Val } else { - var exception = await AssertTypeUnsupportedRead("a", "tsquery", dataSource); - Assert.AreEqual(errorMessage, exception.Message); - exception = await AssertTypeUnsupportedWrite(new NpgsqlTsQueryLexeme("a"), pgTypeName: null, dataSource); - Assert.AreEqual(errorMessage, exception.Message); + var exception = await AssertTypeUnsupportedRead("a", "tsquery", dataSource); + Assert.AreEqual(errorMessage, exception.InnerException!.Message); + exception = await AssertTypeUnsupportedWrite(new NpgsqlTsQueryLexeme("a"), pgTypeName: null, dataSource); + Assert.AreEqual(errorMessage, exception.InnerException!.Message); - exception = await AssertTypeUnsupportedRead("1", "tsvector", dataSource); - Assert.AreEqual(errorMessage, exception.Message); - exception = await AssertTypeUnsupportedWrite(NpgsqlTsVector.Parse("'1'"), pgTypeName: null, dataSource); - Assert.AreEqual(errorMessage, exception.Message); + exception = await AssertTypeUnsupportedRead("1", "tsvector", dataSource); + Assert.AreEqual(errorMessage, exception.InnerException!.Message); + exception = await AssertTypeUnsupportedWrite(NpgsqlTsVector.Parse("'1'"), pgTypeName: null, dataSource); + Assert.AreEqual(errorMessage, exception.InnerException!.Message); } } } diff --git a/test/Npgsql.Tests/Types/GeometricTypeTests.cs b/test/Npgsql.Tests/Types/GeometricTypeTests.cs index b4101cca14..d84218bd12 100644 --- a/test/Npgsql.Tests/Types/GeometricTypeTests.cs +++ b/test/Npgsql.Tests/Types/GeometricTypeTests.cs @@ -26,12 +26,25 @@ public Task LineSegment() [Test] public Task Box() - => AssertType(new NpgsqlBox(3, 4, 1, 2), "(4,3),(2,1)", 
"box", NpgsqlDbType.Box); + => AssertType(new NpgsqlBox(3, 4, 1, 2), "(4,3),(2,1)", "box", NpgsqlDbType.Box, + skipArrayCheck: true); // Uses semicolon instead of comma as separator + + [Test] + public Task Box_array() + => AssertType( + new[] + { + new NpgsqlBox(3, 4, 1, 2), + new NpgsqlBox(5, 6, 3, 4) + }, + "{(4,3),(2,1);(6,5),(4,3)}", + "box[]", + NpgsqlDbType.Box | NpgsqlDbType.Array); [Test] public Task Path_closed() => AssertType( - new NpgsqlPath(new[] {new NpgsqlPoint(1, 2), new NpgsqlPoint(3, 4)}, false), + new NpgsqlPath(new[] { new NpgsqlPoint(1, 2), new NpgsqlPoint(3, 4) }, false), "((1,2),(3,4))", "path", NpgsqlDbType.Path); diff --git a/test/Npgsql.Tests/Types/HstoreTests.cs b/test/Npgsql.Tests/Types/HstoreTests.cs index ab1ee2ad6c..5696cad98b 100644 --- a/test/Npgsql.Tests/Types/HstoreTests.cs +++ b/test/Npgsql.Tests/Types/HstoreTests.cs @@ -6,7 +6,6 @@ namespace Npgsql.Tests.Types; -[NonParallelizable] public class HstoreTests : MultiplexingTestBase { [Test] @@ -20,11 +19,11 @@ public Task Hstore() }, @"""a""=>""3"", ""b""=>NULL, ""cd""=>""hello""", "hstore", - NpgsqlDbType.Hstore); + NpgsqlDbType.Hstore, isNpgsqlDbTypeInferredFromClrType: false); [Test] public Task Hstore_empty() - => AssertType(new Dictionary(), @"", "hstore", NpgsqlDbType.Hstore); + => AssertType(new Dictionary(), @"", "hstore", NpgsqlDbType.Hstore, isNpgsqlDbTypeInferredFromClrType: false); [Test] public Task Hstore_as_ImmutableDictionary() @@ -40,7 +39,7 @@ public Task Hstore_as_ImmutableDictionary() @"""a""=>""3"", ""b""=>NULL, ""cd""=>""hello""", "hstore", NpgsqlDbType.Hstore, - isDefaultForReading: false); + isDefaultForReading: false, isNpgsqlDbTypeInferredFromClrType: false); } [Test] @@ -55,7 +54,7 @@ public Task Hstore_as_IDictionary() @"""a""=>""3"", ""b""=>NULL, ""cd""=>""hello""", "hstore", NpgsqlDbType.Hstore, - isDefaultForReading: false); + isDefaultForReading: false, isNpgsqlDbTypeInferredFromClrType: false); [OneTimeSetUp] public async Task SetUp() diff --git 
a/test/Npgsql.Tests/Types/JsonPathTests.cs b/test/Npgsql.Tests/Types/JsonPathTests.cs index 3d068aa3d2..de49a631e0 100644 --- a/test/Npgsql.Tests/Types/JsonPathTests.cs +++ b/test/Npgsql.Tests/Types/JsonPathTests.cs @@ -1,4 +1,5 @@ -using System.Threading.Tasks; +using System.Data; +using System.Threading.Tasks; using NpgsqlTypes; using NUnit.Framework; using static Npgsql.Tests.TestUtil; @@ -16,6 +17,18 @@ public JsonPathTests(MultiplexingMode multiplexingMode) new object[] { "'$\"varname\"'", "$\"varname\"" }, }; + [Test] + [TestCase("$")] + [TestCase("$\"varname\"")] + public async Task JsonPath(string jsonPath) + { + using var conn = await OpenConnectionAsync(); + MinimumPgVersion(conn, "12.0", "The jsonpath type was introduced in PostgreSQL 12"); + await AssertType( + jsonPath, jsonPath, "jsonpath", NpgsqlDbType.JsonPath, isDefaultForWriting: false, isNpgsqlDbTypeInferredFromClrType: false, + inferredDbType: DbType.Object); + } + [Test] [TestCaseSource(nameof(ReadWriteCases))] public async Task Read(string query, string expected) @@ -43,4 +56,4 @@ public async Task Write(string query, string expected) Assert.True(rdr.Read()); } -} \ No newline at end of file +} diff --git a/test/Npgsql.Tests/Types/JsonTests.cs b/test/Npgsql.Tests/Types/JsonTests.cs index 2323430773..3460a88a5c 100644 --- a/test/Npgsql.Tests/Types/JsonTests.cs +++ b/test/Npgsql.Tests/Types/JsonTests.cs @@ -2,6 +2,7 @@ using System.Text; using System.Text.Json; using System.Text.Json.Nodes; +using System.Text.Json.Serialization; using System.Threading.Tasks; using NpgsqlTypes; using NUnit.Framework; @@ -166,6 +167,54 @@ await AssertTypeUnsupported( slimDataSource); } + [Test] + public async Task Poco_does_not_stomp_GetValue_string() + { + var dataSourceBuilder = CreateDataSourceBuilder(); + var dataSource = dataSourceBuilder.UseSystemTextJson(null, new[] {typeof(WeatherForecast)}, new[] {typeof(WeatherForecast)}).Build(); + var sqlLiteral = + IsJsonb + ? 
"""{"Date": "2019-09-01T00:00:00", "Summary": "Partly cloudy", "TemperatureC": 10}""" + : """{"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}"""; + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand($"SELECT '{sqlLiteral}'::{(IsJsonb ? "jsonb" : "json")}", conn); + await using var reader = await cmd.ExecuteReaderAsync(); + await reader.ReadAsync(); + + Assert.That(reader.GetValue(0), Is.TypeOf()); + } + + [Test] + public Task Roundtrip_string() + => AssertType( + @"{""p"": 1}", + @"{""p"": 1}", + PostgresType, + NpgsqlDbType, + isDefault: false, + isNpgsqlDbTypeInferredFromClrType: false); + + [Test] + public Task Roundtrip_char_array() + => AssertType( + @"{""p"": 1}".ToCharArray(), + @"{""p"": 1}", + PostgresType, + NpgsqlDbType, + isDefault: false, + isNpgsqlDbTypeInferredFromClrType: false); + + [Test] + public Task Roundtrip_byte_array() + => AssertType( + Encoding.ASCII.GetBytes(@"{""p"": 1}"), + @"{""p"": 1}", + PostgresType, + NpgsqlDbType, + isDefault: false, + isNpgsqlDbTypeInferredFromClrType: false); + + [JsonDerivedType(typeof(ExtendedDerivedWeatherForecast), typeDiscriminator: "extended")] record WeatherForecast { public DateTime Date { get; set; } @@ -173,6 +222,15 @@ record WeatherForecast public string Summary { get; set; } = ""; } + record DerivedWeatherForecast : WeatherForecast + { + } + + record ExtendedDerivedWeatherForecast : DerivedWeatherForecast + { + public int TemperatureF => 32 + (int)(TemperatureC / 0.5556); + } + [Test] [IssueLink("https://github.com/npgsql/npgsql/issues/2811")] [IssueLink("https://github.com/npgsql/efcore.pg/issues/1177")] @@ -256,7 +314,7 @@ await AssertTypeWrite( isDefault: false); } - [Test] + [Test, Ignore("TODO We should not change the default type for json/jsonb, it makes little sense.")] public async Task Poco_default_mapping() { var dataSourceBuilder = CreateDataSourceBuilder(); @@ -266,7 +324,7 @@ public async Task 
Poco_default_mapping() dataSourceBuilder.UseSystemTextJson(jsonClrTypes: new[] { typeof(WeatherForecast) }); await using var dataSource = dataSourceBuilder.Build(); - await AssertTypeWrite( + await AssertType( dataSource, new WeatherForecast { @@ -279,9 +337,138 @@ await AssertTypeWrite( : """{"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}""", PostgresType, NpgsqlDbType, + isDefaultForReading: false, + isNpgsqlDbTypeInferredFromClrType: false); + } + + [Test] + public async Task Poco_polymorphic_mapping() + { + // We don't yet support polymorphic deserialization for jsonb as $type does not come back as the first property. + // This could be fixed by detecting PolymorphicOptions types, always buffering their values and modifying the text. + if (IsJsonb) + return; + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.UseSystemTextJson(jsonClrTypes: new[] { typeof(WeatherForecast) }); + await using var dataSource = dataSourceBuilder.Build(); + + await AssertType( + dataSource, + new ExtendedDerivedWeatherForecast() + { + Date = new DateTime(2019, 9, 1), + Summary = "Partly cloudy", + TemperatureC = 10 + }, + """{"$type":"extended","TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}""", + PostgresType, + NpgsqlDbType, + isDefaultForReading: false, isNpgsqlDbTypeInferredFromClrType: false); } + [Test] + public async Task Poco_polymorphic_mapping_read_parents() + { + // We don't yet support polymorphic deserialization for jsonb as $type does not come back as the first property. + // This could be fixed by detecting PolymorphicOptions types, always buffering their values and modifying the text. 
+ if (IsJsonb) + return; + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.UseSystemTextJson(jsonClrTypes: new[] { typeof(WeatherForecast) }); + await using var dataSource = dataSourceBuilder.Build(); + + var value = new ExtendedDerivedWeatherForecast() + { + Date = new DateTime(2019, 9, 1), + Summary = "Partly cloudy", + TemperatureC = 10 + }; + + var sql = """{"$type":"extended","TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}"""; + + await AssertTypeWrite( + dataSource, + value, + sql, + PostgresType, + NpgsqlDbType, + isNpgsqlDbTypeInferredFromClrType: false); + + // GetFieldValue + await AssertTypeRead(dataSource, sql, PostgresType, value, + comparer: (_, actual) => actual.GetType() == typeof(ExtendedDerivedWeatherForecast), + isDefault: false); + + await AssertTypeRead(dataSource, sql, PostgresType, value, + comparer: (_, actual) => actual.GetType() == typeof(DerivedWeatherForecast), isDefault: false); + + await AssertTypeRead(dataSource, sql, PostgresType, value, isDefault: false); + } + + + [Test] + public async Task Poco_exact_polymorphic_mapping() + { + // We don't yet support polymorphic deserialization for jsonb as $type does not come back as the first property. + // This could be fixed by detecting PolymorphicOptions types, always buffering their values and modifying the text. 
+ if (IsJsonb) + return; + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.UseSystemTextJson(jsonClrTypes: new[] { typeof(ExtendedDerivedWeatherForecast) }); + await using var dataSource = dataSourceBuilder.Build(); + + await AssertType( + dataSource, + new ExtendedDerivedWeatherForecast() + { + Date = new DateTime(2019, 9, 1), + Summary = "Partly cloudy", + TemperatureC = 10 + }, + """{"TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}""", + PostgresType, + NpgsqlDbType, + isDefaultForReading: false, + isNpgsqlDbTypeInferredFromClrType: false); + } + + [Test] + public async Task Poco_unspecified_polymorphic_mapping() + { + // We don't yet support polymorphic deserialization for jsonb as $type does not come back as the first property. + // This could be fixed by detecting PolymorphicOptions types, always buffering their values and modifying the text. + // In this case we don't have any statically mapped base type to check its PolymorphicOptions on. + // Detecting whether the type could be polymorphic would require us to duplicate STJ's nearest polymorphic ancestor search. 
+ if (IsJsonb) + return; + + var value = new ExtendedDerivedWeatherForecast() + { + Date = new DateTime(2019, 9, 1), + Summary = "Partly cloudy", + TemperatureC = 10 + }; + + var sql = """{"$type":"extended","TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}"""; + + await AssertType( + value, + sql, + PostgresType, + NpgsqlDbType, + isDefault: false); + + await AssertTypeRead(DataSource, sql, PostgresType, value, + comparer: (_, actual) => actual.GetType() == typeof(DerivedWeatherForecast), isDefault: false); + + await AssertTypeRead(DataSource, sql, PostgresType, value, + comparer: (_, actual) => actual.GetType() == typeof(ExtendedDerivedWeatherForecast), isDefault: false); + } + public JsonTests(MultiplexingMode multiplexingMode, NpgsqlDbType npgsqlDbType) : base(multiplexingMode) { diff --git a/test/Npgsql.Tests/Types/LTreeTests.cs b/test/Npgsql.Tests/Types/LTreeTests.cs index 48f7d950c9..5d104a4c54 100644 --- a/test/Npgsql.Tests/Types/LTreeTests.cs +++ b/test/Npgsql.Tests/Types/LTreeTests.cs @@ -1,10 +1,10 @@ using System.Threading.Tasks; +using Npgsql.Properties; using NpgsqlTypes; using NUnit.Framework; namespace Npgsql.Tests.Types; -[NonParallelizable] public class LTreeTests : MultiplexingTestBase { [Test] @@ -19,6 +19,32 @@ public Task LTree() public Task LTxtQuery() => AssertType("Science & Astronomy", "Science & Astronomy", "ltxtquery", NpgsqlDbType.LTxtQuery, isDefaultForWriting: false); + [Test] + public async Task LTree_not_supported_by_default_on_NpgsqlSlimSourceBuilder() + { + var errorMessage = string.Format( + NpgsqlStrings.LTreeNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableLTree), nameof(NpgsqlSlimDataSourceBuilder)); + + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + await using var dataSource = dataSourceBuilder.Build(); + + var exception = + await AssertTypeUnsupportedRead>("Top.Science.Astronomy", "ltree", dataSource); + 
Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); + exception = await AssertTypeUnsupportedWrite("Top.Science.Astronomy", "ltree", dataSource); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); + } + + [Test] + public async Task NpgsqlSlimSourceBuilder_EnableLTree() + { + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + dataSourceBuilder.EnableLTree(); + await using var dataSource = dataSourceBuilder.Build(); + + await AssertType(dataSource, "Top.Science.Astronomy", "Top.Science.Astronomy", "ltree", NpgsqlDbType.LTree, isDefaultForWriting: false); + } + [OneTimeSetUp] public async Task SetUp() { diff --git a/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs b/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs index fbf1b537d1..2b9ae54813 100644 --- a/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs +++ b/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs @@ -1,7 +1,7 @@ using System; using System.Data; using System.Threading.Tasks; -using Npgsql.TypeMapping; +using Npgsql.Internal.Resolvers; using NpgsqlTypes; using NUnit.Framework; using static Npgsql.Util.Statics; @@ -49,21 +49,22 @@ public Task Timestamptz_local_DateTime_converts() isDefaultForWriting: false); } - protected override async ValueTask OpenConnectionAsync() - { - var conn = await base.OpenConnectionAsync(); - await conn.ExecuteNonQueryAsync("SET TimeZone='Europe/Berlin'"); - return conn; - } - - protected override NpgsqlConnection OpenConnection() - => throw new NotSupportedException(); + NpgsqlDataSource _dataSource = null!; + protected override NpgsqlDataSource DataSource => _dataSource; [OneTimeSetUp] public void Setup() { #if DEBUG LegacyTimestampBehavior = true; + _dataSource = CreateDataSource(builder => + { + // Can't use the static AdoTypeInfoResolver instance, it already captured the feature flag. 
+ builder.AddTypeInfoResolver(new AdoTypeInfoResolver()); + builder.AddTypeInfoResolver(new AdoArrayTypeInfoResolver()); + builder.ConnectionStringBuilder.Timezone = "Europe/Berlin"; + }); + NpgsqlDataSourceBuilder.ResetGlobalMappings(overwrite: true); #else Assert.Ignore( "Legacy DateTime tests rely on the Npgsql.EnableLegacyTimestampBehavior AppContext switch and can only be run in DEBUG builds"); @@ -72,6 +73,10 @@ public void Setup() #if DEBUG [OneTimeTearDown] - public void Teardown() => LegacyTimestampBehavior = false; + public void Teardown() + { + LegacyTimestampBehavior = false; + NpgsqlDataSourceBuilder.ResetGlobalMappings(overwrite: true); + } #endif } diff --git a/test/Npgsql.Tests/Types/MiscTypeTests.cs b/test/Npgsql.Tests/Types/MiscTypeTests.cs index 41555e776e..57d241a811 100644 --- a/test/Npgsql.Tests/Types/MiscTypeTests.cs +++ b/test/Npgsql.Tests/Types/MiscTypeTests.cs @@ -1,10 +1,8 @@ using System; using System.Data; using System.Threading.Tasks; -using Npgsql.Properties; using NpgsqlTypes; using NUnit.Framework; -using NUnit.Framework.Constraints; namespace Npgsql.Tests.Types; @@ -16,8 +14,11 @@ class MiscTypeTests : MultiplexingTestBase [Test] public async Task Boolean() { - await AssertType(true, "true", "boolean", NpgsqlDbType.Boolean, DbType.Boolean); - await AssertType(false, "false", "boolean", NpgsqlDbType.Boolean, DbType.Boolean); + await AssertType(true, "true", "boolean", NpgsqlDbType.Boolean, DbType.Boolean, skipArrayCheck: true); + await AssertType(false, "false", "boolean", NpgsqlDbType.Boolean, DbType.Boolean, skipArrayCheck: true); + + // The literal representations for bools inside array are different ({t,f} instead of true/false, so we check separately. 
+ await AssertType(new[] { true, false }, "{t,f}", "boolean[]", NpgsqlDbType.Boolean | NpgsqlDbType.Array); } [Test] @@ -49,7 +50,7 @@ public async Task Null() { cmd.Parameters.AddWithValue("p1", DBNull.Value); cmd.Parameters.Add(new NpgsqlParameter("p2", null)); - cmd.Parameters.Add(new NpgsqlParameter("p3", DBNull.Value)); + cmd.Parameters.Add(new NpgsqlParameter("p3", DBNull.Value)); await using var reader = await cmd.ExecuteReaderAsync(); reader.Read(); @@ -60,107 +61,21 @@ public async Task Null() } } - // Setting non-generic NpgsqlParameter.Value is not allowed, only DBNull.Value + // Setting non-generic NpgsqlParameter.Value to null is not allowed, only DBNull.Value await using (var cmd = new NpgsqlCommand("SELECT @p::TEXT", conn)) { cmd.Parameters.AddWithValue("p4", NpgsqlDbType.Text, null!); - Assert.That(async () => await cmd.ExecuteReaderAsync(), Throws.Exception.TypeOf()); + Assert.That(async () => await cmd.ExecuteReaderAsync(), Throws.Exception.TypeOf()); } - } - - #region Record - - [Test] - [IssueLink("https://github.com/npgsql/npgsql/issues/724")] - [IssueLink("https://github.com/npgsql/npgsql/issues/1980")] - public async Task Read_Record_as_object_array() - { - var recordLiteral = "(1,'foo'::text)::record"; - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand($"SELECT {recordLiteral}, ARRAY[{recordLiteral}, {recordLiteral}]", conn); - await using var reader = await cmd.ExecuteReaderAsync(); - reader.Read(); - - var record = (object[])reader[0]; - Assert.That(record[0], Is.EqualTo(1)); - Assert.That(record[1], Is.EqualTo("foo")); - - var array = (object[][])reader[1]; - Assert.That(array.Length, Is.EqualTo(2)); - Assert.That(array[0][0], Is.EqualTo(1)); - Assert.That(array[1][0], Is.EqualTo(1)); - } - - [Test] - public async Task Read_Record_as_ValueTuple() - { - var recordLiteral = "(1,'foo'::text)::record"; - await using var conn = await OpenConnectionAsync(); - await using var cmd = new 
NpgsqlCommand($"SELECT {recordLiteral}, ARRAY[{recordLiteral}, {recordLiteral}]", conn); - await using var reader = await cmd.ExecuteReaderAsync(); - reader.Read(); - - var record = reader.GetFieldValue<(int, string)>(0); - Assert.That(record.Item1, Is.EqualTo(1)); - Assert.That(record.Item2, Is.EqualTo("foo")); - - var array = (object[][])reader[1]; - Assert.That(array.Length, Is.EqualTo(2)); - Assert.That(array[0][0], Is.EqualTo(1)); - Assert.That(array[1][0], Is.EqualTo(1)); - } - [Test] - public async Task Read_Record_as_Tuple() - { - var recordLiteral = "(1,'foo'::text)::record"; - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand($"SELECT {recordLiteral}, ARRAY[{recordLiteral}, {recordLiteral}]", conn); - await using var reader = await cmd.ExecuteReaderAsync(); - reader.Read(); - - var record = reader.GetFieldValue>(0); - Assert.That(record.Item1, Is.EqualTo(1)); - Assert.That(record.Item2, Is.EqualTo("foo")); - - var array = (object[][])reader[1]; - Assert.That(array.Length, Is.EqualTo(2)); - Assert.That(array[0][0], Is.EqualTo(1)); - Assert.That(array[1][0], Is.EqualTo(1)); - } - - [Test] - public Task Write_Record_is_not_supported() - => AssertTypeUnsupportedWrite(new object[] { 1, "foo" }, "record"); - - [Test] - public async Task Records_supported_only_with_EnableRecords([Values] bool withMappings) - { - Func assertExpr = () => withMappings - ? 
Throws.Nothing - : Throws.Exception - .TypeOf() - .With.Property("Message") - .EqualTo(string.Format(NpgsqlStrings.RecordsNotEnabled, "EnableRecords", "NpgsqlSlimDataSourceBuilder")); - - var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); - if (withMappings) - dataSourceBuilder.EnableRecords(); - await using var dataSource = dataSourceBuilder.Build(); - await using var conn = await dataSource.OpenConnectionAsync(); - await using var cmd = conn.CreateCommand(); - - // RecordHandler doesn't support writing, so we only check for reading - cmd.CommandText = "SELECT ('one'::text, 2)"; - await using var reader = await cmd.ExecuteReaderAsync(); - await reader.ReadAsync(); - - Assert.That(() => reader.GetValue(0), assertExpr()); - Assert.That(() => reader.GetFieldValue(0), assertExpr()); + // Setting generic NpgsqlParameter.Value to null is not allowed, only DBNull.Value + await using (var cmd = new NpgsqlCommand("SELECT @p::TEXT", conn)) + { + cmd.Parameters.Add(new NpgsqlParameter("p4", NpgsqlDbType.Text) { Value = null! 
}); + Assert.That(async () => await cmd.ExecuteReaderAsync(), Throws.Exception.TypeOf()); + } } - #endregion Record - [Test, Description("Makes sure that setting DbType.Object makes Npgsql infer the type")] [IssueLink("https://github.com/npgsql/npgsql/issues/694")] public async Task DbType_causes_inference() @@ -250,7 +165,7 @@ public Task Oidvector() public async Task Void() { await using var conn = await OpenConnectionAsync(); - Assert.That(await conn.ExecuteScalarAsync("SELECT pg_sleep(0)"), Is.SameAs(DBNull.Value)); + Assert.That(await conn.ExecuteScalarAsync("SELECT pg_sleep(0)"), Is.SameAs(null)); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1364")] diff --git a/test/Npgsql.Tests/Types/MoneyTests.cs b/test/Npgsql.Tests/Types/MoneyTests.cs index 8aceb03dac..4c38f3d111 100644 --- a/test/Npgsql.Tests/Types/MoneyTests.cs +++ b/test/Npgsql.Tests/Types/MoneyTests.cs @@ -1,5 +1,4 @@ -using System; -using System.Data; +using System.Data; using System.Threading.Tasks; using NpgsqlTypes; using NUnit.Framework; diff --git a/test/Npgsql.Tests/Types/MultirangeTests.cs b/test/Npgsql.Tests/Types/MultirangeTests.cs index 0162fc78ed..84f815c63c 100644 --- a/test/Npgsql.Tests/Types/MultirangeTests.cs +++ b/test/Npgsql.Tests/Types/MultirangeTests.cs @@ -1,5 +1,7 @@ using System; using System.Collections.Generic; +using System.Data; +using System.Linq; using System.Threading.Tasks; using NpgsqlTypes; using NUnit.Framework; @@ -9,189 +11,130 @@ namespace Npgsql.Tests.Types; public class MultirangeTests : TestBase { - [Test] - public async Task Read() - { - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT '{[3,7), (8,]}'::int4multirange", conn); - await using var reader = await cmd.ExecuteReaderAsync(); - await reader.ReadAsync(); - - Assert.That(reader.GetDataTypeName(0), Is.EqualTo("int4multirange")); - - var multirangeArray = (NpgsqlRange[])reader[0]; - Assert.That(multirangeArray.Length, Is.EqualTo(2)); - 
Assert.That(multirangeArray[0], Is.EqualTo(new NpgsqlRange(3, true, false, 7, false, false))); - Assert.That(multirangeArray[1], Is.EqualTo(new NpgsqlRange(9, true, false, 0, false, true))); - - var multirangeList = reader.GetFieldValue>>(0); - Assert.That(multirangeList.Count, Is.EqualTo(2)); - Assert.That(multirangeList[0], Is.EqualTo(new NpgsqlRange(3, true, false, 7, false, false))); - Assert.That(multirangeList[1], Is.EqualTo(new NpgsqlRange(9, true, false, 0, false, true))); - } - - [Test] - public async Task Write() - { - var multirangeArray = new NpgsqlRange[] - { - new(3, true, false, 7, false, false), - new(8, false, false, 0, false, true) - }; - - var multirangeList = new List>(multirangeArray); - - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT $1::text", conn); - - await WriteInternal(multirangeArray); - await WriteInternal(multirangeList); - - async Task WriteInternal(IList> multirange) - { - await conn.ReloadTypesAsync(); - cmd.Parameters.Add(new() { Value = multirange }); - Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo("{[3,7),[9,)}")); - - await conn.ReloadTypesAsync(); - cmd.Parameters[0] = new() { Value = multirange, NpgsqlDbType = NpgsqlDbType.IntegerMultirange }; - Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo("{[3,7),[9,)}")); - - await conn.ReloadTypesAsync(); - cmd.Parameters[0] = new() { Value = multirange, DataTypeName = "int4multirange" }; - Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo("{[3,7),[9,)}")); - } - } - - [Test] - public async Task Write_nummultirange() - { - var multirangeArray = new NpgsqlRange[] - { - new(3, true, false, 7, false, false), - new(8, false, false, 0, false, true) - }; - - var multirangeList = new List>(multirangeArray); - - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT $1::text", conn); - - await WriteInternal(multirangeArray); - await WriteInternal(multirangeList); - - async 
Task WriteInternal(IList> multirange) - { - conn.ReloadTypes(); - cmd.Parameters.Add(new() { Value = multirange }); - Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo("{[3,7),(8,)}")); - - conn.ReloadTypes(); - cmd.Parameters[0] = new() { Value = multirange, NpgsqlDbType = NpgsqlDbType.NumericMultirange }; - Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo("{[3,7),(8,)}")); - - conn.ReloadTypes(); - cmd.Parameters[0] = new() { Value = multirange, DataTypeName = "nummultirange" }; - Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo("{[3,7),(8,)}")); - } - } - - [Test] - public async Task Read_Datemultirange() + static readonly TestCaseData[] MultirangeTestCases = { - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT '{[2020-01-01,2020-01-05), (2020-01-10,]}'::datemultirange", conn); - await using var reader = await cmd.ExecuteReaderAsync(); - await reader.ReadAsync(); - - Assert.That(reader.GetDataTypeName(0), Is.EqualTo("datemultirange")); - - var multirangeDateTimeArray = (NpgsqlRange[])reader[0]; - Assert.That(multirangeDateTimeArray.Length, Is.EqualTo(2)); - Assert.That(multirangeDateTimeArray[0], Is.EqualTo(new NpgsqlRange(new(2020, 1, 1), true, false, new(2020, 1, 5), false, false))); - Assert.That(multirangeDateTimeArray[1], Is.EqualTo(new NpgsqlRange(new(2020, 1, 11), true, false, default, false, true))); - - var multirangeDateTimeList = reader.GetFieldValue>>(0); - Assert.That(multirangeDateTimeList.Count, Is.EqualTo(2)); - Assert.That(multirangeDateTimeList[0], Is.EqualTo(new NpgsqlRange(new(2020, 1, 1), true, false, new(2020, 1, 5), false, false))); - Assert.That(multirangeDateTimeList[1], Is.EqualTo(new NpgsqlRange(new(2020, 1, 11), true, false, default, false, true))); + // int4multirange + new TestCaseData( + new NpgsqlRange[] + { + new(3, true, false, 7, false, false), + new(9, true, false, 0, false, true) + }, + "{[3,7),[9,)}", "int4multirange", NpgsqlDbType.IntegerMultirange, true, 
true, default(NpgsqlRange)) + .SetName("Int"), + + // int8multirange + new TestCaseData( + new NpgsqlRange[] + { + new(3, true, false, 7, false, false), + new(9, true, false, 0, false, true) + }, + "{[3,7),[9,)}", "int8multirange", NpgsqlDbType.BigIntMultirange, true, true, default(NpgsqlRange)) + .SetName("Long"), + + // nummultirange + // numeric is non-discrete so doesn't undergo normalization, use that to test bound scenarios which otherwise get normalized + new TestCaseData( + new NpgsqlRange[] + { + new(3, true, false, 7, true, false), + new(9, false, false, 0, false, true) + }, + "{[3,7],(9,)}", "nummultirange", NpgsqlDbType.NumericMultirange, true, true, default(NpgsqlRange)) + .SetName("Decimal"), + + // daterange + new TestCaseData( + new NpgsqlRange[] + { + new(new(2020, 1, 1), true, false, new(2020, 1, 5), false, false), + new(new(2020, 1, 10), true, false, default, false, true) + }, + "{[2020-01-01,2020-01-05),[2020-01-10,)}", "datemultirange", NpgsqlDbType.DateMultirange, true, false, default(NpgsqlRange)) + .SetName("DateTime DateMultirange"), + + // tsmultirange + new TestCaseData( + new NpgsqlRange[] + { + new(new(2020, 1, 1), true, false, new(2020, 1, 5), false, false), + new(new(2020, 1, 10), true, false, default, false, true) + }, + """{["2020-01-01 00:00:00","2020-01-05 00:00:00"),["2020-01-10 00:00:00",)}""", "tsmultirange", NpgsqlDbType.TimestampMultirange, true, true, default(NpgsqlRange)) + .SetName("DateTime TimestampMultirange"), + + // tstzmultirange + new TestCaseData( + new NpgsqlRange[] + { + new(new(2020, 1, 1, 0, 0, 0, kind: DateTimeKind.Utc), true, false, new(2020, 1, 5, 0, 0, 0, kind: DateTimeKind.Utc), false, false), + new(new(2020, 1, 10, 0, 0, 0, kind: DateTimeKind.Utc), true, false, default, false, true) + }, + """{["2020-01-01 01:00:00+01","2020-01-05 01:00:00+01"),["2020-01-10 01:00:00+01",)}""", "tstzmultirange", NpgsqlDbType.TimestampTzMultirange, true, true, default(NpgsqlRange)) + .SetName("DateTime 
TimestampTzMultirange"), #if NET6_0_OR_GREATER - var multirangeDateOnlyArray = reader.GetFieldValue[]>(0); - Assert.That(multirangeDateOnlyArray.Length, Is.EqualTo(2)); - Assert.That(multirangeDateOnlyArray[0], Is.EqualTo(new NpgsqlRange(new(2020, 1, 1), true, false, new(2020, 1, 5), false, false))); - Assert.That(multirangeDateOnlyArray[1], Is.EqualTo(new NpgsqlRange(new(2020, 1, 11), true, false, default, false, true))); - - var multirangeDateOnlyList = reader.GetFieldValue>>(0); - Assert.That(multirangeDateOnlyList.Count, Is.EqualTo(2)); - Assert.That(multirangeDateOnlyList[0], Is.EqualTo(new NpgsqlRange(new(2020, 1, 1), true, false, new(2020, 1, 5), false, false))); - Assert.That(multirangeDateOnlyList[1], Is.EqualTo(new NpgsqlRange(new(2020, 1, 11), true, false, default, false, true))); + new TestCaseData( + new NpgsqlRange[] + { + new(new(2020, 1, 1), true, false, new(2020, 1, 5), false, false), + new(new(2020, 1, 10), true, false, default, false, true) + }, + "{[2020-01-01,2020-01-05),[2020-01-10,)}", "datemultirange", NpgsqlDbType.DateMultirange, false, false, default(NpgsqlRange)) + .SetName("DateOnly"), #endif - } + }; + + [Test, TestCaseSource(nameof(MultirangeTestCases))] + public Task Multirange_as_array( + T multirangeAsArray, string sqlLiteral, string pgTypeName, NpgsqlDbType? npgsqlDbType, bool isDefaultForReading, bool isDefaultForWriting, TRange _) + => AssertType(multirangeAsArray, sqlLiteral, pgTypeName, npgsqlDbType, isDefaultForReading: isDefaultForReading, + isDefaultForWriting: isDefaultForWriting); + + [Test, TestCaseSource(nameof(MultirangeTestCases))] + public Task Multirange_as_list( + T multirangeAsArray, string sqlLiteral, string pgTypeName, NpgsqlDbType? 
npgsqlDbType, bool isDefaultForReading, bool isDefaultForWriting, TRange _) + where T : IList + => AssertType( + new List(multirangeAsArray), + sqlLiteral, pgTypeName, npgsqlDbType, isDefaultForReading: false, isDefaultForWriting: isDefaultForWriting); -#if NET6_0_OR_GREATER [Test] - public async Task Write_Datemultirange_DateOnly() + [NonParallelizable] + public async Task Unmapped_multirange_with_mapped_subtype() { - var multirangeArray = new NpgsqlRange[] - { - new(new(2020, 1, 1), true, false, new(2020, 1, 5), false, false), - new(new(2020, 1, 10), false, false, default, false, true) - }; - - var multirangeList = new List>(multirangeArray); - - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT $1::text", conn); - - await WriteInternal(multirangeArray); - await WriteInternal(multirangeList); + await using var dataSource = CreateDataSource(csb => csb.MaxPoolSize = 1); + await using var conn = await dataSource.OpenConnectionAsync(); + + var typeName = await GetTempTypeName(conn); + await conn.ExecuteNonQueryAsync($"CREATE TYPE {typeName} AS RANGE(subtype=text)"); + await Task.Yield(); // TODO: fix multiplexing deadlock bug + conn.ReloadTypes(); + Assert.That(await conn.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1)); + + var value = new[] {new NpgsqlRange( + new string('a', conn.Settings.WriteBufferSize + 10).ToCharArray(), + new string('z', conn.Settings.WriteBufferSize + 10).ToCharArray() + )}; + + await using var cmd = new NpgsqlCommand("SELECT @p", conn); + cmd.Parameters.Add(new NpgsqlParameter { DataTypeName = typeName + "_multirange", ParameterName = "p", Value = value }); + await using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SequentialAccess); + await reader.ReadAsync(); - async Task WriteInternal(IList> multirange) - { - conn.ReloadTypes(); - cmd.Parameters.Add(new() { Value = multirange }); - Assert.That(await cmd.ExecuteScalarAsync(), 
Is.EqualTo("{[2020-01-01,2020-01-05),[2020-01-11,)}")); - - conn.ReloadTypes(); - cmd.Parameters[0] = new() { Value = multirange, NpgsqlDbType = NpgsqlDbType.DateMultirange }; - Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo("{[2020-01-01,2020-01-05),[2020-01-11,)}")); - - conn.ReloadTypes(); - cmd.Parameters[0] = new() { Value = multirange, DataTypeName = "datemultirange" }; - Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo("{[2020-01-01,2020-01-05),[2020-01-11,)}")); - } + Assert.That(reader.GetFieldType(0), Is.EqualTo(typeof(NpgsqlRange[]))); + var result = reader.GetFieldValue[]>(0); + Assert.That(result, Is.EqualTo(value).Using[]>((actual, expected) => + actual[0].LowerBound!.SequenceEqual(expected[0].LowerBound!) && actual[0].UpperBound!.SequenceEqual(expected[0].UpperBound!))); } -#endif - - [Test] - public async Task Write_Datemultirange_DateTime() - { - var multirangeArray = new NpgsqlRange[] - { - new(new(2020, 1, 1), true, false, new(2020, 1, 5), false, false), - new(new(2020, 1, 10), false, false, default, false, true) - }; - var multirangeList = new List>(multirangeArray); - - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT $1::text", conn); + protected override NpgsqlDataSource DataSource { get; } - await WriteInternal(multirangeArray); - await WriteInternal(multirangeList); - - async Task WriteInternal(IList> multirange) + public MultirangeTests() => DataSource = CreateDataSource(builder => { - conn.ReloadTypes(); - cmd.Parameters.Add(new() { Value = multirange, NpgsqlDbType = NpgsqlDbType.DateMultirange }); - Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo("{[2020-01-01,2020-01-05),[2020-01-11,)}")); - - conn.ReloadTypes(); - cmd.Parameters[0] = new() { Value = multirange, DataTypeName = "datemultirange" }; - Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo("{[2020-01-01,2020-01-05),[2020-01-11,)}")); - } - } + builder.ConnectionStringBuilder.Timezone = 
"Europe/Berlin"; + }); [OneTimeSetUp] public async Task Setup() @@ -199,7 +142,4 @@ public async Task Setup() await using var conn = await OpenConnectionAsync(); MinimumPgVersion(conn, "14.0", "Multirange types were introduced in PostgreSQL 14"); } - - protected override NpgsqlConnection OpenConnection() - => throw new NotSupportedException(); } diff --git a/test/Npgsql.Tests/Types/NetworkTypeTests.cs b/test/Npgsql.Tests/Types/NetworkTypeTests.cs index 63d456b9cd..994fdd45e4 100644 --- a/test/Npgsql.Tests/Types/NetworkTypeTests.cs +++ b/test/Npgsql.Tests/Types/NetworkTypeTests.cs @@ -17,46 +17,45 @@ class NetworkTypeTests : MultiplexingTestBase { [Test] public Task Inet_v4_as_IPAddress() - => AssertType(IPAddress.Parse("192.168.1.1"), "192.168.1.1/32", "inet", NpgsqlDbType.Inet); + => AssertType(IPAddress.Parse("192.168.1.1"), "192.168.1.1/32", "inet", NpgsqlDbType.Inet, skipArrayCheck: true); [Test] - public Task Inet_v6_as_IPAddress() + public Task Inet_v4_array_as_IPAddress_array() => AssertType( - IPAddress.Parse("2001:1db8:85a3:1142:1000:8a2e:1370:7334"), - "2001:1db8:85a3:1142:1000:8a2e:1370:7334/128", - "inet", - NpgsqlDbType.Inet); + new[] + { + IPAddress.Parse("192.168.1.1"), + IPAddress.Parse("192.168.1.2") + }, + "{192.168.1.1,192.168.1.2}", "inet[]", NpgsqlDbType.Inet | NpgsqlDbType.Array); [Test] - public Task Inet_v4_as_tuple() - => AssertType((IPAddress.Parse("192.168.1.1"), 24), "192.168.1.1/24", "inet", NpgsqlDbType.Inet, isDefaultForReading: false); - - [Test] - public Task Inet_v6_as_tuple() + public Task Inet_v6_as_IPAddress() => AssertType( - (IPAddress.Parse("2001:1db8:85a3:1142:1000:8a2e:1370:7334"), 24), - "2001:1db8:85a3:1142:1000:8a2e:1370:7334/24", + IPAddress.Parse("2001:1db8:85a3:1142:1000:8a2e:1370:7334"), + "2001:1db8:85a3:1142:1000:8a2e:1370:7334/128", "inet", NpgsqlDbType.Inet, - isDefaultForReading: false); + skipArrayCheck: true); [Test] - public Task Inet_v6_array_as_tuple() + public Task Inet_v6_array_as_IPAddress_array() => 
AssertType( - new[] { (IPAddress.Parse("2001:1db8:85a3:1142:1000:8a2e:1370:7334"), 24) }, - "{2001:1db8:85a3:1142:1000:8a2e:1370:7334/24}", - "inet[]", - NpgsqlDbType.Inet | NpgsqlDbType.Array, - isDefaultForReading: false); + new[] + { + IPAddress.Parse("2001:1db8:85a3:1142:1000:8a2e:1370:7334"), + IPAddress.Parse("2001:1db8:85a3:1142:1000:8a2e:1370:7335") + }, + "{2001:1db8:85a3:1142:1000:8a2e:1370:7334,2001:1db8:85a3:1142:1000:8a2e:1370:7335}", "inet[]", NpgsqlDbType.Inet | NpgsqlDbType.Array); [Test, IssueLink("https://github.com/dotnet/corefx/issues/33373")] public Task IPAddress_Any() - => AssertTypeWrite(IPAddress.Any, "0.0.0.0/32", "inet", NpgsqlDbType.Inet); + => AssertTypeWrite(IPAddress.Any, "0.0.0.0/32", "inet", NpgsqlDbType.Inet, skipArrayCheck: true); [Test] public Task Cidr() => AssertType( - (Address: IPAddress.Parse("192.168.1.0"), Subnet: 24), + new NpgsqlCidr(IPAddress.Parse("192.168.1.0"), netmask: 24), "192.168.1.0/24", "cidr", NpgsqlDbType.Cidr, @@ -129,10 +128,7 @@ public async Task Macaddr_write_validation() if (conn.PostgreSqlVersion < new Version(10, 0)) Assert.Ignore("macaddr8 only supported on PostgreSQL 10 and above"); - var exception = await AssertTypeUnsupportedWrite( - PhysicalAddress.Parse("08-00-2B-01-02-03-04-05"), "macaddr"); - - Assert.That(exception.Message, Does.StartWith("22P03:").And.Contain("1")); + await AssertTypeUnsupportedWrite(PhysicalAddress.Parse("08-00-2B-01-02-03-04-05"), "macaddr"); } public NetworkTypeTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} diff --git a/test/Npgsql.Tests/Types/NumericTypeTests.cs b/test/Npgsql.Tests/Types/NumericTypeTests.cs index 9c5c13c027..78dc2f7fa7 100644 --- a/test/Npgsql.Tests/Types/NumericTypeTests.cs +++ b/test/Npgsql.Tests/Types/NumericTypeTests.cs @@ -1,11 +1,9 @@ using System; -using System.Collections.Generic; using System.Data; using System.Globalization; using System.Threading.Tasks; using NpgsqlTypes; using NUnit.Framework; -using 
NUnit.Framework.Internal; using static Npgsql.Tests.TestUtil; namespace Npgsql.Tests.Types; @@ -110,4 +108,4 @@ public Task Read_overflow(T _, double value, string pgTypeName) => AssertTypeUnsupportedRead(value.ToString(CultureInfo.InvariantCulture), pgTypeName); public NumericTypeTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} -} \ No newline at end of file +} diff --git a/test/Npgsql.Tests/Types/RangeTests.cs b/test/Npgsql.Tests/Types/RangeTests.cs index 33a1365ec0..db57f0d78d 100644 --- a/test/Npgsql.Tests/Types/RangeTests.cs +++ b/test/Npgsql.Tests/Types/RangeTests.cs @@ -2,122 +2,69 @@ using System.ComponentModel; using System.Data; using System.Globalization; +using System.Linq; using System.Threading.Tasks; +using Npgsql.Properties; using Npgsql.Util; using NpgsqlTypes; using NUnit.Framework; - using static Npgsql.Tests.TestUtil; namespace Npgsql.Tests.Types; -/// -/// https://www.postgresql.org/docs/current/static/rangetypes.html -/// class RangeTests : MultiplexingTestBase { - [Test, NUnit.Framework.Description("Resolves a range type handler via the different pathways")] - public async Task Range_resolution() + static readonly TestCaseData[] RangeTestCases = { - if (IsMultiplexing) - Assert.Ignore("Multiplexing, ReloadTypes"); - - await using var dataSource = CreateDataSource(csb => csb.Pooling = false); - await using var conn = await OpenConnectionAsync(); - - // Resolve type by NpgsqlDbType - using (var cmd = new NpgsqlCommand("SELECT @p", conn)) - { - cmd.Parameters.AddWithValue("p", NpgsqlDbType.Range | NpgsqlDbType.Integer, DBNull.Value); - using (var reader = await cmd.ExecuteReaderAsync()) - { - reader.Read(); - Assert.That(reader.GetDataTypeName(0), Is.EqualTo("int4range")); - } - } - - // Resolve type by ClrType (type inference) - conn.ReloadTypes(); - using (var cmd = new NpgsqlCommand("SELECT @p", conn)) - { - cmd.Parameters.Add(new NpgsqlParameter { ParameterName = "p", Value = new NpgsqlRange(3, 5) }); - using (var reader 
= await cmd.ExecuteReaderAsync()) - { - reader.Read(); - Assert.That(reader.GetDataTypeName(0), Is.EqualTo("int4range")); - } - } - - // Resolve type by DataTypeName - conn.ReloadTypes(); - using (var cmd = new NpgsqlCommand("SELECT @p", conn)) - { - cmd.Parameters.Add(new NpgsqlParameter { ParameterName="p", DataTypeName = "int4range", Value = DBNull.Value }); - using (var reader = await cmd.ExecuteReaderAsync()) - { - reader.Read(); - Assert.That(reader.GetDataTypeName(0), Is.EqualTo("int4range")); - } - } - - // Resolve type by OID (read) - conn.ReloadTypes(); - using (var cmd = new NpgsqlCommand("SELECT int4range(3, 5)", conn)) - using (var reader = await cmd.ExecuteReaderAsync()) - { - reader.Read(); - Assert.That(reader.GetDataTypeName(0), Is.EqualTo("int4range")); - Assert.That(reader.GetFieldValue>(0), Is.EqualTo(new NpgsqlRange(3, true, 5, false))); - } - } - - [Test] - public async Task Range() - { - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand("SELECT @p1, @p2, @p3, @p4", conn); - var p1 = new NpgsqlParameter("p1", NpgsqlDbType.Range | NpgsqlDbType.Integer) { Value = NpgsqlRange.Empty }; - var p2 = new NpgsqlParameter { ParameterName = "p2", Value = new NpgsqlRange(1, 10) }; - var p3 = new NpgsqlParameter { ParameterName = "p3", Value = new NpgsqlRange(1, false, 10, false) }; - var p4 = new NpgsqlParameter { ParameterName = "p4", Value = new NpgsqlRange(0, false, true, 10, false, false) }; - Assert.That(p2.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Range | NpgsqlDbType.Integer)); - cmd.Parameters.Add(p1); - cmd.Parameters.Add(p2); - cmd.Parameters.Add(p3); - cmd.Parameters.Add(p4); - using var reader = await cmd.ExecuteReaderAsync(); - reader.Read(); - - Assert.That(reader[0].ToString(), Is.EqualTo("empty")); - Assert.That(reader[1].ToString(), Is.EqualTo("[1,11)")); - Assert.That(reader[2].ToString(), Is.EqualTo("[2,10)")); - Assert.That(reader[3].ToString(), Is.EqualTo("(,10)")); - } - - [Test] - [NonParallelizable] - 
public async Task Range_with_long_subtype() - { - await using var dataSource = CreateDataSource(csb => csb.MaxPoolSize = 1); - await using var conn = await dataSource.OpenConnectionAsync(); - - var typeName = await GetTempTypeName(conn); - await conn.ExecuteNonQueryAsync($"CREATE TYPE {typeName} AS RANGE(subtype=text)"); - await Task.Yield(); // TODO: fix multiplexing deadlock bug - conn.ReloadTypes(); - Assert.That(await conn.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1)); - - var value = new NpgsqlRange( - new string('a', conn.Settings.WriteBufferSize + 10), - new string('z', conn.Settings.WriteBufferSize + 10) - ); - - await using var cmd = new NpgsqlCommand("SELECT @p", conn); - cmd.Parameters.Add(new NpgsqlParameter("p", NpgsqlDbType.Range | NpgsqlDbType.Text) { Value = value }); - await using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SequentialAccess); - await reader.ReadAsync(); - Assert.That(reader[0], Is.EqualTo(value)); - } + new TestCaseData(new NpgsqlRange(1, true, 10, false), "[1,10)", "int4range", NpgsqlDbType.IntegerRange) + .SetName("IntegerRange"), + new TestCaseData(new NpgsqlRange(1, true, 10, false), "[1,10)", "int8range", NpgsqlDbType.BigIntRange) + .SetName("BigIntRange"), + new TestCaseData(new NpgsqlRange(1, true, 10, false), "[1,10)", "numrange", NpgsqlDbType.NumericRange) + .SetName("NumericRange"), + new TestCaseData(new NpgsqlRange( + new DateTime(2020, 1, 1, 12, 0, 0), true, + new DateTime(2020, 1, 3, 13, 0, 0), false), + """["2020-01-01 12:00:00","2020-01-03 13:00:00")""", "tsrange", NpgsqlDbType.TimestampRange) + .SetName("TimestampRange"), + // Note that the below text representations are local (according to TimeZone, which is set to Europe/Berlin in this test class), + // because that's how PG does timestamptz *text* representation. 
+ new TestCaseData(new NpgsqlRange( + new DateTime(2020, 1, 1, 12, 0, 0, DateTimeKind.Utc), true, + new DateTime(2020, 1, 3, 13, 0, 0, DateTimeKind.Utc), false), + """["2020-01-01 13:00:00+01","2020-01-03 14:00:00+01")""", "tstzrange", NpgsqlDbType.TimestampTzRange) + .SetName("TimestampTzRange"), + + // Note that numrange is a non-discrete range, and therefore doesn't undergo normalization to inclusive/exclusive in PG + new TestCaseData(NpgsqlRange.Empty, "empty", "numrange", NpgsqlDbType.NumericRange) + .SetName("EmptyRange"), + new TestCaseData(new NpgsqlRange(1, true, 10, true), "[1,10]", "numrange", NpgsqlDbType.NumericRange) + .SetName("Inclusive"), + new TestCaseData(new NpgsqlRange(1, false, 10, false), "(1,10)", "numrange", NpgsqlDbType.NumericRange) + .SetName("Exclusive"), + new TestCaseData(new NpgsqlRange(1, true, 10, false), "[1,10)", "numrange", NpgsqlDbType.NumericRange) + .SetName("InclusiveExclusive"), + new TestCaseData(new NpgsqlRange(1, false, 10, true), "(1,10]", "numrange", NpgsqlDbType.NumericRange) + .SetName("ExclusiveInclusive"), + new TestCaseData(new NpgsqlRange(1, false, true, 10, false, false), "(,10)", "numrange", NpgsqlDbType.NumericRange) + .SetName("InfiniteLowerBound"), + new TestCaseData(new NpgsqlRange(1, true, false, 10, false, true), "[1,)", "numrange", NpgsqlDbType.NumericRange) + .SetName("InfiniteUpperBound") + }; + + // See more test cases in DateTimeTests + [Test, TestCaseSource(nameof(RangeTestCases))] + public Task Range(T range, string sqlLiteral, string pgTypeName, NpgsqlDbType? npgsqlDbType) + => AssertType(range, sqlLiteral, pgTypeName, npgsqlDbType, + // NpgsqlRange[] is mapped to multirange by default, not array, so the built-in AssertType testing for arrays fails + // (see below) + skipArrayCheck: true); + + // This re-executes the same scenario as above, but with isDefaultForWriting: false and without skipArrayCheck: true. + // This tests coverage of range arrays (as opposed to multiranges). 
+ [Test, TestCaseSource(nameof(RangeTestCases))] + public Task Range_array(T range, string sqlLiteral, string pgTypeName, NpgsqlDbType? npgsqlDbType) + => AssertType(range, sqlLiteral, pgTypeName, npgsqlDbType, isDefaultForWriting: false); [Test] public void Equality_finite() @@ -217,6 +164,35 @@ public async Task TimestampTz_range_with_DateTimeOffset() Assert.That(actual, Is.EqualTo(range)); } + [Test] + [NonParallelizable] + public async Task Unmapped_range_with_mapped_subtype() + { + await using var dataSource = CreateDataSource(csb => csb.MaxPoolSize = 1); + await using var conn = await dataSource.OpenConnectionAsync(); + + var typeName = await GetTempTypeName(conn); + await conn.ExecuteNonQueryAsync($"CREATE TYPE {typeName} AS RANGE(subtype=text)"); + await Task.Yield(); // TODO: fix multiplexing deadlock bug + conn.ReloadTypes(); + Assert.That(await conn.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1)); + + var value = new NpgsqlRange( + new string('a', conn.Settings.WriteBufferSize + 10).ToCharArray(), + new string('z', conn.Settings.WriteBufferSize + 10).ToCharArray() + ); + + await using var cmd = new NpgsqlCommand("SELECT @p", conn); + cmd.Parameters.Add(new NpgsqlParameter { DataTypeName = typeName, ParameterName = "p", Value = value }); + await using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SequentialAccess); + await reader.ReadAsync(); + + Assert.That(reader.GetFieldType(0), Is.EqualTo(typeof(NpgsqlRange))); + var result = reader.GetFieldValue>(0); + Assert.That(result, Is.EqualTo(value).Using>((actual, expected) => + actual.LowerBound!.SequenceEqual(expected.LowerBound!) 
&& actual.UpperBound!.SequenceEqual(expected.UpperBound!))); + } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4441")] public async Task Array_of_range() { @@ -240,13 +216,41 @@ await AssertType( new(3, lowerBoundIsInclusive: true, 4, upperBoundIsInclusive: false), new(5, lowerBoundIsInclusive: true, 6, upperBoundIsInclusive: false) }, - @"{""[3,4)"",""[5,6)""}", + """{"[3,4)","[5,6)"}""", "int4range[]", NpgsqlDbType.IntegerRange | NpgsqlDbType.Array, isDefaultForWriting: !supportsMultirange, isNpgsqlDbTypeInferredFromClrType: false); } + [Test] + public async Task Ranges_not_supported_by_default_on_NpgsqlSlimSourceBuilder() + { + var errorMessage = string.Format( + NpgsqlStrings.RangesNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableRanges), nameof(NpgsqlSlimDataSourceBuilder)); + + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + await using var dataSource = dataSourceBuilder.Build(); + + var exception = await AssertTypeUnsupportedRead>("[1,10)", "int4range", dataSource); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); + exception = await AssertTypeUnsupportedWrite>( + new NpgsqlRange(1, true, 10, false), "int4range", dataSource); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); + } + + [Test] + public async Task NpgsqlSlimSourceBuilder_EnableRanges() + { + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + dataSourceBuilder.EnableRanges(); + await using var dataSource = dataSourceBuilder.Build(); + + await AssertType( + dataSource, + new NpgsqlRange(1, true, 10, false), "[1,10)", "int4range", NpgsqlDbType.IntegerRange, skipArrayCheck: true); + } + protected override NpgsqlConnection OpenConnection() => throw new NotSupportedException(); @@ -434,5 +438,11 @@ public override object ConvertFrom(ITypeDescriptorContext? context, CultureInfo? 
#endregion - public RangeTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} + protected override NpgsqlDataSource DataSource { get; } + + public RangeTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) + => DataSource = CreateDataSource(builder => + { + builder.ConnectionStringBuilder.Timezone = "Europe/Berlin"; + }); } diff --git a/test/Npgsql.Tests/Types/RecordTests.cs b/test/Npgsql.Tests/Types/RecordTests.cs new file mode 100644 index 0000000000..54a56baa4a --- /dev/null +++ b/test/Npgsql.Tests/Types/RecordTests.cs @@ -0,0 +1,109 @@ +using System; +using System.Threading.Tasks; +using Npgsql.Properties; +using NUnit.Framework; +using NUnit.Framework.Constraints; + +namespace Npgsql.Tests.Types; + +public class RecordTests : MultiplexingTestBase +{ + [Test] + [IssueLink("https://github.com/npgsql/npgsql/issues/724")] + [IssueLink("https://github.com/npgsql/npgsql/issues/1980")] + public async Task Read_Record_as_object_array() + { + var recordLiteral = "(1,'foo'::text)::record"; + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand($"SELECT {recordLiteral}, ARRAY[{recordLiteral}, {recordLiteral}]", conn); + await using var reader = await cmd.ExecuteReaderAsync(); + reader.Read(); + + var record = (object[])reader[0]; + Assert.That(record[0], Is.EqualTo(1)); + Assert.That(record[1], Is.EqualTo("foo")); + + var array = (object[][])reader[1]; + Assert.That(array.Length, Is.EqualTo(2)); + Assert.That(array[0][0], Is.EqualTo(1)); + Assert.That(array[1][0], Is.EqualTo(1)); + } + + [Test] + public async Task Read_Record_as_ValueTuple() + { + var recordLiteral = "(1,'foo'::text)::record"; + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand($"SELECT {recordLiteral}, ARRAY[{recordLiteral}, {recordLiteral}]", conn); + await using var reader = await cmd.ExecuteReaderAsync(); + reader.Read(); + + var record = reader.GetFieldValue<(int, string)>(0); + 
Assert.That(record.Item1, Is.EqualTo(1)); + Assert.That(record.Item2, Is.EqualTo("foo")); + + var array = (object[][])reader[1]; + Assert.That(array.Length, Is.EqualTo(2)); + Assert.That(array[0][0], Is.EqualTo(1)); + Assert.That(array[1][0], Is.EqualTo(1)); + } + + [Test] + public async Task Read_Record_as_Tuple() + { + var recordLiteral = "(1,'foo'::text)::record"; + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand($"SELECT {recordLiteral}, ARRAY[{recordLiteral}, {recordLiteral}]", conn); + await using var reader = await cmd.ExecuteReaderAsync(); + reader.Read(); + + var record = reader.GetFieldValue>(0); + Assert.That(record.Item1, Is.EqualTo(1)); + Assert.That(record.Item2, Is.EqualTo("foo")); + + var array = (object[][])reader[1]; + Assert.That(array.Length, Is.EqualTo(2)); + Assert.That(array[0][0], Is.EqualTo(1)); + Assert.That(array[1][0], Is.EqualTo(1)); + } + + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1238")] + public async Task Record_with_non_int_field() + { + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT ('one'::TEXT, 2)", conn); + await using var reader = await cmd.ExecuteReaderAsync(); + reader.Read(); + var record = reader.GetFieldValue(0); + Assert.That(record[0], Is.EqualTo("one")); + Assert.That(record[1], Is.EqualTo(2)); + } + + [Test] + public async Task Records_supported_only_with_EnableRecords([Values] bool withMappings) + { + Func assertExpr = () => withMappings + ? 
Throws.Nothing + : Throws.Exception + .TypeOf() + .With.Property("InnerException").Property("Message") + .EqualTo(string.Format(NpgsqlStrings.RecordsNotEnabled, "EnableRecords", "NpgsqlSlimDataSourceBuilder")); + + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + if (withMappings) + dataSourceBuilder.EnableRecords(); + await using var dataSource = dataSourceBuilder.Build(); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = conn.CreateCommand(); + + // RecordHandler doesn't support writing, so we only check for reading + cmd.CommandText = "SELECT ('one'::text, 2)"; + await using var reader = await cmd.ExecuteReaderAsync(); + await reader.ReadAsync(); + + Assert.That(() => reader.GetValue(0), assertExpr()); + Assert.That(() => reader.GetFieldValue(0), assertExpr()); + } + + public RecordTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} +} diff --git a/test/Npgsql.Tests/Types/TextTests.cs b/test/Npgsql.Tests/Types/TextTests.cs index aa2e7d69a3..c4583151b4 100644 --- a/test/Npgsql.Tests/Types/TextTests.cs +++ b/test/Npgsql.Tests/Types/TextTests.cs @@ -79,6 +79,7 @@ public async Task Truncate() Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(data2.Substring(0, 4))); // NpgsqlParameter.Size larger than the value size should mean the value size, as well as 0 and -1 + p.Value = data2; p.Size = data2.Length + 10; Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(data2)); p.Size = 0; diff --git a/test/Npgsql.Tests/TypesTests.cs b/test/Npgsql.Tests/TypesTests.cs index 5dbfa844f3..de2b1beed0 100644 --- a/test/Npgsql.Tests/TypesTests.cs +++ b/test/Npgsql.Tests/TypesTests.cs @@ -1,8 +1,5 @@ using System; -using System.Diagnostics; -using System.Globalization; using System.Net; -using Npgsql.Util; using NpgsqlTypes; using NUnit.Framework; @@ -208,10 +205,6 @@ public void NpgsqlInet() { var v = new NpgsqlInet(IPAddress.Parse("2001:1db8:85a3:1142:1000:8a2e:1370:7334"), 32); 
Assert.That(v.ToString(), Is.EqualTo("2001:1db8:85a3:1142:1000:8a2e:1370:7334/32")); - -#pragma warning disable CS8625 - Assert.That(v != null); // #776 -#pragma warning disable CS8625 } #pragma warning restore 618 } diff --git a/test/Npgsql.Tests/WriteBufferTests.cs b/test/Npgsql.Tests/WriteBufferTests.cs index 19603b1741..5bd6cdf5a1 100644 --- a/test/Npgsql.Tests/WriteBufferTests.cs +++ b/test/Npgsql.Tests/WriteBufferTests.cs @@ -1,6 +1,5 @@ using System.IO; using Npgsql.Internal; -using Npgsql.Util; using NUnit.Framework; namespace Npgsql.Tests; @@ -8,6 +7,16 @@ namespace Npgsql.Tests; [FixtureLifeCycle(LifeCycle.InstancePerTestCase)] // Parallel access to a single buffer class WriteBufferTests { + [Test] + public void GetWriter_Full_Buffer() + { + WriteBuffer.WritePosition += WriteBuffer.WriteSpaceLeft; + var writer = WriteBuffer.GetWriter(null!, FlushMode.Blocking); + Assert.That(writer.ShouldFlush(sizeof(byte)), Is.True); + writer.Flush(); + Assert.That(writer.ShouldFlush(sizeof(byte)), Is.False); + } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1275")] public void Write_zero_characters() { @@ -88,7 +97,7 @@ public void Chunked_char_array_encoding_fits_with_surrogates() public void SetUp() { Underlying = new MemoryStream(); - WriteBuffer = new NpgsqlWriteBuffer(null, Underlying, null, NpgsqlReadBuffer.DefaultSize, PGUtil.UTF8Encoding); + WriteBuffer = new NpgsqlWriteBuffer(null, Underlying, null, NpgsqlReadBuffer.DefaultSize, NpgsqlWriteBuffer.UTF8Encoding); } #pragma warning restore CS8625 From 78ee2c2a7a8e2cd2714cf59dc5b37ccb0d8d80b5 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Mon, 25 Sep 2023 23:51:36 +0200 Subject: [PATCH 211/761] Hacky exposing of enum type mappings for EFCore.PG (#5278) --- src/Npgsql/Internal/HackyEnumTypeMapping.cs | 28 +++++++++++++++++++++ src/Npgsql/NpgsqlDataSource.cs | 4 +++ src/Npgsql/NpgsqlDataSourceConfiguration.cs | 1 + src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 16 ++++++++++++ 
src/Npgsql/TypeMapping/GlobalTypeMapper.cs | 17 ++++++++++++- src/Npgsql/TypeMapping/UserTypeMapper.cs | 13 ++++++++-- 6 files changed, 76 insertions(+), 3 deletions(-) create mode 100644 src/Npgsql/Internal/HackyEnumTypeMapping.cs diff --git a/src/Npgsql/Internal/HackyEnumTypeMapping.cs b/src/Npgsql/Internal/HackyEnumTypeMapping.cs new file mode 100644 index 0000000000..de50c40fad --- /dev/null +++ b/src/Npgsql/Internal/HackyEnumTypeMapping.cs @@ -0,0 +1,28 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Reflection; +using Npgsql.Internal; +using Npgsql.PostgresTypes; +using NpgsqlTypes; + +namespace Npgsql.Internal; + +#pragma warning disable CS1591 // Missing XML comment for publicly visible type or member + +/// +/// Hacky temporary measure used by EFCore.PG to extract user-configured enum mappings. Accessed via reflection only. +/// +public sealed class HackyEnumTypeMapping +{ + public HackyEnumTypeMapping(Type enumClrType, string pgTypeName, INpgsqlNameTranslator nameTranslator) + { + EnumClrType = enumClrType; + PgTypeName = pgTypeName; + NameTranslator = nameTranslator; + } + + public string PgTypeName { get; } + public Type EnumClrType { get; } + public INpgsqlNameTranslator NameTranslator { get; } +} diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index ee3ec18eb5..8aa07ba0ca 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -3,6 +3,7 @@ using System.Data.Common; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; +using System.Linq; using System.Net.Security; using System.Security.Cryptography.X509Certificates; using System.Threading; @@ -79,6 +80,8 @@ private protected readonly Dictionary> _pendi readonly INpgsqlNameTranslator _defaultNameTranslator; + internal List? 
_hackyEnumTypeMappings; + internal NpgsqlDataSource( NpgsqlConnectionStringBuilder settings, NpgsqlDataSourceConfiguration dataSourceConfig) @@ -99,6 +102,7 @@ internal NpgsqlDataSource( _periodicPasswordSuccessRefreshInterval, _periodicPasswordFailureRefreshInterval, var resolverChain, + _hackyEnumTypeMappings, _defaultNameTranslator, ConnectionInitializer, ConnectionInitializerAsync) diff --git a/src/Npgsql/NpgsqlDataSourceConfiguration.cs b/src/Npgsql/NpgsqlDataSourceConfiguration.cs index 749ab7df7b..dac604b6dc 100644 --- a/src/Npgsql/NpgsqlDataSourceConfiguration.cs +++ b/src/Npgsql/NpgsqlDataSourceConfiguration.cs @@ -18,6 +18,7 @@ sealed record NpgsqlDataSourceConfiguration( TimeSpan PeriodicPasswordSuccessRefreshInterval, TimeSpan PeriodicPasswordFailureRefreshInterval, IEnumerable ResolverChain, + List HackyEnumMappings, INpgsqlNameTranslator DefaultNameTranslator, Action? ConnectionInitializer, Func? ConnectionInitializerAsync); diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index c074e29dec..b1b7ec69cb 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -517,6 +517,7 @@ _loggerFactory is null _periodicPasswordSuccessRefreshInterval, _periodicPasswordFailureRefreshInterval, Resolvers(), + HackyEnumMappings(), DefaultNameTranslator, _syncConnectionInitializer, _asyncConnectionInitializer); @@ -535,6 +536,21 @@ IEnumerable Resolvers() return resolvers; } + + List HackyEnumMappings() + { + var mappings = new List(); + + if (_userTypeMapper.Items.Count > 0) + foreach (var userTypeMapping in _userTypeMapper.Items) + if (userTypeMapping is UserTypeMapper.EnumMapping enumMapping) + mappings.Add(new(enumMapping.ClrType, enumMapping.PgTypeName, enumMapping.NameTranslator)); + + if (GlobalTypeMapper.Instance.HackyEnumTypeMappings.Count > 0) + mappings.AddRange(GlobalTypeMapper.Instance.HackyEnumTypeMappings); + + return mappings; + } } void ValidateMultiHost() 
diff --git a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs index fdaa340bd8..719ee48ccf 100644 --- a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs +++ b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs @@ -1,6 +1,7 @@ using System; using System.Collections.Generic; using System.Diagnostics.CodeAnalysis; +using System.Linq; using System.Threading; using Npgsql.Internal; using Npgsql.Internal.Postgres; @@ -15,6 +16,8 @@ sealed class GlobalTypeMapper : INpgsqlTypeMapper readonly ReaderWriterLockSlim _lock = new(); IPgTypeInfoResolver[] _typeMappingResolvers = Array.Empty(); + internal List HackyEnumTypeMappings { get; } = new(); + internal IEnumerable GetPluginResolvers() { var resolvers = new List(); @@ -154,6 +157,7 @@ public void Reset() { _pluginResolvers.Clear(); _userTypeMapper.Items.Clear(); + HackyEnumTypeMappings.Clear(); } finally { @@ -175,6 +179,11 @@ public INpgsqlTypeMapper MapEnum(string? pgName = null, INpgsqlNameTransl try { _userTypeMapper.MapEnum(pgName, nameTranslator); + + // Temporary hack for EFCore.PG enum mapping compat + if (_userTypeMapper.Items.FirstOrDefault(i => i.ClrType == typeof(TEnum)) is UserTypeMapping userTypeMapping) + HackyEnumTypeMappings.Add(new(typeof(TEnum), userTypeMapping.PgTypeName, nameTranslator ?? DefaultNameTranslator)); + return this; } finally @@ -189,7 +198,13 @@ public bool UnmapEnum(string? pgName = null, INpgsqlNameTranslator? 
nameT _lock.EnterWriteLock(); try { - return _userTypeMapper.UnmapEnum(pgName, nameTranslator); + var removed = _userTypeMapper.UnmapEnum(pgName, nameTranslator); + + // Temporary hack for EFCore.PG enum mapping compat + if (removed && ((List)_userTypeMapper.Items).FindIndex(m => m.ClrType == typeof(TEnum)) is > -1 and var index) + HackyEnumTypeMappings.RemoveAt(index); + + return removed; } finally { diff --git a/src/Npgsql/TypeMapping/UserTypeMapper.cs b/src/Npgsql/TypeMapping/UserTypeMapper.cs index 8524dfeb14..5447af9e87 100644 --- a/src/Npgsql/TypeMapping/UserTypeMapper.cs +++ b/src/Npgsql/TypeMapping/UserTypeMapper.cs @@ -181,14 +181,23 @@ internal override void Build(TypeInfoMappingCollection mappings) } } - sealed class EnumMapping : UserTypeMapping + internal abstract class EnumMapping : UserTypeMapping + { + internal INpgsqlNameTranslator NameTranslator { get; } + + public EnumMapping(string pgTypeName, Type enumClrType, INpgsqlNameTranslator nameTranslator) + : base(pgTypeName, enumClrType) + => NameTranslator = nameTranslator; + } + + sealed class EnumMapping : EnumMapping where TEnum : struct, Enum { readonly Dictionary _enumToLabel = new(); readonly Dictionary _labelToEnum = new(); public EnumMapping(string pgTypeName, INpgsqlNameTranslator nameTranslator) - : base(pgTypeName, typeof(TEnum)) + : base(pgTypeName, typeof(TEnum), nameTranslator) { foreach (var field in typeof(TEnum).GetFields(BindingFlags.Static | BindingFlags.Public)) { From 3ca79decf12a7602fdfa0c347f1f5c80d476b2f7 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Mon, 25 Sep 2023 23:58:38 +0200 Subject: [PATCH 212/761] Remove all commonly rooted linq usage from main project (#5203) --- .../FullTextSearch/TsVectorConverter.cs | 9 ++- src/Npgsql/Internal/NpgsqlConnector.Auth.cs | 5 +- .../NpgsqlConnector.FrontendMessages.cs | 9 +-- src/Npgsql/Internal/NpgsqlConnector.cs | 17 +++-- src/Npgsql/Internal/NpgsqlDatabaseInfo.cs | 7 +- src/Npgsql/Internal/NpgsqlWriteBuffer.cs | 11 ++- 
.../Resolvers/UnmappedEnumTypeInfoResolver.cs | 3 +- src/Npgsql/Internal/TypeInfoResolverChain.cs | 3 +- src/Npgsql/KerberosUsernameProvider.cs | 16 ++-- .../NpgsqlSnakeCaseNameTranslator.cs | 13 +++- src/Npgsql/NpgsqlBinaryExporter.cs | 6 +- src/Npgsql/NpgsqlCommand.cs | 74 ++++++++++++++----- src/Npgsql/NpgsqlConnectionStringBuilder.cs | 47 ++++++++---- src/Npgsql/NpgsqlDataReader.cs | 44 ++++++++--- src/Npgsql/NpgsqlDataSourceBuilder.cs | 6 +- src/Npgsql/NpgsqlMultiHostDataSource.cs | 27 +++++-- src/Npgsql/NpgsqlSchema.cs | 11 ++- src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 5 +- src/Npgsql/PgPassFile.cs | 26 +++++-- src/Npgsql/PostgresDatabaseInfo.cs | 6 +- src/Npgsql/PostgresErrorCodes.cs | 11 ++- src/Npgsql/PostgresMinimalDatabaseInfo.cs | 37 ++++++---- src/Npgsql/PregeneratedMessages.cs | 9 +-- .../LogicalReplicationConnectionExtensions.cs | 19 +++-- src/Npgsql/Schema/DbColumnSchemaGenerator.cs | 13 ++-- 25 files changed, 298 insertions(+), 136 deletions(-) diff --git a/src/Npgsql/Internal/Converters/FullTextSearch/TsVectorConverter.cs b/src/Npgsql/Internal/Converters/FullTextSearch/TsVectorConverter.cs index a61aa2244c..2c431fd35b 100644 --- a/src/Npgsql/Internal/Converters/FullTextSearch/TsVectorConverter.cs +++ b/src/Npgsql/Internal/Converters/FullTextSearch/TsVectorConverter.cs @@ -1,6 +1,5 @@ using System; using System.Collections.Generic; -using System.Linq; using System.Text; using System.Threading; using System.Threading.Tasks; @@ -68,7 +67,13 @@ async ValueTask Read(bool async, PgReader reader, CancellationTo } public override Size GetSize(SizeContext context, NpgsqlTsVector value, ref object? 
writeState) - => 4 + value.Sum(l => _encoding.GetByteCount(l.Text) + 1 + 2 + l.Count * 2); + { + var size = 4; + foreach (var l in value) + size += _encoding.GetByteCount(l.Text) + 1 + 2 + l.Count * 2; + + return size; + } public override void Write(PgWriter writer, NpgsqlTsVector value) => Write(async: false, writer, value, CancellationToken.None).GetAwaiter().GetResult(); diff --git a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs index 25847da65e..459ca290df 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs @@ -1,7 +1,6 @@ using System; using System.Collections.Generic; using System.Diagnostics; -using System.Linq; using System.Net.Security; using System.Security.Cryptography; using System.Security.Cryptography.X509Certificates; @@ -239,7 +238,9 @@ internal void AuthenticateSASLSha256Plus(ref string mechanism, ref string cbindF var cbindFlagBytes = Encoding.UTF8.GetBytes($"{cbindFlag},,"); var certificateHash = hashAlgorithm.ComputeHash(remoteCertificate.GetRawCertData()); - var cbindBytes = cbindFlagBytes.Concat(certificateHash).ToArray(); + var cbindBytes = new byte[cbindFlagBytes.Length + certificateHash.Length]; + cbindFlagBytes.CopyTo(cbindBytes, 0); + certificateHash.CopyTo(cbindBytes, cbindFlagBytes.Length); cbind = Convert.ToBase64String(cbindBytes); successfulBind = true; IsScramPlus = true; diff --git a/src/Npgsql/Internal/NpgsqlConnector.FrontendMessages.cs b/src/Npgsql/Internal/NpgsqlConnector.FrontendMessages.cs index 91a492ae5b..ac57019a16 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.FrontendMessages.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.FrontendMessages.cs @@ -1,7 +1,6 @@ using System; using System.Collections.Generic; using System.Diagnostics; -using System.Linq; using System.Threading; using System.Threading.Tasks; @@ -11,7 +10,7 @@ partial class NpgsqlConnector { internal Task WriteDescribe(StatementOrPortal statementOrPortal, string 
name, bool async, CancellationToken cancellationToken = default) { - Debug.Assert(name.All(c => c < 128)); + NpgsqlWriteBuffer.AssertASCIIOnly(name); var len = sizeof(byte) + // Message code sizeof(int) + // Length @@ -98,7 +97,7 @@ void Write(int maxRows) internal async Task WriteParse(string sql, string statementName, List inputParameters, bool async, CancellationToken cancellationToken = default) { - Debug.Assert(statementName.All(c => c < 128)); + NpgsqlWriteBuffer.AssertASCIIOnly(statementName); int queryByteLen; try @@ -152,8 +151,8 @@ internal async Task WriteBind( bool async, CancellationToken cancellationToken = default) { - Debug.Assert(statement.All(c => c < 128)); - Debug.Assert(portal.All(c => c < 128)); + NpgsqlWriteBuffer.AssertASCIIOnly(statement); + NpgsqlWriteBuffer.AssertASCIIOnly(portal); var headerLength = sizeof(byte) + // Message code diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index d7e359b6af..5ab0e8d174 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -4,7 +4,6 @@ using System.Data; using System.Diagnostics; using System.IO; -using System.Linq; using System.Net; using System.Net.Security; using System.Net.Sockets; @@ -958,7 +957,7 @@ void Connect(NpgsqlTimeout timeout) // Note that there aren't any timeout-able or cancellable DNS methods var endpoints = NpgsqlConnectionStringBuilder.IsUnixSocket(Host, Port, out var socketPath) ? 
new EndPoint[] { new UnixDomainSocketEndPoint(socketPath) } - : Dns.GetHostAddresses(Host).Select(a => new IPEndPoint(a, Port)).ToArray(); + : IPAddressesToEndpoints(Dns.GetHostAddresses(Host), Port); timeout.Check(); // Give each endpoint an equal share of the remaining time @@ -997,7 +996,7 @@ void Connect(NpgsqlTimeout timeout) var errorCode = (int) socket.GetSocketOption(SocketOptionLevel.Socket, SocketOptionName.Error)!; if (errorCode != 0) throw new SocketException(errorCode); - if (!write.Any()) + if (write.Count is 0) throw new TimeoutException("Timeout during connection attempt"); socket.Blocking = true; SetSocketOptions(socket); @@ -1035,8 +1034,8 @@ Task GetHostAddressesAsync(CancellationToken ct) => // and raises the exception, while the actual task may be left running. var endpoints = NpgsqlConnectionStringBuilder.IsUnixSocket(Host, Port, out var socketPath) ? new EndPoint[] { new UnixDomainSocketEndPoint(socketPath) } - : (await TaskTimeoutAndCancellation.ExecuteAsync(GetHostAddressesAsync, timeout, cancellationToken)) - .Select(a => new IPEndPoint(a, Port)).ToArray(); + : IPAddressesToEndpoints(await TaskTimeoutAndCancellation.ExecuteAsync(GetHostAddressesAsync, timeout, cancellationToken), + Port); // Give each IP an equal share of the remaining time var perIpTimespan = default(TimeSpan); @@ -1103,6 +1102,14 @@ Task ConnectAsync(CancellationToken ct) => } } + IPEndPoint[] IPAddressesToEndpoints(IPAddress[] ipAddresses, int port) + { + var result = new IPEndPoint[ipAddresses.Length]; + for (var i = 0; i < ipAddresses.Length; i++) + result[i] = new IPEndPoint(ipAddresses[i], port); + return result; + } + void SetSocketOptions(Socket socket) { if (socket.AddressFamily == AddressFamily.InterNetwork || socket.AddressFamily == AddressFamily.InterNetworkV6) diff --git a/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs b/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs index f3c8ea52a3..8120b4e9c8 100644 --- a/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs +++ 
b/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs @@ -1,7 +1,6 @@ using System; using System.Collections.Generic; using System.Diagnostics.CodeAnalysis; -using System.Linq; using System.Threading.Tasks; using Npgsql.Internal.Postgres; using Npgsql.PostgresTypes; @@ -215,7 +214,11 @@ public bool TryGetPostgresTypeByName(string pgName, [NotNullWhen(true)] out Post if (ByFullName.TryGetValue($"pg_catalog.{pgName}", out pgType)) return true; - var ambiguousTypes = ByFullName.Keys.Where(n => n.EndsWith($".{pgName}", StringComparison.Ordinal)); + var ambiguousTypes = new List(); + foreach (var key in ByFullName.Keys) + if (key.EndsWith($".{pgName}", StringComparison.Ordinal)) + ambiguousTypes.Add(key); + throw new ArgumentException($"More than one PostgreSQL type was found with the name {pgName}, " + $"please specify a full name including schema: {string.Join(", ", ambiguousTypes)}"); } diff --git a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs index 94775ec3ad..eb619f58c9 100644 --- a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs @@ -2,7 +2,6 @@ using System.Buffers.Binary; using System.Diagnostics; using System.IO; -using System.Linq; using System.Net.Sockets; using System.Runtime.CompilerServices; using System.Text; @@ -472,7 +471,7 @@ public async Task WriteStreamRaw(Stream stream, int count, bool async, Cancellat public void WriteNullTerminatedString(string s) { - Debug.Assert(s.All(c => c < 128), "Method only supports ASCII strings"); + AssertASCIIOnly(s); Debug.Assert(WriteSpaceLeft >= s.Length + 1); WritePosition += Encoding.ASCII.GetBytes(s, 0, s.Length, Buffer, WritePosition); WriteByte(0); @@ -581,5 +580,13 @@ internal byte[] GetContents() return buf; } + [Conditional("DEBUG")] + internal static void AssertASCIIOnly(string s) + { + foreach (var c in s) + if (c >= 128) + Debug.Fail("Method only supports ASCII strings"); + } + #endregion } diff --git 
a/src/Npgsql/Internal/Resolvers/UnmappedEnumTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/UnmappedEnumTypeInfoResolver.cs index b6ab437255..5c6d19db9b 100644 --- a/src/Npgsql/Internal/Resolvers/UnmappedEnumTypeInfoResolver.cs +++ b/src/Npgsql/Internal/Resolvers/UnmappedEnumTypeInfoResolver.cs @@ -1,7 +1,6 @@ using System; using System.Collections.Generic; using System.Diagnostics.CodeAnalysis; -using System.Linq; using System.Reflection; using Npgsql.Internal.Converters; using Npgsql.Internal.Postgres; @@ -25,7 +24,7 @@ class UnmappedEnumTypeInfoResolver : DynamicTypeInfoResolver var labelToEnum = new Dictionary(); foreach (var field in mapping.Type.GetFields(BindingFlags.Static | BindingFlags.Public)) { - var attribute = (PgNameAttribute?)field.GetCustomAttributes(typeof(PgNameAttribute), false).FirstOrDefault(); + var attribute = (PgNameAttribute?)field.GetCustomAttribute(typeof(PgNameAttribute), false); var enumName = attribute?.PgName ?? options.DefaultNameTranslator.TranslateMemberName(field.Name); var enumValue = (Enum)field.GetValue(null)!; diff --git a/src/Npgsql/Internal/TypeInfoResolverChain.cs b/src/Npgsql/Internal/TypeInfoResolverChain.cs index 64e1f86e0d..36dd8db53c 100644 --- a/src/Npgsql/Internal/TypeInfoResolverChain.cs +++ b/src/Npgsql/Internal/TypeInfoResolverChain.cs @@ -1,6 +1,5 @@ using System; using System.Collections.Generic; -using System.Linq; using Npgsql.Internal.Postgres; namespace Npgsql.Internal; @@ -10,7 +9,7 @@ sealed class TypeInfoResolverChain : IPgTypeInfoResolver readonly IPgTypeInfoResolver[] _resolvers; public TypeInfoResolverChain(IEnumerable resolvers) - => _resolvers = resolvers.ToArray(); + => _resolvers = new List(resolvers).ToArray(); public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) { diff --git a/src/Npgsql/KerberosUsernameProvider.cs b/src/Npgsql/KerberosUsernameProvider.cs index 63cc42fb88..3df990387b 100644 --- a/src/Npgsql/KerberosUsernameProvider.cs +++ b/src/Npgsql/KerberosUsernameProvider.cs @@ -1,7 +1,6 @@ using System; using System.Diagnostics; using System.IO; -using System.Linq; using System.Threading; using System.Threading.Tasks; using Microsoft.Extensions.Logging; @@ -112,8 +111,15 @@ sealed class KerberosUsernameProvider return includeRealm ? _principalWithRealm : _principalWithoutRealm; } - static string? FindInPath(string name) => Environment.GetEnvironmentVariable("PATH") - ?.Split(Path.PathSeparator) - .Select(p => Path.Combine(p, name)) - .FirstOrDefault(File.Exists); + static string? FindInPath(string name) + { + foreach (var p in Environment.GetEnvironmentVariable("PATH")?.Split(Path.PathSeparator) ?? Array.Empty()) + { + var path = Path.Combine(p, name); + if (File.Exists(path)) + return path; + } + + return null; + } } diff --git a/src/Npgsql/NameTranslation/NpgsqlSnakeCaseNameTranslator.cs b/src/Npgsql/NameTranslation/NpgsqlSnakeCaseNameTranslator.cs index c4ba594ba7..f2239890ed 100644 --- a/src/Npgsql/NameTranslation/NpgsqlSnakeCaseNameTranslator.cs +++ b/src/Npgsql/NameTranslation/NpgsqlSnakeCaseNameTranslator.cs @@ -1,6 +1,6 @@ using System; +using System.Collections.Generic; using System.Globalization; -using System.Linq; using System.Text; namespace Npgsql.NameTranslation; @@ -59,8 +59,17 @@ public string TranslateMemberName(string clrName) throw new ArgumentNullException(nameof(clrName)); return LegacyMode - ? string.Concat(clrName.Select((c, i) => i > 0 && char.IsUpper(c) ? "_" + c.ToString() : c.ToString())).ToLower(_culture) + ? 
string.Concat(LegacyModeMap(clrName)).ToLower(_culture) : ConvertToSnakeCase(clrName, _culture); + + IEnumerable LegacyModeMap(string clrName) + { + for (var i = 0; i < clrName.Length; i++) + { + var c = clrName[i]; + yield return i > 0 && char.IsUpper(c) ? "_" + c.ToString() : c.ToString(); + } + } } /// diff --git a/src/Npgsql/NpgsqlBinaryExporter.cs b/src/Npgsql/NpgsqlBinaryExporter.cs index a7c0e395e9..e62bc06c79 100644 --- a/src/Npgsql/NpgsqlBinaryExporter.cs +++ b/src/Npgsql/NpgsqlBinaryExporter.cs @@ -1,6 +1,5 @@ using System; using System.Diagnostics; -using System.Linq; using System.Threading; using System.Threading.Tasks; using Microsoft.Extensions.Logging; @@ -111,8 +110,9 @@ async Task ReadHeader(bool async) var headerLen = NpgsqlRawCopyStream.BinarySignature.Length + 4 + 4; await _buf.Ensure(headerLen, async); - if (NpgsqlRawCopyStream.BinarySignature.Any(t => _buf.ReadByte() != t)) - throw new NpgsqlException("Invalid COPY binary signature at beginning!"); + foreach (var t in NpgsqlRawCopyStream.BinarySignature) + if (_buf.ReadByte() != t) + throw new NpgsqlException("Invalid COPY binary signature at beginning!"); var flags = _buf.ReadInt32(); if (flags != 0) diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index 77c192e601..3f513f2e05 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -4,7 +4,6 @@ using System.Data; using System.Data.Common; using System.Diagnostics; -using System.Linq; using System.Runtime.CompilerServices; using System.Text; using System.Threading; @@ -295,9 +294,24 @@ public override UpdateRowSource UpdatedRowSource /// /// Returns whether this query will execute as a prepared (compiled) query. /// - public bool IsPrepared => - _connectorPreparedOn == (InternalConnection?.Connector ?? 
_connector) && - InternalBatchCommands.Any() && InternalBatchCommands.All(s => s.PreparedStatement?.IsPrepared == true); + public bool IsPrepared + { + get + { + return _connectorPreparedOn == (InternalConnection?.Connector ?? _connector) && AllPrepared(); + + bool AllPrepared() + { + if (InternalBatchCommands.Count is 0) + return false; + + foreach (var s in InternalBatchCommands) + if (s.PreparedStatement is null || !s.PreparedStatement.IsPrepared) + return false; + return true; + } + } + } #endregion Public properties @@ -432,8 +446,9 @@ internal void DeriveParameters() using var _ = conn.StartTemporaryBindingScope(out var connector); - if (InternalBatchCommands.Any(s => s.PreparedStatement?.IsExplicit == true)) - throw new NpgsqlException("Deriving parameters isn't supported for commands that are already prepared."); + foreach (var s in InternalBatchCommands) + if (s.PreparedStatement?.IsExplicit == true) + throw new NpgsqlException("Deriving parameters isn't supported for commands that are already prepared."); // Here we unprepare statements that possibly are auto-prepared Unprepare(); @@ -658,7 +673,13 @@ Task Prepare(bool async, CancellationToken cancellationToken = default) } if (logger.IsEnabled(LogLevel.Debug) && needToPrepare) - LogMessages.PreparingCommandExplicitly(logger, string.Join("; ", InternalBatchCommands.Select(c => c.CommandText)), connector.Id); + LogMessages.PreparingCommandExplicitly(logger, string.Join("; ", CommandTexts()), connector.Id); + + IEnumerable CommandTexts() + { + foreach (var c in InternalBatchCommands) + yield return c.CommandText; + } } else { @@ -799,7 +820,15 @@ async Task Unprepare(bool async, CancellationToken cancellationToken = default) Debug.Assert(connection is not null); if (connection.Settings.Multiplexing) throw new NotSupportedException("Explicit preparation not supported with multiplexing"); - if (InternalBatchCommands.All(s => !s.IsPrepared)) + + var forall = true; + foreach (var statement in 
InternalBatchCommands) + if (statement.IsPrepared) + { + forall = false; + break; + } + if (forall) return; var connector = connection.Connector!; @@ -1161,8 +1190,10 @@ async Task SendClose(NpgsqlConnector connector, bool async, CancellationToken ca { BeginSend(connector); - foreach (var batchCommand in InternalBatchCommands.Where(s => s.IsPrepared)) + foreach (var batchCommand in InternalBatchCommands) { + if (!batchCommand.IsPrepared) + continue; // No need to force async here since each statement takes no more than 20 bytes await connector.WriteClose(StatementOrPortal.Statement, batchCommand.StatementName, async, cancellationToken).ConfigureAwait(false); batchCommand.PreparedStatement!.State = PreparedState.BeingUnprepared; @@ -1756,7 +1787,7 @@ internal void LogExecutingCompleted(NpgsqlConnector connector, bool executing) LogMessages.ExecutingCommandWithParameters( logger, singleCommand.FinalCommandText!, - singleCommand.PositionalParameters.Select(p => p.Value == DBNull.Value ? "NULL" : p.Value!).ToArray(), + ParametersDbNullAsString(singleCommand), connector.Id); } else @@ -1764,7 +1795,7 @@ internal void LogExecutingCompleted(NpgsqlConnector connector, bool executing) LogMessages.CommandExecutionCompletedWithParameters( logger, singleCommand.FinalCommandText!, - singleCommand.PositionalParameters.Select(p => p.Value == DBNull.Value ? "NULL" : p.Value!).ToArray(), + ParametersDbNullAsString(singleCommand), connector.QueryLogStopWatch.ElapsedMilliseconds, connector.Id); } @@ -1781,11 +1812,9 @@ internal void LogExecutingCompleted(NpgsqlConnector connector, bool executing) { if (logParameters) { - var commands = InternalBatchCommands - .Select(c => ( - c.CommandText, - Parameters: (object[]?)c.PositionalParameters.Select(p => p.Value == DBNull.Value ? "NULL" : p.Value).ToArray()!) 
- ).ToArray(); + var commands = new (string, object[])[InternalBatchCommands.Count]; + for (var i = 0; i < InternalBatchCommands.Count; i++) + commands[i] = (InternalBatchCommands[i].CommandText, ParametersDbNullAsString(InternalBatchCommands[i])); if (executing) LogMessages.ExecutingBatchWithParameters(logger, commands, connector.Id); @@ -1794,14 +1823,23 @@ internal void LogExecutingCompleted(NpgsqlConnector connector, bool executing) } else { - var commands = InternalBatchCommands.Select(c => c.CommandText).ToArray().ToArray(); - + var commands = new string[InternalBatchCommands.Count]; + for (var i = 0; i < InternalBatchCommands.Count; i++) + commands[i] = InternalBatchCommands[i].CommandText; if (executing) LogMessages.ExecutingBatch(logger, commands, connector.Id); else LogMessages.BatchExecutionCompleted(logger, commands, connector.QueryLogStopWatch.ElapsedMilliseconds, connector.Id); } } + + object[] ParametersDbNullAsString(NpgsqlBatchCommand c) + { + var parameters = new object[c.PositionalParameters.Count]; + for (var i = 0; i < c.PositionalParameters.Count; i++) + parameters[i] = c.PositionalParameters[i].Value == DBNull.Value ? "NULL" : c.PositionalParameters[i].Value!; + return parameters; + } } /// diff --git a/src/Npgsql/NpgsqlConnectionStringBuilder.cs b/src/Npgsql/NpgsqlConnectionStringBuilder.cs index c1a11c34c3..1f5858bfaa 100644 --- a/src/Npgsql/NpgsqlConnectionStringBuilder.cs +++ b/src/Npgsql/NpgsqlConnectionStringBuilder.cs @@ -6,7 +6,6 @@ using System.Data.Common; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; -using System.Linq; using Npgsql.Internal; using Npgsql.Netstandard20; using Npgsql.Replication; @@ -27,7 +26,7 @@ public sealed partial class NpgsqlConnectionStringBuilder : DbConnectionStringBu string? _dataSourceCached; internal string? DataSourceCached - => _dataSourceCached ??= _host is null || _host.Contains(',') + => _dataSourceCached ??= _host is null || _host.Contains(",") ? 
null : IsUnixSocket(_host, _port, out var socketPath, replaceForAbstract: false) ? socketPath @@ -144,7 +143,7 @@ public bool Remove(KeyValuePair item) public override void Clear() { Debug.Assert(Keys != null); - foreach (var k in Keys.ToArray()) + foreach (var k in (string[])Keys) Remove(k); } @@ -1584,7 +1583,7 @@ internal void PostProcessAndValidate() if (Multiplexing && !Pooling) throw new ArgumentException("Pooling must be on to use multiplexing"); - if (!Host.Contains(',')) + if (!Host.Contains(",")) { if (TargetSessionAttributesParsed is not null && TargetSessionAttributesParsed != Npgsql.TargetSessionAttributes.Any) @@ -1684,12 +1683,32 @@ public override bool Equals(object? obj) /// /// Gets an containing the keys of the . /// - public new ICollection Keys => base.Keys.Cast().ToArray()!; + public new ICollection Keys + { + get + { + var result = new string[base.Keys.Count]; + var i = 0; + foreach (var key in base.Keys) + result[i++] = (string)key; + return result; + } + } /// /// Gets an containing the values in the . /// - public new ICollection Values => base.Values.Cast().ToArray(); + public new ICollection Values + { + get + { + var result = new object?[base.Keys.Count]; + var i = 0; + foreach (var key in base.Values) + result[i++] = (object?)key; + return result; + } + } /// /// Copies the elements of the to an Array, starting at a particular Array index. @@ -1729,13 +1748,15 @@ protected override void GetProperties(Hashtable propertyDescriptors) // provider, for example. 
base.GetProperties(propertyDescriptors); - var toRemove = propertyDescriptors.Values - .Cast() - .Where(d => - !d.Attributes.Cast().Any(a => a is NpgsqlConnectionStringPropertyAttribute) || - d.Attributes.Cast().Any(a => a is ObsoleteAttribute) - ) - .ToList(); + var toRemove = new List(); + foreach (var value in propertyDescriptors.Values) + { + var d = (PropertyDescriptor)value; + foreach (var attribute in d.Attributes) + if (attribute is NpgsqlConnectionStringPropertyAttribute or ObsoleteAttribute) + toRemove.Add(d); + } + foreach (var o in toRemove) propertyDescriptors.Remove(o.DisplayName); } diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index cc86de063e..57c358b3bf 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -8,7 +8,6 @@ using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.IO; -using System.Linq; using System.Runtime.CompilerServices; using System.Runtime.ExceptionServices; using System.Threading; @@ -619,7 +618,6 @@ void PopulateOutputParameters() // The first row in a stored procedure command that has output parameters needs to be traversed twice - // once for populating the output parameters and once for the actual result set traversal. So in this // case we can't be sequential. - Debug.Assert(Command.Parameters.Any(p => p.IsOutputDirection)); Debug.Assert(StatementIndex == 0); Debug.Assert(RowDescription != null); Debug.Assert(State == ReaderState.BeforeResult); @@ -645,8 +643,11 @@ void PopulateOutputParameters() // Not sure where this odd behavior comes from: all output parameters which did not get matched by // name now get populated with column values which weren't matched. Keeping this for backwards compat, // opened #2252 for investigation. 
- foreach (var p in Command.Parameters.Where(p => p.IsOutputDirection && !taken.Contains(p))) + foreach (var p in (IEnumerable)Command.Parameters) { + if (!p.IsOutputDirection || taken.Contains(p)) + continue; + if (pending.Count == 0) break; p.Value = pending.Dequeue(); @@ -734,11 +735,16 @@ async Task NextResultSchemaOnly(bool async, bool isConsuming = false, Canc throw Connector.UnexpectedMessageReceived(msg.Code); } - if (_statements.Skip(StatementIndex + 1).All(x => x.IsPrepared)) - { - // There are no more queries, we're done. Read to the RFQ. + var forall = true; + for (var i = StatementIndex + 1; i < _statements.Count; i++) + if (!_statements[i].IsPrepared) + { + forall = false; + break; + } + // There are no more queries, we're done. Read to the RFQ. + if (forall) Expect(await Connector.ReadMessage(async), Connector); - } } // Found a resultset @@ -1033,7 +1039,7 @@ protected override void Dispose(bool disposing) // by other consumers. Therefore, we only set the state fo Disposed if the exception *wasn't* a PostgresException. if (!(ex is PostgresException || ex is NpgsqlException { InnerException: AggregateException aggregateException } && - aggregateException.InnerExceptions.All(e => e is PostgresException))) + AllPostgresExceptions(aggregateException.InnerExceptions))) { State = ReaderState.Disposed; } @@ -1072,7 +1078,7 @@ async ValueTask DisposeAsyncCore() // by other consumers. Therefore, we only set the state fo Disposed if the exception *wasn't* a PostgresException. 
if (!(ex is PostgresException || ex is NpgsqlException { InnerException: AggregateException aggregateException } && - aggregateException.InnerExceptions.All(e => e is PostgresException))) + AllPostgresExceptions(aggregateException.InnerExceptions))) { State = ReaderState.Disposed; } @@ -1086,6 +1092,14 @@ async ValueTask DisposeAsyncCore() } } + static bool AllPostgresExceptions(ReadOnlyCollection collection) + { + foreach (var exception in collection) + if (exception is not PostgresException) + return false; + return true; + } + /// /// Closes the reader, allowing a new command to be executed. /// @@ -1135,7 +1149,7 @@ internal async Task Close(bool connectionClosing, bool async, bool isDisposing) catch (Exception ex) when ( ex is PostgresException || ex is NpgsqlException { InnerException: AggregateException aggregateException } && - aggregateException.InnerExceptions.All(e => e is PostgresException)) + AllPostgresExceptions(aggregateException.InnerExceptions)) { // In the case of a PostgresException (or multiple ones, if we have error barriers), the connection is fine and consume // has basically completed. Defer throwing the exception until Cleanup is complete. @@ -1780,7 +1794,15 @@ public ReadOnlyCollection GetColumnSchema() => GetColumnSchema(async: false).GetAwaiter().GetResult(); ReadOnlyCollection IDbColumnSchemaGenerator.GetColumnSchema() - => new(GetColumnSchema().Select(c => (DbColumn)c).ToList()); + { + var columns = GetColumnSchema(); + var result = new DbColumn[columns.Count]; + var i = 0; + foreach (var column in result) + result[i++] = column; + + return new ReadOnlyCollection(result); + } /// /// Asynchronously returns schema information for the columns in the current resultset. 
diff --git a/src/Npgsql/NpgsqlDataSourceBuilder.cs b/src/Npgsql/NpgsqlDataSourceBuilder.cs index de87962d5c..de76897cd9 100644 --- a/src/Npgsql/NpgsqlDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlDataSourceBuilder.cs @@ -1,6 +1,6 @@ using System; +using System.Collections.Generic; using System.Diagnostics.CodeAnalysis; -using System.Linq; using System.Net.Security; using System.Security.Cryptography.X509Certificates; using System.Text.Json; @@ -120,7 +120,9 @@ void AddDefaultFeatures() AddTypeInfoResolver(new SystemTextJsonTypeInfoResolver()); AddTypeInfoResolver(new ExtraConversionsResolver()); AddTypeInfoResolver(AdoTypeInfoResolver.Instance); - foreach (var plugin in GlobalTypeMapper.Instance.GetPluginResolvers().Reverse()) + var plugins = new List(GlobalTypeMapper.Instance.GetPluginResolvers()); + plugins.Reverse(); + foreach (var plugin in plugins) AddTypeInfoResolver(plugin); } } diff --git a/src/Npgsql/NpgsqlMultiHostDataSource.cs b/src/Npgsql/NpgsqlMultiHostDataSource.cs index 6762de9ad4..eb8bcd3bdd 100644 --- a/src/Npgsql/NpgsqlMultiHostDataSource.cs +++ b/src/Npgsql/NpgsqlMultiHostDataSource.cs @@ -4,7 +4,6 @@ using System.Collections.Generic; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; -using System.Linq; using System.Threading; using System.Threading.Tasks; using System.Transactions; @@ -53,12 +52,15 @@ internal NpgsqlMultiHostDataSource(NpgsqlConnectionStringBuilder settings, Npgsq : new UnpooledDataSource(poolSettings, dataSourceConfig); } - var targetSessionAttributeValues = Enum.GetValues(typeof(TargetSessionAttributes)).Cast().ToArray(); - _wrappers = new MultiHostDataSourceWrapper[targetSessionAttributeValues.Max(t => (int)t) + 1]; + var targetSessionAttributeValues = (TargetSessionAttributes[])Enum.GetValues(typeof(TargetSessionAttributes)); + var highestValue = 0; + foreach (var value in targetSessionAttributeValues) + if ((int)value > highestValue) + highestValue = (int)value; + + _wrappers = new 
MultiHostDataSourceWrapper[highestValue + 1]; foreach (var targetSessionAttribute in targetSessionAttributeValues) - { _wrappers[(int)targetSessionAttribute] = new(this, targetSessionAttribute); - } } /// @@ -311,14 +313,23 @@ await TryGet(conn, timeoutPerHost, async, preferredType, IsOnline, poolIndex, ex } static NpgsqlException NoSuitableHostsException(IList exceptions) - => exceptions.Count == 0 + { + return exceptions.Count == 0 ? new NpgsqlException("No suitable host was found.") - : exceptions[0] is PostgresException firstException && - exceptions.All(x => x is PostgresException ex && ex.SqlState == firstException.SqlState) + : exceptions[0] is PostgresException firstException && AllEqual(firstException, exceptions) ? firstException : new NpgsqlException("Unable to connect to a suitable host. Check inner exception for more details.", new AggregateException(exceptions)); + static bool AllEqual(PostgresException first, IList exceptions) + { + foreach (var x in exceptions) + if (x is not PostgresException ex || ex.SqlState != first.SqlState) + return false; + return true; + } + } + int GetRoundRobinIndex() { while (true) diff --git a/src/Npgsql/NpgsqlSchema.cs b/src/Npgsql/NpgsqlSchema.cs index 461ae2e873..5fe79a97a6 100644 --- a/src/Npgsql/NpgsqlSchema.cs +++ b/src/Npgsql/NpgsqlSchema.cs @@ -1,8 +1,8 @@ using System; +using System.Collections.Generic; using System.Data; using System.Data.Common; using System.Globalization; -using System.Linq; using System.Text; using System.Threading; using System.Threading.Tasks; @@ -562,9 +562,12 @@ static DataTable GetDataTypes(NpgsqlConnection conn) try { PgSerializerOptions.IntrospectionCaller = true; - foreach (var baseType in connector.DatabaseInfo.BaseTypes.Cast() - .Concat(connector.DatabaseInfo.EnumTypes) - .Concat(connector.DatabaseInfo.CompositeTypes)) + + var types = new List(); + types.AddRange(connector.DatabaseInfo.BaseTypes); + types.AddRange(connector.DatabaseInfo.EnumTypes); + 
types.AddRange(connector.DatabaseInfo.CompositeTypes); + foreach (var baseType in types) { if (connector.SerializerOptions.GetDefaultTypeInfo(baseType) is not { } info) continue; diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index b1b7ec69cb..d01a3d6b4c 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -1,7 +1,6 @@ using System; using System.Collections.Generic; using System.Diagnostics.CodeAnalysis; -using System.Linq; using System.Net.Security; using System.Security.Cryptography.X509Certificates; using System.Text.Json; @@ -76,7 +75,9 @@ public NpgsqlSlimDataSourceBuilder(string? connectionString = null) AddTypeInfoResolver(UnsupportedTypeInfoResolver); AddTypeInfoResolver(new AdoTypeInfoResolver()); // When used publicly we start off with our slim defaults. - foreach (var plugin in GlobalTypeMapper.Instance.GetPluginResolvers().Reverse()) + var plugins = new List(GlobalTypeMapper.Instance.GetPluginResolvers()); + plugins.Reverse(); + foreach (var plugin in plugins) AddTypeInfoResolver(plugin); } diff --git a/src/Npgsql/PgPassFile.cs b/src/Npgsql/PgPassFile.cs index 2b5df681fd..477534b7b0 100644 --- a/src/Npgsql/PgPassFile.cs +++ b/src/Npgsql/PgPassFile.cs @@ -1,7 +1,6 @@ using System; using System.Collections.Generic; using System.IO; -using System.Linq; using System.Text; namespace Npgsql; @@ -35,10 +34,18 @@ public PgPassFile(string fileName) /// Parses file content and gets all credentials from the file /// /// corresponding to all lines in the .pgpass file - internal IEnumerable Entries => File.ReadLines(FileName) - .Select(line => line.Trim()) - .Where(line => line.Any() && line[0] != '#') - .Select(Entry.Parse); + internal IEnumerable Entries + { + get + { + foreach (var l in File.ReadLines(FileName)) + { + var line = l.Trim(); + if (line.Length > 0 && line[0] != '#') + yield return Entry.Parse(line); + } + } + } /// /// Searches queries loaded from 
.PGPASS file to find first entry matching the provided parameters. @@ -49,7 +56,12 @@ public PgPassFile(string fileName) /// User name to query. Use null to match any. /// Matching if match was found. Otherwise, returns null. internal Entry? GetFirstMatchingEntry(string? host = null, int? port = null, string? database = null, string? username = null) - => Entries.FirstOrDefault(entry => entry.IsMatch(host, port, database, username)); + { + foreach (var entry in Entries) + if (entry.IsMatch(host, port, database, username)) + return entry; + return null; + } /// /// Represents a hostname, port, database, username, and password combination that has been retrieved from a .pgpass file @@ -183,4 +195,4 @@ bool AreValuesMatched(string? query, string? actual) bool AreValuesMatched(int? query, int? actual) => query == actual || actual == null || query == null; } -} \ No newline at end of file +} diff --git a/src/Npgsql/PostgresDatabaseInfo.cs b/src/Npgsql/PostgresDatabaseInfo.cs index a4e7a33462..465949bec5 100644 --- a/src/Npgsql/PostgresDatabaseInfo.cs +++ b/src/Npgsql/PostgresDatabaseInfo.cs @@ -2,7 +2,6 @@ using System.Collections.Generic; using System.Diagnostics; using System.Globalization; -using System.Linq; using System.Text; using System.Threading.Tasks; using Microsoft.Extensions.Logging; @@ -46,7 +45,7 @@ class PostgresDatabaseInfo : NpgsqlDatabaseInfo List? _types; /// - protected override IEnumerable GetTypes() => _types ?? Enumerable.Empty(); + protected override IEnumerable GetTypes() => _types ?? (IEnumerable)Array.Empty(); /// /// The PostgreSQL version string as returned by the version() function. Populated during loading. 
@@ -541,7 +540,8 @@ static string SanitizeForReplicationConnection(string str) if (!isReplicationConnection) Expect(await conn.ReadMessage(async), conn); - return byOID.Values.ToList(); + + return new(byOID.Values); static string ReadNonNullableString(NpgsqlReadBuffer buffer) => buffer.ReadString(buffer.ReadInt32()); diff --git a/src/Npgsql/PostgresErrorCodes.cs b/src/Npgsql/PostgresErrorCodes.cs index 95831bc0ef..afeadbf2c6 100644 --- a/src/Npgsql/PostgresErrorCodes.cs +++ b/src/Npgsql/PostgresErrorCodes.cs @@ -1,7 +1,6 @@ #pragma warning disable CS1591 // Missing XML comment for publicly visible type or member using System; -using System.Linq; namespace Npgsql; @@ -478,6 +477,12 @@ public static class PostgresErrorCodes }; internal static bool IsCriticalFailure(PostgresException e, bool clusterError = true) - => CriticalFailureCodes.Any(x => e.SqlState.StartsWith(x, StringComparison.Ordinal)) || - !clusterError && e.SqlState == ProtocolViolation; // We only treat ProtocolViolation as critical for connection + { + foreach (var x in CriticalFailureCodes) + if (e.SqlState.StartsWith(x, StringComparison.Ordinal)) + return true; + + // We only treat ProtocolViolation as critical for connection + return !clusterError && e.SqlState == ProtocolViolation; + } } diff --git a/src/Npgsql/PostgresMinimalDatabaseInfo.cs b/src/Npgsql/PostgresMinimalDatabaseInfo.cs index 94b76f541c..01afa29abf 100644 --- a/src/Npgsql/PostgresMinimalDatabaseInfo.cs +++ b/src/Npgsql/PostgresMinimalDatabaseInfo.cs @@ -1,5 +1,4 @@ using System.Collections.Generic; -using System.Linq; using System.Reflection; using System.Threading.Tasks; using Npgsql.Internal; @@ -23,30 +22,36 @@ sealed class PostgresMinimalDatabaseInfo : PostgresDatabaseInfo static PostgresType[]? 
_typesWithMultiranges, _typesWithoutMultiranges; static PostgresType[] CreateTypes(bool withMultiranges) - => typeof(NpgsqlDbType).GetFields() - .Select(f => f.GetCustomAttribute()) - .OfType() - .SelectMany(attr => - { - var baseType = new PostgresBaseType("pg_catalog", attr.Name, attr.BaseOID); - var arrayType = new PostgresArrayType("pg_catalog", "_" + attr.Name, attr.ArrayOID, baseType); + { + var builtinTypes = new List(); + foreach (var field in typeof(NpgsqlDbType).GetFields()) + if (field.GetCustomAttribute() is { } attr) + builtinTypes.Add(attr); - if (attr.RangeName is null) - { - return new PostgresType[] { baseType, arrayType }; - } + var pgTypes = new List(); + foreach (var attr in builtinTypes) + { + var baseType = new PostgresBaseType("pg_catalog", attr.Name, attr.BaseOID); + var arrayType = new PostgresArrayType("pg_catalog", "_" + attr.Name, attr.ArrayOID, baseType); + if (attr.RangeName is null) + pgTypes.AddRange(new PostgresType[] { baseType, arrayType }); + else + { var rangeType = new PostgresRangeType("pg_catalog", attr.RangeName, attr.RangeOID, baseType); - return withMultiranges + pgTypes.AddRange(withMultiranges ? 
new PostgresType[] { baseType, arrayType, rangeType, new PostgresMultirangeType("pg_catalog", attr.MultirangeName!, attr.MultirangeOID, rangeType) } - : new PostgresType[] { baseType, arrayType, rangeType }; - }) - .ToArray(); + : new PostgresType[] { baseType, arrayType, rangeType }); + } + } + + return pgTypes.ToArray(); + } protected override IEnumerable GetTypes() => SupportsMultirangeTypes diff --git a/src/Npgsql/PregeneratedMessages.cs b/src/Npgsql/PregeneratedMessages.cs index 3d315ff12f..b6d2e4dd02 100644 --- a/src/Npgsql/PregeneratedMessages.cs +++ b/src/Npgsql/PregeneratedMessages.cs @@ -1,8 +1,7 @@ -using System.Diagnostics; -using System.IO; -using System.Linq; +using System.IO; using System.Text; using Npgsql.Internal; +using Npgsql.Util; namespace Npgsql; @@ -27,7 +26,7 @@ static PregeneratedMessages() internal static byte[] Generate(NpgsqlWriteBuffer buf, string query) { - Debug.Assert(query.All(c => c < 128)); + NpgsqlWriteBuffer.AssertASCIIOnly(query); var queryByteLen = Encoding.ASCII.GetByteCount(query); @@ -52,4 +51,4 @@ internal static byte[] Generate(NpgsqlWriteBuffer buf, string query) internal static readonly byte[] RollbackTransaction; internal static readonly byte[] DiscardAll; -} \ No newline at end of file +} diff --git a/src/Npgsql/Replication/Internal/LogicalReplicationConnectionExtensions.cs b/src/Npgsql/Replication/Internal/LogicalReplicationConnectionExtensions.cs index dcc58d9fff..d1ec4638d0 100644 --- a/src/Npgsql/Replication/Internal/LogicalReplicationConnectionExtensions.cs +++ b/src/Npgsql/Replication/Internal/LogicalReplicationConnectionExtensions.cs @@ -1,7 +1,6 @@ using NpgsqlTypes; using System; using System.Collections.Generic; -using System.Linq; using System.Runtime.CompilerServices; using System.Text; using System.Threading; @@ -166,12 +165,18 @@ static async IAsyncEnumerable StartLogicalReplicationInternal( .Append(" LOGICAL ") .Append(walLocation ?? 
slot.ConsistentPoint); - if (options?.Any() == true) + var opts = new List>(options ?? Array.Empty>()); + if (opts.Count > 0) { - builder - .Append(" (") - .Append(string.Join(", ", options.Select(kv => @$"""{kv.Key}""{(kv.Value is null ? "" : $" '{kv.Value}'")}"))) - .Append(')'); + builder.Append(" ("); + var stringOptions = new string[opts.Count]; + for (var i = 0; i < opts.Count; i++) + { + var kv = opts[i]; + stringOptions[i] = @$"""{kv.Key}""{(kv.Value is null ? "" : $" '{kv.Value}'")}"; + } + builder.Append(string.Join(", ", stringOptions)); + builder.Append(')'); } var command = builder.ToString(); @@ -183,4 +188,4 @@ static async IAsyncEnumerable StartLogicalReplicationInternal( yield return enumerator.Current; } } -} \ No newline at end of file +} diff --git a/src/Npgsql/Schema/DbColumnSchemaGenerator.cs b/src/Npgsql/Schema/DbColumnSchemaGenerator.cs index 88cc775e00..707bfbb36a 100644 --- a/src/Npgsql/Schema/DbColumnSchemaGenerator.cs +++ b/src/Npgsql/Schema/DbColumnSchemaGenerator.cs @@ -2,7 +2,6 @@ using System.Collections.Generic; using System.Collections.ObjectModel; using System.Data; -using System.Linq; using System.Threading; using System.Threading.Tasks; using System.Transactions; @@ -112,11 +111,15 @@ internal async Task> GetColumnSchema(bool asy // and those that don't (e.g. SELECT 8). 
For the former we load lots of info from // the backend (if fetchAdditionalInfo is true), for the latter we only have the RowDescription - var columnFieldFilter = _rowDescription - .Where(f => f.TableOID != 0) // Only column fields - .Select(c => $"(attr.attrelid={c.TableOID} AND attr.attnum={c.ColumnAttributeNumber})") - .Join(" OR "); + var filters = new List(); + foreach (var f in _rowDescription) + { + // Only column fields + if (f.TableOID != 0) + filters.Add($"(attr.attrelid={f.TableOID} AND attr.attnum={f.ColumnAttributeNumber})"); + } + var columnFieldFilter = string.Join(" OR ", filters); if (columnFieldFilter != string.Empty) { var query = oldQueryMode From 1073cb714f91af8e54a4c9a239da91cf0d3c490c Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Tue, 26 Sep 2023 00:29:26 +0200 Subject: [PATCH 213/761] Buffer .pgpass into memory (#5234) --- src/Npgsql/PgPassFile.cs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/Npgsql/PgPassFile.cs b/src/Npgsql/PgPassFile.cs index 477534b7b0..364d2b7409 100644 --- a/src/Npgsql/PgPassFile.cs +++ b/src/Npgsql/PgPassFile.cs @@ -38,7 +38,10 @@ internal IEnumerable Entries { get { - foreach (var l in File.ReadLines(FileName)) + var bytes = File.ReadAllBytes(FileName); + var mem = new MemoryStream(bytes); + using var reader = new StreamReader(mem); + while (reader.ReadLine() is { } l) { var line = l.Trim(); if (line.Length > 0 && line[0] != '#') From 0611f7f2c56ee959aedf0dc737789e46d7dfebd7 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Tue, 26 Sep 2023 01:00:29 +0200 Subject: [PATCH 214/761] Remove hmac ctor net7 (#5235) * Remove HMAC constructor call # Conflicts: # src/Npgsql/Internal/NpgsqlConnector.Auth.cs * And some more factories --- src/Npgsql/Internal/NpgsqlConnector.Auth.cs | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs index 459ca290df..5c088400c3 100644 --- 
a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs @@ -135,9 +135,12 @@ async Task AuthenticateSASL(List mechanisms, string username, bool async var clientKey = HMAC(saltedPassword, "Client Key"); byte[] storedKey; +#if NET7_0_OR_GREATER + storedKey = SHA256.HashData(clientKey); +#else using (var sha256 = SHA256.Create()) storedKey = sha256.ComputeHash(clientKey); - +#endif var clientFirstMessageBare = $"n=*,r={clientNonce}"; var serverFirstMessage = $"r={firstServerMsg.Nonce},s={firstServerMsg.Salt},i={firstServerMsg.Iteration}"; var clientFinalMessageWithoutProof = $"c={cbind},r={firstServerMsg.Nonce}"; @@ -261,7 +264,7 @@ static byte[] Xor(byte[] buffer1, byte[] buffer2) static byte[] HMAC(byte[] key, string data) { - byte[] dataBytes = Encoding.UTF8.GetBytes(data); + var dataBytes = Encoding.UTF8.GetBytes(data); #if NET7_0_OR_GREATER return HMACSHA256.HashData(key, dataBytes); #else @@ -278,7 +281,9 @@ async Task AuthenticateMD5(string username, byte[] salt, bool async, Cancellatio throw new NpgsqlException("No password has been provided but the backend requires one (in MD5)"); byte[] result; +#if !NET7_0_OR_GREATER using (var md5 = MD5.Create()) +#endif { // First phase var passwordBytes = NpgsqlWriteBuffer.UTF8Encoding.GetBytes(passwd); @@ -288,7 +293,11 @@ async Task AuthenticateMD5(string username, byte[] salt, bool async, Cancellatio usernameBytes.CopyTo(cryptBuf, passwordBytes.Length); var sb = new StringBuilder(); +#if NET7_0_OR_GREATER + var hashResult = MD5.HashData(cryptBuf); +#else var hashResult = md5.ComputeHash(cryptBuf); +#endif foreach (var b in hashResult) sb.Append(b.ToString("x2")); @@ -303,7 +312,11 @@ async Task AuthenticateMD5(string username, byte[] salt, bool async, Cancellatio prehashbytes.CopyTo(cryptBuf, 0); sb = new StringBuilder("md5"); +#if NET7_0_OR_GREATER + hashResult = MD5.HashData(cryptBuf); +#else hashResult = md5.ComputeHash(cryptBuf); +#endif foreach (var b in hashResult) 
sb.Append(b.ToString("x2")); From f34b2619b5462a493f890080a6564263dc0cc754 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Tue, 26 Sep 2023 16:50:37 +0300 Subject: [PATCH 215/761] Add Nino Floris as Npgsql author (#5281) --- src/Npgsql/Npgsql.csproj | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Npgsql/Npgsql.csproj b/src/Npgsql/Npgsql.csproj index 53a0e38377..68e5db7395 100644 --- a/src/Npgsql/Npgsql.csproj +++ b/src/Npgsql/Npgsql.csproj @@ -1,7 +1,7 @@  - Shay Rojansky;Nikita Kazmin;Brar Piening;Yoh Deadfall;;Austin Drenski;Emil Lenngren;Francisco Figueiredo Jr.;Kenji Uno + Shay Rojansky;Nikita Kazmin;Brar Piening;Nino Floris;Yoh Deadfall;;Austin Drenski;Emil Lenngren;Francisco Figueiredo Jr.;Kenji Uno Npgsql is the open source .NET data provider for PostgreSQL. npgsql;postgresql;postgres;ado;ado.net;database;sql README.md From 8d51d1c40e975c61005e749f6ccf7dba94780a6e Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Tue, 26 Sep 2023 21:17:10 +0200 Subject: [PATCH 216/761] Remove no sync context scope and add ConfigureAwait(false) (#5225) --- src/.editorconfig | 2 + src/Npgsql.GeoJSON/CrsMapExtensions.cs | 5 +- src/Npgsql/GlobalSuppressions.cs | 5 +- src/Npgsql/Internal/NpgsqlConnector.Auth.cs | 48 ++--- .../Internal/NpgsqlConnector.OldAuth.cs | 10 +- src/Npgsql/Internal/NpgsqlConnector.cs | 96 ++++----- src/Npgsql/Internal/NpgsqlDatabaseInfo.cs | 2 +- src/Npgsql/KerberosUsernameProvider.cs | 6 +- src/Npgsql/MultiplexingDataSource.cs | 8 +- src/Npgsql/NoSynchronizationContextScope.cs | 37 ---- src/Npgsql/NpgsqlBatch.cs | 2 +- src/Npgsql/NpgsqlBinaryExporter.cs | 85 +++----- src/Npgsql/NpgsqlBinaryImporter.cs | 130 ++++-------- src/Npgsql/NpgsqlCommand.cs | 33 +-- src/Npgsql/NpgsqlConnection.cs | 126 ++++------- src/Npgsql/NpgsqlDataAdapter.cs | 12 +- src/Npgsql/NpgsqlDataReader.cs | 172 +++++++-------- src/Npgsql/NpgsqlDataSource.cs | 12 +- src/Npgsql/NpgsqlDataSourceCommand.cs | 9 +- src/Npgsql/NpgsqlLargeObjectManager.cs | 59 ++---- 
src/Npgsql/NpgsqlLargeObjectStream.cs | 63 +++--- src/Npgsql/NpgsqlMultiHostDataSource.cs | 18 +- src/Npgsql/NpgsqlParameter.cs | 4 +- src/Npgsql/NpgsqlRawCopyStream.cs | 72 +++---- src/Npgsql/NpgsqlSchema.cs | 22 +- src/Npgsql/NpgsqlTransaction.cs | 46 ++-- src/Npgsql/PoolingDataSource.cs | 8 +- src/Npgsql/PostgresDatabaseInfo.cs | 44 ++-- .../LogicalReplicationConnectionExtensions.cs | 93 ++++----- .../PgOutput/Messages/UpdateMessage.cs | 14 +- .../PgOutput/PgOutputAsyncEnumerable.cs | 98 ++++----- .../Replication/PgOutput/ReplicationTuple.cs | 6 +- .../Replication/PgOutput/ReplicationValue.cs | 34 ++- .../Replication/PgOutput/TupleEnumerator.cs | 11 +- .../PhysicalReplicationConnection.cs | 39 ++-- .../Replication/ReplicationConnection.cs | 197 ++++++++---------- .../TestDecodingAsyncEnumerable.cs | 17 +- src/Npgsql/Schema/DbColumnSchemaGenerator.cs | 8 +- src/Npgsql/Shims/StreamExtensions.cs | 4 +- src/Npgsql/Shims/TaskExtensions.cs | 4 +- src/Npgsql/Shims/WaitHandleExtensions.cs | 2 +- src/Npgsql/TaskTimeoutAndCancellation.cs | 6 +- src/Npgsql/UnpooledDataSource.cs | 2 +- 43 files changed, 693 insertions(+), 978 deletions(-) delete mode 100644 src/Npgsql/NoSynchronizationContextScope.cs diff --git a/src/.editorconfig b/src/.editorconfig index 3137d00229..6574a9291a 100644 --- a/src/.editorconfig +++ b/src/.editorconfig @@ -12,3 +12,5 @@ dotnet_diagnostic.RS0026.severity = none # Public API with optional parameter(s) should have the most parameters amongst its public overloads. 
dotnet_diagnostic.RS0027.severity = none + +dotnet_diagnostic.CA2007.severity = warning; diff --git a/src/Npgsql.GeoJSON/CrsMapExtensions.cs b/src/Npgsql.GeoJSON/CrsMapExtensions.cs index 329b7d9265..dde5e0f688 100644 --- a/src/Npgsql.GeoJSON/CrsMapExtensions.cs +++ b/src/Npgsql.GeoJSON/CrsMapExtensions.cs @@ -1,3 +1,4 @@ +using System; using System.Threading.Tasks; using Npgsql.GeoJSON.Internal; @@ -16,9 +17,9 @@ public static async Task GetCrsMapAsync(this NpgsqlDataSource dataSource { var builder = new CrsMapBuilder(); using var cmd = GetCsrCommand(dataSource); - await using var reader = await cmd.ExecuteReaderAsync(); + using var reader = await cmd.ExecuteReaderAsync().ConfigureAwait(false); - while (await reader.ReadAsync()) + while (await reader.ReadAsync().ConfigureAwait(false)) builder.Add(new CrsMapEntry(reader.GetInt32(0), reader.GetInt32(1), reader.GetString(2))); return builder.Build(); diff --git a/src/Npgsql/GlobalSuppressions.cs b/src/Npgsql/GlobalSuppressions.cs index 07bef71ab3..580c453b9d 100644 --- a/src/Npgsql/GlobalSuppressions.cs +++ b/src/Npgsql/GlobalSuppressions.cs @@ -1,7 +1,7 @@  -// This file is used by Code Analysis to maintain SuppressMessage +// This file is used by Code Analysis to maintain SuppressMessage // attributes that are applied to this project. -// Project-level suppressions either have no target or are given +// Project-level suppressions either have no target or are given // a specific target and scoped to a namespace, type, member, etc. 
using System.Diagnostics.CodeAnalysis; @@ -10,6 +10,5 @@ [assembly: SuppressMessage("Design", "CA1032:Implement standard exception constructors", Justification = "We have several exception classes where this makes no sense")] [assembly: SuppressMessage("Design", "CA1710:Identifiers should have correct suffix", Justification = "Disagree")] [assembly: SuppressMessage("Design", "CA1707:Remove the underscores from member name", Justification = "Seems to cause some false positives on implicit/explicit cast operators, strange")] -[assembly: SuppressMessage("Reliability", "CA2007:Do not directly await a Task", Justification = "Npgsql uses NoSynchronizationContextScope instead of ConfigureAwait(false)")] [assembly: SuppressMessage("Style", "IDE1006:Naming Styles", Justification = "All I/O methods are both sync and async, avoid clutter")] diff --git a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs index 5c088400c3..1bbd1ada60 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs @@ -21,28 +21,28 @@ async Task Authenticate(string username, NpgsqlTimeout timeout, bool async, Canc while (true) { timeout.CheckAndApply(this); - var msg = ExpectAny(await ReadMessage(async), this); + var msg = ExpectAny(await ReadMessage(async).ConfigureAwait(false), this); switch (msg.AuthRequestType) { case AuthenticationRequestType.AuthenticationOk: return; case AuthenticationRequestType.AuthenticationCleartextPassword: - await AuthenticateCleartext(username, async, cancellationToken); + await AuthenticateCleartext(username, async, cancellationToken).ConfigureAwait(false); break; case AuthenticationRequestType.AuthenticationMD5Password: - await AuthenticateMD5(username, ((AuthenticationMD5PasswordMessage)msg).Salt, async, cancellationToken); + await AuthenticateMD5(username, ((AuthenticationMD5PasswordMessage)msg).Salt, async, cancellationToken).ConfigureAwait(false); break; case 
AuthenticationRequestType.AuthenticationSASL: await AuthenticateSASL(((AuthenticationSASLMessage)msg).Mechanisms, username, async, - cancellationToken); + cancellationToken).ConfigureAwait(false); break; case AuthenticationRequestType.AuthenticationGSS: case AuthenticationRequestType.AuthenticationSSPI: - await AuthenticateGSS(async); + await AuthenticateGSS(async).ConfigureAwait(false); return; case AuthenticationRequestType.AuthenticationGSSContinue: @@ -56,15 +56,15 @@ await AuthenticateSASL(((AuthenticationSASLMessage)msg).Mechanisms, username, as async Task AuthenticateCleartext(string username, bool async, CancellationToken cancellationToken = default) { - var passwd = await GetPassword(username, async, cancellationToken); + var passwd = await GetPassword(username, async, cancellationToken).ConfigureAwait(false); if (passwd == null) throw new NpgsqlException("No password has been provided but the backend requires one (in cleartext)"); var encoded = new byte[Encoding.UTF8.GetByteCount(passwd) + 1]; Encoding.UTF8.GetBytes(passwd, 0, passwd.Length, encoded, 0); - await WritePassword(encoded, async, cancellationToken); - await Flush(async, cancellationToken); + await WritePassword(encoded, async, cancellationToken).ConfigureAwait(false); + await Flush(async, cancellationToken).ConfigureAwait(false); } async Task AuthenticateSASL(List mechanisms, string username, bool async, CancellationToken cancellationToken) @@ -114,16 +114,16 @@ async Task AuthenticateSASL(List mechanisms, string username, bool async throw new NpgsqlException("Unable to bind to SCRAM-SHA-256-PLUS, check logs for more information"); } - var passwd = await GetPassword(username, async, cancellationToken) ?? + var passwd = await GetPassword(username, async, cancellationToken).ConfigureAwait(false) ?? 
throw new NpgsqlException($"No password has been provided but the backend requires one (in SASL/{mechanism})"); // Assumption: the write buffer is big enough to contain all our outgoing messages var clientNonce = GetNonce(); - await WriteSASLInitialResponse(mechanism, NpgsqlWriteBuffer.UTF8Encoding.GetBytes($"{cbindFlag},,n=*,r={clientNonce}"), async, cancellationToken); - await Flush(async, cancellationToken); + await WriteSASLInitialResponse(mechanism, NpgsqlWriteBuffer.UTF8Encoding.GetBytes($"{cbindFlag},,n=*,r={clientNonce}"), async, cancellationToken).ConfigureAwait(false); + await Flush(async, cancellationToken).ConfigureAwait(false); - var saslContinueMsg = Expect(await ReadMessage(async), this); + var saslContinueMsg = Expect(await ReadMessage(async).ConfigureAwait(false), this); if (saslContinueMsg.AuthRequestType != AuthenticationRequestType.AuthenticationSASLContinue) throw new NpgsqlException("[SASL] AuthenticationSASLContinue message expected"); var firstServerMsg = AuthenticationSCRAMServerFirstMessage.Load(saslContinueMsg.Payload, ConnectionLogger); @@ -156,10 +156,10 @@ async Task AuthenticateSASL(List mechanisms, string username, bool async var messageStr = $"{clientFinalMessageWithoutProof},p={clientProof}"; - await WriteSASLResponse(Encoding.UTF8.GetBytes(messageStr), async, cancellationToken); - await Flush(async, cancellationToken); + await WriteSASLResponse(Encoding.UTF8.GetBytes(messageStr), async, cancellationToken).ConfigureAwait(false); + await Flush(async, cancellationToken).ConfigureAwait(false); - var saslFinalServerMsg = Expect(await ReadMessage(async), this); + var saslFinalServerMsg = Expect(await ReadMessage(async).ConfigureAwait(false), this); if (saslFinalServerMsg.AuthRequestType != AuthenticationRequestType.AuthenticationSASLFinal) throw new NpgsqlException("[SASL] AuthenticationSASLFinal message expected"); @@ -276,7 +276,7 @@ static byte[] HMAC(byte[] key, string data) async Task AuthenticateMD5(string username, byte[] salt, 
bool async, CancellationToken cancellationToken = default) { - var passwd = await GetPassword(username, async, cancellationToken); + var passwd = await GetPassword(username, async, cancellationToken).ConfigureAwait(false); if (passwd == null) throw new NpgsqlException("No password has been provided but the backend requires one (in MD5)"); @@ -326,8 +326,8 @@ async Task AuthenticateMD5(string username, byte[] salt, bool async, Cancellatio result[result.Length - 1] = 0; } - await WritePassword(result, async, cancellationToken); - await Flush(async, cancellationToken); + await WritePassword(result, async, cancellationToken).ConfigureAwait(false); + await Flush(async, cancellationToken).ConfigureAwait(false); } #if NET7_0_OR_GREATER @@ -338,11 +338,11 @@ async Task AuthenticateGSS(bool async) using var authContext = new NegotiateAuthentication(new NegotiateAuthenticationClientOptions{ TargetName = targetName}); var data = authContext.GetOutgoingBlob(ReadOnlySpan.Empty, out var statusCode)!; Debug.Assert(statusCode == NegotiateAuthenticationStatusCode.ContinueNeeded); - await WritePassword(data, 0, data.Length, async, UserCancellationToken); - await Flush(async, UserCancellationToken); + await WritePassword(data, 0, data.Length, async, UserCancellationToken).ConfigureAwait(false); + await Flush(async, UserCancellationToken).ConfigureAwait(false); while (true) { - var response = ExpectAny(await ReadMessage(async), this); + var response = ExpectAny(await ReadMessage(async).ConfigureAwait(false), this); if (response.AuthRequestType == AuthenticationRequestType.AuthenticationOk) break; if (response is not AuthenticationGSSContinueMessage gssMsg) @@ -354,15 +354,15 @@ async Task AuthenticateGSS(bool async) // This can happen if it's the first cycle, in which case we have to send that data to complete handshake (#4888) if (data is null) continue; - await WritePassword(data, 0, data.Length, async, UserCancellationToken); - await Flush(async, UserCancellationToken); + await 
WritePassword(data, 0, data.Length, async, UserCancellationToken).ConfigureAwait(false); + await Flush(async, UserCancellationToken).ConfigureAwait(false); } } #endif async ValueTask GetPassword(string username, bool async, CancellationToken cancellationToken = default) { - var password = await DataSource.GetPassword(async, cancellationToken); + var password = await DataSource.GetPassword(async, cancellationToken).ConfigureAwait(false); if (password is not null) return password; diff --git a/src/Npgsql/Internal/NpgsqlConnector.OldAuth.cs b/src/Npgsql/Internal/NpgsqlConnector.OldAuth.cs index 9cb30d47f7..e007ca4c57 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.OldAuth.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.OldAuth.cs @@ -46,7 +46,7 @@ async Task AuthenticateGSS(bool async) try { if (async) - await negotiateStream.AuthenticateAsClientAsync(CredentialCache.DefaultNetworkCredentials, targetName); + await negotiateStream.AuthenticateAsClientAsync(CredentialCache.DefaultNetworkCredentials, targetName).ConfigureAwait(false); else negotiateStream.AuthenticateAsClient(CredentialCache.DefaultNetworkCredentials, targetName); } @@ -65,7 +65,7 @@ async Task AuthenticateGSS(bool async) throw new NpgsqlException("NegotiateStream.AuthenticateAsClient completed unexpectedly without signaling success"); } - + /// /// This Stream is placed between NegotiateStream and the socket's NetworkStream (or SSLStream). 
It intercepts /// traffic and performs the following operations: @@ -114,8 +114,8 @@ async Task Write(byte[] buffer, int offset, int count, bool async, CancellationT if (count > _leftToWrite) throw new NpgsqlException($"NegotiateStream trying to write {count} bytes but according to frame header we only have {_leftToWrite} left!"); - await _connector.WritePassword(buffer, offset, count, async, cancellationToken); - await _connector.Flush(async, cancellationToken); + await _connector.WritePassword(buffer, offset, count, async, cancellationToken).ConfigureAwait(false); + await _connector.Flush(async, cancellationToken).ConfigureAwait(false); _leftToWrite -= count; } @@ -129,7 +129,7 @@ async Task Read(byte[] buffer, int offset, int count, bool async, Cancellat { if (_leftToRead == 0) { - var response = ExpectAny(await _connector.ReadMessage(async), _connector); + var response = ExpectAny(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); if (response.AuthRequestType == AuthenticationRequestType.AuthenticationOk) throw new AuthenticationCompleteException(); var gssMsg = response as AuthenticationGSSContinueMessage; diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 5ab0e8d174..3476ebae64 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -496,9 +496,9 @@ internal async Task Open(NpgsqlTimeout timeout, bool async, CancellationToken ca try { - await OpenCore(this, Settings.SslMode, timeout, async, cancellationToken); + await OpenCore(this, Settings.SslMode, timeout, async, cancellationToken).ConfigureAwait(false); - await DataSource.Bootstrap(this, timeout, forceReload: false, async, cancellationToken); + await DataSource.Bootstrap(this, timeout, forceReload: false, async, cancellationToken).ConfigureAwait(false); Debug.Assert(DataSource.SerializerOptions is not null); Debug.Assert(DataSource.DatabaseInfo is not null); @@ -548,7 +548,7 @@ internal async 
Task Open(NpgsqlTimeout timeout, bool async, CancellationToken ca try { if (async) - await DataSource.ConnectionInitializerAsync(tempConnection); + await DataSource.ConnectionInitializerAsync(tempConnection).ConfigureAwait(false); else if (!async) DataSource.ConnectionInitializer(tempConnection); } @@ -579,18 +579,18 @@ static async Task OpenCore( CancellationToken cancellationToken, bool isFirstAttempt = true) { - await conn.RawOpen(sslMode, timeout, async, cancellationToken, isFirstAttempt); + await conn.RawOpen(sslMode, timeout, async, cancellationToken, isFirstAttempt).ConfigureAwait(false); - var username = await conn.GetUsernameAsync(async, cancellationToken); + var username = await conn.GetUsernameAsync(async, cancellationToken).ConfigureAwait(false); timeout.CheckAndApply(conn); conn.WriteStartupMessage(username); - await conn.Flush(async, cancellationToken); + await conn.Flush(async, cancellationToken).ConfigureAwait(false); using var cancellationRegistration = conn.StartCancellableOperation(cancellationToken, attemptPgCancellation: false); try { - await conn.Authenticate(username, timeout, async, cancellationToken); + await conn.Authenticate(username, timeout, async, cancellationToken).ConfigureAwait(false); } catch (PostgresException e) when (e.SqlState == PostgresErrorCodes.InvalidAuthorizationSpecification && @@ -609,20 +609,20 @@ await OpenCore( timeout, async, cancellationToken, - isFirstAttempt: false); + isFirstAttempt: false).ConfigureAwait(false); return; } // We treat BackendKeyData as optional because some PostgreSQL-like database // don't send it (CockroachDB, CrateDB) - var msg = await conn.ReadMessage(async); + var msg = await conn.ReadMessage(async).ConfigureAwait(false); if (msg.Code == BackendMessageCode.BackendKeyData) { var keyDataMsg = (BackendKeyDataMessage)msg; conn.BackendProcessId = keyDataMsg.BackendProcessId; conn._backendSecretKey = keyDataMsg.BackendSecretKey; - msg = await conn.ReadMessage(async); + msg = await 
conn.ReadMessage(async).ConfigureAwait(false); } if (msg.Code != BackendMessageCode.ReadyForQuery) @@ -640,15 +640,15 @@ internal async ValueTask QueryDatabaseState( batch.BatchCommands.Add(new NpgsqlBatchCommand("SHOW default_transaction_read_only")); batch.Timeout = (int)timeout.CheckAndGetTimeLeft().TotalSeconds; - var reader = async ? await batch.ExecuteReaderAsync(cancellationToken) : batch.ExecuteReader(); + var reader = async ? await batch.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false) : batch.ExecuteReader(); try { if (async) { - await reader.ReadAsync(cancellationToken); + await reader.ReadAsync(cancellationToken).ConfigureAwait(false); _isHotStandBy = reader.GetBoolean(0); - await reader.NextResultAsync(cancellationToken); - await reader.ReadAsync(cancellationToken); + await reader.NextResultAsync(cancellationToken).ConfigureAwait(false); + await reader.ReadAsync(cancellationToken).ConfigureAwait(false); } else { @@ -667,7 +667,7 @@ internal async ValueTask QueryDatabaseState( finally { if (async) - await reader.DisposeAsync(); + await reader.DisposeAsync().ConfigureAwait(false); else reader.Dispose(); } @@ -736,7 +736,7 @@ async ValueTask GetUsernameAsyncInternal() if (!RuntimeInformation.IsOSPlatform(OSPlatform.Windows)) { username = await KerberosUsernameProvider.GetUsernameAsync(Settings.IncludeRealm, ConnectionLogger, async, - cancellationToken); + cancellationToken).ConfigureAwait(false); if (username?.Length > 0) { @@ -761,7 +761,7 @@ async Task RawOpen(SslMode sslMode, NpgsqlTimeout timeout, bool async, Cancellat try { if (async) - await ConnectAsync(timeout, cancellationToken); + await ConnectAsync(timeout, cancellationToken).ConfigureAwait(false); else Connect(timeout); @@ -790,9 +790,9 @@ async Task RawOpen(SslMode sslMode, NpgsqlTimeout timeout, bool async, Cancellat sslMode is SslMode.Require or SslMode.VerifyCA or SslMode.VerifyFull) { WriteSslRequest(); - await Flush(async, cancellationToken); + await Flush(async, 
cancellationToken).ConfigureAwait(false); - await ReadBuffer.Ensure(1, async); + await ReadBuffer.Ensure(1, async).ConfigureAwait(false); var response = (char)ReadBuffer.ReadByte(); timeout.CheckAndApply(this); @@ -805,7 +805,7 @@ async Task RawOpen(SslMode sslMode, NpgsqlTimeout timeout, bool async, Cancellat throw new NpgsqlException("SSL connection requested. No SSL enabled connection from this host is configured."); break; case 'S': - await DataSource.EncryptionHandler.NegotiateEncryption(this, sslMode, timeout, async, isFirstAttempt); + await DataSource.EncryptionHandler.NegotiateEncryption(this, sslMode, timeout, async, isFirstAttempt).ConfigureAwait(false); break; } @@ -927,7 +927,7 @@ internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, #endif if (async) - await sslStream.AuthenticateAsClientAsync(Host, clientCertificates, sslProtocols, checkCertificateRevocation); + await sslStream.AuthenticateAsClientAsync(Host, clientCertificates, sslProtocols, checkCertificateRevocation).ConfigureAwait(false); else sslStream.AuthenticateAsClient(Host, clientCertificates, sslProtocols, checkCertificateRevocation); @@ -1034,7 +1034,7 @@ Task GetHostAddressesAsync(CancellationToken ct) => // and raises the exception, while the actual task may be left running. var endpoints = NpgsqlConnectionStringBuilder.IsUnixSocket(Host, Port, out var socketPath) ? 
new EndPoint[] { new UnixDomainSocketEndPoint(socketPath) } - : IPAddressesToEndpoints(await TaskTimeoutAndCancellation.ExecuteAsync(GetHostAddressesAsync, timeout, cancellationToken), + : IPAddressesToEndpoints(await TaskTimeoutAndCancellation.ExecuteAsync(GetHostAddressesAsync, timeout, cancellationToken).ConfigureAwait(false), Port); // Give each IP an equal share of the remaining time @@ -1058,7 +1058,7 @@ Task GetHostAddressesAsync(CancellationToken ct) => var socket = new Socket(endpoint.AddressFamily, SocketType.Stream, protocolType); try { - await OpenSocketConnectionAsync(socket, endpoint, perIpTimeout, cancellationToken); + await OpenSocketConnectionAsync(socket, endpoint, perIpTimeout, cancellationToken).ConfigureAwait(false); SetSocketOptions(socket); _socket = socket; ConnectedEndPoint = endpoint; @@ -1183,7 +1183,7 @@ async Task MultiplexingReadLoop() try { - while (await CommandsInFlightReader.WaitToReadAsync()) + while (await CommandsInFlightReader.WaitToReadAsync().ConfigureAwait(false)) { commandsRead = 0; Debug.Assert(!InTransaction); @@ -1192,7 +1192,7 @@ async Task MultiplexingReadLoop() { commandsRead++; - await ReadBuffer.Ensure(5, true); + await ReadBuffer.Ensure(5, true).ConfigureAwait(false); // We have a resultset for the command - hand back control to the command (which will // return it to the user) @@ -1204,7 +1204,7 @@ async Task MultiplexingReadLoop() // true, so that the user code calling NpgsqlDataReader.Dispose will not continue executing // synchronously here. The prevents issues if the code after the next command's execution // completion blocks. 
- await new ValueTask(ReaderCompleted, ReaderCompleted.Version); + await new ValueTask(ReaderCompleted, ReaderCompleted.Version).ConfigureAwait(false); Debug.Assert(!InTransaction); } @@ -1246,7 +1246,7 @@ async Task MultiplexingReadLoop() { while (true) { - var pendingCommand = await CommandsInFlightReader.ReadAsync(); + var pendingCommand = await CommandsInFlightReader.ReadAsync().ConfigureAwait(false); // TODO: the exception we have here is sometimes just the result of the write loop breaking // the connector, so it doesn't represent the actual root cause. @@ -1336,7 +1336,7 @@ internal ValueTask ReadMessage( // TODO: There could be room for optimization here, rather than the async call(s) ReadBuffer.Timeout = TimeSpan.FromMilliseconds(InternalCommandTimeout); for (; PendingPrependedResponses > 0; PendingPrependedResponses--) - await ReadMessageLong(async, DataRowLoadingMode.Skip, readingNotifications: false, isReadingPrependedMessage: true); + await ReadMessageLong(async, DataRowLoadingMode.Skip, readingNotifications: false, isReadingPrependedMessage: true).ConfigureAwait(false); // We've read all the prepended response. // Allow cancellation to proceed. 
ReadingPrependedMessagesMRE.Set(); @@ -1358,7 +1358,7 @@ internal ValueTask ReadMessage( while (true) { - await ReadBuffer.Ensure(5, async, readingNotifications); + await ReadBuffer.Ensure(5, async, readingNotifications).ConfigureAwait(false); var messageCode = (BackendMessageCode)ReadBuffer.ReadByte(); ValidateBackendMessageCode(messageCode); var len = ReadBuffer.ReadInt32() - 4; // Transmitted length includes itself @@ -1369,7 +1369,7 @@ internal ValueTask ReadMessage( { if (dataRowLoadingMode == DataRowLoadingMode.Skip) { - await ReadBuffer.Skip(len, async); + await ReadBuffer.Skip(len, async).ConfigureAwait(false); continue; } } @@ -1387,7 +1387,7 @@ internal ValueTask ReadMessage( ReadBuffer = oversizeBuffer; } - await ReadBuffer.Ensure(len, async); + await ReadBuffer.Ensure(len, async).ConfigureAwait(false); } var msg = ParseServerMessage(ReadBuffer, messageCode, len, isReadingPrependedMessage); @@ -1960,7 +1960,7 @@ internal async Task CloseOngoingOperations(bool async) var copyOperation = CurrentCopyOperation; if (reader != null) - await reader.Close(connectionClosing: true, async, isDisposing: false); + await reader.Close(async, connectionClosing: true, isDisposing: false).ConfigureAwait(false); else if (copyOperation != null) { // TODO: There's probably a race condition as the COPY operation may finish on its own during the next few lines @@ -1975,7 +1975,7 @@ copyOperation is NpgsqlCopyTextWriter || try { if (async) - await copyOperation.CancelAsync(); + await copyOperation.CancelAsync().ConfigureAwait(false); else copyOperation.Cancel(); } @@ -1988,7 +1988,7 @@ copyOperation is NpgsqlCopyTextWriter || try { if (async) - await copyOperation.DisposeAsync(); + await copyOperation.DisposeAsync().ConfigureAwait(false); else copyOperation.Dispose(); } @@ -2319,7 +2319,7 @@ internal async Task Reset(bool async) break; case TransactionStatus.InTransactionBlock: case TransactionStatus.InFailedTransactionBlock: - await Rollback(async); + await 
Rollback(async).ConfigureAwait(false); ClearTransaction(); endBindingScope = true; break; @@ -2614,7 +2614,7 @@ internal async Task Wait(bool async, int timeout, CancellationToken cancel using var _ = StartUserAction(ConnectorState.Waiting, cancellationToken: cancellationToken, attemptPgCancellation: false); // We may have prepended messages in the connection's write buffer - these need to be flushed now. - await Flush(async, cancellationToken); + await Flush(async, cancellationToken).ConfigureAwait(false); var keepaliveMs = Settings.KeepAlive * 1000; while (true) @@ -2625,7 +2625,7 @@ internal async Task Wait(bool async, int timeout, CancellationToken cancel UserTimeout = timeoutForKeepalive ? keepaliveMs : timeout; try { - var msg = await ReadMessageWithNotifications(async); + var msg = await ReadMessageWithNotifications(async).ConfigureAwait(false); if (msg != null) { throw Break( @@ -2642,8 +2642,8 @@ internal async Task Wait(bool async, int timeout, CancellationToken cancel LogMessages.SendingKeepalive(ConnectionLogger, Id); var keepaliveTime = Stopwatch.StartNew(); - await WriteSync(async, cancellationToken); - await Flush(async, cancellationToken); + await WriteSync(async, cancellationToken).ConfigureAwait(false); + await Flush(async, cancellationToken).ConfigureAwait(false); var receivedNotification = false; var expectedMessageCode = BackendMessageCode.RowDescription; @@ -2654,7 +2654,7 @@ internal async Task Wait(bool async, int timeout, CancellationToken cancel try { - msg = await ReadMessageWithNotifications(async); + msg = await ReadMessageWithNotifications(async).ConfigureAwait(false); } catch (Exception e) when (e is OperationCanceledException || e is NpgsqlException npgEx && npgEx.InnerException is TimeoutException) { @@ -2712,20 +2712,20 @@ internal async Task ExecuteInternalCommand(string query, bool async, Cancellatio { LogMessages.ExecutingInternalCommand(CommandLogger, query, Id); - await WriteQuery(query, async, cancellationToken); - await 
Flush(async, cancellationToken); - Expect(await ReadMessage(async), this); - Expect(await ReadMessage(async), this); + await WriteQuery(query, async, cancellationToken).ConfigureAwait(false); + await Flush(async, cancellationToken).ConfigureAwait(false); + Expect(await ReadMessage(async).ConfigureAwait(false), this); + Expect(await ReadMessage(async).ConfigureAwait(false), this); } internal async Task ExecuteInternalCommand(byte[] data, bool async, CancellationToken cancellationToken = default) { Debug.Assert(State != ConnectorState.Ready, "Forgot to start a user action..."); - await WritePregenerated(data, async, cancellationToken); - await Flush(async, cancellationToken); - Expect(await ReadMessage(async), this); - Expect(await ReadMessage(async), this); + await WritePregenerated(data, async, cancellationToken).ConfigureAwait(false); + await Flush(async, cancellationToken).ConfigureAwait(false); + Expect(await ReadMessage(async).ConfigureAwait(false), this); + Expect(await ReadMessage(async).ConfigureAwait(false), this); } #endregion diff --git a/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs b/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs index 8120b4e9c8..a05547ea1b 100644 --- a/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs +++ b/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs @@ -319,7 +319,7 @@ internal static async Task Load(NpgsqlConnector conn, Npgsql { foreach (var factory in Factories) { - var dbInfo = await factory.Load(conn, timeout, async); + var dbInfo = await factory.Load(conn, timeout, async).ConfigureAwait(false); if (dbInfo != null) { dbInfo.ProcessTypes(); diff --git a/src/Npgsql/KerberosUsernameProvider.cs b/src/Npgsql/KerberosUsernameProvider.cs index 3df990387b..ba162ff21a 100644 --- a/src/Npgsql/KerberosUsernameProvider.cs +++ b/src/Npgsql/KerberosUsernameProvider.cs @@ -50,7 +50,7 @@ sealed class KerberosUsernameProvider { #if NET5_0_OR_GREATER if (async) - await process.WaitForExitAsync(cancellationToken); + await 
process.WaitForExitAsync(cancellationToken).ConfigureAwait(false); else // ReSharper disable once MethodHasAsyncOverloadWithCancellation process.WaitForExit(); @@ -69,9 +69,9 @@ sealed class KerberosUsernameProvider for (var i = 0; i < 2; i++) // ReSharper disable once MethodHasAsyncOverload #if NET7_0_OR_GREATER - if ((line = async ? await process.StandardOutput.ReadLineAsync(cancellationToken) : process.StandardOutput.ReadLine()) == null) + if ((line = async ? await process.StandardOutput.ReadLineAsync(cancellationToken).ConfigureAwait(false) : process.StandardOutput.ReadLine()) == null) #elif NET5_0_OR_GREATER - if ((line = async ? await process.StandardOutput.ReadLineAsync() : process.StandardOutput.ReadLine()) == null) + if ((line = async ? await process.StandardOutput.ReadLineAsync().ConfigureAwait(false) : process.StandardOutput.ReadLine()) == null) #else if ((line = process.StandardOutput.ReadLine()) == null) #endif diff --git a/src/Npgsql/MultiplexingDataSource.cs b/src/Npgsql/MultiplexingDataSource.cs index e9e7fa3069..d1245cfee4 100644 --- a/src/Npgsql/MultiplexingDataSource.cs +++ b/src/Npgsql/MultiplexingDataSource.cs @@ -88,7 +88,7 @@ async Task MultiplexingWriteLoop() { // Get a first command out. 
if (!_multiplexCommandReader.TryRead(out command)) - command = await _multiplexCommandReader.ReadAsync(); + command = await _multiplexCommandReader.ReadAsync().ConfigureAwait(false); } catch (ChannelClosedException) { @@ -112,7 +112,7 @@ async Task MultiplexingWriteLoop() command.InternalConnection!, new NpgsqlTimeout(TimeSpan.FromSeconds(Settings.Timeout)), async: true, - CancellationToken.None); + CancellationToken.None).ConfigureAwait(false); if (connector != null) { @@ -370,8 +370,8 @@ protected override void DisposeBase() protected override async ValueTask DisposeAsyncBase() { MultiplexCommandWriter.Complete(new ObjectDisposedException(nameof(MultiplexingDataSource))); - await _multiplexWriteLoop; - await base.DisposeAsyncBase(); + await _multiplexWriteLoop.ConfigureAwait(false); + await base.DisposeAsyncBase().ConfigureAwait(false); } struct MultiplexingStats diff --git a/src/Npgsql/NoSynchronizationContextScope.cs b/src/Npgsql/NoSynchronizationContextScope.cs deleted file mode 100644 index d34d884856..0000000000 --- a/src/Npgsql/NoSynchronizationContextScope.cs +++ /dev/null @@ -1,37 +0,0 @@ -using System; -using System.Threading; - -namespace Npgsql; - -/// -/// This mechanism is used to temporarily set the current synchronization context to null while -/// executing Npgsql code, making all await continuations execute on the thread pool. This replaces -/// the need to place ConfigureAwait(false) everywhere, and should be used in all surface async methods, -/// without exception. -/// -/// Warning: do not use this directly in async methods, use it in sync wrappers of async methods -/// (see https://github.com/npgsql/npgsql/issues/1593) -/// -/// -/// https://stackoverflow.com/a/28307965/640325 -/// -static class NoSynchronizationContextScope -{ - internal static Disposable Enter() => new(SynchronizationContext.Current); - - internal struct Disposable : IDisposable - { - readonly SynchronizationContext? 
_synchronizationContext; - - internal Disposable(SynchronizationContext? synchronizationContext) - { - if (synchronizationContext != null) - SynchronizationContext.SetSynchronizationContext(null); - - _synchronizationContext = synchronizationContext; - } - - public void Dispose() - => SynchronizationContext.SetSynchronizationContext(_synchronizationContext); - } -} \ No newline at end of file diff --git a/src/Npgsql/NpgsqlBatch.cs b/src/Npgsql/NpgsqlBatch.cs index 2c011f1d26..446cb4746f 100644 --- a/src/Npgsql/NpgsqlBatch.cs +++ b/src/Npgsql/NpgsqlBatch.cs @@ -140,7 +140,7 @@ protected override DbDataReader ExecuteDbDataReader(CommandBehavior behavior) protected override async Task ExecuteDbDataReaderAsync( CommandBehavior behavior, CancellationToken cancellationToken) - => await ExecuteReaderAsync(behavior, cancellationToken); + => await ExecuteReaderAsync(behavior, cancellationToken).ConfigureAwait(false); /// public new Task ExecuteReaderAsync(CancellationToken cancellationToken = default) diff --git a/src/Npgsql/NpgsqlBinaryExporter.cs b/src/Npgsql/NpgsqlBinaryExporter.cs index e62bc06c79..2b7d19919b 100644 --- a/src/Npgsql/NpgsqlBinaryExporter.cs +++ b/src/Npgsql/NpgsqlBinaryExporter.cs @@ -69,13 +69,13 @@ internal NpgsqlBinaryExporter(NpgsqlConnector connector) internal async Task Init(string copyToCommand, bool async, CancellationToken cancellationToken = default) { - await _connector.WriteQuery(copyToCommand, async, cancellationToken); - await _connector.Flush(async, cancellationToken); + await _connector.WriteQuery(copyToCommand, async, cancellationToken).ConfigureAwait(false); + await _connector.Flush(async, cancellationToken).ConfigureAwait(false); using var registration = _connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); CopyOutResponseMessage copyOutResponse; - var msg = await _connector.ReadMessage(async); + var msg = await _connector.ReadMessage(async).ConfigureAwait(false); switch (msg.Code) { case 
BackendMessageCode.CopyOutResponse: @@ -100,15 +100,15 @@ internal async Task Init(string copyToCommand, bool async, CancellationToken can _columnInfoCache = new PgConverterInfo[NumColumns]; _rowsExported = 0; _endOfMessagePos = _buf.CumulativeReadPosition; - await ReadHeader(async); + await ReadHeader(async).ConfigureAwait(false); } async Task ReadHeader(bool async) { - var msg = await _connector.ReadMessage(async); + var msg = await _connector.ReadMessage(async).ConfigureAwait(false); _endOfMessagePos = _buf.CumulativeReadPosition + Expect(msg, _connector).Length; var headerLen = NpgsqlRawCopyStream.BinarySignature.Length + 4 + 4; - await _buf.Ensure(headerLen, async); + await _buf.Ensure(headerLen, async).ConfigureAwait(false); foreach (var t in NpgsqlRawCopyStream.BinarySignature) if (_buf.ReadByte() != t) @@ -141,11 +141,7 @@ async Task ReadHeader(bool async) /// The number of columns in the row. -1 if there are no further rows. /// Note: This will currently be the same value for all rows, but this may change in the future. /// - public ValueTask StartRowAsync(CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return StartRow(true, cancellationToken); - } + public ValueTask StartRowAsync(CancellationToken cancellationToken = default) => StartRow(true, cancellationToken); async ValueTask StartRow(bool async, CancellationToken cancellationToken = default) { @@ -158,27 +154,27 @@ async ValueTask StartRow(bool async, CancellationToken cancellationToken = // Consume and advance any active column. if (_column >= 0) - await Commit(async, resumableOp: false); + await Commit(async, resumableOp: false).ConfigureAwait(false); // The very first row (i.e. _column == -1) is included in the header's CopyData message. // Otherwise we need to read in a new CopyData row (the docs specify that there's a CopyData // message per row). 
if (_column == NumColumns) { - var msg = Expect(await _connector.ReadMessage(async), _connector); + var msg = Expect(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); _endOfMessagePos = _buf.CumulativeReadPosition + msg.Length; } else if (_column != BeforeRow) ThrowHelper.ThrowInvalidOperationException("Already in the middle of a row"); - await _buf.Ensure(2, async); + await _buf.Ensure(2, async).ConfigureAwait(false); var numColumns = _buf.ReadInt16(); if (numColumns == -1) { - Expect(await _connector.ReadMessage(async), _connector); - Expect(await _connector.ReadMessage(async), _connector); - Expect(await _connector.ReadMessage(async), _connector); + Expect(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); + Expect(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); + Expect(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); _column = BeforeRow; _isConsumed = true; return -1; @@ -214,10 +210,7 @@ async ValueTask StartRow(bool async, CancellationToken cancellationToken = /// /// The value of the column public ValueTask ReadAsync(CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return Read(async: true, cancellationToken); - } + => Read(async: true, cancellationToken); ValueTask Read(bool async, CancellationToken cancellationToken = default) => Read(async, null, cancellationToken); @@ -277,10 +270,7 @@ PgTypeId GetRepresentationalOrDefault(string dataTypeName) /// The .NET type of the column to be read. /// The value of the column public ValueTask ReadAsync(NpgsqlDbType type, CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return Read(async: true, type, cancellationToken); - } + => Read(async: true, type, cancellationToken); async ValueTask Read(bool async, NpgsqlDbType? 
type, CancellationToken cancellationToken) { @@ -294,7 +284,7 @@ async ValueTask Read(bool async, NpgsqlDbType? type, CancellationToken can // We cannot allow endless rereads otherwise it becomes quite unclear when a column advance happens. if (PgReader is { Resumable: true, FieldSize: -1 }) { - await Commit(async, resumableOp: false); + await Commit(async, resumableOp: false).ConfigureAwait(false); return DbNullOrThrow(); } @@ -302,11 +292,11 @@ async ValueTask Read(bool async, NpgsqlDbType? type, CancellationToken can PgConverterInfo info; if (!PgReader.Resumable || PgReader.CurrentRemaining != PgReader.FieldSize) { - await Commit(async, resumableOp: false); + await Commit(async, resumableOp: false).ConfigureAwait(false); info = GetInfo(); // We need to get info after potential I/O as we don't know beforehand at what column we're at. - var columnLen = await ReadColumnLenIfNeeded(async, resumableOp: false); + var columnLen = await ReadColumnLenIfNeeded(async, resumableOp: false).ConfigureAwait(false); if (_column == NumColumns) ThrowHelper.ThrowInvalidOperationException("No more columns left in the current row"); @@ -320,11 +310,11 @@ async ValueTask Read(bool async, NpgsqlDbType? type, CancellationToken can T result; if (async) { - await PgReader.StartReadAsync(info.BufferRequirement, cancellationToken); + await PgReader.StartReadAsync(info.BufferRequirement, cancellationToken).ConfigureAwait(false); result = info.AsObject - ? (T)await info.Converter.ReadAsObjectAsync(PgReader, cancellationToken) - : await info.GetConverter().ReadAsync(PgReader, cancellationToken); - await PgReader.EndReadAsync(); + ? (T)await info.Converter.ReadAsObjectAsync(PgReader, cancellationToken).ConfigureAwait(false) + : await info.GetConverter().ReadAsync(PgReader, cancellationToken).ConfigureAwait(false); + await PgReader.EndReadAsync().ConfigureAwait(false); } else { @@ -367,16 +357,13 @@ public bool IsNull /// /// Skips the current column without interpreting its value. 
/// - public void Skip() => Skip(false).GetAwaiter().GetResult(); + public void Skip() => Skip(async: false).GetAwaiter().GetResult(); /// /// Skips the current column without interpreting its value. /// public Task SkipAsync(CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return Skip(true, cancellationToken); - } + => Skip(true, cancellationToken); async Task Skip(bool async, CancellationToken cancellationToken = default) { @@ -386,9 +373,9 @@ async Task Skip(bool async, CancellationToken cancellationToken = default) // We allow IsNull to have been called before skip. if (PgReader.Initialized && PgReader is not { Resumable: true, FieldSize: -1 }) - await Commit(async, resumableOp: false); - await ReadColumnLenIfNeeded(async, resumableOp: false); - await PgReader.Consume(async, cancellationToken: cancellationToken); + await Commit(async, resumableOp: false).ConfigureAwait(false); + await ReadColumnLenIfNeeded(async, resumableOp: false).ConfigureAwait(false); + await PgReader.Consume(async, cancellationToken: cancellationToken).ConfigureAwait(false); } #endregion @@ -408,7 +395,7 @@ async ValueTask ReadColumnLenIfNeeded(bool async, bool resumableOp) if (PgReader is { Resumable: true, FieldSize: -1 }) return -1; - await _buf.Ensure(4, async); + await _buf.Ensure(4, async).ConfigureAwait(false); var columnLen = _buf.ReadInt32(); PgReader.Init(columnLen, DataFormat.Binary, resumableOp); return PgReader.FieldSize; @@ -441,17 +428,13 @@ public Task CancelAsync() /// /// Completes that binary export and sets the connection back to idle state /// - public void Dispose() => DisposeAsync(false).GetAwaiter().GetResult(); + public void Dispose() => DisposeAsync(async: false).GetAwaiter().GetResult(); /// /// Async completes that binary export and sets the connection back to idle state /// /// - public ValueTask DisposeAsync() - { - using (NoSynchronizationContextScope.Enter()) - return DisposeAsync(true); - } + public 
ValueTask DisposeAsync() => DisposeAsync(async: true); async ValueTask DisposeAsync(bool async) { @@ -468,14 +451,14 @@ async ValueTask DisposeAsync(bool async) { using var registration = _connector.StartNestedCancellableOperation(attemptPgCancellation: false); // Be sure to commit the reader. - await PgReader.Commit(async, resuming: false); + await PgReader.Commit(async, resuming: false).ConfigureAwait(false); // Finish the current CopyData message - await _buf.Skip(checked((int)(_endOfMessagePos - _buf.CumulativeReadPosition)), async); + await _buf.Skip(checked((int)(_endOfMessagePos - _buf.CumulativeReadPosition)), async).ConfigureAwait(false); // Read to the end _connector.SkipUntil(BackendMessageCode.CopyDone); // We intentionally do not pass a CancellationToken since we don't want to cancel cleanup - Expect(await _connector.ReadMessage(async), _connector); - Expect(await _connector.ReadMessage(async), _connector); + Expect(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); + Expect(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); } catch (OperationCanceledException e) when (e.InnerException is PostgresException pg && pg.SqlState == PostgresErrorCodes.QueryCanceled) { diff --git a/src/Npgsql/NpgsqlBinaryImporter.cs b/src/Npgsql/NpgsqlBinaryImporter.cs index a57c071448..ef0c4a051d 100644 --- a/src/Npgsql/NpgsqlBinaryImporter.cs +++ b/src/Npgsql/NpgsqlBinaryImporter.cs @@ -71,13 +71,13 @@ internal NpgsqlBinaryImporter(NpgsqlConnector connector) internal async Task Init(string copyFromCommand, bool async, CancellationToken cancellationToken = default) { - await _connector.WriteQuery(copyFromCommand, async, cancellationToken); - await _connector.Flush(async, cancellationToken); + await _connector.WriteQuery(copyFromCommand, async, cancellationToken).ConfigureAwait(false); + await _connector.Flush(async, cancellationToken).ConfigureAwait(false); using var registration = 
_connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); CopyInResponseMessage copyInResponse; - var msg = await _connector.ReadMessage(async); + var msg = await _connector.ReadMessage(async).ConfigureAwait(false); switch (msg.Code) { case BackendMessageCode.CopyInResponse: @@ -126,23 +126,18 @@ void WriteHeader() /// /// Starts writing a single row, must be invoked before writing any columns. /// - public Task StartRowAsync(CancellationToken cancellationToken = default) - { - if (cancellationToken.IsCancellationRequested) - return Task.FromCanceled(cancellationToken); - using (NoSynchronizationContextScope.Enter()) - return StartRow(true, cancellationToken); - } + public Task StartRowAsync(CancellationToken cancellationToken = default) => StartRow(async: true, cancellationToken); async Task StartRow(bool async, CancellationToken cancellationToken = default) { CheckReady(); + cancellationToken.ThrowIfCancellationRequested(); if (_column != -1 && _column != NumColumns) ThrowHelper.ThrowInvalidOperationException_BinaryImportParametersMismatch(NumColumns, _column); if (_buf.WriteSpaceLeft < 2) - await _buf.Flush(async, cancellationToken); + await _buf.Flush(async, cancellationToken).ConfigureAwait(false); _buf.WriteInt16(NumColumns); _pgWriter.Refresh(); @@ -159,7 +154,7 @@ async Task StartRow(bool async, CancellationToken cancellationToken = default) /// corruption will occur. If in doubt, use to manually /// specify the type. /// - public void Write(T value) => Write(value, false).GetAwaiter().GetResult(); + public void Write(T value) => Write(async: false, value).GetAwaiter().GetResult(); /// /// Writes a single column in the current row. @@ -173,17 +168,13 @@ async Task StartRow(bool async, CancellationToken cancellationToken = default) /// corruption will occur. If in doubt, use to manually /// specify the type. 
/// - public Task WriteAsync(T value, CancellationToken cancellationToken = default) - { - if (cancellationToken.IsCancellationRequested) - return Task.FromCanceled(cancellationToken); - using (NoSynchronizationContextScope.Enter()) - return Write(value, true, cancellationToken); - } + public Task WriteAsync(T value, CancellationToken cancellationToken = default) => Write(async: true, value, cancellationToken); - Task Write(T value, bool async, CancellationToken cancellationToken = default) + Task Write(bool async, T value, CancellationToken cancellationToken = default) { CheckColumnIndex(); + if (cancellationToken.IsCancellationRequested) + return Task.FromCanceled(cancellationToken); var p = _params[_column]; if (p == null) @@ -209,7 +200,7 @@ Task Write(T value, bool async, CancellationToken cancellationToken = default /// /// The .NET type of the column to be written. public void Write(T value, NpgsqlDbType npgsqlDbType) => - Write(value, npgsqlDbType, false).GetAwaiter().GetResult(); + Write(async: false, value, npgsqlDbType).GetAwaiter().GetResult(); /// /// Writes a single column in the current row as type . @@ -226,16 +217,12 @@ public void Write(T value, NpgsqlDbType npgsqlDbType) => /// /// The .NET type of the column to be written. 
public Task WriteAsync(T value, NpgsqlDbType npgsqlDbType, CancellationToken cancellationToken = default) - { - if (cancellationToken.IsCancellationRequested) - return Task.FromCanceled(cancellationToken); - using (NoSynchronizationContextScope.Enter()) - return Write(value, npgsqlDbType, true, cancellationToken); - } + => Write(async: true, value, npgsqlDbType, cancellationToken); - Task Write(T value, NpgsqlDbType npgsqlDbType, bool async, CancellationToken cancellationToken = default) + Task Write(bool async, T value, NpgsqlDbType npgsqlDbType, CancellationToken cancellationToken = default) { CheckColumnIndex(); + cancellationToken.ThrowIfCancellationRequested(); var p = _params[_column]; if (p == null) @@ -263,7 +250,7 @@ Task Write(T value, NpgsqlDbType npgsqlDbType, bool async, CancellationToken /// /// The .NET type of the column to be written. public void Write(T value, string dataTypeName) => - Write(value, dataTypeName, false).GetAwaiter().GetResult(); + Write(async: false, value, dataTypeName).GetAwaiter().GetResult(); /// /// Writes a single column in the current row as type . @@ -278,16 +265,12 @@ public void Write(T value, string dataTypeName) => /// /// The .NET type of the column to be written. 
public Task WriteAsync(T value, string dataTypeName, CancellationToken cancellationToken = default) - { - if (cancellationToken.IsCancellationRequested) - return Task.FromCanceled(cancellationToken); - using (NoSynchronizationContextScope.Enter()) - return Write(value, dataTypeName, true, cancellationToken); - } + => Write(async: true, value, dataTypeName, cancellationToken); - Task Write(T value, string dataTypeName, bool async, CancellationToken cancellationToken = default) + Task Write(bool async, T value, string dataTypeName, CancellationToken cancellationToken = default) { CheckColumnIndex(); + cancellationToken.ThrowIfCancellationRequested(); var p = _params[_column]; if (p == null) @@ -335,7 +318,7 @@ async Task Write(T value, NpgsqlParameter param, bool async, CancellationToke param.Bind(out _, out _); try { - await param.Write(async, _pgWriter.WithFlushMode(async ? FlushMode.NonBlocking : FlushMode.Blocking), cancellationToken); + await param.Write(async, _pgWriter.WithFlushMode(async ? FlushMode.NonBlocking : FlushMode.Blocking), cancellationToken).ConfigureAwait(false); } catch (Exception ex) { @@ -353,22 +336,17 @@ async Task Write(T value, NpgsqlParameter param, bool async, CancellationToke /// /// Writes a single null column value. 
/// - public Task WriteNullAsync(CancellationToken cancellationToken = default) - { - if (cancellationToken.IsCancellationRequested) - return Task.FromCanceled(cancellationToken); - using (NoSynchronizationContextScope.Enter()) - return WriteNull(true, cancellationToken); - } + public Task WriteNullAsync(CancellationToken cancellationToken = default) => WriteNull(async: true, cancellationToken); async Task WriteNull(bool async, CancellationToken cancellationToken = default) { CheckReady(); + cancellationToken.ThrowIfCancellationRequested(); if (_column == -1) throw new InvalidOperationException("A row hasn't been started"); if (_buf.WriteSpaceLeft < 4) - await _buf.Flush(async, cancellationToken); + await _buf.Flush(async, cancellationToken).ConfigureAwait(false); _buf.WriteInt32(-1); _pgWriter.Refresh(); @@ -393,18 +371,13 @@ async Task WriteNull(bool async, CancellationToken cancellationToken = default) /// /// An array of column values to be written as a single row public Task WriteRowAsync(CancellationToken cancellationToken = default, params object?[] values) - { - if (cancellationToken.IsCancellationRequested) - return Task.FromCanceled(cancellationToken); - using (NoSynchronizationContextScope.Enter()) - return WriteRow(true, cancellationToken, values); - } + => WriteRow(async: true, cancellationToken, values); async Task WriteRow(bool async, CancellationToken cancellationToken = default, params object?[] values) { - await StartRow(async, cancellationToken); + await StartRow(async, cancellationToken).ConfigureAwait(false); foreach (var value in values) - await Write(value, async, cancellationToken); + await Write(async, value, cancellationToken).ConfigureAwait(false); } void CheckColumnIndex() @@ -425,13 +398,7 @@ void CheckColumnIndex() /// /// Completes the import operation. The writer is unusable after this operation. 
/// - public ValueTask CompleteAsync(CancellationToken cancellationToken = default) - { - if (cancellationToken.IsCancellationRequested) - return new ValueTask(Task.FromCanceled(cancellationToken)); - using (NoSynchronizationContextScope.Enter()) - return Complete(true, cancellationToken); - } + public ValueTask CompleteAsync(CancellationToken cancellationToken = default) => Complete(async: true, cancellationToken); async ValueTask Complete(bool async, CancellationToken cancellationToken = default) { @@ -441,19 +408,19 @@ async ValueTask Complete(bool async, CancellationToken cancellationToken if (InMiddleOfRow) { - await Cancel(async, cancellationToken); + await Cancel(async, cancellationToken).ConfigureAwait(false); throw new InvalidOperationException("Binary importer closed in the middle of a row, cancelling import."); } try { - await WriteTrailer(async, cancellationToken); - await _buf.Flush(async, cancellationToken); + await WriteTrailer(async, cancellationToken).ConfigureAwait(false); + await _buf.Flush(async, cancellationToken).ConfigureAwait(false); _buf.EndCopyMode(); - await _connector.WriteCopyDone(async, cancellationToken); - await _connector.Flush(async, cancellationToken); - var cmdComplete = Expect(await _connector.ReadMessage(async), _connector); - Expect(await _connector.ReadMessage(async), _connector); + await _connector.WriteCopyDone(async, cancellationToken).ConfigureAwait(false); + await _connector.Flush(async, cancellationToken).ConfigureAwait(false); + var cmdComplete = Expect(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); + Expect(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); _state = ImporterState.Committed; return cmdComplete.Rows; } @@ -466,7 +433,7 @@ async ValueTask Complete(bool async, CancellationToken cancellationToken void ICancelable.Cancel() => Close(); - async Task ICancelable.CancelAsync() => await CloseAsync(); + async Task ICancelable.CancelAsync() => await 
CloseAsync().ConfigureAwait(false); /// /// @@ -488,23 +455,19 @@ async ValueTask Complete(bool async, CancellationToken cancellationToken /// be reverted. /// /// - public ValueTask DisposeAsync() - { - using (NoSynchronizationContextScope.Enter()) - return CloseAsync(true); - } + public ValueTask DisposeAsync() => CloseAsync(true); async Task Cancel(bool async, CancellationToken cancellationToken = default) { _state = ImporterState.Cancelled; _buf.Clear(); _buf.EndCopyMode(); - await _connector.WriteCopyFail(async, cancellationToken); - await _connector.Flush(async, cancellationToken); + await _connector.WriteCopyFail(async, cancellationToken).ConfigureAwait(false); + await _connector.Flush(async, cancellationToken).ConfigureAwait(false); try { using var registration = _connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); - var msg = await _connector.ReadMessage(async); + var msg = await _connector.ReadMessage(async).ConfigureAwait(false); // The CopyFail should immediately trigger an exception from the read above. throw _connector.Break( new NpgsqlException("Expected ErrorResponse when cancelling COPY but got: " + msg.Code)); @@ -525,7 +488,7 @@ async Task Cancel(bool async, CancellationToken cancellationToken = default) /// be reverted. /// /// - public void Close() => CloseAsync(false).GetAwaiter().GetResult(); + public void Close() => CloseAsync(async: false).GetAwaiter().GetResult(); /// /// @@ -536,22 +499,17 @@ async Task Cancel(bool async, CancellationToken cancellationToken = default) /// be reverted. 
/// /// - public ValueTask CloseAsync(CancellationToken cancellationToken = default) - { - if (cancellationToken.IsCancellationRequested) - return new ValueTask(Task.FromCanceled(cancellationToken)); - using (NoSynchronizationContextScope.Enter()) - return CloseAsync(true, cancellationToken); - } + public ValueTask CloseAsync(CancellationToken cancellationToken = default) => CloseAsync(async: true, cancellationToken); async ValueTask CloseAsync(bool async, CancellationToken cancellationToken = default) { + cancellationToken.ThrowIfCancellationRequested(); switch (_state) { case ImporterState.Disposed: return; case ImporterState.Ready: - await Cancel(async, cancellationToken); + await Cancel(async, cancellationToken).ConfigureAwait(false); break; case ImporterState.Cancelled: case ImporterState.Committed: @@ -588,7 +546,7 @@ void Cleanup() async Task WriteTrailer(bool async, CancellationToken cancellationToken = default) { if (_buf.WriteSpaceLeft < 2) - await _buf.Flush(async, cancellationToken); + await _buf.Flush(async, cancellationToken).ConfigureAwait(false); _buf.WriteInt16(-1); } diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index 3f513f2e05..4c8cbadcf9 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -646,10 +646,7 @@ public virtual Task PrepareAsync(CancellationToken cancellationToken = default) #else public override Task PrepareAsync(CancellationToken cancellationToken = default) #endif - { - using (NoSynchronizationContextScope.Enter()) - return Prepare(true, cancellationToken); - } + => Prepare(async: true, cancellationToken); Task Prepare(bool async, CancellationToken cancellationToken = default) { @@ -809,10 +806,7 @@ public void Unprepare() /// An optional token to cancel the asynchronous operation. The default value is . 
/// public Task UnprepareAsync(CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return Unprepare(true, cancellationToken); - } + => Unprepare(async: true, cancellationToken); async Task Unprepare(bool async, CancellationToken cancellationToken = default) { @@ -1221,15 +1215,12 @@ async Task SendClose(NpgsqlConnector connector, bool async, CancellationToken ca /// /// A task representing the asynchronous operation, with the number of rows affected if known; -1 otherwise. public override Task ExecuteNonQueryAsync(CancellationToken cancellationToken) - { - using (NoSynchronizationContextScope.Enter()) - return ExecuteNonQuery(true, cancellationToken); - } + => ExecuteNonQuery(async: true, cancellationToken); [MethodImpl(MethodImplOptions.AggressiveInlining)] async Task ExecuteNonQuery(bool async, CancellationToken cancellationToken) { - var reader = await ExecuteReader(CommandBehavior.Default, async, cancellationToken).ConfigureAwait(false); + var reader = await ExecuteReader(async, CommandBehavior.Default, cancellationToken).ConfigureAwait(false); try { while (async ? await reader.NextResultAsync(cancellationToken).ConfigureAwait(false) : reader.NextResult()) ; @@ -1266,10 +1257,7 @@ async Task ExecuteNonQuery(bool async, CancellationToken cancellationToken) /// A task representing the asynchronous operation, with the first column of the /// first row in the result set, or a null reference if the result set is empty. 
public override Task ExecuteScalarAsync(CancellationToken cancellationToken) - { - using (NoSynchronizationContextScope.Enter()) - return ExecuteScalar(true, cancellationToken).AsTask(); - } + => ExecuteScalar(async: true, cancellationToken).AsTask(); [MethodImpl(MethodImplOptions.AggressiveInlining)] async ValueTask ExecuteScalar(bool async, CancellationToken cancellationToken) @@ -1278,7 +1266,7 @@ async Task ExecuteNonQuery(bool async, CancellationToken cancellationToken) if (IsWrappedByBatch || !Parameters.HasOutputParameters) behavior |= CommandBehavior.SequentialAccess; - var reader = await ExecuteReader(behavior, async, cancellationToken).ConfigureAwait(false); + var reader = await ExecuteReader(async, behavior, cancellationToken).ConfigureAwait(false); try { var read = async ? await reader.ReadAsync(cancellationToken).ConfigureAwait(false) : reader.Read(); @@ -1322,7 +1310,7 @@ protected override async Task ExecuteDbDataReaderAsync(CommandBeha /// One of the enumeration values that specifies the command behavior. /// A task representing the operation. public new NpgsqlDataReader ExecuteReader(CommandBehavior behavior = CommandBehavior.Default) - => ExecuteReader(behavior, async: false, CancellationToken.None).GetAwaiter().GetResult(); + => ExecuteReader(async: false, behavior, CancellationToken.None).GetAwaiter().GetResult(); /// /// An asynchronous version of , which executes @@ -1347,16 +1335,13 @@ protected override async Task ExecuteDbDataReaderAsync(CommandBeha /// /// A task representing the asynchronous operation. public new Task ExecuteReaderAsync(CommandBehavior behavior, CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return ExecuteReader(behavior, async: true, cancellationToken).AsTask(); - } + => ExecuteReader(async: true, behavior, cancellationToken).AsTask(); // TODO: Maybe pool these? 
internal ManualResetValueTaskSource ExecutionCompletion { get; } = new(); - internal virtual async ValueTask ExecuteReader(CommandBehavior behavior, bool async, CancellationToken cancellationToken) + internal virtual async ValueTask ExecuteReader(bool async, CommandBehavior behavior, CancellationToken cancellationToken) { var conn = CheckAndGetConnection(); _behavior = behavior; diff --git a/src/Npgsql/NpgsqlConnection.cs b/src/Npgsql/NpgsqlConnection.cs index 53e2afe5b0..cb15be2792 100644 --- a/src/Npgsql/NpgsqlConnection.cs +++ b/src/Npgsql/NpgsqlConnection.cs @@ -165,11 +165,7 @@ internal static NpgsqlConnection FromDataSource(NpgsqlDataSource dataSource) /// An optional token to cancel the asynchronous operation. The default value is . /// /// A task representing the asynchronous operation. - public override Task OpenAsync(CancellationToken cancellationToken) - { - using (NoSynchronizationContextScope.Enter()) - return Open(true, cancellationToken); - } + public override Task OpenAsync(CancellationToken cancellationToken) => Open(async: true, cancellationToken); void SetupDataSource() { @@ -309,7 +305,7 @@ async Task OpenAsync(bool async, CancellationToken cancellationToken) enlistToTransaction = null; } else - connector = await _dataSource.Get(this, timeout, async, cancellationToken); + connector = await _dataSource.Get(this, timeout, async, cancellationToken).ConfigureAwait(false); Debug.Assert(connector.Connection is null, $"Connection for opened connector '{Connector?.Id.ToString() ?? 
"???"}' is bound to another connection"); @@ -347,7 +343,7 @@ async Task PerformMultiplexingStartupCheck(bool async, CancellationToken cancell { var timeout = new NpgsqlTimeout(TimeSpan.FromSeconds(ConnectionTimeout)); - _ = await StartBindingScope(ConnectorBindingScope.Connection, timeout, async, cancellationToken); + _ = await StartBindingScope(ConnectorBindingScope.Connection, timeout, async, cancellationToken).ConfigureAwait(false); EndBindingScope(ConnectorBindingScope.Connection); LogMessages.OpenedMultiplexingConnection(_connectionLogger, Settings.Host!, Settings.Port, Settings.Database!, _userFacingConnectionString); @@ -651,9 +647,9 @@ public override ConnectionState State /// A object representing the new transaction. /// Nested transactions are not supported. public new NpgsqlTransaction BeginTransaction(IsolationLevel level) - => BeginTransaction(level, async: false, CancellationToken.None).GetAwaiter().GetResult(); + => BeginTransaction(async: false, level, CancellationToken.None).GetAwaiter().GetResult(); - async ValueTask BeginTransaction(IsolationLevel level, bool async, CancellationToken cancellationToken) + async ValueTask BeginTransaction(bool async, IsolationLevel level, CancellationToken cancellationToken) { if (level == IsolationLevel.Chaos) ThrowHelper.ThrowNotSupportedException($"Unsupported IsolationLevel: {nameof(IsolationLevel.Chaos)}"); @@ -665,7 +661,7 @@ async ValueTask BeginTransaction(IsolationLevel level, bool a // There was a committed/rolled back transaction, but it was not disposed var connector = ConnectorBindingScope == ConnectorBindingScope.Transaction ? 
Connector - : await StartBindingScope(ConnectorBindingScope.Transaction, NpgsqlTimeout.Infinite, async, cancellationToken); + : await StartBindingScope(ConnectorBindingScope.Transaction, NpgsqlTimeout.Infinite, async, cancellationToken).ConfigureAwait(false); Debug.Assert(connector != null); @@ -699,7 +695,7 @@ async ValueTask BeginTransaction(IsolationLevel level, bool a /// Nested transactions are not supported. /// protected override async ValueTask BeginDbTransactionAsync(IsolationLevel isolationLevel, CancellationToken cancellationToken) - => await BeginTransactionAsync(isolationLevel, cancellationToken); + => await BeginTransactionAsync(isolationLevel, cancellationToken).ConfigureAwait(false); /// /// Asynchronously begins a database transaction. @@ -727,10 +723,7 @@ protected override async ValueTask BeginDbTransactionAsync(Isolat /// Nested transactions are not supported. /// public new ValueTask BeginTransactionAsync(IsolationLevel level, CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return BeginTransaction(level, async: true, cancellationToken); - } + => BeginTransaction(async: true, level, cancellationToken); #endif /// @@ -804,10 +797,7 @@ public Task CloseAsync() #else public override Task CloseAsync() #endif - { - using (NoSynchronizationContextScope.Enter()) - return Close(async: true); - } + => Close(async: true); internal bool TakeCloseLock() => Interlocked.Exchange(ref _closing, 1) == 0; @@ -871,7 +861,7 @@ async Task CloseAsync(bool async) if (connector.CurrentReader != null || connector.CurrentCopyOperation != null) { // This method could re-enter connection.Close() due to an underlying connection failure. 
- await connector.CloseOngoingOperations(async); + await connector.CloseOngoingOperations(async).ConfigureAwait(false); if (ConnectorBindingScope == ConnectorBindingScope.None) { @@ -909,7 +899,7 @@ async Task CloseAsync(bool async) // Clear the buffer, roll back any pending transaction and prepend a reset message if needed // Also returns the connector to the pool, if there is an open transaction and multiplexing is on // Note that we're doing this only for pooled connections - await connector.Reset(async); + await connector.Reset(async).ConfigureAwait(false); } else { @@ -961,23 +951,16 @@ protected override void Dispose(bool disposing) /// Releases all resources used by the . /// #if NETSTANDARD2_0 - public ValueTask DisposeAsync() + public async ValueTask DisposeAsync() #else - public override ValueTask DisposeAsync() + public override async ValueTask DisposeAsync() #endif { - using (NoSynchronizationContextScope.Enter()) - return DisposeAsyncCore(); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - async ValueTask DisposeAsyncCore() - { - if (_disposed) - return; + if (_disposed) + return; - await CloseAsync(); - _disposed = true; - } + await CloseAsync().ConfigureAwait(false); + _disposed = true; } internal void MakeDisposed() @@ -1161,7 +1144,7 @@ public IReadOnlyDictionary PostgresParameters /// See https://www.postgresql.org/docs/current/static/sql-copy.html. /// public NpgsqlBinaryImporter BeginBinaryImport(string copyFromCommand) - => BeginBinaryImport(copyFromCommand, async: false, CancellationToken.None).GetAwaiter().GetResult(); + => BeginBinaryImport(async: false, copyFromCommand, CancellationToken.None).GetAwaiter().GetResult(); /// /// Begins a binary COPY FROM STDIN operation, a high-performance data import mechanism to a PostgreSQL table. @@ -1173,12 +1156,9 @@ public NpgsqlBinaryImporter BeginBinaryImport(string copyFromCommand) /// See https://www.postgresql.org/docs/current/static/sql-copy.html. 
/// public Task BeginBinaryImportAsync(string copyFromCommand, CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return BeginBinaryImport(copyFromCommand, async: true, cancellationToken); - } + => BeginBinaryImport(async: true, copyFromCommand, cancellationToken); - async Task BeginBinaryImport(string copyFromCommand, bool async, CancellationToken cancellationToken = default) + async Task BeginBinaryImport(bool async, string copyFromCommand, CancellationToken cancellationToken = default) { if (copyFromCommand == null) throw new ArgumentNullException(nameof(copyFromCommand)); @@ -1194,7 +1174,7 @@ async Task BeginBinaryImport(string copyFromCommand, bool try { var importer = new NpgsqlBinaryImporter(connector); - await importer.Init(copyFromCommand, async, cancellationToken); + await importer.Init(copyFromCommand, async, cancellationToken).ConfigureAwait(false); connector.CurrentCopyOperation = importer; return importer; } @@ -1215,7 +1195,7 @@ async Task BeginBinaryImport(string copyFromCommand, bool /// See https://www.postgresql.org/docs/current/static/sql-copy.html. /// public NpgsqlBinaryExporter BeginBinaryExport(string copyToCommand) - => BeginBinaryExport(copyToCommand, async: false, CancellationToken.None).GetAwaiter().GetResult(); + => BeginBinaryExport(async: false, copyToCommand, CancellationToken.None).GetAwaiter().GetResult(); /// /// Begins a binary COPY TO STDOUT operation, a high-performance data export mechanism from a PostgreSQL table. @@ -1227,12 +1207,9 @@ public NpgsqlBinaryExporter BeginBinaryExport(string copyToCommand) /// See https://www.postgresql.org/docs/current/static/sql-copy.html. 
/// public Task BeginBinaryExportAsync(string copyToCommand, CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return BeginBinaryExport(copyToCommand, async: true, cancellationToken); - } + => BeginBinaryExport(async: true, copyToCommand, cancellationToken); - async Task BeginBinaryExport(string copyToCommand, bool async, CancellationToken cancellationToken = default) + async Task BeginBinaryExport(bool async, string copyToCommand, CancellationToken cancellationToken = default) { if (copyToCommand == null) throw new ArgumentNullException(nameof(copyToCommand)); @@ -1248,7 +1225,7 @@ async Task BeginBinaryExport(string copyToCommand, bool as try { var exporter = new NpgsqlBinaryExporter(connector); - await exporter.Init(copyToCommand, async, cancellationToken); + await exporter.Init(copyToCommand, async, cancellationToken).ConfigureAwait(false); connector.CurrentCopyOperation = exporter; return exporter; } @@ -1272,7 +1249,7 @@ async Task BeginBinaryExport(string copyToCommand, bool as /// See https://www.postgresql.org/docs/current/static/sql-copy.html. /// public TextWriter BeginTextImport(string copyFromCommand) - => BeginTextImport(copyFromCommand, async: false, CancellationToken.None).GetAwaiter().GetResult(); + => BeginTextImport(async: false, copyFromCommand, CancellationToken.None).GetAwaiter().GetResult(); /// /// Begins a textual COPY FROM STDIN operation, a data import mechanism to a PostgreSQL table. @@ -1287,12 +1264,9 @@ public TextWriter BeginTextImport(string copyFromCommand) /// See https://www.postgresql.org/docs/current/static/sql-copy.html. 
/// public Task BeginTextImportAsync(string copyFromCommand, CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return BeginTextImport(copyFromCommand, async: true, cancellationToken); - } + => BeginTextImport(async: true, copyFromCommand, cancellationToken); - async Task BeginTextImport(string copyFromCommand, bool async, CancellationToken cancellationToken = default) + async Task BeginTextImport(bool async, string copyFromCommand, CancellationToken cancellationToken = default) { if (copyFromCommand == null) throw new ArgumentNullException(nameof(copyFromCommand)); @@ -1308,7 +1282,7 @@ async Task BeginTextImport(string copyFromCommand, bool async, Cance try { var copyStream = new NpgsqlRawCopyStream(connector); - await copyStream.Init(copyFromCommand, async, cancellationToken); + await copyStream.Init(copyFromCommand, async, cancellationToken).ConfigureAwait(false); var writer = new NpgsqlCopyTextWriter(connector, copyStream); connector.CurrentCopyOperation = writer; return writer; @@ -1333,7 +1307,7 @@ async Task BeginTextImport(string copyFromCommand, bool async, Cance /// See https://www.postgresql.org/docs/current/static/sql-copy.html. /// public TextReader BeginTextExport(string copyToCommand) - => BeginTextExport(copyToCommand, async: false, CancellationToken.None).GetAwaiter().GetResult(); + => BeginTextExport(async: false, copyToCommand, CancellationToken.None).GetAwaiter().GetResult(); /// /// Begins a textual COPY TO STDOUT operation, a data export mechanism from a PostgreSQL table. @@ -1348,12 +1322,9 @@ public TextReader BeginTextExport(string copyToCommand) /// See https://www.postgresql.org/docs/current/static/sql-copy.html. 
/// public Task BeginTextExportAsync(string copyToCommand, CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return BeginTextExport(copyToCommand, async: true, cancellationToken); - } + => BeginTextExport(async: true, copyToCommand, cancellationToken); - async Task BeginTextExport(string copyToCommand, bool async, CancellationToken cancellationToken = default) + async Task BeginTextExport(bool async, string copyToCommand, CancellationToken cancellationToken = default) { if (copyToCommand == null) throw new ArgumentNullException(nameof(copyToCommand)); @@ -1369,7 +1340,7 @@ async Task BeginTextExport(string copyToCommand, bool async, Cancell try { var copyStream = new NpgsqlRawCopyStream(connector); - await copyStream.Init(copyToCommand, async, cancellationToken); + await copyStream.Init(copyToCommand, async, cancellationToken).ConfigureAwait(false); var reader = new NpgsqlCopyTextReader(connector, copyStream); connector.CurrentCopyOperation = reader; return reader; @@ -1394,7 +1365,7 @@ async Task BeginTextExport(string copyToCommand, bool async, Cancell /// See https://www.postgresql.org/docs/current/static/sql-copy.html. /// public NpgsqlRawCopyStream BeginRawBinaryCopy(string copyCommand) - => BeginRawBinaryCopy(copyCommand, async: false, CancellationToken.None).GetAwaiter().GetResult(); + => BeginRawBinaryCopy(async: false, copyCommand, CancellationToken.None).GetAwaiter().GetResult(); /// /// Begins a raw binary COPY operation (TO STDOUT or FROM STDIN), a high-performance data export/import mechanism to a PostgreSQL table. @@ -1409,12 +1380,9 @@ public NpgsqlRawCopyStream BeginRawBinaryCopy(string copyCommand) /// See https://www.postgresql.org/docs/current/static/sql-copy.html. 
/// public Task BeginRawBinaryCopyAsync(string copyCommand, CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return BeginRawBinaryCopy(copyCommand, async: true, cancellationToken); - } + => BeginRawBinaryCopy(async: true, copyCommand, cancellationToken); - async Task BeginRawBinaryCopy(string copyCommand, bool async, CancellationToken cancellationToken = default) + async Task BeginRawBinaryCopy(bool async, string copyCommand, CancellationToken cancellationToken = default) { if (copyCommand == null) throw new ArgumentNullException(nameof(copyCommand)); @@ -1430,7 +1398,7 @@ async Task BeginRawBinaryCopy(string copyCommand, bool asyn try { var stream = new NpgsqlRawCopyStream(connector); - await stream.Init(copyCommand, async, cancellationToken); + await stream.Init(copyCommand, async, cancellationToken).ConfigureAwait(false); if (!stream.IsBinary) { // TODO: Stop the COPY operation gracefully, no breaking @@ -1524,8 +1492,7 @@ public Task WaitAsync(int timeout, CancellationToken cancellationToken = d CheckReady(); LogMessages.StartingWait(_connectionLogger, timeout, Connector!.Id); - using (NoSynchronizationContextScope.Enter()) - return Connector!.Wait(async: true, timeout, cancellationToken); + return Connector!.Wait(async: true, timeout, cancellationToken); } /// @@ -1676,7 +1643,7 @@ async ValueTask StartBindingScopeAsync() Debug.Assert(Settings.Multiplexing); Debug.Assert(_dataSource != null); - var connector = await _dataSource.Get(this, timeout, async, cancellationToken); + var connector = await _dataSource.Get(this, timeout, async, cancellationToken).ConfigureAwait(false); Connector = connector; connector.Connection = this; ConnectorBindingScope = scope; @@ -1766,7 +1733,7 @@ public override DataTable GetSchema() /// /// The collection specified. public override DataTable GetSchema(string? collectionName, string?[]? 
restrictions) - => NpgsqlSchema.GetSchema(this, collectionName, restrictions, async: false).GetAwaiter().GetResult(); + => NpgsqlSchema.GetSchema(async: false, this, collectionName, restrictions).GetAwaiter().GetResult(); /// /// Asynchronously returns the supported collections. @@ -1815,8 +1782,7 @@ public override Task GetSchemaAsync(string collectionName, string?[]? public Task GetSchemaAsync(string collectionName, string?[]? restrictions, CancellationToken cancellationToken = default) #endif { - using (NoSynchronizationContextScope.Enter()) - return NpgsqlSchema.GetSchema(this, collectionName, restrictions, async: true, cancellationToken); + return NpgsqlSchema.GetSchema(async: true, this, collectionName, restrictions, cancellationToken); } #endregion Schema operations @@ -1964,11 +1930,11 @@ public async Task ReloadTypesAsync() using var scope = StartTemporaryBindingScope(out var connector); await _dataSource!.Bootstrap( - connector, - NpgsqlTimeout.Infinite, - forceReload: true, - async: true, - CancellationToken.None); + connector, + NpgsqlTimeout.Infinite, + forceReload: true, + async: true, + CancellationToken.None).ConfigureAwait(false); } /// diff --git a/src/Npgsql/NpgsqlDataAdapter.cs b/src/Npgsql/NpgsqlDataAdapter.cs index 0c8e0822ce..1e1b12a411 100644 --- a/src/Npgsql/NpgsqlDataAdapter.cs +++ b/src/Npgsql/NpgsqlDataAdapter.cs @@ -150,17 +150,17 @@ internal async Task Fill(DataTable dataTable, bool async, CancellationToken { originalState = activeConnection.State; if (ConnectionState.Closed == originalState) - await activeConnection.Open(async, cancellationToken); + await activeConnection.Open(async, cancellationToken).ConfigureAwait(false); - var dataReader = await command.ExecuteReader(CommandBehavior.Default, async, cancellationToken); + var dataReader = await command.ExecuteReader(async, CommandBehavior.Default, cancellationToken).ConfigureAwait(false); try { - return await Fill(dataTable, dataReader, async, cancellationToken); + return await 
Fill(dataTable, dataReader, async, cancellationToken).ConfigureAwait(false); } finally { if (async) - await dataReader.DisposeAsync(); + await dataReader.DisposeAsync().ConfigureAwait(false); else dataReader.Dispose(); } @@ -193,7 +193,7 @@ async Task Fill(DataTable dataTable, NpgsqlDataReader dataReader, bool asyn var values = new object[count]; - while (async ? await dataReader.ReadAsync(cancellationToken) : dataReader.Read()) + while (async ? await dataReader.ReadAsync(cancellationToken).ConfigureAwait(false) : dataReader.Read()) { dataReader.GetValues(values); dataTable.LoadDataRow(values, true); @@ -224,4 +224,4 @@ public NpgsqlRowUpdatedEventArgs(DataRow dataRow, IDbCommand? command, System.Da : base(dataRow, command, statementType, tableMapping) {} } -#pragma warning restore 1591 \ No newline at end of file +#pragma warning restore 1591 diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 57c358b3bf..d53dd3023f 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -186,8 +186,7 @@ public override Task ReadAsync(CancellationToken cancellationToken) if (fastRead.HasValue) return fastRead.Value ? TrueTask : FalseTask; - using (NoSynchronizationContextScope.Enter()) - return Read(true, cancellationToken); + return Read(async: true, cancellationToken); } bool? 
TryFastRead() @@ -247,11 +246,11 @@ async Task Read(bool async, CancellationToken cancellationToken = default) return true; case ReaderState.InResult: - await ConsumeRow(async); + await ConsumeRow(async).ConfigureAwait(false); if (_behavior.HasFlag(CommandBehavior.SingleRow)) { // TODO: See optimization proposal in #410 - await Consume(async); + await Consume(async).ConfigureAwait(false); return false; } break; @@ -266,7 +265,7 @@ async Task Read(bool async, CancellationToken cancellationToken = default) return false; } - var msg = await ReadMessage(async); + var msg = await ReadMessage(async).ConfigureAwait(false); switch (msg.Code) { @@ -278,7 +277,7 @@ async Task Read(bool async, CancellationToken cancellationToken = default) case BackendMessageCode.EmptyQueryResponse: ProcessMessage(msg); if (_statements[StatementIndex].AppendErrorBarrier ?? Command.EnableErrorBarriers) - Expect(await Connector.ReadMessage(async), Connector); + Expect(await Connector.ReadMessage(async).ConfigureAwait(false), Connector); return false; default: @@ -300,11 +299,11 @@ ValueTask ReadMessage(bool async) static async ValueTask ReadMessageSequential(NpgsqlConnector connector, bool async) { - var msg = await connector.ReadMessage(async, DataRowLoadingMode.Sequential); + var msg = await connector.ReadMessage(async, DataRowLoadingMode.Sequential).ConfigureAwait(false); if (msg.Code == BackendMessageCode.DataRow) { // Make sure that the datarow's column count is already buffered - await connector.ReadBuffer.Ensure(2, async); + await connector.ReadBuffer.Ensure(2, async).ConfigureAwait(false); return msg; } return msg; @@ -330,13 +329,9 @@ public override bool NextResult() => (_isSchemaOnly ? NextResultSchemaOnly(false /// /// A task representing the asynchronous operation. public override Task NextResultAsync(CancellationToken cancellationToken) - { - using var _ = NoSynchronizationContextScope.Enter(); - - return _isSchemaOnly + => _isSchemaOnly ? 
NextResultSchemaOnly(async: true, cancellationToken: cancellationToken) : NextResult(async: true, cancellationToken: cancellationToken); - } /// /// Internal implementation of NextResult @@ -358,10 +353,10 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo { case ReaderState.BeforeResult: case ReaderState.InResult: - await ConsumeRow(async); + await ConsumeRow(async).ConfigureAwait(false); while (true) { - var completedMsg = await Connector.ReadMessage(async, DataRowLoadingMode.Skip); + var completedMsg = await Connector.ReadMessage(async, DataRowLoadingMode.Skip).ConfigureAwait(false); switch (completedMsg.Code) { case BackendMessageCode.CommandComplete: @@ -373,7 +368,7 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo RowDescription!.SetConverterInfoCache(new(ColumnInfoCache, 0, _numColumns)); if (statement.AppendErrorBarrier ?? Command.EnableErrorBarriers) - Expect(await Connector.ReadMessage(async), Connector); + Expect(await Connector.ReadMessage(async).ConfigureAwait(false), Connector); break; @@ -406,7 +401,7 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo if (_behavior.HasFlag(CommandBehavior.SingleResult) && StatementIndex == 0 && !isConsuming) { - await Consume(async); + await Consume(async).ConfigureAwait(false); return false; } @@ -419,7 +414,7 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo if (statement.TryGetPrepared(out var preparedStatement)) { - Expect(await Connector.ReadMessage(async), Connector); + Expect(await Connector.ReadMessage(async).ConfigureAwait(false), Connector); RowDescription = preparedStatement.Description; } else // Non-prepared/preparing flow @@ -430,13 +425,13 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo Debug.Assert(!preparedStatement.IsPrepared); if (preparedStatement.StatementBeingReplaced != null) { - Expect(await Connector.ReadMessage(async), Connector); + Expect(await 
Connector.ReadMessage(async).ConfigureAwait(false), Connector); preparedStatement.StatementBeingReplaced.CompleteUnprepare(); preparedStatement.StatementBeingReplaced = null; } } - Expect(await Connector.ReadMessage(async), Connector); + Expect(await Connector.ReadMessage(async).ConfigureAwait(false), Connector); if (statement.IsPreparing) { @@ -445,8 +440,8 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo statement.IsPreparing = false; } - Expect(await Connector.ReadMessage(async), Connector); - msg = await Connector.ReadMessage(async); + Expect(await Connector.ReadMessage(async).ConfigureAwait(false), Connector); + msg = await Connector.ReadMessage(async).ConfigureAwait(false); RowDescription = statement.Description = msg.Code switch { @@ -480,7 +475,7 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo // Statement did not generate a resultset (e.g. INSERT) // Read and process its completion message and move on to the next statement // No need to read sequentially as it's not a DataRow - msg = await Connector.ReadMessage(async); + msg = await Connector.ReadMessage(async).ConfigureAwait(false); switch (msg.Code) { case BackendMessageCode.CommandComplete: @@ -500,7 +495,7 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo ProcessMessage(msg); if (statement.AppendErrorBarrier ?? Command.EnableErrorBarriers) - Expect(await Connector.ReadMessage(async), Connector); + Expect(await Connector.ReadMessage(async).ConfigureAwait(false), Connector); continue; } @@ -510,14 +505,14 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo // If output parameters are present and this is the first row of the first resultset, // we must always read it in non-sequential mode because it will be traversed twice (once // here for the parameters, then as a regular row). 
- msg = await Connector.ReadMessage(async); + msg = await Connector.ReadMessage(async).ConfigureAwait(false); ProcessMessage(msg); if (msg.Code == BackendMessageCode.DataRow) PopulateOutputParameters(); } else { - msg = await ReadMessage(async); + msg = await ReadMessage(async).ConfigureAwait(false); ProcessMessage(msg); } @@ -528,7 +523,7 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo return true; case BackendMessageCode.CommandComplete: if (statement.AppendErrorBarrier ?? Command.EnableErrorBarriers) - Expect(await Connector.ReadMessage(async), Connector); + Expect(await Connector.ReadMessage(async).ConfigureAwait(false), Connector); return true; default: throw Connector.UnexpectedMessageReceived(msg.Code); @@ -537,7 +532,7 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo // There are no more queries, we're done. Read the RFQ. if (_statements.Count == 0 || !(_statements[_statements.Count - 1].AppendErrorBarrier ?? Command.EnableErrorBarriers)) - Expect(await Connector.ReadMessage(async), Connector); + Expect(await Connector.ReadMessage(async).ConfigureAwait(false), Connector); State = ReaderState.Consumed; RowDescription = null; @@ -600,7 +595,7 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo // We provide Consume with the first exception which we've just caught. // If it encounters other exceptions while consuming the rest of the result set, it will raise an AggregateException, // otherwise it will rethrow this first exception. 
- await Consume(async, firstException: e); + await Consume(async, firstException: e).ConfigureAwait(false); break; // Never reached, Consume always throws above } } @@ -704,13 +699,13 @@ async Task NextResultSchemaOnly(bool async, bool isConsuming = false, Canc Debug.Assert(!pStatement.IsPrepared); if (pStatement.StatementBeingReplaced != null) { - Expect(await Connector.ReadMessage(async), Connector); + Expect(await Connector.ReadMessage(async).ConfigureAwait(false), Connector); pStatement.StatementBeingReplaced.CompleteUnprepare(); pStatement.StatementBeingReplaced = null; } } - Expect(await Connector.ReadMessage(async), Connector); + Expect(await Connector.ReadMessage(async).ConfigureAwait(false), Connector); if (statement.IsPreparing) { @@ -719,8 +714,8 @@ async Task NextResultSchemaOnly(bool async, bool isConsuming = false, Canc statement.IsPreparing = false; } - Expect(await Connector.ReadMessage(async), Connector); - var msg = await Connector.ReadMessage(async); + Expect(await Connector.ReadMessage(async).ConfigureAwait(false), Connector); + var msg = await Connector.ReadMessage(async).ConfigureAwait(false); switch (msg.Code) { case BackendMessageCode.NoData: @@ -744,7 +739,7 @@ async Task NextResultSchemaOnly(bool async, bool isConsuming = false, Canc } // There are no more queries, we're done. Read to the RFQ. if (forall) - Expect(await Connector.ReadMessage(async), Connector); + Expect(await Connector.ReadMessage(async).ConfigureAwait(false), Connector); } // Found a resultset @@ -994,8 +989,8 @@ async Task Consume(bool async, Exception? firstException = null) try { if (!(_isSchemaOnly - ? await NextResultSchemaOnly(async, isConsuming: true) - : await NextResult(async, isConsuming: true))) + ? 
await NextResultSchemaOnly(async, isConsuming: true).ConfigureAwait(false) + : await NextResult(async, isConsuming: true).ConfigureAwait(false))) { break; } @@ -1056,39 +1051,31 @@ protected override void Dispose(bool disposing) /// Releases the resources used by the . /// #if NETSTANDARD2_0 - public ValueTask DisposeAsync() + public async ValueTask DisposeAsync() #else - public override ValueTask DisposeAsync() + public override async ValueTask DisposeAsync() #endif { - using (NoSynchronizationContextScope.Enter()) - return DisposeAsyncCore(); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - async ValueTask DisposeAsyncCore() + try { - try - { - await Close(connectionClosing: false, async: true, isDisposing: true); - } - catch (Exception ex) - { - // In the case of a PostgresException (or multiple ones, if we have error barriers), the reader's state has already been set - // to Disposed in Close above; in multiplexing, we also unbind the connector (with its reader), and at that point it can be used - // by other consumers. Therefore, we only set the state fo Disposed if the exception *wasn't* a PostgresException. - if (!(ex is PostgresException || - ex is NpgsqlException { InnerException: AggregateException aggregateException } && - AllPostgresExceptions(aggregateException.InnerExceptions))) - { - State = ReaderState.Disposed; - } - - throw; - } - finally + await Close(connectionClosing: false, async: true, isDisposing: true).ConfigureAwait(false); + } + catch (Exception ex) + { + // In the case of a PostgresException (or multiple ones, if we have error barriers), the reader's state has already been set + // to Disposed in Close above; in multiplexing, we also unbind the connector (with its reader), and at that point it can be used + // by other consumers. Therefore, we only set the state to Disposed if the exception *wasn't* a PostgresException. 
+ if (!(ex is PostgresException || + ex is NpgsqlException { InnerException: AggregateException aggregateException } && + AllPostgresExceptions(aggregateException.InnerExceptions))) { - Command.TraceCommandStop(); + State = ReaderState.Disposed; } + throw; + } + finally + { + Command.TraceCommandStop(); } } @@ -1113,12 +1100,9 @@ public Task CloseAsync() #else public override Task CloseAsync() #endif - { - using (NoSynchronizationContextScope.Enter()) - return Close(connectionClosing: false, async: true, isDisposing: false); - } + => Close(async: true, connectionClosing: false, isDisposing: false); - internal async Task Close(bool connectionClosing, bool async, bool isDisposing) + internal async Task Close(bool async, bool connectionClosing, bool isDisposing) { if (State is ReaderState.Closed or ReaderState.Disposed) { @@ -1140,7 +1124,7 @@ internal async Task Close(bool connectionClosing, bool async, bool isDisposing) { try { - await Consume(async); + await Consume(async).ConfigureAwait(false); } catch (Exception ex) when (ex is OperationCanceledException or NpgsqlException { InnerException: TimeoutException }) { @@ -1153,7 +1137,7 @@ ex is PostgresException || { // In the case of a PostgresException (or multiple ones, if we have error barriers), the connection is fine and consume // has basically completed. Defer throwing the exception until Cleanup is complete. 
- await Cleanup(async, connectionClosing, isDisposing); + await Cleanup(async, connectionClosing, isDisposing).ConfigureAwait(false); throw; } catch @@ -1175,7 +1159,7 @@ ex is PostgresException || throw new ArgumentOutOfRangeException(); } - await Cleanup(async, connectionClosing, isDisposing); + await Cleanup(async, connectionClosing, isDisposing).ConfigureAwait(false); } internal async Task Cleanup(bool async, bool connectionClosing = false, bool isDisposing = false) @@ -1202,7 +1186,7 @@ internal async Task Cleanup(bool async, bool connectionClosing = false, bool isD try { if (async) - await _sendTask; + await _sendTask.ConfigureAwait(false); else _sendTask.GetAwaiter().GetResult(); } @@ -1585,8 +1569,7 @@ public override Task GetFieldValueAsync(int ordinal, CancellationToken can if (!_isSequential) return Task.FromResult(GetFieldValueCore(ordinal)); - using (NoSynchronizationContextScope.Enter()) - return Core(ordinal, cancellationToken).AsTask(); + return Core(ordinal, cancellationToken).AsTask(); async ValueTask Core(int ordinal, CancellationToken cancellationToken) { @@ -1594,7 +1577,7 @@ async ValueTask Core(int ordinal, CancellationToken cancellationToken) var isStream = typeof(T) == typeof(Stream); var field = GetInfo(ordinal, isStream ? null : typeof(T), out var converter, out var bufferRequirement, out var asObject); - var columnLength = await SeekToColumn(async: true, ordinal, field); + var columnLength = await SeekToColumn(async: true, ordinal, field).ConfigureAwait(false); if (columnLength == -1) return DbNullValueOrThrow(field); @@ -1608,11 +1591,11 @@ async ValueTask Core(int ordinal, CancellationToken cancellationToken) } Debug.Assert(asObject || converter is PgConverter); - await PgReader.StartReadAsync(bufferRequirement, cancellationToken); + await PgReader.StartReadAsync(bufferRequirement, cancellationToken).ConfigureAwait(false); var result = asObject - ? 
(T)await converter.ReadAsObjectAsync(PgReader, cancellationToken) - : await Unsafe.As>(converter).ReadAsync(PgReader, cancellationToken); - await PgReader.EndReadAsync(); + ? (T)await converter.ReadAsObjectAsync(PgReader, cancellationToken).ConfigureAwait(false) + : await Unsafe.As>(converter).ReadAsync(PgReader, cancellationToken).ConfigureAwait(false); + await PgReader.EndReadAsync().ConfigureAwait(false); return result; } } @@ -1716,13 +1699,12 @@ public override Task IsDBNullAsync(int ordinal, CancellationToken cancella if (!_isSequential) return IsDBNull(ordinal) ? TrueTask : FalseTask; - using (NoSynchronizationContextScope.Enter()) - return Core(ordinal, cancellationToken); + return Core(ordinal, cancellationToken); async Task Core(int ordinal, CancellationToken cancellationToken) { using var registration = Connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); - return await SeekToColumn(async: true, ordinal, CheckRowAndGetField(ordinal), resumableOp: true) is -1; + return await SeekToColumn(async: true, ordinal, CheckRowAndGetField(ordinal), resumableOp: true).ConfigureAwait(false) is -1; } } @@ -1813,10 +1795,7 @@ ReadOnlyCollection IDbColumnSchemaGenerator.GetColumnSchema() #else public Task> GetColumnSchemaAsync(CancellationToken cancellationToken = default) #endif - { - using (NoSynchronizationContextScope.Enter()) - return GetColumnSchema(async: true, cancellationToken); - } + => GetColumnSchema(async: true, cancellationToken); Task> GetColumnSchema(bool async, CancellationToken cancellationToken = default) => RowDescription == null || RowDescription.Count == 0 @@ -1846,10 +1825,7 @@ Task> GetColumnSchema(bool async, Cancellatio #else public Task GetSchemaTableAsync(CancellationToken cancellationToken = default) #endif - { - using (NoSynchronizationContextScope.Enter()) - return GetSchemaTable(async: true, cancellationToken); - } + => GetSchemaTable(async: true, cancellationToken); [UnconditionalSuppressMessage( 
"Composite type mapping currently isn't trimming-safe, and warnings are generated at the MapComposite level.", "IL2026")] @@ -1888,7 +1864,7 @@ Task> GetColumnSchema(bool async, Cancellatio table.Columns.Add("ProviderSpecificDataType", typeof(Type)); table.Columns.Add("DataTypeName", typeof(string)); - foreach (var column in await GetColumnSchema(async, cancellationToken)) + foreach (var column in await GetColumnSchema(async, cancellationToken).ConfigureAwait(false)) { var row = table.NewRow(); @@ -1998,7 +1974,7 @@ async ValueTask Core(bool async, bool commit, int ordinal, DataFormat dataF if (commit) { Debug.Assert(ordinal != _column); - await PgReader.Commit(async, reread); + await PgReader.Commit(async, reread).ConfigureAwait(false); } if (ordinal == _column) @@ -2011,13 +1987,13 @@ async ValueTask Core(bool async, bool commit, int ordinal, DataFormat dataF var buffer = Buffer; for (; _column < ordinal - 1; _column++) { - await buffer.Ensure(4, async); + await buffer.Ensure(4, async).ConfigureAwait(false); var len = buffer.ReadInt32(); if (len != -1) - await buffer.Skip(len, async); + await buffer.Skip(len, async).ConfigureAwait(false); } - await buffer.Ensure(4, async); + await buffer.Ensure(4, async).ConfigureAwait(false); var columnLength = buffer.ReadInt32(); _column = ordinal; @@ -2078,15 +2054,15 @@ Task ConsumeRow(bool async) async Task ConsumeRowSequential(bool async) { - await PgReader.Commit(async, resuming: false); + await PgReader.Commit(async, resuming: false).ConfigureAwait(false); // Skip over the remaining columns in the row for (; _column < _numColumns - 1; _column++) { - await Buffer.Ensure(4, async); + await Buffer.Ensure(4, async).ConfigureAwait(false); var len = Buffer.ReadInt32(); if (len != -1) - await Buffer.Skip(len, async); + await Buffer.Skip(len, async).ConfigureAwait(false); } } } diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index 8aa07ba0ca..c9d5619c4c 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ 
b/src/Npgsql/NpgsqlDataSource.cs @@ -174,7 +174,7 @@ protected override DbConnection OpenDbConnection() /// protected override async ValueTask OpenDbConnectionAsync(CancellationToken cancellationToken = default) - => await OpenConnectionAsync(cancellationToken); + => await OpenConnectionAsync(cancellationToken).ConfigureAwait(false); /// protected override DbConnection CreateDbConnection() @@ -224,7 +224,7 @@ internal async Task Bootstrap( return; var hasSemaphore = async - ? await _setupMappingsSemaphore.WaitAsync(timeout.CheckAndGetTimeLeft(), cancellationToken) + ? await _setupMappingsSemaphore.WaitAsync(timeout.CheckAndGetTimeLeft(), cancellationToken).ConfigureAwait(false) : _setupMappingsSemaphore.Wait(timeout.CheckAndGetTimeLeft(), cancellationToken); if (!hasSemaphore) @@ -247,7 +247,7 @@ internal async Task Bootstrap( NpgsqlDatabaseInfo databaseInfo; using (connector.StartUserAction(ConnectorState.Executing, cancellationToken)) - databaseInfo = await NpgsqlDatabaseInfo.Load(connector, timeout, async); + databaseInfo = await NpgsqlDatabaseInfo.Load(connector, timeout, async).ConfigureAwait(false); connector.DatabaseInfo = DatabaseInfo = databaseInfo; connector.SerializerOptions = SerializerOptions = @@ -304,7 +304,7 @@ public string Password if (_password is null && _periodicPasswordProvider is not null) { if (async) - await _passwordRefreshTask; + await _passwordRefreshTask.ConfigureAwait(false); else _passwordRefreshTask.GetAwaiter().GetResult(); @@ -318,7 +318,7 @@ async Task RefreshPassword() { try { - _password = await _periodicPasswordProvider!(Settings, _timerPasswordProviderCancellationTokenSource!.Token); + _password = await _periodicPasswordProvider!(Settings, _timerPasswordProviderCancellationTokenSource!.Token).ConfigureAwait(false); _passwordProviderTimer!.Change(_periodicPasswordSuccessRefreshInterval, Timeout.InfiniteTimeSpan); } @@ -475,7 +475,7 @@ protected virtual async ValueTask DisposeAsyncBase() if (_passwordProviderTimer is not null) { 
#if NET5_0_OR_GREATER - await _passwordProviderTimer.DisposeAsync(); + await _passwordProviderTimer.DisposeAsync().ConfigureAwait(false); #else _passwordProviderTimer.Dispose(); #endif diff --git a/src/Npgsql/NpgsqlDataSourceCommand.cs b/src/Npgsql/NpgsqlDataSourceCommand.cs index f6a500592d..3ff565de66 100644 --- a/src/Npgsql/NpgsqlDataSourceCommand.cs +++ b/src/Npgsql/NpgsqlDataSourceCommand.cs @@ -21,17 +21,16 @@ internal NpgsqlDataSourceCommand(int batchCommandCapacity, NpgsqlConnection conn } internal override async ValueTask ExecuteReader( - CommandBehavior behavior, - bool async, + bool async, CommandBehavior behavior, CancellationToken cancellationToken) { - await InternalConnection!.Open(async, cancellationToken); + await InternalConnection!.Open(async, cancellationToken).ConfigureAwait(false); try { return await base.ExecuteReader( - behavior | CommandBehavior.CloseConnection, async, + behavior | CommandBehavior.CloseConnection, cancellationToken) .ConfigureAwait(false); } @@ -39,7 +38,7 @@ internal override async ValueTask ExecuteReader( { try { - await InternalConnection.Close(async); + await InternalConnection.Close(async).ConfigureAwait(false); } catch { diff --git a/src/Npgsql/NpgsqlLargeObjectManager.cs b/src/Npgsql/NpgsqlLargeObjectManager.cs index 8f9b4cf6ea..74b30e535e 100644 --- a/src/Npgsql/NpgsqlLargeObjectManager.cs +++ b/src/Npgsql/NpgsqlLargeObjectManager.cs @@ -34,7 +34,7 @@ public NpgsqlLargeObjectManager(NpgsqlConnection connection) /// /// Execute a function /// - internal async Task ExecuteFunction(string function, bool async, CancellationToken cancellationToken, params object[] arguments) + internal async Task ExecuteFunction(bool async, string function, CancellationToken cancellationToken, params object[] arguments) { using var command = Connection.CreateCommand(); var stringBuilder = new StringBuilder("SELECT * FROM ").Append(function).Append('('); @@ -50,7 +50,7 @@ internal async Task ExecuteFunction(string function, bool async, 
Cancellat stringBuilder.Append(')'); command.CommandText = stringBuilder.ToString(); - return (T)(async ? await command.ExecuteScalarAsync(cancellationToken) : command.ExecuteScalar())!; + return (T)(async ? await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false) : command.ExecuteScalar())!; } /// @@ -58,7 +58,7 @@ internal async Task ExecuteFunction(string function, bool async, Cancellat /// /// internal async Task ExecuteFunctionGetBytes( - string function, byte[] buffer, int offset, int len, bool async, CancellationToken cancellationToken, params object[] arguments) + bool async, string function, byte[] buffer, int offset, int len, CancellationToken cancellationToken, params object[] arguments) { using var command = Connection.CreateCommand(); var stringBuilder = new StringBuilder("SELECT * FROM ").Append(function).Append('('); @@ -75,12 +75,12 @@ internal async Task ExecuteFunctionGetBytes( command.CommandText = stringBuilder.ToString(); var reader = async - ? await command.ExecuteReaderAsync(CommandBehavior.SequentialAccess, cancellationToken) + ? 
await command.ExecuteReaderAsync(CommandBehavior.SequentialAccess, cancellationToken).ConfigureAwait(false) : command.ExecuteReader(CommandBehavior.SequentialAccess); try { if (async) - await reader.ReadAsync(cancellationToken); + await reader.ReadAsync(cancellationToken).ConfigureAwait(false); else reader.Read(); @@ -89,7 +89,7 @@ internal async Task ExecuteFunctionGetBytes( finally { if (async) - await reader.DisposeAsync(); + await reader.DisposeAsync().ConfigureAwait(false); else reader.Dispose(); } @@ -117,7 +117,7 @@ public Task CreateAsync(uint preferredOid, CancellationToken cancellationT => Create(preferredOid, true, cancellationToken); Task Create(uint preferredOid, bool async, CancellationToken cancellationToken = default) - => ExecuteFunction("lo_create", async, cancellationToken, (int)preferredOid); + => ExecuteFunction(async, "lo_create", cancellationToken, (int)preferredOid); /// /// Opens a large object on the backend, returning a stream controlling this remote object. @@ -128,7 +128,7 @@ Task Create(uint preferredOid, bool async, CancellationToken cancellationT /// Oid of the object /// An NpgsqlLargeObjectStream public NpgsqlLargeObjectStream OpenRead(uint oid) - => OpenRead(oid, false).GetAwaiter().GetResult(); + => OpenRead(async: false, oid).GetAwaiter().GetResult(); /// /// Opens a large object on the backend, returning a stream controlling this remote object. 
@@ -142,14 +142,11 @@ public NpgsqlLargeObjectStream OpenRead(uint oid) /// /// An NpgsqlLargeObjectStream public Task OpenReadAsync(uint oid, CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return OpenRead(oid, true, cancellationToken); - } + => OpenRead(async: true, oid, cancellationToken); - async Task OpenRead(uint oid, bool async, CancellationToken cancellationToken = default) + async Task OpenRead(bool async, uint oid, CancellationToken cancellationToken = default) { - var fd = await ExecuteFunction("lo_open", async, cancellationToken, (int)oid, InvRead); + var fd = await ExecuteFunction(async, "lo_open", cancellationToken, (int)oid, InvRead).ConfigureAwait(false); return new NpgsqlLargeObjectStream(this, fd, false); } @@ -160,7 +157,7 @@ async Task OpenRead(uint oid, bool async, CancellationT /// Oid of the object /// An NpgsqlLargeObjectStream public NpgsqlLargeObjectStream OpenReadWrite(uint oid) - => OpenReadWrite(oid, false).GetAwaiter().GetResult(); + => OpenReadWrite(async: false, oid).GetAwaiter().GetResult(); /// /// Opens a large object on the backend, returning a stream controlling this remote object. 
@@ -172,14 +169,11 @@ public NpgsqlLargeObjectStream OpenReadWrite(uint oid) /// /// An NpgsqlLargeObjectStream public Task OpenReadWriteAsync(uint oid, CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return OpenReadWrite(oid, true, cancellationToken); - } + => OpenReadWrite(async: true, oid, cancellationToken); - async Task OpenReadWrite(uint oid, bool async, CancellationToken cancellationToken = default) + async Task OpenReadWrite(bool async, uint oid, CancellationToken cancellationToken = default) { - var fd = await ExecuteFunction("lo_open", async, cancellationToken, (int)oid, InvRead | InvWrite); + var fd = await ExecuteFunction(async, "lo_open", cancellationToken, (int)oid, InvRead | InvWrite).ConfigureAwait(false); return new NpgsqlLargeObjectStream(this, fd, true); } @@ -188,7 +182,7 @@ async Task OpenReadWrite(uint oid, bool async, Cancella /// /// Oid of the object to delete public void Unlink(uint oid) - => ExecuteFunction("lo_unlink", false, CancellationToken.None, (int)oid).GetAwaiter().GetResult(); + => ExecuteFunction(async: false, "lo_unlink", CancellationToken.None, (int)oid).GetAwaiter().GetResult(); /// /// Deletes a large object on the backend. @@ -198,10 +192,7 @@ public void Unlink(uint oid) /// An optional token to cancel the asynchronous operation. The default value is . /// public Task UnlinkAsync(uint oid, CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return ExecuteFunction("lo_unlink", true, cancellationToken, (int)oid); - } + => ExecuteFunction(async: true, "lo_unlink", cancellationToken, (int)oid); /// /// Exports a large object stored in the database to a file on the backend. This requires superuser permissions. 
@@ -209,7 +200,7 @@ public Task UnlinkAsync(uint oid, CancellationToken cancellationToken = default) /// Oid of the object to export /// Path to write the file on the backend public void ExportRemote(uint oid, string path) - => ExecuteFunction("lo_export", false, CancellationToken.None, (int)oid, path).GetAwaiter().GetResult(); + => ExecuteFunction(async: false, "lo_export", CancellationToken.None, (int)oid, path).GetAwaiter().GetResult(); /// /// Exports a large object stored in the database to a file on the backend. This requires superuser permissions. @@ -220,10 +211,7 @@ public void ExportRemote(uint oid, string path) /// An optional token to cancel the asynchronous operation. The default value is . /// public Task ExportRemoteAsync(uint oid, string path, CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return ExecuteFunction("lo_export", true, cancellationToken, (int)oid, path); - } + => ExecuteFunction(async: true, "lo_export", cancellationToken, (int)oid, path); /// /// Imports a large object to be stored as a large object in the database from a file stored on the backend. This requires superuser permissions. @@ -231,7 +219,7 @@ public Task ExportRemoteAsync(uint oid, string path, CancellationToken cancellat /// Path to read the file on the backend /// A preferred oid, or specify 0 if one should be automatically assigned public void ImportRemote(string path, uint oid = 0) - => ExecuteFunction("lo_import", false, CancellationToken.None, path, (int)oid).GetAwaiter().GetResult(); + => ExecuteFunction(async: false, "lo_import", CancellationToken.None, path, (int)oid).GetAwaiter().GetResult(); /// /// Imports a large object to be stored as a large object in the database from a file stored on the backend. This requires superuser permissions. @@ -242,14 +230,11 @@ public void ImportRemote(string path, uint oid = 0) /// An optional token to cancel the asynchronous operation. The default value is . 
/// public Task ImportRemoteAsync(string path, uint oid, CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return ExecuteFunction("lo_import", true, cancellationToken, path, (int)oid); - } + => ExecuteFunction(async: true, "lo_import", cancellationToken, path, (int)oid); /// /// Since PostgreSQL 9.3, large objects larger than 2GB can be handled, up to 4TB. /// This property returns true whether the PostgreSQL version is >= 9.3. /// public bool Has64BitSupport => Connection.PostgreSqlVersion.IsGreaterOrEqual(9, 3); -} \ No newline at end of file +} diff --git a/src/Npgsql/NpgsqlLargeObjectStream.cs b/src/Npgsql/NpgsqlLargeObjectStream.cs index 33fe99b5fc..42c757a237 100644 --- a/src/Npgsql/NpgsqlLargeObjectStream.cs +++ b/src/Npgsql/NpgsqlLargeObjectStream.cs @@ -46,7 +46,7 @@ void CheckDisposed() /// The maximum number of bytes that should be read. /// How many bytes actually read, or 0 if end of file was already reached. public override int Read(byte[] buffer, int offset, int count) - => Read(buffer, offset, count, false).GetAwaiter().GetResult(); + => Read(async: false, buffer, offset, count).GetAwaiter().GetResult(); /// /// Reads count bytes from the large object. The only case when fewer bytes are read is when end of stream is reached. @@ -59,12 +59,9 @@ public override int Read(byte[] buffer, int offset, int count) /// /// How many bytes actually read, or 0 if end of file was already reached. 
public override Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) - { - using (NoSynchronizationContextScope.Enter()) - return Read(buffer, offset, count, true, cancellationToken); - } + => Read(async: true, buffer, offset, count, cancellationToken); - async Task Read(byte[] buffer, int offset, int count, bool async, CancellationToken cancellationToken = default) + async Task Read(bool async, byte[] buffer, int offset, int count, CancellationToken cancellationToken = default) { if (buffer == null) throw new ArgumentNullException(nameof(buffer)); @@ -83,7 +80,7 @@ async Task Read(byte[] buffer, int offset, int count, bool async, Cancellat while (read < count) { var bytesRead = await _manager.ExecuteFunctionGetBytes( - "loread", buffer, offset + read, count - read, async, cancellationToken, _fd, chunkCount); + async, "loread", buffer, offset + read, count - read, cancellationToken, _fd, chunkCount).ConfigureAwait(false); _pos += bytesRead; read += bytesRead; if (bytesRead < chunkCount) @@ -101,7 +98,7 @@ async Task Read(byte[] buffer, int offset, int count, bool async, Cancellat /// The offset in the buffer at which to begin copying bytes. /// The number of bytes to write. public override void Write(byte[] buffer, int offset, int count) - => Write(buffer, offset, count, false).GetAwaiter().GetResult(); + => Write(async: false, buffer, offset, count).GetAwaiter().GetResult(); /// /// Writes count bytes to the large object. @@ -113,12 +110,9 @@ public override void Write(byte[] buffer, int offset, int count) /// An optional token to cancel the asynchronous operation. The default value is . 
/// public override Task WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) - { - using (NoSynchronizationContextScope.Enter()) - return Write(buffer, offset, count, true, cancellationToken); - } + => Write(async: true, buffer, offset, count, cancellationToken); - async Task Write(byte[] buffer, int offset, int count, bool async, CancellationToken cancellationToken = default) + async Task Write(bool async, byte[] buffer, int offset, int count, CancellationToken cancellationToken = default) { if (buffer == null) throw new ArgumentNullException(nameof(buffer)); @@ -139,7 +133,7 @@ async Task Write(byte[] buffer, int offset, int count, bool async, CancellationT while (totalWritten < count) { var chunkSize = Math.Min(count - totalWritten, _manager.MaxTransferBlockSize); - var bytesWritten = await _manager.ExecuteFunction("lowrite", async, cancellationToken, _fd, new ArraySegment(buffer, offset + totalWritten, chunkSize)); + var bytesWritten = await _manager.ExecuteFunction(async, "lowrite", cancellationToken, _fd, new ArraySegment(buffer, offset + totalWritten, chunkSize)).ConfigureAwait(false); totalWritten += bytesWritten; if (bytesWritten != chunkSize) @@ -193,19 +187,15 @@ public override long Position /// /// An optional token to cancel the asynchronous operation. The default value is . 
/// - public Task GetLengthAsync(CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return GetLength(true); - } + public Task GetLengthAsync(CancellationToken cancellationToken = default) => GetLength(async: true); async Task GetLength(bool async) { CheckDisposed(); var old = _pos; - var retval = await Seek(0, SeekOrigin.End, async); + var retval = await Seek(async, 0, SeekOrigin.End).ConfigureAwait(false); if (retval != old) - await Seek(old, SeekOrigin.Begin, async); + await Seek(async, old, SeekOrigin.Begin).ConfigureAwait(false); return retval; } @@ -216,7 +206,7 @@ async Task GetLength(bool async) /// A value of type SeekOrigin indicating the reference point used to obtain the new position. /// public override long Seek(long offset, SeekOrigin origin) - => Seek(offset, origin, false).GetAwaiter().GetResult(); + => Seek(async: false, offset, origin).GetAwaiter().GetResult(); /// /// Seeks in the stream to the specified position. This requires a round-trip to the backend. @@ -227,12 +217,9 @@ public override long Seek(long offset, SeekOrigin origin) /// An optional token to cancel the asynchronous operation. The default value is . /// public Task SeekAsync(long offset, SeekOrigin origin, CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return Seek(offset, origin, true, cancellationToken); - } + => Seek(async: true, offset, origin, cancellationToken); - async Task Seek(long offset, SeekOrigin origin, bool async, CancellationToken cancellationToken = default) + async Task Seek(bool async, long offset, SeekOrigin origin, CancellationToken cancellationToken = default) { if (origin < SeekOrigin.Begin || origin > SeekOrigin.End) throw new ArgumentException("Invalid origin"); @@ -242,8 +229,8 @@ async Task Seek(long offset, SeekOrigin origin, bool async, CancellationTo CheckDisposed(); return _manager.Has64BitSupport - ? 
_pos = await _manager.ExecuteFunction("lo_lseek64", async, cancellationToken, _fd, offset, (int)origin) - : _pos = await _manager.ExecuteFunction("lo_lseek", async, cancellationToken, _fd, (int)offset, (int)origin); + ? _pos = await _manager.ExecuteFunction(async, "lo_lseek64", cancellationToken, _fd, offset, (int)origin).ConfigureAwait(false) + : _pos = await _manager.ExecuteFunction(async, "lo_lseek", cancellationToken, _fd, (int)offset, (int)origin).ConfigureAwait(false); } /// @@ -257,7 +244,7 @@ public override void Flush() {} /// /// Number of bytes to either truncate or enlarge the large object. public override void SetLength(long value) - => SetLength(value, false).GetAwaiter().GetResult(); + => SetLength(async: false, value).GetAwaiter().GetResult(); /// /// Truncates or enlarges the large object to the given size. If enlarging, the large object is extended with null bytes. @@ -268,14 +255,12 @@ public override void SetLength(long value) /// An optional token to cancel the asynchronous operation. The default value is . 
/// public Task SetLength(long value, CancellationToken cancellationToken) + => SetLength(async: true, value, cancellationToken); + + async Task SetLength(bool async, long value, CancellationToken cancellationToken = default) { cancellationToken.ThrowIfCancellationRequested(); - using (NoSynchronizationContextScope.Enter()) - return SetLength(value, true, cancellationToken); - } - async Task SetLength(long value, bool async, CancellationToken cancellationToken = default) - { if (value < 0) throw new ArgumentOutOfRangeException(nameof(value)); if (!Has64BitSupport && value != (int)value) @@ -287,9 +272,9 @@ async Task SetLength(long value, bool async, CancellationToken cancellationToken throw new NotSupportedException("SetLength cannot be called on a stream opened with no write permissions"); if (_manager.Has64BitSupport) - await _manager.ExecuteFunction("lo_truncate64", async, cancellationToken, _fd, value); + await _manager.ExecuteFunction(async, "lo_truncate64", cancellationToken, _fd, value).ConfigureAwait(false); else - await _manager.ExecuteFunction("lo_truncate", async, cancellationToken, _fd, (int)value); + await _manager.ExecuteFunction(async, "lo_truncate", cancellationToken, _fd, (int)value).ConfigureAwait(false); } /// @@ -299,7 +284,7 @@ public override void Close() { if (!_disposed) { - _manager.ExecuteFunction("lo_close", false, CancellationToken.None, _fd).GetAwaiter().GetResult(); + _manager.ExecuteFunction(async: false, "lo_close", CancellationToken.None, _fd).GetAwaiter().GetResult(); _disposed = true; } } @@ -315,4 +300,4 @@ protected override void Dispose(bool disposing) Close(); } } -} \ No newline at end of file +} diff --git a/src/Npgsql/NpgsqlMultiHostDataSource.cs b/src/Npgsql/NpgsqlMultiHostDataSource.cs index eb8bcd3bdd..fe6688b3a5 100644 --- a/src/Npgsql/NpgsqlMultiHostDataSource.cs +++ b/src/Npgsql/NpgsqlMultiHostDataSource.cs @@ -184,7 +184,7 @@ static bool IsOnline(DatabaseState state, TargetSessionAttributes preferredType) { if 
(databaseState == DatabaseState.Unknown) { - databaseState = await connector.QueryDatabaseState(new NpgsqlTimeout(timeoutPerHost), async, cancellationToken); + databaseState = await connector.QueryDatabaseState(new NpgsqlTimeout(timeoutPerHost), async, cancellationToken).ConfigureAwait(false); Debug.Assert(databaseState != DatabaseState.Unknown); if (!stateValidator(databaseState, preferredType)) { @@ -197,7 +197,7 @@ static bool IsOnline(DatabaseState state, TargetSessionAttributes preferredType) } else { - connector = await pool.OpenNewConnector(conn, new NpgsqlTimeout(timeoutPerHost), async, cancellationToken); + connector = await pool.OpenNewConnector(conn, new NpgsqlTimeout(timeoutPerHost), async, cancellationToken).ConfigureAwait(false); if (connector is not null) { if (databaseState == DatabaseState.Unknown) @@ -205,7 +205,7 @@ static bool IsOnline(DatabaseState state, TargetSessionAttributes preferredType) // While opening a new connector we might have refreshed the database state, check again databaseState = pool.GetDatabaseState(); if (databaseState == DatabaseState.Unknown) - databaseState = await connector.QueryDatabaseState(new NpgsqlTimeout(timeoutPerHost), async, cancellationToken); + databaseState = await connector.QueryDatabaseState(new NpgsqlTimeout(timeoutPerHost), async, cancellationToken).ConfigureAwait(false); Debug.Assert(databaseState != DatabaseState.Unknown); if (!stateValidator(databaseState, preferredType)) { @@ -255,13 +255,13 @@ static bool IsOnline(DatabaseState state, TargetSessionAttributes preferredType) try { - connector = await pool.Get(conn, new NpgsqlTimeout(timeoutPerHost), async, cancellationToken); + connector = await pool.Get(conn, new NpgsqlTimeout(timeoutPerHost), async, cancellationToken).ConfigureAwait(false); if (databaseState == DatabaseState.Unknown) { // Get might have opened a new physical connection and refreshed the database state, check again databaseState = pool.GetDatabaseState(); if (databaseState == 
DatabaseState.Unknown) - databaseState = await connector.QueryDatabaseState(new NpgsqlTimeout(timeoutPerHost), async, cancellationToken); + databaseState = await connector.QueryDatabaseState(new NpgsqlTimeout(timeoutPerHost), async, cancellationToken).ConfigureAwait(false); Debug.Assert(databaseState != DatabaseState.Unknown); if (!stateValidator(databaseState, preferredType)) @@ -300,13 +300,13 @@ internal override async ValueTask Get( var preferredType = GetTargetSessionAttributes(conn); var checkUnpreferred = preferredType is TargetSessionAttributes.PreferPrimary or TargetSessionAttributes.PreferStandby; - var connector = await TryGetIdleOrNew(conn, timeoutPerHost, async, preferredType, IsPreferred, poolIndex, exceptions, cancellationToken) ?? + var connector = await TryGetIdleOrNew(conn, timeoutPerHost, async, preferredType, IsPreferred, poolIndex, exceptions, cancellationToken).ConfigureAwait(false) ?? (checkUnpreferred ? - await TryGetIdleOrNew(conn, timeoutPerHost, async, preferredType, IsOnline, poolIndex, exceptions, cancellationToken) + await TryGetIdleOrNew(conn, timeoutPerHost, async, preferredType, IsOnline, poolIndex, exceptions, cancellationToken).ConfigureAwait(false) : null) ?? - await TryGet(conn, timeoutPerHost, async, preferredType, IsPreferred, poolIndex, exceptions, cancellationToken) ?? + await TryGet(conn, timeoutPerHost, async, preferredType, IsPreferred, poolIndex, exceptions, cancellationToken).ConfigureAwait(false) ?? (checkUnpreferred ? - await TryGet(conn, timeoutPerHost, async, preferredType, IsOnline, poolIndex, exceptions, cancellationToken) + await TryGet(conn, timeoutPerHost, async, preferredType, IsOnline, poolIndex, exceptions, cancellationToken).ConfigureAwait(false) : null); return connector ?? 
throw NoSuitableHostsException(exceptions); diff --git a/src/Npgsql/NpgsqlParameter.cs b/src/Npgsql/NpgsqlParameter.cs index 79e9ed8ccd..7a5aabc8ae 100644 --- a/src/Npgsql/NpgsqlParameter.cs +++ b/src/Npgsql/NpgsqlParameter.cs @@ -663,7 +663,7 @@ internal async ValueTask Write(bool async, PgWriter writer, CancellationToken ca try { if (writer.ShouldFlush(sizeof(int))) - await writer.Flush(async, cancellationToken); + await writer.Flush(async, cancellationToken).ConfigureAwait(false); writer.WriteInt32(writeSize.Value); if (writeSize.Value is -1) @@ -680,7 +680,7 @@ internal async ValueTask Write(bool async, PgWriter writer, CancellationToken ca WriteState = _writeState }; await writer.BeginWrite(async, current, cancellationToken).ConfigureAwait(false); - await WriteValue(async, writer, cancellationToken); + await WriteValue(async, writer, cancellationToken).ConfigureAwait(false); writer.Commit(writeSize.Value + sizeof(int)); } finally diff --git a/src/Npgsql/NpgsqlRawCopyStream.cs b/src/Npgsql/NpgsqlRawCopyStream.cs index 77153f830c..5656e9c865 100644 --- a/src/Npgsql/NpgsqlRawCopyStream.cs +++ b/src/Npgsql/NpgsqlRawCopyStream.cs @@ -80,12 +80,12 @@ internal NpgsqlRawCopyStream(NpgsqlConnector connector) internal async Task Init(string copyCommand, bool async, CancellationToken cancellationToken = default) { - await _connector.WriteQuery(copyCommand, async, cancellationToken); - await _connector.Flush(async, cancellationToken); + await _connector.WriteQuery(copyCommand, async, cancellationToken).ConfigureAwait(false); + await _connector.Flush(async, cancellationToken).ConfigureAwait(false); using var registration = _connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); - var msg = await _connector.ReadMessage(async); + var msg = await _connector.ReadMessage(async).ConfigureAwait(false); switch (msg.Code) { case BackendMessageCode.CopyInResponse: @@ -166,8 +166,8 @@ public override ValueTask WriteAsync(ReadOnlyMemory buffer, 
CancellationTo if (!CanWrite) throw new InvalidOperationException("Stream not open for writing"); cancellationToken.ThrowIfCancellationRequested(); - using (NoSynchronizationContextScope.Enter()) - return WriteAsyncInternal(buffer, cancellationToken); + + return WriteAsyncInternal(buffer, cancellationToken); async ValueTask WriteAsyncInternal(ReadOnlyMemory buffer, CancellationToken cancellationToken) { @@ -181,7 +181,7 @@ async ValueTask WriteAsyncInternal(ReadOnlyMemory buffer, CancellationToke } // Value is too big, flush. - await FlushAsync(true, cancellationToken); + await FlushAsync(true, cancellationToken).ConfigureAwait(false); if (buffer.Length <= _writeBuf.WriteSpaceLeft) { @@ -190,18 +190,18 @@ async ValueTask WriteAsyncInternal(ReadOnlyMemory buffer, CancellationToke } // Value is too big even after a flush - bypass the buffer and write directly. - await _writeBuf.DirectWrite(buffer, true, cancellationToken); + await _writeBuf.DirectWrite(buffer, true, cancellationToken).ConfigureAwait(false); } } - public override void Flush() => FlushAsync(false).GetAwaiter().GetResult(); + public override void Flush() => FlushAsync(async: false).GetAwaiter().GetResult(); public override Task FlushAsync(CancellationToken cancellationToken) { if (cancellationToken.IsCancellationRequested) return Task.FromCanceled(cancellationToken); - using (NoSynchronizationContextScope.Enter()) - return FlushAsync(true, cancellationToken); + + return FlushAsync(async: true, cancellationToken); } Task FlushAsync(bool async, CancellationToken cancellationToken = default) @@ -252,12 +252,12 @@ public override ValueTask ReadAsync(Memory buffer, CancellationToken if (!CanRead) throw new InvalidOperationException("Stream not open for reading"); cancellationToken.ThrowIfCancellationRequested(); - using (NoSynchronizationContextScope.Enter()) - return ReadAsyncInternal(); + + return ReadAsyncInternal(); async ValueTask ReadAsyncInternal() { - var count = await ReadCore(buffer.Length, true, 
cancellationToken); + var count = await ReadCore(buffer.Length, true, cancellationToken).ConfigureAwait(false); if (count > 0) _readBuf.ReadBytes(buffer.Slice(0, count).Span); return count; @@ -278,7 +278,7 @@ async ValueTask ReadCore(int count, bool async, CancellationToken cancellat { // We've consumed the current DataMessage (or haven't yet received the first), // read the next message - msg = await _connector.ReadMessage(async); + msg = await _connector.ReadMessage(async).ConfigureAwait(false); } catch { @@ -293,8 +293,8 @@ async ValueTask ReadCore(int count, bool async, CancellationToken cancellat _leftToReadInDataMsg = ((CopyDataMessage)msg).Length; break; case BackendMessageCode.CopyDone: - Expect(await _connector.ReadMessage(async), _connector); - Expect(await _connector.ReadMessage(async), _connector); + Expect(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); + Expect(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); _isConsumed = true; return 0; default: @@ -307,7 +307,7 @@ async ValueTask ReadCore(int count, bool async, CancellationToken cancellat // If our buffer is empty, read in more. Otherwise return whatever is there, even if the // user asked for more (normal socket behavior) if (_readBuf.ReadBytesLeft == 0) - await _readBuf.ReadMore(async); + await _readBuf.ReadMore(async).ConfigureAwait(false); Debug.Assert(_readBuf.ReadBytesLeft > 0); @@ -326,16 +326,12 @@ async ValueTask ReadCore(int count, bool async, CancellationToken cancellat /// /// Cancels and terminates an ongoing operation. Any data already written will be discarded. /// - public void Cancel() => Cancel(false).GetAwaiter().GetResult(); + public void Cancel() => Cancel(async: false).GetAwaiter().GetResult(); /// /// Cancels and terminates an ongoing operation. Any data already written will be discarded. 
/// - public Task CancelAsync() - { - using (NoSynchronizationContextScope.Enter()) - return Cancel(true); - } + public Task CancelAsync() => Cancel(async: true); async Task Cancel(bool async) { @@ -345,11 +341,11 @@ async Task Cancel(bool async) { _writeBuf.EndCopyMode(); _writeBuf.Clear(); - await _connector.WriteCopyFail(async); - await _connector.Flush(async); + await _connector.WriteCopyFail(async).ConfigureAwait(false); + await _connector.Flush(async).ConfigureAwait(false); try { - var msg = await _connector.ReadMessage(async); + var msg = await _connector.ReadMessage(async).ConfigureAwait(false); // The CopyFail should immediately trigger an exception from the read above. throw _connector.Break( new NpgsqlException("Expected ErrorResponse when cancelling COPY but got: " + msg.Code)); @@ -393,12 +389,12 @@ async ValueTask DisposeAsync(bool disposing, bool async) if (CanWrite) { - await FlushAsync(async); + await FlushAsync(async).ConfigureAwait(false); _writeBuf.EndCopyMode(); - await _connector.WriteCopyDone(async); - await _connector.Flush(async); - Expect(await _connector.ReadMessage(async), _connector); - Expect(await _connector.ReadMessage(async), _connector); + await _connector.WriteCopyDone(async).ConfigureAwait(false); + await _connector.Flush(async).ConfigureAwait(false); + Expect(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); + Expect(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); } else { @@ -408,7 +404,7 @@ async ValueTask DisposeAsync(bool disposing, bool async) { if (_leftToReadInDataMsg > 0) { - await _readBuf.Skip(_leftToReadInDataMsg, async); + await _readBuf.Skip(_leftToReadInDataMsg, async).ConfigureAwait(false); } _connector.SkipUntil(BackendMessageCode.ReadyForQuery); } @@ -515,11 +511,7 @@ public void Cancel() /// /// Cancels and terminates an ongoing import. Any data already written will be discarded. 
/// - public Task CancelAsync() - { - using (NoSynchronizationContextScope.Enter()) - return ((NpgsqlRawCopyStream)BaseStream).CancelAsync(); - } + public Task CancelAsync() => ((NpgsqlRawCopyStream)BaseStream).CancelAsync(); #if NETSTANDARD2_0 public ValueTask DisposeAsync() @@ -553,11 +545,7 @@ public void Cancel() /// /// Asynchronously cancels and terminates an ongoing export. /// - public Task CancelAsync() - { - using (NoSynchronizationContextScope.Enter()) - return ((NpgsqlRawCopyStream)BaseStream).CancelAsync(); - } + public Task CancelAsync() => ((NpgsqlRawCopyStream)BaseStream).CancelAsync(); public ValueTask DisposeAsync() { diff --git a/src/Npgsql/NpgsqlSchema.cs b/src/Npgsql/NpgsqlSchema.cs index 5fe79a97a6..94ee317f2b 100644 --- a/src/Npgsql/NpgsqlSchema.cs +++ b/src/Npgsql/NpgsqlSchema.cs @@ -17,7 +17,7 @@ namespace Npgsql; /// static class NpgsqlSchema { - public static Task GetSchema(NpgsqlConnection conn, string? collectionName, string?[]? restrictions, bool async, CancellationToken cancellationToken = default) + public static Task GetSchema(bool async, NpgsqlConnection conn, string? collectionName, string?[]? restrictions, CancellationToken cancellationToken = default) { if (collectionName is null) throw new ArgumentNullException(nameof(collectionName)); @@ -170,7 +170,7 @@ static async Task GetDatabases(NpgsqlConnection conn, string?[]? 
rest using var command = BuildCommand(conn, getDatabases, restrictions, "datname"); using var adapter = new NpgsqlDataAdapter(command); - await adapter.Fill(databases, async, cancellationToken); + await adapter.Fill(databases, async, cancellationToken).ConfigureAwait(false); return databases; } @@ -196,7 +196,7 @@ r.rolname AS schema_owner using var command = BuildCommand(conn, getSchemata, restrictions, "catalog_name", "schema_name", "schema_owner"); using var adapter = new NpgsqlDataAdapter(command); - await adapter.Fill(schemata, async, cancellationToken); + await adapter.Fill(schemata, async, cancellationToken).ConfigureAwait(false); return schemata; } @@ -224,7 +224,7 @@ table_type IN ('BASE TABLE', 'FOREIGN', 'FOREIGN TABLE') AND using var command = BuildCommand(conn, getTables, restrictions, false, "table_catalog", "table_schema", "table_name", "table_type"); using var adapter = new NpgsqlDataAdapter(command); - await adapter.Fill(tables, async, cancellationToken); + await adapter.Fill(tables, async, cancellationToken).ConfigureAwait(false); return tables; } @@ -264,7 +264,7 @@ FROM information_schema.columns using var command = BuildCommand(conn, getColumns, restrictions, "table_catalog", "table_schema", "table_name", "column_name"); using var adapter = new NpgsqlDataAdapter(command); - await adapter.Fill(columns, async, cancellationToken); + await adapter.Fill(columns, async, cancellationToken).ConfigureAwait(false); return columns; } @@ -285,7 +285,7 @@ FROM information_schema.views using var command = BuildCommand(conn, getViews, restrictions, false, "table_catalog", "table_schema", "table_name"); using var adapter = new NpgsqlDataAdapter(command); - await adapter.Fill(views, async, cancellationToken); + await adapter.Fill(views, async, cancellationToken).ConfigureAwait(false); return views; } @@ -302,7 +302,7 @@ static async Task GetUsers(NpgsqlConnection conn, string?[]? 
restrict using var command = BuildCommand(conn, getUsers, restrictions, "usename"); using var adapter = new NpgsqlDataAdapter(command); - await adapter.Fill(users, async, cancellationToken); + await adapter.Fill(users, async, cancellationToken).ConfigureAwait(false); return users; } @@ -335,7 +335,7 @@ n.nspname NOT IN ('pg_catalog', 'pg_toast') AND using var command = BuildCommand(conn, getIndexes, restrictions, false, "current_database()", "n.nspname", "t.relname", "i.relname"); using var adapter = new NpgsqlDataAdapter(command); - await adapter.Fill(indexes, async, cancellationToken); + await adapter.Fill(indexes, async, cancellationToken).ConfigureAwait(false); return indexes; } @@ -375,7 +375,7 @@ t_ns.nspname NOT IN ('pg_catalog', 'pg_toast') AND using var command = BuildCommand(conn, getIndexColumns, restrictions, false, "current_database()", "t_ns.nspname", "t.relname", "ix_cls.relname", "a.attname"); using var adapter = new NpgsqlDataAdapter(command); - await adapter.Fill(indexColumns, async, cancellationToken); + await adapter.Fill(indexColumns, async, cancellationToken).ConfigureAwait(false); return indexColumns; } @@ -418,7 +418,7 @@ UNION ALL using var adapter = new NpgsqlDataAdapter(command); var table = new DataTable(constraintType) { Locale = CultureInfo.InvariantCulture }; - await adapter.Fill(table, async, cancellationToken); + await adapter.Fill(table, async, cancellationToken).ConfigureAwait(false); return table; } @@ -453,7 +453,7 @@ UNION ALL using var adapter = new NpgsqlDataAdapter(command); var table = new DataTable("ConstraintColumns") { Locale = CultureInfo.InvariantCulture }; - await adapter.Fill(table, async, cancellationToken); + await adapter.Fill(table, async, cancellationToken).ConfigureAwait(false); return table; } diff --git a/src/Npgsql/NpgsqlTransaction.cs b/src/Npgsql/NpgsqlTransaction.cs index 5dbaaadb85..5ca2821cfd 100644 --- a/src/Npgsql/NpgsqlTransaction.cs +++ b/src/Npgsql/NpgsqlTransaction.cs @@ -132,7 +132,7 @@ async 
Task Commit(bool async, CancellationToken cancellationToken = default) using (_connector.StartUserAction(cancellationToken)) { - await _connector.ExecuteInternalCommand(PregeneratedMessages.CommitTransaction, async, cancellationToken); + await _connector.ExecuteInternalCommand(PregeneratedMessages.CommitTransaction, async, cancellationToken).ConfigureAwait(false); LogMessages.CommittedTransaction(_transactionLogger, _connector.Id); } } @@ -148,10 +148,7 @@ public Task CommitAsync(CancellationToken cancellationToken = default) #else public override Task CommitAsync(CancellationToken cancellationToken = default) #endif - { - using (NoSynchronizationContextScope.Enter()) - return Commit(true, cancellationToken); - } + => Commit(async: true, cancellationToken); #endregion @@ -171,7 +168,7 @@ async Task Rollback(bool async, CancellationToken cancellationToken = default) using (_connector.StartUserAction(cancellationToken)) { - await _connector.Rollback(async, cancellationToken); + await _connector.Rollback(async, cancellationToken).ConfigureAwait(false); LogMessages.RolledBackTransaction(_transactionLogger, _connector.Id); } } @@ -187,10 +184,7 @@ public Task RollbackAsync(CancellationToken cancellationToken = default) #else public override Task RollbackAsync(CancellationToken cancellationToken = default) #endif - { - using (NoSynchronizationContextScope.Enter()) - return Rollback(true, cancellationToken); - } + => Rollback(async: true, cancellationToken); #endregion @@ -265,7 +259,7 @@ public Task SaveAsync(string name, CancellationToken cancellationToken = default return Task.CompletedTask; } - async Task Rollback(string name, bool async, CancellationToken cancellationToken = default) + async Task Rollback(bool async, string name, CancellationToken cancellationToken = default) { if (name == null) throw new ArgumentNullException(nameof(name)); @@ -278,7 +272,7 @@ async Task Rollback(string name, bool async, CancellationToken cancellationToken using 
(_connector.StartUserAction(cancellationToken)) { var quotedName = RequiresQuoting(name) ? $"\"{name.Replace("\"", "\"\"")}\"" : name; - await _connector.ExecuteInternalCommand($"ROLLBACK TO SAVEPOINT {quotedName}", async, cancellationToken); + await _connector.ExecuteInternalCommand($"ROLLBACK TO SAVEPOINT {quotedName}", async, cancellationToken).ConfigureAwait(false); LogMessages.RolledBackToSavepoint(_transactionLogger, name, _connector.Id); } } @@ -292,7 +286,7 @@ public override void Rollback(string name) #else public void Rollback(string name) #endif - => Rollback(name, false).GetAwaiter().GetResult(); + => Rollback(async: false, name).GetAwaiter().GetResult(); /// /// Rolls back a transaction from a pending savepoint state. @@ -306,12 +300,9 @@ public override Task RollbackAsync(string name, CancellationToken cancellationTo #else public Task RollbackAsync(string name, CancellationToken cancellationToken = default) #endif - { - using (NoSynchronizationContextScope.Enter()) - return Rollback(name, true, cancellationToken); - } + => Rollback(async: true, name, cancellationToken); - async Task Release(string name, bool async, CancellationToken cancellationToken = default) + async Task Release(bool async, string name, CancellationToken cancellationToken = default) { if (name == null) throw new ArgumentNullException(nameof(name)); @@ -324,7 +315,7 @@ async Task Release(string name, bool async, CancellationToken cancellationToken using (_connector.StartUserAction(cancellationToken)) { var quotedName = RequiresQuoting(name) ? 
$"\"{name.Replace("\"", "\"\"")}\"" : name; - await _connector.ExecuteInternalCommand($"RELEASE SAVEPOINT {quotedName}", async, cancellationToken); + await _connector.ExecuteInternalCommand($"RELEASE SAVEPOINT {quotedName}", async, cancellationToken).ConfigureAwait(false); LogMessages.ReleasedSavepoint(_transactionLogger, name, _connector.Id); } } @@ -334,10 +325,11 @@ async Task Release(string name, bool async, CancellationToken cancellationToken /// /// The name of the savepoint. #if NET5_0_OR_GREATER - public override void Release(string name) => Release(name, false).GetAwaiter().GetResult(); + public override void Release(string name) #else - public void Release(string name) => Release(name, false).GetAwaiter().GetResult(); + public void Release(string name) #endif + => Release(async: false, name).GetAwaiter().GetResult(); /// /// Releases a transaction from a pending savepoint state. @@ -351,10 +343,7 @@ public override Task ReleaseAsync(string name, CancellationToken cancellationTok #else public Task ReleaseAsync(string name, CancellationToken cancellationToken = default) #endif - { - using (NoSynchronizationContextScope.Enter()) - return Release(name, true, cancellationToken); - } + => Release(async: false, name, cancellationToken); /// /// Indicates whether this transaction supports database savepoints. 
@@ -413,8 +402,7 @@ public override ValueTask DisposeAsync() { if (!IsCompleted) { - using (NoSynchronizationContextScope.Enter()) - return DisposeAsyncInternal(); + return DisposeAsyncInternal(); } IsDisposed = true; @@ -427,8 +415,8 @@ async ValueTask DisposeAsyncInternal() // We're disposing, so no cancellation token try { - await _connector.CloseOngoingOperations(async: true); - await Rollback(async: true); + await _connector.CloseOngoingOperations(async: true).ConfigureAwait(false); + await Rollback(async: true).ConfigureAwait(false); } catch (Exception ex) { diff --git a/src/Npgsql/PoolingDataSource.cs b/src/Npgsql/PoolingDataSource.cs index 0421953f4c..4d563207b4 100644 --- a/src/Npgsql/PoolingDataSource.cs +++ b/src/Npgsql/PoolingDataSource.cs @@ -131,7 +131,7 @@ async ValueTask RentAsync( NpgsqlConnection conn, NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken) { // First, try to open a new physical connector. This will fail if we're at max capacity. - var connector = await OpenNewConnector(conn, timeout, async, cancellationToken); + var connector = await OpenNewConnector(conn, timeout, async, cancellationToken).ConfigureAwait(false); if (connector != null) return connector; @@ -153,7 +153,7 @@ async ValueTask RentAsync( if (!async && !task.IsCompleted) await new TaskSchedulerAwaitable(ConstrainedConcurrencyScheduler.ConcurrentScheduler); - connector = await task; + connector = await task.ConfigureAwait(false); if (CheckIdleConnector(connector)) return connector; } @@ -180,7 +180,7 @@ async ValueTask RentAsync( // We might have closed a connector in the meantime and no longer be at max capacity // so try to open a new connector and if that fails, loop again. 
- connector = await OpenNewConnector(conn, timeout, async, cancellationToken); + connector = await OpenNewConnector(conn, timeout, async, cancellationToken).ConfigureAwait(false); if (connector != null) return connector; } @@ -262,7 +262,7 @@ bool CheckIdleConnector([NotNullWhen(true)] NpgsqlConnector? connector) var startTime = Stopwatch.GetTimestamp(); #endif var connector = new NpgsqlConnector(this, conn) { ClearCounter = _clearCounter }; - await connector.Open(timeout, async, cancellationToken); + await connector.Open(timeout, async, cancellationToken).ConfigureAwait(false); #if NET7_0_OR_GREATER MetricsReporter.ReportConnectionCreateTime(Stopwatch.GetElapsedTime(startTime)); #endif diff --git a/src/Npgsql/PostgresDatabaseInfo.cs b/src/Npgsql/PostgresDatabaseInfo.cs index 465949bec5..0d2397eac3 100644 --- a/src/Npgsql/PostgresDatabaseInfo.cs +++ b/src/Npgsql/PostgresDatabaseInfo.cs @@ -26,7 +26,7 @@ sealed class PostgresDatabaseInfoFactory : INpgsqlDatabaseInfoFactory public async Task Load(NpgsqlConnector conn, NpgsqlTimeout timeout, bool async) { var db = new PostgresDatabaseInfo(conn); - await db.LoadPostgresInfo(conn, timeout, async); + await db.LoadPostgresInfo(conn, timeout, async).ConfigureAwait(false); Debug.Assert(db.LongVersion != null); return db; } @@ -98,7 +98,7 @@ internal async Task LoadPostgresInfo(NpgsqlConnector conn, NpgsqlTimeout timeout intDateTimes == "on"; IsRedshift = conn.Settings.ServerCompatibilityMode == ServerCompatibilityMode.Redshift; - _types = await LoadBackendTypes(conn, timeout, async); + _types = await LoadBackendTypes(conn, timeout, async).ConfigureAwait(false); } /// @@ -217,11 +217,11 @@ internal async Task> LoadBackendTypes(NpgsqlConnector conn, N var isReplicationConnection = conn.Settings.ReplicationMode != ReplicationMode.Off; if (isReplicationConnection) { - await conn.WriteQuery(versionQuery, async); - await conn.WriteQuery(SanitizeForReplicationConnection(loadTypesQuery), async); - await 
conn.WriteQuery(SanitizeForReplicationConnection(loadCompositeTypesQuery), async); + await conn.WriteQuery(versionQuery, async).ConfigureAwait(false); + await conn.WriteQuery(SanitizeForReplicationConnection(loadTypesQuery), async).ConfigureAwait(false); + await conn.WriteQuery(SanitizeForReplicationConnection(loadCompositeTypesQuery), async).ConfigureAwait(false); if (SupportsEnumTypes) - await conn.WriteQuery(SanitizeForReplicationConnection(loadEnumFieldsQuery), async); + await conn.WriteQuery(SanitizeForReplicationConnection(loadEnumFieldsQuery), async).ConfigureAwait(false); static string SanitizeForReplicationConnection(string str) { @@ -297,31 +297,31 @@ static string SanitizeForReplicationConnection(string str) if (SupportsEnumTypes) batchQuery.AppendLine(loadEnumFieldsQuery); - await conn.WriteQuery(batchQuery.ToString(), async); + await conn.WriteQuery(batchQuery.ToString(), async).ConfigureAwait(false); } - await conn.Flush(async); + await conn.Flush(async).ConfigureAwait(false); var byOID = new Dictionary(); // First read the PostgreSQL version - Expect(await conn.ReadMessage(async), conn); + Expect(await conn.ReadMessage(async).ConfigureAwait(false), conn); // We read the message in non-sequential mode which buffers the whole message. 
// There is no need to ensure data within the message boundaries - Expect(await conn.ReadMessage(async), conn); + Expect(await conn.ReadMessage(async).ConfigureAwait(false), conn); // Note that here and below we don't assign ReadBuffer to a variable // because we might allocate oversize buffer conn.ReadBuffer.Skip(2); // Column count LongVersion = ReadNonNullableString(conn.ReadBuffer); - Expect(await conn.ReadMessage(async), conn); + Expect(await conn.ReadMessage(async).ConfigureAwait(false), conn); if (isReplicationConnection) - Expect(await conn.ReadMessage(async), conn); + Expect(await conn.ReadMessage(async).ConfigureAwait(false), conn); // Then load the types - Expect(await conn.ReadMessage(async), conn); + Expect(await conn.ReadMessage(async).ConfigureAwait(false), conn); IBackendMessage msg; while (true) { - msg = await conn.ReadMessage(async); + msg = await conn.ReadMessage(async).ConfigureAwait(false); if (msg is not DataRowMessage) break; @@ -424,10 +424,10 @@ static string SanitizeForReplicationConnection(string str) } Expect(msg, conn); if (isReplicationConnection) - Expect(await conn.ReadMessage(async), conn); + Expect(await conn.ReadMessage(async).ConfigureAwait(false), conn); // Then load the composite type fields - Expect(await conn.ReadMessage(async), conn); + Expect(await conn.ReadMessage(async).ConfigureAwait(false), conn); var currentOID = uint.MaxValue; PostgresCompositeType? 
currentComposite = null; @@ -435,7 +435,7 @@ static string SanitizeForReplicationConnection(string str) while (true) { - msg = await conn.ReadMessage(async); + msg = await conn.ReadMessage(async).ConfigureAwait(false); if (msg is not DataRowMessage) break; @@ -484,12 +484,12 @@ static string SanitizeForReplicationConnection(string str) } Expect(msg, conn); if (isReplicationConnection) - Expect(await conn.ReadMessage(async), conn); + Expect(await conn.ReadMessage(async).ConfigureAwait(false), conn); if (SupportsEnumTypes) { // Then load the enum fields - Expect(await conn.ReadMessage(async), conn); + Expect(await conn.ReadMessage(async).ConfigureAwait(false), conn); currentOID = uint.MaxValue; PostgresEnumType? currentEnum = null; @@ -497,7 +497,7 @@ static string SanitizeForReplicationConnection(string str) while (true) { - msg = await conn.ReadMessage(async); + msg = await conn.ReadMessage(async).ConfigureAwait(false); if (msg is not DataRowMessage) break; @@ -535,11 +535,11 @@ static string SanitizeForReplicationConnection(string str) } Expect(msg, conn); if (isReplicationConnection) - Expect(await conn.ReadMessage(async), conn); + Expect(await conn.ReadMessage(async).ConfigureAwait(false), conn); } if (!isReplicationConnection) - Expect(await conn.ReadMessage(async), conn); + Expect(await conn.ReadMessage(async).ConfigureAwait(false), conn); return new(byOID.Values); diff --git a/src/Npgsql/Replication/Internal/LogicalReplicationConnectionExtensions.cs b/src/Npgsql/Replication/Internal/LogicalReplicationConnectionExtensions.cs index d1ec4638d0..6f703970de 100644 --- a/src/Npgsql/Replication/Internal/LogicalReplicationConnectionExtensions.cs +++ b/src/Npgsql/Replication/Internal/LogicalReplicationConnectionExtensions.cs @@ -61,67 +61,60 @@ public static Task CreateLogicalReplicationSlot( CancellationToken cancellationToken = default) { connection.CheckDisposed(); + if (slotName is null) + throw new ArgumentNullException(nameof(slotName)); + if (outputPlugin is 
null) + throw new ArgumentNullException(nameof(outputPlugin)); - using var _ = NoSynchronizationContextScope.Enter(); - return CreateLogicalReplicationSlotCore(); + cancellationToken.ThrowIfCancellationRequested(); - Task CreateLogicalReplicationSlotCore() + var builder = new StringBuilder("CREATE_REPLICATION_SLOT ").Append(slotName); + if (isTemporary) + builder.Append(" TEMPORARY"); + builder.Append(" LOGICAL ").Append(outputPlugin); + if (connection.PostgreSqlVersion.Major >= 15 && (slotSnapshotInitMode.HasValue || twoPhase)) { - if (slotName is null) - throw new ArgumentNullException(nameof(slotName)); - if (outputPlugin is null) - throw new ArgumentNullException(nameof(outputPlugin)); - - cancellationToken.ThrowIfCancellationRequested(); - - var builder = new StringBuilder("CREATE_REPLICATION_SLOT ").Append(slotName); - if (isTemporary) - builder.Append(" TEMPORARY"); - builder.Append(" LOGICAL ").Append(outputPlugin); - if (connection.PostgreSqlVersion.Major >= 15 && (slotSnapshotInitMode.HasValue || twoPhase)) - { - builder.Append('('); - if (slotSnapshotInitMode.HasValue) - { - builder.Append(slotSnapshotInitMode switch - { - LogicalSlotSnapshotInitMode.Export => "SNAPSHOT 'export'", - LogicalSlotSnapshotInitMode.Use => "SNAPSHOT 'use'", - LogicalSlotSnapshotInitMode.NoExport => "SNAPSHOT 'nothing'", - _ => throw new ArgumentOutOfRangeException(nameof(slotSnapshotInitMode), - slotSnapshotInitMode, - $"Unexpected value {slotSnapshotInitMode} for argument {nameof(slotSnapshotInitMode)}.") - }); - if (twoPhase) - builder.Append(",TWO_PHASE"); - } - else - builder.Append("TWO_PHASE"); - builder.Append(')'); - } - else + builder.Append('('); + if (slotSnapshotInitMode.HasValue) { builder.Append(slotSnapshotInitMode switch { - // EXPORT_SNAPSHOT is the default since it has been introduced. - // We don't set it unless it is explicitly requested so that older backends can digest the query too. 
- null => string.Empty, - LogicalSlotSnapshotInitMode.Export => " EXPORT_SNAPSHOT", - LogicalSlotSnapshotInitMode.Use => " USE_SNAPSHOT", - LogicalSlotSnapshotInitMode.NoExport => " NOEXPORT_SNAPSHOT", + LogicalSlotSnapshotInitMode.Export => "SNAPSHOT 'export'", + LogicalSlotSnapshotInitMode.Use => "SNAPSHOT 'use'", + LogicalSlotSnapshotInitMode.NoExport => "SNAPSHOT 'nothing'", _ => throw new ArgumentOutOfRangeException(nameof(slotSnapshotInitMode), slotSnapshotInitMode, $"Unexpected value {slotSnapshotInitMode} for argument {nameof(slotSnapshotInitMode)}.") }); if (twoPhase) - builder.Append(" TWO_PHASE"); + builder.Append(",TWO_PHASE"); } - var command = builder.ToString(); + else + builder.Append("TWO_PHASE"); + builder.Append(')'); + } + else + { + builder.Append(slotSnapshotInitMode switch + { + // EXPORT_SNAPSHOT is the default since it has been introduced. + // We don't set it unless it is explicitly requested so that older backends can digest the query too. + null => string.Empty, + LogicalSlotSnapshotInitMode.Export => " EXPORT_SNAPSHOT", + LogicalSlotSnapshotInitMode.Use => " USE_SNAPSHOT", + LogicalSlotSnapshotInitMode.NoExport => " NOEXPORT_SNAPSHOT", + _ => throw new ArgumentOutOfRangeException(nameof(slotSnapshotInitMode), + slotSnapshotInitMode, + $"Unexpected value {slotSnapshotInitMode} for argument {nameof(slotSnapshotInitMode)}.") + }); + if (twoPhase) + builder.Append(" TWO_PHASE"); + } + var command = builder.ToString(); - LogMessages.CreatingReplicationSlot(connection.ReplicationLogger, slotName, command, connection.Connector.Id); + LogMessages.CreatingReplicationSlot(connection.ReplicationLogger, slotName, command, connection.Connector.Id); - return connection.CreateReplicationSlot(command, cancellationToken); - } + return connection.CreateReplicationSlot(command, cancellationToken); } /// @@ -149,9 +142,9 @@ public static IAsyncEnumerable StartLogicalReplication( IEnumerable>? 
options = null, bool bypassingStream = false) { - using (NoSynchronizationContextScope.Enter()) - return StartLogicalReplicationInternal(connection, slot, cancellationToken, walLocation, options, bypassingStream); + return StartLogicalReplicationInternal(connection, slot, cancellationToken, walLocation, options, bypassingStream); + // Local method to avoid having to add the EnumeratorCancellation attribute to the public signature. static async IAsyncEnumerable StartLogicalReplicationInternal( LogicalReplicationConnection connection, LogicalReplicationSlot slot, @@ -184,7 +177,7 @@ static async IAsyncEnumerable StartLogicalReplicationInternal( LogMessages.StartingLogicalReplication(connection.ReplicationLogger, slot.Name, command, connection.Connector.Id); var enumerator = connection.StartReplicationInternalWrapper(command, bypassingStream, cancellationToken); - while (await enumerator.MoveNextAsync()) + while (await enumerator.MoveNextAsync().ConfigureAwait(false)) yield return enumerator.Current; } } diff --git a/src/Npgsql/Replication/PgOutput/Messages/UpdateMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/UpdateMessage.cs index 39d324b6f2..8cecc44b6e 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/UpdateMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/UpdateMessage.cs @@ -55,13 +55,13 @@ public override async IAsyncEnumerator GetAsyncEnumerator(Canc // This will throw if we're already reading (or consumed) the second row var enumerator = base.GetAsyncEnumerator(cancellationToken); - await _oldRowTupleEnumerable.Consume(cancellationToken); - await ReadBuffer.EnsureAsync(3); + await _oldRowTupleEnumerable.Consume(cancellationToken).ConfigureAwait(false); + await ReadBuffer.EnsureAsync(3).ConfigureAwait(false); var tupleType = (TupleType)ReadBuffer.ReadByte(); Debug.Assert(tupleType == TupleType.NewTuple); _ = ReadBuffer.ReadUInt16(); // numColumns, - while (await enumerator.MoveNextAsync()) + while (await 
enumerator.MoveNextAsync().ConfigureAwait(false)) yield return enumerator.Current; } @@ -69,13 +69,13 @@ public override async IAsyncEnumerator GetAsyncEnumerator(Canc { if (State == RowState.NotRead) { - await _oldRowTupleEnumerable.Consume(cancellationToken); - await ReadBuffer.EnsureAsync(3); + await _oldRowTupleEnumerable.Consume(cancellationToken).ConfigureAwait(false); + await ReadBuffer.EnsureAsync(3).ConfigureAwait(false); var tupleType = (TupleType)ReadBuffer.ReadByte(); Debug.Assert(tupleType == TupleType.NewTuple); _ = ReadBuffer.ReadUInt16(); // numColumns, } - await base.Consume(cancellationToken); + await base.Consume(cancellationToken).ConfigureAwait(false); } } -} \ No newline at end of file +} diff --git a/src/Npgsql/Replication/PgOutput/PgOutputAsyncEnumerable.cs b/src/Npgsql/Replication/PgOutput/PgOutputAsyncEnumerable.cs index 5b53e06bdf..d7979a55d8 100644 --- a/src/Npgsql/Replication/PgOutput/PgOutputAsyncEnumerable.cs +++ b/src/Npgsql/Replication/PgOutput/PgOutputAsyncEnumerable.cs @@ -75,13 +75,7 @@ internal PgOutputAsyncEnumerable( } public IAsyncEnumerator GetAsyncEnumerator(CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - { - return StartReplicationInternal( - CancellationTokenSource.CreateLinkedTokenSource(_baseCancellationToken, cancellationToken).Token); - } - } + => StartReplicationInternal(CancellationTokenSource.CreateLinkedTokenSource(_baseCancellationToken, cancellationToken).Token); async IAsyncEnumerator StartReplicationInternal(CancellationToken cancellationToken) { @@ -91,15 +85,15 @@ async IAsyncEnumerator StartReplicationInternal(Canc var inStreamingTransaction = false; var dataFormat = _options.Binary ?? false ? 
DataFormat.Binary : DataFormat.Text; - await foreach (var xLogData in stream.WithCancellation(cancellationToken)) + await foreach (var xLogData in stream.WithCancellation(cancellationToken).ConfigureAwait(false)) { - await buf.EnsureAsync(1); + await buf.EnsureAsync(1).ConfigureAwait(false); var messageCode = (BackendReplicationMessageCode)buf.ReadByte(); switch (messageCode) { case BackendReplicationMessageCode.Begin: { - await buf.EnsureAsync(20); + await buf.EnsureAsync(20).ConfigureAwait(false); yield return _beginMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, transactionFinalLsn: new NpgsqlLogSequenceNumber(buf.ReadUInt64()), transactionCommitTimestamp: PgDateTime.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc), @@ -111,19 +105,19 @@ async IAsyncEnumerator StartReplicationInternal(Canc uint? transactionXid; if (inStreamingTransaction) { - await buf.EnsureAsync(14); + await buf.EnsureAsync(14).ConfigureAwait(false); transactionXid = buf.ReadUInt32(); } else { - await buf.EnsureAsync(10); + await buf.EnsureAsync(10).ConfigureAwait(false); transactionXid = null; } var flags = buf.ReadByte(); var messageLsn = new NpgsqlLogSequenceNumber(buf.ReadUInt64()); - var prefix = await buf.ReadNullTerminatedString(async: true, cancellationToken); - await buf.EnsureAsync(4); + var prefix = await buf.ReadNullTerminatedString(async: true, cancellationToken).ConfigureAwait(false); + await buf.EnsureAsync(4).ConfigureAwait(false); var length = buf.ReadUInt32(); var data = (NpgsqlReadBuffer.ColumnStream)xLogData.Data; data.Init(checked((int)length), canSeek: false, commandScoped: false); @@ -133,7 +127,7 @@ async IAsyncEnumerator StartReplicationInternal(Canc } case BackendReplicationMessageCode.Commit: { - await buf.EnsureAsync(25); + await buf.EnsureAsync(25).ConfigureAwait(false); yield return _commitMessage.Populate( xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, (CommitMessage.CommitFlags)buf.ReadByte(), @@ -144,10 +138,10 @@ async 
IAsyncEnumerator StartReplicationInternal(Canc } case BackendReplicationMessageCode.Origin: { - await buf.EnsureAsync(9); + await buf.EnsureAsync(9).ConfigureAwait(false); yield return _originMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, originCommitLsn: new NpgsqlLogSequenceNumber(buf.ReadUInt64()), - originName: await buf.ReadNullTerminatedString(async: true, cancellationToken)); + originName: await buf.ReadNullTerminatedString(async: true, cancellationToken).ConfigureAwait(false)); continue; } case BackendReplicationMessageCode.Relation: @@ -155,19 +149,19 @@ async IAsyncEnumerator StartReplicationInternal(Canc uint? transactionXid; if (inStreamingTransaction) { - await buf.EnsureAsync(10); + await buf.EnsureAsync(10).ConfigureAwait(false); transactionXid = buf.ReadUInt32(); } else { - await buf.EnsureAsync(6); + await buf.EnsureAsync(6).ConfigureAwait(false); transactionXid = null; } var relationId = buf.ReadUInt32(); - var ns = await buf.ReadNullTerminatedString(async: true, cancellationToken); - var relationName = await buf.ReadNullTerminatedString(async: true, cancellationToken); - await buf.EnsureAsync(3); + var ns = await buf.ReadNullTerminatedString(async: true, cancellationToken).ConfigureAwait(false); + var relationName = await buf.ReadNullTerminatedString(async: true, cancellationToken).ConfigureAwait(false); + await buf.EnsureAsync(3).ConfigureAwait(false); var relationReplicaIdentitySetting = (RelationMessage.ReplicaIdentitySetting)buf.ReadByte(); var numColumns = buf.ReadUInt16(); @@ -181,10 +175,10 @@ async IAsyncEnumerator StartReplicationInternal(Canc columns.Count = numColumns; for (var i = 0; i < numColumns; i++) { - await buf.EnsureAsync(2); + await buf.EnsureAsync(2).ConfigureAwait(false); var flags = (RelationMessage.Column.ColumnFlags)buf.ReadByte(); - var columnName = await buf.ReadNullTerminatedString(async: true, cancellationToken); - await buf.EnsureAsync(8); + var columnName = await 
buf.ReadNullTerminatedString(async: true, cancellationToken).ConfigureAwait(false); + await buf.EnsureAsync(8).ConfigureAwait(false); var dateTypeId = buf.ReadUInt32(); var typeModifier = buf.ReadInt32(); columns[i] = new RelationMessage.Column(flags, columnName, dateTypeId, typeModifier); @@ -201,18 +195,18 @@ async IAsyncEnumerator StartReplicationInternal(Canc uint? transactionXid; if (inStreamingTransaction) { - await buf.EnsureAsync(9); + await buf.EnsureAsync(9).ConfigureAwait(false); transactionXid = buf.ReadUInt32(); } else { - await buf.EnsureAsync(5); + await buf.EnsureAsync(5).ConfigureAwait(false); transactionXid = null; } var typeId = buf.ReadUInt32(); - var ns = await buf.ReadNullTerminatedString(async: true, cancellationToken); - var name = await buf.ReadNullTerminatedString(async: true, cancellationToken); + var ns = await buf.ReadNullTerminatedString(async: true, cancellationToken).ConfigureAwait(false); + var name = await buf.ReadNullTerminatedString(async: true, cancellationToken).ConfigureAwait(false); yield return _typeMessage.Populate( xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, transactionXid, typeId, ns, name); continue; @@ -222,12 +216,12 @@ async IAsyncEnumerator StartReplicationInternal(Canc uint? transactionXid; if (inStreamingTransaction) { - await buf.EnsureAsync(11); + await buf.EnsureAsync(11).ConfigureAwait(false); transactionXid = buf.ReadUInt32(); } else { - await buf.EnsureAsync(7); + await buf.EnsureAsync(7).ConfigureAwait(false); transactionXid = null; } @@ -246,7 +240,7 @@ async IAsyncEnumerator StartReplicationInternal(Canc yield return _insertMessage.Populate( xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, transactionXid, relation, numColumns); - await _insertMessage.Consume(cancellationToken); + await _insertMessage.Consume(cancellationToken).ConfigureAwait(false); continue; } @@ -255,12 +249,12 @@ async IAsyncEnumerator StartReplicationInternal(Canc uint? 
transactionXid; if (inStreamingTransaction) { - await buf.EnsureAsync(11); + await buf.EnsureAsync(11).ConfigureAwait(false); transactionXid = buf.ReadUInt32(); } else { - await buf.EnsureAsync(7); + await buf.EnsureAsync(7).ConfigureAwait(false); transactionXid = null; } @@ -281,17 +275,17 @@ async IAsyncEnumerator StartReplicationInternal(Canc case TupleType.Key: yield return _indexUpdateMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, transactionXid, relation, numColumns); - await _indexUpdateMessage.Consume(cancellationToken); + await _indexUpdateMessage.Consume(cancellationToken).ConfigureAwait(false); continue; case TupleType.OldTuple: yield return _fullUpdateMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, transactionXid, relation, numColumns); - await _fullUpdateMessage.Consume(cancellationToken); + await _fullUpdateMessage.Consume(cancellationToken).ConfigureAwait(false); continue; case TupleType.NewTuple: yield return _defaultUpdateMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, transactionXid, relation, numColumns); - await _defaultUpdateMessage.Consume(cancellationToken); + await _defaultUpdateMessage.Consume(cancellationToken).ConfigureAwait(false); continue; default: throw new NotSupportedException($"The tuple type '{tupleType}' is not supported."); @@ -302,12 +296,12 @@ async IAsyncEnumerator StartReplicationInternal(Canc uint? 
transactionXid; if (inStreamingTransaction) { - await buf.EnsureAsync(11); + await buf.EnsureAsync(11).ConfigureAwait(false); transactionXid = buf.ReadUInt32(); } else { - await buf.EnsureAsync(7); + await buf.EnsureAsync(7).ConfigureAwait(false); transactionXid = null; } @@ -328,12 +322,12 @@ async IAsyncEnumerator StartReplicationInternal(Canc case TupleType.Key: yield return _keyDeleteMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, transactionXid, relation, numColumns); - await _keyDeleteMessage.Consume(cancellationToken); + await _keyDeleteMessage.Consume(cancellationToken).ConfigureAwait(false); continue; case TupleType.OldTuple: yield return _fullDeleteMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, transactionXid, relation, numColumns); - await _fullDeleteMessage.Consume(cancellationToken); + await _fullDeleteMessage.Consume(cancellationToken).ConfigureAwait(false); continue; default: throw new NotSupportedException($"The tuple type '{tupleDataType}' is not supported."); @@ -344,12 +338,12 @@ async IAsyncEnumerator StartReplicationInternal(Canc uint? 
transactionXid; if (inStreamingTransaction) { - await buf.EnsureAsync(9); + await buf.EnsureAsync(9).ConfigureAwait(false); transactionXid = buf.ReadUInt32(); } else { - await buf.EnsureAsync(5); + await buf.EnsureAsync(5).ConfigureAwait(false); transactionXid = null; } @@ -359,7 +353,7 @@ async IAsyncEnumerator StartReplicationInternal(Canc _truncateMessageRelations.Count = numRels; for (var i = 0; i < numRels; i++) { - await buf.EnsureAsync(4); + await buf.EnsureAsync(4).ConfigureAwait(false); var relationId = buf.ReadUInt32(); if (!_relations.TryGetValue(relationId, out var relation)) @@ -377,7 +371,7 @@ async IAsyncEnumerator StartReplicationInternal(Canc } case BackendReplicationMessageCode.StreamStart: { - await buf.EnsureAsync(5); + await buf.EnsureAsync(5).ConfigureAwait(false); inStreamingTransaction = true; yield return _streamStartMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, transactionXid: buf.ReadUInt32(), streamSegmentIndicator: buf.ReadByte()); @@ -391,7 +385,7 @@ async IAsyncEnumerator StartReplicationInternal(Canc } case BackendReplicationMessageCode.StreamCommit: { - await buf.EnsureAsync(29); + await buf.EnsureAsync(29).ConfigureAwait(false); yield return _streamCommitMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, transactionXid: buf.ReadUInt32(), flags: buf.ReadByte(), commitLsn: new NpgsqlLogSequenceNumber(buf.ReadUInt64()), transactionEndLsn: new NpgsqlLogSequenceNumber(buf.ReadUInt64()), @@ -400,14 +394,14 @@ async IAsyncEnumerator StartReplicationInternal(Canc } case BackendReplicationMessageCode.StreamAbort: { - await buf.EnsureAsync(8); + await buf.EnsureAsync(8).ConfigureAwait(false); yield return _streamAbortMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, transactionXid: buf.ReadUInt32(), subtransactionXid: buf.ReadUInt32()); continue; } case BackendReplicationMessageCode.BeginPrepare: { - await buf.EnsureAsync(29); + await 
buf.EnsureAsync(29).ConfigureAwait(false); yield return _beginPrepareMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, prepareLsn: new NpgsqlLogSequenceNumber(buf.ReadUInt64()), prepareEndLsn: new NpgsqlLogSequenceNumber(buf.ReadUInt64()), @@ -418,7 +412,7 @@ async IAsyncEnumerator StartReplicationInternal(Canc } case BackendReplicationMessageCode.Prepare: { - await buf.EnsureAsync(30); + await buf.EnsureAsync(30).ConfigureAwait(false); yield return _prepareMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, flags: (PrepareMessage.PrepareFlags)buf.ReadByte(), prepareLsn: new NpgsqlLogSequenceNumber(buf.ReadUInt64()), @@ -430,7 +424,7 @@ async IAsyncEnumerator StartReplicationInternal(Canc } case BackendReplicationMessageCode.CommitPrepared: { - await buf.EnsureAsync(30); + await buf.EnsureAsync(30).ConfigureAwait(false); yield return _commitPreparedMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, flags: (CommitPreparedMessage.CommitPreparedFlags)buf.ReadByte(), commitPreparedLsn: new NpgsqlLogSequenceNumber(buf.ReadUInt64()), @@ -442,7 +436,7 @@ async IAsyncEnumerator StartReplicationInternal(Canc } case BackendReplicationMessageCode.RollbackPrepared: { - await buf.EnsureAsync(38); + await buf.EnsureAsync(38).ConfigureAwait(false); yield return _rollbackPreparedMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, flags: (RollbackPreparedMessage.RollbackPreparedFlags)buf.ReadByte(), preparedTransactionEndLsn: new NpgsqlLogSequenceNumber(buf.ReadUInt64()), @@ -455,7 +449,7 @@ async IAsyncEnumerator StartReplicationInternal(Canc } case BackendReplicationMessageCode.StreamPrepare: { - await buf.EnsureAsync(30); + await buf.EnsureAsync(30).ConfigureAwait(false); yield return _streamPrepareMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, flags: (StreamPrepareMessage.StreamPrepareFlags)buf.ReadByte(), prepareLsn: new 
NpgsqlLogSequenceNumber(buf.ReadUInt64()), diff --git a/src/Npgsql/Replication/PgOutput/ReplicationTuple.cs b/src/Npgsql/Replication/PgOutput/ReplicationTuple.cs index 4ef1e6bf6b..43bd08b4ac 100644 --- a/src/Npgsql/Replication/PgOutput/ReplicationTuple.cs +++ b/src/Npgsql/Replication/PgOutput/ReplicationTuple.cs @@ -58,10 +58,10 @@ internal async Task Consume(CancellationToken cancellationToken) case RowState.NotRead: State = RowState.Reading; _tupleEnumerator.Reset(NumColumns, _rowDescription, cancellationToken); - while (await _tupleEnumerator.MoveNextAsync()) { } + while (await _tupleEnumerator.MoveNextAsync().ConfigureAwait(false)) { } break; case RowState.Reading: - while (await _tupleEnumerator.MoveNextAsync()) { } + while (await _tupleEnumerator.MoveNextAsync().ConfigureAwait(false)) { } break; case RowState.Consumed: return; @@ -76,4 +76,4 @@ enum RowState NotRead, Reading, Consumed -} \ No newline at end of file +} diff --git a/src/Npgsql/Replication/PgOutput/ReplicationValue.cs b/src/Npgsql/Replication/PgOutput/ReplicationValue.cs index 7c5f104f3e..b0da855b61 100644 --- a/src/Npgsql/Replication/PgOutput/ReplicationValue.cs +++ b/src/Npgsql/Replication/PgOutput/ReplicationValue.cs @@ -84,12 +84,12 @@ public bool IsUnchangedToastedValue /// An optional token to cancel the asynchronous operation. The default value is . 
/// /// - public ValueTask Get(CancellationToken cancellationToken = default) + public async ValueTask Get(CancellationToken cancellationToken = default) { CheckActive(); - ref var info = ref _lastInfo; - _fieldDescription.GetInfo(typeof(T), ref info); + _fieldDescription.GetInfo(typeof(T), ref _lastInfo); + var info = _lastInfo; switch (Kind) { @@ -99,7 +99,7 @@ public ValueTask Get(CancellationToken cancellationToken = default) return default!; if (typeof(T) == typeof(object)) - return new ValueTask((T)(object)DBNull.Value); + return (T)(object)DBNull.Value; ThrowHelper.ThrowInvalidCastException_NoValue(_fieldDescription); break; @@ -109,21 +109,15 @@ public ValueTask Get(CancellationToken cancellationToken = default) $"Column '{_fieldDescription.Name}' is an unchanged TOASTed value (actual value not sent)."); } - using (NoSynchronizationContextScope.Enter()) - return GetCore(info, _fieldDescription.DataFormat, _readBuffer, Length, cancellationToken); + using var registration = _readBuffer.Connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); - static async ValueTask GetCore(PgConverterInfo info, DataFormat format, NpgsqlReadBuffer buffer, int length, CancellationToken cancellationToken) - { - using var registration = buffer.Connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); - - var reader = buffer.PgReader.Init(length, format); - await reader.StartReadAsync(info.BufferRequirement, cancellationToken); - var result = info.AsObject - ? (T)await info.Converter.ReadAsObjectAsync(reader, cancellationToken) - : await info.GetConverter().ReadAsync(reader, cancellationToken); - await reader.EndReadAsync(); - return result; - } + var reader = PgReader.Init(Length, _fieldDescription.DataFormat); + await reader.StartReadAsync(info.BufferRequirement, cancellationToken).ConfigureAwait(false); + var result = info.AsObject + ? 
(T)await info.Converter.ReadAsObjectAsync(reader, cancellationToken).ConfigureAwait(false) + : await info.GetConverter().ReadAsync(reader, cancellationToken).ConfigureAwait(false); + await reader.EndReadAsync().ConfigureAwait(false); + return result; } /// @@ -190,8 +184,8 @@ internal async Task Consume(CancellationToken cancellationToken) if (!PgReader.Initialized) PgReader.Init(Length, _fieldDescription.DataFormat); - await PgReader.ConsumeAsync(cancellationToken: cancellationToken); - await PgReader.Commit(async: true, resuming: false); + await PgReader.ConsumeAsync(cancellationToken: cancellationToken).ConfigureAwait(false); + await PgReader.Commit(async: true, resuming: false).ConfigureAwait(false); _isConsumed = true; } diff --git a/src/Npgsql/Replication/PgOutput/TupleEnumerator.cs b/src/Npgsql/Replication/PgOutput/TupleEnumerator.cs index dc54a92515..cee25671af 100644 --- a/src/Npgsql/Replication/PgOutput/TupleEnumerator.cs +++ b/src/Npgsql/Replication/PgOutput/TupleEnumerator.cs @@ -38,21 +38,20 @@ public ValueTask MoveNextAsync() if (_tupleEnumerable.State != RowState.Reading) throw new ObjectDisposedException(null); - using (NoSynchronizationContextScope.Enter()) - return MoveNextCore(); + return MoveNextCore(); async ValueTask MoveNextCore() { // Consume the previous column if (_pos != -1) - await _value.Consume(_cancellationToken); + await _value.Consume(_cancellationToken).ConfigureAwait(false); if (_pos + 1 == _numColumns) return false; _pos++; // Read the next column - await _readBuffer.Ensure(1, async: true); + await _readBuffer.Ensure(1, async: true).ConfigureAwait(false); var kind = (TupleDataKind)_readBuffer.ReadByte(); int len; switch (kind) @@ -63,7 +62,7 @@ async ValueTask MoveNextCore() break; case TupleDataKind.TextValue: case TupleDataKind.BinaryValue: - await _readBuffer.Ensure(4, async: true); + await _readBuffer.Ensure(4, async: true).ConfigureAwait(false); len = _readBuffer.ReadInt32(); break; default: @@ -87,7 +86,7 @@ async ValueTask 
MoveNextCore() public async ValueTask DisposeAsync() { if (_tupleEnumerable.State == RowState.Reading) - while (await MoveNextAsync()) { /* Do nothing, just iterate the enumerator */ } + while (await MoveNextAsync().ConfigureAwait(false)) { /* Do nothing, just iterate the enumerator */ } _tupleEnumerable.State = RowState.Consumed; } diff --git a/src/Npgsql/Replication/PhysicalReplicationConnection.cs b/src/Npgsql/Replication/PhysicalReplicationConnection.cs index 9d5faeca98..05d0af33ca 100644 --- a/src/Npgsql/Replication/PhysicalReplicationConnection.cs +++ b/src/Npgsql/Replication/PhysicalReplicationConnection.cs @@ -50,31 +50,25 @@ public PhysicalReplicationConnection(string? connectionString) : base(connection /// A representing a that represents the /// newly-created replication slot. /// - public Task CreateReplicationSlot( + public async Task CreateReplicationSlot( string slotName, bool isTemporary = false, bool reserveWal = false, CancellationToken cancellationToken = default) { CheckDisposed(); - using var _ = NoSynchronizationContextScope.Enter(); - return CreatePhysicalReplicationSlot(slotName, isTemporary, reserveWal, cancellationToken); + var builder = new StringBuilder("CREATE_REPLICATION_SLOT ").Append(slotName); + if (isTemporary) + builder.Append(" TEMPORARY"); + builder.Append(" PHYSICAL"); + if (reserveWal) + builder.Append(PostgreSqlVersion.Major >= 15 ? " (RESERVE_WAL)" : " RESERVE_WAL"); - async Task CreatePhysicalReplicationSlot(string slotName, bool isTemporary, bool reserveWal, CancellationToken cancellationToken) - { - var builder = new StringBuilder("CREATE_REPLICATION_SLOT ").Append(slotName); - if (isTemporary) - builder.Append(" TEMPORARY"); - builder.Append(" PHYSICAL"); - if (reserveWal) - builder.Append(PostgreSqlVersion.Major >= 15 ? 
" (RESERVE_WAL)" : " RESERVE_WAL"); - - var command = builder.ToString(); + var command = builder.ToString(); - LogMessages.CreatingReplicationSlot(ReplicationLogger, slotName, command, Connector.Id); + LogMessages.CreatingReplicationSlot(ReplicationLogger, slotName, command, Connector.Id); - var slotOptions = await CreateReplicationSlot(builder.ToString(), cancellationToken); + var slotOptions = await CreateReplicationSlot(builder.ToString(), cancellationToken).ConfigureAwait(false); - return new PhysicalReplicationSlot(slotOptions.SlotName); - } + return new PhysicalReplicationSlot(slotOptions.SlotName); } /// @@ -92,10 +86,7 @@ async Task CreatePhysicalReplicationSlot(string slotNam /// A representing a or /// if the replication slot does not exist. public Task ReadReplicationSlot(string slotName, CancellationToken cancellationToken = default) - { - using (NoSynchronizationContextScope.Enter()) - return ReadReplicationSlotInternal(slotName, cancellationToken); - } + => ReadReplicationSlotInternal(slotName, cancellationToken); /// /// Instructs the server to start streaming the WAL for physical replication, starting at WAL location @@ -121,9 +112,9 @@ public IAsyncEnumerable StartReplication(PhysicalReplicationSlo CancellationToken cancellationToken, uint timeline = default) { - using (NoSynchronizationContextScope.Enter()) - return StartPhysicalReplication(slot, walLocation, cancellationToken, timeline); + return StartPhysicalReplication(slot, walLocation, cancellationToken, timeline); + // Local method to avoid having to add the EnumeratorCancellation attribute to the public signature. async IAsyncEnumerable StartPhysicalReplication(PhysicalReplicationSlot? 
slot, NpgsqlLogSequenceNumber walLocation, [EnumeratorCancellation] CancellationToken cancellationToken, @@ -141,7 +132,7 @@ async IAsyncEnumerable StartPhysicalReplication(PhysicalReplica LogMessages.StartingPhysicalReplication(ReplicationLogger, slot?.Name, command, Connector.Id); var enumerator = StartReplicationInternalWrapper(command, bypassingStream: false, cancellationToken); - while (await enumerator.MoveNextAsync()) + while (await enumerator.MoveNextAsync().ConfigureAwait(false)) yield return enumerator.Current; } } diff --git a/src/Npgsql/Replication/ReplicationConnection.cs b/src/Npgsql/Replication/ReplicationConnection.cs index 5b0381afb0..c30892760c 100644 --- a/src/Npgsql/Replication/ReplicationConnection.cs +++ b/src/Npgsql/Replication/ReplicationConnection.cs @@ -251,53 +251,47 @@ public async Task Open(CancellationToken cancellationToken = default) /// with freeing, releasing, or resetting its unmanaged resources asynchronously. /// /// A task that represents the asynchronous dispose operation. - public ValueTask DisposeAsync() + public async ValueTask DisposeAsync() { - using (NoSynchronizationContextScope.Enter()) - return DisposeAsyncCore(); + if (_isDisposed) + return; - async ValueTask DisposeAsyncCore() + if (_npgsqlConnection.Connector?.State == ConnectorState.Replication) { - if (_isDisposed) - return; + Debug.Assert(_currentEnumerator is not null); + Debug.Assert(_replicationCancellationTokenSource is not null); - if (_npgsqlConnection.Connector?.State == ConnectorState.Replication) + // Replication is in progress; cancel it (soft or hard) and iterate the enumerator until we get the cancellation + // exception. Note: this isn't thread-safe: a user calling DisposeAsync and enumerating at the same time is violating + // our contract. 
+ _replicationCancellationTokenSource.Cancel(); + try { - Debug.Assert(_currentEnumerator is not null); - Debug.Assert(_replicationCancellationTokenSource is not null); - - // Replication is in progress; cancel it (soft or hard) and iterate the enumerator until we get the cancellation - // exception. Note: this isn't thread-safe: a user calling DisposeAsync and enumerating at the same time is violating - // our contract. - _replicationCancellationTokenSource.Cancel(); - try - { - while (await _currentEnumerator.MoveNextAsync()) - { - // Do nothing with messages - simply enumerate until cancellation/termination - } - } - catch + while (await _currentEnumerator.MoveNextAsync().ConfigureAwait(false)) { - // Cancellation/termination occurred + // Do nothing with messages - simply enumerate until cancellation/termination } } - - Debug.Assert(_sendFeedbackTimer is null, "Send feedback timer isn't null at replication shutdown"); - Debug.Assert(_requestFeedbackTimer is null, "Request feedback timer isn't null at replication shutdown"); - _feedbackSemaphore.Dispose(); - - try - { - await _npgsqlConnection.Close(async: true); - } catch { - // Dispose + // Cancellation/termination occurred } + } + + Debug.Assert(_sendFeedbackTimer is null, "Send feedback timer isn't null at replication shutdown"); + Debug.Assert(_requestFeedbackTimer is null, "Request feedback timer isn't null at replication shutdown"); + _feedbackSemaphore.Dispose(); - _isDisposed = true; + try + { + await _npgsqlConnection.Close(async: true).ConfigureAwait(false); } + catch + { + // Dispose + } + + _isDisposed = true; } #endregion Open / Dispose @@ -313,19 +307,12 @@ async ValueTask DisposeAsyncCore() /// /// A containing information about the system we are connected to. 
/// - public Task IdentifySystem(CancellationToken cancellationToken = default) + public async Task IdentifySystem(CancellationToken cancellationToken = default) { - using (NoSynchronizationContextScope.Enter()) - return IdentifySystemInternal(cancellationToken); - - async Task IdentifySystemInternal(CancellationToken cancellationToken) - { - var row = await ReadSingleRow("IDENTIFY_SYSTEM", cancellationToken); - return new ReplicationSystemIdentification( - (string)row[0], (uint)row[1], NpgsqlLogSequenceNumber.Parse((string)row[2]), (string)row[3]); - } + var row = await ReadSingleRow("IDENTIFY_SYSTEM", cancellationToken).ConfigureAwait(false); + return new ReplicationSystemIdentification( + (string)row[0], (uint)row[1], NpgsqlLogSequenceNumber.Parse((string)row[2]), (string)row[3]); } - /// /// Requests the server to send the current setting of a run-time parameter. /// This is similar to the SQL command SHOW. @@ -342,11 +329,10 @@ public Task Show(string parameterName, CancellationToken cancellationTok if (parameterName is null) throw new ArgumentNullException(nameof(parameterName)); - using (NoSynchronizationContextScope.Enter()) - return ShowInternal(parameterName, cancellationToken); + return ShowInternal(parameterName, cancellationToken); async Task ShowInternal(string parameterName, CancellationToken cancellationToken) - => (string)(await ReadSingleRow("SHOW " + parameterName, cancellationToken))[0]; + => (string)(await ReadSingleRow("SHOW " + parameterName, cancellationToken).ConfigureAwait(false))[0]; } /// @@ -357,23 +343,17 @@ async Task ShowInternal(string parameterName, CancellationToken cancella /// An optional token to cancel the asynchronous operation. The default value is . 
/// /// The timeline history file for timeline tli - public Task TimelineHistory(uint tli, CancellationToken cancellationToken = default) + public async Task TimelineHistory(uint tli, CancellationToken cancellationToken = default) { - using (NoSynchronizationContextScope.Enter()) - return TimelineHistoryInternal(tli, cancellationToken); - - async Task TimelineHistoryInternal(uint tli, CancellationToken cancellationToken) - { - var result = await ReadSingleRow($"TIMELINE_HISTORY {tli:D}", cancellationToken); - return new TimelineHistoryFile((string)result[0], (byte[])result[1]); - } + var result = await ReadSingleRow($"TIMELINE_HISTORY {tli:D}", cancellationToken).ConfigureAwait(false); + return new TimelineHistoryFile((string)result[0], (byte[])result[1]); } internal async Task CreateReplicationSlot(string command, CancellationToken cancellationToken = default) { try { - var result = await ReadSingleRow(command, cancellationToken); + var result = await ReadSingleRow(command, cancellationToken).ConfigureAwait(false); var slotName = (string)result[0]; var consistentPoint = (string)result[1]; var snapshotName = (string?)result[2]; @@ -408,7 +388,7 @@ internal async Task CreateReplicationSlot(string command internal async Task ReadReplicationSlotInternal(string slotName, CancellationToken cancellationToken = default) { - var result = await ReadSingleRow($"READ_REPLICATION_SLOT {slotName}", cancellationToken); + var result = await ReadSingleRow($"READ_REPLICATION_SLOT {slotName}", cancellationToken).ConfigureAwait(false); var slotType = (string?)result[0]; // Currently (2021-12-30) slot_type is always 'physical' for existing slots or null for slot names that don't exist but that @@ -457,10 +437,10 @@ internal async IAsyncEnumerator StartReplicationInternal( try { - await connector.WriteQuery(command, true, cancellationToken); - await connector.Flush(true, cancellationToken); + await connector.WriteQuery(command, true, cancellationToken).ConfigureAwait(false); + await 
connector.Flush(true, cancellationToken).ConfigureAwait(false); - var msg = await connector.ReadMessage(true); + var msg = await connector.ReadMessage(true).ConfigureAwait(false); switch (msg.Code) { case BackendMessageCode.CopyBothResponse: @@ -484,7 +464,7 @@ internal async IAsyncEnumerator StartReplicationInternal( while (true) { - msg = await connector.ReadMessage(async: true); + msg = await connector.ReadMessage(async: true).ConfigureAwait(false); Expect(msg, Connector); // We received some message so there's no need to forcibly request feedback @@ -492,13 +472,13 @@ internal async IAsyncEnumerator StartReplicationInternal( _requestFeedbackTimer.Change(_requestFeedbackInterval, Timeout.InfiniteTimeSpan); var messageLength = ((CopyDataMessage)msg).Length; - await buf.EnsureAsync(1); + await buf.EnsureAsync(1).ConfigureAwait(false); var code = (char)buf.ReadByte(); switch (code) { case 'w': // XLogData { - await buf.EnsureAsync(24); + await buf.EnsureAsync(24).ConfigureAwait(false); var startLsn = buf.ReadUInt64(); var endLsn = buf.ReadUInt64(); var sendTime = PgDateTime.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc); @@ -519,14 +499,14 @@ internal async IAsyncEnumerator StartReplicationInternal( // Our consumer may not have read the stream to the end, but it might as well have been us // ourselves bypassing the stream and reading directly from the buffer in StartReplication() if (!columnStream.IsDisposed && columnStream.Position < columnStream.Length && !bypassingStream) - await buf.Skip(checked((int)(columnStream.Length - columnStream.Position)), true); + await buf.Skip(checked((int)(columnStream.Length - columnStream.Position)), true).ConfigureAwait(false); continue; } case 'k': // Primary keepalive message { - await buf.EnsureAsync(17); + await buf.EnsureAsync(17).ConfigureAwait(false); var end = buf.ReadUInt64(); if (ReplicationLogger.IsEnabled(LogLevel.Trace)) @@ -545,7 +525,7 @@ internal async IAsyncEnumerator StartReplicationInternal( if 
(replyRequested) { LogMessages.SendingReplicationStandbyStatusUpdate(ReplicationLogger, "the server requested it", Connector.Id); - await SendFeedback(waitOnSemaphore: true, cancellationToken: CancellationToken.None); + await SendFeedback(waitOnSemaphore: true, cancellationToken: CancellationToken.None).ConfigureAwait(false); } continue; @@ -559,7 +539,7 @@ internal async IAsyncEnumerator StartReplicationInternal( finally { if (columnStream != null && !bypassingStream && !_replicationCancellationTokenSource.Token.IsCancellationRequested) - await columnStream.DisposeAsync(); + await columnStream.DisposeAsync().ConfigureAwait(false); #if NETSTANDARD2_0 if (_sendFeedbackTimer != null) @@ -568,7 +548,7 @@ internal async IAsyncEnumerator StartReplicationInternal( var actuallyDisposed = _sendFeedbackTimer.Dispose(mre); Debug.Assert(actuallyDisposed, $"{nameof(_sendFeedbackTimer)} had already been disposed when completing replication"); if (actuallyDisposed) - await mre.WaitOneAsync(cancellationToken); + await mre.WaitOneAsync(cancellationToken).ConfigureAwait(false); } if (_requestFeedbackTimer != null) @@ -577,14 +557,14 @@ internal async IAsyncEnumerator StartReplicationInternal( var actuallyDisposed = _requestFeedbackTimer.Dispose(mre); Debug.Assert(actuallyDisposed, $"{nameof(_requestFeedbackTimer)} had already been disposed when completing replication"); if (actuallyDisposed) - await mre.WaitOneAsync(cancellationToken); + await mre.WaitOneAsync(cancellationToken).ConfigureAwait(false); } #else if (_sendFeedbackTimer != null) - await _sendFeedbackTimer.DisposeAsync(); + await _sendFeedbackTimer.DisposeAsync().ConfigureAwait(false); if (_requestFeedbackTimer != null) - await _requestFeedbackTimer.DisposeAsync(); + await _requestFeedbackTimer.DisposeAsync().ConfigureAwait(false); #endif _sendFeedbackTimer = null; _requestFeedbackTimer = null; @@ -626,31 +606,25 @@ public void SetReplicationStatus(NpgsqlLogSequenceNumber lastAppliedAndFlushedLs /// /// The connection 
currently isn't streaming /// A Task representing the sending of the status update (and not any PostgreSQL response). - public Task SendStatusUpdate(CancellationToken cancellationToken = default) + public async Task SendStatusUpdate(CancellationToken cancellationToken = default) { - using (NoSynchronizationContextScope.Enter()) - return SendStatusUpdateInternal(cancellationToken); - - async Task SendStatusUpdateInternal(CancellationToken cancellationToken) - { - CheckDisposed(); - cancellationToken.ThrowIfCancellationRequested(); + CheckDisposed(); + cancellationToken.ThrowIfCancellationRequested(); - // TODO: If the user accidentally does concurrent usage of the connection, the following is vulnerable to race conditions. - // However, we generally aren't safe for this in Npgsql, leaving as-is for now. - if (Connector.State != ConnectorState.Replication) - throw new InvalidOperationException("Status update can only be sent during replication"); + // TODO: If the user accidentally does concurrent usage of the connection, the following is vulnerable to race conditions. + // However, we generally aren't safe for this in Npgsql, leaving as-is for now. + if (Connector.State != ConnectorState.Replication) + throw new InvalidOperationException("Status update can only be sent during replication"); - LogMessages.SendingReplicationStandbyStatusUpdate(ReplicationLogger, nameof(SendStatusUpdate) + "was called", Connector.Id); - await SendFeedback(waitOnSemaphore: true, cancellationToken: cancellationToken); - } + LogMessages.SendingReplicationStandbyStatusUpdate(ReplicationLogger, nameof(SendStatusUpdate) + "was called", Connector.Id); + await SendFeedback(waitOnSemaphore: true, cancellationToken: cancellationToken).ConfigureAwait(false); } async Task SendFeedback(bool waitOnSemaphore = false, bool requestReply = false, CancellationToken cancellationToken = default) { var taken = waitOnSemaphore - ? 
await _feedbackSemaphore.WaitAsync(Timeout.Infinite, cancellationToken) - : await _feedbackSemaphore.WaitAsync(TimeSpan.Zero, cancellationToken); + ? await _feedbackSemaphore.WaitAsync(Timeout.Infinite, cancellationToken).ConfigureAwait(false) + : await _feedbackSemaphore.WaitAsync(TimeSpan.Zero, cancellationToken).ConfigureAwait(false); if (!taken) { @@ -666,7 +640,7 @@ async Task SendFeedback(bool waitOnSemaphore = false, bool requestReply = false, const int len = 39; if (buf.WriteSpaceLeft < len) - await connector.Flush(async: true, cancellationToken); + await connector.Flush(async: true, cancellationToken).ConfigureAwait(false); buf.WriteByte(FrontendMessageCode.CopyData); buf.WriteInt32(len - 1); @@ -682,7 +656,7 @@ async Task SendFeedback(bool waitOnSemaphore = false, bool requestReply = false, buf.WriteInt64(PgDateTime.EncodeTimestamp(timestamp)); buf.WriteByte(requestReply ? (byte)1 : (byte)0); - await connector.Flush(async: true, cancellationToken); + await connector.Flush(async: true, cancellationToken).ConfigureAwait(false); if (ReplicationLogger.IsEnabled(LogLevel.Trace)) { @@ -718,7 +692,7 @@ async void TimerRequestFeedback(object? obj) if (ReplicationLogger.IsEnabled(LogLevel.Trace)) LogMessages.SendingReplicationStandbyStatusUpdate(ReplicationLogger, $"half of the {nameof(WalReceiverTimeout)} of {WalReceiverTimeout} has expired", Connector.Id); - await SendFeedback(waitOnSemaphore: true, requestReply: true); + await SendFeedback(waitOnSemaphore: true, requestReply: true).ConfigureAwait(false); } catch { @@ -736,7 +710,7 @@ async void TimerSendFeedback(object? 
obj) if (ReplicationLogger.IsEnabled(LogLevel.Trace)) LogMessages.SendingReplicationStandbyStatusUpdate(ReplicationLogger, $"{nameof(WalReceiverStatusInterval)} of {WalReceiverStatusInterval} has expired", Connector.Id); - await SendFeedback(); + await SendFeedback().ConfigureAwait(false); } catch { @@ -763,13 +737,12 @@ public Task DropReplicationSlot(string slotName, bool wait = false, Cancellation if (slotName is null) throw new ArgumentNullException(nameof(slotName)); - using (NoSynchronizationContextScope.Enter()) - return DropReplicationSlotInternal(slotName, wait, cancellationToken); + CheckDisposed(); + + return DropReplicationSlotInternal(slotName, wait, cancellationToken); async Task DropReplicationSlotInternal(string slotName, bool wait, CancellationToken cancellationToken) { - CheckDisposed(); - using var _ = Connector.StartUserAction(cancellationToken, attemptPgCancellation: _pgCancellationSupported); var command = "DROP_REPLICATION_SLOT " + slotName; @@ -778,16 +751,16 @@ async Task DropReplicationSlotInternal(string slotName, bool wait, CancellationT LogMessages.DroppingReplicationSlot(ReplicationLogger, slotName, command, Connector.Id); - await Connector.WriteQuery(command, true, CancellationToken.None); - await Connector.Flush(true, CancellationToken.None); + await Connector.WriteQuery(command, true, CancellationToken.None).ConfigureAwait(false); + await Connector.Flush(true, CancellationToken.None).ConfigureAwait(false); - Expect(await Connector.ReadMessage(true), Connector); + Expect(await Connector.ReadMessage(true).ConfigureAwait(false), Connector); // Two CommandComplete messages are returned if (PostgreSqlVersion < FirstVersionWithoutDropSlotDoubleCommandCompleteMessage) - Expect(await Connector.ReadMessage(true), Connector); + Expect(await Connector.ReadMessage(true).ConfigureAwait(false), Connector); - Expect(await Connector.ReadMessage(true), Connector); + Expect(await Connector.ReadMessage(true).ConfigureAwait(false), Connector); } } @@ 
-801,22 +774,22 @@ async Task ReadSingleRow(string command, CancellationToken cancellatio LogMessages.ExecutingReplicationCommand(ReplicationLogger, command, Connector.Id); - await Connector.WriteQuery(command, true, cancellationToken); - await Connector.Flush(true, cancellationToken); + await Connector.WriteQuery(command, true, cancellationToken).ConfigureAwait(false); + await Connector.Flush(true, cancellationToken).ConfigureAwait(false); - var rowDescription = Expect(await Connector.ReadMessage(true), Connector); - Expect(await Connector.ReadMessage(true), Connector); + var rowDescription = Expect(await Connector.ReadMessage(true).ConfigureAwait(false), Connector); + Expect(await Connector.ReadMessage(true).ConfigureAwait(false), Connector); var buf = Connector.ReadBuffer; - await buf.EnsureAsync(2); + await buf.EnsureAsync(2).ConfigureAwait(false); var results = new object[buf.ReadInt16()]; for (var i = 0; i < results.Length; i++) { - await buf.EnsureAsync(4); + await buf.EnsureAsync(4).ConfigureAwait(false); var len = buf.ReadInt32(); if (len == -1) continue; - await buf.EnsureAsync(len); + await buf.EnsureAsync(len).ConfigureAwait(false); var field = rowDescription[i]; switch (field.PostgresType.Name) { @@ -868,8 +841,8 @@ async Task ReadSingleRow(string command, CancellationToken cancellatio } } - Expect(await Connector.ReadMessage(true), Connector); - Expect(await Connector.ReadMessage(true), Connector); + Expect(await Connector.ReadMessage(true).ConfigureAwait(false), Connector); + Expect(await Connector.ReadMessage(true).ConfigureAwait(false), Connector); return results; static byte[] ParseBytea(ReadOnlySpan bytes) diff --git a/src/Npgsql/Replication/TestDecoding/TestDecodingAsyncEnumerable.cs b/src/Npgsql/Replication/TestDecoding/TestDecodingAsyncEnumerable.cs index 406b39db7f..aca7ee70ea 100644 --- a/src/Npgsql/Replication/TestDecoding/TestDecodingAsyncEnumerable.cs +++ b/src/Npgsql/Replication/TestDecoding/TestDecodingAsyncEnumerable.cs @@ -33,17 
+33,10 @@ internal TestDecodingAsyncEnumerable( _walLocation = walLocation; } - public IAsyncEnumerator GetAsyncEnumerator(CancellationToken cancellationToken = default) + public async IAsyncEnumerator GetAsyncEnumerator(CancellationToken cancellationToken = default) { - using (NoSynchronizationContextScope.Enter()) - { - return StartReplicationInternal( - CancellationTokenSource.CreateLinkedTokenSource(_baseCancellationToken, cancellationToken).Token); - } - } + cancellationToken = CancellationTokenSource.CreateLinkedTokenSource(_baseCancellationToken, cancellationToken).Token; - async IAsyncEnumerator StartReplicationInternal(CancellationToken cancellationToken) - { var stream = _connection.StartLogicalReplication( _slot, cancellationToken, _walLocation, _options.GetOptionPairs()); var encoding = _connection.Encoding!; @@ -52,7 +45,7 @@ async IAsyncEnumerator StartReplicationInternal(CancellationTo try { - await foreach (var msg in stream.WithCancellation(cancellationToken)) + await foreach (var msg in stream.ConfigureAwait(false)) { var len = (int)msg.Data.Length; Debug.Assert(msg.Data.Position == 0); @@ -65,7 +58,7 @@ async IAsyncEnumerator StartReplicationInternal(CancellationTo var offset = 0; while (offset < len) { - var read = await msg.Data.ReadAsync(buffer, offset, len - offset, CancellationToken.None); + var read = await msg.Data.ReadAsync(buffer, offset, len - offset, CancellationToken.None).ConfigureAwait(false); if (read == 0) throw new EndOfStreamException(); offset += read; @@ -82,4 +75,4 @@ async IAsyncEnumerator StartReplicationInternal(CancellationTo ArrayPool.Shared.Return(buffer); } } -} \ No newline at end of file +} diff --git a/src/Npgsql/Schema/DbColumnSchemaGenerator.cs b/src/Npgsql/Schema/DbColumnSchemaGenerator.cs index 707bfbb36a..d587c8a6b7 100644 --- a/src/Npgsql/Schema/DbColumnSchemaGenerator.cs +++ b/src/Npgsql/Schema/DbColumnSchemaGenerator.cs @@ -131,13 +131,13 @@ internal async Task> GetColumnSchema(bool asy async ? 
TransactionScopeAsyncFlowOption.Enabled : TransactionScopeAsyncFlowOption.Suppress); using var connection = (NpgsqlConnection)((ICloneable)_connection).Clone(); - await connection.Open(async, cancellationToken); + await connection.Open(async, cancellationToken).ConfigureAwait(false); using var cmd = new NpgsqlCommand(query, connection); - var reader = await cmd.ExecuteReader(CommandBehavior.Default, async, cancellationToken); + var reader = await cmd.ExecuteReader(async, CommandBehavior.Default, cancellationToken).ConfigureAwait(false); try { - while (async ? await reader.ReadAsync(cancellationToken) : reader.Read()) + while (async ? await reader.ReadAsync(cancellationToken).ConfigureAwait(false) : reader.Read()) { var column = LoadColumnDefinition(reader, _connection.Connector!.DatabaseInfo, oldQueryMode); for (var ordinal = 0; ordinal < numFields; ordinal++) @@ -161,7 +161,7 @@ internal async Task> GetColumnSchema(bool asy finally { if (async) - await reader.DisposeAsync(); + await reader.DisposeAsync().ConfigureAwait(false); else reader.Dispose(); } diff --git a/src/Npgsql/Shims/StreamExtensions.cs b/src/Npgsql/Shims/StreamExtensions.cs index 5215b02ce0..6a6a54231b 100644 --- a/src/Npgsql/Shims/StreamExtensions.cs +++ b/src/Npgsql/Shims/StreamExtensions.cs @@ -58,7 +58,7 @@ public static async ValueTask ReadAsync(this Stream stream, Memory bu var sharedBuffer = ArrayPool.Shared.Rent(buffer.Length); try { - var result = await stream.ReadAsync(sharedBuffer, 0, buffer.Length, cancellationToken); + var result = await stream.ReadAsync(sharedBuffer, 0, buffer.Length, cancellationToken).ConfigureAwait(false); new Span(sharedBuffer, 0, result).CopyTo(buffer.Span); return result; } @@ -88,7 +88,7 @@ public static async ValueTask WriteAsync(this Stream stream, ReadOnlyMemory WaitOneAsync( state: tcs, millisecondsTimeout, executeOnlyOnce: true); - return await tcs.Task; + return await tcs.Task.ConfigureAwait(false); } finally { diff --git 
a/src/Npgsql/TaskTimeoutAndCancellation.cs b/src/Npgsql/TaskTimeoutAndCancellation.cs index 359c4947b1..ceed87ba94 100644 --- a/src/Npgsql/TaskTimeoutAndCancellation.cs +++ b/src/Npgsql/TaskTimeoutAndCancellation.cs @@ -24,8 +24,8 @@ static class TaskTimeoutAndCancellation internal static async Task ExecuteAsync(Func> getTaskFunc, NpgsqlTimeout timeout, CancellationToken cancellationToken) { Task? task = default; - await ExecuteAsync(ct => (Task)(task = getTaskFunc(ct)), timeout, cancellationToken); - return await task!; + await ExecuteAsync(ct => (Task)(task = getTaskFunc(ct)), timeout, cancellationToken).ConfigureAwait(false); + return await task!.ConfigureAwait(false); } /// @@ -46,7 +46,7 @@ internal static async Task ExecuteAsync(Func getTaskFun { try { - await task.WaitAsync(timeout.CheckAndGetTimeLeft(), cancellationToken); + await task.WaitAsync(timeout.CheckAndGetTimeLeft(), cancellationToken).ConfigureAwait(false); } catch (TimeoutException) when (!task!.IsCompleted) { diff --git a/src/Npgsql/UnpooledDataSource.cs b/src/Npgsql/UnpooledDataSource.cs index 3e3cf5b019..549a45f9b8 100644 --- a/src/Npgsql/UnpooledDataSource.cs +++ b/src/Npgsql/UnpooledDataSource.cs @@ -25,7 +25,7 @@ internal override async ValueTask Get( CheckDisposed(); var connector = new NpgsqlConnector(this, conn); - await connector.Open(timeout, async, cancellationToken); + await connector.Open(timeout, async, cancellationToken).ConfigureAwait(false); Interlocked.Increment(ref _numConnectors); return connector; } From 4e50a113aa4233dab3194a33915cf61de3b53f76 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Wed, 27 Sep 2023 01:39:05 +0200 Subject: [PATCH 217/761] Fix dbtype and npgsqldbtype inference (#5283) --- src/Npgsql/TypeMapping/GlobalTypeMapper.cs | 32 +++++++++++++--------- test/Npgsql.Tests/Support/TestBase.cs | 19 +++++++------ 2 files changed, 29 insertions(+), 22 deletions(-) diff --git a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs 
index 719ee48ccf..e1739ecd96 100644 --- a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs +++ b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs @@ -93,20 +93,19 @@ PgSerializerOptions TypeMappingOptions internal DataTypeName? TryGetDataTypeName(Type type, object value) { - var typeInfo = TypeMappingOptions.GetTypeInfo(type); DataTypeName? dataTypeName; - if (typeInfo is PgResolverTypeInfo info) - try - { + try + { + var typeInfo = TypeMappingOptions.GetTypeInfo(type); + if (typeInfo is PgResolverTypeInfo info) dataTypeName = info.GetObjectResolution(value).PgTypeId.DataTypeName; - } - catch - { - dataTypeName = null; - } - else - dataTypeName = typeInfo?.GetConcreteResolution().PgTypeId.DataTypeName; - + else + dataTypeName = typeInfo?.GetConcreteResolution().PgTypeId.DataTypeName; + } + catch + { + dataTypeName = null; + } return dataTypeName; } @@ -184,6 +183,8 @@ public INpgsqlTypeMapper MapEnum(string? pgName = null, INpgsqlNameTransl if (_userTypeMapper.Items.FirstOrDefault(i => i.ClrType == typeof(TEnum)) is UserTypeMapping userTypeMapping) HackyEnumTypeMappings.Add(new(typeof(TEnum), userTypeMapping.PgTypeName, nameTranslator ?? DefaultNameTranslator)); + ResetTypeMappingCache(); + return this; } finally @@ -204,6 +205,8 @@ public bool UnmapEnum(string? pgName = null, INpgsqlNameTranslator? nameT if (removed && ((List)_userTypeMapper.Items).FindIndex(m => m.ClrType == typeof(TEnum)) is > -1 and var index) HackyEnumTypeMappings.RemoveAt(index); + ResetTypeMappingCache(); + return removed; } finally @@ -230,6 +233,7 @@ public INpgsqlTypeMapper MapComposite(Type clrType, string? pgName = null, INpgs try { _userTypeMapper.MapComposite(clrType, pgName, nameTranslator); + ResetTypeMappingCache(); return this; } finally @@ -245,7 +249,9 @@ public bool UnmapComposite(Type clrType, string? 
pgName = null, INpgsqlNameTrans _lock.EnterWriteLock(); try { - return _userTypeMapper.UnmapComposite(clrType, pgName, nameTranslator); + var result = _userTypeMapper.UnmapComposite(clrType, pgName, nameTranslator); + ResetTypeMappingCache(); + return result; } finally { diff --git a/test/Npgsql.Tests/Support/TestBase.cs b/test/Npgsql.Tests/Support/TestBase.cs index 81bac44b3e..24daf573cf 100644 --- a/test/Npgsql.Tests/Support/TestBase.cs +++ b/test/Npgsql.Tests/Support/TestBase.cs @@ -265,6 +265,8 @@ internal static async Task AssertTypeWriteCore( if (npgsqlDbType is null) isNpgsqlDbTypeInferredFromClrType = false; + inferredDbType ??= isNpgsqlDbTypeInferredFromClrType ? dbType ?? DbType.Object : DbType.Object; + // TODO: Interferes with both multiplexing and connection-specific mapping (used e.g. in NodaTime) // Reset the type mapper to make sure we're resolving this type with a clean slate (for isolation, just in case) // connection.TypeMapper.Reset(); @@ -318,15 +320,13 @@ internal static async Task AssertTypeWriteCore( p = new NpgsqlParameter { Value = valueFactory() }; cmd.Parameters.Add(p); errorIdentifier[++errorIdentifierIndex] = $"Value only (type {p.Value!.GetType().Name}, non-generic)"; - if (isNpgsqlDbTypeInferredFromClrType) - CheckInference(); + CheckInference(valueOnlyInference: true); // With (generic) value only p = new NpgsqlParameter { TypedValue = valueFactory() }; cmd.Parameters.Add(p); errorIdentifier[++errorIdentifierIndex] = $"Value only (type {p.Value!.GetType().Name}, generic)"; - if (isNpgsqlDbTypeInferredFromClrType) - CheckInference(); + CheckInference(valueOnlyInference: true); } Debug.Assert(cmd.Parameters.Count == errorIdentifierIndex + 1); @@ -343,19 +343,20 @@ internal static async Task AssertTypeWriteCore( Assert.That(reader[i+1], Is.EqualTo(expectedSqlLiteral), $"Got wrong SQL literal when writing with {errorIdentifier[i / 2]}"); } - void CheckInference() + void CheckInference(bool valueOnlyInference = false) { - if 
(npgsqlDbType is not null) + if (isNpgsqlDbTypeInferredFromClrType && npgsqlDbType is not null) { Assert.That(p.NpgsqlDbType, Is.EqualTo(npgsqlDbType), () => $"Got wrong inferred NpgsqlDbType when inferring with {errorIdentifier[errorIdentifierIndex]}"); } - Assert.That(p.DbType, Is.EqualTo(inferredDbType ?? dbType ?? DbType.Object), + Assert.That(p.DbType, Is.EqualTo(valueOnlyInference ? inferredDbType : isNpgsqlDbTypeInferredFromClrType ? inferredDbType : dbType ?? DbType.Object), () => $"Got wrong inferred DbType when inferring with {errorIdentifier[errorIdentifierIndex]}"); - Assert.That(p.DataTypeName, Is.EqualTo(pgTypeNameWithoutFacets), - () => $"Got wrong inferred DataTypeName when inferring with {errorIdentifier[errorIdentifierIndex]}"); + if (isNpgsqlDbTypeInferredFromClrType) + Assert.That(p.DataTypeName, Is.EqualTo(pgTypeNameWithoutFacets), + () => $"Got wrong inferred DataTypeName when inferring with {errorIdentifier[errorIdentifierIndex]}"); } } From 3d9a9dd6dacc19e1407c70dd2d43a131b74ce410 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Wed, 27 Sep 2023 02:23:17 +0200 Subject: [PATCH 218/761] Opt in integrated security (#5204) --- .../Internal/IntegratedSecurityHandler.cs | 32 ++++++++++++++++ src/Npgsql/Internal/NpgsqlConnector.Auth.cs | 6 +-- .../Internal/NpgsqlConnector.OldAuth.cs | 2 +- src/Npgsql/Internal/NpgsqlConnector.cs | 10 ++--- ...Handler.cs => TransportSecurityHandler.cs} | 16 ++++---- src/Npgsql/KerberosUsernameProvider.cs | 2 +- src/Npgsql/NpgsqlDataSource.cs | 7 +++- src/Npgsql/NpgsqlDataSourceBuilder.cs | 3 +- src/Npgsql/NpgsqlDataSourceConfiguration.cs | 6 +-- src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 38 +++++++++++++++---- .../Properties/NpgsqlStrings.Designer.cs | 10 ++++- src/Npgsql/Properties/NpgsqlStrings.resx | 7 +++- src/Npgsql/PublicAPI.Unshipped.txt | 3 +- 13 files changed, 106 insertions(+), 36 deletions(-) create mode 100644 src/Npgsql/Internal/IntegratedSecurityHandler.cs rename 
src/Npgsql/Internal/{EncryptionHandler.cs => TransportSecurityHandler.cs} (50%) diff --git a/src/Npgsql/Internal/IntegratedSecurityHandler.cs b/src/Npgsql/Internal/IntegratedSecurityHandler.cs new file mode 100644 index 0000000000..2b2f2f1bb9 --- /dev/null +++ b/src/Npgsql/Internal/IntegratedSecurityHandler.cs @@ -0,0 +1,32 @@ +using System; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.Extensions.Logging; +using Npgsql.Properties; + +namespace Npgsql.Internal; + +class IntegratedSecurityHandler +{ + public virtual bool IsSupported => false; + + public virtual ValueTask GetUsername(bool async, bool includeRealm, ILogger connectionLogger, CancellationToken cancellationToken) + { + connectionLogger.LogDebug(string.Format(NpgsqlStrings.IntegratedSecurityDisabled, nameof(NpgsqlSlimDataSourceBuilder.EnableIntegratedSecurity))); + return new(); + } + + public virtual ValueTask NegotiateAuthentication(bool async, NpgsqlConnector connector) + => throw new NotSupportedException(string.Format(NpgsqlStrings.IntegratedSecurityDisabled, nameof(NpgsqlSlimDataSourceBuilder.EnableIntegratedSecurity))); +} + +sealed class RealIntegratedSecurityHandler : IntegratedSecurityHandler +{ + public override bool IsSupported => true; + + public override ValueTask GetUsername(bool async, bool includeRealm, ILogger connectionLogger, CancellationToken cancellationToken) + => KerberosUsernameProvider.GetUsername(async, includeRealm, connectionLogger, cancellationToken); + + public override ValueTask NegotiateAuthentication(bool async, NpgsqlConnector connector) + => new(connector.AuthenticateGSS(async)); +} diff --git a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs index 1bbd1ada60..6eeb0fa44b 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs @@ -42,7 +42,7 @@ await AuthenticateSASL(((AuthenticationSASLMessage)msg).Mechanisms, username, as case 
AuthenticationRequestType.AuthenticationGSS: case AuthenticationRequestType.AuthenticationSSPI: - await AuthenticateGSS(async).ConfigureAwait(false); + await DataSource.IntegratedSecurityHandler.NegotiateAuthentication(async, this).ConfigureAwait(false); return; case AuthenticationRequestType.AuthenticationGSSContinue: @@ -93,7 +93,7 @@ async Task AuthenticateSASL(List mechanisms, string username, bool async var successfulBind = false; if (clientSupportsSha256Plus) - DataSource.EncryptionHandler.AuthenticateSASLSha256Plus(this, ref mechanism, ref cbindFlag, ref cbind, ref successfulBind); + DataSource.TransportSecurityHandler.AuthenticateSASLSha256Plus(this, ref mechanism, ref cbindFlag, ref cbind, ref successfulBind); if (!successfulBind && serverSupportsSha256) { @@ -331,7 +331,7 @@ async Task AuthenticateMD5(string username, byte[] salt, bool async, Cancellatio } #if NET7_0_OR_GREATER - async Task AuthenticateGSS(bool async) + internal async Task AuthenticateGSS(bool async) { var targetName = $"{KerberosServiceName}/{Host}"; diff --git a/src/Npgsql/Internal/NpgsqlConnector.OldAuth.cs b/src/Npgsql/Internal/NpgsqlConnector.OldAuth.cs index e007ca4c57..e750e730cb 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.OldAuth.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.OldAuth.cs @@ -38,7 +38,7 @@ static byte[] Hi(string str, byte[] salt, int count) #endif #if !NET7_0_OR_GREATER - async Task AuthenticateGSS(bool async) + internal async Task AuthenticateGSS(bool async) { var targetName = $"{KerberosServiceName}/{Host}"; diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 3476ebae64..83117f44ac 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -735,7 +735,7 @@ async ValueTask GetUsernameAsyncInternal() { if (!RuntimeInformation.IsOSPlatform(OSPlatform.Windows)) { - username = await KerberosUsernameProvider.GetUsernameAsync(Settings.IncludeRealm, ConnectionLogger, async, + 
username = await DataSource.IntegratedSecurityHandler.GetUsername(async, Settings.IncludeRealm, ConnectionLogger, cancellationToken).ConfigureAwait(false); if (username?.Length > 0) @@ -786,7 +786,7 @@ async Task RawOpen(SslMode sslMode, NpgsqlTimeout timeout, bool async, Cancellat IsSecure = false; - if ((sslMode is SslMode.Prefer && DataSource.EncryptionHandler.SupportEncryption) || + if ((sslMode is SslMode.Prefer && DataSource.TransportSecurityHandler.SupportEncryption) || sslMode is SslMode.Require or SslMode.VerifyCA or SslMode.VerifyFull) { WriteSslRequest(); @@ -805,7 +805,7 @@ async Task RawOpen(SslMode sslMode, NpgsqlTimeout timeout, bool async, Cancellat throw new NpgsqlException("SSL connection requested. No SSL enabled connection from this host is configured."); break; case 'S': - await DataSource.EncryptionHandler.NegotiateEncryption(this, sslMode, timeout, async, isFirstAttempt).ConfigureAwait(false); + await DataSource.TransportSecurityHandler.NegotiateEncryption(async, this, sslMode, timeout, isFirstAttempt).ConfigureAwait(false); break; } @@ -888,7 +888,7 @@ internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, if (Settings.RootCertificate is not null) throw new ArgumentException(NpgsqlStrings.CannotUseSslRootCertificateWithUserCallback); - if (DataSource.EncryptionHandler.RootCertificateCallback is not null) + if (DataSource.TransportSecurityHandler.RootCertificateCallback is not null) throw new ArgumentException(NpgsqlStrings.CannotUseValidationRootCertificateCallbackWithUserCallback); certificateValidationCallback = UserCertificateValidationCallback; @@ -898,7 +898,7 @@ internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, certificateValidationCallback = SslTrustServerValidation; checkCertificateRevocation = false; } - else if ((caCert = DataSource.EncryptionHandler.RootCertificateCallback?.Invoke()) is not null || + else if ((caCert = 
DataSource.TransportSecurityHandler.RootCertificateCallback?.Invoke()) is not null || (certRootPath = Settings.RootCertificate ?? PostgresEnvironment.SslCertRoot ?? PostgresEnvironment.SslCertRootDefault) is not null) { diff --git a/src/Npgsql/Internal/EncryptionHandler.cs b/src/Npgsql/Internal/TransportSecurityHandler.cs similarity index 50% rename from src/Npgsql/Internal/EncryptionHandler.cs rename to src/Npgsql/Internal/TransportSecurityHandler.cs index c03d11ef20..e34b2444a7 100644 --- a/src/Npgsql/Internal/EncryptionHandler.cs +++ b/src/Npgsql/Internal/TransportSecurityHandler.cs @@ -6,31 +6,31 @@ namespace Npgsql.Internal; -class EncryptionHandler +class TransportSecurityHandler { public virtual bool SupportEncryption => false; public virtual Func? RootCertificateCallback { - get => throw new InvalidOperationException(NpgsqlStrings.EncryptionDisabled); - set => throw new InvalidOperationException(NpgsqlStrings.EncryptionDisabled); + get => throw new NotSupportedException(string.Format(NpgsqlStrings.TransportSecurityDisabled, nameof(NpgsqlSlimDataSourceBuilder.EnableTransportSecurity))); + set => throw new NotSupportedException(string.Format(NpgsqlStrings.TransportSecurityDisabled, nameof(NpgsqlSlimDataSourceBuilder.EnableTransportSecurity))); } - public virtual Task NegotiateEncryption(NpgsqlConnector connector, SslMode sslMode, NpgsqlTimeout timeout, bool async, bool isFirstAttempt) - => throw new InvalidOperationException(NpgsqlStrings.EncryptionDisabled); + public virtual Task NegotiateEncryption(bool async, NpgsqlConnector connector, SslMode sslMode, NpgsqlTimeout timeout, bool isFirstAttempt) + => throw new NotSupportedException(string.Format(NpgsqlStrings.TransportSecurityDisabled, nameof(NpgsqlSlimDataSourceBuilder.EnableTransportSecurity))); public virtual void AuthenticateSASLSha256Plus(NpgsqlConnector connector, ref string mechanism, ref string cbindFlag, ref string cbind, ref bool successfulBind) - => throw new 
InvalidOperationException(NpgsqlStrings.EncryptionDisabled); + => throw new NotSupportedException(string.Format(NpgsqlStrings.TransportSecurityDisabled, nameof(NpgsqlSlimDataSourceBuilder.EnableTransportSecurity))); } -sealed class RealEncryptionHandler : EncryptionHandler +sealed class RealTransportSecurityHandler : TransportSecurityHandler { public override bool SupportEncryption => true; public override Func? RootCertificateCallback { get; set; } - public override Task NegotiateEncryption(NpgsqlConnector connector, SslMode sslMode, NpgsqlTimeout timeout, bool async, bool isFirstAttempt) + public override Task NegotiateEncryption(bool async, NpgsqlConnector connector, SslMode sslMode, NpgsqlTimeout timeout, bool isFirstAttempt) => connector.NegotiateEncryption(sslMode, timeout, async, isFirstAttempt); public override void AuthenticateSASLSha256Plus(NpgsqlConnector connector, ref string mechanism, ref string cbindFlag, ref string cbind, diff --git a/src/Npgsql/KerberosUsernameProvider.cs b/src/Npgsql/KerberosUsernameProvider.cs index ba162ff21a..a962a6fdc2 100644 --- a/src/Npgsql/KerberosUsernameProvider.cs +++ b/src/Npgsql/KerberosUsernameProvider.cs @@ -17,7 +17,7 @@ sealed class KerberosUsernameProvider static string? _principalWithRealm; static string? _principalWithoutRealm; - internal static ValueTask GetUsernameAsync(bool includeRealm, ILogger connectionLogger, bool async, CancellationToken cancellationToken) + internal static ValueTask GetUsername(bool async, bool includeRealm, ILogger connectionLogger, CancellationToken cancellationToken) { if (_performedDetection) return new(includeRealm ? 
_principalWithRealm : _principalWithoutRealm); diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index c9d5619c4c..18c0d05f32 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -40,13 +40,15 @@ public abstract class NpgsqlDataSource : DbDataSource /// internal NpgsqlDatabaseInfo DatabaseInfo { get; private set; } = null!; // Initialized at bootstrapping - internal EncryptionHandler EncryptionHandler { get; } + internal TransportSecurityHandler TransportSecurityHandler { get; } internal RemoteCertificateValidationCallback? UserCertificateValidationCallback { get; } internal Action? ClientCertificatesCallback { get; } readonly Func>? _periodicPasswordProvider; readonly TimeSpan _periodicPasswordSuccessRefreshInterval, _periodicPasswordFailureRefreshInterval; + internal IntegratedSecurityHandler IntegratedSecurityHandler { get; } + internal Action? ConnectionInitializer { get; } internal Func? ConnectionInitializerAsync { get; } @@ -95,7 +97,8 @@ internal NpgsqlDataSource( (var name, LoggingConfiguration, - EncryptionHandler, + TransportSecurityHandler, + IntegratedSecurityHandler, UserCertificateValidationCallback, ClientCertificatesCallback, _periodicPasswordProvider, diff --git a/src/Npgsql/NpgsqlDataSourceBuilder.cs b/src/Npgsql/NpgsqlDataSourceBuilder.cs index de76897cd9..72e834bf42 100644 --- a/src/Npgsql/NpgsqlDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlDataSourceBuilder.cs @@ -92,7 +92,8 @@ public NpgsqlDataSourceBuilder(string? connectionString = null) void AddDefaultFeatures() { - _internalBuilder.EnableEncryption(); + _internalBuilder.EnableTransportSecurity(); + _internalBuilder.EnableIntegratedSecurity(); AddTypeInfoResolver(UnsupportedTypeInfoResolver); // Reverse order arrays. 
AddTypeInfoResolver(new UnmappedMultirangeArrayTypeInfoResolver()); diff --git a/src/Npgsql/NpgsqlDataSourceConfiguration.cs b/src/Npgsql/NpgsqlDataSourceConfiguration.cs index dac604b6dc..9fbfdb94f5 100644 --- a/src/Npgsql/NpgsqlDataSourceConfiguration.cs +++ b/src/Npgsql/NpgsqlDataSourceConfiguration.cs @@ -8,10 +8,10 @@ namespace Npgsql; -sealed record NpgsqlDataSourceConfiguration( - string? Name, +sealed record NpgsqlDataSourceConfiguration(string? Name, NpgsqlLoggingConfiguration LoggingConfiguration, - EncryptionHandler EncryptionHandler, + TransportSecurityHandler TransportSecurityHandler, + IntegratedSecurityHandler userCertificateValidationCallback, RemoteCertificateValidationCallback? UserCertificateValidationCallback, Action? ClientCertificatesCallback, Func>? PeriodicPasswordProvider, diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index d01a3d6b4c..6e3982a7b7 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -29,10 +29,12 @@ public sealed class NpgsqlSlimDataSourceBuilder : INpgsqlTypeMapper ILoggerFactory? _loggerFactory; bool _sensitiveDataLoggingEnabled; - EncryptionHandler _encryptionHandler = new(); + TransportSecurityHandler _transportSecurityHandler = new(); RemoteCertificateValidationCallback? _userCertificateValidationCallback; Action? _clientCertificatesCallback; + IntegratedSecurityHandler _integratedSecurityHandler = new(); + Func>? _periodicPasswordProvider; TimeSpan _periodicPasswordSuccessRefreshInterval, _periodicPasswordFailureRefreshInterval; @@ -202,9 +204,10 @@ public NpgsqlSlimDataSourceBuilder UseRootCertificate(X509Certificate2? rootCert /// and might change during the lifetime of the application. /// When that's not the case, use the overload which directly accepts the certificate. /// + /// The same builder instance so that multiple calls can be chained. public NpgsqlSlimDataSourceBuilder UseRootCertificateCallback(Func? 
rootCertificateCallback) { - _encryptionHandler.RootCertificateCallback = rootCertificateCallback; + _transportSecurityHandler.RootCertificateCallback = rootCertificateCallback; return this; } @@ -229,6 +232,7 @@ public NpgsqlSlimDataSourceBuilder UseRootCertificateCallback(Func /// + /// The same builder instance so that multiple calls can be chained. public NpgsqlSlimDataSourceBuilder UsePeriodicPasswordProvider( Func>? passwordProvider, TimeSpan successRefreshInterval, @@ -329,6 +333,7 @@ internal void ResetTypeMappings() /// /// Sets up mappings for the PostgreSQL array types. /// + /// The same builder instance so that multiple calls can be chained. public NpgsqlSlimDataSourceBuilder EnableArrays() { AddTypeInfoResolver(new RangeArrayTypeInfoResolver()); @@ -340,6 +345,7 @@ public NpgsqlSlimDataSourceBuilder EnableArrays() /// /// Sets up mappings for the PostgreSQL range types. /// + /// The same builder instance so that multiple calls can be chained. public NpgsqlSlimDataSourceBuilder EnableRanges() { AddTypeInfoResolver(new RangeTypeInfoResolver()); @@ -349,6 +355,7 @@ public NpgsqlSlimDataSourceBuilder EnableRanges() /// /// Sets up mappings for the PostgreSQL multirange types. /// + /// The same builder instance so that multiple calls can be chained. public NpgsqlSlimDataSourceBuilder EnableMultiranges() { AddTypeInfoResolver(new RangeTypeInfoResolver()); @@ -365,6 +372,7 @@ public NpgsqlSlimDataSourceBuilder EnableMultiranges() /// /// A list of CLR types to map to PostgreSQL json (no need to specify ). /// + /// The same builder instance so that multiple calls can be chained. public NpgsqlSlimDataSourceBuilder UseSystemTextJson( JsonSerializerOptions? serializerOptions = null, Type[]? jsonbClrTypes = null, @@ -377,6 +385,7 @@ public NpgsqlSlimDataSourceBuilder UseSystemTextJson( /// /// Sets up mappings for the PostgreSQL record type. /// + /// The same builder instance so that multiple calls can be chained. 
public NpgsqlSlimDataSourceBuilder EnableRecords() { AddTypeInfoResolver(new RecordTypeInfoResolver()); @@ -386,6 +395,7 @@ public NpgsqlSlimDataSourceBuilder EnableRecords() /// /// Sets up mappings for the PostgreSQL tsquery and tsvector types. /// + /// The same builder instance so that multiple calls can be chained. public NpgsqlSlimDataSourceBuilder EnableFullTextSearch() { AddTypeInfoResolver(new FullTextSearchTypeInfoResolver()); @@ -395,6 +405,7 @@ public NpgsqlSlimDataSourceBuilder EnableFullTextSearch() /// /// Sets up mappings for the PostgreSQL ltree extension types. /// + /// The same builder instance so that multiple calls can be chained. public NpgsqlSlimDataSourceBuilder EnableLTree() { AddTypeInfoResolver(new LTreeTypeInfoResolver()); @@ -404,6 +415,7 @@ public NpgsqlSlimDataSourceBuilder EnableLTree() /// /// Sets up mappings for extra conversions from PostgreSQL to .NET types. /// + /// The same builder instance so that multiple calls can be chained. public NpgsqlSlimDataSourceBuilder EnableExtraConversions() { AddTypeInfoResolver(new ExtraConversionsResolver()); @@ -414,10 +426,21 @@ public NpgsqlSlimDataSourceBuilder EnableExtraConversions() /// Enables the possibility to use TLS/SSl encryption for connections to PostgreSQL. This does not guarantee that encryption will /// actually be used; see for more details. /// - public NpgsqlSlimDataSourceBuilder EnableEncryption() + /// The same builder instance so that multiple calls can be chained. + public NpgsqlSlimDataSourceBuilder EnableTransportSecurity() { - _encryptionHandler = new RealEncryptionHandler(); + _transportSecurityHandler = new RealTransportSecurityHandler(); + return this; + } + /// + /// Enables the possibility to use GSS/SSPI authentication for connections to PostgreSQL. This does not guarantee that it will + /// actually be used; see for more details. + /// + /// The same builder instance so that multiple calls can be chained. 
+ public NpgsqlSlimDataSourceBuilder EnableIntegratedSecurity() + { + _integratedSecurityHandler = new RealIntegratedSecurityHandler(); return this; } @@ -495,9 +518,9 @@ NpgsqlDataSourceConfiguration PrepareConfiguration() { ConnectionStringBuilder.PostProcessAndValidate(); - if (!_encryptionHandler.SupportEncryption && (_userCertificateValidationCallback is not null || _clientCertificatesCallback is not null)) + if (!_transportSecurityHandler.SupportEncryption && (_userCertificateValidationCallback is not null || _clientCertificatesCallback is not null)) { - throw new InvalidOperationException(NpgsqlStrings.EncryptionDisabled); + throw new InvalidOperationException(NpgsqlStrings.TransportSecurityDisabled); } if (_periodicPasswordProvider is not null && @@ -511,7 +534,8 @@ NpgsqlDataSourceConfiguration PrepareConfiguration() _loggerFactory is null ? NpgsqlLoggingConfiguration.NullConfiguration : new NpgsqlLoggingConfiguration(_loggerFactory, _sensitiveDataLoggingEnabled), - _encryptionHandler, + _transportSecurityHandler, + _integratedSecurityHandler, _userCertificateValidationCallback, _clientCertificatesCallback, _periodicPasswordProvider, diff --git a/src/Npgsql/Properties/NpgsqlStrings.Designer.cs b/src/Npgsql/Properties/NpgsqlStrings.Designer.cs index 707240754c..8a81077ffa 100644 --- a/src/Npgsql/Properties/NpgsqlStrings.Designer.cs +++ b/src/Npgsql/Properties/NpgsqlStrings.Designer.cs @@ -57,9 +57,15 @@ internal static string CannotUseSslRootCertificateWithUserCallback { } } - internal static string EncryptionDisabled { + internal static string TransportSecurityDisabled { get { - return ResourceManager.GetString("EncryptionDisabled", resourceCulture); + return ResourceManager.GetString("TransportSecurityDisabled", resourceCulture); + } + } + + internal static string IntegratedSecurityDisabled { + get { + return ResourceManager.GetString("IntegratedSecurityDisabled", resourceCulture); } } diff --git a/src/Npgsql/Properties/NpgsqlStrings.resx 
b/src/Npgsql/Properties/NpgsqlStrings.resx index 5ca209070f..4b2535fb6f 100644 --- a/src/Npgsql/Properties/NpgsqlStrings.resx +++ b/src/Npgsql/Properties/NpgsqlStrings.resx @@ -24,8 +24,11 @@ RootCertificate cannot be used in conjunction with UserCertificateValidationCallback; when registering a validation callback, perform whatever validation you require in that callback. - - NpgsqlSlimDataSourceBuilder is being used, and encryption hasn't been enabled, call EnableEncryption() on NpgsqlSlimDataSourceBuilder to enable it. + + Transport security hasn't been enabled; please call {0} on NpgsqlSlimDataSourceBuilder to enable it. + + + Integrated security hasn't been enabled; please call {0} on NpgsqlSlimDataSourceBuilder to enable it. No multirange type could be found in the database for subtype {0}. diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index 37a23693b4..1cd6f3da9c 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -24,14 +24,15 @@ Npgsql.NpgsqlSlimDataSourceBuilder.ConnectionStringBuilder.get -> Npgsql.NpgsqlC Npgsql.NpgsqlSlimDataSourceBuilder.DefaultNameTranslator.get -> Npgsql.INpgsqlNameTranslator! Npgsql.NpgsqlSlimDataSourceBuilder.DefaultNameTranslator.set -> void Npgsql.NpgsqlSlimDataSourceBuilder.EnableArrays() -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder.EnableEncryption() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableExtraConversions() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableFullTextSearch() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableIntegratedSecurity() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableLTree() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableMultiranges() -> Npgsql.NpgsqlSlimDataSourceBuilder! 
Npgsql.NpgsqlSlimDataSourceBuilder.EnableParameterLogging(bool parameterLoggingEnabled = true) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableRanges() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableRecords() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableTransportSecurity() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.MapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! Npgsql.NpgsqlSlimDataSourceBuilder.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! Npgsql.NpgsqlSlimDataSourceBuilder.MapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! From ac72fb1fd5b2319defb5bc42bdc97f88c1fd51e4 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Wed, 27 Sep 2023 13:15:56 +0200 Subject: [PATCH 219/761] Modernize array tests and other test cleanup (#5279) --- test/Npgsql.Tests/Types/ArrayTests.cs | 341 ++++-------------- .../Npgsql.Tests/Types/FullTextSearchTests.cs | 50 ++- test/Npgsql.Tests/Types/RangeTests.cs | 3 +- test/Npgsql.Tests/Types/RecordTests.cs | 42 ++- 4 files changed, 126 insertions(+), 310 deletions(-) diff --git a/test/Npgsql.Tests/Types/ArrayTests.cs b/test/Npgsql.Tests/Types/ArrayTests.cs index 6c929c07a7..aab21df1a7 100644 --- a/test/Npgsql.Tests/Types/ArrayTests.cs +++ b/test/Npgsql.Tests/Types/ArrayTests.cs @@ -21,59 +21,39 @@ namespace Npgsql.Tests.Types; /// public class ArrayTests : MultiplexingTestBase { - [Test, Description("Resolves an array type handler via the different pathways")] - public async Task Array_resolution() + static readonly TestCaseData[] ArrayTestCases = { - if (IsMultiplexing) - Assert.Ignore("Multiplexing, ReloadTypes"); - - await using var dataSource = 
CreateDataSource(csb => csb.Pooling = false); - await using var conn = await dataSource.OpenConnectionAsync(); - - // Resolve type by NpgsqlDbType - await using (var cmd = new NpgsqlCommand("SELECT @p", conn)) - { - cmd.Parameters.AddWithValue("p", NpgsqlDbType.Array | NpgsqlDbType.Integer, DBNull.Value); - await using var reader = await cmd.ExecuteReaderAsync(); + new TestCaseData(new[] { 1, 2, 3 }, "{1,2,3}", "integer[]", NpgsqlDbType.Integer | NpgsqlDbType.Array) + .SetName("Integer_array"), + new TestCaseData(Array.Empty(), "{}", "integer[]", NpgsqlDbType.Integer | NpgsqlDbType.Array) + .SetName("Empty_array"), + new TestCaseData(new[,] { { 1, 2, 3 }, { 7, 8, 9 } }, "{{1,2,3},{7,8,9}}", "integer[]", NpgsqlDbType.Integer | NpgsqlDbType.Array) + .SetName("Two_dimensional_array"), + new TestCaseData(new[] { new byte[] { 1, 2 }, new byte[] { 3, 4 } }, """{"\\x0102","\\x0304"}""", "bytea[]", NpgsqlDbType.Bytea | NpgsqlDbType.Array) + .SetName("Bytea_array") + }; + + [Test, TestCaseSource(nameof(ArrayTestCases))] + public Task Arrays(T array, string sqlLiteral, string pgTypeName, NpgsqlDbType? 
npgsqlDbType) + => AssertType(array, sqlLiteral, pgTypeName, npgsqlDbType); - reader.Read(); - Assert.That(reader.GetDataTypeName(0), Is.EqualTo("integer[]")); - } - - // Resolve type by ClrType (type inference) - await conn.ReloadTypesAsync(); - await using (var cmd = new NpgsqlCommand("SELECT @p", conn)) - { - cmd.Parameters.Add(new NpgsqlParameter { ParameterName = "p", Value = Array.Empty() }); - await using var reader = await cmd.ExecuteReaderAsync(); - - reader.Read(); - Assert.That(reader.GetDataTypeName(0), Is.EqualTo("integer[]")); - } - - // Resolve type by DataTypeName - await conn.ReloadTypesAsync(); - await using (var cmd = new NpgsqlCommand("SELECT @p", conn)) + [Test] + public async Task NullableInts() + { + var connectionStringBuilder = new NpgsqlConnectionStringBuilder(ConnectionString) { - cmd.Parameters.Add(new NpgsqlParameter { ParameterName="p", DataTypeName = "integer[]", Value = DBNull.Value }); - await using (var reader = await cmd.ExecuteReaderAsync()) - { - reader.Read(); - Assert.That(reader.GetDataTypeName(0), Is.EqualTo("integer[]")); - } - } + ArrayNullabilityMode = ArrayNullabilityMode.Always + }; + var dataSourceBuilder = new NpgsqlDataSourceBuilder(connectionStringBuilder.ToString()); + await using var dataSource = dataSourceBuilder.Build(); - // Resolve type by OID (read) - await conn.ReloadTypesAsync(); - await using (var cmd = new NpgsqlCommand("SELECT '{1, 3}'::INTEGER[]", conn)) - await using (var reader = await cmd.ExecuteReaderAsync()) - { - reader.Read(); - Assert.That(reader.GetDataTypeName(0), Is.EqualTo("integer[]")); - Assert.That(reader.GetFieldValue(0), Is.EqualTo(new[] { 1, 3 })); - } + await AssertType(dataSource, new int?[] { 1, 2, null, 3 }, "{1,2,NULL,3}", "integer[]", NpgsqlDbType.Integer | NpgsqlDbType.Array); } + [Test, Description("Checks that PG arrays containing nulls can't be read as CLR arrays of non-nullable value types (the default).")] + public async Task Nullable_ints_cannot_be_read_as_non_nullable() + 
=> await AssertTypeUnsupportedRead("{1,NULL,2}", "int[]"); + [Test] public async Task Throws_too_many_dimensions() { @@ -86,86 +66,6 @@ public async Task Throws_too_many_dimensions() Throws.Exception.TypeOf().With.Message.EqualTo("values (Parameter 'Postgres arrays can have at most 8 dimensions.')")); } - [Test] - public async Task Bind_int_then_array_of_int() - { - await using var dataSource = CreateDataSource(); - await using var conn = await dataSource.OpenConnectionAsync(); - - await using var cmd = new NpgsqlCommand("SELECT 1", conn); - _ = await cmd.ExecuteScalarAsync(); - - cmd.CommandText = "SELECT ARRAY[1,2]"; - Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(new[] { 1, 2 })); - } - - [Test, Description("Roundtrips a simple, one-dimensional array of ints")] - public async Task Ints() - { - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT @p1, @p2, @p3", conn); - - var expected = new[] { 1, 5, 9 }; - var p1 = new NpgsqlParameter("p1", NpgsqlDbType.Array | NpgsqlDbType.Integer); - var p2 = new NpgsqlParameter { ParameterName = "p2", Value = expected }; - var p3 = new NpgsqlParameter("p3", expected); - cmd.Parameters.Add(p1); - cmd.Parameters.Add(p2); - cmd.Parameters.Add(p3); - p1.Value = expected; - var reader = await cmd.ExecuteReaderAsync(); - reader.Read(); - - for (var i = 0; i < cmd.Parameters.Count; i++) - { - Assert.That(reader.GetValue(i), Is.EqualTo(expected)); - Assert.That(reader.GetValue(i), Is.TypeOf()); - Assert.That(reader.GetProviderSpecificValue(i), Is.EqualTo(expected)); - Assert.That(reader.GetFieldValue(i), Is.EqualTo(expected)); - Assert.That(reader.GetFieldType(i), Is.EqualTo(typeof(Array))); - Assert.That(reader.GetProviderSpecificFieldType(i), Is.EqualTo(typeof(Array))); - } - } - - [Test, Description("Roundtrips a simple, one-dimensional array of int? 
values")] - public async Task Nullable_ints() - { - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT @p1, @p2, @p3", conn); - - var expected = new int?[] { 1, 5, null, 9 }; - var p1 = new NpgsqlParameter("p1", NpgsqlDbType.Array | NpgsqlDbType.Integer); - var p2 = new NpgsqlParameter { ParameterName = "p2", Value = expected }; - var p3 = new NpgsqlParameter("p3", expected); - cmd.Parameters.Add(p1); - cmd.Parameters.Add(p2); - cmd.Parameters.Add(p3); - p1.Value = expected; - var reader = await cmd.ExecuteReaderAsync(); - reader.Read(); - - for (var i = 0; i < cmd.Parameters.Count; i++) - { - Assert.That(reader.GetFieldValue(i), Is.EqualTo(expected)); - Assert.That(reader.GetFieldValue>(i), Is.EqualTo(expected.ToList())); - Assert.That(reader.GetFieldType(i), Is.EqualTo(typeof(Array))); - Assert.That(reader.GetProviderSpecificFieldType(i), Is.EqualTo(typeof(Array))); - } - } - - [Test, Description("Checks that PG arrays containing nulls can't be read as CLR arrays of non-nullable value types.")] - public async Task Nullable_ints_cannot_be_read_as_non_nullable() - { - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT '{1, NULL, 2}'::integer[]", conn); - await using var reader = await cmd.ExecuteReaderAsync(); - reader.Read(); - - Assert.That(() => reader.GetFieldValue(0), Throws.Exception.TypeOf()); - Assert.That(() => reader.GetFieldValue>(0), Throws.Exception.TypeOf()); - Assert.That(() => reader.GetValue(0), Throws.Exception.TypeOf()); - } - [Test, Description("Checks that PG arrays containing nulls are returned as set via ValueTypeArrayMode.")] [TestCase(ArrayNullabilityMode.Always)] [TestCase(ArrayNullabilityMode.Never)] @@ -244,34 +144,15 @@ public async Task Value_type_array_nullabilities(ArrayNullabilityMode mode) } } + // Note that PG normalizes empty multidimensional arrays to single-dimensional, e.g. ARRAY[[], []]::integer[] returns {}. 
[Test] - public async Task Empty_array() - { - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT @p", conn); - - cmd.Parameters.Add(new NpgsqlParameter("p", NpgsqlDbType.Array | NpgsqlDbType.Integer) { Value = Array.Empty() }); - var reader = await cmd.ExecuteReaderAsync(); - reader.Read(); - - Assert.That(reader.GetFieldValue(0), Is.SameAs(Array.Empty())); - Assert.That(reader.GetFieldValue(0), Is.SameAs(Array.Empty())); - } - - [Test, Description("Roundtrips an empty multi-dimensional array.")] - public async Task Empty_multidimensional_array() - { - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT @p", conn); + public async Task Write_empty_multidimensional_array() + => await AssertTypeWrite(new int[0, 0], "{}", "integer[]", NpgsqlDbType.Integer | NpgsqlDbType.Array); - var expected = new int[0, 0]; - cmd.Parameters.AddWithValue("p", NpgsqlDbType.Array | NpgsqlDbType.Integer, expected); - - var reader = await cmd.ExecuteReaderAsync(); - reader.Read(); - - Assert.That(reader.GetFieldValue(0), Is.EqualTo(expected)); - } + [Test] + public async Task Generic_List() + => await AssertType( + new List { 1, 2, 3 }, "{1,2,3}", "integer[]", NpgsqlDbType.Integer | NpgsqlDbType.Array, isDefaultForReading: false); [Test, Description("Verifies that an InvalidOperationException is thrown when the returned array has a different number of dimensions from what was requested.")] public async Task Wrong_array_dimensions_throws() @@ -359,6 +240,14 @@ public async Task Long_two_dimensional() Assert.That(reader[0], Is.EqualTo(expected)); } + [Test, Description("Reads an one-dimensional array with lower bound != 0")] + public Task Read_non_zero_lower_bounded() + => AssertTypeRead("[2:3]={ 8, 9 }", "integer[]", new[] { 8, 9 }); + + [Test, Description("Reads an one-dimensional array with lower bound != 0")] + public Task Read_non_zero_lower_bounded_multidimensional() + => 
AssertTypeRead("[2:3][2:3]={ {8,9}, {1,2} }", "integer[]", new[,] { { 8, 9 }, { 1, 2 }}); + [Test, Description("Roundtrips a long, one-dimensional array of strings, including a null")] public async Task Strings_with_null() { @@ -374,116 +263,8 @@ public async Task Strings_with_null() Assert.That(reader.GetFieldValue(0), Is.EqualTo(expected)); } - [Test, Description("Roundtrips a zero-dimensional array of ints, should return empty one-dimensional")] - public async Task Zero_dimensional() - { - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT @p", conn); - var expected = Array.Empty(); - var p = new NpgsqlParameter("p", NpgsqlDbType.Array | NpgsqlDbType.Integer) { Value = expected }; - cmd.Parameters.Add(p); - var reader = await cmd.ExecuteReaderAsync(); - reader.Read(); - Assert.That(reader.GetValue(0), Is.EqualTo(expected)); - Assert.That(reader.GetProviderSpecificValue(0), Is.EqualTo(expected)); - Assert.That(reader.GetFieldValue(0), Is.EqualTo(expected)); - cmd.Dispose(); - } - - [Test, Description("Roundtrips a two-dimensional array of ints")] - public async Task Two_dimensional_ints() - { - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT @p1, @p2", conn); - var expected = new[,] { { 1, 2, 3 }, { 7, 8, 9 } }; - var p1 = new NpgsqlParameter("p1", NpgsqlDbType.Array | NpgsqlDbType.Integer); - var p2 = new NpgsqlParameter { ParameterName = "p2", Value = expected }; - cmd.Parameters.Add(p1); - cmd.Parameters.Add(p2); - p1.Value = expected; - var reader = await cmd.ExecuteReaderAsync(); - reader.Read(); - Assert.That(reader.GetValue(0), Is.EqualTo(expected)); - Assert.That(reader.GetProviderSpecificValue(0), Is.EqualTo(expected)); - Assert.That(reader.GetFieldValue(0), Is.EqualTo(expected)); - } - - [Test, Description("Reads an one-dimensional array with lower bound != 0")] - public async Task Read_non_zero_lower_bounded() - { - await using var conn = await 
OpenConnectionAsync(); - await using (var cmd = new NpgsqlCommand("SELECT '[2:3]={ 8, 9 }'::INT[]", conn)) - await using (var reader = await cmd.ExecuteReaderAsync()) - { - reader.Read(); - Assert.That(reader.GetFieldValue(0), Is.EqualTo(new[] {8, 9})); - } - - await using (var cmd = new NpgsqlCommand("SELECT '[2:3][2:3]={ {8,9}, {1,2} }'::INT[][]", conn)) - await using (var reader = await cmd.ExecuteReaderAsync()) - { - reader.Read(); - Assert.That(reader.GetFieldValue(0), Is.EqualTo(new[,] {{8, 9}, {1, 2}})); - } - } - - [Test, Description("Roundtrips a one-dimensional array of bytea values")] - public async Task Array_of_byte_arrays() - { - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT @p1, @p2", conn); - var expected = new[] { new byte[] { 1, 2 }, new byte[] { 3, 4, } }; - var p1 = new NpgsqlParameter("p1", NpgsqlDbType.Array | NpgsqlDbType.Bytea); - var p2 = new NpgsqlParameter { ParameterName = "p2", Value = expected }; - cmd.Parameters.Add(p1); - cmd.Parameters.Add(p2); - p1.Value = expected; - await using var reader = await cmd.ExecuteReaderAsync(); - reader.Read(); - Assert.That(reader.GetValue(0), Is.EqualTo(expected)); - Assert.That(reader.GetFieldValue(0), Is.EqualTo(expected)); - Assert.That(reader.GetFieldType(0), Is.EqualTo(typeof(Array))); - Assert.That(reader.GetProviderSpecificFieldType(0), Is.EqualTo(typeof(Array))); - } - - [Test, Description("Roundtrips a generic List as an array")] - // ReSharper disable once InconsistentNaming - public async Task IList_generic() - { - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT @p1, @p2", conn); - var expected = new[] { 1, 2, 3 }.ToList(); - var p1 = new NpgsqlParameter { ParameterName = "p1", Value = expected }; - var p2 = new NpgsqlParameter { ParameterName = "p2", Value = expected }; - cmd.Parameters.Add(p1); - cmd.Parameters.Add(p2); - await using var reader = await cmd.ExecuteReaderAsync(); - 
reader.Read(); - Assert.That(reader.GetValue(0), Is.EqualTo(expected)); - Assert.That(reader.GetFieldValue>(1), Is.EqualTo(expected)); - } - - [Test, Description("Tests for failure when reading a generic IList from a multidimensional array")] - // ReSharper disable once InconsistentNaming - public async Task IList_generic_fails_for_multidimensional_array() - { - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT @p1", conn); - var expected = new[,] { { 1, 2 }, { 3, 4 } }; - var p1 = new NpgsqlParameter { ParameterName = "p1", Value = expected }; - cmd.Parameters.Add(p1); - await using var reader = await cmd.ExecuteReaderAsync(); - reader.Read(); - Assert.That(reader.GetValue(0), Is.EqualTo(expected)); - var exception = Assert.Throws(() => - { - reader.GetFieldValue>(0); - })!; - Assert.That(exception.Message, Does.StartWith("Cannot read an array value with 2 dimensions")); - } - [Test, IssueLink("https://github.com/npgsql/npgsql/issues/844")] - public async Task IEnumerable_throws_friendly_exception() + public async Task Writing_IEnumerable_is_not_supported() { await using var conn = await OpenConnectionAsync(); await using var cmd = new NpgsqlCommand("SELECT @p1", conn); @@ -494,28 +275,14 @@ public async Task IEnumerable_throws_friendly_exception() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/960")] public async Task Jagged_arrays_not_supported() { - var jagged = new int[2][]; - jagged[0] = new[] { 8 }; - jagged[1] = new[] { 8, 10 }; await using var conn = await OpenConnectionAsync(); await using var cmd = new NpgsqlCommand("SELECT @p1", conn); - cmd.Parameters.AddWithValue("p1", NpgsqlDbType.Array | NpgsqlDbType.Integer, jagged); + cmd.Parameters.AddWithValue("p1", NpgsqlDbType.Array | NpgsqlDbType.Integer, new[] { new[] { 8 }, new[] { 8, 10 } }); Assert.That(async () => await cmd.ExecuteNonQueryAsync(), Throws.Exception .TypeOf() .With.Property("InnerException").Message.Contains("jagged")); } - 
[Test, IssueLink("https://github.com/npgsql/npgsql/issues/1546")] - public void Generic_List_get_NpgsqlDbType() - { - var p = new NpgsqlParameter - { - ParameterName = "p1", - Value = new List { 1, 2, 3 } - }; - Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Array | NpgsqlDbType.Integer)); - } - [Test, Description("Roundtrips one-dimensional and two-dimensional arrays of a PostgreSQL domain.")] public async Task Array_of_domain() { @@ -595,6 +362,26 @@ public async Task Read_two_empty_arrays() Assert.AreNotSame(reader.GetFieldValue>(0), reader.GetFieldValue>(1)); } + [Test] + public async Task Arrays_not_supported_by_default_on_NpgsqlSlimSourceBuilder() + { + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + await using var dataSource = dataSourceBuilder.Build(); + + await AssertTypeUnsupportedRead("{1,2,3}", "integer[]", dataSource); + await AssertTypeUnsupportedWrite(new[] { 1, 2, 3 }, "integer[]", dataSource); + } + + [Test] + public async Task NpgsqlSlimSourceBuilder_EnableArrays() + { + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + dataSourceBuilder.EnableArrays(); + await using var dataSource = dataSourceBuilder.Build(); + + await AssertType(dataSource, new[] { 1, 2, 3 }, "{1,2,3}", "integer[]", NpgsqlDbType.Integer | NpgsqlDbType.Array); + } + class IntList : List { } // ReSharper disable UnusedTypeParameter class MisleadingIntList : List { } diff --git a/test/Npgsql.Tests/Types/FullTextSearchTests.cs b/test/Npgsql.Tests/Types/FullTextSearchTests.cs index 5ffe8b2880..359c1b18d6 100644 --- a/test/Npgsql.Tests/Types/FullTextSearchTests.cs +++ b/test/Npgsql.Tests/Types/FullTextSearchTests.cs @@ -61,31 +61,41 @@ public Task TsQuery(string sqlLiteral, NpgsqlTsQuery query) => AssertType(query, sqlLiteral, "tsquery", NpgsqlDbType.TsQuery); [Test] - public async Task Full_text_search_supported_only_with_EnableFullTextSearch([Values] bool enableFullTextSearch) + public async Task 
Full_text_search_not_supported_by_default_on_NpgsqlSlimSourceBuilder() { - var errorMessage = string.Format(NpgsqlStrings.FullTextSearchNotEnabled, "EnableFullTextSearch", "NpgsqlSlimDataSourceBuilder"); + var errorMessage = string.Format( + NpgsqlStrings.FullTextSearchNotEnabled, + nameof(NpgsqlSlimDataSourceBuilder.EnableFullTextSearch), + nameof(NpgsqlSlimDataSourceBuilder)); var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); - if (enableFullTextSearch) - dataSourceBuilder.EnableFullTextSearch(); await using var dataSource = dataSourceBuilder.Build(); - if (enableFullTextSearch) - { - await AssertType(new NpgsqlTsQueryLexeme("a"), "'a'", "tsquery", NpgsqlDbType.TsQuery); - await AssertType(NpgsqlTsVector.Parse("'1'"), "'1'", "tsvector", NpgsqlDbType.TsVector); - } - else - { - var exception = await AssertTypeUnsupportedRead("a", "tsquery", dataSource); - Assert.AreEqual(errorMessage, exception.InnerException!.Message); - exception = await AssertTypeUnsupportedWrite(new NpgsqlTsQueryLexeme("a"), pgTypeName: null, dataSource); - Assert.AreEqual(errorMessage, exception.InnerException!.Message); + var exception = await AssertTypeUnsupportedRead("a", "tsquery", dataSource); + Assert.IsInstanceOf(exception.InnerException); + Assert.AreEqual(errorMessage, exception.InnerException!.Message); - exception = await AssertTypeUnsupportedRead("1", "tsvector", dataSource); - Assert.AreEqual(errorMessage, exception.InnerException!.Message); - exception = await AssertTypeUnsupportedWrite(NpgsqlTsVector.Parse("'1'"), pgTypeName: null, dataSource); - Assert.AreEqual(errorMessage, exception.InnerException!.Message); - } + exception = await AssertTypeUnsupportedWrite(new NpgsqlTsQueryLexeme("a"), pgTypeName: null, dataSource); + Assert.IsInstanceOf(exception.InnerException); + Assert.AreEqual(errorMessage, exception.InnerException!.Message); + + exception = await AssertTypeUnsupportedRead("1", "tsvector", dataSource); + 
Assert.IsInstanceOf(exception.InnerException); + Assert.AreEqual(errorMessage, exception.InnerException!.Message); + + exception = await AssertTypeUnsupportedWrite(NpgsqlTsVector.Parse("'1'"), pgTypeName: null, dataSource); + Assert.IsInstanceOf(exception.InnerException); + Assert.AreEqual(errorMessage, exception.InnerException!.Message); + } + + [Test] + public async Task NpgsqlSlimSourceBuilder_EnableFullTextSearch() + { + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + dataSourceBuilder.EnableFullTextSearch(); + await using var dataSource = dataSourceBuilder.Build(); + + await AssertType(new NpgsqlTsQueryLexeme("a"), "'a'", "tsquery", NpgsqlDbType.TsQuery); + await AssertType(NpgsqlTsVector.Parse("'1'"), "'1'", "tsvector", NpgsqlDbType.TsVector); } } diff --git a/test/Npgsql.Tests/Types/RangeTests.cs b/test/Npgsql.Tests/Types/RangeTests.cs index db57f0d78d..b6b54ef2bf 100644 --- a/test/Npgsql.Tests/Types/RangeTests.cs +++ b/test/Npgsql.Tests/Types/RangeTests.cs @@ -234,8 +234,7 @@ public async Task Ranges_not_supported_by_default_on_NpgsqlSlimSourceBuilder() var exception = await AssertTypeUnsupportedRead>("[1,10)", "int4range", dataSource); Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); - exception = await AssertTypeUnsupportedWrite>( - new NpgsqlRange(1, true, 10, false), "int4range", dataSource); + exception = await AssertTypeUnsupportedWrite(new NpgsqlRange(1, true, 10, false), "int4range", dataSource); Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); } diff --git a/test/Npgsql.Tests/Types/RecordTests.cs b/test/Npgsql.Tests/Types/RecordTests.cs index 54a56baa4a..f40f5f0965 100644 --- a/test/Npgsql.Tests/Types/RecordTests.cs +++ b/test/Npgsql.Tests/Types/RecordTests.cs @@ -1,4 +1,5 @@ using System; +using System.Data; using System.Threading.Tasks; using Npgsql.Properties; using NUnit.Framework; @@ -80,18 +81,37 @@ public async Task Record_with_non_int_field() } [Test] - public 
async Task Records_supported_only_with_EnableRecords([Values] bool withMappings) + public async Task Records_not_supported_by_default_on_NpgsqlSlimSourceBuilder() { - Func assertExpr = () => withMappings - ? Throws.Nothing - : Throws.Exception - .TypeOf() - .With.Property("InnerException").Property("Message") - .EqualTo(string.Format(NpgsqlStrings.RecordsNotEnabled, "EnableRecords", "NpgsqlSlimDataSourceBuilder")); + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + await using var dataSource = dataSourceBuilder.Build(); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = conn.CreateCommand(); + // RecordHandler doesn't support writing, so we only check for reading + cmd.CommandText = "SELECT ('one'::text, 2)"; + await using var reader = await cmd.ExecuteReaderAsync(); + await reader.ReadAsync(); + + var errorMessage = string.Format( + NpgsqlStrings.RecordsNotEnabled, + nameof(NpgsqlSlimDataSourceBuilder.EnableRecords), + nameof(NpgsqlSlimDataSourceBuilder)); + + var exception = Assert.Throws(() => reader.GetValue(0))!; + Assert.IsInstanceOf(exception.InnerException); + Assert.AreEqual(errorMessage, exception.InnerException!.Message); + + exception = Assert.Throws(() => reader.GetFieldValue(0))!; + Assert.IsInstanceOf(exception.InnerException); + Assert.AreEqual(errorMessage, exception.InnerException!.Message); + } + + [Test] + public async Task NpgsqlSlimSourceBuilder_EnableRecords() + { var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); - if (withMappings) - dataSourceBuilder.EnableRecords(); + dataSourceBuilder.EnableRecords(); await using var dataSource = dataSourceBuilder.Build(); await using var conn = await dataSource.OpenConnectionAsync(); await using var cmd = conn.CreateCommand(); @@ -101,8 +121,8 @@ public async Task Records_supported_only_with_EnableRecords([Values] bool withMa await using var reader = await cmd.ExecuteReaderAsync(); await reader.ReadAsync(); - 
Assert.That(() => reader.GetValue(0), assertExpr()); - Assert.That(() => reader.GetFieldValue(0), assertExpr()); + Assert.That(() => reader.GetValue(0), Throws.Nothing); + Assert.That(() => reader.GetFieldValue(0), Throws.Nothing); } public RecordTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} From ca8f131469b2ce3e6e5889902ec1a413941db208 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Wed, 27 Sep 2023 15:28:26 +0200 Subject: [PATCH 220/761] Add missed Task.FromCanceled calls (#5287) --- src/Npgsql/NpgsqlBinaryImporter.cs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/Npgsql/NpgsqlBinaryImporter.cs b/src/Npgsql/NpgsqlBinaryImporter.cs index ef0c4a051d..b7335990d5 100644 --- a/src/Npgsql/NpgsqlBinaryImporter.cs +++ b/src/Npgsql/NpgsqlBinaryImporter.cs @@ -222,7 +222,8 @@ public Task WriteAsync(T value, NpgsqlDbType npgsqlDbType, CancellationToken Task Write(bool async, T value, NpgsqlDbType npgsqlDbType, CancellationToken cancellationToken = default) { CheckColumnIndex(); - cancellationToken.ThrowIfCancellationRequested(); + if (cancellationToken.IsCancellationRequested) + return Task.FromCanceled(cancellationToken); var p = _params[_column]; if (p == null) @@ -270,7 +271,8 @@ public Task WriteAsync(T value, string dataTypeName, CancellationToken cancel Task Write(bool async, T value, string dataTypeName, CancellationToken cancellationToken = default) { CheckColumnIndex(); - cancellationToken.ThrowIfCancellationRequested(); + if (cancellationToken.IsCancellationRequested) + return Task.FromCanceled(cancellationToken); var p = _params[_column]; if (p == null) From 2087483118745627b35490a1cb6c6ca7eb0afce5 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Wed, 27 Sep 2023 18:13:42 +0200 Subject: [PATCH 221/761] Don't mix plural and singular (#5292) This seems to throw off some tooling, e.g. 
the trimming/AOT analyzers --- .../Npgsql.DependencyInjection.csproj | 2 +- src/Npgsql.Json.NET/Npgsql.Json.NET.csproj | 2 +- src/Npgsql.NodaTime/Npgsql.NodaTime.csproj | 2 +- src/Npgsql/Npgsql.csproj | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Npgsql.DependencyInjection/Npgsql.DependencyInjection.csproj b/src/Npgsql.DependencyInjection/Npgsql.DependencyInjection.csproj index 97dba6845c..8910b7fad9 100644 --- a/src/Npgsql.DependencyInjection/Npgsql.DependencyInjection.csproj +++ b/src/Npgsql.DependencyInjection/Npgsql.DependencyInjection.csproj @@ -4,7 +4,7 @@ Shay Rojansky netstandard2.0;net7.0 - net8.0 + net8.0 npgsql;postgresql;postgres;ado;ado.net;database;sql;di;dependency injection README.md diff --git a/src/Npgsql.Json.NET/Npgsql.Json.NET.csproj b/src/Npgsql.Json.NET/Npgsql.Json.NET.csproj index baff7e6af6..0c740dca34 100644 --- a/src/Npgsql.Json.NET/Npgsql.Json.NET.csproj +++ b/src/Npgsql.Json.NET/Npgsql.Json.NET.csproj @@ -4,7 +4,7 @@ Json.NET plugin for Npgsql, allowing transparent serialization/deserialization of JSON objects directly to and from the database. 
npgsql;postgresql;json;postgres;ado;ado.net;database;sql netstandard2.0;net6.0 - net8.0 + net8.0 enable diff --git a/src/Npgsql.NodaTime/Npgsql.NodaTime.csproj b/src/Npgsql.NodaTime/Npgsql.NodaTime.csproj index 1fc55fae1e..a6b65d79c5 100644 --- a/src/Npgsql.NodaTime/Npgsql.NodaTime.csproj +++ b/src/Npgsql.NodaTime/Npgsql.NodaTime.csproj @@ -5,7 +5,7 @@ npgsql;postgresql;postgres;nodatime;date;time;ado;ado;net;database;sql README.md netstandard2.0;net6.0 - net8.0 + net8.0 diff --git a/src/Npgsql/Npgsql.csproj b/src/Npgsql/Npgsql.csproj index 68e5db7395..52806c98dc 100644 --- a/src/Npgsql/Npgsql.csproj +++ b/src/Npgsql/Npgsql.csproj @@ -6,7 +6,7 @@ npgsql;postgresql;postgres;ado;ado.net;database;sql README.md netstandard2.0;netstandard2.1;net6.0;net7.0;net8.0 - net8.0 + net8.0 $(NoWarn);CA2017 From aa32e4f479a37a71a965edb52cc4fd837895c1fb Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Wed, 27 Sep 2023 19:39:21 +0300 Subject: [PATCH 222/761] Fix setting IsReadOnly with old readers's schema (#5286) Fixes #5284 --- src/Npgsql/NpgsqlDataReader.cs | 1 + test/Npgsql.Tests/ReaderOldSchemaTests.cs | 53 ++++++++--------------- 2 files changed, 18 insertions(+), 36 deletions(-) diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index d53dd3023f..6f3ac2ff71 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -1890,6 +1890,7 @@ Task> GetColumnSchema(bool async, Cancellatio row["IsRowVersion"] = false; row["IsHidden"] = column.IsHidden == true; row["IsLong"] = column.IsLong == true; + row["IsReadOnly"] = column.IsReadOnly == true; row["DataTypeName"] = column.DataTypeName; table.Rows.Add(row); diff --git a/test/Npgsql.Tests/ReaderOldSchemaTests.cs b/test/Npgsql.Tests/ReaderOldSchemaTests.cs index 92e3cf2e6d..edbeb15842 100644 --- a/test/Npgsql.Tests/ReaderOldSchemaTests.cs +++ b/test/Npgsql.Tests/ReaderOldSchemaTests.cs @@ -118,32 +118,18 @@ await conn.ExecuteNonQueryAsync($@" CREATE TABLE {table} (id SERIAL PRIMARY 
KEY, int2 SMALLINT); CREATE OR REPLACE VIEW {view} (id, int2) AS SELECT id, int2 + int2 AS int2 FROM {table}"); - var command = new NpgsqlCommand($"SELECT * FROM {view}", conn); + var command = new NpgsqlCommand($"SELECT id, int2 FROM {view}", conn); - using var dr = command.ExecuteReader(); + using var dr = command.ExecuteReader(CommandBehavior.SchemaOnly | CommandBehavior.KeyInfo); var metadata = await GetSchemaTable(dr); - foreach (var r in metadata!.Rows.OfType()) - { - switch ((string)r["ColumnName"]) - { - case "field_pk": - if (conn.PostgreSqlVersion < new Version("9.4")) - { - // 9.3 and earlier: IsUpdatable = False - Assert.IsTrue((bool)r["IsReadonly"], "field_pk"); - } - else - { - // 9.4: IsUpdatable = True - Assert.IsFalse((bool)r["IsReadonly"], "field_pk"); - } - break; - case "field_int2": - Assert.IsTrue((bool)r["IsReadonly"]); - break; - } - } + var idRow = metadata!.Rows.OfType().FirstOrDefault(x => (string)x["ColumnName"] == "id"); + Assert.IsNotNull(idRow, "Unable to find metadata for id column"); + var int2Row = metadata.Rows.OfType().FirstOrDefault(x => (string)x["ColumnName"] == "int2"); + Assert.IsNotNull(int2Row, "Unable to find metadata for int2 column"); + + Assert.IsFalse((bool)idRow!["IsReadonly"]); + Assert.IsTrue((bool)int2Row!["IsReadonly"]); } // ReSharper disable once InconsistentNaming @@ -156,19 +142,14 @@ public async Task AllowDBNull() using var cmd = new NpgsqlCommand($"SELECT * FROM {table}", conn); using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SchemaOnly | CommandBehavior.KeyInfo); using var metadata = await GetSchemaTable(reader); - foreach (var row in metadata!.Rows.OfType()) - { - var isNullable = (bool)row["AllowDBNull"]; - switch ((string)row["ColumnName"]) - { - case "nullable": - Assert.IsTrue(isNullable); - continue; - case "non_nullable": - Assert.IsFalse(isNullable); - continue; - } - } + + var nullableRow = metadata!.Rows.OfType().FirstOrDefault(x => (string)x["ColumnName"] == "nullable"); + 
Assert.IsNotNull(nullableRow, "Unable to find metadata for nullable column"); + var nonNullableRow = metadata.Rows.OfType().FirstOrDefault(x => (string)x["ColumnName"] == "non_nullable"); + Assert.IsNotNull(nonNullableRow, "Unable to find metadata for non_nullable column"); + + Assert.IsTrue((bool)nullableRow!["AllowDBNull"]); + Assert.IsFalse((bool)nonNullableRow!["AllowDBNull"]); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1027")] From ee24545fa6480cd27a5ef9819e64683549888132 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Wed, 27 Sep 2023 20:31:22 +0200 Subject: [PATCH 223/761] Trimming and Aot annotations (#5271) --- src/Directory.Build.props | 2 ++ .../NpgsqlServiceCollectionExtensions.cs | 15 +++++++- .../NpgsqlJsonNetExtensions.cs | 3 ++ .../Composites/Metadata/CompositeInfo.cs | 23 +++++------- .../ReflectionCompositeInfoFactory.cs | 20 +++++++---- .../Internal/Converters/CastingConverter.cs | 2 ++ .../Converters/SystemTextJsonConverter.cs | 3 +- .../Internal/DynamicTypeInfoResolver.cs | 2 +- .../Resolvers/NetworkTypeInfoResolver.cs | 25 +++++++------ .../SystemTextJsonTypeInfoResolvers.cs | 10 +++++- src/Npgsql/NpgsqlConnection.cs | 35 +++++++++++++++++-- src/Npgsql/NpgsqlDataAdapter.cs | 5 +++ src/Npgsql/NpgsqlDataReader.cs | 7 ++-- src/Npgsql/NpgsqlDataSource.cs | 4 +++ src/Npgsql/NpgsqlDataSourceBuilder.cs | 24 +++++++------ src/Npgsql/NpgsqlFactory.cs | 3 +- src/Npgsql/NpgsqlMultiHostDataSource.cs | 7 +++- src/Npgsql/NpgsqlNestedDataReader.cs | 4 ++- src/Npgsql/NpgsqlParameter.cs | 1 - src/Npgsql/NpgsqlSchema.cs | 13 ++++++- src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 26 ++++++++------ src/Npgsql/NpgsqlTypes/NpgsqlRange.cs | 7 ++-- src/Npgsql/TypeMapping/GlobalTypeMapper.cs | 22 ++++++------ src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs | 20 +++++------ src/Npgsql/TypeMapping/UserTypeMapper.cs | 29 +++++++++------ src/Npgsql/VolatileResourceManager.cs | 3 ++ 26 files changed, 215 insertions(+), 100 deletions(-) diff --git 
a/src/Directory.Build.props b/src/Directory.Build.props index f7d4b965b7..169a5988a2 100644 --- a/src/Directory.Build.props +++ b/src/Directory.Build.props @@ -3,6 +3,8 @@ true + + true diff --git a/src/Npgsql.DependencyInjection/NpgsqlServiceCollectionExtensions.cs b/src/Npgsql.DependencyInjection/NpgsqlServiceCollectionExtensions.cs index dd73e7c14e..2e5a5eca56 100644 --- a/src/Npgsql.DependencyInjection/NpgsqlServiceCollectionExtensions.cs +++ b/src/Npgsql.DependencyInjection/NpgsqlServiceCollectionExtensions.cs @@ -1,5 +1,6 @@ using System; using System.Data.Common; +using System.Diagnostics.CodeAnalysis; using Microsoft.Extensions.DependencyInjection.Extensions; using Microsoft.Extensions.Logging; using Npgsql; @@ -29,6 +30,8 @@ public static class NpgsqlServiceCollectionExtensions /// Defaults to . /// /// The same service collection so that multiple calls can be chained. + [RequiresUnreferencedCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums etc. Use NpgsqlSlimDataSourceBuilder to start with a reduced - reflection free - set and opt into what your app specifically requires.")] + [RequiresDynamicCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums. This can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] public static IServiceCollection AddNpgsqlDataSource( this IServiceCollection serviceCollection, string connectionString, @@ -51,6 +54,8 @@ public static IServiceCollection AddNpgsqlDataSource( /// Defaults to . /// /// The same service collection so that multiple calls can be chained. + [RequiresUnreferencedCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums etc. 
Use NpgsqlSlimDataSourceBuilder to start with a reduced - reflection free - set and opt into what your app specifically requires.")] + [RequiresDynamicCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums. This can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] public static IServiceCollection AddNpgsqlDataSource( this IServiceCollection serviceCollection, string connectionString, @@ -76,6 +81,8 @@ public static IServiceCollection AddNpgsqlDataSource( /// Defaults to . /// /// The same service collection so that multiple calls can be chained. + [RequiresUnreferencedCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums etc. Use NpgsqlSlimDataSourceBuilder to start with a reduced - reflection free - set and opt into what your app specifically requires.")] + [RequiresDynamicCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums. This can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] public static IServiceCollection AddMultiHostNpgsqlDataSource( this IServiceCollection serviceCollection, string connectionString, @@ -100,6 +107,8 @@ public static IServiceCollection AddMultiHostNpgsqlDataSource( /// Defaults to . /// /// The same service collection so that multiple calls can be chained. + [RequiresUnreferencedCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums etc. Use NpgsqlSlimDataSourceBuilder to start with a reduced - reflection free - set and opt into what your app specifically requires.")] + [RequiresDynamicCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums. This can require creating new generic types or methods, which requires creating code at runtime. 
This may not work when AOT compiling.")] public static IServiceCollection AddMultiHostNpgsqlDataSource( this IServiceCollection serviceCollection, string connectionString, @@ -108,6 +117,8 @@ public static IServiceCollection AddMultiHostNpgsqlDataSource( => AddNpgsqlMultiHostDataSourceCore( serviceCollection, connectionString, dataSourceBuilderAction: null, connectionLifetime, dataSourceLifetime); + [RequiresUnreferencedCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums etc. Use NpgsqlSlimDataSourceBuilder to start with a reduced - reflection free - set and opt into what your app specifically requires.")] + [RequiresDynamicCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums. This can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] static IServiceCollection AddNpgsqlDataSourceCore( this IServiceCollection serviceCollection, string connectionString, @@ -132,6 +143,8 @@ static IServiceCollection AddNpgsqlDataSourceCore( return serviceCollection; } + [RequiresUnreferencedCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums etc. Use NpgsqlSlimDataSourceBuilder to start with a reduced - reflection free - set and opt into what your app specifically requires.")] + [RequiresDynamicCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums. This can require creating new generic types or methods, which requires creating code at runtime. 
This may not work when AOT compiling.")] static IServiceCollection AddNpgsqlMultiHostDataSourceCore( this IServiceCollection serviceCollection, string connectionString, @@ -185,4 +198,4 @@ static void AddCommonServices( sp => sp.GetRequiredService(), connectionLifetime)); } -} \ No newline at end of file +} diff --git a/src/Npgsql.Json.NET/NpgsqlJsonNetExtensions.cs b/src/Npgsql.Json.NET/NpgsqlJsonNetExtensions.cs index 9cb70d86f1..572458f882 100644 --- a/src/Npgsql.Json.NET/NpgsqlJsonNetExtensions.cs +++ b/src/Npgsql.Json.NET/NpgsqlJsonNetExtensions.cs @@ -1,4 +1,5 @@ using System; +using System.Diagnostics.CodeAnalysis; using Npgsql.TypeMapping; using NpgsqlTypes; using Newtonsoft.Json; @@ -23,6 +24,8 @@ public static class NpgsqlJsonNetExtensions /// /// A list of CLR types to map to PostgreSQL json (no need to specify ). /// + [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] + [RequiresDynamicCode("Serializing arbitary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] public static INpgsqlTypeMapper UseJsonNet( this INpgsqlTypeMapper mapper, JsonSerializerSettings? settings = null, diff --git a/src/Npgsql/Internal/Composites/Metadata/CompositeInfo.cs b/src/Npgsql/Internal/Composites/Metadata/CompositeInfo.cs index 95a2c316a1..1db91b2052 100644 --- a/src/Npgsql/Internal/Composites/Metadata/CompositeInfo.cs +++ b/src/Npgsql/Internal/Composites/Metadata/CompositeInfo.cs @@ -1,6 +1,5 @@ using System; using System.Collections.Generic; -using System.Diagnostics; using Npgsql.Util; namespace Npgsql.Internal.Composites; @@ -10,7 +9,7 @@ sealed class CompositeInfo readonly int _lastConstructorFieldIndex; readonly CompositeFieldInfo[] _fields; - public CompositeInfo(CompositeFieldInfo[] fields, int? constructorParameters, Func? 
constructor) + public CompositeInfo(CompositeFieldInfo[] fields, int constructorParameters, Func constructor) { _lastConstructorFieldIndex = -1; for (var i = fields.Length - 1; i >= 0; i--) @@ -21,7 +20,7 @@ public CompositeInfo(CompositeFieldInfo[] fields, int? constructorParameters, Fu } var parameterSum = 0; - for(var i = constructorParameters - 1 ?? 0; i > 0; i--) + for (var i = constructorParameters - 1; i > 0; i--) parameterSum += i; var argumentsSum = 0; @@ -36,20 +35,14 @@ public CompositeInfo(CompositeFieldInfo[] fields, int? constructorParameters, Fu throw new InvalidOperationException($"Missing composite fields to map to the required {constructorParameters} constructor parameters."); _fields = fields; - if (constructor is null) - Constructor = _ => Activator.CreateInstance(); - else + var arguments = constructorParameters is 0 ? Array.Empty() : new CompositeFieldInfo[constructorParameters]; + foreach (var field in fields) { - var arguments = new CompositeFieldInfo[constructorParameters.GetValueOrDefault()]; - foreach (var field in fields) - { - if (field.ConstructorParameterIndex is { } index) - arguments[index] = field; - } - Constructor = constructor; + if (field.ConstructorParameterIndex is { } index) + arguments[index] = field; } - - ConstructorParameters = constructorParameters ?? 
0; + Constructor = constructor; + ConstructorParameters = constructorParameters; } public IReadOnlyList Fields => _fields; diff --git a/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs b/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs index 1fe217f5dc..cf757e2f42 100644 --- a/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs +++ b/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs @@ -1,6 +1,7 @@ using System; using System.Collections.Generic; using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; using System.Linq; using System.Linq.Expressions; using System.Reflection; @@ -10,9 +11,11 @@ namespace Npgsql.Internal.Composites; +[RequiresDynamicCode("Serializing arbitary types can require creating new generic types or methods. This may not work when AOT compiling.")] static class ReflectionCompositeInfoFactory { - public static CompositeInfo CreateCompositeInfo(PostgresCompositeType pgType, INpgsqlNameTranslator nameTranslator, PgSerializerOptions options) + public static CompositeInfo CreateCompositeInfo<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicProperties)] T>( + PostgresCompositeType pgType, INpgsqlNameTranslator nameTranslator, PgSerializerOptions options) { var pgFields = pgType.Fields; var propertyMap = MapProperties(pgFields, nameTranslator); @@ -86,8 +89,8 @@ public static CompositeInfo CreateCompositeInfo(PostgresCompositeType pgTy Debug.Assert(compositeFields.All(x => x is not null)); - var constructor = constructorInfo is null ? null : CreateStrongBoxConstructor(constructorInfo); - return new CompositeInfo(compositeFields!, constructorInfo is null ? null : constructorParameters.Length, constructor); + var constructor = constructorInfo is null ? 
_ => Activator.CreateInstance() : CreateStrongBoxConstructor(constructorInfo); + return new CompositeInfo(compositeFields!, constructorInfo is null ? 0 : constructorParameters.Length, constructor); // We have to map the pg type back to the composite field type, as we've resolved based on the representational pg type. PgConverterResolution MapResolution(PostgresCompositeType.Field field, PgConverterResolution resolution) @@ -151,6 +154,11 @@ static Delegate CreateSetter(PropertyInfo info) static Expression UnboxAny(Expression expression, Type type) => type.IsValueType ? Expression.Unbox(expression, type) : Expression.Convert(expression, type, null); +#if !NETSTANDARD + [DynamicDependency("TypedValue", typeof(StrongBox<>))] + [DynamicDependency("Length", typeof(StrongBox[]))] +#endif + [UnconditionalSuppressMessage("Trimming", "IL2026", Justification = "DynamicDependencies in place for the System.Linq.Expression.Property calls")] static Func CreateStrongBoxConstructor(ConstructorInfo constructorInfo) { var values = Expression.Parameter(typeof(StrongBox[]), "values"); @@ -187,7 +195,7 @@ static CompositeFieldInfo CreateCompositeFieldInfo(string name, Type type, PgCon => (CompositeFieldInfo)Activator.CreateInstance( typeof(CompositeFieldInfo<>).MakeGenericType(type), name, converterResolution, getter, setter)!; - static Dictionary MapProperties(IReadOnlyList fields, INpgsqlNameTranslator nameTranslator) + static Dictionary MapProperties<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicProperties)] T>(IReadOnlyList fields, INpgsqlNameTranslator nameTranslator) { var properties = typeof(T).GetProperties(BindingFlags.Public | BindingFlags.Instance); var propertiesAndNames = properties.Select(x => @@ -215,7 +223,7 @@ static Dictionary MapProperties(IReadOnlyList MapFields(IReadOnlyList fields, INpgsqlNameTranslator nameTranslator) + static Dictionary MapFields<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] T>(IReadOnlyList fields, 
INpgsqlNameTranslator nameTranslator) { var clrFields = typeof(T).GetFields(BindingFlags.Public | BindingFlags.Instance); var clrFieldsAndNames = clrFields.Select(x => @@ -243,7 +251,7 @@ static Dictionary MapFields(IReadOnlyList(IReadOnlyList fields, INpgsqlNameTranslator nameTranslator) + static (ConstructorInfo? ConstructorInfo, int[] ParameterFieldMap) MapBestMatchingConstructor<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors)] T>(IReadOnlyList fields, INpgsqlNameTranslator nameTranslator) { ConstructorInfo? clrDefaultConstructor = null; foreach (var constructor in typeof(T).GetConstructors().OrderByDescending(x => x.GetParameters().Length)) diff --git a/src/Npgsql/Internal/Converters/CastingConverter.cs b/src/Npgsql/Internal/Converters/CastingConverter.cs index f721d8d08e..d51abf8532 100644 --- a/src/Npgsql/Internal/Converters/CastingConverter.cs +++ b/src/Npgsql/Internal/Converters/CastingConverter.cs @@ -1,4 +1,5 @@ using System; +using System.Diagnostics.CodeAnalysis; using System.Threading; using System.Threading.Tasks; using Npgsql.Internal.Postgres; @@ -65,6 +66,7 @@ protected override PgConverter CreateConverter(PgConverterResolution effectiv static class CastingTypeInfoExtensions { + [RequiresDynamicCode("Changing boxing converters to their non-boxing counterpart can require creating new generic types or methods, which requires creating code at runtime. 
This may not be AOT when AOT compiling")] internal static PgTypeInfo ToNonBoxing(this PgTypeInfo typeInfo) { if (!typeInfo.IsBoxing) diff --git a/src/Npgsql/Internal/Converters/SystemTextJsonConverter.cs b/src/Npgsql/Internal/Converters/SystemTextJsonConverter.cs index cedf1664f2..d91d50a90e 100644 --- a/src/Npgsql/Internal/Converters/SystemTextJsonConverter.cs +++ b/src/Npgsql/Internal/Converters/SystemTextJsonConverter.cs @@ -20,9 +20,8 @@ sealed class SystemTextJsonConverter : PgStreamingConverter where public SystemTextJsonConverter(bool jsonb, Encoding textEncoding, JsonSerializerOptions serializerOptions) { - // We do GetTypeInfo calls directly so we need a resolver. if (serializerOptions.TypeInfoResolver is null) - serializerOptions.TypeInfoResolver = new DefaultJsonTypeInfoResolver(); + throw new InvalidOperationException("System.Text.Json serialization requires a type info resolver, make sure to set-it up beforehand."); _jsonb = jsonb; _textEncoding = textEncoding; diff --git a/src/Npgsql/Internal/DynamicTypeInfoResolver.cs b/src/Npgsql/Internal/DynamicTypeInfoResolver.cs index 22ffcd2248..45748a6164 100644 --- a/src/Npgsql/Internal/DynamicTypeInfoResolver.cs +++ b/src/Npgsql/Internal/DynamicTypeInfoResolver.cs @@ -6,7 +6,6 @@ namespace Npgsql.Internal; -[RequiresUnreferencedCode("A dynamic type info resolver may perform reflection on types that were trimmed if not referenced directly.")] [RequiresDynamicCode("A dynamic type info resolver may need to construct a generic converter for a statically unknown type.")] public abstract class DynamicTypeInfoResolver : IPgTypeInfoResolver { @@ -43,6 +42,7 @@ protected static bool IsArrayDataTypeName(DataTypeName dataTypeName, PgSerialize protected abstract DynamicMappingCollection? GetMappings(Type? 
type, DataTypeName dataTypeName, PgSerializerOptions options); + [RequiresDynamicCode("A dynamic type info resolver may need to construct a generic converter for a statically unknown type.")] protected class DynamicMappingCollection { TypeInfoMappingCollection? _mappings; diff --git a/src/Npgsql/Internal/Resolvers/NetworkTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/NetworkTypeInfoResolver.cs index 49ef7e8a5f..ecf393d7d8 100644 --- a/src/Npgsql/Internal/Resolvers/NetworkTypeInfoResolver.cs +++ b/src/Npgsql/Internal/Resolvers/NetworkTypeInfoResolver.cs @@ -39,23 +39,26 @@ static void AddInfos(TypeInfoMappingCollection mappings) // We do so by wrapping our converter in a casting converter constructed over the derived type. // Finally we add a custom predicate to be able to match any type which values are assignable to IPAddress. mappings.AddType(DataTypeNames.Inet, - [UnconditionalSuppressMessage("AOT", "IL3050", Justification = "MakeGenericType is safe because the target will only ever be a reference type.")] - static (options, resolvedMapping, _) => - { - var derivedType = resolvedMapping.Type != typeof(IPAddress); - PgConverter converter = new IPAddressConverter(); - if (derivedType) - // There is not much more we can do, the deriving type IPAddress+ReadOnlyIPAddress isn't public. 
- converter = (PgConverter)Activator.CreateInstance(typeof(CastingConverter<>).MakeGenericType(resolvedMapping.Type), converter)!; - - return resolvedMapping.CreateInfo(options, converter); - }, mapping => mapping with { MatchRequirement = MatchRequirement.Single, TypeMatchPredicate = type => type is null || typeof(IPAddress).IsAssignableFrom(type) }); + CreateInfo, mapping => mapping with { MatchRequirement = MatchRequirement.Single, TypeMatchPredicate = type => type is null || typeof(IPAddress).IsAssignableFrom(type) }); mappings.AddStructType(DataTypeNames.Inet, static (options, mapping, _) => mapping.CreateInfo(options, new NpgsqlInetConverter())); // cidr mappings.AddStructType(DataTypeNames.Cidr, static (options, mapping, _) => mapping.CreateInfo(options, new NpgsqlCidrConverter()), isDefault: true); + + // Code is split out to a local method as suppression attributes on lambdas aren't properly handled by the ILLink analyzer yet. + [UnconditionalSuppressMessage("AotAnalysis", "IL3050", Justification = "MakeGenericType is safe because the target will only ever be a reference type.")] + static PgTypeInfo CreateInfo(PgSerializerOptions options, TypeInfoMapping resolvedMapping, bool _) + { + var derivedType = resolvedMapping.Type != typeof(IPAddress); + PgConverter converter = new IPAddressConverter(); + if (derivedType) + // There is not much more we can do, the deriving type IPAddress+ReadOnlyIPAddress isn't public. 
+ converter = (PgConverter)Activator.CreateInstance(typeof(CastingConverter<>).MakeGenericType(resolvedMapping.Type), converter)!; + + return resolvedMapping.CreateInfo(options, converter); + } } static void AddArrayInfos(TypeInfoMappingCollection mappings) diff --git a/src/Npgsql/Internal/Resolvers/SystemTextJsonTypeInfoResolvers.cs b/src/Npgsql/Internal/Resolvers/SystemTextJsonTypeInfoResolvers.cs index 5650906ecb..d637700505 100644 --- a/src/Npgsql/Internal/Resolvers/SystemTextJsonTypeInfoResolvers.cs +++ b/src/Npgsql/Internal/Resolvers/SystemTextJsonTypeInfoResolvers.cs @@ -1,6 +1,8 @@ using System; +using System.Diagnostics.CodeAnalysis; using System.Text.Json; using System.Text.Json.Nodes; +using System.Text.Json.Serialization.Metadata; using Npgsql.Internal.Converters; using Npgsql.Internal.Postgres; @@ -13,12 +15,18 @@ class SystemTextJsonTypeInfoResolver : IPgTypeInfoResolver public SystemTextJsonTypeInfoResolver(JsonSerializerOptions? serializerOptions = null) => AddTypeInfos(Mappings, serializerOptions); + [UnconditionalSuppressMessage("Trimming", "IL2026", Justification = "Only used to request rooted and statically known types (JsonDocument,JsonElement etc).")] + [UnconditionalSuppressMessage("Aot", "IL3050", Justification = "Only used to request rooted and statically known types (JsonDocument,JsonElement etc).")] static void AddTypeInfos(TypeInfoMappingCollection mappings, JsonSerializerOptions? 
serializerOptions = null) { #if NET7_0_OR_GREATER serializerOptions ??= JsonSerializerOptions.Default; #else - serializerOptions ??= new JsonSerializerOptions(); + if (serializerOptions is null) + { + serializerOptions = new JsonSerializerOptions(); + serializerOptions.TypeInfoResolver = new DefaultJsonTypeInfoResolver(); + } #endif // Jsonb is the first default for JsonDocument diff --git a/src/Npgsql/NpgsqlConnection.cs b/src/Npgsql/NpgsqlConnection.cs index cb15be2792..79fa25150e 100644 --- a/src/Npgsql/NpgsqlConnection.cs +++ b/src/Npgsql/NpgsqlConnection.cs @@ -127,6 +127,9 @@ public NpgsqlConnection() /// Initializes a new instance of with the given connection string. /// /// The connection used to open the PostgreSQL database. + + [RequiresUnreferencedCode("ConnectionString based NpgsqlConnections use reflection to handle various PostgreSQL types like records, unmapped enums, etc. Use NpgsqlSlimDataSourceBuilder to start with a reduced - reflection free - set and opt into what your app specifically requires.")] + [RequiresDynamicCode("ConnectionString based NpgsqlConnections use reflection to handle various PostgreSQL types like records, unmapped enums, etc. This can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] public NpgsqlConnection(string? connectionString) : this() => ConnectionString = connectionString; @@ -167,6 +170,8 @@ internal static NpgsqlConnection FromDataSource(NpgsqlDataSource dataSource) /// A task representing the asynchronous operation. public override Task OpenAsync(CancellationToken cancellationToken) => Open(async: true, cancellationToken); + [RequiresUnreferencedCode("NpgsqlConnection uses reflection to handle various PostgreSQL types like records, unmapped enums etc. 
Use NpgsqlSlimDataSourceBuilder to start with a reduced - reflection free - set and opt into what your app specifically requires.")] + [RequiresDynamicCode("NpgsqlConnection uses reflection to handle various PostgreSQL types like records, unmapped enums. This can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] void SetupDataSource() { // Fast path: a pool already corresponds to this exact version of the connection string. @@ -374,6 +379,11 @@ async Task PerformMultiplexingStartupCheck(bool async, CancellationToken cancell public override string ConnectionString { get => _userFacingConnectionString; + + [RequiresUnreferencedCode("ConnectionString based NpgsqlConnections use reflection to handle various PostgreSQL types like records, unmapped enums, etc. Use NpgsqlSlimDataSourceBuilder to start with a reduced - reflection free - set and opt into what your app specifically requires.")] + [RequiresDynamicCode("ConnectionString based NpgsqlConnections use reflection to handle various PostgreSQL types like records, unmapped enums, etc. This can require creating new generic types or methods, which requires creating code at runtime. 
This may not work when AOT compiling.")] + [UnconditionalSuppressMessage("Trimming", "IL2046", Justification = "At the Npgsql level we cannot add RUC to DbConnection.")] + [UnconditionalSuppressMessage("Aot", "IL3051", Justification = "At the Npgsql level we cannot add RDC to DbConnection.")] set { CheckClosed(); @@ -1711,8 +1721,8 @@ internal void EndBindingScope(ConnectorBindingScope scope) /// /// Returns the supported collections /// - [UnconditionalSuppressMessage( - "Composite type mapping currently isn't trimming-safe, and warnings are generated at the MapComposite level.", "IL2026")] + [RequiresUnreferencedCode("Members from serialized types or types used in expressions may be trimmed if not referenced directly.")] + [UnconditionalSuppressMessage("Trimming", "IL2046", Justification = "At the Npgsql level we cannot add RUC to GetSchema.")] public override DataTable GetSchema() => GetSchema("MetaDataCollections", null); @@ -1721,6 +1731,8 @@ public override DataTable GetSchema() /// /// The collection name. /// The collection specified. + [RequiresUnreferencedCode("Members from serialized types or types used in expressions may be trimmed if not referenced directly.")] + [UnconditionalSuppressMessage("Trimming", "IL2046", Justification = "At the Npgsql level we cannot add RUC to GetSchema.")] public override DataTable GetSchema(string? collectionName) => GetSchema(collectionName, null); /// @@ -1732,6 +1744,8 @@ public override DataTable GetSchema() /// in the Restrictions collection. /// /// The collection specified. + [RequiresUnreferencedCode("Members from serialized types or types used in expressions may be trimmed if not referenced directly.")] + [UnconditionalSuppressMessage("Trimming", "IL2046", Justification = "At the Npgsql level we cannot add RUC to GetSchemaAsync.")] public override DataTable GetSchema(string? collectionName, string?[]? 
restrictions) => NpgsqlSchema.GetSchema(async: false, this, collectionName, restrictions).GetAwaiter().GetResult(); @@ -1742,7 +1756,9 @@ public override DataTable GetSchema(string? collectionName, string?[]? restricti /// An optional token to cancel the asynchronous operation. The default value is . /// /// The collection specified. + [RequiresUnreferencedCode("Members from serialized types or types used in expressions may be trimmed if not referenced directly.")] #if NET5_0_OR_GREATER + [UnconditionalSuppressMessage("Trimming", "IL2046", Justification = "At the Npgsql level we cannot add RUC to GetSchemaAsync.")] public override Task GetSchemaAsync(CancellationToken cancellationToken = default) #else public Task GetSchemaAsync(CancellationToken cancellationToken = default) @@ -1757,7 +1773,9 @@ public Task GetSchemaAsync(CancellationToken cancellationToken = defa /// An optional token to cancel the asynchronous operation. The default value is . /// /// The collection specified. + [RequiresUnreferencedCode("Members from serialized types or types used in expressions may be trimmed if not referenced directly.")] #if NET5_0_OR_GREATER + [UnconditionalSuppressMessage("Trimming", "IL2046", Justification = "At the Npgsql level we cannot add RUC to GetSchemaAsync.")] public override Task GetSchemaAsync(string collectionName, CancellationToken cancellationToken = default) #else public Task GetSchemaAsync(string collectionName, CancellationToken cancellationToken = default) @@ -1776,7 +1794,9 @@ public Task GetSchemaAsync(string collectionName, CancellationToken c /// An optional token to cancel the asynchronous operation. The default value is . /// /// The collection specified. 
+ [RequiresUnreferencedCode("Members from serialized types or types used in expressions may be trimmed if not referenced directly.")] #if NET5_0_OR_GREATER + [UnconditionalSuppressMessage("Trimming", "IL2046", Justification = "At the Npgsql level we cannot add RUC to GetSchemaAsync.")] public override Task GetSchemaAsync(string collectionName, string?[]? restrictions, CancellationToken cancellationToken = default) #else public Task GetSchemaAsync(string collectionName, string?[]? restrictions, CancellationToken cancellationToken = default) @@ -1822,6 +1842,8 @@ object ICloneable.Clone() /// (password, SSL callbacks) while changing other connection parameters (e.g. /// database or pooling) /// + [RequiresUnreferencedCode("ConnectionString based NpgsqlConnections use reflection to handle various PostgreSQL types like records, unmapped enums, etc. Use NpgsqlSlimDataSourceBuilder to start with a reduced - reflection free - set and opt into what your app specifically requires.")] + [RequiresDynamicCode("ConnectionString based NpgsqlConnections use reflection to handle various PostgreSQL types like records, unmapped enums, etc. This can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] public NpgsqlConnection CloneWith(string connectionString) { CheckDisposed(); @@ -1870,7 +1892,14 @@ public override void ChangeDatabase(string dbName) /// /// DB provider factory. /// - protected override DbProviderFactory DbProviderFactory => NpgsqlFactory.Instance; + protected override DbProviderFactory DbProviderFactory + { + [RequiresUnreferencedCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums etc. Use NpgsqlSlimDataSourceBuilder to start with a reduced - reflection free - set and opt into what your app specifically requires.")] + [RequiresDynamicCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums. 
This can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] + [UnconditionalSuppressMessage("Trimming", "IL2046", Justification = "At the Npgsql level we cannot add RUC to DbProviderFactory.")] + [UnconditionalSuppressMessage("Aot", "IL3051", Justification = "At the Npgsql level we cannot add RDC to DbProviderFactory.")] + get => NpgsqlFactory.Instance; + } /// /// Clears the connection pool. All idle physical connections in the pool of the given connection are diff --git a/src/Npgsql/NpgsqlDataAdapter.cs b/src/Npgsql/NpgsqlDataAdapter.cs index 1e1b12a411..f34b1aeec7 100644 --- a/src/Npgsql/NpgsqlDataAdapter.cs +++ b/src/Npgsql/NpgsqlDataAdapter.cs @@ -1,6 +1,7 @@ using System; using System.Data; using System.Data.Common; +using System.Diagnostics.CodeAnalysis; using System.Threading; using System.Threading.Tasks; @@ -61,6 +62,8 @@ public NpgsqlDataAdapter(string selectCommandText, NpgsqlConnection selectConnec /// /// /// + [RequiresUnreferencedCode("ConnectionString based NpgsqlConnections use reflection to handle various PostgreSQL types like records, unmapped enums, etc. Use NpgsqlSlimDataSourceBuilder to start with a reduced - reflection free - set and opt into what your app specifically requires.")] + [RequiresDynamicCode("ConnectionString based NpgsqlConnections use reflection to handle various PostgreSQL types like records, unmapped enums, etc. This can require creating new generic types or methods, which requires creating code at runtime. 
This may not work when AOT compiling.")] public NpgsqlDataAdapter(string selectCommandText, string selectConnectionString) : this(selectCommandText, new NpgsqlConnection(selectConnectionString)) {} @@ -140,6 +143,7 @@ protected override void OnRowUpdating(RowUpdatingEventArgs value) } // Temporary implementation, waiting for official support in System.Data via https://github.com/dotnet/runtime/issues/22109 + [RequiresUnreferencedCode("Members from serialized types or types used in expressions may be trimmed if not referenced directly.")] internal async Task Fill(DataTable dataTable, bool async, CancellationToken cancellationToken = default) { var command = SelectCommand; @@ -172,6 +176,7 @@ internal async Task Fill(DataTable dataTable, bool async, CancellationToken } } + [RequiresUnreferencedCode("Members from serialized types or types used in expressions may be trimmed if not referenced directly.")] async Task Fill(DataTable dataTable, NpgsqlDataReader dataReader, bool async, CancellationToken cancellationToken = default) { dataTable.BeginLoadData(); diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 6f3ac2ff71..4c63af1743 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -1757,7 +1757,9 @@ public override int GetOrdinal(string name) /// /// The zero-based column ordinal. /// The data type of the specified column. - [UnconditionalSuppressMessage("ILLink", "IL2093", Justification = "No members are dynamically accessed by Npgsql via GetFieldType")] + [UnconditionalSuppressMessage("ILLink", "IL2093", + Justification = "Members are only dynamically accessed by Npgsql via GetFieldType by GetSchema, and only in certain cases. 
" + + "Holding PublicFields and PublicProperties metadata on all our mapped types just for that case is the wrong tradeoff.")] public override Type GetFieldType(int ordinal) => GetField(ordinal).FieldType; @@ -1827,8 +1829,7 @@ Task> GetColumnSchema(bool async, Cancellatio #endif => GetSchemaTable(async: true, cancellationToken); - [UnconditionalSuppressMessage( - "Composite type mapping currently isn't trimming-safe, and warnings are generated at the MapComposite level.", "IL2026")] + [UnconditionalSuppressMessage("Trimming", "IL2111", Justification = "typeof(Type).TypeInitializer is not used.")] async Task GetSchemaTable(bool async, CancellationToken cancellationToken = default) { if (FieldCount == 0) // No resultset diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index 18c0d05f32..5a408a5020 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -207,12 +207,16 @@ protected override DbBatch CreateDbBatch() /// /// Creates a new for the given . /// + [RequiresUnreferencedCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums etc. Use NpgsqlSlimDataSourceBuilder to start with a reduced - reflection free - set and opt into what your app specifically requires.")] + [RequiresDynamicCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums. This can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] public static NpgsqlDataSource Create(string connectionString) => new NpgsqlDataSourceBuilder(connectionString).Build(); /// /// Creates a new for the given . /// + [RequiresUnreferencedCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums etc. 
Use NpgsqlSlimDataSourceBuilder to start with a reduced - reflection free - set and opt into what your app specifically requires.")] + [RequiresDynamicCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums. This can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] public static NpgsqlDataSource Create(NpgsqlConnectionStringBuilder connectionStringBuilder) => Create(connectionStringBuilder.ToString()); diff --git a/src/Npgsql/NpgsqlDataSourceBuilder.cs b/src/Npgsql/NpgsqlDataSourceBuilder.cs index 72e834bf42..e905bcbd23 100644 --- a/src/Npgsql/NpgsqlDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlDataSourceBuilder.cs @@ -17,6 +17,8 @@ namespace Npgsql; /// /// Provides a simple API for configuring and creating an , from which database connections can be obtained. /// +[RequiresUnreferencedCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums, etc. Use NpgsqlSlimDataSourceBuilder to start with a reduced - reflection free - set and opt into what your app specifically requires.")] +[RequiresDynamicCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums, etc. This can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] public sealed class NpgsqlDataSourceBuilder : INpgsqlTypeMapper { static UnsupportedTypeInfoResolver UnsupportedTypeInfoResolver { get; } = new(); @@ -298,6 +300,8 @@ void INpgsqlTypeMapper.Reset() /// /// A list of CLR types to map to PostgreSQL json (no need to specify ). /// + [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] + [RequiresDynamicCode("Serializing arbitary types to json can require creating new generic types or methods, which requires creating code at runtime. 
This may not work when AOT compiling.")] public NpgsqlDataSourceBuilder UseSystemTextJson( JsonSerializerOptions? serializerOptions = null, Type[]? jsonbClrTypes = null, @@ -309,7 +313,7 @@ public NpgsqlDataSourceBuilder UseSystemTextJson( } /// - public INpgsqlTypeMapper MapEnum(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + public INpgsqlTypeMapper MapEnum<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] TEnum>(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) where TEnum : struct, Enum { _internalBuilder.MapEnum(pgName, nameTranslator); @@ -317,34 +321,34 @@ public INpgsqlTypeMapper MapEnum(string? pgName = null, INpgsqlNameTransl } /// - public bool UnmapEnum(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + public bool UnmapEnum<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] TEnum>(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) where TEnum : struct, Enum => _internalBuilder.UnmapEnum(pgName, nameTranslator); /// - [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] - public INpgsqlTypeMapper MapComposite(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + public INpgsqlTypeMapper MapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>( + string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) { _internalBuilder.MapComposite(pgName, nameTranslator); return this; } /// - [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] - public INpgsqlTypeMapper MapComposite(Type clrType, string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) + public INpgsqlTypeMapper MapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] + Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) { _internalBuilder.MapComposite(clrType, pgName, nameTranslator); return this; } /// - [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] - public bool UnmapComposite(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + public bool UnmapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>( + string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) => _internalBuilder.UnmapComposite(pgName, nameTranslator); /// - [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] - public bool UnmapComposite(Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + public bool UnmapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] + Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) => _internalBuilder.UnmapComposite(clrType, pgName, nameTranslator); #endregion Type mapping diff --git a/src/Npgsql/NpgsqlFactory.cs b/src/Npgsql/NpgsqlFactory.cs index 6e45cde7b0..40d8b6de91 100644 --- a/src/Npgsql/NpgsqlFactory.cs +++ b/src/Npgsql/NpgsqlFactory.cs @@ -1,7 +1,6 @@ using System; using System.Data.Common; using System.Diagnostics.CodeAnalysis; -using System.Reflection; namespace Npgsql; @@ -9,6 +8,8 @@ namespace Npgsql; /// A factory to create instances of various Npgsql objects. 
/// [Serializable] +[RequiresUnreferencedCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums etc. Use NpgsqlSlimDataSourceBuilder to start with a reduced - reflection free - set and opt into what your app specifically requires.")] +[RequiresDynamicCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums. This can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] public sealed class NpgsqlFactory : DbProviderFactory, IServiceProvider { /// diff --git a/src/Npgsql/NpgsqlMultiHostDataSource.cs b/src/Npgsql/NpgsqlMultiHostDataSource.cs index fe6688b3a5..813460b557 100644 --- a/src/Npgsql/NpgsqlMultiHostDataSource.cs +++ b/src/Npgsql/NpgsqlMultiHostDataSource.cs @@ -4,6 +4,7 @@ using System.Collections.Generic; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; +using System.Linq; using System.Threading; using System.Threading.Tasks; using System.Transactions; @@ -52,7 +53,11 @@ internal NpgsqlMultiHostDataSource(NpgsqlConnectionStringBuilder settings, Npgsq : new UnpooledDataSource(poolSettings, dataSourceConfig); } - var targetSessionAttributeValues = (TargetSessionAttributes[])Enum.GetValues(typeof(TargetSessionAttributes)); +#if NETSTANDARD + var targetSessionAttributeValues = Enum.GetValues(typeof(TargetSessionAttributes)).Cast().ToArray(); +#else + var targetSessionAttributeValues = Enum.GetValues().ToArray(); +#endif var highestValue = 0; foreach (var value in targetSessionAttributeValues) if ((int)value > highestValue) diff --git a/src/Npgsql/NpgsqlNestedDataReader.cs b/src/Npgsql/NpgsqlNestedDataReader.cs index 060592e312..49a0ac513c 100644 --- a/src/Npgsql/NpgsqlNestedDataReader.cs +++ b/src/Npgsql/NpgsqlNestedDataReader.cs @@ -4,6 +4,7 @@ using System.Collections; using System.Collections.Generic; using System.Data.Common; +using System.Diagnostics.CodeAnalysis; using 
System.Globalization; using System.IO; using System.Runtime.CompilerServices; @@ -293,10 +294,11 @@ public override int GetOrdinal(string name) } /// + [UnconditionalSuppressMessage("ILLink", "IL2093", Justification = "No members are dynamically accessed by Npgsql via NpgsqlNestedDataReader.GetFieldType.")] public override Type GetFieldType(int ordinal) { var column = CheckRowAndColumn(ordinal); - return column.ObjectOrDefaultTypeInfo.Type; + return column.ObjectOrDefaultInfo.TypeToConvert; } /// diff --git a/src/Npgsql/NpgsqlParameter.cs b/src/Npgsql/NpgsqlParameter.cs index 7a5aabc8ae..4288412025 100644 --- a/src/Npgsql/NpgsqlParameter.cs +++ b/src/Npgsql/NpgsqlParameter.cs @@ -342,7 +342,6 @@ public sealed override DbType DbType [DbProviderSpecificTypeProperty(true)] public NpgsqlDbType NpgsqlDbType { - [RequiresUnreferencedCode("The NpgsqlDbType getter isn't trimming-safe")] get { if (_npgsqlDbType.HasValue) diff --git a/src/Npgsql/NpgsqlSchema.cs b/src/Npgsql/NpgsqlSchema.cs index 94ee317f2b..89446eb50f 100644 --- a/src/Npgsql/NpgsqlSchema.cs +++ b/src/Npgsql/NpgsqlSchema.cs @@ -2,6 +2,7 @@ using System.Collections.Generic; using System.Data; using System.Data.Common; +using System.Diagnostics.CodeAnalysis; using System.Globalization; using System.Text; using System.Threading; @@ -17,6 +18,7 @@ namespace Npgsql; /// static class NpgsqlSchema { + [RequiresUnreferencedCode("Members from serialized types or types used in expressions may be trimmed if not referenced directly.")] public static Task GetSchema(bool async, NpgsqlConnection conn, string? collectionName, string?[]? 
restrictions, CancellationToken cancellationToken = default) { if (collectionName is null) @@ -154,6 +156,7 @@ static NpgsqlCommand BuildCommand(NpgsqlConnection conn, StringBuilder query, st static string RemoveSpecialChars(string paramName) => paramName.Replace("(", "").Replace(")", "").Replace(".", ""); + [RequiresUnreferencedCode("Members from serialized types or types used in expressions may be trimmed if not referenced directly.")] static async Task GetDatabases(NpgsqlConnection conn, string?[]? restrictions, bool async, CancellationToken cancellationToken = default) { var databases = new DataTable("Databases") { Locale = CultureInfo.InvariantCulture }; @@ -175,6 +178,7 @@ static async Task GetDatabases(NpgsqlConnection conn, string?[]? rest return databases; } + [RequiresUnreferencedCode("Members from serialized types or types used in expressions may be trimmed if not referenced directly.")] static async Task GetSchemata(NpgsqlConnection conn, string?[]? restrictions, bool async, CancellationToken cancellationToken = default) { var schemata = new DataTable("Schemata") { Locale = CultureInfo.InvariantCulture }; @@ -201,7 +205,7 @@ r.rolname AS schema_owner return schemata; } - + [RequiresUnreferencedCode("Members from serialized types or types used in expressions may be trimmed if not referenced directly.")] static async Task GetTables(NpgsqlConnection conn, string?[]? restrictions, bool async, CancellationToken cancellationToken = default) { var tables = new DataTable("Tables") { Locale = CultureInfo.InvariantCulture }; @@ -229,6 +233,7 @@ table_type IN ('BASE TABLE', 'FOREIGN', 'FOREIGN TABLE') AND return tables; } + [RequiresUnreferencedCode("Members from serialized types or types used in expressions may be trimmed if not referenced directly.")] static async Task GetColumns(NpgsqlConnection conn, string?[]? 
restrictions, bool async, CancellationToken cancellationToken = default) { var columns = new DataTable("Columns") { Locale = CultureInfo.InvariantCulture }; @@ -269,6 +274,7 @@ FROM information_schema.columns return columns; } + [RequiresUnreferencedCode("Members from serialized types or types used in expressions may be trimmed if not referenced directly.")] static async Task GetViews(NpgsqlConnection conn, string?[]? restrictions, bool async, CancellationToken cancellationToken = default) { var views = new DataTable("Views") { Locale = CultureInfo.InvariantCulture }; @@ -290,6 +296,7 @@ FROM information_schema.views return views; } + [RequiresUnreferencedCode("Members from serialized types or types used in expressions may be trimmed if not referenced directly.")] static async Task GetUsers(NpgsqlConnection conn, string?[]? restrictions, bool async, CancellationToken cancellationToken = default) { var users = new DataTable("Users") { Locale = CultureInfo.InvariantCulture }; @@ -307,6 +314,7 @@ static async Task GetUsers(NpgsqlConnection conn, string?[]? restrict return users; } + [RequiresUnreferencedCode("Members from serialized types or types used in expressions may be trimmed if not referenced directly.")] static async Task GetIndexes(NpgsqlConnection conn, string?[]? restrictions, bool async, CancellationToken cancellationToken = default) { var indexes = new DataTable("Indexes") { Locale = CultureInfo.InvariantCulture }; @@ -340,6 +348,7 @@ n.nspname NOT IN ('pg_catalog', 'pg_toast') AND return indexes; } + [RequiresUnreferencedCode("Members from serialized types or types used in expressions may be trimmed if not referenced directly.")] static async Task GetIndexColumns(NpgsqlConnection conn, string?[]? 
restrictions, bool async, CancellationToken cancellationToken = default) { var indexColumns = new DataTable("IndexColumns") { Locale = CultureInfo.InvariantCulture }; @@ -380,6 +389,7 @@ t_ns.nspname NOT IN ('pg_catalog', 'pg_toast') AND return indexColumns; } + [RequiresUnreferencedCode("Members from serialized types or types used in expressions may be trimmed if not referenced directly.")] static async Task GetConstraints(NpgsqlConnection conn, string?[]? restrictions, string? constraintType, bool async, CancellationToken cancellationToken = default) { var getConstraints = new StringBuilder(@" @@ -423,6 +433,7 @@ UNION ALL return table; } + [RequiresUnreferencedCode("Members from serialized types or types used in expressions may be trimmed if not referenced directly.")] static async Task GetConstraintColumns(NpgsqlConnection conn, string?[]? restrictions, bool async, CancellationToken cancellationToken = default) { var getConstraintColumns = new StringBuilder(@" diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index 6e3982a7b7..923bd9f46a 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -260,7 +260,7 @@ public NpgsqlSlimDataSourceBuilder UsePeriodicPasswordProvider( public INpgsqlNameTranslator DefaultNameTranslator { get; set; } = GlobalTypeMapper.Instance.DefaultNameTranslator; /// - public INpgsqlTypeMapper MapEnum(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + public INpgsqlTypeMapper MapEnum<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] TEnum>(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) where TEnum : struct, Enum { _userTypeMapper.MapEnum(pgName, nameTranslator); @@ -268,34 +268,38 @@ public INpgsqlTypeMapper MapEnum(string? pgName = null, INpgsqlNameTransl } /// - public bool UnmapEnum(string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) + public bool UnmapEnum<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] TEnum>(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) where TEnum : struct, Enum => _userTypeMapper.UnmapEnum(pgName, nameTranslator); /// - [RequiresUnreferencedCode("Composite type mapping isn't trimming-safe.")] - public INpgsqlTypeMapper MapComposite(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + [RequiresDynamicCode("Serializing arbitary types can require creating new generic types or methods. This may not work when AOT compiling.")] + public INpgsqlTypeMapper MapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>( + string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) { _userTypeMapper.MapComposite(typeof(T), pgName, nameTranslator); return this; } /// - [RequiresUnreferencedCode("Composite type mapping isn't trimming-safe.")] - public bool UnmapComposite(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + [RequiresDynamicCode("Serializing arbitary types can require creating new generic types or methods. This may not work when AOT compiling.")] + public bool UnmapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>( + string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) => _userTypeMapper.UnmapComposite(typeof(T), pgName, nameTranslator); /// - [RequiresUnreferencedCode("Composite type mapping isn't trimming-safe.")] - public INpgsqlTypeMapper MapComposite(Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + [RequiresDynamicCode("Serializing arbitary types can require creating new generic types or methods. 
This may not work when AOT compiling.")] + public INpgsqlTypeMapper MapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] + Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) { _userTypeMapper.MapComposite(clrType, pgName, nameTranslator); return this; } /// - [RequiresUnreferencedCode("Composite type mapping isn't trimming-safe.")] - public bool UnmapComposite(Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + [RequiresDynamicCode("Serializing arbitary types can require creating new generic types or methods. This may not work when AOT compiling.")] + public bool UnmapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] + Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) => _userTypeMapper.UnmapComposite(clrType, pgName, nameTranslator); /// @@ -373,6 +377,8 @@ public NpgsqlSlimDataSourceBuilder EnableMultiranges() /// A list of CLR types to map to PostgreSQL json (no need to specify ). /// /// The same builder instance so that multiple calls can be chained. + [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] + [RequiresDynamicCode("Serializing arbitary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] public NpgsqlSlimDataSourceBuilder UseSystemTextJson( JsonSerializerOptions? serializerOptions = null, Type[]? 
jsonbClrTypes = null, diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlRange.cs b/src/Npgsql/NpgsqlTypes/NpgsqlRange.cs index 96720522da..c260202ce9 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlRange.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlRange.cs @@ -80,7 +80,7 @@ namespace NpgsqlTypes; /// /// The used by to convert bounds into . /// - static readonly TypeConverter BoundConverter = TypeDescriptor.GetConverter(typeof(T)); + static TypeConverter? BoundConverter; /// /// True if implements ; otherwise, false. @@ -375,6 +375,7 @@ public override string ToString() /// /// See: https://www.postgresql.org/docs/current/static/rangetypes.html /// + [RequiresUnreferencedCode("Parse implementations for certain types of T may require members that have been trimmed.")] public static NpgsqlRange Parse(string value) { if (value is null) @@ -429,6 +430,7 @@ public static NpgsqlRange Parse(string value) string.Equals(upperSegment, NullLiteral, StringComparison.OrdinalIgnoreCase) || string.Equals(upperSegment, UpperInfinityLiteral, StringComparison.OrdinalIgnoreCase); + BoundConverter ??= TypeDescriptor.GetConverter(typeof(T)); var lower = lowerInfinite ? default : (T?)BoundConverter.ConvertFromString(lowerSegment); var upper = upperInfinite ? default : (T?)BoundConverter.ConvertFromString(upperSegment); @@ -438,6 +440,7 @@ public static NpgsqlRange Parse(string value) /// /// Represents a type converter for . /// + [RequiresUnreferencedCode("ConvertFrom implementations for certain types of T may require members that have been trimmed.")] public class RangeTypeConverter : TypeConverter { /// @@ -524,4 +527,4 @@ enum RangeFlags : byte /// The upper bound is both inclusive and indefinite. This represents an error condition. 
/// UpperInclusiveInfinite = UpperBoundInclusive | UpperBoundInfinite -} \ No newline at end of file +} diff --git a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs index e1739ecd96..c0bf23e172 100644 --- a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs +++ b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs @@ -172,7 +172,7 @@ public INpgsqlNameTranslator DefaultNameTranslator } /// - public INpgsqlTypeMapper MapEnum(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) where TEnum : struct, Enum + public INpgsqlTypeMapper MapEnum<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] TEnum>(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) where TEnum : struct, Enum { _lock.EnterWriteLock(); try @@ -194,7 +194,7 @@ public INpgsqlTypeMapper MapEnum(string? pgName = null, INpgsqlNameTransl } /// - public bool UnmapEnum(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) where TEnum : struct, Enum + public bool UnmapEnum<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] TEnum>(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) where TEnum : struct, Enum { _lock.EnterWriteLock(); try @@ -216,18 +216,19 @@ public bool UnmapEnum(string? pgName = null, INpgsqlNameTranslator? nameT } /// - [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] - public INpgsqlTypeMapper MapComposite(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + [RequiresDynamicCode("Serializing arbitary types can require creating new generic types or methods. This may not work when AOT compiling.")] + public INpgsqlTypeMapper MapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>(string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) => MapComposite(typeof(T), pgName, nameTranslator); /// - [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] - public bool UnmapComposite(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + [RequiresDynamicCode("Serializing arbitary types can require creating new generic types or methods. This may not work when AOT compiling.")] + public bool UnmapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) => UnmapComposite(typeof(T), pgName, nameTranslator); /// - [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] - public INpgsqlTypeMapper MapComposite(Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + [RequiresDynamicCode("Serializing arbitary types can require creating new generic types or methods. This may not work when AOT compiling.")] + public INpgsqlTypeMapper MapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] + Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) { _lock.EnterWriteLock(); try @@ -243,8 +244,9 @@ public INpgsqlTypeMapper MapComposite(Type clrType, string? pgName = null, INpgs } /// - [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] - public bool UnmapComposite(Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + [RequiresDynamicCode("Serializing arbitary types can require creating new generic types or methods. 
This may not work when AOT compiling.")] + public bool UnmapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] + Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) { _lock.EnterWriteLock(); try diff --git a/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs b/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs index 2f4d7ff040..210942b8f8 100644 --- a/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs +++ b/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs @@ -41,7 +41,7 @@ public interface INpgsqlTypeMapper /// Defaults to . /// /// The .NET enum type to be mapped - INpgsqlTypeMapper MapEnum( + INpgsqlTypeMapper MapEnum<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] TEnum>( string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) where TEnum : struct, Enum; @@ -57,7 +57,7 @@ INpgsqlTypeMapper MapEnum( /// A component which will be used to translate CLR names (e.g. SomeClass) into database names (e.g. some_class). /// Defaults to . /// - bool UnmapEnum( + bool UnmapEnum<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] TEnum>( string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) where TEnum : struct, Enum; @@ -82,8 +82,8 @@ bool UnmapEnum( /// Defaults to . /// /// The .NET type to be mapped - [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] - INpgsqlTypeMapper MapComposite( + [RequiresDynamicCode("Serializing arbitary types can require creating new generic types or methods. This may not work when AOT compiling.")] + INpgsqlTypeMapper MapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>( string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null); @@ -98,8 +98,8 @@ INpgsqlTypeMapper MapComposite( /// A component which will be used to translate CLR names (e.g. SomeClass) into database names (e.g. some_class). /// Defaults to /// - [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] - bool UnmapComposite( + [RequiresDynamicCode("Serializing arbitary types can require creating new generic types or methods. This may not work when AOT compiling.")] + bool UnmapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>( string? pgName = null, INpgsqlNameTranslator? nameTranslator = null); @@ -122,9 +122,9 @@ bool UnmapComposite( /// A component which will be used to translate CLR names (e.g. SomeClass) into database names (e.g. some_class). /// Defaults to . /// - [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] + [RequiresDynamicCode("Serializing arbitary types can require creating new generic types or methods. This may not work when AOT compiling.")] INpgsqlTypeMapper MapComposite( - Type clrType, + [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null); @@ -140,9 +140,9 @@ INpgsqlTypeMapper MapComposite( /// A component which will be used to translate CLR names (e.g. SomeClass) into database names (e.g. some_class). /// Defaults to . /// - [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] + [RequiresDynamicCode("Serializing arbitary types can require creating new generic types or methods. 
This may not work when AOT compiling.")] bool UnmapComposite( - Type clrType, + [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null); diff --git a/src/Npgsql/TypeMapping/UserTypeMapper.cs b/src/Npgsql/TypeMapping/UserTypeMapper.cs index 5447af9e87..71e460305d 100644 --- a/src/Npgsql/TypeMapping/UserTypeMapper.cs +++ b/src/Npgsql/TypeMapping/UserTypeMapper.cs @@ -44,7 +44,7 @@ sealed class UserTypeMapper public UserTypeMapper Clone() => new(_mappings) { DefaultNameTranslator = DefaultNameTranslator }; - public UserTypeMapper MapEnum(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + public UserTypeMapper MapEnum<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] TEnum>(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) where TEnum : struct, Enum { Unmap(typeof(TEnum), out var resolvedName, pgName, nameTranslator); @@ -52,26 +52,33 @@ public UserTypeMapper MapEnum(string? pgName = null, INpgsqlNameTranslato return this; } - public bool UnmapEnum(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + public bool UnmapEnum<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] TEnum>( + string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) where TEnum : struct, Enum => Unmap(typeof(TEnum), out _, pgName, nameTranslator ?? DefaultNameTranslator); - public UserTypeMapper MapComposite(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) where T : class + [RequiresDynamicCode("Serializing arbitary types can require creating new generic types or methods. 
This may not work when AOT compiling.")] + public UserTypeMapper MapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicProperties)] T>( + string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) where T : class { Unmap(typeof(T), out var resolvedName, pgName, nameTranslator); Items.Add(new CompositeMapping(resolvedName, nameTranslator ?? DefaultNameTranslator)); return this; } - public UserTypeMapper MapStructComposite(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) where T : struct + [RequiresDynamicCode("Serializing arbitary types can require creating new generic types or methods. This may not work when AOT compiling.")] + public UserTypeMapper MapStructComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicProperties)] T>( + string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) where T : struct { Unmap(typeof(T), out var resolvedName, pgName, nameTranslator); Items.Add(new StructCompositeMapping(resolvedName, nameTranslator ?? DefaultNameTranslator)); return this; } - [RequiresUnreferencedCode("Composite type mapping currently isn't trimming-safe.")] - public UserTypeMapper MapComposite(Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + [UnconditionalSuppressMessage("Trimming", "IL2111", Justification = "MapStructComposite and MapComposite have identical DAM annotations to clrType.")] + [RequiresDynamicCode("MapComposite switches between MapStructComposite and MapComposite at runtime based on clr type. This can require creating new generic types or methods, which requires creating code at runtime. 
This may not work when AOT compiling.")] + public UserTypeMapper MapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] + Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) { if (clrType.IsConstructedGenericType && clrType.GetGenericTypeDefinition() == typeof(Nullable<>)) throw new ArgumentException("Cannot map nullable.", nameof(clrType)); @@ -133,7 +140,8 @@ sealed class UserMappingResolver : IPgTypeInfoResolver => _mappings.Find(type, dataTypeName, options); } - sealed class CompositeMapping : UserTypeMapping where T : class + [RequiresDynamicCode("Serializing arbitary types can require creating new generic types or methods. This may not work when AOT compiling.")] + sealed class CompositeMapping<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicProperties)] T> : UserTypeMapping where T : class { readonly INpgsqlNameTranslator _nameTranslator; @@ -157,7 +165,8 @@ internal override void Build(TypeInfoMappingCollection mappings) } } - sealed class StructCompositeMapping : UserTypeMapping where T : struct + [RequiresDynamicCode("Serializing arbitary types can require creating new generic types or methods. 
This may not work when AOT compiling.")] + sealed class StructCompositeMapping<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicProperties)] T> : UserTypeMapping where T : struct { readonly INpgsqlNameTranslator _nameTranslator; @@ -185,12 +194,12 @@ internal abstract class EnumMapping : UserTypeMapping { internal INpgsqlNameTranslator NameTranslator { get; } - public EnumMapping(string pgTypeName, Type enumClrType, INpgsqlNameTranslator nameTranslator) + public EnumMapping(string pgTypeName, [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)]Type enumClrType, INpgsqlNameTranslator nameTranslator) : base(pgTypeName, enumClrType) => NameTranslator = nameTranslator; } - sealed class EnumMapping : EnumMapping + sealed class EnumMapping<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] TEnum> : EnumMapping where TEnum : struct, Enum { readonly Dictionary _enumToLabel = new(); diff --git a/src/Npgsql/VolatileResourceManager.cs b/src/Npgsql/VolatileResourceManager.cs index 816cf15b32..239b62fe8e 100644 --- a/src/Npgsql/VolatileResourceManager.cs +++ b/src/Npgsql/VolatileResourceManager.cs @@ -1,4 +1,5 @@ using System; +using System.Diagnostics.CodeAnalysis; using System.Threading; using System.Transactions; using Microsoft.Extensions.Logging; @@ -95,6 +96,8 @@ public void Prepare(PreparingEnlistment preparingEnlistment) } } + [UnconditionalSuppressMessage("Trimming", "IL2026", Justification = "Changing Enlist to be false does not affect potentially trimmed out functionality.")] + [UnconditionalSuppressMessage("Aot", "IL3050", Justification = "Changing Enlist to be false does not cause dynamic codegen.")] public void Commit(Enlistment enlistment) { CheckDisposed(); From 95b7a45a837414a0f813dbeae41efe216e26b7e6 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Wed, 27 Sep 2023 20:46:57 +0200 Subject: [PATCH 224/761] 
Exclude flaky test --- test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs | 1 + 1 file changed, 1 insertion(+) diff --git a/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs b/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs index 8497646f9d..0f13770d11 100644 --- a/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs +++ b/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs @@ -640,6 +640,7 @@ await c.ExecuteNonQueryAsync(@$" await NextMessage(messages); }, nameof(Dispose_while_replicating)); + [Platform(Exclude = "MacOsX", Reason = "Test is flaky in CI on Mac, see https://github.com/npgsql/npgsql/issues/5294")] [TestCase(true)] [TestCase(false)] [Test(Description = "Tests whether logical decoding messages get replicated as Logical Replication Protocol Messages on PostgreSQL 14 and above")] From 7d5ece2bfb865fb1eb5ba1aee3989aa0271e3be9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emmanuel=20Andr=C3=A9?= <2341261+manandre@users.noreply.github.com> Date: Sun, 1 Oct 2023 21:24:35 +0200 Subject: [PATCH 225/761] Implement CreateParameter for DbBatchCommand (#5300) Closes #5179 --- src/Npgsql/NpgsqlBatchCommand.cs | 27 +++++++++++++++++++++++++++ src/Npgsql/PublicAPI.Unshipped.txt | 4 +++- test/Npgsql.Tests/BatchTests.cs | 6 ++++++ 3 files changed, 36 insertions(+), 1 deletion(-) diff --git a/src/Npgsql/NpgsqlBatchCommand.cs b/src/Npgsql/NpgsqlBatchCommand.cs index 9e45f45c99..a27b76352b 100644 --- a/src/Npgsql/NpgsqlBatchCommand.cs +++ b/src/Npgsql/NpgsqlBatchCommand.cs @@ -38,6 +38,33 @@ public override string CommandText /// public new NpgsqlParameterCollection Parameters { get; } = new(); +#pragma warning disable CA1822 // Mark members as static + +#if NET8_0_OR_GREATER + /// + public override NpgsqlParameter CreateParameter() +#else + /// + /// Creates a new instance of a object. + /// + /// An object. 
+ public NpgsqlParameter CreateParameter() +#endif + => new(); + +#if NET8_0_OR_GREATER + /// + public override bool CanCreateParameter +#else + /// + /// Returns whether the method is implemented. + /// + public bool CanCreateParameter +#endif + => true; + +#pragma warning restore CA1822 // Mark members as static + /// /// Appends an error barrier after this batch command. Defaults to the value of on the /// batch. diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index 1cd6f3da9c..190f7e3948 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -1,4 +1,4 @@ -#nullable enable +#nullable enable const Npgsql.PostgresErrorCodes.IdleSessionTimeout = "57P05" -> string! Npgsql.ChannelBinding Npgsql.ChannelBinding.Disable = 0 -> Npgsql.ChannelBinding @@ -104,6 +104,8 @@ override Npgsql.NpgsqlBatch.Dispose() -> void *REMOVED*Npgsql.Replication.PhysicalReplicationConnection.StartReplication(NpgsqlTypes.NpgsqlLogSequenceNumber walLocation, System.Threading.CancellationToken cancellationToken, ulong timeline = 0) -> System.Collections.Generic.IAsyncEnumerable! *REMOVED*Npgsql.Replication.PhysicalReplicationSlot.PhysicalReplicationSlot(string! slotName, NpgsqlTypes.NpgsqlLogSequenceNumber? restartLsn = null, ulong? restartTimeline = null) -> void *REMOVED*Npgsql.Replication.PhysicalReplicationSlot.RestartTimeline.get -> ulong? +override Npgsql.NpgsqlBatchCommand.CanCreateParameter.get -> bool +override Npgsql.NpgsqlBatchCommand.CreateParameter() -> Npgsql.NpgsqlParameter! override NpgsqlTypes.NpgsqlCidr.ToString() -> string! 
*REMOVED*static NpgsqlTypes.NpgsqlInet.operator !=(NpgsqlTypes.NpgsqlInet x, NpgsqlTypes.NpgsqlInet y) -> bool *REMOVED*static NpgsqlTypes.NpgsqlInet.operator ==(NpgsqlTypes.NpgsqlInet x, NpgsqlTypes.NpgsqlInet y) -> bool diff --git a/test/Npgsql.Tests/BatchTests.cs b/test/Npgsql.Tests/BatchTests.cs index 96013a9676..766f022f07 100644 --- a/test/Npgsql.Tests/BatchTests.cs +++ b/test/Npgsql.Tests/BatchTests.cs @@ -288,6 +288,12 @@ public async Task StatementOID() Assert.That(batch.BatchCommands[1].OID, Is.EqualTo(0)); } + [Test] + public void CanCreateParameter() => Assert.True(new NpgsqlBatchCommand().CanCreateParameter); + + [Test] + public void CreateParameter() => Assert.NotNull(new NpgsqlBatchCommand().CreateParameter()); + #endregion NpgsqlBatchCommand #region Command behaviors From 25328d6d0ae5ad5286308b52b7ddd685968929bd Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Mon, 2 Oct 2023 15:30:02 +0200 Subject: [PATCH 226/761] Move JsonNode to dynamic (RUC/RDC) side and add JsonElement as a static mapping (#5295) --- ... 
SystemTextJsonDynamicTypeInfoResolver.cs} | 31 ++++++++++++++++--- .../SystemTextJsonTypeInfoResolvers.cs | 16 ++-------- src/Npgsql/NpgsqlDataSourceBuilder.cs | 12 +++---- src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 2 +- 4 files changed, 37 insertions(+), 24 deletions(-) rename src/Npgsql/Internal/Resolvers/{SystemTextJsonPocoTypeInfoResolver.cs => SystemTextJsonDynamicTypeInfoResolver.cs} (74%) diff --git a/src/Npgsql/Internal/Resolvers/SystemTextJsonPocoTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/SystemTextJsonDynamicTypeInfoResolver.cs similarity index 74% rename from src/Npgsql/Internal/Resolvers/SystemTextJsonPocoTypeInfoResolver.cs rename to src/Npgsql/Internal/Resolvers/SystemTextJsonDynamicTypeInfoResolver.cs index e513b29a86..580004edd3 100644 --- a/src/Npgsql/Internal/Resolvers/SystemTextJsonPocoTypeInfoResolver.cs +++ b/src/Npgsql/Internal/Resolvers/SystemTextJsonDynamicTypeInfoResolver.cs @@ -2,6 +2,7 @@ using System.Diagnostics.CodeAnalysis; using System.Text; using System.Text.Json; +using System.Text.Json.Nodes; using System.Text.Json.Serialization.Metadata; using Npgsql.Internal.Converters; using Npgsql.Internal.Postgres; @@ -10,12 +11,12 @@ namespace Npgsql.Internal.Resolvers; [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] [RequiresDynamicCode("Serializing arbitary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] -class SystemTextJsonPocoTypeInfoResolver : DynamicTypeInfoResolver, IPgTypeInfoResolver +class SystemTextJsonDynamicTypeInfoResolver : DynamicTypeInfoResolver, IPgTypeInfoResolver { protected TypeInfoMappingCollection Mappings { get; } = new(); protected JsonSerializerOptions _serializerOptions; - public SystemTextJsonPocoTypeInfoResolver(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerOptions? 
serializerOptions = null) + public SystemTextJsonDynamicTypeInfoResolver(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerOptions? serializerOptions = null) { #if NET7_0_OR_GREATER _serializerOptions = serializerOptions ??= JsonSerializerOptions.Default; @@ -31,6 +32,20 @@ void AddMappings(TypeInfoMappingCollection mappings, Type[] jsonbClrTypes, Type[ // We do GetTypeInfo calls directly so we need a resolver. serializerOptions.TypeInfoResolver ??= new DefaultJsonTypeInfoResolver(); + // These live in the RUC/RDC part as JsonValues can contain any .NET type. + foreach (var dataTypeName in new[] { DataTypeNames.Jsonb, DataTypeNames.Json }) + { + var jsonb = dataTypeName == DataTypeNames.Jsonb; + mappings.AddType(dataTypeName, (options, mapping, _) => + mapping.CreateInfo(options, new SystemTextJsonConverter(jsonb, options.TextEncoding, serializerOptions))); + mappings.AddType(dataTypeName, (options, mapping, _) => + mapping.CreateInfo(options, new SystemTextJsonConverter(jsonb, options.TextEncoding, serializerOptions))); + mappings.AddType(dataTypeName, (options, mapping, _) => + mapping.CreateInfo(options, new SystemTextJsonConverter(jsonb, options.TextEncoding, serializerOptions))); + mappings.AddType(dataTypeName, (options, mapping, _) => + mapping.CreateInfo(options, new SystemTextJsonConverter(jsonb, options.TextEncoding, serializerOptions))); + } + AddUserMappings(jsonb: true, jsonbClrTypes); AddUserMappings(jsonb: false, jsonClrTypes); @@ -62,6 +77,14 @@ protected void AddArrayInfos(TypeInfoMappingCollection mappings, TypeInfoMapping if (baseMappings.Items.Count == 0) return; + foreach (var dataTypeName in new[] { DataTypeNames.Jsonb, DataTypeNames.Json }) + { + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + } + var dynamicMappings = CreateCollection(baseMappings); foreach (var mapping in baseMappings.Items) 
dynamicMappings.AddArrayMapping(mapping.Type, mapping.DataTypeName); @@ -102,11 +125,11 @@ static PgConverter CreateSystemTextJsonConverter(Type valueType, bool jsonb, Enc [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] [RequiresDynamicCode("Serializing arbitary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] -sealed class SystemTextJsonPocoArrayTypeInfoResolver : SystemTextJsonPocoTypeInfoResolver, IPgTypeInfoResolver +sealed class SystemTextJsonDynamicArrayTypeInfoResolver : SystemTextJsonDynamicTypeInfoResolver, IPgTypeInfoResolver { new TypeInfoMappingCollection Mappings { get; } - public SystemTextJsonPocoArrayTypeInfoResolver(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerOptions? serializerOptions = null) + public SystemTextJsonDynamicArrayTypeInfoResolver(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerOptions? 
serializerOptions = null) : base(jsonbClrTypes, jsonClrTypes, serializerOptions) { Mappings = new TypeInfoMappingCollection(base.Mappings); diff --git a/src/Npgsql/Internal/Resolvers/SystemTextJsonTypeInfoResolvers.cs b/src/Npgsql/Internal/Resolvers/SystemTextJsonTypeInfoResolvers.cs index d637700505..42107dc686 100644 --- a/src/Npgsql/Internal/Resolvers/SystemTextJsonTypeInfoResolvers.cs +++ b/src/Npgsql/Internal/Resolvers/SystemTextJsonTypeInfoResolvers.cs @@ -1,7 +1,6 @@ using System; using System.Diagnostics.CodeAnalysis; using System.Text.Json; -using System.Text.Json.Nodes; using System.Text.Json.Serialization.Metadata; using Npgsql.Internal.Converters; using Npgsql.Internal.Postgres; @@ -36,14 +35,8 @@ static void AddTypeInfos(TypeInfoMappingCollection mappings, JsonSerializerOptio mappings.AddType(dataTypeName, (options, mapping, _) => mapping.CreateInfo(options, new SystemTextJsonConverter(jsonb, options.TextEncoding, serializerOptions)), isDefault: true); - mappings.AddType(dataTypeName, (options, mapping, _) => - mapping.CreateInfo(options, new SystemTextJsonConverter(jsonb, options.TextEncoding, serializerOptions))); - mappings.AddType(dataTypeName, (options, mapping, _) => - mapping.CreateInfo(options, new SystemTextJsonConverter(jsonb, options.TextEncoding, serializerOptions))); - mappings.AddType(dataTypeName, (options, mapping, _) => - mapping.CreateInfo(options, new SystemTextJsonConverter(jsonb, options.TextEncoding, serializerOptions))); - mappings.AddType(dataTypeName, (options, mapping, _) => - mapping.CreateInfo(options, new SystemTextJsonConverter(jsonb, options.TextEncoding, serializerOptions))); + mappings.AddStructType(dataTypeName, (options, mapping, _) => + mapping.CreateInfo(options, new SystemTextJsonConverter(jsonb, options.TextEncoding, serializerOptions))); } } @@ -52,10 +45,7 @@ protected static void AddArrayInfos(TypeInfoMappingCollection mappings) foreach (var dataTypeName in new[] { DataTypeNames.Jsonb, DataTypeNames.Json }) { 
mappings.AddArrayType(dataTypeName); - mappings.AddArrayType(dataTypeName); - mappings.AddArrayType(dataTypeName); - mappings.AddArrayType(dataTypeName); - mappings.AddArrayType(dataTypeName); + mappings.AddStructArrayType(dataTypeName); } } diff --git a/src/Npgsql/NpgsqlDataSourceBuilder.cs b/src/Npgsql/NpgsqlDataSourceBuilder.cs index e905bcbd23..a7c188e147 100644 --- a/src/Npgsql/NpgsqlDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlDataSourceBuilder.cs @@ -57,7 +57,7 @@ internal static void ResetGlobalMappings(bool overwrite) overwrite ? new AdoTypeInfoResolver() : AdoTypeInfoResolver.Instance, new ExtraConversionsResolver(), new SystemTextJsonTypeInfoResolver(), - new SystemTextJsonPocoTypeInfoResolver(), + new SystemTextJsonDynamicTypeInfoResolver(), new RangeTypeInfoResolver(), new RecordTypeInfoResolver(), new TupledRecordTypeInfoResolver(), @@ -72,7 +72,7 @@ internal static void ResetGlobalMappings(bool overwrite) new AdoArrayTypeInfoResolver(), new ExtraConversionsArrayTypeInfoResolver(), new SystemTextJsonArrayTypeInfoResolver(), - new SystemTextJsonPocoArrayTypeInfoResolver(), + new SystemTextJsonDynamicArrayTypeInfoResolver(), new RangeArrayTypeInfoResolver(), new RecordArrayTypeInfoResolver(), new TupledRecordArrayTypeInfoResolver(), @@ -104,7 +104,7 @@ void AddDefaultFeatures() AddTypeInfoResolver(new TupledRecordArrayTypeInfoResolver()); AddTypeInfoResolver(new RecordArrayTypeInfoResolver()); AddTypeInfoResolver(new RangeArrayTypeInfoResolver()); - AddTypeInfoResolver(new SystemTextJsonPocoArrayTypeInfoResolver()); + AddTypeInfoResolver(new SystemTextJsonDynamicArrayTypeInfoResolver()); AddTypeInfoResolver(new SystemTextJsonArrayTypeInfoResolver()); AddTypeInfoResolver(new ExtraConversionsArrayTypeInfoResolver()); AddTypeInfoResolver(new AdoArrayTypeInfoResolver()); @@ -119,7 +119,7 @@ void AddDefaultFeatures() AddTypeInfoResolver(new TupledRecordTypeInfoResolver()); AddTypeInfoResolver(new RecordTypeInfoResolver()); AddTypeInfoResolver(new 
RangeTypeInfoResolver()); - AddTypeInfoResolver(new SystemTextJsonPocoTypeInfoResolver()); + AddTypeInfoResolver(new SystemTextJsonDynamicTypeInfoResolver()); AddTypeInfoResolver(new SystemTextJsonTypeInfoResolver()); AddTypeInfoResolver(new ExtraConversionsResolver()); AddTypeInfoResolver(AdoTypeInfoResolver.Instance); @@ -307,8 +307,8 @@ public NpgsqlDataSourceBuilder UseSystemTextJson( Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null) { - AddTypeInfoResolver(new SystemTextJsonPocoArrayTypeInfoResolver(jsonbClrTypes, jsonClrTypes, serializerOptions)); - AddTypeInfoResolver(new SystemTextJsonPocoTypeInfoResolver(jsonbClrTypes, jsonClrTypes, serializerOptions)); + AddTypeInfoResolver(new SystemTextJsonDynamicArrayTypeInfoResolver(jsonbClrTypes, jsonClrTypes, serializerOptions)); + AddTypeInfoResolver(new SystemTextJsonDynamicTypeInfoResolver(jsonbClrTypes, jsonClrTypes, serializerOptions)); return this; } diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index 923bd9f46a..274d37c00a 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -384,7 +384,7 @@ public NpgsqlSlimDataSourceBuilder UseSystemTextJson( Type[]? jsonbClrTypes = null, Type[]? 
jsonClrTypes = null) { - AddTypeInfoResolver(new SystemTextJsonPocoTypeInfoResolver(jsonbClrTypes, jsonClrTypes, serializerOptions)); + AddTypeInfoResolver(new SystemTextJsonDynamicTypeInfoResolver(jsonbClrTypes, jsonClrTypes, serializerOptions)); return this; } From ffbf6fe7d3cb29aa9e9a419860acad0b5e7a8bb6 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Tue, 3 Oct 2023 11:32:27 +0200 Subject: [PATCH 227/761] Make NpgsqlConnection.GetSchema AOT/trimming-compatible (#5301) Closes #5293 --- src/Npgsql/NpgsqlConnection.cs | 12 - src/Npgsql/NpgsqlSchema.cs | 651 +++++++++++++++++++++---------- test/Npgsql.Tests/SchemaTests.cs | 28 ++ 3 files changed, 467 insertions(+), 224 deletions(-) diff --git a/src/Npgsql/NpgsqlConnection.cs b/src/Npgsql/NpgsqlConnection.cs index 79fa25150e..32523a7f5a 100644 --- a/src/Npgsql/NpgsqlConnection.cs +++ b/src/Npgsql/NpgsqlConnection.cs @@ -1721,8 +1721,6 @@ internal void EndBindingScope(ConnectorBindingScope scope) /// /// Returns the supported collections /// - [RequiresUnreferencedCode("Members from serialized types or types used in expressions may be trimmed if not referenced directly.")] - [UnconditionalSuppressMessage("Trimming", "IL2046", Justification = "At the Npgsql level we cannot add RUC to GetSchema.")] public override DataTable GetSchema() => GetSchema("MetaDataCollections", null); @@ -1731,8 +1729,6 @@ public override DataTable GetSchema() /// /// The collection name. /// The collection specified. - [RequiresUnreferencedCode("Members from serialized types or types used in expressions may be trimmed if not referenced directly.")] - [UnconditionalSuppressMessage("Trimming", "IL2046", Justification = "At the Npgsql level we cannot add RUC to GetSchema.")] public override DataTable GetSchema(string? collectionName) => GetSchema(collectionName, null); /// @@ -1744,8 +1740,6 @@ public override DataTable GetSchema() /// in the Restrictions collection. /// /// The collection specified. 
- [RequiresUnreferencedCode("Members from serialized types or types used in expressions may be trimmed if not referenced directly.")] - [UnconditionalSuppressMessage("Trimming", "IL2046", Justification = "At the Npgsql level we cannot add RUC to GetSchemaAsync.")] public override DataTable GetSchema(string? collectionName, string?[]? restrictions) => NpgsqlSchema.GetSchema(async: false, this, collectionName, restrictions).GetAwaiter().GetResult(); @@ -1756,9 +1750,7 @@ public override DataTable GetSchema(string? collectionName, string?[]? restricti /// An optional token to cancel the asynchronous operation. The default value is . /// /// The collection specified. - [RequiresUnreferencedCode("Members from serialized types or types used in expressions may be trimmed if not referenced directly.")] #if NET5_0_OR_GREATER - [UnconditionalSuppressMessage("Trimming", "IL2046", Justification = "At the Npgsql level we cannot add RUC to GetSchemaAsync.")] public override Task GetSchemaAsync(CancellationToken cancellationToken = default) #else public Task GetSchemaAsync(CancellationToken cancellationToken = default) @@ -1773,9 +1765,7 @@ public Task GetSchemaAsync(CancellationToken cancellationToken = defa /// An optional token to cancel the asynchronous operation. The default value is . /// /// The collection specified. - [RequiresUnreferencedCode("Members from serialized types or types used in expressions may be trimmed if not referenced directly.")] #if NET5_0_OR_GREATER - [UnconditionalSuppressMessage("Trimming", "IL2046", Justification = "At the Npgsql level we cannot add RUC to GetSchemaAsync.")] public override Task GetSchemaAsync(string collectionName, CancellationToken cancellationToken = default) #else public Task GetSchemaAsync(string collectionName, CancellationToken cancellationToken = default) @@ -1794,9 +1784,7 @@ public Task GetSchemaAsync(string collectionName, CancellationToken c /// An optional token to cancel the asynchronous operation. 
The default value is . /// /// The collection specified. - [RequiresUnreferencedCode("Members from serialized types or types used in expressions may be trimmed if not referenced directly.")] #if NET5_0_OR_GREATER - [UnconditionalSuppressMessage("Trimming", "IL2046", Justification = "At the Npgsql level we cannot add RUC to GetSchemaAsync.")] public override Task GetSchemaAsync(string collectionName, string?[]? restrictions, CancellationToken cancellationToken = default) #else public Task GetSchemaAsync(string collectionName, string?[]? restrictions, CancellationToken cancellationToken = default) diff --git a/src/Npgsql/NpgsqlSchema.cs b/src/Npgsql/NpgsqlSchema.cs index 89446eb50f..0e749e1589 100644 --- a/src/Npgsql/NpgsqlSchema.cs +++ b/src/Npgsql/NpgsqlSchema.cs @@ -2,7 +2,6 @@ using System.Collections.Generic; using System.Data; using System.Data.Common; -using System.Diagnostics.CodeAnalysis; using System.Globalization; using System.Text; using System.Threading; @@ -18,7 +17,6 @@ namespace Npgsql; /// static class NpgsqlSchema { - [RequiresUnreferencedCode("Members from serialized types or types used in expressions may be trimmed if not referenced directly.")] public static Task GetSchema(bool async, NpgsqlConnection conn, string? collectionName, string?[]? 
restrictions, CancellationToken cancellationToken = default) { if (collectionName is null) @@ -127,7 +125,7 @@ static NpgsqlCommand BuildCommand(NpgsqlConnection conn, StringBuilder query, st { for (var i = 0; i < restrictions.Length && i < names.Length; ++i) { - if (restrictions[i] is string restriction && restriction.Length != 0) + if (restrictions[i] is { Length: > 0 } restriction) { if (addWhere) { @@ -156,180 +154,279 @@ static NpgsqlCommand BuildCommand(NpgsqlConnection conn, StringBuilder query, st static string RemoveSpecialChars(string paramName) => paramName.Replace("(", "").Replace(")", "").Replace(".", ""); - [RequiresUnreferencedCode("Members from serialized types or types used in expressions may be trimmed if not referenced directly.")] - static async Task GetDatabases(NpgsqlConnection conn, string?[]? restrictions, bool async, CancellationToken cancellationToken = default) - { - var databases = new DataTable("Databases") { Locale = CultureInfo.InvariantCulture }; - - databases.Columns.AddRange(new[] { - new DataColumn("database_name"), - new DataColumn("owner"), - new DataColumn("encoding") - }); - var getDatabases = new StringBuilder(); + static Task GetDatabases(NpgsqlConnection conn, string?[]? 
restrictions, bool async, CancellationToken cancellationToken = default) + { + var dataTable = new DataTable("Databases") + { + Locale = CultureInfo.InvariantCulture, + Columns = + { + new DataColumn("database_name"), + new DataColumn("owner"), + new DataColumn("encoding") + } + }; - getDatabases.Append("SELECT d.datname AS database_name, u.usename AS owner, pg_catalog.pg_encoding_to_char(d.encoding) AS encoding FROM pg_catalog.pg_database d LEFT JOIN pg_catalog.pg_user u ON d.datdba = u.usesysid"); + var sql = new StringBuilder(); - using var command = BuildCommand(conn, getDatabases, restrictions, "datname"); - using var adapter = new NpgsqlDataAdapter(command); - await adapter.Fill(databases, async, cancellationToken).ConfigureAwait(false); + sql.Append( + """ +SELECT d.datname, u.usename, pg_catalog.pg_encoding_to_char(d.encoding) +FROM pg_catalog.pg_database d +LEFT JOIN pg_catalog.pg_user u ON d.datdba = u.usesysid +"""); - return databases; + return ParseResults( + async, + BuildCommand(conn, sql, restrictions, "datname"), + dataTable, + (reader, row) => + { + row["database_name"] = GetFieldValueOrDBNull(reader, 0); + row["owner"] = GetFieldValueOrDBNull(reader, 1); + row["encoding"] = GetFieldValueOrDBNull(reader, 2); + }, cancellationToken); } - [RequiresUnreferencedCode("Members from serialized types or types used in expressions may be trimmed if not referenced directly.")] - static async Task GetSchemata(NpgsqlConnection conn, string?[]? restrictions, bool async, CancellationToken cancellationToken = default) + static Task GetSchemata(NpgsqlConnection conn, string?[]? 
restrictions, bool async, CancellationToken cancellationToken = default) { - var schemata = new DataTable("Schemata") { Locale = CultureInfo.InvariantCulture }; - - schemata.Columns.AddRange(new[] { - new DataColumn("catalog_name"), - new DataColumn("schema_name"), - new DataColumn("schema_owner") - }); + var dataTable = new DataTable("Schemata") + { + Locale = CultureInfo.InvariantCulture, + Columns = + { + new DataColumn("catalog_name"), + new DataColumn("schema_name"), + new DataColumn("schema_owner") + } + }; - var getSchemata = new StringBuilder(@" + var sql = new StringBuilder( + """ SELECT * FROM ( - SELECT current_database() AS catalog_name, - nspname AS schema_name, - r.rolname AS schema_owner - FROM - pg_catalog.pg_namespace LEFT JOIN pg_catalog.pg_roles r ON r.oid = nspowner - ) tmp"); - - using var command = BuildCommand(conn, getSchemata, restrictions, "catalog_name", "schema_name", "schema_owner"); - using var adapter = new NpgsqlDataAdapter(command); - await adapter.Fill(schemata, async, cancellationToken).ConfigureAwait(false); - - return schemata; + SELECT current_database(), nspname, r.rolname + FROM pg_catalog.pg_namespace + LEFT JOIN pg_catalog.pg_roles r ON r.oid = nspowner +) tmp +"""); + + return ParseResults( + async, + BuildCommand(conn, sql, restrictions, "catalog_name", "schema_name", "schema_owner"), + dataTable, + (reader, row) => + { + row["catalog_name"] = GetFieldValueOrDBNull(reader, 0); + row["schema_name"] = GetFieldValueOrDBNull(reader, 1); + row["schema_owner"] = GetFieldValueOrDBNull(reader, 2); + }, cancellationToken); } - [RequiresUnreferencedCode("Members from serialized types or types used in expressions may be trimmed if not referenced directly.")] - static async Task GetTables(NpgsqlConnection conn, string?[]? restrictions, bool async, CancellationToken cancellationToken = default) + static Task GetTables(NpgsqlConnection conn, string?[]? 
restrictions, bool async, CancellationToken cancellationToken = default) { - var tables = new DataTable("Tables") { Locale = CultureInfo.InvariantCulture }; - - tables.Columns.AddRange(new[] { - new DataColumn("table_catalog"), - new DataColumn("table_schema"), - new DataColumn("table_name"), - new DataColumn("table_type") - }); + var dataTable = new DataTable("Tables") + { + Locale = CultureInfo.InvariantCulture, + Columns = + { + new DataColumn("table_catalog"), + new DataColumn("table_schema"), + new DataColumn("table_name"), + new DataColumn("table_type") + } + }; - var getTables = new StringBuilder(); + var sql = new StringBuilder(); - getTables.Append(@" + sql.Append( + """ SELECT table_catalog, table_schema, table_name, table_type FROM information_schema.tables WHERE table_type IN ('BASE TABLE', 'FOREIGN', 'FOREIGN TABLE') AND - table_schema NOT IN ('pg_catalog', 'information_schema')"); - - using var command = BuildCommand(conn, getTables, restrictions, false, "table_catalog", "table_schema", "table_name", "table_type"); - using var adapter = new NpgsqlDataAdapter(command); - await adapter.Fill(tables, async, cancellationToken).ConfigureAwait(false); - - return tables; + table_schema NOT IN ('pg_catalog', 'information_schema') +"""); + + return ParseResults( + async, + BuildCommand(conn, sql, restrictions, false, "table_catalog", "table_schema", "table_name", "table_type"), + dataTable, + (reader, row) => + { + row["table_catalog"] = GetFieldValueOrDBNull(reader, 0); + row["table_schema"] = GetFieldValueOrDBNull(reader, 1); + row["table_name"] = GetFieldValueOrDBNull(reader, 2); + row["table_type"] = GetFieldValueOrDBNull(reader, 3); + }, cancellationToken); } - [RequiresUnreferencedCode("Members from serialized types or types used in expressions may be trimmed if not referenced directly.")] - static async Task GetColumns(NpgsqlConnection conn, string?[]? 
restrictions, bool async, CancellationToken cancellationToken = default) + static Task GetColumns(NpgsqlConnection conn, string?[]? restrictions, bool async, CancellationToken cancellationToken = default) { - var columns = new DataTable("Columns") { Locale = CultureInfo.InvariantCulture }; - - columns.Columns.AddRange(new DataColumn[] { - new("table_catalog"), new("table_schema"), new("table_name"), new("column_name"), - new("ordinal_position", typeof(int)), - new("column_default"), - new("is_nullable"), - new("data_type"), - new("character_maximum_length", typeof(int)), new("character_octet_length", typeof(int)), - new("numeric_precision", typeof(int)), new("numeric_precision_radix", typeof(int)), new("numeric_scale", typeof(int)), - new("datetime_precision", typeof(int)), - new("character_set_catalog"), new("character_set_schema"), new("character_set_name"), - new("collation_catalog") - }); - - var getColumns = new StringBuilder(@" + var dataTable = new DataTable("Columns") + { + Locale = CultureInfo.InvariantCulture, + Columns = + { + new DataColumn("table_catalog"), + new DataColumn("table_schema"), + new DataColumn("table_name"), + new DataColumn("column_name"), + new DataColumn("ordinal_position", typeof(int)), + new DataColumn("column_default"), + new DataColumn("is_nullable"), + new DataColumn("data_type"), + new DataColumn("character_maximum_length", typeof(int)), + new DataColumn("character_octet_length", typeof(int)), + new DataColumn("numeric_precision", typeof(int)), + new DataColumn("numeric_precision_radix", typeof(int)), + new DataColumn("numeric_scale", typeof(int)), + new DataColumn("datetime_precision", typeof(int)), + new DataColumn("character_set_catalog"), + new DataColumn("character_set_schema"), + new DataColumn("character_set_name"), + new DataColumn("collation_catalog") + } + }; + + var sql = new StringBuilder( + """ SELECT - table_catalog, table_schema, table_name, column_name, + table_catalog, + table_schema, + table_name, + column_name, 
ordinal_position, column_default, is_nullable, - CASE WHEN udt_schema is NULL THEN udt_name ELSE format_type(typ.oid, NULL) END AS data_type, - character_maximum_length, character_octet_length, - numeric_precision, numeric_precision_radix, numeric_scale, + CASE WHEN udt_schema is NULL THEN udt_name ELSE format_type(typ.oid, NULL) END, + character_maximum_length, + character_octet_length, + numeric_precision, + numeric_precision_radix, + numeric_scale, datetime_precision, - character_set_catalog, character_set_schema, character_set_name, + character_set_catalog, + character_set_schema, + character_set_name, collation_catalog FROM information_schema.columns JOIN pg_namespace AS ns ON ns.nspname = udt_schema -JOIN pg_type AS typ ON typnamespace = ns.oid AND typname = udt_name"); - - using var command = BuildCommand(conn, getColumns, restrictions, "table_catalog", "table_schema", "table_name", "column_name"); - using var adapter = new NpgsqlDataAdapter(command); - await adapter.Fill(columns, async, cancellationToken).ConfigureAwait(false); - - return columns; +JOIN pg_type AS typ ON typnamespace = ns.oid AND typname = udt_name +"""); + + return ParseResults( + async, + BuildCommand(conn, sql, restrictions, "table_catalog", "table_schema", "table_name", "column_name"), + dataTable, + (reader, row) => + { + row["table_catalog"] = GetFieldValueOrDBNull(reader, 0); + row["table_schema"] = GetFieldValueOrDBNull(reader, 1); + row["table_name"] = GetFieldValueOrDBNull(reader, 2); + row["column_name"] = GetFieldValueOrDBNull(reader, 3); + row["ordinal_position"] = GetFieldValueOrDBNull(reader, 4); + row["column_default"] = GetFieldValueOrDBNull(reader, 5); + row["is_nullable"] = GetFieldValueOrDBNull(reader, 6); + row["data_type"] = GetFieldValueOrDBNull(reader, 7); + row["character_maximum_length"] = GetFieldValueOrDBNull(reader, 8); + row["character_octet_length"] = GetFieldValueOrDBNull(reader, 9); + row["numeric_precision"] = GetFieldValueOrDBNull(reader, 10); + 
row["numeric_precision_radix"] = GetFieldValueOrDBNull(reader, 11); + row["numeric_scale"] = GetFieldValueOrDBNull(reader, 12); + row["datetime_precision"] = GetFieldValueOrDBNull(reader, 13); + row["character_set_catalog"] = GetFieldValueOrDBNull(reader, 14); + row["character_set_schema"] = GetFieldValueOrDBNull(reader, 15); + row["character_set_name"] = GetFieldValueOrDBNull(reader, 16); + row["collation_catalog"] = GetFieldValueOrDBNull(reader, 17); + }, cancellationToken); } - [RequiresUnreferencedCode("Members from serialized types or types used in expressions may be trimmed if not referenced directly.")] - static async Task GetViews(NpgsqlConnection conn, string?[]? restrictions, bool async, CancellationToken cancellationToken = default) + static Task GetViews(NpgsqlConnection conn, string?[]? restrictions, bool async, CancellationToken cancellationToken = default) { - var views = new DataTable("Views") { Locale = CultureInfo.InvariantCulture }; - - views.Columns.AddRange(new[] { - new DataColumn("table_catalog"), new DataColumn("table_schema"), new DataColumn("table_name"), - new DataColumn("check_option"), new DataColumn("is_updatable") - }); + var dataTable = new DataTable("Views") + { + Locale = CultureInfo.InvariantCulture, + Columns = + { + new DataColumn("table_catalog"), + new DataColumn("table_schema"), + new DataColumn("table_name"), + new DataColumn("check_option"), + new DataColumn("is_updatable") + } + }; - var getViews = new StringBuilder(@" + var sql = new StringBuilder( + """ SELECT table_catalog, table_schema, table_name, check_option, is_updatable FROM information_schema.views -WHERE table_schema NOT IN ('pg_catalog', 'information_schema')"); - - using var command = BuildCommand(conn, getViews, restrictions, false, "table_catalog", "table_schema", "table_name"); - using var adapter = new NpgsqlDataAdapter(command); - await adapter.Fill(views, async, cancellationToken).ConfigureAwait(false); - - return views; +WHERE table_schema NOT IN 
('pg_catalog', 'information_schema') +"""); + + return ParseResults( + async, + BuildCommand(conn, sql, restrictions, false, "table_catalog", "table_schema", "table_name"), + dataTable, + (reader, row) => + { + row["table_catalog"] = GetFieldValueOrDBNull(reader, 0); + row["table_schema"] = GetFieldValueOrDBNull(reader, 1); + row["table_name"] = GetFieldValueOrDBNull(reader, 2); + row["check_option"] = GetFieldValueOrDBNull(reader, 3); + row["is_updatable"] = GetFieldValueOrDBNull(reader, 3); + }, cancellationToken); } - [RequiresUnreferencedCode("Members from serialized types or types used in expressions may be trimmed if not referenced directly.")] - static async Task GetUsers(NpgsqlConnection conn, string?[]? restrictions, bool async, CancellationToken cancellationToken = default) + static Task GetUsers(NpgsqlConnection conn, string?[]? restrictions, bool async, CancellationToken cancellationToken = default) { - var users = new DataTable("Users") { Locale = CultureInfo.InvariantCulture }; - - users.Columns.AddRange(new[] { new DataColumn("user_name"), new DataColumn("user_sysid", typeof(uint)) }); - - var getUsers = new StringBuilder(); + var dataTable = new DataTable("Users") + { + Locale = CultureInfo.InvariantCulture, + Columns = + { + new DataColumn("user_name"), + new DataColumn("user_sysid", typeof(uint)) + } + }; - getUsers.Append("SELECT usename as user_name, usesysid as user_sysid FROM pg_catalog.pg_user"); + var sql = new StringBuilder(); - using var command = BuildCommand(conn, getUsers, restrictions, "usename"); - using var adapter = new NpgsqlDataAdapter(command); - await adapter.Fill(users, async, cancellationToken).ConfigureAwait(false); + sql.Append("SELECT usename, usesysid FROM pg_catalog.pg_user"); - return users; + return ParseResults( + async, + BuildCommand(conn, sql, restrictions, "usename"), + dataTable, + (reader, row) => + { + row["user_name"] = GetFieldValueOrDBNull(reader, 0); + row["user_sysid"] = GetFieldValueOrDBNull(reader, 1); + 
}, cancellationToken); } - [RequiresUnreferencedCode("Members from serialized types or types used in expressions may be trimmed if not referenced directly.")] - static async Task GetIndexes(NpgsqlConnection conn, string?[]? restrictions, bool async, CancellationToken cancellationToken = default) + static Task GetIndexes(NpgsqlConnection conn, string?[]? restrictions, bool async, CancellationToken cancellationToken = default) { - var indexes = new DataTable("Indexes") { Locale = CultureInfo.InvariantCulture }; - - indexes.Columns.AddRange(new[] { - new DataColumn("table_catalog"), new DataColumn("table_schema"), new DataColumn("table_name"), - new DataColumn("index_name"), new DataColumn("type_desc") - }); - - var getIndexes = new StringBuilder(@" -SELECT current_database() AS table_catalog, - n.nspname AS table_schema, - t.relname AS table_name, - i.relname AS index_name, - '' AS type_desc + var dataTable = new DataTable("Indexes") + { + Locale = CultureInfo.InvariantCulture, + Columns = + { + new DataColumn("table_catalog"), + new DataColumn("table_schema"), + new DataColumn("table_name"), + new DataColumn("index_name"), + new DataColumn("type_desc") + } + }; + + var sql = new StringBuilder( + """ +SELECT current_database(), + n.nspname, + t.relname, + i.relname, + '' FROM pg_catalog.pg_class i JOIN pg_catalog.pg_index ix ON ix.indexrelid = i.oid @@ -339,36 +436,52 @@ pg_catalog.pg_class i WHERE i.relkind = 'i' AND n.nspname NOT IN ('pg_catalog', 'pg_toast') AND - t.relkind = 'r'"); - - using var command = BuildCommand(conn, getIndexes, restrictions, false, "current_database()", "n.nspname", "t.relname", "i.relname"); - using var adapter = new NpgsqlDataAdapter(command); - await adapter.Fill(indexes, async, cancellationToken).ConfigureAwait(false); - - return indexes; + t.relkind = 'r' +"""); + + return ParseResults( + async, + BuildCommand(conn, sql, restrictions, false, "current_database()", "n.nspname", "t.relname", "i.relname"), + dataTable, + (reader, row) => 
+ { + row["table_catalog"] = GetFieldValueOrDBNull(reader, 0); + row["table_schema"] = GetFieldValueOrDBNull(reader, 1); + row["table_name"] = GetFieldValueOrDBNull(reader, 2); + row["index_name"] = GetFieldValueOrDBNull(reader, 3); + row["type_desc"] = GetFieldValueOrDBNull(reader, 4); + }, cancellationToken); } - [RequiresUnreferencedCode("Members from serialized types or types used in expressions may be trimmed if not referenced directly.")] - static async Task GetIndexColumns(NpgsqlConnection conn, string?[]? restrictions, bool async, CancellationToken cancellationToken = default) + static Task GetIndexColumns(NpgsqlConnection conn, string?[]? restrictions, bool async, CancellationToken cancellationToken = default) { - var indexColumns = new DataTable("IndexColumns") { Locale = CultureInfo.InvariantCulture }; - - indexColumns.Columns.AddRange(new[] { - new DataColumn("constraint_catalog"), new DataColumn("constraint_schema"), new DataColumn("constraint_name"), - new DataColumn("table_catalog"), new DataColumn("table_schema"), new DataColumn("table_name"), - new DataColumn("column_name"), new DataColumn("index_name") - }); + var dataTable = new DataTable("IndexColumns") + { + Locale = CultureInfo.InvariantCulture, + Columns = + { + new DataColumn("constraint_catalog"), + new DataColumn("constraint_schema"), + new DataColumn("constraint_name"), + new DataColumn("table_catalog"), + new DataColumn("table_schema"), + new DataColumn("table_name"), + new DataColumn("column_name"), + new DataColumn("index_name") + } + }; - var getIndexColumns = new StringBuilder(@" + var sql = new StringBuilder( + """ SELECT - current_database() AS constraint_catalog, - t_ns.nspname AS constraint_schema, - ix_cls.relname AS constraint_name, - current_database() AS table_catalog, - ix_ns.nspname AS table_schema, - t.relname AS table_name, - a.attname AS column_name, - ix_cls.relname AS index_name + current_database(), + t_ns.nspname, + ix_cls.relname, + current_database(), + 
ix_ns.nspname, + t.relname, + a.attname, + ix_cls.relname FROM pg_class t JOIN pg_index ix ON t.oid = ix.indrelid @@ -380,71 +493,117 @@ pg_class t ix_cls.relkind = 'i' AND t_ns.nspname NOT IN ('pg_catalog', 'pg_toast') AND a.attnum = ANY(ix.indkey) AND - t.relkind = 'r'"); - - using var command = BuildCommand(conn, getIndexColumns, restrictions, false, "current_database()", "t_ns.nspname", "t.relname", "ix_cls.relname", "a.attname"); - using var adapter = new NpgsqlDataAdapter(command); - await adapter.Fill(indexColumns, async, cancellationToken).ConfigureAwait(false); - - return indexColumns; + t.relkind = 'r' +"""); + + return ParseResults( + async, + BuildCommand(conn, sql, restrictions, false, "current_database()", "t_ns.nspname", "t.relname", "ix_cls.relname", "a.attname"), + dataTable, + (reader, row) => + { + row["constraint_catalog"] = GetFieldValueOrDBNull(reader, 0); + row["constraint_schema"] = GetFieldValueOrDBNull(reader, 1); + row["constraint_name"] = GetFieldValueOrDBNull(reader, 2); + row["table_catalog"] = GetFieldValueOrDBNull(reader, 3); + row["table_schema"] = GetFieldValueOrDBNull(reader, 4); + row["table_name"] = GetFieldValueOrDBNull(reader, 5); + row["column_name"] = GetFieldValueOrDBNull(reader, 6); + row["index_name"] = GetFieldValueOrDBNull(reader, 7); + }, cancellationToken); } - [RequiresUnreferencedCode("Members from serialized types or types used in expressions may be trimmed if not referenced directly.")] - static async Task GetConstraints(NpgsqlConnection conn, string?[]? restrictions, string? constraintType, bool async, CancellationToken cancellationToken = default) + static Task GetConstraints(NpgsqlConnection conn, string?[]? restrictions, string? 
constraintType, bool async, CancellationToken cancellationToken = default) { - var getConstraints = new StringBuilder(@" + var sql = new StringBuilder( + """ SELECT - current_database() AS ""CONSTRAINT_CATALOG"", - pgn.nspname AS ""CONSTRAINT_SCHEMA"", - pgc.conname AS ""CONSTRAINT_NAME"", - current_database() AS ""TABLE_CATALOG"", - pgtn.nspname AS ""TABLE_SCHEMA"", - pgt.relname AS ""TABLE_NAME"", - ""CONSTRAINT_TYPE"", - pgc.condeferrable AS ""IS_DEFERRABLE"", - pgc.condeferred AS ""INITIALLY_DEFERRED"" + current_database(), + pgn.nspname, + pgc.conname, + current_database(), + pgtn.nspname, + pgt.relname, + constraint_type, + pgc.condeferrable, + pgc.condeferred FROM pg_catalog.pg_constraint pgc JOIN pg_catalog.pg_namespace pgn ON pgc.connamespace = pgn.oid JOIN pg_catalog.pg_class pgt ON pgc.conrelid = pgt.oid JOIN pg_catalog.pg_namespace pgtn ON pgt.relnamespace = pgtn.oid JOIN ( - SELECT 'PRIMARY KEY' AS ""CONSTRAINT_TYPE"", 'p' AS ""contype"" + SELECT 'PRIMARY KEY' AS constraint_type, 'p' AS contype UNION ALL - SELECT 'FOREIGN KEY' AS ""CONSTRAINT_TYPE"", 'f' AS ""contype"" + SELECT 'FOREIGN KEY' AS constraint_type, 'f' AS contype UNION ALL - SELECT 'UNIQUE KEY' AS ""CONSTRAINT_TYPE"", 'u' AS ""contype"" -) mapping_table ON mapping_table.contype = pgc.contype"); - if ("ForeignKeys".Equals(constraintType)) - getConstraints.Append(" and pgc.contype='f'"); - else if ("PrimaryKey".Equals(constraintType)) - getConstraints.Append(" and pgc.contype='p'"); - else if ("UniqueKeys".Equals(constraintType)) - getConstraints.Append(" and pgc.contype='u'"); - else - constraintType = "Constraints"; + SELECT 'UNIQUE KEY' AS constraint_type, 'u' AS contype +) mapping_table ON mapping_table.contype = pgc.contype +"""); - using var command = BuildCommand(conn, getConstraints, restrictions, false, "current_database()", "pgtn.nspname", "pgt.relname", "pgc.conname"); - using var adapter = new NpgsqlDataAdapter(command); - var table = new DataTable(constraintType) { Locale = 
CultureInfo.InvariantCulture }; + switch (constraintType) + { + case "ForeignKeys": + sql.Append(" and pgc.contype='f'"); + break; + case "PrimaryKey": + sql.Append(" and pgc.contype='p'"); + break; + case "UniqueKeys": + sql.Append(" and pgc.contype='u'"); + break; + default: + constraintType = "Constraints"; + break; + } - await adapter.Fill(table, async, cancellationToken).ConfigureAwait(false); + var dataTable = new DataTable(constraintType) + { + Locale = CultureInfo.InvariantCulture, + Columns = + { + new DataColumn("CONSTRAINT_CATALOG"), + new DataColumn("CONSTRAINT_SCHEMA"), + new DataColumn("CONSTRAINT_NAME"), + new DataColumn("TABLE_CATALOG"), + new DataColumn("TABLE_SCHEMA"), + new DataColumn("TABLE_NAME"), + new DataColumn("CONSTRAINT_TYPE"), + new DataColumn("IS_DEFERRABLE", typeof(bool)), + new DataColumn("INITIALLY_DEFERRED", typeof(bool)) + } + }; - return table; + return ParseResults( + async, + BuildCommand(conn, sql, restrictions, false, "current_database()", "pgtn.nspname", "pgt.relname", "pgc.conname"), + dataTable, + (reader, row) => + { + row["CONSTRAINT_CATALOG"] = GetFieldValueOrDBNull(reader, 0); + row["CONSTRAINT_SCHEMA"] = GetFieldValueOrDBNull(reader, 1); + row["CONSTRAINT_NAME"] = GetFieldValueOrDBNull(reader, 2); + row["TABLE_CATALOG"] = GetFieldValueOrDBNull(reader, 3); + row["TABLE_SCHEMA"] = GetFieldValueOrDBNull(reader, 4); + row["TABLE_NAME"] = GetFieldValueOrDBNull(reader, 5); + row["CONSTRAINT_TYPE"] = GetFieldValueOrDBNull(reader, 6); + row["IS_DEFERRABLE"] = GetFieldValueOrDBNull(reader, 7); + row["INITIALLY_DEFERRED"] = GetFieldValueOrDBNull(reader, 8); + }, cancellationToken); } - [RequiresUnreferencedCode("Members from serialized types or types used in expressions may be trimmed if not referenced directly.")] - static async Task GetConstraintColumns(NpgsqlConnection conn, string?[]? restrictions, bool async, CancellationToken cancellationToken = default) + static Task GetConstraintColumns(NpgsqlConnection conn, string?[]? 
restrictions, bool async, CancellationToken cancellationToken = default) { - var getConstraintColumns = new StringBuilder(@" -SELECT current_database() AS constraint_catalog, - n.nspname AS constraint_schema, - c.conname AS constraint_name, - current_database() AS table_catalog, - n.nspname AS table_schema, - t.relname AS table_name, - a.attname AS column_name, - a.attnum AS ordinal_number, + var sql = new StringBuilder( + """ +SELECT current_database(), + n.nspname, + c.conname, + current_database(), + n.nspname, + t.relname, + a.attname, + a.attnum, mapping_table.constraint_type FROM pg_constraint c JOIN pg_namespace n on n.oid = c.connamespace @@ -458,15 +617,42 @@ UNION ALL SELECT 'UNIQUE KEY' AS constraint_type, 'u' AS contype ) mapping_table ON mapping_table.contype = c.contype - AND n.nspname NOT IN ('pg_catalog', 'pg_toast')"); + AND n.nspname NOT IN ('pg_catalog', 'pg_toast') +"""); - using var command = BuildCommand(conn, getConstraintColumns, restrictions, false, "current_database()", "n.nspname", "t.relname", "c.conname", "a.attname"); - using var adapter = new NpgsqlDataAdapter(command); - var table = new DataTable("ConstraintColumns") { Locale = CultureInfo.InvariantCulture }; - - await adapter.Fill(table, async, cancellationToken).ConfigureAwait(false); + var dataTable = new DataTable("ConstraintColumns") + { + Locale = CultureInfo.InvariantCulture, + Columns = + { + new DataColumn("constraint_catalog"), + new DataColumn("constraint_schema"), + new DataColumn("constraint_name"), + new DataColumn("table_catalog"), + new DataColumn("table_schema"), + new DataColumn("table_name"), + new DataColumn("column_name"), + new DataColumn("ordinal_number", typeof(int)), + new DataColumn("constraint_type") + } + }; - return table; + return ParseResults( + async, + BuildCommand(conn, sql, restrictions, false, "current_database()", "n.nspname", "t.relname", "c.conname", "a.attname"), + dataTable, + (reader, row) => + { + row["constraint_catalog"] = 
GetFieldValueOrDBNull(reader, 0); + row["constraint_schema"] = GetFieldValueOrDBNull(reader, 1); + row["constraint_name"] = GetFieldValueOrDBNull(reader, 2); + row["table_catalog"] = GetFieldValueOrDBNull(reader, 3); + row["table_schema"] = GetFieldValueOrDBNull(reader, 4); + row["table_name"] = GetFieldValueOrDBNull(reader, 5); + row["column_name"] = GetFieldValueOrDBNull(reader, 6); + row["ordinal_number"] = GetFieldValueOrDBNull(reader, 7); + row["constraint_type"] = GetFieldValueOrDBNull(reader, 8); + }, cancellationToken); } static DataTable GetDataSourceInformation(NpgsqlConnection conn) @@ -887,4 +1073,45 @@ static DataTable GetReservedWords() }; #endregion Reserved Keywords + + static async Task ParseResults(bool async, NpgsqlCommand command, DataTable dataTable, Action populateRow, CancellationToken cancellationToken) + { + NpgsqlDataReader? reader = null; + try + { + reader = async + ? await command.ExecuteReaderAsync(cancellationToken).ConfigureAwait(false) + : command.ExecuteReader(); + + dataTable.BeginLoadData(); + + while (async ? await reader.ReadAsync(cancellationToken).ConfigureAwait(false) : reader.Read()) + populateRow(reader, dataTable.Rows.Add()); + + return dataTable; + } + finally + { + dataTable.EndLoadData(); + + if (async) + { + if (reader is not null) + await reader.DisposeAsync().ConfigureAwait(false); +#if NETSTANDARD2_0 + command.Dispose(); +#else + await command.DisposeAsync().ConfigureAwait(false); +#endif + } + else + { + reader?.Dispose(); + command.Dispose(); + } + } + } + + static object GetFieldValueOrDBNull(NpgsqlDataReader reader, int ordinal) + => reader.IsDBNull(ordinal) ? 
DBNull.Value : reader.GetFieldValue(ordinal)!; } diff --git a/test/Npgsql.Tests/SchemaTests.cs b/test/Npgsql.Tests/SchemaTests.cs index 2a143cccfd..500cd77345 100644 --- a/test/Npgsql.Tests/SchemaTests.cs +++ b/test/Npgsql.Tests/SchemaTests.cs @@ -224,6 +224,34 @@ public async Task ReservedWords() Assert.That(reservedWords.Rows, Has.Count.GreaterThan(0)); } + [Test] + public async Task Databases() + { + await using var conn = await OpenConnectionAsync(); + var database = await conn.ExecuteScalarAsync("SELECT current_database()"); + + var dataTable = await GetSchema(conn, "Databases"); + var databases = dataTable.Rows + .Cast() + .Select(r => (string)r["database_name"]) + .ToList(); + + Assert.That(databases, Does.Contain(database)); + } + + [Test] + public async Task Schemata() + { + await using var conn = await OpenConnectionAsync(); + var schema = await CreateTempSchema(conn); + + var dataTable = await GetSchema(conn, "Schemata"); + var row = dataTable.Rows.Cast().Single(r => (string)r["schema_name"] == schema); + + Assert.That(row["catalog_name"], Is.EqualTo(await conn.ExecuteScalarAsync("SELECT current_database()"))); + Assert.That(row["schema_owner"], Is.EqualTo(await conn.ExecuteScalarAsync("SELECT current_user"))); + } + [Test] public async Task ForeignKeys() { From 0f7267af6a40c343e1dbafed2d638facc055eb2d Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Tue, 3 Oct 2023 15:23:44 +0200 Subject: [PATCH 228/761] Move asObject to be a caller concern (#5290) --- .../BackendMessages/RowDescriptionMessage.cs | 16 ++- src/Npgsql/Internal/PgTypeInfo.cs | 120 ++++++++---------- src/Npgsql/NpgsqlBinaryExporter.cs | 15 ++- src/Npgsql/NpgsqlDataReader.cs | 10 +- src/Npgsql/NpgsqlNestedDataReader.cs | 53 +++++--- .../Replication/PgOutput/ReplicationValue.cs | 6 +- 6 files changed, 113 insertions(+), 107 deletions(-) diff --git a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs index a963d165c9..52fcdf8e6a 100644 --- 
a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs +++ b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs @@ -337,7 +337,7 @@ internal PgConverterInfo ObjectOrDefaultInfo return _objectOrDefaultInfo; ref var info = ref _objectOrDefaultInfo; - GetInfo(null, ref _objectOrDefaultInfo); + GetInfo(null, ref _objectOrDefaultInfo, out _); return info; } } @@ -350,34 +350,39 @@ internal FieldDescription Clone() return field; } - internal void GetInfo(Type? type, ref PgConverterInfo lastConverterInfo) + internal void GetInfo(Type? type, ref PgConverterInfo lastConverterInfo, out bool asObject) { Debug.Assert(lastConverterInfo.IsDefault || ( ReferenceEquals(_serializerOptions, lastConverterInfo.TypeInfo.Options) && lastConverterInfo.TypeInfo.PgTypeId == _serializerOptions.ToCanonicalTypeId(PostgresType)), "Cache is bleeding over"); if (!lastConverterInfo.IsDefault && lastConverterInfo.TypeToConvert == type) + { + asObject = lastConverterInfo.IsBoxingConverter; return; + } // Have to check for null as it's a sentinel value used by ObjectOrDefaultTypeInfo init itself. if (type is not null && ObjectOrDefaultInfo is var odfInfo) { if (typeof(object) == type) { - lastConverterInfo = odfInfo with { AsObject = true }; + lastConverterInfo = odfInfo; + asObject = true; return; } if (odfInfo.TypeToConvert == type) { lastConverterInfo = odfInfo; + asObject = lastConverterInfo.IsBoxingConverter; return; } } - GetInfoSlow(out lastConverterInfo); + GetInfoSlow(out lastConverterInfo, out asObject); [MethodImpl(MethodImplOptions.NoInlining)] - void GetInfoSlow(out PgConverterInfo lastConverterInfo) + void GetInfoSlow(out PgConverterInfo lastConverterInfo, out bool asObject) { PgConverterInfo converterInfo; var typeInfo = AdoSerializerHelpers.GetTypeInfoForReading(type ?? 
typeof(object), PostgresType, _serializerOptions); @@ -398,6 +403,7 @@ void GetInfoSlow(out PgConverterInfo lastConverterInfo) } lastConverterInfo = converterInfo; + asObject = converterInfo.IsBoxingConverter; } } diff --git a/src/Npgsql/Internal/PgTypeInfo.cs b/src/Npgsql/Internal/PgTypeInfo.cs index 8b0dc22c2d..592151509e 100644 --- a/src/Npgsql/Internal/PgTypeInfo.cs +++ b/src/Npgsql/Internal/PgTypeInfo.cs @@ -77,45 +77,6 @@ internal void DisposeWriteState(object writeState) disposable.Dispose(); } - internal bool TryBind(Field field, DataFormat format, out PgConverterInfo info) - { - switch (this) - { - case { IsResolverInfo: false }: - // Type lies when IsBoxing is true. - var typeToConvert = IsBoxing ? typeof(object) : Type; - if (!CachedCanConvert(format, out var bufferRequirements)) - { - info = default; - return false; - } - info = CreateConverterInfo(bufferRequirements, isRead: true, Converter, typeToConvert); - return true; - case PgResolverTypeInfo resolverInfo: - var resolution = resolverInfo.GetResolution(field); - if (!HasCachedInfo(resolution.Converter) - ? !CachedCanConvert(format, out bufferRequirements) - : !resolution.Converter.CanConvert(format, out bufferRequirements)) - { - info = default; - return false; - } - info = CreateConverterInfo(bufferRequirements, isRead: true, resolution.Converter, resolution.Converter.TypeToConvert); - return true; - default: - throw new NotSupportedException("Should not happen, please file a bug."); - } - } - - // Bind for reading. - internal PgConverterInfo Bind(Field field, DataFormat format) - { - if (!TryBind(field, format, out var info)) - ThrowHelper.ThrowInvalidOperationException($"Resolved converter does not support {format} format."); - - return info; - } - public PgConverterResolution GetResolution(T? value) { // Other cases, to keep binary bloat minimal. 
@@ -163,15 +124,6 @@ static PgConverterResolution ThrowNotSupported() => throw new NotSupportedException("Should not happen, please file a bug."); } - PgConverterInfo CreateConverterInfo(BufferRequirements bufferRequirements, bool isRead, PgConverter converter, Type typeToConvert) - => new() - { - TypeInfo = this, - Converter = converter, - AsObject = Type != typeToConvert, - BufferRequirement = isRead ? bufferRequirements.Read : bufferRequirements.Write - }; - bool CachedCanConvert(DataFormat format, out BufferRequirements bufferRequirements) { if (format is DataFormat.Binary) @@ -193,6 +145,44 @@ bool CachedCanConvert(DataFormat format, out BufferRequirements bufferRequiremen return success ? bufferRequirements : null; } + // TryBind for reading. + internal bool TryBind(Field field, DataFormat format, out PgConverterInfo info) + { + switch (this) + { + case { IsResolverInfo: false }: + if (!CachedCanConvert(format, out var bufferRequirements)) + { + info = default; + return false; + } + info = new(this, Converter, bufferRequirements.Read); + return true; + case PgResolverTypeInfo resolverInfo: + var resolution = resolverInfo.GetResolution(field); + if (!HasCachedInfo(resolution.Converter) + ? !CachedCanConvert(format, out bufferRequirements) + : !resolution.Converter.CanConvert(format, out bufferRequirements)) + { + info = default; + return false; + } + info = new(this, resolution.Converter, bufferRequirements.Read); + return true; + default: + throw new NotSupportedException("Should not happen, please file a bug."); + } + } + + // Bind for reading. + internal PgConverterInfo Bind(Field field, DataFormat format) + { + if (!TryBind(field, format, out var info)) + ThrowHelper.ThrowInvalidOperationException($"Resolved converter does not support {format} format."); + + return info; + } + // Bind for writing. /// When result is null, the value was interpreted to be a SQL NULL. internal PgConverterInfo? Bind(PgConverter converter, T? value, out Size size, out object? 
writeState, out DataFormat format, DataFormat? formatPreference = null) @@ -215,13 +205,7 @@ bool CachedCanConvert(DataFormat format, out BufferRequirements bufferRequiremen if (size is { Kind: SizeKind.Unknown}) ThrowHelper.ThrowNotSupportedException($"Returning {nameof(Size.Unknown)} from {nameof(PgConverter.GetSize)} is not supported yet."); - return new() - { - TypeInfo = this, - Converter = converter, - AsObject = IsBoxing, - BufferRequirement = bufferRequirements.Write, - }; + return new(this, converter, bufferRequirements.Write); } // Bind for writing. @@ -249,13 +233,7 @@ bool CachedCanConvert(DataFormat format, out BufferRequirements bufferRequiremen if (size is { Kind: SizeKind.Unknown}) ThrowHelper.ThrowNotSupportedException($"Returning {nameof(Size.Unknown)} from {nameof(PgConverter.GetSizeAsObject)} is not supported yet."); - return new() - { - TypeInfo = this, - Converter = converter, - AsObject = Type != typeof(object), - BufferRequirement = bufferRequirements.Write, - }; + return new(this, converter, bufferRequirements.Write); } // If we don't have a converter stored we must ask the retrieved one. @@ -337,6 +315,13 @@ public PgConverterResolution(PgConverter converter, PgTypeId pgTypeId) readonly struct PgConverterInfo { + public PgConverterInfo(PgTypeInfo pgTypeInfo, PgConverter converter, Size bufferRequirement) + { + TypeInfo = pgTypeInfo; + Converter = converter; + BufferRequirement = bufferRequirement; + } + public bool IsDefault => TypeInfo is null; public Type TypeToConvert @@ -352,11 +337,12 @@ public Type TypeToConvert } } - public required PgTypeInfo TypeInfo { get; init; } - public required PgConverter Converter { get; init; } - public required Size BufferRequirement { get; init; } - // Whether Converter.TypeToConvert matches the PgTypeInfo.Type, if it doesn't object apis and a downcast should be used. 
- public required bool AsObject { get; init; } + public PgTypeInfo TypeInfo { get; } + public PgConverter Converter { get; } + public Size BufferRequirement { get; } + + /// Whether Converter.TypeToConvert matches PgTypeInfo.Type, if it doesn't object apis should be used. + public bool IsBoxingConverter => TypeInfo.IsBoxing; public PgConverter GetConverter() => (PgConverter)Converter; } diff --git a/src/Npgsql/NpgsqlBinaryExporter.cs b/src/Npgsql/NpgsqlBinaryExporter.cs index 2b7d19919b..bb1716eb0e 100644 --- a/src/Npgsql/NpgsqlBinaryExporter.cs +++ b/src/Npgsql/NpgsqlBinaryExporter.cs @@ -290,10 +290,11 @@ async ValueTask Read(bool async, NpgsqlDbType? type, CancellationToken can // We must commit the current column before reading the next one unless it was an IsNull call. PgConverterInfo info; + bool asObject; if (!PgReader.Resumable || PgReader.CurrentRemaining != PgReader.FieldSize) { await Commit(async, resumableOp: false).ConfigureAwait(false); - info = GetInfo(); + info = GetInfo(out asObject); // We need to get info after potential I/O as we don't know beforehand at what column we're at. var columnLen = await ReadColumnLenIfNeeded(async, resumableOp: false).ConfigureAwait(false); @@ -305,13 +306,13 @@ async ValueTask Read(bool async, NpgsqlDbType? type, CancellationToken can } else - info = GetInfo(); + info = GetInfo(out asObject); T result; if (async) { await PgReader.StartReadAsync(info.BufferRequirement, cancellationToken).ConfigureAwait(false); - result = info.AsObject + result = asObject ? (T)await info.Converter.ReadAsObjectAsync(PgReader, cancellationToken).ConfigureAwait(false) : await info.GetConverter().ReadAsync(PgReader, cancellationToken).ConfigureAwait(false); await PgReader.EndReadAsync().ConfigureAwait(false); @@ -319,7 +320,7 @@ async ValueTask Read(bool async, NpgsqlDbType? type, CancellationToken can else { PgReader.StartRead(info.BufferRequirement); - result = info.AsObject + result = asObject ? 
(T)info.Converter.ReadAsObject(PgReader) : info.GetConverter().Read(PgReader); PgReader.EndRead(); @@ -327,10 +328,12 @@ async ValueTask Read(bool async, NpgsqlDbType? type, CancellationToken can return result; - PgConverterInfo GetInfo() + PgConverterInfo GetInfo(out bool asObject) { ref var cachedInfo = ref _columnInfoCache[_column]; - return cachedInfo.IsDefault ? cachedInfo = CreateConverterInfo(typeof(T), type) : cachedInfo; + var converterInfo = cachedInfo.IsDefault ? cachedInfo = CreateConverterInfo(typeof(T), type) : cachedInfo; + asObject = converterInfo.IsBoxingConverter; + return converterInfo; } T DbNullOrThrow() diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 4c63af1743..17e5b46eeb 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -2121,17 +2121,17 @@ FieldDescription GetInfo(int ordinal, Type? type, out PgConverter converter, out if (type is null) { - converter = field.ObjectOrDefaultInfo.Converter; - bufferRequirement = field.ObjectOrDefaultInfo.BufferRequirement; - asObject = field.ObjectOrDefaultInfo.AsObject; + var odfInfo = field.ObjectOrDefaultInfo; + converter = odfInfo.Converter; + bufferRequirement = odfInfo.BufferRequirement; + asObject = odfInfo.IsBoxingConverter; return field; } ref var info = ref ColumnInfoCache![ordinal]; - field.GetInfo(type, ref info); + field.GetInfo(type, ref info, out asObject); converter = info.Converter; bufferRequirement = info.BufferRequirement; - asObject = info.AsObject; return field; } diff --git a/src/Npgsql/NpgsqlNestedDataReader.cs b/src/Npgsql/NpgsqlNestedDataReader.cs index 49a0ac513c..305e3dd197 100644 --- a/src/Npgsql/NpgsqlNestedDataReader.cs +++ b/src/Npgsql/NpgsqlNestedDataReader.cs @@ -34,23 +34,19 @@ public sealed class NpgsqlNestedDataReader : DbDataReader DataFormat Format => DataFormat.Binary; - struct ColumnInfo + readonly struct ColumnInfo { readonly DataFormat _format; public PostgresType PostgresType { get; } public int 
BufferPos { get; } - public PgConverterInfo LastConverterInfo { get; private set; } + public PgConverterInfo LastConverterInfo { get; init; } public PgTypeInfo ObjectOrDefaultTypeInfo { get; } - public PgConverterInfo ObjectOrDefaultInfo => ObjectOrDefaultTypeInfo.Bind(Field, _format); + public PgConverterInfo GetObjectOrDefaultInfo() => ObjectOrDefaultTypeInfo.Bind(Field, _format); Field Field => new("?", ObjectOrDefaultTypeInfo.Options.PortableTypeIds ? PostgresType.DataTypeName : (Oid)PostgresType.OID, -1); - public ColumnInfo SetConverterInfo(PgTypeInfo typeInfo) - => this with - { - LastConverterInfo = typeInfo.Bind(Field, _format) - }; + public PgConverterInfo Bind(PgTypeInfo typeInfo) => typeInfo.Bind(Field, _format); public ColumnInfo(PostgresType postgresType, int bufferPos, PgTypeInfo objectOrDefaultTypeInfo, DataFormat format) { @@ -298,14 +294,14 @@ public override int GetOrdinal(string name) public override Type GetFieldType(int ordinal) { var column = CheckRowAndColumn(ordinal); - return column.ObjectOrDefaultInfo.TypeToConvert; + return column.GetObjectOrDefaultInfo().TypeToConvert; } /// public override object GetValue(int ordinal) { var columnLength = CheckRowAndColumnAndSeek(ordinal, out var column); - var info = column.ObjectOrDefaultInfo; + var info = column.GetObjectOrDefaultInfo(); if (columnLength == -1) return DBNull.Value; @@ -340,7 +336,7 @@ public override T GetFieldValue(int ordinal) return (T)(object)GetTextReader(ordinal); var columnLength = CheckRowAndColumnAndSeek(ordinal, out var column); - var info = GetOrAddConverterInfo(typeof(T), column, ordinal); + var info = GetOrAddConverterInfo(typeof(T), column, ordinal, out var asObject); if (columnLength == -1) { @@ -355,7 +351,7 @@ public override T GetFieldValue(int ordinal) } using var _ = PgReader.BeginNestedRead(columnLength, info.BufferRequirement); - return info.AsObject + return asObject ? (T)info.Converter.ReadAsObject(PgReader)! 
: info.GetConverter().Read(PgReader); } @@ -492,18 +488,33 @@ int CheckRowAndColumnAndSeek(int ordinal, out ColumnInfo column) return PgReader.ReadInt32(); } - PgConverterInfo GetOrAddConverterInfo(Type type, ColumnInfo column, int ordinal) + PgConverterInfo GetOrAddConverterInfo(Type type, ColumnInfo column, int ordinal, out bool asObject) { - PgConverterInfo info; - if (!column.LastConverterInfo.IsDefault && column.LastConverterInfo.TypeToConvert == type) - info = column.LastConverterInfo; - else + if (column.LastConverterInfo is { IsDefault: false } lastInfo && lastInfo.TypeToConvert == type) { - var columnInfo = column.SetConverterInfo(AdoSerializerHelpers.GetTypeInfoForReading(type, column.PostgresType, SerializerOptions)); - _columns[ordinal] = columnInfo; - info = columnInfo.LastConverterInfo; + asObject = lastInfo.IsBoxingConverter; + return lastInfo; } - return info; + + if (column.GetObjectOrDefaultInfo() is { IsDefault: false } odfInfo) + { + if (typeof(object) == type) + { + asObject = true; + return odfInfo; + } + + if (odfInfo.TypeToConvert == type) + { + asObject = odfInfo.IsBoxingConverter; + return odfInfo; + } + } + + var converterInfo = column.Bind(AdoSerializerHelpers.GetTypeInfoForReading(type, column.PostgresType, SerializerOptions)); + _columns[ordinal] = column with { LastConverterInfo = converterInfo }; + asObject = converterInfo.IsBoxingConverter; + return converterInfo; } enum ReaderState diff --git a/src/Npgsql/Replication/PgOutput/ReplicationValue.cs b/src/Npgsql/Replication/PgOutput/ReplicationValue.cs index b0da855b61..dfa8a6ecd4 100644 --- a/src/Npgsql/Replication/PgOutput/ReplicationValue.cs +++ b/src/Npgsql/Replication/PgOutput/ReplicationValue.cs @@ -88,7 +88,7 @@ public async ValueTask Get(CancellationToken cancellationToken = default) { CheckActive(); - _fieldDescription.GetInfo(typeof(T), ref _lastInfo); + _fieldDescription.GetInfo(typeof(T), ref _lastInfo, out var asObject); var info = _lastInfo; switch (Kind) @@ -113,7 +113,7 
@@ public async ValueTask Get(CancellationToken cancellationToken = default) var reader = PgReader.Init(Length, _fieldDescription.DataFormat); await reader.StartReadAsync(info.BufferRequirement, cancellationToken).ConfigureAwait(false); - var result = info.AsObject + var result = asObject ? (T)await info.Converter.ReadAsObjectAsync(reader, cancellationToken).ConfigureAwait(false) : await info.GetConverter().ReadAsync(reader, cancellationToken).ConfigureAwait(false); await reader.EndReadAsync().ConfigureAwait(false); @@ -158,7 +158,7 @@ public TextReader GetTextReader() CheckActive(); ref var info = ref _lastInfo; - _fieldDescription.GetInfo(typeof(TextReader), ref info); + _fieldDescription.GetInfo(typeof(TextReader), ref info, out _); switch (Kind) { From abbf9ae256b045065f3fb4d72a2a0f7a3d147c5e Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Tue, 3 Oct 2023 15:48:54 +0200 Subject: [PATCH 229/761] Converter resolvers fix (#5291) --- .../Composites/Metadata/CompositeFieldInfo.cs | 161 +++++++++++++----- .../ReflectionCompositeInfoFactory.cs | 17 +- .../Internal/Converters/CastingConverter.cs | 2 +- .../Internal/Converters/CompositeConverter.cs | 82 +++++++-- ...yRecordConverter.cs => RecordConverter.cs} | 20 +-- src/Npgsql/Internal/PgConverter.cs | 28 ++- src/Npgsql/Internal/PgTypeInfo.cs | 47 ++--- .../Resolvers/RecordTypeInfoResolvers.cs | 4 +- .../UnmappedMultirangeTypeInfoResolver.cs | 2 +- .../UnmappedRangeTypeInfoResolver.cs | 2 +- src/Npgsql/Internal/TypeInfoMapping.cs | 14 +- src/Npgsql/TypeMapping/GlobalTypeMapper.cs | 2 +- test/Npgsql.Tests/Types/CompositeTests.cs | 53 ++++++ 13 files changed, 300 insertions(+), 134 deletions(-) rename src/Npgsql/Internal/Converters/{ObjectArrayRecordConverter.cs => RecordConverter.cs} (70%) diff --git a/src/Npgsql/Internal/Composites/Metadata/CompositeFieldInfo.cs b/src/Npgsql/Internal/Composites/Metadata/CompositeFieldInfo.cs index 765399bf76..2b742ed511 100644 --- 
a/src/Npgsql/Internal/Composites/Metadata/CompositeFieldInfo.cs +++ b/src/Npgsql/Internal/Composites/Metadata/CompositeFieldInfo.cs @@ -9,33 +9,75 @@ namespace Npgsql.Internal.Composites; abstract class CompositeFieldInfo { - protected PgConverter Converter { get; } + protected PgTypeInfo PgTypeInfo { get; } + protected PgConverter? Converter { get; } protected BufferRequirements _binaryBufferRequirements; - private protected CompositeFieldInfo(string name, PgConverterResolution resolution) + /// + /// CompositeFieldInfo constructor. + /// + /// Name of the field. + /// Type info for reading/writing. + /// The nominal field type, this may differ from the typeInfo.PgTypeId when the field is a domain type. + private protected CompositeFieldInfo(string name, PgTypeInfo typeInfo, PgTypeId nominalPgTypeId) { Name = name; - Converter = resolution.Converter; - PgTypeId = resolution.PgTypeId; + PgTypeInfo = typeInfo; + PgTypeId = nominalPgTypeId; - if (!Converter.CanConvert(DataFormat.Binary, out _binaryBufferRequirements)) - throw new InvalidOperationException("Converter must support binary format to participate in composite types."); + if (typeInfo.PgTypeId is null) + ThrowHelper.ThrowArgumentException("PgTypeInfo must have a PgTypeId."); + + if (!typeInfo.IsResolverInfo) + { + var resolution = typeInfo.GetResolution(); + if (typeInfo.GetBufferRequirements(resolution.Converter, DataFormat.Binary) is not { } bufferRequirements) + { + ThrowHelper.ThrowInvalidOperationException("Converter must support binary format to participate in composite types."); + return; + } + _binaryBufferRequirements = bufferRequirements; + } } - protected PgConverter GetConverter() => (PgConverter)Converter; + public PgConverter GetReadInfo(out Size readRequirement) + { + if (Converter is not null) + { + readRequirement = _binaryBufferRequirements.Read; + return Converter; + } + + // TODO this is effectively static work, we could optimize this away. 
+ if (!PgTypeInfo.TryBind(new Field(Name, PgTypeInfo.PgTypeId.GetValueOrDefault(), -1), DataFormat.Binary, out var converterInfo)) + ThrowHelper.ThrowInvalidOperationException("Converter must support binary format to participate in composite types."); + + readRequirement = converterInfo.BufferRequirement; + return converterInfo.Converter; + } + + public PgConverter GetWriteInfo(object instance, out Size writeRequirement) + { + if (Converter is null) + return BindValue(instance, out writeRequirement); + + writeRequirement = _binaryBufferRequirements.Write; + return Converter; + + } - protected ValueTask ReadAsObject(bool async, CompositeBuilder builder, PgReader reader, CancellationToken cancellationToken) + protected ValueTask ReadAsObject(bool async, PgConverter converter, CompositeBuilder builder, PgReader reader, CancellationToken cancellationToken) { if (async) { - var task = Converter.ReadAsObjectAsync(reader, cancellationToken); + var task = converter.ReadAsObjectAsync(reader, cancellationToken); if (!task.IsCompletedSuccessfully) return Core(builder, task); AddValue(builder, task.Result); } else - AddValue(builder, Converter.ReadAsObject(reader)); + AddValue(builder, converter.ReadAsObject(reader)); return new(); #if NET6_0_OR_GREATER [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder))] @@ -46,22 +88,23 @@ async ValueTask Core(CompositeBuilder builder, ValueTask task) } } - protected ValueTask WriteAsObject(bool async, PgWriter writer, object value, CancellationToken cancellationToken) + protected ValueTask WriteAsObject(bool async, PgConverter converter, PgWriter writer, object value, CancellationToken cancellationToken) { if (async) - return Converter.WriteAsObjectAsync(writer, value, cancellationToken); + return converter.WriteAsObjectAsync(writer, value, cancellationToken); - Converter.WriteAsObject(writer, value); + converter.WriteAsObject(writer, value); return new(); } public string Name { get; } public PgTypeId PgTypeId { get; } - public 
Size BinaryReadRequirement => _binaryBufferRequirements.Read; - public Size BinaryWriteRequirement => _binaryBufferRequirements.Write; + public Size BinaryReadRequirement => Converter is not null ? _binaryBufferRequirements.Read : Size.Unknown; + public Size BinaryWriteRequirement => Converter is not null ? _binaryBufferRequirements.Write : Size.Unknown; public abstract Type Type { get; } + protected abstract PgConverter BindValue(object instance, out Size writeRequirement); protected abstract void AddValue(CompositeBuilder builder, object value); public abstract StrongBox CreateBox(); @@ -70,10 +113,10 @@ protected ValueTask WriteAsObject(bool async, PgWriter writer, object value, Can public abstract bool IsDbNullable { get; } public abstract void ReadDbNull(CompositeBuilder builder); - public abstract ValueTask Read(bool async, CompositeBuilder builder, PgReader reader, CancellationToken cancellationToken = default); - public abstract bool IsDbNull(object instance); - public abstract Size? GetSizeOrDbNull(DataFormat format, object instance, ref object? writeState); - public abstract ValueTask Write(bool async, PgWriter writer, object instance, CancellationToken cancellationToken); + public abstract ValueTask Read(bool async, PgConverter converter, CompositeBuilder builder, PgReader reader, CancellationToken cancellationToken = default); + public abstract bool IsDbNull(PgConverter converter, object instance); + public abstract Size? GetSizeOrDbNull(PgConverter converter, DataFormat format, Size writeRequirement, object instance, ref object? 
writeState); + public abstract ValueTask Write(bool async, PgConverter converter, PgWriter writer, object instance, CancellationToken cancellationToken); } sealed class CompositeFieldInfo : CompositeFieldInfo @@ -83,25 +126,35 @@ sealed class CompositeFieldInfo : CompositeFieldInfo readonly Func _getter; readonly bool _asObject; - CompositeFieldInfo(string name, PgConverterResolution resolution, Func getter) - : base(name, resolution) + CompositeFieldInfo(string name, PgTypeInfo typeInfo, PgTypeId nominalPgTypeId, Func getter) + : base(name, typeInfo, nominalPgTypeId) { - var typeToConvert = resolution.Converter.TypeToConvert; - if (!typeToConvert.IsAssignableFrom(typeof(T))) - throw new InvalidOperationException($"Converter type '{typeToConvert}' must be assignable from field type '{typeof(T)}'."); + if (typeInfo.Type != typeof(T)) + throw new InvalidOperationException($"PgTypeInfo type '{typeInfo.Type.FullName}' must be equal to field type '{typeof(T)}'."); + + if (!typeInfo.IsResolverInfo) + { + var resolution = typeInfo.GetResolution(); + var typeToConvert = resolution.Converter.TypeToConvert; + _asObject = typeToConvert != typeof(T); + if (!typeToConvert.IsAssignableFrom(typeof(T))) + throw new InvalidOperationException($"Converter type '{typeToConvert.FullName}' must be assignable from field type '{typeof(T)}'."); + } _getter = getter; - _asObject = typeToConvert != typeof(T); } - public CompositeFieldInfo(string name, PgConverterResolution resolution, Func getter, int parameterIndex) - : this(name, resolution, getter) + public CompositeFieldInfo(string name, PgTypeInfo typeInfo, PgTypeId nominalPgTypeId, Func getter, int parameterIndex) + : this(name, typeInfo, nominalPgTypeId, getter) => _parameterIndex = parameterIndex; - public CompositeFieldInfo(string name, PgConverterResolution resolution, Func getter, Action setter) - : this(name, resolution, getter) + public CompositeFieldInfo(string name, PgTypeInfo typeInfo, PgTypeId nominalPgTypeId, Func getter, 
Action setter) + : this(name, typeInfo, nominalPgTypeId, getter) => _setter = setter; + bool AsObject(PgConverter converter) + => ReferenceEquals(Converter, converter) ? _asObject : converter.TypeToConvert != typeof(T); + public override Type Type => typeof(T); public override int? ConstructorParameterIndex => _setter is not null ? null : _parameterIndex; @@ -134,24 +187,40 @@ public override void ReadDbNull(CompositeBuilder builder) builder.AddValue((T?)default); } + protected override PgConverter BindValue(object instance, out Size writeRequirement) + { + var value = _getter(instance); + var resolution = PgTypeInfo.IsBoxing ? PgTypeInfo.GetObjectResolution(value) : PgTypeInfo.GetResolution(value); + if (PgTypeInfo.GetBufferRequirements(resolution.Converter, DataFormat.Binary) is not { } bufferRequirements) + { + ThrowHelper.ThrowInvalidOperationException("Converter must support binary format to participate in composite types."); + writeRequirement = default; + return default; + } + + writeRequirement = bufferRequirements.Write; + return resolution.Converter; + } + protected override void AddValue(CompositeBuilder builder, object value) => builder.AddValue((T)value); - public override ValueTask Read(bool async, CompositeBuilder builder, PgReader reader, CancellationToken cancellationToken = default) + public override ValueTask Read(bool async, PgConverter converter, CompositeBuilder builder, PgReader reader, CancellationToken cancellationToken = default) { - if (_asObject) - return ReadAsObject(async, builder, reader, cancellationToken); + if (AsObject(converter)) + return ReadAsObject(async, converter, builder, reader, cancellationToken); if (async) { - var task = GetConverter().ReadAsync(reader, cancellationToken); + var task = ((PgConverter)converter).ReadAsync(reader, cancellationToken); if (!task.IsCompletedSuccessfully) return Core(builder, task); builder.AddValue(task.Result); } else - builder.AddValue(GetConverter().Read(reader)); + 
builder.AddValue(((PgConverter)converter).Read(reader)); return new(); + #if NET6_0_OR_GREATER [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder))] #endif @@ -161,32 +230,32 @@ async ValueTask Core(CompositeBuilder builder, ValueTask task) } } - public override bool IsDbNullable => Converter.IsDbNullable; + public override bool IsDbNullable => Converter?.IsDbNullable ?? true; - public override bool IsDbNull(object instance) + public override bool IsDbNull(PgConverter converter, object instance) { var value = _getter(instance); - return _asObject ? Converter.IsDbNullAsObject(value) : GetConverter().IsDbNull(value); + return AsObject(converter) ? converter.IsDbNullAsObject(value) : ((PgConverter)converter).IsDbNull(value); } - public override Size? GetSizeOrDbNull(DataFormat format, object instance, ref object? writeState) + public override Size? GetSizeOrDbNull(PgConverter converter, DataFormat format, Size writeRequirement, object instance, ref object? writeState) { var value = _getter(instance); - return _asObject - ? Converter.GetSizeOrDbNullAsObject(format, _binaryBufferRequirements.Write, value, ref writeState) - : GetConverter().GetSizeOrDbNull(format, _binaryBufferRequirements.Write, value, ref writeState); + return AsObject(converter) + ? 
converter.GetSizeOrDbNullAsObject(format, writeRequirement, value, ref writeState) + : ((PgConverter)converter).GetSizeOrDbNull(format, writeRequirement, value, ref writeState); } - public override ValueTask Write(bool async, PgWriter writer, object instance, CancellationToken cancellationToken) + public override ValueTask Write(bool async, PgConverter converter, PgWriter writer, object instance, CancellationToken cancellationToken) { var value = _getter(instance); - if (_asObject) - return WriteAsObject(async, writer, value!, cancellationToken); + if (AsObject(converter)) + return WriteAsObject(async, converter, writer, value!, cancellationToken); if (async) - return GetConverter().WriteAsync(writer, value!, cancellationToken); + return ((PgConverter)converter).WriteAsync(writer, value!, cancellationToken); - GetConverter().Write(writer, value!); + ((PgConverter)converter).Write(writer, value!); return new(); } } diff --git a/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs b/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs index cf757e2f42..9554a62c9d 100644 --- a/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs +++ b/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs @@ -5,6 +5,7 @@ using System.Linq; using System.Linq.Expressions; using System.Reflection; +using Npgsql.Internal.Postgres; using Npgsql.PostgresTypes; using Npgsql.Util; using NpgsqlTypes; @@ -54,7 +55,7 @@ static class ReflectionCompositeInfoFactory else throw new InvalidOperationException($"Cannot find property or field for composite field {pgFields[fieldIndex].Name}."); - compositeFields[fieldIndex] = CreateCompositeFieldInfo(pgField.Name, pgTypeInfo.Type, MapResolution(pgField, pgTypeInfo.GetConcreteResolution()), getter, i); + compositeFields[fieldIndex] = CreateCompositeFieldInfo(pgField.Name, pgTypeInfo.Type, pgTypeInfo, options.ToCanonicalTypeId(pgField.Type), getter, i); } for (var fieldIndex = 0; fieldIndex < pgFields.Count; 
fieldIndex++) @@ -84,7 +85,7 @@ static class ReflectionCompositeInfoFactory else throw new InvalidOperationException($"Cannot find property or field for composite field '{pgFields[fieldIndex].Name}'."); - compositeFields[fieldIndex] = CreateCompositeFieldInfo(pgField.Name, pgTypeInfo.Type, MapResolution(pgField, pgTypeInfo.GetConcreteResolution()), getter, setter); + compositeFields[fieldIndex] = CreateCompositeFieldInfo(pgField.Name, pgTypeInfo.Type, pgTypeInfo, options.ToCanonicalTypeId(pgField.Type), getter, setter); } Debug.Assert(compositeFields.All(x => x is not null)); @@ -92,10 +93,6 @@ static class ReflectionCompositeInfoFactory var constructor = constructorInfo is null ? _ => Activator.CreateInstance() : CreateStrongBoxConstructor(constructorInfo); return new CompositeInfo(compositeFields!, constructorInfo is null ? 0 : constructorParameters.Length, constructor); - // We have to map the pg type back to the composite field type, as we've resolved based on the representational pg type. - PgConverterResolution MapResolution(PostgresCompositeType.Field field, PgConverterResolution resolution) - => new(resolution.Converter, options.ToCanonicalTypeId(field.Type)); - static NotSupportedException NotSupportedField(PostgresCompositeType composite, PostgresCompositeType.Field field, bool isField, string name, Type type) => new($"No resolution could be found for ('{type.FullName}', '{field.Type.FullName}'). Mapping: CLR {(isField ? 
"field" : "property")} '{type.Name}.{name}' <-> Composite field '{composite.Name}.{field.Name}'"); } @@ -187,13 +184,13 @@ static Func CreateStrongBoxConstructor(ConstructorInfo constr ), values) .Compile(); } - static CompositeFieldInfo CreateCompositeFieldInfo(string name, Type type, PgConverterResolution converterResolution, Delegate getter, int constructorParameterIndex) + static CompositeFieldInfo CreateCompositeFieldInfo(string name, Type type, PgTypeInfo typeInfo, PgTypeId nominalPgTypeId, Delegate getter, int constructorParameterIndex) => (CompositeFieldInfo)Activator.CreateInstance( - typeof(CompositeFieldInfo<>).MakeGenericType(type), name, converterResolution, getter, constructorParameterIndex)!; + typeof(CompositeFieldInfo<>).MakeGenericType(type), name, typeInfo, nominalPgTypeId, getter, constructorParameterIndex)!; - static CompositeFieldInfo CreateCompositeFieldInfo(string name, Type type, PgConverterResolution converterResolution, Delegate getter, Delegate setter) + static CompositeFieldInfo CreateCompositeFieldInfo(string name, Type type, PgTypeInfo typeInfo, PgTypeId nominalPgTypeId, Delegate getter, Delegate setter) => (CompositeFieldInfo)Activator.CreateInstance( - typeof(CompositeFieldInfo<>).MakeGenericType(type), name, converterResolution, getter, setter)!; + typeof(CompositeFieldInfo<>).MakeGenericType(type), name, typeInfo, nominalPgTypeId, getter, setter)!; static Dictionary MapProperties<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicProperties)] T>(IReadOnlyList fields, INpgsqlNameTranslator nameTranslator) { diff --git a/src/Npgsql/Internal/Converters/CastingConverter.cs b/src/Npgsql/Internal/Converters/CastingConverter.cs index d51abf8532..1d90cde352 100644 --- a/src/Npgsql/Internal/Converters/CastingConverter.cs +++ b/src/Npgsql/Internal/Converters/CastingConverter.cs @@ -78,7 +78,7 @@ internal static PgTypeInfo ToNonBoxing(this PgTypeInfo typeInfo) 
(PgConverterResolver)Activator.CreateInstance(typeof(CastingConverterResolver<>).MakeGenericType(type), resolverTypeInfo)!, typeInfo.PgTypeId); - var resolution = typeInfo.GetConcreteResolution(); + var resolution = typeInfo.GetResolution(); return new PgTypeInfo(typeInfo.Options, (PgConverter)Activator.CreateInstance(typeof(CastingConverter<>).MakeGenericType(type), resolution.Converter)!, resolution.PgTypeId); } diff --git a/src/Npgsql/Internal/Converters/CompositeConverter.cs b/src/Npgsql/Internal/Converters/CompositeConverter.cs index 62befeb900..cec42f28e1 100644 --- a/src/Npgsql/Internal/Converters/CompositeConverter.cs +++ b/src/Npgsql/Internal/Converters/CompositeConverter.cs @@ -25,7 +25,7 @@ public CompositeConverter(CompositeInfo composite) if (field.IsDbNullable) { readReq = readReq.Combine(Size.CreateUpperBound(0)); - writeReq = readReq.Combine(Size.CreateUpperBound(0)); + writeReq = writeReq.Combine(Size.CreateUpperBound(0)); } req = req.Combine( @@ -93,10 +93,11 @@ async ValueTask Read(bool async, PgReader reader, CancellationToken cancellat field.ReadDbNull(builder); else { - var scope = await reader.BeginNestedRead(async, length, field.BinaryReadRequirement, cancellationToken).ConfigureAwait(false); + var converter = field.GetReadInfo(out var readRequirement); + var scope = await reader.BeginNestedRead(async, length, readRequirement, cancellationToken).ConfigureAwait(false); try { - await field.Read(async, builder, reader, cancellationToken).ConfigureAwait(false); + await field.Read(async, converter, builder, reader, cancellationToken).ConfigureAwait(false); } finally { @@ -113,28 +114,35 @@ async ValueTask Read(bool async, PgReader reader, CancellationToken cancellat public override Size GetSize(SizeContext context, T value, ref object? writeState) { - var arrayPool = ArrayPool<(Size Size, object? 
WriteState)>.Shared; + var arrayPool = ArrayPool.Shared; var data = arrayPool.Rent(_composite.Fields.Count); var totalSize = Size.Create(sizeof(int) + _composite.Fields.Count * (sizeof(uint) + sizeof(int))); - var boxedValue = (object)value; + var boxedInstance = (object)value; var anyWriteState = false; for (var i = 0; i < _composite.Fields.Count; i++) { var field = _composite.Fields[i]; + var converter = field.GetWriteInfo(boxedInstance, out var writeRequirement); object? fieldState = null; - var fieldSize = field.GetSizeOrDbNull(context.Format, boxedValue, ref fieldState); + var fieldSize = field.GetSizeOrDbNull(converter, context.Format, writeRequirement, boxedInstance, ref fieldState); anyWriteState = anyWriteState || fieldState is not null; - data[i] = (fieldSize ?? -1, fieldState); + data[i] = new() + { + Size = fieldSize ?? -1, + WriteState = fieldState, + Converter = converter, + BufferRequirement = writeRequirement + }; totalSize = totalSize.Combine(fieldSize ?? 0); } writeState = new WriteState { ArrayPool = arrayPool, - BoxedInstance = boxedValue, Data = new(data, 0, _composite.Fields.Count), - AnyWriteState = anyWriteState + AnyWriteState = anyWriteState, + BoxedInstance = boxedInstance, }; return totalSize; } @@ -156,7 +164,7 @@ async ValueTask Write(bool async, PgWriter writer, T value, CancellationToken ca writer.WriteInt32(_composite.Fields.Count); var writeState = writer.Current.WriteState as WriteState; - var boxedInstance = writeState?.BoxedInstance ?? value!; + var boxedInstance = writeState?.BoxedInstance ?? value; var data = writeState?.Data.Array; for (var i = 0; i < _composite.Fields.Count; i++) { @@ -166,20 +174,60 @@ async ValueTask Write(bool async, PgWriter writer, T value, CancellationToken ca var field = _composite.Fields[i]; writer.WriteAsOid(field.PgTypeId); - var (size, fieldState) = data?[i] ?? (field.IsDbNull(boxedInstance) ? 
-1 : field.BinaryReadRequirement, null); - - var length = size.Value; + ElementState elementState; + if (data?[i] is not { } state) + { + var converter = field.GetWriteInfo(boxedInstance, out var writeRequirement); + elementState = new() + { + Size = field.IsDbNull(converter, boxedInstance) ? -1 : writeRequirement, + WriteState = null, + Converter = converter, + BufferRequirement = writeRequirement, + }; + } + else + elementState = state; + var length = elementState.Size.Value; writer.WriteInt32(length); - if (length != -1) + if (length is not -1) { - using var _ = await writer.BeginNestedWrite(async, _bufferRequirements.Write, length, fieldState, cancellationToken).ConfigureAwait(false); - await field.Write(async, writer, boxedInstance, cancellationToken).ConfigureAwait(false); + using var _ = await writer.BeginNestedWrite(async, elementState.BufferRequirement, length, elementState.WriteState, cancellationToken).ConfigureAwait(false); + await field.Write(async, elementState.Converter, writer, boxedInstance, cancellationToken).ConfigureAwait(false); } } } - sealed class WriteState : MultiWriteState + readonly struct ElementState + { + public required Size Size { get; init; } + public required object? WriteState { get; init; } + public required PgConverter Converter { get; init; } + public required Size BufferRequirement { get; init; } + } + + class WriteState : IDisposable { + public required ArrayPool? 
ArrayPool { get; init; } + public required ArraySegment Data { get; init; } + public required bool AnyWriteState { get; init; } public required object BoxedInstance { get; init; } + + public void Dispose() + { + if (Data.Array is not { } array) + return; + + if (AnyWriteState) + { + for (var i = Data.Offset; i < array.Length; i++) + if (array[i].WriteState is IDisposable disposable) + disposable.Dispose(); + + Array.Clear(Data.Array, Data.Offset, Data.Count); + } + + ArrayPool?.Return(Data.Array); + } } } diff --git a/src/Npgsql/Internal/Converters/ObjectArrayRecordConverter.cs b/src/Npgsql/Internal/Converters/RecordConverter.cs similarity index 70% rename from src/Npgsql/Internal/Converters/ObjectArrayRecordConverter.cs rename to src/Npgsql/Internal/Converters/RecordConverter.cs index 9b028c02cc..aabd914b49 100644 --- a/src/Npgsql/Internal/Converters/ObjectArrayRecordConverter.cs +++ b/src/Npgsql/Internal/Converters/RecordConverter.cs @@ -5,14 +5,14 @@ namespace Npgsql.Internal.Converters; -sealed class ObjectArrayRecordConverter : PgStreamingConverter +sealed class RecordConverter : PgStreamingConverter { - readonly PgSerializerOptions _serializerOptions; + readonly PgSerializerOptions _options; readonly Func? _factory; - public ObjectArrayRecordConverter(PgSerializerOptions serializerOptions, Func? factory = null) + public RecordConverter(PgSerializerOptions options, Func? factory = null) { - _serializerOptions = serializerOptions; + _options = options; _factory = factory; } @@ -41,20 +41,18 @@ async ValueTask Read(bool async, PgReader reader, CancellationToken cancellat continue; var postgresType = - _serializerOptions.DatabaseInfo.GetPostgresType(typeOid).GetRepresentationalType() + _options.DatabaseInfo.GetPostgresType(typeOid).GetRepresentationalType() ?? 
throw new NotSupportedException($"Reading isn't supported for record field {i} (unknown type OID {typeOid}"); - var typeInfo = _serializerOptions.GetObjectOrDefaultTypeInfo(postgresType) + var typeInfo = _options.GetObjectOrDefaultTypeInfo(postgresType) ?? throw new NotSupportedException( $"Reading isn't supported for record field {i} (PG type '{postgresType.DisplayName}'"); - var resolution = typeInfo.GetConcreteResolution(); - if (typeInfo.GetBufferRequirements(resolution.Converter, DataFormat.Binary) is not { } bufferRequirements) - throw new NotSupportedException($"Resolved record field converter '{resolution.Converter.GetType()}' has to support the binary format to be compatible."); - var scope = await reader.BeginNestedRead(async, length, bufferRequirements.Read, cancellationToken).ConfigureAwait(false); + var converterInfo = typeInfo.Bind(new Field("?", _options.ToCanonicalTypeId(postgresType), -1), DataFormat.Binary); + var scope = await reader.BeginNestedRead(async, length, converterInfo.BufferRequirement, cancellationToken).ConfigureAwait(false); try { - result[i] = await resolution.Converter.ReadAsObject(async, reader, cancellationToken).ConfigureAwait(false); + result[i] = await converterInfo.Converter.ReadAsObject(async, reader, cancellationToken).ConfigureAwait(false); } finally { diff --git a/src/Npgsql/Internal/PgConverter.cs b/src/Npgsql/Internal/PgConverter.cs index e136e9a904..cecd49fe4b 100644 --- a/src/Npgsql/Internal/PgConverter.cs +++ b/src/Npgsql/Internal/PgConverter.cs @@ -143,8 +143,18 @@ static class PgConverterExtensions if (writeRequirement is { Kind: SizeKind.Exact, Value: var byteCount }) return byteCount; var size = converter.GetSize(new(format, writeRequirement), value, ref writeState); - if (size.Kind is SizeKind.UpperBound) - throw new InvalidOperationException("SizeKind.UpperBound is not a valid return value for GetSize."); + + switch (size.Kind) + { + case SizeKind.UpperBound: + 
ThrowHelper.ThrowInvalidOperationException($"{nameof(SizeKind.UpperBound)} is not a valid return value for GetSize."); + break; + case SizeKind.Unknown: + // Not valid yet. + ThrowHelper.ThrowInvalidOperationException($"{nameof(SizeKind.Unknown)} is not a valid return value for GetSize."); + break; + } + return size; } @@ -156,8 +166,18 @@ static class PgConverterExtensions if (writeRequirement is { Kind: SizeKind.Exact, Value: var byteCount }) return byteCount; var size = converter.GetSizeAsObject(new(format, writeRequirement), value, ref writeState); - if (size.Kind is SizeKind.UpperBound) - throw new InvalidOperationException("SizeKind.UpperBound is not a valid return value for GetSize."); + + switch (size.Kind) + { + case SizeKind.UpperBound: + ThrowHelper.ThrowInvalidOperationException($"{nameof(SizeKind.UpperBound)} is not a valid return value for GetSize."); + break; + case SizeKind.Unknown: + // Not valid yet. + ThrowHelper.ThrowInvalidOperationException($"{nameof(SizeKind.Unknown)} is not a valid return value for GetSize."); + break; + } + return size; } } diff --git a/src/Npgsql/Internal/PgTypeInfo.cs b/src/Npgsql/Internal/PgTypeInfo.cs index 592151509e..f8a808af15 100644 --- a/src/Npgsql/Internal/PgTypeInfo.cs +++ b/src/Npgsql/Internal/PgTypeInfo.cs @@ -79,9 +79,9 @@ internal void DisposeWriteState(object writeState) public PgConverterResolution GetResolution(T? value) { - // Other cases, to keep binary bloat minimal. if (this is not PgResolverTypeInfo resolverInfo) - return GetObjectResolution(null); + return new(Converter!, PgTypeId.GetValueOrDefault()); + var resolution = resolverInfo.GetResolution(value, null); return resolution ?? resolverInfo.GetDefaultResolution(null); } @@ -106,22 +106,12 @@ static PgConverterResolution ThrowNotSupported() => throw new NotSupportedException("Should not happen, please file a bug."); } - /// Throws if the type info is undecided in its PgTypeId. 
- internal PgConverterResolution GetConcreteResolution() + /// Throws if the instance is a PgResolverTypeInfo. + internal PgConverterResolution GetResolution() { - var pgTypeId = PgTypeId; - if (pgTypeId is null) - ThrowHelper.ThrowInvalidOperationException("PgTypeId is null."); - - return this switch - { - { IsResolverInfo: false } => new(Converter, pgTypeId.GetValueOrDefault()), - PgResolverTypeInfo resolverInfo => resolverInfo.GetDefaultResolution(null), - _ => ThrowNotSupported() - }; - - static PgConverterResolution ThrowNotSupported() - => throw new NotSupportedException("Should not happen, please file a bug."); + if (IsResolverInfo) + ThrowHelper.ThrowInvalidOperationException("Instance is a PgResolverTypeInfo."); + return new(Converter, PgTypeId.GetValueOrDefault()); } bool CachedCanConvert(DataFormat format, out BufferRequirements bufferRequirements) @@ -192,19 +182,15 @@ internal PgConverterInfo Bind(Field field, DataFormat format) ThrowHelper.ThrowNotSupportedException($"Writing {Type} is not supported for this type info."); format = ResolveFormat(converter, out var bufferRequirements, formatPreference ?? PreferredFormat); - if (converter.IsDbNull(value)) + + writeState = null; + if (converter.GetSizeOrDbNull(format, bufferRequirements.Write, value, ref writeState) is not { } sizeOrDbNull) { - writeState = null; size = default; return null; } - writeState = null; - var context = new SizeContext(format, bufferRequirements.Write); - size = bufferRequirements.Write is { Kind: SizeKind.Exact } req ? 
req : converter.GetSize(context, value, ref writeState); - - if (size is { Kind: SizeKind.Unknown}) - ThrowHelper.ThrowNotSupportedException($"Returning {nameof(Size.Unknown)} from {nameof(PgConverter.GetSize)} is not supported yet."); + size = sizeOrDbNull; return new(this, converter, bufferRequirements.Write); } @@ -220,19 +206,14 @@ internal PgConverterInfo Bind(Field field, DataFormat format) format = ResolveFormat(converter, out var bufferRequirements, formatPreference ?? PreferredFormat); // Given SQL values are effectively a union of T | NULL we support DBNull.Value to signify a NULL value for all types except DBNull in this api. - if (value is DBNull && Type != typeof(DBNull) || converter.IsDbNullAsObject(value)) + writeState = null; + if (value is DBNull && Type != typeof(DBNull) || converter.GetSizeOrDbNullAsObject(format, bufferRequirements.Write, value, ref writeState) is not { } sizeOrDbNull) { - writeState = null; size = default; return null; } - writeState = null; - var context = new SizeContext(format, bufferRequirements.Write); - size = bufferRequirements.Write is { Kind: SizeKind.Exact } req ? 
req : converter.GetSizeAsObject(context, value, ref writeState); - - if (size is { Kind: SizeKind.Unknown}) - ThrowHelper.ThrowNotSupportedException($"Returning {nameof(Size.Unknown)} from {nameof(PgConverter.GetSizeAsObject)} is not supported yet."); + size = sizeOrDbNull; return new(this, converter, bufferRequirements.Write); } diff --git a/src/Npgsql/Internal/Resolvers/RecordTypeInfoResolvers.cs b/src/Npgsql/Internal/Resolvers/RecordTypeInfoResolvers.cs index f51aeac322..1728c07b9e 100644 --- a/src/Npgsql/Internal/Resolvers/RecordTypeInfoResolvers.cs +++ b/src/Npgsql/Internal/Resolvers/RecordTypeInfoResolvers.cs @@ -17,7 +17,7 @@ class RecordTypeInfoResolver : IPgTypeInfoResolver static void AddInfos(TypeInfoMappingCollection mappings) => mappings.AddType(DataTypeNames.Record, static (options, mapping, _) => - mapping.CreateInfo(options, new ObjectArrayRecordConverter(options), supportsWriting: false), + mapping.CreateInfo(options, new RecordConverter(options), supportsWriting: false), MatchRequirement.DataTypeName); protected static void AddArrayInfos(TypeInfoMappingCollection mappings) @@ -107,7 +107,7 @@ protected static void AddArrayInfos(TypeInfoMappingCollection mappings) .MakeGenericMethod(mapping.Type) .Invoke(null, new object[] { constructor, constructor.GetParameters().Length }); - var converterType = typeof(ObjectArrayRecordConverter<>).MakeGenericType(mapping.Type); + var converterType = typeof(RecordConverter<>).MakeGenericType(mapping.Type); var converter = (PgConverter)Activator.CreateInstance(converterType, options, factory)!; return mapping.CreateInfo(options, converter, supportsWriting: false); }; diff --git a/src/Npgsql/Internal/Resolvers/UnmappedMultirangeTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/UnmappedMultirangeTypeInfoResolver.cs index d18b1421db..33a76c0749 100644 --- a/src/Npgsql/Internal/Resolvers/UnmappedMultirangeTypeInfoResolver.cs +++ b/src/Npgsql/Internal/Resolvers/UnmappedMultirangeTypeInfoResolver.cs @@ -36,7 +36,7 @@ 
elementType is null return CreateCollection().AddMapping(type, dataTypeName, (options, mapping, _) => mapping.CreateInfo(options, - (PgConverter)Activator.CreateInstance(typeof(MultirangeConverter<,>).MakeGenericType(type, subInfo.Type), subInfo.GetConcreteResolution().Converter)!, + (PgConverter)Activator.CreateInstance(typeof(MultirangeConverter<,>).MakeGenericType(type, subInfo.Type), subInfo.GetResolution().Converter)!, preferredFormat: subInfo.PreferredFormat, supportsWriting: subInfo.SupportsWriting), mapping => mapping with { MatchRequirement = MatchRequirement.Single }); } diff --git a/src/Npgsql/Internal/Resolvers/UnmappedRangeTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/UnmappedRangeTypeInfoResolver.cs index 9e9ba0fb7d..69211df68b 100644 --- a/src/Npgsql/Internal/Resolvers/UnmappedRangeTypeInfoResolver.cs +++ b/src/Npgsql/Internal/Resolvers/UnmappedRangeTypeInfoResolver.cs @@ -36,7 +36,7 @@ matchedType is null return CreateCollection().AddMapping(matchedType, dataTypeName, (options, mapping, _) => mapping.CreateInfo(options, - (PgConverter)Activator.CreateInstance(typeof(RangeConverter<>).MakeGenericType(subInfo.Type), subInfo.GetConcreteResolution().Converter)!, + (PgConverter)Activator.CreateInstance(typeof(RangeConverter<>).MakeGenericType(subInfo.Type), subInfo.GetResolution().Converter)!, preferredFormat: subInfo.PreferredFormat, supportsWriting: subInfo.SupportsWriting), mapping => mapping with { MatchRequirement = MatchRequirement.Single }); } diff --git a/src/Npgsql/Internal/TypeInfoMapping.cs b/src/Npgsql/Internal/TypeInfoMapping.cs index b2dfd58377..c52a2331c0 100644 --- a/src/Npgsql/Internal/TypeInfoMapping.cs +++ b/src/Npgsql/Internal/TypeInfoMapping.cs @@ -351,15 +351,15 @@ void AddResolverArrayType(TypeInfoMapping elementMapping, Type type, Func(string dataTypeName, TypeInfoFactory createInfo, bool isDefault = false) where T : struct => AddStructType(typeof(T), typeof(T?), dataTypeName, createInfo, - static (_, innerInfo) => new 
NullableConverter(innerInfo.GetConcreteResolution().GetConverter()), GetDefaultConfigure(isDefault)); + static (_, innerInfo) => new NullableConverter(innerInfo.GetResolution().GetConverter()), GetDefaultConfigure(isDefault)); public void AddStructType(string dataTypeName, TypeInfoFactory createInfo, MatchRequirement matchRequirement) where T : struct => AddStructType(typeof(T), typeof(T?), dataTypeName, createInfo, - static (_, innerInfo) => new NullableConverter(innerInfo.GetConcreteResolution().GetConverter()), GetDefaultConfigure(matchRequirement)); + static (_, innerInfo) => new NullableConverter(innerInfo.GetResolution().GetConverter()), GetDefaultConfigure(matchRequirement)); public void AddStructType(string dataTypeName, TypeInfoFactory createInfo, Func? configure) where T : struct => AddStructType(typeof(T), typeof(T?), dataTypeName, createInfo, - static (_, innerInfo) => new NullableConverter(innerInfo.GetConcreteResolution().GetConverter()), configure); + static (_, innerInfo) => new NullableConverter(innerInfo.GetResolution().GetConverter()), configure); // Lives outside to prevent capture of T. 
void AddStructType(Type type, Type nullableType, string dataTypeName, TypeInfoFactory createInfo, @@ -451,8 +451,8 @@ PgTypeInfo CreateComposedPerInstance(PgTypeInfo innerTypeInfo, PgTypeInfo nullab { var converter = new PolymorphicArrayConverter( - innerTypeInfo.GetConcreteResolution().GetConverter(), - nullableInnerTypeInfo.GetConcreteResolution().GetConverter()); + innerTypeInfo.GetResolution().GetConverter(), + nullableInnerTypeInfo.GetResolution().GetConverter()); return new PgTypeInfo(innerTypeInfo.Options, converter, innerTypeInfo.Options.GetCanonicalTypeId(new DataTypeName(dataTypeName)), unboxedType: typeof(Array)) { SupportsWriting = false }; @@ -599,7 +599,7 @@ static string GetArrayDataTypeName(string dataTypeName) static ArrayBasedArrayConverter CreateArrayBasedConverter(TypeInfoMapping mapping, PgTypeInfo elemInfo) { if (!elemInfo.IsBoxing) - return new ArrayBasedArrayConverter(elemInfo.GetConcreteResolution(), mapping.Type); + return new ArrayBasedArrayConverter(elemInfo.GetResolution(), mapping.Type); ThrowBoxingNotSupported(resolver: false); return default; @@ -608,7 +608,7 @@ static ArrayBasedArrayConverter CreateArrayBasedConverter CreateListBasedConverter(TypeInfoMapping mapping, PgTypeInfo elemInfo) { if (!elemInfo.IsBoxing) - return new ListBasedArrayConverter(elemInfo.GetConcreteResolution()); + return new ListBasedArrayConverter(elemInfo.GetResolution()); ThrowBoxingNotSupported(resolver: false); return default; diff --git a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs index c0bf23e172..5ece24129a 100644 --- a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs +++ b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs @@ -100,7 +100,7 @@ PgSerializerOptions TypeMappingOptions if (typeInfo is PgResolverTypeInfo info) dataTypeName = info.GetObjectResolution(value).PgTypeId.DataTypeName; else - dataTypeName = typeInfo?.GetConcreteResolution().PgTypeId.DataTypeName; + dataTypeName = 
typeInfo?.GetResolution().PgTypeId.DataTypeName; } catch { diff --git a/test/Npgsql.Tests/Types/CompositeTests.cs b/test/Npgsql.Tests/Types/CompositeTests.cs index 11f7739158..c62496b8b8 100644 --- a/test/Npgsql.Tests/Types/CompositeTests.cs +++ b/test/Npgsql.Tests/Types/CompositeTests.cs @@ -261,6 +261,54 @@ await AssertType( comparer: (actual, expected) => actual.Ints!.SequenceEqual(expected.Ints!)); } + [Test] + public async Task Composite_containing_converter_resolver_type() + { + await using var adminConnection = await OpenConnectionAsync(); + var compositeType = await GetTempTypeName(adminConnection); + + await adminConnection.ExecuteNonQueryAsync($@" +CREATE TYPE {compositeType} AS (date_times timestamp[])"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.ConnectionStringBuilder.Timezone = "Europe/Berlin"; + dataSourceBuilder.MapComposite(compositeType); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + await AssertType( + connection, + new SomeCompositeWithConverterResolverType { DateTimes = new [] { new DateTime(DateTime.UnixEpoch.Ticks, DateTimeKind.Unspecified), new DateTime(DateTime.UnixEpoch.Ticks, DateTimeKind.Unspecified).AddDays(1) } }, + """("{""1970-01-01 00:00:00"",""1970-01-02 00:00:00""}")""", + compositeType, + npgsqlDbType: null, + comparer: (actual, expected) => actual.DateTimes!.SequenceEqual(expected.DateTimes!)); + } + + [Test] + public async Task Composite_containing_converter_resolver_type_throws() + { + await using var adminConnection = await OpenConnectionAsync(); + var compositeType = await GetTempTypeName(adminConnection); + + await adminConnection.ExecuteNonQueryAsync($@" +CREATE TYPE {compositeType} AS (date_times timestamp[])"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.ConnectionStringBuilder.Timezone = "Europe/Berlin"; + dataSourceBuilder.MapComposite(compositeType); + await using var 
dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + Assert.ThrowsAsync(() => AssertType( + connection, + new SomeCompositeWithConverterResolverType { DateTimes = new[] { DateTime.UnixEpoch } }, // UTC DateTime + """("{""1970-01-01 01:00:00"",""1970-01-02 01:00:00""}")""", + compositeType, + npgsqlDbType: null, + comparer: (actual, expected) => actual.DateTimes!.SequenceEqual(expected.DateTimes!))); + } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/990")] public async Task Table_as_composite([Values] bool enabled) { @@ -431,6 +479,11 @@ class SomeCompositeWithArray public int[]? Ints { get; set; } } + class SomeCompositeWithConverterResolverType + { + public DateTime[]? DateTimes { get; set; } + } + record NameTranslationComposite { public int Simple { get; set; } From 41baedb415eb868fa106f8272e31a65503b21440 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Tue, 3 Oct 2023 17:33:18 +0200 Subject: [PATCH 230/761] Polymorphic object arrays redux (#5302) --- .../Composites/Metadata/CompositeFieldInfo.cs | 6 +- .../Internal/Converters/CastingConverter.cs | 2 +- .../Internal/Converters/CompositeConverter.cs | 3 +- .../Internal/Converters/NullableConverter.cs | 4 +- .../Internal/Converters/ObjectConverter.cs | 109 ++++++++++++++++++ .../VersionPrefixedTextConverter.cs | 2 +- src/Npgsql/Internal/PgConverter.cs | 22 ++-- .../Internal/Resolvers/AdoTypeInfoResolver.cs | 36 +++++- test/Npgsql.Tests/Types/MiscTypeTests.cs | 16 +++ 9 files changed, 175 insertions(+), 25 deletions(-) create mode 100644 src/Npgsql/Internal/Converters/ObjectConverter.cs diff --git a/src/Npgsql/Internal/Composites/Metadata/CompositeFieldInfo.cs b/src/Npgsql/Internal/Composites/Metadata/CompositeFieldInfo.cs index 2b742ed511..1e6f321f6b 100644 --- a/src/Npgsql/Internal/Composites/Metadata/CompositeFieldInfo.cs +++ b/src/Npgsql/Internal/Composites/Metadata/CompositeFieldInfo.cs @@ -114,7 +114,7 @@ protected ValueTask 
WriteAsObject(bool async, PgConverter converter, PgWriter wr public abstract void ReadDbNull(CompositeBuilder builder); public abstract ValueTask Read(bool async, PgConverter converter, CompositeBuilder builder, PgReader reader, CancellationToken cancellationToken = default); - public abstract bool IsDbNull(PgConverter converter, object instance); + public abstract bool IsDbNull(PgConverter converter, object instance, ref object? writeState); public abstract Size? GetSizeOrDbNull(PgConverter converter, DataFormat format, Size writeRequirement, object instance, ref object? writeState); public abstract ValueTask Write(bool async, PgConverter converter, PgWriter writer, object instance, CancellationToken cancellationToken); } @@ -232,10 +232,10 @@ async ValueTask Core(CompositeBuilder builder, ValueTask task) public override bool IsDbNullable => Converter?.IsDbNullable ?? true; - public override bool IsDbNull(PgConverter converter, object instance) + public override bool IsDbNull(PgConverter converter, object instance, ref object? writeState) { var value = _getter(instance); - return AsObject(converter) ? converter.IsDbNullAsObject(value) : ((PgConverter)converter).IsDbNull(value); + return AsObject(converter) ? converter.IsDbNullAsObject(value, ref writeState) : ((PgConverter)converter).IsDbNull(value, ref writeState); } public override Size? GetSizeOrDbNull(PgConverter converter, DataFormat format, Size writeRequirement, object instance, ref object? writeState) diff --git a/src/Npgsql/Internal/Converters/CastingConverter.cs b/src/Npgsql/Internal/Converters/CastingConverter.cs index 1d90cde352..fdf8b9a26e 100644 --- a/src/Npgsql/Internal/Converters/CastingConverter.cs +++ b/src/Npgsql/Internal/Converters/CastingConverter.cs @@ -14,7 +14,7 @@ public CastingConverter(PgConverter effectiveConverter) : base(effectiveConverter.DbNullPredicateKind is DbNullPredicate.Custom) => _effectiveConverter = effectiveConverter; - protected override bool IsDbNullValue(T? 
value) => _effectiveConverter.IsDbNullAsObject(value); + protected override bool IsDbNullValue(T? value, ref object? writeState) => _effectiveConverter.IsDbNullAsObject(value, ref writeState); public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) => _effectiveConverter.CanConvert(format, out bufferRequirements); diff --git a/src/Npgsql/Internal/Converters/CompositeConverter.cs b/src/Npgsql/Internal/Converters/CompositeConverter.cs index cec42f28e1..04bd0bca60 100644 --- a/src/Npgsql/Internal/Converters/CompositeConverter.cs +++ b/src/Npgsql/Internal/Converters/CompositeConverter.cs @@ -178,9 +178,10 @@ async ValueTask Write(bool async, PgWriter writer, T value, CancellationToken ca if (data?[i] is not { } state) { var converter = field.GetWriteInfo(boxedInstance, out var writeRequirement); + object? fieldState = null; elementState = new() { - Size = field.IsDbNull(converter, boxedInstance) ? -1 : writeRequirement, + Size = field.IsDbNull(converter, boxedInstance, ref fieldState) ? -1 : writeRequirement, WriteState = null, Converter = converter, BufferRequirement = writeRequirement, diff --git a/src/Npgsql/Internal/Converters/NullableConverter.cs b/src/Npgsql/Internal/Converters/NullableConverter.cs index b3f8a8a0b2..d16f289a43 100644 --- a/src/Npgsql/Internal/Converters/NullableConverter.cs +++ b/src/Npgsql/Internal/Converters/NullableConverter.cs @@ -14,8 +14,8 @@ public NullableConverter(PgConverter effectiveConverter) : base(effectiveConverter.DbNullPredicateKind is DbNullPredicate.Custom) => _effectiveConverter = effectiveConverter; - protected override bool IsDbNullValue(T? value) - => value is null || _effectiveConverter.IsDbNull(value.GetValueOrDefault()); + protected override bool IsDbNullValue(T? value, ref object? 
writeState) + => value is null || _effectiveConverter.IsDbNull(value.GetValueOrDefault(), ref writeState); public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) => _effectiveConverter.CanConvert(format, out bufferRequirements); diff --git a/src/Npgsql/Internal/Converters/ObjectConverter.cs b/src/Npgsql/Internal/Converters/ObjectConverter.cs new file mode 100644 index 0000000000..568fc32c2b --- /dev/null +++ b/src/Npgsql/Internal/Converters/ObjectConverter.cs @@ -0,0 +1,109 @@ +using System; +using System.Threading; +using System.Threading.Tasks; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Internal; + +sealed class ObjectConverter : PgStreamingConverter +{ + readonly PgSerializerOptions _options; + readonly PgTypeId _pgTypeId; + + public ObjectConverter(PgSerializerOptions options, PgTypeId pgTypeId) + : base(customDbNullPredicate: true) + { + _options = options; + _pgTypeId = pgTypeId; + } + + protected override bool IsDbNullValue(object? value, ref object? writeState) + { + if (value is null or DBNull) + return true; + + var typeInfo = GetTypeInfo(value.GetType()); + + object? effectiveState = null; + var converter = typeInfo.GetObjectResolution(value).Converter; + if (converter.IsDbNullAsObject(value, ref effectiveState)) + return true; + + writeState = effectiveState is not null ? new WriteState { TypeInfo = typeInfo, EffectiveState = effectiveState } : typeInfo; + return false; + } + + public override object Read(PgReader reader) => throw new NotSupportedException(); + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) => throw new NotSupportedException(); + + public override Size GetSize(SizeContext context, object value, ref object? 
writeState) + { + var (typeInfo, effectiveState) = writeState switch + { + PgTypeInfo info => (info, null), + WriteState state => (state.TypeInfo, state.EffectiveState), + _ => throw new InvalidOperationException("Invalid state") + }; + + // We can call GetDefaultResolution here as validation has already happened in IsDbNullValue. + // And we know it was called due to the writeState being filled. + var converter = typeInfo is PgResolverTypeInfo resolverTypeInfo + ? resolverTypeInfo.GetDefaultResolution(null).Converter + : typeInfo.GetResolution().Converter; + if (typeInfo.GetBufferRequirements(converter, context.Format) is not { } bufferRequirements) + { + ThrowHelper.ThrowNotSupportedException($"Resolved converter '{converter.GetType()}' has to support the {context.Format} format to be compatible."); + return default; + } + + // Fixed size converters won't have a GetSize implementation. + if (bufferRequirements.Write.Kind is SizeKind.Exact) + return bufferRequirements.Write; + + var result = converter.GetSizeAsObject(context, value, ref effectiveState); + if (effectiveState is not null) + { + if (writeState is WriteState state && !ReferenceEquals(state.EffectiveState, effectiveState)) + state.EffectiveState = effectiveState; + else + writeState = new WriteState { TypeInfo = typeInfo, EffectiveState = effectiveState }; + } + + return result; + } + + public override void Write(PgWriter writer, object value) + => Write(async: false, writer, value, CancellationToken.None).GetAwaiter().GetResult(); + + public override ValueTask WriteAsync(PgWriter writer, object value, CancellationToken cancellationToken = default) + => Write(async: true, writer, value, cancellationToken); + + async ValueTask Write(bool async, PgWriter writer, object value, CancellationToken cancellationToken) + { + var (typeInfo, effectiveState) = writer.Current.WriteState switch + { + PgTypeInfo info => (info, null), + WriteState state => (state.TypeInfo, state.EffectiveState), + _ => throw new 
InvalidOperationException("Invalid state") + }; + + // We can call GetDefaultResolution here as validation has already happened in IsDbNullValue. + // And we know it was called due to the writeState being filled. + var converter = typeInfo is PgResolverTypeInfo resolverTypeInfo + ? resolverTypeInfo.GetDefaultResolution(null).Converter + : typeInfo.GetResolution().Converter; + var writeRequirement = typeInfo.GetBufferRequirements(converter, DataFormat.Binary)!.Value.Write; + using var _ = await writer.BeginNestedWrite(async, writeRequirement, writer.Current.Size.Value, effectiveState, cancellationToken).ConfigureAwait(false); + await converter.WriteAsObject(async, writer, value, cancellationToken).ConfigureAwait(false); + } + + PgTypeInfo GetTypeInfo(Type type) + => _options.GetTypeInfo(type, _pgTypeId) + ?? throw new NotSupportedException($"Writing values of '{type.FullName}' having DataTypeName '{_options.DatabaseInfo.GetPostgresType(_pgTypeId).DisplayName}' is not supported."); + + sealed class WriteState + { + public required PgTypeInfo TypeInfo { get; init; } + public required object EffectiveState { get; set; } + } +} diff --git a/src/Npgsql/Internal/Converters/VersionPrefixedTextConverter.cs b/src/Npgsql/Internal/Converters/VersionPrefixedTextConverter.cs index d4776550fd..ff3a985a66 100644 --- a/src/Npgsql/Internal/Converters/VersionPrefixedTextConverter.cs +++ b/src/Npgsql/Internal/Converters/VersionPrefixedTextConverter.cs @@ -18,7 +18,7 @@ public VersionPrefixedTextConverter(byte versionPrefix, PgConverter textConve _textConverter = textConverter; } - protected override bool IsDbNullValue(T? value) => _textConverter.IsDbNull(value); + protected override bool IsDbNullValue(T? value, ref object? 
writeState) => _textConverter.IsDbNull(value, ref writeState); public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) => VersionPrefixedTextConverter.CanConvert(_textConverter, format, out _innerRequirements, out bufferRequirements); diff --git a/src/Npgsql/Internal/PgConverter.cs b/src/Npgsql/Internal/PgConverter.cs index cecd49fe4b..db6796c1ff 100644 --- a/src/Npgsql/Internal/PgConverter.cs +++ b/src/Npgsql/Internal/PgConverter.cs @@ -26,17 +26,17 @@ private protected PgConverter(Type type, bool isNullDefaultValue, bool customDbN internal abstract Type TypeToConvert { get; } - internal bool IsDbNullAsObject([NotNullWhen(false)] object? value) + internal bool IsDbNullAsObject([NotNullWhen(false)] object? value, ref object? writeState) => DbNullPredicateKind switch { DbNullPredicate.Null => value is null, DbNullPredicate.None => false, DbNullPredicate.PolymorphicNull => value is null or DBNull, // We do the null check to keep the NotNullWhen(false) invariant. - _ => IsDbNullValueAsObject(value) || (value is null && ThrowInvalidNullValue()) + _ => IsDbNullValueAsObject(value, ref writeState) || (value is null && ThrowInvalidNullValue()) }; - private protected abstract bool IsDbNullValueAsObject(object? value); + private protected abstract bool IsDbNullValueAsObject(object? value, ref object? writeState); internal abstract Size GetSizeAsObject(SizeContext context, object value, ref object? writeState); @@ -94,14 +94,14 @@ public abstract class PgConverter : PgConverter private protected PgConverter(bool customDbNullPredicate) : base(typeof(T), default(T) is null, customDbNullPredicate) { } - protected virtual bool IsDbNullValue(T? value) => throw new NotSupportedException(); + protected virtual bool IsDbNullValue(T? value, ref object? writeState) => throw new NotSupportedException(); // Object null semantics as follows, if T is a struct (so excluding nullable) report false for null values, don't throw on the cast. 
// As a result this creates symmetry with IsDbNull when we're dealing with a struct T, as it cannot be passed null at all. - private protected override bool IsDbNullValueAsObject(object? value) - => (default(T) is null || value is not null) && IsDbNullValue(Downcast(value)); + private protected override bool IsDbNullValueAsObject(object? value, ref object? writeState) + => (default(T) is null || value is not null) && IsDbNullValue(Downcast(value), ref writeState); - public bool IsDbNull([NotNullWhen(false)] T? value) + public bool IsDbNull([NotNullWhen(false)] T? value, ref object? writeState) { return DbNullPredicateKind switch { @@ -109,7 +109,7 @@ public bool IsDbNull([NotNullWhen(false)] T? value) DbNullPredicate.None => false, DbNullPredicate.PolymorphicNull => value is null or DBNull, // We do the null check to keep the NotNullWhen(false) invariant. - DbNullPredicate.Custom => IsDbNullValue(value) || (value is null && ThrowInvalidNullValue()), + DbNullPredicate.Custom => IsDbNullValue(value, ref writeState) || (value is null && ThrowInvalidNullValue()), _ => ThrowOutOfRange() }; @@ -137,7 +137,7 @@ static class PgConverterExtensions { public static Size? GetSizeOrDbNull(this PgConverter converter, DataFormat format, Size writeRequirement, T? value, ref object? writeState) { - if (converter.IsDbNull(value)) + if (converter.IsDbNull(value, ref writeState)) return null; if (writeRequirement is { Kind: SizeKind.Exact, Value: var byteCount }) @@ -160,7 +160,7 @@ static class PgConverterExtensions public static Size? GetSizeOrDbNullAsObject(this PgConverter converter, DataFormat format, Size writeRequirement, object? value, ref object? 
writeState) { - if (converter.IsDbNullAsObject(value)) + if (converter.IsDbNullAsObject(value, ref writeState)) return null; if (writeRequirement is { Kind: SizeKind.Exact, Value: var byteCount }) @@ -196,8 +196,8 @@ public SizeContext(DataFormat format, Size bufferRequirement) BufferRequirement = bufferRequirement; } - public DataFormat Format { get; } public required Size BufferRequirement { get; init; } + public DataFormat Format { get; } } class MultiWriteState : IDisposable diff --git a/src/Npgsql/Internal/Resolvers/AdoTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/AdoTypeInfoResolver.cs index 0f2c077aad..121581405b 100644 --- a/src/Npgsql/Internal/Resolvers/AdoTypeInfoResolver.cs +++ b/src/Npgsql/Internal/Resolvers/AdoTypeInfoResolver.cs @@ -31,6 +31,7 @@ public AdoTypeInfoResolver() var info = Mappings.Find(type, dataTypeName, options); if (info is null && dataTypeName is not null) info = GetEnumTypeInfo(type, dataTypeName.GetValueOrDefault(), options); + return info; } @@ -470,21 +471,44 @@ public AdoArrayTypeInfoResolver() public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) { var info = Mappings.Find(type, dataTypeName, options); - if (info is null && dataTypeName is not null) - info = GetEnumArrayTypeInfo(type, dataTypeName.GetValueOrDefault(), options); + + Type? elementType = null; + if (info is null && dataTypeName is not null && + (type is null || type == typeof(object) || TypeInfoMappingCollection.IsArrayLikeType(type, out elementType)) + && options.DatabaseInfo.GetPostgresType(dataTypeName) is PostgresArrayType { Element: var pgElementType }) + { + info = GetEnumArrayTypeInfo(elementType, pgElementType, type, dataTypeName.GetValueOrDefault(), options) ?? + GetObjectArrayTypeInfo(elementType, pgElementType, type, dataTypeName.GetValueOrDefault(), options); + } return info; } - static PgTypeInfo? GetEnumArrayTypeInfo(Type? 
type, DataTypeName dataTypeName, PgSerializerOptions options) + static PgTypeInfo? GetObjectArrayTypeInfo(Type? elementType, PostgresType pgElementType, Type? type, DataTypeName dataTypeName, + PgSerializerOptions options) { - if (type is not null && type != typeof(object) && (!TypeInfoMappingCollection.IsArrayLikeType(type, out var elementType) || elementType != typeof(string))) + if (elementType != typeof(object)) + return null; + + // Probe if there is any mapping at all for this element type. + var elementId = options.ToCanonicalTypeId(pgElementType); + if (options.GetDefaultTypeInfo(elementId) is null) return null; - if (options.DatabaseInfo.GetPostgresType(dataTypeName) is not PostgresArrayType { Element: PostgresEnumType enumType }) + var mappings = new TypeInfoMappingCollection(); + mappings.AddType(pgElementType.DataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new ObjectConverter(options, elementId)), MatchRequirement.DataTypeName); + mappings.AddArrayType(pgElementType.DataTypeName); + return mappings.Find(type, dataTypeName, options); + } + + static PgTypeInfo? GetEnumArrayTypeInfo(Type? elementType, PostgresType pgElementType, Type? 
type, DataTypeName dataTypeName, PgSerializerOptions options) + { + if ((type != typeof(object) && elementType is not null && elementType != typeof(string)) || pgElementType is not PostgresEnumType enumType) return null; var mappings = new TypeInfoMappingCollection(); - mappings.AddType(enumType.DataTypeName, (options, mapping, _) => mapping.CreateInfo(options, new StringTextConverter(options.TextEncoding)), MatchRequirement.DataTypeName); + mappings.AddType(enumType.DataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new StringTextConverter(options.TextEncoding)), MatchRequirement.DataTypeName); mappings.AddArrayType(enumType.DataTypeName); return mappings.Find(type, dataTypeName, options); } diff --git a/test/Npgsql.Tests/Types/MiscTypeTests.cs b/test/Npgsql.Tests/Types/MiscTypeTests.cs index 57d241a811..c080fa1f5a 100644 --- a/test/Npgsql.Tests/Types/MiscTypeTests.cs +++ b/test/Npgsql.Tests/Types/MiscTypeTests.cs @@ -153,6 +153,22 @@ public async Task Send_unknown() #endregion + + [Test] + public async Task ObjectArray() + { + await AssertTypeWrite(new object?[] { (short)4, null, (long)5, 6 }, "{4,NULL,5,6}", "integer[]", NpgsqlDbType.Integer | NpgsqlDbType.Array, isDefault: false); + await AssertTypeWrite(new object?[] { "text", null, DBNull.Value, "chars".ToCharArray(), 'c' }, "{text,NULL,NULL,chars,c}", "text[]", NpgsqlDbType.Text | NpgsqlDbType.Array, isDefault: false); + + await using var dataSource = CreateDataSource(b => b.ConnectionStringBuilder.Timezone = "Europe/Berlin"); + await AssertTypeWrite(dataSource, new object?[] { DateTime.UnixEpoch, null, DBNull.Value, DateTime.UnixEpoch.AddDays(1) }, "{\"1970-01-01 01:00:00+01\",NULL,NULL,\"1970-01-02 01:00:00+01\"}", "timestamp with time zone[]", NpgsqlDbType.TimestampTz | NpgsqlDbType.Array, isDefault: false); + Assert.ThrowsAsync(() => AssertTypeWrite(dataSource, new object?[] + { + DateTime.Now, null, DBNull.Value, DateTime.UnixEpoch.AddDays(1) + }, "{\"1970-01-01 
01:00:00+01\",NULL,NULL,\"1970-01-02 01:00:00+01\"}", "timestamp with time zone[]", + NpgsqlDbType.TimestampTz | NpgsqlDbType.Array, isDefault: false)); + } + [Test] public Task Int2Vector() => AssertType(new short[] { 4, 5, 6 }, "4 5 6", "int2vector", NpgsqlDbType.Int2Vector, isDefault: false); From 6384993d8e58889c4ef5e9e18a24b4b550f8fd13 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Oct 2023 19:19:20 +0200 Subject: [PATCH 231/761] Bump AdoNet.Specification.Tests from 2.0.0-alpha8 to 2.0.0-beta.1 (#4590) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 4d1e52c2bc..a0d29356a3 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -41,7 +41,7 @@ - + From f5b3be1c40d9c74cd20eac84947802d426fe568e Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Wed, 4 Oct 2023 15:36:31 +0200 Subject: [PATCH 232/761] Bump version to 8.0.0-rc.2 --- Directory.Build.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Build.props b/Directory.Build.props index c677a6ee5d..bcae7c3d25 100644 --- a/Directory.Build.props +++ b/Directory.Build.props @@ -1,6 +1,6 @@  - 8.0.0-preview.5 + 8.0.0-rc.2 latest true enable From 33d4960ca3397bdd611ce688dd75aa7bbd526a08 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emmanuel=20Andr=C3=A9?= <2341261+manandre@users.noreply.github.com> Date: Wed, 4 Oct 2023 17:55:46 +0200 Subject: [PATCH 233/761] Fix logging of legacy batch commands (#5242) Closes #5199 --- src/Npgsql/NpgsqlCommand.cs | 4 +- test/Npgsql.Tests/CommandTests.cs | 81 +++++++++++++++++++++++++++++++ 2 files changed, 83 insertions(+), 2 deletions(-) diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index 4c8cbadcf9..92edf67b21 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -1799,7 +1799,7 @@ internal void 
LogExecutingCompleted(NpgsqlConnector connector, bool executing) { var commands = new (string, object[])[InternalBatchCommands.Count]; for (var i = 0; i < InternalBatchCommands.Count; i++) - commands[i] = (InternalBatchCommands[i].CommandText, ParametersDbNullAsString(InternalBatchCommands[i])); + commands[i] = (InternalBatchCommands[i].FinalCommandText!, ParametersDbNullAsString(InternalBatchCommands[i])); if (executing) LogMessages.ExecutingBatchWithParameters(logger, commands, connector.Id); @@ -1810,7 +1810,7 @@ internal void LogExecutingCompleted(NpgsqlConnector connector, bool executing) { var commands = new string[InternalBatchCommands.Count]; for (var i = 0; i < InternalBatchCommands.Count; i++) - commands[i] = InternalBatchCommands[i].CommandText; + commands[i] = InternalBatchCommands[i].FinalCommandText!; if (executing) LogMessages.ExecutingBatch(logger, commands, connector.Id); else diff --git a/test/Npgsql.Tests/CommandTests.cs b/test/Npgsql.Tests/CommandTests.cs index 6133e100c5..0cbd9692e9 100644 --- a/test/Npgsql.Tests/CommandTests.cs +++ b/test/Npgsql.Tests/CommandTests.cs @@ -6,6 +6,7 @@ using NUnit.Framework; using System; using System.Buffers.Binary; +using System.Collections.Generic; using System.Data; using System.Linq; using System.Text; @@ -1488,6 +1489,86 @@ public async Task Log_ExecuteScalar_single_statement_with_parameter_logging_off( AssertLoggingStateDoesNotContain(executingCommandEvent, "Parameters"); } + [Test] + public async Task Log_ExecuteScalar_multiple_statement_without_parameters() + { + await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT 1; SELECT 2", conn); + + using (listLoggerProvider.Record()) + { + await cmd.ExecuteScalarAsync(); + } + + var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); + 
Assert.That(executingCommandEvent.Message, Does.Contain("Batch execution completed").And.Contains("[(SELECT 1, System.Object[]), (SELECT 2, System.Object[])]")); + var batchCommands = (IList<(string CommandText, object[] Parameters)>)AssertLoggingStateContains(executingCommandEvent, "BatchCommands"); + Assert.That(batchCommands.Count, Is.EqualTo(2)); + Assert.That(batchCommands[0].CommandText, Is.EqualTo("SELECT 1")); + Assert.That(batchCommands[0].Parameters, Is.Empty); + Assert.That(batchCommands[1].CommandText, Is.EqualTo("SELECT 2")); + Assert.That(batchCommands[1].Parameters, Is.Empty); + AssertLoggingStateDoesNotContain(executingCommandEvent, "Parameters"); + + if (!IsMultiplexing) + AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); + } + + [Test] + public async Task Log_ExecuteScalar_multiple_statement_with_parameters() + { + await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT @p1; SELECT @p2", conn); + cmd.Parameters.Add(new() { ParameterName = "p1", Value = 8 }); + cmd.Parameters.Add(new() { ParameterName = "p2", Value = 9 }); + + using (listLoggerProvider.Record()) + { + await cmd.ExecuteScalarAsync(); + } + + var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); + Assert.That(executingCommandEvent.Message, Does.Contain("Batch execution completed").And.Contains("[(SELECT $1, System.Object[]), (SELECT $1, System.Object[])]")); + var batchCommands = (IList<(string CommandText, object[] Parameters)>)AssertLoggingStateContains(executingCommandEvent, "BatchCommands"); + Assert.That(batchCommands.Count, Is.EqualTo(2)); + Assert.That(batchCommands[0].CommandText, Is.EqualTo("SELECT $1")); + Assert.That(batchCommands[0].Parameters[0], Is.EqualTo(8)); + Assert.That(batchCommands[1].CommandText, Is.EqualTo("SELECT $1")); + 
Assert.That(batchCommands[1].Parameters[0], Is.EqualTo(9)); + AssertLoggingStateDoesNotContain(executingCommandEvent, "Parameters"); + + if (!IsMultiplexing) + AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); + } + + [Test] + public async Task Log_ExecuteScalar_multiple_statement_with_parameter_logging_off() + { + await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider, sensitiveDataLoggingEnabled: false); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT @p1; SELECT @p2", conn); + cmd.Parameters.Add(new() { ParameterName = "p1", Value = 8 }); + cmd.Parameters.Add(new() { ParameterName = "p2", Value = 9 }); + + using (listLoggerProvider.Record()) + { + await cmd.ExecuteScalarAsync(); + } + + var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); + Assert.That(executingCommandEvent.Message, Does.Contain("Batch execution completed").And.Contains("[SELECT $1, SELECT $1]")); + var batchCommands = (IList)AssertLoggingStateContains(executingCommandEvent, "BatchCommands"); + Assert.That(batchCommands.Count, Is.EqualTo(2)); + Assert.That(batchCommands[0], Is.EqualTo("SELECT $1")); + Assert.That(batchCommands[1], Is.EqualTo("SELECT $1")); + AssertLoggingStateDoesNotContain(executingCommandEvent, "Parameters"); + + if (!IsMultiplexing) + AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); + } + #endregion Logging public CommandTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} From 94b9478e65ec3e6d1dee23ba72ce6607622109ab Mon Sep 17 00:00:00 2001 From: John Moshakis Date: Wed, 4 Oct 2023 15:59:18 -0400 Subject: [PATCH 234/761] Use derived commandText (#5298) Fixes #5297 --- src/Npgsql/NpgsqlCommand.cs | 2 +- test/Npgsql.Tests/BatchTests.cs | 23 +++++++++++++++++++++++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git 
a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index 92edf67b21..90032d8476 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -942,7 +942,7 @@ internal void ProcessRawQuery(SqlQueryParser? parser, bool standardConformingStr case CommandType.StoredProcedure: var sqlBuilder = new StringBuilder() .Append(EnableStoredProcedureCompatMode ? "SELECT * FROM " : "CALL ") - .Append(CommandText) + .Append(commandText) .Append('('); var isFirstParam = true; diff --git a/test/Npgsql.Tests/BatchTests.cs b/test/Npgsql.Tests/BatchTests.cs index 766f022f07..bd38ea2e87 100644 --- a/test/Npgsql.Tests/BatchTests.cs +++ b/test/Npgsql.Tests/BatchTests.cs @@ -242,6 +242,29 @@ public async Task StatementType_Call() Assert.That(batch.BatchCommands[0].StatementType, Is.EqualTo(StatementType.Call)); } + [Test] + public async Task CommandType_StoredProcedure() + { + await using var conn = await OpenConnectionAsync(); + MinimumPgVersion(conn, "11.0", "Stored procedures are supported starting with PG 11"); + + var sproc = await GetTempProcedureName(conn); + await conn.ExecuteNonQueryAsync($"CREATE PROCEDURE {sproc}() LANGUAGE sql AS ''"); + + await using var batch = new NpgsqlBatch(conn) + { + BatchCommands = { new($"{sproc}") {CommandType = CommandType.StoredProcedure} } + }; + + await using var reader = await batch.ExecuteReaderAsync(Behavior); + + // Consume SELECT result set to parse the CommandComplete + await reader.CloseAsync(); + + Assert.That(batch.BatchCommands[0].StatementType, Is.EqualTo(StatementType.Call)); + } + + [Test] public async Task StatementType_Merge() { From 3d4b1d709982468345aca212eceee17e69869087 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Thu, 5 Oct 2023 18:44:53 +0200 Subject: [PATCH 235/761] Small rename --- src/Npgsql/NpgsqlParameter.cs | 6 +++--- src/Npgsql/TypeMapping/GlobalTypeMapper.cs | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Npgsql/NpgsqlParameter.cs b/src/Npgsql/NpgsqlParameter.cs 
index 4288412025..eda05a7746 100644 --- a/src/Npgsql/NpgsqlParameter.cs +++ b/src/Npgsql/NpgsqlParameter.cs @@ -319,7 +319,7 @@ public sealed override DbType DbType // Infer from value but don't cache if (Value is not null) // We pass ValueType here for the generic derived type, where we should respect T and not the runtime type. - return GlobalTypeMapper.Instance.TryGetDataTypeName(GetValueType(StaticValueType)!, Value)?.ToNpgsqlDbType()?.ToDbType() ?? DbType.Object; + return GlobalTypeMapper.Instance.FindDataTypeName(GetValueType(StaticValueType)!, Value)?.ToNpgsqlDbType()?.ToDbType() ?? DbType.Object; return DbType.Object; } @@ -353,7 +353,7 @@ public NpgsqlDbType NpgsqlDbType // Infer from value but don't cache if (Value is not null) // We pass ValueType here for the generic derived type (NpgsqlParameter) where we should respect T and not the runtime type. - return GlobalTypeMapper.Instance.TryGetDataTypeName(GetValueType(StaticValueType)!, Value)?.ToNpgsqlDbType() ?? NpgsqlDbType.Unknown; + return GlobalTypeMapper.Instance.FindDataTypeName(GetValueType(StaticValueType)!, Value)?.ToNpgsqlDbType() ?? NpgsqlDbType.Unknown; return NpgsqlDbType.Unknown; } @@ -390,7 +390,7 @@ public string? DataTypeName // Infer from value but don't cache if (Value is not null) // We pass ValueType here for the generic derived type, where we should respect T and not the runtime type. - return GlobalTypeMapper.Instance.TryGetDataTypeName(GetValueType(StaticValueType)!, Value)?.DisplayName; + return GlobalTypeMapper.Instance.FindDataTypeName(GetValueType(StaticValueType)!, Value)?.DisplayName; return null; } diff --git a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs index 5ece24129a..1f2b52aaa8 100644 --- a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs +++ b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs @@ -91,7 +91,7 @@ PgSerializerOptions TypeMappingOptions } } - internal DataTypeName? 
TryGetDataTypeName(Type type, object value) + internal DataTypeName? FindDataTypeName(Type type, object value) { DataTypeName? dataTypeName; try From 2d1b08d1a0448d450e6ccc96b2f76f9c1f810352 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 6 Oct 2023 12:05:26 +0200 Subject: [PATCH 236/761] Bump BenchmarkDotNet from 0.13.8 to 0.13.9 (#5313) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index a0d29356a3..dd00ae8494 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -45,7 +45,7 @@ - + From f1adab4826e7cc3f42bcefbd5a58066c794a102f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 6 Oct 2023 12:05:44 +0200 Subject: [PATCH 237/761] Bump BenchmarkDotNet.Diagnostics.Windows from 0.13.8 to 0.13.9 (#5312) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index dd00ae8494..66707f188b 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -47,7 +47,7 @@ - + From f60fbbd1c4c9b274e5eeb17d302ebab27fd9ab95 Mon Sep 17 00:00:00 2001 From: Patryk Pasek Date: Fri, 6 Oct 2023 13:43:51 +0200 Subject: [PATCH 238/761] Added support for materialized views in GetSchema (#5275) Closes #5151 --- src/Npgsql/NpgsqlSchema.cs | 36 ++++++++++++++++++++++++++++++++ test/Npgsql.Tests/SchemaTests.cs | 13 ++++++++++++ test/Npgsql.Tests/TestUtil.cs | 11 ++++++++++ 3 files changed, 60 insertions(+) diff --git a/src/Npgsql/NpgsqlSchema.cs b/src/Npgsql/NpgsqlSchema.cs index 0e749e1589..bf090058d7 100644 --- a/src/Npgsql/NpgsqlSchema.cs +++ b/src/Npgsql/NpgsqlSchema.cs @@ -37,6 +37,7 @@ public static Task GetSchema(bool async, NpgsqlConnection conn, strin "TABLES" => GetTables(conn, restrictions, async, cancellationToken), "COLUMNS" => GetColumns(conn, 
restrictions, async, cancellationToken), "VIEWS" => GetViews(conn, restrictions, async, cancellationToken), + "MATERIALIZEDVIEWS" => GetMaterializedViews(conn, restrictions, async, cancellationToken), "USERS" => GetUsers(conn, restrictions, async, cancellationToken), "INDEXES" => GetIndexes(conn, restrictions, async, cancellationToken), "INDEXCOLUMNS" => GetIndexColumns(conn, restrictions, async, cancellationToken), @@ -378,6 +379,41 @@ WHERE table_schema NOT IN ('pg_catalog', 'information_schema') }, cancellationToken); } + static Task GetMaterializedViews(NpgsqlConnection conn, string?[]? restrictions, bool async, CancellationToken cancellationToken = default) + { + var dataTable = new DataTable("MaterializedViews") + { + Locale = CultureInfo.InvariantCulture, + Columns = + { + new DataColumn("schemaname"), + new DataColumn("matviewname"), + new DataColumn("matviewowner"), + new DataColumn("tablespace"), + new DataColumn("hasindexes", typeof(bool)), + new DataColumn("ispopulated", typeof(bool)) + } + }; + + var sql = new StringBuilder(); + + sql.Append(@"SELECT schemaname, matviewname, matviewowner, tablespace, hasindexes, ispopulated FROM pg_catalog.pg_matviews"); + + return ParseResults( + async, + BuildCommand(conn, sql, restrictions, "schemaname", "matviewname", "matviewowner", "tablespace"), + dataTable, + (reader, row) => + { + row["schemaname"] = GetFieldValueOrDBNull(reader, 0); + row["matviewname"] = GetFieldValueOrDBNull(reader, 1); + row["matviewowner"] = GetFieldValueOrDBNull(reader, 2); + row["tablespace"] = GetFieldValueOrDBNull(reader, 3); + row["hasindexes"] = GetFieldValueOrDBNull(reader, 4); + row["ispopulated"] = GetFieldValueOrDBNull(reader, 5); + }, cancellationToken); + } + static Task GetUsers(NpgsqlConnection conn, string?[]? 
restrictions, bool async, CancellationToken cancellationToken = default) { var dataTable = new DataTable("Users") diff --git a/test/Npgsql.Tests/SchemaTests.cs b/test/Npgsql.Tests/SchemaTests.cs index 500cd77345..53d27e7ef2 100644 --- a/test/Npgsql.Tests/SchemaTests.cs +++ b/test/Npgsql.Tests/SchemaTests.cs @@ -357,6 +357,19 @@ public async Task GetSchema_views_with_restrictions() Assert.That(row["table_name"], Is.EqualTo(view)); } + [Test] + public async Task GetSchema_materialized_views_with_restrictions() + { + await using var conn = await OpenConnectionAsync(); + var viewName = await GetTempMaterializedViewName(conn); + + await conn.ExecuteNonQueryAsync($"CREATE MATERIALIZED VIEW {viewName} AS SELECT 8 AS foo"); + + var dt = await GetSchema(conn, "MaterializedViews", new[] { null, viewName, null, null }); + foreach (var row in dt.Rows.OfType()) + Assert.That(row["matviewname"], Is.EqualTo(viewName)); + } + [Test] public async Task Primary_key() { diff --git a/test/Npgsql.Tests/TestUtil.cs b/test/Npgsql.Tests/TestUtil.cs index 35df4e6e4c..ead7f8eae3 100644 --- a/test/Npgsql.Tests/TestUtil.cs +++ b/test/Npgsql.Tests/TestUtil.cs @@ -258,6 +258,17 @@ internal static async Task GetTempViewName(NpgsqlConnection conn) return viewName; } + /// + /// Generates a unique materialized view name, usable for a single test, and drops it if it already exists. + /// Actual creation of the materialized view is the responsibility of the caller. + /// + internal static async Task GetTempMaterializedViewName(NpgsqlConnection conn) + { + var viewName = "temp_materialized_view" + Interlocked.Increment(ref _tempViewCounter); + await conn.ExecuteNonQueryAsync($"DROP MATERIALIZED VIEW IF EXISTS {viewName} CASCADE"); + return viewName; + } + /// /// Generates a unique function name, usable for a single test. /// Actual creation of the function is the responsibility of the caller. 
From 3c2b212dcad34b8aea9ff614ea22c18b9189915e Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Fri, 6 Oct 2023 14:07:31 +0200 Subject: [PATCH 239/761] Make regular NpgsqlDataSource safe for trimming/NativeAOT (#5309) Closes #5289 Co-authored-by: Nino Floris --- .../NpgsqlServiceCollectionExtensions.cs | 12 - .../Internal/JsonNetPocoTypeInfoResolver.cs | 4 +- .../NpgsqlJsonNetExtensions.cs | 2 +- .../BackendMessages/RowDescriptionMessage.cs | 21 +- .../ReflectionCompositeInfoFactory.cs | 2 +- ...mTextJsonConverter.cs => JsonConverter.cs} | 18 +- .../Internal/Resolvers/AdoTypeInfoResolver.cs | 6 +- ...lver.cs => JsonDynamicTypeInfoResolver.cs} | 26 +- ...oResolvers.cs => JsonTypeInfoResolvers.cs} | 12 +- .../Resolvers/RecordTypeInfoResolvers.cs | 19 +- .../Resolvers/UnsupportedTypeInfoResolver.cs | 43 +- src/Npgsql/Internal/TypeInfoMapping.cs | 76 ++-- src/Npgsql/NpgsqlConnection.cs | 20 +- src/Npgsql/NpgsqlDataAdapter.cs | 2 - src/Npgsql/NpgsqlDataSource.cs | 4 - src/Npgsql/NpgsqlDataSourceBuilder.cs | 61 +-- src/Npgsql/NpgsqlFactory.cs | 2 - src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 32 +- .../Properties/NpgsqlStrings.Designer.cs | 18 + src/Npgsql/Properties/NpgsqlStrings.resx | 13 +- src/Npgsql/PublicAPI.Unshipped.txt | 8 +- src/Npgsql/TypeMapping/GlobalTypeMapper.cs | 12 +- src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs | 10 +- .../INpgsqlTypeMapperExtensions.cs | 77 ++++ src/Npgsql/TypeMapping/UserTypeMapper.cs | 10 +- test/Npgsql.Tests/Support/TestBase.cs | 6 +- test/Npgsql.Tests/Types/EnumTests.cs | 29 +- test/Npgsql.Tests/Types/JsonDynamicTests.cs | 380 ++++++++++++++++++ test/Npgsql.Tests/Types/JsonTests.cs | 335 +-------------- test/Npgsql.Tests/Types/MultirangeTests.cs | 35 +- test/Npgsql.Tests/Types/RangeTests.cs | 25 +- test/Npgsql.Tests/Types/RecordTests.cs | 32 +- 32 files changed, 799 insertions(+), 553 deletions(-) rename src/Npgsql/Internal/Converters/{SystemTextJsonConverter.cs => JsonConverter.cs} (89%) rename 
src/Npgsql/Internal/Resolvers/{SystemTextJsonDynamicTypeInfoResolver.cs => JsonDynamicTypeInfoResolver.cs} (80%) rename src/Npgsql/Internal/Resolvers/{SystemTextJsonTypeInfoResolvers.cs => JsonTypeInfoResolvers.cs} (77%) create mode 100644 src/Npgsql/TypeMapping/INpgsqlTypeMapperExtensions.cs create mode 100644 test/Npgsql.Tests/Types/JsonDynamicTests.cs diff --git a/src/Npgsql.DependencyInjection/NpgsqlServiceCollectionExtensions.cs b/src/Npgsql.DependencyInjection/NpgsqlServiceCollectionExtensions.cs index 2e5a5eca56..b1754b690b 100644 --- a/src/Npgsql.DependencyInjection/NpgsqlServiceCollectionExtensions.cs +++ b/src/Npgsql.DependencyInjection/NpgsqlServiceCollectionExtensions.cs @@ -30,8 +30,6 @@ public static class NpgsqlServiceCollectionExtensions /// Defaults to . /// /// The same service collection so that multiple calls can be chained. - [RequiresUnreferencedCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums etc. Use NpgsqlSlimDataSourceBuilder to start with a reduced - reflection free - set and opt into what your app specifically requires.")] - [RequiresDynamicCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums. This can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] public static IServiceCollection AddNpgsqlDataSource( this IServiceCollection serviceCollection, string connectionString, @@ -54,8 +52,6 @@ public static IServiceCollection AddNpgsqlDataSource( /// Defaults to . /// /// The same service collection so that multiple calls can be chained. - [RequiresUnreferencedCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums etc. 
Use NpgsqlSlimDataSourceBuilder to start with a reduced - reflection free - set and opt into what your app specifically requires.")] - [RequiresDynamicCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums. This can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] public static IServiceCollection AddNpgsqlDataSource( this IServiceCollection serviceCollection, string connectionString, @@ -81,8 +77,6 @@ public static IServiceCollection AddNpgsqlDataSource( /// Defaults to . /// /// The same service collection so that multiple calls can be chained. - [RequiresUnreferencedCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums etc. Use NpgsqlSlimDataSourceBuilder to start with a reduced - reflection free - set and opt into what your app specifically requires.")] - [RequiresDynamicCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums. This can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] public static IServiceCollection AddMultiHostNpgsqlDataSource( this IServiceCollection serviceCollection, string connectionString, @@ -107,8 +101,6 @@ public static IServiceCollection AddMultiHostNpgsqlDataSource( /// Defaults to . /// /// The same service collection so that multiple calls can be chained. - [RequiresUnreferencedCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums etc. Use NpgsqlSlimDataSourceBuilder to start with a reduced - reflection free - set and opt into what your app specifically requires.")] - [RequiresDynamicCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums. This can require creating new generic types or methods, which requires creating code at runtime. 
This may not work when AOT compiling.")] public static IServiceCollection AddMultiHostNpgsqlDataSource( this IServiceCollection serviceCollection, string connectionString, @@ -117,8 +109,6 @@ public static IServiceCollection AddMultiHostNpgsqlDataSource( => AddNpgsqlMultiHostDataSourceCore( serviceCollection, connectionString, dataSourceBuilderAction: null, connectionLifetime, dataSourceLifetime); - [RequiresUnreferencedCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums etc. Use NpgsqlSlimDataSourceBuilder to start with a reduced - reflection free - set and opt into what your app specifically requires.")] - [RequiresDynamicCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums. This can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] static IServiceCollection AddNpgsqlDataSourceCore( this IServiceCollection serviceCollection, string connectionString, @@ -143,8 +133,6 @@ static IServiceCollection AddNpgsqlDataSourceCore( return serviceCollection; } - [RequiresUnreferencedCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums etc. Use NpgsqlSlimDataSourceBuilder to start with a reduced - reflection free - set and opt into what your app specifically requires.")] - [RequiresDynamicCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums. This can require creating new generic types or methods, which requires creating code at runtime. 
This may not work when AOT compiling.")] static IServiceCollection AddNpgsqlMultiHostDataSourceCore( this IServiceCollection serviceCollection, string connectionString, diff --git a/src/Npgsql.Json.NET/Internal/JsonNetPocoTypeInfoResolver.cs b/src/Npgsql.Json.NET/Internal/JsonNetPocoTypeInfoResolver.cs index a9d54d863f..9d92dca4db 100644 --- a/src/Npgsql.Json.NET/Internal/JsonNetPocoTypeInfoResolver.cs +++ b/src/Npgsql.Json.NET/Internal/JsonNetPocoTypeInfoResolver.cs @@ -8,7 +8,7 @@ namespace Npgsql.Json.NET.Internal; [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] -[RequiresDynamicCode("Serializing arbitary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] +[RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] class JsonNetPocoTypeInfoResolver : DynamicTypeInfoResolver, IPgTypeInfoResolver { protected TypeInfoMappingCollection Mappings { get; } = new(); @@ -83,7 +83,7 @@ static PgConverter CreateConverter(Type valueType, bool jsonb, Encoding textEnco } [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] -[RequiresDynamicCode("Serializing arbitary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] +[RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. 
This may not work when AOT compiling.")] sealed class JsonNetPocoArrayTypeInfoResolver : JsonNetPocoTypeInfoResolver, IPgTypeInfoResolver { new TypeInfoMappingCollection Mappings { get; } diff --git a/src/Npgsql.Json.NET/NpgsqlJsonNetExtensions.cs b/src/Npgsql.Json.NET/NpgsqlJsonNetExtensions.cs index 572458f882..9bfa11df25 100644 --- a/src/Npgsql.Json.NET/NpgsqlJsonNetExtensions.cs +++ b/src/Npgsql.Json.NET/NpgsqlJsonNetExtensions.cs @@ -25,7 +25,7 @@ public static class NpgsqlJsonNetExtensions /// A list of CLR types to map to PostgreSQL json (no need to specify ). /// [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] - [RequiresDynamicCode("Serializing arbitary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] + [RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] public static INpgsqlTypeMapper UseJsonNet( this INpgsqlTypeMapper mapper, JsonSerializerSettings? settings = null, diff --git a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs index 52fcdf8e6a..693db81516 100644 --- a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs +++ b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs @@ -362,8 +362,8 @@ internal void GetInfo(Type? type, ref PgConverterInfo lastConverterInfo, out boo return; } - // Have to check for null as it's a sentinel value used by ObjectOrDefaultTypeInfo init itself. - if (type is not null && ObjectOrDefaultInfo is var odfInfo) + var odfInfo = DataFormat is DataFormat.Text && type is not null ? ObjectOrDefaultInfo : _objectOrDefaultInfo; + if (odfInfo is { IsDefault: false }) { if (typeof(object) == type) { @@ -384,26 +384,31 @@ internal void GetInfo(Type? 
type, ref PgConverterInfo lastConverterInfo, out boo [MethodImpl(MethodImplOptions.NoInlining)] void GetInfoSlow(out PgConverterInfo lastConverterInfo, out bool asObject) { - PgConverterInfo converterInfo; var typeInfo = AdoSerializerHelpers.GetTypeInfoForReading(type ?? typeof(object), PostgresType, _serializerOptions); switch (DataFormat) { case DataFormat.Binary: // If we don't support binary we'll just throw. - converterInfo = typeInfo.Bind(Field, DataFormat); + lastConverterInfo = typeInfo.Bind(Field, DataFormat); + asObject = typeof(object) == type || lastConverterInfo.IsBoxingConverter; break; default: // For text we'll fall back to any available text converter for the expected clr type or throw. - if (!typeInfo.TryBind(Field, DataFormat, out converterInfo)) + if (!typeInfo.TryBind(Field, DataFormat, out lastConverterInfo)) { typeInfo = AdoSerializerHelpers.GetTypeInfoForReading(type ?? typeof(string), _serializerOptions.UnknownPgType, _serializerOptions); - converterInfo = typeInfo.Bind(Field, DataFormat); + lastConverterInfo = typeInfo.Bind(Field, DataFormat); + asObject = type != lastConverterInfo.TypeToConvert || lastConverterInfo.IsBoxingConverter; } + else + asObject = typeof(object) == type || lastConverterInfo.IsBoxingConverter; break; } - lastConverterInfo = converterInfo; - asObject = converterInfo.IsBoxingConverter; + // We delay initializing ObjectOrDefaultInfo until after the first lookup (unless it is itself the first lookup). + // When passed in an unsupported type it allows the error to be more specific, instead of just having object/null to deal with. 
+ if (_objectOrDefaultInfo.IsDefault && type is not null) + _ = ObjectOrDefaultInfo; } } diff --git a/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs b/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs index 9554a62c9d..c0e0e25b94 100644 --- a/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs +++ b/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs @@ -12,7 +12,7 @@ namespace Npgsql.Internal.Composites; -[RequiresDynamicCode("Serializing arbitary types can require creating new generic types or methods. This may not work when AOT compiling.")] +[RequiresDynamicCode("Serializing arbitrary types can require creating new generic types or methods. This may not work when AOT compiling.")] static class ReflectionCompositeInfoFactory { public static CompositeInfo CreateCompositeInfo<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicProperties)] T>( diff --git a/src/Npgsql/Internal/Converters/SystemTextJsonConverter.cs b/src/Npgsql/Internal/Converters/JsonConverter.cs similarity index 89% rename from src/Npgsql/Internal/Converters/SystemTextJsonConverter.cs rename to src/Npgsql/Internal/Converters/JsonConverter.cs index d91d50a90e..7e89489674 100644 --- a/src/Npgsql/Internal/Converters/SystemTextJsonConverter.cs +++ b/src/Npgsql/Internal/Converters/JsonConverter.cs @@ -11,14 +11,14 @@ namespace Npgsql.Internal.Converters; -sealed class SystemTextJsonConverter : PgStreamingConverter where T: TBase? +sealed class JsonConverter : PgStreamingConverter where T: TBase? { readonly bool _jsonb; readonly Encoding _textEncoding; readonly JsonTypeInfo _jsonTypeInfo; readonly JsonTypeInfo? 
_objectTypeInfo; - public SystemTextJsonConverter(bool jsonb, Encoding textEncoding, JsonSerializerOptions serializerOptions) + public JsonConverter(bool jsonb, Encoding textEncoding, JsonSerializerOptions serializerOptions) { if (serializerOptions.TypeInfoResolver is null) throw new InvalidOperationException("System.Text.Json serialization requires a type info resolver, make sure to set-it up beforehand."); @@ -45,7 +45,7 @@ public SystemTextJsonConverter(bool jsonb, Encoding textEncoding, JsonSerializer await reader.Buffer(async, sizeof(byte), cancellationToken).ConfigureAwait(false); // We always fall back to buffers on older targets due to the absence of transcoding stream. - if (SystemTextJsonConverter.TryReadStream(_jsonb, _textEncoding, reader, out var byteCount, out var stream)) + if (JsonConverter.TryReadStream(_jsonb, _textEncoding, reader, out var byteCount, out var stream)) { using var _ = stream; if (_jsonTypeInfo is JsonTypeInfo typeInfoOfT) @@ -59,7 +59,7 @@ public SystemTextJsonConverter(bool jsonb, Encoding textEncoding, JsonSerializer } else { - var (rentedChars, rentedBytes) = await SystemTextJsonConverter.ReadRentedBuffer(async, _textEncoding, byteCount, reader, cancellationToken).ConfigureAwait(false); + var (rentedChars, rentedBytes) = await JsonConverter.ReadRentedBuffer(async, _textEncoding, byteCount, reader, cancellationToken).ConfigureAwait(false); var result = _jsonTypeInfo is JsonTypeInfo typeInfoOfT ? JsonSerializer.Deserialize(rentedChars.AsSpan(), typeInfoOfT) : (T?)JsonSerializer.Deserialize(rentedChars.AsSpan(), (JsonTypeInfo)_jsonTypeInfo); @@ -85,18 +85,18 @@ public override Size GetSize(SizeContext context, T? value, ref object? writeSta else JsonSerializer.Serialize(stream, value, _objectTypeInfo); - return SystemTextJsonConverter.GetSizeCore(_jsonb, stream, _textEncoding, ref writeState); + return JsonConverter.GetSizeCore(_jsonb, stream, _textEncoding, ref writeState); } public override void Write(PgWriter writer, T? 
value) - => SystemTextJsonConverter.Write(_jsonb, async: false, writer, CancellationToken.None).GetAwaiter().GetResult(); + => JsonConverter.Write(_jsonb, async: false, writer, CancellationToken.None).GetAwaiter().GetResult(); public override ValueTask WriteAsync(PgWriter writer, T? value, CancellationToken cancellationToken = default) - => SystemTextJsonConverter.Write(_jsonb, async: true, writer, cancellationToken); + => JsonConverter.Write(_jsonb, async: true, writer, cancellationToken); } -// Split out to avoid unneccesary code duplication. -static class SystemTextJsonConverter +// Split out to avoid unnecessary code duplication. +static class JsonConverter { public const byte JsonbProtocolVersion = 1; // We pick a value that is the largest multiple of 4096 that is still smaller than the large object heap threshold (85K). diff --git a/src/Npgsql/Internal/Resolvers/AdoTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/AdoTypeInfoResolver.cs index 121581405b..e01af49bb3 100644 --- a/src/Npgsql/Internal/Resolvers/AdoTypeInfoResolver.cs +++ b/src/Npgsql/Internal/Resolvers/AdoTypeInfoResolver.cs @@ -94,11 +94,9 @@ static void AddInfos(TypeInfoMappingCollection mappings) DataTypeNames.Xml, DataTypeNames.Name, DataTypeNames.RefCursor }) { mappings.AddType(dataTypeName, - static (options, mapping, _) => mapping.CreateInfo(options, new StringTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text), - MatchRequirement.DataTypeName); + static (options, mapping, _) => mapping.CreateInfo(options, new StringTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text), isDefault: true); mappings.AddStructType(dataTypeName, - static (options, mapping, _) => mapping.CreateInfo(options, new CharTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text), - MatchRequirement.DataTypeName); + static (options, mapping, _) => mapping.CreateInfo(options, new CharTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text)); // Uses the 
bytea converters, as neither type has a header. mappings.AddType(dataTypeName, static (options, mapping, _) => mapping.CreateInfo(options, new ArrayByteaConverter()), diff --git a/src/Npgsql/Internal/Resolvers/SystemTextJsonDynamicTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/JsonDynamicTypeInfoResolver.cs similarity index 80% rename from src/Npgsql/Internal/Resolvers/SystemTextJsonDynamicTypeInfoResolver.cs rename to src/Npgsql/Internal/Resolvers/JsonDynamicTypeInfoResolver.cs index 580004edd3..26170d8a92 100644 --- a/src/Npgsql/Internal/Resolvers/SystemTextJsonDynamicTypeInfoResolver.cs +++ b/src/Npgsql/Internal/Resolvers/JsonDynamicTypeInfoResolver.cs @@ -6,17 +6,18 @@ using System.Text.Json.Serialization.Metadata; using Npgsql.Internal.Converters; using Npgsql.Internal.Postgres; +using Npgsql.Properties; namespace Npgsql.Internal.Resolvers; [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] -[RequiresDynamicCode("Serializing arbitary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] -class SystemTextJsonDynamicTypeInfoResolver : DynamicTypeInfoResolver, IPgTypeInfoResolver +[RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] +class JsonDynamicTypeInfoResolver : DynamicTypeInfoResolver, IPgTypeInfoResolver { protected TypeInfoMappingCollection Mappings { get; } = new(); protected JsonSerializerOptions _serializerOptions; - public SystemTextJsonDynamicTypeInfoResolver(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerOptions? serializerOptions = null) + public JsonDynamicTypeInfoResolver(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerOptions? 
serializerOptions = null) { #if NET7_0_OR_GREATER _serializerOptions = serializerOptions ??= JsonSerializerOptions.Default; @@ -37,13 +38,13 @@ void AddMappings(TypeInfoMappingCollection mappings, Type[] jsonbClrTypes, Type[ { var jsonb = dataTypeName == DataTypeNames.Jsonb; mappings.AddType(dataTypeName, (options, mapping, _) => - mapping.CreateInfo(options, new SystemTextJsonConverter(jsonb, options.TextEncoding, serializerOptions))); + mapping.CreateInfo(options, new JsonConverter(jsonb, options.TextEncoding, serializerOptions))); mappings.AddType(dataTypeName, (options, mapping, _) => - mapping.CreateInfo(options, new SystemTextJsonConverter(jsonb, options.TextEncoding, serializerOptions))); + mapping.CreateInfo(options, new JsonConverter(jsonb, options.TextEncoding, serializerOptions))); mappings.AddType(dataTypeName, (options, mapping, _) => - mapping.CreateInfo(options, new SystemTextJsonConverter(jsonb, options.TextEncoding, serializerOptions))); + mapping.CreateInfo(options, new JsonConverter(jsonb, options.TextEncoding, serializerOptions))); mappings.AddType(dataTypeName, (options, mapping, _) => - mapping.CreateInfo(options, new SystemTextJsonConverter(jsonb, options.TextEncoding, serializerOptions))); + mapping.CreateInfo(options, new JsonConverter(jsonb, options.TextEncoding, serializerOptions))); } AddUserMappings(jsonb: true, jsonbClrTypes); @@ -116,20 +117,19 @@ protected void AddArrayInfos(TypeInfoMappingCollection mappings, TypeInfoMapping static PgConverter CreateSystemTextJsonConverter(Type valueType, bool jsonb, Encoding textEncoding, JsonSerializerOptions serializerOptions, Type baseType) => (PgConverter)Activator.CreateInstance( - typeof(SystemTextJsonConverter<,>).MakeGenericType(valueType, baseType), + typeof(JsonConverter<,>).MakeGenericType(valueType, baseType), jsonb, textEncoding, - serializerOptions - )!; + serializerOptions)!; } [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] 
-[RequiresDynamicCode("Serializing arbitary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] -sealed class SystemTextJsonDynamicArrayTypeInfoResolver : SystemTextJsonDynamicTypeInfoResolver, IPgTypeInfoResolver +[RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] +sealed class JsonDynamicArrayTypeInfoResolver : JsonDynamicTypeInfoResolver, IPgTypeInfoResolver { new TypeInfoMappingCollection Mappings { get; } - public SystemTextJsonDynamicArrayTypeInfoResolver(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerOptions? serializerOptions = null) + public JsonDynamicArrayTypeInfoResolver(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerOptions? serializerOptions = null) : base(jsonbClrTypes, jsonClrTypes, serializerOptions) { Mappings = new TypeInfoMappingCollection(base.Mappings); diff --git a/src/Npgsql/Internal/Resolvers/SystemTextJsonTypeInfoResolvers.cs b/src/Npgsql/Internal/Resolvers/JsonTypeInfoResolvers.cs similarity index 77% rename from src/Npgsql/Internal/Resolvers/SystemTextJsonTypeInfoResolvers.cs rename to src/Npgsql/Internal/Resolvers/JsonTypeInfoResolvers.cs index 42107dc686..7af57fde45 100644 --- a/src/Npgsql/Internal/Resolvers/SystemTextJsonTypeInfoResolvers.cs +++ b/src/Npgsql/Internal/Resolvers/JsonTypeInfoResolvers.cs @@ -7,11 +7,11 @@ namespace Npgsql.Internal.Resolvers; -class SystemTextJsonTypeInfoResolver : IPgTypeInfoResolver +class JsonTypeInfoResolver : IPgTypeInfoResolver { protected TypeInfoMappingCollection Mappings { get; } = new(); - public SystemTextJsonTypeInfoResolver(JsonSerializerOptions? serializerOptions = null) + public JsonTypeInfoResolver(JsonSerializerOptions? 
serializerOptions = null) => AddTypeInfos(Mappings, serializerOptions); [UnconditionalSuppressMessage("Trimming", "IL2026", Justification = "Only used to request rooted and statically known types (JsonDocument,JsonElement etc).")] @@ -33,10 +33,10 @@ static void AddTypeInfos(TypeInfoMappingCollection mappings, JsonSerializerOptio { var jsonb = dataTypeName == DataTypeNames.Jsonb; mappings.AddType(dataTypeName, (options, mapping, _) => - mapping.CreateInfo(options, new SystemTextJsonConverter(jsonb, options.TextEncoding, serializerOptions)), + mapping.CreateInfo(options, new JsonConverter(jsonb, options.TextEncoding, serializerOptions)), isDefault: true); mappings.AddStructType(dataTypeName, (options, mapping, _) => - mapping.CreateInfo(options, new SystemTextJsonConverter(jsonb, options.TextEncoding, serializerOptions))); + mapping.CreateInfo(options, new JsonConverter(jsonb, options.TextEncoding, serializerOptions))); } } @@ -53,11 +53,11 @@ protected static void AddArrayInfos(TypeInfoMappingCollection mappings) => Mappings.Find(type, dataTypeName, options); } -sealed class SystemTextJsonArrayTypeInfoResolver : SystemTextJsonTypeInfoResolver, IPgTypeInfoResolver +sealed class JsonArrayTypeInfoResolver : JsonTypeInfoResolver, IPgTypeInfoResolver { new TypeInfoMappingCollection Mappings { get; } - public SystemTextJsonArrayTypeInfoResolver(JsonSerializerOptions? serializerOptions = null) : base(serializerOptions) + public JsonArrayTypeInfoResolver(JsonSerializerOptions? 
serializerOptions = null) : base(serializerOptions) { Mappings = new TypeInfoMappingCollection(base.Mappings); AddArrayInfos(Mappings); diff --git a/src/Npgsql/Internal/Resolvers/RecordTypeInfoResolvers.cs b/src/Npgsql/Internal/Resolvers/RecordTypeInfoResolvers.cs index 1728c07b9e..4cedd542f1 100644 --- a/src/Npgsql/Internal/Resolvers/RecordTypeInfoResolvers.cs +++ b/src/Npgsql/Internal/Resolvers/RecordTypeInfoResolvers.cs @@ -4,6 +4,7 @@ using Npgsql.Internal.Converters; using Npgsql.Internal.Postgres; using Npgsql.Properties; +using Npgsql.TypeMapping; namespace Npgsql.Internal.Resolvers; @@ -28,7 +29,11 @@ public static void CheckUnsupported(Type? type, DataTypeName? dataType if (type != typeof(object) && dataTypeName == DataTypeNames.Record) { throw new NotSupportedException( - string.Format(NpgsqlStrings.RecordsNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableRecords), typeof(TBuilder).Name)); + string.Format( + NpgsqlStrings.RecordsNotEnabled, + nameof(INpgsqlTypeMapperExtensions.EnableRecordsAsTuples), + typeof(TBuilder).Name, + nameof(NpgsqlSlimDataSourceBuilder.EnableRecords))); } } } @@ -64,23 +69,23 @@ static void AddInfos(TypeInfoMappingCollection mappings) mapping => mapping with { MatchRequirement = MatchRequirement.DataTypeName, - TypeMatchPredicate = type => type is null || (type is { IsConstructedGenericType: true, FullName: not null } - && type.FullName.StartsWith("System.Tuple", StringComparison.Ordinal)) + TypeMatchPredicate = type => type is { IsConstructedGenericType: true, FullName: not null } + && type.FullName.StartsWith("System.Tuple", StringComparison.Ordinal) }); mappings.AddStructType>(DataTypeNames.Record, Factory, mapping => mapping with { MatchRequirement = MatchRequirement.DataTypeName, - TypeMatchPredicate = type => type is null || (type is { IsConstructedGenericType: true, FullName: not null } - && type.FullName.StartsWith("System.ValueTuple", StringComparison.Ordinal)) + TypeMatchPredicate = type => type is { 
IsConstructedGenericType: true, FullName: not null } + && type.FullName.StartsWith("System.ValueTuple", StringComparison.Ordinal) }); } protected static void AddArrayInfos(TypeInfoMappingCollection mappings) { - mappings.AddArrayType>(DataTypeNames.Record); - mappings.AddStructArrayType>(DataTypeNames.Record); + mappings.AddArrayType>(DataTypeNames.Record, suppressObjectMapping: true); + mappings.AddStructArrayType>(DataTypeNames.Record, suppressObjectMapping: true); } static readonly TypeInfoFactory Factory = static (options, mapping, _) => diff --git a/src/Npgsql/Internal/Resolvers/UnsupportedTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/UnsupportedTypeInfoResolver.cs index 845816f7a7..3b98d26a2c 100644 --- a/src/Npgsql/Internal/Resolvers/UnsupportedTypeInfoResolver.cs +++ b/src/Npgsql/Internal/Resolvers/UnsupportedTypeInfoResolver.cs @@ -1,6 +1,9 @@ using System; using System.Collections; using Npgsql.Internal.Postgres; +using Npgsql.PostgresTypes; +using Npgsql.Properties; +using Npgsql.TypeMapping; namespace Npgsql.Internal.Resolvers; @@ -19,16 +22,48 @@ sealed class UnsupportedTypeInfoResolver : IPgTypeInfoResolver if (type is null) return null; + // These checks are here because their resolver types have RUC/RDC + if (type != typeof(object)) + { + switch (dataTypeName) + { + case "pg_catalog.json" or "pg_catalog.jsonb": + throw new NotSupportedException( + string.Format( + NpgsqlStrings.DynamicJsonNotEnabled, + type == typeof(object) ? 
"" : type.Name, + nameof(INpgsqlTypeMapperExtensions.EnableDynamicJsonMappings), + typeof(TBuilder).Name)); + + case not null when options.DatabaseInfo.GetPostgresType(dataTypeName) is PostgresEnumType: + throw new NotSupportedException( + string.Format( + NpgsqlStrings.UnmappedEnumsNotEnabled, + nameof(INpgsqlTypeMapperExtensions.EnableUnmappedTypes), + typeof(TBuilder).Name)); + + case not null when options.DatabaseInfo.GetPostgresType(dataTypeName) is PostgresRangeType: + throw new NotSupportedException( + string.Format( + NpgsqlStrings.UnmappedRangesNotEnabled, + nameof(INpgsqlTypeMapperExtensions.EnableUnmappedTypes), + typeof(TBuilder).Name)); + + case not null when options.DatabaseInfo.GetPostgresType(dataTypeName) is PostgresMultirangeType: + throw new NotSupportedException( + string.Format( + NpgsqlStrings.UnmappedRangesNotEnabled, + nameof(INpgsqlTypeMapperExtensions.EnableUnmappedTypes), + typeof(TBuilder).Name)); + } + } + if (TypeInfoMappingCollection.IsArrayLikeType(type, out var elementType) && TypeInfoMappingCollection.IsArrayLikeType(elementType, out _)) throw new NotSupportedException("Writing is not supported for jagged collections, use a multidimensional array instead."); if (typeof(IEnumerable).IsAssignableFrom(type) && !typeof(IList).IsAssignableFrom(type) && type != typeof(string) && (dataTypeName is null || dataTypeName.Value.IsArray)) throw new NotSupportedException("Writing is not supported for IEnumerable parameters, use an array or List instead."); - // TODO bring back json help message. - // $"Can't write CLR type {value.GetType()}. " + - // "You may need to use the System.Text.Json or Json.NET plugins, see the docs for more information." 
- return null; } } diff --git a/src/Npgsql/Internal/TypeInfoMapping.cs b/src/Npgsql/Internal/TypeInfoMapping.cs index c52a2331c0..b27402c0f7 100644 --- a/src/Npgsql/Internal/TypeInfoMapping.cs +++ b/src/Npgsql/Internal/TypeInfoMapping.cs @@ -254,10 +254,10 @@ Func GetDefaultConfigure(MatchRequirement matc }; Func GetArrayTypeMatchPredicate(Func elementTypeMatchPredicate) - => type => type is null || (type.IsArray && elementTypeMatchPredicate.Invoke(type.GetElementType()!)); + => type => type is null ? elementTypeMatchPredicate(null) : type.IsArray && elementTypeMatchPredicate.Invoke(type.GetElementType()!); Func GetListTypeMatchPredicate(Func elementTypeMatchPredicate) - => type => type is null || (type.IsConstructedGenericType && type.GetGenericTypeDefinition() == typeof(List<>) - && elementTypeMatchPredicate(type.GetGenericArguments()[0])); + => type => type is null ? elementTypeMatchPredicate(null) : type.IsConstructedGenericType && type.GetGenericTypeDefinition() == typeof(List<>) + && elementTypeMatchPredicate(type.GetGenericArguments()[0]); public void AddType(string dataTypeName, TypeInfoFactory createInfo, bool isDefault = false) where T : class => AddType(dataTypeName, createInfo, GetDefaultConfigure(isDefault)); @@ -284,20 +284,27 @@ public void AddResolverType(string dataTypeName, TypeInfoFactory createInfo, => AddType(dataTypeName, createInfo, configure); public void AddArrayType(string elementDataTypeName) where TElement : class - => AddArrayType(FindMapping(typeof(TElement), elementDataTypeName)); + => AddArrayType(FindMapping(typeof(TElement), elementDataTypeName), suppressObjectMapping: false); + + public void AddArrayType(string elementDataTypeName, bool suppressObjectMapping) where TElement : class + => AddArrayType(FindMapping(typeof(TElement), elementDataTypeName), suppressObjectMapping); public void AddArrayType(TypeInfoMapping elementMapping) where TElement : class + => AddArrayType(elementMapping, suppressObjectMapping: false); + + public 
void AddArrayType(TypeInfoMapping elementMapping, bool suppressObjectMapping) where TElement : class { // Always use a predicate to match all dimensions. - var arrayTypeMatchPredicate = GetArrayTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? (static type => type == typeof(TElement))); + var arrayTypeMatchPredicate = GetArrayTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? (static type => type is null || type == typeof(TElement))); var listTypeMatchPredicate = elementMapping.TypeMatchPredicate is not null ? GetListTypeMatchPredicate(elementMapping.TypeMatchPredicate) : null; - AddArrayType(elementMapping, typeof(TElement[]), CreateArrayBasedConverter, arrayTypeMatchPredicate, suppressObjectMapping: TryFindMapping(typeof(object), elementMapping.DataTypeName, out _)); + var arrayDataTypeName = GetArrayDataTypeName(elementMapping.DataTypeName); + + AddArrayType(elementMapping, typeof(TElement[]), CreateArrayBasedConverter, arrayTypeMatchPredicate, suppressObjectMapping: suppressObjectMapping || TryFindMapping(typeof(object), arrayDataTypeName, out _)); AddArrayType(elementMapping, typeof(List), CreateListBasedConverter, listTypeMatchPredicate, suppressObjectMapping: true); void AddArrayType(TypeInfoMapping elementMapping, Type type, Func converter, Func? 
typeMatchPredicate = null, bool suppressObjectMapping = false) { - var arrayDataTypeName = GetArrayDataTypeName(elementMapping.DataTypeName); var arrayMapping = new TypeInfoMapping(type, arrayDataTypeName, CreateComposedFactory(type, elementMapping, converter)) { MatchRequirement = elementMapping.MatchRequirement, @@ -317,20 +324,27 @@ void AddArrayType(TypeInfoMapping elementMapping, Type type, Func(string elementDataTypeName) where TElement : class - => AddResolverArrayType(FindMapping(typeof(TElement), elementDataTypeName)); + => AddResolverArrayType(FindMapping(typeof(TElement), elementDataTypeName), suppressObjectMapping: false); + + public void AddResolverArrayType(string elementDataTypeName, bool suppressObjectMapping) where TElement : class + => AddResolverArrayType(FindMapping(typeof(TElement), elementDataTypeName), suppressObjectMapping); public void AddResolverArrayType(TypeInfoMapping elementMapping) where TElement : class + => AddResolverArrayType(elementMapping, suppressObjectMapping: false); + + public void AddResolverArrayType(TypeInfoMapping elementMapping, bool suppressObjectMapping) where TElement : class { // Always use a predicate to match all dimensions. - var arrayTypeMatchPredicate = GetArrayTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? (static type => type == typeof(TElement))); + var arrayTypeMatchPredicate = GetArrayTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? (static type => type is null || type == typeof(TElement))); var listTypeMatchPredicate = elementMapping.TypeMatchPredicate is not null ? 
GetListTypeMatchPredicate(elementMapping.TypeMatchPredicate) : null; - AddResolverArrayType(elementMapping, typeof(TElement[]), CreateArrayBasedConverterResolver, arrayTypeMatchPredicate, suppressObjectMapping: TryFindMapping(typeof(object), elementMapping.DataTypeName, out _)); + var arrayDataTypeName = GetArrayDataTypeName(elementMapping.DataTypeName); + + AddResolverArrayType(elementMapping, typeof(TElement[]), CreateArrayBasedConverterResolver, arrayTypeMatchPredicate, suppressObjectMapping: suppressObjectMapping || TryFindMapping(typeof(object), arrayDataTypeName, out _)); AddResolverArrayType(elementMapping, typeof(List), CreateListBasedConverterResolver, listTypeMatchPredicate, suppressObjectMapping: true); void AddResolverArrayType(TypeInfoMapping elementMapping, Type type, Func converter, Func? typeMatchPredicate = null, bool suppressObjectMapping = false) { - var arrayDataTypeName = GetArrayDataTypeName(elementMapping.DataTypeName); var arrayMapping = new TypeInfoMapping(type, arrayDataTypeName, CreateComposedFactory(type, elementMapping, converter)) { MatchRequirement = elementMapping.MatchRequirement, @@ -381,13 +395,15 @@ void AddStructType(Type type, Type nullableType, string dataTypeName, TypeInfoFa } public void AddStructArrayType(string elementDataTypeName) where TElement : struct - => AddStructArrayType(FindMapping(typeof(TElement), elementDataTypeName), FindMapping(typeof(TElement?), elementDataTypeName), null); + => AddStructArrayType(FindMapping(typeof(TElement), elementDataTypeName), FindMapping(typeof(TElement?), elementDataTypeName), suppressObjectMapping: false); - public void AddStructArrayType(string elementDataTypeName, Func configure) where TElement : struct - => AddStructArrayType(FindMapping(typeof(TElement), elementDataTypeName), FindMapping(typeof(TElement?), elementDataTypeName), configure); + public void AddStructArrayType(string elementDataTypeName, bool suppressObjectMapping) where TElement : struct + => 
AddStructArrayType(FindMapping(typeof(TElement), elementDataTypeName), FindMapping(typeof(TElement?), elementDataTypeName), suppressObjectMapping); - public void AddStructArrayType(TypeInfoMapping elementMapping, TypeInfoMapping nullableElementMapping, - Func? configure) where TElement : struct + public void AddStructArrayType(TypeInfoMapping elementMapping, TypeInfoMapping nullableElementMapping) where TElement : struct + => AddStructArrayType(elementMapping, nullableElementMapping, suppressObjectMapping: false); + + public void AddStructArrayType(TypeInfoMapping elementMapping, TypeInfoMapping nullableElementMapping, bool suppressObjectMapping) where TElement : struct { // Always use a predicate to match all dimensions. var arrayTypeMatchPredicate = GetArrayTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? (static type => type is null || type == typeof(TElement))); @@ -396,31 +412,29 @@ public void AddStructArrayType(TypeInfoMapping elementMapping, TypeInf var listTypeMatchPredicate = elementMapping.TypeMatchPredicate is not null ? GetListTypeMatchPredicate(elementMapping.TypeMatchPredicate) : null; var nullableListTypeMatchPredicate = nullableElementMapping.TypeMatchPredicate is not null ? GetListTypeMatchPredicate(nullableElementMapping.TypeMatchPredicate) : null; + var arrayDataTypeName = GetArrayDataTypeName(elementMapping.DataTypeName); + AddStructArrayType(elementMapping, nullableElementMapping, typeof(TElement[]), typeof(TElement?[]), CreateArrayBasedConverter, CreateArrayBasedConverter, - arrayTypeMatchPredicate, nullableArrayTypeMatchPredicate, - configure, suppressObjectMapping: TryFindMapping(typeof(object), elementMapping.DataTypeName, out _)); + arrayTypeMatchPredicate, nullableArrayTypeMatchPredicate, suppressObjectMapping: suppressObjectMapping || TryFindMapping(typeof(object), arrayDataTypeName, out _)); // Don't add the object converter for the list based converter. 
AddStructArrayType(elementMapping, nullableElementMapping, typeof(List), typeof(List), CreateListBasedConverter, CreateListBasedConverter, - listTypeMatchPredicate, nullableListTypeMatchPredicate, - configure, suppressObjectMapping: true); + listTypeMatchPredicate, nullableListTypeMatchPredicate, suppressObjectMapping: true); } // Lives outside to prevent capture of TElement. void AddStructArrayType(TypeInfoMapping elementMapping, TypeInfoMapping nullableElementMapping, Type type, Type nullableType, Func converter, Func nullableConverter, - Func? typeMatchPredicate, Func? nullableTypeMatchPredicate, Func? configure, bool suppressObjectMapping) + Func? typeMatchPredicate, Func? nullableTypeMatchPredicate, bool suppressObjectMapping) { var arrayDataTypeName = GetArrayDataTypeName(elementMapping.DataTypeName); - var arrayMapping = new TypeInfoMapping(type, arrayDataTypeName, CreateComposedFactory(type, elementMapping, converter)) { MatchRequirement = elementMapping.MatchRequirement, TypeMatchPredicate = typeMatchPredicate }; - arrayMapping = configure?.Invoke(arrayMapping) ?? arrayMapping; var nullableArrayMapping = new TypeInfoMapping(nullableType, arrayDataTypeName, CreateComposedFactory(nullableType, nullableElementMapping, nullableConverter)) { MatchRequirement = arrayMapping.MatchRequirement, @@ -483,15 +497,23 @@ void AddResolverStructType(Type type, Type nullableType, string dataTypeName, Ty { MatchRequirement = mapping.MatchRequirement, TypeMatchPredicate = mapping.TypeMatchPredicate is not null - ? type => type is null || (Nullable.GetUnderlyingType(type) is { } underlying && mapping.TypeMatchPredicate(underlying)) + ? type => type is null + ? 
mapping.TypeMatchPredicate(null) + : Nullable.GetUnderlyingType(type) is { } underlying && mapping.TypeMatchPredicate(underlying) : null }); } public void AddResolverStructArrayType(string elementDataTypeName) where TElement : struct - => AddResolverStructArrayType(FindMapping(typeof(TElement), elementDataTypeName), FindMapping(typeof(TElement?), elementDataTypeName)); + => AddResolverStructArrayType(FindMapping(typeof(TElement), elementDataTypeName), FindMapping(typeof(TElement?), elementDataTypeName), suppressObjectMapping: false); + + public void AddResolverStructArrayType(string elementDataTypeName, bool suppressObjectMapping) where TElement : struct + => AddResolverStructArrayType(FindMapping(typeof(TElement), elementDataTypeName), FindMapping(typeof(TElement?), elementDataTypeName), suppressObjectMapping); public void AddResolverStructArrayType(TypeInfoMapping elementMapping, TypeInfoMapping nullableElementMapping) where TElement : struct + => AddResolverStructArrayType(elementMapping, nullableElementMapping, suppressObjectMapping: false); + + public void AddResolverStructArrayType(TypeInfoMapping elementMapping, TypeInfoMapping nullableElementMapping, bool suppressObjectMapping) where TElement : struct { // Always use a predicate to match all dimensions. var arrayTypeMatchPredicate = GetArrayTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? (static type => type is null || type == typeof(TElement))); @@ -499,9 +521,11 @@ public void AddResolverStructArrayType(TypeInfoMapping elementMapping, type is null || (Nullable.GetUnderlyingType(type) is { } underlying && underlying == typeof(TElement)))); var listTypeMatchPredicate = elementMapping.TypeMatchPredicate is not null ? 
GetListTypeMatchPredicate(elementMapping.TypeMatchPredicate) : null; + var arrayDataTypeName = GetArrayDataTypeName(elementMapping.DataTypeName); + AddResolverStructArrayType(elementMapping, nullableElementMapping, typeof(TElement[]), typeof(TElement?[]), CreateArrayBasedConverterResolver, - CreateArrayBasedConverterResolver, suppressObjectMapping: TryFindMapping(typeof(object), elementMapping.DataTypeName, out _), arrayTypeMatchPredicate, nullableArrayTypeMatchPredicate); + CreateArrayBasedConverterResolver, suppressObjectMapping: suppressObjectMapping || TryFindMapping(typeof(object), arrayDataTypeName, out _), arrayTypeMatchPredicate, nullableArrayTypeMatchPredicate); // Don't add the object converter for the list based converter. AddResolverStructArrayType(elementMapping, nullableElementMapping, typeof(List), typeof(List), diff --git a/src/Npgsql/NpgsqlConnection.cs b/src/Npgsql/NpgsqlConnection.cs index 32523a7f5a..b511f93841 100644 --- a/src/Npgsql/NpgsqlConnection.cs +++ b/src/Npgsql/NpgsqlConnection.cs @@ -128,8 +128,6 @@ public NpgsqlConnection() /// /// The connection used to open the PostgreSQL database. - [RequiresUnreferencedCode("ConnectionString based NpgsqlConnections use reflection to handle various PostgreSQL types like records, unmapped enums, etc. Use NpgsqlSlimDataSourceBuilder to start with a reduced - reflection free - set and opt into what your app specifically requires.")] - [RequiresDynamicCode("ConnectionString based NpgsqlConnections use reflection to handle various PostgreSQL types like records, unmapped enums, etc. This can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] public NpgsqlConnection(string? connectionString) : this() => ConnectionString = connectionString; @@ -170,8 +168,6 @@ internal static NpgsqlConnection FromDataSource(NpgsqlDataSource dataSource) /// A task representing the asynchronous operation. 
public override Task OpenAsync(CancellationToken cancellationToken) => Open(async: true, cancellationToken); - [RequiresUnreferencedCode("NpgsqlConnection uses reflection to handle various PostgreSQL types like records, unmapped enums etc. Use NpgsqlSlimDataSourceBuilder to start with a reduced - reflection free - set and opt into what your app specifically requires.")] - [RequiresDynamicCode("NpgsqlConnection uses reflection to handle various PostgreSQL types like records, unmapped enums. This can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] void SetupDataSource() { // Fast path: a pool already corresponds to this exact version of the connection string. @@ -379,11 +375,6 @@ async Task PerformMultiplexingStartupCheck(bool async, CancellationToken cancell public override string ConnectionString { get => _userFacingConnectionString; - - [RequiresUnreferencedCode("ConnectionString based NpgsqlConnections use reflection to handle various PostgreSQL types like records, unmapped enums, etc. Use NpgsqlSlimDataSourceBuilder to start with a reduced - reflection free - set and opt into what your app specifically requires.")] - [RequiresDynamicCode("ConnectionString based NpgsqlConnections use reflection to handle various PostgreSQL types like records, unmapped enums, etc. This can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] - [UnconditionalSuppressMessage("Trimming", "IL2046", Justification = "At the Npgsql level we cannot add RUC to DbConnection.")] - [UnconditionalSuppressMessage("Aot", "IL3051", Justification = "At the Npgsql level we cannot add RDC to DbConnection.")] set { CheckClosed(); @@ -1830,8 +1821,6 @@ object ICloneable.Clone() /// (password, SSL callbacks) while changing other connection parameters (e.g. 
/// database or pooling) /// - [RequiresUnreferencedCode("ConnectionString based NpgsqlConnections use reflection to handle various PostgreSQL types like records, unmapped enums, etc. Use NpgsqlSlimDataSourceBuilder to start with a reduced - reflection free - set and opt into what your app specifically requires.")] - [RequiresDynamicCode("ConnectionString based NpgsqlConnections use reflection to handle various PostgreSQL types like records, unmapped enums, etc. This can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] public NpgsqlConnection CloneWith(string connectionString) { CheckDisposed(); @@ -1880,14 +1869,7 @@ public override void ChangeDatabase(string dbName) /// /// DB provider factory. /// - protected override DbProviderFactory DbProviderFactory - { - [RequiresUnreferencedCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums etc. Use NpgsqlSlimDataSourceBuilder to start with a reduced - reflection free - set and opt into what your app specifically requires.")] - [RequiresDynamicCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums. This can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] - [UnconditionalSuppressMessage("Trimming", "IL2046", Justification = "At the Npgsql level we cannot add RUC to DbProviderFactory.")] - [UnconditionalSuppressMessage("Aot", "IL3051", Justification = "At the Npgsql level we cannot add RDC to DbProviderFactory.")] - get => NpgsqlFactory.Instance; - } + protected override DbProviderFactory DbProviderFactory => NpgsqlFactory.Instance; /// /// Clears the connection pool. 
All idle physical connections in the pool of the given connection are diff --git a/src/Npgsql/NpgsqlDataAdapter.cs b/src/Npgsql/NpgsqlDataAdapter.cs index f34b1aeec7..c18773b2d6 100644 --- a/src/Npgsql/NpgsqlDataAdapter.cs +++ b/src/Npgsql/NpgsqlDataAdapter.cs @@ -62,8 +62,6 @@ public NpgsqlDataAdapter(string selectCommandText, NpgsqlConnection selectConnec /// /// /// - [RequiresUnreferencedCode("ConnectionString based NpgsqlConnections use reflection to handle various PostgreSQL types like records, unmapped enums, etc. Use NpgsqlSlimDataSourceBuilder to start with a reduced - reflection free - set and opt into what your app specifically requires.")] - [RequiresDynamicCode("ConnectionString based NpgsqlConnections use reflection to handle various PostgreSQL types like records, unmapped enums, etc. This can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] public NpgsqlDataAdapter(string selectCommandText, string selectConnectionString) : this(selectCommandText, new NpgsqlConnection(selectConnectionString)) {} diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index 5a408a5020..18c0d05f32 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -207,16 +207,12 @@ protected override DbBatch CreateDbBatch() /// /// Creates a new for the given . /// - [RequiresUnreferencedCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums etc. Use NpgsqlSlimDataSourceBuilder to start with a reduced - reflection free - set and opt into what your app specifically requires.")] - [RequiresDynamicCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums. This can require creating new generic types or methods, which requires creating code at runtime. 
This may not work when AOT compiling.")] public static NpgsqlDataSource Create(string connectionString) => new NpgsqlDataSourceBuilder(connectionString).Build(); /// /// Creates a new for the given . /// - [RequiresUnreferencedCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums etc. Use NpgsqlSlimDataSourceBuilder to start with a reduced - reflection free - set and opt into what your app specifically requires.")] - [RequiresDynamicCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums. This can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] public static NpgsqlDataSource Create(NpgsqlConnectionStringBuilder connectionStringBuilder) => Create(connectionStringBuilder.ToString()); diff --git a/src/Npgsql/NpgsqlDataSourceBuilder.cs b/src/Npgsql/NpgsqlDataSourceBuilder.cs index a7c188e147..0d9d79ec58 100644 --- a/src/Npgsql/NpgsqlDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlDataSourceBuilder.cs @@ -4,6 +4,7 @@ using System.Net.Security; using System.Security.Cryptography.X509Certificates; using System.Text.Json; +using System.Text.Json.Nodes; using System.Threading; using System.Threading.Tasks; using Microsoft.Extensions.Logging; @@ -17,8 +18,6 @@ namespace Npgsql; /// /// Provides a simple API for configuring and creating an , from which database connections can be obtained. /// -[RequiresUnreferencedCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums, etc. Use NpgsqlSlimDataSourceBuilder to start with a reduced - reflection free - set and opt into what your app specifically requires.")] -[RequiresDynamicCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums, etc. This can require creating new generic types or methods, which requires creating code at runtime. 
This may not work when AOT compiling.")] public sealed class NpgsqlDataSourceBuilder : INpgsqlTypeMapper { static UnsupportedTypeInfoResolver UnsupportedTypeInfoResolver { get; } = new(); @@ -56,29 +55,20 @@ internal static void ResetGlobalMappings(bool overwrite) { overwrite ? new AdoTypeInfoResolver() : AdoTypeInfoResolver.Instance, new ExtraConversionsResolver(), - new SystemTextJsonTypeInfoResolver(), - new SystemTextJsonDynamicTypeInfoResolver(), + new JsonTypeInfoResolver(), new RangeTypeInfoResolver(), new RecordTypeInfoResolver(), - new TupledRecordTypeInfoResolver(), new FullTextSearchTypeInfoResolver(), new NetworkTypeInfoResolver(), new GeometricTypeInfoResolver(), new LTreeTypeInfoResolver(), - new UnmappedEnumTypeInfoResolver(), - new UnmappedRangeTypeInfoResolver(), - new UnmappedMultirangeTypeInfoResolver(), + // Arrays new AdoArrayTypeInfoResolver(), new ExtraConversionsArrayTypeInfoResolver(), - new SystemTextJsonArrayTypeInfoResolver(), - new SystemTextJsonDynamicArrayTypeInfoResolver(), + new JsonArrayTypeInfoResolver(), new RangeArrayTypeInfoResolver(), new RecordArrayTypeInfoResolver(), - new TupledRecordArrayTypeInfoResolver(), - new UnmappedEnumArrayTypeInfoResolver(), - new UnmappedRangeArrayTypeInfoResolver(), - new UnmappedMultirangeArrayTypeInfoResolver(), }, overwrite); static NpgsqlDataSourceBuilder() @@ -97,32 +87,25 @@ void AddDefaultFeatures() _internalBuilder.EnableTransportSecurity(); _internalBuilder.EnableIntegratedSecurity(); AddTypeInfoResolver(UnsupportedTypeInfoResolver); + // Reverse order arrays. 
- AddTypeInfoResolver(new UnmappedMultirangeArrayTypeInfoResolver()); - AddTypeInfoResolver(new UnmappedRangeArrayTypeInfoResolver()); - AddTypeInfoResolver(new UnmappedEnumArrayTypeInfoResolver()); - AddTypeInfoResolver(new TupledRecordArrayTypeInfoResolver()); AddTypeInfoResolver(new RecordArrayTypeInfoResolver()); AddTypeInfoResolver(new RangeArrayTypeInfoResolver()); - AddTypeInfoResolver(new SystemTextJsonDynamicArrayTypeInfoResolver()); - AddTypeInfoResolver(new SystemTextJsonArrayTypeInfoResolver()); + AddTypeInfoResolver(new JsonArrayTypeInfoResolver()); AddTypeInfoResolver(new ExtraConversionsArrayTypeInfoResolver()); AddTypeInfoResolver(new AdoArrayTypeInfoResolver()); + // Reverse order. - AddTypeInfoResolver(new UnmappedMultirangeTypeInfoResolver()); - AddTypeInfoResolver(new UnmappedRangeTypeInfoResolver()); - AddTypeInfoResolver(new UnmappedEnumTypeInfoResolver()); AddTypeInfoResolver(new LTreeTypeInfoResolver()); AddTypeInfoResolver(new GeometricTypeInfoResolver()); AddTypeInfoResolver(new NetworkTypeInfoResolver()); AddTypeInfoResolver(new FullTextSearchTypeInfoResolver()); - AddTypeInfoResolver(new TupledRecordTypeInfoResolver()); AddTypeInfoResolver(new RecordTypeInfoResolver()); AddTypeInfoResolver(new RangeTypeInfoResolver()); - AddTypeInfoResolver(new SystemTextJsonDynamicTypeInfoResolver()); - AddTypeInfoResolver(new SystemTextJsonTypeInfoResolver()); + AddTypeInfoResolver(new JsonTypeInfoResolver()); AddTypeInfoResolver(new ExtraConversionsResolver()); AddTypeInfoResolver(AdoTypeInfoResolver.Instance); + var plugins = new List(GlobalTypeMapper.Instance.GetPluginResolvers()); plugins.Reverse(); foreach (var plugin in plugins) @@ -290,28 +273,6 @@ public void AddTypeInfoResolver(IPgTypeInfoResolver resolver) void INpgsqlTypeMapper.Reset() => _internalBuilder.ResetTypeMappings(); - /// - /// Sets up System.Text.Json mappings for the PostgreSQL json and jsonb types. - /// - /// Options to customize JSON serialization and deserialization. 
- /// - /// A list of CLR types to map to PostgreSQL jsonb (no need to specify ). - /// - /// - /// A list of CLR types to map to PostgreSQL json (no need to specify ). - /// - [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] - [RequiresDynamicCode("Serializing arbitary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] - public NpgsqlDataSourceBuilder UseSystemTextJson( - JsonSerializerOptions? serializerOptions = null, - Type[]? jsonbClrTypes = null, - Type[]? jsonClrTypes = null) - { - AddTypeInfoResolver(new SystemTextJsonDynamicArrayTypeInfoResolver(jsonbClrTypes, jsonClrTypes, serializerOptions)); - AddTypeInfoResolver(new SystemTextJsonDynamicTypeInfoResolver(jsonbClrTypes, jsonClrTypes, serializerOptions)); - return this; - } - /// public INpgsqlTypeMapper MapEnum<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] TEnum>(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) where TEnum : struct, Enum @@ -326,6 +287,7 @@ public NpgsqlDataSourceBuilder UseSystemTextJson( => _internalBuilder.UnmapEnum(pgName, nameTranslator); /// + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] public INpgsqlTypeMapper MapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>( string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) { @@ -334,6 +296,7 @@ public NpgsqlDataSourceBuilder UseSystemTextJson( } /// + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. 
This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] public INpgsqlTypeMapper MapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) { @@ -342,11 +305,13 @@ public INpgsqlTypeMapper MapComposite([DynamicallyAccessedMembers(DynamicallyAcc } /// + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] public bool UnmapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>( string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) => _internalBuilder.UnmapComposite(pgName, nameTranslator); /// + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] public bool UnmapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) => _internalBuilder.UnmapComposite(clrType, pgName, nameTranslator); diff --git a/src/Npgsql/NpgsqlFactory.cs b/src/Npgsql/NpgsqlFactory.cs index 40d8b6de91..7d21a917a0 100644 --- a/src/Npgsql/NpgsqlFactory.cs +++ b/src/Npgsql/NpgsqlFactory.cs @@ -8,8 +8,6 @@ namespace Npgsql; /// A factory to create instances of various Npgsql objects. 
/// [Serializable] -[RequiresUnreferencedCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums etc. Use NpgsqlSlimDataSourceBuilder to start with a reduced - reflection free - set and opt into what your app specifically requires.")] -[RequiresDynamicCode("NpgsqlDataSource uses reflection to handle various PostgreSQL types like records, unmapped enums. This can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] public sealed class NpgsqlFactory : DbProviderFactory, IServiceProvider { /// diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index 274d37c00a..929cd5a850 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -273,7 +273,7 @@ public NpgsqlSlimDataSourceBuilder UsePeriodicPasswordProvider( => _userTypeMapper.UnmapEnum(pgName, nameTranslator); /// - [RequiresDynamicCode("Serializing arbitary types can require creating new generic types or methods. This may not work when AOT compiling.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] public INpgsqlTypeMapper MapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>( string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) { @@ -282,13 +282,13 @@ public NpgsqlSlimDataSourceBuilder UsePeriodicPasswordProvider( } /// - [RequiresDynamicCode("Serializing arbitary types can require creating new generic types or methods. 
This may not work when AOT compiling.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] public bool UnmapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>( string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) => _userTypeMapper.UnmapComposite(typeof(T), pgName, nameTranslator); /// - [RequiresDynamicCode("Serializing arbitary types can require creating new generic types or methods. This may not work when AOT compiling.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] public INpgsqlTypeMapper MapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) { @@ -297,7 +297,7 @@ public INpgsqlTypeMapper MapComposite([DynamicallyAccessedMembers(DynamicallyAcc } /// - [RequiresDynamicCode("Serializing arbitary types can require creating new generic types or methods. This may not work when AOT compiling.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. 
This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] public bool UnmapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) => _userTypeMapper.UnmapComposite(clrType, pgName, nameTranslator); @@ -367,29 +367,7 @@ public NpgsqlSlimDataSourceBuilder EnableMultiranges() } /// - /// Sets up System.Text.Json mappings for the PostgreSQL json and jsonb types. - /// - /// Options to customize JSON serialization and deserialization. - /// - /// A list of CLR types to map to PostgreSQL jsonb (no need to specify ). - /// - /// - /// A list of CLR types to map to PostgreSQL json (no need to specify ). - /// - /// The same builder instance so that multiple calls can be chained. - [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] - [RequiresDynamicCode("Serializing arbitary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] - public NpgsqlSlimDataSourceBuilder UseSystemTextJson( - JsonSerializerOptions? serializerOptions = null, - Type[]? jsonbClrTypes = null, - Type[]? jsonClrTypes = null) - { - AddTypeInfoResolver(new SystemTextJsonDynamicTypeInfoResolver(jsonbClrTypes, jsonClrTypes, serializerOptions)); - return this; - } - - /// - /// Sets up mappings for the PostgreSQL record type. + /// Sets up mappings for the PostgreSQL record type as a .NET object[]. /// /// The same builder instance so that multiple calls can be chained. 
public NpgsqlSlimDataSourceBuilder EnableRecords() diff --git a/src/Npgsql/Properties/NpgsqlStrings.Designer.cs b/src/Npgsql/Properties/NpgsqlStrings.Designer.cs index 8a81077ffa..e9eb4810a4 100644 --- a/src/Npgsql/Properties/NpgsqlStrings.Designer.cs +++ b/src/Npgsql/Properties/NpgsqlStrings.Designer.cs @@ -194,5 +194,23 @@ internal static string TimestampNoDateTimeUtc { return ResourceManager.GetString("TimestampNoDateTimeUtc", resourceCulture); } } + + internal static string DynamicJsonNotEnabled { + get { + return ResourceManager.GetString("DynamicJsonNotEnabled", resourceCulture); + } + } + + internal static string UnmappedEnumsNotEnabled { + get { + return ResourceManager.GetString("UnmappedEnumsNotEnabled", resourceCulture); + } + } + + internal static string UnmappedRangesNotEnabled { + get { + return ResourceManager.GetString("UnmappedRangesNotEnabled", resourceCulture); + } + } } } diff --git a/src/Npgsql/Properties/NpgsqlStrings.resx b/src/Npgsql/Properties/NpgsqlStrings.resx index 4b2535fb6f..375b516576 100644 --- a/src/Npgsql/Properties/NpgsqlStrings.resx +++ b/src/Npgsql/Properties/NpgsqlStrings.resx @@ -67,7 +67,8 @@ ValidationRootCertificateCallback cannot be used in conjunction with UserCertificateValidationCallback; when registering a validation callback, perform whatever validation you require in that callback. - Records aren't enabled; please call {0} on {1} to enable records. + Could not read a PostgreSQL record. If you're attempting to read a record as a .NET tuple, call '{0}' on '{1}' or NpgsqlConnection.GlobalTypeMapper (see https://www.npgsql.org/doc/types/basic.html and the 8.0 release notes for more details). If you're reading a record as a .NET object array using NpgsqlSlimDataSourceBuilder, call '{2}'. + Full-text search isn't enabled; please call {0} on {1} to enable full-text search. @@ -93,4 +94,14 @@ Cannot write DateTime with Kind=UTC to PostgreSQL type '{0}', consider using '{1}'. 
Note that it's not possible to mix DateTimes with different Kinds in an array, range, or multirange. + + Type '{0}' required dynamic JSON serialization, which requires an explicit opt-in; call '{1}' on '{2}' or NpgsqlConnection.GlobalTypeMapper (see https://www.npgsql.org/doc/types/json.html and the 8.0 release notes for more details). Alternatively, if you meant to use Newtonsoft JSON.NET instead of System.Text.Json, call UseJsonNet() instead. + + + + Reading and writing unmapped enums requires an explicit opt-in; call '{0}' on '{1}' or NpgsqlConnection.GlobalTypeMapper (see https://www.npgsql.org/doc/types/enums_and_composites.html and the 8.0 release notes for more details). + + + Reading and writing unmapped ranges and multiranges requires an explicit opt-in; call '{0}' on '{1}' or NpgsqlConnection.GlobalTypeMapper (see https://www.npgsql.org/doc/types/ranges.html and the 8.0 release notes for more details). + diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index 190f7e3948..c3d776ae30 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -1,9 +1,10 @@ -#nullable enable +#nullable enable const Npgsql.PostgresErrorCodes.IdleSessionTimeout = "57P05" -> string! Npgsql.ChannelBinding Npgsql.ChannelBinding.Disable = 0 -> Npgsql.ChannelBinding Npgsql.ChannelBinding.Prefer = 1 -> Npgsql.ChannelBinding Npgsql.ChannelBinding.Require = 2 -> Npgsql.ChannelBinding +Npgsql.INpgsqlTypeMapperExtensions Npgsql.NpgsqlBatch.CreateBatchCommand() -> Npgsql.NpgsqlBatchCommand! Npgsql.NpgsqlConnectionStringBuilder.ChannelBinding.get -> Npgsql.ChannelBinding Npgsql.NpgsqlConnectionStringBuilder.ChannelBinding.set -> void @@ -14,7 +15,6 @@ Npgsql.NpgsqlDataSourceBuilder.Name.get -> string? Npgsql.NpgsqlDataSourceBuilder.Name.set -> void Npgsql.NpgsqlDataSourceBuilder.UseRootCertificate(System.Security.Cryptography.X509Certificates.X509Certificate2? rootCertificate) -> Npgsql.NpgsqlDataSourceBuilder! 
Npgsql.NpgsqlDataSourceBuilder.UseRootCertificateCallback(System.Func? rootCertificateCallback) -> Npgsql.NpgsqlDataSourceBuilder! -Npgsql.NpgsqlDataSourceBuilder.UseSystemTextJson(System.Text.Json.JsonSerializerOptions? serializerOptions = null, System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder Npgsql.NpgsqlSlimDataSourceBuilder.AddTypeInfoResolver(Npgsql.Internal.IPgTypeInfoResolver! resolver) -> void Npgsql.NpgsqlSlimDataSourceBuilder.Build() -> Npgsql.NpgsqlDataSource! @@ -50,7 +50,6 @@ Npgsql.NpgsqlSlimDataSourceBuilder.UsePeriodicPasswordProvider(System.Func? connectionInitializer, System.Func? connectionInitializerAsync) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UseRootCertificate(System.Security.Cryptography.X509Certificates.X509Certificate2? rootCertificate) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UseRootCertificateCallback(System.Func? rootCertificateCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder.UseSystemTextJson(System.Text.Json.JsonSerializerOptions? serializerOptions = null, System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UseUserCertificateValidationCallback(System.Net.Security.RemoteCertificateValidationCallback! userCertificateValidationCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.PostgresTypes.PostgresArrayType.PostgresArrayType(string! ns, string! name, uint oid, Npgsql.PostgresTypes.PostgresType! elementPostgresType) -> void Npgsql.PostgresTypes.PostgresBaseType.PostgresBaseType(string! ns, string! name, uint oid) -> void @@ -112,6 +111,9 @@ override NpgsqlTypes.NpgsqlCidr.ToString() -> string! *REMOVED*static NpgsqlTypes.NpgsqlInet.ToIPAddress(NpgsqlTypes.NpgsqlInet inet) -> System.Net.IPAddress! 
*REMOVED*static NpgsqlTypes.NpgsqlInet.ToNpgsqlInet(System.Net.IPAddress? ip) -> NpgsqlTypes.NpgsqlInet *REMOVED*Npgsql.NpgsqlDataSourceBuilder.AddTypeResolverFactory(Npgsql.Internal.TypeHandling.TypeHandlerResolverFactory! resolverFactory) -> void +static Npgsql.INpgsqlTypeMapperExtensions.EnableDynamicJsonMappings(this T mapper, System.Text.Json.JsonSerializerOptions? serializerOptions = null, System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> T +static Npgsql.INpgsqlTypeMapperExtensions.EnableRecordsAsTuples(this T mapper) -> T +static Npgsql.INpgsqlTypeMapperExtensions.EnableUnmappedTypes(this T mapper) -> T static NpgsqlTypes.NpgsqlInet.explicit operator NpgsqlTypes.NpgsqlInet(System.Net.IPAddress! ip) -> NpgsqlTypes.NpgsqlInet *REMOVED*Npgsql.NpgsqlParameter.ConvertedValue.get -> object? *REMOVED*Npgsql.NpgsqlParameter.ConvertedValue.set -> void diff --git a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs index 1f2b52aaa8..5aba125808 100644 --- a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs +++ b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs @@ -2,9 +2,13 @@ using System.Collections.Generic; using System.Diagnostics.CodeAnalysis; using System.Linq; +using System.Text.Json; +using System.Text.Json.Nodes; using System.Threading; using Npgsql.Internal; using Npgsql.Internal.Postgres; +using Npgsql.Internal.Resolvers; +using NpgsqlTypes; namespace Npgsql.TypeMapping; @@ -216,17 +220,17 @@ public INpgsqlNameTranslator DefaultNameTranslator } /// - [RequiresDynamicCode("Serializing arbitary types can require creating new generic types or methods. This may not work when AOT compiling.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. 
This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] public INpgsqlTypeMapper MapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) => MapComposite(typeof(T), pgName, nameTranslator); /// - [RequiresDynamicCode("Serializing arbitary types can require creating new generic types or methods. This may not work when AOT compiling.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] public bool UnmapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) => UnmapComposite(typeof(T), pgName, nameTranslator); /// - [RequiresDynamicCode("Serializing arbitary types can require creating new generic types or methods. This may not work when AOT compiling.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] public INpgsqlTypeMapper MapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] Type clrType, string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) { @@ -244,7 +248,7 @@ public INpgsqlTypeMapper MapComposite([DynamicallyAccessedMembers(DynamicallyAcc } /// - [RequiresDynamicCode("Serializing arbitary types can require creating new generic types or methods. This may not work when AOT compiling.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] public bool UnmapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) { diff --git a/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs b/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs index 210942b8f8..fc77ee7316 100644 --- a/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs +++ b/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs @@ -1,5 +1,7 @@ using System; using System.Diagnostics.CodeAnalysis; +using System.Text.Json; +using System.Text.Json.Nodes; using Npgsql.Internal; using Npgsql.NameTranslation; using NpgsqlTypes; @@ -82,7 +84,7 @@ public interface INpgsqlTypeMapper /// Defaults to . /// /// The .NET type to be mapped - [RequiresDynamicCode("Serializing arbitary types can require creating new generic types or methods. This may not work when AOT compiling.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] INpgsqlTypeMapper MapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>( string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null); @@ -98,7 +100,7 @@ public interface INpgsqlTypeMapper /// A component which will be used to translate CLR names (e.g. SomeClass) into database names (e.g. some_class). /// Defaults to /// - [RequiresDynamicCode("Serializing arbitary types can require creating new generic types or methods. This may not work when AOT compiling.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] bool UnmapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>( string? pgName = null, INpgsqlNameTranslator? nameTranslator = null); @@ -122,7 +124,7 @@ public interface INpgsqlTypeMapper /// A component which will be used to translate CLR names (e.g. SomeClass) into database names (e.g. some_class). /// Defaults to . /// - [RequiresDynamicCode("Serializing arbitary types can require creating new generic types or methods. This may not work when AOT compiling.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] INpgsqlTypeMapper MapComposite( [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] Type clrType, string? pgName = null, @@ -140,7 +142,7 @@ INpgsqlTypeMapper MapComposite( /// A component which will be used to translate CLR names (e.g. SomeClass) into database names (e.g. some_class). /// Defaults to . /// - [RequiresDynamicCode("Serializing arbitary types can require creating new generic types or methods. 
This may not work when AOT compiling.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] bool UnmapComposite( [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] Type clrType, string? pgName = null, diff --git a/src/Npgsql/TypeMapping/INpgsqlTypeMapperExtensions.cs b/src/Npgsql/TypeMapping/INpgsqlTypeMapperExtensions.cs new file mode 100644 index 0000000000..91758822b8 --- /dev/null +++ b/src/Npgsql/TypeMapping/INpgsqlTypeMapperExtensions.cs @@ -0,0 +1,77 @@ +using System; +using System.Diagnostics.CodeAnalysis; +using System.Text.Json; +using System.Text.Json.Nodes; +using Npgsql.Internal.Resolvers; +using Npgsql.TypeMapping; +using NpgsqlTypes; + +// ReSharper disable once CheckNamespace +namespace Npgsql; + +/// +/// Extension methods over . +/// +public static class INpgsqlTypeMapperExtensions +{ + /// + /// Sets up dynamic System.Text.Json mappings. This allows mapping arbitrary .NET types to PostgreSQL json and jsonb + /// types, as well as and its derived types. + /// + /// The type mapper. + /// Options to customize JSON serialization and deserialization. + /// + /// A list of CLR types to map to PostgreSQL jsonb (no need to specify ). + /// + /// + /// A list of CLR types to map to PostgreSQL json (no need to specify ). + /// + /// + /// Due to the dynamic nature of these mappings, they are not compatible with NativeAOT or trimming. + /// + [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] + [RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. 
This may not work when AOT compiling.")] + public static T EnableDynamicJsonMappings( + this T mapper, + JsonSerializerOptions? serializerOptions = null, + Type[]? jsonbClrTypes = null, + Type[]? jsonClrTypes = null) + where T : INpgsqlTypeMapper + { + mapper.AddTypeInfoResolver(new JsonDynamicTypeInfoResolver(jsonbClrTypes, jsonClrTypes, serializerOptions)); + mapper.AddTypeInfoResolver(new JsonDynamicArrayTypeInfoResolver(jsonbClrTypes, jsonClrTypes, serializerOptions)); + return mapper; + } + + /// + /// Sets up mappings for the PostgreSQL record type as a .NET or . + /// + /// The same builder instance so that multiple calls can be chained. + [RequiresUnreferencedCode("The mapping of PostgreSQL records as .NET tuples requires reflection usage which is incompatible with trimming.")] + [RequiresDynamicCode("The mapping of PostgreSQL records as .NET tuples requires dynamic code usage which is incompatible with NativeAOT.")] + public static T EnableRecordsAsTuples(this T mapper) where T : INpgsqlTypeMapper + { + mapper.AddTypeInfoResolver(new TupledRecordTypeInfoResolver()); + mapper.AddTypeInfoResolver(new TupledRecordArrayTypeInfoResolver()); + return mapper; + } + + /// + /// Sets up mappings allowing the use of unmapped enum, range and multirange types. + /// + /// The same builder instance so that multiple calls can be chained. 
+ [RequiresUnreferencedCode("The use of unmapped enums, ranges or multiranges requires reflection usage which is incompatible with trimming.")] + [RequiresDynamicCode("The use of unmapped enums, ranges or multiranges requires dynamic code usage which is incompatible with NativeAOT.")] + public static T EnableUnmappedTypes(this T mapper) where T : INpgsqlTypeMapper + { + mapper.AddTypeInfoResolver(new UnmappedEnumTypeInfoResolver()); + mapper.AddTypeInfoResolver(new UnmappedRangeTypeInfoResolver()); + mapper.AddTypeInfoResolver(new UnmappedMultirangeTypeInfoResolver()); + + mapper.AddTypeInfoResolver(new UnmappedEnumArrayTypeInfoResolver()); + mapper.AddTypeInfoResolver(new UnmappedRangeArrayTypeInfoResolver()); + mapper.AddTypeInfoResolver(new UnmappedMultirangeArrayTypeInfoResolver()); + + return mapper; + } +} diff --git a/src/Npgsql/TypeMapping/UserTypeMapper.cs b/src/Npgsql/TypeMapping/UserTypeMapper.cs index 71e460305d..5ca68d0c37 100644 --- a/src/Npgsql/TypeMapping/UserTypeMapper.cs +++ b/src/Npgsql/TypeMapping/UserTypeMapper.cs @@ -57,7 +57,7 @@ sealed class UserTypeMapper where TEnum : struct, Enum => Unmap(typeof(TEnum), out _, pgName, nameTranslator ?? DefaultNameTranslator); - [RequiresDynamicCode("Serializing arbitary types can require creating new generic types or methods. This may not work when AOT compiling.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] public UserTypeMapper MapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicProperties)] T>( string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) where T : class { @@ -66,7 +66,7 @@ sealed class UserTypeMapper return this; } - [RequiresDynamicCode("Serializing arbitary types can require creating new generic types or methods. This may not work when AOT compiling.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] public UserTypeMapper MapStructComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicProperties)] T>( string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) where T : struct { @@ -76,7 +76,7 @@ sealed class UserTypeMapper } [UnconditionalSuppressMessage("Trimming", "IL2111", Justification = "MapStructComposite and MapComposite have identical DAM annotations to clrType.")] - [RequiresDynamicCode("MapComposite switches between MapStructComposite and MapComposite at runtime based on clr type. This can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] public UserTypeMapper MapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] Type clrType, string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) { @@ -140,7 +140,7 @@ sealed class UserMappingResolver : IPgTypeInfoResolver => _mappings.Find(type, dataTypeName, options); } - [RequiresDynamicCode("Serializing arbitary types can require creating new generic types or methods. This may not work when AOT compiling.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] sealed class CompositeMapping<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicProperties)] T> : UserTypeMapping where T : class { readonly INpgsqlNameTranslator _nameTranslator; @@ -165,7 +165,7 @@ internal override void Build(TypeInfoMappingCollection mappings) } } - [RequiresDynamicCode("Serializing arbitary types can require creating new generic types or methods. This may not work when AOT compiling.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. 
This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] sealed class StructCompositeMapping<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicProperties)] T> : UserTypeMapping where T : struct { readonly INpgsqlNameTranslator _nameTranslator; diff --git a/test/Npgsql.Tests/Support/TestBase.cs b/test/Npgsql.Tests/Support/TestBase.cs index 24daf573cf..0b1a7210b9 100644 --- a/test/Npgsql.Tests/Support/TestBase.cs +++ b/test/Npgsql.Tests/Support/TestBase.cs @@ -366,7 +366,7 @@ public async Task AssertTypeUnsupported(T value, string sqlLiteral, string pg await AssertTypeUnsupportedWrite(value, pgTypeName, dataSource); } - public async Task AssertTypeUnsupportedRead(string sqlLiteral, string pgTypeName, NpgsqlDataSource? dataSource = null) + public async Task AssertTypeUnsupportedRead(string sqlLiteral, string pgTypeName, NpgsqlDataSource? dataSource = null) { dataSource ??= DefaultDataSource; @@ -377,7 +377,7 @@ public async Task AssertTypeUnsupportedRead(string sqlLiteral, string pgTypeName await using var reader = await cmd.ExecuteReaderAsync(); await reader.ReadAsync(); - Assert.That(() => reader.GetValue(0), Throws.Exception.TypeOf()); + return Assert.Throws(() => reader.GetValue(0))!; } public Task AssertTypeUnsupportedRead(string sqlLiteral, string pgTypeName, NpgsqlDataSource? dataSource = null) @@ -389,6 +389,8 @@ public async Task AssertTypeUnsupportedRead(string sq dataSource ??= DataSource; await using var conn = await dataSource.OpenConnectionAsync(); + // Make sure we don't poison the connection with a fault, potentially terminating other perfectly passing tests as well. + await using var tx = dataSource.Settings.Multiplexing ? 
await conn.BeginTransactionAsync() : null; await using var cmd = new NpgsqlCommand($"SELECT '{sqlLiteral}'::{pgTypeName}", conn); await using var reader = await cmd.ExecuteReaderAsync(); await reader.ReadAsync(); diff --git a/test/Npgsql.Tests/Types/EnumTests.cs b/test/Npgsql.Tests/Types/EnumTests.cs index bc8a4992d5..bdc05fc512 100644 --- a/test/Npgsql.Tests/Types/EnumTests.cs +++ b/test/Npgsql.Tests/Types/EnumTests.cs @@ -1,7 +1,9 @@ -using System.Collections.Generic; +using System; +using System.Collections.Generic; using System.Threading.Tasks; using Npgsql.NameTranslation; using Npgsql.PostgresTypes; +using Npgsql.Properties; using NpgsqlTypes; using NUnit.Framework; using static Npgsql.Tests.TestUtil; @@ -94,7 +96,8 @@ public async Task Name_translation_null() [Test] public async Task Unmapped_enum_as_clr_enum() { - await using var connection = await OpenConnectionAsync(); + await using var dataSource = CreateDataSource(b => b.EnableUnmappedTypes()); + await using var connection = await dataSource.OpenConnectionAsync(); var type1 = await GetTempTypeName(connection); var type2 = await GetTempTypeName(connection); await connection.ExecuteNonQueryAsync(@$" @@ -106,6 +109,28 @@ await connection.ExecuteNonQueryAsync(@$" await AssertType(connection, AnotherEnum.Value2, "value2", type2, npgsqlDbType: null, isDefault: false); } + [Test] + public async Task Unmapped_enum_as_clr_enum_supported_only_with_EnableUnmappedTypes() + { + await using var connection = await DataSource.OpenConnectionAsync(); + var enumType = await GetTempTypeName(connection); + await connection.ExecuteNonQueryAsync($"CREATE TYPE {enumType} AS ENUM ('sad', 'ok', 'happy')"); + await connection.ReloadTypesAsync(); + + var errorMessage = string.Format( + NpgsqlStrings.UnmappedEnumsNotEnabled, + nameof(INpgsqlTypeMapperExtensions.EnableUnmappedTypes), + nameof(NpgsqlDataSourceBuilder)); + + var exception = await AssertTypeUnsupportedWrite(Mood.Happy, enumType); + 
Assert.IsInstanceOf(exception.InnerException); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); + + exception = await AssertTypeUnsupportedRead("happy", enumType); + Assert.IsInstanceOf(exception.InnerException); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); + } + [Test] public async Task Unmapped_enum_as_string() { diff --git a/test/Npgsql.Tests/Types/JsonDynamicTests.cs b/test/Npgsql.Tests/Types/JsonDynamicTests.cs new file mode 100644 index 0000000000..774f74c1e3 --- /dev/null +++ b/test/Npgsql.Tests/Types/JsonDynamicTests.cs @@ -0,0 +1,380 @@ +using System; +using System.Text.Json; +using System.Text.Json.Nodes; +using System.Text.Json.Serialization; +using System.Threading.Tasks; +using Npgsql.Properties; +using NpgsqlTypes; +using NUnit.Framework; + +namespace Npgsql.Tests.Types; + +[TestFixture(MultiplexingMode.NonMultiplexing, NpgsqlDbType.Json)] +[TestFixture(MultiplexingMode.NonMultiplexing, NpgsqlDbType.Jsonb)] +[TestFixture(MultiplexingMode.Multiplexing, NpgsqlDbType.Json)] +[TestFixture(MultiplexingMode.Multiplexing, NpgsqlDbType.Jsonb)] +public class JsonDynamicTests : MultiplexingTestBase +{ +#if NET6_0_OR_GREATER + [Test] + public Task Roundtrip_JsonObject() + => AssertType( + new JsonObject { ["Bar"] = 8 }, + IsJsonb ? """{"Bar": 8}""" : """{"Bar":8}""", + PostgresType, + NpgsqlDbType, + // By default we map JsonObject to jsonb + isDefaultForWriting: IsJsonb, + isDefaultForReading: false, + isNpgsqlDbTypeInferredFromClrType: false, + comparer: (x, y) => x.ToString() == y.ToString()); + + [Test] + public Task Roundtrip_JsonArray() + => AssertType( + new JsonArray { 1, 2, 3 }, + IsJsonb ? 
"[1, 2, 3]" : "[1,2,3]", + PostgresType, + NpgsqlDbType, + // By default we map JsonArray to jsonb + isDefaultForWriting: IsJsonb, + isDefaultForReading: false, + isNpgsqlDbTypeInferredFromClrType: false, + comparer: (x, y) => x.ToString() == y.ToString()); + + [Test] + [IssueLink("https://github.com/npgsql/npgsql/issues/4537")] + public async Task Write_jsonobject_array_without_npgsqldbtype() + { + // By default we map JsonObject to jsonb + if (!IsJsonb) + return; + + await using var conn = await OpenConnectionAsync(); + var tableName = await TestUtil.CreateTempTable(conn, "key SERIAL PRIMARY KEY, ingredients json[]"); + + await using var cmd = new NpgsqlCommand { Connection = conn }; + + var jsonObject1 = new JsonObject + { + { "name", "value1" }, + { "amount", 1 }, + { "unit", "ml" } + }; + + var jsonObject2 = new JsonObject + { + { "name", "value2" }, + { "amount", 2 }, + { "unit", "g" } + }; + + cmd.CommandText = $"INSERT INTO {tableName} (ingredients) VALUES (@p)"; + cmd.Parameters.Add(new("p", new[] { jsonObject1, jsonObject2 })); + await cmd.ExecuteNonQueryAsync(); + } +#endif + + [Test] + public async Task As_poco() + => await AssertType( + new WeatherForecast + { + Date = new DateTime(2019, 9, 1), + Summary = "Partly cloudy", + TemperatureC = 10 + }, + IsJsonb + ? """{"Date": "2019-09-01T00:00:00", "Summary": "Partly cloudy", "TemperatureC": 10}""" + : """{"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}""", + PostgresType, + NpgsqlDbType, + isDefault: false); + + [Test] + public async Task As_poco_long() + { + using var conn = CreateConnection(); + var bigString = new string('x', Math.Max(conn.Settings.ReadBufferSize, conn.Settings.WriteBufferSize)); + + await AssertType( + new WeatherForecast + { + Date = new DateTime(2019, 9, 1), + Summary = bigString, + TemperatureC = 10 + }, + // Warning: in theory jsonb order and whitespace may change across versions + IsJsonb + ? 
$$"""{"Date": "2019-09-01T00:00:00", "Summary": "{{bigString}}", "TemperatureC": 10}""" + : $$"""{"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"{{bigString}}"}""", + PostgresType, + NpgsqlDbType, + isDefault: false); + } + + [Test] + public async Task As_poco_supported_only_with_EnableDynamicJsonMappings() + { + // This test uses base.DataSource, which doesn't have EnableDynamicJsonMappings() + + var errorMessage = string.Format( + NpgsqlStrings.DynamicJsonNotEnabled, + nameof(WeatherForecast), + nameof(INpgsqlTypeMapperExtensions.EnableDynamicJsonMappings), + nameof(NpgsqlDataSourceBuilder)); + + var exception = await AssertTypeUnsupportedWrite( + new WeatherForecast + { + Date = new DateTime(2019, 9, 1), + Summary = "Partly cloudy", + TemperatureC = 10 + }, + PostgresType, + base.DataSource); + + Assert.IsInstanceOf(exception.InnerException); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); + + exception = await AssertTypeUnsupportedRead( + IsJsonb + ? """{"Date": "2019-09-01T00:00:00", "Summary": "Partly cloudy", "TemperatureC": 10}""" + : """{"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}""", + PostgresType, + base.DataSource); + + Assert.IsInstanceOf(exception.InnerException); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); + } + + [Test] + public async Task Poco_does_not_stomp_GetValue_string() + { + var dataSourceBuilder = CreateDataSourceBuilder(); + var dataSource = dataSourceBuilder.EnableDynamicJsonMappings(null, new[] {typeof(WeatherForecast)}, new[] {typeof(WeatherForecast)}).Build(); + var sqlLiteral = + IsJsonb + ? """{"Date": "2019-09-01T00:00:00", "Summary": "Partly cloudy", "TemperatureC": 10}""" + : """{"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}"""; + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand($"SELECT '{sqlLiteral}'::{(IsJsonb ? 
"jsonb" : "json")}", conn); + await using var reader = await cmd.ExecuteReaderAsync(); + await reader.ReadAsync(); + + Assert.That(reader.GetValue(0), Is.TypeOf()); + } + + [Test] + public async Task Custom_JsonSerializerOptions() + { + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.EnableDynamicJsonMappings(new JsonSerializerOptions { PropertyNamingPolicy = JsonNamingPolicy.CamelCase }); + await using var dataSource = dataSourceBuilder.Build(); + + await AssertTypeWrite( + dataSource, + new WeatherForecast + { + Date = new DateTime(2019, 9, 1), + Summary = "Partly cloudy", + TemperatureC = 10 + }, + IsJsonb + ? """{"date": "2019-09-01T00:00:00", "summary": "Partly cloudy", "temperatureC": 10}""" + : """{"date":"2019-09-01T00:00:00","temperatureC":10,"summary":"Partly cloudy"}""", + PostgresType, + NpgsqlDbType, + isDefault: false); + } + + [Test, Ignore("TODO We should not change the default type for json/jsonb, it makes little sense.")] + public async Task Poco_default_mapping() + { + var dataSourceBuilder = CreateDataSourceBuilder(); + if (IsJsonb) + dataSourceBuilder.EnableDynamicJsonMappings(jsonbClrTypes: new[] { typeof(WeatherForecast) }); + else + dataSourceBuilder.EnableDynamicJsonMappings(jsonClrTypes: new[] { typeof(WeatherForecast) }); + await using var dataSource = dataSourceBuilder.Build(); + + await AssertType( + dataSource, + new WeatherForecast + { + Date = new DateTime(2019, 9, 1), + Summary = "Partly cloudy", + TemperatureC = 10 + }, + IsJsonb + ? """{"Date": "2019-09-01T00:00:00", "Summary": "Partly cloudy", "TemperatureC": 10}""" + : """{"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}""", + PostgresType, + NpgsqlDbType, + isDefaultForReading: false, + isNpgsqlDbTypeInferredFromClrType: false); + } + + [Test] + public async Task Poco_polymorphic_mapping() + { + // We don't yet support polymorphic deserialization for jsonb as $type does not come back as the first property. 
+ // This could be fixed by detecting PolymorphicOptions types, always buffering their values and modifying the text. + if (IsJsonb) + return; + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.EnableDynamicJsonMappings(jsonClrTypes: new[] { typeof(WeatherForecast) }); + await using var dataSource = dataSourceBuilder.Build(); + + await AssertType( + dataSource, + new ExtendedDerivedWeatherForecast() + { + Date = new DateTime(2019, 9, 1), + Summary = "Partly cloudy", + TemperatureC = 10 + }, + """{"$type":"extended","TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}""", + PostgresType, + NpgsqlDbType, + isDefaultForReading: false, + isNpgsqlDbTypeInferredFromClrType: false); + } + + [Test] + public async Task Poco_polymorphic_mapping_read_parents() + { + // We don't yet support polymorphic deserialization for jsonb as $type does not come back as the first property. + // This could be fixed by detecting PolymorphicOptions types, always buffering their values and modifying the text. 
+ if (IsJsonb) + return; + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.EnableDynamicJsonMappings(jsonClrTypes: new[] { typeof(WeatherForecast) }); + await using var dataSource = dataSourceBuilder.Build(); + + var value = new ExtendedDerivedWeatherForecast() + { + Date = new DateTime(2019, 9, 1), + Summary = "Partly cloudy", + TemperatureC = 10 + }; + + var sql = """{"$type":"extended","TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}"""; + + await AssertTypeWrite( + dataSource, + value, + sql, + PostgresType, + NpgsqlDbType, + isNpgsqlDbTypeInferredFromClrType: false); + + // GetFieldValue + await AssertTypeRead(dataSource, sql, PostgresType, value, + comparer: (_, actual) => actual.GetType() == typeof(ExtendedDerivedWeatherForecast), + isDefault: false); + + await AssertTypeRead(dataSource, sql, PostgresType, value, + comparer: (_, actual) => actual.GetType() == typeof(DerivedWeatherForecast), isDefault: false); + + await AssertTypeRead(dataSource, sql, PostgresType, value, isDefault: false); + } + + + [Test] + public async Task Poco_exact_polymorphic_mapping() + { + // We don't yet support polymorphic deserialization for jsonb as $type does not come back as the first property. + // This could be fixed by detecting PolymorphicOptions types, always buffering their values and modifying the text. 
+ if (IsJsonb) + return; + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.EnableDynamicJsonMappings(jsonClrTypes: new[] { typeof(ExtendedDerivedWeatherForecast) }); + await using var dataSource = dataSourceBuilder.Build(); + + await AssertType( + dataSource, + new ExtendedDerivedWeatherForecast() + { + Date = new DateTime(2019, 9, 1), + Summary = "Partly cloudy", + TemperatureC = 10 + }, + """{"TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}""", + PostgresType, + NpgsqlDbType, + isDefaultForReading: false, + isNpgsqlDbTypeInferredFromClrType: false); + } + + [Test] + public async Task Poco_unspecified_polymorphic_mapping() + { + // We don't yet support polymorphic deserialization for jsonb as $type does not come back as the first property. + // This could be fixed by detecting PolymorphicOptions types, always buffering their values and modifying the text. + // In this case we don't have any statically mapped base type to check its PolymorphicOptions on. + // Detecting whether the type could be polymorphic would require us to duplicate STJ's nearest polymorphic ancestor search. 
+ if (IsJsonb) + return; + + var value = new ExtendedDerivedWeatherForecast + { + Date = new DateTime(2019, 9, 1), + Summary = "Partly cloudy", + TemperatureC = 10 + }; + + var sql = """{"$type":"extended","TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}"""; + + await AssertType( + value, + sql, + PostgresType, + NpgsqlDbType, + isDefault: false); + + await AssertTypeRead(DataSource, sql, PostgresType, value, + comparer: (_, actual) => actual.GetType() == typeof(DerivedWeatherForecast), isDefault: false); + + await AssertTypeRead(DataSource, sql, PostgresType, value, + comparer: (_, actual) => actual.GetType() == typeof(ExtendedDerivedWeatherForecast), isDefault: false); + } + + [JsonDerivedType(typeof(ExtendedDerivedWeatherForecast), typeDiscriminator: "extended")] + record WeatherForecast + { + public DateTime Date { get; set; } + public int TemperatureC { get; set; } + public string Summary { get; set; } = ""; + } + + record DerivedWeatherForecast : WeatherForecast + { + } + + record ExtendedDerivedWeatherForecast : DerivedWeatherForecast + { + public int TemperatureF => 32 + (int)(TemperatureC / 0.5556); + } + + public JsonDynamicTests(MultiplexingMode multiplexingMode, NpgsqlDbType npgsqlDbType) + : base(multiplexingMode) + { + DataSource = CreateDataSource(b => b.EnableDynamicJsonMappings()); + + if (npgsqlDbType == NpgsqlDbType.Jsonb) + using (var conn = OpenConnection()) + TestUtil.MinimumPgVersion(conn, "9.4.0", "JSONB data type not yet introduced"); + + NpgsqlDbType = npgsqlDbType; + } + + protected override NpgsqlDataSource DataSource { get; } + + bool IsJsonb => NpgsqlDbType == NpgsqlDbType.Jsonb; + string PostgresType => IsJsonb ? 
"jsonb" : "json"; + readonly NpgsqlDbType NpgsqlDbType; +} diff --git a/test/Npgsql.Tests/Types/JsonTests.cs b/test/Npgsql.Tests/Types/JsonTests.cs index 3460a88a5c..96e1ae148c 100644 --- a/test/Npgsql.Tests/Types/JsonTests.cs +++ b/test/Npgsql.Tests/Types/JsonTests.cs @@ -84,106 +84,6 @@ await AssertTypeUnsupported( slimDataSource); } -#if NET6_0_OR_GREATER - [Test] - public Task Roundtrip_JsonObject() - => AssertType( - new JsonObject { ["Bar"] = 8 }, - IsJsonb ? """{"Bar": 8}""" : """{"Bar":8}""", - PostgresType, - NpgsqlDbType, - // By default we map JsonObject to jsonb - isDefaultForWriting: IsJsonb, - isDefaultForReading: false, - isNpgsqlDbTypeInferredFromClrType: false, - comparer: (x, y) => x.ToString() == y.ToString()); - - [Test] - public Task Roundtrip_JsonArray() - => AssertType( - new JsonArray { 1, 2, 3 }, - IsJsonb ? "[1, 2, 3]" : "[1,2,3]", - PostgresType, - NpgsqlDbType, - // By default we map JsonArray to jsonb - isDefaultForWriting: IsJsonb, - isDefaultForReading: false, - isNpgsqlDbTypeInferredFromClrType: false, - comparer: (x, y) => x.ToString() == y.ToString()); -#endif - - [Test] - public async Task As_poco() - => await AssertType( - new WeatherForecast - { - Date = new DateTime(2019, 9, 1), - Summary = "Partly cloudy", - TemperatureC = 10 - }, - IsJsonb - ? """{"Date": "2019-09-01T00:00:00", "Summary": "Partly cloudy", "TemperatureC": 10}""" - : """{"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}""", - PostgresType, - NpgsqlDbType, - isDefault: false); - - [Test] - public async Task As_poco_long() - { - using var conn = CreateConnection(); - var bigString = new string('x', Math.Max(conn.Settings.ReadBufferSize, conn.Settings.WriteBufferSize)); - - await AssertType( - new WeatherForecast - { - Date = new DateTime(2019, 9, 1), - Summary = bigString, - TemperatureC = 10 - }, - // Warning: in theory jsonb order and whitespace may change across versions - IsJsonb - ? 
$$"""{"Date": "2019-09-01T00:00:00", "Summary": "{{bigString}}", "TemperatureC": 10}""" - : $$"""{"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"{{bigString}}"}""", - PostgresType, - NpgsqlDbType, - isDefault: false); - } - - [Test] - public async Task As_poco_supported_only_with_SystemTextJson() - { - await using var slimDataSource = new NpgsqlSlimDataSourceBuilder(ConnectionString).Build(); - - await AssertTypeUnsupported( - new WeatherForecast - { - Date = new DateTime(2019, 9, 1), - Summary = "Partly cloudy", - TemperatureC = 10 - }, - """{"Date": "2019-09-01T00:00:00", "Summary": "Partly cloudy", "TemperatureC": 10}""", - PostgresType, - slimDataSource); - } - - [Test] - public async Task Poco_does_not_stomp_GetValue_string() - { - var dataSourceBuilder = CreateDataSourceBuilder(); - var dataSource = dataSourceBuilder.UseSystemTextJson(null, new[] {typeof(WeatherForecast)}, new[] {typeof(WeatherForecast)}).Build(); - var sqlLiteral = - IsJsonb - ? """{"Date": "2019-09-01T00:00:00", "Summary": "Partly cloudy", "TemperatureC": 10}""" - : """{"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}"""; - await using var conn = await dataSource.OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand($"SELECT '{sqlLiteral}'::{(IsJsonb ? 
"jsonb" : "json")}", conn); - await using var reader = await cmd.ExecuteReaderAsync(); - await reader.ReadAsync(); - - Assert.That(reader.GetValue(0), Is.TypeOf()); - } - [Test] public Task Roundtrip_string() => AssertType( @@ -214,23 +114,6 @@ public Task Roundtrip_byte_array() isDefault: false, isNpgsqlDbTypeInferredFromClrType: false); - [JsonDerivedType(typeof(ExtendedDerivedWeatherForecast), typeDiscriminator: "extended")] - record WeatherForecast - { - public DateTime Date { get; set; } - public int TemperatureC { get; set; } - public string Summary { get; set; } = ""; - } - - record DerivedWeatherForecast : WeatherForecast - { - } - - record ExtendedDerivedWeatherForecast : DerivedWeatherForecast - { - public int TemperatureF => 32 + (int)(TemperatureC / 0.5556); - } - [Test] [IssueLink("https://github.com/npgsql/npgsql/issues/2811")] [IssueLink("https://github.com/npgsql/efcore.pg/issues/1177")] @@ -257,223 +140,13 @@ public async Task Can_read_two_json_documents() Assert.That(car.RootElement.GetProperty("key").GetString(), Is.EqualTo("foo")); } -#if NET6_0_OR_GREATER - [Test] - [IssueLink("https://github.com/npgsql/npgsql/issues/4537")] - public async Task Write_jsonobject_array_without_npgsqldbtype() - { - // By default we map JsonObject to jsonb - if (!IsJsonb) - return; - - await using var conn = await OpenConnectionAsync(); - var tableName = await TestUtil.CreateTempTable(conn, "key SERIAL PRIMARY KEY, ingredients json[]"); - - await using var cmd = new NpgsqlCommand { Connection = conn }; - - var jsonObject1 = new JsonObject - { - { "name", "value1" }, - { "amount", 1 }, - { "unit", "ml" } - }; - - var jsonObject2 = new JsonObject - { - { "name", "value2" }, - { "amount", 2 }, - { "unit", "g" } - }; - - cmd.CommandText = $"INSERT INTO {tableName} (ingredients) VALUES (@p)"; - cmd.Parameters.Add(new("p", new[] { jsonObject1, jsonObject2 })); - await cmd.ExecuteNonQueryAsync(); - } -#endif - - [Test] - public async Task Custom_JsonSerializerOptions() - 
{ - var dataSourceBuilder = CreateDataSourceBuilder(); - dataSourceBuilder.UseSystemTextJson(new JsonSerializerOptions { PropertyNamingPolicy = JsonNamingPolicy.CamelCase }); - await using var dataSource = dataSourceBuilder.Build(); - - await AssertTypeWrite( - dataSource, - new WeatherForecast - { - Date = new DateTime(2019, 9, 1), - Summary = "Partly cloudy", - TemperatureC = 10 - }, - IsJsonb - ? """{"date": "2019-09-01T00:00:00", "summary": "Partly cloudy", "temperatureC": 10}""" - : """{"date":"2019-09-01T00:00:00","temperatureC":10,"summary":"Partly cloudy"}""", - PostgresType, - NpgsqlDbType, - isDefault: false); - } - - [Test, Ignore("TODO We should not change the default type for json/jsonb, it makes little sense.")] - public async Task Poco_default_mapping() - { - var dataSourceBuilder = CreateDataSourceBuilder(); - if (IsJsonb) - dataSourceBuilder.UseSystemTextJson(jsonbClrTypes: new[] { typeof(WeatherForecast) }); - else - dataSourceBuilder.UseSystemTextJson(jsonClrTypes: new[] { typeof(WeatherForecast) }); - await using var dataSource = dataSourceBuilder.Build(); - - await AssertType( - dataSource, - new WeatherForecast - { - Date = new DateTime(2019, 9, 1), - Summary = "Partly cloudy", - TemperatureC = 10 - }, - IsJsonb - ? """{"Date": "2019-09-01T00:00:00", "Summary": "Partly cloudy", "TemperatureC": 10}""" - : """{"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}""", - PostgresType, - NpgsqlDbType, - isDefaultForReading: false, - isNpgsqlDbTypeInferredFromClrType: false); - } - - [Test] - public async Task Poco_polymorphic_mapping() - { - // We don't yet support polymorphic deserialization for jsonb as $type does not come back as the first property. - // This could be fixed by detecting PolymorphicOptions types, always buffering their values and modifying the text. 
- if (IsJsonb) - return; - - var dataSourceBuilder = CreateDataSourceBuilder(); - dataSourceBuilder.UseSystemTextJson(jsonClrTypes: new[] { typeof(WeatherForecast) }); - await using var dataSource = dataSourceBuilder.Build(); - - await AssertType( - dataSource, - new ExtendedDerivedWeatherForecast() - { - Date = new DateTime(2019, 9, 1), - Summary = "Partly cloudy", - TemperatureC = 10 - }, - """{"$type":"extended","TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}""", - PostgresType, - NpgsqlDbType, - isDefaultForReading: false, - isNpgsqlDbTypeInferredFromClrType: false); - } - - [Test] - public async Task Poco_polymorphic_mapping_read_parents() - { - // We don't yet support polymorphic deserialization for jsonb as $type does not come back as the first property. - // This could be fixed by detecting PolymorphicOptions types, always buffering their values and modifying the text. - if (IsJsonb) - return; - - var dataSourceBuilder = CreateDataSourceBuilder(); - dataSourceBuilder.UseSystemTextJson(jsonClrTypes: new[] { typeof(WeatherForecast) }); - await using var dataSource = dataSourceBuilder.Build(); - - var value = new ExtendedDerivedWeatherForecast() - { - Date = new DateTime(2019, 9, 1), - Summary = "Partly cloudy", - TemperatureC = 10 - }; - - var sql = """{"$type":"extended","TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}"""; - - await AssertTypeWrite( - dataSource, - value, - sql, - PostgresType, - NpgsqlDbType, - isNpgsqlDbTypeInferredFromClrType: false); - - // GetFieldValue - await AssertTypeRead(dataSource, sql, PostgresType, value, - comparer: (_, actual) => actual.GetType() == typeof(ExtendedDerivedWeatherForecast), - isDefault: false); - - await AssertTypeRead(dataSource, sql, PostgresType, value, - comparer: (_, actual) => actual.GetType() == typeof(DerivedWeatherForecast), isDefault: false); - - await AssertTypeRead(dataSource, sql, PostgresType, value, isDefault: 
false); - } - - - [Test] - public async Task Poco_exact_polymorphic_mapping() - { - // We don't yet support polymorphic deserialization for jsonb as $type does not come back as the first property. - // This could be fixed by detecting PolymorphicOptions types, always buffering their values and modifying the text. - if (IsJsonb) - return; - - var dataSourceBuilder = CreateDataSourceBuilder(); - dataSourceBuilder.UseSystemTextJson(jsonClrTypes: new[] { typeof(ExtendedDerivedWeatherForecast) }); - await using var dataSource = dataSourceBuilder.Build(); - - await AssertType( - dataSource, - new ExtendedDerivedWeatherForecast() - { - Date = new DateTime(2019, 9, 1), - Summary = "Partly cloudy", - TemperatureC = 10 - }, - """{"TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}""", - PostgresType, - NpgsqlDbType, - isDefaultForReading: false, - isNpgsqlDbTypeInferredFromClrType: false); - } - - [Test] - public async Task Poco_unspecified_polymorphic_mapping() - { - // We don't yet support polymorphic deserialization for jsonb as $type does not come back as the first property. - // This could be fixed by detecting PolymorphicOptions types, always buffering their values and modifying the text. - // In this case we don't have any statically mapped base type to check its PolymorphicOptions on. - // Detecting whether the type could be polymorphic would require us to duplicate STJ's nearest polymorphic ancestor search. 
- if (IsJsonb) - return; - - var value = new ExtendedDerivedWeatherForecast() - { - Date = new DateTime(2019, 9, 1), - Summary = "Partly cloudy", - TemperatureC = 10 - }; - - var sql = """{"$type":"extended","TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}"""; - - await AssertType( - value, - sql, - PostgresType, - NpgsqlDbType, - isDefault: false); - - await AssertTypeRead(DataSource, sql, PostgresType, value, - comparer: (_, actual) => actual.GetType() == typeof(DerivedWeatherForecast), isDefault: false); - - await AssertTypeRead(DataSource, sql, PostgresType, value, - comparer: (_, actual) => actual.GetType() == typeof(ExtendedDerivedWeatherForecast), isDefault: false); - } - public JsonTests(MultiplexingMode multiplexingMode, NpgsqlDbType npgsqlDbType) : base(multiplexingMode) { - using (var conn = OpenConnection()) - TestUtil.MinimumPgVersion(conn, "9.4.0", "JSONB data type not yet introduced"); + if (npgsqlDbType == NpgsqlDbType.Jsonb) + using (var conn = OpenConnection()) + TestUtil.MinimumPgVersion(conn, "9.4.0", "JSONB data type not yet introduced"); + NpgsqlDbType = npgsqlDbType; } diff --git a/test/Npgsql.Tests/Types/MultirangeTests.cs b/test/Npgsql.Tests/Types/MultirangeTests.cs index 84f815c63c..8d17bc5613 100644 --- a/test/Npgsql.Tests/Types/MultirangeTests.cs +++ b/test/Npgsql.Tests/Types/MultirangeTests.cs @@ -3,6 +3,7 @@ using System.Data; using System.Linq; using System.Threading.Tasks; +using Npgsql.Properties; using NpgsqlTypes; using NUnit.Framework; using static Npgsql.Tests.TestUtil; @@ -104,7 +105,7 @@ public Task Multirange_as_list( [NonParallelizable] public async Task Unmapped_multirange_with_mapped_subtype() { - await using var dataSource = CreateDataSource(csb => csb.MaxPoolSize = 1); + await using var dataSource = CreateDataSource(b => b.EnableUnmappedTypes().ConnectionStringBuilder.MaxPoolSize = 1); await using var conn = await dataSource.OpenConnectionAsync(); var typeName = await 
GetTempTypeName(conn); @@ -129,6 +130,38 @@ public async Task Unmapped_multirange_with_mapped_subtype() actual[0].LowerBound!.SequenceEqual(expected[0].LowerBound!) && actual[0].UpperBound!.SequenceEqual(expected[0].UpperBound!))); } + [Test] + public async Task Unmapped_multirange_supported_only_with_EnableUnmappedTypes() + { + await using var connection = await DataSource.OpenConnectionAsync(); + var rangeType = await GetTempTypeName(connection); + var multirangeTypeName = rangeType + "_multirange"; + await connection.ExecuteNonQueryAsync($"CREATE TYPE {rangeType} AS RANGE(subtype=text)"); + await Task.Yield(); // TODO: fix multiplexing deadlock bug + await connection.ReloadTypesAsync(); + + var errorMessage = string.Format( + NpgsqlStrings.UnmappedRangesNotEnabled, + nameof(INpgsqlTypeMapperExtensions.EnableUnmappedTypes), + nameof(NpgsqlDataSourceBuilder)); + + var exception = await AssertTypeUnsupportedWrite( + new NpgsqlRange[] + { + new("bar", "foo"), + new("moo", "zoo"), + }, + multirangeTypeName); + Assert.IsInstanceOf(exception.InnerException); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); + + exception = await AssertTypeUnsupportedRead>( + """{["bar","foo"],["moo","zoo"]}""", + multirangeTypeName); + Assert.IsInstanceOf(exception.InnerException); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); + } + protected override NpgsqlDataSource DataSource { get; } public MultirangeTests() => DataSource = CreateDataSource(builder => diff --git a/test/Npgsql.Tests/Types/RangeTests.cs b/test/Npgsql.Tests/Types/RangeTests.cs index b6b54ef2bf..ca7a04dc27 100644 --- a/test/Npgsql.Tests/Types/RangeTests.cs +++ b/test/Npgsql.Tests/Types/RangeTests.cs @@ -168,7 +168,7 @@ public async Task TimestampTz_range_with_DateTimeOffset() [NonParallelizable] public async Task Unmapped_range_with_mapped_subtype() { - await using var dataSource = CreateDataSource(csb => csb.MaxPoolSize = 1); + await using var dataSource = 
CreateDataSource(b => b.EnableUnmappedTypes().ConnectionStringBuilder.MaxPoolSize = 1); await using var conn = await dataSource.OpenConnectionAsync(); var typeName = await GetTempTypeName(conn); @@ -193,6 +193,29 @@ public async Task Unmapped_range_with_mapped_subtype() actual.LowerBound!.SequenceEqual(expected.LowerBound!) && actual.UpperBound!.SequenceEqual(expected.UpperBound!))); } + [Test] + public async Task Unmapped_range_supported_only_with_EnableUnmappedTypes() + { + await using var connection = await DataSource.OpenConnectionAsync(); + var rangeType = await GetTempTypeName(connection); + await connection.ExecuteNonQueryAsync($"CREATE TYPE {rangeType} AS RANGE(subtype=text)"); + await Task.Yield(); // TODO: fix multiplexing deadlock bug + await connection.ReloadTypesAsync(); + + var errorMessage = string.Format( + NpgsqlStrings.UnmappedRangesNotEnabled, + nameof(INpgsqlTypeMapperExtensions.EnableUnmappedTypes), + nameof(NpgsqlDataSourceBuilder)); + + var exception = await AssertTypeUnsupportedWrite(new NpgsqlRange("bar", "foo"), rangeType); + Assert.IsInstanceOf(exception.InnerException); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); + + exception = await AssertTypeUnsupportedRead>("""["bar","foo"]""", rangeType); + Assert.IsInstanceOf(exception.InnerException); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); + } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4441")] public async Task Array_of_range() { diff --git a/test/Npgsql.Tests/Types/RecordTests.cs b/test/Npgsql.Tests/Types/RecordTests.cs index f40f5f0965..95e95dc33e 100644 --- a/test/Npgsql.Tests/Types/RecordTests.cs +++ b/test/Npgsql.Tests/Types/RecordTests.cs @@ -33,8 +33,10 @@ public async Task Read_Record_as_object_array() [Test] public async Task Read_Record_as_ValueTuple() { + await using var dataSource = CreateDataSource(b => b.EnableRecordsAsTuples()); + await using var conn = await dataSource.OpenConnectionAsync(); + var 
recordLiteral = "(1,'foo'::text)::record"; - await using var conn = await OpenConnectionAsync(); await using var cmd = new NpgsqlCommand($"SELECT {recordLiteral}, ARRAY[{recordLiteral}, {recordLiteral}]", conn); await using var reader = await cmd.ExecuteReaderAsync(); reader.Read(); @@ -52,8 +54,10 @@ public async Task Read_Record_as_ValueTuple() [Test] public async Task Read_Record_as_Tuple() { + await using var dataSource = CreateDataSource(b => b.EnableRecordsAsTuples()); + await using var conn = await dataSource.OpenConnectionAsync(); + var recordLiteral = "(1,'foo'::text)::record"; - await using var conn = await OpenConnectionAsync(); await using var cmd = new NpgsqlCommand($"SELECT {recordLiteral}, ARRAY[{recordLiteral}, {recordLiteral}]", conn); await using var reader = await cmd.ExecuteReaderAsync(); reader.Read(); @@ -80,6 +84,25 @@ public async Task Record_with_non_int_field() Assert.That(record[1], Is.EqualTo(2)); } + [Test] + public async Task As_ValueTuple_supported_only_with_EnableRecordsAsTuples() + { + await using var connection = await DataSource.OpenConnectionAsync(); + await using var command = new NpgsqlCommand("SELECT (1, 'foo')::record", connection); + await using var reader = await command.ExecuteReaderAsync(); + await reader.ReadAsync(); + + var errorMessage = string.Format( + NpgsqlStrings.RecordsNotEnabled, + nameof(INpgsqlTypeMapperExtensions.EnableRecordsAsTuples), + nameof(NpgsqlDataSourceBuilder), + nameof(NpgsqlSlimDataSourceBuilder.EnableRecords)); + + var exception = Assert.Throws(() => reader.GetFieldValue<(int, string)>(0))!; + Assert.IsInstanceOf(exception.InnerException); + Assert.AreEqual(errorMessage, exception.InnerException!.Message); + } + [Test] public async Task Records_not_supported_by_default_on_NpgsqlSlimSourceBuilder() { @@ -95,8 +118,9 @@ public async Task Records_not_supported_by_default_on_NpgsqlSlimSourceBuilder() var errorMessage = string.Format( NpgsqlStrings.RecordsNotEnabled, - 
nameof(NpgsqlSlimDataSourceBuilder.EnableRecords), - nameof(NpgsqlSlimDataSourceBuilder)); + nameof(INpgsqlTypeMapperExtensions.EnableRecordsAsTuples), + nameof(NpgsqlSlimDataSourceBuilder), + nameof(NpgsqlSlimDataSourceBuilder.EnableRecords)); var exception = Assert.Throws(() => reader.GetValue(0))!; Assert.IsInstanceOf(exception.InnerException); From 82b01484129d1a9ddd304d7543db029c491e45ac Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Fri, 6 Oct 2023 14:55:19 +0200 Subject: [PATCH 240/761] Use consistent naming and properties for materialized view schema --- src/Npgsql/NpgsqlSchema.cs | 28 ++++++++++++++-------------- test/Npgsql.Tests/SchemaTests.cs | 2 +- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/src/Npgsql/NpgsqlSchema.cs b/src/Npgsql/NpgsqlSchema.cs index bf090058d7..f9688744ec 100644 --- a/src/Npgsql/NpgsqlSchema.cs +++ b/src/Npgsql/NpgsqlSchema.cs @@ -386,31 +386,31 @@ static Task GetMaterializedViews(NpgsqlConnection conn, string?[]? re Locale = CultureInfo.InvariantCulture, Columns = { - new DataColumn("schemaname"), - new DataColumn("matviewname"), - new DataColumn("matviewowner"), - new DataColumn("tablespace"), - new DataColumn("hasindexes", typeof(bool)), - new DataColumn("ispopulated", typeof(bool)) + new DataColumn("table_catalog"), + new DataColumn("table_schema"), + new DataColumn("table_name"), + new DataColumn("table_owner"), + new DataColumn("has_indexes", typeof(bool)), + new DataColumn("is_populated", typeof(bool)) } }; var sql = new StringBuilder(); - sql.Append(@"SELECT schemaname, matviewname, matviewowner, tablespace, hasindexes, ispopulated FROM pg_catalog.pg_matviews"); + sql.Append("""SELECT current_database(), schemaname, matviewname, matviewowner, hasindexes, ispopulated FROM pg_catalog.pg_matviews"""); return ParseResults( async, - BuildCommand(conn, sql, restrictions, "schemaname", "matviewname", "matviewowner", "tablespace"), + BuildCommand(conn, sql, restrictions, "current_database()", "schemaname", 
"matviewname", "matviewowner"), dataTable, (reader, row) => { - row["schemaname"] = GetFieldValueOrDBNull(reader, 0); - row["matviewname"] = GetFieldValueOrDBNull(reader, 1); - row["matviewowner"] = GetFieldValueOrDBNull(reader, 2); - row["tablespace"] = GetFieldValueOrDBNull(reader, 3); - row["hasindexes"] = GetFieldValueOrDBNull(reader, 4); - row["ispopulated"] = GetFieldValueOrDBNull(reader, 5); + row["table_catalog"] = GetFieldValueOrDBNull(reader, 0); + row["table_schema"] = GetFieldValueOrDBNull(reader, 1); + row["table_name"] = GetFieldValueOrDBNull(reader, 2); + row["table_owner"] = GetFieldValueOrDBNull(reader, 3); + row["has_indexes"] = GetFieldValueOrDBNull(reader, 4); + row["is_populated"] = GetFieldValueOrDBNull(reader, 5); }, cancellationToken); } diff --git a/test/Npgsql.Tests/SchemaTests.cs b/test/Npgsql.Tests/SchemaTests.cs index 53d27e7ef2..e65fc48cf2 100644 --- a/test/Npgsql.Tests/SchemaTests.cs +++ b/test/Npgsql.Tests/SchemaTests.cs @@ -367,7 +367,7 @@ public async Task GetSchema_materialized_views_with_restrictions() var dt = await GetSchema(conn, "MaterializedViews", new[] { null, viewName, null, null }); foreach (var row in dt.Rows.OfType()) - Assert.That(row["matviewname"], Is.EqualTo(viewName)); + Assert.That(row["table_name"], Is.EqualTo(viewName)); } [Test] From a8e051149a2cc3b3f1b69ab1da52012a4b9eeb83 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emmanuel=20Andr=C3=A9?= <2341261+manandre@users.noreply.github.com> Date: Sun, 8 Oct 2023 15:25:51 +0200 Subject: [PATCH 241/761] Add AddSlimNpgsqlDataSource to Npgsql.DependencyInjection (#5316) --- .../NpgsqlServiceCollectionExtensions.cs | 159 +++++++++++++++++- .../DependencyInjectionTests.cs | 39 ++++- 2 files changed, 185 insertions(+), 13 deletions(-) diff --git a/src/Npgsql.DependencyInjection/NpgsqlServiceCollectionExtensions.cs b/src/Npgsql.DependencyInjection/NpgsqlServiceCollectionExtensions.cs index b1754b690b..61b5a3f015 100644 --- 
a/src/Npgsql.DependencyInjection/NpgsqlServiceCollectionExtensions.cs +++ b/src/Npgsql.DependencyInjection/NpgsqlServiceCollectionExtensions.cs @@ -1,6 +1,5 @@ -using System; +using System; using System.Data.Common; -using System.Diagnostics.CodeAnalysis; using Microsoft.Extensions.DependencyInjection.Extensions; using Microsoft.Extensions.Logging; using Npgsql; @@ -60,6 +59,53 @@ public static IServiceCollection AddNpgsqlDataSource( => AddNpgsqlDataSourceCore( serviceCollection, connectionString, dataSourceBuilderAction: null, connectionLifetime, dataSourceLifetime); + /// + /// Registers an and an in the . + /// + /// The to add services to. + /// An Npgsql connection string. + /// + /// An action to configure the for further customizations of the . + /// + /// + /// The lifetime with which to register the in the container. + /// Defaults to . + /// + /// + /// The lifetime with which to register the service in the container. + /// Defaults to . + /// + /// The same service collection so that multiple calls can be chained. + public static IServiceCollection AddNpgsqlSlimDataSource( + this IServiceCollection serviceCollection, + string connectionString, + Action dataSourceBuilderAction, + ServiceLifetime connectionLifetime = ServiceLifetime.Transient, + ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton) + => AddNpgsqlSlimDataSourceCore(serviceCollection, connectionString, dataSourceBuilderAction, connectionLifetime, dataSourceLifetime); + + /// + /// Registers an and an in the . + /// + /// The to add services to. + /// An Npgsql connection string. + /// + /// The lifetime with which to register the in the container. + /// Defaults to . + /// + /// + /// The lifetime with which to register the service in the container. + /// Defaults to . + /// + /// The same service collection so that multiple calls can be chained. 
+ public static IServiceCollection AddNpgsqlSlimDataSource( + this IServiceCollection serviceCollection, + string connectionString, + ServiceLifetime connectionLifetime = ServiceLifetime.Transient, + ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton) + => AddNpgsqlSlimDataSourceCore( + serviceCollection, connectionString, dataSourceBuilderAction: null, connectionLifetime, dataSourceLifetime); + /// /// Registers an and an in the /// @@ -83,7 +129,7 @@ public static IServiceCollection AddMultiHostNpgsqlDataSource( Action dataSourceBuilderAction, ServiceLifetime connectionLifetime = ServiceLifetime.Transient, ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton) - => AddNpgsqlMultiHostDataSourceCore( + => AddMultiHostNpgsqlDataSourceCore( serviceCollection, connectionString, dataSourceBuilderAction, connectionLifetime, dataSourceLifetime); /// @@ -106,7 +152,56 @@ public static IServiceCollection AddMultiHostNpgsqlDataSource( string connectionString, ServiceLifetime connectionLifetime = ServiceLifetime.Transient, ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton) - => AddNpgsqlMultiHostDataSourceCore( + => AddMultiHostNpgsqlDataSourceCore( + serviceCollection, connectionString, dataSourceBuilderAction: null, connectionLifetime, dataSourceLifetime); + + /// + /// Registers an and an in the + /// + /// The to add services to. + /// An Npgsql connection string. + /// + /// An action to configure the for further customizations of the . + /// + /// + /// The lifetime with which to register the in the container. + /// Defaults to . + /// + /// + /// The lifetime with which to register the service in the container. + /// Defaults to . + /// + /// The same service collection so that multiple calls can be chained. 
+ public static IServiceCollection AddMultiHostNpgsqlSlimDataSource( + this IServiceCollection serviceCollection, + string connectionString, + Action dataSourceBuilderAction, + ServiceLifetime connectionLifetime = ServiceLifetime.Transient, + ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton) + => AddMultiHostNpgsqlSlimDataSourceCore( + serviceCollection, connectionString, dataSourceBuilderAction, connectionLifetime, dataSourceLifetime); + + /// + /// Registers an and an in the + /// . + /// + /// The to add services to. + /// An Npgsql connection string. + /// + /// The lifetime with which to register the in the container. + /// Defaults to . + /// + /// + /// The lifetime with which to register the service in the container. + /// Defaults to . + /// + /// The same service collection so that multiple calls can be chained. + public static IServiceCollection AddMultiHostNpgsqlSlimDataSource( + this IServiceCollection serviceCollection, + string connectionString, + ServiceLifetime connectionLifetime = ServiceLifetime.Transient, + ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton) + => AddMultiHostNpgsqlSlimDataSourceCore( serviceCollection, connectionString, dataSourceBuilderAction: null, connectionLifetime, dataSourceLifetime); static IServiceCollection AddNpgsqlDataSourceCore( @@ -133,7 +228,31 @@ static IServiceCollection AddNpgsqlDataSourceCore( return serviceCollection; } - static IServiceCollection AddNpgsqlMultiHostDataSourceCore( + static IServiceCollection AddNpgsqlSlimDataSourceCore( + this IServiceCollection serviceCollection, + string connectionString, + Action? 
dataSourceBuilderAction, + ServiceLifetime connectionLifetime, + ServiceLifetime dataSourceLifetime) + { + serviceCollection.TryAdd( + new ServiceDescriptor( + typeof(NpgsqlDataSource), + sp => + { + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(connectionString); + dataSourceBuilder.UseLoggerFactory(sp.GetService()); + dataSourceBuilderAction?.Invoke(dataSourceBuilder); + return dataSourceBuilder.Build(); + }, + dataSourceLifetime)); + + AddCommonServices(serviceCollection, connectionLifetime, dataSourceLifetime); + + return serviceCollection; + } + + static IServiceCollection AddMultiHostNpgsqlDataSourceCore( this IServiceCollection serviceCollection, string connectionString, Action? dataSourceBuilderAction, @@ -163,6 +282,36 @@ static IServiceCollection AddNpgsqlMultiHostDataSourceCore( return serviceCollection; } + static IServiceCollection AddMultiHostNpgsqlSlimDataSourceCore( + this IServiceCollection serviceCollection, + string connectionString, + Action? dataSourceBuilderAction, + ServiceLifetime connectionLifetime, + ServiceLifetime dataSourceLifetime) + { + serviceCollection.TryAdd( + new ServiceDescriptor( + typeof(NpgsqlMultiHostDataSource), + sp => + { + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(connectionString); + dataSourceBuilder.UseLoggerFactory(sp.GetService()); + dataSourceBuilderAction?.Invoke(dataSourceBuilder); + return dataSourceBuilder.BuildMultiHost(); + }, + dataSourceLifetime)); + + serviceCollection.TryAdd( + new ServiceDescriptor( + typeof(NpgsqlDataSource), + sp => sp.GetRequiredService(), + dataSourceLifetime)); + + AddCommonServices(serviceCollection, connectionLifetime, dataSourceLifetime); + + return serviceCollection; + } + static void AddCommonServices( IServiceCollection serviceCollection, ServiceLifetime connectionLifetime, diff --git a/test/Npgsql.DependencyInjection.Tests/DependencyInjectionTests.cs b/test/Npgsql.DependencyInjection.Tests/DependencyInjectionTests.cs index 4deefc6a5a..486dae69ed 100644 
--- a/test/Npgsql.DependencyInjection.Tests/DependencyInjectionTests.cs +++ b/test/Npgsql.DependencyInjection.Tests/DependencyInjectionTests.cs @@ -1,4 +1,5 @@ -using System.Data; +using System; +using System.Data; using System.Linq; using System.Threading.Tasks; using Microsoft.Extensions.DependencyInjection; @@ -9,13 +10,15 @@ namespace Npgsql.DependencyInjection.Tests; -public class DependencyInjectionTests +[TestFixture(DataSourceMode.Standard)] +[TestFixture(DataSourceMode.Slim)] +public class DependencyInjectionTests(DataSourceMode mode) { [Test] public async Task NpgsqlDataSource_is_registered_properly([Values] bool async) { var serviceCollection = new ServiceCollection(); - serviceCollection.AddNpgsqlDataSource(TestUtil.ConnectionString); + RegisterDataSource(serviceCollection, TestUtil.ConnectionString); await using var serviceProvider = serviceCollection.BuildServiceProvider(); var dataSource = serviceProvider.GetRequiredService(); @@ -29,7 +32,7 @@ public async Task NpgsqlDataSource_is_registered_properly([Values] bool async) public async Task NpgsqlMultiHostDataSource_is_registered_properly([Values] bool async) { var serviceCollection = new ServiceCollection(); - serviceCollection.AddMultiHostNpgsqlDataSource(TestUtil.ConnectionString); + RegisterMultiHostDataSource(serviceCollection, TestUtil.ConnectionString); await using var serviceProvider = serviceCollection.BuildServiceProvider(); var multiHostDataSource = serviceProvider.GetRequiredService(); @@ -46,7 +49,7 @@ public async Task NpgsqlMultiHostDataSource_is_registered_properly([Values] bool public void NpgsqlDataSource_is_registered_as_singleton_by_default() { var serviceCollection = new ServiceCollection(); - serviceCollection.AddNpgsqlDataSource(TestUtil.ConnectionString); + RegisterDataSource(serviceCollection, TestUtil.ConnectionString); using var serviceProvider = serviceCollection.BuildServiceProvider(); using var scope1 = serviceProvider.CreateScope(); @@ -64,7 +67,7 @@ public void 
NpgsqlDataSource_is_registered_as_singleton_by_default() public async Task NpgsqlConnection_is_registered_properly([Values] bool async) { var serviceCollection = new ServiceCollection(); - serviceCollection.AddNpgsqlDataSource(TestUtil.ConnectionString); + RegisterDataSource(serviceCollection, TestUtil.ConnectionString); using var serviceProvider = serviceCollection.BuildServiceProvider(); using var scope = serviceProvider.CreateScope(); @@ -84,7 +87,7 @@ public async Task NpgsqlConnection_is_registered_properly([Values] bool async) public void NpgsqlConnection_is_registered_as_transient_by_default() { var serviceCollection = new ServiceCollection(); - serviceCollection.AddNpgsqlDataSource("Host=localhost;Username=test;Password=test"); + RegisterDataSource(serviceCollection, "Host=localhost;Username=test;Password=test"); using var serviceProvider = serviceCollection.BuildServiceProvider(); using var scope1 = serviceProvider.CreateScope(); @@ -109,7 +112,7 @@ public async Task LoggerFactory_is_picked_up_from_ServiceCollection() var serviceCollection = new ServiceCollection(); serviceCollection.AddLogging(b => b.AddProvider(listLoggerProvider)); - serviceCollection.AddNpgsqlDataSource(TestUtil.ConnectionString); + RegisterDataSource(serviceCollection, TestUtil.ConnectionString); await using var serviceProvider = serviceCollection.BuildServiceProvider(); var dataSource = serviceProvider.GetRequiredService(); @@ -120,4 +123,24 @@ public async Task LoggerFactory_is_picked_up_from_ServiceCollection() Assert.That(listLoggerProvider.Log.Any(l => l.Id == NpgsqlEventId.CommandExecutionCompleted)); } + + private IServiceCollection RegisterDataSource(ServiceCollection serviceCollection, string connectionString) => mode switch + { + DataSourceMode.Standard => serviceCollection.AddNpgsqlDataSource(connectionString), + DataSourceMode.Slim => serviceCollection.AddNpgsqlSlimDataSource(connectionString), + _ => throw new NotSupportedException($"Mode {mode} not supported") + }; + + 
private IServiceCollection RegisterMultiHostDataSource(ServiceCollection serviceCollection, string connectionString) => mode switch + { + DataSourceMode.Standard => serviceCollection.AddMultiHostNpgsqlDataSource(connectionString), + DataSourceMode.Slim => serviceCollection.AddMultiHostNpgsqlSlimDataSource(connectionString), + _ => throw new NotSupportedException($"Mode {mode} not supported") + }; +} + +public enum DataSourceMode +{ + Standard, + Slim } From 5198e9127b7c471ce9a724cf78849644729caba0 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Sun, 8 Oct 2023 22:54:59 +0200 Subject: [PATCH 242/761] Add conversion operators to NpgsqlInet/Cidr (#5318) --- src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs | 47 ++++++++++----------------- src/Npgsql/PublicAPI.Unshipped.txt | 4 +-- 2 files changed, 19 insertions(+), 32 deletions(-) diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs b/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs index c4a69a0c58..fa36f7e6d2 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs @@ -430,23 +430,12 @@ public NpgsqlInet(IPAddress address) } public NpgsqlInet(string addr) - { - switch (addr.Split('/')) + => (Address, Netmask) = addr.Split('/') switch { - case { Length: 2 } segments: - Address = IPAddress.Parse(segments[0]); - Netmask = byte.Parse(segments[1]); - return; - - case { Length: 1 } segments: - Address = IPAddress.Parse(segments[0]); - Netmask = 32; - return; - - default: - throw new FormatException("Invalid number of parts in CIDR specification"); - } - } + { Length: 2 } segments => (IPAddress.Parse(segments[0]), byte.Parse(segments[1])), + { Length: 1 } segments => (IPAddress.Parse(segments[0]), (byte)32), + _ => throw new FormatException("Invalid number of parts in CIDR specification") + }; public override string ToString() => (Address.AddressFamily == AddressFamily.InterNetwork && Netmask == 32) || @@ -457,7 +446,7 @@ public override string ToString() public static explicit operator IPAddress(NpgsqlInet 
inet) => inet.Address; - public static explicit operator NpgsqlInet(IPAddress ip) + public static implicit operator NpgsqlInet(IPAddress ip) => new(ip); public void Deconstruct(out IPAddress address, out byte netmask) @@ -488,20 +477,18 @@ public NpgsqlCidr(IPAddress address, byte netmask) } public NpgsqlCidr(string addr) - { - switch (addr.Split('/')) + => (Address, Netmask) = addr.Split('/') switch { - case { Length: 2 } segments: - Address = IPAddress.Parse(segments[0]); - Netmask = byte.Parse(segments[1]); - return; - - case { Length: 1 } segments: - throw new FormatException("Missing netmask"); - default: - throw new FormatException("Invalid number of parts in CIDR specification"); - } - } + { Length: 2 } segments => (IPAddress.Parse(segments[0]), byte.Parse(segments[1])), + { Length: 1 } => throw new FormatException("Missing netmask"), + _ => throw new FormatException("Invalid number of parts in CIDR specification") + }; + + public static implicit operator NpgsqlInet(NpgsqlCidr cidr) + => new(cidr.Address, cidr.Netmask); + + public static explicit operator IPAddress(NpgsqlCidr cidr) + => cidr.Address; public override string ToString() => $"{Address}/{Netmask}"; diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index c3d776ae30..1264896889 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -114,12 +114,12 @@ override NpgsqlTypes.NpgsqlCidr.ToString() -> string! static Npgsql.INpgsqlTypeMapperExtensions.EnableDynamicJsonMappings(this T mapper, System.Text.Json.JsonSerializerOptions? serializerOptions = null, System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> T static Npgsql.INpgsqlTypeMapperExtensions.EnableRecordsAsTuples(this T mapper) -> T static Npgsql.INpgsqlTypeMapperExtensions.EnableUnmappedTypes(this T mapper) -> T -static NpgsqlTypes.NpgsqlInet.explicit operator NpgsqlTypes.NpgsqlInet(System.Net.IPAddress! 
ip) -> NpgsqlTypes.NpgsqlInet +static NpgsqlTypes.NpgsqlCidr.explicit operator System.Net.IPAddress!(NpgsqlTypes.NpgsqlCidr cidr) -> System.Net.IPAddress! +static NpgsqlTypes.NpgsqlCidr.implicit operator NpgsqlTypes.NpgsqlInet(NpgsqlTypes.NpgsqlCidr cidr) -> NpgsqlTypes.NpgsqlInet *REMOVED*Npgsql.NpgsqlParameter.ConvertedValue.get -> object? *REMOVED*Npgsql.NpgsqlParameter.ConvertedValue.set -> void *REMOVED*Npgsql.PostgresTypes.PostgresArrayType.PostgresArrayType(string! ns, string! internalName, uint oid, Npgsql.PostgresTypes.PostgresType! elementPostgresType) -> void *REMOVED*Npgsql.PostgresTypes.PostgresBaseType.PostgresBaseType(string! ns, string! internalName, uint oid) -> void -*REMOVED*static NpgsqlTypes.NpgsqlInet.implicit operator NpgsqlTypes.NpgsqlInet(System.Net.IPAddress! ip) -> NpgsqlTypes.NpgsqlInet *REMOVED*Npgsql.PostgresTypes.PostgresType.PostgresType(string! ns, string! name, string! internalName, uint oid) -> void *REMOVED*Npgsql.PostgresTypes.PostgresType.PostgresType(string! ns, string! name, uint oid) -> void *REMOVED*Npgsql.TypeMapping.INpgsqlTypeMapper.AddTypeResolverFactory(Npgsql.Internal.TypeHandling.TypeHandlerResolverFactory! 
resolverFactory) -> void From a4a1f5ae570bf0f84213dac8da0eb1ada531879f Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Tue, 10 Oct 2023 00:12:50 +0200 Subject: [PATCH 243/761] Fix thread switching issue (#5320) --- src/Npgsql/PoolingDataSource.cs | 29 ++++++++++++++++++++++------- 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/src/Npgsql/PoolingDataSource.cs b/src/Npgsql/PoolingDataSource.cs index 4d563207b4..f8a3204623 100644 --- a/src/Npgsql/PoolingDataSource.cs +++ b/src/Npgsql/PoolingDataSource.cs @@ -60,9 +60,6 @@ class PoolingDataSource : NpgsqlDataSource volatile int _isClearing; - static readonly ConcurrentExclusiveSchedulerPair ConstrainedConcurrencyScheduler = - new(TaskScheduler.Default, Math.Max(1, Environment.ProcessorCount / 2)); - #endregion internal sealed override (int Total, int Idle, int Busy) Statistics @@ -118,6 +115,8 @@ internal PoolingDataSource( _logger = LoggingConfiguration.ConnectionLogger; } + static SemaphoreSlim SyncOverAsyncSemaphore { get; } = new(Environment.ProcessorCount / 2); + internal sealed override ValueTask Get( NpgsqlConnection conn, NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken) { @@ -149,11 +148,27 @@ async ValueTask RentAsync( { try { - var task = _idleConnectorReader.ReadAsync(finalToken); - if (!async && !task.IsCompleted) - await new TaskSchedulerAwaitable(ConstrainedConcurrencyScheduler.ConcurrentScheduler); + if (async) + connector = await _idleConnectorReader.ReadAsync(finalToken).ConfigureAwait(false); + else + { + SyncOverAsyncSemaphore.Wait(finalToken); + try + { + var awaiter = _idleConnectorReader.ReadAsync(finalToken).GetAwaiter(); + var mres = new ManualResetEventSlim(false, 0); + + // Cancellation happens through the ReadAsync call, which will complete the task. 
+ awaiter.UnsafeOnCompleted(() => mres.Set()); + mres.Wait(CancellationToken.None); + connector = awaiter.GetResult(); + } + finally + { + SyncOverAsyncSemaphore.Release(); + } + } - connector = await task.ConfigureAwait(false); if (CheckIdleConnector(connector)) return connector; } From 11672d94e3d37f5fe68ec8e2775412c3ccd70593 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Tue, 10 Oct 2023 01:00:13 +0200 Subject: [PATCH 244/761] Make mres.Set be independent of sync ctx --- src/Npgsql/PoolingDataSource.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Npgsql/PoolingDataSource.cs b/src/Npgsql/PoolingDataSource.cs index f8a3204623..f5e20f18d2 100644 --- a/src/Npgsql/PoolingDataSource.cs +++ b/src/Npgsql/PoolingDataSource.cs @@ -155,7 +155,7 @@ async ValueTask RentAsync( SyncOverAsyncSemaphore.Wait(finalToken); try { - var awaiter = _idleConnectorReader.ReadAsync(finalToken).GetAwaiter(); + var awaiter = _idleConnectorReader.ReadAsync(finalToken).ConfigureAwait(false).GetAwaiter(); var mres = new ManualResetEventSlim(false, 0); // Cancellation happens through the ReadAsync call, which will complete the task. 
From 01f0bae69fa99a301af2e6dbc51d2eba771576bb Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Tue, 10 Oct 2023 01:57:04 +0200 Subject: [PATCH 245/761] Handle single core --- src/Npgsql/PoolingDataSource.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Npgsql/PoolingDataSource.cs b/src/Npgsql/PoolingDataSource.cs index f5e20f18d2..192a86c052 100644 --- a/src/Npgsql/PoolingDataSource.cs +++ b/src/Npgsql/PoolingDataSource.cs @@ -115,7 +115,7 @@ internal PoolingDataSource( _logger = LoggingConfiguration.ConnectionLogger; } - static SemaphoreSlim SyncOverAsyncSemaphore { get; } = new(Environment.ProcessorCount / 2); + static SemaphoreSlim SyncOverAsyncSemaphore { get; } = new(Math.Max(1, Environment.ProcessorCount / 2)); internal sealed override ValueTask Get( NpgsqlConnection conn, NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken) From 33e53a8a0d1940db6cd7140b4e162dc1cc48a21b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Oct 2023 23:37:24 +0200 Subject: [PATCH 246/761] Bump SystemVersion from 8.0.0-rc.1.23419.4 to 8.0.0-rc.2.23479.6 (#5324) Bumps `SystemVersion` from 8.0.0-rc.1.23419.4 to 8.0.0-rc.2.23479.6. 
Updates `Microsoft.Extensions.Logging.Abstractions` from 8.0.0-rc.1.23419.4 to 8.0.0-rc.2.23479.6 - [Release notes](https://github.com/dotnet/runtime/releases) - [Commits](https://github.com/dotnet/runtime/commits) Updates `Microsoft.Extensions.DependencyInjection.Abstractions` from 8.0.0-rc.1.23419.4 to 8.0.0-rc.2.23479.6 - [Release notes](https://github.com/dotnet/runtime/releases) - [Commits](https://github.com/dotnet/runtime/commits) Updates `System.Threading.Channels` from 8.0.0-rc.1.23419.4 to 8.0.0-rc.2.23479.6 - [Release notes](https://github.com/dotnet/runtime/releases) - [Commits](https://github.com/dotnet/runtime/commits) Updates `System.Collections.Immutable` from 8.0.0-rc.1.23419.4 to 8.0.0-rc.2.23479.6 - [Release notes](https://github.com/dotnet/runtime/releases) - [Commits](https://github.com/dotnet/runtime/commits) Updates `System.Text.Json` from 8.0.0-rc.1.23419.4 to 8.0.0-rc.2.23479.6 - [Release notes](https://github.com/dotnet/runtime/releases) - [Commits](https://github.com/dotnet/runtime/commits) Updates `System.Diagnostics.DiagnosticSource` from 8.0.0-rc.1.23419.4 to 8.0.0-rc.2.23479.6 - [Release notes](https://github.com/dotnet/runtime/releases) - [Commits](https://github.com/dotnet/runtime/commits) Updates `Microsoft.Extensions.Logging` from 8.0.0-rc.1.23419.4 to 8.0.0-rc.2.23479.6 - [Release notes](https://github.com/dotnet/runtime/releases) - [Commits](https://github.com/dotnet/runtime/commits) Updates `Microsoft.Extensions.Logging.Console` from 8.0.0-rc.1.23419.4 to 8.0.0-rc.2.23479.6 - [Release notes](https://github.com/dotnet/runtime/releases) - [Commits](https://github.com/dotnet/runtime/commits) Updates `Microsoft.Extensions.DependencyInjection` from 8.0.0-rc.1.23419.4 to 8.0.0-rc.2.23479.6 - [Release notes](https://github.com/dotnet/runtime/releases) - [Commits](https://github.com/dotnet/runtime/commits) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props 
b/Directory.Packages.props index 66707f188b..615e94b50d 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -1,6 +1,6 @@ - 8.0.0-rc.1.23419.4 + 8.0.0-rc.2.23479.6 $(SystemVersion) From 4c9921de2dfb48fb5a488787fc7422add3553f50 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Wed, 11 Oct 2023 11:45:49 +0200 Subject: [PATCH 247/761] Bump dotnet SDK to 8.0.0-rc.2 (#5325) --- .github/workflows/build.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/native-aot.yml | 2 +- .github/workflows/rich-code-nav.yml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 66e0ff5bd6..fef4b3176c 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -15,7 +15,7 @@ concurrency: cancel-in-progress: true env: - dotnet_sdk_version: '8.0.100-rc.1.23463.5' + dotnet_sdk_version: '8.0.100-rc.2.23502.2' postgis_version: 3 DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true # Windows comes with PG pre-installed, and defines the PGPASSWORD environment variable. 
Remove it as it interferes diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 93c46a180c..ce5b43bac7 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -32,7 +32,7 @@ concurrency: cancel-in-progress: true env: - dotnet_sdk_version: '8.0.100-rc.1.23463.5' + dotnet_sdk_version: '8.0.100-rc.2.23502.2' DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true jobs: diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index 1c6ecce4ba..5ac384bf1c 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -15,7 +15,7 @@ concurrency: cancel-in-progress: true env: - dotnet_sdk_version: '8.0.100-rc.1.23463.5' + dotnet_sdk_version: '8.0.100-rc.2.23502.2' DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true # Uncomment and edit the following to use nightly/preview builds # nuget_config: | diff --git a/.github/workflows/rich-code-nav.yml b/.github/workflows/rich-code-nav.yml index e47a8a2adb..1fd1f31349 100644 --- a/.github/workflows/rich-code-nav.yml +++ b/.github/workflows/rich-code-nav.yml @@ -9,7 +9,7 @@ on: - '*' env: - dotnet_sdk_version: '8.0.100-rc.1.23463.5' + dotnet_sdk_version: '8.0.100-rc.2.23502.2' DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true jobs: From e1745257fc0ff1c24ca6c7cfd2d48ac94e5d69b1 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Wed, 11 Oct 2023 16:55:16 +0200 Subject: [PATCH 248/761] Bump version to 8.0.0-rtm --- Directory.Build.props | 2 +- test/Npgsql.Tests/Types/NetworkTypeTests.cs | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/Directory.Build.props b/Directory.Build.props index bcae7c3d25..f145b53aa1 100644 --- a/Directory.Build.props +++ b/Directory.Build.props @@ -1,6 +1,6 @@  - 8.0.0-rc.2 + 8.0.0-rtm latest true enable diff --git a/test/Npgsql.Tests/Types/NetworkTypeTests.cs b/test/Npgsql.Tests/Types/NetworkTypeTests.cs index 994fdd45e4..f164b57d75 100644 --- a/test/Npgsql.Tests/Types/NetworkTypeTests.cs +++ 
b/test/Npgsql.Tests/Types/NetworkTypeTests.cs @@ -61,7 +61,6 @@ public Task Cidr() NpgsqlDbType.Cidr, isDefaultForWriting: false); -#pragma warning disable 618 // For NpgsqlInet [Test] public Task Inet_v4_as_NpgsqlInet() => AssertType( @@ -79,7 +78,6 @@ public Task Inet_v6_as_NpgsqlInet() "inet", NpgsqlDbType.Inet, isDefaultForReading: false); -#pragma warning restore 618 // For NpgsqlInet [Test] public Task Macaddr() From 736a0c4eaa852a0b39b54394a6f4c087060bd59e Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Thu, 12 Oct 2023 13:27:15 +0200 Subject: [PATCH 249/761] Add missing ODE assert exemptions for multiplexing --- test/Npgsql.Tests/TransactionTests.cs | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/test/Npgsql.Tests/TransactionTests.cs b/test/Npgsql.Tests/TransactionTests.cs index ca09836cc7..e0e61f95b4 100644 --- a/test/Npgsql.Tests/TransactionTests.cs +++ b/test/Npgsql.Tests/TransactionTests.cs @@ -66,7 +66,10 @@ public async Task CommitAsync([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Pre Assert.That(await conn.ExecuteScalarAsync($"SELECT COUNT(*) FROM {table}"), Is.EqualTo(1)); } - Assert.That(() => tx.Connection, Throws.Exception.TypeOf()); + // With multiplexing we can't assume that disposed NpgsqlTransaction will throw ObjectDisposedException + // Because disposed NpgsqlTransaction might be reused by another thread + if (!IsMultiplexing) + Assert.That(() => tx.Connection, Throws.Exception.TypeOf()); } [Test, Description("Basic insert within a rolled back transaction")] @@ -92,7 +95,10 @@ public async Task Rollback([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepar Assert.That(await conn.ExecuteScalarAsync($"SELECT COUNT(*) FROM {table}"), Is.EqualTo(0)); } - Assert.That(() => tx.Connection, Throws.Exception.TypeOf()); + // With multiplexing we can't assume that disposed NpgsqlTransaction will throw ObjectDisposedException + // Because disposed NpgsqlTransaction might be reused by another thread + if 
(!IsMultiplexing) + Assert.That(() => tx.Connection, Throws.Exception.TypeOf()); } [Test, Description("Basic insert within a rolled back transaction")] @@ -118,7 +124,10 @@ public async Task RollbackAsync([Values(PrepareOrNot.NotPrepared, PrepareOrNot.P Assert.That(await conn.ExecuteScalarAsync($"SELECT COUNT(*) FROM {table}"), Is.EqualTo(0)); } - Assert.That(() => tx.Connection, Throws.Exception.TypeOf()); + // With multiplexing we can't assume that disposed NpgsqlTransaction will throw ObjectDisposedException + // Because disposed NpgsqlTransaction might be reused by another thread + if (!IsMultiplexing) + Assert.That(() => tx.Connection, Throws.Exception.TypeOf()); } [Test, Description("Dispose a transaction in progress, should roll back")] From 437fdfb7d156b0082f01b9daff618626cf5176fa Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Thu, 12 Oct 2023 16:41:32 +0200 Subject: [PATCH 250/761] Fix DateTimeOffset infinity conversions (#5327) Fixes #5326 --- .../Converters/Temporal/DateTimeConverters.cs | 2 +- .../Types/DateTimeInfinityTests.cs | 260 ++++++------------ 2 files changed, 85 insertions(+), 177 deletions(-) diff --git a/src/Npgsql/Internal/Converters/Temporal/DateTimeConverters.cs b/src/Npgsql/Internal/Converters/Temporal/DateTimeConverters.cs index 0047e3c572..ed744bb099 100644 --- a/src/Npgsql/Internal/Converters/Temporal/DateTimeConverters.cs +++ b/src/Npgsql/Internal/Converters/Temporal/DateTimeConverters.cs @@ -40,7 +40,7 @@ public override bool CanConvert(DataFormat format, out BufferRequirements buffer } protected override DateTimeOffset ReadCore(PgReader reader) - => PgTimestamp.Decode(reader.ReadInt64(), DateTimeKind.Utc, _dateTimeInfinityConversions); + => new(PgTimestamp.Decode(reader.ReadInt64(), DateTimeKind.Utc, _dateTimeInfinityConversions), TimeSpan.Zero); protected override void WriteCore(PgWriter writer, DateTimeOffset value) { diff --git a/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs 
b/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs index bf2e0d0e65..8969215197 100644 --- a/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs +++ b/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs @@ -1,4 +1,5 @@ using System; +using System.Data; using System.Threading.Tasks; using NpgsqlTypes; using NUnit.Framework; @@ -13,191 +14,98 @@ namespace Npgsql.Tests.Types; #endif public class DateTimeInfinityTests : TestBase, IDisposable { - [Test] - public async Task TimestampTz_write() + static readonly TestCaseData[] TimestampDateTimeValues = { - await using var conn = await OpenConnectionAsync(); - - await using var cmd = new NpgsqlCommand("SELECT ($1 AT TIME ZONE 'UTC')::text", conn) - { - Parameters = - { - new() - { - Value = DisableDateTimeInfinityConversions ? DateTime.MinValue.ToUniversalTime().AddYears(1) : DateTime.MinValue, - NpgsqlDbType = NpgsqlDbType.TimestampTz - }, - } - }; - - Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(DisableDateTimeInfinityConversions ? "0002-01-01 00:00:00" : "-infinity")); - - cmd.Parameters[0].Value = DateTime.MaxValue; - - if (DisableDateTimeInfinityConversions) - Assert.That(async () => await cmd.ExecuteScalarAsync(), Throws.Exception.TypeOf()); - else - Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo("infinity")); - } - - [Test] - public async Task TimestampTz_read() - { - await using var conn = await OpenConnectionAsync(); - - await using var cmd = new NpgsqlCommand( - "SELECT '-infinity'::timestamp with time zone, 'infinity'::timestamp with time zone", - conn); - - await using var reader = await cmd.ExecuteReaderAsync(); - await reader.ReadAsync(); - - if (DisableDateTimeInfinityConversions) - { - Assert.That(() => reader[0], Throws.Exception.TypeOf()); - Assert.That(() => reader[1], Throws.Exception.TypeOf()); - } - else - { - Assert.That(reader[0], Is.EqualTo(DateTime.MinValue)); - Assert.That(reader[1], Is.EqualTo(DateTime.MaxValue)); - } - } - - [Test] - public async Task Timestamp_write() - { - await 
using var conn = await OpenConnectionAsync(); - - await using var cmd = new NpgsqlCommand("SELECT $1::text, $2::text", conn) - { - Parameters = - { - new() { Value = DateTime.MinValue, NpgsqlDbType = NpgsqlDbType.Timestamp }, - new() { Value = DateTime.MaxValue, NpgsqlDbType = NpgsqlDbType.Timestamp }, - } - }; - await using (var reader = await cmd.ExecuteReaderAsync()) - { - await reader.ReadAsync(); - - Assert.That(reader[0], Is.EqualTo(DisableDateTimeInfinityConversions ? "0001-01-01 00:00:00" : "-infinity")); - Assert.That(reader[1], Is.EqualTo(DisableDateTimeInfinityConversions ? "9999-12-31 23:59:59.999999" : "infinity")); - } - } - - [Test] - public async Task Timestamp_read() + new TestCaseData(DateTime.MinValue.AddYears(1), "0002-01-01 00:00:00", "0002-01-01 00:00:00") + .SetName("MinValue_AddYear"), + new TestCaseData(DateTime.MinValue, "0001-01-01 00:00:00", "-infinity") + .SetName("MinValue"), + new TestCaseData(DateTime.MaxValue, "9999-12-31 23:59:59.999999", "infinity") + .SetName("MaxValue"), + }; + + static readonly TestCaseData[] TimestampTzDateTimeValues = { - await using var conn = await OpenConnectionAsync(); - - await using var cmd = new NpgsqlCommand( - "SELECT '-infinity'::timestamp without time zone, 'infinity'::timestamp without time zone", - conn); - - await using var reader = await cmd.ExecuteReaderAsync(); - await reader.ReadAsync(); - - if (DisableDateTimeInfinityConversions) - { - Assert.That(() => reader[0], Throws.Exception.TypeOf()); - Assert.That(() => reader[1], Throws.Exception.TypeOf()); - } - else - { - Assert.That(reader[0], Is.EqualTo(DateTime.MinValue)); - Assert.That(reader[1], Is.EqualTo(DateTime.MaxValue)); - } - } - - [Test, NonParallelizable] - public async Task Date_DateTime_write() + new TestCaseData(DateTime.MinValue.AddYears(1), "0002-01-01 00:00:00+00", "0002-01-01 00:00:00+00") + .SetName("MinValue_AddYear"), + new TestCaseData(DateTime.MinValue, "0001-01-01 00:00:00+00", "-infinity") + .SetName("MinValue"), + new 
TestCaseData(DateTime.MaxValue, "9999-12-31 23:59:59.999999+00", "infinity") + .SetName("MaxValue"), + }; + + static readonly TestCaseData[] TimestampTzDateTimeOffsetValues = { - await using var conn = await OpenConnectionAsync(); - - await using var cmd = new NpgsqlCommand("SELECT $1::text, $2::text", conn) - { - Parameters = - { - new() { Value = DateTime.MinValue, NpgsqlDbType = NpgsqlDbType.Date }, - new() { Value = DateTime.MaxValue, NpgsqlDbType = NpgsqlDbType.Date } - } - }; - - await using var reader = await cmd.ExecuteReaderAsync(); - await reader.ReadAsync(); - - Assert.That(reader[0], Is.EqualTo(DisableDateTimeInfinityConversions ? "0001-01-01" : "-infinity")); - Assert.That(reader[1], Is.EqualTo(DisableDateTimeInfinityConversions ? "9999-12-31" : "infinity")); - } - - [Test] - public async Task Date_DateTime_read() + new TestCaseData(DateTimeOffset.MinValue.ToUniversalTime().AddYears(1), "0002-01-01 00:00:00+00", "0002-01-01 00:00:00+00") + .SetName("MinValue_AddYear"), + new TestCaseData(DateTimeOffset.MinValue, "0001-01-01 00:00:00+00", "-infinity") + .SetName("MinValue"), + new TestCaseData(DateTimeOffset.MaxValue, "9999-12-31 23:59:59.999999+00", "infinity") + .SetName("MaxValue"), + }; + + static readonly TestCaseData[] DateDateTimeValues = { - await using var conn = await OpenConnectionAsync(); - - await using var cmd = new NpgsqlCommand("SELECT '-infinity'::date, 'infinity'::date", conn); - - await using var reader = await cmd.ExecuteReaderAsync(); - await reader.ReadAsync(); - - if (DisableDateTimeInfinityConversions) - { - Assert.That(() => reader[0], Throws.Exception.TypeOf()); - Assert.That(() => reader[1], Throws.Exception.TypeOf()); - } - else - { - Assert.That(reader[0], Is.EqualTo(DateTime.MinValue)); - Assert.That(reader[1], Is.EqualTo(DateTime.MaxValue)); - } - } + new TestCaseData(DateTime.MinValue.AddYears(1), "0002-01-01", "0002-01-01") + .SetName("MinValue_AddYear"), + new TestCaseData(DateTime.MinValue, "0001-01-01", "-infinity") + 
.SetName("MinValue"), + new TestCaseData(DateTime.MaxValue, "9999-12-31", "infinity") + .SetName("MaxValue"), + }; + + // As we can't roundtrip DateTime.MaxValue due to precision differences with postgres we are lenient with equality for this particular value. + static readonly Func MaxValuePrecisionLenientComparer = + (expected, actual) => expected == DateTime.MaxValue && actual == new DateTime(expected.Ticks - 9) || actual == expected; + + [Test, TestCaseSource(nameof(TimestampDateTimeValues))] + public Task Timestamp_DateTime(DateTime dateTime, string sqlLiteral, string infinityConvertedSqlLiteral) + => AssertType(dateTime, DisableDateTimeInfinityConversions ? sqlLiteral : infinityConvertedSqlLiteral, + "timestamp without time zone", NpgsqlDbType.Timestamp, DbType.DateTime2, + comparer: MaxValuePrecisionLenientComparer, + isDefault: true); + + [Test, TestCaseSource(nameof(TimestampTzDateTimeValues))] + public Task TimestampTz_DateTime(DateTime dateTime, string sqlLiteral, string infinityConvertedSqlLiteral) + => AssertType(new(dateTime.Ticks, DateTimeKind.Utc), DisableDateTimeInfinityConversions ? sqlLiteral : infinityConvertedSqlLiteral, + "timestamp with time zone", NpgsqlDbType.TimestampTz, DbType.DateTime, DbType.DateTime, + comparer: MaxValuePrecisionLenientComparer, + isDefault: true, isNpgsqlDbTypeInferredFromClrType: false); + + [Test, TestCaseSource(nameof(TimestampTzDateTimeOffsetValues))] + public Task TimestampTz_DateTimeOffset(DateTimeOffset dateTime, string sqlLiteral, string infinityConvertedSqlLiteral) + => AssertType(dateTime, DisableDateTimeInfinityConversions ? 
sqlLiteral : infinityConvertedSqlLiteral, + "timestamp with time zone", NpgsqlDbType.TimestampTz, DbType.DateTime, DbType.DateTime, + comparer: (expected, actual) => MaxValuePrecisionLenientComparer(expected.DateTime, actual.DateTime), + isDefault: false); + + [Test, TestCaseSource(nameof(DateDateTimeValues))] + public Task Date_DateTime(DateTime dateTime, string sqlLiteral, string infinityConvertedSqlLiteral) + => AssertType(DisableDateTimeInfinityConversions ? dateTime.Date : dateTime, DisableDateTimeInfinityConversions ? sqlLiteral : infinityConvertedSqlLiteral, + "date", NpgsqlDbType.Date, DbType.Date, + isDefault: false); #if NET6_0_OR_GREATER - [Test] - public async Task Date_DateOnly_write() + static readonly TestCaseData[] DateOnlyDateTimeValues = { - await using var conn = await OpenConnectionAsync(); - - await using var cmd = new NpgsqlCommand("SELECT $1::text, $2::text", conn) - { - Parameters = - { - new() { Value = DateOnly.MinValue, NpgsqlDbType = NpgsqlDbType.Date }, - new() { Value = DateOnly.MaxValue, NpgsqlDbType = NpgsqlDbType.Date } - } - }; - - await using var reader = await cmd.ExecuteReaderAsync(); - await reader.ReadAsync(); - - Assert.That(reader[0], Is.EqualTo(DisableDateTimeInfinityConversions ? "0001-01-01" : "-infinity")); - Assert.That(reader[1], Is.EqualTo(DisableDateTimeInfinityConversions ? 
"9999-12-31" : "infinity")); - } - - [Test] - public async Task Date_DateOnly_read() - { - await using var conn = await OpenConnectionAsync(); - - await using var cmd = new NpgsqlCommand("SELECT '-infinity'::date, 'infinity'::date", conn); - - await using var reader = await cmd.ExecuteReaderAsync(); - await reader.ReadAsync(); - - if (DisableDateTimeInfinityConversions) - { - Assert.That(() => reader[0], Throws.Exception.TypeOf()); - Assert.That(() => reader[1], Throws.Exception.TypeOf()); - } - else - { - Assert.That(reader.GetFieldValue(0), Is.EqualTo(DateOnly.MinValue)); - Assert.That(reader.GetFieldValue(1), Is.EqualTo(DateOnly.MaxValue)); - } - } + new TestCaseData(DateOnly.MinValue.AddYears(1), "0002-01-01", "0002-01-01") + .SetName("MinValue_AddYear"), + new TestCaseData(DateOnly.MinValue, "0001-01-01", "-infinity") + .SetName("MinValue"), + new TestCaseData(DateOnly.MaxValue, "9999-12-31", "infinity") + .SetName("MaxValue"), + }; + + [Test, TestCaseSource(nameof(DateOnlyDateTimeValues))] + public Task Date_DateOnly(DateOnly dateTime, string sqlLiteral, string infinityConvertedSqlLiteral) + => AssertType(dateTime, + DisableDateTimeInfinityConversions ? sqlLiteral : infinityConvertedSqlLiteral, "date", NpgsqlDbType.Date, DbType.Date, + isDefault: false); #endif + NpgsqlDataSource? 
_dataSource; + protected override NpgsqlDataSource DataSource => _dataSource ??= CreateDataSource(csb => csb.Timezone = "UTC"); + public DateTimeInfinityTests(bool disableDateTimeInfinityConversions) { #if DEBUG From 54221ff8f5f6ed3fb1422e43c76733dcd7614ef1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 14 Oct 2023 00:32:39 +0200 Subject: [PATCH 251/761] Bump xunit from 2.5.1 to 2.5.2 (#5329) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 615e94b50d..9ca3671714 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -38,7 +38,7 @@ - + From 043104333a7e07639009a8ae77e7243da4dea39b Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Mon, 16 Oct 2023 11:31:26 +0200 Subject: [PATCH 252/761] Correct isCidr when writing cidr (#5338) --- .../Internal/Converters/Networking/NpgsqlCidrConverter.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Npgsql/Internal/Converters/Networking/NpgsqlCidrConverter.cs b/src/Npgsql/Internal/Converters/Networking/NpgsqlCidrConverter.cs index 249ec9a68f..c6d0ab8d88 100644 --- a/src/Npgsql/Internal/Converters/Networking/NpgsqlCidrConverter.cs +++ b/src/Npgsql/Internal/Converters/Networking/NpgsqlCidrConverter.cs @@ -18,5 +18,5 @@ protected override NpgsqlCidr ReadCore(PgReader reader) } protected override void WriteCore(PgWriter writer, NpgsqlCidr value) - => NpgsqlInetConverter.WriteImpl(writer, (value.Address, value.Netmask), isCidr: false); + => NpgsqlInetConverter.WriteImpl(writer, (value.Address, value.Netmask), isCidr: true); } From 9629205f6d32b2f0b6ae8ab56ecd5303c638d80a Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Mon, 16 Oct 2023 13:36:01 +0200 Subject: [PATCH 253/761] Properly ignore date component of a DateTimeOffset for timetz (#5334) Fixes #5332 --- .../Converters/Temporal/TimeConverters.cs | 2 +- 
test/Npgsql.Tests/Types/DateTimeTests.cs | 29 +++++++++---------- 2 files changed, 14 insertions(+), 17 deletions(-) diff --git a/src/Npgsql/Internal/Converters/Temporal/TimeConverters.cs b/src/Npgsql/Internal/Converters/Temporal/TimeConverters.cs index d2fbf60fda..e756a03b85 100644 --- a/src/Npgsql/Internal/Converters/Temporal/TimeConverters.cs +++ b/src/Npgsql/Internal/Converters/Temporal/TimeConverters.cs @@ -46,7 +46,7 @@ protected override DateTimeOffset ReadCore(PgReader reader) protected override void WriteCore(PgWriter writer, DateTimeOffset value) { - writer.WriteInt64(value.Ticks / 10); + writer.WriteInt64(value.TimeOfDay.Ticks / 10); writer.WriteInt32(-(int)(value.Offset.Ticks / TimeSpan.TicksPerSecond)); } } diff --git a/test/Npgsql.Tests/Types/DateTimeTests.cs b/test/Npgsql.Tests/Types/DateTimeTests.cs index 7382891cf5..310bd6b190 100644 --- a/test/Npgsql.Tests/Types/DateTimeTests.cs +++ b/test/Npgsql.Tests/Types/DateTimeTests.cs @@ -129,24 +129,21 @@ public Task Time_as_TimeOnly() #region Time with timezone - [Test] - public async Task TimeTz_as_DateTimeOffset() + static readonly TestCaseData[] TimeTzValues = { - await AssertTypeRead("13:03:45.51+02", - "time with time zone", new DateTimeOffset(1, 1, 2, 13, 3, 45, 510, TimeSpan.FromHours(2))); - - await AssertTypeWrite( - new DateTimeOffset(1, 1, 1, 13, 3, 45, 510, TimeSpan.FromHours(2)), - "13:03:45.51+02", - "time with time zone", - NpgsqlDbType.TimeTz, - isDefault: false); - } + new TestCaseData(new DateTimeOffset(1, 1, 2, 13, 3, 45, 510, TimeSpan.FromHours(2)), "13:03:45.51+02") + .SetName("Timezone"), + new TestCaseData(new DateTimeOffset(1, 1, 2, 1, 0, 45, 510, TimeSpan.FromHours(-3)), "01:00:45.51-03") + .SetName("Negative_timezone"), + new TestCaseData(new DateTimeOffset(1212720130000, TimeSpan.Zero), "09:41:12.013+00") + .SetName("Utc"), + new TestCaseData(new DateTimeOffset(1, 1, 2, 1, 0, 0, new TimeSpan(0, 2, 0, 0)), "01:00:00+02") + .SetName("Before_utc_zero"), + }; - [Test] - public Task 
TimeTz_before_utc_zero() - => AssertTypeRead("01:00:00+02", - "time with time zone", new DateTimeOffset(1, 1, 2, 1, 0, 0, new TimeSpan(0, 2, 0, 0))); + [Test, TestCaseSource(nameof(TimeTzValues))] + public Task TimeTz_as_DateTimeOffset(DateTimeOffset time, string sqlLiteral) + => AssertType(time, sqlLiteral, "time with time zone", NpgsqlDbType.TimeTz, isDefault: false); #endregion From e3a5481291cff1141008de3be20a3de6470869db Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Mon, 16 Oct 2023 13:36:16 +0200 Subject: [PATCH 254/761] Fix missed object check (#5335) --- src/Npgsql/NpgsqlParameter`.cs | 2 +- test/Npgsql.Tests/CommandParameterTests.cs | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/src/Npgsql/NpgsqlParameter`.cs b/src/Npgsql/NpgsqlParameter`.cs index 18ac5aff45..7c4117b299 100644 --- a/src/Npgsql/NpgsqlParameter`.cs +++ b/src/Npgsql/NpgsqlParameter`.cs @@ -108,7 +108,7 @@ private protected override void BindCore(bool allowNullReference = false) private protected override ValueTask WriteValue(bool async, PgWriter writer, CancellationToken cancellationToken) { - if (TypeInfo!.IsBoxing || _useSubStream) + if (typeof(T) == typeof(object) || TypeInfo!.IsBoxing || _useSubStream) return base.WriteValue(async, writer, cancellationToken); Debug.Assert(Converter is PgConverter); diff --git a/test/Npgsql.Tests/CommandParameterTests.cs b/test/Npgsql.Tests/CommandParameterTests.cs index aa2cb0ee15..1e4355df4b 100644 --- a/test/Npgsql.Tests/CommandParameterTests.cs +++ b/test/Npgsql.Tests/CommandParameterTests.cs @@ -201,6 +201,15 @@ await AssertTypeWrite(new[] {1, 1}, "{1,1}", "integer[]", NpgsqlDbType.I isNpgsqlDbTypeInferredFromClrType: true, skipArrayCheck: true); } + [Test] + public async Task Object_generic_parameter_works() + { + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT $1", conn); + cmd.Parameters.Add(new NpgsqlParameter { NpgsqlDbType = NpgsqlDbType.Integer, Value = 8 }); 
+ Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(8)); + } + public CommandParameterTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) { } From 8631f5d8b0099c1c323c5ff13bbed5ee54f5f25f Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Mon, 16 Oct 2023 13:36:32 +0200 Subject: [PATCH 255/761] Fix importing null values (#5333) Fixes #5330 --- src/Npgsql/NpgsqlBinaryImporter.cs | 16 +++++++++++- test/Npgsql.Tests/CopyTests.cs | 40 ++++++++++++++++++++++++++++++ 2 files changed, 55 insertions(+), 1 deletion(-) diff --git a/src/Npgsql/NpgsqlBinaryImporter.cs b/src/Npgsql/NpgsqlBinaryImporter.cs index b7335990d5..b85cf2de94 100644 --- a/src/Npgsql/NpgsqlBinaryImporter.cs +++ b/src/Npgsql/NpgsqlBinaryImporter.cs @@ -296,8 +296,22 @@ async Task Write(T value, NpgsqlParameter param, bool async, CancellationToke if (_column == -1) throw new InvalidOperationException("A row hasn't been started"); - if (typeof(T) == typeof(object) || typeof(T) == typeof(DBNull)) + // Statically map any DBNull value during importing, generic parameters when T = DBNull normally won't find any mapping. + if (typeof(T) == typeof(DBNull)) { + await WriteNull(async, cancellationToken).ConfigureAwait(false); + return; + } + + if (typeof(T) == typeof(object)) + { + // Allow null values for object typed parameters, parameters exclusively accept DBNull.Value when T = object. 
+ if (value == null || value is DBNull) + { + await WriteNull(async, cancellationToken).ConfigureAwait(false); + return; + } + if (param.GetType() != typeof(NpgsqlParameter)) { var newParam = _params[_column] = new NpgsqlParameter(); diff --git a/test/Npgsql.Tests/CopyTests.cs b/test/Npgsql.Tests/CopyTests.cs index 1ab6405956..cdf7800914 100644 --- a/test/Npgsql.Tests/CopyTests.cs +++ b/test/Npgsql.Tests/CopyTests.cs @@ -419,6 +419,46 @@ public async Task Import_direct_buffer() writer.Write(data); } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5330")] + public async Task Import_object_null() + { + using var conn = await OpenConnectionAsync(); + var table = await CreateTempTable(conn, "field TEXT[]"); + + using (var writer = conn.BeginBinaryImport($"COPY {table} (field) FROM STDIN BINARY")) + { + writer.StartRow(); + writer.Write(null, NpgsqlDbType.Boolean); + var rowsWritten = writer.Complete(); + Assert.That(rowsWritten, Is.EqualTo(1)); + } + + Assert.That(await conn.ExecuteScalarAsync($"SELECT field FROM {table}"), Is.EqualTo(DBNull.Value)); + } + + static readonly TestCaseData[] DBNullValues = + { + new TestCaseData(DBNull.Value).SetName("DBNull.Value"), + new TestCaseData(null).SetName("null") + }; + + [Test, TestCaseSource(nameof(DBNullValues))] + public async Task Import_dbnull(DBNull? 
value) + { + using var conn = await OpenConnectionAsync(); + var table = await CreateTempTable(conn, "field TEXT[]"); + + using (var writer = conn.BeginBinaryImport($"COPY {table} (field) FROM STDIN BINARY")) + { + writer.StartRow(); + writer.Write(value, NpgsqlDbType.Boolean); + var rowsWritten = writer.Complete(); + Assert.That(rowsWritten, Is.EqualTo(1)); + } + + Assert.That(await conn.ExecuteScalarAsync($"SELECT field FROM {table}"), Is.EqualTo(DBNull.Value)); + } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/2330")] public async Task Wrong_table_definition_binary_import() { From f7760f2bd022fcee6f250bd04df1fab5cac83903 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Oct 2023 00:51:01 +0200 Subject: [PATCH 256/761] Bump xunit from 2.5.2 to 2.5.3 (#5339) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 9ca3671714..190a85634e 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -38,7 +38,7 @@ - + From 10a6678173ab7fe3e2b6cf1f53eb3966d2732a7c Mon Sep 17 00:00:00 2001 From: Vyacheslav Brevnov Date: Tue, 17 Oct 2023 13:42:06 +0300 Subject: [PATCH 257/761] NpgsqlBox: If the corners are confused, swap them. (#5161) Closes #5113 --- src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs | 58 ++++++++++++++++++- test/Npgsql.Tests/Types/GeometricTypeTests.cs | 37 ++++++++++-- 2 files changed, 87 insertions(+), 8 deletions(-) diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs b/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs index fa36f7e6d2..912d3fc535 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs @@ -128,8 +128,62 @@ public override bool Equals(object? 
obj) /// public struct NpgsqlBox : IEquatable { - public NpgsqlPoint UpperRight { get; set; } - public NpgsqlPoint LowerLeft { get; set; } + public NpgsqlPoint UpperRight + { + get => _upperRight; + set + { + if (value.X < _lowerLeft.X) + { + _upperRight.X = _lowerLeft.X; + _lowerLeft.X = value.X; + } + else + { + _upperRight.X = value.X; + } + + if (value.Y < _lowerLeft.Y) + { + _upperRight.Y = _lowerLeft.Y; + _lowerLeft.Y = value.Y; + } + else + { + _upperRight.Y = value.Y; + } + } + } + private NpgsqlPoint _upperRight; + + + public NpgsqlPoint LowerLeft + { + get => _lowerLeft; + set + { + if (value.X > _upperRight.X) + { + _lowerLeft.X = _upperRight.X; + _upperRight.X = value.X; + } + else + { + _lowerLeft.X = value.X; + } + + if (value.Y > _upperRight.Y) + { + _lowerLeft.Y = _upperRight.Y; + _upperRight.Y = value.Y; + } + else + { + _lowerLeft.Y = value.Y; + } + } + } + private NpgsqlPoint _lowerLeft; public NpgsqlBox(NpgsqlPoint upperRight, NpgsqlPoint lowerLeft) : this() { diff --git a/test/Npgsql.Tests/Types/GeometricTypeTests.cs b/test/Npgsql.Tests/Types/GeometricTypeTests.cs index d84218bd12..67cdebf4b2 100644 --- a/test/Npgsql.Tests/Types/GeometricTypeTests.cs +++ b/test/Npgsql.Tests/Types/GeometricTypeTests.cs @@ -25,21 +25,46 @@ public Task LineSegment() => AssertType(new NpgsqlLSeg(1, 2, 3, 4), "[(1,2),(3,4)]", "lseg", NpgsqlDbType.LSeg); [Test] - public Task Box() - => AssertType(new NpgsqlBox(3, 4, 1, 2), "(4,3),(2,1)", "box", NpgsqlDbType.Box, + public async Task Box() + { + await AssertType( + new NpgsqlBox(top: 3, right: 4, bottom: 1, left: 2), + "(4,3),(2,1)", + "box", + NpgsqlDbType.Box, skipArrayCheck: true); // Uses semicolon instead of comma as separator + await AssertType( + new NpgsqlBox(top: 1, right: 2, bottom: 3, left: 4), + "(4,3),(2,1)", + "box", + NpgsqlDbType.Box, + skipArrayCheck: true); // Uses semicolon instead of comma as separator + } + [Test] - public Task Box_array() - => AssertType( + public async Task Box_array() + { + var 
boxarr = await AssertType( + new[] + { + new NpgsqlBox(top: 3, right: 4, bottom: 1, left: 2), + new NpgsqlBox(top: 5, right: 6, bottom: 3, left: 4), + }, + "{(4,3),(2,1);(6,5),(4,3)}", + "box[]", + NpgsqlDbType.Box | NpgsqlDbType.Array); + + await AssertType( new[] { - new NpgsqlBox(3, 4, 1, 2), - new NpgsqlBox(5, 6, 3, 4) + new NpgsqlBox(top: 1, right: 2, bottom: 3, left: 4), + new NpgsqlBox(top: 3, right: 4, bottom: 5, left: 6) }, "{(4,3),(2,1);(6,5),(4,3)}", "box[]", NpgsqlDbType.Box | NpgsqlDbType.Array); + } [Test] public Task Path_closed() From 6682cc37be550f307006bd0a5bdd0af4107eb906 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Tue, 17 Oct 2023 17:50:23 +0300 Subject: [PATCH 258/761] Add test for #5320 (#5322) --- test/Npgsql.Tests/ConnectionTests.cs | 43 ++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/test/Npgsql.Tests/ConnectionTests.cs b/test/Npgsql.Tests/ConnectionTests.cs index 01ce93b1d2..052f44462f 100644 --- a/test/Npgsql.Tests/ConnectionTests.cs +++ b/test/Npgsql.Tests/ConnectionTests.cs @@ -1395,6 +1395,49 @@ public async Task NoResetOnClose(bool noResetOnClose) : originalApplicationName)); } + [Test] + [Description("Test whether the internal NpgsqlConnection.Open method stays on the same thread with async=false")] + public async Task Sync_open_blocked_same_thread() + { + if (IsMultiplexing) + return; + + await using var dataSource = CreateDataSource(csb => + { + csb.MaxPoolSize = 1; + }); + + await using var openConnection = await dataSource.OpenConnectionAsync(); + + // 2 tasks are usually enough to reproduce the issue + const int taskCount = 2; + + var tcs = new TaskCompletionSource[taskCount]; + for (var i = 0; i < tcs.Length; i++) + { + tcs[i] = new TaskCompletionSource(TaskCreationOptions.RunContinuationsAsynchronously); + } + var sameThreadTasks = Enumerable.Range(0, taskCount).Select(x => Task.Run(async () => + { + var beforeOpenThread = Thread.CurrentThread; + tcs[x].SetResult(null); + using var conn = 
dataSource.CreateConnection(); + // even though we await it should complete synchronously due to async = false + await conn.Open(async: false, CancellationToken.None); + return beforeOpenThread == Thread.CurrentThread; + })).ToList(); + + await Task.WhenAll(tcs.Select(x => x.Task)); + // Just in case give them a second to block on getting a connection from the pool + await Task.Delay(1000); + await openConnection.CloseAsync(); + + foreach (var sameThreadTask in sameThreadTasks) + { + Assert.IsTrue(await sameThreadTask, "Synchronous open completed on different thread"); + } + } + #region Physical connection initialization [Test] From 47be8521d76cb44be92b4beb7e679acfba6af6e2 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Wed, 18 Oct 2023 14:44:02 +0200 Subject: [PATCH 259/761] Improve importing code (#5336) Fixes #5340 --- src/Npgsql/Internal/Postgres/PgTypeId.cs | 2 +- src/Npgsql/NpgsqlBinaryImporter.cs | 124 +++++++++++++---------- src/Npgsql/NpgsqlParameter.cs | 43 ++++++-- src/Npgsql/NpgsqlParameter`.cs | 2 + test/Npgsql.Tests/CopyTests.cs | 20 ++++ 5 files changed, 129 insertions(+), 62 deletions(-) diff --git a/src/Npgsql/Internal/Postgres/PgTypeId.cs b/src/Npgsql/Internal/Postgres/PgTypeId.cs index e363969a47..bf7457b116 100644 --- a/src/Npgsql/Internal/Postgres/PgTypeId.cs +++ b/src/Npgsql/Internal/Postgres/PgTypeId.cs @@ -27,7 +27,7 @@ public Oid Oid public static implicit operator PgTypeId(DataTypeName name) => new(name); public static implicit operator PgTypeId(Oid id) => new(id); - public override string ToString() => IsOid ? _oid.ToString() : _dataTypeName.Value; + public override string ToString() => IsOid ? 
"OID " + _oid : "DataTypeName " + _dataTypeName.Value; public bool Equals(PgTypeId other) => (this, other) switch diff --git a/src/Npgsql/NpgsqlBinaryImporter.cs b/src/Npgsql/NpgsqlBinaryImporter.cs index b85cf2de94..72574607a7 100644 --- a/src/Npgsql/NpgsqlBinaryImporter.cs +++ b/src/Npgsql/NpgsqlBinaryImporter.cs @@ -4,6 +4,7 @@ using Microsoft.Extensions.Logging; using Npgsql.BackendMessages; using Npgsql.Internal; +using Npgsql.Internal.Postgres; using NpgsqlTypes; using static Npgsql.Util.Statics; @@ -176,16 +177,28 @@ Task Write(bool async, T value, CancellationToken cancellationToken = default if (cancellationToken.IsCancellationRequested) return Task.FromCanceled(cancellationToken); - var p = _params[_column]; - if (p == null) + // First row, create the parameter object + ref var p = ref _params[_column]; + if (p is not NpgsqlParameter typedParam) + typedParam = new NpgsqlParameter(); + + // We only report previous values if anything actually changed, this saves some checks during the write. + // For object typed parameters when we don't have any other data we always have to pass the previousParam. + // In such cases the runtime type will define the entire postgres type lookup. + PgTypeInfo? previousTypeInfo = null; + PgConverter? previousConverter = null; + PgTypeId previousTypeId = default; + if (p is not null && (typeof(T) == typeof(object) || p._npgsqlDbType is not null || p._dataTypeName is not null)) { - // First row, create the parameter objects - _params[_column] = p = typeof(T) == typeof(object) - ? 
new NpgsqlParameter() - : new NpgsqlParameter(); + p.GetResolutionInfo(out previousTypeInfo, out previousConverter, out previousTypeId); + if (ReferenceEquals(p, typedParam)) + p.ResetDbType(); } - return Write(value, p, async, cancellationToken); + if (!ReferenceEquals(p, typedParam)) + p = typedParam; + + return Write(async, value, typedParam, previousTypeInfo, previousConverter, previousTypeId, cancellationToken); } /// @@ -225,20 +238,29 @@ Task Write(bool async, T value, NpgsqlDbType npgsqlDbType, CancellationToken if (cancellationToken.IsCancellationRequested) return Task.FromCanceled(cancellationToken); - var p = _params[_column]; - if (p == null) + // First row, create the parameter objects + ref var p = ref _params[_column]; + if (p is not NpgsqlParameter typedParam) + typedParam = new NpgsqlParameter { NpgsqlDbType = npgsqlDbType }; + + // We only report previous values if anything actually changed, this saves some checks during the write. + PgTypeInfo? previousTypeInfo = null; + PgConverter? previousConverter = null; + PgTypeId previousTypeId = default; + if (p is not null && (p._npgsqlDbType != npgsqlDbType || p._dataTypeName is not null)) { - // First row, create the parameter objects - _params[_column] = p = typeof(T) == typeof(object) || typeof(T) == typeof(DBNull) - ? 
new NpgsqlParameter() - : new NpgsqlParameter(); - p.NpgsqlDbType = npgsqlDbType; + p.GetResolutionInfo(out previousTypeInfo, out previousConverter, out previousTypeId); + if (ReferenceEquals(p, typedParam)) + { + p.ResetDbType(); + p.NpgsqlDbType = npgsqlDbType; + } } - if (npgsqlDbType != p.NpgsqlDbType) - throw new InvalidOperationException($"Can't change {nameof(p.NpgsqlDbType)} from {p.NpgsqlDbType} to {npgsqlDbType}"); + if (!ReferenceEquals(p, typedParam)) + p = typedParam; - return Write(value, p, async, cancellationToken); + return Write(async, value, typedParam, previousTypeInfo, previousConverter, previousTypeId, cancellationToken); } /// @@ -274,63 +296,57 @@ Task Write(bool async, T value, string dataTypeName, CancellationToken cancel if (cancellationToken.IsCancellationRequested) return Task.FromCanceled(cancellationToken); - var p = _params[_column]; - if (p == null) + // First row, create the parameter objects + ref var p = ref _params[_column]; + if (p is not NpgsqlParameter typedParam) + typedParam = new NpgsqlParameter { DataTypeName = dataTypeName }; + + // We only report previous values if anything actually changed, this saves some checks during the write. + PgTypeInfo? previousTypeInfo = null; + PgConverter? previousConverter = null; + PgTypeId previousTypeId = default; + if (p is not null && (p._npgsqlDbType is not null || p._dataTypeName != dataTypeName)) { - // First row, create the parameter objects - _params[_column] = p = typeof(T) == typeof(object) - ? 
new NpgsqlParameter() - : new NpgsqlParameter(); - p.DataTypeName = dataTypeName; + p.GetResolutionInfo(out previousTypeInfo, out previousConverter, out previousTypeId); + if (ReferenceEquals(p, typedParam)) + { + p.ResetDbType(); + p.DataTypeName = dataTypeName; + } } - //if (dataTypeName!= p.DataTypeName) - // throw new InvalidOperationException($"Can't change {nameof(p.DataTypeName)} from {p.DataTypeName} to {dataTypeName}"); + if (!ReferenceEquals(p, typedParam)) + p = typedParam; - return Write(value, p, async, cancellationToken); + return Write(async, value, typedParam, previousTypeInfo, previousConverter, previousTypeId, cancellationToken); } - async Task Write(T value, NpgsqlParameter param, bool async, CancellationToken cancellationToken = default) + async Task Write(bool async, T value, NpgsqlParameter param, PgTypeInfo? previousTypeInfo, PgConverter? previousConverter, PgTypeId previousTypeId, CancellationToken cancellationToken = default) { CheckReady(); if (_column == -1) throw new InvalidOperationException("A row hasn't been started"); // Statically map any DBNull value during importing, generic parameters when T = DBNull normally won't find any mapping. - if (typeof(T) == typeof(DBNull)) + // Also allow null values for object typed parameters, parameters exclusively accept DBNull.Value when T = object. + if (typeof(T) == typeof(DBNull) || (typeof(T) == typeof(object) && (value == null || value is DBNull))) { await WriteNull(async, cancellationToken).ConfigureAwait(false); return; } - if (typeof(T) == typeof(object)) - { - // Allow null values for object typed parameters, parameters exclusively accept DBNull.Value when T = object. 
- if (value == null || value is DBNull) - { - await WriteNull(async, cancellationToken).ConfigureAwait(false); - return; - } + param.TypedValue = value; + param.ResolveTypeInfo(_connector.SerializerOptions); - if (param.GetType() != typeof(NpgsqlParameter)) - { - var newParam = _params[_column] = new NpgsqlParameter(); - newParam.NpgsqlDbType = param.NpgsqlDbType; - param = newParam; - } - param.Value = value; - } - else + if (previousTypeInfo is not null && previousConverter is not null && param.PgTypeId != previousTypeId) { - if (param is not NpgsqlParameter typedParam) - { - _params[_column] = typedParam = new NpgsqlParameter(); - typedParam.NpgsqlDbType = param.NpgsqlDbType; - param = typedParam; - } - typedParam.TypedValue = value; + var currentPgTypeId = param.PgTypeId; + param.SetResolutionInfo(previousTypeInfo, previousConverter, previousTypeId); + throw new InvalidOperationException($"Write for column {_column} resolves to a different PostgreSQL type: {currentPgTypeId} than the first row resolved to ({previousTypeId}). " + + $"Please make sure to use clr types that resolve to the same PostgreSQL type across rows. " + + $"Alternatively pass the same NpgsqlDbType or DataTypeName to ensure the PostgreSQL type ends up to be identical." ); } - param.ResolveTypeInfo(_connector.SerializerOptions); + param.Bind(out _, out _); try { diff --git a/src/Npgsql/NpgsqlParameter.cs b/src/Npgsql/NpgsqlParameter.cs index eda05a7746..dab9f89484 100644 --- a/src/Npgsql/NpgsqlParameter.cs +++ b/src/Npgsql/NpgsqlParameter.cs @@ -2,6 +2,7 @@ using System.ComponentModel; using System.Data; using System.Data.Common; +using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.IO; using System.Threading; @@ -26,8 +27,8 @@ public class NpgsqlParameter : DbParameter, IDbDataParameter, ICloneable private protected byte _scale; private protected int _size; - private protected NpgsqlDbType? _npgsqlDbType; - private protected string? 
_dataTypeName; + internal NpgsqlDbType? _npgsqlDbType; + internal string? _dataTypeName; private protected string _name = string.Empty; object? _value; @@ -40,7 +41,7 @@ public class NpgsqlParameter : DbParameter, IDbDataParameter, ICloneable internal PgTypeInfo? TypeInfo { get; private set; } - internal PgTypeId PgTypeId { get; set; } + internal PgTypeId PgTypeId { get; private set; } internal PgConverter? Converter { get; private set; } internal DataFormat Format { get; private protected set; } @@ -277,6 +278,8 @@ public override object? Value { if (value is null || _value?.GetType() != value.GetType()) ResetTypeInfo(); + else + ResetBindingInfo(); _value = value; } } @@ -496,11 +499,28 @@ public sealed override string SourceColumn Type? GetValueType(Type staticValueType) => staticValueType != typeof(object) ? staticValueType : Value?.GetType(); + internal void GetResolutionInfo(out PgTypeInfo? typeInfo, out PgConverter? converter, out PgTypeId pgTypeId) + { + typeInfo = TypeInfo; + converter = Converter; + pgTypeId = PgTypeId; + } + + internal void SetResolutionInfo(PgTypeInfo typeInfo, PgConverter converter, PgTypeId pgTypeId) + { + if (WriteSize is not null) + ResetBindingInfo(); + + TypeInfo = typeInfo; + Converter = converter; + PgTypeId = pgTypeId; + } + /// Attempt to resolve a type info based on available (postgres) type information on the parameter. internal void ResolveTypeInfo(PgSerializerOptions options) { - var previouslyBound = TypeInfo?.Options == options; - if (!previouslyBound) + var previouslyResolved = TypeInfo?.Options == options; + if (!previouslyResolved) { var staticValueType = StaticValueType; var valueType = GetValueType(StaticValueType); @@ -551,7 +571,7 @@ internal void ResolveTypeInfo(PgSerializerOptions options) // This step isn't part of BindValue because we need to know the PgTypeId beforehand for things like SchemaOnly with null values. 
// We never reuse resolutions for resolvers across executions as a mutable value itself may influence the result. // TODO we could expose a property on a Converter/TypeInfo to indicate whether it's immutable, at that point we can reuse. - if (!previouslyBound || TypeInfo is PgResolverTypeInfo) + if (!previouslyResolved || TypeInfo is PgResolverTypeInfo) { ResetConverterResolution(); var resolution = ResolveConverter(TypeInfo!); @@ -720,10 +740,19 @@ void ResetConverterResolution() ResetBindingInfo(); } - void ResetBindingInfo() + private protected void ResetBindingInfo() { + if (WriteSize is null) + { + Debug.Assert(_writeState == default && _useSubStream == default && Format == default && _bufferRequirement == default); + return; + } + if (_writeState is not null) + { TypeInfo?.DisposeWriteState(_writeState); + _writeState = null; + } if (_useSubStream) { _useSubStream = false; diff --git a/src/Npgsql/NpgsqlParameter`.cs b/src/Npgsql/NpgsqlParameter`.cs index 7c4117b299..5f6291771c 100644 --- a/src/Npgsql/NpgsqlParameter`.cs +++ b/src/Npgsql/NpgsqlParameter`.cs @@ -28,6 +28,8 @@ public T? 
TypedValue { if (typeof(T) == typeof(object) && (value is null || _typedValue?.GetType() != value.GetType())) ResetTypeInfo(); + else + ResetBindingInfo(); _typedValue = value; } } diff --git a/test/Npgsql.Tests/CopyTests.cs b/test/Npgsql.Tests/CopyTests.cs index cdf7800914..764c3db808 100644 --- a/test/Npgsql.Tests/CopyTests.cs +++ b/test/Npgsql.Tests/CopyTests.cs @@ -386,6 +386,26 @@ public async Task Import_string_array() Assert.That(await conn.ExecuteScalarAsync($"SELECT field FROM {table}"), Is.EqualTo(data)); } + [Test] + public async Task Import_reused_instance_mapping_info_identical_or_throws() + { + using var conn = await OpenConnectionAsync(); + var table = await CreateTempTable(conn, "field int4"); + + var data = 8; + using (var writer = conn.BeginBinaryImport($"COPY {table} (field) FROM STDIN BINARY")) + { + writer.StartRow(); + writer.Write(data, NpgsqlDbType.Integer); + writer.StartRow(); + Assert.Throws(Is.TypeOf().With.Property("Message").StartsWith("Write for column 0 resolves to a different PostgreSQL type"), + () => writer.Write(data, "int2")); + // Should be recoverable by using the same type again. 
+ writer.Write(data, "int4"); + writer.Complete(); + } + } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/816")] public async Task Import_string_with_buffer_length() { From 365ab4aa75821a3670493b34fa919bb6f6608cc7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 20 Oct 2023 21:44:31 +0000 Subject: [PATCH 260/761] Bump AdoNet.Specification.Tests from 2.0.0-beta.1 to 2.0.0-beta.2 (#5344) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 190a85634e..607a177ad4 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -41,7 +41,7 @@ - + From f723a24df93c2ee8cdd927559b6749dc24c747f4 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Sun, 22 Oct 2023 18:01:53 +0200 Subject: [PATCH 261/761] Fix list based converter resolver (#5347) Fixes #5346 --- .../Internal/Converters/ArrayConverter.cs | 12 +++++++--- src/Npgsql/Internal/TypeInfoMapping.cs | 8 +++---- test/Npgsql.Tests/Types/DateTimeTests.cs | 22 +++++++++++++++---- 3 files changed, 31 insertions(+), 11 deletions(-) diff --git a/src/Npgsql/Internal/Converters/ArrayConverter.cs b/src/Npgsql/Internal/Converters/ArrayConverter.cs index 2801714cc5..e8390f2f2f 100644 --- a/src/Npgsql/Internal/Converters/ArrayConverter.cs +++ b/src/Npgsql/Internal/Converters/ArrayConverter.cs @@ -557,9 +557,15 @@ public ArrayConverterResolver(PgResolverTypeInfo elementTypeInfo, Type effective protected override PgTypeId GetPgTypeId(PgTypeId effectivePgTypeId) => Options.GetArrayTypeId(effectivePgTypeId); protected override PgConverter CreateConverter(PgConverterResolution effectiveResolution) - => typeof(T).IsConstructedGenericType && typeof(T).GetGenericTypeDefinition() == typeof(List<>) - ? 
new ListBasedArrayConverter(effectiveResolution) - : new ArrayBasedArrayConverter(effectiveResolution, _effectiveType); + { + if (typeof(T).IsConstructedGenericType && typeof(T).GetGenericTypeDefinition() == typeof(List<>)) + return new ListBasedArrayConverter(effectiveResolution); + + if (typeof(T) == typeof(Array) || typeof(T).IsArray) + return new ArrayBasedArrayConverter(effectiveResolution, _effectiveType); + + throw new NotSupportedException($"Unknown type T: {typeof(T).FullName}"); + } protected override PgConverterResolution? GetEffectiveResolution(T? values, PgTypeId? expectedEffectivePgTypeId) { diff --git a/src/Npgsql/Internal/TypeInfoMapping.cs b/src/Npgsql/Internal/TypeInfoMapping.cs index b27402c0f7..24df062efb 100644 --- a/src/Npgsql/Internal/TypeInfoMapping.cs +++ b/src/Npgsql/Internal/TypeInfoMapping.cs @@ -629,10 +629,10 @@ static ArrayBasedArrayConverter CreateArrayBasedConverter CreateListBasedConverter(TypeInfoMapping mapping, PgTypeInfo elemInfo) + static ListBasedArrayConverter, TElement> CreateListBasedConverter(TypeInfoMapping mapping, PgTypeInfo elemInfo) { if (!elemInfo.IsBoxing) - return new ListBasedArrayConverter(elemInfo.GetResolution()); + return new ListBasedArrayConverter, TElement>(elemInfo.GetResolution()); ThrowBoxingNotSupported(resolver: false); return default; @@ -647,10 +647,10 @@ static ArrayConverterResolver CreateArrayBasedConverterResolver return default; } - static ArrayConverterResolver CreateListBasedConverterResolver(TypeInfoMapping mapping, PgResolverTypeInfo elemInfo) + static ArrayConverterResolver, TElement> CreateListBasedConverterResolver(TypeInfoMapping mapping, PgResolverTypeInfo elemInfo) { if (!elemInfo.IsBoxing) - return new ArrayConverterResolver(elemInfo, mapping.Type); + return new ArrayConverterResolver, TElement>(elemInfo, mapping.Type); ThrowBoxingNotSupported(resolver: true); return default; diff --git a/test/Npgsql.Tests/Types/DateTimeTests.cs b/test/Npgsql.Tests/Types/DateTimeTests.cs index 
310bd6b190..f05ea65bc7 100644 --- a/test/Npgsql.Tests/Types/DateTimeTests.cs +++ b/test/Npgsql.Tests/Types/DateTimeTests.cs @@ -1,4 +1,5 @@ using System; +using System.Collections.Generic; using System.Data; using System.Threading.Tasks; using NpgsqlTypes; @@ -160,11 +161,17 @@ public Task TimeTz_as_DateTimeOffset(DateTimeOffset time, string sqlLiteral) }; [Test, TestCaseSource(nameof(TimestampValues))] - public Task Timestamp_as_DateTime(DateTime dateTime, string sqlLiteral) - => AssertType(dateTime, sqlLiteral, "timestamp without time zone", NpgsqlDbType.Timestamp, DbType.DateTime2, + public async Task Timestamp_as_DateTime(DateTime dateTime, string sqlLiteral) + { + await AssertType(dateTime, sqlLiteral, "timestamp without time zone", NpgsqlDbType.Timestamp, DbType.DateTime2, // Explicitly check kind as well. comparer: (actual, expected) => actual.Kind == expected.Kind && actual.Equals(expected)); + await AssertType( + new List { dateTime, dateTime }, $$"""{"{{sqlLiteral}}","{{sqlLiteral}}"}""", "timestamp without time zone[]", NpgsqlDbType.Timestamp | NpgsqlDbType.Array, + isDefaultForReading: false); + } + [Test] public Task Timestamp_cannot_write_utc_DateTime() => AssertTypeUnsupportedWrite(new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), "timestamp without time zone"); @@ -252,11 +259,18 @@ await AssertType( }; [Test, TestCaseSource(nameof(TimestampTzWriteValues))] - public Task Timestamptz_as_DateTime(DateTime dateTime, string sqlLiteral) - => AssertType(dateTime, sqlLiteral, "timestamp with time zone", NpgsqlDbType.TimestampTz, DbType.DateTime, + public async Task Timestamptz_as_DateTime(DateTime dateTime, string sqlLiteral) + { + await AssertType(dateTime, sqlLiteral, "timestamp with time zone", NpgsqlDbType.TimestampTz, DbType.DateTime, // Explicitly check kind as well. 
comparer: (actual, expected) => actual.Kind == expected.Kind && actual.Equals(expected)); + await AssertType( + new List { dateTime, dateTime }, $$"""{"{{sqlLiteral}}","{{sqlLiteral}}"}""", "timestamp with time zone[]", NpgsqlDbType.TimestampTz | NpgsqlDbType.Array, + isDefaultForReading: false); + + } + [Test] public async Task Timestamptz_infinity_as_DateTime() { From 4ccfcd71b690afd4f8e2aa4e7c6642826a92d356 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Sun, 22 Oct 2023 19:07:56 +0200 Subject: [PATCH 262/761] Fix Npgsql{Slim}DataSourceBuilder name translator (#5348) Fixes #5345 --- src/Npgsql/NpgsqlDataSourceBuilder.cs | 3 -- src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 10 +++-- src/Npgsql/TypeMapping/GlobalTypeMapper.cs | 4 -- test/Npgsql.Tests/Types/CompositeTests.cs | 50 ++++++++++++++++++++++ 4 files changed, 56 insertions(+), 11 deletions(-) diff --git a/src/Npgsql/NpgsqlDataSourceBuilder.cs b/src/Npgsql/NpgsqlDataSourceBuilder.cs index 0d9d79ec58..8837a82782 100644 --- a/src/Npgsql/NpgsqlDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlDataSourceBuilder.cs @@ -3,15 +3,12 @@ using System.Diagnostics.CodeAnalysis; using System.Net.Security; using System.Security.Cryptography.X509Certificates; -using System.Text.Json; -using System.Text.Json.Nodes; using System.Threading; using System.Threading.Tasks; using Microsoft.Extensions.Logging; using Npgsql.Internal; using Npgsql.Internal.Resolvers; using Npgsql.TypeMapping; -using NpgsqlTypes; namespace Npgsql; diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index 929cd5a850..dc4d53060d 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -3,7 +3,6 @@ using System.Diagnostics.CodeAnalysis; using System.Net.Security; using System.Security.Cryptography.X509Certificates; -using System.Text.Json; using System.Threading; using System.Threading.Tasks; using Microsoft.Extensions.Logging; @@ -11,7 +10,6 @@ using 
Npgsql.Internal.Resolvers; using Npgsql.Properties; using Npgsql.TypeMapping; -using NpgsqlTypes; namespace Npgsql; @@ -72,7 +70,7 @@ static NpgsqlSlimDataSourceBuilder() public NpgsqlSlimDataSourceBuilder(string? connectionString = null) { ConnectionStringBuilder = new NpgsqlConnectionStringBuilder(connectionString); - _userTypeMapper = new(); + _userTypeMapper = new() { DefaultNameTranslator = GlobalTypeMapper.Instance.DefaultNameTranslator }; // Reverse order AddTypeInfoResolver(UnsupportedTypeInfoResolver); AddTypeInfoResolver(new AdoTypeInfoResolver()); @@ -257,7 +255,11 @@ public NpgsqlSlimDataSourceBuilder UsePeriodicPasswordProvider( #region Type mapping /// - public INpgsqlNameTranslator DefaultNameTranslator { get; set; } = GlobalTypeMapper.Instance.DefaultNameTranslator; + public INpgsqlNameTranslator DefaultNameTranslator + { + get => _userTypeMapper.DefaultNameTranslator; + set => _userTypeMapper.DefaultNameTranslator = value; + } /// public INpgsqlTypeMapper MapEnum<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] TEnum>(string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) diff --git a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs index 5aba125808..c67044af27 100644 --- a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs +++ b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs @@ -2,13 +2,9 @@ using System.Collections.Generic; using System.Diagnostics.CodeAnalysis; using System.Linq; -using System.Text.Json; -using System.Text.Json.Nodes; using System.Threading; using Npgsql.Internal; using Npgsql.Internal.Postgres; -using Npgsql.Internal.Resolvers; -using NpgsqlTypes; namespace Npgsql.TypeMapping; diff --git a/test/Npgsql.Tests/Types/CompositeTests.cs b/test/Npgsql.Tests/Types/CompositeTests.cs index c62496b8b8..96e7dac0fa 100644 --- a/test/Npgsql.Tests/Types/CompositeTests.cs +++ b/test/Npgsql.Tests/Types/CompositeTests.cs @@ -31,6 +31,56 @@ await AssertType( npgsqlDbType: null); } + [Test] + public async Task Basic_with_custom_default_translator() + { + await using var adminConnection = await OpenConnectionAsync(); + var type = await GetTempTypeName(adminConnection); + + await adminConnection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS (x int, s text)"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.DefaultNameTranslator = new CustomTranslator(); + dataSourceBuilder.MapComposite(type); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + await AssertType( + connection, + new SomeComposite { SomeText = "foo", X = 8 }, + "(8,foo)", + type, + npgsqlDbType: null); + } + + [Test] + public async Task Basic_with_custom_translator() + { + await using var adminConnection = await OpenConnectionAsync(); + var type = await GetTempTypeName(adminConnection); + + await adminConnection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS (x int, s text)"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.MapComposite(type, new CustomTranslator()); + await using var 
dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + await AssertType( + connection, + new SomeComposite { SomeText = "foo", X = 8 }, + "(8,foo)", + type, + npgsqlDbType: null); + } + + class CustomTranslator : INpgsqlNameTranslator + { + public string TranslateTypeName(string clrName) => throw new NotImplementedException(); + + public string TranslateMemberName(string clrName) => clrName[0].ToString().ToLowerInvariant(); + } + #pragma warning disable CS0618 // GlobalTypeMapper is obsolete [Test, NonParallelizable] public async Task Global_mapping() From 11b2d76396a459591eb679c79b2c52f34bb1af90 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Thu, 26 Oct 2023 15:50:49 +0200 Subject: [PATCH 263/761] Properly take offset into account during single segment decode (#5353) Fixes #5351 --- src/Npgsql/Shims/EncodingExtensions.cs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/Npgsql/Shims/EncodingExtensions.cs b/src/Npgsql/Shims/EncodingExtensions.cs index ae5622ce9d..bc5cb8651a 100644 --- a/src/Npgsql/Shims/EncodingExtensions.cs +++ b/src/Npgsql/Shims/EncodingExtensions.cs @@ -141,16 +141,20 @@ public static string GetString(this Encoding encoding, in ReadOnlySequence #else var rented = false; byte[] arr; + var offset = 0; var memory = bytes.First; if (MemoryMarshal.TryGetArray(memory, out var segment)) + { arr = segment.Array!; + offset = segment.Offset; + } else { rented = true; arr = ArrayPool.Shared.Rent(memory.Length); bytes.First.Span.CopyTo(arr); } - var ret = encoding.GetString(arr, 0, memory.Length); + var ret = encoding.GetString(arr, offset, memory.Length); if (rented) ArrayPool.Shared.Return(arr); return ret; From 7bc04bcc09de8463ac7c2bd435ea15c336114b86 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 26 Oct 2023 23:46:25 +0200 Subject: [PATCH 264/761] Bump Microsoft.Data.SqlClient from 5.1.1 to 5.1.2 (#5354) 
--- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 607a177ad4..0da0e92a0c 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -46,7 +46,7 @@ - + From 6b1e70c677068d10870a14bf22b5c34485102395 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Sun, 29 Oct 2023 18:02:02 +0300 Subject: [PATCH 265/761] Fix query cancellation on timeout for netstandard (#5357) Fixes #5356 --- src/Npgsql/Internal/NpgsqlReadBuffer.cs | 112 +++++++++++------------- 1 file changed, 52 insertions(+), 60 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlReadBuffer.cs b/src/Npgsql/Internal/NpgsqlReadBuffer.cs index cb28028815..9fb5c82afe 100644 --- a/src/Npgsql/Internal/NpgsqlReadBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlReadBuffer.cs @@ -161,25 +161,23 @@ int ReadWithTimeout(Span buffer) isStreamBroken = connector.IsSecure && ex is IOException; #endif - if (!isStreamBroken) + // If we should attempt PostgreSQL cancellation, do it the first time we get a timeout. + // TODO: As an optimization, we can still attempt to send a cancellation request, but after + // that immediately break the connection + if (connector.AttemptPostgresCancellation && + !connector.PostgresCancellationPerformed && + connector.PerformPostgresCancellation() && + !isStreamBroken) { - // If we should attempt PostgreSQL cancellation, do it the first time we get a timeout. - // TODO: As an optimization, we can still attempt to send a cancellation request, but after - // that immediately break the connection - if (connector.AttemptPostgresCancellation && - !connector.PostgresCancellationPerformed && - connector.PerformPostgresCancellation()) + // Note that if the cancellation timeout is negative, we flow down and break the + // connection immediately. 
+ var cancellationTimeout = connector.Settings.CancellationTimeout; + if (cancellationTimeout >= 0) { - // Note that if the cancellation timeout is negative, we flow down and break the - // connection immediately. - var cancellationTimeout = connector.Settings.CancellationTimeout; - if (cancellationTimeout >= 0) - { - if (cancellationTimeout > 0) - Timeout = TimeSpan.FromMilliseconds(cancellationTimeout); + if (cancellationTimeout > 0) + Timeout = TimeSpan.FromMilliseconds(cancellationTimeout); - continue; - } + continue; } } @@ -230,27 +228,24 @@ async ValueTask ReadWithTimeoutAsync(Memory buffer, CancellationToken // See #4305. isStreamBroken = connector.IsSecure && ex is IOException; #endif - - if (!isStreamBroken) + // If we should attempt PostgreSQL cancellation, do it the first time we get a timeout. + // TODO: As an optimization, we can still attempt to send a cancellation request, but after + // that immediately break the connection + if (connector.AttemptPostgresCancellation && + !connector.PostgresCancellationPerformed && + connector.PerformPostgresCancellation() && + !isStreamBroken) { - // If we should attempt PostgreSQL cancellation, do it the first time we get a timeout. - // TODO: As an optimization, we can still attempt to send a cancellation request, but after - // that immediately break the connection - if (connector.AttemptPostgresCancellation && - !connector.PostgresCancellationPerformed && - connector.PerformPostgresCancellation()) + // Note that if the cancellation timeout is negative, we flow down and break the + // connection immediately. + var cancellationTimeout = connector.Settings.CancellationTimeout; + if (cancellationTimeout >= 0) { - // Note that if the cancellation timeout is negative, we flow down and break the - // connection immediately. 
- var cancellationTimeout = connector.Settings.CancellationTimeout; - if (cancellationTimeout >= 0) - { - if (cancellationTimeout > 0) - Timeout = TimeSpan.FromMilliseconds(cancellationTimeout); + if (cancellationTimeout > 0) + Timeout = TimeSpan.FromMilliseconds(cancellationTimeout); - finalCt = Cts.Start(cancellationToken); - continue; - } + finalCt = Cts.Start(cancellationToken); + continue; } } @@ -361,35 +356,32 @@ static async Task EnsureLong( // See #4305. isStreamBroken = connector.IsSecure && e is IOException; #endif + // When reading notifications (Wait), just throw TimeoutException or + // OperationCanceledException immediately. + // Nothing to cancel, and no breaking of the connection. + if (readingNotifications && !isStreamBroken) + throw CreateException(connector); - if (!isStreamBroken) + // If we should attempt PostgreSQL cancellation, do it the first time we get a timeout. + // TODO: As an optimization, we can still attempt to send a cancellation request, but after + // that immediately break the connection + if (connector.AttemptPostgresCancellation && + !connector.PostgresCancellationPerformed && + connector.PerformPostgresCancellation() && + !isStreamBroken) { - // When reading notifications (Wait), just throw TimeoutException or - // OperationCanceledException immediately. - // Nothing to cancel, and no breaking of the connection. - if (readingNotifications) - throw CreateException(connector); - - // If we should attempt PostgreSQL cancellation, do it the first time we get a timeout. - // TODO: As an optimization, we can still attempt to send a cancellation request, but after - // that immediately break the connection - if (connector.AttemptPostgresCancellation && - !connector.PostgresCancellationPerformed && - connector.PerformPostgresCancellation()) + // Note that if the cancellation timeout is negative, we flow down and break the + // connection immediately. 
+ var cancellationTimeout = connector.Settings.CancellationTimeout; + if (cancellationTimeout >= 0) { - // Note that if the cancellation timeout is negative, we flow down and break the - // connection immediately. - var cancellationTimeout = connector.Settings.CancellationTimeout; - if (cancellationTimeout >= 0) - { - if (cancellationTimeout > 0) - buffer.Timeout = TimeSpan.FromMilliseconds(cancellationTimeout); - - if (async) - finalCt = buffer.Cts.Start(); - - continue; - } + if (cancellationTimeout > 0) + buffer.Timeout = TimeSpan.FromMilliseconds(cancellationTimeout); + + if (async) + finalCt = buffer.Cts.Start(); + + continue; } } From 1530dee1a0709de67ee0502bff221710b0747bd1 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Sun, 29 Oct 2023 18:52:18 +0100 Subject: [PATCH 266/761] Replace DefaultJsonTypeInfoResolver by a custom resolver (#5359) --- .../Resolvers/JsonDynamicTypeInfoResolver.cs | 1 - .../Resolvers/JsonTypeInfoResolvers.cs | 30 +++++++++++++------ 2 files changed, 21 insertions(+), 10 deletions(-) diff --git a/src/Npgsql/Internal/Resolvers/JsonDynamicTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/JsonDynamicTypeInfoResolver.cs index 26170d8a92..6084fb1bef 100644 --- a/src/Npgsql/Internal/Resolvers/JsonDynamicTypeInfoResolver.cs +++ b/src/Npgsql/Internal/Resolvers/JsonDynamicTypeInfoResolver.cs @@ -6,7 +6,6 @@ using System.Text.Json.Serialization.Metadata; using Npgsql.Internal.Converters; using Npgsql.Internal.Postgres; -using Npgsql.Properties; namespace Npgsql.Internal.Resolvers; diff --git a/src/Npgsql/Internal/Resolvers/JsonTypeInfoResolvers.cs b/src/Npgsql/Internal/Resolvers/JsonTypeInfoResolvers.cs index 7af57fde45..7ba40e4921 100644 --- a/src/Npgsql/Internal/Resolvers/JsonTypeInfoResolvers.cs +++ b/src/Npgsql/Internal/Resolvers/JsonTypeInfoResolvers.cs @@ -1,5 +1,4 @@ using System; -using System.Diagnostics.CodeAnalysis; using System.Text.Json; using System.Text.Json.Serialization.Metadata; using Npgsql.Internal.Converters; @@ -9,24 
+8,25 @@ namespace Npgsql.Internal.Resolvers; class JsonTypeInfoResolver : IPgTypeInfoResolver { + static JsonSerializerOptions? DefaultSerializerOptions; + protected TypeInfoMappingCollection Mappings { get; } = new(); public JsonTypeInfoResolver(JsonSerializerOptions? serializerOptions = null) => AddTypeInfos(Mappings, serializerOptions); - [UnconditionalSuppressMessage("Trimming", "IL2026", Justification = "Only used to request rooted and statically known types (JsonDocument,JsonElement etc).")] - [UnconditionalSuppressMessage("Aot", "IL3050", Justification = "Only used to request rooted and statically known types (JsonDocument,JsonElement etc).")] static void AddTypeInfos(TypeInfoMappingCollection mappings, JsonSerializerOptions? serializerOptions = null) { -#if NET7_0_OR_GREATER - serializerOptions ??= JsonSerializerOptions.Default; -#else if (serializerOptions is null) { - serializerOptions = new JsonSerializerOptions(); - serializerOptions.TypeInfoResolver = new DefaultJsonTypeInfoResolver(); + serializerOptions = DefaultSerializerOptions; + if (serializerOptions is null) + { + serializerOptions = new JsonSerializerOptions(); + serializerOptions.TypeInfoResolver = new BasicJsonTypeInfoResolver(); + DefaultSerializerOptions = serializerOptions; + } } -#endif // Jsonb is the first default for JsonDocument foreach (var dataTypeName in new[] { DataTypeNames.Jsonb, DataTypeNames.Json }) @@ -51,6 +51,18 @@ protected static void AddArrayInfos(TypeInfoMappingCollection mappings) public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) => Mappings.Find(type, dataTypeName, options); + + sealed class BasicJsonTypeInfoResolver : IJsonTypeInfoResolver + { + public JsonTypeInfo? 
GetTypeInfo(Type type, JsonSerializerOptions options) + { + if (type == typeof(JsonDocument)) + return JsonMetadataServices.CreateValueInfo(options, JsonMetadataServices.JsonDocumentConverter); + if (type == typeof(JsonElement)) + return JsonMetadataServices.CreateValueInfo(options, JsonMetadataServices.JsonElementConverter); + return null; + } + } } sealed class JsonArrayTypeInfoResolver : JsonTypeInfoResolver, IPgTypeInfoResolver From a609af6e463126ccc4aac37ede4bed30dcbb26b8 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Tue, 31 Oct 2023 17:37:29 +0100 Subject: [PATCH 267/761] Fix TimeSpan to map to PG interval by default (#5364) Fixes #5363 --- .../Internal/Resolvers/AdoTypeInfoResolver.cs | 22 +++++++++---------- test/Npgsql.Tests/Types/DateTimeTests.cs | 8 +++---- 2 files changed, 13 insertions(+), 17 deletions(-) diff --git a/src/Npgsql/Internal/Resolvers/AdoTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/AdoTypeInfoResolver.cs index e01af49bb3..7ae40ef9a4 100644 --- a/src/Npgsql/Internal/Resolvers/AdoTypeInfoResolver.cs +++ b/src/Npgsql/Internal/Resolvers/AdoTypeInfoResolver.cs @@ -226,6 +226,12 @@ static void AddInfos(TypeInfoMappingCollection mappings) static (options, mapping, _) => mapping.CreateInfo(options, new DateOnlyDateConverter(options.EnableDateTimeInfinityConversions))); #endif + // Interval + mappings.AddStructType(DataTypeNames.Interval, + static (options, mapping, _) => mapping.CreateInfo(options, new TimeSpanIntervalConverter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Interval, + static (options, mapping, _) => mapping.CreateInfo(options, new NpgsqlIntervalConverter())); + // Time mappings.AddStructType(DataTypeNames.Time, static (options, mapping, _) => mapping.CreateInfo(options, new TimeSpanTimeConverter()), isDefault: true); @@ -241,13 +247,6 @@ static void AddInfos(TypeInfoMappingCollection mappings) static (options, mapping, _) => mapping.CreateInfo(options, new DateTimeOffsetTimeTzConverter()), 
MatchRequirement.DataTypeName); - // Interval - mappings.AddStructType(DataTypeNames.Interval, - static (options, mapping, _) => mapping.CreateInfo(options, new TimeSpanIntervalConverter()), - MatchRequirement.DataTypeName); - mappings.AddStructType(DataTypeNames.Interval, - static (options, mapping, _) => mapping.CreateInfo(options, new NpgsqlIntervalConverter())); - // Uuid mappings.AddStructType(DataTypeNames.Uuid, static (options, mapping, _) => mapping.CreateInfo(options, new GuidUuidConverter()), isDefault: true); @@ -411,6 +410,10 @@ protected static void AddArrayInfos(TypeInfoMappingCollection mappings) mappings.AddStructArrayType(DataTypeNames.Date); #endif + // Interval + mappings.AddStructArrayType(DataTypeNames.Interval); + mappings.AddStructArrayType(DataTypeNames.Interval); + // Time mappings.AddStructArrayType(DataTypeNames.Time); mappings.AddStructArrayType(DataTypeNames.Time); @@ -420,11 +423,6 @@ protected static void AddArrayInfos(TypeInfoMappingCollection mappings) // TimeTz mappings.AddStructArrayType(DataTypeNames.TimeTz); - - // Interval - mappings.AddStructArrayType(DataTypeNames.Interval); - mappings.AddStructArrayType(DataTypeNames.Interval); - // Uuid mappings.AddStructArrayType(DataTypeNames.Uuid); diff --git a/test/Npgsql.Tests/Types/DateTimeTests.cs b/test/Npgsql.Tests/Types/DateTimeTests.cs index f05ea65bc7..434b87705f 100644 --- a/test/Npgsql.Tests/Types/DateTimeTests.cs +++ b/test/Npgsql.Tests/Types/DateTimeTests.cs @@ -510,7 +510,7 @@ public async Task Array_of_nullable_timestamptz() [Test, TestCaseSource(nameof(IntervalValues))] public Task Interval_as_TimeSpan(TimeSpan timeSpan, string sqlLiteral) - => AssertType(timeSpan, sqlLiteral, "interval", NpgsqlDbType.Interval, isDefaultForWriting: false); + => AssertType(timeSpan, sqlLiteral, "interval", NpgsqlDbType.Interval); [Test] public Task Interval_write_as_TimeSpan_truncates_ticks() @@ -518,8 +518,7 @@ public Task Interval_write_as_TimeSpan_truncates_ticks() new TimeSpan(new 
TimeSpan(2, 3, 4).Ticks + 1), "02:03:04", "interval", - NpgsqlDbType.Interval, - isDefault: false); + NpgsqlDbType.Interval); [Test] public Task Interval_as_NpgsqlInterval() @@ -527,8 +526,7 @@ public Task Interval_as_NpgsqlInterval() new NpgsqlInterval(2, 15, 7384005000), "2 mons 15 days 02:03:04.005", "interval", NpgsqlDbType.Interval, - isDefaultForReading: false, - isDefaultForWriting: false); + isDefaultForReading: false); [Test] public Task Interval_with_months_cannot_read_as_TimeSpan() From 1e02c2007c56a9cdf64f817848a9e4e62b190a7b Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Tue, 31 Oct 2023 18:02:50 +0100 Subject: [PATCH 268/761] Add a flag to allow mixed io to GetStream (#5365) Fixes #5362 --- .../Internal/NetTopologySuiteConverter.cs | 4 +- src/Npgsql/Internal/PgWriter.cs | 44 ++++++++++++++----- 2 files changed, 33 insertions(+), 15 deletions(-) diff --git a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteConverter.cs b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteConverter.cs index 467356164e..45597e7059 100644 --- a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteConverter.cs +++ b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteConverter.cs @@ -32,10 +32,8 @@ public override Size GetSize(SizeContext context, T value, ref object? 
writeStat return (int)lengthStream.Length; } -#pragma warning disable CA2252 // GetStream() is a "preview" feature public override void Write(PgWriter writer, T value) - => _writer.Write(value, writer.GetStream()); -#pragma warning restore CA2252 + => _writer.Write(value, writer.GetStream(allowMixedIO: true)); // PostGisReader/PostGisWriter doesn't support async public override ValueTask WriteAsync(PgWriter writer, T value, CancellationToken cancellationToken = default) diff --git a/src/Npgsql/Internal/PgWriter.cs b/src/Npgsql/Internal/PgWriter.cs index c1e2983e17..adc511846e 100644 --- a/src/Npgsql/Internal/PgWriter.cs +++ b/src/Npgsql/Internal/PgWriter.cs @@ -366,6 +366,9 @@ async ValueTask Core(ReadOnlyMemory data, Encoding encoding, CancellationT } public void WriteBytes(ReadOnlySpan buffer) + => WriteBytes(allowMixedIO: false, buffer); + + internal void WriteBytes(bool allowMixedIO, ReadOnlySpan buffer) { while (!buffer.IsEmpty) { @@ -374,11 +377,14 @@ public void WriteBytes(ReadOnlySpan buffer) Advance(write); buffer = buffer.Slice(write); if (Remaining is 0) - Flush(); + Flush(allowWhenNonBlocking: allowMixedIO); } } public ValueTask WriteBytesAsync(ReadOnlyMemory buffer, CancellationToken cancellationToken = default) + => WriteBytesAsync(allowMixedIO: false, buffer, cancellationToken); + + internal ValueTask WriteBytesAsync(bool allowMixedIO, ReadOnlyMemory buffer, CancellationToken cancellationToken) { if (buffer.Length <= Remaining) { @@ -402,9 +408,13 @@ async ValueTask Core(ReadOnlyMemory buffer, CancellationToken cancellation } } } - - public Stream GetStream() - => new PgWriterStream(this); + /// + /// Gets a that can be used to write to the underlying buffer. + /// + /// Blocking flushes during writes that were expected to be non-blocking and vice versa cause an exception to be thrown unless allowMixedIO is set to true, false by default. + /// The stream. 
+ public Stream GetStream(bool allowMixedIO = false) + => new PgWriterStream(this, allowMixedIO); public bool ShouldFlush(Size bufferRequirement) => ShouldFlush(bufferRequirement is { Kind: SizeKind.UpperBound } @@ -414,13 +424,16 @@ public bool ShouldFlush(Size bufferRequirement) public bool ShouldFlush(int byteCount) => Remaining < byteCount && FlushMode is not FlushMode.None; public void Flush(TimeSpan timeout = default) + => Flush(allowWhenNonBlocking: false, timeout); + + void Flush(bool allowWhenNonBlocking, TimeSpan timeout = default) { switch (FlushMode) { case FlushMode.None: return; - case FlushMode.NonBlocking: - throw new NotSupportedException($"Cannot call {nameof(Flush)} on a non-blocking {nameof(PgWriter)}, you might need to override {nameof(PgConverter.WriteAsync)} on {nameof(PgConverter)} if you want to call flush."); + case FlushMode.NonBlocking when !allowWhenNonBlocking: + throw new NotSupportedException($"Cannot call {nameof(Flush)} on a non-blocking {nameof(PgWriter)}, call FlushAsync instead."); } if (_writer is not IStreamingWriter writer) @@ -432,12 +445,15 @@ public void Flush(TimeSpan timeout = default) } public ValueTask FlushAsync(CancellationToken cancellationToken = default) + => FlushAsync(allowWhenBlocking: false, cancellationToken); + + ValueTask FlushAsync(bool allowWhenBlocking, CancellationToken cancellationToken = default) { switch (FlushMode) { case FlushMode.None: return new(); - case FlushMode.Blocking: + case FlushMode.Blocking when !allowWhenBlocking: throw new NotSupportedException($"Cannot call {nameof(FlushAsync)} on a blocking {nameof(PgWriter)}, call Flush instead."); } @@ -487,9 +503,13 @@ public ValueTask BeginNestedWriteAsync(Size bufferRequirement, sealed class PgWriterStream : Stream { readonly PgWriter _writer; + readonly bool _allowMixedIO; - internal PgWriterStream(PgWriter writer) - => _writer = writer; + internal PgWriterStream(PgWriter writer, bool allowMixedIO) + { + _writer = writer; + _allowMixedIO = 
allowMixedIO; + } public override void Write(byte[] buffer, int offset, int count) => Write(async: false, buffer: buffer, offset: offset, count: count, CancellationToken.None).GetAwaiter().GetResult(); @@ -513,15 +533,15 @@ Task Write(bool async, byte[] buffer, int offset, int count, CancellationToken c if (cancellationToken.IsCancellationRequested) return Task.FromCanceled(cancellationToken); - return _writer.WriteBytesAsync(buffer, cancellationToken).AsTask(); + return _writer.WriteBytesAsync(_allowMixedIO, buffer, cancellationToken).AsTask(); } - _writer.WriteBytes(new Span(buffer, offset, count)); + _writer.WriteBytes(_allowMixedIO, new Span(buffer, offset, count)); return Task.CompletedTask; } #if !NETSTANDARD2_0 - public override void Write(ReadOnlySpan buffer) => _writer.WriteBytes(buffer); + public override void Write(ReadOnlySpan buffer) => _writer.WriteBytes(_allowMixedIO, buffer); public override ValueTask WriteAsync(ReadOnlyMemory buffer, CancellationToken cancellationToken = default) { From 52b5809e5990da81977ee711b26d24cc2bde4c91 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Wed, 1 Nov 2023 21:29:57 +0300 Subject: [PATCH 269/761] Optimize connection usage in tests (#5319) --- test/Npgsql.Tests/CommandTests.cs | 6 ++--- test/Npgsql.Tests/ConnectionTests.cs | 4 ++-- test/Npgsql.Tests/DataSourceTests.cs | 4 ++-- test/Npgsql.Tests/MultipleHostsTests.cs | 2 +- test/Npgsql.Tests/StoredProcedureTests.cs | 24 +++++++++---------- test/Npgsql.Tests/Support/TestBase.cs | 11 +++++---- test/Npgsql.Tests/TestUtil.cs | 5 ++-- .../Types/DateTimeInfinityTests.cs | 10 +++++--- .../Npgsql.Tests/Types/LegacyDateTimeTests.cs | 1 + test/Npgsql.Tests/Types/MultirangeTests.cs | 3 +++ test/Npgsql.Tests/Types/RangeTests.cs | 3 +++ 11 files changed, 42 insertions(+), 31 deletions(-) diff --git a/test/Npgsql.Tests/CommandTests.cs b/test/Npgsql.Tests/CommandTests.cs index 0cbd9692e9..cd105548be 100644 --- a/test/Npgsql.Tests/CommandTests.cs +++ 
b/test/Npgsql.Tests/CommandTests.cs @@ -131,7 +131,7 @@ public async Task Multiple_statements_large_first_command() [NonParallelizable] // Disables sql rewriting public async Task Legacy_batching_is_not_supported_when_EnableSqlParsing_is_disabled() { - using var _ = DisableSqlRewriting(ClearDataSources); + using var _ = DisableSqlRewriting(); using var conn = await OpenConnectionAsync(); using var cmd = new NpgsqlCommand("SELECT 1; SELECT 2", conn); @@ -143,7 +143,7 @@ public async Task Legacy_batching_is_not_supported_when_EnableSqlParsing_is_disa [NonParallelizable] // Disables sql rewriting public async Task Positional_parameters_are_supported_when_EnableSqlParsing_is_disabled() { - using var _ = DisableSqlRewriting(ClearDataSources); + using var _ = DisableSqlRewriting(); using var conn = await OpenConnectionAsync(); using var cmd = new NpgsqlCommand("SELECT $1", conn); @@ -155,7 +155,7 @@ public async Task Positional_parameters_are_supported_when_EnableSqlParsing_is_d [NonParallelizable] // Disables sql rewriting public async Task Named_parameters_are_not_supported_when_EnableSqlParsing_is_disabled() { - using var _ = DisableSqlRewriting(ClearDataSources); + using var _ = DisableSqlRewriting(); using var conn = await OpenConnectionAsync(); using var cmd = new NpgsqlCommand("SELECT @p", conn); diff --git a/test/Npgsql.Tests/ConnectionTests.cs b/test/Npgsql.Tests/ConnectionTests.cs index 052f44462f..497cb888a2 100644 --- a/test/Npgsql.Tests/ConnectionTests.cs +++ b/test/Npgsql.Tests/ConnectionTests.cs @@ -1046,10 +1046,10 @@ public void Clone() [Test] public async Task Clone_with_data_source() { - await using var connection = await SharedDataSource.OpenConnectionAsync(); + await using var connection = await DataSource.OpenConnectionAsync(); await using var clonedConnection = (NpgsqlConnection)((ICloneable)connection).Clone(); - Assert.That(clonedConnection.NpgsqlDataSource, Is.SameAs(SharedDataSource)); + Assert.That(clonedConnection.NpgsqlDataSource, 
Is.SameAs(DataSource)); Assert.DoesNotThrowAsync(() => clonedConnection.OpenAsync()); } diff --git a/test/Npgsql.Tests/DataSourceTests.cs b/test/Npgsql.Tests/DataSourceTests.cs index b6a7266012..d2d8c78d21 100644 --- a/test/Npgsql.Tests/DataSourceTests.cs +++ b/test/Npgsql.Tests/DataSourceTests.cs @@ -185,7 +185,7 @@ public void No_password_without_PersistSecurityInfo() [Test] public async Task Cannot_access_connection_transaction_on_data_source_command() { - await using var command = SharedDataSource.CreateCommand(); + await using var command = DataSource.CreateCommand(); Assert.That(() => command.Connection, Throws.Exception.TypeOf()); Assert.That(() => command.Connection = null, Throws.Exception.TypeOf()); @@ -199,7 +199,7 @@ public async Task Cannot_access_connection_transaction_on_data_source_command() [Test] public async Task Cannot_access_connection_transaction_on_data_source_batch() { - await using var batch = SharedDataSource.CreateBatch(); + await using var batch = DataSource.CreateBatch(); Assert.That(() => batch.Connection, Throws.Exception.TypeOf()); Assert.That(() => batch.Connection = null, Throws.Exception.TypeOf()); diff --git a/test/Npgsql.Tests/MultipleHostsTests.cs b/test/Npgsql.Tests/MultipleHostsTests.cs index 5de416672b..2b2c3f5304 100644 --- a/test/Npgsql.Tests/MultipleHostsTests.cs +++ b/test/Npgsql.Tests/MultipleHostsTests.cs @@ -957,7 +957,7 @@ async Task Query(NpgsqlDataSource dataSource) [NonParallelizable] // Disables sql rewriting public async Task Multiple_hosts_with_disabled_sql_rewriting() { - using var _ = DisableSqlRewriting(ClearDataSources); + using var _ = DisableSqlRewriting(); var dataSourceBuilder = new NpgsqlDataSourceBuilder(ConnectionString) { diff --git a/test/Npgsql.Tests/StoredProcedureTests.cs b/test/Npgsql.Tests/StoredProcedureTests.cs index a77a6c8d85..8666740f74 100644 --- a/test/Npgsql.Tests/StoredProcedureTests.cs +++ b/test/Npgsql.Tests/StoredProcedureTests.cs @@ -15,17 +15,17 @@ public class 
StoredProcedureTests : TestBase [TestCase(true, true)] public async Task With_input_parameters(bool withPositional, bool withNamed) { - var table = await CreateTempTable(SharedDataSource, "foo int, bar int"); - var sproc = await GetTempProcedureName(SharedDataSource); + var table = await CreateTempTable(DataSource, "foo int, bar int"); + var sproc = await GetTempProcedureName(DataSource); - await SharedDataSource.ExecuteNonQueryAsync(@$" + await DataSource.ExecuteNonQueryAsync(@$" CREATE PROCEDURE {sproc}(a int, b int) LANGUAGE SQL AS $$ INSERT INTO {table} VALUES (a, b); $$"); - await using (var command = SharedDataSource.CreateCommand(sproc)) + await using (var command = DataSource.CreateCommand(sproc)) { command.CommandType = CommandType.StoredProcedure; @@ -40,7 +40,7 @@ LANGUAGE SQL await command.ExecuteNonQueryAsync(); } - await using (var command = SharedDataSource.CreateCommand($"SELECT * FROM {table}")) + await using (var command = DataSource.CreateCommand($"SELECT * FROM {table}")) await using (var reader = await command.ExecuteReaderAsync()) { await reader.ReadAsync(); @@ -55,11 +55,11 @@ LANGUAGE SQL [TestCase(true, true)] public async Task With_output_parameters(bool withPositional, bool withNamed) { - MinimumPgVersion(SharedDataSource, "14.0", "Stored procedure OUT parameters are only support starting with version 14"); + MinimumPgVersion(DataSource, "14.0", "Stored procedure OUT parameters are only support starting with version 14"); - var sproc = await GetTempProcedureName(SharedDataSource); + var sproc = await GetTempProcedureName(DataSource); - await SharedDataSource.ExecuteNonQueryAsync(@$" + await DataSource.ExecuteNonQueryAsync(@$" CREATE PROCEDURE {sproc}(a int, OUT out1 int, OUT out2 int, b int) LANGUAGE plpgsql AS $$ @@ -68,7 +68,7 @@ LANGUAGE plpgsql out2 = b; END$$"); - await using var command = SharedDataSource.CreateCommand(sproc); + await using var command = DataSource.CreateCommand(sproc); command.CommandType = 
CommandType.StoredProcedure; command.Parameters.Add(new() { Value = 8 }); @@ -96,9 +96,9 @@ LANGUAGE plpgsql [TestCase(true, true)] public async Task With_input_output_parameters(bool withPositional, bool withNamed) { - var sproc = await GetTempProcedureName(SharedDataSource); + var sproc = await GetTempProcedureName(DataSource); - await SharedDataSource.ExecuteNonQueryAsync(@$" + await DataSource.ExecuteNonQueryAsync(@$" CREATE PROCEDURE {sproc}(a int, INOUT inout1 int, INOUT inout2 int, b int) LANGUAGE plpgsql AS $$ @@ -107,7 +107,7 @@ LANGUAGE plpgsql inout2 = inout2 + b; END$$"); - await using var command = SharedDataSource.CreateCommand(sproc); + await using var command = DataSource.CreateCommand(sproc); command.CommandType = CommandType.StoredProcedure; command.Parameters.Add(new() { Value = 8 }); diff --git a/test/Npgsql.Tests/Support/TestBase.cs b/test/Npgsql.Tests/Support/TestBase.cs index 0b1a7210b9..d5bde0f142 100644 --- a/test/Npgsql.Tests/Support/TestBase.cs +++ b/test/Npgsql.Tests/Support/TestBase.cs @@ -464,8 +464,6 @@ static string ArrayLiteral(string elementLiteral) #region Utilities for use by tests - protected static readonly NpgsqlDataSource SharedDataSource = NpgsqlDataSource.Create(TestUtil.ConnectionString); - protected virtual NpgsqlDataSourceBuilder CreateDataSourceBuilder() => new(ConnectionString); @@ -497,7 +495,12 @@ protected static NpgsqlDataSource GetDataSource(string connectionString) { if (!DataSources.TryGetValue(connectionString, out dataSource)) { - DataSources[connectionString] = dataSource = NpgsqlDataSource.Create(connectionString); + var canonicalConnectionString = new NpgsqlConnectionStringBuilder(connectionString).ToString(); + if (!DataSources.TryGetValue(canonicalConnectionString, out dataSource)) + { + DataSources[canonicalConnectionString] = dataSource = NpgsqlDataSource.Create(connectionString); + } + DataSources[connectionString] = dataSource; } } } @@ -529,8 +532,6 @@ protected NpgsqlDataSource DefaultDataSource 
protected virtual NpgsqlDataSource DataSource => DefaultDataSource; - protected void ClearDataSources() => DataSources.Clear(); - protected virtual NpgsqlConnection CreateConnection() => DataSource.CreateConnection(); diff --git a/test/Npgsql.Tests/TestUtil.cs b/test/Npgsql.Tests/TestUtil.cs index ead7f8eae3..b2e61317ad 100644 --- a/test/Npgsql.Tests/TestUtil.cs +++ b/test/Npgsql.Tests/TestUtil.cs @@ -19,7 +19,7 @@ public static class TestUtil /// test database. /// public const string DefaultConnectionString = - "Server=localhost;Username=npgsql_tests;Password=npgsql_tests;Database=npgsql_tests;Timeout=0;Command Timeout=0;SSL Mode=Disable"; + "Host=localhost;Username=npgsql_tests;Password=npgsql_tests;Database=npgsql_tests;Timeout=0;Command Timeout=0;SSL Mode=Disable;Multiplexing=False"; /// /// The connection string that will be used when opening the connection to the tests database. @@ -387,10 +387,9 @@ internal static IDisposable SetCurrentCulture(CultureInfo culture) return new DeferredExecutionDisposable(() => CultureInfo.CurrentCulture = oldCulture); } - internal static IDisposable DisableSqlRewriting(Action clearDataSources) + internal static IDisposable DisableSqlRewriting() { #if DEBUG - clearDataSources(); NpgsqlCommand.EnableSqlRewriting = false; return new DeferredExecutionDisposable(() => NpgsqlCommand.EnableSqlRewriting = true); #else diff --git a/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs b/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs index 8969215197..fafed37bcf 100644 --- a/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs +++ b/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs @@ -12,7 +12,7 @@ namespace Npgsql.Tests.Types; [TestFixture(false)] [NonParallelizable] #endif -public class DateTimeInfinityTests : TestBase, IDisposable +public sealed class DateTimeInfinityTests : TestBase, IDisposable { static readonly TestCaseData[] TimestampDateTimeValues = { @@ -117,14 +117,18 @@ public DateTimeInfinityTests(bool 
disableDateTimeInfinityConversions) "DateTimeInfinityTests rely on the Npgsql.DisableDateTimeInfinityConversions AppContext switch and can only be run in DEBUG builds"); } #endif - // The switch is baked into the serializer options, so clear the sources on change here. - ClearDataSources(); + + DataSource = NpgsqlDataSource.Create(ConnectionString); } + protected override NpgsqlDataSource DataSource { get; } + public void Dispose() { #if DEBUG DisableDateTimeInfinityConversions = false; #endif + + DataSource.Dispose(); } } diff --git a/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs b/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs index 2b9ae54813..0aa88a456d 100644 --- a/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs +++ b/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs @@ -76,6 +76,7 @@ public void Setup() public void Teardown() { LegacyTimestampBehavior = false; + _dataSource.Dispose(); NpgsqlDataSourceBuilder.ResetGlobalMappings(overwrite: true); } #endif diff --git a/test/Npgsql.Tests/Types/MultirangeTests.cs b/test/Npgsql.Tests/Types/MultirangeTests.cs index 8d17bc5613..461177d080 100644 --- a/test/Npgsql.Tests/Types/MultirangeTests.cs +++ b/test/Npgsql.Tests/Types/MultirangeTests.cs @@ -175,4 +175,7 @@ public async Task Setup() await using var conn = await OpenConnectionAsync(); MinimumPgVersion(conn, "14.0", "Multirange types were introduced in PostgreSQL 14"); } + + [OneTimeTearDown] + public void TearDown() => DataSource.Dispose(); } diff --git a/test/Npgsql.Tests/Types/RangeTests.cs b/test/Npgsql.Tests/Types/RangeTests.cs index ca7a04dc27..74c417216c 100644 --- a/test/Npgsql.Tests/Types/RangeTests.cs +++ b/test/Npgsql.Tests/Types/RangeTests.cs @@ -467,4 +467,7 @@ public RangeTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) { builder.ConnectionStringBuilder.Timezone = "Europe/Berlin"; }); + + [OneTimeTearDown] + public void TearDown() => DataSource.Dispose(); } From 9c98759e5aab5475839d37828a15b45219ae32c3 Mon Sep 17 00:00:00 2001 From: 
Nikita Kazmin Date: Wed, 1 Nov 2023 21:39:56 +0300 Subject: [PATCH 270/761] Fix test compilation after 52b5809 --- test/Npgsql.Tests/Types/DateTimeInfinityTests.cs | 5 ----- 1 file changed, 5 deletions(-) diff --git a/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs b/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs index fafed37bcf..8508979b31 100644 --- a/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs +++ b/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs @@ -117,18 +117,13 @@ public DateTimeInfinityTests(bool disableDateTimeInfinityConversions) "DateTimeInfinityTests rely on the Npgsql.DisableDateTimeInfinityConversions AppContext switch and can only be run in DEBUG builds"); } #endif - - DataSource = NpgsqlDataSource.Create(ConnectionString); } - protected override NpgsqlDataSource DataSource { get; } - public void Dispose() { #if DEBUG DisableDateTimeInfinityConversions = false; #endif - DataSource.Dispose(); } } From 397e505585c4cdedfbdcfc313011216881fab3d0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 1 Nov 2023 22:51:57 +0100 Subject: [PATCH 271/761] Bump xunit from 2.5.3 to 2.6.0 (#5368) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 0da0e92a0c..c6f79ce8bd 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -38,7 +38,7 @@ - + From fe234aadb6cb718de779e1b58b62ecbf7e311a77 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 1 Nov 2023 22:52:17 +0100 Subject: [PATCH 272/761] Bump BenchmarkDotNet from 0.13.9 to 0.13.10 (#5367) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index c6f79ce8bd..a9e0a3dce6 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -45,7 +45,7 @@ - + From 
5c106023a1a192a2fab0726fc7df9ca51e39cb22 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 1 Nov 2023 22:52:55 +0100 Subject: [PATCH 273/761] Bump BenchmarkDotNet.Diagnostics.Windows from 0.13.9 to 0.13.10 (#5366) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index a9e0a3dce6..c217111f94 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -47,7 +47,7 @@ - + From 4b5b897ad3865d3068a6c65239c8edbf1a327979 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Thu, 2 Nov 2023 17:50:24 +0100 Subject: [PATCH 274/761] Perf improvements (#5245) Should bring Npgsql back to or better than pre #5123 performance numbers --- .../Internal/GeoJSONConverter.cs | 14 +- .../BackendMessages/CommandCompleteMessage.cs | 145 ++--- .../BackendMessages/RowDescriptionMessage.cs | 70 +- .../Internal/Converters/ArrayConverter.cs | 7 +- .../Internal/Converters/AsyncHelpers.cs | 14 +- .../NpgsqlConnector.FrontendMessages.cs | 158 ++--- src/Npgsql/Internal/NpgsqlConnector.cs | 54 +- src/Npgsql/Internal/NpgsqlReadBuffer.cs | 157 +++-- src/Npgsql/Internal/NpgsqlWriteBuffer.cs | 124 ++-- src/Npgsql/Internal/PgReader.cs | 232 +++++-- src/Npgsql/Internal/PgTypeInfo.cs | 29 +- src/Npgsql/Internal/PgWriter.cs | 2 +- src/Npgsql/Internal/Size.cs | 2 +- src/Npgsql/MetricsReporter.cs | 12 +- src/Npgsql/MultiplexingDataSource.cs | 27 +- src/Npgsql/NpgsqlBatchCommand.cs | 13 +- src/Npgsql/NpgsqlBinaryExporter.cs | 18 +- src/Npgsql/NpgsqlBinaryImporter.cs | 2 +- src/Npgsql/NpgsqlCommand.cs | 165 ++--- src/Npgsql/NpgsqlConnection.cs | 42 +- src/Npgsql/NpgsqlDataReader.cs | 604 ++++++++++-------- src/Npgsql/NpgsqlDataSource.cs | 8 +- src/Npgsql/NpgsqlEventSource.cs | 48 +- src/Npgsql/NpgsqlSqlEventSource.cs | 8 +- src/Npgsql/PreparedStatement.cs | 22 +- src/Npgsql/PreparedStatementManager.cs | 29 +- 
.../Replication/PgOutput/ReplicationValue.cs | 20 +- src/Npgsql/SqlQueryParser.cs | 12 +- src/Npgsql/ThrowHelper.cs | 2 +- .../Util/ResettableCancellationTokenSource.cs | 10 +- src/Npgsql/Util/Statics.cs | 2 + test/Npgsql.Tests/PrepareTests.cs | 2 +- test/Npgsql.Tests/Support/PgServerMock.cs | 8 +- 33 files changed, 1168 insertions(+), 894 deletions(-) diff --git a/src/Npgsql.GeoJSON/Internal/GeoJSONConverter.cs b/src/Npgsql.GeoJSON/Internal/GeoJSONConverter.cs index 2f6ece1fd8..6384ec748c 100644 --- a/src/Npgsql.GeoJSON/Internal/GeoJSONConverter.cs +++ b/src/Npgsql.GeoJSON/Internal/GeoJSONConverter.cs @@ -291,11 +291,15 @@ static Position ReadPosition(PgReader reader, EwkbGeometryType type, bool little return position; double ReadDouble(bool littleEndian) - => littleEndian - // Netstandard is missing ReverseEndianness apis for double. - ? Unsafe.As(ref Unsafe.AsRef( - BinaryPrimitives.ReverseEndianness(Unsafe.As(ref Unsafe.AsRef(reader.ReadDouble()))))) - : reader.ReadDouble(); + { + if (littleEndian) + { + var value = BinaryPrimitives.ReverseEndianness(Unsafe.As(ref Unsafe.AsRef(reader.ReadDouble()))); + return Unsafe.As(ref value); + } + + return reader.ReadDouble(); + } } } diff --git a/src/Npgsql/BackendMessages/CommandCompleteMessage.cs b/src/Npgsql/BackendMessages/CommandCompleteMessage.cs index 63080052cb..98154d1a7e 100644 --- a/src/Npgsql/BackendMessages/CommandCompleteMessage.cs +++ b/src/Npgsql/BackendMessages/CommandCompleteMessage.cs @@ -1,122 +1,61 @@ -using System.Diagnostics; +using System; +using System.Buffers.Text; using Npgsql.Internal; namespace Npgsql.BackendMessages; sealed class CommandCompleteMessage : IBackendMessage { + uint _oid; + ulong _rows; internal StatementType StatementType { get; private set; } - internal uint OID { get; private set; } - internal ulong Rows { get; private set; } + + internal uint OID => _oid; + internal ulong Rows => _rows; internal CommandCompleteMessage Load(NpgsqlReadBuffer buf, int len) { - Rows = 0; - OID = 
0; - - var bytes = buf.Buffer; - var i = buf.ReadPosition; + var bytes = buf.Span.Slice(0, len); buf.Skip(len); - switch (bytes[i]) - { - case (byte)'I': - if (!AreEqual(bytes, i, "INSERT ")) - goto default; - StatementType = StatementType.Insert; - i += 7; - OID = (uint) ParseNumber(bytes, ref i); - i++; - Rows = ParseNumber(bytes, ref i); - return this; - - case (byte)'D': - if (!AreEqual(bytes, i, "DELETE ")) - goto default; - StatementType = StatementType.Delete; - i += 7; - Rows = ParseNumber(bytes, ref i); - return this; - - case (byte)'U': - if (!AreEqual(bytes, i, "UPDATE ")) - goto default; - StatementType = StatementType.Update; - i += 7; - Rows = ParseNumber(bytes, ref i); - return this; - case (byte)'S': - if (!AreEqual(bytes, i, "SELECT ")) - goto default; - StatementType = StatementType.Select; - i += 7; - Rows = ParseNumber(bytes, ref i); - return this; - - case (byte)'M': - if (AreEqual(bytes, i, "MERGE ")) - { - StatementType = StatementType.Merge; - i += 6; - } - else if (AreEqual(bytes, i, "MOVE ")) - { - StatementType = StatementType.Move; - i += 5; - } - else - goto default; - Rows = ParseNumber(bytes, ref i); - return this; - - case (byte)'F': - if (!AreEqual(bytes, i, "FETCH ")) - goto default; - StatementType = StatementType.Fetch; - i += 6; - Rows = ParseNumber(bytes, ref i); - return this; - - case (byte)'C': - if (AreEqual(bytes, i, "COPY ")) - { - StatementType = StatementType.Copy; - i += 5; - Rows = ParseNumber(bytes, ref i); - return this; - } - if (bytes[i + 4] == 0 && AreEqual(bytes, i, "CALL")) - { - StatementType = StatementType.Call; - return this; - } + // PostgreSQL always writes these strings as ASCII, see https://github.com/postgres/postgres/blob/c8e1ba736b2b9e8c98d37a5b77c4ed31baf94147/src/backend/tcop/cmdtag.c#L130-L133 + (StatementType, var argumentsStart) = Convert.ToChar(bytes[0]) switch + { + 'S' when bytes.StartsWith("SELECT "u8) => (StatementType.Select, "SELECT ".Length), + 'I' when bytes.StartsWith("INSERT "u8) => 
(StatementType.Insert, "INSERT ".Length), + 'U' when bytes.StartsWith("UPDATE "u8) => (StatementType.Update, "UPDATE ".Length), + 'D' when bytes.StartsWith("DELETE "u8) => (StatementType.Delete, "DELETE ".Length), + 'M' when bytes.StartsWith("MERGE "u8) => (StatementType.Merge, "MERGE ".Length), + 'C' when bytes.StartsWith("COPY "u8) => (StatementType.Copy, "COPY ".Length), + 'C' when bytes.StartsWith("CALL"u8) => (StatementType.Call, "CALL".Length), + 'M' when bytes.StartsWith("MOVE "u8) => (StatementType.Move, "MOVE ".Length), + 'F' when bytes.StartsWith("FETCH "u8) => (StatementType.Fetch, "FETCH ".Length), + 'C' when bytes.StartsWith("CREATE TABLE AS "u8) => (StatementType.CreateTableAs, "CREATE TABLE AS ".Length), + _ => (StatementType.Other, 0) + }; + + _oid = 0; + _rows = 0; + + // Slice away the null terminator. + var arguments = bytes.Slice(argumentsStart, bytes.Length - argumentsStart - 1); + switch (StatementType) + { + case StatementType.Other: + case StatementType.Call: + break; + case StatementType.Insert: + if (!Utf8Parser.TryParse(arguments, out _oid, out var nextArgumentOffset)) + throw new InvalidOperationException("Invalid bytes in command complete message."); + arguments = arguments.Slice(nextArgumentOffset + 1); goto default; - default: - StatementType = StatementType.Other; - return this; + if (!Utf8Parser.TryParse(arguments, out _rows, out _)) + throw new InvalidOperationException("Invalid bytes in command complete message."); + break; } - } - static bool AreEqual(byte[] bytes, int pos, string s) - { - for (var i = 0; i < s.Length; i++) - { - if (bytes[pos+i] != s[i]) - return false; - } - return true; - } - - static ulong ParseNumber(byte[] bytes, ref int pos) - { - Debug.Assert(bytes[pos] >= '0' && bytes[pos] <= '9'); - ulong result = 0; - do - { - result = result * 10 + bytes[pos++] - '0'; - } while (bytes[pos] >= '0' && bytes[pos] <= '9'); - return result; + return this; } public BackendMessageCode Code => 
BackendMessageCode.CommandComplete; diff --git a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs index 693db81516..d968b43632 100644 --- a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs +++ b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs @@ -12,6 +12,20 @@ namespace Npgsql.BackendMessages; +readonly struct ColumnInfo +{ + public ColumnInfo(PgConverterInfo converterInfo, DataFormat dataFormat, bool asObject) + { + ConverterInfo = converterInfo; + DataFormat = dataFormat; + AsObject = asObject; + } + + public PgConverterInfo ConverterInfo { get; } + public DataFormat DataFormat { get; } + public bool AsObject { get; } +} + /// /// A RowDescription message sent from the backend. /// @@ -24,7 +38,7 @@ sealed class RowDescriptionMessage : IBackendMessage, IReadOnlyList _nameIndex; Dictionary? _insensitiveIndex; - PgConverterInfo[]? _lastConverterInfoCache; + ColumnInfo[]? _lastConverterInfoCache; internal RowDescriptionMessage(bool connectorOwned, int numFields = 10) { @@ -119,14 +133,14 @@ public FieldDescription this[int index] } } - internal void SetConverterInfoCache(ReadOnlySpan values) + internal void SetConverterInfoCache(ReadOnlySpan values) { if (_connectorOwned || _lastConverterInfoCache is not null) return; Interlocked.CompareExchange(ref _lastConverterInfoCache, values.ToArray(), null); } - internal void LoadConverterInfoCache(PgConverterInfo[] values) + internal void LoadConverterInfoCache(ColumnInfo[] values) { if (_lastConverterInfoCache is not { } cache) return; @@ -328,17 +342,17 @@ internal void Populate( internal Type FieldType => ObjectOrDefaultInfo.TypeToConvert; - PgConverterInfo _objectOrDefaultInfo; + ColumnInfo _objectOrDefaultInfo; internal PgConverterInfo ObjectOrDefaultInfo { get { - if (!_objectOrDefaultInfo.IsDefault) - return _objectOrDefaultInfo; + if (!_objectOrDefaultInfo.ConverterInfo.IsDefault) + return _objectOrDefaultInfo.ConverterInfo; ref var info = ref 
_objectOrDefaultInfo; - GetInfo(null, ref _objectOrDefaultInfo, out _); - return info; + GetInfo(null, ref _objectOrDefaultInfo); + return info.ConverterInfo; } } @@ -350,64 +364,60 @@ internal FieldDescription Clone() return field; } - internal void GetInfo(Type? type, ref PgConverterInfo lastConverterInfo, out bool asObject) + internal void GetInfo(Type? type, ref ColumnInfo lastColumnInfo) { - Debug.Assert(lastConverterInfo.IsDefault || ( - ReferenceEquals(_serializerOptions, lastConverterInfo.TypeInfo.Options) && - lastConverterInfo.TypeInfo.PgTypeId == _serializerOptions.ToCanonicalTypeId(PostgresType)), "Cache is bleeding over"); + Debug.Assert(lastColumnInfo.ConverterInfo.IsDefault || ( + ReferenceEquals(_serializerOptions, lastColumnInfo.ConverterInfo.TypeInfo.Options) && + lastColumnInfo.ConverterInfo.TypeInfo.PgTypeId == _serializerOptions.ToCanonicalTypeId(PostgresType)), "Cache is bleeding over"); - if (!lastConverterInfo.IsDefault && lastConverterInfo.TypeToConvert == type) - { - asObject = lastConverterInfo.IsBoxingConverter; + if (!lastColumnInfo.ConverterInfo.IsDefault && lastColumnInfo.ConverterInfo.TypeToConvert == type) return; - } - var odfInfo = DataFormat is DataFormat.Text && type is not null ? ObjectOrDefaultInfo : _objectOrDefaultInfo; + var odfInfo = DataFormat is DataFormat.Text && type is not null ? 
ObjectOrDefaultInfo : _objectOrDefaultInfo.ConverterInfo; if (odfInfo is { IsDefault: false }) { if (typeof(object) == type) { - lastConverterInfo = odfInfo; - asObject = true; + lastColumnInfo = new(odfInfo, DataFormat, true); return; } if (odfInfo.TypeToConvert == type) { - lastConverterInfo = odfInfo; - asObject = lastConverterInfo.IsBoxingConverter; + lastColumnInfo = new(odfInfo, DataFormat, odfInfo.IsBoxingConverter); return; } } - GetInfoSlow(out lastConverterInfo, out asObject); + GetInfoSlow(out lastColumnInfo); [MethodImpl(MethodImplOptions.NoInlining)] - void GetInfoSlow(out PgConverterInfo lastConverterInfo, out bool asObject) + void GetInfoSlow(out ColumnInfo lastColumnInfo) { var typeInfo = AdoSerializerHelpers.GetTypeInfoForReading(type ?? typeof(object), PostgresType, _serializerOptions); + PgConverterInfo converterInfo; switch (DataFormat) { case DataFormat.Binary: // If we don't support binary we'll just throw. - lastConverterInfo = typeInfo.Bind(Field, DataFormat); - asObject = typeof(object) == type || lastConverterInfo.IsBoxingConverter; + converterInfo = typeInfo.Bind(Field, DataFormat); + lastColumnInfo = new(converterInfo, DataFormat.Binary, typeof(object) == type || converterInfo.IsBoxingConverter); break; default: // For text we'll fall back to any available text converter for the expected clr type or throw. - if (!typeInfo.TryBind(Field, DataFormat, out lastConverterInfo)) + if (!typeInfo.TryBind(Field, DataFormat, out converterInfo)) { typeInfo = AdoSerializerHelpers.GetTypeInfoForReading(type ?? 
typeof(string), _serializerOptions.UnknownPgType, _serializerOptions); - lastConverterInfo = typeInfo.Bind(Field, DataFormat); - asObject = type != lastConverterInfo.TypeToConvert || lastConverterInfo.IsBoxingConverter; + converterInfo = typeInfo.Bind(Field, DataFormat); + lastColumnInfo = new(converterInfo, DataFormat, type != converterInfo.TypeToConvert || converterInfo.IsBoxingConverter); } else - asObject = typeof(object) == type || lastConverterInfo.IsBoxingConverter; + lastColumnInfo = new(converterInfo, DataFormat, typeof(object) == type || converterInfo.IsBoxingConverter); break; } // We delay initializing ObjectOrDefaultInfo until after the first lookup (unless it is itself the first lookup). // When passed in an unsupported type it allows the error to be more specific, instead of just having object/null to deal with. - if (_objectOrDefaultInfo.IsDefault && type is not null) + if (_objectOrDefaultInfo.ConverterInfo.IsDefault && type is not null) _ = ObjectOrDefaultInfo; } } diff --git a/src/Npgsql/Internal/Converters/ArrayConverter.cs b/src/Npgsql/Internal/Converters/ArrayConverter.cs index e8390f2f2f..3007619c89 100644 --- a/src/Npgsql/Internal/Converters/ArrayConverter.cs +++ b/src/Npgsql/Internal/Converters/ArrayConverter.cs @@ -305,9 +305,10 @@ private protected ArrayConverter(int? expectedDimensions, PgConverterResolution public override T Read(PgReader reader) => (T)_pgArrayConverter.Read(async: false, reader).Result; public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) -#pragma warning disable CS9193 - => Unsafe.As, ValueTask>(ref Unsafe.AsRef(_pgArrayConverter.Read(async: true, reader, cancellationToken))); -#pragma warning restore + { + var value = _pgArrayConverter.Read(async: true, reader, cancellationToken); + return Unsafe.As, ValueTask>(ref value); + } public override Size GetSize(SizeContext context, T values, ref object? 
writeState) => _pgArrayConverter.GetSize(context, values, ref writeState); diff --git a/src/Npgsql/Internal/Converters/AsyncHelpers.cs b/src/Npgsql/Internal/Converters/AsyncHelpers.cs index 339378fdd7..6661ffed58 100644 --- a/src/Npgsql/Internal/Converters/AsyncHelpers.cs +++ b/src/Npgsql/Internal/Converters/AsyncHelpers.cs @@ -66,9 +66,10 @@ public Continuation(object handle, delegate* conti public static unsafe ValueTask ComposingReadAsync(this PgConverter instance, PgConverter effectiveConverter, PgReader reader, CancellationToken cancellationToken) { if (!typeof(T).IsValueType && !typeof(TEffective).IsValueType) -#pragma warning disable CS9193 - return Unsafe.As, ValueTask>(ref Unsafe.AsRef(effectiveConverter.ReadAsync(reader, cancellationToken))); -#pragma warning restore + { + var value = effectiveConverter.ReadAsync(reader, cancellationToken); + return Unsafe.As, ValueTask>(ref value); + } // Easy if we have all the data. var task = effectiveConverter.ReadAsync(reader, cancellationToken); if (task.IsCompletedSuccessfully) @@ -90,9 +91,10 @@ static void UnboxAndComplete(Task task, CompletionSource completionSource) public static unsafe ValueTask ComposingReadAsObjectAsync(this PgConverter instance, PgConverter effectiveConverter, PgReader reader, CancellationToken cancellationToken) { if (!typeof(T).IsValueType) -#pragma warning disable CS9193 - return Unsafe.As, ValueTask>(ref Unsafe.AsRef(effectiveConverter.ReadAsObjectAsync(reader, cancellationToken))); -#pragma warning restore + { + var value = effectiveConverter.ReadAsObjectAsync(reader, cancellationToken); + return Unsafe.As, ValueTask>(ref value); + } // Easy if we have all the data. 
var task = effectiveConverter.ReadAsObjectAsync(reader, cancellationToken); diff --git a/src/Npgsql/Internal/NpgsqlConnector.FrontendMessages.cs b/src/Npgsql/Internal/NpgsqlConnector.FrontendMessages.cs index ac57019a16..b3c21b9542 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.FrontendMessages.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.FrontendMessages.cs @@ -1,6 +1,7 @@ using System; using System.Collections.Generic; using System.Diagnostics; +using System.Runtime.CompilerServices; using System.Threading; using System.Threading.Tasks; @@ -8,34 +9,36 @@ namespace Npgsql.Internal; partial class NpgsqlConnector { - internal Task WriteDescribe(StatementOrPortal statementOrPortal, string name, bool async, CancellationToken cancellationToken = default) + internal Task WriteDescribe(StatementOrPortal statementOrPortal, byte[] asciiName, bool async, CancellationToken cancellationToken = default) { - NpgsqlWriteBuffer.AssertASCIIOnly(name); + NpgsqlWriteBuffer.AssertASCIIOnly(asciiName); var len = sizeof(byte) + // Message code sizeof(int) + // Length sizeof(byte) + // Statement or portal - (name.Length + 1); // Statement/portal name + (asciiName.Length + 1); // Statement/portal name - if (WriteBuffer.WriteSpaceLeft < len) - return FlushAndWrite(len, statementOrPortal, name, async, cancellationToken); + var writeBuffer = WriteBuffer; + if (writeBuffer.WriteSpaceLeft < len) + return FlushAndWrite(len, statementOrPortal, asciiName, async, cancellationToken); - Write(len, statementOrPortal, name); + Write(len, statementOrPortal, asciiName); return Task.CompletedTask; - async Task FlushAndWrite(int len, StatementOrPortal statementOrPortal, string name, bool async, CancellationToken cancellationToken) + async Task FlushAndWrite(int len, StatementOrPortal statementOrPortal, byte[] name, bool async, CancellationToken cancellationToken) { await Flush(async, cancellationToken).ConfigureAwait(false); - Debug.Assert(len <= WriteBuffer.WriteSpaceLeft, $"Message of type 
{GetType().Name} has length {len} which is bigger than the buffer ({WriteBuffer.WriteSpaceLeft})"); + Debug.Assert(len <= writeBuffer.WriteSpaceLeft, $"Message of type {GetType().Name} has length {len} which is bigger than the buffer ({writeBuffer.WriteSpaceLeft})"); Write(len, statementOrPortal, name); } - void Write(int len, StatementOrPortal statementOrPortal, string name) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + void Write(int len, StatementOrPortal statementOrPortal, byte[] name) { - WriteBuffer.WriteByte(FrontendMessageCode.Describe); - WriteBuffer.WriteInt32(len - 1); - WriteBuffer.WriteByte((byte)statementOrPortal); - WriteBuffer.WriteNullTerminatedString(name); + writeBuffer.WriteByte(FrontendMessageCode.Describe); + writeBuffer.WriteInt32(len - 1); + writeBuffer.WriteByte((byte)statementOrPortal); + writeBuffer.WriteNullTerminatedString(name); } } @@ -44,7 +47,8 @@ internal Task WriteSync(bool async, CancellationToken cancellationToken = defaul const int len = sizeof(byte) + // Message code sizeof(int); // Length - if (WriteBuffer.WriteSpaceLeft < len) + var writeBuffer = WriteBuffer; + if (writeBuffer.WriteSpaceLeft < len) return FlushAndWrite(async, cancellationToken); Write(); @@ -53,14 +57,15 @@ internal Task WriteSync(bool async, CancellationToken cancellationToken = defaul async Task FlushAndWrite(bool async, CancellationToken cancellationToken) { await Flush(async, cancellationToken).ConfigureAwait(false); - Debug.Assert(len <= WriteBuffer.WriteSpaceLeft, $"Message of type {GetType().Name} has length {len} which is bigger than the buffer ({WriteBuffer.WriteSpaceLeft})"); + Debug.Assert(len <= writeBuffer.WriteSpaceLeft, $"Message of type {GetType().Name} has length {len} which is bigger than the buffer ({writeBuffer.WriteSpaceLeft})"); Write(); } + [MethodImpl(MethodImplOptions.AggressiveInlining)] void Write() { - WriteBuffer.WriteByte(FrontendMessageCode.Sync); - WriteBuffer.WriteInt32(len - 1); + 
writeBuffer.WriteByte(FrontendMessageCode.Sync); + writeBuffer.WriteInt32(len - 1); } } @@ -73,7 +78,8 @@ internal Task WriteExecute(int maxRows, bool async, CancellationToken cancellati sizeof(byte) + // Null-terminated portal name (always empty for now) sizeof(int); // Max number of rows - if (WriteBuffer.WriteSpaceLeft < len) + var writeBuffer = WriteBuffer; + if (writeBuffer.WriteSpaceLeft < len) return FlushAndWrite(maxRows, async, cancellationToken); Write(maxRows); @@ -82,22 +88,23 @@ internal Task WriteExecute(int maxRows, bool async, CancellationToken cancellati async Task FlushAndWrite(int maxRows, bool async, CancellationToken cancellationToken) { await Flush(async, cancellationToken).ConfigureAwait(false); - Debug.Assert(10 <= WriteBuffer.WriteSpaceLeft, $"Message of type {GetType().Name} has length 10 which is bigger than the buffer ({WriteBuffer.WriteSpaceLeft})"); + Debug.Assert(10 <= writeBuffer.WriteSpaceLeft, $"Message of type {GetType().Name} has length 10 which is bigger than the buffer ({writeBuffer.WriteSpaceLeft})"); Write(maxRows); } + [MethodImpl(MethodImplOptions.AggressiveInlining)] void Write(int maxRows) { - WriteBuffer.WriteByte(FrontendMessageCode.Execute); - WriteBuffer.WriteInt32(len - 1); - WriteBuffer.WriteByte(0); // Portal is always empty for now - WriteBuffer.WriteInt32(maxRows); + writeBuffer.WriteByte(FrontendMessageCode.Execute); + writeBuffer.WriteInt32(len - 1); + writeBuffer.WriteByte(0); // Portal is always empty for now + writeBuffer.WriteInt32(maxRows); } } - internal async Task WriteParse(string sql, string statementName, List inputParameters, bool async, CancellationToken cancellationToken = default) + internal async Task WriteParse(string sql, byte[] asciiName, List inputParameters, bool async, CancellationToken cancellationToken = default) { - NpgsqlWriteBuffer.AssertASCIIOnly(statementName); + NpgsqlWriteBuffer.AssertASCIIOnly(asciiName); int queryByteLen; try @@ -110,60 +117,63 @@ internal async Task 
WriteParse(string sql, string statementName, List parameters, string portal, - string statement, + byte[] asciiName, bool allResultTypesAreUnknown, bool[]? unknownResultTypeList, bool async, CancellationToken cancellationToken = default) { - NpgsqlWriteBuffer.AssertASCIIOnly(statement); + NpgsqlWriteBuffer.AssertASCIIOnly(asciiName); NpgsqlWriteBuffer.AssertASCIIOnly(portal); var headerLength = sizeof(byte) + // Message code sizeof(int) + // Message length sizeof(byte) + // Portal is always empty (only a null terminator) - statement.Length + sizeof(byte) + // Statement name plus null terminator + asciiName.Length + sizeof(byte) + // Statement name plus null terminator sizeof(ushort); // Number of parameter format codes that follow - if (WriteBuffer.WriteSpaceLeft < headerLength) + var writeBuffer = WriteBuffer; + if (writeBuffer.WriteSpaceLeft < headerLength) { - Debug.Assert(WriteBuffer.Size >= headerLength, "Write buffer too small for Bind header"); + Debug.Assert(writeBuffer.Size >= headerLength, "Write buffer too small for Bind header"); await Flush(async, cancellationToken).ConfigureAwait(false); } @@ -187,37 +197,37 @@ internal async Task WriteBind( sizeof(short) + // Number of result format codes sizeof(short) * (unknownResultTypeList?.Length ?? 
1); // Result format codes - WriteBuffer.WriteByte(FrontendMessageCode.Bind); - WriteBuffer.WriteInt32(messageLength - 1); + writeBuffer.WriteByte(FrontendMessageCode.Bind); + writeBuffer.WriteInt32(messageLength - 1); Debug.Assert(portal == string.Empty); - WriteBuffer.WriteByte(0); // Portal is always empty + writeBuffer.WriteByte(0); // Portal is always empty - WriteBuffer.WriteNullTerminatedString(statement); - WriteBuffer.WriteInt16(formatCodeListLength); + writeBuffer.WriteNullTerminatedString(asciiName); + writeBuffer.WriteInt16((short)formatCodeListLength); // 0 length implicitly means all-text, 1 means all-binary, >1 means mix-and-match if (formatCodeListLength == 1) { - if (WriteBuffer.WriteSpaceLeft < sizeof(short)) + if (writeBuffer.WriteSpaceLeft < sizeof(short)) await Flush(async, cancellationToken).ConfigureAwait(false); - WriteBuffer.WriteInt16(DataFormat.Binary.ToFormatCode()); + writeBuffer.WriteInt16(DataFormat.Binary.ToFormatCode()); } else if (formatCodeListLength > 1) { for (var paramIndex = 0; paramIndex < parameters.Count; paramIndex++) { - if (WriteBuffer.WriteSpaceLeft < sizeof(short)) + if (writeBuffer.WriteSpaceLeft < sizeof(short)) await Flush(async, cancellationToken).ConfigureAwait(false); - WriteBuffer.WriteInt16(parameters[paramIndex].Format.ToFormatCode()); + writeBuffer.WriteInt16(parameters[paramIndex].Format.ToFormatCode()); } } - if (WriteBuffer.WriteSpaceLeft < sizeof(ushort)) + if (writeBuffer.WriteSpaceLeft < sizeof(ushort)) await Flush(async, cancellationToken).ConfigureAwait(false); - WriteBuffer.WriteUInt16((ushort)parameters.Count); + writeBuffer.WriteUInt16((ushort)parameters.Count); - var writer = WriteBuffer.GetWriter(DatabaseInfo, async ? FlushMode.NonBlocking : FlushMode.Blocking); + var writer = writeBuffer.GetWriter(DatabaseInfo, async ? 
FlushMode.NonBlocking : FlushMode.Blocking); try { for (var paramIndex = 0; paramIndex < parameters.Count; paramIndex++) @@ -234,52 +244,52 @@ internal async Task WriteBind( if (unknownResultTypeList != null) { - if (WriteBuffer.WriteSpaceLeft < 2 + unknownResultTypeList.Length * 2) + if (writeBuffer.WriteSpaceLeft < 2 + unknownResultTypeList.Length * 2) await Flush(async, cancellationToken).ConfigureAwait(false); - WriteBuffer.WriteInt16(unknownResultTypeList.Length); + writeBuffer.WriteInt16((short)unknownResultTypeList.Length); foreach (var t in unknownResultTypeList) - WriteBuffer.WriteInt16(t ? 0 : 1); + writeBuffer.WriteInt16((short)(t ? 0 : 1)); } else { - if (WriteBuffer.WriteSpaceLeft < 4) + if (writeBuffer.WriteSpaceLeft < 4) await Flush(async, cancellationToken).ConfigureAwait(false); - WriteBuffer.WriteInt16(1); - WriteBuffer.WriteInt16(allResultTypesAreUnknown ? 0 : 1); + writeBuffer.WriteInt16(1); + writeBuffer.WriteInt16((short)(allResultTypesAreUnknown ? 0 : 1)); } } - internal Task WriteClose(StatementOrPortal type, string name, bool async, CancellationToken cancellationToken = default) + internal Task WriteClose(StatementOrPortal type, byte[] asciiName, bool async, CancellationToken cancellationToken = default) { var len = sizeof(byte) + // Message code sizeof(int) + // Length sizeof(byte) + // Statement or portal - name.Length + sizeof(byte); // Statement or portal name plus null terminator + asciiName.Length + sizeof(byte); // Statement or portal name plus null terminator - if (WriteBuffer.WriteSpaceLeft < len) - return FlushAndWrite(len, type, name, async, cancellationToken); + var writeBuffer = WriteBuffer; + if (writeBuffer.WriteSpaceLeft < len) + return FlushAndWrite(len, type, asciiName, async, cancellationToken); - Write(len, type, name); + Write(len, type, asciiName); return Task.CompletedTask; - async Task FlushAndWrite(int len, StatementOrPortal type, string name, bool async, CancellationToken cancellationToken) + async Task 
FlushAndWrite(int len, StatementOrPortal type, byte[] name, bool async, CancellationToken cancellationToken) { await Flush(async, cancellationToken).ConfigureAwait(false); - Debug.Assert(len <= WriteBuffer.WriteSpaceLeft, $"Message of type {GetType().Name} has length {len} which is bigger than the buffer ({WriteBuffer.WriteSpaceLeft})"); + Debug.Assert(len <= writeBuffer.WriteSpaceLeft, $"Message of type {GetType().Name} has length {len} which is bigger than the buffer ({writeBuffer.WriteSpaceLeft})"); Write(len, type, name); } - void Write(int len, StatementOrPortal type, string name) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + void Write(int len, StatementOrPortal type, byte[] name) { - WriteBuffer.WriteByte(FrontendMessageCode.Close); - WriteBuffer.WriteInt32(len - 1); - WriteBuffer.WriteByte((byte)type); - WriteBuffer.WriteNullTerminatedString(name); + writeBuffer.WriteByte(FrontendMessageCode.Close); + writeBuffer.WriteInt32(len - 1); + writeBuffer.WriteByte((byte)type); + writeBuffer.WriteNullTerminatedString(name); } } - internal void WriteQuery(string sql) => WriteQuery(sql, false).GetAwaiter().GetResult(); - internal async Task WriteQuery(string sql, bool async, CancellationToken cancellationToken = default) { var queryByteLen = TextEncoding.GetByteCount(sql); @@ -299,8 +309,6 @@ internal async Task WriteQuery(string sql, bool async, CancellationToken cancell WriteBuffer.WriteByte(0); // Null terminator } - internal void WriteCopyDone() => WriteCopyDone(false).GetAwaiter().GetResult(); - internal async Task WriteCopyDone(bool async, CancellationToken cancellationToken = default) { const int len = sizeof(byte) + // Message code diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 83117f44ac..05120ac589 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -451,6 +451,10 @@ internal ConnectorState State var newState = (int)value; if (newState == _state) 
return; + + if (newState is < 0 or > (int)ConnectorState.Replication) + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(value), "Unknown state: " + value); + Interlocked.Exchange(ref _state, newState); } } @@ -458,20 +462,7 @@ internal ConnectorState State /// /// Returns whether the connector is open, regardless of any task it is currently performing /// - bool IsConnected - => State switch - { - ConnectorState.Ready => true, - ConnectorState.Executing => true, - ConnectorState.Fetching => true, - ConnectorState.Waiting => true, - ConnectorState.Copy => true, - ConnectorState.Replication => true, - ConnectorState.Closed => false, - ConnectorState.Connecting => false, - ConnectorState.Broken => false, - _ => throw new ArgumentOutOfRangeException("Unknown state: " + State) - }; + bool IsConnected => State is not (ConnectorState.Closed or ConnectorState.Connecting or ConnectorState.Broken); internal bool IsReady => State == ConnectorState.Ready; internal bool IsClosed => State == ConnectorState.Closed; @@ -1322,6 +1313,9 @@ internal ValueTask ReadMessage( return new ValueTask(ParseServerMessage(ReadBuffer, messageCode, len, false))!; } +#if NET6_0_OR_GREATER + [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder<>))] +#endif async ValueTask ReadMessageLong( bool async, DataRowLoadingMode dataRowLoadingMode, @@ -1474,7 +1468,15 @@ internal ValueTask ReadMessage( } } - internal IBackendMessage? ParseServerMessage(NpgsqlReadBuffer buf, BackendMessageCode code, int len, bool isPrependedMessage) + internal IBackendMessage? ParseResultSetMessage(NpgsqlReadBuffer buf, BackendMessageCode code, int len, bool handleCallbacks = false) + => code switch + { + BackendMessageCode.DataRow => _dataRowMessage.Load(len), + BackendMessageCode.CommandComplete => _commandCompleteMessage.Load(buf, len), + _ => ParseServerMessage(buf, code, len, false, handleCallbacks) + }; + + internal IBackendMessage? 
ParseServerMessage(NpgsqlReadBuffer buf, BackendMessageCode code, int len, bool isPrependedMessage, bool handleCallbacks = true) { switch (code) { @@ -1510,12 +1512,18 @@ internal ValueTask ReadMessage( ReadParameterStatus(buf.GetNullTerminatedBytes(), buf.GetNullTerminatedBytes()); return null; case BackendMessageCode.NoticeResponse: - var notice = PostgresNotice.Load(buf, Settings.IncludeErrorDetail, LoggingConfiguration.ExceptionLogger); - LogMessages.ReceivedNotice(ConnectionLogger, notice.MessageText, Id); - Connection?.OnNotice(notice); + if (handleCallbacks) + { + var notice = PostgresNotice.Load(buf, Settings.IncludeErrorDetail, LoggingConfiguration.ExceptionLogger); + LogMessages.ReceivedNotice(ConnectionLogger, notice.MessageText, Id); + Connection?.OnNotice(notice); + } return null; case BackendMessageCode.NotificationResponse: - Connection?.OnNotification(new NpgsqlNotificationEventArgs(buf)); + if (handleCallbacks) + { + Connection?.OnNotification(new NpgsqlNotificationEventArgs(buf)); + } return null; case BackendMessageCode.AuthenticationRequest: @@ -1549,17 +1557,15 @@ internal ValueTask ReadMessage( case BackendMessageCode.CopyDone: return CopyDoneMessage.Instance; - case BackendMessageCode.PortalSuspended: - throw new NpgsqlException("Unimplemented message: " + code); case BackendMessageCode.ErrorResponse: return null; + case BackendMessageCode.PortalSuspended: case BackendMessageCode.FunctionCallResponse: // We don't use the obsolete function call protocol - throw new NpgsqlException("Unexpected backend message: " + code); - default: - throw new InvalidOperationException($"Internal Npgsql bug: unexpected value {code} of enum {nameof(BackendMessageCode)}. Please file a bug."); + ThrowHelper.ThrowInvalidOperationException($"Internal Npgsql bug: unexpected value {code} of enum {nameof(BackendMessageCode)}. 
Please file a bug."); + return null; } } diff --git a/src/Npgsql/Internal/NpgsqlReadBuffer.cs b/src/Npgsql/Internal/NpgsqlReadBuffer.cs index 9fb5c82afe..dc585b2e49 100644 --- a/src/Npgsql/Internal/NpgsqlReadBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlReadBuffer.cs @@ -5,6 +5,7 @@ using System.IO; using System.Net.Sockets; using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; using System.Text; using System.Threading; using System.Threading.Tasks; @@ -74,7 +75,9 @@ internal TimeSpan Timeout internal PgReader PgReader { get; } long _flushedBytes; // this will always fit at least one message. - internal long CumulativeReadPosition => unchecked(_flushedBytes + ReadPosition); + internal long CumulativeReadPosition + // Cast to uint to remove the sign extension (ReadPosition is never negative) + => _flushedBytes + (uint)ReadPosition; internal readonly byte[] Buffer; internal int FilledBytes; @@ -126,10 +129,10 @@ internal NpgsqlReadBuffer( #region I/O - public Task Ensure(int count, bool async) + public ValueTask Ensure(int count, bool async) => Ensure(count, async, readingNotifications: false); - public Task EnsureAsync(int count) + public ValueTask EnsureAsync(int count) => Ensure(count, async: true, readingNotifications: false); // Can't share due to Span vs Memory difference (can't make a memory out of a span). @@ -275,11 +278,14 @@ static Exception CreateCancelException(NpgsqlConnector connector) /// Ensures that bytes are available in the buffer, and if /// not, reads from the socket until enough is available. /// - internal Task Ensure(int count, bool async, bool readingNotifications) + internal ValueTask Ensure(int count, bool async, bool readingNotifications) { - return count <= ReadBytesLeft ? Task.CompletedTask : EnsureLong(this, count, async, readingNotifications); + return count <= ReadBytesLeft ? 
new() : EnsureLong(this, count, async, readingNotifications); - static async Task EnsureLong( +#if NET6_0_OR_GREATER + [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder))] +#endif + static async ValueTask EnsureLong( NpgsqlReadBuffer buffer, int count, bool async, @@ -413,7 +419,7 @@ static Exception CreateException(NpgsqlConnector connector) } } - internal Task ReadMore(bool async) => Ensure(ReadBytesLeft + 1, async); + internal ValueTask ReadMore(bool async) => Ensure(ReadBytesLeft + 1, async); internal NpgsqlReadBuffer AllocateOversize(int count) { @@ -463,112 +469,121 @@ public async Task Skip(int len, bool async) #region Read Simple [MethodImpl(MethodImplOptions.AggressiveInlining)] - public sbyte ReadSByte() => Read(); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public byte ReadByte() => Read(); + public byte ReadByte() + { + CheckBounds(); + var result = Buffer[ReadPosition]; + ReadPosition += sizeof(byte); + return result; + } [MethodImpl(MethodImplOptions.AggressiveInlining)] public short ReadInt16() - => ReadInt16(false); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public short ReadInt16(bool littleEndian) { - var result = Read(); - return littleEndian == BitConverter.IsLittleEndian - ? result : BinaryPrimitives.ReverseEndianness(result); + CheckBounds(); + var result = BitConverter.IsLittleEndian + ? BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref Buffer[ReadPosition])) + : Unsafe.ReadUnaligned(ref Buffer[ReadPosition]); + ReadPosition += sizeof(short); + return result; } [MethodImpl(MethodImplOptions.AggressiveInlining)] public ushort ReadUInt16() - => ReadUInt16(false); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public ushort ReadUInt16(bool littleEndian) { - var result = Read(); - return littleEndian == BitConverter.IsLittleEndian - ? result : BinaryPrimitives.ReverseEndianness(result); + CheckBounds(); + var result = BitConverter.IsLittleEndian + ? 
BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref Buffer[ReadPosition])) + : Unsafe.ReadUnaligned(ref Buffer[ReadPosition]); + ReadPosition += sizeof(ushort); + return result; } [MethodImpl(MethodImplOptions.AggressiveInlining)] public int ReadInt32() - => ReadInt32(false); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public int ReadInt32(bool littleEndian) { - var result = Read(); - return littleEndian == BitConverter.IsLittleEndian - ? result : BinaryPrimitives.ReverseEndianness(result); + CheckBounds(); + var result = BitConverter.IsLittleEndian + ? BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref Buffer[ReadPosition])) + : Unsafe.ReadUnaligned(ref Buffer[ReadPosition]); + ReadPosition += sizeof(int); + return result; } [MethodImpl(MethodImplOptions.AggressiveInlining)] public uint ReadUInt32() - => ReadUInt32(false); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public uint ReadUInt32(bool littleEndian) { - var result = Read(); - return littleEndian == BitConverter.IsLittleEndian - ? result : BinaryPrimitives.ReverseEndianness(result); + CheckBounds(); + var result = BitConverter.IsLittleEndian + ? BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref Buffer[ReadPosition])) + : Unsafe.ReadUnaligned(ref Buffer[ReadPosition]); + ReadPosition += sizeof(uint); + return result; } [MethodImpl(MethodImplOptions.AggressiveInlining)] public long ReadInt64() - => ReadInt64(false); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public long ReadInt64(bool littleEndian) { - var result = Read(); - return littleEndian == BitConverter.IsLittleEndian - ? result : BinaryPrimitives.ReverseEndianness(result); + CheckBounds(); + var result = BitConverter.IsLittleEndian + ? 
BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref Buffer[ReadPosition])) + : Unsafe.ReadUnaligned(ref Buffer[ReadPosition]); + ReadPosition += sizeof(long); + return result; } [MethodImpl(MethodImplOptions.AggressiveInlining)] public ulong ReadUInt64() - => ReadUInt64(false); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public ulong ReadUInt64(bool littleEndian) { - var result = Read(); - return littleEndian == BitConverter.IsLittleEndian - ? result : BinaryPrimitives.ReverseEndianness(result); + CheckBounds(); + var result = BitConverter.IsLittleEndian + ? BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref Buffer[ReadPosition])) + : Unsafe.ReadUnaligned(ref Buffer[ReadPosition]); + ReadPosition += sizeof(ulong); + return result; } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public float ReadSingle() - => ReadSingle(false); [MethodImpl(MethodImplOptions.AggressiveInlining)] - public float ReadSingle(bool littleEndian) + public float ReadSingle() { - var result = ReadInt32(littleEndian); - return Unsafe.As(ref result); + CheckBounds(); + float result; + if (BitConverter.IsLittleEndian) + { + var value = BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref Buffer[ReadPosition])); + result = Unsafe.As(ref value); + } + else + result = Unsafe.ReadUnaligned(ref Buffer[ReadPosition]); + ReadPosition += sizeof(float); + return result; } [MethodImpl(MethodImplOptions.AggressiveInlining)] public double ReadDouble() - => ReadDouble(false); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public double ReadDouble(bool littleEndian) { - var result = ReadInt64(littleEndian); - return Unsafe.As(ref result); + CheckBounds(); + double result; + if (BitConverter.IsLittleEndian) + { + var value = BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref Buffer[ReadPosition])); + result = Unsafe.As(ref value); + } + else + result = Unsafe.ReadUnaligned(ref Buffer[ReadPosition]); + ReadPosition += sizeof(double); + return 
result; } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - unsafe T Read() where T : unmanaged + [Conditional("DEBUG")] + unsafe void CheckBounds() where T : unmanaged { - Debug.Assert(sizeof(T) <= ReadBytesLeft, "There is not enough space left in the buffer."); - var result = Unsafe.ReadUnaligned(ref Buffer[ReadPosition]); - ReadPosition += sizeof(T); - return result; + if (sizeof(T) > ReadBytesLeft) + ThrowNoSpaceLeft(); + + static void ThrowNoSpaceLeft() + => ThrowHelper.ThrowInvalidOperationException("There is not enough space left in the buffer."); } public string ReadString(int byteLen) diff --git a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs index eb619f58c9..c67d920f61 100644 --- a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs @@ -183,7 +183,6 @@ public async Task Flush(bool async, CancellationToken cancellationToken = defaul } NpgsqlEventSource.Log.BytesWritten(WritePosition); _metricsReporter?.ReportBytesWritten(WritePosition); - //NpgsqlEventSource.Log.RequestFailed(); WritePosition = 0; if (_copyMode) @@ -265,88 +264,91 @@ internal async Task DirectWrite(ReadOnlyMemory memory, bool async, Cancell #region Write Simple - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public void WriteSByte(sbyte value) => Write(value); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public void WriteByte(byte value) => Write(value); [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal void WriteInt16(int value) - => WriteInt16((short)value, false); + public void WriteByte(byte value) + { + CheckBounds(); + Buffer[WritePosition] = value; + WritePosition += sizeof(byte); + } [MethodImpl(MethodImplOptions.AggressiveInlining)] public void WriteInt16(short value) - => WriteInt16(value, false); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public void WriteInt16(short value, bool littleEndian) - => Write(littleEndian == BitConverter.IsLittleEndian ? 
value : BinaryPrimitives.ReverseEndianness(value)); + { + CheckBounds(); + Unsafe.WriteUnaligned(ref Buffer[WritePosition], BitConverter.IsLittleEndian ? BinaryPrimitives.ReverseEndianness(value) : value); + WritePosition += sizeof(short); + } [MethodImpl(MethodImplOptions.AggressiveInlining)] public void WriteUInt16(ushort value) - => WriteUInt16(value, false); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public void WriteUInt16(ushort value, bool littleEndian) - => Write(littleEndian == BitConverter.IsLittleEndian ? value : BinaryPrimitives.ReverseEndianness(value)); + { + CheckBounds(); + Unsafe.WriteUnaligned(ref Buffer[WritePosition], BitConverter.IsLittleEndian ? BinaryPrimitives.ReverseEndianness(value) : value); + WritePosition += sizeof(ushort); + } [MethodImpl(MethodImplOptions.AggressiveInlining)] public void WriteInt32(int value) - => WriteInt32(value, false); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public void WriteInt32(int value, bool littleEndian) - => Write(littleEndian == BitConverter.IsLittleEndian ? value : BinaryPrimitives.ReverseEndianness(value)); + { + CheckBounds(); + Unsafe.WriteUnaligned(ref Buffer[WritePosition], BitConverter.IsLittleEndian ? BinaryPrimitives.ReverseEndianness(value) : value); + WritePosition += sizeof(int); + } [MethodImpl(MethodImplOptions.AggressiveInlining)] public void WriteUInt32(uint value) - => WriteUInt32(value, false); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public void WriteUInt32(uint value, bool littleEndian) - => Write(littleEndian == BitConverter.IsLittleEndian ? value : BinaryPrimitives.ReverseEndianness(value)); + { + CheckBounds(); + Unsafe.WriteUnaligned(ref Buffer[WritePosition], BitConverter.IsLittleEndian ? 
BinaryPrimitives.ReverseEndianness(value) : value); + WritePosition += sizeof(uint); + } [MethodImpl(MethodImplOptions.AggressiveInlining)] public void WriteInt64(long value) - => WriteInt64(value, false); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public void WriteInt64(long value, bool littleEndian) - => Write(littleEndian == BitConverter.IsLittleEndian ? value : BinaryPrimitives.ReverseEndianness(value)); + { + CheckBounds(); + Unsafe.WriteUnaligned(ref Buffer[WritePosition], BitConverter.IsLittleEndian ? BinaryPrimitives.ReverseEndianness(value) : value); + WritePosition += sizeof(long); + } [MethodImpl(MethodImplOptions.AggressiveInlining)] public void WriteUInt64(ulong value) - => WriteUInt64(value, false); + { + CheckBounds(); + Unsafe.WriteUnaligned(ref Buffer[WritePosition], BitConverter.IsLittleEndian ? BinaryPrimitives.ReverseEndianness(value) : value); + WritePosition += sizeof(ulong); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public void WriteUInt64(ulong value, bool littleEndian) - => Write(littleEndian == BitConverter.IsLittleEndian ? 
value : BinaryPrimitives.ReverseEndianness(value)); [MethodImpl(MethodImplOptions.AggressiveInlining)] public void WriteSingle(float value) - => WriteSingle(value, false); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public void WriteSingle(float value, bool littleEndian) - => WriteInt32(Unsafe.As(ref value), littleEndian); + { + CheckBounds(); + if (BitConverter.IsLittleEndian) + Unsafe.WriteUnaligned(ref Buffer[WritePosition], BinaryPrimitives.ReverseEndianness(Unsafe.As(ref value))); + else + Unsafe.WriteUnaligned(ref Buffer[WritePosition], value); + WritePosition += sizeof(float); + } [MethodImpl(MethodImplOptions.AggressiveInlining)] public void WriteDouble(double value) - => WriteDouble(value, false); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public void WriteDouble(double value, bool littleEndian) - => WriteInt64(Unsafe.As(ref value), littleEndian); + { + CheckBounds(); + if (BitConverter.IsLittleEndian) + Unsafe.WriteUnaligned(ref Buffer[WritePosition], BinaryPrimitives.ReverseEndianness(Unsafe.As(ref value))); + else + Unsafe.WriteUnaligned(ref Buffer[WritePosition], value); + WritePosition += sizeof(double); + } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - void Write(T value) + [Conditional("DEBUG")] + unsafe void CheckBounds() where T : unmanaged { - if (Unsafe.SizeOf() > WriteSpaceLeft) + if (sizeof(T) > WriteSpaceLeft) ThrowNotSpaceLeft(); - - Unsafe.WriteUnaligned(ref Buffer[WritePosition], value); - WritePosition += Unsafe.SizeOf(); } static void ThrowNotSpaceLeft() @@ -477,6 +479,14 @@ public void WriteNullTerminatedString(string s) WriteByte(0); } + public void WriteNullTerminatedString(byte[] s) + { + AssertASCIIOnly(s); + Debug.Assert(WriteSpaceLeft >= s.Length + 1); + WriteBytes(s); + WriteByte(0); + } + #endregion #region Write Complex @@ -588,5 +598,13 @@ internal static void AssertASCIIOnly(string s) Debug.Fail("Method only supports ASCII strings"); } + [Conditional("DEBUG")] + internal static void 
AssertASCIIOnly(byte[] s) + { + foreach (var c in s) + if (c >= 128) + Debug.Fail("Method only supports ASCII strings"); + } + #endregion } diff --git a/src/Npgsql/Internal/PgReader.cs b/src/Npgsql/Internal/PgReader.cs index f1f448bc65..2bf4456b9c 100644 --- a/src/Npgsql/Internal/PgReader.cs +++ b/src/Npgsql/Internal/PgReader.cs @@ -39,6 +39,9 @@ public class PgReader ArraySegment? _charsReadBuffer; bool _requiresCleanup; + // The field reading process of doing init/commit and startread/endread pairs is very perf sensitive. + // So this is used in Commit as a fast-path alternative to FieldRemaining to detect if the field was consumed succesfully. + bool _fieldConsumed; internal PgReader(NpgsqlReadBuffer buffer) { @@ -53,7 +56,7 @@ internal PgReader(NpgsqlReadBuffer buffer) internal int FieldOffset => (int)(_buffer.CumulativeReadPosition - _fieldStartPos); internal int FieldRemaining => FieldSize - FieldOffset; - bool HasCurrent => _currentSize >= 0; + bool HasCurrent => _currentSize is not -1; int CurrentSize => HasCurrent ? _currentSize : _fieldSize; public ValueMetadata Current => new() { Size = CurrentSize, Format = _fieldFormat, BufferRequirement = CurrentBufferRequirement }; @@ -62,9 +65,6 @@ internal PgReader(NpgsqlReadBuffer buffer) Size CurrentBufferRequirement => HasCurrent ? _currentBufferRequirement : _fieldBufferRequirement; int CurrentOffset => FieldOffset - _currentStartPos; - int BufferSize => _buffer.Size; - int BufferBytesRemaining => _buffer.ReadBytesLeft; - internal bool IsAtStart => FieldOffset is 0; internal bool Resumable => _resumable; public bool IsResumed => Resumable && CurrentSize != CurrentRemaining; @@ -193,7 +193,7 @@ NpgsqlReadBuffer.ColumnStream GetColumnStream(bool canSeek = false, int? 
length length ??= CurrentRemaining; CheckBounds(length.GetValueOrDefault()); - return _userActiveStream = _buffer.CreateStream(length.GetValueOrDefault(), canSeek && length <= BufferBytesRemaining); + return _userActiveStream = _buffer.CreateStream(length.GetValueOrDefault(), canSeek && length <= _buffer.ReadBytesLeft); } public TextReader GetTextReader(Encoding encoding) @@ -208,7 +208,7 @@ async ValueTask GetTextReader(bool async, Encoding encoding, Cancell const int maxPreparedSize = 1024 * 64; _requiresCleanup = true; - if (CurrentRemaining > BufferBytesRemaining || CurrentRemaining > maxPreparedSize) + if (CurrentRemaining > _buffer.ReadBytesLeft || CurrentRemaining > maxPreparedSize) return new StreamReader(GetColumnStream(), encoding, detectEncodingFromByteOrderMarks: false); if (_preparedTextReader is { IsDisposed: false }) @@ -229,16 +229,18 @@ public ValueTask ReadBytesAsync(Memory buffer, CancellationToken cancellat { var count = buffer.Length; CheckBounds(count); - if (BufferBytesRemaining >= count) + var offset = _buffer.ReadPosition; + var remaining = _buffer.FilledBytes - offset; + if (remaining >= count) { - _buffer.Buffer.AsSpan(_buffer.ReadPosition, count).CopyTo(buffer.Span); + _buffer.Buffer.AsSpan(offset, count).CopyTo(buffer.Span); _buffer.ReadPosition += count; return new(); } - return Slow(); + return Slow(count, buffer, cancellationToken); - async ValueTask Slow() + async ValueTask Slow(int count, Memory buffer, CancellationToken cancellationToken) { var stream = _buffer.CreateStream(count, canSeek: false); await using var _ = stream.ConfigureAwait(false); @@ -250,16 +252,18 @@ public void ReadBytes(Span buffer) { var count = buffer.Length; CheckBounds(count); - if (BufferBytesRemaining >= count) + var offset = _buffer.ReadPosition; + var remaining = _buffer.FilledBytes - offset; + if (remaining >= count) { - _buffer.Buffer.AsSpan(_buffer.ReadPosition, count).CopyTo(buffer); + _buffer.Buffer.AsSpan(offset, count).CopyTo(buffer); 
_buffer.ReadPosition += count; return; } - Slow(buffer); + Slow(count, buffer); - void Slow(Span buffer) + void Slow(int count, Span buffer) { using var stream = _buffer.CreateStream(count, canSeek: false); stream.ReadExactly(buffer); @@ -269,9 +273,11 @@ void Slow(Span buffer) public bool TryReadBytes(int count, out ReadOnlySpan bytes) { CheckBounds(count); - if (BufferBytesRemaining >= count) + var offset = _buffer.ReadPosition; + var remaining = _buffer.FilledBytes - offset; + if (remaining >= count) { - bytes = new ReadOnlySpan(_buffer.Buffer, _buffer.ReadPosition, count); + bytes = new ReadOnlySpan(_buffer.Buffer, offset, count); _buffer.ReadPosition += count; return true; } @@ -282,9 +288,11 @@ public bool TryReadBytes(int count, out ReadOnlySpan bytes) public bool TryReadBytes(int count, out ReadOnlyMemory bytes) { CheckBounds(count); - if (BufferBytesRemaining >= count) + var offset = _buffer.ReadPosition; + var remaining = _buffer.FilledBytes - offset; + if (remaining >= count) { - bytes = new ReadOnlyMemory(_buffer.Buffer, _buffer.ReadPosition, count); + bytes = new ReadOnlyMemory(_buffer.Buffer, offset, count); _buffer.ReadPosition += count; return true; } @@ -296,9 +304,11 @@ public bool TryReadBytes(int count, out ReadOnlyMemory bytes) public ReadOnlySequence ReadBytes(int count) { CheckBounds(count); - if (BufferBytesRemaining >= count) + var offset = _buffer.ReadPosition; + var remaining = _buffer.FilledBytes - offset; + if (remaining >= count) { - var result = new ReadOnlySequence(_buffer.Buffer, _buffer.ReadPosition, count); + var result = new ReadOnlySequence(_buffer.Buffer, offset, count); _buffer.ReadPosition += count; return result; } @@ -312,9 +322,11 @@ public ReadOnlySequence ReadBytes(int count) public async ValueTask> ReadBytesAsync(int count, CancellationToken cancellationToken = default) { CheckBounds(count); - if (BufferBytesRemaining >= count) + var offset = _buffer.ReadPosition; + var remaining = _buffer.FilledBytes - offset; + if 
(remaining >= count) { - var result = new ReadOnlySequence(_buffer.Buffer, _buffer.ReadPosition, count); + var result = new ReadOnlySequence(_buffer.Buffer, offset, count); _buffer.ReadPosition += count; return result; } @@ -415,28 +427,29 @@ internal void InitCharsRead(int dataOffset, ArraySegment? buffer, out int? internal PgReader Init(int fieldLength, DataFormat format, bool resumable = false) { - if (resumable) + if (Initialized) { - if (Resumable) + if (resumable) { - Debug.Assert(Initialized); - return this; + if (Resumable) + return this; + _resumable = true; + } + else + { + if (!IsAtStart) + ThrowHelper.ThrowInvalidOperationException("Cannot be initialized to be non-resumable until a commit is issued."); + _resumable = false; } - _resumable = true; - } - else if (Initialized) - { - if (!IsAtStart) - ThrowHelper.ThrowInvalidOperationException("Cannot be initialized to be non-resumable until a commit is issued."); - _resumable = false; } - // Debug.Assert(!Initialized || Resumable, "Reader wasn't properly committed before next init"); Debug.Assert(!_requiresCleanup, "Reader wasn't properly committed before next init"); _fieldStartPos = _buffer.CumulativeReadPosition; _fieldFormat = format; _fieldSize = fieldLength; + _resumable = resumable; + _fieldConsumed = false; return this; } @@ -469,6 +482,8 @@ internal void EndRead() if (FieldOffset != FieldSize) ThrowNotConsumedExactly(); + + _fieldConsumed = true; } internal ValueTask EndReadAsync() @@ -482,6 +497,8 @@ internal ValueTask EndReadAsync() if (FieldOffset != FieldSize) ThrowNotConsumedExactly(); + + _fieldConsumed = true; return new(); } @@ -544,43 +561,53 @@ internal void ThrowIfStreamActive() } internal bool CommitHasIO(bool resuming) => Initialized && !resuming && FieldRemaining > 0; - internal ValueTask Commit(bool async, bool resuming) + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal void Commit(bool resuming) { if (!Initialized) - return new(); + return; if (resuming) { if 
(!Resumable) ThrowHelper.ThrowInvalidOperationException("Cannot resume a non-resumable read."); - return new(); + return; } // We don't rely on CurrentRemaining, just to make sure we consume fully in the event of a nested scope not being disposed. // Also shut down any streaming, pooled arrays etc. - if (_requiresCleanup || FieldRemaining > 0) - return Slow(async); + if (_requiresCleanup || (!_fieldConsumed && FieldRemaining > 0)) + { + CommitSlow(); + return; + } - _fieldSize = default; _fieldStartPos = -1; - _resumable = false; - _fieldFormat = default; - if (_currentSize is not -1) + Debug.Assert(!Initialized); + + // These will always be re-initialized by Init() + // _fieldSize = default; + // _fieldFormat = default; + // _resumable = default; + // _fieldCompleted = default; + + if (HasCurrent) { _currentStartPos = 0; _currentBufferRequirement = default; _currentSize = -1; + Debug.Assert(!HasCurrent); } - Debug.Assert(!Initialized); - return new(); - async ValueTask Slow(bool async) + [MethodImpl(MethodImplOptions.NoInlining)] + void CommitSlow() { // Shut down any streaming and pooling going on on the column. 
if (_requiresCleanup) { if (_userActiveStream is { IsDisposed: false }) - await DisposeUserActiveStream(async).ConfigureAwait(false); + DisposeUserActiveStream(async: false).GetAwaiter().GetResult(); if (_pooledArray is not null) { @@ -597,15 +624,105 @@ async ValueTask Slow(bool async) _requiresCleanup = false; } - await Consume(async, count: FieldRemaining).ConfigureAwait(false); - _fieldSize = default; + Consume(async: false, count: FieldRemaining).GetAwaiter().GetResult(); + _fieldStartPos = -1; - _resumable = false; - _fieldFormat = default; + Debug.Assert(!Initialized); + + // These will always be re-initialized by Init() + // _fieldSize = default; + // _fieldFormat = default; + // _resumable = default; + // _fieldCompleted = default; + + if (HasCurrent) + { + _currentStartPos = 0; + _currentBufferRequirement = default; + _currentSize = -1; + Debug.Assert(!HasCurrent); + } + } + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal ValueTask CommitAsync(bool resuming) + { + if (!Initialized) + return new(); + + if (resuming) + { + if (!Resumable) + ThrowHelper.ThrowInvalidOperationException("Cannot resume a non-resumable read."); + return new(); + } + + // We don't rely on CurrentRemaining, just to make sure we consume fully in the event of a nested scope not being disposed. + // Also shut down any streaming, pooled arrays etc. + if (_requiresCleanup || (!_fieldConsumed && FieldRemaining > 0)) + return CommitSlow(); + + _fieldStartPos = -1; + Debug.Assert(!Initialized); + + // These will always be re-initialized by Init() + // _fieldSize = default; + // _fieldFormat = default; + // _resumable = default; + // _fieldCompleted = default; + + if (HasCurrent) + { _currentStartPos = 0; _currentBufferRequirement = default; _currentSize = -1; + Debug.Assert(!HasCurrent); + } + + return new(); + + async ValueTask CommitSlow() + { + // Shut down any streaming and pooling going on on the column. 
+ if (_requiresCleanup) + { + if (_userActiveStream is { IsDisposed: false }) + await DisposeUserActiveStream(async: true).ConfigureAwait(false); + + if (_pooledArray is not null) + { + ArrayPool.Return(_pooledArray); + _pooledArray = null; + } + + if (_charsReadReader is not null) + { + _charsReadReader.Dispose(); + _charsReadReader = null; + _charsRead = default; + } + _requiresCleanup = false; + } + + await Consume(async: true, count: FieldRemaining).ConfigureAwait(false); + + _fieldStartPos = -1; Debug.Assert(!Initialized); + + // These will always be re-initialized by Init() + // _fieldSize = default; + // _fieldFormat = default; + // _resumable = default; + // _fieldCompleted = default; + + if (HasCurrent) + { + _currentStartPos = 0; + _currentBufferRequirement = default; + _currentSize = -1; + Debug.Assert(!HasCurrent); + } } } @@ -613,12 +730,17 @@ byte[] RentArray(int count) { _requiresCleanup = true; var pooledArray = _pooledArray; - var array = _pooledArray = ArrayPool.Rent(count); if (pooledArray is not null) + { + if (pooledArray.Length >= count) + return pooledArray; ArrayPool.Return(pooledArray); + } + var array = _pooledArray = ArrayPool.Rent(count); return array; } + [MethodImpl(MethodImplOptions.AggressiveInlining)] int GetBufferRequirementByteCount(Size bufferRequirement) => bufferRequirement is { Kind: SizeKind.UpperBound } ? 
Math.Min(CurrentRemaining, bufferRequirement.Value) @@ -630,12 +752,12 @@ public bool ShouldBuffer(Size bufferRequirement) => ShouldBuffer(GetBufferRequirementByteCount(bufferRequirement)); public bool ShouldBuffer(int byteCount) { - return BufferBytesRemaining < byteCount && ShouldBufferSlow(); + return _buffer.ReadBytesLeft < byteCount && ShouldBufferSlow(); [MethodImpl(MethodImplOptions.NoInlining)] bool ShouldBufferSlow() { - if (byteCount > BufferSize) + if (byteCount > _buffer.Size) ThrowArgumentOutOfRange(); if (byteCount > CurrentRemaining) ThrowArgumentOutOfRangeOfValue(); @@ -657,7 +779,7 @@ public void Buffer(Size bufferRequirement) public ValueTask BufferAsync(Size bufferRequirement, CancellationToken cancellationToken) => BufferAsync(GetBufferRequirementByteCount(bufferRequirement), cancellationToken); - public ValueTask BufferAsync(int byteCount, CancellationToken cancellationToken) => new(_buffer.EnsureAsync(byteCount)); + public ValueTask BufferAsync(int byteCount, CancellationToken cancellationToken) => _buffer.EnsureAsync(byteCount); internal ValueTask Buffer(bool async, Size bufferRequirement, CancellationToken cancellationToken) => Buffer(async, GetBufferRequirementByteCount(bufferRequirement), cancellationToken); diff --git a/src/Npgsql/Internal/PgTypeInfo.cs b/src/Npgsql/Internal/PgTypeInfo.cs index f8a808af15..edbc693728 100644 --- a/src/Npgsql/Internal/PgTypeInfo.cs +++ b/src/Npgsql/Internal/PgTypeInfo.cs @@ -296,34 +296,33 @@ public PgConverterResolution(PgConverter converter, PgTypeId pgTypeId) readonly struct PgConverterInfo { + readonly PgTypeInfo _typeInfo; + public PgConverterInfo(PgTypeInfo pgTypeInfo, PgConverter converter, Size bufferRequirement) { - TypeInfo = pgTypeInfo; + _typeInfo = pgTypeInfo; Converter = converter; BufferRequirement = bufferRequirement; + + // Object typed resolvers can return any type of converter, so we check the type of the converter instead. 
+ // We cannot do this in general as we should respect the 'unboxed type' of infos, which can differ from the converter type. + if (pgTypeInfo.IsResolverInfo && pgTypeInfo.Type == typeof(object)) + TypeToConvert = Converter.TypeToConvert; + else + TypeToConvert = pgTypeInfo.Type; } - public bool IsDefault => TypeInfo is null; + public bool IsDefault => _typeInfo is null; - public Type TypeToConvert - { - get - { - // Object typed resolvers can return any type of converter, so we check the type of the converter instead. - // We cannot do this in general as we should respect the 'unboxed type' of infos, which can differ from the converter type. - if (TypeInfo.IsResolverInfo && TypeInfo.Type == typeof(object)) - return Converter.TypeToConvert; + public Type TypeToConvert { get; } - return TypeInfo.Type; - } - } + public PgTypeInfo TypeInfo => _typeInfo; - public PgTypeInfo TypeInfo { get; } public PgConverter Converter { get; } public Size BufferRequirement { get; } /// Whether Converter.TypeToConvert matches PgTypeInfo.Type, if it doesn't object apis should be used. - public bool IsBoxingConverter => TypeInfo.IsBoxing; + public bool IsBoxingConverter => _typeInfo.IsBoxing; public PgConverter GetConverter() => (PgConverter)Converter; } diff --git a/src/Npgsql/Internal/PgWriter.cs b/src/Npgsql/Internal/PgWriter.cs index adc511846e..e56136ff91 100644 --- a/src/Npgsql/Internal/PgWriter.cs +++ b/src/Npgsql/Internal/PgWriter.cs @@ -197,7 +197,7 @@ internal void Commit(int? 
expectedByteCount = null) var totalBytesWritten = _totalBytesWritten; _totalBytesWritten = 0; if (totalBytesWritten != expectedByteCount) - throw new InvalidOperationException($"Bytes written ({totalBytesWritten}) and expected byte count ({expectedByteCount}) don't match."); + ThrowHelper.ThrowInvalidOperationException($"Bytes written ({totalBytesWritten}) and expected byte count ({expectedByteCount}) don't match."); } } diff --git a/src/Npgsql/Internal/Size.cs b/src/Npgsql/Internal/Size.cs index f239453015..0d494b6dbd 100644 --- a/src/Npgsql/Internal/Size.cs +++ b/src/Npgsql/Internal/Size.cs @@ -3,7 +3,7 @@ namespace Npgsql.Internal; -public enum SizeKind : byte +public enum SizeKind { Unknown = 0, Exact, diff --git a/src/Npgsql/MetricsReporter.cs b/src/Npgsql/MetricsReporter.cs index 5cbaf9a9c0..27cd32f7d3 100644 --- a/src/Npgsql/MetricsReporter.cs +++ b/src/Npgsql/MetricsReporter.cs @@ -27,6 +27,7 @@ sealed class MetricsReporter : IDisposable static readonly UpDownCounter PendingConnectionRequests; static readonly UpDownCounter ConnectionTimeouts; static readonly Histogram ConnectionCreateTime; + static readonly ObservableUpDownCounter PreparedRatio; readonly NpgsqlDataSource _dataSource; readonly KeyValuePair _poolNameTag; @@ -106,7 +107,7 @@ static MetricsReporter() unit: "{connection}", description: "The maximum number of open connections allowed."); - Meter.CreateObservableUpDownCounter( + PreparedRatio = Meter.CreateObservableUpDownCounter( "db.client.commands.prepared_ratio", GetPreparedCommandsRatio, description: "The ratio of prepared command executions."); @@ -127,7 +128,8 @@ public MetricsReporter(NpgsqlDataSource dataSource) internal long ReportCommandStart() { CommandsExecuting.Add(1, _poolNameTag); - Interlocked.Increment(ref _commandCounters.CommandsStarted); + if (PreparedRatio.Enabled) + Interlocked.Increment(ref _commandCounters.CommandsStarted); return CommandDuration.Enabled ? 
Stopwatch.GetTimestamp() : 0; } @@ -147,7 +149,11 @@ internal void ReportCommandStop(long startTimestamp) } } - internal void CommandStartPrepared() => Interlocked.Increment(ref _commandCounters.PreparedCommandsStarted); + internal void CommandStartPrepared() + { + if (PreparedRatio.Enabled) + Interlocked.Increment(ref _commandCounters.PreparedCommandsStarted); + } internal void ReportCommandFailed() => CommandsFailed.Add(1, _poolNameTag); diff --git a/src/Npgsql/MultiplexingDataSource.cs b/src/Npgsql/MultiplexingDataSource.cs index d1245cfee4..277bc4e835 100644 --- a/src/Npgsql/MultiplexingDataSource.cs +++ b/src/Npgsql/MultiplexingDataSource.cs @@ -1,5 +1,6 @@ using System; using System.Diagnostics; +using System.Runtime.CompilerServices; using System.Threading; using System.Threading.Channels; using System.Threading.Tasks; @@ -16,8 +17,6 @@ sealed class MultiplexingDataSource : PoolingDataSource readonly bool _autoPrepare; - internal volatile bool StartupCheckPerformed; - readonly ChannelReader _multiplexCommandReader; internal ChannelWriter MultiplexCommandWriter { get; } @@ -190,15 +189,18 @@ async Task MultiplexingWriteLoop() // under our write threshold and timer delay. // Note we already have one command we read above, and have already updated the connector's // CommandsInFlightCount. Now write that command. 
- var writtenSynchronously = WriteCommand(connector, command, ref stats); - - while (connector.WriteBuffer.WritePosition < _writeCoalescingBufferThresholdBytes && - writtenSynchronously && - _multiplexCommandReader.TryRead(out command)) + var first = true; + bool writtenSynchronously; + do { - Interlocked.Increment(ref connector.CommandsInFlightCount); + if (first) + first = false; + else + Interlocked.Increment(ref connector.CommandsInFlightCount); writtenSynchronously = WriteCommand(connector, command, ref stats); - } + } while (connector.WriteBuffer.WritePosition < _writeCoalescingBufferThresholdBytes && + writtenSynchronously && + _multiplexCommandReader.TryRead(out command)); // If all commands were written synchronously (good path), complete the write here, flushing // and updating statistics. If not, CompleteRewrite is scheduled to run later, when the async @@ -212,6 +214,7 @@ async Task MultiplexingWriteLoop() } } + [MethodImpl(MethodImplOptions.AggressiveInlining)] bool WriteCommand(NpgsqlConnector connector, NpgsqlCommand command, ref MultiplexingStats stats) { // Note: this method *never* awaits on I/O - doing so would suspend all outgoing multiplexing commands @@ -283,7 +286,8 @@ bool WriteCommand(NpgsqlConnector connector, NpgsqlCommand command, ref Multiple default: Debug.Fail("When writing command to connector, task is in invalid state " + task.Status); - throw new Exception("When writing command to connector, task is in invalid state " + task.Status); + ThrowHelper.ThrowNpgsqlException("When writing command to connector, task is in invalid state " + task.Status); + return false; } } @@ -326,7 +330,8 @@ void Flush(NpgsqlConnector connector, ref MultiplexingStats stats) default: Debug.Fail("When flushing, task is in invalid state " + task.Status); - throw new Exception("When flushing, task is in invalid state " + task.Status); + ThrowHelper.ThrowNpgsqlException("When flushing, task is in invalid state " + task.Status); + return; } } diff --git 
a/src/Npgsql/NpgsqlBatchCommand.cs b/src/Npgsql/NpgsqlBatchCommand.cs index a27b76352b..4123a91506 100644 --- a/src/Npgsql/NpgsqlBatchCommand.cs +++ b/src/Npgsql/NpgsqlBatchCommand.cs @@ -13,6 +13,8 @@ namespace Npgsql; /// public sealed class NpgsqlBatchCommand : DbBatchCommand { + internal static readonly List EmptyParameters = new(); + string _commandText; /// @@ -35,8 +37,9 @@ public override string CommandText /// protected override DbParameterCollection DbParameterCollection => Parameters; + internal NpgsqlParameterCollection? _parameters; /// - public new NpgsqlParameterCollection Parameters { get; } = new(); + public new NpgsqlParameterCollection Parameters => _parameters ??= new(); #pragma warning disable CA1822 // Mark members as static @@ -152,6 +155,10 @@ internal List PositionalParameters set => _inputParameters = value; } + internal bool HasParameters => _inputParameters?.Count > 0 || _ownedInputParameters?.Count > 0; + + internal List CurrentParametersReadOnly => HasParameters ? PositionalParameters : EmptyParameters; + List? _ownedInputParameters; List? _inputParameters; @@ -191,9 +198,9 @@ internal PreparedStatement? PreparedStatement internal bool IsPreparing; /// - /// Holds the server-side (prepared) statement name. Empty string for non-prepared statements. + /// Holds the server-side (prepared) ASCII statement name. Empty string for non-prepared statements. /// - internal string StatementName => PreparedStatement?.Name ?? ""; + internal byte[] StatementName => PreparedStatement?.Name ?? Array.Empty(); /// /// Whether this statement has already been prepared (including automatic preparation). diff --git a/src/Npgsql/NpgsqlBinaryExporter.cs b/src/Npgsql/NpgsqlBinaryExporter.cs index bb1716eb0e..4941e35530 100644 --- a/src/Npgsql/NpgsqlBinaryExporter.cs +++ b/src/Npgsql/NpgsqlBinaryExporter.cs @@ -282,7 +282,7 @@ async ValueTask Read(bool async, NpgsqlDbType? type, CancellationToken can // Allow one more read if the field is a db null. 
// We cannot allow endless rereads otherwise it becomes quite unclear when a column advance happens. - if (PgReader is { Resumable: true, FieldSize: -1 }) + if (PgReader is { Initialized: true, Resumable: true, FieldSize: -1 }) { await Commit(async, resumableOp: false).ConfigureAwait(false); return DbNullOrThrow(); @@ -291,7 +291,7 @@ async ValueTask Read(bool async, NpgsqlDbType? type, CancellationToken can // We must commit the current column before reading the next one unless it was an IsNull call. PgConverterInfo info; bool asObject; - if (!PgReader.Resumable || PgReader.CurrentRemaining != PgReader.FieldSize) + if (!PgReader.Initialized || !PgReader.Resumable || PgReader.CurrentRemaining != PgReader.FieldSize) { await Commit(async, resumableOp: false).ConfigureAwait(false); info = GetInfo(out asObject); @@ -390,12 +390,17 @@ ValueTask Commit(bool async, bool resumableOp) var resuming = PgReader is { Initialized: true, Resumable: true } && resumableOp; if (!resuming) _column++; - return PgReader.Commit(async, resuming); + + if (async) + return PgReader.CommitAsync(resuming); + + PgReader.Commit(resuming); + return new(); } async ValueTask ReadColumnLenIfNeeded(bool async, bool resumableOp) { - if (PgReader is { Resumable: true, FieldSize: -1 }) + if (PgReader is { Initialized: true, Resumable: true, FieldSize: -1 }) return -1; await _buf.Ensure(4, async).ConfigureAwait(false); @@ -454,7 +459,10 @@ async ValueTask DisposeAsync(bool async) { using var registration = _connector.StartNestedCancellableOperation(attemptPgCancellation: false); // Be sure to commit the reader. 
- await PgReader.Commit(async, resuming: false).ConfigureAwait(false); + if (async) + await PgReader.CommitAsync(resuming: false).ConfigureAwait(false); + else + PgReader.Commit(resuming: false); // Finish the current CopyData message await _buf.Skip(checked((int)(_endOfMessagePos - _buf.CumulativeReadPosition)), async).ConfigureAwait(false); // Read to the end diff --git a/src/Npgsql/NpgsqlBinaryImporter.cs b/src/Npgsql/NpgsqlBinaryImporter.cs index 72574607a7..30b16cc75b 100644 --- a/src/Npgsql/NpgsqlBinaryImporter.cs +++ b/src/Npgsql/NpgsqlBinaryImporter.cs @@ -139,7 +139,7 @@ async Task StartRow(bool async, CancellationToken cancellationToken = default) if (_buf.WriteSpaceLeft < 2) await _buf.Flush(async, cancellationToken).ConfigureAwait(false); - _buf.WriteInt16(NumColumns); + _buf.WriteInt16((short)NumColumns); _pgWriter.Refresh(); _column = 0; diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index 90032d8476..940e4b8ae4 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -16,7 +16,6 @@ using System.Threading.Channels; using Microsoft.Extensions.Logging; using Npgsql.Internal; -using Npgsql.Internal.Postgres; using Npgsql.Properties; namespace Npgsql; @@ -45,7 +44,7 @@ public class NpgsqlCommand : DbCommand, ICloneable, IComponent string _commandText; CommandBehavior _behavior; int? _timeout; - readonly NpgsqlParameterCollection _parameters; + internal NpgsqlParameterCollection? _parameters; /// /// Whether this is wrapped by an . @@ -82,8 +81,6 @@ public class NpgsqlCommand : DbCommand, ICloneable, IComponent internal bool EnableErrorBarriers { get; set; } - static readonly List EmptyParameters = new(); - static readonly TaskScheduler ConstrainedConcurrencyScheduler = new ConcurrentExclusiveSchedulerPair(TaskScheduler.Default, Math.Max(1, Environment.ProcessorCount / 2)).ConcurrentScheduler; @@ -126,7 +123,6 @@ public NpgsqlCommand(string? cmdText, NpgsqlConnection? 
connection) { GC.SuppressFinalize(this); InternalBatchCommands = new List(1); - _parameters = new NpgsqlParameterCollection(); _commandText = cmdText ?? string.Empty; InternalConnection = connection; CommandType = CommandType.Text; @@ -408,7 +404,7 @@ internal CommandState State /// Gets the . /// /// The parameters of the SQL statement or function (stored procedure). The default is an empty collection. - public new NpgsqlParameterCollection Parameters => _parameters; + public new NpgsqlParameterCollection Parameters => _parameters ??= new(); #endregion @@ -663,7 +659,7 @@ Task Prepare(bool async, CancellationToken cancellationToken = default) { foreach (var batchCommand in InternalBatchCommands) { - batchCommand.Parameters.ProcessParameters(connector.SerializerOptions, validateValues: false, CommandType); + batchCommand._parameters?.ProcessParameters(connector.SerializerOptions, validateValues: false, CommandType); ProcessRawQuery(connector.SqlQueryParser, connector.UseConformingStrings, batchCommand); needToPrepare = batchCommand.ExplicitPrepare(connector) || needToPrepare; @@ -680,7 +676,7 @@ IEnumerable CommandTexts() } else { - Parameters.ProcessParameters(connector.SerializerOptions, validateValues: false, CommandType); + _parameters?.ProcessParameters(connector.SerializerOptions, validateValues: false, CommandType); ProcessRawQuery(connector.SqlQueryParser, connector.UseConformingStrings, batchCommand: null); foreach (var batchCommand in InternalBatchCommands) @@ -861,8 +857,8 @@ async Task Unprepare(bool async, CancellationToken cancellationToken = default) internal void ProcessRawQuery(SqlQueryParser? parser, bool standardConformingStrings, NpgsqlBatchCommand? batchCommand) { var (commandText, commandType, parameters) = batchCommand is null - ? (CommandText, CommandType, Parameters) - : (batchCommand.CommandText, batchCommand.CommandType, batchCommand.Parameters); + ? 
(CommandText, CommandType, _parameters) + : (batchCommand.CommandText, batchCommand.CommandType, batchCommand._parameters); if (string.IsNullOrEmpty(commandText)) ThrowHelper.ThrowInvalidOperationException("CommandText property has not been initialized"); @@ -870,7 +866,7 @@ internal void ProcessRawQuery(SqlQueryParser? parser, bool standardConformingStr switch (commandType) { case CommandType.Text: - switch (parameters.PlaceholderType) + switch (parameters?.PlaceholderType ?? PlaceholderType.NoParameters) { case PlaceholderType.Positional: // In positional parameter mode, we don't need to parse/rewrite the CommandText or reorder the parameters - just use @@ -880,12 +876,14 @@ internal void ProcessRawQuery(SqlQueryParser? parser, bool standardConformingStr { batchCommand = TruncateStatementsToOne(); batchCommand.FinalCommandText = CommandText; - batchCommand.PositionalParameters = Parameters.InternalList; + if (parameters is not null) + batchCommand.PositionalParameters = parameters.InternalList; } else { batchCommand.FinalCommandText = batchCommand.CommandText; - batchCommand.PositionalParameters = batchCommand.Parameters.InternalList; + if (parameters is not null) + batchCommand.PositionalParameters = parameters.InternalList; } ValidateParameterCount(batchCommand); @@ -908,7 +906,7 @@ internal void ProcessRawQuery(SqlQueryParser? parser, bool standardConformingStr if (batchCommand is null) { parser.ParseRawQuery(this, standardConformingStrings); - if (InternalBatchCommands.Count > 1 && _parameters.HasOutputParameters) + if (InternalBatchCommands.Count > 1 && _parameters?.HasOutputParameters == true) ThrowHelper.ThrowNotSupportedException("Commands with multiple queries cannot have out parameters"); for (var i = 0; i < InternalBatchCommands.Count; i++) ValidateParameterCount(InternalBatchCommands[i]); @@ -916,7 +914,7 @@ internal void ProcessRawQuery(SqlQueryParser? 
parser, bool standardConformingStr else { parser.ParseRawQuery(batchCommand, standardConformingStrings); - if (batchCommand.Parameters.HasOutputParameters) + if (batchCommand._parameters?.HasOutputParameters == true) ThrowHelper.ThrowNotSupportedException("Batches cannot cannot have out parameters"); ValidateParameterCount(batchCommand); } @@ -928,7 +926,7 @@ internal void ProcessRawQuery(SqlQueryParser? parser, bool standardConformingStr break; default: - ThrowHelper.ThrowArgumentOutOfRangeException(nameof(PlaceholderType), $"Unknown {nameof(PlaceholderType)} value: {{0}}", Parameters.PlaceholderType); + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(PlaceholderType), $"Unknown {nameof(PlaceholderType)} value: {{0}}", _parameters?.PlaceholderType ?? PlaceholderType.NoParameters); break; } @@ -947,43 +945,46 @@ internal void ProcessRawQuery(SqlQueryParser? parser, bool standardConformingStr var isFirstParam = true; var seenNamedParam = false; - var inputParameters = new List(parameters.Count); - - for (var i = 0; i < parameters.Count; i++) + var inputParameters = NpgsqlBatchCommand.EmptyParameters; + if (parameters is not null) { - var parameter = parameters[i]; + inputParameters = new List(parameters.Count); + for (var i = 0; i < parameters.Count; i++) + { + var parameter = parameters[i]; - // With functions, output parameters are never present when calling the function (they only define the schema of the - // returned table). With stored procedures they must be specified in the CALL argument list (see below). - if (EnableStoredProcedureCompatMode && parameter.Direction == ParameterDirection.Output) - continue; + // With functions, output parameters are never present when calling the function (they only define the schema of the + // returned table). With stored procedures they must be specified in the CALL argument list (see below). 
+ if (EnableStoredProcedureCompatMode && parameter.Direction == ParameterDirection.Output) + continue; - if (isFirstParam) - isFirstParam = false; - else - sqlBuilder.Append(", "); + if (isFirstParam) + isFirstParam = false; + else + sqlBuilder.Append(", "); - if (parameter.IsPositional) - { - if (seenNamedParam) - ThrowHelper.ThrowArgumentException(NpgsqlStrings.PositionalParameterAfterNamed); - } - else - { - seenNamedParam = true; + if (parameter.IsPositional) + { + if (seenNamedParam) + ThrowHelper.ThrowArgumentException(NpgsqlStrings.PositionalParameterAfterNamed); + } + else + { + seenNamedParam = true; - sqlBuilder - .Append('"') - .Append(parameter.TrimmedName.Replace("\"", "\"\"")) - .Append("\" := "); - } + sqlBuilder + .Append('"') + .Append(parameter.TrimmedName.Replace("\"", "\"\"")) + .Append("\" := "); + } - if (parameter.Direction == ParameterDirection.Output) - sqlBuilder.Append("NULL"); - else - { - inputParameters.Add(parameter); - sqlBuilder.Append('$').Append(inputParameters.Count); + if (parameter.Direction == ParameterDirection.Output) + sqlBuilder.Append("NULL"); + else + { + inputParameters!.Add(parameter); + sqlBuilder.Append('$').Append(inputParameters.Count); + } } } @@ -1003,7 +1004,7 @@ internal void ProcessRawQuery(SqlQueryParser? 
parser, bool standardConformingStr static void ValidateParameterCount(NpgsqlBatchCommand batchCommand) { - if (batchCommand.PositionalParameters.Count > ushort.MaxValue) + if (batchCommand.HasParameters && batchCommand.PositionalParameters.Count > ushort.MaxValue) ThrowHelper.ThrowNpgsqlException("A statement cannot have more than 65535 parameters"); } } @@ -1046,20 +1047,23 @@ async Task WriteExecute(NpgsqlConnector connector, bool async, bool flush, Cance if (pStatement?.StatementBeingReplaced != null) await connector.WriteClose(StatementOrPortal.Statement, pStatement.StatementBeingReplaced.Name!, async, cancellationToken).ConfigureAwait(false); - await connector.WriteParse(batchCommand.FinalCommandText, batchCommand.StatementName, batchCommand.PositionalParameters, async, cancellationToken).ConfigureAwait(false); + await connector.WriteParse(batchCommand.FinalCommandText, batchCommand.StatementName, + batchCommand.CurrentParametersReadOnly, async, cancellationToken).ConfigureAwait(false); await connector.WriteBind( - batchCommand.PositionalParameters, string.Empty, batchCommand.StatementName, AllResultTypesAreUnknown, + batchCommand.CurrentParametersReadOnly, + string.Empty, batchCommand.StatementName, AllResultTypesAreUnknown, i == 0 ? UnknownResultTypeList : null, async, cancellationToken).ConfigureAwait(false); - await connector.WriteDescribe(StatementOrPortal.Portal, string.Empty, async, cancellationToken).ConfigureAwait(false); + await connector.WriteDescribe(StatementOrPortal.Portal, Array.Empty(), async, cancellationToken).ConfigureAwait(false); } else { // The statement is already prepared, only a Bind is needed await connector.WriteBind( - batchCommand.PositionalParameters, string.Empty, batchCommand.StatementName, AllResultTypesAreUnknown, + batchCommand.CurrentParametersReadOnly, + string.Empty, batchCommand.StatementName, AllResultTypesAreUnknown, i == 0 ? 
UnknownResultTypeList : null, async, cancellationToken).ConfigureAwait(false); } @@ -1069,8 +1073,7 @@ await connector.WriteBind( if (batchCommand.AppendErrorBarrier ?? EnableErrorBarriers) await connector.WriteSync(async, cancellationToken).ConfigureAwait(false); - if (pStatement != null) - pStatement.LastUsed = DateTime.UtcNow; + pStatement?.RefreshLastUsed(); } if (batchCommand is null || !(batchCommand.AppendErrorBarrier ?? EnableErrorBarriers)) @@ -1097,7 +1100,8 @@ async Task WriteExecuteSchemaOnly(NpgsqlConnector connector, bool async, bool fl continue; // Prepared, we already have the RowDescription await connector.WriteParse(batchCommand.FinalCommandText!, batchCommand.StatementName, - batchCommand.PositionalParameters, async, cancellationToken).ConfigureAwait(false); + batchCommand.CurrentParametersReadOnly, + async, cancellationToken).ConfigureAwait(false); await connector.WriteDescribe(StatementOrPortal.Statement, batchCommand.StatementName, async, cancellationToken).ConfigureAwait(false); wroteSomething = true; } @@ -1123,8 +1127,8 @@ async Task SendDeriveParameters(NpgsqlConnector connector, bool async, Cancellat var batchCommand = InternalBatchCommands[i]; - await connector.WriteParse(batchCommand.FinalCommandText!, string.Empty, EmptyParameters, async, cancellationToken).ConfigureAwait(false); - await connector.WriteDescribe(StatementOrPortal.Statement, string.Empty, async, cancellationToken).ConfigureAwait(false); + await connector.WriteParse(batchCommand.FinalCommandText!, Array.Empty(), NpgsqlBatchCommand.EmptyParameters, async, cancellationToken).ConfigureAwait(false); + await connector.WriteDescribe(StatementOrPortal.Statement, Array.Empty(), async, cancellationToken).ConfigureAwait(false); } await connector.WriteSync(async, cancellationToken).ConfigureAwait(false); @@ -1154,7 +1158,7 @@ async Task SendPrepare(NpgsqlConnector connector, bool async, CancellationToken if (statementToClose != null) await 
connector.WriteClose(StatementOrPortal.Statement, statementToClose.Name!, async, cancellationToken).ConfigureAwait(false); - await connector.WriteParse(batchCommand.FinalCommandText!, pStatement.Name!, batchCommand.PositionalParameters, async, + await connector.WriteParse(batchCommand.FinalCommandText!, pStatement.Name!, batchCommand.CurrentParametersReadOnly, async, cancellationToken).ConfigureAwait(false); await connector.WriteDescribe(StatementOrPortal.Statement, pStatement.Name!, async, cancellationToken).ConfigureAwait(false); } @@ -1263,7 +1267,7 @@ async Task ExecuteNonQuery(bool async, CancellationToken cancellationToken) async ValueTask ExecuteScalar(bool async, CancellationToken cancellationToken) { var behavior = CommandBehavior.SingleRow; - if (IsWrappedByBatch || !Parameters.HasOutputParameters) + if (IsWrappedByBatch || _parameters?.HasOutputParameters != true) behavior |= CommandBehavior.SequentialAccess; var reader = await ExecuteReader(async, behavior, cancellationToken).ConfigureAwait(false); @@ -1395,7 +1399,7 @@ internal virtual async ValueTask ExecuteReader(bool async, Com goto case false; } - batchCommand.Parameters.ProcessParameters(connector.SerializerOptions, validateParameterValues, CommandType); + batchCommand._parameters?.ProcessParameters(connector.SerializerOptions, validateParameterValues, CommandType); } } else @@ -1408,7 +1412,7 @@ internal virtual async ValueTask ExecuteReader(bool async, Com ResetPreparation(); goto case false; } - Parameters.ProcessParameters(connector.SerializerOptions, validateParameterValues, CommandType); + _parameters?.ProcessParameters(connector.SerializerOptions, validateParameterValues, CommandType); } NpgsqlEventSource.Log.CommandStartPrepared(); @@ -1424,7 +1428,7 @@ internal virtual async ValueTask ExecuteReader(bool async, Com { var batchCommand = InternalBatchCommands[i]; - batchCommand.Parameters.ProcessParameters(connector.SerializerOptions, validateParameterValues, CommandType); + 
batchCommand._parameters?.ProcessParameters(connector.SerializerOptions, validateParameterValues, CommandType); ProcessRawQuery(connector.SqlQueryParser, connector.UseConformingStrings, batchCommand); if (connector.Settings.MaxAutoPrepare > 0 && batchCommand.TryAutoPrepare(connector)) @@ -1436,7 +1440,7 @@ internal virtual async ValueTask ExecuteReader(bool async, Com } else { - Parameters.ProcessParameters(connector.SerializerOptions, validateParameterValues, CommandType); + _parameters?.ProcessParameters(connector.SerializerOptions, validateParameterValues, CommandType); ProcessRawQuery(connector.SqlQueryParser, connector.UseConformingStrings, batchCommand: null); if (connector.Settings.MaxAutoPrepare > 0) @@ -1530,13 +1534,13 @@ internal virtual async ValueTask ExecuteReader(bool async, Com { foreach (var batchCommand in InternalBatchCommands) { - batchCommand.Parameters.ProcessParameters(dataSource.SerializerOptions, validateValues: true, CommandType); + batchCommand._parameters?.ProcessParameters(dataSource.SerializerOptions, validateValues: true, CommandType); ProcessRawQuery(null, standardConformingStrings: true, batchCommand); } } else { - Parameters.ProcessParameters(dataSource.SerializerOptions, validateValues: true, CommandType); + _parameters?.ProcessParameters(dataSource.SerializerOptions, validateValues: true, CommandType); ProcessRawQuery(null, standardConformingStrings: true, batchCommand: null); } @@ -1765,7 +1769,7 @@ internal void LogExecutingCompleted(NpgsqlConnector connector, bool executing) { var singleCommand = InternalBatchCommands[0]; - if (logParameters && singleCommand.PositionalParameters.Count > 0) + if (logParameters && singleCommand.HasParameters) { if (executing) { @@ -1820,9 +1824,10 @@ internal void LogExecutingCompleted(NpgsqlConnector connector, bool executing) object[] ParametersDbNullAsString(NpgsqlBatchCommand c) { - var parameters = new object[c.PositionalParameters.Count]; - for (var i = 0; i < c.PositionalParameters.Count; 
i++) - parameters[i] = c.PositionalParameters[i].Value == DBNull.Value ? "NULL" : c.PositionalParameters[i].Value!; + var positionalParameters = c.CurrentParametersReadOnly; + var parameters = new object[positionalParameters.Count]; + for (var i = 0; i < positionalParameters.Count; i++) + parameters[i] = positionalParameters[i].Value == DBNull.Value ? "NULL" : positionalParameters[i].Value!; return parameters; } } @@ -1847,31 +1852,27 @@ public virtual NpgsqlCommand Clone() _allResultTypesAreUnknown = _allResultTypesAreUnknown, _unknownResultTypeList = _unknownResultTypeList }; - _parameters.CloneTo(clone._parameters); + _parameters?.CloneTo(clone.Parameters); return clone; } NpgsqlConnection? CheckAndGetConnection() { - if (State == CommandState.Disposed) + if (State is CommandState.Disposed) ThrowHelper.ThrowObjectDisposedException(GetType().FullName); - if (InternalConnection == null) + + var conn = InternalConnection; + if (conn is null) { if (_connector is null) ThrowHelper.ThrowInvalidOperationException("Connection property has not been initialized."); return null; } - switch (InternalConnection.FullState) - { - case ConnectionState.Open: - case ConnectionState.Connecting: - case ConnectionState.Open | ConnectionState.Executing: - case ConnectionState.Open | ConnectionState.Fetching: - return InternalConnection; - default: + + if (!conn.FullState.HasFlag(ConnectionState.Open)) ThrowHelper.ThrowInvalidOperationException("Connection is not open"); - return null; - } + + return conn; } /// diff --git a/src/Npgsql/NpgsqlConnection.cs b/src/Npgsql/NpgsqlConnection.cs index b511f93841..bb71b6a6b6 100644 --- a/src/Npgsql/NpgsqlConnection.cs +++ b/src/Npgsql/NpgsqlConnection.cs @@ -252,31 +252,34 @@ internal Task Open(bool async, CancellationToken cancellationToken) if (_dataSource is null) { Debug.Assert(string.IsNullOrEmpty(_connectionString)); - - throw new InvalidOperationException("The ConnectionString property has not been initialized."); + 
ThrowHelper.ThrowInvalidOperationException("The ConnectionString property has not been initialized."); } - FullState = ConnectionState.Connecting; _userFacingConnectionString = _dataSource.ConnectionString; _connectionLogger = _dataSource.LoggingConfiguration.ConnectionLogger; - LogMessages.OpeningConnection(_connectionLogger, Settings.Host!, Settings.Port, Settings.Database!, _userFacingConnectionString); + if (_connectionLogger.IsEnabled(LogLevel.Trace)) + LogMessages.OpeningConnection(_connectionLogger, Settings.Host!, Settings.Port, Settings.Database!, _userFacingConnectionString); if (Settings.Multiplexing) { if (Settings.Enlist && Transaction.Current != null) { // TODO: Keep in mind that the TransactionScope can be disposed - throw new NotSupportedException(); + ThrowHelper.ThrowNotSupportedException(); } // We're opening in multiplexing mode, without a transaction. We don't actually do anything. // If we've never connected with this connection string, open a physical connector in order to generate // any exception (bad user/password, IP address...). This reproduces the standard error behavior. - if (!((MultiplexingDataSource)_dataSource).StartupCheckPerformed) + if (!_dataSource.IsBootstrapped) + { + FullState = ConnectionState.Connecting; return PerformMultiplexingStartupCheck(async, cancellationToken); + } - LogMessages.OpenedMultiplexingConnection(_connectionLogger, Settings.Host!, Settings.Port, Settings.Database!, _userFacingConnectionString); + if (_connectionLogger.IsEnabled(LogLevel.Debug)) + LogMessages.OpenedMultiplexingConnection(_connectionLogger, Settings.Host!, Settings.Port, Settings.Database!, _userFacingConnectionString); FullState = ConnectionState.Open; return Task.CompletedTask; @@ -288,6 +291,7 @@ async Task OpenAsync(bool async, CancellationToken cancellationToken) { Debug.Assert(!Settings.Multiplexing); + FullState = ConnectionState.Connecting; NpgsqlConnector? 
connector = null; try { @@ -348,7 +352,6 @@ async Task PerformMultiplexingStartupCheck(bool async, CancellationToken cancell EndBindingScope(ConnectorBindingScope.Connection); LogMessages.OpenedMultiplexingConnection(_connectionLogger, Settings.Host!, Settings.Port, Settings.Database!, _userFacingConnectionString); - ((MultiplexingDataSource)NpgsqlDataSource).StartupCheckPerformed = true; FullState = ConnectionState.Open; } @@ -494,11 +497,8 @@ public ConnectionState FullState case ConnectorState.Executing: return ConnectionState.Open | ConnectionState.Executing; case ConnectorState.Fetching: - return ConnectionState.Open | ConnectionState.Fetching; case ConnectorState.Copy: - return ConnectionState.Open | ConnectionState.Fetching; case ConnectorState.Replication: - return ConnectionState.Open | ConnectionState.Fetching; case ConnectorState.Waiting: return ConnectionState.Open | ConnectionState.Fetching; case ConnectorState.Connecting: @@ -515,10 +515,12 @@ public ConnectionState FullState } internal set { + if (value is < 0 or > ConnectionState.Broken) + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(value), "Unknown connection state", value); + var originalOpen = _fullState.HasFlag(ConnectionState.Open); _fullState = value; - var currentOpen = _fullState.HasFlag(ConnectionState.Open); if (currentOpen != originalOpen) { @@ -1550,21 +1552,9 @@ void CheckClosed() { CheckDisposed(); - switch (FullState) - { - case ConnectionState.Closed: - case ConnectionState.Broken: - return; - case ConnectionState.Open: - case ConnectionState.Connecting: - case ConnectionState.Open | ConnectionState.Executing: - case ConnectionState.Open | ConnectionState.Fetching: + var fullState = FullState; + if (fullState is ConnectionState.Connecting || fullState.HasFlag(ConnectionState.Open)) ThrowHelper.ThrowInvalidOperationException("Connection already open"); - return; - default: - ThrowHelper.ThrowArgumentOutOfRangeException(); - return; - } } void CheckDisposed() diff --git 
a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 17e5b46eeb..b028913420 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -74,6 +74,7 @@ public sealed class NpgsqlDataReader : DbDataReader, IDbColumnSchemaGenerator /// Used only in non-sequential mode. /// readonly List<(int Offset, int Length)> _columns = new(); + int _columnsStartPos; /// /// The index of the column that we're on, i.e. that has already been parsed, is @@ -103,7 +104,7 @@ public sealed class NpgsqlDataReader : DbDataReader, IDbColumnSchemaGenerator /// /// Stores the last converter info resolved by column, to speed up repeated reading. /// - PgConverterInfo[]? ColumnInfoCache { get; set; } + ColumnInfo[]? ColumnInfoCache { get; set; } ulong? _recordsAffected; @@ -164,11 +165,7 @@ internal void Init( public override bool Read() { CheckClosedOrDisposed(); - - var fastRead = TryFastRead(); - return fastRead.HasValue - ? fastRead.Value - : Read(false).GetAwaiter().GetResult(); + return TryRead()?.Result ?? Read(false).GetAwaiter().GetResult(); } /// @@ -181,56 +178,52 @@ public override bool Read() public override Task ReadAsync(CancellationToken cancellationToken) { CheckClosedOrDisposed(); - - var fastRead = TryFastRead(); - if (fastRead.HasValue) - return fastRead.Value ? TrueTask : FalseTask; - - return Read(async: true, cancellationToken); + return TryRead() ?? Read(async: true, cancellationToken); } - bool? TryFastRead() + // This is an optimized execution path that avoids calling any async methods for the (usual) + // case where the next row (or CommandComplete) is already in memory. + Task? TryRead() { - // This is an optimized execution path that avoids calling any async methods for the (usual) - // case where the next row (or CommandComplete) is already in memory. - - if (_behavior.HasFlag(CommandBehavior.SingleRow)) - return null; - switch (State) { case ReaderState.BeforeResult: // First Read() after NextResult. 
Data row has already been processed. State = ReaderState.InResult; - return true; + return TrueTask; case ReaderState.InResult: - if (!_canConsumeRowNonSequentially) - return null; - // We get here, if we're in a non-sequential mode (or the row is already in the buffer) - ConsumeRowNonSequential(); break; - case ReaderState.BetweenResults: - case ReaderState.Consumed: - case ReaderState.Closed: - case ReaderState.Disposed: - return false; + default: + return FalseTask; } - var readBuf = Connector.ReadBuffer; - if (readBuf.ReadBytesLeft < 5) + // We have a special case path for SingleRow. + if (_behavior.HasFlag(CommandBehavior.SingleRow) || !_canConsumeRowNonSequentially) + return null; + + ConsumeRowNonSequential(); + + const int headerSize = sizeof(byte) + sizeof(int); + var buffer = Buffer; + var readPosition = buffer.ReadPosition; + var bytesLeft = buffer.FilledBytes - readPosition; + if (bytesLeft < headerSize) return null; - var messageCode = (BackendMessageCode)readBuf.ReadByte(); - var len = readBuf.ReadInt32() - 4; // Transmitted length includes itself - if (messageCode != BackendMessageCode.DataRow || readBuf.ReadBytesLeft < len) + var messageCode = (BackendMessageCode)buffer.ReadByte(); + var len = buffer.ReadInt32() - sizeof(int); // Transmitted length includes itself + var isDataRow = messageCode is BackendMessageCode.DataRow; + // sizeof(short) is for the number of columns + var sufficientBytes = isDataRow && _isSequential ? headerSize + sizeof(short) : headerSize + len; + if (bytesLeft < sufficientBytes + || !isDataRow && (_statements[StatementIndex].AppendErrorBarrier ?? Command.EnableErrorBarriers) + // Could be an error, let main read handle it. 
+ || Connector.ParseResultSetMessage(buffer, messageCode, len) is not { } msg) { - readBuf.ReadPosition -= 5; + buffer.ReadPosition = readPosition; return null; } - - var msg = Connector.ParseServerMessage(readBuf, BackendMessageCode.DataRow, len, false)!; - Debug.Assert(msg.Code == BackendMessageCode.DataRow); ProcessMessage(msg); - return true; + return isDataRow ? TrueTask : FalseTask; } async Task Read(bool async, CancellationToken cancellationToken = default) @@ -336,82 +329,47 @@ public override Task NextResultAsync(CancellationToken cancellationToken) /// /// Internal implementation of NextResult /// - [MethodImpl(MethodImplOptions.AggressiveInlining)] async Task NextResult(bool async, bool isConsuming = false, CancellationToken cancellationToken = default) { - CheckClosedOrDisposed(); - - IBackendMessage msg; Debug.Assert(!_isSchemaOnly); + CheckClosedOrDisposed(); - using var registration = isConsuming ? default : Connector.StartNestedCancellableOperation(cancellationToken); + if (State is ReaderState.Consumed) + return false; try { + using var registration = isConsuming ? default : Connector.StartNestedCancellableOperation(cancellationToken); // If we're in the middle of a resultset, consume it - switch (State) - { - case ReaderState.BeforeResult: - case ReaderState.InResult: - await ConsumeRow(async).ConfigureAwait(false); - while (true) - { - var completedMsg = await Connector.ReadMessage(async, DataRowLoadingMode.Skip).ConfigureAwait(false); - switch (completedMsg.Code) - { - case BackendMessageCode.CommandComplete: - case BackendMessageCode.EmptyQueryResponse: - ProcessMessage(completedMsg); - - var statement = _statements[StatementIndex]; - if (statement.IsPrepared && ColumnInfoCache is not null) - RowDescription!.SetConverterInfoCache(new(ColumnInfoCache, 0, _numColumns)); - - if (statement.AppendErrorBarrier ?? 
Command.EnableErrorBarriers) - Expect(await Connector.ReadMessage(async).ConfigureAwait(false), Connector); - - break; - - default: - continue; - } - - break; - } + if (State is ReaderState.BeforeResult or ReaderState.InResult) + await ConsumeResultSet(async).ConfigureAwait(false); - break; + Debug.Assert(State is ReaderState.BetweenResults); - case ReaderState.BetweenResults: - { - if (StatementIndex >= 0 && _statements[StatementIndex].IsPrepared && ColumnInfoCache is not null) - RowDescription!.SetConverterInfoCache(new(ColumnInfoCache, 0, _numColumns)); - break; - } - case ReaderState.Consumed: - case ReaderState.Closed: - case ReaderState.Disposed: - return false; - default: - ThrowHelper.ThrowArgumentOutOfRangeException(); - return false; - } - - Debug.Assert(State == ReaderState.BetweenResults); _hasRows = false; - if (_behavior.HasFlag(CommandBehavior.SingleResult) && StatementIndex == 0 && !isConsuming) + var statements = _statements; + var statementIndex = StatementIndex; + if (statementIndex >= 0) { - await Consume(async).ConfigureAwait(false); - return false; + if (RowDescription is { } description && statements[statementIndex].IsPrepared && ColumnInfoCache is { } cache) + description.SetConverterInfoCache(new(cache, 0, _numColumns)); + + if (statementIndex is 0 && _behavior.HasFlag(CommandBehavior.SingleResult) && !isConsuming) + { + await Consume(async).ConfigureAwait(false); + return false; + } } // We are now at the end of the previous result set. Read up to the next result set, if any. 
// Non-prepared statements receive ParseComplete, BindComplete, DescriptionRow/NoData, // prepared statements receive only BindComplete - for (StatementIndex++; StatementIndex < _statements.Count; StatementIndex++) + for (statementIndex = ++StatementIndex; statementIndex < statements.Count; statementIndex = ++StatementIndex) { - var statement = _statements[StatementIndex]; + var statement = statements[statementIndex]; + IBackendMessage msg; if (statement.TryGetPrepared(out var preparedStatement)) { Expect(await Connector.ReadMessage(async).ConfigureAwait(false), Connector); @@ -464,8 +422,8 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo else { if (ColumnInfoCache is { } cache) - ArrayPool.Shared.Return(cache, clearArray: true); - ColumnInfoCache = ArrayPool.Shared.Rent(RowDescription.Count); + ArrayPool.Shared.Return(cache, clearArray: true); + ColumnInfoCache = ArrayPool.Shared.Rent(RowDescription.Count); } if (statement.IsPrepared) RowDescription.LoadConverterInfoCache(ColumnInfoCache); @@ -500,7 +458,7 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo continue; } - if (!Command.IsWrappedByBatch && StatementIndex == 0 && Command.Parameters.HasOutputParameters) + if (!Command.IsWrappedByBatch && StatementIndex == 0 && Command._parameters?.HasOutputParameters == true) { // If output parameters are present and this is the first row of the first resultset, // we must always read it in non-sequential mode because it will be traversed twice (once @@ -526,12 +484,13 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo Expect(await Connector.ReadMessage(async).ConfigureAwait(false), Connector); return true; default: - throw Connector.UnexpectedMessageReceived(msg.Code); + Connector.UnexpectedMessageReceived(msg.Code); + break; } } // There are no more queries, we're done. Read the RFQ. - if (_statements.Count == 0 || !(_statements[_statements.Count - 1].AppendErrorBarrier ?? 
Command.EnableErrorBarriers)) + if (_statements.Count is 0 || !(_statements[_statements.Count - 1].AppendErrorBarrier ?? Command.EnableErrorBarriers)) Expect(await Connector.ReadMessage(async).ConfigureAwait(false), Connector); State = ReaderState.Consumed; @@ -606,8 +565,38 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo State = ReaderState.Consumed; throw; } + + async ValueTask ConsumeResultSet(bool async) + { + await ConsumeRow(async).ConfigureAwait(false); + while (true) + { + var completedMsg = await Connector.ReadMessage(async, DataRowLoadingMode.Skip).ConfigureAwait(false); + switch (completedMsg.Code) + { + case BackendMessageCode.CommandComplete: + case BackendMessageCode.EmptyQueryResponse: + ProcessMessage(completedMsg); + + var statement = _statements[StatementIndex]; + if (statement.IsPrepared && ColumnInfoCache is not null) + RowDescription!.SetConverterInfoCache(new(ColumnInfoCache, 0, _numColumns)); + + if (statement.AppendErrorBarrier ?? Command.EnableErrorBarriers) + Expect(await Connector.ReadMessage(async).ConfigureAwait(false), Connector); + + break; + default: + // TODO if we hit an ErrorResponse here (PG doesn't do this *today*) we should probably throw. 
+ continue; + } + + break; + } + } } + void PopulateOutputParameters() { // The first row in a stored procedure command that has output parameters needs to be traversed twice - @@ -648,7 +637,7 @@ void PopulateOutputParameters() p.Value = pending.Dequeue(); } - PgReader.Commit(async: false, resuming: false).GetAwaiter().GetResult(); + PgReader.Commit(resuming: false); State = ReaderState.BeforeResult; // Set the state back Buffer.ReadPosition = currentPosition; // Restore position @@ -750,8 +739,8 @@ async Task NextResultSchemaOnly(bool async, bool isConsuming = false, Canc else { if (ColumnInfoCache is { } cache) - ArrayPool.Shared.Return(cache, clearArray: true); - ColumnInfoCache = ArrayPool.Shared.Rent(RowDescription.Count); + ArrayPool.Shared.Return(cache, clearArray: true); + ColumnInfoCache = ArrayPool.Shared.Rent(RowDescription.Count); } return true; } @@ -801,46 +790,13 @@ async Task NextResultSchemaOnly(bool async, bool isConsuming = false, Canc internal void ProcessMessage(IBackendMessage msg) { - switch (msg.Code) + if (msg.Code is not BackendMessageCode.DataRow) { - case BackendMessageCode.DataRow: - ProcessDataRowMessage((DataRowMessage)msg); - return; - - case BackendMessageCode.CommandComplete: - var completed = (CommandCompleteMessage)msg; - switch (completed.StatementType) - { - case StatementType.Update: - case StatementType.Insert: - case StatementType.Delete: - case StatementType.Copy: - case StatementType.Move: - case StatementType.Merge: - if (!_recordsAffected.HasValue) - _recordsAffected = 0; - _recordsAffected += completed.Rows; - break; - } - - _statements[StatementIndex].ApplyCommandComplete(completed); - goto case BackendMessageCode.EmptyQueryResponse; - - case BackendMessageCode.EmptyQueryResponse: - State = ReaderState.BetweenResults; - return; - - default: - ThrowUnexpectedBackendMessage(msg.Code); + HandleUncommon(msg); return; } - static void ThrowUnexpectedBackendMessage(BackendMessageCode code) - => throw new Exception("Received 
unexpected backend message of type " + code); - } - - void ProcessDataRowMessage(DataRowMessage msg) - { + var dataRow = (DataRowMessage)msg; // The connector's buffer can actually change between DataRows: // If a large DataRow exceeding the connector's current read buffer arrives, and we're // reading in non-sequential mode, a new oversize buffer is allocated. We thus have to @@ -849,30 +805,26 @@ void ProcessDataRowMessage(DataRowMessage msg) // (see #2003) if (!ReferenceEquals(Buffer, Connector.ReadBuffer)) Buffer = Connector.ReadBuffer; - - _hasRows = true; - _column = -1; - // We assume that the row's number of columns is identical to the description's - _numColumns = Buffer.ReadInt16(); - Debug.Assert(_numColumns == RowDescription!.Count, - $"Row's number of columns ({_numColumns}) differs from the row description's ({RowDescription.Count})"); - - _dataMsgEnd = Buffer.ReadPosition + msg.Length - 2; - _canConsumeRowNonSequentially = Buffer.ReadBytesLeft >= msg.Length - 2; + var numColumns = Buffer.ReadInt16(); + Debug.Assert(numColumns == RowDescription!.Count, + $"Row's number of columns ({numColumns}) differs from the row description's ({RowDescription.Count})"); + + var readPosition = Buffer.ReadPosition; + var msgRemainder = dataRow.Length - sizeof(short); + _dataMsgEnd = readPosition + msgRemainder; + _columnsStartPos = readPosition; + _canConsumeRowNonSequentially = msgRemainder <= Buffer.FilledBytes - readPosition; + _column = -1; - if (!_isSequential) - { - Debug.Assert(_canConsumeRowNonSequentially); - // Initialize our columns array with the offset and length of the first column + if (_columns.Count > 0) _columns.Clear(); - var len = Buffer.ReadInt32(); - _columns.Add((Buffer.ReadPosition, len)); - } switch (State) { case ReaderState.BetweenResults: + _numColumns = numColumns; + _hasRows = true; State = ReaderState.BeforeResult; break; case ReaderState.BeforeResult: @@ -881,7 +833,40 @@ void ProcessDataRowMessage(DataRowMessage msg) case 
ReaderState.InResult: break; default: - throw Connector.UnexpectedMessageReceived(BackendMessageCode.DataRow); + Connector.UnexpectedMessageReceived(BackendMessageCode.DataRow); + break; + } + + [MethodImpl(MethodImplOptions.NoInlining)] + void HandleUncommon(IBackendMessage msg) + { + switch (msg.Code) + { + case BackendMessageCode.CommandComplete: + var completed = (CommandCompleteMessage)msg; + switch (completed.StatementType) + { + case StatementType.Update: + case StatementType.Insert: + case StatementType.Delete: + case StatementType.Copy: + case StatementType.Move: + case StatementType.Merge: + _recordsAffected ??= 0; + _recordsAffected += completed.Rows; + break; + } + + _statements[StatementIndex].ApplyCommandComplete(completed); + State = ReaderState.BetweenResults; + break; + case BackendMessageCode.EmptyQueryResponse: + State = ReaderState.BetweenResults; + break; + default: + Connector.UnexpectedMessageReceived(msg.Code); + break; + } } } @@ -1171,7 +1156,7 @@ internal async Task Cleanup(bool async, bool connectionClosing = false, bool isD // has completed, throwing any exceptions it generated. If we don't do this, there's the possibility of a race condition where the // user executes a new command after reader.Dispose() returns, but some additional write stuff is still finishing up from the last // command. 
- if (_sendTask != null) + if (_sendTask is { Status: not TaskStatus.RanToCompletion }) { // If the connector is broken, we have no reason to wait for the sendTask to complete // as we're not going to send anything else over it @@ -1201,7 +1186,7 @@ internal async Task Cleanup(bool async, bool connectionClosing = false, bool isD if (ColumnInfoCache is { } cache) { ColumnInfoCache = null; - ArrayPool.Shared.Return(cache, clearArray: true); + ArrayPool.Shared.Return(cache, clearArray: true); } State = ReaderState.Closed; @@ -1398,7 +1383,7 @@ public override int GetValues(object[] values) if (field.DataFormat is DataFormat.Text || (elementType.InternalName != "record" && compositeType == null)) throw new InvalidCastException("GetData() not supported for type " + field.TypeDisplayName); - var columnLength = SeekToColumn(async: false, ordinal, field, resumableOp: true).GetAwaiter().GetResult(); + var columnLength = SeekToColumn(async: false, ordinal, field.DataFormat, resumableOp: true).GetAwaiter().GetResult(); if (columnLength is -1) ThrowHelper.ThrowInvalidCastException_NoValue(field); @@ -1445,7 +1430,7 @@ public override long GetBytes(int ordinal, long dataOffset, byte[]? buffer, int throw new IndexOutOfRangeException($"length must be between 0 and {buffer.Length - bufferOffset}"); var field = CheckRowAndGetField(ordinal); - var columnLength = SeekToColumn(async: false, ordinal, field, resumableOp: true).GetAwaiter().GetResult(); + var columnLength = SeekToColumn(async: false, ordinal, field.DataFormat, resumableOp: true).GetAwaiter().GetResult(); if (columnLength == -1) ThrowHelper.ThrowInvalidCastException_NoValue(field); @@ -1512,7 +1497,7 @@ public override long GetChars(int ordinal, long dataOffset, char[]? 
buffer, int var columnLength = SeekToColumn(async: false, ordinal, field, resumableOp: true).GetAwaiter().GetResult(); if (columnLength == -1) - ThrowHelper.ThrowInvalidCastException_NoValue(field); + ThrowHelper.ThrowInvalidCastException_NoValue(CheckRowAndGetField(ordinal)); dataOffset = buffer is null ? 0 : dataOffset; PgReader.InitCharsRead(checked((int)dataOffset), @@ -1569,27 +1554,25 @@ public override Task GetFieldValueAsync(int ordinal, CancellationToken can if (!_isSequential) return Task.FromResult(GetFieldValueCore(ordinal)); + // The only statically mapped converter, it always exists. + if (typeof(T) == typeof(Stream)) + return GetStream(ordinal, cancellationToken); + return Core(ordinal, cancellationToken).AsTask(); async ValueTask Core(int ordinal, CancellationToken cancellationToken) { using var registration = Connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); - var isStream = typeof(T) == typeof(Stream); - var field = GetInfo(ordinal, isStream ? null : typeof(T), out var converter, out var bufferRequirement, out var asObject); + + var field = GetInfo(ordinal, typeof(T), out var converter, out var bufferRequirement, out var asObject); var columnLength = await SeekToColumn(async: true, ordinal, field).ConfigureAwait(false); - if (columnLength == -1) - return DbNullValueOrThrow(field); + if (columnLength is -1) + return DbNullValueOrThrow(ordinal); - if (isStream || typeof(T) == typeof(TextReader)) - { + if (typeof(T) == typeof(TextReader)) PgReader.ThrowIfStreamActive(); - // The only statically mapped converter, it always exists. 
- if (isStream) - return (T)(object)PgReader.GetStream(canSeek: !_isSequential); - } - Debug.Assert(asObject || converter is PgConverter); await PgReader.StartReadAsync(bufferRequirement, cancellationToken).ConfigureAwait(false); var result = asObject @@ -1598,6 +1581,21 @@ async ValueTask Core(int ordinal, CancellationToken cancellationToken) await PgReader.EndReadAsync().ConfigureAwait(false); return result; } + + async Task GetStream(int ordinal, CancellationToken cancellationToken) + { + using var registration = Connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); + + var field = GetDefaultInfo(ordinal, out _, out _); + PgReader.ThrowIfStreamActive(); + + var columnLength = await SeekToColumn(async: true, ordinal, field).ConfigureAwait(false); + + if (columnLength == -1) + return DbNullValueOrThrow(ordinal); + + return (T)(object)PgReader.GetStream(canSeek: !_isSequential); + } } /// @@ -1612,16 +1610,19 @@ T GetFieldValueCore(int ordinal) { // The only statically mapped converter, it always exists. if (typeof(T) == typeof(Stream)) - return GetStream(); + return GetStream(ordinal); var field = GetInfo(ordinal, typeof(T), out var converter, out var bufferRequirement, out var asObject); if (typeof(T) == typeof(TextReader)) PgReader.ThrowIfStreamActive(); - var columnLength = SeekToColumn(async: false, ordinal, field).GetAwaiter().GetResult(); - if (columnLength == -1) - return DbNullValueOrThrow(field); + var columnLength = + _isSequential + ? 
SeekToColumnSequential(async: false, ordinal, field).GetAwaiter().GetResult() + : SeekToColumnNonSequential(ordinal, field); + if (columnLength is -1) + return DbNullValueOrThrow(ordinal); Debug.Assert(asObject || converter is PgConverter); PgReader.StartRead(bufferRequirement); @@ -1632,13 +1633,19 @@ T GetFieldValueCore(int ordinal) return result; [MethodImpl(MethodImplOptions.NoInlining)] - T GetStream() + T GetStream(int ordinal) { - var field = GetInfo(ordinal, null, out _, out _, out _); + var field = GetDefaultInfo(ordinal, out _, out _); PgReader.ThrowIfStreamActive(); - var columnLength = SeekToColumn(async: false, ordinal, field).GetAwaiter().GetResult(); + + var columnLength = + _isSequential + ? SeekToColumnSequential(async: false, ordinal, field).GetAwaiter().GetResult() + : SeekToColumnNonSequential(ordinal, field); + if (columnLength == -1) - return DbNullValueOrThrow(field); + return DbNullValueOrThrow(ordinal); + return (T)(object)PgReader.GetStream(canSeek: !_isSequential); } } @@ -1654,8 +1661,11 @@ T GetStream() /// The value of the specified column. public override object GetValue(int ordinal) { - var field = GetInfo(ordinal, null, out var converter, out var bufferRequirement, out _); - var columnLength = SeekToColumn(async: false, ordinal, field).GetAwaiter().GetResult(); + var field = GetDefaultInfo(ordinal, out var converter, out var bufferRequirement); + var columnLength = + _isSequential + ? SeekToColumnSequential(async: false, ordinal, field).GetAwaiter().GetResult() + : SeekToColumnNonSequential(ordinal, field); if (columnLength == -1) return DBNull.Value; @@ -1683,7 +1693,7 @@ public override object GetValue(int ordinal) /// The zero-based column ordinal. /// true if the specified column is equivalent to ; otherwise false. 
public override bool IsDBNull(int ordinal) - => SeekToColumn(async: false, ordinal, CheckRowAndGetField(ordinal), resumableOp: true).GetAwaiter().GetResult() is -1; + => SeekToColumn(async: false, ordinal, CheckRowAndGetField(ordinal).DataFormat, resumableOp: true).GetAwaiter().GetResult() is -1; /// /// An asynchronous version of , which gets a value that indicates whether the column contains non-existent or missing values. @@ -1704,7 +1714,7 @@ public override Task IsDBNullAsync(int ordinal, CancellationToken cancella async Task Core(int ordinal, CancellationToken cancellationToken) { using var registration = Connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); - return await SeekToColumn(async: true, ordinal, CheckRowAndGetField(ordinal), resumableOp: true).ConfigureAwait(false) is -1; + return await SeekToColumn(async: true, ordinal, CheckRowAndGetField(ordinal).DataFormat, resumableOp: true).ConfigureAwait(false) is -1; } } @@ -1904,36 +1914,86 @@ Task> GetColumnSchema(bool async, Cancellatio #region Seeking + /// + /// Seeks to the given column. The 4-byte length is read and returned. + /// [MethodImpl(MethodImplOptions.AggressiveInlining)] - ValueTask SeekToColumn(bool async, int ordinal, FieldDescription field, bool resumableOp = false) + ValueTask SeekToColumn(bool async, int ordinal, DataFormat dataFormat, bool resumableOp = false) => _isSequential - ? SeekToColumnSequential(async, ordinal, field, resumableOp) - : new(SeekToColumnNonSequential(ordinal, field, resumableOp)); + ? 
SeekToColumnSequential(async, ordinal, dataFormat, resumableOp) + : new(SeekToColumnNonSequential(ordinal, dataFormat, resumableOp)); - int SeekToColumnNonSequential(int ordinal, FieldDescription field, bool resumableOp = false) + int SeekToColumnNonSequential(int ordinal, DataFormat dataFormat, bool resumableOp = false) { - PgReader.Commit(async: false, _column == ordinal && PgReader.Resumable && resumableOp).GetAwaiter().GetResult(); + var currentColumn = _column; + var buffer = Buffer; + var pgReader = PgReader; - for (var lastColumnRead = _columns.Count; ordinal >= lastColumnRead; lastColumnRead++) + // Deals with current column commit and rereads + int columnLength; + if (currentColumn >= 0) { - (Buffer.ReadPosition, var lastColumnLen) = _columns[lastColumnRead - 1]; - if (lastColumnLen != -1) - Buffer.ReadPosition += lastColumnLen; - var len = Buffer.ReadInt32(); - _columns.Add((Buffer.ReadPosition, len)); + if (currentColumn == ordinal) + return HandleReread(pgReader.Resumable && resumableOp); + pgReader.Commit(resuming: false); } - (Buffer.ReadPosition, var columnLength) = _columns[ordinal]; - PgReader.Init(columnLength, field.DataFormat, resumableOp); + // Deals with forward movement + Debug.Assert(ordinal != currentColumn); + if (ordinal > currentColumn) + { + for (; currentColumn < ordinal - 1; currentColumn++) + { + columnLength = buffer.ReadInt32(); + if (columnLength is not -1) + buffer.Skip(columnLength); + } + columnLength = buffer.ReadInt32(); + } + else + columnLength = SeekBackwards(); + + pgReader.Init(columnLength, dataFormat, resumableOp); _column = ordinal; return columnLength; + + int HandleReread(bool resuming) + { + Debug.Assert(pgReader.Initialized); + var columnLength = pgReader.FieldSize; + pgReader.Commit(resuming); + if (!resuming && columnLength > 0) + buffer.ReadPosition -= columnLength; + pgReader.Init(columnLength, dataFormat, resumableOp); + return columnLength; + } + + // On the first call to SeekBackwards we'll fill up the 
columns list as we may need seek positions more than once. + [MethodImpl(MethodImplOptions.NoInlining)] + int SeekBackwards() + { + // Backfill the first column. + if (_columns.Count is 0) + { + buffer.ReadPosition = _columnsStartPos; + var len = buffer.ReadInt32(); + _columns.Add((buffer.ReadPosition, len)); + } + for (var lastColumnRead = _columns.Count; ordinal >= lastColumnRead; lastColumnRead++) + { + (Buffer.ReadPosition, var lastLen) = _columns[lastColumnRead - 1]; + if (lastLen is not -1) + buffer.Skip(lastLen); + var len = Buffer.ReadInt32(); + _columns.Add((Buffer.ReadPosition, len)); + } + (Buffer.ReadPosition, var columnLength) = _columns[ordinal]; + return columnLength; + } } - /// - /// Seeks to the given column. The 4-byte length is read and returned. - /// - ValueTask SeekToColumnSequential(bool async, int ordinal, FieldDescription field, bool resumableOp = false) + ValueTask SeekToColumnSequential(bool async, int ordinal, DataFormat dataFormat, bool resumableOp = false) { var reread = _column == ordinal; // Column rereading rules for sequential mode: @@ -1949,11 +2009,11 @@ ValueTask SeekToColumnSequential(bool async, int ordinal, FieldDescription var committed = false; if (!PgReader.CommitHasIO(reread)) { - PgReader.Commit(async: false, reread).GetAwaiter().GetResult(); + PgReader.Commit(reread); committed = true; if (TrySeekBuffered(ordinal, out var columnLength)) { - PgReader.Init(columnLength, field.DataFormat, columnLength is -1 || resumableOp); + PgReader.Init(columnLength, dataFormat, columnLength is -1 || resumableOp); return new(columnLength); } @@ -1961,12 +2021,12 @@ ValueTask SeekToColumnSequential(bool async, int ordinal, FieldDescription if (columnLength > -1) { // Resumable: true causes commit to consume without error. 
- PgReader.Init(columnLength, field.DataFormat, resumable: true); + PgReader.Init(columnLength, dataFormat, resumable: true); committed = false; } } - return Core(async, !committed, ordinal, field.DataFormat, resumableOp); + return Core(async, !committed, ordinal, dataFormat, resumableOp); #if NET6_0_OR_GREATER [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder<>))] @@ -1976,12 +2036,15 @@ async ValueTask Core(bool async, bool commit, int ordinal, DataFormat dataF if (commit) { Debug.Assert(ordinal != _column); - await PgReader.Commit(async, reread).ConfigureAwait(false); + if (async) + await PgReader.CommitAsync(reread).ConfigureAwait(false); + else + PgReader.Commit(reread); } if (ordinal == _column) { - PgReader.Init(PgReader.FieldSize, field.DataFormat, PgReader.FieldSize is -1 || resumableOp); + PgReader.Init(PgReader.FieldSize, dataFormat, PgReader.FieldSize is -1 || resumableOp); return PgReader.FieldSize; } @@ -2056,8 +2119,10 @@ Task ConsumeRow(bool async) async Task ConsumeRowSequential(bool async) { - await PgReader.Commit(async, resuming: false).ConfigureAwait(false); - + if (async) + await PgReader.CommitAsync(resuming: false).ConfigureAwait(false); + else + PgReader.Commit(resuming: false); // Skip over the remaining columns in the row for (; _column < _numColumns - 1; _column++) { @@ -2072,8 +2137,8 @@ async Task ConsumeRowSequential(bool async) [MethodImpl(MethodImplOptions.AggressiveInlining)] void ConsumeRowNonSequential() { - Debug.Assert(State == ReaderState.InResult || State == ReaderState.BeforeResult); - PgReader.Commit(async: false, resuming: false).GetAwaiter().GetResult(); + Debug.Assert(State is ReaderState.InResult or ReaderState.BeforeResult); + PgReader.Commit(resuming: false); Buffer.ReadPosition = _dataMsgEnd; } @@ -2101,7 +2166,7 @@ void CheckResultSet() } [MethodImpl(MethodImplOptions.NoInlining)] - static T DbNullValueOrThrow(FieldDescription field) + T DbNullValueOrThrow(int ordinal) { // When T is a Nullable (and only 
in that case), we support returning null if (default(T) is null && typeof(T).IsValueType) @@ -2110,29 +2175,54 @@ static T DbNullValueOrThrow(FieldDescription field) if (typeof(T) == typeof(object)) return (T)(object)DBNull.Value; - ThrowHelper.ThrowInvalidCastException_NoValue(field); + ThrowHelper.ThrowInvalidCastException_NoValue(CheckRowAndGetField(ordinal)); return default; } [MethodImpl(MethodImplOptions.AggressiveInlining)] - FieldDescription GetInfo(int ordinal, Type? type, out PgConverter converter, out Size bufferRequirement, out bool asObject) + DataFormat GetInfo(int ordinal, Type type, out PgConverter converter, out Size bufferRequirement, out bool asObject) { - var field = CheckRowAndGetField(ordinal); - - if (type is null) + var state = State; + if (state is not ReaderState.InResult || (uint)ordinal > (uint)_numColumns) { - var odfInfo = field.ObjectOrDefaultInfo; - converter = odfInfo.Converter; - bufferRequirement = odfInfo.BufferRequirement; - asObject = odfInfo.IsBoxingConverter; - return field; + Unsafe.SkipInit(out converter); + Unsafe.SkipInit(out bufferRequirement); + Unsafe.SkipInit(out asObject); + HandleInvalidState(state, _numColumns); + Debug.Fail("Should never get here"); } ref var info = ref ColumnInfoCache![ordinal]; - field.GetInfo(type, ref info, out asObject); - converter = info.Converter; - bufferRequirement = info.BufferRequirement; - return field; + if (info.ConverterInfo.TypeToConvert == type) + { + converter = info.ConverterInfo.Converter; + bufferRequirement = info.ConverterInfo.BufferRequirement; + asObject = info.AsObject; + return info.DataFormat; + } + + return Slow(ref info, out converter, out bufferRequirement, out asObject); + + [MethodImpl(MethodImplOptions.NoInlining)] + DataFormat Slow(ref ColumnInfo info, out PgConverter converter, out Size bufferRequirement, out bool asObject) + { + var field = CheckRowAndGetField(ordinal); + field.GetInfo(type, ref info); + converter = info.ConverterInfo.Converter; + 
bufferRequirement = info.ConverterInfo.BufferRequirement; + asObject = info.AsObject; + return field.DataFormat; + } + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + DataFormat GetDefaultInfo(int ordinal, out PgConverter converter, out Size bufferRequirement) + { + var field = CheckRowAndGetField(ordinal); + + converter = field.ObjectOrDefaultInfo.Converter; + bufferRequirement = field.ObjectOrDefaultInfo.BufferRequirement; + return field.DataFormat; } [MethodImpl(MethodImplOptions.AggressiveInlining)] @@ -2140,32 +2230,32 @@ FieldDescription CheckRowAndGetField(int column) { var columns = RowDescription; var state = State; - if (state is ReaderState.InResult && column >= 0 && column < columns!.Count) + if (state is ReaderState.InResult && (uint)column < (uint)columns!.Count) return columns[column]; return HandleInvalidState(state, columns?.Count ?? 0); + } - [MethodImpl(MethodImplOptions.NoInlining)] - static FieldDescription HandleInvalidState(ReaderState state, int maxColumns) + [DoesNotReturn] + [MethodImpl(MethodImplOptions.NoInlining)] + static FieldDescription HandleInvalidState(ReaderState state, int maxColumns) + { + switch (state) { - switch (state) - { - case ReaderState.InResult: - break; - case ReaderState.Closed: - ThrowHelper.ThrowInvalidOperationException("The reader is closed"); - break; - case ReaderState.Disposed: - ThrowHelper.ThrowObjectDisposedException(nameof(NpgsqlDataReader)); - break; - default: - ThrowHelper.ThrowInvalidOperationException("No row is available"); - break; - } - + case ReaderState.InResult: ThrowColumnOutOfRange(maxColumns); - return default!; + break; + case ReaderState.Closed: + ThrowHelper.ThrowInvalidOperationException("The reader is closed"); + break; + case ReaderState.Disposed: + ThrowHelper.ThrowObjectDisposedException(nameof(NpgsqlDataReader)); + break; + default: + ThrowHelper.ThrowInvalidOperationException("No row is available"); + break; } + return default!; } /// @@ -2186,17 +2276,25 @@ 
FieldDescription GetField(int column) void CheckClosedOrDisposed() { - switch (State) + if (State is (ReaderState.Closed or ReaderState.Disposed) and var state) + Throw(state); + + [MethodImpl(MethodImplOptions.NoInlining)] + static void Throw(ReaderState state) { - case ReaderState.Closed: - ThrowHelper.ThrowInvalidOperationException("The reader is closed"); - return; - case ReaderState.Disposed: - ThrowHelper.ThrowObjectDisposedException(nameof(NpgsqlDataReader)); - return; + switch (state) + { + case ReaderState.Closed: + ThrowHelper.ThrowInvalidOperationException("The reader is closed"); + return; + case ReaderState.Disposed: + ThrowHelper.ThrowObjectDisposedException(nameof(NpgsqlDataReader)); + return; + } } } + [DoesNotReturn] static void ThrowColumnOutOfRange(int maxIndex) => throw new IndexOutOfRangeException($"Column must be between {0} and {maxIndex - 1}"); diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index 18c0d05f32..b2eb8dc18a 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -57,7 +57,7 @@ public abstract class NpgsqlDataSource : DbDataSource readonly Task _passwordRefreshTask = null!; string? 
_password; - bool _isBootstrapped; + internal bool IsBootstrapped { get; private set; } volatile DatabaseStateInfo _databaseStateInfo = new(); @@ -223,7 +223,7 @@ internal async Task Bootstrap( bool async, CancellationToken cancellationToken) { - if (_isBootstrapped && !forceReload) + if (IsBootstrapped && !forceReload) return; var hasSemaphore = async @@ -235,7 +235,7 @@ internal async Task Bootstrap( try { - if (_isBootstrapped && !forceReload) + if (IsBootstrapped && !forceReload) return; // The type loading below will need to send queries to the database, and that depends on a type mapper being set up (even if its @@ -263,7 +263,7 @@ internal async Task Bootstrap( DefaultNameTranslator = _defaultNameTranslator }; - _isBootstrapped = true; + IsBootstrapped = true; } finally { diff --git a/src/Npgsql/NpgsqlEventSource.cs b/src/Npgsql/NpgsqlEventSource.cs index 43d4a9911b..1e9b82c9c5 100644 --- a/src/Npgsql/NpgsqlEventSource.cs +++ b/src/Npgsql/NpgsqlEventSource.cs @@ -3,7 +3,6 @@ using System.Diagnostics; using System.Threading; using System.Diagnostics.Tracing; -using System.Runtime.CompilerServices; namespace Npgsql; @@ -55,26 +54,46 @@ internal NpgsqlEventSource() : base(EventSourceName) {} // https://blogs.msdn.microsoft.com/vancem/2015/09/14/exploring-eventsource-activity-correlation-and-causation-features/ // - A stop event's event id must be next one after its start event. 
- internal void BytesWritten(long bytesWritten) => Interlocked.Add(ref _bytesWritten, bytesWritten); - internal void BytesRead(long bytesRead) => Interlocked.Add(ref _bytesRead, bytesRead); + internal void BytesWritten(long bytesWritten) + { + if (IsEnabled()) + Interlocked.Add(ref _bytesWritten, bytesWritten); + } + + internal void BytesRead(long bytesRead) + { + if (IsEnabled()) + Interlocked.Add(ref _bytesRead, bytesRead); + } public void CommandStart(string sql) { - Interlocked.Increment(ref _totalCommands); - Interlocked.Increment(ref _currentCommands); + if (IsEnabled()) + { + Interlocked.Increment(ref _totalCommands); + Interlocked.Increment(ref _currentCommands); + } NpgsqlSqlEventSource.Log.CommandStart(sql); } - [MethodImpl(MethodImplOptions.NoInlining)] public void CommandStop() { - Interlocked.Decrement(ref _currentCommands); + if (IsEnabled()) + Interlocked.Decrement(ref _currentCommands); NpgsqlSqlEventSource.Log.CommandStop(); } - internal void CommandStartPrepared() => Interlocked.Increment(ref _totalPreparedCommands); + internal void CommandStartPrepared() + { + if (IsEnabled()) + Interlocked.Increment(ref _totalPreparedCommands); + } - internal void CommandFailed() => Interlocked.Increment(ref _failedCommands); + internal void CommandFailed() + { + if (IsEnabled()) + Interlocked.Increment(ref _failedCommands); + } internal void DataSourceCreated(NpgsqlDataSource dataSource) { @@ -89,9 +108,12 @@ internal void DataSourceCreated(NpgsqlDataSource dataSource) internal void MultiplexingBatchSent(int numCommands, Stopwatch stopwatch) { // TODO: CAS loop instead of 3 separate interlocked operations? 
- Interlocked.Increment(ref _multiplexingBatchesSent); - Interlocked.Add(ref _multiplexingCommandsSent, numCommands); - Interlocked.Add(ref _multiplexingTicksWritten, stopwatch.ElapsedTicks); + if (IsEnabled()) + { + Interlocked.Increment(ref _multiplexingBatchesSent); + Interlocked.Add(ref _multiplexingCommandsSent, numCommands); + Interlocked.Add(ref _multiplexingTicksWritten, stopwatch.ElapsedTicks); + } } #if !NETSTANDARD2_0 @@ -206,4 +228,4 @@ protected override void OnEventCommand(EventCommandEventArgs command) } #endif -} \ No newline at end of file +} diff --git a/src/Npgsql/NpgsqlSqlEventSource.cs b/src/Npgsql/NpgsqlSqlEventSource.cs index d8dc66d157..1e37a2355f 100644 --- a/src/Npgsql/NpgsqlSqlEventSource.cs +++ b/src/Npgsql/NpgsqlSqlEventSource.cs @@ -1,5 +1,4 @@ using System.Diagnostics.Tracing; -using System.Runtime.CompilerServices; namespace Npgsql; @@ -22,9 +21,8 @@ internal NpgsqlSqlEventSource() : base(EventSourceName) {} // - A stop event's event id must be next one after its start event. [Event(CommandStartId, Level = EventLevel.Informational)] - public void CommandStart(string sql) => Log.WriteEvent(CommandStartId, sql); + public void CommandStart(string sql) => WriteEvent(CommandStartId, sql); - [MethodImpl(MethodImplOptions.NoInlining)] [Event(CommandStopId, Level = EventLevel.Informational)] - public void CommandStop() => Log.WriteEvent(CommandStopId); -} \ No newline at end of file + public void CommandStop() => WriteEvent(CommandStopId); +} diff --git a/src/Npgsql/PreparedStatement.cs b/src/Npgsql/PreparedStatement.cs index 9186df77c9..6b88289e5c 100644 --- a/src/Npgsql/PreparedStatement.cs +++ b/src/Npgsql/PreparedStatement.cs @@ -1,6 +1,7 @@ using System; using System.Collections.Generic; using System.Diagnostics; +using System.Text; using Npgsql.BackendMessages; using Npgsql.Internal.Postgres; @@ -17,7 +18,7 @@ sealed class PreparedStatement internal string Sql { get; } - internal string? Name; + internal byte[]? 
Name; internal RowDescriptionMessage? Description; @@ -41,7 +42,9 @@ sealed class PreparedStatement internal int AutoPreparedSlotIndex { get; set; } - internal DateTime LastUsed { get; set; } + internal long LastUsed { get; set; } + + internal void RefreshLastUsed() => LastUsed = Stopwatch.GetTimestamp(); /// /// Contains the handler types for a prepared statement's parameters, for overloaded cases (same SQL, different param types) @@ -58,7 +61,7 @@ internal static PreparedStatement CreateExplicit( { var pStatement = new PreparedStatement(manager, sql, true) { - Name = name, + Name = Encoding.ASCII.GetBytes(name), StatementBeingReplaced = statementBeingReplaced }; pStatement.SetParamTypes(parameters); @@ -91,14 +94,13 @@ internal void SetParamTypes(List parameters) internal bool DoParametersMatch(List parameters) { - if (ConverterParamTypes!.Length != parameters.Count) - return false; - - for (var i = 0; i < ConverterParamTypes.Length; i++) - if (ConverterParamTypes[i] != parameters[i].PgTypeId) - return false; + var paramTypes = ConverterParamTypes!; + var forall = paramTypes.Length == parameters.Count; + for (var i = 0; forall && i < paramTypes.Length; i++) + if (paramTypes[i] != parameters[i].PgTypeId) + forall = false; - return true; + return forall; } internal void AbortPrepare() diff --git a/src/Npgsql/PreparedStatementManager.cs b/src/Npgsql/PreparedStatementManager.cs index 227cd1314d..ef72879c6d 100644 --- a/src/Npgsql/PreparedStatementManager.cs +++ b/src/Npgsql/PreparedStatementManager.cs @@ -1,6 +1,7 @@ using System; using System.Collections.Generic; using System.Diagnostics; +using System.Text; using Microsoft.Extensions.Logging; using Npgsql.Internal; @@ -16,6 +17,8 @@ sealed class PreparedStatementManager readonly PreparedStatement?[] _candidates; + static readonly List EmptyParameters = new(); + /// /// Total number of current prepared statements (whether explicit or automatic). 
/// @@ -65,7 +68,7 @@ internal PreparedStatementManager(NpgsqlConnector connector) // Great, we've found an explicit prepared statement. // We just need to check that the parameter types correspond, since prepared statements are // only keyed by SQL (to prevent pointless allocations). If we have a mismatch, simply run unprepared. - return pStatement.DoParametersMatch(batchCommand.PositionalParameters!) + return pStatement.DoParametersMatch(batchCommand.CurrentParametersReadOnly) ? pStatement : null; } @@ -89,7 +92,7 @@ internal PreparedStatementManager(NpgsqlConnector connector) } // Statement hasn't been prepared yet - return BySql[sql] = PreparedStatement.CreateExplicit(this, sql, NextPreparedStatementName(), batchCommand.PositionalParameters, statementBeingReplaced); + return BySql[sql] = PreparedStatement.CreateExplicit(this, sql, NextPreparedStatementName(), batchCommand.CurrentParametersReadOnly, statementBeingReplaced); } internal PreparedStatement? TryGetAutoPrepared(NpgsqlBatchCommand batchCommand) @@ -99,7 +102,7 @@ internal PreparedStatementManager(NpgsqlConnector connector) { // New candidate. Find an empty candidate slot or eject a least-used one. int slotIndex = -1, leastUsages = int.MaxValue; - var lastUsed = DateTime.MaxValue; + var lastUsed = long.MaxValue; for (var i = 0; i < _candidates.Length; i++) { var candidate = _candidates[i]; @@ -143,10 +146,10 @@ internal PreparedStatementManager(NpgsqlConnector connector) // for preparation (earlier identical statement in the same command). // We just need to check that the parameter types correspond, since prepared statements are // only keyed by SQL (to prevent pointless allocations). If we have a mismatch, simply run unprepared. 
- if (!pStatement.DoParametersMatch(batchCommand.PositionalParameters)) + if (!pStatement.DoParametersMatch(batchCommand.CurrentParametersReadOnly)) return null; // Prevent this statement from being replaced within this batch - pStatement.LastUsed = DateTime.MaxValue; + pStatement.LastUsed = long.MaxValue; return pStatement; case PreparedState.BeingUnprepared: @@ -162,7 +165,7 @@ internal PreparedStatementManager(NpgsqlConnector connector) { // Statement still hasn't passed the usage threshold, no automatic preparation. // Return null for unprepared execution. - pStatement.LastUsed = DateTime.UtcNow; + pStatement.RefreshLastUsed(); return null; } @@ -170,7 +173,7 @@ internal PreparedStatementManager(NpgsqlConnector connector) LogMessages.AutoPreparingStatement(_commandLogger, sql, _connector.Id); // Look for either an empty autoprepare slot, or the least recently used prepared statement which we'll replace it. - var oldestTimestamp = DateTime.MaxValue; + var oldestLastUsed = long.MaxValue; var selectedIndex = -1; for (var i = 0; i < AutoPrepared.Length; i++) { @@ -186,10 +189,10 @@ internal PreparedStatementManager(NpgsqlConnector connector) switch (slot.State) { case PreparedState.Prepared: - if (slot.LastUsed < oldestTimestamp) + if (slot.LastUsed < oldestLastUsed) { selectedIndex = i; - oldestTimestamp = slot.LastUsed; + oldestLastUsed = slot.LastUsed; } break; @@ -217,7 +220,7 @@ internal PreparedStatementManager(NpgsqlConnector connector) if (oldPreparedStatement is null) { - pStatement.Name = "_auto" + selectedIndex; + pStatement.Name = Encoding.ASCII.GetBytes("_auto" + selectedIndex); } else { @@ -242,12 +245,12 @@ internal PreparedStatementManager(NpgsqlConnector connector) // Make sure this statement isn't replaced by a later statement in the same batch. 
- pStatement.LastUsed = DateTime.MaxValue; + pStatement.LastUsed = long.MaxValue; // Note that the parameter types are only set at the moment of preparation - in the candidate phase // there's no differentiation between overloaded statements, which are a pretty rare case, saving // allocations. - pStatement.SetParamTypes(batchCommand.PositionalParameters); + pStatement.SetParamTypes(batchCommand.CurrentParametersReadOnly); return pStatement; } @@ -278,4 +281,4 @@ internal void ClearAll() for (var i = 0; i < _candidates.Length; i++) _candidates[i] = null; } -} \ No newline at end of file +} diff --git a/src/Npgsql/Replication/PgOutput/ReplicationValue.cs b/src/Npgsql/Replication/PgOutput/ReplicationValue.cs index dfa8a6ecd4..c918325840 100644 --- a/src/Npgsql/Replication/PgOutput/ReplicationValue.cs +++ b/src/Npgsql/Replication/PgOutput/ReplicationValue.cs @@ -26,7 +26,7 @@ public class ReplicationValue public TupleDataKind Kind { get; private set; } FieldDescription _fieldDescription = null!; - PgConverterInfo _lastInfo; + ColumnInfo _lastInfo; bool _isConsumed; PgReader PgReader => _readBuffer.PgReader; @@ -88,7 +88,7 @@ public async ValueTask Get(CancellationToken cancellationToken = default) { CheckActive(); - _fieldDescription.GetInfo(typeof(T), ref _lastInfo, out var asObject); + _fieldDescription.GetInfo(typeof(T), ref _lastInfo); var info = _lastInfo; switch (Kind) @@ -112,10 +112,10 @@ public async ValueTask Get(CancellationToken cancellationToken = default) using var registration = _readBuffer.Connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); var reader = PgReader.Init(Length, _fieldDescription.DataFormat); - await reader.StartReadAsync(info.BufferRequirement, cancellationToken).ConfigureAwait(false); - var result = asObject - ? 
(T)await info.Converter.ReadAsObjectAsync(reader, cancellationToken).ConfigureAwait(false) - : await info.GetConverter().ReadAsync(reader, cancellationToken).ConfigureAwait(false); + await reader.StartReadAsync(info.ConverterInfo.BufferRequirement, cancellationToken).ConfigureAwait(false); + var result = info.AsObject + ? (T)await info.ConverterInfo.Converter.ReadAsObjectAsync(reader, cancellationToken).ConfigureAwait(false) + : await info.ConverterInfo.GetConverter().ReadAsync(reader, cancellationToken).ConfigureAwait(false); await reader.EndReadAsync().ConfigureAwait(false); return result; } @@ -158,7 +158,7 @@ public TextReader GetTextReader() CheckActive(); ref var info = ref _lastInfo; - _fieldDescription.GetInfo(typeof(TextReader), ref info, out _); + _fieldDescription.GetInfo(typeof(TextReader), ref info); switch (Kind) { @@ -171,8 +171,8 @@ public TextReader GetTextReader() } var reader = PgReader.Init(Length, _fieldDescription.DataFormat); - reader.StartRead(info.BufferRequirement); - var result = (TextReader)info.Converter.ReadAsObject(reader); + reader.StartRead(info.ConverterInfo.BufferRequirement); + var result = (TextReader)info.ConverterInfo.Converter.ReadAsObject(reader); reader.EndRead(); return result; } @@ -185,7 +185,7 @@ internal async Task Consume(CancellationToken cancellationToken) if (!PgReader.Initialized) PgReader.Init(Length, _fieldDescription.DataFormat); await PgReader.ConsumeAsync(cancellationToken: cancellationToken).ConfigureAwait(false); - await PgReader.Commit(async: true, resuming: false).ConfigureAwait(false); + await PgReader.CommitAsync(resuming: false).ConfigureAwait(false); _isConsumed = true; } diff --git a/src/Npgsql/SqlQueryParser.cs b/src/Npgsql/SqlQueryParser.cs index 2f554223ea..16c9992c14 100644 --- a/src/Npgsql/SqlQueryParser.cs +++ b/src/Npgsql/SqlQueryParser.cs @@ -7,6 +7,8 @@ namespace Npgsql; sealed class SqlQueryParser { + static NpgsqlParameterCollection EmptyParameters { get; } = new(); + readonly Dictionary 
_paramIndexMap = new(); readonly StringBuilder _rewrittenSql = new(); @@ -70,7 +72,7 @@ void ParseRawQuery( // Batching mode. We're processing only one batch - if we encounter a semicolon (legacy batching), that's an error. Debug.Assert(batchCommand is not null); sql = batchCommand.CommandText; - parameters = batchCommand.Parameters; + parameters = batchCommand._parameters ?? EmptyParameters; batchCommands = null; } else @@ -78,7 +80,7 @@ void ParseRawQuery( // Command mode. Semicolons (legacy batching) may occur. Debug.Assert(batchCommand is null); sql = command.CommandText; - parameters = command.Parameters; + parameters = command._parameters ?? EmptyParameters; batchCommands = command.InternalBatchCommands; MoveToNextBatchCommand(); } @@ -484,7 +486,11 @@ void ParseRawQuery( Finish: _rewrittenSql.Append(sql, currTokenBeg, end - currTokenBeg); - batchCommand.FinalCommandText = _rewrittenSql.ToString(); + if (statementIndex is 0 && _paramIndexMap.Count is 0) + // Single statement, no parameters, no rewriting necessary + batchCommand.FinalCommandText = sql; + else + batchCommand.FinalCommandText = _rewrittenSql.ToString(); if (batchCommands is not null && batchCommands.Count > statementIndex + 1) batchCommands.RemoveRange(statementIndex + 1, batchCommands.Count - (statementIndex + 1)); diff --git a/src/Npgsql/ThrowHelper.cs b/src/Npgsql/ThrowHelper.cs index d6666bd130..4012511787 100644 --- a/src/Npgsql/ThrowHelper.cs +++ b/src/Npgsql/ThrowHelper.cs @@ -113,7 +113,7 @@ internal static void ThrowIndexOutOfRangeException(string message) => throw new IndexOutOfRangeException(message); [DoesNotReturn] - internal static void ThrowNotSupportedException(string message) + internal static void ThrowNotSupportedException(string? 
message = null) => throw new NotSupportedException(message); [DoesNotReturn] diff --git a/src/Npgsql/Util/ResettableCancellationTokenSource.cs b/src/Npgsql/Util/ResettableCancellationTokenSource.cs index 0912ceb7b9..874d7a40f8 100644 --- a/src/Npgsql/Util/ResettableCancellationTokenSource.cs +++ b/src/Npgsql/Util/ResettableCancellationTokenSource.cs @@ -20,7 +20,7 @@ sealed class ResettableCancellationTokenSource : IDisposable public TimeSpan Timeout { get; set; } CancellationTokenSource _cts = new(); - CancellationTokenRegistration _registration; + CancellationTokenRegistration? _registration; /// /// Used, so we wouldn't concurently use the cts for the cancellation, while it's being disposed @@ -100,7 +100,8 @@ public void RestartTimeoutWithoutReset() /// The from the wrapped public CancellationToken Reset() { - _registration.Dispose(); + _registration?.Dispose(); + _registration = null; lock (lockObject) { // if there was an attempt to cancel while the connector was breaking @@ -155,7 +156,8 @@ public void ResetCts() /// public void Stop() { - _registration.Dispose(); + _registration?.Dispose(); + _registration = null; lock (lockObject) { // if there was an attempt to cancel while the connector was breaking @@ -219,7 +221,7 @@ public void Dispose() lock (lockObject) { - _registration.Dispose(); + _registration?.Dispose(); _cts.Dispose(); isDisposed = true; diff --git a/src/Npgsql/Util/Statics.cs b/src/Npgsql/Util/Statics.cs index b84cea4afd..982ab00c17 100644 --- a/src/Npgsql/Util/Statics.cs +++ b/src/Npgsql/Util/Statics.cs @@ -1,6 +1,7 @@ using Npgsql.Internal; using System; using System.Collections.Generic; +using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.Runtime.CompilerServices; @@ -45,6 +46,7 @@ static void ThrowIfMsgWrongType(IBackendMessage msg, NpgsqlConnector connecto => throw connector.Break( new NpgsqlException($"Received backend message {msg.Code} while expecting {typeof(T).Name}. 
Please file a bug.")); + [Conditional("DEBUG")] internal static void ValidateBackendMessageCode(BackendMessageCode code) { switch (code) diff --git a/test/Npgsql.Tests/PrepareTests.cs b/test/Npgsql.Tests/PrepareTests.cs index f980069385..c0814daa3f 100644 --- a/test/Npgsql.Tests/PrepareTests.cs +++ b/test/Npgsql.Tests/PrepareTests.cs @@ -257,7 +257,7 @@ public void Reuse_prepared_statement() { using var dataSource = CreateDataSource(); using var conn1 = dataSource.OpenConnection(); - var preparedStatement = ""; + var preparedStatement = Array.Empty(); using (var cmd1 = new NpgsqlCommand("SELECT @p", conn1)) { cmd1.Parameters.AddWithValue("p", 8); diff --git a/test/Npgsql.Tests/Support/PgServerMock.cs b/test/Npgsql.Tests/Support/PgServerMock.cs index 0135059d0d..04ae5fc948 100644 --- a/test/Npgsql.Tests/Support/PgServerMock.cs +++ b/test/Npgsql.Tests/Support/PgServerMock.cs @@ -152,7 +152,7 @@ internal async Task ExpectSimpleQuery(string expectedSql) Assert.That(actualSql, Is.EqualTo(expectedSql)); } - internal Task WaitForData() => _readBuffer.EnsureAsync(1); + internal Task WaitForData() => _readBuffer.EnsureAsync(1).AsTask(); internal Task FlushAsync() { @@ -213,7 +213,7 @@ internal PgServerMock WriteRowDescription(params FieldDescription[] fields) _writeBuffer.WriteByte((byte)BackendMessageCode.RowDescription); _writeBuffer.WriteInt32(4 + 2 + fields.Sum(f => Encoding.GetByteCount(f.Name) + 1 + 18)); - _writeBuffer.WriteInt16(fields.Length); + _writeBuffer.WriteInt16((short)fields.Length); foreach (var field in fields) { @@ -251,7 +251,7 @@ internal PgServerMock WriteDataRow(params byte[][] columnValues) _writeBuffer.WriteByte((byte)BackendMessageCode.DataRow); _writeBuffer.WriteInt32(4 + 2 + columnValues.Sum(v => 4 + v.Length)); - _writeBuffer.WriteInt16(columnValues.Length); + _writeBuffer.WriteInt16((short)columnValues.Length); foreach (var field in columnValues) { @@ -271,7 +271,7 @@ internal async Task WriteDataRowWithFlush(params byte[][] columnValues) 
_writeBuffer.WriteByte((byte)BackendMessageCode.DataRow); _writeBuffer.WriteInt32(4 + 2 + columnValues.Sum(v => 4 + v.Length)); - _writeBuffer.WriteInt16(columnValues.Length); + _writeBuffer.WriteInt16((short)columnValues.Length); foreach (var field in columnValues) { From cd59e214b4d90578b96a72528176db7a95a6fbaf Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Thu, 2 Nov 2023 19:30:51 +0100 Subject: [PATCH 275/761] Lazy mapping setup (#5360) Contributes to aspnet/Benchmarks#1922 --- .../Internal/GeoJSONTypeInfoResolver.cs | 38 ++++++--- .../NetTopologySuiteTypeInfoResolver.cs | 37 ++++++--- .../NpgsqlNetTopologySuiteExtensions.cs | 3 + .../Internal/NodaTimeTypeInfoResolver.cs | 30 ++++--- .../NpgsqlNodaTimeExtensions.cs | 3 + .../BackendMessages/RowDescriptionMessage.cs | 5 +- src/Npgsql/Internal/PgTypeInfo.cs | 4 +- .../Internal/Resolvers/AdoTypeInfoResolver.cs | 30 +++---- .../Resolvers/ExtraConversionsResolver.cs | 22 +++--- .../FullTextSearchTypeInfoResolver.cs | 30 ++++--- .../Resolvers/GeometricTypeInfoResolver.cs | 30 ++++--- .../Resolvers/JsonDynamicTypeInfoResolver.cs | 57 +++++++++----- .../Resolvers/JsonTypeInfoResolvers.cs | 27 ++++--- .../Resolvers/LTreeTypeInfoResolver.cs | 30 ++++--- .../Resolvers/NetworkTypeInfoResolver.cs | 30 ++++--- .../Resolvers/RangeTypeInfoResolver.cs | 49 +++++------- .../Resolvers/RecordTypeInfoResolvers.cs | 51 ++++++------ .../Resolvers/UnsupportedTypeInfoResolver.cs | 1 - src/Npgsql/Internal/TypeInfoMapping.cs | 78 ++++++++++++------- src/Npgsql/NpgsqlDataSourceBuilder.cs | 8 ++ src/Npgsql/NpgsqlNestedDataReader.cs | 10 ++- src/Npgsql/NpgsqlParameter`.cs | 2 +- src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 5 ++ test/Npgsql.Tests/Types/LTreeTests.cs | 11 +++ test/Npgsql.Tests/Types/MiscTypeTests.cs | 4 +- 25 files changed, 367 insertions(+), 228 deletions(-) diff --git a/src/Npgsql.GeoJSON/Internal/GeoJSONTypeInfoResolver.cs b/src/Npgsql.GeoJSON/Internal/GeoJSONTypeInfoResolver.cs index 5ea3b8c9e3..78dec45cbd 100644 --- 
a/src/Npgsql.GeoJSON/Internal/GeoJSONTypeInfoResolver.cs +++ b/src/Npgsql.GeoJSON/Internal/GeoJSONTypeInfoResolver.cs @@ -6,22 +6,26 @@ namespace Npgsql.GeoJSON.Internal; -sealed class GeoJSONTypeInfoResolver : IPgTypeInfoResolver +class GeoJSONTypeInfoResolver : IPgTypeInfoResolver { - TypeInfoMappingCollection Mappings { get; } + readonly GeoJSONOptions _options; + readonly bool _geographyAsDefault; + readonly CrsMap? _crsMap; - internal GeoJSONTypeInfoResolver(GeoJSONOptions options, bool geographyAsDefault, CrsMap? crsMap = null) + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddInfos(new(), _options, _geographyAsDefault, _crsMap); + + public GeoJSONTypeInfoResolver(GeoJSONOptions options, bool geographyAsDefault, CrsMap? crsMap = null) { - Mappings = new TypeInfoMappingCollection(); - AddInfos(Mappings, options, geographyAsDefault, crsMap); - // TODO opt-in arrays - AddArrayInfos(Mappings); + _options = options; + _geographyAsDefault = geographyAsDefault; + _crsMap = crsMap; } public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) => Mappings.Find(type, dataTypeName, options); - static void AddInfos(TypeInfoMappingCollection mappings, GeoJSONOptions geoJsonOptions, bool geographyAsDefault, CrsMap? crsMap) + static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings, GeoJSONOptions geoJsonOptions, bool geographyAsDefault, CrsMap? 
crsMap) { crsMap ??= new CrsMap(CrsMap.WellKnown); @@ -57,9 +61,11 @@ static void AddInfos(TypeInfoMappingCollection mappings, GeoJSONOptions geoJsonO (options, mapping, _) => mapping.CreateInfo(options, new GeoJSONConverter(geoJsonOptions, crsMap)), matchRequirement); } + + return mappings; } - static void AddArrayInfos(TypeInfoMappingCollection mappings) + protected static TypeInfoMappingCollection AddArrayInfos(TypeInfoMappingCollection mappings) { foreach (var dataTypeName in new[] { "geometry", "geography" }) { @@ -72,5 +78,19 @@ static void AddArrayInfos(TypeInfoMappingCollection mappings) mappings.AddArrayType(dataTypeName); mappings.AddArrayType(dataTypeName); } + + return mappings; } } + +sealed class GeoJSONArrayTypeInfoResolver : GeoJSONTypeInfoResolver, IPgTypeInfoResolver +{ + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddArrayInfos(new(base.Mappings)); + + public GeoJSONArrayTypeInfoResolver(GeoJSONOptions options, bool geographyAsDefault, CrsMap? crsMap = null) + : base(options, geographyAsDefault, crsMap) { } + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); +} diff --git a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeInfoResolver.cs b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeInfoResolver.cs index a934bd2f86..d5a42172a9 100644 --- a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeInfoResolver.cs +++ b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeInfoResolver.cs @@ -7,9 +7,13 @@ namespace Npgsql.NetTopologySuite.Internal; -sealed class NetTopologySuiteTypeInfoResolver : IPgTypeInfoResolver +class NetTopologySuiteTypeInfoResolver : IPgTypeInfoResolver { - TypeInfoMappingCollection Mappings { get; } + readonly PostGisReader _gisReader; + readonly bool _geographyAsDefault; + + TypeInfoMappingCollection? 
_mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddInfos(new(), _gisReader, new(), _geographyAsDefault); public NetTopologySuiteTypeInfoResolver( CoordinateSequenceFactory? coordinateSequenceFactory, @@ -21,19 +25,14 @@ public NetTopologySuiteTypeInfoResolver( precisionModel ??= NtsGeometryServices.Instance.DefaultPrecisionModel; handleOrdinates = handleOrdinates == Ordinates.None ? coordinateSequenceFactory.Ordinates : handleOrdinates; - var reader = new PostGisReader(coordinateSequenceFactory, precisionModel, handleOrdinates); - var writer = new PostGisWriter(); - - Mappings = new TypeInfoMappingCollection(); - AddInfos(Mappings, reader, writer, geographyAsDefault); - // TODO: Opt-in only - AddArrayInfos(Mappings); + _geographyAsDefault = geographyAsDefault; + _gisReader = new PostGisReader(coordinateSequenceFactory, precisionModel, handleOrdinates); } public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) => Mappings.Find(type, dataTypeName, options); - static void AddInfos(TypeInfoMappingCollection mappings, PostGisReader reader, PostGisWriter writer, bool geographyAsDefault) + static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings, PostGisReader reader, PostGisWriter writer, bool geographyAsDefault) { // geometry mappings.AddType("geometry", @@ -88,9 +87,11 @@ static void AddInfos(TypeInfoMappingCollection mappings, PostGisReader reader, P mappings.AddType("geography", (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), matchRequirement: geographyAsDefault ? 
MatchRequirement.All : MatchRequirement.DataTypeName); + + return mappings; } - static void AddArrayInfos(TypeInfoMappingCollection mappings) + protected static TypeInfoMappingCollection AddArrayInfos(TypeInfoMappingCollection mappings) { // geometry mappings.AddArrayType("geometry"); @@ -111,5 +112,19 @@ static void AddArrayInfos(TypeInfoMappingCollection mappings) mappings.AddArrayType("geography"); mappings.AddArrayType("geography"); mappings.AddArrayType("geography"); + + return mappings; } } + +sealed class NetTopologySuiteArrayTypeInfoResolver : NetTopologySuiteTypeInfoResolver, IPgTypeInfoResolver +{ + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddArrayInfos(new(base.Mappings)); + + public NetTopologySuiteArrayTypeInfoResolver(CoordinateSequenceFactory? coordinateSequenceFactory, PrecisionModel? precisionModel, Ordinates handleOrdinates, bool geographyAsDefault) + : base(coordinateSequenceFactory, precisionModel, handleOrdinates, geographyAsDefault) { } + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); +} diff --git a/src/Npgsql.NetTopologySuite/NpgsqlNetTopologySuiteExtensions.cs b/src/Npgsql.NetTopologySuite/NpgsqlNetTopologySuiteExtensions.cs index 1408709236..928c5c65f4 100644 --- a/src/Npgsql.NetTopologySuite/NpgsqlNetTopologySuiteExtensions.cs +++ b/src/Npgsql.NetTopologySuite/NpgsqlNetTopologySuiteExtensions.cs @@ -27,6 +27,9 @@ public static INpgsqlTypeMapper UseNetTopologySuite( Ordinates handleOrdinates = Ordinates.None, bool geographyAsDefault = false) { + // TODO opt-in of arrays. 
+ // Reverse order + mapper.AddTypeInfoResolver(new NetTopologySuiteArrayTypeInfoResolver(coordinateSequenceFactory, precisionModel, handleOrdinates, geographyAsDefault)); mapper.AddTypeInfoResolver(new NetTopologySuiteTypeInfoResolver(coordinateSequenceFactory, precisionModel, handleOrdinates, geographyAsDefault)); return mapper; } diff --git a/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolver.cs b/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolver.cs index 66dcfc35dc..344695118e 100644 --- a/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolver.cs +++ b/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolver.cs @@ -9,7 +9,7 @@ namespace Npgsql.NodaTime.Internal; -sealed class NodaTimeTypeInfoResolver : IPgTypeInfoResolver +class NodaTimeTypeInfoResolver : IPgTypeInfoResolver { static DataTypeName TimestampTzDataTypeName => new("pg_catalog.timestamptz"); static DataTypeName TimestampDataTypeName => new("pg_catalog.timestamp"); @@ -25,20 +25,13 @@ sealed class NodaTimeTypeInfoResolver : IPgTypeInfoResolver static DataTypeName TimestampRangeDataTypeName => new("pg_catalog.tsrange"); static DataTypeName TimestampMultirangeDataTypeName => new("pg_catalog.tsmultirange"); - TypeInfoMappingCollection Mappings { get; } - - public NodaTimeTypeInfoResolver() - { - Mappings = new TypeInfoMappingCollection(); - AddInfos(Mappings); - // TODO: Opt-in only - AddArrayInfos(Mappings); - } + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddInfos(new()); public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) => Mappings.Find(type, dataTypeName, options); - static void AddInfos(TypeInfoMappingCollection mappings) + static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings) { // timestamp and timestamptz, legacy and non-legacy modes if (LegacyTimestampBehavior) @@ -190,9 +183,11 @@ static void AddInfos(TypeInfoMappingCollection mappings) mappings.AddType>>(DateMultirangeDataTypeName, static (options, mapping, _) => mapping.CreateInfo(options, CreateListMultirangeConverter(CreateRangeConverter(new LocalDateConverter(options.EnableDateTimeInfinityConversions), options), options))); + + return mappings; } - static void AddArrayInfos(TypeInfoMappingCollection mappings) + protected static TypeInfoMappingCollection AddArrayInfos(TypeInfoMappingCollection mappings) { // timestamptz mappings.AddStructArrayType(TimestampTzDataTypeName); @@ -261,5 +256,16 @@ static void AddArrayInfos(TypeInfoMappingCollection mappings) mappings.AddArrayType>(DateMultirangeDataTypeName); mappings.AddArrayType[]>(DateMultirangeDataTypeName); mappings.AddArrayType>>(DateMultirangeDataTypeName); + + return mappings; } } + +sealed class NodaTimeArrayTypeInfoResolver : NodaTimeTypeInfoResolver, IPgTypeInfoResolver +{ + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddArrayInfos(new(base.Mappings)); + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); +} diff --git a/src/Npgsql.NodaTime/NpgsqlNodaTimeExtensions.cs b/src/Npgsql.NodaTime/NpgsqlNodaTimeExtensions.cs index 030f1ec1be..f933b81ca6 100644 --- a/src/Npgsql.NodaTime/NpgsqlNodaTimeExtensions.cs +++ b/src/Npgsql.NodaTime/NpgsqlNodaTimeExtensions.cs @@ -15,6 +15,9 @@ public static class NpgsqlNodaTimeExtensions /// The type mapper to set up (global or connection-specific) public static INpgsqlTypeMapper UseNodaTime(this INpgsqlTypeMapper mapper) { + // TODO opt-in of arrays. + // Reverse order + mapper.AddTypeInfoResolver(new NodaTimeArrayTypeInfoResolver()); mapper.AddTypeInfoResolver(new NodaTimeTypeInfoResolver()); return mapper; } diff --git a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs index d968b43632..ee2f120090 100644 --- a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs +++ b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs @@ -383,7 +383,10 @@ internal void GetInfo(Type? type, ref ColumnInfo lastColumnInfo) } if (odfInfo.TypeToConvert == type) { - lastColumnInfo = new(odfInfo, DataFormat, odfInfo.IsBoxingConverter); + // As TypeInfoMappingCollection is always adding object mappings for + // default/datatypename mappings, we'll also check Converter.TypeToConvert. + // If we have an exact match we are still able to use e.g. a converter for ints in an unboxed fashion. 
+ lastColumnInfo = new(odfInfo, DataFormat, odfInfo.IsBoxingConverter && odfInfo.Converter.TypeToConvert != type); return; } } diff --git a/src/Npgsql/Internal/PgTypeInfo.cs b/src/Npgsql/Internal/PgTypeInfo.cs index edbc693728..56d7e0bd0f 100644 --- a/src/Npgsql/Internal/PgTypeInfo.cs +++ b/src/Npgsql/Internal/PgTypeInfo.cs @@ -244,7 +244,7 @@ DataFormat ResolveFormat(PgConverter converter, out BufferRequirements bufferReq public sealed class PgResolverTypeInfo : PgTypeInfo { - internal readonly PgConverterResolver _converterResolver; + readonly PgConverterResolver _converterResolver; public PgResolverTypeInfo(PgSerializerOptions options, PgConverterResolver converterResolver, PgTypeId? pgTypeId, Type? unboxedType = null) : base(options, @@ -278,6 +278,8 @@ public PgConverterResolution GetResolution(Field field) public PgConverterResolution GetDefaultResolution(PgTypeId? pgTypeId) => _converterResolver.GetDefaultInternal(ValidateResolution, Options.PortableTypeIds, pgTypeId ?? PgTypeId); + + public PgConverterResolver GetConverterResolver() => _converterResolver; } public readonly struct PgConverterResolution diff --git a/src/Npgsql/Internal/Resolvers/AdoTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/AdoTypeInfoResolver.cs index 7ae40ef9a4..f7c619c42a 100644 --- a/src/Npgsql/Internal/Resolvers/AdoTypeInfoResolver.cs +++ b/src/Npgsql/Internal/Resolvers/AdoTypeInfoResolver.cs @@ -2,7 +2,6 @@ using System.Collections; using System.Collections.Generic; using System.Collections.Specialized; -using System.Diagnostics; using System.IO; using Npgsql.Internal.Converters; using Npgsql.Internal.Converters.Internal; @@ -16,15 +15,10 @@ namespace Npgsql.Internal.Resolvers; // Baseline types that are always supported. 
class AdoTypeInfoResolver : IPgTypeInfoResolver { - public AdoTypeInfoResolver() - { - Mappings = new TypeInfoMappingCollection(); - AddInfos(Mappings); - } - public static AdoTypeInfoResolver Instance { get; } = new(); - protected TypeInfoMappingCollection Mappings { get; } + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddInfos(new()); public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) { @@ -46,7 +40,7 @@ public AdoTypeInfoResolver() return new PgTypeInfo(options, new StringTextConverter(options.TextEncoding), dataTypeName); } - static void AddInfos(TypeInfoMappingCollection mappings) + static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings) { // Bool mappings.AddStructType(DataTypeNames.Bool, @@ -312,9 +306,11 @@ static void AddInfos(TypeInfoMappingCollection mappings) MatchRequirement.DataTypeName); mappings.AddStructType(DataTypeNames.PgLsn, static (options, mapping, _) => mapping.CreateInfo(options, new UInt64Converter())); + + return mappings; } - protected static void AddArrayInfos(TypeInfoMappingCollection mappings) + protected static TypeInfoMappingCollection AddArrayInfos(TypeInfoMappingCollection mappings) { // Bool mappings.AddStructArrayType(DataTypeNames.Bool); @@ -448,21 +444,15 @@ protected static void AddArrayInfos(TypeInfoMappingCollection mappings) // Int2vector mappings.AddArrayType(DataTypeNames.Int2Vector); + + return mappings; } } sealed class AdoArrayTypeInfoResolver : AdoTypeInfoResolver, IPgTypeInfoResolver { - new TypeInfoMappingCollection Mappings { get; } - - public AdoArrayTypeInfoResolver() - { - Mappings = new TypeInfoMappingCollection(base.Mappings); - var elementTypeCount = Mappings.Items.Count; - AddArrayInfos(Mappings); - // Make sure we have at least one mapping for each element type. - Debug.Assert(Mappings.Items.Count >= elementTypeCount * 2); - } + TypeInfoMappingCollection? 
_mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddArrayInfos(new(base.Mappings)); public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) { diff --git a/src/Npgsql/Internal/Resolvers/ExtraConversionsResolver.cs b/src/Npgsql/Internal/Resolvers/ExtraConversionsResolver.cs index 5f642daf80..b495d987d3 100644 --- a/src/Npgsql/Internal/Resolvers/ExtraConversionsResolver.cs +++ b/src/Npgsql/Internal/Resolvers/ExtraConversionsResolver.cs @@ -8,14 +8,13 @@ namespace Npgsql.Internal.Resolvers; class ExtraConversionsResolver : IPgTypeInfoResolver { - public ExtraConversionsResolver() => AddInfos(Mappings); - - protected TypeInfoMappingCollection Mappings { get; } = new(); + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddInfos(new()); public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) => Mappings.Find(type, dataTypeName, options); - static void AddInfos(TypeInfoMappingCollection mappings) + static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings) { // Int2 mappings.AddStructType(DataTypeNames.Int2, @@ -139,9 +138,11 @@ static void AddInfos(TypeInfoMappingCollection mappings) // Hstore mappings.AddType>("hstore", static (options, mapping, _) => mapping.CreateInfo(options, new HstoreConverter>(options.TextEncoding, result => result.ToImmutableDictionary()))); + + return mappings; } - protected static void AddArrayInfos(TypeInfoMappingCollection mappings) + protected static TypeInfoMappingCollection AddArrayInfos(TypeInfoMappingCollection mappings) { // Int2 mappings.AddStructArrayType(DataTypeNames.Int2); @@ -217,18 +218,15 @@ protected static void AddArrayInfos(TypeInfoMappingCollection mappings) // Hstore mappings.AddArrayType>("hstore"); + + return mappings; } } sealed class ExtraConversionsArrayTypeInfoResolver : ExtraConversionsResolver, IPgTypeInfoResolver { - public 
ExtraConversionsArrayTypeInfoResolver() - { - Mappings = new TypeInfoMappingCollection(base.Mappings.Items); - AddArrayInfos(Mappings); - } - - new TypeInfoMappingCollection Mappings { get; } + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddArrayInfos(new(base.Mappings)); public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) => Mappings.Find(type, dataTypeName, options); diff --git a/src/Npgsql/Internal/Resolvers/FullTextSearchTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/FullTextSearchTypeInfoResolver.cs index f3b3a90d79..75c46f3eae 100644 --- a/src/Npgsql/Internal/Resolvers/FullTextSearchTypeInfoResolver.cs +++ b/src/Npgsql/Internal/Resolvers/FullTextSearchTypeInfoResolver.cs @@ -6,22 +6,15 @@ namespace Npgsql.Internal.Resolvers; -sealed class FullTextSearchTypeInfoResolver : IPgTypeInfoResolver +class FullTextSearchTypeInfoResolver : IPgTypeInfoResolver { - TypeInfoMappingCollection Mappings { get; } - - public FullTextSearchTypeInfoResolver() - { - Mappings = new TypeInfoMappingCollection(); - AddInfos(Mappings); - // TODO: Opt-in only - AddArrayInfos(Mappings); - } + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddInfos(new()); public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) => Mappings.Find(type, dataTypeName, options); - static void AddInfos(TypeInfoMappingCollection mappings) + static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings) { // tsvector mappings.AddType(DataTypeNames.TsVector, @@ -42,9 +35,11 @@ static void AddInfos(TypeInfoMappingCollection mappings) static (options, mapping, _) => mapping.CreateInfo(options, new TsQueryConverter(options.TextEncoding))); mappings.AddType(DataTypeNames.TsQuery, static (options, mapping, _) => mapping.CreateInfo(options, new TsQueryConverter(options.TextEncoding))); + + return mappings; } - static void AddArrayInfos(TypeInfoMappingCollection mappings) + protected static TypeInfoMappingCollection AddArrayInfos(TypeInfoMappingCollection mappings) { // tsvector mappings.AddArrayType(DataTypeNames.TsVector); @@ -57,6 +52,8 @@ static void AddArrayInfos(TypeInfoMappingCollection mappings) mappings.AddArrayType(DataTypeNames.TsQuery); mappings.AddArrayType(DataTypeNames.TsQuery); mappings.AddArrayType(DataTypeNames.TsQuery); + + return mappings; } public static void CheckUnsupported(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) @@ -79,3 +76,12 @@ public static void CheckUnsupported(Type? type, DataTypeName? dataType string.Format(NpgsqlStrings.FullTextSearchNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableFullTextSearch), typeof(TBuilder).Name)); } } + +sealed class FullTextSearchArrayTypeInfoResolver : FullTextSearchTypeInfoResolver, IPgTypeInfoResolver +{ + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddArrayInfos(new(base.Mappings)); + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); +} diff --git a/src/Npgsql/Internal/Resolvers/GeometricTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/GeometricTypeInfoResolver.cs index 6c24e1dcf9..0a0a9eef34 100644 --- a/src/Npgsql/Internal/Resolvers/GeometricTypeInfoResolver.cs +++ b/src/Npgsql/Internal/Resolvers/GeometricTypeInfoResolver.cs @@ -5,22 +5,15 @@ namespace Npgsql.Internal.Resolvers; -sealed class GeometricTypeInfoResolver : IPgTypeInfoResolver +class GeometricTypeInfoResolver : IPgTypeInfoResolver { - TypeInfoMappingCollection Mappings { get; } - - public GeometricTypeInfoResolver() - { - Mappings = new TypeInfoMappingCollection(); - AddInfos(Mappings); - // TODO: Opt-in only - AddArrayInfos(Mappings); - } + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddInfos(new()); public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) => Mappings.Find(type, dataTypeName, options); - static void AddInfos(TypeInfoMappingCollection mappings) + static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings) { mappings.AddStructType(DataTypeNames.Point, static (options, mapping, _) => mapping.CreateInfo(options, new PointConverter()), isDefault: true); @@ -36,9 +29,11 @@ static void AddInfos(TypeInfoMappingCollection mappings) static (options, mapping, _) => mapping.CreateInfo(options, new PathConverter()), isDefault: true); mappings.AddStructType(DataTypeNames.Circle, static (options, mapping, _) => mapping.CreateInfo(options, new CircleConverter()), isDefault: true); + + return mappings; } - static void AddArrayInfos(TypeInfoMappingCollection mappings) + protected static TypeInfoMappingCollection AddArrayInfos(TypeInfoMappingCollection mappings) { mappings.AddStructArrayType(DataTypeNames.Point); mappings.AddStructArrayType(DataTypeNames.Box); @@ -47,5 +42,16 @@ static void 
AddArrayInfos(TypeInfoMappingCollection mappings) mappings.AddStructArrayType(DataTypeNames.LSeg); mappings.AddStructArrayType(DataTypeNames.Path); mappings.AddStructArrayType(DataTypeNames.Circle); + + return mappings; } } + +sealed class GeometricArrayTypeInfoResolver : GeometricTypeInfoResolver, IPgTypeInfoResolver +{ + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddArrayInfos(new(base.Mappings)); + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); +} diff --git a/src/Npgsql/Internal/Resolvers/JsonDynamicTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/JsonDynamicTypeInfoResolver.cs index 6084fb1bef..3e610275fa 100644 --- a/src/Npgsql/Internal/Resolvers/JsonDynamicTypeInfoResolver.cs +++ b/src/Npgsql/Internal/Resolvers/JsonDynamicTypeInfoResolver.cs @@ -13,21 +13,41 @@ namespace Npgsql.Internal.Resolvers; [RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] class JsonDynamicTypeInfoResolver : DynamicTypeInfoResolver, IPgTypeInfoResolver { - protected TypeInfoMappingCollection Mappings { get; } = new(); - protected JsonSerializerOptions _serializerOptions; - - public JsonDynamicTypeInfoResolver(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerOptions? serializerOptions = null) - { + JsonSerializerOptions? _serializerOptions; + JsonSerializerOptions SerializerOptions #if NET7_0_OR_GREATER - _serializerOptions = serializerOptions ??= JsonSerializerOptions.Default; + => _serializerOptions ??= JsonSerializerOptions.Default; #else - _serializerOptions = serializerOptions ??= new JsonSerializerOptions(); + => _serializerOptions ??= new(); #endif - AddMappings(Mappings, jsonbClrTypes ?? Array.Empty(), jsonClrTypes ?? 
Array.Empty(), serializerOptions); + Type[] _jsonbClrTypes; + Type[] _jsonClrTypes; + TypeInfoMappingCollection? _mappings; + + protected TypeInfoMappingCollection Mappings + { + get + { + if (_mappings is not null) + return _mappings; + + // Publish _mappings before nulling _jsonbClrTypes and _jsonClrTypes as we may be called concurrently. + _mappings = AddInfos(new(), _jsonbClrTypes, _jsonClrTypes, SerializerOptions); + _jsonbClrTypes = null!; + _jsonClrTypes = null!; + return _mappings; + } } - void AddMappings(TypeInfoMappingCollection mappings, Type[] jsonbClrTypes, Type[] jsonClrTypes, JsonSerializerOptions serializerOptions) + public JsonDynamicTypeInfoResolver(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerOptions? serializerOptions = null) + { + _jsonbClrTypes = jsonbClrTypes ?? Array.Empty(); + _jsonClrTypes = jsonClrTypes ?? Array.Empty(); + _serializerOptions = serializerOptions; + } + + TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings, Type[] jsonbClrTypes, Type[] jsonClrTypes, JsonSerializerOptions serializerOptions) { // We do GetTypeInfo calls directly so we need a resolver. 
serializerOptions.TypeInfoResolver ??= new DefaultJsonTypeInfoResolver(); @@ -70,12 +90,14 @@ void AddUserMappings(bool jsonb, Type[] clrTypes) } mappings.AddRange(dynamicMappings.ToTypeInfoMappingCollection()); } + + return mappings; } - protected void AddArrayInfos(TypeInfoMappingCollection mappings, TypeInfoMappingCollection baseMappings) + protected TypeInfoMappingCollection AddArrayInfos(TypeInfoMappingCollection mappings, TypeInfoMappingCollection baseMappings) { if (baseMappings.Items.Count == 0) - return; + return mappings; foreach (var dataTypeName in new[] { DataTypeNames.Jsonb, DataTypeNames.Json }) { @@ -89,6 +111,8 @@ protected void AddArrayInfos(TypeInfoMappingCollection mappings, TypeInfoMapping foreach (var mapping in baseMappings.Items) dynamicMappings.AddArrayMapping(mapping.Type, mapping.DataTypeName); mappings.AddRange(dynamicMappings.ToTypeInfoMappingCollection()); + + return mappings; } public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) @@ -110,7 +134,7 @@ protected void AddArrayInfos(TypeInfoMappingCollection mappings, TypeInfoMapping var baseType = jsonb ? mapping.Type : typeof(object); return mapping.CreateInfo(options, - CreateSystemTextJsonConverter(mapping.Type, jsonb, options.TextEncoding, _serializerOptions, baseType)); + CreateSystemTextJsonConverter(mapping.Type, jsonb, options.TextEncoding, SerializerOptions, baseType)); }); } @@ -126,14 +150,11 @@ static PgConverter CreateSystemTextJsonConverter(Type valueType, bool jsonb, Enc [RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] sealed class JsonDynamicArrayTypeInfoResolver : JsonDynamicTypeInfoResolver, IPgTypeInfoResolver { - new TypeInfoMappingCollection Mappings { get; } + TypeInfoMappingCollection? 
_mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddArrayInfos(new(base.Mappings), base.Mappings); public JsonDynamicArrayTypeInfoResolver(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerOptions? serializerOptions = null) - : base(jsonbClrTypes, jsonClrTypes, serializerOptions) - { - Mappings = new TypeInfoMappingCollection(base.Mappings); - AddArrayInfos(Mappings, base.Mappings); - } + : base(jsonbClrTypes, jsonClrTypes, serializerOptions) { } public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) => Mappings.Find(type, dataTypeName, options) ?? base.GetTypeInfo(type, dataTypeName, options); diff --git a/src/Npgsql/Internal/Resolvers/JsonTypeInfoResolvers.cs b/src/Npgsql/Internal/Resolvers/JsonTypeInfoResolvers.cs index 7ba40e4921..d589f364ab 100644 --- a/src/Npgsql/Internal/Resolvers/JsonTypeInfoResolvers.cs +++ b/src/Npgsql/Internal/Resolvers/JsonTypeInfoResolvers.cs @@ -10,12 +10,11 @@ class JsonTypeInfoResolver : IPgTypeInfoResolver { static JsonSerializerOptions? DefaultSerializerOptions; - protected TypeInfoMappingCollection Mappings { get; } = new(); + readonly JsonSerializerOptions _serializerOptions; + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddInfos(new(), _serializerOptions); public JsonTypeInfoResolver(JsonSerializerOptions? serializerOptions = null) - => AddTypeInfos(Mappings, serializerOptions); - - static void AddTypeInfos(TypeInfoMappingCollection mappings, JsonSerializerOptions? 
serializerOptions = null) { if (serializerOptions is null) { @@ -27,7 +26,11 @@ static void AddTypeInfos(TypeInfoMappingCollection mappings, JsonSerializerOptio DefaultSerializerOptions = serializerOptions; } } + _serializerOptions = serializerOptions; + } + static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings, JsonSerializerOptions serializerOptions) + { // Jsonb is the first default for JsonDocument foreach (var dataTypeName in new[] { DataTypeNames.Jsonb, DataTypeNames.Json }) { @@ -38,15 +41,19 @@ static void AddTypeInfos(TypeInfoMappingCollection mappings, JsonSerializerOptio mappings.AddStructType(dataTypeName, (options, mapping, _) => mapping.CreateInfo(options, new JsonConverter(jsonb, options.TextEncoding, serializerOptions))); } + + return mappings; } - protected static void AddArrayInfos(TypeInfoMappingCollection mappings) + protected static TypeInfoMappingCollection AddArrayInfos(TypeInfoMappingCollection mappings) { foreach (var dataTypeName in new[] { DataTypeNames.Jsonb, DataTypeNames.Json }) { mappings.AddArrayType(dataTypeName); mappings.AddStructArrayType(dataTypeName); } + + return mappings; } public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) @@ -67,13 +74,11 @@ sealed class BasicJsonTypeInfoResolver : IJsonTypeInfoResolver sealed class JsonArrayTypeInfoResolver : JsonTypeInfoResolver, IPgTypeInfoResolver { - new TypeInfoMappingCollection Mappings { get; } + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddArrayInfos(new(base.Mappings)); - public JsonArrayTypeInfoResolver(JsonSerializerOptions? serializerOptions = null) : base(serializerOptions) - { - Mappings = new TypeInfoMappingCollection(base.Mappings); - AddArrayInfos(Mappings); - } + public JsonArrayTypeInfoResolver(JsonSerializerOptions? serializerOptions = null) + : base(serializerOptions) { } public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) => Mappings.Find(type, dataTypeName, options); diff --git a/src/Npgsql/Internal/Resolvers/LTreeTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/LTreeTypeInfoResolver.cs index 129f73eecd..79767f21e1 100644 --- a/src/Npgsql/Internal/Resolvers/LTreeTypeInfoResolver.cs +++ b/src/Npgsql/Internal/Resolvers/LTreeTypeInfoResolver.cs @@ -5,23 +5,16 @@ namespace Npgsql.Internal.Resolvers; -sealed class LTreeTypeInfoResolver : IPgTypeInfoResolver +class LTreeTypeInfoResolver : IPgTypeInfoResolver { const byte LTreeVersion = 1; - TypeInfoMappingCollection Mappings { get; } - - public LTreeTypeInfoResolver() - { - Mappings = new TypeInfoMappingCollection(); - AddInfos(Mappings); - // TODO: Opt-in only - AddArrayInfos(Mappings); - } + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddInfos(new()); public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) => Mappings.Find(type, dataTypeName, options); - static void AddInfos(TypeInfoMappingCollection mappings) + static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings) { mappings.AddType("ltree", static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(LTreeVersion, new StringTextConverter(options.TextEncoding))), @@ -32,13 +25,17 @@ static void AddInfos(TypeInfoMappingCollection mappings) mappings.AddType("ltxtquery", static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(LTreeVersion, new StringTextConverter(options.TextEncoding))), MatchRequirement.DataTypeName); + + return mappings; } - static void AddArrayInfos(TypeInfoMappingCollection mappings) + protected static TypeInfoMappingCollection AddArrayInfos(TypeInfoMappingCollection mappings) { mappings.AddArrayType("ltree"); mappings.AddArrayType("lquery"); mappings.AddArrayType("ltxtquery"); + + return mappings; } public static void 
CheckUnsupported(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) @@ -49,3 +46,12 @@ public static void CheckUnsupported(Type? type, DataTypeName? dataType typeof(TBuilder).Name)); } } + +sealed class LTreeArrayTypeInfoResolver : LTreeTypeInfoResolver, IPgTypeInfoResolver +{ + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddArrayInfos(new(base.Mappings)); + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); +} diff --git a/src/Npgsql/Internal/Resolvers/NetworkTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/NetworkTypeInfoResolver.cs index ecf393d7d8..9b1ed63734 100644 --- a/src/Npgsql/Internal/Resolvers/NetworkTypeInfoResolver.cs +++ b/src/Npgsql/Internal/Resolvers/NetworkTypeInfoResolver.cs @@ -8,22 +8,15 @@ namespace Npgsql.Internal.Resolvers; -sealed class NetworkTypeInfoResolver : IPgTypeInfoResolver +class NetworkTypeInfoResolver : IPgTypeInfoResolver { - TypeInfoMappingCollection Mappings { get; } - - public NetworkTypeInfoResolver() - { - Mappings = new TypeInfoMappingCollection(); - AddInfos(Mappings); - // TODO: Opt-in only - AddArrayInfos(Mappings); - } + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddInfos(new()); public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) => Mappings.Find(type, dataTypeName, options); - static void AddInfos(TypeInfoMappingCollection mappings) + static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings) { // macaddr mappings.AddType(DataTypeNames.MacAddr, @@ -59,9 +52,11 @@ static PgTypeInfo CreateInfo(PgSerializerOptions options, TypeInfoMapping resolv return resolvedMapping.CreateInfo(options, converter); } + + return mappings; } - static void AddArrayInfos(TypeInfoMappingCollection mappings) + protected static TypeInfoMappingCollection AddArrayInfos(TypeInfoMappingCollection mappings) { // macaddr mappings.AddArrayType(DataTypeNames.MacAddr); @@ -73,5 +68,16 @@ static void AddArrayInfos(TypeInfoMappingCollection mappings) // cidr mappings.AddStructArrayType(DataTypeNames.Cidr); + + return mappings; } } + +sealed class NetworkArrayTypeInfoResolver : NetworkTypeInfoResolver, IPgTypeInfoResolver +{ + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddArrayInfos(new(base.Mappings)); + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); +} diff --git a/src/Npgsql/Internal/Resolvers/RangeTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/RangeTypeInfoResolver.cs index 57fc75e978..f82c26c462 100644 --- a/src/Npgsql/Internal/Resolvers/RangeTypeInfoResolver.cs +++ b/src/Npgsql/Internal/Resolvers/RangeTypeInfoResolver.cs @@ -14,21 +14,15 @@ namespace Npgsql.Internal.Resolvers; // TODO improve the ability to switch on server capability. 
class RangeTypeInfoResolver : IPgTypeInfoResolver { - protected TypeInfoMappingCollection Mappings { get; } - protected TypeInfoMappingCollection MappingsWithMultiRanges { get; } - - public RangeTypeInfoResolver() - { - Mappings = new TypeInfoMappingCollection(); - AddInfos(Mappings, supportsMultiRange: false); - MappingsWithMultiRanges = new TypeInfoMappingCollection(); - AddInfos(MappingsWithMultiRanges, supportsMultiRange: true); - } + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddInfos(new(), supportsMultirange: false); + TypeInfoMappingCollection? _mappingsWithMultiranges; + protected TypeInfoMappingCollection MappingsWithMultiranges => _mappingsWithMultiranges ??= AddInfos(new(), supportsMultirange: true); public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) - => (options.DatabaseInfo.SupportsMultirangeTypes ? MappingsWithMultiRanges : Mappings).Find(type, dataTypeName, options); + => (options.DatabaseInfo.SupportsMultirangeTypes ? 
MappingsWithMultiranges : Mappings).Find(type, dataTypeName, options); - static void AddInfos(TypeInfoMappingCollection mappings, bool supportsMultiRange) + static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings, bool supportsMultirange) { // numeric ranges mappings.AddStructType>(DataTypeNames.Int4Range, @@ -106,7 +100,7 @@ static void AddInfos(TypeInfoMappingCollection mappings, bool supportsMultiRange mapping.CreateInfo(options, CreateRangeConverter(new DateOnlyDateConverter(options.EnableDateTimeInfinityConversions), options))); #endif - if (supportsMultiRange) + if (supportsMultirange) { // int4multirange mappings.AddType[]>(DataTypeNames.Int4Multirange, @@ -150,14 +144,14 @@ static void AddInfos(TypeInfoMappingCollection mappings, bool supportsMultiRange } else { - mappings.AddType[]>(DataTypeNames.TsMultirange, + mappings.AddResolverType[]>(DataTypeNames.TsMultirange, static (options, mapping, dataTypeNameMatch) => mapping.CreateInfo(options, DateTimeConverterResolver.CreateMultirangeResolver[], NpgsqlRange>(options, options.GetCanonicalTypeId(DataTypeNames.TsTzMultirange), options.GetCanonicalTypeId(DataTypeNames.TsMultirange), options.EnableDateTimeInfinityConversions), dataTypeNameMatch), isDefault: true); - mappings.AddType>>(DataTypeNames.TsMultirange, + mappings.AddResolverType>>(DataTypeNames.TsMultirange, static (options, mapping, dataTypeNameMatch) => mapping.CreateInfo(options, DateTimeConverterResolver.CreateMultirangeResolver>, NpgsqlRange>(options, options.GetCanonicalTypeId(DataTypeNames.TsTzMultirange), @@ -195,14 +189,14 @@ static void AddInfos(TypeInfoMappingCollection mappings, bool supportsMultiRange } else { - mappings.AddType[]>(DataTypeNames.TsTzMultirange, + mappings.AddResolverType[]>(DataTypeNames.TsTzMultirange, static (options, mapping, dataTypeNameMatch) => mapping.CreateInfo(options, DateTimeConverterResolver.CreateMultirangeResolver[], NpgsqlRange>(options, 
options.GetCanonicalTypeId(DataTypeNames.TsTzMultirange), options.GetCanonicalTypeId(DataTypeNames.TsMultirange), options.EnableDateTimeInfinityConversions), dataTypeNameMatch), isDefault: true); - mappings.AddType>>(DataTypeNames.TsTzMultirange, + mappings.AddResolverType>>(DataTypeNames.TsTzMultirange, static (options, mapping, dataTypeNameMatch) => mapping.CreateInfo(options, DateTimeConverterResolver.CreateMultirangeResolver>, NpgsqlRange>(options, options.GetCanonicalTypeId(DataTypeNames.TsTzMultirange), @@ -247,9 +241,11 @@ static void AddInfos(TypeInfoMappingCollection mappings, bool supportsMultiRange CreateRangeConverter(new DateOnlyDateConverter(options.EnableDateTimeInfinityConversions), options), options))); #endif } + + return mappings; } - protected static void AddArrayInfos(TypeInfoMappingCollection mappings, bool supportsMultiRange) + protected static TypeInfoMappingCollection AddArrayInfos(TypeInfoMappingCollection mappings, bool supportsMultiRange) { // numeric ranges mappings.AddStructArrayType>(DataTypeNames.Int4Range); @@ -338,6 +334,8 @@ protected static void AddArrayInfos(TypeInfoMappingCollection mappings, bool sup mappings.AddArrayType>>(DataTypeNames.DateMultirange); #endif } + + return mappings; } public static void ThrowIfUnsupported(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) @@ -421,17 +419,12 @@ public static void ThrowIfUnsupported(Type? type, DataTypeName? dataTy sealed class RangeArrayTypeInfoResolver : RangeTypeInfoResolver, IPgTypeInfoResolver { - new TypeInfoMappingCollection Mappings { get; } - new TypeInfoMappingCollection MappingsWithMultiRanges { get; } + TypeInfoMappingCollection? 
_mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddArrayInfos(new(base.Mappings), supportsMultiRange: false); - public RangeArrayTypeInfoResolver() - { - Mappings = new TypeInfoMappingCollection(base.Mappings); - AddArrayInfos(Mappings, supportsMultiRange: false); - MappingsWithMultiRanges = new TypeInfoMappingCollection(base.MappingsWithMultiRanges); - AddArrayInfos(MappingsWithMultiRanges, supportsMultiRange: true); - } + TypeInfoMappingCollection? _mappingsWithMultiranges; + new TypeInfoMappingCollection MappingsWithMultiranges => _mappingsWithMultiranges ??= AddArrayInfos(new(base.MappingsWithMultiranges), supportsMultiRange: true); public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) - => (options.DatabaseInfo.SupportsMultirangeTypes ? MappingsWithMultiRanges : Mappings).Find(type, dataTypeName, options); + => (options.DatabaseInfo.SupportsMultirangeTypes ? MappingsWithMultiranges : Mappings).Find(type, dataTypeName, options); } diff --git a/src/Npgsql/Internal/Resolvers/RecordTypeInfoResolvers.cs b/src/Npgsql/Internal/Resolvers/RecordTypeInfoResolvers.cs index 4cedd542f1..7b2920e799 100644 --- a/src/Npgsql/Internal/Resolvers/RecordTypeInfoResolvers.cs +++ b/src/Npgsql/Internal/Resolvers/RecordTypeInfoResolvers.cs @@ -4,25 +4,32 @@ using Npgsql.Internal.Converters; using Npgsql.Internal.Postgres; using Npgsql.Properties; -using Npgsql.TypeMapping; namespace Npgsql.Internal.Resolvers; class RecordTypeInfoResolver : IPgTypeInfoResolver { - protected TypeInfoMappingCollection Mappings { get; } = new(); - public RecordTypeInfoResolver() => AddInfos(Mappings); + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddInfos(new()); public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) => Mappings.Find(type, dataTypeName, options); - static void AddInfos(TypeInfoMappingCollection mappings) - => mappings.AddType(DataTypeNames.Record, static (options, mapping, _) => + static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings) + { + mappings.AddType(DataTypeNames.Record, static (options, mapping, _) => mapping.CreateInfo(options, new RecordConverter(options), supportsWriting: false), MatchRequirement.DataTypeName); - protected static void AddArrayInfos(TypeInfoMappingCollection mappings) - => mappings.AddArrayType(DataTypeNames.Record); + return mappings; + } + + protected static TypeInfoMappingCollection AddArrayInfos(TypeInfoMappingCollection mappings) + { + mappings.AddArrayType(DataTypeNames.Record); + + return mappings; + } public static void CheckUnsupported(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) { @@ -40,13 +47,8 @@ public static void CheckUnsupported(Type? type, DataTypeName? dataType sealed class RecordArrayTypeInfoResolver : RecordTypeInfoResolver, IPgTypeInfoResolver { - public RecordArrayTypeInfoResolver() - { - Mappings = new TypeInfoMappingCollection(base.Mappings.Items); - AddArrayInfos(Mappings); - } - - new TypeInfoMappingCollection Mappings { get; } + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddArrayInfos(new(base.Mappings)); public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) => Mappings.Find(type, dataTypeName, options); @@ -56,14 +58,14 @@ public RecordArrayTypeInfoResolver() [RequiresDynamicCode("Tupled records need to construct a generic converter for a statically unknown (value)tuple type.")] class TupledRecordTypeInfoResolver : IPgTypeInfoResolver { - protected TypeInfoMappingCollection Mappings { get; } = new(); - public TupledRecordTypeInfoResolver() => AddInfos(Mappings); + TypeInfoMappingCollection? 
_mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddInfos(new()); public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) => Mappings.Find(type, dataTypeName, options); // Stand-in type, type match predicate does the actual work. - static void AddInfos(TypeInfoMappingCollection mappings) + static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings) { mappings.AddType>(DataTypeNames.Record, Factory, mapping => mapping with @@ -80,12 +82,16 @@ static void AddInfos(TypeInfoMappingCollection mappings) TypeMatchPredicate = type => type is { IsConstructedGenericType: true, FullName: not null } && type.FullName.StartsWith("System.ValueTuple", StringComparison.Ordinal) }); + + return mappings; } - protected static void AddArrayInfos(TypeInfoMappingCollection mappings) + protected static TypeInfoMappingCollection AddArrayInfos(TypeInfoMappingCollection mappings) { mappings.AddArrayType>(DataTypeNames.Record, suppressObjectMapping: true); mappings.AddStructArrayType>(DataTypeNames.Record, suppressObjectMapping: true); + + return mappings; } static readonly TypeInfoFactory Factory = static (options, mapping, _) => @@ -129,13 +135,8 @@ static Func CreateFactory(ConstructorInfo constructor, int const [RequiresDynamicCode("Tupled records need to construct a generic converter for a statically unknown (value)tuple type.")] sealed class TupledRecordArrayTypeInfoResolver : TupledRecordTypeInfoResolver, IPgTypeInfoResolver { - public TupledRecordArrayTypeInfoResolver() - { - Mappings = new TypeInfoMappingCollection(base.Mappings.Items); - AddArrayInfos(Mappings); - } - - new TypeInfoMappingCollection Mappings { get; } + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddArrayInfos(new(base.Mappings)); public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) => Mappings.Find(type, dataTypeName, options); diff --git a/src/Npgsql/Internal/Resolvers/UnsupportedTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/UnsupportedTypeInfoResolver.cs index 3b98d26a2c..a2268f1bd3 100644 --- a/src/Npgsql/Internal/Resolvers/UnsupportedTypeInfoResolver.cs +++ b/src/Npgsql/Internal/Resolvers/UnsupportedTypeInfoResolver.cs @@ -3,7 +3,6 @@ using Npgsql.Internal.Postgres; using Npgsql.PostgresTypes; using Npgsql.Properties; -using Npgsql.TypeMapping; namespace Npgsql.Internal.Resolvers; diff --git a/src/Npgsql/Internal/TypeInfoMapping.cs b/src/Npgsql/Internal/TypeInfoMapping.cs index 24df062efb..8a4d4bc2e6 100644 --- a/src/Npgsql/Internal/TypeInfoMapping.cs +++ b/src/Npgsql/Internal/TypeInfoMapping.cs @@ -1,5 +1,4 @@ using System; -using System.Collections; using System.Collections.Generic; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; @@ -154,7 +153,7 @@ public TypeInfoMappingCollection(IEnumerable items) return fallback?.Factory(options, fallback.Value, dataTypeName is not null); } - bool TryFindMapping(Type type, string dataTypeName, out TypeInfoMapping value) + bool TryGetMapping(Type type, string dataTypeName, out TypeInfoMapping value) { foreach (var mapping in _baseCollection?._items ?? _items) { @@ -171,8 +170,8 @@ bool TryFindMapping(Type type, string dataTypeName, out TypeInfoMapping value) } [MethodImpl(MethodImplOptions.NoInlining)] - TypeInfoMapping FindMapping(Type type, string dataTypeName) - => TryFindMapping(type, dataTypeName, out var info) ? info : throw new InvalidOperationException($"Could not find mapping for {type} <-> {dataTypeName}"); + TypeInfoMapping GetMapping(Type type, string dataTypeName) + => TryGetMapping(type, dataTypeName, out var info) ? info : throw new InvalidOperationException($"Could not find mapping for {type} <-> {dataTypeName}"); // Helper to eliminate generic display class duplication. 
static TypeInfoFactory CreateComposedFactory(Type mappingType, TypeInfoMapping innerMapping, Func mapper, bool copyPreferredFormat = false, bool supportsWriting = true) @@ -181,7 +180,7 @@ static TypeInfoFactory CreateComposedFactory(Type mappingType, TypeInfoMapping i var innerInfo = innerMapping.Factory(options, innerMapping, dataTypeNameMatch); var converter = mapper(mapping, innerInfo); var preferredFormat = copyPreferredFormat ? innerInfo.PreferredFormat : null; - var writingSupported = supportsWriting && innerInfo.SupportsWriting && mapping.Type != typeof(object); + var writingSupported = supportsWriting && innerInfo.SupportsWriting; var unboxedType = ComputeUnboxedType(defaultType: mappingType, converter.TypeToConvert, mapping.Type); return new PgTypeInfo(options, converter, TypeInfoMappingHelpers.ResolveFullyQualifiedName(options, mapping.DataTypeName), unboxedType) @@ -198,7 +197,7 @@ static TypeInfoFactory CreateComposedFactory(Type mappingType, TypeInfoMapping i var innerInfo = (PgResolverTypeInfo)innerMapping.Factory(options, innerMapping, dataTypeNameMatch); var resolver = mapper(mapping, innerInfo); var preferredFormat = copyPreferredFormat ? innerInfo.PreferredFormat : null; - var writingSupported = supportsWriting && innerInfo.SupportsWriting && mapping.Type != typeof(object); + var writingSupported = supportsWriting && innerInfo.SupportsWriting; var unboxedType = ComputeUnboxedType(defaultType: mappingType, resolver.TypeToConvert, mapping.Type); // We include the data type name if the inner info did so as well. // This way we can rely on its logic around resolvedDataTypeName, including when it ignores that flag. @@ -268,26 +267,41 @@ public void AddType(string dataTypeName, TypeInfoFactory createInfo, MatchReq public void AddType(string dataTypeName, TypeInfoFactory createInfo, Func? configure) where T : class { var mapping = new TypeInfoMapping(typeof(T), dataTypeName, createInfo); - _items.Add(configure?.Invoke(mapping) ?? 
mapping); + mapping = configure?.Invoke(mapping) ?? mapping; + if (typeof(T) != typeof(object) && mapping.MatchRequirement is MatchRequirement.DataTypeName or MatchRequirement.Single && !TryGetMapping(typeof(object), mapping.DataTypeName, out _)) + _items.Add(new TypeInfoMapping(typeof(object), dataTypeName, + CreateComposedFactory(typeof(T), mapping, static (_, info) => info.GetResolution().Converter, copyPreferredFormat: true)) + { + MatchRequirement = mapping.MatchRequirement + }); + _items.Add(mapping); } - // Aliased to AddType at this time. public void AddResolverType(string dataTypeName, TypeInfoFactory createInfo, bool isDefault = false) where T : class - => AddType(dataTypeName, createInfo, GetDefaultConfigure(isDefault)); + => AddResolverType(dataTypeName, createInfo, GetDefaultConfigure(isDefault)); - // Aliased to AddType at this time. public void AddResolverType(string dataTypeName, TypeInfoFactory createInfo, MatchRequirement matchRequirement) where T : class - => AddType(dataTypeName, createInfo, GetDefaultConfigure(matchRequirement)); + => AddResolverType(dataTypeName, createInfo, GetDefaultConfigure(matchRequirement)); - // Aliased to AddType at this time. public void AddResolverType(string dataTypeName, TypeInfoFactory createInfo, Func? configure) where T : class - => AddType(dataTypeName, createInfo, configure); + { + var mapping = new TypeInfoMapping(typeof(T), dataTypeName, createInfo); + mapping = configure?.Invoke(mapping) ?? 
mapping; + if (typeof(T) != typeof(object) && mapping.MatchRequirement is MatchRequirement.DataTypeName or MatchRequirement.Single && !TryGetMapping(typeof(object), mapping.DataTypeName, out _)) + _items.Add(new TypeInfoMapping(typeof(object), dataTypeName, + CreateComposedFactory(typeof(T), mapping, static (_, info) => info.GetConverterResolver(), copyPreferredFormat: true)) + { + MatchRequirement = mapping.MatchRequirement + }); + _items.Add(mapping); + } + public void AddArrayType(string elementDataTypeName) where TElement : class - => AddArrayType(FindMapping(typeof(TElement), elementDataTypeName), suppressObjectMapping: false); + => AddArrayType(GetMapping(typeof(TElement), elementDataTypeName), suppressObjectMapping: false); public void AddArrayType(string elementDataTypeName, bool suppressObjectMapping) where TElement : class - => AddArrayType(FindMapping(typeof(TElement), elementDataTypeName), suppressObjectMapping); + => AddArrayType(GetMapping(typeof(TElement), elementDataTypeName), suppressObjectMapping); public void AddArrayType(TypeInfoMapping elementMapping) where TElement : class => AddArrayType(elementMapping, suppressObjectMapping: false); @@ -300,7 +314,7 @@ public void AddArrayType(TypeInfoMapping elementMapping, bool suppress var arrayDataTypeName = GetArrayDataTypeName(elementMapping.DataTypeName); - AddArrayType(elementMapping, typeof(TElement[]), CreateArrayBasedConverter, arrayTypeMatchPredicate, suppressObjectMapping: suppressObjectMapping || TryFindMapping(typeof(object), arrayDataTypeName, out _)); + AddArrayType(elementMapping, typeof(TElement[]), CreateArrayBasedConverter, arrayTypeMatchPredicate, suppressObjectMapping: suppressObjectMapping || TryGetMapping(typeof(object), arrayDataTypeName, out _)); AddArrayType(elementMapping, typeof(List), CreateListBasedConverter, listTypeMatchPredicate, suppressObjectMapping: true); void AddArrayType(TypeInfoMapping elementMapping, Type type, Func converter, Func? 
typeMatchPredicate = null, bool suppressObjectMapping = false) @@ -324,10 +338,10 @@ void AddArrayType(TypeInfoMapping elementMapping, Type type, Func(string elementDataTypeName) where TElement : class - => AddResolverArrayType(FindMapping(typeof(TElement), elementDataTypeName), suppressObjectMapping: false); + => AddResolverArrayType(GetMapping(typeof(TElement), elementDataTypeName), suppressObjectMapping: false); public void AddResolverArrayType(string elementDataTypeName, bool suppressObjectMapping) where TElement : class - => AddResolverArrayType(FindMapping(typeof(TElement), elementDataTypeName), suppressObjectMapping); + => AddResolverArrayType(GetMapping(typeof(TElement), elementDataTypeName), suppressObjectMapping); public void AddResolverArrayType(TypeInfoMapping elementMapping) where TElement : class => AddResolverArrayType(elementMapping, suppressObjectMapping: false); @@ -340,7 +354,7 @@ public void AddResolverArrayType(TypeInfoMapping elementMapping, bool var arrayDataTypeName = GetArrayDataTypeName(elementMapping.DataTypeName); - AddResolverArrayType(elementMapping, typeof(TElement[]), CreateArrayBasedConverterResolver, arrayTypeMatchPredicate, suppressObjectMapping: suppressObjectMapping || TryFindMapping(typeof(object), arrayDataTypeName, out _)); + AddResolverArrayType(elementMapping, typeof(TElement[]), CreateArrayBasedConverterResolver, arrayTypeMatchPredicate, suppressObjectMapping: suppressObjectMapping || TryGetMapping(typeof(object), arrayDataTypeName, out _)); AddResolverArrayType(elementMapping, typeof(List), CreateListBasedConverterResolver, listTypeMatchPredicate, suppressObjectMapping: true); void AddResolverArrayType(TypeInfoMapping elementMapping, Type type, Func converter, Func? 
typeMatchPredicate = null, bool suppressObjectMapping = false) @@ -381,6 +395,12 @@ void AddStructType(Type type, Type nullableType, string dataTypeName, TypeInfoFa { var mapping = new TypeInfoMapping(type, dataTypeName, createInfo); mapping = configure?.Invoke(mapping) ?? mapping; + if (type != typeof(object) && mapping.MatchRequirement is MatchRequirement.DataTypeName or MatchRequirement.Single && !TryGetMapping(typeof(object), mapping.DataTypeName, out _)) + _items.Add(new TypeInfoMapping(typeof(object), dataTypeName, + CreateComposedFactory(type, mapping, static (_, info) => info.GetResolution().Converter, copyPreferredFormat: true)) + { + MatchRequirement = mapping.MatchRequirement + }); _items.Add(mapping); _items.Add(new TypeInfoMapping(nullableType, dataTypeName, CreateComposedFactory(nullableType, mapping, nullableConverter, copyPreferredFormat: true)) @@ -395,10 +415,10 @@ void AddStructType(Type type, Type nullableType, string dataTypeName, TypeInfoFa } public void AddStructArrayType(string elementDataTypeName) where TElement : struct - => AddStructArrayType(FindMapping(typeof(TElement), elementDataTypeName), FindMapping(typeof(TElement?), elementDataTypeName), suppressObjectMapping: false); + => AddStructArrayType(GetMapping(typeof(TElement), elementDataTypeName), GetMapping(typeof(TElement?), elementDataTypeName), suppressObjectMapping: false); public void AddStructArrayType(string elementDataTypeName, bool suppressObjectMapping) where TElement : struct - => AddStructArrayType(FindMapping(typeof(TElement), elementDataTypeName), FindMapping(typeof(TElement?), elementDataTypeName), suppressObjectMapping); + => AddStructArrayType(GetMapping(typeof(TElement), elementDataTypeName), GetMapping(typeof(TElement?), elementDataTypeName), suppressObjectMapping); public void AddStructArrayType(TypeInfoMapping elementMapping, TypeInfoMapping nullableElementMapping) where TElement : struct => AddStructArrayType(elementMapping, nullableElementMapping, 
suppressObjectMapping: false); @@ -416,7 +436,7 @@ public void AddStructArrayType(TypeInfoMapping elementMapping, TypeInf AddStructArrayType(elementMapping, nullableElementMapping, typeof(TElement[]), typeof(TElement?[]), CreateArrayBasedConverter, CreateArrayBasedConverter, - arrayTypeMatchPredicate, nullableArrayTypeMatchPredicate, suppressObjectMapping: suppressObjectMapping || TryFindMapping(typeof(object), arrayDataTypeName, out _)); + arrayTypeMatchPredicate, nullableArrayTypeMatchPredicate, suppressObjectMapping: suppressObjectMapping || TryGetMapping(typeof(object), arrayDataTypeName, out _)); // Don't add the object converter for the list based converter. AddStructArrayType(elementMapping, nullableElementMapping, typeof(List), typeof(List), @@ -491,6 +511,12 @@ void AddResolverStructType(Type type, Type nullableType, string dataTypeName, Ty { var mapping = new TypeInfoMapping(type, dataTypeName, createInfo); mapping = configure?.Invoke(mapping) ?? mapping; + if (type != typeof(object) && mapping.MatchRequirement is MatchRequirement.DataTypeName or MatchRequirement.Single && !TryGetMapping(typeof(object), mapping.DataTypeName, out _)) + _items.Add(new TypeInfoMapping(typeof(object), dataTypeName, + CreateComposedFactory(type, mapping, static (_, info) => info.GetConverterResolver(), copyPreferredFormat: true)) + { + MatchRequirement = mapping.MatchRequirement + }); _items.Add(mapping); _items.Add(new TypeInfoMapping(nullableType, dataTypeName, CreateComposedFactory(nullableType, mapping, nullableConverter, copyPreferredFormat: true)) @@ -505,10 +531,10 @@ void AddResolverStructType(Type type, Type nullableType, string dataTypeName, Ty } public void AddResolverStructArrayType(string elementDataTypeName) where TElement : struct - => AddResolverStructArrayType(FindMapping(typeof(TElement), elementDataTypeName), FindMapping(typeof(TElement?), elementDataTypeName), suppressObjectMapping: false); + => AddResolverStructArrayType(GetMapping(typeof(TElement), 
elementDataTypeName), GetMapping(typeof(TElement?), elementDataTypeName), suppressObjectMapping: false); public void AddResolverStructArrayType(string elementDataTypeName, bool suppressObjectMapping) where TElement : struct - => AddResolverStructArrayType(FindMapping(typeof(TElement), elementDataTypeName), FindMapping(typeof(TElement?), elementDataTypeName), suppressObjectMapping); + => AddResolverStructArrayType(GetMapping(typeof(TElement), elementDataTypeName), GetMapping(typeof(TElement?), elementDataTypeName), suppressObjectMapping); public void AddResolverStructArrayType(TypeInfoMapping elementMapping, TypeInfoMapping nullableElementMapping) where TElement : struct => AddResolverStructArrayType(elementMapping, nullableElementMapping, suppressObjectMapping: false); @@ -525,7 +551,7 @@ public void AddResolverStructArrayType(TypeInfoMapping elementMapping, AddResolverStructArrayType(elementMapping, nullableElementMapping, typeof(TElement[]), typeof(TElement?[]), CreateArrayBasedConverterResolver, - CreateArrayBasedConverterResolver, suppressObjectMapping: suppressObjectMapping || TryFindMapping(typeof(object), arrayDataTypeName, out _), arrayTypeMatchPredicate, nullableArrayTypeMatchPredicate); + CreateArrayBasedConverterResolver, suppressObjectMapping: suppressObjectMapping || TryGetMapping(typeof(object), arrayDataTypeName, out _), arrayTypeMatchPredicate, nullableArrayTypeMatchPredicate); // Don't add the object converter for the list based converter. 
AddResolverStructArrayType(elementMapping, nullableElementMapping, typeof(List), typeof(List), @@ -580,7 +606,7 @@ PgTypeInfo CreateComposedPerInstance(PgTypeInfo innerTypeInfo, PgTypeInfo nullab } public void AddPolymorphicResolverArrayType(string elementDataTypeName, Func> elementToArrayConverterFactory) - => AddPolymorphicResolverArrayType(FindMapping(typeof(object), elementDataTypeName), elementToArrayConverterFactory); + => AddPolymorphicResolverArrayType(GetMapping(typeof(object), elementDataTypeName), elementToArrayConverterFactory); public void AddPolymorphicResolverArrayType(TypeInfoMapping elementMapping, Func> elementToArrayConverterFactory) { diff --git a/src/Npgsql/NpgsqlDataSourceBuilder.cs b/src/Npgsql/NpgsqlDataSourceBuilder.cs index 8837a82782..a5ce88cd1d 100644 --- a/src/Npgsql/NpgsqlDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlDataSourceBuilder.cs @@ -66,6 +66,10 @@ internal static void ResetGlobalMappings(bool overwrite) new JsonArrayTypeInfoResolver(), new RangeArrayTypeInfoResolver(), new RecordArrayTypeInfoResolver(), + new FullTextSearchArrayTypeInfoResolver(), + new NetworkArrayTypeInfoResolver(), + new GeometricArrayTypeInfoResolver(), + new LTreeArrayTypeInfoResolver() }, overwrite); static NpgsqlDataSourceBuilder() @@ -86,6 +90,10 @@ void AddDefaultFeatures() AddTypeInfoResolver(UnsupportedTypeInfoResolver); // Reverse order arrays. 
+ AddTypeInfoResolver(new LTreeArrayTypeInfoResolver()); + AddTypeInfoResolver(new GeometricArrayTypeInfoResolver()); + AddTypeInfoResolver(new NetworkArrayTypeInfoResolver()); + AddTypeInfoResolver(new FullTextSearchArrayTypeInfoResolver()); AddTypeInfoResolver(new RecordArrayTypeInfoResolver()); AddTypeInfoResolver(new RangeArrayTypeInfoResolver()); AddTypeInfoResolver(new JsonArrayTypeInfoResolver()); diff --git a/src/Npgsql/NpgsqlNestedDataReader.cs b/src/Npgsql/NpgsqlNestedDataReader.cs index 305e3dd197..1d499585f8 100644 --- a/src/Npgsql/NpgsqlNestedDataReader.cs +++ b/src/Npgsql/NpgsqlNestedDataReader.cs @@ -492,7 +492,10 @@ PgConverterInfo GetOrAddConverterInfo(Type type, ColumnInfo column, int ordinal, { if (column.LastConverterInfo is { IsDefault: false } lastInfo && lastInfo.TypeToConvert == type) { - asObject = lastInfo.IsBoxingConverter; + // As TypeInfoMappingCollection is always adding object mappings for + // default/datatypename mappings, we'll also check Converter.TypeToConvert. + // If we have an exact match we are still able to use e.g. a converter for ints in an unboxed fashion. + asObject = lastInfo.IsBoxingConverter && lastInfo.Converter.TypeToConvert != type; return lastInfo; } @@ -506,7 +509,10 @@ PgConverterInfo GetOrAddConverterInfo(Type type, ColumnInfo column, int ordinal, if (odfInfo.TypeToConvert == type) { - asObject = odfInfo.IsBoxingConverter; + // As TypeInfoMappingCollection is always adding object mappings for + // default/datatypename mappings, we'll also check Converter.TypeToConvert. + // If we have an exact match we are still able to use e.g. a converter for ints in an unboxed fashion. 
+ asObject = odfInfo.IsBoxingConverter && odfInfo.Converter.TypeToConvert != type; return odfInfo; } } diff --git a/src/Npgsql/NpgsqlParameter`.cs b/src/Npgsql/NpgsqlParameter`.cs index 5f6291771c..5c0c34a09b 100644 --- a/src/Npgsql/NpgsqlParameter`.cs +++ b/src/Npgsql/NpgsqlParameter`.cs @@ -89,7 +89,7 @@ private protected override void BindCore(bool allowNullReference = false) // If we're object typed we should support DBNull, call into base BindCore. if (typeof(T) == typeof(object) || TypeInfo!.IsBoxing || _useSubStream) { - base.BindCore(TypeInfo!.IsBoxing || _useSubStream || allowNullReference); + base.BindCore(typeof(T) != typeof(object) && (TypeInfo!.IsBoxing || _useSubStream || allowNullReference)); return; } diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index dc4d53060d..f7078b6182 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -342,6 +342,11 @@ internal void ResetTypeMappings() /// The same builder instance so that multiple calls can be chained. 
public NpgsqlSlimDataSourceBuilder EnableArrays() { + AddTypeInfoResolver(new LTreeArrayTypeInfoResolver()); + AddTypeInfoResolver(new GeometricArrayTypeInfoResolver()); + AddTypeInfoResolver(new NetworkArrayTypeInfoResolver()); + AddTypeInfoResolver(new FullTextSearchArrayTypeInfoResolver()); + AddTypeInfoResolver(new RecordArrayTypeInfoResolver()); AddTypeInfoResolver(new RangeArrayTypeInfoResolver()); AddTypeInfoResolver(new ExtraConversionsArrayTypeInfoResolver()); AddTypeInfoResolver(new AdoArrayTypeInfoResolver()); diff --git a/test/Npgsql.Tests/Types/LTreeTests.cs b/test/Npgsql.Tests/Types/LTreeTests.cs index 5d104a4c54..f836b49ca0 100644 --- a/test/Npgsql.Tests/Types/LTreeTests.cs +++ b/test/Npgsql.Tests/Types/LTreeTests.cs @@ -42,6 +42,17 @@ public async Task NpgsqlSlimSourceBuilder_EnableLTree() dataSourceBuilder.EnableLTree(); await using var dataSource = dataSourceBuilder.Build(); + await AssertType(dataSource, "Top.Science.Astronomy", "Top.Science.Astronomy", "ltree", NpgsqlDbType.LTree, isDefaultForWriting: false, skipArrayCheck: true); + } + + [Test] + public async Task NpgsqlSlimSourceBuilder_EnableArrays() + { + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + dataSourceBuilder.EnableLTree(); + dataSourceBuilder.EnableArrays(); + await using var dataSource = dataSourceBuilder.Build(); + await AssertType(dataSource, "Top.Science.Astronomy", "Top.Science.Astronomy", "ltree", NpgsqlDbType.LTree, isDefaultForWriting: false); } diff --git a/test/Npgsql.Tests/Types/MiscTypeTests.cs b/test/Npgsql.Tests/Types/MiscTypeTests.cs index c080fa1f5a..d689a268ef 100644 --- a/test/Npgsql.Tests/Types/MiscTypeTests.cs +++ b/test/Npgsql.Tests/Types/MiscTypeTests.cs @@ -62,14 +62,14 @@ public async Task Null() } // Setting non-generic NpgsqlParameter.Value to null is not allowed, only DBNull.Value - await using (var cmd = new NpgsqlCommand("SELECT @p::TEXT", conn)) + await using (var cmd = new NpgsqlCommand("SELECT @p4::TEXT", conn)) { 
cmd.Parameters.AddWithValue("p4", NpgsqlDbType.Text, null!); Assert.That(async () => await cmd.ExecuteReaderAsync(), Throws.Exception.TypeOf()); } // Setting generic NpgsqlParameter.Value to null is not allowed, only DBNull.Value - await using (var cmd = new NpgsqlCommand("SELECT @p::TEXT", conn)) + await using (var cmd = new NpgsqlCommand("SELECT @p4::TEXT", conn)) { cmd.Parameters.Add(new NpgsqlParameter("p4", NpgsqlDbType.Text) { Value = null! }); Assert.That(async () => await cmd.ExecuteReaderAsync(), Throws.Exception.TypeOf()); From eb7a8f930d4b84cea9a89f86341891ccfe6357cd Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Thu, 2 Nov 2023 21:46:55 +0100 Subject: [PATCH 276/761] Fix type info cache being constructed with the wrong type (#5370) --- src/Npgsql/Internal/PgSerializerOptions.cs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/Npgsql/Internal/PgSerializerOptions.cs b/src/Npgsql/Internal/PgSerializerOptions.cs index 5ee9077458..6edb92906f 100644 --- a/src/Npgsql/Internal/PgSerializerOptions.cs +++ b/src/Npgsql/Internal/PgSerializerOptions.cs @@ -16,14 +16,13 @@ public sealed class PgSerializerOptions internal static bool IntrospectionCaller { get; set; } readonly Func? _timeZoneProvider; - readonly object _typeInfoCache; + object? _typeInfoCache; internal PgSerializerOptions(NpgsqlDatabaseInfo databaseInfo, Func? timeZoneProvider = null) { _timeZoneProvider = timeZoneProvider; DatabaseInfo = databaseInfo; UnknownPgType = databaseInfo.GetPostgresType("unknown"); - _typeInfoCache = PortableTypeIds ? new TypeInfoCache(this) : new TypeInfoCache(this); } // Represents the 'unknown' type, which can be used for reading and writing arbitrary text values. @@ -62,8 +61,8 @@ internal bool IntrospectionMode // for. PgTypeInfo? GetTypeInfoCore(Type? type, PgTypeId? pgTypeId, bool defaultTypeFallback) => PortableTypeIds - ? 
Unsafe.As>(_typeInfoCache).GetOrAddInfo(type, pgTypeId?.DataTypeName, defaultTypeFallback) - : Unsafe.As>(_typeInfoCache).GetOrAddInfo(type, pgTypeId?.Oid, defaultTypeFallback); + ? Unsafe.As>(_typeInfoCache ??= new TypeInfoCache(this)).GetOrAddInfo(type, pgTypeId?.DataTypeName, defaultTypeFallback) + : Unsafe.As>(_typeInfoCache ??= new TypeInfoCache(this)).GetOrAddInfo(type, pgTypeId?.Oid, defaultTypeFallback); public PgTypeInfo? GetDefaultTypeInfo(PostgresType pgType) => GetTypeInfoCore(null, ToCanonicalTypeId(pgType), false); From d55b81395835b8624465ae31f573327b9e0284d6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 2 Nov 2023 21:18:49 +0000 Subject: [PATCH 277/761] Bump xunit from 2.6.0 to 2.6.1 (#5372) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index c217111f94..b59b1c527d 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -38,7 +38,7 @@ - + From 591f3300466fc9ceed59ba7eb0d675580a1d5068 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Fri, 3 Nov 2023 01:07:39 +0100 Subject: [PATCH 278/761] Fix observed race in lazy mapping code --- .../Internal/DynamicTypeInfoResolver.cs | 2 +- .../Resolvers/JsonDynamicTypeInfoResolver.cs | 24 +++++-------------- 2 files changed, 7 insertions(+), 19 deletions(-) diff --git a/src/Npgsql/Internal/DynamicTypeInfoResolver.cs b/src/Npgsql/Internal/DynamicTypeInfoResolver.cs index 45748a6164..637c337321 100644 --- a/src/Npgsql/Internal/DynamicTypeInfoResolver.cs +++ b/src/Npgsql/Internal/DynamicTypeInfoResolver.cs @@ -18,7 +18,7 @@ public abstract class DynamicTypeInfoResolver : IPgTypeInfoResolver return context?.Find(type, dataTypeName.GetValueOrDefault(), options); } - protected DynamicMappingCollection CreateCollection(TypeInfoMappingCollection? 
baseCollection = null) => new(baseCollection); + protected static DynamicMappingCollection CreateCollection(TypeInfoMappingCollection? baseCollection = null) => new(baseCollection); protected static bool IsTypeOrNullableOfType(Type type, Func predicate, out Type matchedType) { diff --git a/src/Npgsql/Internal/Resolvers/JsonDynamicTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/JsonDynamicTypeInfoResolver.cs index 3e610275fa..df85c54974 100644 --- a/src/Npgsql/Internal/Resolvers/JsonDynamicTypeInfoResolver.cs +++ b/src/Npgsql/Internal/Resolvers/JsonDynamicTypeInfoResolver.cs @@ -4,6 +4,7 @@ using System.Text.Json; using System.Text.Json.Nodes; using System.Text.Json.Serialization.Metadata; +using System.Threading; using Npgsql.Internal.Converters; using Npgsql.Internal.Postgres; @@ -21,24 +22,11 @@ JsonSerializerOptions SerializerOptions => _serializerOptions ??= new(); #endif - Type[] _jsonbClrTypes; - Type[] _jsonClrTypes; + readonly Type[] _jsonbClrTypes; + readonly Type[] _jsonClrTypes; TypeInfoMappingCollection? _mappings; - protected TypeInfoMappingCollection Mappings - { - get - { - if (_mappings is not null) - return _mappings; - - // Publish _mappings before nulling _jsonbClrTypes and _jsonClrTypes as we may be called concurrently. - _mappings = AddInfos(new(), _jsonbClrTypes, _jsonClrTypes, SerializerOptions); - _jsonbClrTypes = null!; - _jsonClrTypes = null!; - return _mappings; - } - } + protected TypeInfoMappingCollection Mappings => _mappings ??= AddInfos(new(), _jsonbClrTypes, _jsonClrTypes, SerializerOptions); public JsonDynamicTypeInfoResolver(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerOptions? serializerOptions = null) { @@ -47,7 +35,7 @@ public JsonDynamicTypeInfoResolver(Type[]? jsonbClrTypes = null, Type[]? 
jsonClr _serializerOptions = serializerOptions; } - TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings, Type[] jsonbClrTypes, Type[] jsonClrTypes, JsonSerializerOptions serializerOptions) + static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings, Type[] jsonbClrTypes, Type[] jsonClrTypes, JsonSerializerOptions serializerOptions) { // We do GetTypeInfo calls directly so we need a resolver. serializerOptions.TypeInfoResolver ??= new DefaultJsonTypeInfoResolver(); @@ -94,7 +82,7 @@ void AddUserMappings(bool jsonb, Type[] clrTypes) return mappings; } - protected TypeInfoMappingCollection AddArrayInfos(TypeInfoMappingCollection mappings, TypeInfoMappingCollection baseMappings) + protected static TypeInfoMappingCollection AddArrayInfos(TypeInfoMappingCollection mappings, TypeInfoMappingCollection baseMappings) { if (baseMappings.Items.Count == 0) return mappings; From 0355acf07990e2124fcb9d6a795d4b2fe6dc8330 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Fri, 3 Nov 2023 15:17:37 +0100 Subject: [PATCH 279/761] Write side perf improvements (#5371) Fixes aspnet/Benchmarks#1923 --- src/Npgsql/Internal/AdoSerializerHelpers.cs | 20 ++-- .../NpgsqlConnector.FrontendMessages.cs | 56 ++++----- src/Npgsql/Internal/NpgsqlWriteBuffer.cs | 12 +- src/Npgsql/Internal/PgWriter.cs | 112 ++++++++---------- src/Npgsql/Internal/Postgres/DataTypeName.cs | 2 +- src/Npgsql/Internal/Postgres/PgTypeId.cs | 13 +- src/Npgsql/NpgsqlBinaryImporter.cs | 4 +- src/Npgsql/NpgsqlParameter.cs | 108 +++++++++-------- src/Npgsql/NpgsqlParameter`.cs | 17 ++- src/Npgsql/PreparedStatement.cs | 10 +- 10 files changed, 179 insertions(+), 175 deletions(-) diff --git a/src/Npgsql/Internal/AdoSerializerHelpers.cs b/src/Npgsql/Internal/AdoSerializerHelpers.cs index f9e63e7a40..d0ea19c7a8 100644 --- a/src/Npgsql/Internal/AdoSerializerHelpers.cs +++ b/src/Npgsql/Internal/AdoSerializerHelpers.cs @@ -43,16 +43,20 @@ public static PgTypeInfo GetTypeInfoForWriting(Type? 
type, PgTypeId? pgTypeId, P { inner = ex; } - return typeInfo ?? ThrowWritingNotSupported(type, - pgTypeString: - pgTypeId is null ? "no NpgsqlDbType or DataTypeName. Try setting one of these values to the expected database type." : - npgsqlDbType is null - ? $"DataTypeName '{options.DatabaseInfo.FindPostgresType(pgTypeId.GetValueOrDefault())?.DisplayName ?? "unknown"}'" - : $"NpgsqlDbType '{npgsqlDbType}'", inner); + return typeInfo ?? ThrowWritingNotSupported(type, options, pgTypeId, npgsqlDbType, inner); // InvalidCastException thrown to align with ADO.NET convention. [DoesNotReturn] - static PgTypeInfo ThrowWritingNotSupported(Type? type, string pgTypeString, Exception? inner = null) - => throw new InvalidCastException($"Writing{(type is null ? "" : $" values of '{type.FullName}'")} is not supported for parameters having {pgTypeString}.", inner); + static PgTypeInfo ThrowWritingNotSupported(Type? type, PgSerializerOptions options, PgTypeId? pgTypeId, NpgsqlDbType? npgsqlDbType, Exception? inner = null) + { + var pgTypeString = pgTypeId is null + ? "no NpgsqlDbType or DataTypeName. Try setting one of these values to the expected database type." + : npgsqlDbType is null + ? $"DataTypeName '{options.DatabaseInfo.FindPostgresType(pgTypeId.GetValueOrDefault())?.DisplayName ?? "unknown"}'" + : $"NpgsqlDbType '{npgsqlDbType}'"; + + throw new InvalidCastException( + $"Writing{(type is null ? 
"" : $" values of '{type.FullName}'")} is not supported for parameters having {pgTypeString}.", inner); + } } } diff --git a/src/Npgsql/Internal/NpgsqlConnector.FrontendMessages.cs b/src/Npgsql/Internal/NpgsqlConnector.FrontendMessages.cs index b3c21b9542..f3e3173124 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.FrontendMessages.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.FrontendMessages.cs @@ -22,18 +22,18 @@ internal Task WriteDescribe(StatementOrPortal statementOrPortal, byte[] asciiNam if (writeBuffer.WriteSpaceLeft < len) return FlushAndWrite(len, statementOrPortal, asciiName, async, cancellationToken); - Write(len, statementOrPortal, asciiName); + Write(writeBuffer, len, statementOrPortal, asciiName); return Task.CompletedTask; async Task FlushAndWrite(int len, StatementOrPortal statementOrPortal, byte[] name, bool async, CancellationToken cancellationToken) { await Flush(async, cancellationToken).ConfigureAwait(false); - Debug.Assert(len <= writeBuffer.WriteSpaceLeft, $"Message of type {GetType().Name} has length {len} which is bigger than the buffer ({writeBuffer.WriteSpaceLeft})"); - Write(len, statementOrPortal, name); + Debug.Assert(len <= WriteBuffer.WriteSpaceLeft, $"Message of type {GetType().Name} has length {len} which is bigger than the buffer ({WriteBuffer.WriteSpaceLeft})"); + Write(WriteBuffer, len, statementOrPortal, name); } [MethodImpl(MethodImplOptions.AggressiveInlining)] - void Write(int len, StatementOrPortal statementOrPortal, byte[] name) + static void Write(NpgsqlWriteBuffer writeBuffer, int len, StatementOrPortal statementOrPortal, byte[] name) { writeBuffer.WriteByte(FrontendMessageCode.Describe); writeBuffer.WriteInt32(len - 1); @@ -51,18 +51,18 @@ internal Task WriteSync(bool async, CancellationToken cancellationToken = defaul if (writeBuffer.WriteSpaceLeft < len) return FlushAndWrite(async, cancellationToken); - Write(); + Write(writeBuffer); return Task.CompletedTask; async Task FlushAndWrite(bool async, CancellationToken 
cancellationToken) { await Flush(async, cancellationToken).ConfigureAwait(false); - Debug.Assert(len <= writeBuffer.WriteSpaceLeft, $"Message of type {GetType().Name} has length {len} which is bigger than the buffer ({writeBuffer.WriteSpaceLeft})"); - Write(); + Debug.Assert(len <= WriteBuffer.WriteSpaceLeft, $"Message of type {GetType().Name} has length {len} which is bigger than the buffer ({WriteBuffer.WriteSpaceLeft})"); + Write(WriteBuffer); } [MethodImpl(MethodImplOptions.AggressiveInlining)] - void Write() + static void Write(NpgsqlWriteBuffer writeBuffer) { writeBuffer.WriteByte(FrontendMessageCode.Sync); writeBuffer.WriteInt32(len - 1); @@ -82,18 +82,18 @@ internal Task WriteExecute(int maxRows, bool async, CancellationToken cancellati if (writeBuffer.WriteSpaceLeft < len) return FlushAndWrite(maxRows, async, cancellationToken); - Write(maxRows); + Write(writeBuffer, maxRows); return Task.CompletedTask; async Task FlushAndWrite(int maxRows, bool async, CancellationToken cancellationToken) { await Flush(async, cancellationToken).ConfigureAwait(false); - Debug.Assert(10 <= writeBuffer.WriteSpaceLeft, $"Message of type {GetType().Name} has length 10 which is bigger than the buffer ({writeBuffer.WriteSpaceLeft})"); - Write(maxRows); + Debug.Assert(10 <= WriteBuffer.WriteSpaceLeft, $"Message of type {GetType().Name} has length 10 which is bigger than the buffer ({WriteBuffer.WriteSpaceLeft})"); + Write(WriteBuffer, maxRows); } [MethodImpl(MethodImplOptions.AggressiveInlining)] - void Write(int maxRows) + static void Write(NpgsqlWriteBuffer writeBuffer, int maxRows) { writeBuffer.WriteByte(FrontendMessageCode.Execute); writeBuffer.WriteInt32(len - 1); @@ -226,20 +226,22 @@ internal async Task WriteBind( await Flush(async, cancellationToken).ConfigureAwait(false); writeBuffer.WriteUInt16((ushort)parameters.Count); - - var writer = writeBuffer.GetWriter(DatabaseInfo, async ? 
FlushMode.NonBlocking : FlushMode.Blocking); - try + if (parameters.Count > 0) { - for (var paramIndex = 0; paramIndex < parameters.Count; paramIndex++) + var writer = writeBuffer.GetWriter(DatabaseInfo, async ? FlushMode.NonBlocking : FlushMode.Blocking); + try { - var param = parameters[paramIndex]; - await param.Write(async, writer, cancellationToken).ConfigureAwait(false); + for (var paramIndex = 0; paramIndex < parameters.Count; paramIndex++) + { + var param = parameters[paramIndex]; + await param.Write(async, writer, cancellationToken).ConfigureAwait(false); + } + } + catch(Exception ex) + { + Break(ex); + throw; } - } - catch(Exception ex) - { - Break(ex); - throw; } if (unknownResultTypeList != null) @@ -270,18 +272,18 @@ internal Task WriteClose(StatementOrPortal type, byte[] asciiName, bool async, C if (writeBuffer.WriteSpaceLeft < len) return FlushAndWrite(len, type, asciiName, async, cancellationToken); - Write(len, type, asciiName); + Write(writeBuffer, len, type, asciiName); return Task.CompletedTask; async Task FlushAndWrite(int len, StatementOrPortal type, byte[] name, bool async, CancellationToken cancellationToken) { await Flush(async, cancellationToken).ConfigureAwait(false); - Debug.Assert(len <= writeBuffer.WriteSpaceLeft, $"Message of type {GetType().Name} has length {len} which is bigger than the buffer ({writeBuffer.WriteSpaceLeft})"); - Write(len, type, name); + Debug.Assert(len <= WriteBuffer.WriteSpaceLeft, $"Message of type {GetType().Name} has length {len} which is bigger than the buffer ({WriteBuffer.WriteSpaceLeft})"); + Write(WriteBuffer, len, type, name); } [MethodImpl(MethodImplOptions.AggressiveInlining)] - void Write(int len, StatementOrPortal type, byte[] name) + static void Write(NpgsqlWriteBuffer writeBuffer, int len, StatementOrPortal type, byte[] name) { writeBuffer.WriteByte(FrontendMessageCode.Close); writeBuffer.WriteInt32(len - 1); diff --git a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs 
b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs index c67d920f61..bb46ea0765 100644 --- a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs @@ -68,15 +68,9 @@ internal TimeSpan Timeout public int WriteSpaceLeft => Size - WritePosition; - internal PgWriter GetWriter(NpgsqlDatabaseInfo typeCatalog, FlushMode? flushMode = null) - { - // Make sure we'll refetch from the write buffer. - _pgWriter.Reset(); - var writer = _pgWriter.Init(typeCatalog); - if (flushMode is not null) - writer.WithFlushMode(flushMode.GetValueOrDefault()); - return writer; - } + // (Re)init to make sure we'll refetch from the write buffer. + internal PgWriter GetWriter(NpgsqlDatabaseInfo typeCatalog, FlushMode flushMode = FlushMode.None) + => _pgWriter.Init(typeCatalog, flushMode); internal readonly byte[] Buffer; readonly Encoder _textEncoder; diff --git a/src/Npgsql/Internal/PgWriter.cs b/src/Npgsql/Internal/PgWriter.cs index e56136ff91..7a60a35a2d 100644 --- a/src/Npgsql/Internal/PgWriter.cs +++ b/src/Npgsql/Internal/PgWriter.cs @@ -35,31 +35,35 @@ sealed class NpgsqlBufferWriter : IStreamingWriter public void Advance(int count) { if (_lastBufferSize < count || _buffer.WriteSpaceLeft < count) - throw new InvalidOperationException("Cannot advance past the end of the current buffer."); + ThrowHelper.ThrowInvalidOperationException("Cannot advance past the end of the current buffer."); _lastBufferSize = null; _buffer.WritePosition += count; } public Memory GetMemory(int sizeHint = 0) { - if (sizeHint > _buffer.WriteSpaceLeft) - throw new OutOfMemoryException("Not enough space left in buffer."); + var writePosition = _buffer.WritePosition; + var bufferSize = _buffer.Size - writePosition; + if (sizeHint > bufferSize) + ThrowOutOfMemoryException(); - var bufferSize = _buffer.WriteSpaceLeft; _lastBufferSize = bufferSize; - return _buffer.Buffer.AsMemory(_buffer.WritePosition, bufferSize); + return _buffer.Buffer.AsMemory(writePosition, bufferSize); } public Span 
GetSpan(int sizeHint = 0) { - if (sizeHint > _buffer.WriteSpaceLeft) - throw new OutOfMemoryException("Not enough space left in buffer."); + var writePosition = _buffer.WritePosition; + var bufferSize = _buffer.Size - writePosition; + if (sizeHint > bufferSize) + ThrowOutOfMemoryException(); - var bufferSize = _buffer.WriteSpaceLeft; _lastBufferSize = bufferSize; - return _buffer.Buffer.AsSpan(_buffer.WritePosition, bufferSize); + return _buffer.Buffer.AsSpan(writePosition, bufferSize); } + static void ThrowOutOfMemoryException() => throw new OutOfMemoryException("Not enough space left in buffer."); + public void Flush(TimeSpan timeout = default) { if (timeout == TimeSpan.Zero) @@ -104,42 +108,37 @@ public sealed class PgWriter internal PgWriter(IBufferWriter writer) => _writer = writer; - internal PgWriter Init(NpgsqlDatabaseInfo typeCatalog) - { - if (_typeCatalog is not null) - throw new InvalidOperationException("Invalid concurrent use or PgWriter was not reset properly."); - - _typeCatalog = typeCatalog; - return this; - } - - internal void Reset() + internal PgWriter Init(NpgsqlDatabaseInfo typeCatalog, FlushMode flushMode = FlushMode.None) { if (_pos != _offset) - throw new InvalidOperationException("PgWriter still has uncommitted bytes."); + ThrowHelper.ThrowInvalidOperationException("Invalid concurrent use or PgWriter was not committed properly, PgWriter still has uncommitted bytes."); + + // Elide write barrier if we can. + if (!ReferenceEquals(_typeCatalog, typeCatalog)) + _typeCatalog = typeCatalog; - _typeCatalog = null; - FlushMode = FlushMode.None; + FlushMode = flushMode; _totalBytesWritten = 0; - ResetBuffer(); + RequestBuffer(count: 0); + return this; } - void ResetBuffer() + [MethodImpl(MethodImplOptions.NoInlining)] + void RequestBuffer(int count) { - _buffer = null; - _pos = 0; - _offset = 0; - _length = 0; + // GetMemory will check whether count is larger than the max buffer size. 
+ var mem = _writer.GetMemory(count); + if (!MemoryMarshal.TryGetArray(mem, out var segment)) + ThrowHelper.ThrowNotSupportedException("Only array backed writers are supported."); + + _buffer = segment.Array!; + _offset = _pos = segment.Offset; + _length = segment.Offset + segment.Count; } internal FlushMode FlushMode { get; private set; } - internal PgWriter Refresh() - { - if (_buffer is not null) - ResetBuffer(); - return this; - } + internal void RefreshBuffer() => RequestBuffer(count: 0); internal PgWriter WithFlushMode(FlushMode mode) { @@ -147,42 +146,27 @@ internal PgWriter WithFlushMode(FlushMode mode) return this; } - // TODO if we're working on a normal buffer writer we should use normal Ensure (so commit and get another buffer) semantics. void Ensure(int count = 1) { - if (_buffer is null) - SetBuffer(); + if (count <= Remaining) + return; - if (count > _length - _pos) - ThrowOutOfRange(); + Slow(count); - void ThrowOutOfRange() => throw new ArgumentOutOfRangeException(nameof(count), "Coud not ensure enough space in buffer."); [MethodImpl(MethodImplOptions.NoInlining)] - void SetBuffer() + void Slow(int count) { - // GetMemory will check whether count is larger than the max buffer size. - var mem = _writer.GetMemory(count); - if (!MemoryMarshal.TryGetArray(mem, out var segment)) - throw new NotSupportedException("Only array backed writers are supported."); - - _buffer = segment.Array!; - _offset = segment.Offset; - _pos = segment.Offset; - _length = segment.Offset + segment.Count; + // Try to re-request a larger size. + Commit(); + RequestBuffer(count); + // GetMemory is expected to throw if count is too large for the remaining space. 
+ Debug.Assert(count <= Remaining); } } Span Span => _buffer.AsSpan(_pos, _length - _pos); - int Remaining - { - get - { - if (_buffer is null) - Ensure(count: 0); - return _length - _pos; - } - } + int Remaining => _length - _pos; void Advance(int count) => _pos += count; @@ -440,19 +424,19 @@ void Flush(bool allowWhenNonBlocking, TimeSpan timeout = default) throw new NotSupportedException($"Cannot call {nameof(Flush)} on a buffered {nameof(PgWriter)}, {nameof(FlushMode)}.{nameof(FlushMode.None)} should be used to prevent this."); Commit(); - ResetBuffer(); writer.Flush(timeout); + RequestBuffer(count: 0); } public ValueTask FlushAsync(CancellationToken cancellationToken = default) => FlushAsync(allowWhenBlocking: false, cancellationToken); - ValueTask FlushAsync(bool allowWhenBlocking, CancellationToken cancellationToken = default) + async ValueTask FlushAsync(bool allowWhenBlocking, CancellationToken cancellationToken = default) { switch (FlushMode) { case FlushMode.None: - return new(); + return; case FlushMode.Blocking when !allowWhenBlocking: throw new NotSupportedException($"Cannot call {nameof(FlushAsync)} on a blocking {nameof(PgWriter)}, call Flush instead."); } @@ -461,8 +445,8 @@ ValueTask FlushAsync(bool allowWhenBlocking, CancellationToken cancellationToken throw new NotSupportedException($"Cannot call {nameof(FlushAsync)} on a buffered {nameof(PgWriter)}, {nameof(FlushMode)}.{nameof(FlushMode.None)} should be used to prevent this."); Commit(); - ResetBuffer(); - return writer.FlushAsync(cancellationToken); + await writer.FlushAsync(cancellationToken).ConfigureAwait(false); + RequestBuffer(count: 0); } internal ValueTask Flush(bool async, CancellationToken cancellationToken = default) diff --git a/src/Npgsql/Internal/Postgres/DataTypeName.cs b/src/Npgsql/Internal/Postgres/DataTypeName.cs index 2384ec723d..96e7da067f 100644 --- a/src/Npgsql/Internal/Postgres/DataTypeName.cs +++ b/src/Npgsql/Internal/Postgres/DataTypeName.cs @@ -226,7 +226,7 @@ internal 
static string NormalizeName(string dataTypeName) } public override string ToString() => Value; - public bool Equals(DataTypeName other) => !IsDefault && !other.IsDefault && _value == other._value; + public bool Equals(DataTypeName other) => string.Equals(_value, other._value); public override bool Equals(object? obj) => obj is DataTypeName other && Equals(other); public override int GetHashCode() => _value.GetHashCode(); public static bool operator ==(DataTypeName left, DataTypeName right) => left.Equals(right); diff --git a/src/Npgsql/Internal/Postgres/PgTypeId.cs b/src/Npgsql/Internal/Postgres/PgTypeId.cs index bf7457b116..37be0d2066 100644 --- a/src/Npgsql/Internal/Postgres/PgTypeId.cs +++ b/src/Npgsql/Internal/Postgres/PgTypeId.cs @@ -30,12 +30,13 @@ public Oid Oid public override string ToString() => IsOid ? "OID " + _oid : "DataTypeName " + _dataTypeName.Value; public bool Equals(PgTypeId other) - => (this, other) switch - { - ({ IsOid: true }, { IsOid: true }) => _oid == other._oid, - ({ IsDataTypeName: true }, { IsDataTypeName: true }) => _dataTypeName.Equals(other._dataTypeName), - _ => false - }; + { + if (IsOid && other.IsOid) + return _oid == other._oid; + if (IsDataTypeName && other.IsDataTypeName) + return _dataTypeName.Equals(other._dataTypeName); + return false; + } public override bool Equals(object? obj) => obj is PgTypeId other && Equals(other); public override int GetHashCode() => IsOid ? 
_oid.GetHashCode() : _dataTypeName.GetHashCode(); diff --git a/src/Npgsql/NpgsqlBinaryImporter.cs b/src/Npgsql/NpgsqlBinaryImporter.cs index 30b16cc75b..afb0065508 100644 --- a/src/Npgsql/NpgsqlBinaryImporter.cs +++ b/src/Npgsql/NpgsqlBinaryImporter.cs @@ -141,7 +141,7 @@ async Task StartRow(bool async, CancellationToken cancellationToken = default) await _buf.Flush(async, cancellationToken).ConfigureAwait(false); _buf.WriteInt16((short)NumColumns); - _pgWriter.Refresh(); + _pgWriter.RefreshBuffer(); _column = 0; _rowsImported++; } @@ -381,7 +381,7 @@ async Task WriteNull(bool async, CancellationToken cancellationToken = default) await _buf.Flush(async, cancellationToken).ConfigureAwait(false); _buf.WriteInt32(-1); - _pgWriter.Refresh(); + _pgWriter.RefreshBuffer(); _column++; } diff --git a/src/Npgsql/NpgsqlParameter.cs b/src/Npgsql/NpgsqlParameter.cs index dab9f89484..e7b0cf8def 100644 --- a/src/Npgsql/NpgsqlParameter.cs +++ b/src/Npgsql/NpgsqlParameter.cs @@ -5,6 +5,7 @@ using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.IO; +using System.Runtime.CompilerServices; using System.Threading; using System.Threading.Tasks; using Npgsql.Internal; @@ -48,6 +49,7 @@ public class NpgsqlParameter : DbParameter, IDbDataParameter, ICloneable private protected Size? WriteSize { get; set; } private protected object? _writeState; private protected Size _bufferRequirement; + private protected bool _asObject; #endregion @@ -519,41 +521,36 @@ internal void SetResolutionInfo(PgTypeInfo typeInfo, PgConverter converter, PgTy /// Attempt to resolve a type info based on available (postgres) type information on the parameter. 
internal void ResolveTypeInfo(PgSerializerOptions options) { - var previouslyResolved = TypeInfo?.Options == options; + var typeInfo = TypeInfo; + var previouslyResolved = ReferenceEquals(typeInfo?.Options, options); if (!previouslyResolved) { - var staticValueType = StaticValueType; - var valueType = GetValueType(StaticValueType); - - string? dataTypeName = null; - DataTypeName? builtinDataTypeName = null; - if (_npgsqlDbType is { } npgsqlDbType) - { - dataTypeName = npgsqlDbType.ToUnqualifiedDataTypeNameOrThrow(); - builtinDataTypeName = npgsqlDbType.ToDataTypeName(); - } - else if (_dataTypeName is not null) + var dataTypeName = + _npgsqlDbType is { } npgsqlDbType + ? npgsqlDbType.ToDataTypeName() ?? npgsqlDbType.ToUnqualifiedDataTypeNameOrThrow() + : _dataTypeName is not null + ? Internal.Postgres.DataTypeName.NormalizeName(_dataTypeName) + : null; + + PgTypeId? pgTypeId = null; + if (dataTypeName is not null) { - dataTypeName = Internal.Postgres.DataTypeName.NormalizeName(_dataTypeName); - // If we can find a match in an NpgsqlDbType we known we're dealing with a fully qualified built-in data type name. - builtinDataTypeName = NpgsqlDbTypeExtensions.ToNpgsqlDbType(dataTypeName)?.ToDataTypeName(); - } + if (!options.DatabaseInfo.TryGetPostgresTypeByName(dataTypeName, out var pgType)) + { + ThrowNotSupported(dataTypeName); + return; + } - var pgTypeId = dataTypeName is null - ? (PgTypeId?)null - : TryGetRepresentationalTypeId(builtinDataTypeName ?? dataTypeName, out var id) - ? id - : throw new NotSupportedException(_npgsqlDbType is not null - ? $"The NpgsqlDbType '{_npgsqlDbType}' isn't present in your database. You may need to install an extension or upgrade to a newer version." - : $"The data type name '{builtinDataTypeName ?? dataTypeName}' isn't present in your database. 
You may need to install an extension or upgrade to a newer version."); + pgTypeId = options.ToCanonicalTypeId(pgType.GetRepresentationalType()); + } - if (staticValueType == typeof(object)) + var valueType = StaticValueType; + if (valueType == typeof(object)) { - if (valueType == null && pgTypeId is null) + valueType = Value?.GetType(); + if (valueType is null && pgTypeId is null) { - var parameterName = !string.IsNullOrEmpty(ParameterName) ? ParameterName : $"${Collection?.IndexOf(this) + 1}"; - ThrowHelper.ThrowInvalidOperationException( - $"Parameter '{parameterName}' must have either its NpgsqlDbType or its DataTypeName or its Value set."); + ThrowNoTypeInfo(); return; } @@ -565,35 +562,38 @@ internal void ResolveTypeInfo(PgSerializerOptions options) } } - TypeInfo = AdoSerializerHelpers.GetTypeInfoForWriting(valueType, pgTypeId, options, _npgsqlDbType); + TypeInfo = typeInfo = AdoSerializerHelpers.GetTypeInfoForWriting(valueType, pgTypeId, options, _npgsqlDbType); } // This step isn't part of BindValue because we need to know the PgTypeId beforehand for things like SchemaOnly with null values. // We never reuse resolutions for resolvers across executions as a mutable value itself may influence the result. // TODO we could expose a property on a Converter/TypeInfo to indicate whether it's immutable, at that point we can reuse. - if (!previouslyResolved || TypeInfo is PgResolverTypeInfo) + if (!previouslyResolved || typeInfo!.IsResolverInfo) { - ResetConverterResolution(); - var resolution = ResolveConverter(TypeInfo!); + ResetBindingInfo(); // No need for ResetConverterResolution as we'll mutate those fields directly afterwards. 
+ var resolution = ResolveConverter(typeInfo!); Converter = resolution.Converter; PgTypeId = resolution.PgTypeId; } - bool TryGetRepresentationalTypeId(string dataTypeName, out PgTypeId pgTypeId) - { - if (options.DatabaseInfo.TryGetPostgresTypeByName(dataTypeName, out var pgType)) - { - pgTypeId = options.ToCanonicalTypeId(pgType.GetRepresentationalType()); - return true; - } + void ThrowNoTypeInfo() + => ThrowHelper.ThrowInvalidOperationException( + $"Parameter '{(!string.IsNullOrEmpty(ParameterName) ? ParameterName : $"${Collection?.IndexOf(this) + 1}")}' must have either its NpgsqlDbType or its DataTypeName or its Value set."); - pgTypeId = default; - return false; + void ThrowNotSupported(string dataTypeName) + { + throw new NotSupportedException(_npgsqlDbType is not null + ? $"The NpgsqlDbType '{_npgsqlDbType}' isn't present in your database. You may need to install an extension or upgrade to a newer version." + : $"The data type name '{dataTypeName}' isn't present in your database. You may need to install an extension or upgrade to a newer version."); } } // Pull from Value so we also support object typed generic params. - private protected virtual PgConverterResolution ResolveConverter(PgTypeInfo typeInfo) => typeInfo.GetObjectResolution(Value); + private protected virtual PgConverterResolution ResolveConverter(PgTypeInfo typeInfo) + { + _asObject = true; + return typeInfo.GetObjectResolution(Value); + } /// Bind the current value to the type info, truncate (if applicable), take its size, and do any final validation before writing. internal void Bind(out DataFormat format, out Size size) @@ -612,12 +612,22 @@ internal void Bind(out DataFormat format, out Size size) return; } + if (_size > 0) + HandleSizeTruncation(); + + BindCore(); + format = Format; + size = WriteSize!.Value; + // Handle Size truncate behavior for a predetermined set of types and pg types. // Doesn't matter if we 'box' Value, all supported types are reference types. 
- if (_size > 0 && Converter!.TypeToConvert is var type && - (type == typeof(string) || type == typeof(char[]) || type == typeof(byte[]) || type == typeof(Stream)) && - Value is { } value) + [MethodImpl(MethodImplOptions.NoInlining)] + void HandleSizeTruncation() { + var type = Converter!.TypeToConvert; + if ((type != typeof(string) && type != typeof(char[]) && type != typeof(byte[]) && type != typeof(Stream)) || Value is not { } value) + return; + var dataTypeName = TypeInfo!.Options.GetDataTypeName(PgTypeId); if (dataTypeName == DataTypeNames.Text || dataTypeName == DataTypeNames.Varchar || dataTypeName == DataTypeNames.Bpchar) { @@ -639,13 +649,12 @@ internal void Bind(out DataFormat format, out Size size) Value = truncated; } else if (value is Stream) + { + _asObject = true; _useSubStream = true; + } } } - - BindCore(); - format = Format; - size = WriteSize!.Value; } private protected virtual void BindCore(bool allowNullReference = false) @@ -735,6 +744,7 @@ private protected void ResetTypeInfo() void ResetConverterResolution() { + _asObject = false; Converter = null; PgTypeId = default; ResetBindingInfo(); diff --git a/src/Npgsql/NpgsqlParameter`.cs b/src/Npgsql/NpgsqlParameter`.cs index 5c0c34a09b..94705aaae4 100644 --- a/src/Npgsql/NpgsqlParameter`.cs +++ b/src/Npgsql/NpgsqlParameter`.cs @@ -82,14 +82,21 @@ public NpgsqlParameter(string parameterName, DbType dbType) #endregion Constructors private protected override PgConverterResolution ResolveConverter(PgTypeInfo typeInfo) - => typeInfo.IsBoxing ? base.ResolveConverter(typeInfo) : typeInfo.GetResolution(TypedValue); + { + if (typeof(T) == typeof(object) || TypeInfo!.IsBoxing) + return base.ResolveConverter(typeInfo); + + _asObject = false; + return typeInfo.GetResolution(TypedValue); + } + // We ignore allowNullReference, it's just there to control the base implementation. 
private protected override void BindCore(bool allowNullReference = false) { - // If we're object typed we should support DBNull, call into base BindCore. - if (typeof(T) == typeof(object) || TypeInfo!.IsBoxing || _useSubStream) + if (_asObject) { - base.BindCore(typeof(T) != typeof(object) && (TypeInfo!.IsBoxing || _useSubStream || allowNullReference)); + // If we're object typed we should not support null. + base.BindCore(typeof(T) != typeof(object)); return; } @@ -110,7 +117,7 @@ private protected override void BindCore(bool allowNullReference = false) private protected override ValueTask WriteValue(bool async, PgWriter writer, CancellationToken cancellationToken) { - if (typeof(T) == typeof(object) || TypeInfo!.IsBoxing || _useSubStream) + if (_asObject) return base.WriteValue(async, writer, cancellationToken); Debug.Assert(Converter is PgConverter); diff --git a/src/Npgsql/PreparedStatement.cs b/src/Npgsql/PreparedStatement.cs index 6b88289e5c..f24905eb41 100644 --- a/src/Npgsql/PreparedStatement.cs +++ b/src/Npgsql/PreparedStatement.cs @@ -95,12 +95,14 @@ internal void SetParamTypes(List parameters) internal bool DoParametersMatch(List parameters) { var paramTypes = ConverterParamTypes!; - var forall = paramTypes.Length == parameters.Count; - for (var i = 0; forall && i < paramTypes.Length; i++) + if (paramTypes.Length != parameters.Count) + return false; + + for (var i = 0; i < paramTypes.Length; i++) if (paramTypes[i] != parameters[i].PgTypeId) - forall = false; + return false; - return forall; + return true; } internal void AbortPrepare() From 63f81e2090f95d2d97a32067d5171cd85c213018 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 3 Nov 2023 21:28:30 +0000 Subject: [PATCH 280/761] Bump NUnit from 3.13.3 to 3.14.0 (#5373) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index b59b1c527d..aaa20176f8 
100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -32,7 +32,7 @@ - + From ba9db54aac8eb2f1d5e79188aa1b72efa604e7fb Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Tue, 7 Nov 2023 18:22:29 +0100 Subject: [PATCH 281/761] Add stream support for text types (#5307) --- .../Internal/Resolvers/AdoTypeInfoResolver.cs | 15 ++++++++++++++- test/Npgsql.Tests/Types/ByteaTests.cs | 10 +++++----- test/Npgsql.Tests/Types/JsonTests.cs | 6 ++++++ test/Npgsql.Tests/Types/TextTests.cs | 5 +++++ 4 files changed, 30 insertions(+), 6 deletions(-) diff --git a/src/Npgsql/Internal/Resolvers/AdoTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/AdoTypeInfoResolver.cs index f7c619c42a..d459059d16 100644 --- a/src/Npgsql/Internal/Resolvers/AdoTypeInfoResolver.cs +++ b/src/Npgsql/Internal/Resolvers/AdoTypeInfoResolver.cs @@ -74,6 +74,9 @@ static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings) mappings.AddStructType>(DataTypeNames.Text, static (options, mapping, _) => mapping.CreateInfo(options, new ReadOnlyMemoryByteaConverter()), MatchRequirement.DataTypeName); + mappings.AddType(DataTypeNames.Text, + static (options, mapping, _) => new PgTypeInfo(options, new StreamByteaConverter(), new DataTypeName(mapping.DataTypeName), unboxedType: mapping.Type != typeof(Stream) ? mapping.Type : null), + mapping => mapping with { MatchRequirement = MatchRequirement.DataTypeName, TypeMatchPredicate = type => typeof(Stream).IsAssignableFrom(type) }); //Special mappings, these have no corresponding array mapping. 
mappings.AddType(DataTypeNames.Text, static (options, mapping, _) => mapping.CreateInfo(options, new TextReaderTextConverter(options.TextEncoding), supportsWriting: false, preferredFormat: DataFormat.Text), @@ -98,6 +101,9 @@ static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings) mappings.AddStructType>(dataTypeName, static (options, mapping, _) => mapping.CreateInfo(options, new ReadOnlyMemoryByteaConverter()), MatchRequirement.DataTypeName); + mappings.AddType(dataTypeName, + static (options, mapping, _) => new PgTypeInfo(options, new StreamByteaConverter(), new DataTypeName(mapping.DataTypeName), unboxedType: mapping.Type != typeof(Stream) ? mapping.Type : null), + mapping => mapping with { MatchRequirement = MatchRequirement.DataTypeName, TypeMatchPredicate = type => typeof(Stream).IsAssignableFrom(type) }); //Special mappings, these have no corresponding array mapping. mappings.AddType(dataTypeName, static (options, mapping, _) => mapping.CreateInfo(options, new TextReaderTextConverter(options.TextEncoding), supportsWriting: false, preferredFormat: DataFormat.Text), @@ -119,6 +125,9 @@ static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings) mappings.AddStructType>(DataTypeNames.Jsonb, static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter>(jsonbVersion, new ReadOnlyMemoryByteaConverter())), MatchRequirement.DataTypeName); + mappings.AddType(DataTypeNames.Jsonb, + static (options, mapping, _) => new PgTypeInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new StreamByteaConverter()), new DataTypeName(mapping.DataTypeName), unboxedType: mapping.Type != typeof(Stream) ? mapping.Type : null), + mapping => mapping with { MatchRequirement = MatchRequirement.DataTypeName, TypeMatchPredicate = type => typeof(Stream).IsAssignableFrom(type) }); //Special mappings, these have no corresponding array mapping. 
mappings.AddType(DataTypeNames.Jsonb, static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new TextReaderTextConverter(options.TextEncoding)), supportsWriting: false, preferredFormat: DataFormat.Text), @@ -145,7 +154,7 @@ static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings) mappings.AddStructType>(DataTypeNames.Bytea, static (options, mapping, _) => mapping.CreateInfo(options, new ReadOnlyMemoryByteaConverter())); mappings.AddType(DataTypeNames.Bytea, - static (options, mapping, _) => new PgTypeInfo(options, new StreamByteaConverter(), new DataTypeName(mapping.DataTypeName), unboxedType: mapping.Type), + static (options, mapping, _) => new PgTypeInfo(options, new StreamByteaConverter(), new DataTypeName(mapping.DataTypeName), unboxedType: mapping.Type != typeof(Stream) ? mapping.Type : null), mapping => mapping with { TypeMatchPredicate = type => typeof(Stream).IsAssignableFrom(type) }); // Varbit @@ -329,6 +338,7 @@ protected static TypeInfoMappingCollection AddArrayInfos(TypeInfoMappingCollecti mappings.AddStructArrayType(DataTypeNames.Text); mappings.AddArrayType(DataTypeNames.Text); mappings.AddStructArrayType>(DataTypeNames.Text); + mappings.AddArrayType(DataTypeNames.Text); // Alternative text types foreach(var dataTypeName in new[] { "citext", DataTypeNames.Varchar, @@ -339,6 +349,7 @@ protected static TypeInfoMappingCollection AddArrayInfos(TypeInfoMappingCollecti mappings.AddStructArrayType(dataTypeName); mappings.AddArrayType(dataTypeName); mappings.AddStructArrayType>(dataTypeName); + mappings.AddArrayType(dataTypeName); } // Jsonb @@ -346,6 +357,7 @@ protected static TypeInfoMappingCollection AddArrayInfos(TypeInfoMappingCollecti mappings.AddStructArrayType(DataTypeNames.Jsonb); mappings.AddArrayType(DataTypeNames.Jsonb); mappings.AddStructArrayType>(DataTypeNames.Jsonb); + mappings.AddArrayType(DataTypeNames.Jsonb); // Jsonpath mappings.AddArrayType(DataTypeNames.Jsonpath); 
@@ -353,6 +365,7 @@ protected static TypeInfoMappingCollection AddArrayInfos(TypeInfoMappingCollecti // Bytea mappings.AddArrayType(DataTypeNames.Bytea); mappings.AddStructArrayType>(DataTypeNames.Bytea); + mappings.AddArrayType(DataTypeNames.Bytea); // Varbit // Object mapping first. diff --git a/test/Npgsql.Tests/Types/ByteaTests.cs b/test/Npgsql.Tests/Types/ByteaTests.cs index 2db7aca492..1926f8f4e2 100644 --- a/test/Npgsql.Tests/Types/ByteaTests.cs +++ b/test/Npgsql.Tests/Types/ByteaTests.cs @@ -54,7 +54,7 @@ public Task AsArraySegment() [Test] public Task Write_as_MemoryStream() => AssertTypeWrite( - () => new MemoryStream(new byte[] { 1, 2, 3 }), "\\x010203", "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false, skipArrayCheck: true); + () => new MemoryStream(new byte[] { 1, 2, 3 }), "\\x010203", "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false); [Test] public Task Write_as_MemoryStream_truncated() @@ -67,7 +67,7 @@ public Task Write_as_MemoryStream_truncated() }; return AssertTypeWrite( - msFactory, "\\x020304", "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false, skipArrayCheck: true); + msFactory, "\\x020304", "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false); } [Test] @@ -79,7 +79,7 @@ public async Task Write_as_MemoryStream_long() var expectedSql = "\\x" + ToHex(bytes); await AssertTypeWrite( - () => new MemoryStream(bytes), expectedSql, "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false, skipArrayCheck: true); + () => new MemoryStream(bytes), expectedSql, "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false); } [Test] @@ -92,7 +92,7 @@ public async Task Write_as_FileStream() await File.WriteAllBytesAsync(filePath, new byte[] { 1, 2, 3 }); await AssertTypeWrite( - () => FileStreamFactory(filePath, fsList), "\\x010203", "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false, skipArrayCheck: true); + () => FileStreamFactory(filePath, fsList), "\\x010203", "bytea", NpgsqlDbType.Bytea, 
DbType.Binary, isDefault: false); } finally { @@ -128,7 +128,7 @@ public async Task Write_as_FileStream_long() var expectedSql = "\\x" + ToHex(bytes); await AssertTypeWrite( - () => FileStreamFactory(filePath, fsList), expectedSql, "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false, skipArrayCheck: true); + () => FileStreamFactory(filePath, fsList), expectedSql, "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false); } finally { diff --git a/test/Npgsql.Tests/Types/JsonTests.cs b/test/Npgsql.Tests/Types/JsonTests.cs index 96e1ae148c..f1f650222c 100644 --- a/test/Npgsql.Tests/Types/JsonTests.cs +++ b/test/Npgsql.Tests/Types/JsonTests.cs @@ -1,4 +1,6 @@ using System; +using System.Data; +using System.IO; using System.Text; using System.Text.Json; using System.Text.Json.Nodes; @@ -62,6 +64,10 @@ public async Task Write_as_ArraySegment_of_char() => await AssertTypeWrite(new ArraySegment("""{"K": "V"}""".ToCharArray()), """{"K": "V"}""", PostgresType, NpgsqlDbType, isDefault: false); + [Test] + public Task As_MemoryStream() + => AssertTypeWrite(() => new MemoryStream("""{"K": "V"}"""u8.ToArray()), """{"K": "V"}""", PostgresType, NpgsqlDbType, isDefault: false); + [Test] public async Task As_JsonDocument() => await AssertType( diff --git a/test/Npgsql.Tests/Types/TextTests.cs b/test/Npgsql.Tests/Types/TextTests.cs index c4583151b4..7e86fb131b 100644 --- a/test/Npgsql.Tests/Types/TextTests.cs +++ b/test/Npgsql.Tests/Types/TextTests.cs @@ -1,5 +1,6 @@ using System; using System.Data; +using System.IO; using System.Text; using System.Threading.Tasks; using NpgsqlTypes; @@ -52,6 +53,10 @@ public async Task Citext_as_string() await AssertType("foo", "foo", "citext", NpgsqlDbType.Citext, inferredDbType: DbType.String, isDefaultForWriting: false); } + [Test] + public Task Text_as_MemoryStream() + => AssertTypeWrite(() => new MemoryStream("foo"u8.ToArray()), "foo", "text", NpgsqlDbType.Text, DbType.String, isDefault: false); + [Test] public async Task 
Text_long() { From 61ed4991b22242f4f2a8361d42dae4ae95ed939b Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Tue, 7 Nov 2023 18:25:23 +0100 Subject: [PATCH 282/761] Remove unconditional async element reading cost for PG arrays (#5304) --- .../Internal/Converters/ArrayConverter.cs | 66 +++++++++---------- src/Npgsql/Internal/PgStreamingConverter.cs | 13 ++++ 2 files changed, 44 insertions(+), 35 deletions(-) diff --git a/src/Npgsql/Internal/Converters/ArrayConverter.cs b/src/Npgsql/Internal/Converters/ArrayConverter.cs index 3007619c89..c9dc2add87 100644 --- a/src/Npgsql/Internal/Converters/ArrayConverter.cs +++ b/src/Npgsql/Internal/Converters/ArrayConverter.cs @@ -432,21 +432,19 @@ int IElementOperations.GetCollectionCount(object collection, out int[]? lengths) Size? IElementOperations.GetSizeOrDbNull(SizeContext context, object collection, int[] indices, ref object? writeState) => _elemConverter.GetSizeOrDbNull(context.Format, context.BufferRequirement, GetValue(collection, indices), ref writeState); - unsafe ValueTask IElementOperations.Read(bool async, PgReader reader, bool isDbNull, object collection, int[] indices, CancellationToken cancellationToken) + ValueTask IElementOperations.Read(bool async, PgReader reader, bool isDbNull, object collection, int[] indices, CancellationToken cancellationToken) { - TElement? result; - if (isDbNull) - result = default; - else if (!async) - result = _elemConverter.Read(reader); - else - { - var task = _elemConverter.ReadAsync(reader, cancellationToken); - if (!task.IsCompletedSuccessfully) - return AwaitTask(task.AsTask(), new(this, &SetResult), collection, indices); + if (!isDbNull && async && _elemConverter is PgStreamingConverter streamingConverter) + return ReadAsync(streamingConverter, reader, collection, indices, cancellationToken); - result = task.Result; - } + SetValue(collection, indices, isDbNull ? 
default : _elemConverter.Read(reader)); + return new(); + } + + unsafe ValueTask ReadAsync(PgStreamingConverter converter, PgReader reader, object collection, int[] indices, CancellationToken cancellationToken) + { + if (converter.ReadAsyncAsTask(reader, cancellationToken, out var result) is { } task) + return AwaitTask(task, new(this, &SetResult), collection, indices); SetValue(collection, indices, result); return new(); @@ -505,34 +503,32 @@ int IElementOperations.GetCollectionCount(object collection, out int[]? lengths) Size? IElementOperations.GetSizeOrDbNull(SizeContext context, object collection, int[] indices, ref object? writeState) => _elemConverter.GetSizeOrDbNull(context.Format, context.BufferRequirement, GetValue(collection, indices[0]), ref writeState); - unsafe ValueTask IElementOperations.Read(bool async, PgReader reader, bool isDbNull, object collection, int[] indices, CancellationToken cancellationToken) + ValueTask IElementOperations.Read(bool async, PgReader reader, bool isDbNull, object collection, int[] indices, CancellationToken cancellationToken) { Debug.Assert(indices.Length is 1); - TElement? result; - if (isDbNull) - result = default; - else if (!async) - result = _elemConverter.Read(reader); - else - { - var task = _elemConverter.ReadAsync(reader, cancellationToken); - if (!task.IsCompletedSuccessfully) - return AwaitTask(task.AsTask(), new(this, &SetResult), collection, indices); + if (!isDbNull && async && _elemConverter is PgStreamingConverter streamingConverter) + return ReadAsync(streamingConverter, reader, collection, indices, cancellationToken); - result = task.Result; - } - - SetValue(collection, indices[0], result); + SetValue(collection, indices[0], isDbNull ? default : _elemConverter.Read(reader)); return new(); - - // Using .Result on ValueTask is equivalent to GetAwaiter().GetResult(), this removes TaskAwaiter rooting. 
- static void SetResult(Task task, object collection, int[] indices) - { - Debug.Assert(task is Task); - SetValue(collection, indices[0], new ValueTask(Unsafe.As>(task)).Result); - } } + unsafe ValueTask ReadAsync(PgStreamingConverter converter, PgReader reader, object collection, int[] indices, CancellationToken cancellationToken) + { + if (converter.ReadAsyncAsTask(reader, cancellationToken, out var result) is { } task) + return AwaitTask(task, new(this, &SetResult), collection, indices); + + SetValue(collection, indices[0], result); + return new(); + + // Using .Result on ValueTask is equivalent to GetAwaiter().GetResult(), this removes TaskAwaiter rooting. + static void SetResult(Task task, object collection, int[] indices) + { + Debug.Assert(task is Task); + SetValue(collection, indices[0], new ValueTask(Unsafe.As>(task)).Result); + } + } + ValueTask IElementOperations.Write(bool async, PgWriter writer, object collection, int[] indices, CancellationToken cancellationToken) { Debug.Assert(indices.Length is 1); diff --git a/src/Npgsql/Internal/PgStreamingConverter.cs b/src/Npgsql/Internal/PgStreamingConverter.cs index 09176f82d9..70a45026db 100644 --- a/src/Npgsql/Internal/PgStreamingConverter.cs +++ b/src/Npgsql/Internal/PgStreamingConverter.cs @@ -16,6 +16,19 @@ public override bool CanConvert(DataFormat format, out BufferRequirements buffer return format is DataFormat.Binary; } + // Workaround for trimming https://github.com/dotnet/runtime/issues/92850#issuecomment-1744521361 + internal Task? 
ReadAsyncAsTask(PgReader reader, CancellationToken cancellationToken, out T result) + { + var task = ReadAsync(reader, cancellationToken); + if (task.IsCompletedSuccessfully) + { + result = task.Result; + return null; + } + result = default!; + return task.AsTask(); + } + internal sealed override unsafe ValueTask ReadAsObject( bool async, PgReader reader, CancellationToken cancellationToken) { From ab0b6684a862cc99e54e0dd391716f521106b97f Mon Sep 17 00:00:00 2001 From: Bogdan Yarotsky Date: Tue, 7 Nov 2023 18:59:07 +0100 Subject: [PATCH 283/761] Support for non-generic Enum mapping (#4852) Closes #3383 --- src/Npgsql/NpgsqlDataSourceBuilder.cs | 11 +++++ src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 14 ++++++ src/Npgsql/PublicAPI.Unshipped.txt | 6 +++ src/Npgsql/TypeMapping/GlobalTypeMapper.cs | 35 +++++++++++++++ src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs | 43 +++++++++++++++++++ src/Npgsql/TypeMapping/UserTypeMapper.cs | 21 +++++++++ test/Npgsql.Tests/GlobalTypeMapperTests.cs | 38 ++++++++++++++++- test/Npgsql.Tests/Types/EnumTests.cs | 47 +++++++++++++++++++++ 8 files changed, 213 insertions(+), 2 deletions(-) diff --git a/src/Npgsql/NpgsqlDataSourceBuilder.cs b/src/Npgsql/NpgsqlDataSourceBuilder.cs index a5ce88cd1d..d3b392156e 100644 --- a/src/Npgsql/NpgsqlDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlDataSourceBuilder.cs @@ -291,6 +291,17 @@ void INpgsqlTypeMapper.Reset() where TEnum : struct, Enum => _internalBuilder.UnmapEnum(pgName, nameTranslator); + /// + [RequiresDynamicCode("Calling MapEnum with a Type can require creating new generic types or methods. This may not work when AOT compiling.")] + public INpgsqlTypeMapper MapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)] + Type clrType, string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) + => _internalBuilder.MapEnum(clrType, pgName, nameTranslator); + + /// + public bool UnmapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)] + Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + => _internalBuilder.UnmapEnum(clrType, pgName, nameTranslator); + /// [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] public INpgsqlTypeMapper MapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>( diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index f7078b6182..9b0b650003 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -274,6 +274,20 @@ public INpgsqlNameTranslator DefaultNameTranslator where TEnum : struct, Enum => _userTypeMapper.UnmapEnum(pgName, nameTranslator); + /// + [RequiresDynamicCode("Calling MapEnum with a Type can require creating new generic types or methods. This may not work when AOT compiling.")] + public INpgsqlTypeMapper MapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)] + Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + { + _userTypeMapper.MapEnum(clrType, pgName, nameTranslator); + return this; + } + + /// + public bool UnmapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)] + Type clrType, string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) + => _userTypeMapper.UnmapEnum(clrType, pgName, nameTranslator); + /// [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] public INpgsqlTypeMapper MapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>( diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index 1264896889..592055725d 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -11,8 +11,10 @@ Npgsql.NpgsqlConnectionStringBuilder.ChannelBinding.set -> void Npgsql.NpgsqlBinaryImporter.WriteRow(params object?[]! values) -> void Npgsql.NpgsqlBinaryImporter.WriteRowAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken), params object?[]! values) -> System.Threading.Tasks.Task! Npgsql.NpgsqlDataSourceBuilder.AddTypeInfoResolver(Npgsql.Internal.IPgTypeInfoResolver! resolver) -> void +Npgsql.NpgsqlDataSourceBuilder.MapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! Npgsql.NpgsqlDataSourceBuilder.Name.get -> string? Npgsql.NpgsqlDataSourceBuilder.Name.set -> void +Npgsql.NpgsqlDataSourceBuilder.UnmapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool Npgsql.NpgsqlDataSourceBuilder.UseRootCertificate(System.Security.Cryptography.X509Certificates.X509Certificate2? rootCertificate) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.UseRootCertificateCallback(System.Func? rootCertificateCallback) -> Npgsql.NpgsqlDataSourceBuilder! 
Npgsql.NpgsqlSlimDataSourceBuilder @@ -35,12 +37,14 @@ Npgsql.NpgsqlSlimDataSourceBuilder.EnableRecords() -> Npgsql.NpgsqlSlimDataSourc Npgsql.NpgsqlSlimDataSourceBuilder.EnableTransportSecurity() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.MapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! Npgsql.NpgsqlSlimDataSourceBuilder.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +Npgsql.NpgsqlSlimDataSourceBuilder.MapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! Npgsql.NpgsqlSlimDataSourceBuilder.MapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! Npgsql.NpgsqlSlimDataSourceBuilder.Name.get -> string? Npgsql.NpgsqlSlimDataSourceBuilder.Name.set -> void Npgsql.NpgsqlSlimDataSourceBuilder.NpgsqlSlimDataSourceBuilder(string? connectionString = null) -> void Npgsql.NpgsqlSlimDataSourceBuilder.UnmapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool Npgsql.NpgsqlSlimDataSourceBuilder.UnmapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool +Npgsql.NpgsqlSlimDataSourceBuilder.UnmapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool Npgsql.NpgsqlSlimDataSourceBuilder.UnmapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool Npgsql.NpgsqlSlimDataSourceBuilder.UseClientCertificate(System.Security.Cryptography.X509Certificates.X509Certificate? clientCertificate) -> Npgsql.NpgsqlSlimDataSourceBuilder! 
Npgsql.NpgsqlSlimDataSourceBuilder.UseClientCertificates(System.Security.Cryptography.X509Certificates.X509CertificateCollection? clientCertificates) -> Npgsql.NpgsqlSlimDataSourceBuilder! @@ -58,6 +62,8 @@ Npgsql.Replication.PhysicalReplicationConnection.StartReplication(NpgsqlTypes.Np Npgsql.Replication.PhysicalReplicationSlot.PhysicalReplicationSlot(string! slotName, NpgsqlTypes.NpgsqlLogSequenceNumber? restartLsn = null, uint? restartTimeline = null) -> void Npgsql.Replication.PhysicalReplicationSlot.RestartTimeline.get -> uint? Npgsql.TypeMapping.INpgsqlTypeMapper.AddTypeInfoResolver(Npgsql.Internal.IPgTypeInfoResolver! resolver) -> void +Npgsql.TypeMapping.INpgsqlTypeMapper.MapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +Npgsql.TypeMapping.INpgsqlTypeMapper.UnmapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool Npgsql.TypeMapping.UserTypeMapping Npgsql.TypeMapping.UserTypeMapping.ClrType.get -> System.Type! Npgsql.TypeMapping.UserTypeMapping.PgTypeName.get -> string! diff --git a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs index c67044af27..cad4b469e4 100644 --- a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs +++ b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs @@ -215,6 +215,41 @@ public INpgsqlNameTranslator DefaultNameTranslator } } + /// + [RequiresDynamicCode("Calling MapEnum with a Type can require creating new generic types or methods. This may not work when AOT compiling.")] + public INpgsqlTypeMapper MapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)] + Type clrType, string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) + { + _lock.EnterWriteLock(); + try + { + _userTypeMapper.MapEnum(clrType, pgName, nameTranslator); + ResetTypeMappingCache(); + return this; + } + finally + { + _lock.ExitWriteLock(); + } + } + + /// + public bool UnmapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)] + Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + { + _lock.EnterWriteLock(); + try + { + var result = _userTypeMapper.UnmapEnum(clrType, pgName, nameTranslator); + ResetTypeMappingCache(); + return result; + } + finally + { + _lock.ExitWriteLock(); + } + } + /// [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] public INpgsqlTypeMapper MapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) diff --git a/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs b/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs index fc77ee7316..8e20a5f03e 100644 --- a/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs +++ b/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs @@ -64,6 +64,49 @@ public interface INpgsqlTypeMapper INpgsqlNameTranslator? nameTranslator = null) where TEnum : struct, Enum; + /// + /// Maps a CLR enum to a PostgreSQL enum type. + /// + /// + /// CLR enum labels are mapped by name to PostgreSQL enum labels. + /// The translation strategy can be controlled by the parameter, + /// which defaults to . + /// You can also use the on your enum fields to manually specify a PostgreSQL enum label. 
+ /// If there is a discrepancy between the .NET and database labels while an enum is read or written, + /// an exception will be raised. + /// + /// The .NET enum type to be mapped + /// + /// A PostgreSQL type name for the corresponding enum type in the database. + /// If null, the name translator given in will be used. + /// + /// + /// A component which will be used to translate CLR names (e.g. SomeClass) into database names (e.g. some_class). + /// Defaults to . + /// + [RequiresDynamicCode("Calling MapEnum with a Type can require creating new generic types or methods. This may not work when AOT compiling.")] + INpgsqlTypeMapper MapEnum( + [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)]Type clrType, + string? pgName = null, + INpgsqlNameTranslator? nameTranslator = null); + + /// + /// Removes an existing enum mapping. + /// + /// The .NET enum type to be mapped + /// + /// A PostgreSQL type name for the corresponding enum type in the database. + /// If null, the name translator given in will be used. + /// + /// + /// A component which will be used to translate CLR names (e.g. SomeClass) into database names (e.g. some_class). + /// Defaults to . + /// + bool UnmapEnum( + [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)]Type clrType, + string? pgName = null, + INpgsqlNameTranslator? nameTranslator = null); + /// /// Maps a CLR type to a PostgreSQL composite type. /// diff --git a/src/Npgsql/TypeMapping/UserTypeMapper.cs b/src/Npgsql/TypeMapping/UserTypeMapper.cs index 5ca68d0c37..e3c03040fe 100644 --- a/src/Npgsql/TypeMapping/UserTypeMapper.cs +++ b/src/Npgsql/TypeMapping/UserTypeMapper.cs @@ -57,6 +57,27 @@ sealed class UserTypeMapper where TEnum : struct, Enum => Unmap(typeof(TEnum), out _, pgName, nameTranslator ?? 
DefaultNameTranslator); + [UnconditionalSuppressMessage("Trimming", "IL2111", Justification = "MapEnum TEnum has less DAM annotations than clrType.")] + [RequiresDynamicCode("Calling MapEnum with a Type can require creating new generic types or methods. This may not work when AOT compiling.")] + public UserTypeMapper MapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)]Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + { + if (!clrType.IsEnum || !clrType.IsValueType) + throw new ArgumentException("Type must be a concrete Enum", nameof(clrType)); + + var openMethod = typeof(UserTypeMapper).GetMethod(nameof(MapEnum), new[] { typeof(string), typeof(INpgsqlNameTranslator) })!; + var method = openMethod.MakeGenericMethod(clrType); + method.Invoke(this, new object?[] { pgName, nameTranslator }); + return this; + } + + public bool UnmapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)]Type clrType,string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + { + if (!clrType.IsEnum || !clrType.IsValueType) + throw new ArgumentException("Type must be a concrete Enum", nameof(clrType)); + + return Unmap(clrType, out _, pgName, nameTranslator ?? DefaultNameTranslator); + } + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] public UserTypeMapper MapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicProperties)] T>( string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) where T : class diff --git a/test/Npgsql.Tests/GlobalTypeMapperTests.cs b/test/Npgsql.Tests/GlobalTypeMapperTests.cs index 32c647731c..29ab55b4c1 100644 --- a/test/Npgsql.Tests/GlobalTypeMapperTests.cs +++ b/test/Npgsql.Tests/GlobalTypeMapperTests.cs @@ -34,11 +34,45 @@ public async Task MapEnum() // Global mapping changes have no effect on already-built data sources await AssertType(dataSource1, Mood.Happy, "happy", type, npgsqlDbType: null); - // But they do affect on new data sources + // But they do affect new data sources await using var dataSource2 = CreateDataSource(); await AssertType(dataSource2, "happy", "happy", type, npgsqlDbType: null, isDefault: false); } + [Test] + public async Task MapEnum_NonGeneric() + { + await using var adminConnection = await OpenConnectionAsync(); + var type = await GetTempTypeName(adminConnection); + NpgsqlConnection.GlobalTypeMapper.MapEnum(typeof(Mood), type); + + try + { + await using var dataSource1 = CreateDataSource(); + + await using (var connection = await dataSource1.OpenConnectionAsync()) + { + await connection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS ENUM ('sad', 'ok', 'happy')"); + await connection.ReloadTypesAsync(); + + await AssertType(connection, Mood.Happy, "happy", type, npgsqlDbType: null); + } + + NpgsqlConnection.GlobalTypeMapper.UnmapEnum(typeof(Mood), type); + + // Global mapping changes have no effect on already-built data sources + await AssertType(dataSource1, Mood.Happy, "happy", type, npgsqlDbType: null); + + // But they do affect new data sources + await using var dataSource2 = CreateDataSource(); + Assert.ThrowsAsync(() => AssertType(dataSource2, Mood.Happy, "happy", type, npgsqlDbType: null)); + } + finally + { + NpgsqlConnection.GlobalTypeMapper.UnmapEnum(type); + } + } + [Test] public async Task Reset() { @@ -60,7 +94,7 @@ public async Task Reset() // Global mapping changes have no effect on already-built data sources await AssertType(dataSource1, Mood.Happy, 
"happy", type, npgsqlDbType: null); - // But they do affect on new data sources + // But they do affect new data sources await using var dataSource2 = CreateDataSource(); await AssertType(dataSource2, "happy", "happy", type, npgsqlDbType: null, isDefault: false); } diff --git a/test/Npgsql.Tests/Types/EnumTests.cs b/test/Npgsql.Tests/Types/EnumTests.cs index bdc05fc512..652cf45576 100644 --- a/test/Npgsql.Tests/Types/EnumTests.cs +++ b/test/Npgsql.Tests/Types/EnumTests.cs @@ -29,6 +29,53 @@ public async Task Data_source_mapping() await AssertType(dataSource, Mood.Happy, "happy", type, npgsqlDbType: null); } + [Test] + public async Task Data_source_unmap() + { + await using var adminConnection = await OpenConnectionAsync(); + var type = await GetTempTypeName(adminConnection); + await adminConnection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS ENUM ('sad', 'ok', 'happy')"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.MapEnum(type); + + var isUnmapSuccessful = dataSourceBuilder.UnmapEnum(type); + await using var dataSource = dataSourceBuilder.Build(); + + Assert.IsTrue(isUnmapSuccessful); + Assert.ThrowsAsync(() => AssertType(dataSource, Mood.Happy, "happy", type, npgsqlDbType: null)); + } + + [Test] + public async Task Data_source_mapping_non_generic() + { + await using var adminConnection = await OpenConnectionAsync(); + var type = await GetTempTypeName(adminConnection); + await adminConnection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS ENUM ('sad', 'ok', 'happy')"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.MapEnum(typeof(Mood), type); + await using var dataSource = dataSourceBuilder.Build(); + await AssertType(dataSource, Mood.Happy, "happy", type, npgsqlDbType: null); + } + + [Test] + public async Task Data_source_unmap_non_generic() + { + await using var adminConnection = await OpenConnectionAsync(); + var type = await GetTempTypeName(adminConnection); + await 
adminConnection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS ENUM ('sad', 'ok', 'happy')"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.MapEnum(typeof(Mood), type); + + var isUnmapSuccessful = dataSourceBuilder.UnmapEnum(typeof(Mood), type); + await using var dataSource = dataSourceBuilder.Build(); + + Assert.IsTrue(isUnmapSuccessful); + Assert.ThrowsAsync(() => AssertType(dataSource, Mood.Happy, "happy", type, npgsqlDbType: null)); + } + [Test] public async Task Dual_enums() { From 7d69a92aa489a8612d65c53989c2b5ab887d7f26 Mon Sep 17 00:00:00 2001 From: Brar Piening Date: Tue, 7 Nov 2023 22:00:03 +0100 Subject: [PATCH 284/761] Consume the stream in LogicalDecodingMessage if the API consumer doesn't (#5233) Closes #5208 --- .../PgOutput/PgOutputAsyncEnumerable.cs | 1 + .../Replication/PgOutputReplicationTests.cs | 44 ++++++++++++------- 2 files changed, 30 insertions(+), 15 deletions(-) diff --git a/src/Npgsql/Replication/PgOutput/PgOutputAsyncEnumerable.cs b/src/Npgsql/Replication/PgOutput/PgOutputAsyncEnumerable.cs index d7979a55d8..ae26d229f6 100644 --- a/src/Npgsql/Replication/PgOutput/PgOutputAsyncEnumerable.cs +++ b/src/Npgsql/Replication/PgOutput/PgOutputAsyncEnumerable.cs @@ -123,6 +123,7 @@ async IAsyncEnumerator StartReplicationInternal(Canc data.Init(checked((int)length), canSeek: false, commandScoped: false); yield return _logicalDecodingMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, transactionXid, flags, messageLsn, prefix, data); + await data.DisposeAsync().ConfigureAwait(false); continue; } case BackendReplicationMessageCode.Commit: diff --git a/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs b/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs index 0f13770d11..3eb3921b79 100644 --- a/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs +++ b/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs @@ -641,10 +641,11 @@ await c.ExecuteNonQueryAsync(@$" }, 
nameof(Dispose_while_replicating)); [Platform(Exclude = "MacOsX", Reason = "Test is flaky in CI on Mac, see https://github.com/npgsql/npgsql/issues/5294")] - [TestCase(true)] - [TestCase(false)] + [TestCase(true, true)] + [TestCase(true, false)] + [TestCase(false, false)] [Test(Description = "Tests whether logical decoding messages get replicated as Logical Replication Protocol Messages on PostgreSQL 14 and above")] - public Task LogicalDecodingMessage(bool writeMessages) + public Task LogicalDecodingMessage(bool writeMessages, bool readMessages) => SafeReplicationTest( async (slotName, tableName, publicationName) => { @@ -689,9 +690,12 @@ public Task LogicalDecodingMessage(bool writeMessages) Assert.That(msg.Flags, Is.EqualTo(1)); Assert.That(msg.Prefix, Is.EqualTo(prefix)); Assert.That(msg.Data.Length, Is.EqualTo(transactionalMessage.Length)); - var buffer = new MemoryStream(); - await msg.Data.CopyToAsync(buffer, CancellationToken.None); - Assert.That(rc.Encoding.GetString(buffer.ToArray()), Is.EqualTo(transactionalMessage)); + if (readMessages) + { + var buffer = new MemoryStream(); + await msg.Data.CopyToAsync(buffer, CancellationToken.None); + Assert.That(rc.Encoding.GetString(buffer.ToArray()), Is.EqualTo(transactionalMessage)); + } } // Relation @@ -712,9 +716,12 @@ public Task LogicalDecodingMessage(bool writeMessages) Assert.That(msg.Flags, Is.EqualTo(0)); Assert.That(msg.Prefix, Is.EqualTo(prefix)); Assert.That(msg.Data.Length, Is.EqualTo(nonTransactionalMessage.Length)); - var buffer = new MemoryStream(); - await msg.Data.CopyToAsync(buffer, CancellationToken.None); - Assert.That(rc.Encoding.GetString(buffer.ToArray()), Is.EqualTo(nonTransactionalMessage)); + if (readMessages) + { + var buffer = new MemoryStream(); + await msg.Data.CopyToAsync(buffer, CancellationToken.None); + Assert.That(rc.Encoding.GetString(buffer.ToArray()), Is.EqualTo(nonTransactionalMessage)); + } } if (IsStreaming) @@ -737,9 +744,12 @@ public Task LogicalDecodingMessage(bool 
writeMessages) Assert.That(msg.Flags, Is.EqualTo(1)); Assert.That(msg.Prefix, Is.EqualTo(prefix)); Assert.That(msg.Data.Length, Is.EqualTo(transactionalMessage.Length)); - var buffer = new MemoryStream(); - await msg.Data.CopyToAsync(buffer, CancellationToken.None); - Assert.That(rc.Encoding.GetString(buffer.ToArray()), Is.EqualTo(transactionalMessage)); + if (readMessages) + { + var buffer = new MemoryStream(); + await msg.Data.CopyToAsync(buffer, CancellationToken.None); + Assert.That(rc.Encoding.GetString(buffer.ToArray()), Is.EqualTo(transactionalMessage)); + } } // Further inserts @@ -767,9 +777,13 @@ public Task LogicalDecodingMessage(bool writeMessages) Assert.That(msg.Flags, Is.EqualTo(0)); Assert.That(msg.Prefix, Is.EqualTo(prefix)); Assert.That(msg.Data.Length, Is.EqualTo(nonTransactionalMessage.Length)); - var buffer = new MemoryStream(); - await msg.Data.CopyToAsync(buffer, CancellationToken.None); - Assert.That(rc.Encoding.GetString(buffer.ToArray()), Is.EqualTo(nonTransactionalMessage)); + if (readMessages) + { + var buffer = new MemoryStream(); + await msg.Data.CopyToAsync(buffer, CancellationToken.None); + Assert.That(rc.Encoding.GetString(buffer.ToArray()), Is.EqualTo(nonTransactionalMessage)); + } + if (IsStreaming) await messages.MoveNextAsync(); } From 662d8b30651526a403343c7b66605f0da6704de9 Mon Sep 17 00:00:00 2001 From: Brar Piening Date: Tue, 7 Nov 2023 22:25:16 +0100 Subject: [PATCH 285/761] Add a test to verify #4181 (#4589) --- test/Npgsql.Tests/MultipleHostsTests.cs | 108 ++++++++++++++++++++++++ 1 file changed, 108 insertions(+) diff --git a/test/Npgsql.Tests/MultipleHostsTests.cs b/test/Npgsql.Tests/MultipleHostsTests.cs index 2b2c3f5304..bbd2064504 100644 --- a/test/Npgsql.Tests/MultipleHostsTests.cs +++ b/test/Npgsql.Tests/MultipleHostsTests.cs @@ -1059,6 +1059,114 @@ public async Task Build_with_multiple_hosts_is_supported() await using var connection = await dataSource.OpenConnectionAsync(); } + [Test, 
IssueLink("https://github.com/npgsql/npgsql/issues/4181")] + [Explicit("Fails until #4181 is fixed.")] + public async Task LoadBalancing_is_fair_if_first_host_is_down([Values]TargetSessionAttributes targetSessionAttributes) + { + await using var pDown = PgPostmasterMock.Start(state: Primary, startupErrorCode: PostgresErrorCodes.CannotConnectNow); + await using var pRw1 = PgPostmasterMock.Start(state: Primary); + await using var pR1 = PgPostmasterMock.Start(state: PrimaryReadOnly); + await using var s1 = PgPostmasterMock.Start(state: Standby); + await using var pRw2 = PgPostmasterMock.Start(state: Primary); + await using var pR2 = PgPostmasterMock.Start(state: PrimaryReadOnly); + await using var s2 = PgPostmasterMock.Start(state: Standby); + + var hostList = $"{pDown.Host}:{pDown.Port}," + + $"{pRw1.Host}:{pRw1.Port}," + + $"{pR1.Host}:{pR1.Port}," + + $"{s1.Host}:{s1.Port}," + + $"{pRw2.Host}:{pRw2.Port}," + + $"{pR2.Host}:{pR2.Port}," + + $"{s2.Host}:{s2.Port}"; + + await using var dataSource = CreateDataSource(builder => + { + builder.Host = hostList; + builder.ServerCompatibilityMode = ServerCompatibilityMode.NoTypeLoading; + builder.LoadBalanceHosts = true; + builder.TargetSessionAttributesParsed = targetSessionAttributes; + + }); + var connections = Enumerable.Repeat(0, 12).Select(_ => dataSource.OpenConnection()).ToArray(); + await using var __ = new DisposableWrapper(connections); + + switch (targetSessionAttributes) + { + case TargetSessionAttributes.Any: + Assert.That(connections[0].Port, Is.EqualTo(pRw1.Port)); + Assert.That(connections[1].Port, Is.EqualTo(pR1.Port)); + Assert.That(connections[2].Port, Is.EqualTo(s1.Port)); + Assert.That(connections[3].Port, Is.EqualTo(pRw2.Port)); + Assert.That(connections[4].Port, Is.EqualTo(pR2.Port)); + Assert.That(connections[5].Port, Is.EqualTo(s2.Port)); + Assert.That(connections[6].Port, Is.EqualTo(pRw1.Port)); + Assert.That(connections[7].Port, Is.EqualTo(pR1.Port)); + Assert.That(connections[8].Port, 
Is.EqualTo(s1.Port)); + Assert.That(connections[9].Port, Is.EqualTo(pRw2.Port)); + Assert.That(connections[10].Port, Is.EqualTo(pR2.Port)); + Assert.That(connections[11].Port, Is.EqualTo(s2.Port)); + break; + case TargetSessionAttributes.ReadWrite: + Assert.That(connections[0].Port, Is.EqualTo(pRw1.Port)); + Assert.That(connections[1].Port, Is.EqualTo(pRw2.Port)); + Assert.That(connections[2].Port, Is.EqualTo(pRw1.Port)); + Assert.That(connections[3].Port, Is.EqualTo(pRw2.Port)); + Assert.That(connections[4].Port, Is.EqualTo(pRw1.Port)); + Assert.That(connections[5].Port, Is.EqualTo(pRw2.Port)); + Assert.That(connections[6].Port, Is.EqualTo(pRw1.Port)); + Assert.That(connections[7].Port, Is.EqualTo(pRw2.Port)); + Assert.That(connections[8].Port, Is.EqualTo(pRw1.Port)); + Assert.That(connections[9].Port, Is.EqualTo(pRw2.Port)); + Assert.That(connections[10].Port, Is.EqualTo(pRw1.Port)); + Assert.That(connections[11].Port, Is.EqualTo(pRw2.Port)); + break; + case TargetSessionAttributes.ReadOnly: + Assert.That(connections[0].Port, Is.EqualTo(pR1.Port)); + Assert.That(connections[1].Port, Is.EqualTo(s1.Port)); + Assert.That(connections[2].Port, Is.EqualTo(pR2.Port)); + Assert.That(connections[3].Port, Is.EqualTo(s2.Port)); + Assert.That(connections[4].Port, Is.EqualTo(pR1.Port)); + Assert.That(connections[5].Port, Is.EqualTo(s1.Port)); + Assert.That(connections[6].Port, Is.EqualTo(pR2.Port)); + Assert.That(connections[7].Port, Is.EqualTo(s2.Port)); + Assert.That(connections[8].Port, Is.EqualTo(pR1.Port)); + Assert.That(connections[9].Port, Is.EqualTo(s1.Port)); + Assert.That(connections[10].Port, Is.EqualTo(pR2.Port)); + Assert.That(connections[11].Port, Is.EqualTo(s2.Port)); + break; + case TargetSessionAttributes.Primary: + case TargetSessionAttributes.PreferPrimary: + Assert.That(connections[0].Port, Is.EqualTo(pRw1.Port)); + Assert.That(connections[1].Port, Is.EqualTo(pR1.Port)); + Assert.That(connections[2].Port, Is.EqualTo(pRw2.Port)); + 
Assert.That(connections[3].Port, Is.EqualTo(pR2.Port)); + Assert.That(connections[4].Port, Is.EqualTo(pRw1.Port)); + Assert.That(connections[5].Port, Is.EqualTo(pR1.Port)); + Assert.That(connections[6].Port, Is.EqualTo(pRw2.Port)); + Assert.That(connections[7].Port, Is.EqualTo(pR2.Port)); + Assert.That(connections[8].Port, Is.EqualTo(pRw1.Port)); + Assert.That(connections[9].Port, Is.EqualTo(pR1.Port)); + Assert.That(connections[10].Port, Is.EqualTo(pRw2.Port)); + Assert.That(connections[11].Port, Is.EqualTo(pR2.Port)); + break; + case TargetSessionAttributes.Standby: + case TargetSessionAttributes.PreferStandby: + Assert.That(connections[0].Port, Is.EqualTo(s1.Port)); + Assert.That(connections[1].Port, Is.EqualTo(s2.Port)); + Assert.That(connections[2].Port, Is.EqualTo(s1.Port)); + Assert.That(connections[3].Port, Is.EqualTo(s2.Port)); + Assert.That(connections[4].Port, Is.EqualTo(s1.Port)); + Assert.That(connections[5].Port, Is.EqualTo(s2.Port)); + Assert.That(connections[6].Port, Is.EqualTo(s1.Port)); + Assert.That(connections[7].Port, Is.EqualTo(s2.Port)); + Assert.That(connections[8].Port, Is.EqualTo(s1.Port)); + Assert.That(connections[9].Port, Is.EqualTo(s2.Port)); + Assert.That(connections[10].Port, Is.EqualTo(s1.Port)); + Assert.That(connections[11].Port, Is.EqualTo(s2.Port)); + break; + } + } + static string MultipleHosts(params PgPostmasterMock[] postmasters) => string.Join(",", postmasters.Select(p => $"{p.Host}:{p.Port}")); From f930b0786bbbf6a7ab74ee581c0fafe30b33bd70 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 9 Nov 2023 01:26:13 +0100 Subject: [PATCH 286/761] Bump Microsoft.NET.Test.Sdk from 17.7.2 to 17.8.0 (#5381) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index aaa20176f8..80082c149c 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -36,7 +36,7 @@ - + From 
86cf1798a9218b2906e36892d21466a194ebe0e5 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Thu, 9 Nov 2023 15:56:55 +0100 Subject: [PATCH 287/761] Move flush to prevent unwanted flushes (#5385) Fixes #5382 --- src/Npgsql/Internal/PgWriter.cs | 12 ++++++------ test/Npgsql.Tests/WriteBufferTests.cs | 17 ++++++++++++++++- 2 files changed, 22 insertions(+), 7 deletions(-) diff --git a/src/Npgsql/Internal/PgWriter.cs b/src/Npgsql/Internal/PgWriter.cs index 7a60a35a2d..4321c10efa 100644 --- a/src/Npgsql/Internal/PgWriter.cs +++ b/src/Npgsql/Internal/PgWriter.cs @@ -356,12 +356,12 @@ internal void WriteBytes(bool allowMixedIO, ReadOnlySpan buffer) { while (!buffer.IsEmpty) { + if (Remaining is 0) + Flush(allowWhenNonBlocking: allowMixedIO); var write = Math.Min(buffer.Length, Remaining); buffer.Slice(0, write).CopyTo(Span); Advance(write); buffer = buffer.Slice(write); - if (Remaining is 0) - Flush(allowWhenNonBlocking: allowMixedIO); } } @@ -377,18 +377,18 @@ internal ValueTask WriteBytesAsync(bool allowMixedIO, ReadOnlyMemory buffe return new(); } - return Core(buffer, cancellationToken); + return Core(allowMixedIO, buffer, cancellationToken); - async ValueTask Core(ReadOnlyMemory buffer, CancellationToken cancellationToken) + async ValueTask Core(bool allowMixedIO, ReadOnlyMemory buffer, CancellationToken cancellationToken) { while (!buffer.IsEmpty) { + if (Remaining is 0) + await FlushAsync(allowWhenBlocking: allowMixedIO, cancellationToken).ConfigureAwait(false); var write = Math.Min(buffer.Length, Remaining); buffer.Span.Slice(0, write).CopyTo(Span); Advance(write); buffer = buffer.Slice(write); - if (Remaining is 0) - await FlushAsync(cancellationToken).ConfigureAwait(false); } } } diff --git a/test/Npgsql.Tests/WriteBufferTests.cs b/test/Npgsql.Tests/WriteBufferTests.cs index 5bd6cdf5a1..d1fbe68071 100644 --- a/test/Npgsql.Tests/WriteBufferTests.cs +++ b/test/Npgsql.Tests/WriteBufferTests.cs @@ -1,4 +1,5 @@ -using System.IO; +using System; +using System.IO; using 
Npgsql.Internal; using NUnit.Framework; @@ -7,6 +8,20 @@ namespace Npgsql.Tests; [FixtureLifeCycle(LifeCycle.InstancePerTestCase)] // Parallel access to a single buffer class WriteBufferTests { + [Test] + public void Buffered_full_buffer_no_flush() + { + WriteBuffer.WritePosition += WriteBuffer.WriteSpaceLeft - sizeof(int); + var writer = WriteBuffer.GetWriter(null!, FlushMode.NonBlocking); + Assert.That(writer.ShouldFlush(sizeof(int)), Is.False); + + Assert.DoesNotThrow(() => + { + Span intBytes = stackalloc byte[4]; + writer.WriteBytes(intBytes); + }); + } + [Test] public void GetWriter_Full_Buffer() { From d05ef7eced905d64675b39fd5d5429cd09023596 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Thu, 9 Nov 2023 18:43:53 +0100 Subject: [PATCH 288/761] Fixes to OpenTelemetry metrics support (#5379) * Do not emit Nan/Infinity measurements for prepared command ratio. * Change prepared command ratio to be a gauge, as it's not additive. * Change connection timeouts to be a non-up/down counter, aligning with the semantic conventions. 
Fixes #5369 --- src/Npgsql/MetricsReporter.cs | 54 +++++++++++++++++------------------ 1 file changed, 26 insertions(+), 28 deletions(-) diff --git a/src/Npgsql/MetricsReporter.cs b/src/Npgsql/MetricsReporter.cs index 27cd32f7d3..f92cfdcf37 100644 --- a/src/Npgsql/MetricsReporter.cs +++ b/src/Npgsql/MetricsReporter.cs @@ -25,9 +25,9 @@ sealed class MetricsReporter : IDisposable static readonly Counter BytesRead; static readonly UpDownCounter PendingConnectionRequests; - static readonly UpDownCounter ConnectionTimeouts; + static readonly Counter ConnectionTimeouts; static readonly Histogram ConnectionCreateTime; - static readonly ObservableUpDownCounter PreparedRatio; + static readonly ObservableGauge PreparedRatio; readonly NpgsqlDataSource _dataSource; readonly KeyValuePair _poolNameTag; @@ -48,23 +48,20 @@ static MetricsReporter() { Meter = new("Npgsql", Version); - CommandsExecuting = - Meter.CreateUpDownCounter( - "db.client.commands.executing", - unit: "{command}", - description: "The number of currently executing database commands."); + CommandsExecuting = Meter.CreateUpDownCounter( + "db.client.commands.executing", + unit: "{command}", + description: "The number of currently executing database commands."); - CommandsFailed - = Meter.CreateCounter( - "db.client.commands.failed", - unit: "{command}", - description: "The number of database commands which have failed."); + CommandsFailed = Meter.CreateCounter( + "db.client.commands.failed", + unit: "{command}", + description: "The number of database commands which have failed."); - CommandDuration - = Meter.CreateHistogram( - "db.client.commands.duration", - unit: "s", - description: "The duration of database commands, in seconds."); + CommandDuration = Meter.CreateHistogram( + "db.client.commands.duration", + unit: "s", + description: "The duration of database commands, in seconds."); BytesWritten = Meter.CreateCounter( "db.client.commands.bytes_written", @@ -79,17 +76,17 @@ static MetricsReporter() 
PendingConnectionRequests = Meter.CreateUpDownCounter( "db.client.connections.pending_requests", unit: "{request}", - "The number of pending requests for an open connection, cumulative for the entire pool."); + description: "The number of pending requests for an open connection, cumulative for the entire pool."); - ConnectionTimeouts = Meter.CreateUpDownCounter( + ConnectionTimeouts = Meter.CreateCounter( "db.client.connections.timeouts", unit: "{timeout}", description: "The number of connection timeouts that have occurred trying to obtain a connection from the pool."); - ConnectionCreateTime - = Meter.CreateHistogram( + + ConnectionCreateTime = Meter.CreateHistogram( "db.client.connections.create_time", - "s", - "The time it took to create a new connection."); + unit: "s", + description: "The time it took to create a new connection."); // Observable metrics; these are for values we already track internally (and efficiently) inside the connection pool implementation. Meter.CreateObservableUpDownCounter( @@ -107,7 +104,7 @@ static MetricsReporter() unit: "{connection}", description: "The maximum number of open connections allowed."); - PreparedRatio = Meter.CreateObservableUpDownCounter( + PreparedRatio = Meter.CreateObservableGauge( "db.client.commands.prepared_ratio", GetPreparedCommandsRatio, description: "The ratio of prepared command executions."); @@ -223,7 +220,7 @@ static IEnumerable> GetPreparedCommandsRatio() { lock (Reporters) { - var measurements = new Measurement[Reporters.Count]; + var measurements = new List>(Reporters.Count); for (var i = 0; i < Reporters.Count; i++) { @@ -234,9 +231,10 @@ static IEnumerable> GetPreparedCommandsRatio() All = Interlocked.Exchange(ref reporter._commandCounters.All, default) }; - measurements[i] = new Measurement( - (double)counters.PreparedCommandsStarted / counters.CommandsStarted * 100, - reporter._poolNameTag); + var value = (double)counters.PreparedCommandsStarted / counters.CommandsStarted * 100; + + if 
(double.IsNormal(value)) + measurements.Add(new Measurement(value, reporter._poolNameTag)); } return measurements; From fadcfeee0d15017494822c9a10466fe26ddcd1da Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Fri, 10 Nov 2023 10:54:58 +0100 Subject: [PATCH 289/761] Correct metrics to use IsFinite instead of IsNormal (#5388) --- src/Npgsql/MetricsReporter.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Npgsql/MetricsReporter.cs b/src/Npgsql/MetricsReporter.cs index f92cfdcf37..f29f0c47e2 100644 --- a/src/Npgsql/MetricsReporter.cs +++ b/src/Npgsql/MetricsReporter.cs @@ -233,7 +233,7 @@ static IEnumerable> GetPreparedCommandsRatio() var value = (double)counters.PreparedCommandsStarted / counters.CommandsStarted * 100; - if (double.IsNormal(value)) + if (double.IsFinite(value)) measurements.Add(new Measurement(value, reporter._poolNameTag)); } From dfa5c16d255bfac1b93277cdb187325dc24ea434 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Mon, 13 Nov 2023 12:42:22 +0100 Subject: [PATCH 290/761] Remove obsoletions (#5393) Closes #4520 Closes #4756 Closes #4946 --- src/Npgsql/NpgsqlConnection.cs | 6 - src/Npgsql/NpgsqlConnectionStringBuilder.cs | 160 ------------------ src/Npgsql/NpgsqlLargeObjectManager.cs | 4 +- src/Npgsql/NpgsqlLargeObjectStream.cs | 1 + src/Npgsql/NpgsqlNotificationEventArgs.cs | 14 +- src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs | 1 + src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs | 3 +- src/Npgsql/PostgresException.cs | 11 -- src/Npgsql/PostgresNotice.cs | 12 +- src/Npgsql/PublicAPI.Unshipped.txt | 30 ++++ .../PgOutput/Messages/DeleteMessage.cs | 8 +- .../PgOutput/Messages/InsertMessage.cs | 8 +- .../PgOutput/Messages/UpdateMessage.cs | 6 - test/Npgsql.Tests/LargeObjectTests.cs | 4 +- .../Npgsql.Tests/Types/FullTextSearchTests.cs | 2 + test/Npgsql.Tests/TypesTests.cs | 4 + 16 files changed, 50 insertions(+), 224 deletions(-) diff --git a/src/Npgsql/NpgsqlConnection.cs b/src/Npgsql/NpgsqlConnection.cs index bb71b6a6b6..e55a9ba38c 
100644 --- a/src/Npgsql/NpgsqlConnection.cs +++ b/src/Npgsql/NpgsqlConnection.cs @@ -453,12 +453,6 @@ public override string ConnectionString /// public override string DataSource => Connector?.Settings.DataSourceCached ?? _dataSource?.Settings.DataSourceCached ?? string.Empty; - /// - /// Whether to use Windows integrated security to log in. - /// - [Obsolete("The IntegratedSecurity parameter is no longer needed and does nothing.")] - public bool IntegratedSecurity => Settings.IntegratedSecurity; - /// /// User name. /// diff --git a/src/Npgsql/NpgsqlConnectionStringBuilder.cs b/src/Npgsql/NpgsqlConnectionStringBuilder.cs index 1f5858bfaa..86f0edaa28 100644 --- a/src/Npgsql/NpgsqlConnectionStringBuilder.cs +++ b/src/Npgsql/NpgsqlConnectionStringBuilder.cs @@ -1393,166 +1393,6 @@ public ServerCompatibilityMode ServerCompatibilityMode #region Properties - Obsolete - /// - /// Whether to use Windows integrated security to log in. - /// - [Category("Security")] - [Description("Whether to use Windows integrated security to log in.")] - [DisplayName("Integrated Security")] - [NpgsqlConnectionStringProperty] - [Obsolete("The IntegratedSecurity parameter is no longer needed and does nothing.")] - public bool IntegratedSecurity - { - get => _integratedSecurity; - set - { - _integratedSecurity = value; - SetValue(nameof(IntegratedSecurity), value); - } - } - bool _integratedSecurity; - - /// - /// Obsolete, see https://www.npgsql.org/doc/release-notes/6.0.html - /// - [Category("Compatibility")] - [Description("Makes MaxValue and MinValue timestamps and dates readable as infinity and negative infinity.")] - [DisplayName("Convert Infinity DateTime")] - [NpgsqlConnectionStringProperty] - [Obsolete("The ConvertInfinityDateTime parameter is no longer supported.")] - public bool ConvertInfinityDateTime - { - get => false; - set => throw new NotSupportedException("The Convert Infinity DateTime parameter is no longer supported; Npgsql 6.0 and above convert min/max values to 
Infinity by default. See https://www.npgsql.org/doc/types/datetime.html for more details."); - } - - /// - /// Obsolete, see https://www.npgsql.org/doc/release-notes/3.1.html - /// - [Category("Obsolete")] - [Description("Obsolete, see https://www.npgsql.org/doc/release-notes/3.1.html")] - [DisplayName("Continuous Processing")] - [NpgsqlConnectionStringProperty] - [Obsolete("The ContinuousProcessing parameter is no longer supported.")] - public bool ContinuousProcessing - { - get => false; - set => throw new NotSupportedException("The ContinuousProcessing parameter is no longer supported. Please see https://www.npgsql.org/doc/release-notes/3.1.html"); - } - - /// - /// Obsolete, see https://www.npgsql.org/doc/release-notes/3.1.html - /// - [Category("Obsolete")] - [Description("Obsolete, see https://www.npgsql.org/doc/release-notes/3.1.html")] - [DisplayName("Backend Timeouts")] - [NpgsqlConnectionStringProperty] - [Obsolete("The BackendTimeouts parameter is no longer supported")] - public bool BackendTimeouts - { - get => false; - set => throw new NotSupportedException("The BackendTimeouts parameter is no longer supported. Please see https://www.npgsql.org/doc/release-notes/3.1.html"); - } - - /// - /// Obsolete, see https://www.npgsql.org/doc/release-notes/3.0.html - /// - [Category("Obsolete")] - [Description("Obsolete, see https://www.npgsql.org/doc/v/3.0.html")] - [DisplayName("Preload Reader")] - [NpgsqlConnectionStringProperty] - [Obsolete("The PreloadReader parameter is no longer supported")] - public bool PreloadReader - { - get => false; - set => throw new NotSupportedException("The PreloadReader parameter is no longer supported. 
Please see https://www.npgsql.org/doc/release-notes/3.0.html"); - } - - /// - /// Obsolete, see https://www.npgsql.org/doc/release-notes/3.0.html - /// - [Category("Obsolete")] - [Description("Obsolete, see https://www.npgsql.org/doc/release-notes/3.0.html")] - [DisplayName("Use Extended Types")] - [NpgsqlConnectionStringProperty] - [Obsolete("The UseExtendedTypes parameter is no longer supported")] - public bool UseExtendedTypes - { - get => false; - set => throw new NotSupportedException("The UseExtendedTypes parameter is no longer supported. Please see https://www.npgsql.org/doc/release-notes/3.0.html"); - } - - /// - /// Obsolete, see https://www.npgsql.org/doc/release-notes/4.1.html - /// - [Category("Obsolete")] - [Description("Obsolete, see https://www.npgsql.org/doc/release-notes/4.1.html")] - [DisplayName("Use Ssl Stream")] - [NpgsqlConnectionStringProperty] - [Obsolete("The UseSslStream parameter is no longer supported (always true)")] - public bool UseSslStream - { - get => true; - set => throw new NotSupportedException("The UseSslStream parameter is no longer supported (SslStream is always used). Please see https://www.npgsql.org/doc/release-notes/4.1.html"); - } - - /// - /// Writes connection performance information to performance counters. - /// - [Category("Obsolete")] - [Description("Writes connection performance information to performance counters.")] - [DisplayName("Use Perf Counters")] - [NpgsqlConnectionStringProperty] - [Obsolete("The UsePerfCounters parameter is no longer supported")] - public bool UsePerfCounters - { - get => false; - set => throw new NotSupportedException("The UsePerfCounters parameter is no longer supported. Please see https://www.npgsql.org/doc/release-notes/5.0.html"); - } - - /// - /// Location of a client certificate to be sent to the server. 
- /// - [Category("Obsolete")] - [Description("Location of a client certificate to be sent to the server.")] - [DisplayName("Client Certificate")] - [NpgsqlConnectionStringProperty] - [Obsolete("Use NpgsqlConnectionStringBuilder.SslKey instead")] - public string? ClientCertificate - { - get => SslKey; - set => SslKey = value; - } - - /// - /// Key for a client certificate to be sent to the server. - /// - [Category("Obsolete")] - [Description("Key for a client certificate to be sent to the server.")] - [DisplayName("Client Certificate Key")] - [NpgsqlConnectionStringProperty] - [Obsolete("Use NpgsqlConnectionStringBuilder.SslPassword instead")] - public string? ClientCertificateKey - { - get => SslPassword; - set => SslPassword = value; - } - - /// - /// When enabled, PostgreSQL error details are included on and - /// . These can contain sensitive data. - /// - [Category("Obsolete")] - [Description("When enabled, PostgreSQL error and notice details are included on PostgresException.Detail and PostgresNotice.Detail. These can contain sensitive data.")] - [DisplayName("Include Error Details")] - [NpgsqlConnectionStringProperty] - [Obsolete("Use NpgsqlConnectionStringBuilder.IncludeErrorDetail instead")] - public bool IncludeErrorDetails - { - get => IncludeErrorDetail; - set => IncludeErrorDetail = value; - } - /// /// Whether to trust the server certificate without validating it. /// diff --git a/src/Npgsql/NpgsqlLargeObjectManager.cs b/src/Npgsql/NpgsqlLargeObjectManager.cs index 74b30e535e..2bc6c02751 100644 --- a/src/Npgsql/NpgsqlLargeObjectManager.cs +++ b/src/Npgsql/NpgsqlLargeObjectManager.cs @@ -1,4 +1,5 @@ -using Npgsql.Util; +using System; +using Npgsql.Util; using System.Data; using System.Text; using System.Threading; @@ -9,6 +10,7 @@ namespace Npgsql; /// /// Large object manager. This class can be used to store very large files in a PostgreSQL database. 
/// +[Obsolete("NpgsqlLargeObjectManager allows manipulating PostgreSQL large objects via publicly available PostgreSQL functions (lo_read, lo_write); call these yourself directly.")] public class NpgsqlLargeObjectManager { const int InvWrite = 0x00020000; diff --git a/src/Npgsql/NpgsqlLargeObjectStream.cs b/src/Npgsql/NpgsqlLargeObjectStream.cs index 42c757a237..2f3c8b19b0 100644 --- a/src/Npgsql/NpgsqlLargeObjectStream.cs +++ b/src/Npgsql/NpgsqlLargeObjectStream.cs @@ -10,6 +10,7 @@ namespace Npgsql; /// An interface to remotely control the seekable stream for an opened large object on a PostgreSQL server. /// Note that the OpenRead/OpenReadWrite method as well as all operations performed on this stream must be wrapped inside a database transaction. /// +[Obsolete("NpgsqlLargeObjectStream allows manipulating PostgreSQL large objects via publicly available PostgreSQL functions (lo_read, lo_write); call these yourself directly.")] public sealed class NpgsqlLargeObjectStream : Stream { readonly NpgsqlLargeObjectManager _manager; diff --git a/src/Npgsql/NpgsqlNotificationEventArgs.cs b/src/Npgsql/NpgsqlNotificationEventArgs.cs index 454ccdc98a..82e00b18a6 100644 --- a/src/Npgsql/NpgsqlNotificationEventArgs.cs +++ b/src/Npgsql/NpgsqlNotificationEventArgs.cs @@ -26,22 +26,10 @@ public sealed class NpgsqlNotificationEventArgs : EventArgs /// public string Payload { get; } - /// - /// The channel on which the notification was sent. - /// - [Obsolete("Use Channel instead")] - public string Condition => Channel; - - /// - /// An optional payload string that was sent with this notification. 
- /// - [Obsolete("Use Payload instead")] - public string AdditionalInformation => Payload; - internal NpgsqlNotificationEventArgs(NpgsqlReadBuffer buf) { PID = buf.ReadInt32(); Channel = buf.ReadNullTerminatedString(); Payload = buf.ReadNullTerminatedString(); } -} \ No newline at end of file +} diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs b/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs index cd68ecc3c8..9d49dc7c3a 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs @@ -77,6 +77,7 @@ public override string ToString() /// /// /// + [Obsolete("Client-side parsing of NpgsqlTsQuery is unreliable and cannot fully duplicate the PostgreSQL logic. Use PG functions instead (e.g. to_tsquery)")] public static NpgsqlTsQuery Parse(string value) { if (value == null) diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs b/src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs index 0cb4f5e371..76f097f0ac 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs @@ -74,6 +74,7 @@ internal NpgsqlTsVector(List lexemes, bool noCheck = false) /// /// /// + [Obsolete("Client-side parsing of NpgsqlTsVector is unreliable and cannot fully duplicate the PostgreSQL logic. Use PG functions instead (e.g. to_tsvector)")] public static NpgsqlTsVector Parse(string value) { if (value == null) @@ -551,4 +552,4 @@ public bool Equals(Lexeme o) /// public static bool operator !=(Lexeme left, Lexeme right) => !left.Equals(right); } -} \ No newline at end of file +} diff --git a/src/Npgsql/PostgresException.cs b/src/Npgsql/PostgresException.cs index 4b2377a363..51b8e1e543 100644 --- a/src/Npgsql/PostgresException.cs +++ b/src/Npgsql/PostgresException.cs @@ -269,17 +269,6 @@ public override bool IsTransient public string SqlState { get; } #endif - /// - /// The SQLSTATE code for the error. - /// - /// - /// Always present. - /// Constants are defined in . 
- /// See https://www.postgresql.org/docs/current/static/errcodes-appendix.html - /// - [Obsolete("Use SqlState instead")] - public string Code => SqlState; - /// /// The primary human-readable error message. This should be accurate but terse. /// diff --git a/src/Npgsql/PostgresNotice.cs b/src/Npgsql/PostgresNotice.cs index 62e7b886c6..ef55ad4e13 100644 --- a/src/Npgsql/PostgresNotice.cs +++ b/src/Npgsql/PostgresNotice.cs @@ -38,16 +38,6 @@ public sealed class PostgresNotice /// public string SqlState { get; set; } - /// - /// The SQLSTATE code for the error. - /// - /// - /// Always present. - /// See https://www.postgresql.org/docs/current/static/errcodes-appendix.html - /// - [Obsolete("Use SqlState instead")] - public string Code => SqlState; - /// /// The primary human-readable error message. This should be accurate but terse. /// @@ -211,4 +201,4 @@ internal NpgsqlNoticeEventArgs(PostgresNotice notice) { Notice = notice; } -} \ No newline at end of file +} diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index 592055725d..2336158e2c 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -122,10 +122,40 @@ static Npgsql.INpgsqlTypeMapperExtensions.EnableRecordsAsTuples(this T mapper static Npgsql.INpgsqlTypeMapperExtensions.EnableUnmappedTypes(this T mapper) -> T static NpgsqlTypes.NpgsqlCidr.explicit operator System.Net.IPAddress!(NpgsqlTypes.NpgsqlCidr cidr) -> System.Net.IPAddress! static NpgsqlTypes.NpgsqlCidr.implicit operator NpgsqlTypes.NpgsqlInet(NpgsqlTypes.NpgsqlCidr cidr) -> NpgsqlTypes.NpgsqlInet +*REMOVED*Npgsql.NpgsqlConnection.IntegratedSecurity.get -> bool +*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.BackendTimeouts.get -> bool +*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.BackendTimeouts.set -> void +*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.ClientCertificate.get -> string? 
+*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.ClientCertificate.set -> void +*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.ClientCertificateKey.get -> string? +*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.ClientCertificateKey.set -> void +*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.ContinuousProcessing.get -> bool +*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.ContinuousProcessing.set -> void +*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.ConvertInfinityDateTime.get -> bool +*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.ConvertInfinityDateTime.set -> void +*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.IncludeErrorDetails.get -> bool +*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.IncludeErrorDetails.set -> void +*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.IntegratedSecurity.get -> bool +*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.IntegratedSecurity.set -> void +*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.PreloadReader.get -> bool +*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.PreloadReader.set -> void +*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.UseExtendedTypes.get -> bool +*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.UseExtendedTypes.set -> void +*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.UsePerfCounters.get -> bool +*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.UsePerfCounters.set -> void +*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.UseSslStream.get -> bool +*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.UseSslStream.set -> void +*REMOVED*Npgsql.NpgsqlNotificationEventArgs.AdditionalInformation.get -> string! +*REMOVED*Npgsql.NpgsqlNotificationEventArgs.Condition.get -> string! *REMOVED*Npgsql.NpgsqlParameter.ConvertedValue.get -> object? *REMOVED*Npgsql.NpgsqlParameter.ConvertedValue.set -> void +*REMOVED*Npgsql.PostgresException.Code.get -> string! +*REMOVED*Npgsql.PostgresNotice.Code.get -> string! *REMOVED*Npgsql.PostgresTypes.PostgresArrayType.PostgresArrayType(string! ns, string! internalName, uint oid, Npgsql.PostgresTypes.PostgresType! 
elementPostgresType) -> void *REMOVED*Npgsql.PostgresTypes.PostgresBaseType.PostgresBaseType(string! ns, string! internalName, uint oid) -> void *REMOVED*Npgsql.PostgresTypes.PostgresType.PostgresType(string! ns, string! name, string! internalName, uint oid) -> void *REMOVED*Npgsql.PostgresTypes.PostgresType.PostgresType(string! ns, string! name, uint oid) -> void +*REMOVED*Npgsql.Replication.PgOutput.Messages.DeleteMessage.RelationId.get -> uint +*REMOVED*Npgsql.Replication.PgOutput.Messages.InsertMessage.RelationId.get -> uint +*REMOVED*Npgsql.Replication.PgOutput.Messages.UpdateMessage.RelationId.get -> uint *REMOVED*Npgsql.TypeMapping.INpgsqlTypeMapper.AddTypeResolverFactory(Npgsql.Internal.TypeHandling.TypeHandlerResolverFactory! resolverFactory) -> void diff --git a/src/Npgsql/Replication/PgOutput/Messages/DeleteMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/DeleteMessage.cs index eaacd1ab0c..c1057dabdd 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/DeleteMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/DeleteMessage.cs @@ -13,12 +13,6 @@ public abstract class DeleteMessage : TransactionalMessage /// public RelationMessage Relation { get; private set; } = null!; - /// - /// ID of the relation corresponding to the ID in the relation message. 
- /// - [Obsolete("Use Relation.RelationId")] - public uint RelationId => Relation.RelationId; - private protected DeleteMessage() {} private protected DeleteMessage Populate( @@ -31,4 +25,4 @@ private protected DeleteMessage Populate( return this; } -} \ No newline at end of file +} diff --git a/src/Npgsql/Replication/PgOutput/Messages/InsertMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/InsertMessage.cs index fe862ead1b..df413f6b21 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/InsertMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/InsertMessage.cs @@ -18,12 +18,6 @@ public sealed class InsertMessage : TransactionalMessage /// public RelationMessage Relation { get; private set; } = null!; - /// - /// ID of the relation corresponding to the ID in the relation message. - /// - [Obsolete("Use Relation.RelationId")] - public uint RelationId => Relation.RelationId; - /// /// Columns representing the new row. /// @@ -46,4 +40,4 @@ internal InsertMessage Populate( internal Task Consume(CancellationToken cancellationToken) => _tupleEnumerable.Consume(cancellationToken); -} \ No newline at end of file +} diff --git a/src/Npgsql/Replication/PgOutput/Messages/UpdateMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/UpdateMessage.cs index 8cecc44b6e..135ff0ddaf 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/UpdateMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/UpdateMessage.cs @@ -18,12 +18,6 @@ public abstract class UpdateMessage : TransactionalMessage /// public RelationMessage Relation { get; private set; } = null!; - /// - /// ID of the relation corresponding to the ID in the relation message. - /// - [Obsolete("Use Relation.RelationId")] - public uint RelationId => Relation.RelationId; - /// /// Columns representing the new row. 
/// diff --git a/test/Npgsql.Tests/LargeObjectTests.cs b/test/Npgsql.Tests/LargeObjectTests.cs index 0471ab66ff..fb7179abb4 100644 --- a/test/Npgsql.Tests/LargeObjectTests.cs +++ b/test/Npgsql.Tests/LargeObjectTests.cs @@ -4,6 +4,8 @@ namespace Npgsql.Tests; +#pragma warning disable CS0618 // Large object support is obsolete + public class LargeObjectTests : TestBase { [Test] @@ -45,4 +47,4 @@ public void Test() transaction.Rollback(); } -} \ No newline at end of file +} diff --git a/test/Npgsql.Tests/Types/FullTextSearchTests.cs b/test/Npgsql.Tests/Types/FullTextSearchTests.cs index 359c1b18d6..eda874b12a 100644 --- a/test/Npgsql.Tests/Types/FullTextSearchTests.cs +++ b/test/Npgsql.Tests/Types/FullTextSearchTests.cs @@ -5,6 +5,8 @@ using NpgsqlTypes; using NUnit.Framework; +#pragma warning disable CS0618 // NpgsqlTsVector.Parse is obsolete + namespace Npgsql.Tests.Types; public class FullTextSearchTests : MultiplexingTestBase diff --git a/test/Npgsql.Tests/TypesTests.cs b/test/Npgsql.Tests/TypesTests.cs index de2b1beed0..30bae390d3 100644 --- a/test/Npgsql.Tests/TypesTests.cs +++ b/test/Npgsql.Tests/TypesTests.cs @@ -10,6 +10,7 @@ namespace Npgsql.Tests; /// public class TypesTests { +#pragma warning disable CS0618 // {NpgsqlTsVector,NpgsqlTsQuery}.Parse are obsolete [Test] public void TsVector() { @@ -83,6 +84,7 @@ public void TsQuery() Assert.Throws(typeof(FormatException), () => NpgsqlTsQuery.Parse("a b")); Assert.Throws(typeof(FormatException), () => NpgsqlTsQuery.Parse("a <-1> b")); } +#pragma warning restore CS0618 // {NpgsqlTsVector,NpgsqlTsQuery}.Parse are obsolete [Test] public void TsQueryEquatibility() @@ -173,6 +175,7 @@ void AreNotEqual(NpgsqlTsQuery left, NpgsqlTsQuery right) } } +#pragma warning disable CS0618 // {NpgsqlTsVector,NpgsqlTsQuery}.Parse are obsolete [Test] public void TsQueryOperatorPrecedence() { @@ -180,6 +183,7 @@ public void TsQueryOperatorPrecedence() var expectedGrouping = NpgsqlTsQuery.Parse("((!(a) <-> b) & c) | (d & e)"); 
Assert.AreEqual(expectedGrouping.ToString(), query.ToString()); } +#pragma warning restore CS0618 // {NpgsqlTsVector,NpgsqlTsQuery}.Parse are obsolete [Test] public void NpgsqlPath_empty() From 8e875724cda150eb9ea93fd343c7cb8f02f513df Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Mon, 13 Nov 2023 15:13:47 +0100 Subject: [PATCH 291/761] Properly invalidate prepared statement column info cache after ReloadTypes (#5386) --- .../BackendMessages/RowDescriptionMessage.cs | 14 ++++++++--- src/Npgsql/NpgsqlDataReader.cs | 9 ++++--- test/Npgsql.Tests/CommandTests.cs | 25 +++++++++++++++++++ 3 files changed, 42 insertions(+), 6 deletions(-) diff --git a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs index ee2f120090..c9986da85c 100644 --- a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs +++ b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs @@ -133,17 +133,25 @@ public FieldDescription this[int index] } } - internal void SetConverterInfoCache(ReadOnlySpan values) + internal void SetColumnInfoCache(ReadOnlySpan values) { if (_connectorOwned || _lastConverterInfoCache is not null) return; Interlocked.CompareExchange(ref _lastConverterInfoCache, values.ToArray(), null); } - internal void LoadConverterInfoCache(ColumnInfo[] values) + internal void LoadColumnInfoCache(PgSerializerOptions options, ColumnInfo[] values) { if (_lastConverterInfoCache is not { } cache) return; + + // If the options have changed (for instance due to ReloadTypes) we need to invalidate the cache. 
+ if (Count > 0 && !ReferenceEquals(options, _fields[0]!._serializerOptions)) + { + Interlocked.CompareExchange(ref _lastConverterInfoCache, null, cache); + return; + } + cache.CopyTo(values.AsSpan()); } @@ -356,7 +364,7 @@ internal PgConverterInfo ObjectOrDefaultInfo } } - PgSerializerOptions _serializerOptions; + internal PgSerializerOptions _serializerOptions; internal FieldDescription Clone() { diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index b028913420..5d9b83c5a6 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -353,7 +353,7 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo if (statementIndex >= 0) { if (RowDescription is { } description && statements[statementIndex].IsPrepared && ColumnInfoCache is { } cache) - description.SetConverterInfoCache(new(cache, 0, _numColumns)); + description.SetColumnInfoCache(new(cache, 0, _numColumns)); if (statementIndex is 0 && _behavior.HasFlag(CommandBehavior.SingleResult) && !isConsuming) { @@ -426,7 +426,7 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo ColumnInfoCache = ArrayPool.Shared.Rent(RowDescription.Count); } if (statement.IsPrepared) - RowDescription.LoadConverterInfoCache(ColumnInfoCache); + RowDescription.LoadColumnInfoCache(Connector.SerializerOptions, ColumnInfoCache); } else { @@ -580,7 +580,7 @@ async ValueTask ConsumeResultSet(bool async) var statement = _statements[StatementIndex]; if (statement.IsPrepared && ColumnInfoCache is not null) - RowDescription!.SetConverterInfoCache(new(ColumnInfoCache, 0, _numColumns)); + RowDescription!.SetColumnInfoCache(new(ColumnInfoCache, 0, _numColumns)); if (statement.AppendErrorBarrier ?? 
Command.EnableErrorBarriers) Expect(await Connector.ReadMessage(async).ConfigureAwait(false), Connector); @@ -2193,6 +2193,9 @@ DataFormat GetInfo(int ordinal, Type type, out PgConverter converter, out Size b } ref var info = ref ColumnInfoCache![ordinal]; + + Debug.Assert(info.ConverterInfo.IsDefault || ReferenceEquals(Connector.SerializerOptions, info.ConverterInfo.TypeInfo.Options), "Cache is bleeding over"); + if (info.ConverterInfo.TypeToConvert == type) { converter = info.ConverterInfo.Converter; diff --git a/test/Npgsql.Tests/CommandTests.cs b/test/Npgsql.Tests/CommandTests.cs index cd105548be..024840b837 100644 --- a/test/Npgsql.Tests/CommandTests.cs +++ b/test/Npgsql.Tests/CommandTests.cs @@ -828,6 +828,31 @@ public async Task Use_across_connection_change([Values(PrepareOrNot.Prepared, Pr Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(1)); } + // The asserts we're testing are debug only. + [Test] + public async Task Use_after_reload_types_invalidates_cached_infos() + { + if (IsMultiplexing) + return; + + using var conn1 = await OpenConnectionAsync(); + using var cmd = new NpgsqlCommand("SELECT 1", conn1); + cmd.Prepare(); + using (var reader = await cmd.ExecuteReaderAsync()) + { + await reader.ReadAsync(); + Assert.DoesNotThrow(() => reader.GetInt32(0)); + } + + await conn1.ReloadTypesAsync(); + + using (var reader = await cmd.ExecuteReaderAsync()) + { + await reader.ReadAsync(); + Assert.DoesNotThrow(() => reader.GetInt32(0)); + } + } + [Test, Description("CreateCommand before connection open")] [IssueLink("https://github.com/npgsql/npgsql/issues/565")] public async Task Create_command_before_connection_open() From fdc018654841d78e1693480b59aa9801a31faec7 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Mon, 13 Nov 2023 15:36:22 +0100 Subject: [PATCH 292/761] Also remove obsolete TimestampTZ/TimeTZ (#5394) Closes #4788 --- src/Npgsql/NpgsqlTypes/NpgsqlDbType.cs | 14 -------------- src/Npgsql/PublicAPI.Unshipped.txt | 2 ++ 2 files changed, 2 
insertions(+), 14 deletions(-) diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlDbType.cs b/src/Npgsql/NpgsqlTypes/NpgsqlDbType.cs index b05f623867..37876a74fa 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlDbType.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlDbType.cs @@ -223,13 +223,6 @@ public enum NpgsqlDbType [BuiltInPostgresType("timestamp", baseOID: 1114, arrayOID: 1115, rangeName: "tsrange", rangeOID: 3908, multirangeName: "tsmultirange", multirangeOID: 4533)] Timestamp = 21, - /// - /// Corresponds to the PostgreSQL "timestamp with time zone" type. - /// - /// See https://www.postgresql.org/docs/current/static/datatype-datetime.html - [Obsolete("Use TimestampTz instead")] // NOTE: Don't remove this (see #1694) - TimestampTZ = TimestampTz, - /// /// Corresponds to the PostgreSQL "timestamp with time zone" type. /// @@ -244,13 +237,6 @@ public enum NpgsqlDbType [BuiltInPostgresType("interval", baseOID: 1186, arrayOID: 1187)] Interval = 30, - /// - /// Corresponds to the PostgreSQL "time with time zone" type. - /// - /// See https://www.postgresql.org/docs/current/static/datatype-datetime.html - [Obsolete("Use TimeTz instead")] // NOTE: Don't remove this (see #1694) - TimeTZ = TimeTz, - /// /// Corresponds to the PostgreSQL "time with time zone" type. /// diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index 2336158e2c..5796ca1a3d 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -87,6 +87,8 @@ override Npgsql.NpgsqlBatch.Dispose() -> void *REMOVED*static NpgsqlTypes.NpgsqlPath.Parse(string! s) -> NpgsqlTypes.NpgsqlPath *REMOVED*static NpgsqlTypes.NpgsqlPoint.Parse(string! s) -> NpgsqlTypes.NpgsqlPoint *REMOVED*static NpgsqlTypes.NpgsqlPolygon.Parse(string! 
s) -> NpgsqlTypes.NpgsqlPolygon +*REMOVED*NpgsqlTypes.NpgsqlDbType.TimestampTZ = 26 -> NpgsqlTypes.NpgsqlDbType +*REMOVED*NpgsqlTypes.NpgsqlDbType.TimeTZ = 31 -> NpgsqlTypes.NpgsqlDbType *REMOVED*NpgsqlTypes.NpgsqlInet.Deconstruct(out System.Net.IPAddress! address, out int netmask) -> void *REMOVED*NpgsqlTypes.NpgsqlInet.NpgsqlInet(System.Net.IPAddress! address, int netmask) -> void *REMOVED*NpgsqlTypes.NpgsqlInet.Address.set -> void From a1a0c5ccbc0833c5a459de77650e7fd6ed567262 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Mon, 13 Nov 2023 16:02:27 +0100 Subject: [PATCH 293/761] Add keyed services support (#5387) Closes #5134 --- .../NpgsqlServiceCollectionExtensions.cs | 177 +++++++++++++----- .../DependencyInjectionTests.cs | 64 +++++-- 2 files changed, 179 insertions(+), 62 deletions(-) diff --git a/src/Npgsql.DependencyInjection/NpgsqlServiceCollectionExtensions.cs b/src/Npgsql.DependencyInjection/NpgsqlServiceCollectionExtensions.cs index 61b5a3f015..c1841ece60 100644 --- a/src/Npgsql.DependencyInjection/NpgsqlServiceCollectionExtensions.cs +++ b/src/Npgsql.DependencyInjection/NpgsqlServiceCollectionExtensions.cs @@ -28,19 +28,22 @@ public static class NpgsqlServiceCollectionExtensions /// The lifetime with which to register the service in the container. /// Defaults to . /// + /// The of the data source. /// The same service collection so that multiple calls can be chained. public static IServiceCollection AddNpgsqlDataSource( this IServiceCollection serviceCollection, string connectionString, Action dataSourceBuilderAction, ServiceLifetime connectionLifetime = ServiceLifetime.Transient, - ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton) - => AddNpgsqlDataSourceCore(serviceCollection, connectionString, dataSourceBuilderAction, connectionLifetime, dataSourceLifetime); + ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton, + object? 
serviceKey = null) + => AddNpgsqlDataSourceCore(serviceCollection, serviceKey, connectionString, dataSourceBuilderAction, connectionLifetime, dataSourceLifetime); /// /// Registers an and an in the . /// /// The to add services to. + /// The of the data source. /// An Npgsql connection string. /// /// The lifetime with which to register the in the container. @@ -55,9 +58,10 @@ public static IServiceCollection AddNpgsqlDataSource( this IServiceCollection serviceCollection, string connectionString, ServiceLifetime connectionLifetime = ServiceLifetime.Transient, - ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton) + ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton, + object? serviceKey = null) => AddNpgsqlDataSourceCore( - serviceCollection, connectionString, dataSourceBuilderAction: null, connectionLifetime, dataSourceLifetime); + serviceCollection, serviceKey, connectionString, dataSourceBuilderAction: null, connectionLifetime, dataSourceLifetime); /// /// Registers an and an in the . @@ -75,14 +79,16 @@ public static IServiceCollection AddNpgsqlDataSource( /// The lifetime with which to register the service in the container. /// Defaults to . /// + /// The of the data source. /// The same service collection so that multiple calls can be chained. public static IServiceCollection AddNpgsqlSlimDataSource( this IServiceCollection serviceCollection, string connectionString, Action dataSourceBuilderAction, ServiceLifetime connectionLifetime = ServiceLifetime.Transient, - ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton) - => AddNpgsqlSlimDataSourceCore(serviceCollection, connectionString, dataSourceBuilderAction, connectionLifetime, dataSourceLifetime); + ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton, + object? serviceKey = null) + => AddNpgsqlSlimDataSourceCore(serviceCollection, serviceKey, connectionString, dataSourceBuilderAction, connectionLifetime, dataSourceLifetime); /// /// Registers an and an in the . 
@@ -97,14 +103,16 @@ public static IServiceCollection AddNpgsqlSlimDataSource( /// The lifetime with which to register the service in the container. /// Defaults to . /// + /// The of the data source. /// The same service collection so that multiple calls can be chained. public static IServiceCollection AddNpgsqlSlimDataSource( this IServiceCollection serviceCollection, string connectionString, ServiceLifetime connectionLifetime = ServiceLifetime.Transient, - ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton) + ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton, + object? serviceKey = null) => AddNpgsqlSlimDataSourceCore( - serviceCollection, connectionString, dataSourceBuilderAction: null, connectionLifetime, dataSourceLifetime); + serviceCollection, serviceKey, connectionString, dataSourceBuilderAction: null, connectionLifetime, dataSourceLifetime); /// /// Registers an and an in the @@ -122,15 +130,17 @@ public static IServiceCollection AddNpgsqlSlimDataSource( /// The lifetime with which to register the service in the container. /// Defaults to . /// + /// The of the data source. /// The same service collection so that multiple calls can be chained. public static IServiceCollection AddMultiHostNpgsqlDataSource( this IServiceCollection serviceCollection, string connectionString, Action dataSourceBuilderAction, ServiceLifetime connectionLifetime = ServiceLifetime.Transient, - ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton) + ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton, + object? 
serviceKey = null) => AddMultiHostNpgsqlDataSourceCore( - serviceCollection, connectionString, dataSourceBuilderAction, connectionLifetime, dataSourceLifetime); + serviceCollection, serviceKey, connectionString, dataSourceBuilderAction, connectionLifetime, dataSourceLifetime); /// /// Registers an and an in the @@ -146,14 +156,16 @@ public static IServiceCollection AddMultiHostNpgsqlDataSource( /// The lifetime with which to register the service in the container. /// Defaults to . /// + /// The of the data source. /// The same service collection so that multiple calls can be chained. public static IServiceCollection AddMultiHostNpgsqlDataSource( this IServiceCollection serviceCollection, string connectionString, ServiceLifetime connectionLifetime = ServiceLifetime.Transient, - ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton) + ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton, + object? serviceKey = null) => AddMultiHostNpgsqlDataSourceCore( - serviceCollection, connectionString, dataSourceBuilderAction: null, connectionLifetime, dataSourceLifetime); + serviceCollection, serviceKey, connectionString, dataSourceBuilderAction: null, connectionLifetime, dataSourceLifetime); /// /// Registers an and an in the @@ -171,15 +183,17 @@ public static IServiceCollection AddMultiHostNpgsqlDataSource( /// The lifetime with which to register the service in the container. /// Defaults to . /// + /// The of the data source. /// The same service collection so that multiple calls can be chained. public static IServiceCollection AddMultiHostNpgsqlSlimDataSource( this IServiceCollection serviceCollection, string connectionString, Action dataSourceBuilderAction, ServiceLifetime connectionLifetime = ServiceLifetime.Transient, - ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton) + ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton, + object? 
serviceKey = null) => AddMultiHostNpgsqlSlimDataSourceCore( - serviceCollection, connectionString, dataSourceBuilderAction, connectionLifetime, dataSourceLifetime); + serviceCollection, serviceKey, connectionString, dataSourceBuilderAction, connectionLifetime, dataSourceLifetime); /// /// Registers an and an in the @@ -195,17 +209,20 @@ public static IServiceCollection AddMultiHostNpgsqlSlimDataSource( /// The lifetime with which to register the service in the container. /// Defaults to . /// + /// The of the data source. /// The same service collection so that multiple calls can be chained. public static IServiceCollection AddMultiHostNpgsqlSlimDataSource( this IServiceCollection serviceCollection, string connectionString, ServiceLifetime connectionLifetime = ServiceLifetime.Transient, - ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton) + ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton, + object? serviceKey = null) => AddMultiHostNpgsqlSlimDataSourceCore( - serviceCollection, connectionString, dataSourceBuilderAction: null, connectionLifetime, dataSourceLifetime); + serviceCollection, serviceKey, connectionString, dataSourceBuilderAction: null, connectionLifetime, dataSourceLifetime); static IServiceCollection AddNpgsqlDataSourceCore( this IServiceCollection serviceCollection, + object? serviceKey, string connectionString, Action? 
dataSourceBuilderAction, ServiceLifetime connectionLifetime, @@ -214,7 +231,8 @@ static IServiceCollection AddNpgsqlDataSourceCore( serviceCollection.TryAdd( new ServiceDescriptor( typeof(NpgsqlDataSource), - sp => + serviceKey, + (sp, key) => { var dataSourceBuilder = new NpgsqlDataSourceBuilder(connectionString); dataSourceBuilder.UseLoggerFactory(sp.GetService()); @@ -223,13 +241,14 @@ static IServiceCollection AddNpgsqlDataSourceCore( }, dataSourceLifetime)); - AddCommonServices(serviceCollection, connectionLifetime, dataSourceLifetime); + AddCommonServices(serviceCollection, serviceKey, connectionLifetime, dataSourceLifetime); return serviceCollection; } static IServiceCollection AddNpgsqlSlimDataSourceCore( this IServiceCollection serviceCollection, + object? serviceKey, string connectionString, Action? dataSourceBuilderAction, ServiceLifetime connectionLifetime, @@ -238,7 +257,8 @@ static IServiceCollection AddNpgsqlSlimDataSourceCore( serviceCollection.TryAdd( new ServiceDescriptor( typeof(NpgsqlDataSource), - sp => + serviceKey, + (sp, key) => { var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(connectionString); dataSourceBuilder.UseLoggerFactory(sp.GetService()); @@ -247,13 +267,14 @@ static IServiceCollection AddNpgsqlSlimDataSourceCore( }, dataSourceLifetime)); - AddCommonServices(serviceCollection, connectionLifetime, dataSourceLifetime); + AddCommonServices(serviceCollection, serviceKey, connectionLifetime, dataSourceLifetime); return serviceCollection; } static IServiceCollection AddMultiHostNpgsqlDataSourceCore( this IServiceCollection serviceCollection, + object? serviceKey, string connectionString, Action? 
dataSourceBuilderAction, ServiceLifetime connectionLifetime, @@ -262,7 +283,8 @@ static IServiceCollection AddMultiHostNpgsqlDataSourceCore( serviceCollection.TryAdd( new ServiceDescriptor( typeof(NpgsqlMultiHostDataSource), - sp => + serviceKey, + (sp, key) => { var dataSourceBuilder = new NpgsqlDataSourceBuilder(connectionString); dataSourceBuilder.UseLoggerFactory(sp.GetService()); @@ -271,19 +293,33 @@ static IServiceCollection AddMultiHostNpgsqlDataSourceCore( }, dataSourceLifetime)); - serviceCollection.TryAdd( - new ServiceDescriptor( - typeof(NpgsqlDataSource), - sp => sp.GetRequiredService(), - dataSourceLifetime)); + if (serviceKey is not null) + { + serviceCollection.TryAdd( + new ServiceDescriptor( + typeof(NpgsqlDataSource), + serviceKey, + (sp, key) => sp.GetRequiredKeyedService(key), + dataSourceLifetime)); + } + else + { + serviceCollection.TryAdd( + new ServiceDescriptor( + typeof(NpgsqlDataSource), + sp => sp.GetRequiredService(), + dataSourceLifetime)); + + } - AddCommonServices(serviceCollection, connectionLifetime, dataSourceLifetime); + AddCommonServices(serviceCollection, serviceKey, connectionLifetime, dataSourceLifetime); return serviceCollection; } static IServiceCollection AddMultiHostNpgsqlSlimDataSourceCore( this IServiceCollection serviceCollection, + object? serviceKey, string connectionString, Action? 
dataSourceBuilderAction, ServiceLifetime connectionLifetime, @@ -292,7 +328,8 @@ static IServiceCollection AddMultiHostNpgsqlSlimDataSourceCore( serviceCollection.TryAdd( new ServiceDescriptor( typeof(NpgsqlMultiHostDataSource), - sp => + serviceKey, + (sp, _) => { var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(connectionString); dataSourceBuilder.UseLoggerFactory(sp.GetService()); @@ -301,38 +338,80 @@ static IServiceCollection AddMultiHostNpgsqlSlimDataSourceCore( }, dataSourceLifetime)); - serviceCollection.TryAdd( - new ServiceDescriptor( - typeof(NpgsqlDataSource), - sp => sp.GetRequiredService(), - dataSourceLifetime)); + if (serviceKey is not null) + { + serviceCollection.TryAdd( + new ServiceDescriptor( + typeof(NpgsqlDataSource), + serviceKey, + (sp, key) => sp.GetRequiredKeyedService(key), + dataSourceLifetime)); + } + else + { + serviceCollection.TryAdd( + new ServiceDescriptor( + typeof(NpgsqlDataSource), + sp => sp.GetRequiredService(), + dataSourceLifetime)); + + } - AddCommonServices(serviceCollection, connectionLifetime, dataSourceLifetime); + AddCommonServices(serviceCollection, serviceKey, connectionLifetime, dataSourceLifetime); return serviceCollection; } static void AddCommonServices( IServiceCollection serviceCollection, + object? serviceKey, ServiceLifetime connectionLifetime, ServiceLifetime dataSourceLifetime) { - serviceCollection.TryAdd( - new ServiceDescriptor( - typeof(NpgsqlConnection), - sp => sp.GetRequiredService().CreateConnection(), - connectionLifetime)); + // We don't try to invoke KeyedService methods if there is no service key. + // This allows user code that use non-standard containers without support for IKeyedServiceProvider to keep on working. 
+ if (serviceKey is not null) + { + serviceCollection.TryAdd( + new ServiceDescriptor( + typeof(NpgsqlConnection), + serviceKey, + (sp, key) => sp.GetRequiredKeyedService(key).CreateConnection(), + connectionLifetime)); - serviceCollection.TryAdd( - new ServiceDescriptor( - typeof(DbDataSource), - sp => sp.GetRequiredService(), - dataSourceLifetime)); + serviceCollection.TryAdd( + new ServiceDescriptor( + typeof(DbDataSource), + serviceKey, + (sp, key) => sp.GetRequiredKeyedService(key), + dataSourceLifetime)); - serviceCollection.TryAdd( - new ServiceDescriptor( - typeof(DbConnection), - sp => sp.GetRequiredService(), - connectionLifetime)); + serviceCollection.TryAdd( + new ServiceDescriptor( + typeof(DbConnection), + serviceKey, + (sp, key) => sp.GetRequiredKeyedService(key), + connectionLifetime)); + } + else + { + serviceCollection.TryAdd( + new ServiceDescriptor( + typeof(NpgsqlConnection), + sp => sp.GetRequiredService().CreateConnection(), + connectionLifetime)); + + serviceCollection.TryAdd( + new ServiceDescriptor( + typeof(DbDataSource), + sp => sp.GetRequiredService(), + dataSourceLifetime)); + + serviceCollection.TryAdd( + new ServiceDescriptor( + typeof(DbConnection), + sp => sp.GetRequiredService(), + connectionLifetime)); + } } } diff --git a/test/Npgsql.DependencyInjection.Tests/DependencyInjectionTests.cs b/test/Npgsql.DependencyInjection.Tests/DependencyInjectionTests.cs index 486dae69ed..ebbf0e2388 100644 --- a/test/Npgsql.DependencyInjection.Tests/DependencyInjectionTests.cs +++ b/test/Npgsql.DependencyInjection.Tests/DependencyInjectionTests.cs @@ -45,6 +45,42 @@ public async Task NpgsqlMultiHostDataSource_is_registered_properly([Values] bool : dataSource.OpenConnection(); } + [Test] + public async Task NpgsqlDataSource_with_service_key_is_registered_properly([Values] bool async) + { + const string serviceKey = "key"; + var serviceCollection = new ServiceCollection(); + RegisterDataSource(serviceCollection, TestUtil.ConnectionString, 
serviceKey); + + await using var serviceProvider = serviceCollection.BuildServiceProvider(); + var dataSource = serviceProvider.GetRequiredKeyedService(serviceKey); + Assert.Throws(() => serviceProvider.GetRequiredService()); + + await using var connection = async + ? await dataSource.OpenConnectionAsync() + : dataSource.OpenConnection(); + } + + [Test] + public async Task NpgsqlMultiHostDataSource_with_service_key_is_registered_properly([Values] bool async) + { + const string serviceKey = "key"; + var serviceCollection = new ServiceCollection(); + RegisterMultiHostDataSource(serviceCollection, TestUtil.ConnectionString, serviceKey); + + await using var serviceProvider = serviceCollection.BuildServiceProvider(); + var multiHostDataSource = serviceProvider.GetRequiredKeyedService(serviceKey); + var dataSource = serviceProvider.GetRequiredKeyedService(serviceKey); + Assert.Throws(() => serviceProvider.GetRequiredService()); + Assert.Throws(() => serviceProvider.GetRequiredService()); + + Assert.That(dataSource, Is.SameAs(multiHostDataSource)); + + await using var connection = async + ? 
await dataSource.OpenConnectionAsync() + : dataSource.OpenConnection(); + } + [Test] public void NpgsqlDataSource_is_registered_as_singleton_by_default() { @@ -124,19 +160,21 @@ public async Task LoggerFactory_is_picked_up_from_ServiceCollection() Assert.That(listLoggerProvider.Log.Any(l => l.Id == NpgsqlEventId.CommandExecutionCompleted)); } - private IServiceCollection RegisterDataSource(ServiceCollection serviceCollection, string connectionString) => mode switch - { - DataSourceMode.Standard => serviceCollection.AddNpgsqlDataSource(connectionString), - DataSourceMode.Slim => serviceCollection.AddNpgsqlSlimDataSource(connectionString), - _ => throw new NotSupportedException($"Mode {mode} not supported") - }; - - private IServiceCollection RegisterMultiHostDataSource(ServiceCollection serviceCollection, string connectionString) => mode switch - { - DataSourceMode.Standard => serviceCollection.AddMultiHostNpgsqlDataSource(connectionString), - DataSourceMode.Slim => serviceCollection.AddMultiHostNpgsqlSlimDataSource(connectionString), - _ => throw new NotSupportedException($"Mode {mode} not supported") - }; + IServiceCollection RegisterDataSource(ServiceCollection serviceCollection, string connectionString, object? serviceKey = null) + => mode switch + { + DataSourceMode.Standard => serviceCollection.AddNpgsqlDataSource(connectionString, serviceKey: serviceKey), + DataSourceMode.Slim => serviceCollection.AddNpgsqlSlimDataSource(connectionString, serviceKey: serviceKey), + _ => throw new NotSupportedException($"Mode {mode} not supported") + }; + + IServiceCollection RegisterMultiHostDataSource(ServiceCollection serviceCollection, string connectionString, object? 
serviceKey = null) + => mode switch + { + DataSourceMode.Standard => serviceCollection.AddMultiHostNpgsqlDataSource(connectionString, serviceKey: serviceKey), + DataSourceMode.Slim => serviceCollection.AddMultiHostNpgsqlSlimDataSource(connectionString, serviceKey: serviceKey), + _ => throw new NotSupportedException($"Mode {mode} not supported") + }; } public enum DataSourceMode From 88e46e5da16856dbef215a8b08c96946fd9f01b5 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Tue, 14 Nov 2023 17:53:04 +0100 Subject: [PATCH 294/761] Rename EnableDynamicJsonMappings (#5396) --- .../Resolvers/UnsupportedTypeInfoResolver.cs | 2 +- src/Npgsql/PublicAPI.Unshipped.txt | 2 +- .../INpgsqlTypeMapperExtensions.cs | 2 +- test/Npgsql.Tests/Types/JsonDynamicTests.cs | 22 +++++++++---------- 4 files changed, 14 insertions(+), 14 deletions(-) diff --git a/src/Npgsql/Internal/Resolvers/UnsupportedTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/UnsupportedTypeInfoResolver.cs index a2268f1bd3..73d783f8fa 100644 --- a/src/Npgsql/Internal/Resolvers/UnsupportedTypeInfoResolver.cs +++ b/src/Npgsql/Internal/Resolvers/UnsupportedTypeInfoResolver.cs @@ -31,7 +31,7 @@ sealed class UnsupportedTypeInfoResolver : IPgTypeInfoResolver string.Format( NpgsqlStrings.DynamicJsonNotEnabled, type == typeof(object) ? "" : type.Name, - nameof(INpgsqlTypeMapperExtensions.EnableDynamicJsonMappings), + nameof(INpgsqlTypeMapperExtensions.EnableDynamicJson), typeof(TBuilder).Name)); case not null when options.DatabaseInfo.GetPostgresType(dataTypeName) is PostgresEnumType: diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index 5796ca1a3d..bbf6d1e341 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -119,7 +119,7 @@ override NpgsqlTypes.NpgsqlCidr.ToString() -> string! *REMOVED*static NpgsqlTypes.NpgsqlInet.ToIPAddress(NpgsqlTypes.NpgsqlInet inet) -> System.Net.IPAddress! 
*REMOVED*static NpgsqlTypes.NpgsqlInet.ToNpgsqlInet(System.Net.IPAddress? ip) -> NpgsqlTypes.NpgsqlInet *REMOVED*Npgsql.NpgsqlDataSourceBuilder.AddTypeResolverFactory(Npgsql.Internal.TypeHandling.TypeHandlerResolverFactory! resolverFactory) -> void -static Npgsql.INpgsqlTypeMapperExtensions.EnableDynamicJsonMappings(this T mapper, System.Text.Json.JsonSerializerOptions? serializerOptions = null, System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> T +static Npgsql.INpgsqlTypeMapperExtensions.EnableDynamicJson(this T mapper, System.Text.Json.JsonSerializerOptions? serializerOptions = null, System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> T static Npgsql.INpgsqlTypeMapperExtensions.EnableRecordsAsTuples(this T mapper) -> T static Npgsql.INpgsqlTypeMapperExtensions.EnableUnmappedTypes(this T mapper) -> T static NpgsqlTypes.NpgsqlCidr.explicit operator System.Net.IPAddress!(NpgsqlTypes.NpgsqlCidr cidr) -> System.Net.IPAddress! diff --git a/src/Npgsql/TypeMapping/INpgsqlTypeMapperExtensions.cs b/src/Npgsql/TypeMapping/INpgsqlTypeMapperExtensions.cs index 91758822b8..c5f574960f 100644 --- a/src/Npgsql/TypeMapping/INpgsqlTypeMapperExtensions.cs +++ b/src/Npgsql/TypeMapping/INpgsqlTypeMapperExtensions.cs @@ -31,7 +31,7 @@ public static class INpgsqlTypeMapperExtensions /// [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] [RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] - public static T EnableDynamicJsonMappings( + public static T EnableDynamicJson( this T mapper, JsonSerializerOptions? serializerOptions = null, Type[]? 
jsonbClrTypes = null, diff --git a/test/Npgsql.Tests/Types/JsonDynamicTests.cs b/test/Npgsql.Tests/Types/JsonDynamicTests.cs index 774f74c1e3..304d4db50f 100644 --- a/test/Npgsql.Tests/Types/JsonDynamicTests.cs +++ b/test/Npgsql.Tests/Types/JsonDynamicTests.cs @@ -114,14 +114,14 @@ await AssertType( } [Test] - public async Task As_poco_supported_only_with_EnableDynamicJsonMappings() + public async Task As_poco_supported_only_with_EnableDynamicJson() { - // This test uses base.DataSource, which doesn't have EnableDynamicJsonMappings() + // This test uses base.DataSource, which doesn't have EnableDynamicJson() var errorMessage = string.Format( NpgsqlStrings.DynamicJsonNotEnabled, nameof(WeatherForecast), - nameof(INpgsqlTypeMapperExtensions.EnableDynamicJsonMappings), + nameof(INpgsqlTypeMapperExtensions.EnableDynamicJson), nameof(NpgsqlDataSourceBuilder)); var exception = await AssertTypeUnsupportedWrite( @@ -152,7 +152,7 @@ public async Task As_poco_supported_only_with_EnableDynamicJsonMappings() public async Task Poco_does_not_stomp_GetValue_string() { var dataSourceBuilder = CreateDataSourceBuilder(); - var dataSource = dataSourceBuilder.EnableDynamicJsonMappings(null, new[] {typeof(WeatherForecast)}, new[] {typeof(WeatherForecast)}).Build(); + var dataSource = dataSourceBuilder.EnableDynamicJson(null, new[] {typeof(WeatherForecast)}, new[] {typeof(WeatherForecast)}).Build(); var sqlLiteral = IsJsonb ? 
"""{"Date": "2019-09-01T00:00:00", "Summary": "Partly cloudy", "TemperatureC": 10}""" @@ -169,7 +169,7 @@ public async Task Poco_does_not_stomp_GetValue_string() public async Task Custom_JsonSerializerOptions() { var dataSourceBuilder = CreateDataSourceBuilder(); - dataSourceBuilder.EnableDynamicJsonMappings(new JsonSerializerOptions { PropertyNamingPolicy = JsonNamingPolicy.CamelCase }); + dataSourceBuilder.EnableDynamicJson(new JsonSerializerOptions { PropertyNamingPolicy = JsonNamingPolicy.CamelCase }); await using var dataSource = dataSourceBuilder.Build(); await AssertTypeWrite( @@ -193,9 +193,9 @@ public async Task Poco_default_mapping() { var dataSourceBuilder = CreateDataSourceBuilder(); if (IsJsonb) - dataSourceBuilder.EnableDynamicJsonMappings(jsonbClrTypes: new[] { typeof(WeatherForecast) }); + dataSourceBuilder.EnableDynamicJson(jsonbClrTypes: new[] { typeof(WeatherForecast) }); else - dataSourceBuilder.EnableDynamicJsonMappings(jsonClrTypes: new[] { typeof(WeatherForecast) }); + dataSourceBuilder.EnableDynamicJson(jsonClrTypes: new[] { typeof(WeatherForecast) }); await using var dataSource = dataSourceBuilder.Build(); await AssertType( @@ -224,7 +224,7 @@ public async Task Poco_polymorphic_mapping() return; var dataSourceBuilder = CreateDataSourceBuilder(); - dataSourceBuilder.EnableDynamicJsonMappings(jsonClrTypes: new[] { typeof(WeatherForecast) }); + dataSourceBuilder.EnableDynamicJson(jsonClrTypes: new[] { typeof(WeatherForecast) }); await using var dataSource = dataSourceBuilder.Build(); await AssertType( @@ -251,7 +251,7 @@ public async Task Poco_polymorphic_mapping_read_parents() return; var dataSourceBuilder = CreateDataSourceBuilder(); - dataSourceBuilder.EnableDynamicJsonMappings(jsonClrTypes: new[] { typeof(WeatherForecast) }); + dataSourceBuilder.EnableDynamicJson(jsonClrTypes: new[] { typeof(WeatherForecast) }); await using var dataSource = dataSourceBuilder.Build(); var value = new ExtendedDerivedWeatherForecast() @@ -292,7 +292,7 @@ 
public async Task Poco_exact_polymorphic_mapping() return; var dataSourceBuilder = CreateDataSourceBuilder(); - dataSourceBuilder.EnableDynamicJsonMappings(jsonClrTypes: new[] { typeof(ExtendedDerivedWeatherForecast) }); + dataSourceBuilder.EnableDynamicJson(jsonClrTypes: new[] { typeof(ExtendedDerivedWeatherForecast) }); await using var dataSource = dataSourceBuilder.Build(); await AssertType( @@ -363,7 +363,7 @@ record ExtendedDerivedWeatherForecast : DerivedWeatherForecast public JsonDynamicTests(MultiplexingMode multiplexingMode, NpgsqlDbType npgsqlDbType) : base(multiplexingMode) { - DataSource = CreateDataSource(b => b.EnableDynamicJsonMappings()); + DataSource = CreateDataSource(b => b.EnableDynamicJson()); if (npgsqlDbType == NpgsqlDbType.Jsonb) using (var conn = OpenConnection()) From 354112981d7e376194d38528e844abdd0602ff04 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Tue, 14 Nov 2023 18:39:13 +0100 Subject: [PATCH 295/761] Remove BuiltInPostgresType attribute reflection (#5375) Closes #4949 --- .../Resolvers/RangeTypeInfoResolver.cs | 24 +-- src/Npgsql/NpgsqlTypes/NpgsqlDbType.cs | 88 -------- src/Npgsql/PostgresMinimalDatabaseInfo.cs | 106 +++++++--- src/Npgsql/PostgresTypes/PostgresArrayType.cs | 14 +- src/Npgsql/PostgresTypes/PostgresBaseType.cs | 15 +- .../PostgresTypes/PostgresCompositeType.cs | 11 +- .../PostgresTypes/PostgresDomainType.cs | 16 +- src/Npgsql/PostgresTypes/PostgresEnumType.cs | 13 +- .../PostgresTypes/PostgresMultirangeType.cs | 16 +- src/Npgsql/PostgresTypes/PostgresRangeType.cs | 16 +- src/Npgsql/PostgresTypes/PostgresType.cs | 12 ++ src/Npgsql/PostgresTypes/PostgresTypeKind.cs | 21 ++ src/Npgsql/TypeMapping/DefaultPgTypes.cs | 191 ------------------ test/Npgsql.Tests/BugTests.cs | 3 +- test/Npgsql.Tests/CommandTests.cs | 5 +- test/Npgsql.Tests/ReaderTests.cs | 7 +- test/Npgsql.Tests/Support/PgServerMock.cs | 7 +- 17 files changed, 221 insertions(+), 344 deletions(-) create mode 100644 
src/Npgsql/PostgresTypes/PostgresTypeKind.cs delete mode 100644 src/Npgsql/TypeMapping/DefaultPgTypes.cs diff --git a/src/Npgsql/Internal/Resolvers/RangeTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/RangeTypeInfoResolver.cs index f82c26c462..cd3cb9dcf2 100644 --- a/src/Npgsql/Internal/Resolvers/RangeTypeInfoResolver.cs +++ b/src/Npgsql/Internal/Resolvers/RangeTypeInfoResolver.cs @@ -3,8 +3,8 @@ using System.Numerics; using Npgsql.Internal.Converters; using Npgsql.Internal.Postgres; +using Npgsql.PostgresTypes; using Npgsql.Properties; -using Npgsql.TypeMapping; using Npgsql.Util; using NpgsqlTypes; using static Npgsql.Internal.PgConverterFactory; @@ -343,16 +343,16 @@ public static void ThrowIfUnsupported(Type? type, DataTypeName? dataTy var kind = CheckUnsupported(type, dataTypeName, options); switch (kind) { - case PgTypeKind.Range when kind.Value.HasFlag(PgTypeKind.Array): + case PostgresTypeKind.Range when kind.Value.HasFlag(PostgresTypeKind.Array): throw new NotSupportedException( string.Format(NpgsqlStrings.RangeArraysNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableArrays), typeof(TBuilder).Name)); - case PgTypeKind.Range: + case PostgresTypeKind.Range: throw new NotSupportedException( string.Format(NpgsqlStrings.RangesNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableRanges), typeof(TBuilder).Name)); - case PgTypeKind.Multirange when kind.Value.HasFlag(PgTypeKind.Array): + case PostgresTypeKind.Multirange when kind.Value.HasFlag(PostgresTypeKind.Array): throw new NotSupportedException( string.Format(NpgsqlStrings.MultirangeArraysNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableArrays), typeof(TBuilder).Name)); - case PgTypeKind.Multirange: + case PostgresTypeKind.Multirange: throw new NotSupportedException( string.Format(NpgsqlStrings.MultirangesNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableMultiranges), typeof(TBuilder).Name)); default: @@ -360,7 +360,7 @@ public static void ThrowIfUnsupported(Type? type, DataTypeName? 
dataTy } } - public static PgTypeKind? CheckUnsupported(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + public static PostgresTypeKind? CheckUnsupported(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) { // Only trigger on well known data type names. var npgsqlDbType = dataTypeName?.ToNpgsqlDbType(); @@ -371,12 +371,12 @@ public static void ThrowIfUnsupported(Type? type, DataTypeName? dataTy if (npgsqlDbType.Value.HasFlag(NpgsqlDbType.Range)) return dataTypeName?.IsArray == true - ? PgTypeKind.Array | PgTypeKind.Range - : PgTypeKind.Range; + ? PostgresTypeKind.Array | PostgresTypeKind.Range + : PostgresTypeKind.Range; return dataTypeName?.IsArray == true - ? PgTypeKind.Array | PgTypeKind.Multirange - : PgTypeKind.Multirange; + ? PostgresTypeKind.Array | PostgresTypeKind.Multirange + : PostgresTypeKind.Multirange; } if (type == typeof(object)) @@ -407,10 +407,10 @@ public static void ThrowIfUnsupported(Type? type, DataTypeName? dataTy // If we don't know more than the clr type, default to a Multirange kind over Array as they share the same types. foreach (var argument in matchingArguments) if (argument == type) - return isArray ? PgTypeKind.Multirange : PgTypeKind.Range; + return isArray ? PostgresTypeKind.Multirange : PostgresTypeKind.Range; if (type.AssemblyQualifiedName == "System.Numerics.BigInteger,System.Runtime.Numerics") - return isArray ? PgTypeKind.Multirange : PgTypeKind.Range; + return isArray ? PostgresTypeKind.Multirange : PostgresTypeKind.Range; } return null; diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlDbType.cs b/src/Npgsql/NpgsqlTypes/NpgsqlDbType.cs index 37876a74fa..687ebf16b7 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlDbType.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlDbType.cs @@ -29,49 +29,42 @@ public enum NpgsqlDbType /// Corresponds to the PostgreSQL 8-byte "bigint" type. 
/// /// See https://www.postgresql.org/docs/current/static/datatype-numeric.html - [BuiltInPostgresType("int8", baseOID: 20, arrayOID: 1016, rangeName: "int8range", rangeOID: 3926, multirangeName: "int8multirange", multirangeOID: 4536)] Bigint = 1, /// /// Corresponds to the PostgreSQL 8-byte floating-point "double" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-numeric.html - [BuiltInPostgresType("float8", baseOID: 701, arrayOID: 1022)] Double = 8, /// /// Corresponds to the PostgreSQL 4-byte "integer" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-numeric.html - [BuiltInPostgresType("int4", baseOID: 23, arrayOID: 1007, rangeName: "int4range", rangeOID: 3904, multirangeName: "int4multirange", multirangeOID: 4451)] Integer = 9, /// /// Corresponds to the PostgreSQL arbitrary-precision "numeric" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-numeric.html - [BuiltInPostgresType("numeric", baseOID: 1700, arrayOID: 1231, rangeName: "numrange", rangeOID: 3906, multirangeName: "nummultirange", multirangeOID: 4532)] Numeric = 13, /// /// Corresponds to the PostgreSQL floating-point "real" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-numeric.html - [BuiltInPostgresType("float4", baseOID: 700, arrayOID: 1021)] Real = 17, /// /// Corresponds to the PostgreSQL 2-byte "smallint" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-numeric.html - [BuiltInPostgresType("int2", baseOID: 21, arrayOID: 1005)] Smallint = 18, /// /// Corresponds to the PostgreSQL "money" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-money.html - [BuiltInPostgresType("money", baseOID: 790, arrayOID: 791)] Money = 12, #endregion @@ -82,7 +75,6 @@ public enum NpgsqlDbType /// Corresponds to the PostgreSQL "boolean" type. 
/// /// See https://www.postgresql.org/docs/current/static/datatype-boolean.html - [BuiltInPostgresType("bool", baseOID: 16, arrayOID: 1000)] Boolean = 2, #endregion @@ -93,49 +85,42 @@ public enum NpgsqlDbType /// Corresponds to the PostgreSQL geometric "box" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-geometric.html - [BuiltInPostgresType("box", baseOID: 603, arrayOID: 1020)] Box = 3, /// /// Corresponds to the PostgreSQL geometric "circle" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-geometric.html - [BuiltInPostgresType("circle", baseOID: 718, arrayOID: 719)] Circle = 5, /// /// Corresponds to the PostgreSQL geometric "line" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-geometric.html - [BuiltInPostgresType("line", baseOID: 628, arrayOID: 629)] Line = 10, /// /// Corresponds to the PostgreSQL geometric "lseg" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-geometric.html - [BuiltInPostgresType("lseg", baseOID: 601, arrayOID: 1018)] LSeg = 11, /// /// Corresponds to the PostgreSQL geometric "path" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-geometric.html - [BuiltInPostgresType("path", baseOID: 602, arrayOID: 1019)] Path = 14, /// /// Corresponds to the PostgreSQL geometric "point" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-geometric.html - [BuiltInPostgresType("point", baseOID: 600, arrayOID: 1017)] Point = 15, /// /// Corresponds to the PostgreSQL geometric "polygon" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-geometric.html - [BuiltInPostgresType("polygon", baseOID: 604, arrayOID: 1027)] Polygon = 16, #endregion @@ -146,28 +131,24 @@ public enum NpgsqlDbType /// Corresponds to the PostgreSQL "char(n)" type. 
/// /// See https://www.postgresql.org/docs/current/static/datatype-character.html - [BuiltInPostgresType("bpchar", baseOID: 1042, arrayOID: 1014)] Char = 6, /// /// Corresponds to the PostgreSQL "text" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-character.html - [BuiltInPostgresType("text", baseOID: 25, arrayOID: 1009)] Text = 19, /// /// Corresponds to the PostgreSQL "varchar" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-character.html - [BuiltInPostgresType("varchar", baseOID: 1043, arrayOID: 1015)] Varchar = 22, /// /// Corresponds to the PostgreSQL internal "name" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-character.html - [BuiltInPostgresType("name", baseOID: 19, arrayOID: 1003)] Name = 32, /// @@ -184,7 +165,6 @@ public enum NpgsqlDbType /// /// See https://www.postgresql.org/docs/current/static/datatype-text.html /// - [BuiltInPostgresType("char", baseOID: 18, arrayOID: 1002)] InternalChar = 38, #endregion @@ -195,7 +175,6 @@ public enum NpgsqlDbType /// Corresponds to the PostgreSQL "bytea" type, holding a raw byte string. /// /// See https://www.postgresql.org/docs/current/static/datatype-binary.html - [BuiltInPostgresType("bytea", baseOID: 17, arrayOID: 1001)] Bytea = 4, #endregion @@ -206,42 +185,36 @@ public enum NpgsqlDbType /// Corresponds to the PostgreSQL "date" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-datetime.html - [BuiltInPostgresType("date", baseOID: 1082, arrayOID: 1182, rangeName: "daterange", rangeOID: 3912, multirangeName: "datemultirange", multirangeOID: 4535)] Date = 7, /// /// Corresponds to the PostgreSQL "time" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-datetime.html - [BuiltInPostgresType("time", baseOID: 1083, arrayOID: 1183)] Time = 20, /// /// Corresponds to the PostgreSQL "timestamp" type. 
/// /// See https://www.postgresql.org/docs/current/static/datatype-datetime.html - [BuiltInPostgresType("timestamp", baseOID: 1114, arrayOID: 1115, rangeName: "tsrange", rangeOID: 3908, multirangeName: "tsmultirange", multirangeOID: 4533)] Timestamp = 21, /// /// Corresponds to the PostgreSQL "timestamp with time zone" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-datetime.html - [BuiltInPostgresType("timestamptz", baseOID: 1184, arrayOID: 1185, rangeName: "tstzrange", rangeOID: 3910, multirangeName: "tstzmultirange", multirangeOID: 4534)] TimestampTz = 26, /// /// Corresponds to the PostgreSQL "interval" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-datetime.html - [BuiltInPostgresType("interval", baseOID: 1186, arrayOID: 1187)] Interval = 30, /// /// Corresponds to the PostgreSQL "time with time zone" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-datetime.html - [BuiltInPostgresType("timetz", baseOID: 1266, arrayOID: 1270)] TimeTz = 31, /// @@ -259,28 +232,24 @@ public enum NpgsqlDbType /// Corresponds to the PostgreSQL "inet" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-net-types.html - [BuiltInPostgresType("inet", baseOID: 869, arrayOID: 1041)] Inet = 24, /// /// Corresponds to the PostgreSQL "cidr" type, a field storing an IPv4 or IPv6 network. /// /// See https://www.postgresql.org/docs/current/static/datatype-net-types.html - [BuiltInPostgresType("cidr", baseOID: 650, arrayOID: 651)] Cidr = 44, /// /// Corresponds to the PostgreSQL "macaddr" type, a field storing a 6-byte physical address. /// /// See https://www.postgresql.org/docs/current/static/datatype-net-types.html - [BuiltInPostgresType("macaddr", baseOID: 829, arrayOID: 1040)] MacAddr = 34, /// /// Corresponds to the PostgreSQL "macaddr8" type, a field storing a 6-byte or 8-byte physical address. 
/// /// See https://www.postgresql.org/docs/current/static/datatype-net-types.html - [BuiltInPostgresType("macaddr8", baseOID: 774, arrayOID: 775)] MacAddr8 = 54, #endregion @@ -291,14 +260,12 @@ public enum NpgsqlDbType /// Corresponds to the PostgreSQL "bit" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-bit.html - [BuiltInPostgresType("bit", baseOID: 1560, arrayOID: 1561)] Bit = 25, /// /// Corresponds to the PostgreSQL "varbit" type, a field storing a variable-length string of bits. /// /// See https://www.postgresql.org/docs/current/static/datatype-boolean.html - [BuiltInPostgresType("varbit", baseOID: 1562, arrayOID: 1563)] Varbit = 39, #endregion @@ -309,21 +276,18 @@ public enum NpgsqlDbType /// Corresponds to the PostgreSQL "tsvector" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-textsearch.html - [BuiltInPostgresType("tsvector", baseOID: 3614, arrayOID: 3643)] TsVector = 45, /// /// Corresponds to the PostgreSQL "tsquery" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-textsearch.html - [BuiltInPostgresType("tsquery", baseOID: 3615, arrayOID: 3645)] TsQuery = 46, /// /// Corresponds to the PostgreSQL "regconfig" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-textsearch.html - [BuiltInPostgresType("regconfig", baseOID: 3734, arrayOID: 3735)] Regconfig = 56, #endregion @@ -334,7 +298,6 @@ public enum NpgsqlDbType /// Corresponds to the PostgreSQL "uuid" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-uuid.html - [BuiltInPostgresType("uuid", baseOID: 2950, arrayOID: 2951)] Uuid = 27, #endregion @@ -345,7 +308,6 @@ public enum NpgsqlDbType /// Corresponds to the PostgreSQL "xml" type. 
/// /// See https://www.postgresql.org/docs/current/static/datatype-xml.html - [BuiltInPostgresType("xml", baseOID: 142, arrayOID: 143)] Xml = 28, #endregion @@ -357,7 +319,6 @@ public enum NpgsqlDbType /// /// See https://www.postgresql.org/docs/current/static/datatype-json.html /// - [BuiltInPostgresType("json", baseOID: 114, arrayOID: 199)] Json = 35, /// @@ -368,7 +329,6 @@ public enum NpgsqlDbType /// Supported since PostgreSQL 9.4. /// See https://www.postgresql.org/docs/current/static/datatype-json.html /// - [BuiltInPostgresType("jsonb", baseOID: 3802, arrayOID: 3807)] Jsonb = 36, /// @@ -379,7 +339,6 @@ public enum NpgsqlDbType /// Supported since PostgreSQL 12. /// See https://www.postgresql.org/docs/current/datatype-json.html#DATATYPE-JSONPATH /// - [BuiltInPostgresType("jsonpath", baseOID: 4072, arrayOID: 4073)] JsonPath = 57, #endregion @@ -399,60 +358,51 @@ public enum NpgsqlDbType /// /// Corresponds to the PostgreSQL "refcursor" type. /// - [BuiltInPostgresType("refcursor", baseOID: 1790, arrayOID: 2201)] Refcursor = 23, /// /// Corresponds to the PostgreSQL internal "oidvector" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-oid.html - [BuiltInPostgresType("oidvector", baseOID: 30, arrayOID: 1013)] Oidvector = 29, /// /// Corresponds to the PostgreSQL internal "int2vector" type. /// - [BuiltInPostgresType("int2vector", baseOID: 22, arrayOID: 1006)] Int2Vector = 52, /// /// Corresponds to the PostgreSQL "oid" type. /// /// See https://www.postgresql.org/docs/current/static/datatype-oid.html - [BuiltInPostgresType("oid", baseOID: 26, arrayOID: 1028)] Oid = 41, /// /// Corresponds to the PostgreSQL "xid" type, an internal transaction identifier. /// /// See https://www.postgresql.org/docs/current/static/datatype-oid.html - [BuiltInPostgresType("xid", baseOID: 28, arrayOID: 1011)] Xid = 42, /// /// Corresponds to the PostgreSQL "xid8" type, an internal transaction identifier. 
/// /// See https://www.postgresql.org/docs/current/static/datatype-oid.html - [BuiltInPostgresType("xid8", baseOID: 5069, arrayOID: 271)] Xid8 = 64, /// /// Corresponds to the PostgreSQL "cid" type, an internal command identifier. /// /// See https://www.postgresql.org/docs/current/static/datatype-oid.html - [BuiltInPostgresType("cid", baseOID: 29, arrayOID: 1012)] Cid = 43, /// /// Corresponds to the PostgreSQL "regtype" type, a numeric (OID) ID of a type in the pg_type table. /// - [BuiltInPostgresType("regtype", baseOID: 2206, arrayOID: 2211)] Regtype = 49, /// /// Corresponds to the PostgreSQL "tid" type, a tuple id identifying the physical location of a row within its table. /// - [BuiltInPostgresType("tid", baseOID: 27, arrayOID: 1010)] Tid = 53, /// @@ -463,7 +413,6 @@ public enum NpgsqlDbType /// See: https://www.postgresql.org/docs/current/datatype-pg-lsn.html and /// https://git.postgresql.org/gitweb/?p=postgresql.git;a=commit;h=7d03a83f4d0736ba869fa6f93973f7623a27038a /// - [BuiltInPostgresType("pg_lsn", baseOID: 3220, arrayOID: 3221)] PgLsn = 59, #endregion @@ -479,7 +428,6 @@ public enum NpgsqlDbType /// This value shouldn't ordinarily be used, and makes sense only when sending a data type /// unsupported by Npgsql. /// - [BuiltInPostgresType("unknown", baseOID: 705, arrayOID: 0)] Unknown = 40, #endregion @@ -1036,39 +984,3 @@ _ when unqualifiedName.StartsWith("_", StringComparison.Ordinal) }; } } - -/// -/// Represents a built-in PostgreSQL type as it appears in pg_type, including its name and OID. -/// Extension types with variable OIDs are not represented. -/// -sealed class BuiltInPostgresType : Attribute -{ - internal string Name { get; } - internal uint BaseOID { get; } - internal uint ArrayOID { get; } - - internal string? RangeName { get; } - internal uint RangeOID { get; } - internal string? 
MultirangeName { get; } - internal uint MultirangeOID { get; } - - internal BuiltInPostgresType(string name, uint baseOID, uint arrayOID) - { - Name = name; - BaseOID = baseOID; - ArrayOID = arrayOID; - } - - internal BuiltInPostgresType( - string name, uint baseOID, uint arrayOID, string rangeName, uint rangeOID, string multirangeName, uint multirangeOID) - { - Name = name; - BaseOID = baseOID; - ArrayOID = arrayOID; - - RangeName = rangeName; - RangeOID = rangeOID; - MultirangeName = multirangeName; - MultirangeOID = multirangeOID; - } -} diff --git a/src/Npgsql/PostgresMinimalDatabaseInfo.cs b/src/Npgsql/PostgresMinimalDatabaseInfo.cs index 01afa29abf..eb90453062 100644 --- a/src/Npgsql/PostgresMinimalDatabaseInfo.cs +++ b/src/Npgsql/PostgresMinimalDatabaseInfo.cs @@ -1,10 +1,9 @@ using System.Collections.Generic; -using System.Reflection; using System.Threading.Tasks; using Npgsql.Internal; +using Npgsql.Internal.Postgres; using Npgsql.PostgresTypes; using Npgsql.Util; -using NpgsqlTypes; namespace Npgsql; @@ -23,34 +22,93 @@ sealed class PostgresMinimalDatabaseInfo : PostgresDatabaseInfo static PostgresType[] CreateTypes(bool withMultiranges) { - var builtinTypes = new List(); - foreach (var field in typeof(NpgsqlDbType).GetFields()) - if (field.GetCustomAttribute() is { } attr) - builtinTypes.Add(attr); + var types = new List(); - var pgTypes = new List(); - foreach (var attr in builtinTypes) + Add(DataTypeNames.Int2, oid: 21, arrayOid: 1005); + AddWithRange(DataTypeNames.Int4, oid: 23, arrayOid: 1007, + rangeName: DataTypeNames.Int4Range, rangeOid: 3904, rangeArrayOid: 3905, multirangeOid: 4451, multirangeArrayOid: 6150); + Add(DataTypeNames.Int8, oid: 20, arrayOid: 1016); + AddWithRange(DataTypeNames.Int8, oid: 20, arrayOid: 1016, + rangeName: DataTypeNames.Int8Range, rangeOid: 3926, rangeArrayOid: 3927, multirangeOid: 4536, multirangeArrayOid: 6157); + Add(DataTypeNames.Float4, oid: 700, arrayOid: 1021); + Add(DataTypeNames.Float8, oid: 701, arrayOid: 
1022); + AddWithRange(DataTypeNames.Numeric, oid: 1700, arrayOid: 1231, + rangeName: DataTypeNames.NumRange, rangeOid: 3906, rangeArrayOid: 3907, multirangeOid: 4532, multirangeArrayOid: 6151); + Add(DataTypeNames.Money, oid: 790, arrayOid: 791); + Add(DataTypeNames.Bool, oid: 16, arrayOid: 1000); + Add(DataTypeNames.Box, oid: 603, arrayOid: 1020); + Add(DataTypeNames.Circle, oid: 718, arrayOid: 719); + Add(DataTypeNames.Line, oid: 628, arrayOid: 629); + Add(DataTypeNames.LSeg, oid: 601, arrayOid: 1018); + Add(DataTypeNames.Path, oid: 602, arrayOid: 1019); + Add(DataTypeNames.Point, oid: 600, arrayOid: 1017); + Add(DataTypeNames.Polygon, oid: 604, arrayOid: 1027); + Add(DataTypeNames.Bpchar, oid: 1042, arrayOid: 1014); + Add(DataTypeNames.Text, oid: 25, arrayOid: 1009); + Add(DataTypeNames.Varchar, oid: 1043, arrayOid: 1015); + Add(DataTypeNames.Name, oid: 19, arrayOid: 1003); + Add(DataTypeNames.Bytea, oid: 17, arrayOid: 1001); + AddWithRange(DataTypeNames.Date, oid: 1082, arrayOid: 1182, + rangeName: DataTypeNames.DateRange, rangeOid: 3912, rangeArrayOid: 3913, multirangeOid: 4535, multirangeArrayOid: 6155); + Add(DataTypeNames.Time, oid: 1083, arrayOid: 1183); + AddWithRange(DataTypeNames.Timestamp, oid: 1114, arrayOid: 1115, + rangeName: DataTypeNames.TsRange, rangeOid: 3908, rangeArrayOid: 3909, multirangeOid: 4533, multirangeArrayOid: 6152); + AddWithRange(DataTypeNames.TimestampTz, oid: 1184, arrayOid: 1185, + rangeName: DataTypeNames.TsTzRange, rangeOid: 3910, rangeArrayOid: 3911, multirangeOid: 4534, multirangeArrayOid: 6153); + Add(DataTypeNames.Interval, oid: 1186, arrayOid: 1187); + Add(DataTypeNames.TimeTz, oid: 1266, arrayOid: 1270); + Add(DataTypeNames.Inet, oid: 869, arrayOid: 1041); + Add(DataTypeNames.Cidr, oid: 650, arrayOid: 651); + Add(DataTypeNames.MacAddr, oid: 829, arrayOid: 1040); + Add(DataTypeNames.MacAddr8, oid: 774, arrayOid: 775); + Add(DataTypeNames.Bit, oid: 1560, arrayOid: 1561); + Add(DataTypeNames.Varbit, oid: 1562, arrayOid: 
1563); + Add(DataTypeNames.TsVector, oid: 3614, arrayOid: 3643); + Add(DataTypeNames.TsQuery, oid: 3615, arrayOid: 3645); + Add(DataTypeNames.RegConfig, oid: 3734, arrayOid: 3735); + Add(DataTypeNames.Uuid, oid: 2950, arrayOid: 2951); + Add(DataTypeNames.Xml, oid: 142, arrayOid: 143); + Add(DataTypeNames.Json, oid: 114, arrayOid: 199); + Add(DataTypeNames.Jsonb, oid: 3802, arrayOid: 3807); + Add(DataTypeNames.Jsonpath, oid: 4072, arrayOid: 4073); + Add(DataTypeNames.RefCursor, oid: 1790, arrayOid: 2201); + Add(DataTypeNames.OidVector, oid: 30, arrayOid: 1013); + Add(DataTypeNames.Int2Vector, oid: 22, arrayOid: 1006); + Add(DataTypeNames.Oid, oid: 26, arrayOid: 1028); + Add(DataTypeNames.Xid, oid: 28, arrayOid: 1011); + Add(DataTypeNames.Xid8, oid: 5069, arrayOid: 271); + Add(DataTypeNames.Cid, oid: 29, arrayOid: 1012); + Add(DataTypeNames.RegType, oid: 2206, arrayOid: 2211); + Add(DataTypeNames.Tid, oid: 27, arrayOid: 1010); + Add(DataTypeNames.PgLsn, oid: 3220, arrayOid: 3221); + Add(DataTypeNames.Unknown, oid: 705, arrayOid: 0); + Add(DataTypeNames.Void, oid: 2278, arrayOid: 0); + + return types.ToArray(); + + void Add(DataTypeName name, uint oid, uint arrayOid) { - var baseType = new PostgresBaseType("pg_catalog", attr.Name, attr.BaseOID); - var arrayType = new PostgresArrayType("pg_catalog", "_" + attr.Name, attr.ArrayOID, baseType); + var type = new PostgresBaseType(name, oid); + types.Add(type); + if (arrayOid is not 0) + types.Add(new PostgresArrayType(name.ToArrayName(), arrayOid, type)); + } - if (attr.RangeName is null) - pgTypes.AddRange(new PostgresType[] { baseType, arrayType }); - else + void AddWithRange(DataTypeName name, uint oid, uint arrayOid, DataTypeName rangeName, uint rangeOid, uint rangeArrayOid, uint multirangeOid, uint multirangeArrayOid) + { + var type = new PostgresBaseType(name, oid); + var rangeType = new PostgresRangeType(rangeName, rangeOid, type); + types.Add(type); + types.Add(new PostgresArrayType(name.ToArrayName(), arrayOid, 
type)); + types.Add(rangeType); + types.Add(new PostgresArrayType(rangeName.ToArrayName(), rangeArrayOid, rangeType)); + if (withMultiranges) { - var rangeType = new PostgresRangeType("pg_catalog", attr.RangeName, attr.RangeOID, baseType); - - pgTypes.AddRange(withMultiranges - ? new PostgresType[] - { - baseType, arrayType, rangeType, - new PostgresMultirangeType("pg_catalog", attr.MultirangeName!, attr.MultirangeOID, rangeType) - } - : new PostgresType[] { baseType, arrayType, rangeType }); + var multirangeType = new PostgresMultirangeType(rangeName.ToDefaultMultirangeName(), multirangeOid, rangeType); + types.Add(multirangeType); + types.Add(new PostgresArrayType(multirangeType.DataTypeName.ToArrayName(), multirangeArrayOid, multirangeType)); } } - - return pgTypes.ToArray(); } protected override IEnumerable GetTypes() diff --git a/src/Npgsql/PostgresTypes/PostgresArrayType.cs b/src/Npgsql/PostgresTypes/PostgresArrayType.cs index 7f0b2246d3..2f46d31cf2 100644 --- a/src/Npgsql/PostgresTypes/PostgresArrayType.cs +++ b/src/Npgsql/PostgresTypes/PostgresArrayType.cs @@ -1,4 +1,6 @@ -namespace Npgsql.PostgresTypes; +using Npgsql.Internal.Postgres; + +namespace Npgsql.PostgresTypes; /// /// Represents a PostgreSQL array data type, which can hold several multiple values in a single column. @@ -23,6 +25,16 @@ protected internal PostgresArrayType(string ns, string name, uint oid, PostgresT Element.Array = this; } + /// + /// Constructs a representation of a PostgreSQL array data type. 
+ /// + internal PostgresArrayType(DataTypeName dataTypeName, Oid oid, PostgresType elementPostgresType) + : base(dataTypeName, oid) + { + Element = elementPostgresType; + Element.Array = this; + } + // PostgreSQL array types have an underscore-prefixed name (_text), but we // want to return the public text[] instead /// diff --git a/src/Npgsql/PostgresTypes/PostgresBaseType.cs b/src/Npgsql/PostgresTypes/PostgresBaseType.cs index a7cb0857cc..11c289b1a8 100644 --- a/src/Npgsql/PostgresTypes/PostgresBaseType.cs +++ b/src/Npgsql/PostgresTypes/PostgresBaseType.cs @@ -1,4 +1,6 @@  +using Npgsql.Internal.Postgres; + namespace Npgsql.PostgresTypes; /// @@ -6,10 +8,17 @@ namespace Npgsql.PostgresTypes; /// public class PostgresBaseType : PostgresType { - /// + /// + /// Constructs a representation of a PostgreSQL base data type. + /// protected internal PostgresBaseType(string ns, string name, uint oid) - : base(ns, name, oid) - {} + : base(ns, name, oid) {} + + /// + /// Constructs a representation of a PostgreSQL base data type. + /// + internal PostgresBaseType(DataTypeName dataTypeName, Oid oid) + : base(dataTypeName, oid) {} /// internal override string GetPartialNameWithFacets(int typeModifier) diff --git a/src/Npgsql/PostgresTypes/PostgresCompositeType.cs b/src/Npgsql/PostgresTypes/PostgresCompositeType.cs index fb31254900..2d53199e6f 100644 --- a/src/Npgsql/PostgresTypes/PostgresCompositeType.cs +++ b/src/Npgsql/PostgresTypes/PostgresCompositeType.cs @@ -1,4 +1,5 @@ using System.Collections.Generic; +using Npgsql.Internal.Postgres; namespace Npgsql.PostgresTypes; @@ -20,10 +21,14 @@ public class PostgresCompositeType : PostgresType /// /// Constructs a representation of a PostgreSQL array data type. 
/// -#pragma warning disable CA2222 // Do not decrease inherited member visibility internal PostgresCompositeType(string ns, string name, uint oid) : base(ns, name, oid) {} -#pragma warning restore CA2222 // Do not decrease inherited member visibility + + /// + /// Constructs a representation of a PostgreSQL domain data type. + /// + internal PostgresCompositeType(DataTypeName dataTypeName, Oid oid) + : base(dataTypeName, oid) {} /// /// Represents a field in a PostgreSQL composite data type. @@ -48,4 +53,4 @@ internal Field(string name, PostgresType type) /// public override string ToString() => $"{Name} => {Type}"; } -} \ No newline at end of file +} diff --git a/src/Npgsql/PostgresTypes/PostgresDomainType.cs b/src/Npgsql/PostgresTypes/PostgresDomainType.cs index f9b504b0ca..cab9323015 100644 --- a/src/Npgsql/PostgresTypes/PostgresDomainType.cs +++ b/src/Npgsql/PostgresTypes/PostgresDomainType.cs @@ -1,4 +1,6 @@ -namespace Npgsql.PostgresTypes; +using Npgsql.Internal.Postgres; + +namespace Npgsql.PostgresTypes; /// /// Represents a PostgreSQL domain type. @@ -33,6 +35,16 @@ protected internal PostgresDomainType(string ns, string name, uint oid, Postgres NotNull = notNull; } + /// + /// Constructs a representation of a PostgreSQL domain data type. 
+ /// + internal PostgresDomainType(DataTypeName dataTypeName, Oid oid, PostgresType baseType, bool notNull) + : base(dataTypeName, oid) + { + BaseType = baseType; + NotNull = notNull; + } + internal override PostgresFacets GetFacets(int typeModifier) => BaseType.GetFacets(typeModifier); -} \ No newline at end of file +} diff --git a/src/Npgsql/PostgresTypes/PostgresEnumType.cs b/src/Npgsql/PostgresTypes/PostgresEnumType.cs index f456946070..7e4440252e 100644 --- a/src/Npgsql/PostgresTypes/PostgresEnumType.cs +++ b/src/Npgsql/PostgresTypes/PostgresEnumType.cs @@ -1,4 +1,5 @@ using System.Collections.Generic; +using Npgsql.Internal.Postgres; namespace Npgsql.PostgresTypes; @@ -21,6 +22,12 @@ public class PostgresEnumType : PostgresType /// Constructs a representation of a PostgreSQL enum data type. /// protected internal PostgresEnumType(string ns, string name, uint oid) - : base(ns, name, oid) - {} -} \ No newline at end of file + : base(ns, name, oid) {} + + /// + /// Constructs a representation of a PostgreSQL enum data type. + /// + internal PostgresEnumType(DataTypeName dataTypeName, Oid oid) + : base(dataTypeName, oid) {} + +} diff --git a/src/Npgsql/PostgresTypes/PostgresMultirangeType.cs b/src/Npgsql/PostgresTypes/PostgresMultirangeType.cs index 2e57075cb3..2769df87f8 100644 --- a/src/Npgsql/PostgresTypes/PostgresMultirangeType.cs +++ b/src/Npgsql/PostgresTypes/PostgresMultirangeType.cs @@ -1,4 +1,6 @@ -namespace Npgsql.PostgresTypes; +using Npgsql.Internal.Postgres; + +namespace Npgsql.PostgresTypes; /// /// Represents a PostgreSQL multirange data type. @@ -15,7 +17,7 @@ public class PostgresMultirangeType : PostgresType public PostgresRangeType Subrange { get; } /// - /// Constructs a representation of a PostgreSQL range data type. + /// Constructs a representation of a PostgreSQL multirange data type. 
/// protected internal PostgresMultirangeType(string ns, string name, uint oid, PostgresRangeType rangePostgresType) : base(ns, name, oid) @@ -23,4 +25,14 @@ protected internal PostgresMultirangeType(string ns, string name, uint oid, Post Subrange = rangePostgresType; Subrange.Multirange = this; } + + /// + /// Constructs a representation of a PostgreSQL multirange data type. + /// + internal PostgresMultirangeType(DataTypeName dataTypeName, Oid oid, PostgresRangeType rangePostgresType) + : base(dataTypeName, oid) + { + Subrange = rangePostgresType; + Subrange.Multirange = this; + } } diff --git a/src/Npgsql/PostgresTypes/PostgresRangeType.cs b/src/Npgsql/PostgresTypes/PostgresRangeType.cs index bc981bd998..a26a71afae 100644 --- a/src/Npgsql/PostgresTypes/PostgresRangeType.cs +++ b/src/Npgsql/PostgresTypes/PostgresRangeType.cs @@ -1,4 +1,6 @@ -namespace Npgsql.PostgresTypes; +using Npgsql.Internal.Postgres; + +namespace Npgsql.PostgresTypes; /// /// Represents a PostgreSQL range data type. @@ -28,4 +30,14 @@ protected internal PostgresRangeType( Subtype = subtypePostgresType; Subtype.Range = this; } -} \ No newline at end of file + + /// + /// Constructs a representation of a PostgreSQL range data type. + /// + internal PostgresRangeType(DataTypeName dataTypeName, Oid oid, PostgresType subtypePostgresType) + : base(dataTypeName, oid) + { + Subtype = subtypePostgresType; + Subtype.Range = this; + } +} diff --git a/src/Npgsql/PostgresTypes/PostgresType.cs b/src/Npgsql/PostgresTypes/PostgresType.cs index 8cc5fb7b63..1182588c8c 100644 --- a/src/Npgsql/PostgresTypes/PostgresType.cs +++ b/src/Npgsql/PostgresTypes/PostgresType.cs @@ -29,6 +29,18 @@ private protected PostgresType(string ns, string name, uint oid) FullName = Namespace + "." + Name; } + /// + /// Constructs a representation of a PostgreSQL data type. + /// + /// The data type's fully qualified name. + /// The data type's OID. 
+ private protected PostgresType(DataTypeName dataTypeName, Oid oid) + { + DataTypeName = dataTypeName; + OID = oid.Value; + FullName = Namespace + "." + Name; + } + #endregion #region Public Properties diff --git a/src/Npgsql/PostgresTypes/PostgresTypeKind.cs b/src/Npgsql/PostgresTypes/PostgresTypeKind.cs new file mode 100644 index 0000000000..03330f9050 --- /dev/null +++ b/src/Npgsql/PostgresTypes/PostgresTypeKind.cs @@ -0,0 +1,21 @@ +namespace Npgsql.PostgresTypes; + +enum PostgresTypeKind +{ + /// A base type. + Base, + /// An enum carrying its variants. + Enum, + /// A pseudo type like anyarray. + Pseudo, + // An array carrying its element type. + Array, + // A range carrying its element type. + Range, + // A multi-range carrying its element type. + Multirange, + // A domain carrying its underlying type. + Domain, + // A composite carrying its constituent fields. + Composite +} diff --git a/src/Npgsql/TypeMapping/DefaultPgTypes.cs b/src/Npgsql/TypeMapping/DefaultPgTypes.cs deleted file mode 100644 index 015e338a2c..0000000000 --- a/src/Npgsql/TypeMapping/DefaultPgTypes.cs +++ /dev/null @@ -1,191 +0,0 @@ -using System; -using System.Collections.Generic; -using Npgsql.Internal.Postgres; -using static Npgsql.TypeMapping.PgTypeGroup; - -namespace Npgsql.TypeMapping; - -static class DefaultPgTypes -{ - static IEnumerable> GetIdentifiers() - { - var list = new List>(); - foreach (var group in Items) - { - list.Add(new(group.Oid, group.Name)); - list.Add(new(group.ArrayOid, group.ArrayName)); - if (group.TypeKind is PgTypeKind.Range) - { - list.Add(new(group.MultirangeOid!.Value, group.MultirangeName!.Value)); - list.Add(new(group.MultirangeArrayOid!.Value, group.MultirangeArrayName!.Value)); - } - } - - return list; - } - - static Dictionary? 
_oidMap; - public static IReadOnlyDictionary OidMap - { - get - { - if (_oidMap is not null) - return _oidMap; - - var dict = new Dictionary(); - foreach (var element in GetIdentifiers()) - dict.Add(element.Key, element.Value); - - return _oidMap = dict; - } - } - - static Dictionary? _dataTypeNameMap; - public static IReadOnlyDictionary DataTypeNameMap - { - get - { - if (_dataTypeNameMap is not null) - return _dataTypeNameMap; - - var dict = new Dictionary(); - foreach (var element in GetIdentifiers()) - dict.Add(element.Value, element.Key); - - return _dataTypeNameMap = dict; - } - } - - // We could also codegen this from pg_type.dat that lives in the postgres repo. - public static IEnumerable Items - => new[] - { - Create(DataTypeNames.Int2, oid: 21, arrayOid: 1005), - Create(DataTypeNames.Int4, oid: 23, arrayOid: 1007), - Create(DataTypeNames.Int4Range, oid: 3904, arrayOid: 3905, multirangeOid: 4451, multirangeArrayOid: 6150, typeKind: PgTypeKind.Range), - Create(DataTypeNames.Int8, oid: 20, arrayOid: 1016), - Create(DataTypeNames.Int8Range, oid: 3926, arrayOid: 3927, multirangeOid: 4536, multirangeArrayOid: 6157, typeKind: PgTypeKind.Range), - Create(DataTypeNames.Float4, oid: 700, arrayOid: 1021), - Create(DataTypeNames.Float8, oid: 701, arrayOid: 1022), - Create(DataTypeNames.Numeric, oid: 1700, arrayOid: 1231), - Create(DataTypeNames.NumRange, oid: 3906, arrayOid: 3907, multirangeOid: 4532, multirangeArrayOid: 6151, typeKind: PgTypeKind.Range), - Create(DataTypeNames.Money, oid: 790, arrayOid: 791), - Create(DataTypeNames.Bool, oid: 16, arrayOid: 1000), - Create(DataTypeNames.Box, oid: 603, arrayOid: 1020), - Create(DataTypeNames.Circle, oid: 718, arrayOid: 719), - Create(DataTypeNames.Line, oid: 628, arrayOid: 629), - Create(DataTypeNames.LSeg, oid: 601, arrayOid: 1018), - Create(DataTypeNames.Path, oid: 602, arrayOid: 1019), - Create(DataTypeNames.Point, oid: 600, arrayOid: 1017), - Create(DataTypeNames.Polygon, oid: 604, arrayOid: 1027), - 
Create(DataTypeNames.Bpchar, oid: 1042, arrayOid: 1014), - Create(DataTypeNames.Text, oid: 25, arrayOid: 1009), - Create(DataTypeNames.Varchar, oid: 1043, arrayOid: 1015), - Create(DataTypeNames.Name, oid: 19, arrayOid: 1003), - Create(DataTypeNames.Bytea, oid: 17, arrayOid: 1001), - Create(DataTypeNames.Date, oid: 1082, arrayOid: 1182), - Create(DataTypeNames.DateRange, oid: 3912, arrayOid: 3913, multirangeOid: 4535, multirangeArrayOid: 6155, typeKind: PgTypeKind.Range), - Create(DataTypeNames.Time, oid: 1083, arrayOid: 1183), - Create(DataTypeNames.Timestamp, oid: 1114, arrayOid: 1115), - Create(DataTypeNames.TsRange, oid: 3908, arrayOid: 3909, multirangeOid: 4533, multirangeArrayOid: 6152, typeKind: PgTypeKind.Range), - Create(DataTypeNames.TimestampTz, oid: 1184, arrayOid: 1185), - Create(DataTypeNames.TsTzRange, oid: 3910, arrayOid: 3911, multirangeOid: 4534, multirangeArrayOid: 6153, typeKind: PgTypeKind.Range), - Create(DataTypeNames.Interval, oid: 1186, arrayOid: 1187), - Create(DataTypeNames.TimeTz, oid: 1266, arrayOid: 1270), - Create(DataTypeNames.Inet, oid: 869, arrayOid: 1041), - Create(DataTypeNames.Cidr, oid: 650, arrayOid: 651), - Create(DataTypeNames.MacAddr, oid: 829, arrayOid: 1040), - Create(DataTypeNames.MacAddr8, oid: 774, arrayOid: 775), - Create(DataTypeNames.Bit, oid: 1560, arrayOid: 1561), - Create(DataTypeNames.Varbit, oid: 1562, arrayOid: 1563), - Create(DataTypeNames.TsVector, oid: 3614, arrayOid: 3643), - Create(DataTypeNames.TsQuery, oid: 3615, arrayOid: 3645), - Create(DataTypeNames.RegConfig, oid: 3734, arrayOid: 3735), - Create(DataTypeNames.Uuid, oid: 2950, arrayOid: 2951), - Create(DataTypeNames.Xml, oid: 142, arrayOid: 143), - Create(DataTypeNames.Json, oid: 114, arrayOid: 199), - Create(DataTypeNames.Jsonb, oid: 3802, arrayOid: 3807), - Create(DataTypeNames.Jsonpath, oid: 4072, arrayOid: 4073), - Create(DataTypeNames.RefCursor, oid: 1790, arrayOid: 2201), - Create(DataTypeNames.OidVector, oid: 30, arrayOid: 1013), - 
Create(DataTypeNames.Int2Vector, oid: 22, arrayOid: 1006), - Create(DataTypeNames.Oid, oid: 26, arrayOid: 1028), - Create(DataTypeNames.Xid, oid: 28, arrayOid: 1011), - Create(DataTypeNames.Xid8, oid: 5069, arrayOid: 271), - Create(DataTypeNames.Cid, oid: 29, arrayOid: 1012), - Create(DataTypeNames.RegType, oid: 2206, arrayOid: 2211), - Create(DataTypeNames.Tid, oid: 27, arrayOid: 1010), - Create(DataTypeNames.PgLsn, oid: 3220, arrayOid: 3221), - Create(DataTypeNames.Unknown, oid: 705, arrayOid: 0, typeKind: PgTypeKind.Pseudo), - Create(DataTypeNames.Void, oid: 2278, arrayOid: 0, typeKind: PgTypeKind.Pseudo), - }; -} - -enum PgTypeKind -{ - /// A base type. - Base, - /// An enum carying its variants. - Enum, - /// A pseudo type like anyarray. - Pseudo, - // An array carying its element type. - Array, - // A range carying its element type. - Range, - // A multi-range carying its element type. - Multirange, - // A domain carying its underlying type. - Domain, - // A composite carying its constituent fields. - Composite -} - -readonly struct PgTypeGroup -{ - public required PgTypeKind TypeKind { get; init; } - public required DataTypeName Name { get; init; } - public required Oid Oid { get; init; } - public required DataTypeName ArrayName { get; init; } - public required Oid ArrayOid { get; init; } - public DataTypeName? MultirangeName { get; init; } - public Oid? MultirangeOid { get; init; } - public DataTypeName? MultirangeArrayName { get; init; } - public Oid? MultirangeArrayOid { get; init; } - - public static PgTypeGroup Create(DataTypeName name, Oid oid, Oid arrayOid, string? multirangeName = null, Oid? multirangeOid = null, Oid? multirangeArrayOid = null, PgTypeKind typeKind = PgTypeKind.Base) - { - DataTypeName? 
multirangeDataTypeName = null; - if (typeKind is PgTypeKind.Range) - { - if (multirangeOid is null) - throw new ArgumentException("When a range is supplied its multirange oid cannot be omitted."); - if (multirangeArrayOid is null) - throw new ArgumentException("When a range is supplied its multirange array oid cannot be omitted."); - multirangeDataTypeName = multirangeName is not null ? DataTypeName.CreateFullyQualifiedName(multirangeName) : name.ToDefaultMultirangeName(); - } - else - { - if (multirangeName is not null || multirangeOid is not null) - throw new ArgumentException("Only range types can have a multirange oid or name."); - - if (multirangeArrayOid is not null) - throw new ArgumentException("Only range types can have a multirange array oid."); - } - - return new PgTypeGroup - { - TypeKind = typeKind, - Name = name, - Oid = oid, - - ArrayName = name.ToArrayName(), - ArrayOid = arrayOid, - - MultirangeName = multirangeDataTypeName, - MultirangeOid = multirangeOid, - MultirangeArrayName = multirangeDataTypeName?.ToArrayName(), - MultirangeArrayOid = multirangeArrayOid - }; - } -} diff --git a/test/Npgsql.Tests/BugTests.cs b/test/Npgsql.Tests/BugTests.cs index 6dac813475..e3c05dd5fb 100644 --- a/test/Npgsql.Tests/BugTests.cs +++ b/test/Npgsql.Tests/BugTests.cs @@ -1,6 +1,5 @@ using Npgsql.BackendMessages; using Npgsql.Tests.Support; -using Npgsql.TypeMapping; using NpgsqlTypes; using NUnit.Framework; using System; @@ -16,7 +15,7 @@ namespace Npgsql.Tests; public class BugTests : TestBase { - static uint ByteaOid => DefaultPgTypes.DataTypeNameMap[DataTypeNames.Bytea].Value; + static uint ByteaOid => PostgresMinimalDatabaseInfo.DefaultTypeCatalog.GetOid(DataTypeNames.Bytea).Value; #region Sequential reader bugs diff --git a/test/Npgsql.Tests/CommandTests.cs b/test/Npgsql.Tests/CommandTests.cs index 024840b837..9c42da9c0c 100644 --- a/test/Npgsql.Tests/CommandTests.cs +++ b/test/Npgsql.Tests/CommandTests.cs @@ -1,7 +1,6 @@ using Npgsql.BackendMessages; using 
Npgsql.Internal; using Npgsql.Tests.Support; -using Npgsql.TypeMapping; using NpgsqlTypes; using NUnit.Framework; using System; @@ -19,8 +18,8 @@ namespace Npgsql.Tests; public class CommandTests : MultiplexingTestBase { - static uint Int4Oid => DefaultPgTypes.DataTypeNameMap[DataTypeNames.Int4].Value; - static uint TextOid => DefaultPgTypes.DataTypeNameMap[DataTypeNames.Text].Value; + static uint Int4Oid => PostgresMinimalDatabaseInfo.DefaultTypeCatalog.GetOid(DataTypeNames.Int4).Value; + static uint TextOid => PostgresMinimalDatabaseInfo.DefaultTypeCatalog.GetOid(DataTypeNames.Text).Value; #region Legacy batching diff --git a/test/Npgsql.Tests/ReaderTests.cs b/test/Npgsql.Tests/ReaderTests.cs index 790b1b48e0..2f59075a10 100644 --- a/test/Npgsql.Tests/ReaderTests.cs +++ b/test/Npgsql.Tests/ReaderTests.cs @@ -13,7 +13,6 @@ using Npgsql.Internal.Postgres; using Npgsql.PostgresTypes; using Npgsql.Tests.Support; -using Npgsql.TypeMapping; using Npgsql.Util; using NpgsqlTypes; using NUnit.Framework; @@ -27,8 +26,8 @@ namespace Npgsql.Tests; [TestFixture(MultiplexingMode.Multiplexing, CommandBehavior.SequentialAccess)] public class ReaderTests : MultiplexingTestBase { - static uint Int4Oid => DefaultPgTypes.DataTypeNameMap[DataTypeNames.Int4].Value; - static uint ByteaOid => DefaultPgTypes.DataTypeNameMap[DataTypeNames.Bytea].Value; + static uint Int4Oid => PostgresMinimalDatabaseInfo.DefaultTypeCatalog.GetOid(DataTypeNames.Int4).Value; + static uint ByteaOid => PostgresMinimalDatabaseInfo.DefaultTypeCatalog.GetOid(DataTypeNames.Bytea).Value; [Test] public async Task Resumable_non_consumed_to_non_resumable() @@ -1238,7 +1237,7 @@ public async Task Dispose_does_not_swallow_exceptions([Values(true, false)] bool await pgMock .WriteParseComplete() .WriteBindComplete() - .WriteRowDescription(new FieldDescription(DefaultPgTypes.DataTypeNameMap[DataTypeNames.Int4].Value)) + .WriteRowDescription(new FieldDescription(Int4Oid)) 
.WriteDataRow(BitConverter.GetBytes(BinaryPrimitives.ReverseEndianness(1))) .FlushAsync(); diff --git a/test/Npgsql.Tests/Support/PgServerMock.cs b/test/Npgsql.Tests/Support/PgServerMock.cs index 04ae5fc948..0b81b40021 100644 --- a/test/Npgsql.Tests/Support/PgServerMock.cs +++ b/test/Npgsql.Tests/Support/PgServerMock.cs @@ -8,16 +8,15 @@ using Npgsql.BackendMessages; using Npgsql.Internal; using Npgsql.Internal.Postgres; -using Npgsql.TypeMapping; using NUnit.Framework; namespace Npgsql.Tests.Support; class PgServerMock : IDisposable { - static uint BoolOid => DefaultPgTypes.DataTypeNameMap[DataTypeNames.Bool].Value; - static uint Int4Oid => DefaultPgTypes.DataTypeNameMap[DataTypeNames.Int4].Value; - static uint TextOid => DefaultPgTypes.DataTypeNameMap[DataTypeNames.Text].Value; + static uint BoolOid => PostgresMinimalDatabaseInfo.DefaultTypeCatalog.GetOid(DataTypeNames.Bool).Value; + static uint Int4Oid => PostgresMinimalDatabaseInfo.DefaultTypeCatalog.GetOid(DataTypeNames.Int4).Value; + static uint TextOid => PostgresMinimalDatabaseInfo.DefaultTypeCatalog.GetOid(DataTypeNames.Text).Value; static readonly Encoding Encoding = NpgsqlWriteBuffer.UTF8Encoding; From e8f20a0f74c26dd59b2e02e2bf1889afc231246d Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Tue, 14 Nov 2023 19:06:38 +0100 Subject: [PATCH 296/761] Introduce resolver factories (#5374) Closes #5272 --- .../Internal/GeoJSONTypeInfoResolver.cs | 96 ---- .../GeoJSONTypeInfoResolverFactory.cs | 116 ++++ src/Npgsql.GeoJSON/NpgsqlGeoJSONExtensions.cs | 4 +- .../Internal/JsonNetPocoTypeInfoResolver.cs | 105 ---- .../JsonNetPocoTypeInfoResolverFactory.cs | 132 +++++ .../Internal/JsonNetTypeInfoResolver.cs | 67 --- .../JsonNetTypeInfoResolverFactory.cs | 78 +++ .../NpgsqlJsonNetExtensions.cs | 7 +- .../NetTopologySuiteTypeInfoResolver.cs | 130 ----- ...NetTopologySuiteTypeInfoResolverFactory.cs | 154 ++++++ .../NpgsqlNetTopologySuiteExtensions.cs | 5 +- .../Internal/NodaTimeTypeInfoResolver.cs | 271 --------- 
...aTimeTypeInfoResolverFactory.Multirange.cs | 149 +++++ .../NodaTimeTypeInfoResolverFactory.Range.cs | 93 ++++ .../NodaTimeTypeInfoResolverFactory.cs | 149 +++++ .../NpgsqlNodaTimeExtensions.cs | 5 +- .../PgTypeInfoResolverChainBuilder.cs | 129 +++++ .../Internal/PgTypeInfoResolverFactory.cs | 13 + .../AdoTypeInfoResolverFactory.Multirange.cs} | 367 +++++------- .../AdoTypeInfoResolverFactory.Range.cs | 219 ++++++++ .../AdoTypeInfoResolverFactory.cs | 522 ++++++++++++++++++ ...ExtraConversionsTypeInfoResolverFactory.cs | 239 ++++++++ .../FullTextSearchTypeInfoResolverFactory.cs | 93 ++++ .../GeometricTypeInfoResolverFactory.cs | 63 +++ .../JsonDynamicTypeInfoResolverFactory.cs | 173 ++++++ .../JsonTypeInfoResolverFactory.cs | 100 ++++ .../LTreeTypeInfoResolverFactory.cs | 66 +++ .../NetworkTypeInfoResolverFactory.cs | 96 ++++ .../RecordTypeInfoResolverFactory.cs | 59 ++ .../TupledRecordTypeInfoResolverFactory.cs | 103 ++++ .../UnmappedTypeInfoResolverFactory.cs | 169 ++++++ .../UnsupportedTypeInfoResolver.cs | 11 +- .../Internal/Resolvers/AdoTypeInfoResolver.cs | 514 ----------------- .../Resolvers/ExtraConversionsResolver.cs | 233 -------- .../FullTextSearchTypeInfoResolver.cs | 87 --- .../Resolvers/GeometricTypeInfoResolver.cs | 57 -- .../Resolvers/JsonDynamicTypeInfoResolver.cs | 154 ------ .../Resolvers/JsonTypeInfoResolvers.cs | 85 --- .../Resolvers/LTreeTypeInfoResolver.cs | 57 -- .../Resolvers/NetworkTypeInfoResolver.cs | 83 --- .../Resolvers/RecordTypeInfoResolvers.cs | 143 ----- .../Resolvers/UnmappedEnumTypeInfoResolver.cs | 50 -- .../UnmappedMultirangeTypeInfoResolver.cs | 59 -- .../UnmappedRangeTypeInfoResolver.cs | 59 -- src/Npgsql/NpgsqlDataSource.cs | 4 +- src/Npgsql/NpgsqlDataSourceBuilder.cs | 100 ++-- src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 101 ++-- src/Npgsql/PublicAPI.Unshipped.txt | 6 +- src/Npgsql/TypeMapping/GlobalTypeMapper.cs | 58 +- src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs | 8 +- .../INpgsqlTypeMapperExtensions.cs | 17 +- 
src/Npgsql/TypeMapping/UserTypeMapper.cs | 69 ++- test/Npgsql.Tests/GlobalTypeMapperTests.cs | 12 +- test/Npgsql.Tests/ReaderTests.cs | 25 +- test/Npgsql.Tests/TypeMapperTests.cs | 23 +- .../Npgsql.Tests/Types/LegacyDateTimeTests.cs | 5 +- 56 files changed, 3256 insertions(+), 2736 deletions(-) delete mode 100644 src/Npgsql.GeoJSON/Internal/GeoJSONTypeInfoResolver.cs create mode 100644 src/Npgsql.GeoJSON/Internal/GeoJSONTypeInfoResolverFactory.cs delete mode 100644 src/Npgsql.Json.NET/Internal/JsonNetPocoTypeInfoResolver.cs create mode 100644 src/Npgsql.Json.NET/Internal/JsonNetPocoTypeInfoResolverFactory.cs delete mode 100644 src/Npgsql.Json.NET/Internal/JsonNetTypeInfoResolver.cs create mode 100644 src/Npgsql.Json.NET/Internal/JsonNetTypeInfoResolverFactory.cs delete mode 100644 src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeInfoResolver.cs create mode 100644 src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeInfoResolverFactory.cs delete mode 100644 src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolver.cs create mode 100644 src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolverFactory.Multirange.cs create mode 100644 src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolverFactory.Range.cs create mode 100644 src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolverFactory.cs create mode 100644 src/Npgsql/Internal/PgTypeInfoResolverChainBuilder.cs create mode 100644 src/Npgsql/Internal/PgTypeInfoResolverFactory.cs rename src/Npgsql/Internal/{Resolvers/RangeTypeInfoResolver.cs => ResolverFactories/AdoTypeInfoResolverFactory.Multirange.cs} (51%) create mode 100644 src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Range.cs create mode 100644 src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs create mode 100644 src/Npgsql/Internal/ResolverFactories/ExtraConversionsTypeInfoResolverFactory.cs create mode 100644 src/Npgsql/Internal/ResolverFactories/FullTextSearchTypeInfoResolverFactory.cs create mode 100644 
src/Npgsql/Internal/ResolverFactories/GeometricTypeInfoResolverFactory.cs create mode 100644 src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs create mode 100644 src/Npgsql/Internal/ResolverFactories/JsonTypeInfoResolverFactory.cs create mode 100644 src/Npgsql/Internal/ResolverFactories/LTreeTypeInfoResolverFactory.cs create mode 100644 src/Npgsql/Internal/ResolverFactories/NetworkTypeInfoResolverFactory.cs create mode 100644 src/Npgsql/Internal/ResolverFactories/RecordTypeInfoResolverFactory.cs create mode 100644 src/Npgsql/Internal/ResolverFactories/TupledRecordTypeInfoResolverFactory.cs create mode 100644 src/Npgsql/Internal/ResolverFactories/UnmappedTypeInfoResolverFactory.cs rename src/Npgsql/Internal/{Resolvers => ResolverFactories}/UnsupportedTypeInfoResolver.cs (83%) delete mode 100644 src/Npgsql/Internal/Resolvers/AdoTypeInfoResolver.cs delete mode 100644 src/Npgsql/Internal/Resolvers/ExtraConversionsResolver.cs delete mode 100644 src/Npgsql/Internal/Resolvers/FullTextSearchTypeInfoResolver.cs delete mode 100644 src/Npgsql/Internal/Resolvers/GeometricTypeInfoResolver.cs delete mode 100644 src/Npgsql/Internal/Resolvers/JsonDynamicTypeInfoResolver.cs delete mode 100644 src/Npgsql/Internal/Resolvers/JsonTypeInfoResolvers.cs delete mode 100644 src/Npgsql/Internal/Resolvers/LTreeTypeInfoResolver.cs delete mode 100644 src/Npgsql/Internal/Resolvers/NetworkTypeInfoResolver.cs delete mode 100644 src/Npgsql/Internal/Resolvers/RecordTypeInfoResolvers.cs delete mode 100644 src/Npgsql/Internal/Resolvers/UnmappedEnumTypeInfoResolver.cs delete mode 100644 src/Npgsql/Internal/Resolvers/UnmappedMultirangeTypeInfoResolver.cs delete mode 100644 src/Npgsql/Internal/Resolvers/UnmappedRangeTypeInfoResolver.cs diff --git a/src/Npgsql.GeoJSON/Internal/GeoJSONTypeInfoResolver.cs b/src/Npgsql.GeoJSON/Internal/GeoJSONTypeInfoResolver.cs deleted file mode 100644 index 78dec45cbd..0000000000 --- a/src/Npgsql.GeoJSON/Internal/GeoJSONTypeInfoResolver.cs +++ 
/dev/null @@ -1,96 +0,0 @@ -using System; -using GeoJSON.Net; -using GeoJSON.Net.Geometry; -using Npgsql.Internal; -using Npgsql.Internal.Postgres; - -namespace Npgsql.GeoJSON.Internal; - -class GeoJSONTypeInfoResolver : IPgTypeInfoResolver -{ - readonly GeoJSONOptions _options; - readonly bool _geographyAsDefault; - readonly CrsMap? _crsMap; - - TypeInfoMappingCollection? _mappings; - protected TypeInfoMappingCollection Mappings => _mappings ??= AddInfos(new(), _options, _geographyAsDefault, _crsMap); - - public GeoJSONTypeInfoResolver(GeoJSONOptions options, bool geographyAsDefault, CrsMap? crsMap = null) - { - _options = options; - _geographyAsDefault = geographyAsDefault; - _crsMap = crsMap; - } - - public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) - => Mappings.Find(type, dataTypeName, options); - - static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings, GeoJSONOptions geoJsonOptions, bool geographyAsDefault, CrsMap? crsMap) - { - crsMap ??= new CrsMap(CrsMap.WellKnown); - - var geometryMatchRequirement = !geographyAsDefault ? MatchRequirement.Single : MatchRequirement.DataTypeName; - var geographyMatchRequirement = geographyAsDefault ? MatchRequirement.Single : MatchRequirement.DataTypeName; - - foreach (var dataTypeName in new[] { "geometry", "geography" }) - { - var matchRequirement = dataTypeName == "geometry" ? 
geometryMatchRequirement : geographyMatchRequirement; - - mappings.AddType(dataTypeName, - (options, mapping, _) => mapping.CreateInfo(options, new GeoJSONConverter(geoJsonOptions, crsMap)), - matchRequirement); - mappings.AddType(dataTypeName, - (options, mapping, _) => mapping.CreateInfo(options, new GeoJSONConverter(geoJsonOptions, crsMap)), - matchRequirement); - mappings.AddType(dataTypeName, - (options, mapping, _) => mapping.CreateInfo(options, new GeoJSONConverter(geoJsonOptions, crsMap)), - matchRequirement); - mappings.AddType(dataTypeName, - (options, mapping, _) => mapping.CreateInfo(options, new GeoJSONConverter(geoJsonOptions, crsMap)), - matchRequirement); - mappings.AddType(dataTypeName, - (options, mapping, _) => mapping.CreateInfo(options, new GeoJSONConverter(geoJsonOptions, crsMap)), - matchRequirement); - mappings.AddType(dataTypeName, - (options, mapping, _) => mapping.CreateInfo(options, new GeoJSONConverter(geoJsonOptions, crsMap)), - matchRequirement); - mappings.AddType(dataTypeName, - (options, mapping, _) => mapping.CreateInfo(options, new GeoJSONConverter(geoJsonOptions, crsMap)), - matchRequirement); - mappings.AddType(dataTypeName, - (options, mapping, _) => mapping.CreateInfo(options, new GeoJSONConverter(geoJsonOptions, crsMap)), - matchRequirement); - } - - return mappings; - } - - protected static TypeInfoMappingCollection AddArrayInfos(TypeInfoMappingCollection mappings) - { - foreach (var dataTypeName in new[] { "geometry", "geography" }) - { - mappings.AddArrayType(dataTypeName); - mappings.AddArrayType(dataTypeName); - mappings.AddArrayType(dataTypeName); - mappings.AddArrayType(dataTypeName); - mappings.AddArrayType(dataTypeName); - mappings.AddArrayType(dataTypeName); - mappings.AddArrayType(dataTypeName); - mappings.AddArrayType(dataTypeName); - } - - return mappings; - } -} - -sealed class GeoJSONArrayTypeInfoResolver : GeoJSONTypeInfoResolver, IPgTypeInfoResolver -{ - TypeInfoMappingCollection? 
_mappings; - new TypeInfoMappingCollection Mappings => _mappings ??= AddArrayInfos(new(base.Mappings)); - - public GeoJSONArrayTypeInfoResolver(GeoJSONOptions options, bool geographyAsDefault, CrsMap? crsMap = null) - : base(options, geographyAsDefault, crsMap) { } - - public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) - => Mappings.Find(type, dataTypeName, options); -} diff --git a/src/Npgsql.GeoJSON/Internal/GeoJSONTypeInfoResolverFactory.cs b/src/Npgsql.GeoJSON/Internal/GeoJSONTypeInfoResolverFactory.cs new file mode 100644 index 0000000000..c25118f1d7 --- /dev/null +++ b/src/Npgsql.GeoJSON/Internal/GeoJSONTypeInfoResolverFactory.cs @@ -0,0 +1,116 @@ +using System; +using GeoJSON.Net; +using GeoJSON.Net.Geometry; +using Npgsql.Internal; +using Npgsql.Internal.Postgres; + +namespace Npgsql.GeoJSON.Internal; + +sealed class GeoJSONTypeInfoResolverFactory : PgTypeInfoResolverFactory +{ + readonly GeoJSONOptions _options; + readonly bool _geographyAsDefault; + readonly CrsMap? _crsMap; + + public GeoJSONTypeInfoResolverFactory(GeoJSONOptions options, bool geographyAsDefault, CrsMap? crsMap = null) + { + _options = options; + _geographyAsDefault = geographyAsDefault; + _crsMap = crsMap; + } + + public override IPgTypeInfoResolver CreateResolver() => new Resolver(_options, _geographyAsDefault, _crsMap); + public override IPgTypeInfoResolver? CreateArrayResolver() => new ArrayResolver(_options, _geographyAsDefault, _crsMap); + + class Resolver : IPgTypeInfoResolver + { + readonly GeoJSONOptions _options; + readonly bool _geographyAsDefault; + readonly CrsMap? _crsMap; + + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(), _options, _geographyAsDefault, _crsMap); + + public Resolver(GeoJSONOptions options, bool geographyAsDefault, CrsMap? 
crsMap = null) + { + _options = options; + _geographyAsDefault = geographyAsDefault; + _crsMap = crsMap; + } + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings, GeoJSONOptions geoJsonOptions, + bool geographyAsDefault, CrsMap? crsMap) + { + crsMap ??= new CrsMap(CrsMap.WellKnown); + + var geometryMatchRequirement = !geographyAsDefault ? MatchRequirement.Single : MatchRequirement.DataTypeName; + var geographyMatchRequirement = geographyAsDefault ? MatchRequirement.Single : MatchRequirement.DataTypeName; + + foreach (var dataTypeName in new[] { "geometry", "geography" }) + { + var matchRequirement = dataTypeName == "geometry" ? geometryMatchRequirement : geographyMatchRequirement; + + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new GeoJSONConverter(geoJsonOptions, crsMap)), + matchRequirement); + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new GeoJSONConverter(geoJsonOptions, crsMap)), + matchRequirement); + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new GeoJSONConverter(geoJsonOptions, crsMap)), + matchRequirement); + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new GeoJSONConverter(geoJsonOptions, crsMap)), + matchRequirement); + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new GeoJSONConverter(geoJsonOptions, crsMap)), + matchRequirement); + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new GeoJSONConverter(geoJsonOptions, crsMap)), + matchRequirement); + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new GeoJSONConverter(geoJsonOptions, crsMap)), + matchRequirement); + 
mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new GeoJSONConverter(geoJsonOptions, crsMap)), + matchRequirement); + } + + return mappings; + } + } + + sealed class ArrayResolver : Resolver, IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); + + public ArrayResolver(GeoJSONOptions options, bool geographyAsDefault, CrsMap? crsMap = null) + : base(options, geographyAsDefault, crsMap) + { + } + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + foreach (var dataTypeName in new[] { "geometry", "geography" }) + { + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + } + + return mappings; + } + } +} diff --git a/src/Npgsql.GeoJSON/NpgsqlGeoJSONExtensions.cs b/src/Npgsql.GeoJSON/NpgsqlGeoJSONExtensions.cs index 6817094caa..b47a9b211f 100644 --- a/src/Npgsql.GeoJSON/NpgsqlGeoJSONExtensions.cs +++ b/src/Npgsql.GeoJSON/NpgsqlGeoJSONExtensions.cs @@ -18,7 +18,7 @@ public static class NpgsqlGeoJSONExtensions /// Specifies that the geography type is used for mapping by default. 
public static INpgsqlTypeMapper UseGeoJson(this INpgsqlTypeMapper mapper, GeoJSONOptions options = GeoJSONOptions.None, bool geographyAsDefault = false) { - mapper.AddTypeInfoResolver(new GeoJSONTypeInfoResolver(options, geographyAsDefault, crsMap: null)); + mapper.AddTypeInfoResolverFactory(new GeoJSONTypeInfoResolverFactory(options, geographyAsDefault, crsMap: null)); return mapper; } @@ -31,7 +31,7 @@ public static INpgsqlTypeMapper UseGeoJson(this INpgsqlTypeMapper mapper, GeoJSO /// Specifies that the geography type is used for mapping by default. public static INpgsqlTypeMapper UseGeoJson(this INpgsqlTypeMapper mapper, CrsMap crsMap, GeoJSONOptions options = GeoJSONOptions.None, bool geographyAsDefault = false) { - mapper.AddTypeInfoResolver(new GeoJSONTypeInfoResolver(options, geographyAsDefault, crsMap)); + mapper.AddTypeInfoResolverFactory(new GeoJSONTypeInfoResolverFactory(options, geographyAsDefault, crsMap)); return mapper; } } diff --git a/src/Npgsql.Json.NET/Internal/JsonNetPocoTypeInfoResolver.cs b/src/Npgsql.Json.NET/Internal/JsonNetPocoTypeInfoResolver.cs deleted file mode 100644 index 9d92dca4db..0000000000 --- a/src/Npgsql.Json.NET/Internal/JsonNetPocoTypeInfoResolver.cs +++ /dev/null @@ -1,105 +0,0 @@ -using System; -using System.Diagnostics.CodeAnalysis; -using System.Text; -using Newtonsoft.Json; -using Npgsql.Internal; -using Npgsql.Internal.Postgres; - -namespace Npgsql.Json.NET.Internal; - -[RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] -[RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. 
This may not work when AOT compiling.")] -class JsonNetPocoTypeInfoResolver : DynamicTypeInfoResolver, IPgTypeInfoResolver -{ - protected TypeInfoMappingCollection Mappings { get; } = new(); - protected JsonSerializerSettings _serializerSettings; - - const string JsonDataTypeName = "pg_catalog.json"; - const string JsonbDataTypeName = "pg_catalog.jsonb"; - - public JsonNetPocoTypeInfoResolver(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerSettings? serializerSettings = null) - { - // Capture default settings during construction. - _serializerSettings = serializerSettings ??= JsonConvert.DefaultSettings?.Invoke() ?? new JsonSerializerSettings(); - - AddMappings(Mappings, jsonbClrTypes ?? Array.Empty(), jsonClrTypes ?? Array.Empty(), serializerSettings); - } - - void AddMappings(TypeInfoMappingCollection mappings, Type[] jsonbClrTypes, Type[] jsonClrTypes, JsonSerializerSettings serializerSettings) - { - AddUserMappings(jsonb: true, jsonbClrTypes); - AddUserMappings(jsonb: false, jsonClrTypes); - - void AddUserMappings(bool jsonb, Type[] clrTypes) - { - var dynamicMappings = CreateCollection(); - var dataTypeName = jsonb ? JsonbDataTypeName : JsonDataTypeName; - foreach (var jsonType in clrTypes) - { - dynamicMappings.AddMapping(jsonType, dataTypeName, - factory: (options, mapping, _) => mapping.CreateInfo(options, - CreateConverter(mapping.Type, jsonb, options.TextEncoding, serializerSettings))); - } - mappings.AddRange(dynamicMappings.ToTypeInfoMappingCollection()); - } - } - - protected void AddArrayInfos(TypeInfoMappingCollection mappings, TypeInfoMappingCollection baseMappings) - { - if (baseMappings.Items.Count == 0) - return; - - var dynamicMappings = CreateCollection(baseMappings); - foreach (var mapping in baseMappings.Items) - dynamicMappings.AddArrayMapping(mapping.Type, mapping.DataTypeName); - mappings.AddRange(dynamicMappings.ToTypeInfoMappingCollection()); - } - - public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) - => Mappings.Find(type, dataTypeName, options) ?? base.GetTypeInfo(type, dataTypeName, options); - - protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) - { - // Match all types except null, object and text types as long as DataTypeName (json/jsonb) is present. - if (type is null || type == typeof(object) || Array.IndexOf(PgSerializerOptions.WellKnownTextTypes, type) != -1 - || dataTypeName != JsonbDataTypeName && dataTypeName != JsonDataTypeName) - return null; - - return CreateCollection().AddMapping(type, dataTypeName, (options, mapping, _) => - { - var jsonb = dataTypeName == JsonbDataTypeName; - return mapping.CreateInfo(options, - CreateConverter(mapping.Type, jsonb, options.TextEncoding, _serializerSettings)); - }); - } - - static PgConverter CreateConverter(Type valueType, bool jsonb, Encoding textEncoding, JsonSerializerSettings settings) - => (PgConverter)Activator.CreateInstance( - typeof(JsonNetJsonConverter<>).MakeGenericType(valueType), - jsonb, - textEncoding, - settings - )!; -} - -[RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] -[RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] -sealed class JsonNetPocoArrayTypeInfoResolver : JsonNetPocoTypeInfoResolver, IPgTypeInfoResolver -{ - new TypeInfoMappingCollection Mappings { get; } - - public JsonNetPocoArrayTypeInfoResolver(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerSettings? serializerSettings = null) - : base(jsonbClrTypes, jsonClrTypes, serializerSettings) - { - Mappings = new TypeInfoMappingCollection(base.Mappings); - AddArrayInfos(Mappings, base.Mappings); - } - - public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) - => Mappings.Find(type, dataTypeName, options) ?? base.GetTypeInfo(type, dataTypeName, options); - - protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) - => type is not null && IsArrayLikeType(type, out var elementType) && IsArrayDataTypeName(dataTypeName, options, out var elementDataTypeName) - ? base.GetMappings(elementType, elementDataTypeName, options)?.AddArrayMapping(elementType, elementDataTypeName) - : null; -} diff --git a/src/Npgsql.Json.NET/Internal/JsonNetPocoTypeInfoResolverFactory.cs b/src/Npgsql.Json.NET/Internal/JsonNetPocoTypeInfoResolverFactory.cs new file mode 100644 index 0000000000..7f4b4219f0 --- /dev/null +++ b/src/Npgsql.Json.NET/Internal/JsonNetPocoTypeInfoResolverFactory.cs @@ -0,0 +1,132 @@ +using System; +using System.Diagnostics.CodeAnalysis; +using System.Text; +using Newtonsoft.Json; +using Npgsql.Internal; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Json.NET.Internal; + +[RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] +[RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] +sealed class JsonNetPocoTypeInfoResolverFactory : PgTypeInfoResolverFactory +{ + readonly Type[]? _jsonbClrTypes; + readonly Type[]? _jsonClrTypes; + readonly JsonSerializerSettings? _serializerSettings; + + public JsonNetPocoTypeInfoResolverFactory(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerSettings? serializerSettings = null) + { + _jsonbClrTypes = jsonbClrTypes; + _jsonClrTypes = jsonClrTypes; + _serializerSettings = serializerSettings; + } + + public override IPgTypeInfoResolver CreateResolver() => new Resolver(_jsonbClrTypes, _jsonClrTypes, _serializerSettings); + public override IPgTypeInfoResolver? 
CreateArrayResolver() => new ArrayResolver(_jsonbClrTypes, _jsonClrTypes, _serializerSettings); + + [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] + [RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] + class Resolver : DynamicTypeInfoResolver, IPgTypeInfoResolver + { + readonly Type[]? _jsonbClrTypes; + readonly Type[]? _jsonClrTypes; + readonly JsonSerializerSettings _serializerSettings; + + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(), _jsonbClrTypes ?? Array.Empty(), _jsonClrTypes ?? Array.Empty(), _serializerSettings); + + const string JsonDataTypeName = "pg_catalog.json"; + const string JsonbDataTypeName = "pg_catalog.jsonb"; + + public Resolver(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerSettings? serializerSettings = null) + { + _jsonbClrTypes = jsonbClrTypes; + _jsonClrTypes = jsonClrTypes; + // Capture default settings during construction. + _serializerSettings = serializerSettings ?? JsonConvert.DefaultSettings?.Invoke() ?? new JsonSerializerSettings(); + } + + TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings, Type[] jsonbClrTypes, Type[] jsonClrTypes, JsonSerializerSettings serializerSettings) + { + AddUserMappings(mappings, jsonb: true, jsonbClrTypes, serializerSettings); + AddUserMappings(mappings, jsonb: false, jsonClrTypes, serializerSettings); + return mappings; + + static void AddUserMappings(TypeInfoMappingCollection mappings, bool jsonb, Type[] clrTypes, JsonSerializerSettings serializerSettings) + { + var dynamicMappings = CreateCollection(); + var dataTypeName = jsonb ? 
JsonbDataTypeName : JsonDataTypeName; + foreach (var jsonType in clrTypes) + { + dynamicMappings.AddMapping(jsonType, dataTypeName, + factory: (options, mapping, _) => mapping.CreateInfo(options, + CreateConverter(mapping.Type, jsonb, options.TextEncoding, serializerSettings))); + } + mappings.AddRange(dynamicMappings.ToTypeInfoMappingCollection()); + } + } + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options) ?? base.GetTypeInfo(type, dataTypeName, options); + + protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) + { + // Match all types except null, object and text types as long as DataTypeName (json/jsonb) is present. + if (type is null || type == typeof(object) || Array.IndexOf(PgSerializerOptions.WellKnownTextTypes, type) != -1 + || dataTypeName != JsonbDataTypeName && dataTypeName != JsonDataTypeName) + return null; + + return CreateCollection().AddMapping(type, dataTypeName, (options, mapping, _) => + { + var jsonb = dataTypeName == JsonbDataTypeName; + return mapping.CreateInfo(options, + CreateConverter(mapping.Type, jsonb, options.TextEncoding, _serializerSettings)); + }); + } + + static PgConverter CreateConverter(Type valueType, bool jsonb, Encoding textEncoding, JsonSerializerSettings settings) + => (PgConverter)Activator.CreateInstance( + typeof(JsonNetJsonConverter<>).MakeGenericType(valueType), + jsonb, + textEncoding, + settings + )!; + } + + [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] + [RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] + sealed class ArrayResolver : Resolver, IPgTypeInfoResolver + { + TypeInfoMappingCollection? 
_mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings), base.Mappings); + + public ArrayResolver(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerSettings? serializerSettings = null) + : base(jsonbClrTypes, jsonClrTypes, serializerSettings) + { + } + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options) ?? base.GetTypeInfo(type, dataTypeName, options); + + TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings, TypeInfoMappingCollection baseMappings) + { + if (baseMappings.Items.Count == 0) + return mappings; + + var dynamicMappings = CreateCollection(baseMappings); + foreach (var mapping in baseMappings.Items) + dynamicMappings.AddArrayMapping(mapping.Type, mapping.DataTypeName); + mappings.AddRange(dynamicMappings.ToTypeInfoMappingCollection()); + + return mappings; + } + + protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) + => type is not null && IsArrayLikeType(type, out var elementType) && IsArrayDataTypeName(dataTypeName, options, out var elementDataTypeName) + ? 
base.GetMappings(elementType, elementDataTypeName, options)?.AddArrayMapping(elementType, elementDataTypeName) + : null; + } + +} + diff --git a/src/Npgsql.Json.NET/Internal/JsonNetTypeInfoResolver.cs b/src/Npgsql.Json.NET/Internal/JsonNetTypeInfoResolver.cs deleted file mode 100644 index 7954c4bb2f..0000000000 --- a/src/Npgsql.Json.NET/Internal/JsonNetTypeInfoResolver.cs +++ /dev/null @@ -1,67 +0,0 @@ -using System; -using System.Diagnostics.CodeAnalysis; -using System.Reflection; -using System.Text; -using Newtonsoft.Json; -using Newtonsoft.Json.Linq; -using Npgsql.Internal; -using Npgsql.Internal.Postgres; - -namespace Npgsql.Json.NET.Internal; - -class JsonNetTypeInfoResolver : IPgTypeInfoResolver -{ - protected TypeInfoMappingCollection Mappings { get; } = new(); - - public JsonNetTypeInfoResolver(JsonSerializerSettings? settings = null) - => AddTypeInfos(Mappings, settings); - - static void AddTypeInfos(TypeInfoMappingCollection mappings, JsonSerializerSettings? settings = null) - { - // Capture default settings during construction. - settings ??= JsonConvert.DefaultSettings?.Invoke() ?? new JsonSerializerSettings(); - - // Jsonb is the first default for JToken etc. 
- foreach (var dataTypeName in new[] { "jsonb", "json" }) - { - var jsonb = dataTypeName == "jsonb"; - mappings.AddType(dataTypeName, (options, mapping, _) => - mapping.CreateInfo(options, new JsonNetJsonConverter(jsonb, options.TextEncoding, settings)), - isDefault: true); - mappings.AddType(dataTypeName, (options, mapping, _) => - mapping.CreateInfo(options, new JsonNetJsonConverter(jsonb, options.TextEncoding, settings))); - mappings.AddType(dataTypeName, (options, mapping, _) => - mapping.CreateInfo(options, new JsonNetJsonConverter(jsonb, options.TextEncoding, settings))); - mappings.AddType(dataTypeName, (options, mapping, _) => - mapping.CreateInfo(options, new JsonNetJsonConverter(jsonb, options.TextEncoding, settings))); - } - } - - protected static void AddArrayInfos(TypeInfoMappingCollection mappings) - { - foreach (var dataTypeName in new[] { "jsonb", "json" }) - { - mappings.AddArrayType(dataTypeName); - mappings.AddArrayType(dataTypeName); - mappings.AddArrayType(dataTypeName); - mappings.AddArrayType(dataTypeName); - } - } - - public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) - => Mappings.Find(type, dataTypeName, options); -} - -sealed class JsonNetArrayTypeInfoResolver : JsonNetTypeInfoResolver, IPgTypeInfoResolver -{ - new TypeInfoMappingCollection Mappings { get; } - - public JsonNetArrayTypeInfoResolver(JsonSerializerSettings? settings = null) : base(settings) - { - Mappings = new TypeInfoMappingCollection(base.Mappings); - AddArrayInfos(Mappings); - } - - public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) - => Mappings.Find(type, dataTypeName, options); -} diff --git a/src/Npgsql.Json.NET/Internal/JsonNetTypeInfoResolverFactory.cs b/src/Npgsql.Json.NET/Internal/JsonNetTypeInfoResolverFactory.cs new file mode 100644 index 0000000000..4a125c5ab8 --- /dev/null +++ b/src/Npgsql.Json.NET/Internal/JsonNetTypeInfoResolverFactory.cs @@ -0,0 +1,78 @@ +using System; +using Newtonsoft.Json; +using Newtonsoft.Json.Linq; +using Npgsql.Internal; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Json.NET.Internal; + +sealed class JsonNetTypeInfoResolverFactory : PgTypeInfoResolverFactory +{ + readonly JsonSerializerSettings? _settings; + + public JsonNetTypeInfoResolverFactory(JsonSerializerSettings? settings = null) => _settings = settings; + + public override IPgTypeInfoResolver CreateResolver() => new Resolver(_settings); + public override IPgTypeInfoResolver? CreateArrayResolver() => new ArrayResolver(_settings); + + class Resolver : IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + readonly JsonSerializerSettings _serializerSettings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(), _serializerSettings); + + public Resolver(JsonSerializerSettings? settings = null) + { + // Capture default settings during construction. + _serializerSettings = settings ?? JsonConvert.DefaultSettings?.Invoke() ?? new JsonSerializerSettings(); + } + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings, JsonSerializerSettings settings) + { + // Jsonb is the first default for JToken etc. 
+ foreach (var dataTypeName in new[] { "jsonb", "json" }) + { + var jsonb = dataTypeName == "jsonb"; + mappings.AddType(dataTypeName, (options, mapping, _) => + mapping.CreateInfo(options, new JsonNetJsonConverter(jsonb, options.TextEncoding, settings)), + isDefault: true); + mappings.AddType(dataTypeName, (options, mapping, _) => + mapping.CreateInfo(options, new JsonNetJsonConverter(jsonb, options.TextEncoding, settings))); + mappings.AddType(dataTypeName, (options, mapping, _) => + mapping.CreateInfo(options, new JsonNetJsonConverter(jsonb, options.TextEncoding, settings))); + mappings.AddType(dataTypeName, (options, mapping, _) => + mapping.CreateInfo(options, new JsonNetJsonConverter(jsonb, options.TextEncoding, settings))); + } + + return mappings; + } + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + } + + sealed class ArrayResolver : Resolver, IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); + + public ArrayResolver(JsonSerializerSettings? settings = null) : base(settings) {} + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + foreach (var dataTypeName in new[] { "jsonb", "json" }) + { + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + } + + return mappings; + } + } +} + diff --git a/src/Npgsql.Json.NET/NpgsqlJsonNetExtensions.cs b/src/Npgsql.Json.NET/NpgsqlJsonNetExtensions.cs index 9bfa11df25..f2b33933b8 100644 --- a/src/Npgsql.Json.NET/NpgsqlJsonNetExtensions.cs +++ b/src/Npgsql.Json.NET/NpgsqlJsonNetExtensions.cs @@ -32,12 +32,9 @@ public static INpgsqlTypeMapper UseJsonNet( Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null) { - // TODO opt-in of arrays. // Reverse order - mapper.AddTypeInfoResolver(new JsonNetPocoArrayTypeInfoResolver(jsonbClrTypes, jsonClrTypes, settings)); - mapper.AddTypeInfoResolver(new JsonNetArrayTypeInfoResolver(settings)); - mapper.AddTypeInfoResolver(new JsonNetPocoTypeInfoResolver(jsonbClrTypes, jsonClrTypes, settings)); - mapper.AddTypeInfoResolver(new JsonNetTypeInfoResolver(settings)); + mapper.AddTypeInfoResolverFactory(new JsonNetPocoTypeInfoResolverFactory(jsonbClrTypes, jsonClrTypes, settings)); + mapper.AddTypeInfoResolverFactory(new JsonNetTypeInfoResolverFactory(settings)); return mapper; } } diff --git a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeInfoResolver.cs b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeInfoResolver.cs deleted file mode 100644 index d5a42172a9..0000000000 --- a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeInfoResolver.cs +++ /dev/null @@ -1,130 +0,0 @@ -using System; -using NetTopologySuite; -using NetTopologySuite.Geometries; -using NetTopologySuite.IO; -using Npgsql.Internal; -using Npgsql.Internal.Postgres; - -namespace Npgsql.NetTopologySuite.Internal; - -class 
NetTopologySuiteTypeInfoResolver : IPgTypeInfoResolver -{ - readonly PostGisReader _gisReader; - readonly bool _geographyAsDefault; - - TypeInfoMappingCollection? _mappings; - protected TypeInfoMappingCollection Mappings => _mappings ??= AddInfos(new(), _gisReader, new(), _geographyAsDefault); - - public NetTopologySuiteTypeInfoResolver( - CoordinateSequenceFactory? coordinateSequenceFactory, - PrecisionModel? precisionModel, - Ordinates handleOrdinates, - bool geographyAsDefault) - { - coordinateSequenceFactory ??= NtsGeometryServices.Instance.DefaultCoordinateSequenceFactory; - precisionModel ??= NtsGeometryServices.Instance.DefaultPrecisionModel; - handleOrdinates = handleOrdinates == Ordinates.None ? coordinateSequenceFactory.Ordinates : handleOrdinates; - - _geographyAsDefault = geographyAsDefault; - _gisReader = new PostGisReader(coordinateSequenceFactory, precisionModel, handleOrdinates); - } - - public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) - => Mappings.Find(type, dataTypeName, options); - - static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings, PostGisReader reader, PostGisWriter writer, bool geographyAsDefault) - { - // geometry - mappings.AddType("geometry", - (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), - matchRequirement: !geographyAsDefault ? MatchRequirement.Single : MatchRequirement.DataTypeName); - - mappings.AddType("geometry", - (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), - matchRequirement: !geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); - mappings.AddType("geometry", - (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), - matchRequirement: !geographyAsDefault ? 
MatchRequirement.All : MatchRequirement.DataTypeName); - mappings.AddType("geometry", - (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), - matchRequirement: !geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); - mappings.AddType("geometry", - (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), - matchRequirement: !geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); - mappings.AddType("geometry", - (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), - matchRequirement: !geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); - mappings.AddType("geometry", - (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), - matchRequirement: !geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); - mappings.AddType("geometry", - (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), - matchRequirement: !geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); - - // geography - mappings.AddType("geography", - (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), - matchRequirement: geographyAsDefault ? MatchRequirement.Single : MatchRequirement.DataTypeName); - - mappings.AddType("geography", - (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), - matchRequirement: geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); - mappings.AddType("geography", - (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), - matchRequirement: geographyAsDefault ? 
MatchRequirement.All : MatchRequirement.DataTypeName); - mappings.AddType("geography", - (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), - matchRequirement: geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); - mappings.AddType("geography", - (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), - matchRequirement: geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); - mappings.AddType("geography", - (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), - matchRequirement: geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); - mappings.AddType("geography", - (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), - matchRequirement: geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); - mappings.AddType("geography", - (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), - matchRequirement: geographyAsDefault ? 
MatchRequirement.All : MatchRequirement.DataTypeName); - - return mappings; - } - - protected static TypeInfoMappingCollection AddArrayInfos(TypeInfoMappingCollection mappings) - { - // geometry - mappings.AddArrayType("geometry"); - mappings.AddArrayType("geometry"); - mappings.AddArrayType("geometry"); - mappings.AddArrayType("geometry"); - mappings.AddArrayType("geometry"); - mappings.AddArrayType("geometry"); - mappings.AddArrayType("geometry"); - mappings.AddArrayType("geometry"); - - // geography - mappings.AddArrayType("geography"); - mappings.AddArrayType("geography"); - mappings.AddArrayType("geography"); - mappings.AddArrayType("geography"); - mappings.AddArrayType("geography"); - mappings.AddArrayType("geography"); - mappings.AddArrayType("geography"); - mappings.AddArrayType("geography"); - - return mappings; - } -} - -sealed class NetTopologySuiteArrayTypeInfoResolver : NetTopologySuiteTypeInfoResolver, IPgTypeInfoResolver -{ - TypeInfoMappingCollection? _mappings; - new TypeInfoMappingCollection Mappings => _mappings ??= AddArrayInfos(new(base.Mappings)); - - public NetTopologySuiteArrayTypeInfoResolver(CoordinateSequenceFactory? coordinateSequenceFactory, PrecisionModel? precisionModel, Ordinates handleOrdinates, bool geographyAsDefault) - : base(coordinateSequenceFactory, precisionModel, handleOrdinates, geographyAsDefault) { } - - public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) - => Mappings.Find(type, dataTypeName, options); -} diff --git a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeInfoResolverFactory.cs b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeInfoResolverFactory.cs new file mode 100644 index 0000000000..0509e90158 --- /dev/null +++ b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeInfoResolverFactory.cs @@ -0,0 +1,154 @@ +using System; +using NetTopologySuite; +using NetTopologySuite.Geometries; +using NetTopologySuite.IO; +using Npgsql.Internal; +using Npgsql.Internal.Postgres; + +namespace Npgsql.NetTopologySuite.Internal; + +sealed class NetTopologySuiteTypeInfoResolverFactory : PgTypeInfoResolverFactory +{ + readonly CoordinateSequenceFactory? _coordinateSequenceFactory; + readonly PrecisionModel? _precisionModel; + readonly Ordinates _handleOrdinates; + readonly bool _geographyAsDefault; + + public NetTopologySuiteTypeInfoResolverFactory(CoordinateSequenceFactory? coordinateSequenceFactory, PrecisionModel? precisionModel, + Ordinates handleOrdinates, bool geographyAsDefault) + { + _coordinateSequenceFactory = coordinateSequenceFactory; + _precisionModel = precisionModel; + _handleOrdinates = handleOrdinates; + _geographyAsDefault = geographyAsDefault; + } + + public override IPgTypeInfoResolver CreateResolver() => new Resolver(_coordinateSequenceFactory, _precisionModel, _handleOrdinates, _geographyAsDefault); + public override IPgTypeInfoResolver? CreateArrayResolver() => new ArrayResolver(_coordinateSequenceFactory, _precisionModel, _handleOrdinates, _geographyAsDefault); + + class Resolver : IPgTypeInfoResolver + { + readonly PostGisReader _gisReader; + readonly bool _geographyAsDefault; + + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(), _gisReader, new(), _geographyAsDefault); + + public Resolver( + CoordinateSequenceFactory? coordinateSequenceFactory, + PrecisionModel? 
precisionModel, + Ordinates handleOrdinates, + bool geographyAsDefault) + { + coordinateSequenceFactory ??= NtsGeometryServices.Instance.DefaultCoordinateSequenceFactory; + precisionModel ??= NtsGeometryServices.Instance.DefaultPrecisionModel; + handleOrdinates = handleOrdinates == Ordinates.None ? coordinateSequenceFactory.Ordinates : handleOrdinates; + + _geographyAsDefault = geographyAsDefault; + _gisReader = new PostGisReader(coordinateSequenceFactory, precisionModel, handleOrdinates); + } + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings, PostGisReader reader, PostGisWriter writer, + bool geographyAsDefault) + { + // geometry + mappings.AddType("geometry", + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), + matchRequirement: !geographyAsDefault ? MatchRequirement.Single : MatchRequirement.DataTypeName); + + mappings.AddType("geometry", + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), + matchRequirement: !geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); + mappings.AddType("geometry", + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), + matchRequirement: !geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); + mappings.AddType("geometry", + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), + matchRequirement: !geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); + mappings.AddType("geometry", + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), + matchRequirement: !geographyAsDefault ? 
MatchRequirement.All : MatchRequirement.DataTypeName); + mappings.AddType("geometry", + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), + matchRequirement: !geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); + mappings.AddType("geometry", + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), + matchRequirement: !geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); + mappings.AddType("geometry", + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), + matchRequirement: !geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); + + // geography + mappings.AddType("geography", + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), + matchRequirement: geographyAsDefault ? MatchRequirement.Single : MatchRequirement.DataTypeName); + + mappings.AddType("geography", + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), + matchRequirement: geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); + mappings.AddType("geography", + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), + matchRequirement: geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); + mappings.AddType("geography", + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), + matchRequirement: geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); + mappings.AddType("geography", + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), + matchRequirement: geographyAsDefault ? 
MatchRequirement.All : MatchRequirement.DataTypeName); + mappings.AddType("geography", + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), + matchRequirement: geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); + mappings.AddType("geography", + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), + matchRequirement: geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); + mappings.AddType("geography", + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), + matchRequirement: geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); + + return mappings; + } + } + + sealed class ArrayResolver : Resolver, IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); + + public ArrayResolver(CoordinateSequenceFactory? coordinateSequenceFactory, PrecisionModel? precisionModel, + Ordinates handleOrdinates, bool geographyAsDefault) + : base(coordinateSequenceFactory, precisionModel, handleOrdinates, geographyAsDefault) + { + } + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + // geometry + mappings.AddArrayType("geometry"); + mappings.AddArrayType("geometry"); + mappings.AddArrayType("geometry"); + mappings.AddArrayType("geometry"); + mappings.AddArrayType("geometry"); + mappings.AddArrayType("geometry"); + mappings.AddArrayType("geometry"); + mappings.AddArrayType("geometry"); + + // geography + mappings.AddArrayType("geography"); + mappings.AddArrayType("geography"); + mappings.AddArrayType("geography"); + mappings.AddArrayType("geography"); + mappings.AddArrayType("geography"); + mappings.AddArrayType("geography"); + mappings.AddArrayType("geography"); + mappings.AddArrayType("geography"); + + return mappings; + } + } +} diff --git a/src/Npgsql.NetTopologySuite/NpgsqlNetTopologySuiteExtensions.cs b/src/Npgsql.NetTopologySuite/NpgsqlNetTopologySuiteExtensions.cs index 928c5c65f4..a30d023891 100644 --- a/src/Npgsql.NetTopologySuite/NpgsqlNetTopologySuiteExtensions.cs +++ b/src/Npgsql.NetTopologySuite/NpgsqlNetTopologySuiteExtensions.cs @@ -27,10 +27,7 @@ public static INpgsqlTypeMapper UseNetTopologySuite( Ordinates handleOrdinates = Ordinates.None, bool geographyAsDefault = false) { - // TODO opt-in of arrays. 
- // Reverse order - mapper.AddTypeInfoResolver(new NetTopologySuiteArrayTypeInfoResolver(coordinateSequenceFactory, precisionModel, handleOrdinates, geographyAsDefault)); - mapper.AddTypeInfoResolver(new NetTopologySuiteTypeInfoResolver(coordinateSequenceFactory, precisionModel, handleOrdinates, geographyAsDefault)); + mapper.AddTypeInfoResolverFactory(new NetTopologySuiteTypeInfoResolverFactory(coordinateSequenceFactory, precisionModel, handleOrdinates, geographyAsDefault)); return mapper; } } diff --git a/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolver.cs b/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolver.cs deleted file mode 100644 index 344695118e..0000000000 --- a/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolver.cs +++ /dev/null @@ -1,271 +0,0 @@ -using System; -using System.Collections.Generic; -using NodaTime; -using Npgsql.Internal; -using Npgsql.Internal.Postgres; -using NpgsqlTypes; -using static Npgsql.NodaTime.Internal.NodaTimeUtils; -using static Npgsql.Internal.PgConverterFactory; - -namespace Npgsql.NodaTime.Internal; - -class NodaTimeTypeInfoResolver : IPgTypeInfoResolver -{ - static DataTypeName TimestampTzDataTypeName => new("pg_catalog.timestamptz"); - static DataTypeName TimestampDataTypeName => new("pg_catalog.timestamp"); - static DataTypeName DateDataTypeName => new("pg_catalog.date"); - static DataTypeName TimeDataTypeName => new("pg_catalog.time"); - static DataTypeName TimeTzDataTypeName => new("pg_catalog.timetz"); - static DataTypeName IntervalDataTypeName => new("pg_catalog.interval"); - - static DataTypeName DateRangeDataTypeName => new("pg_catalog.daterange"); - static DataTypeName DateMultirangeDataTypeName => new("pg_catalog.datemultirange"); - static DataTypeName TimestampTzRangeDataTypeName => new("pg_catalog.tstzrange"); - static DataTypeName TimestampTzMultirangeDataTypeName => new("pg_catalog.tstzmultirange"); - static DataTypeName TimestampRangeDataTypeName => new("pg_catalog.tsrange"); - static DataTypeName 
TimestampMultirangeDataTypeName => new("pg_catalog.tsmultirange"); - - TypeInfoMappingCollection? _mappings; - protected TypeInfoMappingCollection Mappings => _mappings ??= AddInfos(new()); - - public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) - => Mappings.Find(type, dataTypeName, options); - - static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings) - { - // timestamp and timestamptz, legacy and non-legacy modes - if (LegacyTimestampBehavior) - { - // timestamptz - mappings.AddStructType(new DataTypeName("pg_catalog.timestamptz"), - static (options, mapping, _) => - mapping.CreateInfo(options, new InstantConverter(options.EnableDateTimeInfinityConversions)), isDefault: false); - mappings.AddStructType(new DataTypeName("pg_catalog.timestamptz"), - static (options, mapping, _) => - mapping.CreateInfo(options, new LegacyTimestampTzZonedDateTimeConverter( - DateTimeZoneProviders.Tzdb[options.TimeZone], options.EnableDateTimeInfinityConversions))); - mappings.AddStructType(new DataTypeName("pg_catalog.timestamptz"), - static (options, mapping, _) => - mapping.CreateInfo(options, new LegacyTimestampTzOffsetDateTimeConverter( - DateTimeZoneProviders.Tzdb[options.TimeZone], options.EnableDateTimeInfinityConversions))); - - // timestamp - mappings.AddStructType(TimestampDataTypeName, - static (options, mapping, _) => - mapping.CreateInfo(options, new InstantConverter(options.EnableDateTimeInfinityConversions)), - isDefault: true); - mappings.AddStructType(TimestampDataTypeName, - static (options, mapping, _) => - mapping.CreateInfo(options, new LocalDateTimeConverter(options.EnableDateTimeInfinityConversions)), - isDefault: false); - } - else - { - // timestamptz - mappings.AddStructType(TimestampTzDataTypeName, - static (options, mapping, _) => - mapping.CreateInfo(options, new InstantConverter(options.EnableDateTimeInfinityConversions)), isDefault: true); - mappings.AddStructType(new 
DataTypeName("pg_catalog.timestamptz"), - static (options, mapping, _) => - mapping.CreateInfo(options, new ZonedDateTimeConverter(options.EnableDateTimeInfinityConversions))); - mappings.AddStructType(new DataTypeName("pg_catalog.timestamptz"), - static (options, mapping, _) => - mapping.CreateInfo(options, new OffsetDateTimeConverter(options.EnableDateTimeInfinityConversions))); - - // timestamp - mappings.AddStructType(TimestampDataTypeName, - static (options, mapping, _) => - mapping.CreateInfo(options, new LocalDateTimeConverter(options.EnableDateTimeInfinityConversions)), isDefault: true); - } - - // date - mappings.AddStructType(DateDataTypeName, - static (options, mapping, _) => - mapping.CreateInfo(options, new LocalDateConverter(options.EnableDateTimeInfinityConversions)), isDefault: true); - - // time - mappings.AddStructType(TimeDataTypeName, - static (options, mapping, _) => mapping.CreateInfo(options, new LocalTimeConverter()), isDefault: true); - - // timetz - mappings.AddStructType(TimeTzDataTypeName, - static (options, mapping, _) => mapping.CreateInfo(options, new OffsetTimeConverter()), isDefault: true); - - // interval - mappings.AddType(IntervalDataTypeName, - static (options, mapping, _) => mapping.CreateInfo(options, new PeriodConverter()), isDefault: true); - mappings.AddStructType(IntervalDataTypeName, - static (options, mapping, _) => mapping.CreateInfo(options, new DurationConverter())); - - // tstzrange - mappings.AddStructType(TimestampTzRangeDataTypeName, - static (options, mapping, _) => - mapping.CreateInfo(options, new IntervalConverter(CreateRangeConverter(new InstantConverter(options.EnableDateTimeInfinityConversions), options))), isDefault: true); - mappings.AddStructType>(TimestampTzRangeDataTypeName, - static (options, mapping, _) => mapping.CreateInfo(options, - CreateRangeConverter(new InstantConverter(options.EnableDateTimeInfinityConversions), options))); - mappings.AddStructType>(TimestampTzRangeDataTypeName, - static 
(options, mapping, _) => mapping.CreateInfo(options, - CreateRangeConverter(new ZonedDateTimeConverter(options.EnableDateTimeInfinityConversions), options))); - mappings.AddStructType>(TimestampTzRangeDataTypeName, - static (options, mapping, _) => mapping.CreateInfo(options, - CreateRangeConverter(new OffsetDateTimeConverter(options.EnableDateTimeInfinityConversions), options))); - - // tstzmultirange - mappings.AddType(TimestampTzMultirangeDataTypeName, - static (options, mapping, _) => - mapping.CreateInfo(options, CreateArrayMultirangeConverter(new IntervalConverter( - CreateRangeConverter(new InstantConverter(options.EnableDateTimeInfinityConversions), options)), options)), - isDefault: true); - mappings.AddType>(TimestampTzMultirangeDataTypeName, - static (options, mapping, _) => - mapping.CreateInfo(options, CreateListMultirangeConverter(new IntervalConverter( - CreateRangeConverter(new InstantConverter(options.EnableDateTimeInfinityConversions), options)), options))); - mappings.AddType[]>(TimestampTzMultirangeDataTypeName, - static (options, mapping, _) => - mapping.CreateInfo(options, CreateArrayMultirangeConverter(CreateRangeConverter(new InstantConverter(options.EnableDateTimeInfinityConversions), options), options))); - mappings.AddType>>(TimestampTzMultirangeDataTypeName, - static (options, mapping, _) => - mapping.CreateInfo(options, CreateListMultirangeConverter(CreateRangeConverter(new InstantConverter(options.EnableDateTimeInfinityConversions), options), options))); - mappings.AddType[]>(TimestampTzMultirangeDataTypeName, - static (options, mapping, _) => - mapping.CreateInfo(options, CreateArrayMultirangeConverter(CreateRangeConverter(new ZonedDateTimeConverter(options.EnableDateTimeInfinityConversions), options), options))); - mappings.AddType>>(TimestampTzMultirangeDataTypeName, - static (options, mapping, _) => - mapping.CreateInfo(options, CreateListMultirangeConverter(CreateRangeConverter(new 
ZonedDateTimeConverter(options.EnableDateTimeInfinityConversions), options), options))); - mappings.AddType[]>(TimestampTzMultirangeDataTypeName, - static (options, mapping, _) => - mapping.CreateInfo(options, CreateArrayMultirangeConverter(CreateRangeConverter(new OffsetDateTimeConverter(options.EnableDateTimeInfinityConversions), options), options))); - mappings.AddType>>(TimestampTzMultirangeDataTypeName, - static (options, mapping, _) => - mapping.CreateInfo(options, CreateListMultirangeConverter(CreateRangeConverter(new OffsetDateTimeConverter(options.EnableDateTimeInfinityConversions), options), options))); - - // tsrange - mappings.AddStructType>(TimestampRangeDataTypeName, - static (options, mapping, _) => - mapping.CreateInfo(options, CreateRangeConverter(new LocalDateTimeConverter(options.EnableDateTimeInfinityConversions), options)), - isDefault: true); - - // tsmultirange - mappings.AddType[]>(TimestampMultirangeDataTypeName, - static (options, mapping, _) => - mapping.CreateInfo(options, CreateArrayMultirangeConverter(CreateRangeConverter(new LocalDateTimeConverter(options.EnableDateTimeInfinityConversions), options), options)), - isDefault: true); - mappings.AddType>>(TimestampMultirangeDataTypeName, - static (options, mapping, _) => - mapping.CreateInfo(options, CreateListMultirangeConverter(CreateRangeConverter(new LocalDateTimeConverter(options.EnableDateTimeInfinityConversions), options), options))); - - // daterange - mappings.AddType(DateRangeDataTypeName, - static (options, mapping, _) => - mapping.CreateInfo(options, new DateIntervalConverter( - CreateRangeConverter(new LocalDateConverter(options.EnableDateTimeInfinityConversions), options), - options.EnableDateTimeInfinityConversions)), isDefault: true); - mappings.AddStructType>(DateRangeDataTypeName, - static (options, mapping, _) => mapping.CreateInfo(options, - CreateRangeConverter(new LocalDateConverter(options.EnableDateTimeInfinityConversions), options))); - - // datemultirange - 
mappings.AddType(DateMultirangeDataTypeName, - static (options, mapping, _) => - mapping.CreateInfo(options, CreateArrayMultirangeConverter(new DateIntervalConverter( - CreateRangeConverter(new LocalDateConverter(options.EnableDateTimeInfinityConversions), options), - options.EnableDateTimeInfinityConversions), options)), - isDefault: true); - mappings.AddType>(DateMultirangeDataTypeName, - static (options, mapping, _) => - mapping.CreateInfo(options, CreateListMultirangeConverter(new DateIntervalConverter( - CreateRangeConverter(new LocalDateConverter(options.EnableDateTimeInfinityConversions), options), - options.EnableDateTimeInfinityConversions), options))); - mappings.AddType[]>(DateMultirangeDataTypeName, - static (options, mapping, _) => - mapping.CreateInfo(options, CreateArrayMultirangeConverter(CreateRangeConverter(new LocalDateConverter(options.EnableDateTimeInfinityConversions), options), options))); - mappings.AddType>>(DateMultirangeDataTypeName, - static (options, mapping, _) => - mapping.CreateInfo(options, CreateListMultirangeConverter(CreateRangeConverter(new LocalDateConverter(options.EnableDateTimeInfinityConversions), options), options))); - - return mappings; - } - - protected static TypeInfoMappingCollection AddArrayInfos(TypeInfoMappingCollection mappings) - { - // timestamptz - mappings.AddStructArrayType(TimestampTzDataTypeName); - mappings.AddStructArrayType(TimestampTzDataTypeName); - mappings.AddStructArrayType(TimestampTzDataTypeName); - - // timestamp - if (LegacyTimestampBehavior) - { - mappings.AddStructArrayType(TimestampDataTypeName); - - mappings.AddStructType(TimestampDataTypeName, - static (options, mapping, _) => - mapping.CreateInfo(options, new InstantConverter(options.EnableDateTimeInfinityConversions)), - isDefault: true); - mappings.AddStructType(TimestampDataTypeName, - static (options, mapping, _) => - mapping.CreateInfo(options, new LocalDateTimeConverter(options.EnableDateTimeInfinityConversions)), - isDefault: 
false); - } - else - { - mappings.AddStructType(TimestampDataTypeName, - static (options, mapping, _) => - mapping.CreateInfo(options, new LocalDateTimeConverter(options.EnableDateTimeInfinityConversions)), - isDefault: true); - } - mappings.AddStructArrayType(TimestampDataTypeName); - - // other - mappings.AddStructArrayType(DateDataTypeName); - mappings.AddStructArrayType(TimeDataTypeName); - mappings.AddStructArrayType(TimeTzDataTypeName); - mappings.AddArrayType(IntervalDataTypeName); - mappings.AddStructArrayType(IntervalDataTypeName); - - // tstzrange - mappings.AddStructArrayType(TimestampTzRangeDataTypeName); - mappings.AddStructArrayType>(TimestampTzRangeDataTypeName); - mappings.AddStructArrayType>(TimestampTzRangeDataTypeName); - mappings.AddStructArrayType>(TimestampTzRangeDataTypeName); - - // tstzmultirange - mappings.AddArrayType(TimestampTzMultirangeDataTypeName); - mappings.AddArrayType>(TimestampTzMultirangeDataTypeName); - mappings.AddArrayType[]>(TimestampTzMultirangeDataTypeName); - mappings.AddArrayType>>(TimestampTzMultirangeDataTypeName); - mappings.AddArrayType[]>(TimestampTzMultirangeDataTypeName); - mappings.AddArrayType>>(TimestampTzMultirangeDataTypeName); - mappings.AddArrayType[]>(TimestampTzMultirangeDataTypeName); - mappings.AddArrayType>>(TimestampTzMultirangeDataTypeName); - - // tsrange - mappings.AddStructArrayType>(TimestampRangeDataTypeName); - - // tsmultirange - mappings.AddArrayType[]>(TimestampMultirangeDataTypeName); - mappings.AddArrayType>>(TimestampMultirangeDataTypeName); - - // daterange - mappings.AddArrayType(DateRangeDataTypeName); - mappings.AddStructArrayType>(DateRangeDataTypeName); - - // datemultirange - mappings.AddArrayType(DateMultirangeDataTypeName); - mappings.AddArrayType>(DateMultirangeDataTypeName); - mappings.AddArrayType[]>(DateMultirangeDataTypeName); - mappings.AddArrayType>>(DateMultirangeDataTypeName); - - return mappings; - } -} - -sealed class NodaTimeArrayTypeInfoResolver : 
NodaTimeTypeInfoResolver, IPgTypeInfoResolver -{ - TypeInfoMappingCollection? _mappings; - new TypeInfoMappingCollection Mappings => _mappings ??= AddArrayInfos(new(base.Mappings)); - - public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) - => Mappings.Find(type, dataTypeName, options); -} diff --git a/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolverFactory.Multirange.cs b/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolverFactory.Multirange.cs new file mode 100644 index 0000000000..42c6360dad --- /dev/null +++ b/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolverFactory.Multirange.cs @@ -0,0 +1,149 @@ +using System; +using System.Collections.Generic; +using NodaTime; +using Npgsql.Internal; +using Npgsql.Internal.Postgres; +using NpgsqlTypes; +using static Npgsql.Internal.PgConverterFactory; + +namespace Npgsql.NodaTime.Internal; + +sealed partial class NodaTimeTypeInfoResolverFactory +{ + public override IPgTypeInfoResolver? CreateMultirangeResolver() => new MultirangeResolver(); + public override IPgTypeInfoResolver? CreateMultirangeArrayResolver() => new MultirangeArrayResolver(); + + class MultirangeResolver : IPgTypeInfoResolver + { + protected static DataTypeName DateMultirangeDataTypeName => new("pg_catalog.datemultirange"); + protected static DataTypeName TimestampTzMultirangeDataTypeName => new("pg_catalog.tstzmultirange"); + protected static DataTypeName TimestampMultirangeDataTypeName => new("pg_catalog.tsmultirange"); + + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new()); + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + // tstzmultirange + mappings.AddType(TimestampTzMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateArrayMultirangeConverter(new IntervalConverter( + CreateRangeConverter(new InstantConverter(options.EnableDateTimeInfinityConversions), options)), options)), + isDefault: true); + mappings.AddType>(TimestampTzMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateListMultirangeConverter(new IntervalConverter( + CreateRangeConverter(new InstantConverter(options.EnableDateTimeInfinityConversions), options)), options))); + mappings.AddType[]>(TimestampTzMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, + CreateArrayMultirangeConverter( + CreateRangeConverter(new InstantConverter(options.EnableDateTimeInfinityConversions), options), options))); + mappings.AddType>>(TimestampTzMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, + CreateListMultirangeConverter( + CreateRangeConverter(new InstantConverter(options.EnableDateTimeInfinityConversions), options), options))); + mappings.AddType[]>(TimestampTzMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, + CreateArrayMultirangeConverter( + CreateRangeConverter(new ZonedDateTimeConverter(options.EnableDateTimeInfinityConversions), options), + options))); + mappings.AddType>>(TimestampTzMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, + CreateListMultirangeConverter( + CreateRangeConverter(new ZonedDateTimeConverter(options.EnableDateTimeInfinityConversions), options), + options))); + mappings.AddType[]>(TimestampTzMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, + 
CreateArrayMultirangeConverter( + CreateRangeConverter(new OffsetDateTimeConverter(options.EnableDateTimeInfinityConversions), options), + options))); + mappings.AddType>>(TimestampTzMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, + CreateListMultirangeConverter( + CreateRangeConverter(new OffsetDateTimeConverter(options.EnableDateTimeInfinityConversions), options), + options))); + + // tsmultirange + mappings.AddType[]>(TimestampMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, + CreateArrayMultirangeConverter( + CreateRangeConverter(new LocalDateTimeConverter(options.EnableDateTimeInfinityConversions), options), options)), + isDefault: true); + mappings.AddType>>(TimestampMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, + CreateListMultirangeConverter( + CreateRangeConverter(new LocalDateTimeConverter(options.EnableDateTimeInfinityConversions), options), + options))); + + // datemultirange + mappings.AddType(DateMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateArrayMultirangeConverter(new DateIntervalConverter( + CreateRangeConverter(new LocalDateConverter(options.EnableDateTimeInfinityConversions), options), + options.EnableDateTimeInfinityConversions), options)), + isDefault: true); + mappings.AddType>(DateMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateListMultirangeConverter(new DateIntervalConverter( + CreateRangeConverter(new LocalDateConverter(options.EnableDateTimeInfinityConversions), options), + options.EnableDateTimeInfinityConversions), options))); + mappings.AddType[]>(DateMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, + CreateArrayMultirangeConverter( + CreateRangeConverter(new LocalDateConverter(options.EnableDateTimeInfinityConversions), options), options))); + 
mappings.AddType>>(DateMultirangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, + CreateListMultirangeConverter( + CreateRangeConverter(new LocalDateConverter(options.EnableDateTimeInfinityConversions), options), options))); + + return mappings; + } + } + + sealed class MultirangeArrayResolver : MultirangeResolver, IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + // tstzmultirange + mappings.AddArrayType(TimestampTzMultirangeDataTypeName); + mappings.AddArrayType>(TimestampTzMultirangeDataTypeName); + mappings.AddArrayType[]>(TimestampTzMultirangeDataTypeName); + mappings.AddArrayType>>(TimestampTzMultirangeDataTypeName); + mappings.AddArrayType[]>(TimestampTzMultirangeDataTypeName); + mappings.AddArrayType>>(TimestampTzMultirangeDataTypeName); + mappings.AddArrayType[]>(TimestampTzMultirangeDataTypeName); + mappings.AddArrayType>>(TimestampTzMultirangeDataTypeName); + + // tsmultirange + mappings.AddArrayType[]>(TimestampMultirangeDataTypeName); + mappings.AddArrayType>>(TimestampMultirangeDataTypeName); + + // datemultirange + mappings.AddArrayType(DateMultirangeDataTypeName); + mappings.AddArrayType>(DateMultirangeDataTypeName); + mappings.AddArrayType[]>(DateMultirangeDataTypeName); + mappings.AddArrayType>>(DateMultirangeDataTypeName); + + return mappings; + } + } +} diff --git a/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolverFactory.Range.cs b/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolverFactory.Range.cs new file mode 100644 index 0000000000..f62669333c --- /dev/null +++ b/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolverFactory.Range.cs @@ -0,0 +1,93 @@ +using 
System; +using NodaTime; +using Npgsql.Internal; +using Npgsql.Internal.Postgres; +using NpgsqlTypes; +using static Npgsql.Internal.PgConverterFactory; + +namespace Npgsql.NodaTime.Internal; + +sealed partial class NodaTimeTypeInfoResolverFactory +{ + public override IPgTypeInfoResolver? CreateRangeResolver() => new RangeResolver(); + public override IPgTypeInfoResolver? CreateRangeArrayResolver() => new RangeArrayResolver(); + + class RangeResolver : IPgTypeInfoResolver + { + protected static DataTypeName DateRangeDataTypeName => new("pg_catalog.daterange"); + protected static DataTypeName TimestampTzRangeDataTypeName => new("pg_catalog.tstzrange"); + protected static DataTypeName TimestampRangeDataTypeName => new("pg_catalog.tsrange"); + + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new()); + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + // tstzrange + mappings.AddStructType(TimestampTzRangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, + new IntervalConverter( + CreateRangeConverter(new InstantConverter(options.EnableDateTimeInfinityConversions), options))), + isDefault: true); + mappings.AddStructType>(TimestampTzRangeDataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, + CreateRangeConverter(new InstantConverter(options.EnableDateTimeInfinityConversions), options))); + mappings.AddStructType>(TimestampTzRangeDataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, + CreateRangeConverter(new ZonedDateTimeConverter(options.EnableDateTimeInfinityConversions), options))); + mappings.AddStructType>(TimestampTzRangeDataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, + CreateRangeConverter(new 
OffsetDateTimeConverter(options.EnableDateTimeInfinityConversions), options))); + + // tsrange + mappings.AddStructType>(TimestampRangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, + CreateRangeConverter(new LocalDateTimeConverter(options.EnableDateTimeInfinityConversions), options)), + isDefault: true); + + // daterange + mappings.AddType(DateRangeDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, new DateIntervalConverter( + CreateRangeConverter(new LocalDateConverter(options.EnableDateTimeInfinityConversions), options), + options.EnableDateTimeInfinityConversions)), isDefault: true); + mappings.AddStructType>(DateRangeDataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, + CreateRangeConverter(new LocalDateConverter(options.EnableDateTimeInfinityConversions), options))); + + return mappings; + } + } + + sealed class RangeArrayResolver : RangeResolver, IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + // tstzrange + mappings.AddStructArrayType(TimestampTzRangeDataTypeName); + mappings.AddStructArrayType>(TimestampTzRangeDataTypeName); + mappings.AddStructArrayType>(TimestampTzRangeDataTypeName); + mappings.AddStructArrayType>(TimestampTzRangeDataTypeName); + + // tsrange + mappings.AddStructArrayType>(TimestampRangeDataTypeName); + + // daterange + mappings.AddArrayType(DateRangeDataTypeName); + mappings.AddStructArrayType>(DateRangeDataTypeName); + + return mappings; + } + } +} diff --git a/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolverFactory.cs b/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolverFactory.cs new file mode 100644 index 0000000000..dce258b453 --- /dev/null +++ b/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolverFactory.cs @@ -0,0 +1,149 @@ +using System; +using NodaTime; +using Npgsql.Internal; +using Npgsql.Internal.Postgres; +using static Npgsql.NodaTime.Internal.NodaTimeUtils; + +namespace Npgsql.NodaTime.Internal; + +sealed partial class NodaTimeTypeInfoResolverFactory : PgTypeInfoResolverFactory +{ + public override IPgTypeInfoResolver CreateResolver() => new Resolver(); + public override IPgTypeInfoResolver? CreateArrayResolver() => new ArrayResolver(); + + class Resolver : IPgTypeInfoResolver + { + protected static DataTypeName TimestampTzDataTypeName => new("pg_catalog.timestamptz"); + protected static DataTypeName TimestampDataTypeName => new("pg_catalog.timestamp"); + protected static DataTypeName DateDataTypeName => new("pg_catalog.date"); + protected static DataTypeName TimeDataTypeName => new("pg_catalog.time"); + protected static DataTypeName TimeTzDataTypeName => new("pg_catalog.timetz"); + protected static DataTypeName IntervalDataTypeName => new("pg_catalog.interval"); + + TypeInfoMappingCollection? 
_mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new()); + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + // timestamp and timestamptz, legacy and non-legacy modes + if (LegacyTimestampBehavior) + { + // timestamptz + mappings.AddStructType(new DataTypeName("pg_catalog.timestamptz"), + static (options, mapping, _) => + mapping.CreateInfo(options, new InstantConverter(options.EnableDateTimeInfinityConversions)), isDefault: false); + mappings.AddStructType(new DataTypeName("pg_catalog.timestamptz"), + static (options, mapping, _) => + mapping.CreateInfo(options, new LegacyTimestampTzZonedDateTimeConverter( + DateTimeZoneProviders.Tzdb[options.TimeZone], options.EnableDateTimeInfinityConversions))); + mappings.AddStructType(new DataTypeName("pg_catalog.timestamptz"), + static (options, mapping, _) => + mapping.CreateInfo(options, new LegacyTimestampTzOffsetDateTimeConverter( + DateTimeZoneProviders.Tzdb[options.TimeZone], options.EnableDateTimeInfinityConversions))); + + // timestamp + mappings.AddStructType(TimestampDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, new InstantConverter(options.EnableDateTimeInfinityConversions)), + isDefault: true); + mappings.AddStructType(TimestampDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, new LocalDateTimeConverter(options.EnableDateTimeInfinityConversions)), + isDefault: false); + } + else + { + // timestamptz + mappings.AddStructType(TimestampTzDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, new InstantConverter(options.EnableDateTimeInfinityConversions)), isDefault: true); + mappings.AddStructType(new DataTypeName("pg_catalog.timestamptz"), + static (options, mapping, _) => + mapping.CreateInfo(options, new 
ZonedDateTimeConverter(options.EnableDateTimeInfinityConversions))); + mappings.AddStructType(new DataTypeName("pg_catalog.timestamptz"), + static (options, mapping, _) => + mapping.CreateInfo(options, new OffsetDateTimeConverter(options.EnableDateTimeInfinityConversions))); + + // timestamp + mappings.AddStructType(TimestampDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, new LocalDateTimeConverter(options.EnableDateTimeInfinityConversions)), + isDefault: true); + } + + // date + mappings.AddStructType(DateDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, new LocalDateConverter(options.EnableDateTimeInfinityConversions)), isDefault: true); + + // time + mappings.AddStructType(TimeDataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new LocalTimeConverter()), isDefault: true); + + // timetz + mappings.AddStructType(TimeTzDataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new OffsetTimeConverter()), isDefault: true); + + // interval + mappings.AddType(IntervalDataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new PeriodConverter()), isDefault: true); + mappings.AddStructType(IntervalDataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new DurationConverter())); + + return mappings; + } + } + + sealed class ArrayResolver : Resolver, IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + // timestamptz + mappings.AddStructArrayType(TimestampTzDataTypeName); + mappings.AddStructArrayType(TimestampTzDataTypeName); + mappings.AddStructArrayType(TimestampTzDataTypeName); + + // timestamp + if (LegacyTimestampBehavior) + { + mappings.AddStructArrayType(TimestampDataTypeName); + + mappings.AddStructType(TimestampDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, new InstantConverter(options.EnableDateTimeInfinityConversions)), + isDefault: true); + mappings.AddStructType(TimestampDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, new LocalDateTimeConverter(options.EnableDateTimeInfinityConversions)), + isDefault: false); + } + else + { + mappings.AddStructType(TimestampDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, new LocalDateTimeConverter(options.EnableDateTimeInfinityConversions)), + isDefault: true); + } + + mappings.AddStructArrayType(TimestampDataTypeName); + + // other + mappings.AddStructArrayType(DateDataTypeName); + mappings.AddStructArrayType(TimeDataTypeName); + mappings.AddStructArrayType(TimeTzDataTypeName); + mappings.AddArrayType(IntervalDataTypeName); + mappings.AddStructArrayType(IntervalDataTypeName); + + return mappings; + } + } +} diff --git a/src/Npgsql.NodaTime/NpgsqlNodaTimeExtensions.cs b/src/Npgsql.NodaTime/NpgsqlNodaTimeExtensions.cs index f933b81ca6..9ebf42e83f 100644 --- a/src/Npgsql.NodaTime/NpgsqlNodaTimeExtensions.cs +++ b/src/Npgsql.NodaTime/NpgsqlNodaTimeExtensions.cs @@ -15,10 +15,7 @@ public static class NpgsqlNodaTimeExtensions /// The type mapper to set up (global or connection-specific) public static INpgsqlTypeMapper UseNodaTime(this INpgsqlTypeMapper mapper) { - // TODO opt-in of arrays. 
- // Reverse order - mapper.AddTypeInfoResolver(new NodaTimeArrayTypeInfoResolver()); - mapper.AddTypeInfoResolver(new NodaTimeTypeInfoResolver()); + mapper.AddTypeInfoResolverFactory(new NodaTimeTypeInfoResolverFactory()); return mapper; } } diff --git a/src/Npgsql/Internal/PgTypeInfoResolverChainBuilder.cs b/src/Npgsql/Internal/PgTypeInfoResolverChainBuilder.cs new file mode 100644 index 0000000000..2f59344d3e --- /dev/null +++ b/src/Npgsql/Internal/PgTypeInfoResolverChainBuilder.cs @@ -0,0 +1,129 @@ +using System; +using System.Collections.Generic; + +namespace Npgsql.Internal; + +struct PgTypeInfoResolverChainBuilder +{ + readonly List _factories = new(); + Action>? _addRangeResolvers; + Action>? _addMultirangeResolvers; + RangeArrayHandler _rangeArrayHandler = RangeArrayHandler.Instance; + MultirangeArrayHandler _multirangeArrayHandler = MultirangeArrayHandler.Instance; + Action>? _arrayResolvers; + + public PgTypeInfoResolverChainBuilder() + { + } + + public void Clear() => _factories.Clear(); + + public void AppendResolverFactory(PgTypeInfoResolverFactory factory) => AddResolverFactory(factory); + public void PrependResolverFactory(PgTypeInfoResolverFactory factory) => AddResolverFactory(factory, prepend: true); + + void AddResolverFactory(PgTypeInfoResolverFactory factory, bool prepend = false) + { + var type = factory.GetType(); + + for (var i = 0; i < _factories.Count; i++) + if (_factories[i].GetType() == type) + { + _factories.RemoveAt(i); + break; + } + + if (prepend) + _factories.Insert(0, factory); + else + _factories.Add(factory); + } + + public void EnableRanges() + { + _addRangeResolvers ??= AddResolvers; + _rangeArrayHandler = RangeArrayHandlerImpl.Instance; + + static void AddResolvers(PgTypeInfoResolverChainBuilder instance, List resolvers) + { + foreach (var factory in instance._factories) + if (factory.CreateRangeResolver() is { } resolver) + resolvers.Add(resolver); + } + } + + public void EnableMultiranges() + { + _addMultirangeResolvers 
??= AddResolvers; + _multirangeArrayHandler = MultirangeArrayHandlerImpl.Instance; + + static void AddResolvers(PgTypeInfoResolverChainBuilder instance, List resolvers) + { + foreach (var factory in instance._factories) + if (factory.CreateMultirangeResolver() is { } resolver) + resolvers.Add(resolver); + } + } + + public void EnableArrays() + { + _arrayResolvers ??= AddResolvers; + + static void AddResolvers(PgTypeInfoResolverChainBuilder instance, List resolvers) + { + foreach (var factory in instance._factories) + if (factory.CreateArrayResolver() is { } resolver) + resolvers.Add(resolver); + + if (instance._addRangeResolvers is not null) + foreach (var factory in instance._factories) + if (instance._rangeArrayHandler.CreateRangeArrayResolver(factory) is { } resolver) + resolvers.Add(resolver); + + if (instance._addMultirangeResolvers is not null) + foreach (var factory in instance._factories) + if (instance._multirangeArrayHandler.CreateMultirangeArrayResolver(factory) is { } resolver) + resolvers.Add(resolver); + } + } + + public IEnumerable Build(Action>? configure = null) + { + var resolvers = new List(); + foreach (var factory in _factories) + resolvers.Add(factory.CreateResolver()); + var instance = this; + _addRangeResolvers?.Invoke(instance, resolvers); + _addMultirangeResolvers?.Invoke(instance, resolvers); + _arrayResolvers?.Invoke(instance, resolvers); + configure?.Invoke(resolvers); + return resolvers; + } + + class RangeArrayHandler + { + public static RangeArrayHandler Instance { get; } = new(); + + public virtual IPgTypeInfoResolver? CreateRangeArrayResolver(PgTypeInfoResolverFactory factory) => null; + } + + sealed class RangeArrayHandlerImpl : RangeArrayHandler + { + public new static RangeArrayHandlerImpl Instance { get; } = new(); + + public override IPgTypeInfoResolver? 
CreateRangeArrayResolver(PgTypeInfoResolverFactory factory) => factory.CreateRangeArrayResolver(); + } + + class MultirangeArrayHandler + { + public static MultirangeArrayHandler Instance { get; } = new(); + + public virtual IPgTypeInfoResolver? CreateMultirangeArrayResolver(PgTypeInfoResolverFactory factory) => null; + } + + sealed class MultirangeArrayHandlerImpl : MultirangeArrayHandler + { + public new static MultirangeArrayHandlerImpl Instance { get; } = new(); + + public override IPgTypeInfoResolver? CreateMultirangeArrayResolver(PgTypeInfoResolverFactory factory) => factory.CreateMultirangeArrayResolver(); + } +} diff --git a/src/Npgsql/Internal/PgTypeInfoResolverFactory.cs b/src/Npgsql/Internal/PgTypeInfoResolverFactory.cs new file mode 100644 index 0000000000..f30059c7ec --- /dev/null +++ b/src/Npgsql/Internal/PgTypeInfoResolverFactory.cs @@ -0,0 +1,13 @@ +namespace Npgsql.Internal; + +public abstract class PgTypeInfoResolverFactory +{ + public abstract IPgTypeInfoResolver CreateResolver(); + public abstract IPgTypeInfoResolver? CreateArrayResolver(); + + public virtual IPgTypeInfoResolver? CreateRangeResolver() => null; + public virtual IPgTypeInfoResolver? CreateRangeArrayResolver() => null; + + public virtual IPgTypeInfoResolver? CreateMultirangeResolver() => null; + public virtual IPgTypeInfoResolver? 
CreateMultirangeArrayResolver() => null; +} diff --git a/src/Npgsql/Internal/Resolvers/RangeTypeInfoResolver.cs b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Multirange.cs similarity index 51% rename from src/Npgsql/Internal/Resolvers/RangeTypeInfoResolver.cs rename to src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Multirange.cs index cd3cb9dcf2..753f216e53 100644 --- a/src/Npgsql/Internal/Resolvers/RangeTypeInfoResolver.cs +++ b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Multirange.cs @@ -1,6 +1,5 @@ using System; using System.Collections.Generic; -using System.Numerics; using Npgsql.Internal.Converters; using Npgsql.Internal.Postgres; using Npgsql.PostgresTypes; @@ -9,125 +8,120 @@ using NpgsqlTypes; using static Npgsql.Internal.PgConverterFactory; -namespace Npgsql.Internal.Resolvers; +namespace Npgsql.Internal.ResolverFactories; -// TODO improve the ability to switch on server capability. -class RangeTypeInfoResolver : IPgTypeInfoResolver +sealed partial class AdoTypeInfoResolverFactory { - TypeInfoMappingCollection? _mappings; - protected TypeInfoMappingCollection Mappings => _mappings ??= AddInfos(new(), supportsMultirange: false); - TypeInfoMappingCollection? _mappingsWithMultiranges; - protected TypeInfoMappingCollection MappingsWithMultiranges => _mappingsWithMultiranges ??= AddInfos(new(), supportsMultirange: true); + public override IPgTypeInfoResolver CreateMultirangeResolver() => new MultirangeResolver(); + public override IPgTypeInfoResolver CreateMultirangeArrayResolver() => new MultirangeArrayResolver(); - public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) - => (options.DatabaseInfo.SupportsMultirangeTypes ? MappingsWithMultiranges : Mappings).Find(type, dataTypeName, options); - - static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings, bool supportsMultirange) + public static void ThrowIfMultirangeUnsupported(Type? 
type, DataTypeName? dataTypeName, PgSerializerOptions options) { - // numeric ranges - mappings.AddStructType>(DataTypeNames.Int4Range, - static (options, mapping, _) => mapping.CreateInfo(options, CreateRangeConverter(new Int4Converter(), options)), - isDefault: true); - mappings.AddStructType>(DataTypeNames.Int8Range, - static (options, mapping, _) => mapping.CreateInfo(options, CreateRangeConverter(new Int8Converter(), options)), - isDefault: true); - mappings.AddStructType>(DataTypeNames.NumRange, - static (options, mapping, _) => - mapping.CreateInfo(options, CreateRangeConverter(new DecimalNumericConverter(), options)), - isDefault: true); - mappings.AddStructType>(DataTypeNames.NumRange, - static (options, mapping, _) => mapping.CreateInfo(options, CreateRangeConverter(new BigIntegerNumericConverter(), options))); - - // tsrange - if (Statics.LegacyTimestampBehavior) - { - mappings.AddStructType>(DataTypeNames.TsRange, - static (options, mapping, _) => mapping.CreateInfo(options, - CreateRangeConverter(new LegacyDateTimeConverter(options.EnableDateTimeInfinityConversions, timestamp: true), options)), - isDefault: true); - } - else + var kind = CheckMultirangeUnsupported(type, dataTypeName, options); + switch (kind) { - mappings.AddResolverStructType>(DataTypeNames.TsRange, - static (options, mapping, dataTypeNameMatch) => mapping.CreateInfo(options, - DateTimeConverterResolver.CreateRangeResolver(options, - options.GetCanonicalTypeId(DataTypeNames.TsTzRange), - options.GetCanonicalTypeId(DataTypeNames.TsRange), - options.EnableDateTimeInfinityConversions), dataTypeNameMatch), - isDefault: true); + case PostgresTypeKind.Multirange when kind.Value.HasFlag(PostgresTypeKind.Array): + throw new NotSupportedException( + string.Format(NpgsqlStrings.MultirangeArraysNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableArrays), typeof(TBuilder).Name)); + case PostgresTypeKind.Multirange: + throw new NotSupportedException( + 
string.Format(NpgsqlStrings.MultirangesNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableMultiranges), typeof(TBuilder).Name)); + default: + return; } - mappings.AddStructType>(DataTypeNames.TsRange, - static (options, mapping, _) => - mapping.CreateInfo(options, CreateRangeConverter(new Int8Converter(), options))); + } - // tstzrange - if (Statics.LegacyTimestampBehavior) + public static PostgresTypeKind? CheckMultirangeUnsupported(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + { + // Only trigger on well known data type names. + var npgsqlDbType = dataTypeName?.ToNpgsqlDbType(); + if (type != typeof(object)) { - mappings.AddStructType>(DataTypeNames.TsTzRange, - static (options, mapping, _) => mapping.CreateInfo(options, - CreateRangeConverter(new LegacyDateTimeConverter(options.EnableDateTimeInfinityConversions, timestamp: false), options)), - isDefault: true); - mappings.AddStructType>(DataTypeNames.TsTzRange, - static (options, mapping, _) => mapping.CreateInfo(options, - CreateRangeConverter(new LegacyDateTimeOffsetConverter(options.EnableDateTimeInfinityConversions), options))); + if (npgsqlDbType?.HasFlag(NpgsqlDbType.Multirange) != true) + return null; + + return dataTypeName?.IsArray == true + ? 
PostgresTypeKind.Array | PostgresTypeKind.Multirange + : PostgresTypeKind.Multirange; } - else + + if (type == typeof(object)) + return null; + + if (!TypeInfoMappingCollection.IsArrayLikeType(type, out var elementType)) + return null; + + type = elementType; + + if (type is { IsConstructedGenericType: true } && type.GetGenericTypeDefinition() == typeof(Nullable<>)) + type = type.GetGenericArguments()[0]; + + if (type is { IsConstructedGenericType: true } && type.GetGenericTypeDefinition() == typeof(NpgsqlRange<>)) { - mappings.AddResolverStructType>(DataTypeNames.TsTzRange, - static (options, mapping, dataTypeNameMatch) => mapping.CreateInfo(options, - DateTimeConverterResolver.CreateRangeResolver(options, - options.GetCanonicalTypeId(DataTypeNames.TsTzRange), - options.GetCanonicalTypeId(DataTypeNames.TsRange), - options.EnableDateTimeInfinityConversions), dataTypeNameMatch), - isDefault: true); - mappings.AddStructType>(DataTypeNames.TsTzRange, - static (options, mapping, _) => mapping.CreateInfo(options, - CreateRangeConverter(new DateTimeOffsetConverter(options.EnableDateTimeInfinityConversions), options))); - } - mappings.AddStructType>(DataTypeNames.TsTzRange, - static (options, mapping, _) => mapping.CreateInfo(options, CreateRangeConverter(new Int8Converter(), options))); - - // daterange - mappings.AddStructType>(DataTypeNames.DateRange, - static (options, mapping, _) => mapping.CreateInfo(options, - CreateRangeConverter(new DateTimeDateConverter(options.EnableDateTimeInfinityConversions), options)), - isDefault: true); - mappings.AddStructType>(DataTypeNames.DateRange, - static (options, mapping, _) => mapping.CreateInfo(options, CreateRangeConverter(new Int4Converter(), options))); -#if NET6_0_OR_GREATER - mappings.AddStructType>(DataTypeNames.DateRange, - static (options, mapping, _) => - mapping.CreateInfo(options, CreateRangeConverter(new DateOnlyDateConverter(options.EnableDateTimeInfinityConversions), options))); + type = 
type.GetGenericArguments()[0]; + var matchingArguments = + new[] + { + typeof(int), typeof(long), typeof(decimal), typeof(DateTime), +# if NET6_0_OR_GREATER + typeof(DateOnly) #endif + }; + + // If we don't know more than the clr type, default to a Multirange kind over Array as they share the same types. + foreach (var argument in matchingArguments) + if (argument == type) + return PostgresTypeKind.Multirange; + + if (type.AssemblyQualifiedName == "System.Numerics.BigInteger,System.Runtime.Numerics") + return PostgresTypeKind.Multirange; + } + + return null; + } - if (supportsMultirange) + class MultirangeResolver : IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new()); + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => options.DatabaseInfo.SupportsMultirangeTypes ? Mappings.Find(type, dataTypeName, options) : null; + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) { // int4multirange mappings.AddType[]>(DataTypeNames.Int4Multirange, static (options, mapping, _) => - mapping.CreateInfo(options, CreateArrayMultirangeConverter(CreateRangeConverter(new Int4Converter(), options), options)), + mapping.CreateInfo(options, + CreateArrayMultirangeConverter(CreateRangeConverter(new Int4Converter(), options), options)), isDefault: true); mappings.AddType>>(DataTypeNames.Int4Multirange, static (options, mapping, _) => - mapping.CreateInfo(options, CreateListMultirangeConverter(CreateRangeConverter(new Int4Converter(), options), options))); + mapping.CreateInfo(options, + CreateListMultirangeConverter(CreateRangeConverter(new Int4Converter(), options), options))); // int8multirange mappings.AddType[]>(DataTypeNames.Int8Multirange, static (options, mapping, _) => - mapping.CreateInfo(options, CreateArrayMultirangeConverter(CreateRangeConverter(new Int8Converter(), options), options)), + 
mapping.CreateInfo(options, + CreateArrayMultirangeConverter(CreateRangeConverter(new Int8Converter(), options), options)), isDefault: true); mappings.AddType>>(DataTypeNames.Int8Multirange, static (options, mapping, _) => - mapping.CreateInfo(options, CreateListMultirangeConverter(CreateRangeConverter(new Int8Converter(), options), options))); + mapping.CreateInfo(options, + CreateListMultirangeConverter(CreateRangeConverter(new Int8Converter(), options), options))); // nummultirange mappings.AddType[]>(DataTypeNames.NumMultirange, static (options, mapping, _) => - mapping.CreateInfo(options, CreateArrayMultirangeConverter(CreateRangeConverter(new DecimalNumericConverter(), options), options)), + mapping.CreateInfo(options, + CreateArrayMultirangeConverter(CreateRangeConverter(new DecimalNumericConverter(), options), options)), isDefault: true); mappings.AddType>>(DataTypeNames.NumMultirange, static (options, mapping, _) => - mapping.CreateInfo(options, CreateListMultirangeConverter(CreateRangeConverter(new DecimalNumericConverter(), options), options))); + mapping.CreateInfo(options, + CreateListMultirangeConverter(CreateRangeConverter(new DecimalNumericConverter(), options), options))); // tsmultirange if (Statics.LegacyTimestampBehavior) @@ -135,12 +129,14 @@ static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings, bo mappings.AddType[]>(DataTypeNames.TsMultirange, static (options, mapping, _) => mapping.CreateInfo(options, CreateArrayMultirangeConverter( - CreateRangeConverter(new LegacyDateTimeConverter(options.EnableDateTimeInfinityConversions, timestamp: true), options), options)), + CreateRangeConverter(new LegacyDateTimeConverter(options.EnableDateTimeInfinityConversions, timestamp: true), + options), options)), isDefault: true); mappings.AddType>>(DataTypeNames.TsMultirange, static (options, mapping, _) => mapping.CreateInfo(options, CreateListMultirangeConverter( - CreateRangeConverter(new 
LegacyDateTimeConverter(options.EnableDateTimeInfinityConversions, timestamp: true), options), options))); + CreateRangeConverter(new LegacyDateTimeConverter(options.EnableDateTimeInfinityConversions, timestamp: true), + options), options))); } else { @@ -158,12 +154,15 @@ static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings, bo options.GetCanonicalTypeId(DataTypeNames.TsMultirange), options.EnableDateTimeInfinityConversions), dataTypeNameMatch)); } + mappings.AddType[]>(DataTypeNames.TsMultirange, static (options, mapping, _) => - mapping.CreateInfo(options, CreateArrayMultirangeConverter(CreateRangeConverter(new Int8Converter(), options), options))); + mapping.CreateInfo(options, + CreateArrayMultirangeConverter(CreateRangeConverter(new Int8Converter(), options), options))); mappings.AddType>>(DataTypeNames.TsMultirange, static (options, mapping, _) => - mapping.CreateInfo(options, CreateListMultirangeConverter(CreateRangeConverter(new Int8Converter(), options), options))); + mapping.CreateInfo(options, + CreateListMultirangeConverter(CreateRangeConverter(new Int8Converter(), options), options))); // tstzmultirange if (Statics.LegacyTimestampBehavior) @@ -171,21 +170,25 @@ static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings, bo mappings.AddType[]>(DataTypeNames.TsTzMultirange, static (options, mapping, _) => mapping.CreateInfo(options, CreateArrayMultirangeConverter( - CreateRangeConverter(new LegacyDateTimeConverter(options.EnableDateTimeInfinityConversions, timestamp: false), options), options)), + CreateRangeConverter(new LegacyDateTimeConverter(options.EnableDateTimeInfinityConversions, timestamp: false), + options), options)), isDefault: true); mappings.AddType>>(DataTypeNames.TsTzMultirange, static (options, mapping, _) => mapping.CreateInfo(options, CreateListMultirangeConverter( - CreateRangeConverter(new LegacyDateTimeConverter(options.EnableDateTimeInfinityConversions, timestamp: false), options), 
options))); + CreateRangeConverter(new LegacyDateTimeConverter(options.EnableDateTimeInfinityConversions, timestamp: false), + options), options))); mappings.AddType[]>(DataTypeNames.TsTzMultirange, static (options, mapping, _) => mapping.CreateInfo(options, CreateArrayMultirangeConverter( - CreateRangeConverter(new LegacyDateTimeOffsetConverter(options.EnableDateTimeInfinityConversions), options), options)), + CreateRangeConverter(new LegacyDateTimeOffsetConverter(options.EnableDateTimeInfinityConversions), options), + options)), isDefault: true); mappings.AddType>>(DataTypeNames.TsTzMultirange, static (options, mapping, _) => mapping.CreateInfo(options, CreateListMultirangeConverter( - CreateRangeConverter(new LegacyDateTimeOffsetConverter(options.EnableDateTimeInfinityConversions), options), options))); + CreateRangeConverter(new LegacyDateTimeOffsetConverter(options.EnableDateTimeInfinityConversions), options), + options))); } else { @@ -212,12 +215,15 @@ static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings, bo mapping.CreateInfo(options, CreateListMultirangeConverter( CreateRangeConverter(new DateTimeOffsetConverter(options.EnableDateTimeInfinityConversions), options), options))); } + mappings.AddType[]>(DataTypeNames.TsTzMultirange, static (options, mapping, _) => - mapping.CreateInfo(options, CreateArrayMultirangeConverter(CreateRangeConverter(new Int8Converter(), options), options))); + mapping.CreateInfo(options, + CreateArrayMultirangeConverter(CreateRangeConverter(new Int8Converter(), options), options))); mappings.AddType>>(DataTypeNames.TsTzMultirange, static (options, mapping, _) => - mapping.CreateInfo(options, CreateListMultirangeConverter(CreateRangeConverter(new Int8Converter(), options), options))); + mapping.CreateInfo(options, + CreateListMultirangeConverter(CreateRangeConverter(new Int8Converter(), options), options))); // datemultirange mappings.AddType[]>(DataTypeNames.DateMultirange, @@ -229,58 +235,31 @@ static 
TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings, bo static (options, mapping, _) => mapping.CreateInfo(options, CreateListMultirangeConverter( CreateRangeConverter(new DateTimeDateConverter(options.EnableDateTimeInfinityConversions), options), options))); -#if NET6_0_OR_GREATER - mappings.AddType[]>(DataTypeNames.DateMultirange, - static (options, mapping, _) => - mapping.CreateInfo(options, CreateArrayMultirangeConverter( - CreateRangeConverter(new DateOnlyDateConverter(options.EnableDateTimeInfinityConversions), options), options)), - isDefault: true); - mappings.AddType>>(DataTypeNames.DateMultirange, - static (options, mapping, _) => - mapping.CreateInfo(options, CreateListMultirangeConverter( - CreateRangeConverter(new DateOnlyDateConverter(options.EnableDateTimeInfinityConversions), options), options))); -#endif - } + #if NET6_0_OR_GREATER + mappings.AddType[]>(DataTypeNames.DateMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateArrayMultirangeConverter( + CreateRangeConverter(new DateOnlyDateConverter(options.EnableDateTimeInfinityConversions), options), options)), + isDefault: true); + mappings.AddType>>(DataTypeNames.DateMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateListMultirangeConverter( + CreateRangeConverter(new DateOnlyDateConverter(options.EnableDateTimeInfinityConversions), options), options))); + #endif - return mappings; + return mappings; + } } - protected static TypeInfoMappingCollection AddArrayInfos(TypeInfoMappingCollection mappings, bool supportsMultiRange) + sealed class MultirangeArrayResolver : MultirangeResolver, IPgTypeInfoResolver { - // numeric ranges - mappings.AddStructArrayType>(DataTypeNames.Int4Range); - mappings.AddStructArrayType>(DataTypeNames.Int8Range); - mappings.AddStructArrayType>(DataTypeNames.NumRange); - mappings.AddStructArrayType>(DataTypeNames.NumRange); - - // tsrange - if (Statics.LegacyTimestampBehavior) - 
mappings.AddStructArrayType>(DataTypeNames.TsRange); - else - mappings.AddResolverStructArrayType>(DataTypeNames.TsRange); - mappings.AddStructArrayType>(DataTypeNames.TsRange); - - // tstzrange - if (Statics.LegacyTimestampBehavior) - { - mappings.AddStructArrayType>(DataTypeNames.TsTzRange); - mappings.AddStructArrayType>(DataTypeNames.TsTzRange); - } - else - { - mappings.AddResolverStructArrayType>(DataTypeNames.TsTzRange); - mappings.AddStructArrayType>(DataTypeNames.TsTzRange); - } - mappings.AddStructArrayType>(DataTypeNames.TsTzRange); + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); - // daterange - mappings.AddStructArrayType>(DataTypeNames.DateRange); - mappings.AddStructArrayType>(DataTypeNames.DateRange); -#if NET6_0_OR_GREATER - mappings.AddStructArrayType>(DataTypeNames.DateRange); -#endif + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => options.DatabaseInfo.SupportsMultirangeTypes ? 
Mappings.Find(type, dataTypeName, options) : null; - if (supportsMultiRange) + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) { // int4multirange mappings.AddArrayType[]>(DataTypeNames.Int4Multirange); @@ -305,6 +284,7 @@ protected static TypeInfoMappingCollection AddArrayInfos(TypeInfoMappingCollecti mappings.AddResolverArrayType[]>(DataTypeNames.TsMultirange); mappings.AddResolverArrayType>>(DataTypeNames.TsMultirange); } + mappings.AddArrayType[]>(DataTypeNames.TsMultirange); mappings.AddArrayType>>(DataTypeNames.TsMultirange); @@ -323,108 +303,19 @@ protected static TypeInfoMappingCollection AddArrayInfos(TypeInfoMappingCollecti mappings.AddArrayType[]>(DataTypeNames.TsTzMultirange); mappings.AddArrayType>>(DataTypeNames.TsTzMultirange); } + mappings.AddArrayType[]>(DataTypeNames.TsTzMultirange); mappings.AddArrayType>>(DataTypeNames.TsTzMultirange); // datemultirange mappings.AddArrayType[]>(DataTypeNames.DateMultirange); mappings.AddArrayType>>(DataTypeNames.DateMultirange); -#if NET6_0_OR_GREATER - mappings.AddArrayType[]>(DataTypeNames.DateMultirange); - mappings.AddArrayType>>(DataTypeNames.DateMultirange); -#endif - } + #if NET6_0_OR_GREATER + mappings.AddArrayType[]>(DataTypeNames.DateMultirange); + mappings.AddArrayType>>(DataTypeNames.DateMultirange); + #endif - return mappings; - } - - public static void ThrowIfUnsupported(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) - { - var kind = CheckUnsupported(type, dataTypeName, options); - switch (kind) - { - case PostgresTypeKind.Range when kind.Value.HasFlag(PostgresTypeKind.Array): - throw new NotSupportedException( - string.Format(NpgsqlStrings.RangeArraysNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableArrays), typeof(TBuilder).Name)); - case PostgresTypeKind.Range: - throw new NotSupportedException( - string.Format(NpgsqlStrings.RangesNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableRanges), typeof(TBuilder).Name)); - case PostgresTypeKind.Multirange when kind.Value.HasFlag(PostgresTypeKind.Array): - throw new NotSupportedException( - string.Format(NpgsqlStrings.MultirangeArraysNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableArrays), typeof(TBuilder).Name)); - case PostgresTypeKind.Multirange: - throw new NotSupportedException( - string.Format(NpgsqlStrings.MultirangesNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableMultiranges), typeof(TBuilder).Name)); - default: - return; + return mappings; } } - - public static PostgresTypeKind? CheckUnsupported(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) - { - // Only trigger on well known data type names. - var npgsqlDbType = dataTypeName?.ToNpgsqlDbType(); - if (type != typeof(object)) - { - if (npgsqlDbType?.HasFlag(NpgsqlDbType.Range) != true && npgsqlDbType?.HasFlag(NpgsqlDbType.Multirange) != true) - return null; - - if (npgsqlDbType.Value.HasFlag(NpgsqlDbType.Range)) - return dataTypeName?.IsArray == true - ? PostgresTypeKind.Array | PostgresTypeKind.Range - : PostgresTypeKind.Range; - - return dataTypeName?.IsArray == true - ? 
PostgresTypeKind.Array | PostgresTypeKind.Multirange - : PostgresTypeKind.Multirange; - } - - if (type == typeof(object)) - return null; - - var isArray = false; - if (TypeInfoMappingCollection.IsArrayLikeType(type, out var elementType)) - { - type = elementType; - isArray = true; - } - - if (type is { IsConstructedGenericType: true } && type.GetGenericTypeDefinition() == typeof(Nullable<>)) - type = type.GetGenericArguments()[0]; - - if (type is { IsConstructedGenericType: true } && type.GetGenericTypeDefinition() == typeof(NpgsqlRange<>)) - { - type = type.GetGenericArguments()[0]; - var matchingArguments = - new[] - { - typeof(int), typeof(long), typeof(decimal), typeof(DateTime), -# if NET6_0_OR_GREATER - typeof(DateOnly) -#endif - }; - - // If we don't know more than the clr type, default to a Multirange kind over Array as they share the same types. - foreach (var argument in matchingArguments) - if (argument == type) - return isArray ? PostgresTypeKind.Multirange : PostgresTypeKind.Range; - - if (type.AssemblyQualifiedName == "System.Numerics.BigInteger,System.Runtime.Numerics") - return isArray ? PostgresTypeKind.Multirange : PostgresTypeKind.Range; - } - - return null; - } -} - -sealed class RangeArrayTypeInfoResolver : RangeTypeInfoResolver, IPgTypeInfoResolver -{ - TypeInfoMappingCollection? _mappings; - new TypeInfoMappingCollection Mappings => _mappings ??= AddArrayInfos(new(base.Mappings), supportsMultiRange: false); - - TypeInfoMappingCollection? _mappingsWithMultiranges; - new TypeInfoMappingCollection MappingsWithMultiranges => _mappingsWithMultiranges ??= AddArrayInfos(new(base.MappingsWithMultiranges), supportsMultiRange: true); - - public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) - => (options.DatabaseInfo.SupportsMultirangeTypes ? 
MappingsWithMultiranges : Mappings).Find(type, dataTypeName, options); } diff --git a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Range.cs b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Range.cs new file mode 100644 index 0000000000..5e77275da5 --- /dev/null +++ b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Range.cs @@ -0,0 +1,219 @@ +using System; +using System.Numerics; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; +using Npgsql.PostgresTypes; +using Npgsql.Properties; +using Npgsql.Util; +using NpgsqlTypes; +using static Npgsql.Internal.PgConverterFactory; + +namespace Npgsql.Internal.ResolverFactories; + +sealed partial class AdoTypeInfoResolverFactory +{ + public override IPgTypeInfoResolver CreateRangeResolver() => new RangeResolver(); + public override IPgTypeInfoResolver CreateRangeArrayResolver() => new RangeArrayResolver(); + + public static void ThrowIfRangeUnsupported(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + { + var kind = CheckRangeUnsupported(type, dataTypeName, options); + switch (kind) + { + case PostgresTypeKind.Range when kind.Value.HasFlag(PostgresTypeKind.Array): + throw new NotSupportedException( + string.Format(NpgsqlStrings.RangeArraysNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableArrays), typeof(TBuilder).Name)); + case PostgresTypeKind.Range: + throw new NotSupportedException( + string.Format(NpgsqlStrings.RangesNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableRanges), typeof(TBuilder).Name)); + default: + return; + } + } + + public static PostgresTypeKind? CheckRangeUnsupported(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + { + // Only trigger on well known data type names. 
+ var npgsqlDbType = dataTypeName?.ToNpgsqlDbType(); + if (type != typeof(object)) + { + if (npgsqlDbType?.HasFlag(NpgsqlDbType.Range) != true && npgsqlDbType?.HasFlag(NpgsqlDbType.Multirange) != true) + return null; + + if (npgsqlDbType.Value.HasFlag(NpgsqlDbType.Range)) + return dataTypeName?.IsArray == true + ? PostgresTypeKind.Array | PostgresTypeKind.Range + : PostgresTypeKind.Range; + + return dataTypeName?.IsArray == true + ? PostgresTypeKind.Array | PostgresTypeKind.Multirange + : PostgresTypeKind.Multirange; + } + + if (type == typeof(object)) + return null; + + if (type is { IsConstructedGenericType: true } && type.GetGenericTypeDefinition() == typeof(Nullable<>)) + type = type.GetGenericArguments()[0]; + + if (type is { IsConstructedGenericType: true } && type.GetGenericTypeDefinition() == typeof(NpgsqlRange<>)) + { + type = type.GetGenericArguments()[0]; + var matchingArguments = + new[] + { + typeof(int), typeof(long), typeof(decimal), typeof(DateTime), +# if NET6_0_OR_GREATER + typeof(DateOnly) +#endif + }; + + // If we don't know more than the clr type, default to a Multirange kind over Array as they share the same types. + foreach (var argument in matchingArguments) + if (argument == type) + return PostgresTypeKind.Range; + + if (type.AssemblyQualifiedName == "System.Numerics.BigInteger,System.Runtime.Numerics") + return PostgresTypeKind.Range; + } + + return null; + } + + class RangeResolver : IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new()); + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + // numeric ranges + mappings.AddStructType>(DataTypeNames.Int4Range, + static (options, mapping, _) => mapping.CreateInfo(options, CreateRangeConverter(new Int4Converter(), options)), + isDefault: true); + mappings.AddStructType>(DataTypeNames.Int8Range, + static (options, mapping, _) => mapping.CreateInfo(options, CreateRangeConverter(new Int8Converter(), options)), + isDefault: true); + mappings.AddStructType>(DataTypeNames.NumRange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateRangeConverter(new DecimalNumericConverter(), options)), + isDefault: true); + mappings.AddStructType>(DataTypeNames.NumRange, + static (options, mapping, _) => mapping.CreateInfo(options, CreateRangeConverter(new BigIntegerNumericConverter(), options))); + + // tsrange + if (Statics.LegacyTimestampBehavior) + { + mappings.AddStructType>(DataTypeNames.TsRange, + static (options, mapping, _) => mapping.CreateInfo(options, + CreateRangeConverter(new LegacyDateTimeConverter(options.EnableDateTimeInfinityConversions, timestamp: true), options)), + isDefault: true); + } + else + { + mappings.AddResolverStructType>(DataTypeNames.TsRange, + static (options, mapping, dataTypeNameMatch) => mapping.CreateInfo(options, + DateTimeConverterResolver.CreateRangeResolver(options, + options.GetCanonicalTypeId(DataTypeNames.TsTzRange), + options.GetCanonicalTypeId(DataTypeNames.TsRange), + options.EnableDateTimeInfinityConversions), dataTypeNameMatch), + isDefault: true); + } + mappings.AddStructType>(DataTypeNames.TsRange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateRangeConverter(new Int8Converter(), options))); + + // tstzrange + if (Statics.LegacyTimestampBehavior) + { + mappings.AddStructType>(DataTypeNames.TsTzRange, + static (options, mapping, _) => 
mapping.CreateInfo(options, + CreateRangeConverter(new LegacyDateTimeConverter(options.EnableDateTimeInfinityConversions, timestamp: false), options)), + isDefault: true); + mappings.AddStructType>(DataTypeNames.TsTzRange, + static (options, mapping, _) => mapping.CreateInfo(options, + CreateRangeConverter(new LegacyDateTimeOffsetConverter(options.EnableDateTimeInfinityConversions), options))); + } + else + { + mappings.AddResolverStructType>(DataTypeNames.TsTzRange, + static (options, mapping, dataTypeNameMatch) => mapping.CreateInfo(options, + DateTimeConverterResolver.CreateRangeResolver(options, + options.GetCanonicalTypeId(DataTypeNames.TsTzRange), + options.GetCanonicalTypeId(DataTypeNames.TsRange), + options.EnableDateTimeInfinityConversions), dataTypeNameMatch), + isDefault: true); + mappings.AddStructType>(DataTypeNames.TsTzRange, + static (options, mapping, _) => mapping.CreateInfo(options, + CreateRangeConverter(new DateTimeOffsetConverter(options.EnableDateTimeInfinityConversions), options))); + } + mappings.AddStructType>(DataTypeNames.TsTzRange, + static (options, mapping, _) => mapping.CreateInfo(options, CreateRangeConverter(new Int8Converter(), options))); + + // daterange + mappings.AddStructType>(DataTypeNames.DateRange, + static (options, mapping, _) => mapping.CreateInfo(options, + CreateRangeConverter(new DateTimeDateConverter(options.EnableDateTimeInfinityConversions), options)), + isDefault: true); + mappings.AddStructType>(DataTypeNames.DateRange, + static (options, mapping, _) => mapping.CreateInfo(options, CreateRangeConverter(new Int4Converter(), options))); + #if NET6_0_OR_GREATER + mappings.AddStructType>(DataTypeNames.DateRange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateRangeConverter(new DateOnlyDateConverter(options.EnableDateTimeInfinityConversions), options))); + #endif + + return mappings; + } + } + + sealed class RangeArrayResolver : RangeResolver, IPgTypeInfoResolver + { + TypeInfoMappingCollection? 
_mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + // numeric ranges + mappings.AddStructArrayType>(DataTypeNames.Int4Range); + mappings.AddStructArrayType>(DataTypeNames.Int8Range); + mappings.AddStructArrayType>(DataTypeNames.NumRange); + mappings.AddStructArrayType>(DataTypeNames.NumRange); + + // tsrange + if (Statics.LegacyTimestampBehavior) + mappings.AddStructArrayType>(DataTypeNames.TsRange); + else + mappings.AddResolverStructArrayType>(DataTypeNames.TsRange); + mappings.AddStructArrayType>(DataTypeNames.TsRange); + + // tstzrange + if (Statics.LegacyTimestampBehavior) + { + mappings.AddStructArrayType>(DataTypeNames.TsTzRange); + mappings.AddStructArrayType>(DataTypeNames.TsTzRange); + } + else + { + mappings.AddResolverStructArrayType>(DataTypeNames.TsTzRange); + mappings.AddStructArrayType>(DataTypeNames.TsTzRange); + } + mappings.AddStructArrayType>(DataTypeNames.TsTzRange); + + // daterange + mappings.AddStructArrayType>(DataTypeNames.DateRange); + mappings.AddStructArrayType>(DataTypeNames.DateRange); +#if NET6_0_OR_GREATER + mappings.AddStructArrayType>(DataTypeNames.DateRange); +#endif + + return mappings; + } + } +} diff --git a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs new file mode 100644 index 0000000000..d0c4fde8fe --- /dev/null +++ b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs @@ -0,0 +1,522 @@ +using System; +using System.Collections; +using System.Collections.Generic; +using System.Collections.Specialized; +using System.IO; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Converters.Internal; +using 
Npgsql.Internal.Postgres; +using Npgsql.PostgresTypes; +using Npgsql.Util; +using NpgsqlTypes; + +namespace Npgsql.Internal.ResolverFactories; + +sealed partial class AdoTypeInfoResolverFactory : PgTypeInfoResolverFactory +{ + Resolver ResolverInstance { get; } = new(); + + public static AdoTypeInfoResolverFactory Instance { get; } = new(); + + public override IPgTypeInfoResolver CreateResolver() => ResolverInstance; + public override IPgTypeInfoResolver CreateArrayResolver() => new ArrayResolver(); + + // Baseline types that are always supported. + class Resolver : IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new()); + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + { + var info = Mappings.Find(type, dataTypeName, options); + if (info is null && dataTypeName is not null) + info = GetEnumTypeInfo(type, dataTypeName.GetValueOrDefault(), options); + + return info; + } + + static PgTypeInfo? GetEnumTypeInfo(Type? 
type, DataTypeName dataTypeName, PgSerializerOptions options) + { + if (type is not null && type != typeof(string)) + return null; + + if (options.DatabaseInfo.GetPostgresType(dataTypeName) is not PostgresEnumType) + return null; + + return new PgTypeInfo(options, new StringTextConverter(options.TextEncoding), dataTypeName); + } + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + // Bool + mappings.AddStructType(DataTypeNames.Bool, + static (options, mapping, _) => mapping.CreateInfo(options, new BoolConverter()), isDefault: true); + + // Numeric + mappings.AddStructType(DataTypeNames.Int2, + static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Int4, + static (options, mapping, _) => mapping.CreateInfo(options, new Int4Converter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Int8, + static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Float4, + static (options, mapping, _) => mapping.CreateInfo(options, new RealConverter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Float8, + static (options, mapping, _) => mapping.CreateInfo(options, new DoubleConverter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Numeric, + static (options, mapping, _) => mapping.CreateInfo(options, new DecimalNumericConverter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Money, + static (options, mapping, _) => mapping.CreateInfo(options, new MoneyConverter()), MatchRequirement.DataTypeName); + + // Text + mappings.AddType(DataTypeNames.Text, + static (options, mapping, _) => mapping.CreateInfo(options, new StringTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text), isDefault: true); + mappings.AddStructType(DataTypeNames.Text, + static (options, mapping, _) => mapping.CreateInfo(options, new 
CharTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text)); + // Uses the bytea converters, as neither type has a header. + mappings.AddType(DataTypeNames.Text, + static (options, mapping, _) => mapping.CreateInfo(options, new ArrayByteaConverter()), + MatchRequirement.DataTypeName); + mappings.AddStructType>(DataTypeNames.Text, + static (options, mapping, _) => mapping.CreateInfo(options, new ReadOnlyMemoryByteaConverter()), + MatchRequirement.DataTypeName); + mappings.AddType(DataTypeNames.Text, + static (options, mapping, _) => new PgTypeInfo(options, new StreamByteaConverter(), new DataTypeName(mapping.DataTypeName), unboxedType: mapping.Type != typeof(Stream) ? mapping.Type : null), + mapping => mapping with { MatchRequirement = MatchRequirement.DataTypeName, TypeMatchPredicate = type => typeof(Stream).IsAssignableFrom(type) }); + //Special mappings, these have no corresponding array mapping. + mappings.AddType(DataTypeNames.Text, + static (options, mapping, _) => mapping.CreateInfo(options, new TextReaderTextConverter(options.TextEncoding), supportsWriting: false, preferredFormat: DataFormat.Text), + MatchRequirement.DataTypeName); + mappings.AddStructType(DataTypeNames.Text, + static (options, mapping, _) => mapping.CreateInfo(options, new GetCharsTextConverter(options.TextEncoding), supportsWriting: false, preferredFormat: DataFormat.Text), + MatchRequirement.DataTypeName); + + // Alternative text types + foreach(var dataTypeName in new[] { "citext", DataTypeNames.Varchar, + DataTypeNames.Bpchar, DataTypeNames.Json, + DataTypeNames.Xml, DataTypeNames.Name, DataTypeNames.RefCursor }) + { + mappings.AddType(dataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new StringTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text), isDefault: true); + mappings.AddStructType(dataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new CharTextConverter(options.TextEncoding), preferredFormat: 
DataFormat.Text)); + // Uses the bytea converters, as neither type has a header. + mappings.AddType(dataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new ArrayByteaConverter()), + MatchRequirement.DataTypeName); + mappings.AddStructType>(dataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new ReadOnlyMemoryByteaConverter()), + MatchRequirement.DataTypeName); + mappings.AddType(dataTypeName, + static (options, mapping, _) => new PgTypeInfo(options, new StreamByteaConverter(), new DataTypeName(mapping.DataTypeName), unboxedType: mapping.Type != typeof(Stream) ? mapping.Type : null), + mapping => mapping with { MatchRequirement = MatchRequirement.DataTypeName, TypeMatchPredicate = type => typeof(Stream).IsAssignableFrom(type) }); + //Special mappings, these have no corresponding array mapping. + mappings.AddType(dataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new TextReaderTextConverter(options.TextEncoding), supportsWriting: false, preferredFormat: DataFormat.Text), + MatchRequirement.DataTypeName); + mappings.AddStructType(dataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new GetCharsTextConverter(options.TextEncoding), supportsWriting: false, preferredFormat: DataFormat.Text), + MatchRequirement.DataTypeName); + } + + // Jsonb + const byte jsonbVersion = 1; + mappings.AddType(DataTypeNames.Jsonb, + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new StringTextConverter(options.TextEncoding))), isDefault: true); + mappings.AddStructType(DataTypeNames.Jsonb, + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new CharTextConverter(options.TextEncoding)))); + mappings.AddType(DataTypeNames.Jsonb, + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new ArrayByteaConverter())), + 
MatchRequirement.DataTypeName); + mappings.AddStructType>(DataTypeNames.Jsonb, + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter>(jsonbVersion, new ReadOnlyMemoryByteaConverter())), + MatchRequirement.DataTypeName); + mappings.AddType(DataTypeNames.Jsonb, + static (options, mapping, _) => new PgTypeInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new StreamByteaConverter()), new DataTypeName(mapping.DataTypeName), unboxedType: mapping.Type != typeof(Stream) ? mapping.Type : null), + mapping => mapping with { MatchRequirement = MatchRequirement.DataTypeName, TypeMatchPredicate = type => typeof(Stream).IsAssignableFrom(type) }); + //Special mappings, these have no corresponding array mapping. + mappings.AddType(DataTypeNames.Jsonb, + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new TextReaderTextConverter(options.TextEncoding)), supportsWriting: false, preferredFormat: DataFormat.Text), + MatchRequirement.DataTypeName); + mappings.AddStructType(DataTypeNames.Jsonb, + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new GetCharsTextConverter(options.TextEncoding)), supportsWriting: false, preferredFormat: DataFormat.Text), + MatchRequirement.DataTypeName); + + // Jsonpath + const byte jsonpathVersion = 1; + mappings.AddType(DataTypeNames.Jsonpath, + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonpathVersion, new StringTextConverter(options.TextEncoding))), isDefault: true); + //Special mappings, these have no corresponding array mapping. 
+ mappings.AddType(DataTypeNames.Jsonpath, + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonpathVersion, new TextReaderTextConverter(options.TextEncoding)), supportsWriting: false, preferredFormat: DataFormat.Text), + MatchRequirement.DataTypeName); + mappings.AddStructType(DataTypeNames.Jsonpath, + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonpathVersion, new GetCharsTextConverter(options.TextEncoding)), supportsWriting: false, preferredFormat: DataFormat.Text), + MatchRequirement.DataTypeName); + + // Bytea + mappings.AddType(DataTypeNames.Bytea, + static (options, mapping, _) => mapping.CreateInfo(options, new ArrayByteaConverter()), isDefault: true); + mappings.AddStructType>(DataTypeNames.Bytea, + static (options, mapping, _) => mapping.CreateInfo(options, new ReadOnlyMemoryByteaConverter())); + mappings.AddType(DataTypeNames.Bytea, + static (options, mapping, _) => new PgTypeInfo(options, new StreamByteaConverter(), new DataTypeName(mapping.DataTypeName), unboxedType: mapping.Type != typeof(Stream) ? 
mapping.Type : null), + mapping => mapping with { TypeMatchPredicate = type => typeof(Stream).IsAssignableFrom(type) }); + + // Varbit + mappings.AddType(DataTypeNames.Varbit, + static (options, mapping, _) => mapping.CreateInfo(options, + new PolymorphicBitStringConverterResolver(options.GetCanonicalTypeId(DataTypeNames.Varbit)), supportsWriting: false)); + mappings.AddType(DataTypeNames.Varbit, + static (options, mapping, _) => mapping.CreateInfo(options, new BitArrayBitStringConverter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Varbit, + static (options, mapping, _) => mapping.CreateInfo(options, new BoolBitStringConverter())); + mappings.AddStructType(DataTypeNames.Varbit, + static (options, mapping, _) => mapping.CreateInfo(options, new BitVector32BitStringConverter())); + + // Bit + mappings.AddType(DataTypeNames.Bit, + static (options, mapping, _) => mapping.CreateInfo(options, + new PolymorphicBitStringConverterResolver(options.GetCanonicalTypeId(DataTypeNames.Bit)), supportsWriting: false)); + mappings.AddType(DataTypeNames.Bit, + static (options, mapping, _) => mapping.CreateInfo(options, new BitArrayBitStringConverter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Bit, + static (options, mapping, _) => mapping.CreateInfo(options, new BoolBitStringConverter())); + mappings.AddStructType(DataTypeNames.Bit, + static (options, mapping, _) => mapping.CreateInfo(options, new BitVector32BitStringConverter())); + + // Timestamp + if (Statics.LegacyTimestampBehavior) + { + mappings.AddStructType(DataTypeNames.Timestamp, + static (options, mapping, _) => mapping.CreateInfo(options, + new LegacyDateTimeConverter(options.EnableDateTimeInfinityConversions, timestamp: true)), isDefault: true); + } + else + { + mappings.AddResolverStructType(DataTypeNames.Timestamp, + static (options, mapping, dataTypeNameMatch) => mapping.CreateInfo(options, + DateTimeConverterResolver.CreateResolver(options, 
options.GetCanonicalTypeId(DataTypeNames.TimestampTz), options.GetCanonicalTypeId(DataTypeNames.Timestamp), + options.EnableDateTimeInfinityConversions), dataTypeNameMatch), isDefault: true); + } + mappings.AddStructType(DataTypeNames.Timestamp, + static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); + + // TimestampTz + if (Statics.LegacyTimestampBehavior) + { + mappings.AddStructType(DataTypeNames.TimestampTz, + static (options, mapping, _) => mapping.CreateInfo(options, + new LegacyDateTimeConverter(options.EnableDateTimeInfinityConversions, timestamp: false)), matchRequirement: MatchRequirement.DataTypeName); + mappings.AddStructType(DataTypeNames.TimestampTz, + static (options, mapping, _) => mapping.CreateInfo(options, new LegacyDateTimeOffsetConverter(options.EnableDateTimeInfinityConversions))); + } + else + { + mappings.AddResolverStructType(DataTypeNames.TimestampTz, + static (options, mapping, dataTypeNameMatch) => mapping.CreateInfo(options, + DateTimeConverterResolver.CreateResolver(options, options.GetCanonicalTypeId(DataTypeNames.TimestampTz), options.GetCanonicalTypeId(DataTypeNames.Timestamp), + options.EnableDateTimeInfinityConversions), dataTypeNameMatch), isDefault: true); + mappings.AddStructType(DataTypeNames.TimestampTz, + static (options, mapping, _) => mapping.CreateInfo(options, new DateTimeOffsetConverter(options.EnableDateTimeInfinityConversions))); + } + mappings.AddStructType(DataTypeNames.TimestampTz, + static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); + + // Date + mappings.AddStructType(DataTypeNames.Date, + static (options, mapping, _) => + mapping.CreateInfo(options, new DateTimeDateConverter(options.EnableDateTimeInfinityConversions)), + MatchRequirement.DataTypeName); + mappings.AddStructType(DataTypeNames.Date, + static (options, mapping, _) => mapping.CreateInfo(options, new Int4Converter())); + #if NET6_0_OR_GREATER + mappings.AddStructType(DataTypeNames.Date, + 
static (options, mapping, _) => mapping.CreateInfo(options, new DateOnlyDateConverter(options.EnableDateTimeInfinityConversions))); + #endif + + // Interval + mappings.AddStructType(DataTypeNames.Interval, + static (options, mapping, _) => mapping.CreateInfo(options, new TimeSpanIntervalConverter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Interval, + static (options, mapping, _) => mapping.CreateInfo(options, new NpgsqlIntervalConverter())); + + // Time + mappings.AddStructType(DataTypeNames.Time, + static (options, mapping, _) => mapping.CreateInfo(options, new TimeSpanTimeConverter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Time, + static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); + #if NET6_0_OR_GREATER + mappings.AddStructType(DataTypeNames.Time, + static (options, mapping, _) => mapping.CreateInfo(options, new TimeOnlyTimeConverter())); + #endif + + // TimeTz + mappings.AddStructType(DataTypeNames.TimeTz, + static (options, mapping, _) => mapping.CreateInfo(options, new DateTimeOffsetTimeTzConverter()), + MatchRequirement.DataTypeName); + + // Uuid + mappings.AddStructType(DataTypeNames.Uuid, + static (options, mapping, _) => mapping.CreateInfo(options, new GuidUuidConverter()), isDefault: true); + + // Hstore + mappings.AddType>("hstore", + static (options, mapping, _) => mapping.CreateInfo(options, new HstoreConverter>(options.TextEncoding)), isDefault: true); + mappings.AddType>("hstore", + static (options, mapping, _) => mapping.CreateInfo(options, new HstoreConverter>(options.TextEncoding))); + + // Unknown + mappings.AddType(DataTypeNames.Unknown, + static (options, mapping, _) => mapping.CreateInfo(options, new StringTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text), + MatchRequirement.DataTypeName); + + // Void + mappings.AddType(DataTypeNames.Void, + static (options, mapping, _) => mapping.CreateInfo(options, new VoidConverter(), supportsWriting: false), + 
MatchRequirement.DataTypeName); + + // UInt internal types + foreach (var dataTypeName in new[] { DataTypeNames.Oid, DataTypeNames.Xid, DataTypeNames.Cid, DataTypeNames.RegType, DataTypeNames.RegConfig }) + { + mappings.AddStructType(dataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new UInt32Converter()), + MatchRequirement.DataTypeName); + } + + // Char + mappings.AddStructType(DataTypeNames.Char, + static (options, mapping, _) => mapping.CreateInfo(options, new InternalCharConverter()), + MatchRequirement.DataTypeName); + mappings.AddStructType(DataTypeNames.Char, + static (options, mapping, _) => mapping.CreateInfo(options, new InternalCharConverter())); + + // Xid8 + mappings.AddStructType(DataTypeNames.Xid8, + static (options, mapping, _) => mapping.CreateInfo(options, new UInt64Converter()), + MatchRequirement.DataTypeName); + + // Oidvector + mappings.AddType( + DataTypeNames.OidVector, + static (options, mapping, _) => mapping.CreateInfo(options, + new ArrayBasedArrayConverter(new(new UInt32Converter(), new PgTypeId(DataTypeNames.Oid)), pgLowerBound: 0)), + MatchRequirement.DataTypeName); + + // Int2vector + mappings.AddType( + DataTypeNames.Int2Vector, + static (options, mapping, _) => mapping.CreateInfo(options, + new ArrayBasedArrayConverter(new(new Int2Converter(), new PgTypeId(DataTypeNames.Int2)), pgLowerBound: 0)), + MatchRequirement.DataTypeName); + + // Tid + mappings.AddStructType(DataTypeNames.Tid, + static (options, mapping, _) => mapping.CreateInfo(options, new TidConverter()), + MatchRequirement.DataTypeName); + + // PgLsn + mappings.AddStructType(DataTypeNames.PgLsn, + static (options, mapping, _) => mapping.CreateInfo(options, new PgLsnConverter()), + MatchRequirement.DataTypeName); + mappings.AddStructType(DataTypeNames.PgLsn, + static (options, mapping, _) => mapping.CreateInfo(options, new UInt64Converter())); + + return mappings; + } + } + + sealed class ArrayResolver : Resolver, IPgTypeInfoResolver + { + 
TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + { + var info = Mappings.Find(type, dataTypeName, options); + + Type? elementType = null; + if (info is null && dataTypeName is not null && + (type is null || type == typeof(object) || TypeInfoMappingCollection.IsArrayLikeType(type, out elementType)) + && options.DatabaseInfo.GetPostgresType(dataTypeName) is PostgresArrayType { Element: var pgElementType }) + { + info = GetEnumArrayTypeInfo(elementType, pgElementType, type, dataTypeName.GetValueOrDefault(), options) ?? + GetObjectArrayTypeInfo(elementType, pgElementType, type, dataTypeName.GetValueOrDefault(), options); + } + return info; + } + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + // Bool + mappings.AddStructArrayType(DataTypeNames.Bool); + + // Numeric + mappings.AddStructArrayType(DataTypeNames.Int2); + mappings.AddStructArrayType(DataTypeNames.Int4); + mappings.AddStructArrayType(DataTypeNames.Int8); + mappings.AddStructArrayType(DataTypeNames.Float4); + mappings.AddStructArrayType(DataTypeNames.Float8); + mappings.AddStructArrayType(DataTypeNames.Numeric); + mappings.AddStructArrayType(DataTypeNames.Money); + + // Text + mappings.AddArrayType(DataTypeNames.Text); + mappings.AddStructArrayType(DataTypeNames.Text); + mappings.AddArrayType(DataTypeNames.Text); + mappings.AddStructArrayType>(DataTypeNames.Text); + mappings.AddArrayType(DataTypeNames.Text); + + // Alternative text types + foreach(var dataTypeName in new[] { "citext", DataTypeNames.Varchar, + DataTypeNames.Bpchar, DataTypeNames.Json, + DataTypeNames.Xml, DataTypeNames.Name, DataTypeNames.RefCursor }) + { + mappings.AddArrayType(dataTypeName); + mappings.AddStructArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + 
mappings.AddStructArrayType>(dataTypeName); + mappings.AddArrayType(dataTypeName); + } + + // Jsonb + mappings.AddArrayType(DataTypeNames.Jsonb); + mappings.AddStructArrayType(DataTypeNames.Jsonb); + mappings.AddArrayType(DataTypeNames.Jsonb); + mappings.AddStructArrayType>(DataTypeNames.Jsonb); + mappings.AddArrayType(DataTypeNames.Jsonb); + + // Jsonpath + mappings.AddArrayType(DataTypeNames.Jsonpath); + + // Bytea + mappings.AddArrayType(DataTypeNames.Bytea); + mappings.AddStructArrayType>(DataTypeNames.Bytea); + mappings.AddArrayType(DataTypeNames.Bytea); + + // Varbit + // Object mapping first. + mappings.AddPolymorphicResolverArrayType(DataTypeNames.Varbit, static options => resolution => resolution.Converter switch + { + BoolBitStringConverter => PgConverterFactory.CreatePolymorphicArrayConverter( + () => new ArrayBasedArrayConverter(resolution, typeof(Array)), + () => new ArrayBasedArrayConverter(new(new NullableConverter(resolution.GetConverter()), resolution.PgTypeId), typeof(Array)), + options), + BitArrayBitStringConverter => new ArrayBasedArrayConverter(resolution, typeof(Array)), + _ => throw new NotSupportedException() + }); + mappings.AddArrayType(DataTypeNames.Varbit); + mappings.AddStructArrayType(DataTypeNames.Varbit); + mappings.AddStructArrayType(DataTypeNames.Varbit); + + // Bit + // Object mapping first. 
+ mappings.AddPolymorphicResolverArrayType(DataTypeNames.Bit, static options => resolution => resolution.Converter switch + { + BoolBitStringConverter => PgConverterFactory.CreatePolymorphicArrayConverter( + () => new ArrayBasedArrayConverter(resolution, typeof(Array)), + () => new ArrayBasedArrayConverter(new(new NullableConverter(resolution.GetConverter()), resolution.PgTypeId), typeof(Array)), + options), + BitArrayBitStringConverter => new ArrayBasedArrayConverter(resolution, typeof(Array)), + _ => throw new NotSupportedException() + }); + mappings.AddArrayType(DataTypeNames.Bit); + mappings.AddStructArrayType(DataTypeNames.Bit); + mappings.AddStructArrayType(DataTypeNames.Bit); + + // Timestamp + if (Statics.LegacyTimestampBehavior) + mappings.AddStructArrayType(DataTypeNames.Timestamp); + else + mappings.AddResolverStructArrayType(DataTypeNames.Timestamp); + mappings.AddStructArrayType(DataTypeNames.Timestamp); + + // TimestampTz + if (Statics.LegacyTimestampBehavior) + mappings.AddStructArrayType(DataTypeNames.TimestampTz); + else + mappings.AddResolverStructArrayType(DataTypeNames.TimestampTz); + mappings.AddStructArrayType(DataTypeNames.TimestampTz); + mappings.AddStructArrayType(DataTypeNames.TimestampTz); + + // Date + mappings.AddStructArrayType(DataTypeNames.Date); + mappings.AddStructArrayType(DataTypeNames.Date); + #if NET6_0_OR_GREATER + mappings.AddStructArrayType(DataTypeNames.Date); + #endif + + // Interval + mappings.AddStructArrayType(DataTypeNames.Interval); + mappings.AddStructArrayType(DataTypeNames.Interval); + + // Time + mappings.AddStructArrayType(DataTypeNames.Time); + mappings.AddStructArrayType(DataTypeNames.Time); + #if NET6_0_OR_GREATER + mappings.AddStructArrayType(DataTypeNames.Time); + #endif + + // TimeTz + mappings.AddStructArrayType(DataTypeNames.TimeTz); + // Uuid + mappings.AddStructArrayType(DataTypeNames.Uuid); + + // Hstore + mappings.AddArrayType>("hstore"); + mappings.AddArrayType>("hstore"); + + // UInt internal types 
+ foreach (var dataTypeName in new[] { DataTypeNames.Oid, DataTypeNames.Xid, DataTypeNames.Cid, DataTypeNames.RegType, (string)DataTypeNames.RegConfig }) + { + mappings.AddStructArrayType(dataTypeName); + } + + // Char + mappings.AddStructArrayType(DataTypeNames.Char); + mappings.AddStructArrayType(DataTypeNames.Char); + + // Xid8 + mappings.AddStructArrayType(DataTypeNames.Xid8); + + // Oidvector + mappings.AddArrayType(DataTypeNames.OidVector); + + // Int2vector + mappings.AddArrayType(DataTypeNames.Int2Vector); + + return mappings; + } + + static PgTypeInfo? GetObjectArrayTypeInfo(Type? elementType, PostgresType pgElementType, Type? type, DataTypeName dataTypeName, + PgSerializerOptions options) + { + if (elementType != typeof(object)) + return null; + + // Probe if there is any mapping at all for this element type. + var elementId = options.ToCanonicalTypeId(pgElementType); + if (options.GetDefaultTypeInfo(elementId) is null) + return null; + + var mappings = new TypeInfoMappingCollection(); + mappings.AddType(pgElementType.DataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new ObjectConverter(options, elementId)), MatchRequirement.DataTypeName); + mappings.AddArrayType(pgElementType.DataTypeName); + return mappings.Find(type, dataTypeName, options); + } + + static PgTypeInfo? GetEnumArrayTypeInfo(Type? elementType, PostgresType pgElementType, Type? 
type, DataTypeName dataTypeName, PgSerializerOptions options) + { + if ((type != typeof(object) && elementType is not null && elementType != typeof(string)) || pgElementType is not PostgresEnumType enumType) + return null; + + var mappings = new TypeInfoMappingCollection(); + mappings.AddType(enumType.DataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new StringTextConverter(options.TextEncoding)), MatchRequirement.DataTypeName); + mappings.AddArrayType(enumType.DataTypeName); + return mappings.Find(type, dataTypeName, options); + } + } +} diff --git a/src/Npgsql/Internal/ResolverFactories/ExtraConversionsTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/ExtraConversionsTypeInfoResolverFactory.cs new file mode 100644 index 0000000000..1614b6f1e4 --- /dev/null +++ b/src/Npgsql/Internal/ResolverFactories/ExtraConversionsTypeInfoResolverFactory.cs @@ -0,0 +1,239 @@ +using System; +using System.Collections.Immutable; +using System.Numerics; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Internal.ResolverFactories; + +sealed class ExtraConversionResolverFactory : PgTypeInfoResolverFactory +{ + public override IPgTypeInfoResolver CreateResolver() => new Resolver(); + public override IPgTypeInfoResolver CreateArrayResolver() => new ArrayResolver(); + + class Resolver : IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddInfos(new()); + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings) + { + // Int2 + mappings.AddStructType(DataTypeNames.Int2, + static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter())); + mappings.AddStructType(DataTypeNames.Int2, + static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter())); + mappings.AddStructType(DataTypeNames.Int2, + static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter())); + mappings.AddStructType(DataTypeNames.Int2, + static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter())); + mappings.AddStructType(DataTypeNames.Int2, + static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter())); + mappings.AddStructType(DataTypeNames.Int2, + static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter())); + mappings.AddStructType(DataTypeNames.Int2, + static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter())); + + // Int4 + mappings.AddStructType(DataTypeNames.Int4, + static (options, mapping, _) => mapping.CreateInfo(options, new Int4Converter())); + mappings.AddStructType(DataTypeNames.Int4, + static (options, mapping, _) => mapping.CreateInfo(options, new Int4Converter())); + mappings.AddStructType(DataTypeNames.Int4, + static (options, mapping, _) => mapping.CreateInfo(options, new Int4Converter())); + mappings.AddStructType(DataTypeNames.Int4, + static (options, mapping, _) => mapping.CreateInfo(options, new Int4Converter())); + mappings.AddStructType(DataTypeNames.Int4, + static (options, mapping, _) => mapping.CreateInfo(options, new Int4Converter())); + mappings.AddStructType(DataTypeNames.Int4, + static (options, mapping, _) => mapping.CreateInfo(options, new Int4Converter())); + mappings.AddStructType(DataTypeNames.Int4, + static (options, mapping, _) => mapping.CreateInfo(options, 
new Int4Converter())); + + // Int8 + mappings.AddStructType(DataTypeNames.Int8, + static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); + mappings.AddStructType(DataTypeNames.Int8, + static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); + mappings.AddStructType(DataTypeNames.Int8, + static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); + mappings.AddStructType(DataTypeNames.Int8, + static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); + mappings.AddStructType(DataTypeNames.Int8, + static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); + mappings.AddStructType(DataTypeNames.Int8, + static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); + mappings.AddStructType(DataTypeNames.Int8, + static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); + + // Float4 + mappings.AddStructType(DataTypeNames.Float4, + static (options, mapping, _) => mapping.CreateInfo(options, new RealConverter())); + + // Float8 + mappings.AddStructType(DataTypeNames.Float8, + static (options, mapping, _) => mapping.CreateInfo(options, new DoubleConverter())); + + // Numeric + mappings.AddStructType(DataTypeNames.Numeric, + static (options, mapping, _) => mapping.CreateInfo(options, new DecimalNumericConverter())); + mappings.AddStructType(DataTypeNames.Numeric, + static (options, mapping, _) => mapping.CreateInfo(options, new DecimalNumericConverter())); + mappings.AddStructType(DataTypeNames.Numeric, + static (options, mapping, _) => mapping.CreateInfo(options, new DecimalNumericConverter())); + mappings.AddStructType(DataTypeNames.Numeric, + static (options, mapping, _) => mapping.CreateInfo(options, new DecimalNumericConverter())); + mappings.AddStructType(DataTypeNames.Numeric, + static (options, mapping, _) => mapping.CreateInfo(options, new DecimalNumericConverter())); + 
mappings.AddStructType(DataTypeNames.Numeric, + static (options, mapping, _) => mapping.CreateInfo(options, new DecimalNumericConverter())); + mappings.AddStructType(DataTypeNames.Numeric, + static (options, mapping, _) => mapping.CreateInfo(options, new BigIntegerNumericConverter())); + + // Bytea + mappings.AddStructType>(DataTypeNames.Bytea, + static (options, mapping, _) => mapping.CreateInfo(options, new ArraySegmentByteaConverter())); + mappings.AddStructType>(DataTypeNames.Bytea, + static (options, mapping, _) => mapping.CreateInfo(options, new MemoryByteaConverter())); + + // Varbit + mappings.AddType(DataTypeNames.Varbit, + static (options, mapping, _) => mapping.CreateInfo(options, new StringBitStringConverter())); + + // Bit + mappings.AddType(DataTypeNames.Bit, + static (options, mapping, _) => mapping.CreateInfo(options, new StringBitStringConverter())); + + // Text + mappings.AddType(DataTypeNames.Text, + static (options, mapping, _) => mapping.CreateInfo(options, new CharArrayTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text)); + mappings.AddStructType>(DataTypeNames.Text, + static (options, mapping, _) => mapping.CreateInfo(options, new ReadOnlyMemoryTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text)); + mappings.AddStructType>(DataTypeNames.Text, + static (options, mapping, _) => mapping.CreateInfo(options, new CharArraySegmentTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text)); + + // Alternative text types + foreach(var dataTypeName in new[] { "citext", DataTypeNames.Varchar, + DataTypeNames.Bpchar, DataTypeNames.Json, + DataTypeNames.Xml, DataTypeNames.Name, DataTypeNames.RefCursor }) + { + mappings.AddType(dataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new CharArrayTextConverter(options.TextEncoding), + preferredFormat: DataFormat.Text)); + mappings.AddStructType>(dataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new 
ReadOnlyMemoryTextConverter(options.TextEncoding), + preferredFormat: DataFormat.Text)); + mappings.AddStructType>(dataTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new CharArraySegmentTextConverter(options.TextEncoding), + preferredFormat: DataFormat.Text)); + } + + // Jsonb + const byte jsonbVersion = 1; + mappings.AddType(DataTypeNames.Jsonb, + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new CharArrayTextConverter(options.TextEncoding)))); + mappings.AddStructType>(DataTypeNames.Jsonb, + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter>(jsonbVersion, new ReadOnlyMemoryTextConverter(options.TextEncoding)))); + mappings.AddStructType>(DataTypeNames.Jsonb, + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter>(jsonbVersion, new CharArraySegmentTextConverter(options.TextEncoding)))); + + // Hstore + mappings.AddType>("hstore", + static (options, mapping, _) => mapping.CreateInfo(options, new HstoreConverter>(options.TextEncoding, result => result.ToImmutableDictionary()))); + + return mappings; + } + } + + sealed class ArrayResolver : Resolver, IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddArrayInfos(new(base.Mappings)); + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddArrayInfos(TypeInfoMappingCollection mappings) + { + // Int2 + mappings.AddStructArrayType(DataTypeNames.Int2); + mappings.AddStructArrayType(DataTypeNames.Int2); + mappings.AddStructArrayType(DataTypeNames.Int2); + mappings.AddStructArrayType(DataTypeNames.Int2); + mappings.AddStructArrayType(DataTypeNames.Int2); + mappings.AddStructArrayType(DataTypeNames.Int2); + mappings.AddStructArrayType(DataTypeNames.Int2); + + // Int4 + mappings.AddStructArrayType(DataTypeNames.Int4); + mappings.AddStructArrayType(DataTypeNames.Int4); + mappings.AddStructArrayType(DataTypeNames.Int4); + mappings.AddStructArrayType(DataTypeNames.Int4); + mappings.AddStructArrayType(DataTypeNames.Int4); + mappings.AddStructArrayType(DataTypeNames.Int4); + mappings.AddStructArrayType(DataTypeNames.Int4); + + // Int8 + mappings.AddStructArrayType(DataTypeNames.Int8); + mappings.AddStructArrayType(DataTypeNames.Int8); + mappings.AddStructArrayType(DataTypeNames.Int8); + mappings.AddStructArrayType(DataTypeNames.Int8); + mappings.AddStructArrayType(DataTypeNames.Int8); + mappings.AddStructArrayType(DataTypeNames.Int8); + mappings.AddStructArrayType(DataTypeNames.Int8); + + // Float4 + mappings.AddStructArrayType(DataTypeNames.Float4); + + // Float8 + mappings.AddStructArrayType(DataTypeNames.Float8); + + // Numeric + mappings.AddStructArrayType(DataTypeNames.Numeric); + mappings.AddStructArrayType(DataTypeNames.Numeric); + mappings.AddStructArrayType(DataTypeNames.Numeric); + mappings.AddStructArrayType(DataTypeNames.Numeric); + mappings.AddStructArrayType(DataTypeNames.Numeric); + mappings.AddStructArrayType(DataTypeNames.Numeric); + mappings.AddStructArrayType(DataTypeNames.Numeric); + + // Bytea + mappings.AddStructArrayType>(DataTypeNames.Bytea); + mappings.AddStructArrayType>(DataTypeNames.Bytea); + + // Varbit + mappings.AddArrayType(DataTypeNames.Varbit); + + 
// Bit + mappings.AddArrayType(DataTypeNames.Bit); + + // Text + mappings.AddArrayType(DataTypeNames.Text); + mappings.AddStructArrayType>(DataTypeNames.Text); + mappings.AddStructArrayType>(DataTypeNames.Text); + + // Alternative text types + foreach(var dataTypeName in new[] { "citext", DataTypeNames.Varchar, + DataTypeNames.Bpchar, DataTypeNames.Json, + DataTypeNames.Xml, DataTypeNames.Name, DataTypeNames.RefCursor }) + { + mappings.AddArrayType(dataTypeName); + mappings.AddStructArrayType>(dataTypeName); + mappings.AddStructArrayType>(dataTypeName); + } + + // Jsonb + mappings.AddArrayType(DataTypeNames.Jsonb); + mappings.AddStructArrayType>(DataTypeNames.Jsonb); + mappings.AddStructArrayType>(DataTypeNames.Jsonb); + + // Hstore + mappings.AddArrayType>("hstore"); + + return mappings; + } + } +} diff --git a/src/Npgsql/Internal/ResolverFactories/FullTextSearchTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/FullTextSearchTypeInfoResolverFactory.cs new file mode 100644 index 0000000000..38358ea430 --- /dev/null +++ b/src/Npgsql/Internal/ResolverFactories/FullTextSearchTypeInfoResolverFactory.cs @@ -0,0 +1,93 @@ +using System; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; +using Npgsql.Properties; +using NpgsqlTypes; + +namespace Npgsql.Internal.ResolverFactories; + +sealed class FullTextSearchTypeInfoResolverFactory : PgTypeInfoResolverFactory +{ + public override IPgTypeInfoResolver CreateResolver() => new Resolver(); + public override IPgTypeInfoResolver CreateArrayResolver() => new ArrayResolver(); + + public static void CheckUnsupported(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + { + if (type != typeof(object) && (dataTypeName == DataTypeNames.TsQuery || dataTypeName == DataTypeNames.TsVector)) + throw new NotSupportedException( + string.Format(NpgsqlStrings.FullTextSearchNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableFullTextSearch), typeof(TBuilder).Name)); + + if (type is null) + return; + + if (TypeInfoMappingCollection.IsArrayLikeType(type, out var elementType)) + type = elementType; + + if (type is { IsConstructedGenericType: true } && type.GetGenericTypeDefinition() == typeof(Nullable<>)) + type = type.GetGenericArguments()[0]; + + if (type == typeof(NpgsqlTsVector) || typeof(NpgsqlTsQuery).IsAssignableFrom(type)) + throw new NotSupportedException( + string.Format(NpgsqlStrings.FullTextSearchNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableFullTextSearch), typeof(TBuilder).Name)); + } + + class Resolver : IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new()); + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + // tsvector + mappings.AddType(DataTypeNames.TsVector, + static (options, mapping, _) => mapping.CreateInfo(options, new TsVectorConverter(options.TextEncoding)), isDefault: true); + + // tsquery + mappings.AddType(DataTypeNames.TsQuery, + static (options, mapping, _) => mapping.CreateInfo(options, new TsQueryConverter(options.TextEncoding)), isDefault: true); + mappings.AddType(DataTypeNames.TsQuery, + static (options, mapping, _) => mapping.CreateInfo(options, new TsQueryConverter(options.TextEncoding))); + mappings.AddType(DataTypeNames.TsQuery, + static (options, mapping, _) => mapping.CreateInfo(options, new TsQueryConverter(options.TextEncoding))); + mappings.AddType(DataTypeNames.TsQuery, + static (options, mapping, _) => mapping.CreateInfo(options, new TsQueryConverter(options.TextEncoding))); + mappings.AddType(DataTypeNames.TsQuery, + static (options, mapping, _) => mapping.CreateInfo(options, new TsQueryConverter(options.TextEncoding))); + mappings.AddType(DataTypeNames.TsQuery, + static (options, mapping, _) => mapping.CreateInfo(options, new TsQueryConverter(options.TextEncoding))); + mappings.AddType(DataTypeNames.TsQuery, + static (options, mapping, _) => mapping.CreateInfo(options, new TsQueryConverter(options.TextEncoding))); + + return mappings; + } + } + + sealed class ArrayResolver : Resolver, IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + // tsvector + mappings.AddArrayType(DataTypeNames.TsVector); + + // tsquery + mappings.AddArrayType(DataTypeNames.TsQuery); + mappings.AddArrayType(DataTypeNames.TsQuery); + mappings.AddArrayType(DataTypeNames.TsQuery); + mappings.AddArrayType(DataTypeNames.TsQuery); + mappings.AddArrayType(DataTypeNames.TsQuery); + mappings.AddArrayType(DataTypeNames.TsQuery); + mappings.AddArrayType(DataTypeNames.TsQuery); + + return mappings; + } + } +} diff --git a/src/Npgsql/Internal/ResolverFactories/GeometricTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/GeometricTypeInfoResolverFactory.cs new file mode 100644 index 0000000000..a365434f54 --- /dev/null +++ b/src/Npgsql/Internal/ResolverFactories/GeometricTypeInfoResolverFactory.cs @@ -0,0 +1,63 @@ +using System; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; +using NpgsqlTypes; + +namespace Npgsql.Internal.ResolverFactories; + +sealed class GeometricTypeInfoResolverFactory : PgTypeInfoResolverFactory +{ + public override IPgTypeInfoResolver CreateResolver() => new Resolver(); + public override IPgTypeInfoResolver CreateArrayResolver() => new ArrayResolver(); + + class Resolver : IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new()); + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + mappings.AddStructType(DataTypeNames.Point, + static (options, mapping, _) => mapping.CreateInfo(options, new PointConverter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Box, + static (options, mapping, _) => mapping.CreateInfo(options, new BoxConverter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Polygon, + static (options, mapping, _) => mapping.CreateInfo(options, new PolygonConverter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Line, + static (options, mapping, _) => mapping.CreateInfo(options, new LineConverter()), isDefault: true); + mappings.AddStructType(DataTypeNames.LSeg, + static (options, mapping, _) => mapping.CreateInfo(options, new LineSegmentConverter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Path, + static (options, mapping, _) => mapping.CreateInfo(options, new PathConverter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Circle, + static (options, mapping, _) => mapping.CreateInfo(options, new CircleConverter()), isDefault: true); + + return mappings; + } + } + + sealed class ArrayResolver : Resolver, IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + mappings.AddStructArrayType(DataTypeNames.Point); + mappings.AddStructArrayType(DataTypeNames.Box); + mappings.AddStructArrayType(DataTypeNames.Polygon); + mappings.AddStructArrayType(DataTypeNames.Line); + mappings.AddStructArrayType(DataTypeNames.LSeg); + mappings.AddStructArrayType(DataTypeNames.Path); + mappings.AddStructArrayType(DataTypeNames.Circle); + + return mappings; + } + } +} diff --git a/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs new file mode 100644 index 0000000000..b354f3852c --- /dev/null +++ b/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs @@ -0,0 +1,173 @@ +using System; +using System.Diagnostics.CodeAnalysis; +using System.Text; +using System.Text.Json; +using System.Text.Json.Nodes; +using System.Text.Json.Serialization.Metadata; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Internal.ResolverFactories; + +[RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] +[RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] +sealed class JsonDynamicTypeInfoResolverFactory : PgTypeInfoResolverFactory +{ + readonly Type[]? _jsonbClrTypes; + readonly Type[]? _jsonClrTypes; + readonly JsonSerializerOptions? _serializerOptions; + + public JsonDynamicTypeInfoResolverFactory(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerOptions? 
serializerOptions = null) + { + _jsonbClrTypes = jsonbClrTypes; + _jsonClrTypes = jsonClrTypes; + _serializerOptions = serializerOptions; + } + + public override IPgTypeInfoResolver CreateResolver() => new Resolver(_jsonbClrTypes, _jsonClrTypes, _serializerOptions); + public override IPgTypeInfoResolver CreateArrayResolver() => new ArrayResolver(_jsonbClrTypes, _jsonClrTypes, _serializerOptions); + + [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] + [RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] + class Resolver : DynamicTypeInfoResolver, IPgTypeInfoResolver + { + JsonSerializerOptions? _serializerOptions; + JsonSerializerOptions SerializerOptions + #if NET7_0_OR_GREATER + => _serializerOptions ??= JsonSerializerOptions.Default; + #else + => _serializerOptions ??= new(); + #endif + + readonly Type[] _jsonbClrTypes; + readonly Type[] _jsonClrTypes; + TypeInfoMappingCollection? _mappings; + + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(), _jsonbClrTypes, _jsonClrTypes, SerializerOptions); + + public Resolver(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerOptions? serializerOptions = null) + { + _jsonbClrTypes = jsonbClrTypes ?? Array.Empty(); + _jsonClrTypes = jsonClrTypes ?? Array.Empty(); + _serializerOptions = serializerOptions; + } + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options) ?? base.GetTypeInfo(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings, Type[] jsonbClrTypes, Type[] jsonClrTypes, JsonSerializerOptions serializerOptions) + { + // We do GetTypeInfo calls directly so we need a resolver. 
+ serializerOptions.TypeInfoResolver ??= new DefaultJsonTypeInfoResolver(); + + // These live in the RUC/RDC part as JsonValues can contain any .NET type. + foreach (var dataTypeName in new[] { DataTypeNames.Jsonb, DataTypeNames.Json }) + { + var jsonb = dataTypeName == DataTypeNames.Jsonb; + mappings.AddType(dataTypeName, (options, mapping, _) => + mapping.CreateInfo(options, new JsonConverter(jsonb, options.TextEncoding, serializerOptions))); + mappings.AddType(dataTypeName, (options, mapping, _) => + mapping.CreateInfo(options, new JsonConverter(jsonb, options.TextEncoding, serializerOptions))); + mappings.AddType(dataTypeName, (options, mapping, _) => + mapping.CreateInfo(options, new JsonConverter(jsonb, options.TextEncoding, serializerOptions))); + mappings.AddType(dataTypeName, (options, mapping, _) => + mapping.CreateInfo(options, new JsonConverter(jsonb, options.TextEncoding, serializerOptions))); + } + + AddUserMappings(jsonb: true, jsonbClrTypes); + AddUserMappings(jsonb: false, jsonClrTypes); + + void AddUserMappings(bool jsonb, Type[] clrTypes) + { + var dynamicMappings = CreateCollection(); + var dataTypeName = (string)(jsonb ? 
DataTypeNames.Jsonb : DataTypeNames.Json); + foreach (var jsonType in clrTypes) + { + var jsonTypeInfo = serializerOptions.GetTypeInfo(jsonType); + dynamicMappings.AddMapping(jsonTypeInfo.Type, dataTypeName, + factory: (options, mapping, _) => mapping.CreateInfo(options, + CreateSystemTextJsonConverter(mapping.Type, jsonb, options.TextEncoding, serializerOptions, jsonType))); + + if (!jsonType.IsValueType && jsonTypeInfo.PolymorphismOptions is not null) + { + foreach (var derived in jsonTypeInfo.PolymorphismOptions.DerivedTypes) + dynamicMappings.AddMapping(derived.DerivedType, dataTypeName, + factory: (options, mapping, _) => mapping.CreateInfo(options, + CreateSystemTextJsonConverter(mapping.Type, jsonb, options.TextEncoding, serializerOptions, jsonType))); + } + } + mappings.AddRange(dynamicMappings.ToTypeInfoMappingCollection()); + } + + return mappings; + } + + protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) + { + // Match all types except null, object and text types as long as DataTypeName (json/jsonb) is present. + if (type is null || type == typeof(object) || Array.IndexOf(PgSerializerOptions.WellKnownTextTypes, type) != -1 + || dataTypeName != DataTypeNames.Jsonb && dataTypeName != DataTypeNames.Json) + return null; + + return CreateCollection().AddMapping(type, dataTypeName, (options, mapping, _) => + { + var jsonb = dataTypeName == DataTypeNames.Jsonb; + + // For jsonb we can't properly support polymorphic serialization unless we do quite some additional work + // so we default to mapping.Type instead (exact types will never serialize their "$type" fields, essentially disabling the feature). + var baseType = jsonb ? 
mapping.Type : typeof(object); + + return mapping.CreateInfo(options, + CreateSystemTextJsonConverter(mapping.Type, jsonb, options.TextEncoding, SerializerOptions, baseType)); + }); + } + + static PgConverter CreateSystemTextJsonConverter(Type valueType, bool jsonb, Encoding textEncoding, JsonSerializerOptions serializerOptions, Type baseType) + => (PgConverter)Activator.CreateInstance( + typeof(JsonConverter<,>).MakeGenericType(valueType, baseType), + jsonb, + textEncoding, + serializerOptions)!; + } + + [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] + [RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] + sealed class ArrayResolver : Resolver, IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings), base.Mappings); + + public ArrayResolver(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerOptions? serializerOptions = null) + : base(jsonbClrTypes, jsonClrTypes, serializerOptions) { } + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options) ?? base.GetTypeInfo(type, dataTypeName, options); + + protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) + => type is not null && IsArrayLikeType(type, out var elementType) && IsArrayDataTypeName(dataTypeName, options, out var elementDataTypeName) + ? 
base.GetMappings(elementType, elementDataTypeName, options)?.AddArrayMapping(elementType, elementDataTypeName) + : null; + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings, TypeInfoMappingCollection baseMappings) + { + if (baseMappings.Items.Count == 0) + return mappings; + + foreach (var dataTypeName in new[] { DataTypeNames.Jsonb, DataTypeNames.Json }) + { + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + } + + var dynamicMappings = CreateCollection(baseMappings); + foreach (var mapping in baseMappings.Items) + dynamicMappings.AddArrayMapping(mapping.Type, mapping.DataTypeName); + mappings.AddRange(dynamicMappings.ToTypeInfoMappingCollection()); + + return mappings; + } + } +} + diff --git a/src/Npgsql/Internal/ResolverFactories/JsonTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/JsonTypeInfoResolverFactory.cs new file mode 100644 index 0000000000..a94d5d36f8 --- /dev/null +++ b/src/Npgsql/Internal/ResolverFactories/JsonTypeInfoResolverFactory.cs @@ -0,0 +1,100 @@ +using System; +using System.Text.Json; +using System.Text.Json.Serialization.Metadata; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Internal.ResolverFactories; + +sealed class JsonTypeInfoResolverFactory : PgTypeInfoResolverFactory +{ + readonly JsonSerializerOptions? _serializerOptions; + + public JsonTypeInfoResolverFactory(JsonSerializerOptions? serializerOptions = null) => _serializerOptions = serializerOptions; + + public override IPgTypeInfoResolver CreateResolver() => new Resolver(_serializerOptions); + public override IPgTypeInfoResolver CreateArrayResolver() => new ArrayResolver(_serializerOptions); + + class Resolver : IPgTypeInfoResolver + { + static JsonSerializerOptions? DefaultSerializerOptions; + + readonly JsonSerializerOptions _serializerOptions; + TypeInfoMappingCollection? 
_mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(), _serializerOptions); + + public Resolver(JsonSerializerOptions? serializerOptions = null) + { + if (serializerOptions is null) + { + serializerOptions = DefaultSerializerOptions; + if (serializerOptions is null) + { + serializerOptions = new JsonSerializerOptions(); + serializerOptions.TypeInfoResolver = new BasicJsonTypeInfoResolver(); + DefaultSerializerOptions = serializerOptions; + } + } + + _serializerOptions = serializerOptions; + } + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings, JsonSerializerOptions serializerOptions) + { + // Jsonb is the first default for JsonDocument + foreach (var dataTypeName in new[] { DataTypeNames.Jsonb, DataTypeNames.Json }) + { + var jsonb = dataTypeName == DataTypeNames.Jsonb; + mappings.AddType(dataTypeName, (options, mapping, _) => + mapping.CreateInfo(options, + new JsonConverter(jsonb, options.TextEncoding, serializerOptions)), + isDefault: true); + mappings.AddStructType(dataTypeName, (options, mapping, _) => + mapping.CreateInfo(options, + new JsonConverter(jsonb, options.TextEncoding, serializerOptions))); + } + + return mappings; + } + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + sealed class BasicJsonTypeInfoResolver : IJsonTypeInfoResolver + { + public JsonTypeInfo? GetTypeInfo(Type type, JsonSerializerOptions options) + { + if (type == typeof(JsonDocument)) + return JsonMetadataServices.CreateValueInfo(options, JsonMetadataServices.JsonDocumentConverter); + if (type == typeof(JsonElement)) + return JsonMetadataServices.CreateValueInfo(options, JsonMetadataServices.JsonElementConverter); + return null; + } + } + } + + sealed class ArrayResolver : Resolver, IPgTypeInfoResolver + { + TypeInfoMappingCollection? 
_mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); + + public ArrayResolver(JsonSerializerOptions? serializerOptions = null) + : base(serializerOptions) + { + } + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + foreach (var dataTypeName in new[] { DataTypeNames.Jsonb, DataTypeNames.Json }) + { + mappings.AddArrayType(dataTypeName); + mappings.AddStructArrayType(dataTypeName); + } + + return mappings; + } + } +} diff --git a/src/Npgsql/Internal/ResolverFactories/LTreeTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/LTreeTypeInfoResolverFactory.cs new file mode 100644 index 0000000000..7cb2f75a6a --- /dev/null +++ b/src/Npgsql/Internal/ResolverFactories/LTreeTypeInfoResolverFactory.cs @@ -0,0 +1,66 @@ +using System; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; +using Npgsql.Properties; + +namespace Npgsql.Internal.ResolverFactories; + +sealed class LTreeTypeInfoResolverFactory : PgTypeInfoResolverFactory +{ + public override IPgTypeInfoResolver CreateResolver() => new Resolver(); + public override IPgTypeInfoResolver CreateArrayResolver() => new ArrayResolver(); + + public static void CheckUnsupported(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + { + if (type != typeof(object) && dataTypeName is { UnqualifiedName: "ltree" or "lquery" or "ltxtquery" }) + throw new NotSupportedException( + string.Format(NpgsqlStrings.LTreeNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableLTree), + typeof(TBuilder).Name)); + } + + class Resolver : IPgTypeInfoResolver + { + const byte LTreeVersion = 1; + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new()); + + public PgTypeInfo? GetTypeInfo(Type? 
type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + mappings.AddType("ltree", + static (options, mapping, _) => mapping.CreateInfo(options, + new VersionPrefixedTextConverter(LTreeVersion, new StringTextConverter(options.TextEncoding))), + MatchRequirement.DataTypeName); + mappings.AddType("lquery", + static (options, mapping, _) => mapping.CreateInfo(options, + new VersionPrefixedTextConverter(LTreeVersion, new StringTextConverter(options.TextEncoding))), + MatchRequirement.DataTypeName); + mappings.AddType("ltxtquery", + static (options, mapping, _) => mapping.CreateInfo(options, + new VersionPrefixedTextConverter(LTreeVersion, new StringTextConverter(options.TextEncoding))), + MatchRequirement.DataTypeName); + + return mappings; + } + } + + sealed class ArrayResolver : Resolver, IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + mappings.AddArrayType("ltree"); + mappings.AddArrayType("lquery"); + mappings.AddArrayType("ltxtquery"); + + return mappings; + } + } +} diff --git a/src/Npgsql/Internal/ResolverFactories/NetworkTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/NetworkTypeInfoResolverFactory.cs new file mode 100644 index 0000000000..da738f54d0 --- /dev/null +++ b/src/Npgsql/Internal/ResolverFactories/NetworkTypeInfoResolverFactory.cs @@ -0,0 +1,96 @@ +using System; +using System.Diagnostics.CodeAnalysis; +using System.Net; +using System.Net.NetworkInformation; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; +using NpgsqlTypes; + +namespace Npgsql.Internal.ResolverFactories; + +sealed class NetworkTypeInfoResolverFactory : PgTypeInfoResolverFactory +{ + public override IPgTypeInfoResolver CreateResolver() => new Resolver(); + public override IPgTypeInfoResolver CreateArrayResolver() => new ArrayResolver(); + + class Resolver : IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new()); + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + // macaddr + mappings.AddType(DataTypeNames.MacAddr, + static (options, mapping, _) => mapping.CreateInfo(options, new MacaddrConverter(macaddr8: false)), isDefault: true); + mappings.AddType(DataTypeNames.MacAddr8, + static (options, mapping, _) => mapping.CreateInfo(options, new MacaddrConverter(macaddr8: true)), + mapping => mapping with { MatchRequirement = MatchRequirement.DataTypeName }); + + // inet + // This is one of the rare mappings that force us to use reflection for a lack of any alternative. + // There are certain IPAddress values like Loopback or Any that return a *private* derived type (see https://github.com/dotnet/runtime/issues/27870). + // However we still need to be able to resolve an exactly typed converter for those values. + // We do so by wrapping our converter in a casting converter constructed over the derived type. + // Finally we add a custom predicate to be able to match any type which values are assignable to IPAddress. + mappings.AddType(DataTypeNames.Inet, + CreateInfo, + mapping => mapping with + { + MatchRequirement = MatchRequirement.Single, + TypeMatchPredicate = type => type is null || typeof(IPAddress).IsAssignableFrom(type) + }); + mappings.AddStructType(DataTypeNames.Inet, + static (options, mapping, _) => mapping.CreateInfo(options, new NpgsqlInetConverter())); + + // cidr + mappings.AddStructType(DataTypeNames.Cidr, + static (options, mapping, _) => mapping.CreateInfo(options, new NpgsqlCidrConverter()), isDefault: true); + + // Code is split out to a local method as suppression attributes on lambdas aren't properly handled by the ILLink analyzer yet. 
+ [UnconditionalSuppressMessage("AotAnalysis", "IL3050", + Justification = "MakeGenericType is safe because the target will only ever be a reference type.")] + static PgTypeInfo CreateInfo(PgSerializerOptions options, TypeInfoMapping resolvedMapping, bool _) + { + var derivedType = resolvedMapping.Type != typeof(IPAddress); + PgConverter converter = new IPAddressConverter(); + if (derivedType) + // There is not much more we can do, the deriving type IPAddress+ReadOnlyIPAddress isn't public. + converter = (PgConverter)Activator.CreateInstance(typeof(CastingConverter<>).MakeGenericType(resolvedMapping.Type), + converter)!; + + return resolvedMapping.CreateInfo(options, converter); + } + + return mappings; + } + } + + sealed class ArrayResolver : Resolver, IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + // macaddr + mappings.AddArrayType(DataTypeNames.MacAddr); + mappings.AddArrayType(DataTypeNames.MacAddr8); + + // inet + mappings.AddArrayType(DataTypeNames.Inet); + mappings.AddStructArrayType(DataTypeNames.Inet); + + // cidr + mappings.AddStructArrayType(DataTypeNames.Cidr); + + return mappings; + } + } +} diff --git a/src/Npgsql/Internal/ResolverFactories/RecordTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/RecordTypeInfoResolverFactory.cs new file mode 100644 index 0000000000..b6f143be03 --- /dev/null +++ b/src/Npgsql/Internal/ResolverFactories/RecordTypeInfoResolverFactory.cs @@ -0,0 +1,59 @@ +using System; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; +using Npgsql.Properties; + +namespace Npgsql.Internal.ResolverFactories; + +sealed class RecordTypeInfoResolverFactory : 
PgTypeInfoResolverFactory +{ + public override IPgTypeInfoResolver CreateResolver() => new Resolver(); + public override IPgTypeInfoResolver CreateArrayResolver() => new ArrayResolver(); + + public static void CheckUnsupported(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + { + if (type != typeof(object) && dataTypeName == DataTypeNames.Record) + { + throw new NotSupportedException( + string.Format( + NpgsqlStrings.RecordsNotEnabled, + nameof(INpgsqlTypeMapperExtensions.EnableRecordsAsTuples), + typeof(TBuilder).Name, + nameof(NpgsqlSlimDataSourceBuilder.EnableRecords))); + } + } + + class Resolver : IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new()); + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + mappings.AddType(DataTypeNames.Record, static (options, mapping, _) => + mapping.CreateInfo(options, new RecordConverter(options), supportsWriting: false), + MatchRequirement.DataTypeName); + + return mappings; + } + } + + sealed class ArrayResolver : Resolver, IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + mappings.AddArrayType(DataTypeNames.Record); + + return mappings; + } + } +} diff --git a/src/Npgsql/Internal/ResolverFactories/TupledRecordTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/TupledRecordTypeInfoResolverFactory.cs new file mode 100644 index 0000000000..1265dfc432 --- /dev/null +++ b/src/Npgsql/Internal/ResolverFactories/TupledRecordTypeInfoResolverFactory.cs @@ -0,0 +1,103 @@ +using System; +using System.Diagnostics.CodeAnalysis; +using System.Reflection; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Internal.ResolverFactories; + +[RequiresUnreferencedCode("Tupled record resolver may perform reflection on trimmed tuple types.")] +[RequiresDynamicCode("Tupled records need to construct a generic converter for a statically unknown (value)tuple type.")] +sealed class TupledRecordTypeInfoResolverFactory : PgTypeInfoResolverFactory +{ + public override IPgTypeInfoResolver CreateResolver() => new Resolver(); + public override IPgTypeInfoResolver CreateArrayResolver() => new ArrayResolver(); + + [RequiresUnreferencedCode("Tupled record resolver may perform reflection on trimmed tuple types.")] + [RequiresDynamicCode("Tupled records need to construct a generic converter for a statically unknown (value)tuple type.")] + class Resolver : IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new()); + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + // Stand-in type, type match predicate does the actual work. 
+ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + mappings.AddType>(DataTypeNames.Record, Factory, + mapping => mapping with + { + MatchRequirement = MatchRequirement.DataTypeName, + TypeMatchPredicate = type => type is { IsConstructedGenericType: true, FullName: not null } + && type.FullName.StartsWith("System.Tuple", StringComparison.Ordinal) + }); + + mappings.AddStructType>(DataTypeNames.Record, Factory, + mapping => mapping with + { + MatchRequirement = MatchRequirement.DataTypeName, + TypeMatchPredicate = type => type is { IsConstructedGenericType: true, FullName: not null } + && type.FullName.StartsWith("System.ValueTuple", StringComparison.Ordinal) + }); + + return mappings; + } + + static readonly TypeInfoFactory Factory = static (options, mapping, _) => + { + var constructors = mapping.Type.GetConstructors(); + ConstructorInfo? constructor = null; + if (constructors.Length is 1) + constructor = constructors[0]; + else + { + var args = mapping.Type.GenericTypeArguments.Length; + foreach (var ctor in constructors) + if (ctor.GetParameters().Length == args) + { + constructor = ctor; + break; + } + } + + if (constructor is null) + throw new InvalidOperationException($"Couldn't find a suitable constructor for record type: {mapping.Type.FullName}"); + + var factory = typeof(Resolver).GetMethod(nameof(CreateFactory), BindingFlags.Static | BindingFlags.NonPublic)! 
+ .MakeGenericMethod(mapping.Type) + .Invoke(null, new object[] { constructor, constructor.GetParameters().Length }); + + var converterType = typeof(RecordConverter<>).MakeGenericType(mapping.Type); + var converter = (PgConverter)Activator.CreateInstance(converterType, options, factory)!; + return mapping.CreateInfo(options, converter, supportsWriting: false); + }; + + static Func CreateFactory(ConstructorInfo constructor, int constructorParameters) => array => + { + if (array.Length != constructorParameters) + throw new InvalidCastException($"Cannot read record type with {array.Length} fields as {typeof(T)}"); + return (T)constructor.Invoke(array); + }; + } + + [RequiresUnreferencedCode("Tupled record resolver may perform reflection on trimmed tuple types.")] + [RequiresDynamicCode("Tupled records need to construct a generic converter for a statically unknown (value)tuple type.")] + sealed class ArrayResolver : Resolver, IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + mappings.AddArrayType>(DataTypeNames.Record, suppressObjectMapping: true); + mappings.AddStructArrayType>(DataTypeNames.Record, suppressObjectMapping: true); + + return mappings; + } + } +} diff --git a/src/Npgsql/Internal/ResolverFactories/UnmappedTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/UnmappedTypeInfoResolverFactory.cs new file mode 100644 index 0000000000..1fc961e556 --- /dev/null +++ b/src/Npgsql/Internal/ResolverFactories/UnmappedTypeInfoResolverFactory.cs @@ -0,0 +1,169 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; +using System.Reflection; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; +using Npgsql.PostgresTypes; +using NpgsqlTypes; + +namespace Npgsql.Internal.ResolverFactories; + +[RequiresUnreferencedCode("The use of unmapped enums, ranges or multiranges requires reflection usage which is incompatible with trimming.")] +[RequiresDynamicCode("The use of unmapped enums, ranges or multiranges requires dynamic code usage which is incompatible with NativeAOT.")] +sealed partial class UnmappedTypeInfoResolverFactory : PgTypeInfoResolverFactory +{ + public override IPgTypeInfoResolver CreateResolver() => new EnumResolver(); + public override IPgTypeInfoResolver CreateArrayResolver() => new EnumArrayResolver(); + + public override IPgTypeInfoResolver CreateRangeResolver() => new RangeResolver(); + public override IPgTypeInfoResolver CreateRangeArrayResolver() => new RangeArrayResolver(); + + public override IPgTypeInfoResolver? CreateMultirangeResolver() => new MultirangeResolver(); + public override IPgTypeInfoResolver? 
CreateMultirangeArrayResolver() => new MultirangeArrayResolver(); + + [RequiresUnreferencedCode("The use of unmapped enums, ranges or multiranges requires reflection usage which is incompatible with trimming.")] + [RequiresDynamicCode("The use of unmapped enums, ranges or multiranges requires dynamic code usage which is incompatible with NativeAOT.")] + class EnumResolver : DynamicTypeInfoResolver + { + protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) + { + if (type is null || !IsTypeOrNullableOfType(type, static type => type.IsEnum, out var matchedType) || options.DatabaseInfo.GetPostgresType(dataTypeName) is not PostgresEnumType) + return null; + + return CreateCollection().AddMapping(matchedType, dataTypeName, static (options, mapping, _) => + { + var enumToLabel = new Dictionary(); + var labelToEnum = new Dictionary(); + foreach (var field in mapping.Type.GetFields(BindingFlags.Static | BindingFlags.Public)) + { + var attribute = (PgNameAttribute?)field.GetCustomAttribute(typeof(PgNameAttribute), false); + var enumName = attribute?.PgName ?? options.DefaultNameTranslator.TranslateMemberName(field.Name); + var enumValue = (Enum)field.GetValue(null)!; + + enumToLabel[enumValue] = enumName; + labelToEnum[enumName] = enumValue; + } + + return mapping.CreateInfo(options, (PgConverter)Activator.CreateInstance(typeof(EnumConverter<>).MakeGenericType(mapping.Type), + enumToLabel, labelToEnum, + options.TextEncoding)!); + }); + } + } + + [RequiresUnreferencedCode("The use of unmapped enums, ranges or multiranges requires reflection usage which is incompatible with trimming.")] + [RequiresDynamicCode("The use of unmapped enums, ranges or multiranges requires dynamic code usage which is incompatible with NativeAOT.")] + sealed class EnumArrayResolver : EnumResolver + { + protected override DynamicMappingCollection? GetMappings(Type? 
type, DataTypeName dataTypeName, PgSerializerOptions options) + => type is not null && IsArrayLikeType(type, out var elementType) && IsArrayDataTypeName(dataTypeName, options, out var elementDataTypeName) + ? base.GetMappings(elementType, elementDataTypeName, options)?.AddArrayMapping(elementType, elementDataTypeName) + : null; + } + + [RequiresUnreferencedCode("The use of unmapped enums, ranges or multiranges requires reflection usage which is incompatible with trimming.")] + [RequiresDynamicCode("The use of unmapped enums, ranges or multiranges requires dynamic code usage which is incompatible with NativeAOT.")] + class RangeResolver : DynamicTypeInfoResolver + { + protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) + { + var matchedType = type; + if (type is not null && !IsTypeOrNullableOfType(type, + static type => type.IsConstructedGenericType && type.GetGenericTypeDefinition() == typeof(NpgsqlRange<>), + out matchedType) + || options.DatabaseInfo.GetPostgresType(dataTypeName) is not PostgresRangeType rangeType) + return null; + + var subInfo = + matchedType is null + ? options.GetDefaultTypeInfo(rangeType.Subtype) + // Input matchedType here as we don't want an NpgsqlRange over Nullable (it has its own nullability tracking, for better or worse) + : options.GetTypeInfo(matchedType.GetGenericArguments()[0], rangeType.Subtype); + + // We have no generic RangeConverterResolver so we would not know how to compose a range mapping for such infos. 
+ // See https://github.com/npgsql/npgsql/issues/5268 + if (subInfo is not { IsResolverInfo: false }) + return null; + + subInfo = subInfo.ToNonBoxing(); + + matchedType ??= typeof(NpgsqlRange<>).MakeGenericType(subInfo.Type); + + return CreateCollection().AddMapping(matchedType, dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, + (PgConverter)Activator.CreateInstance(typeof(RangeConverter<>).MakeGenericType(subInfo.Type), + subInfo.GetResolution().Converter)!, + preferredFormat: subInfo.PreferredFormat, supportsWriting: subInfo.SupportsWriting), + mapping => mapping with { MatchRequirement = MatchRequirement.Single }); + } + } + + [RequiresUnreferencedCode("The use of unmapped enums, ranges or multiranges requires reflection usage which is incompatible with trimming.")] + [RequiresDynamicCode("The use of unmapped enums, ranges or multiranges requires dynamic code usage which is incompatible with NativeAOT.")] + sealed class RangeArrayResolver : RangeResolver + { + protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) + { + Type? elementType = null; + if (!((type is null || IsArrayLikeType(type, out elementType)) && + IsArrayDataTypeName(dataTypeName, options, out var elementDataTypeName))) + return null; + + var mappings = base.GetMappings(elementType, elementDataTypeName, options); + elementType ??= mappings?.Find(null, elementDataTypeName, options)?.Type; // Try to get the default mapping. + return elementType is null ? 
null : mappings?.AddArrayMapping(elementType, elementDataTypeName); + } + } + + [RequiresUnreferencedCode("The use of unmapped enums, ranges or multiranges requires reflection usage which is incompatible with trimming.")] + [RequiresDynamicCode("The use of unmapped enums, ranges or multiranges requires dynamic code usage which is incompatible with NativeAOT.")] + class MultirangeResolver : DynamicTypeInfoResolver + { + protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) + { + Type? elementType = null; + if (type is not null && !IsArrayLikeType(type, out elementType) + || elementType is not null && !IsTypeOrNullableOfType(elementType, + static type => type.IsConstructedGenericType && type.GetGenericTypeDefinition() == typeof(NpgsqlRange<>), out _) + || options.DatabaseInfo.GetPostgresType(dataTypeName) is not PostgresMultirangeType multirangeType) + return null; + + var subInfo = + elementType is null + ? options.GetDefaultTypeInfo(multirangeType.Subrange) + : options.GetTypeInfo(elementType, multirangeType.Subrange); + + // We have no generic MultirangeConverterResolver so we would not know how to compose a range mapping for such infos. 
+ // See https://github.com/npgsql/npgsql/issues/5268 + if (subInfo is not { IsResolverInfo: false }) + return null; + + subInfo = subInfo.ToNonBoxing(); + + type ??= subInfo.Type.MakeArrayType(); + + return CreateCollection().AddMapping(type, dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, + (PgConverter)Activator.CreateInstance(typeof(MultirangeConverter<,>).MakeGenericType(type, subInfo.Type), subInfo.GetResolution().Converter)!, + preferredFormat: subInfo.PreferredFormat, supportsWriting: subInfo.SupportsWriting), + mapping => mapping with { MatchRequirement = MatchRequirement.Single }); + } + } + + [RequiresUnreferencedCode("The use of unmapped enums, ranges or multiranges requires reflection usage which is incompatible with trimming.")] + [RequiresDynamicCode("The use of unmapped enums, ranges or multiranges requires dynamic code usage which is incompatible with NativeAOT.")] + sealed class MultirangeArrayResolver : MultirangeResolver + { + protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) + { + Type? elementType = null; + if (!((type is null || IsArrayLikeType(type, out elementType)) && IsArrayDataTypeName(dataTypeName, options, out var elementDataTypeName))) + return null; + + var mappings = base.GetMappings(elementType, elementDataTypeName, options); + elementType ??= mappings?.Find(null, elementDataTypeName, options)?.Type; // Try to get the default mapping. + return elementType is null ? 
null : mappings?.AddArrayMapping(elementType, elementDataTypeName); + } + } +} diff --git a/src/Npgsql/Internal/Resolvers/UnsupportedTypeInfoResolver.cs b/src/Npgsql/Internal/ResolverFactories/UnsupportedTypeInfoResolver.cs similarity index 83% rename from src/Npgsql/Internal/Resolvers/UnsupportedTypeInfoResolver.cs rename to src/Npgsql/Internal/ResolverFactories/UnsupportedTypeInfoResolver.cs index 73d783f8fa..7f5755963b 100644 --- a/src/Npgsql/Internal/Resolvers/UnsupportedTypeInfoResolver.cs +++ b/src/Npgsql/Internal/ResolverFactories/UnsupportedTypeInfoResolver.cs @@ -4,7 +4,7 @@ using Npgsql.PostgresTypes; using Npgsql.Properties; -namespace Npgsql.Internal.Resolvers; +namespace Npgsql.Internal.ResolverFactories; sealed class UnsupportedTypeInfoResolver : IPgTypeInfoResolver { @@ -13,10 +13,11 @@ sealed class UnsupportedTypeInfoResolver : IPgTypeInfoResolver if (options.IntrospectionMode) return null; - RecordTypeInfoResolver.CheckUnsupported(type, dataTypeName, options); - RangeTypeInfoResolver.ThrowIfUnsupported(type, dataTypeName, options); - FullTextSearchTypeInfoResolver.CheckUnsupported(type, dataTypeName, options); - LTreeTypeInfoResolver.CheckUnsupported(type, dataTypeName, options); + RecordTypeInfoResolverFactory.CheckUnsupported(type, dataTypeName, options); + AdoTypeInfoResolverFactory.ThrowIfRangeUnsupported(type, dataTypeName, options); + AdoTypeInfoResolverFactory.ThrowIfMultirangeUnsupported(type, dataTypeName, options); + FullTextSearchTypeInfoResolverFactory.CheckUnsupported(type, dataTypeName, options); + LTreeTypeInfoResolverFactory.CheckUnsupported(type, dataTypeName, options); if (type is null) return null; diff --git a/src/Npgsql/Internal/Resolvers/AdoTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/AdoTypeInfoResolver.cs deleted file mode 100644 index d459059d16..0000000000 --- a/src/Npgsql/Internal/Resolvers/AdoTypeInfoResolver.cs +++ /dev/null @@ -1,514 +0,0 @@ -using System; -using System.Collections; -using 
System.Collections.Generic; -using System.Collections.Specialized; -using System.IO; -using Npgsql.Internal.Converters; -using Npgsql.Internal.Converters.Internal; -using Npgsql.Internal.Postgres; -using Npgsql.PostgresTypes; -using Npgsql.Util; -using NpgsqlTypes; - -namespace Npgsql.Internal.Resolvers; - -// Baseline types that are always supported. -class AdoTypeInfoResolver : IPgTypeInfoResolver -{ - public static AdoTypeInfoResolver Instance { get; } = new(); - - TypeInfoMappingCollection? _mappings; - protected TypeInfoMappingCollection Mappings => _mappings ??= AddInfos(new()); - - public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) - { - var info = Mappings.Find(type, dataTypeName, options); - if (info is null && dataTypeName is not null) - info = GetEnumTypeInfo(type, dataTypeName.GetValueOrDefault(), options); - - return info; - } - - protected static PgTypeInfo? GetEnumTypeInfo(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) - { - if (type is not null && type != typeof(string)) - return null; - - if (options.DatabaseInfo.GetPostgresType(dataTypeName) is not PostgresEnumType) - return null; - - return new PgTypeInfo(options, new StringTextConverter(options.TextEncoding), dataTypeName); - } - - static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings) - { - // Bool - mappings.AddStructType(DataTypeNames.Bool, - static (options, mapping, _) => mapping.CreateInfo(options, new BoolConverter()), isDefault: true); - - // Numeric - mappings.AddStructType(DataTypeNames.Int2, - static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter()), isDefault: true); - mappings.AddStructType(DataTypeNames.Int4, - static (options, mapping, _) => mapping.CreateInfo(options, new Int4Converter()), isDefault: true); - mappings.AddStructType(DataTypeNames.Int8, - static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter()), isDefault: true); - 
mappings.AddStructType(DataTypeNames.Float4, - static (options, mapping, _) => mapping.CreateInfo(options, new RealConverter()), isDefault: true); - mappings.AddStructType(DataTypeNames.Float8, - static (options, mapping, _) => mapping.CreateInfo(options, new DoubleConverter()), isDefault: true); - mappings.AddStructType(DataTypeNames.Numeric, - static (options, mapping, _) => mapping.CreateInfo(options, new DecimalNumericConverter()), isDefault: true); - mappings.AddStructType(DataTypeNames.Money, - static (options, mapping, _) => mapping.CreateInfo(options, new MoneyConverter()), MatchRequirement.DataTypeName); - - // Text - mappings.AddType(DataTypeNames.Text, - static (options, mapping, _) => mapping.CreateInfo(options, new StringTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text), isDefault: true); - mappings.AddStructType(DataTypeNames.Text, - static (options, mapping, _) => mapping.CreateInfo(options, new CharTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text)); - // Uses the bytea converters, as neither type has a header. - mappings.AddType(DataTypeNames.Text, - static (options, mapping, _) => mapping.CreateInfo(options, new ArrayByteaConverter()), - MatchRequirement.DataTypeName); - mappings.AddStructType>(DataTypeNames.Text, - static (options, mapping, _) => mapping.CreateInfo(options, new ReadOnlyMemoryByteaConverter()), - MatchRequirement.DataTypeName); - mappings.AddType(DataTypeNames.Text, - static (options, mapping, _) => new PgTypeInfo(options, new StreamByteaConverter(), new DataTypeName(mapping.DataTypeName), unboxedType: mapping.Type != typeof(Stream) ? mapping.Type : null), - mapping => mapping with { MatchRequirement = MatchRequirement.DataTypeName, TypeMatchPredicate = type => typeof(Stream).IsAssignableFrom(type) }); - //Special mappings, these have no corresponding array mapping. 
- mappings.AddType(DataTypeNames.Text, - static (options, mapping, _) => mapping.CreateInfo(options, new TextReaderTextConverter(options.TextEncoding), supportsWriting: false, preferredFormat: DataFormat.Text), - MatchRequirement.DataTypeName); - mappings.AddStructType(DataTypeNames.Text, - static (options, mapping, _) => mapping.CreateInfo(options, new GetCharsTextConverter(options.TextEncoding), supportsWriting: false, preferredFormat: DataFormat.Text), - MatchRequirement.DataTypeName); - - // Alternative text types - foreach(var dataTypeName in new[] { "citext", DataTypeNames.Varchar, - DataTypeNames.Bpchar, DataTypeNames.Json, - DataTypeNames.Xml, DataTypeNames.Name, DataTypeNames.RefCursor }) - { - mappings.AddType(dataTypeName, - static (options, mapping, _) => mapping.CreateInfo(options, new StringTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text), isDefault: true); - mappings.AddStructType(dataTypeName, - static (options, mapping, _) => mapping.CreateInfo(options, new CharTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text)); - // Uses the bytea converters, as neither type has a header. - mappings.AddType(dataTypeName, - static (options, mapping, _) => mapping.CreateInfo(options, new ArrayByteaConverter()), - MatchRequirement.DataTypeName); - mappings.AddStructType>(dataTypeName, - static (options, mapping, _) => mapping.CreateInfo(options, new ReadOnlyMemoryByteaConverter()), - MatchRequirement.DataTypeName); - mappings.AddType(dataTypeName, - static (options, mapping, _) => new PgTypeInfo(options, new StreamByteaConverter(), new DataTypeName(mapping.DataTypeName), unboxedType: mapping.Type != typeof(Stream) ? mapping.Type : null), - mapping => mapping with { MatchRequirement = MatchRequirement.DataTypeName, TypeMatchPredicate = type => typeof(Stream).IsAssignableFrom(type) }); - //Special mappings, these have no corresponding array mapping. 
- mappings.AddType(dataTypeName, - static (options, mapping, _) => mapping.CreateInfo(options, new TextReaderTextConverter(options.TextEncoding), supportsWriting: false, preferredFormat: DataFormat.Text), - MatchRequirement.DataTypeName); - mappings.AddStructType(dataTypeName, - static (options, mapping, _) => mapping.CreateInfo(options, new GetCharsTextConverter(options.TextEncoding), supportsWriting: false, preferredFormat: DataFormat.Text), - MatchRequirement.DataTypeName); - } - - // Jsonb - const byte jsonbVersion = 1; - mappings.AddType(DataTypeNames.Jsonb, - static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new StringTextConverter(options.TextEncoding))), isDefault: true); - mappings.AddStructType(DataTypeNames.Jsonb, - static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new CharTextConverter(options.TextEncoding)))); - mappings.AddType(DataTypeNames.Jsonb, - static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new ArrayByteaConverter())), - MatchRequirement.DataTypeName); - mappings.AddStructType>(DataTypeNames.Jsonb, - static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter>(jsonbVersion, new ReadOnlyMemoryByteaConverter())), - MatchRequirement.DataTypeName); - mappings.AddType(DataTypeNames.Jsonb, - static (options, mapping, _) => new PgTypeInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new StreamByteaConverter()), new DataTypeName(mapping.DataTypeName), unboxedType: mapping.Type != typeof(Stream) ? mapping.Type : null), - mapping => mapping with { MatchRequirement = MatchRequirement.DataTypeName, TypeMatchPredicate = type => typeof(Stream).IsAssignableFrom(type) }); - //Special mappings, these have no corresponding array mapping. 
- mappings.AddType(DataTypeNames.Jsonb, - static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new TextReaderTextConverter(options.TextEncoding)), supportsWriting: false, preferredFormat: DataFormat.Text), - MatchRequirement.DataTypeName); - mappings.AddStructType(DataTypeNames.Jsonb, - static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new GetCharsTextConverter(options.TextEncoding)), supportsWriting: false, preferredFormat: DataFormat.Text), - MatchRequirement.DataTypeName); - - // Jsonpath - const byte jsonpathVersion = 1; - mappings.AddType(DataTypeNames.Jsonpath, - static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonpathVersion, new StringTextConverter(options.TextEncoding))), isDefault: true); - //Special mappings, these have no corresponding array mapping. - mappings.AddType(DataTypeNames.Jsonpath, - static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonpathVersion, new TextReaderTextConverter(options.TextEncoding)), supportsWriting: false, preferredFormat: DataFormat.Text), - MatchRequirement.DataTypeName); - mappings.AddStructType(DataTypeNames.Jsonpath, - static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonpathVersion, new GetCharsTextConverter(options.TextEncoding)), supportsWriting: false, preferredFormat: DataFormat.Text), - MatchRequirement.DataTypeName); - - // Bytea - mappings.AddType(DataTypeNames.Bytea, - static (options, mapping, _) => mapping.CreateInfo(options, new ArrayByteaConverter()), isDefault: true); - mappings.AddStructType>(DataTypeNames.Bytea, - static (options, mapping, _) => mapping.CreateInfo(options, new ReadOnlyMemoryByteaConverter())); - mappings.AddType(DataTypeNames.Bytea, - static (options, mapping, _) => new PgTypeInfo(options, new StreamByteaConverter(), new 
DataTypeName(mapping.DataTypeName), unboxedType: mapping.Type != typeof(Stream) ? mapping.Type : null), - mapping => mapping with { TypeMatchPredicate = type => typeof(Stream).IsAssignableFrom(type) }); - - // Varbit - mappings.AddType(DataTypeNames.Varbit, - static (options, mapping, _) => mapping.CreateInfo(options, - new PolymorphicBitStringConverterResolver(options.GetCanonicalTypeId(DataTypeNames.Varbit)), supportsWriting: false)); - mappings.AddType(DataTypeNames.Varbit, - static (options, mapping, _) => mapping.CreateInfo(options, new BitArrayBitStringConverter()), isDefault: true); - mappings.AddStructType(DataTypeNames.Varbit, - static (options, mapping, _) => mapping.CreateInfo(options, new BoolBitStringConverter())); - mappings.AddStructType(DataTypeNames.Varbit, - static (options, mapping, _) => mapping.CreateInfo(options, new BitVector32BitStringConverter())); - - // Bit - mappings.AddType(DataTypeNames.Bit, - static (options, mapping, _) => mapping.CreateInfo(options, - new PolymorphicBitStringConverterResolver(options.GetCanonicalTypeId(DataTypeNames.Bit)), supportsWriting: false)); - mappings.AddType(DataTypeNames.Bit, - static (options, mapping, _) => mapping.CreateInfo(options, new BitArrayBitStringConverter()), isDefault: true); - mappings.AddStructType(DataTypeNames.Bit, - static (options, mapping, _) => mapping.CreateInfo(options, new BoolBitStringConverter())); - mappings.AddStructType(DataTypeNames.Bit, - static (options, mapping, _) => mapping.CreateInfo(options, new BitVector32BitStringConverter())); - - // Timestamp - if (Statics.LegacyTimestampBehavior) - { - mappings.AddStructType(DataTypeNames.Timestamp, - static (options, mapping, _) => mapping.CreateInfo(options, - new LegacyDateTimeConverter(options.EnableDateTimeInfinityConversions, timestamp: true)), isDefault: true); - } - else - { - mappings.AddResolverStructType(DataTypeNames.Timestamp, - static (options, mapping, dataTypeNameMatch) => mapping.CreateInfo(options, - 
DateTimeConverterResolver.CreateResolver(options, options.GetCanonicalTypeId(DataTypeNames.TimestampTz), options.GetCanonicalTypeId(DataTypeNames.Timestamp), - options.EnableDateTimeInfinityConversions), dataTypeNameMatch), isDefault: true); - } - mappings.AddStructType(DataTypeNames.Timestamp, - static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); - - // TimestampTz - if (Statics.LegacyTimestampBehavior) - { - mappings.AddStructType(DataTypeNames.TimestampTz, - static (options, mapping, _) => mapping.CreateInfo(options, - new LegacyDateTimeConverter(options.EnableDateTimeInfinityConversions, timestamp: false)), matchRequirement: MatchRequirement.DataTypeName); - mappings.AddStructType(DataTypeNames.TimestampTz, - static (options, mapping, _) => mapping.CreateInfo(options, new LegacyDateTimeOffsetConverter(options.EnableDateTimeInfinityConversions))); - } - else - { - mappings.AddResolverStructType(DataTypeNames.TimestampTz, - static (options, mapping, dataTypeNameMatch) => mapping.CreateInfo(options, - DateTimeConverterResolver.CreateResolver(options, options.GetCanonicalTypeId(DataTypeNames.TimestampTz), options.GetCanonicalTypeId(DataTypeNames.Timestamp), - options.EnableDateTimeInfinityConversions), dataTypeNameMatch), isDefault: true); - mappings.AddStructType(DataTypeNames.TimestampTz, - static (options, mapping, _) => mapping.CreateInfo(options, new DateTimeOffsetConverter(options.EnableDateTimeInfinityConversions))); - } - mappings.AddStructType(DataTypeNames.TimestampTz, - static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); - - // Date - mappings.AddStructType(DataTypeNames.Date, - static (options, mapping, _) => - mapping.CreateInfo(options, new DateTimeDateConverter(options.EnableDateTimeInfinityConversions)), - MatchRequirement.DataTypeName); - mappings.AddStructType(DataTypeNames.Date, - static (options, mapping, _) => mapping.CreateInfo(options, new Int4Converter())); -#if NET6_0_OR_GREATER 
- mappings.AddStructType(DataTypeNames.Date, - static (options, mapping, _) => mapping.CreateInfo(options, new DateOnlyDateConverter(options.EnableDateTimeInfinityConversions))); -#endif - - // Interval - mappings.AddStructType(DataTypeNames.Interval, - static (options, mapping, _) => mapping.CreateInfo(options, new TimeSpanIntervalConverter()), isDefault: true); - mappings.AddStructType(DataTypeNames.Interval, - static (options, mapping, _) => mapping.CreateInfo(options, new NpgsqlIntervalConverter())); - - // Time - mappings.AddStructType(DataTypeNames.Time, - static (options, mapping, _) => mapping.CreateInfo(options, new TimeSpanTimeConverter()), isDefault: true); - mappings.AddStructType(DataTypeNames.Time, - static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); -#if NET6_0_OR_GREATER - mappings.AddStructType(DataTypeNames.Time, - static (options, mapping, _) => mapping.CreateInfo(options, new TimeOnlyTimeConverter())); -#endif - - // TimeTz - mappings.AddStructType(DataTypeNames.TimeTz, - static (options, mapping, _) => mapping.CreateInfo(options, new DateTimeOffsetTimeTzConverter()), - MatchRequirement.DataTypeName); - - // Uuid - mappings.AddStructType(DataTypeNames.Uuid, - static (options, mapping, _) => mapping.CreateInfo(options, new GuidUuidConverter()), isDefault: true); - - // Hstore - mappings.AddType>("hstore", - static (options, mapping, _) => mapping.CreateInfo(options, new HstoreConverter>(options.TextEncoding)), isDefault: true); - mappings.AddType>("hstore", - static (options, mapping, _) => mapping.CreateInfo(options, new HstoreConverter>(options.TextEncoding))); - - // Unknown - mappings.AddType(DataTypeNames.Unknown, - static (options, mapping, _) => mapping.CreateInfo(options, new StringTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text), - MatchRequirement.DataTypeName); - - // Void - mappings.AddType(DataTypeNames.Void, - static (options, mapping, _) => mapping.CreateInfo(options, new 
VoidConverter(), supportsWriting: false), - MatchRequirement.DataTypeName); - - // UInt internal types - foreach (var dataTypeName in new[] { DataTypeNames.Oid, DataTypeNames.Xid, DataTypeNames.Cid, DataTypeNames.RegType, DataTypeNames.RegConfig }) - { - mappings.AddStructType(dataTypeName, - static (options, mapping, _) => mapping.CreateInfo(options, new UInt32Converter()), - MatchRequirement.DataTypeName); - } - - // Char - mappings.AddStructType(DataTypeNames.Char, - static (options, mapping, _) => mapping.CreateInfo(options, new InternalCharConverter()), - MatchRequirement.DataTypeName); - mappings.AddStructType(DataTypeNames.Char, - static (options, mapping, _) => mapping.CreateInfo(options, new InternalCharConverter())); - - // Xid8 - mappings.AddStructType(DataTypeNames.Xid8, - static (options, mapping, _) => mapping.CreateInfo(options, new UInt64Converter()), - MatchRequirement.DataTypeName); - - // Oidvector - mappings.AddType( - DataTypeNames.OidVector, - static (options, mapping, _) => mapping.CreateInfo(options, - new ArrayBasedArrayConverter(new(new UInt32Converter(), new PgTypeId(DataTypeNames.Oid)), pgLowerBound: 0)), - MatchRequirement.DataTypeName); - - // Int2vector - mappings.AddType( - DataTypeNames.Int2Vector, - static (options, mapping, _) => mapping.CreateInfo(options, - new ArrayBasedArrayConverter(new(new Int2Converter(), new PgTypeId(DataTypeNames.Int2)), pgLowerBound: 0)), - MatchRequirement.DataTypeName); - - // Tid - mappings.AddStructType(DataTypeNames.Tid, - static (options, mapping, _) => mapping.CreateInfo(options, new TidConverter()), - MatchRequirement.DataTypeName); - - // PgLsn - mappings.AddStructType(DataTypeNames.PgLsn, - static (options, mapping, _) => mapping.CreateInfo(options, new PgLsnConverter()), - MatchRequirement.DataTypeName); - mappings.AddStructType(DataTypeNames.PgLsn, - static (options, mapping, _) => mapping.CreateInfo(options, new UInt64Converter())); - - return mappings; - } - - protected static 
TypeInfoMappingCollection AddArrayInfos(TypeInfoMappingCollection mappings) - { - // Bool - mappings.AddStructArrayType(DataTypeNames.Bool); - - // Numeric - mappings.AddStructArrayType(DataTypeNames.Int2); - mappings.AddStructArrayType(DataTypeNames.Int4); - mappings.AddStructArrayType(DataTypeNames.Int8); - mappings.AddStructArrayType(DataTypeNames.Float4); - mappings.AddStructArrayType(DataTypeNames.Float8); - mappings.AddStructArrayType(DataTypeNames.Numeric); - mappings.AddStructArrayType(DataTypeNames.Money); - - // Text - mappings.AddArrayType(DataTypeNames.Text); - mappings.AddStructArrayType(DataTypeNames.Text); - mappings.AddArrayType(DataTypeNames.Text); - mappings.AddStructArrayType>(DataTypeNames.Text); - mappings.AddArrayType(DataTypeNames.Text); - - // Alternative text types - foreach(var dataTypeName in new[] { "citext", DataTypeNames.Varchar, - DataTypeNames.Bpchar, DataTypeNames.Json, - DataTypeNames.Xml, DataTypeNames.Name, DataTypeNames.RefCursor }) - { - mappings.AddArrayType(dataTypeName); - mappings.AddStructArrayType(dataTypeName); - mappings.AddArrayType(dataTypeName); - mappings.AddStructArrayType>(dataTypeName); - mappings.AddArrayType(dataTypeName); - } - - // Jsonb - mappings.AddArrayType(DataTypeNames.Jsonb); - mappings.AddStructArrayType(DataTypeNames.Jsonb); - mappings.AddArrayType(DataTypeNames.Jsonb); - mappings.AddStructArrayType>(DataTypeNames.Jsonb); - mappings.AddArrayType(DataTypeNames.Jsonb); - - // Jsonpath - mappings.AddArrayType(DataTypeNames.Jsonpath); - - // Bytea - mappings.AddArrayType(DataTypeNames.Bytea); - mappings.AddStructArrayType>(DataTypeNames.Bytea); - mappings.AddArrayType(DataTypeNames.Bytea); - - // Varbit - // Object mapping first. 
- mappings.AddPolymorphicResolverArrayType(DataTypeNames.Varbit, static options => resolution => resolution.Converter switch - { - BoolBitStringConverter => PgConverterFactory.CreatePolymorphicArrayConverter( - () => new ArrayBasedArrayConverter(resolution, typeof(Array)), - () => new ArrayBasedArrayConverter(new(new NullableConverter(resolution.GetConverter()), resolution.PgTypeId), typeof(Array)), - options), - BitArrayBitStringConverter => new ArrayBasedArrayConverter(resolution, typeof(Array)), - _ => throw new NotSupportedException() - }); - mappings.AddArrayType(DataTypeNames.Varbit); - mappings.AddStructArrayType(DataTypeNames.Varbit); - mappings.AddStructArrayType(DataTypeNames.Varbit); - - // Bit - // Object mapping first. - mappings.AddPolymorphicResolverArrayType(DataTypeNames.Bit, static options => resolution => resolution.Converter switch - { - BoolBitStringConverter => PgConverterFactory.CreatePolymorphicArrayConverter( - () => new ArrayBasedArrayConverter(resolution, typeof(Array)), - () => new ArrayBasedArrayConverter(new(new NullableConverter(resolution.GetConverter()), resolution.PgTypeId), typeof(Array)), - options), - BitArrayBitStringConverter => new ArrayBasedArrayConverter(resolution, typeof(Array)), - _ => throw new NotSupportedException() - }); - mappings.AddArrayType(DataTypeNames.Bit); - mappings.AddStructArrayType(DataTypeNames.Bit); - mappings.AddStructArrayType(DataTypeNames.Bit); - - // Timestamp - if (Statics.LegacyTimestampBehavior) - mappings.AddStructArrayType(DataTypeNames.Timestamp); - else - mappings.AddResolverStructArrayType(DataTypeNames.Timestamp); - mappings.AddStructArrayType(DataTypeNames.Timestamp); - - // TimestampTz - if (Statics.LegacyTimestampBehavior) - mappings.AddStructArrayType(DataTypeNames.TimestampTz); - else - mappings.AddResolverStructArrayType(DataTypeNames.TimestampTz); - mappings.AddStructArrayType(DataTypeNames.TimestampTz); - mappings.AddStructArrayType(DataTypeNames.TimestampTz); - - // Date - 
mappings.AddStructArrayType(DataTypeNames.Date); - mappings.AddStructArrayType(DataTypeNames.Date); -#if NET6_0_OR_GREATER - mappings.AddStructArrayType(DataTypeNames.Date); -#endif - - // Interval - mappings.AddStructArrayType(DataTypeNames.Interval); - mappings.AddStructArrayType(DataTypeNames.Interval); - - // Time - mappings.AddStructArrayType(DataTypeNames.Time); - mappings.AddStructArrayType(DataTypeNames.Time); -#if NET6_0_OR_GREATER - mappings.AddStructArrayType(DataTypeNames.Time); -#endif - - // TimeTz - mappings.AddStructArrayType(DataTypeNames.TimeTz); - // Uuid - mappings.AddStructArrayType(DataTypeNames.Uuid); - - // Hstore - mappings.AddArrayType>("hstore"); - mappings.AddArrayType>("hstore"); - - // UInt internal types - foreach (var dataTypeName in new[] { DataTypeNames.Oid, DataTypeNames.Xid, DataTypeNames.Cid, DataTypeNames.RegType, (string)DataTypeNames.RegConfig }) - { - mappings.AddStructArrayType(dataTypeName); - } - - // Char - mappings.AddStructArrayType(DataTypeNames.Char); - mappings.AddStructArrayType(DataTypeNames.Char); - - // Xid8 - mappings.AddStructArrayType(DataTypeNames.Xid8); - - // Oidvector - mappings.AddArrayType(DataTypeNames.OidVector); - - // Int2vector - mappings.AddArrayType(DataTypeNames.Int2Vector); - - return mappings; - } -} - -sealed class AdoArrayTypeInfoResolver : AdoTypeInfoResolver, IPgTypeInfoResolver -{ - TypeInfoMappingCollection? _mappings; - new TypeInfoMappingCollection Mappings => _mappings ??= AddArrayInfos(new(base.Mappings)); - - public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) - { - var info = Mappings.Find(type, dataTypeName, options); - - Type? 
elementType = null; - if (info is null && dataTypeName is not null && - (type is null || type == typeof(object) || TypeInfoMappingCollection.IsArrayLikeType(type, out elementType)) - && options.DatabaseInfo.GetPostgresType(dataTypeName) is PostgresArrayType { Element: var pgElementType }) - { - info = GetEnumArrayTypeInfo(elementType, pgElementType, type, dataTypeName.GetValueOrDefault(), options) ?? - GetObjectArrayTypeInfo(elementType, pgElementType, type, dataTypeName.GetValueOrDefault(), options); - } - return info; - } - - static PgTypeInfo? GetObjectArrayTypeInfo(Type? elementType, PostgresType pgElementType, Type? type, DataTypeName dataTypeName, - PgSerializerOptions options) - { - if (elementType != typeof(object)) - return null; - - // Probe if there is any mapping at all for this element type. - var elementId = options.ToCanonicalTypeId(pgElementType); - if (options.GetDefaultTypeInfo(elementId) is null) - return null; - - var mappings = new TypeInfoMappingCollection(); - mappings.AddType(pgElementType.DataTypeName, - (options, mapping, _) => mapping.CreateInfo(options, new ObjectConverter(options, elementId)), MatchRequirement.DataTypeName); - mappings.AddArrayType(pgElementType.DataTypeName); - return mappings.Find(type, dataTypeName, options); - } - - static PgTypeInfo? GetEnumArrayTypeInfo(Type? elementType, PostgresType pgElementType, Type? 
type, DataTypeName dataTypeName, PgSerializerOptions options) - { - if ((type != typeof(object) && elementType is not null && elementType != typeof(string)) || pgElementType is not PostgresEnumType enumType) - return null; - - var mappings = new TypeInfoMappingCollection(); - mappings.AddType(enumType.DataTypeName, - (options, mapping, _) => mapping.CreateInfo(options, new StringTextConverter(options.TextEncoding)), MatchRequirement.DataTypeName); - mappings.AddArrayType(enumType.DataTypeName); - return mappings.Find(type, dataTypeName, options); - } -} diff --git a/src/Npgsql/Internal/Resolvers/ExtraConversionsResolver.cs b/src/Npgsql/Internal/Resolvers/ExtraConversionsResolver.cs deleted file mode 100644 index b495d987d3..0000000000 --- a/src/Npgsql/Internal/Resolvers/ExtraConversionsResolver.cs +++ /dev/null @@ -1,233 +0,0 @@ -using System; -using System.Collections.Immutable; -using System.Numerics; -using Npgsql.Internal.Converters; -using Npgsql.Internal.Postgres; - -namespace Npgsql.Internal.Resolvers; - -class ExtraConversionsResolver : IPgTypeInfoResolver -{ - TypeInfoMappingCollection? _mappings; - protected TypeInfoMappingCollection Mappings => _mappings ??= AddInfos(new()); - - public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) - => Mappings.Find(type, dataTypeName, options); - - static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings) - { - // Int2 - mappings.AddStructType(DataTypeNames.Int2, - static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter())); - mappings.AddStructType(DataTypeNames.Int2, - static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter())); - mappings.AddStructType(DataTypeNames.Int2, - static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter())); - mappings.AddStructType(DataTypeNames.Int2, - static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter())); - mappings.AddStructType(DataTypeNames.Int2, - static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter())); - mappings.AddStructType(DataTypeNames.Int2, - static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter())); - mappings.AddStructType(DataTypeNames.Int2, - static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter())); - - // Int4 - mappings.AddStructType(DataTypeNames.Int4, - static (options, mapping, _) => mapping.CreateInfo(options, new Int4Converter())); - mappings.AddStructType(DataTypeNames.Int4, - static (options, mapping, _) => mapping.CreateInfo(options, new Int4Converter())); - mappings.AddStructType(DataTypeNames.Int4, - static (options, mapping, _) => mapping.CreateInfo(options, new Int4Converter())); - mappings.AddStructType(DataTypeNames.Int4, - static (options, mapping, _) => mapping.CreateInfo(options, new Int4Converter())); - mappings.AddStructType(DataTypeNames.Int4, - static (options, mapping, _) => mapping.CreateInfo(options, new Int4Converter())); - mappings.AddStructType(DataTypeNames.Int4, - static (options, mapping, _) => mapping.CreateInfo(options, new Int4Converter())); - mappings.AddStructType(DataTypeNames.Int4, - static (options, mapping, _) => mapping.CreateInfo(options, 
new Int4Converter())); - - // Int8 - mappings.AddStructType(DataTypeNames.Int8, - static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); - mappings.AddStructType(DataTypeNames.Int8, - static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); - mappings.AddStructType(DataTypeNames.Int8, - static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); - mappings.AddStructType(DataTypeNames.Int8, - static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); - mappings.AddStructType(DataTypeNames.Int8, - static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); - mappings.AddStructType(DataTypeNames.Int8, - static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); - mappings.AddStructType(DataTypeNames.Int8, - static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); - - // Float4 - mappings.AddStructType(DataTypeNames.Float4, - static (options, mapping, _) => mapping.CreateInfo(options, new RealConverter())); - - // Float8 - mappings.AddStructType(DataTypeNames.Float8, - static (options, mapping, _) => mapping.CreateInfo(options, new DoubleConverter())); - - // Numeric - mappings.AddStructType(DataTypeNames.Numeric, - static (options, mapping, _) => mapping.CreateInfo(options, new DecimalNumericConverter())); - mappings.AddStructType(DataTypeNames.Numeric, - static (options, mapping, _) => mapping.CreateInfo(options, new DecimalNumericConverter())); - mappings.AddStructType(DataTypeNames.Numeric, - static (options, mapping, _) => mapping.CreateInfo(options, new DecimalNumericConverter())); - mappings.AddStructType(DataTypeNames.Numeric, - static (options, mapping, _) => mapping.CreateInfo(options, new DecimalNumericConverter())); - mappings.AddStructType(DataTypeNames.Numeric, - static (options, mapping, _) => mapping.CreateInfo(options, new DecimalNumericConverter())); - 
mappings.AddStructType(DataTypeNames.Numeric, - static (options, mapping, _) => mapping.CreateInfo(options, new DecimalNumericConverter())); - mappings.AddStructType(DataTypeNames.Numeric, - static (options, mapping, _) => mapping.CreateInfo(options, new BigIntegerNumericConverter())); - - // Bytea - mappings.AddStructType>(DataTypeNames.Bytea, - static (options, mapping, _) => mapping.CreateInfo(options, new ArraySegmentByteaConverter())); - mappings.AddStructType>(DataTypeNames.Bytea, - static (options, mapping, _) => mapping.CreateInfo(options, new MemoryByteaConverter())); - - // Varbit - mappings.AddType(DataTypeNames.Varbit, - static (options, mapping, _) => mapping.CreateInfo(options, new StringBitStringConverter())); - - // Bit - mappings.AddType(DataTypeNames.Bit, - static (options, mapping, _) => mapping.CreateInfo(options, new StringBitStringConverter())); - - // Text - mappings.AddType(DataTypeNames.Text, - static (options, mapping, _) => mapping.CreateInfo(options, new CharArrayTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text)); - mappings.AddStructType>(DataTypeNames.Text, - static (options, mapping, _) => mapping.CreateInfo(options, new ReadOnlyMemoryTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text)); - mappings.AddStructType>(DataTypeNames.Text, - static (options, mapping, _) => mapping.CreateInfo(options, new CharArraySegmentTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text)); - - // Alternative text types - foreach(var dataTypeName in new[] { "citext", DataTypeNames.Varchar, - DataTypeNames.Bpchar, DataTypeNames.Json, - DataTypeNames.Xml, DataTypeNames.Name, DataTypeNames.RefCursor }) - { - mappings.AddType(dataTypeName, - static (options, mapping, _) => mapping.CreateInfo(options, new CharArrayTextConverter(options.TextEncoding), - preferredFormat: DataFormat.Text)); - mappings.AddStructType>(dataTypeName, - static (options, mapping, _) => mapping.CreateInfo(options, new 
ReadOnlyMemoryTextConverter(options.TextEncoding), - preferredFormat: DataFormat.Text)); - mappings.AddStructType>(dataTypeName, - static (options, mapping, _) => mapping.CreateInfo(options, new CharArraySegmentTextConverter(options.TextEncoding), - preferredFormat: DataFormat.Text)); - } - - // Jsonb - const byte jsonbVersion = 1; - mappings.AddType(DataTypeNames.Jsonb, - static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new CharArrayTextConverter(options.TextEncoding)))); - mappings.AddStructType>(DataTypeNames.Jsonb, - static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter>(jsonbVersion, new ReadOnlyMemoryTextConverter(options.TextEncoding)))); - mappings.AddStructType>(DataTypeNames.Jsonb, - static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter>(jsonbVersion, new CharArraySegmentTextConverter(options.TextEncoding)))); - - // Hstore - mappings.AddType>("hstore", - static (options, mapping, _) => mapping.CreateInfo(options, new HstoreConverter>(options.TextEncoding, result => result.ToImmutableDictionary()))); - - return mappings; - } - - protected static TypeInfoMappingCollection AddArrayInfos(TypeInfoMappingCollection mappings) - { - // Int2 - mappings.AddStructArrayType(DataTypeNames.Int2); - mappings.AddStructArrayType(DataTypeNames.Int2); - mappings.AddStructArrayType(DataTypeNames.Int2); - mappings.AddStructArrayType(DataTypeNames.Int2); - mappings.AddStructArrayType(DataTypeNames.Int2); - mappings.AddStructArrayType(DataTypeNames.Int2); - mappings.AddStructArrayType(DataTypeNames.Int2); - - // Int4 - mappings.AddStructArrayType(DataTypeNames.Int4); - mappings.AddStructArrayType(DataTypeNames.Int4); - mappings.AddStructArrayType(DataTypeNames.Int4); - mappings.AddStructArrayType(DataTypeNames.Int4); - mappings.AddStructArrayType(DataTypeNames.Int4); - mappings.AddStructArrayType(DataTypeNames.Int4); - 
mappings.AddStructArrayType(DataTypeNames.Int4); - - // Int8 - mappings.AddStructArrayType(DataTypeNames.Int8); - mappings.AddStructArrayType(DataTypeNames.Int8); - mappings.AddStructArrayType(DataTypeNames.Int8); - mappings.AddStructArrayType(DataTypeNames.Int8); - mappings.AddStructArrayType(DataTypeNames.Int8); - mappings.AddStructArrayType(DataTypeNames.Int8); - mappings.AddStructArrayType(DataTypeNames.Int8); - - // Float4 - mappings.AddStructArrayType(DataTypeNames.Float4); - - // Float8 - mappings.AddStructArrayType(DataTypeNames.Float8); - - // Numeric - mappings.AddStructArrayType(DataTypeNames.Numeric); - mappings.AddStructArrayType(DataTypeNames.Numeric); - mappings.AddStructArrayType(DataTypeNames.Numeric); - mappings.AddStructArrayType(DataTypeNames.Numeric); - mappings.AddStructArrayType(DataTypeNames.Numeric); - mappings.AddStructArrayType(DataTypeNames.Numeric); - mappings.AddStructArrayType(DataTypeNames.Numeric); - - // Bytea - mappings.AddStructArrayType>(DataTypeNames.Bytea); - mappings.AddStructArrayType>(DataTypeNames.Bytea); - - // Varbit - mappings.AddArrayType(DataTypeNames.Varbit); - - // Bit - mappings.AddArrayType(DataTypeNames.Bit); - - // Text - mappings.AddArrayType(DataTypeNames.Text); - mappings.AddStructArrayType>(DataTypeNames.Text); - mappings.AddStructArrayType>(DataTypeNames.Text); - - // Alternative text types - foreach(var dataTypeName in new[] { "citext", DataTypeNames.Varchar, - DataTypeNames.Bpchar, DataTypeNames.Json, - DataTypeNames.Xml, DataTypeNames.Name, DataTypeNames.RefCursor }) - { - mappings.AddArrayType(dataTypeName); - mappings.AddStructArrayType>(dataTypeName); - mappings.AddStructArrayType>(dataTypeName); - } - - // Jsonb - mappings.AddArrayType(DataTypeNames.Jsonb); - mappings.AddStructArrayType>(DataTypeNames.Jsonb); - mappings.AddStructArrayType>(DataTypeNames.Jsonb); - - // Hstore - mappings.AddArrayType>("hstore"); - - return mappings; - } -} - -sealed class ExtraConversionsArrayTypeInfoResolver : 
ExtraConversionsResolver, IPgTypeInfoResolver -{ - TypeInfoMappingCollection? _mappings; - new TypeInfoMappingCollection Mappings => _mappings ??= AddArrayInfos(new(base.Mappings)); - - public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) - => Mappings.Find(type, dataTypeName, options); -} diff --git a/src/Npgsql/Internal/Resolvers/FullTextSearchTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/FullTextSearchTypeInfoResolver.cs deleted file mode 100644 index 75c46f3eae..0000000000 --- a/src/Npgsql/Internal/Resolvers/FullTextSearchTypeInfoResolver.cs +++ /dev/null @@ -1,87 +0,0 @@ -using System; -using Npgsql.Internal.Converters; -using Npgsql.Internal.Postgres; -using Npgsql.Properties; -using NpgsqlTypes; - -namespace Npgsql.Internal.Resolvers; - -class FullTextSearchTypeInfoResolver : IPgTypeInfoResolver -{ - TypeInfoMappingCollection? _mappings; - protected TypeInfoMappingCollection Mappings => _mappings ??= AddInfos(new()); - - public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) - => Mappings.Find(type, dataTypeName, options); - - static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings) - { - // tsvector - mappings.AddType(DataTypeNames.TsVector, - static (options, mapping, _) => mapping.CreateInfo(options, new TsVectorConverter(options.TextEncoding)), isDefault: true); - - // tsquery - mappings.AddType(DataTypeNames.TsQuery, - static (options, mapping, _) => mapping.CreateInfo(options, new TsQueryConverter(options.TextEncoding)), isDefault: true); - mappings.AddType(DataTypeNames.TsQuery, - static (options, mapping, _) => mapping.CreateInfo(options, new TsQueryConverter(options.TextEncoding))); - mappings.AddType(DataTypeNames.TsQuery, - static (options, mapping, _) => mapping.CreateInfo(options, new TsQueryConverter(options.TextEncoding))); - mappings.AddType(DataTypeNames.TsQuery, - static (options, mapping, _) => mapping.CreateInfo(options, new TsQueryConverter(options.TextEncoding))); - mappings.AddType(DataTypeNames.TsQuery, - static (options, mapping, _) => mapping.CreateInfo(options, new TsQueryConverter(options.TextEncoding))); - mappings.AddType(DataTypeNames.TsQuery, - static (options, mapping, _) => mapping.CreateInfo(options, new TsQueryConverter(options.TextEncoding))); - mappings.AddType(DataTypeNames.TsQuery, - static (options, mapping, _) => mapping.CreateInfo(options, new TsQueryConverter(options.TextEncoding))); - - return mappings; - } - - protected static TypeInfoMappingCollection AddArrayInfos(TypeInfoMappingCollection mappings) - { - // tsvector - mappings.AddArrayType(DataTypeNames.TsVector); - - // tsquery - mappings.AddArrayType(DataTypeNames.TsQuery); - mappings.AddArrayType(DataTypeNames.TsQuery); - mappings.AddArrayType(DataTypeNames.TsQuery); - mappings.AddArrayType(DataTypeNames.TsQuery); - mappings.AddArrayType(DataTypeNames.TsQuery); - mappings.AddArrayType(DataTypeNames.TsQuery); - mappings.AddArrayType(DataTypeNames.TsQuery); - - return 
mappings; - } - - public static void CheckUnsupported(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) - { - if (type != typeof(object) && (dataTypeName == DataTypeNames.TsQuery || dataTypeName == DataTypeNames.TsVector)) - throw new NotSupportedException( - string.Format(NpgsqlStrings.FullTextSearchNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableFullTextSearch), typeof(TBuilder).Name)); - - if (type is null) - return; - - if (TypeInfoMappingCollection.IsArrayLikeType(type, out var elementType)) - type = elementType; - - if (type is { IsConstructedGenericType: true } && type.GetGenericTypeDefinition() == typeof(Nullable<>)) - type = type.GetGenericArguments()[0]; - - if (type == typeof(NpgsqlTsVector) || typeof(NpgsqlTsQuery).IsAssignableFrom(type)) - throw new NotSupportedException( - string.Format(NpgsqlStrings.FullTextSearchNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableFullTextSearch), typeof(TBuilder).Name)); - } -} - -sealed class FullTextSearchArrayTypeInfoResolver : FullTextSearchTypeInfoResolver, IPgTypeInfoResolver -{ - TypeInfoMappingCollection? _mappings; - new TypeInfoMappingCollection Mappings => _mappings ??= AddArrayInfos(new(base.Mappings)); - - public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) - => Mappings.Find(type, dataTypeName, options); -} diff --git a/src/Npgsql/Internal/Resolvers/GeometricTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/GeometricTypeInfoResolver.cs deleted file mode 100644 index 0a0a9eef34..0000000000 --- a/src/Npgsql/Internal/Resolvers/GeometricTypeInfoResolver.cs +++ /dev/null @@ -1,57 +0,0 @@ -using System; -using Npgsql.Internal.Converters; -using Npgsql.Internal.Postgres; -using NpgsqlTypes; - -namespace Npgsql.Internal.Resolvers; - -class GeometricTypeInfoResolver : IPgTypeInfoResolver -{ - TypeInfoMappingCollection? _mappings; - protected TypeInfoMappingCollection Mappings => _mappings ??= AddInfos(new()); - - public PgTypeInfo? 
GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) - => Mappings.Find(type, dataTypeName, options); - - static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings) - { - mappings.AddStructType(DataTypeNames.Point, - static (options, mapping, _) => mapping.CreateInfo(options, new PointConverter()), isDefault: true); - mappings.AddStructType(DataTypeNames.Box, - static (options, mapping, _) => mapping.CreateInfo(options, new BoxConverter()), isDefault: true); - mappings.AddStructType(DataTypeNames.Polygon, - static (options, mapping, _) => mapping.CreateInfo(options, new PolygonConverter()), isDefault: true); - mappings.AddStructType(DataTypeNames.Line, - static (options, mapping, _) => mapping.CreateInfo(options, new LineConverter()), isDefault: true); - mappings.AddStructType(DataTypeNames.LSeg, - static (options, mapping, _) => mapping.CreateInfo(options, new LineSegmentConverter()), isDefault: true); - mappings.AddStructType(DataTypeNames.Path, - static (options, mapping, _) => mapping.CreateInfo(options, new PathConverter()), isDefault: true); - mappings.AddStructType(DataTypeNames.Circle, - static (options, mapping, _) => mapping.CreateInfo(options, new CircleConverter()), isDefault: true); - - return mappings; - } - - protected static TypeInfoMappingCollection AddArrayInfos(TypeInfoMappingCollection mappings) - { - mappings.AddStructArrayType(DataTypeNames.Point); - mappings.AddStructArrayType(DataTypeNames.Box); - mappings.AddStructArrayType(DataTypeNames.Polygon); - mappings.AddStructArrayType(DataTypeNames.Line); - mappings.AddStructArrayType(DataTypeNames.LSeg); - mappings.AddStructArrayType(DataTypeNames.Path); - mappings.AddStructArrayType(DataTypeNames.Circle); - - return mappings; - } -} - -sealed class GeometricArrayTypeInfoResolver : GeometricTypeInfoResolver, IPgTypeInfoResolver -{ - TypeInfoMappingCollection? 
_mappings; - new TypeInfoMappingCollection Mappings => _mappings ??= AddArrayInfos(new(base.Mappings)); - - public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) - => Mappings.Find(type, dataTypeName, options); -} diff --git a/src/Npgsql/Internal/Resolvers/JsonDynamicTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/JsonDynamicTypeInfoResolver.cs deleted file mode 100644 index df85c54974..0000000000 --- a/src/Npgsql/Internal/Resolvers/JsonDynamicTypeInfoResolver.cs +++ /dev/null @@ -1,154 +0,0 @@ -using System; -using System.Diagnostics.CodeAnalysis; -using System.Text; -using System.Text.Json; -using System.Text.Json.Nodes; -using System.Text.Json.Serialization.Metadata; -using System.Threading; -using Npgsql.Internal.Converters; -using Npgsql.Internal.Postgres; - -namespace Npgsql.Internal.Resolvers; - -[RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] -[RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] -class JsonDynamicTypeInfoResolver : DynamicTypeInfoResolver, IPgTypeInfoResolver -{ - JsonSerializerOptions? _serializerOptions; - JsonSerializerOptions SerializerOptions -#if NET7_0_OR_GREATER - => _serializerOptions ??= JsonSerializerOptions.Default; -#else - => _serializerOptions ??= new(); -#endif - - readonly Type[] _jsonbClrTypes; - readonly Type[] _jsonClrTypes; - TypeInfoMappingCollection? _mappings; - - protected TypeInfoMappingCollection Mappings => _mappings ??= AddInfos(new(), _jsonbClrTypes, _jsonClrTypes, SerializerOptions); - - public JsonDynamicTypeInfoResolver(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerOptions? serializerOptions = null) - { - _jsonbClrTypes = jsonbClrTypes ?? Array.Empty(); - _jsonClrTypes = jsonClrTypes ?? 
Array.Empty(); - _serializerOptions = serializerOptions; - } - - static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings, Type[] jsonbClrTypes, Type[] jsonClrTypes, JsonSerializerOptions serializerOptions) - { - // We do GetTypeInfo calls directly so we need a resolver. - serializerOptions.TypeInfoResolver ??= new DefaultJsonTypeInfoResolver(); - - // These live in the RUC/RDC part as JsonValues can contain any .NET type. - foreach (var dataTypeName in new[] { DataTypeNames.Jsonb, DataTypeNames.Json }) - { - var jsonb = dataTypeName == DataTypeNames.Jsonb; - mappings.AddType(dataTypeName, (options, mapping, _) => - mapping.CreateInfo(options, new JsonConverter(jsonb, options.TextEncoding, serializerOptions))); - mappings.AddType(dataTypeName, (options, mapping, _) => - mapping.CreateInfo(options, new JsonConverter(jsonb, options.TextEncoding, serializerOptions))); - mappings.AddType(dataTypeName, (options, mapping, _) => - mapping.CreateInfo(options, new JsonConverter(jsonb, options.TextEncoding, serializerOptions))); - mappings.AddType(dataTypeName, (options, mapping, _) => - mapping.CreateInfo(options, new JsonConverter(jsonb, options.TextEncoding, serializerOptions))); - } - - AddUserMappings(jsonb: true, jsonbClrTypes); - AddUserMappings(jsonb: false, jsonClrTypes); - - void AddUserMappings(bool jsonb, Type[] clrTypes) - { - var dynamicMappings = CreateCollection(); - var dataTypeName = (string)(jsonb ? 
DataTypeNames.Jsonb : DataTypeNames.Json); - foreach (var jsonType in clrTypes) - { - var jsonTypeInfo = serializerOptions.GetTypeInfo(jsonType); - dynamicMappings.AddMapping(jsonTypeInfo.Type, dataTypeName, - factory: (options, mapping, _) => mapping.CreateInfo(options, - CreateSystemTextJsonConverter(mapping.Type, jsonb, options.TextEncoding, serializerOptions, jsonType))); - - if (!jsonType.IsValueType && jsonTypeInfo.PolymorphismOptions is not null) - { - foreach (var derived in jsonTypeInfo.PolymorphismOptions.DerivedTypes) - dynamicMappings.AddMapping(derived.DerivedType, dataTypeName, - factory: (options, mapping, _) => mapping.CreateInfo(options, - CreateSystemTextJsonConverter(mapping.Type, jsonb, options.TextEncoding, serializerOptions, jsonType))); - } - } - mappings.AddRange(dynamicMappings.ToTypeInfoMappingCollection()); - } - - return mappings; - } - - protected static TypeInfoMappingCollection AddArrayInfos(TypeInfoMappingCollection mappings, TypeInfoMappingCollection baseMappings) - { - if (baseMappings.Items.Count == 0) - return mappings; - - foreach (var dataTypeName in new[] { DataTypeNames.Jsonb, DataTypeNames.Json }) - { - mappings.AddArrayType(dataTypeName); - mappings.AddArrayType(dataTypeName); - mappings.AddArrayType(dataTypeName); - mappings.AddArrayType(dataTypeName); - } - - var dynamicMappings = CreateCollection(baseMappings); - foreach (var mapping in baseMappings.Items) - dynamicMappings.AddArrayMapping(mapping.Type, mapping.DataTypeName); - mappings.AddRange(dynamicMappings.ToTypeInfoMappingCollection()); - - return mappings; - } - - public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) - => Mappings.Find(type, dataTypeName, options) ?? base.GetTypeInfo(type, dataTypeName, options); - - protected override DynamicMappingCollection? GetMappings(Type? 
type, DataTypeName dataTypeName, PgSerializerOptions options) - { - // Match all types except null, object and text types as long as DataTypeName (json/jsonb) is present. - if (type is null || type == typeof(object) || Array.IndexOf(PgSerializerOptions.WellKnownTextTypes, type) != -1 - || dataTypeName != DataTypeNames.Jsonb && dataTypeName != DataTypeNames.Json) - return null; - - return CreateCollection().AddMapping(type, dataTypeName, (options, mapping, _) => - { - var jsonb = dataTypeName == DataTypeNames.Jsonb; - - // For jsonb we can't properly support polymorphic serialization unless we do quite some additional work - // so we default to mapping.Type instead (exact types will never serialize their "$type" fields, essentially disabling the feature). - var baseType = jsonb ? mapping.Type : typeof(object); - - return mapping.CreateInfo(options, - CreateSystemTextJsonConverter(mapping.Type, jsonb, options.TextEncoding, SerializerOptions, baseType)); - }); - } - - static PgConverter CreateSystemTextJsonConverter(Type valueType, bool jsonb, Encoding textEncoding, JsonSerializerOptions serializerOptions, Type baseType) - => (PgConverter)Activator.CreateInstance( - typeof(JsonConverter<,>).MakeGenericType(valueType, baseType), - jsonb, - textEncoding, - serializerOptions)!; -} - -[RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] -[RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] -sealed class JsonDynamicArrayTypeInfoResolver : JsonDynamicTypeInfoResolver, IPgTypeInfoResolver -{ - TypeInfoMappingCollection? _mappings; - new TypeInfoMappingCollection Mappings => _mappings ??= AddArrayInfos(new(base.Mappings), base.Mappings); - - public JsonDynamicArrayTypeInfoResolver(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerOptions? 
serializerOptions = null) - : base(jsonbClrTypes, jsonClrTypes, serializerOptions) { } - - public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) - => Mappings.Find(type, dataTypeName, options) ?? base.GetTypeInfo(type, dataTypeName, options); - - protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) - => type is not null && IsArrayLikeType(type, out var elementType) && IsArrayDataTypeName(dataTypeName, options, out var elementDataTypeName) - ? base.GetMappings(elementType, elementDataTypeName, options)?.AddArrayMapping(elementType, elementDataTypeName) - : null; -} diff --git a/src/Npgsql/Internal/Resolvers/JsonTypeInfoResolvers.cs b/src/Npgsql/Internal/Resolvers/JsonTypeInfoResolvers.cs deleted file mode 100644 index d589f364ab..0000000000 --- a/src/Npgsql/Internal/Resolvers/JsonTypeInfoResolvers.cs +++ /dev/null @@ -1,85 +0,0 @@ -using System; -using System.Text.Json; -using System.Text.Json.Serialization.Metadata; -using Npgsql.Internal.Converters; -using Npgsql.Internal.Postgres; - -namespace Npgsql.Internal.Resolvers; - -class JsonTypeInfoResolver : IPgTypeInfoResolver -{ - static JsonSerializerOptions? DefaultSerializerOptions; - - readonly JsonSerializerOptions _serializerOptions; - TypeInfoMappingCollection? _mappings; - protected TypeInfoMappingCollection Mappings => _mappings ??= AddInfos(new(), _serializerOptions); - - public JsonTypeInfoResolver(JsonSerializerOptions? 
serializerOptions = null) - { - if (serializerOptions is null) - { - serializerOptions = DefaultSerializerOptions; - if (serializerOptions is null) - { - serializerOptions = new JsonSerializerOptions(); - serializerOptions.TypeInfoResolver = new BasicJsonTypeInfoResolver(); - DefaultSerializerOptions = serializerOptions; - } - } - _serializerOptions = serializerOptions; - } - - static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings, JsonSerializerOptions serializerOptions) - { - // Jsonb is the first default for JsonDocument - foreach (var dataTypeName in new[] { DataTypeNames.Jsonb, DataTypeNames.Json }) - { - var jsonb = dataTypeName == DataTypeNames.Jsonb; - mappings.AddType(dataTypeName, (options, mapping, _) => - mapping.CreateInfo(options, new JsonConverter(jsonb, options.TextEncoding, serializerOptions)), - isDefault: true); - mappings.AddStructType(dataTypeName, (options, mapping, _) => - mapping.CreateInfo(options, new JsonConverter(jsonb, options.TextEncoding, serializerOptions))); - } - - return mappings; - } - - protected static TypeInfoMappingCollection AddArrayInfos(TypeInfoMappingCollection mappings) - { - foreach (var dataTypeName in new[] { DataTypeNames.Jsonb, DataTypeNames.Json }) - { - mappings.AddArrayType(dataTypeName); - mappings.AddStructArrayType(dataTypeName); - } - - return mappings; - } - - public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) - => Mappings.Find(type, dataTypeName, options); - - sealed class BasicJsonTypeInfoResolver : IJsonTypeInfoResolver - { - public JsonTypeInfo? 
GetTypeInfo(Type type, JsonSerializerOptions options) - { - if (type == typeof(JsonDocument)) - return JsonMetadataServices.CreateValueInfo(options, JsonMetadataServices.JsonDocumentConverter); - if (type == typeof(JsonElement)) - return JsonMetadataServices.CreateValueInfo(options, JsonMetadataServices.JsonElementConverter); - return null; - } - } -} - -sealed class JsonArrayTypeInfoResolver : JsonTypeInfoResolver, IPgTypeInfoResolver -{ - TypeInfoMappingCollection? _mappings; - new TypeInfoMappingCollection Mappings => _mappings ??= AddArrayInfos(new(base.Mappings)); - - public JsonArrayTypeInfoResolver(JsonSerializerOptions? serializerOptions = null) - : base(serializerOptions) { } - - public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) - => Mappings.Find(type, dataTypeName, options); -} diff --git a/src/Npgsql/Internal/Resolvers/LTreeTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/LTreeTypeInfoResolver.cs deleted file mode 100644 index 79767f21e1..0000000000 --- a/src/Npgsql/Internal/Resolvers/LTreeTypeInfoResolver.cs +++ /dev/null @@ -1,57 +0,0 @@ -using System; -using Npgsql.Internal.Converters; -using Npgsql.Internal.Postgres; -using Npgsql.Properties; - -namespace Npgsql.Internal.Resolvers; - -class LTreeTypeInfoResolver : IPgTypeInfoResolver -{ - const byte LTreeVersion = 1; - TypeInfoMappingCollection? _mappings; - protected TypeInfoMappingCollection Mappings => _mappings ??= AddInfos(new()); - - public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) - => Mappings.Find(type, dataTypeName, options); - - static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings) - { - mappings.AddType("ltree", - static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(LTreeVersion, new StringTextConverter(options.TextEncoding))), - MatchRequirement.DataTypeName); - mappings.AddType("lquery", - static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(LTreeVersion, new StringTextConverter(options.TextEncoding))), - MatchRequirement.DataTypeName); - mappings.AddType("ltxtquery", - static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(LTreeVersion, new StringTextConverter(options.TextEncoding))), - MatchRequirement.DataTypeName); - - return mappings; - } - - protected static TypeInfoMappingCollection AddArrayInfos(TypeInfoMappingCollection mappings) - { - mappings.AddArrayType("ltree"); - mappings.AddArrayType("lquery"); - mappings.AddArrayType("ltxtquery"); - - return mappings; - } - - public static void CheckUnsupported(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) - { - if (type != typeof(object) && dataTypeName is { UnqualifiedName: "ltree" or "lquery" or "ltxtquery" }) - throw new NotSupportedException( - string.Format(NpgsqlStrings.LTreeNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableLTree), - typeof(TBuilder).Name)); - } -} - -sealed class LTreeArrayTypeInfoResolver : LTreeTypeInfoResolver, IPgTypeInfoResolver -{ - TypeInfoMappingCollection? _mappings; - new TypeInfoMappingCollection Mappings => _mappings ??= AddArrayInfos(new(base.Mappings)); - - public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) - => Mappings.Find(type, dataTypeName, options); -} diff --git a/src/Npgsql/Internal/Resolvers/NetworkTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/NetworkTypeInfoResolver.cs deleted file mode 100644 index 9b1ed63734..0000000000 --- a/src/Npgsql/Internal/Resolvers/NetworkTypeInfoResolver.cs +++ /dev/null @@ -1,83 +0,0 @@ -using System; -using System.Diagnostics.CodeAnalysis; -using System.Net; -using System.Net.NetworkInformation; -using Npgsql.Internal.Converters; -using Npgsql.Internal.Postgres; -using NpgsqlTypes; - -namespace Npgsql.Internal.Resolvers; - -class NetworkTypeInfoResolver : IPgTypeInfoResolver -{ - TypeInfoMappingCollection? _mappings; - protected TypeInfoMappingCollection Mappings => _mappings ??= AddInfos(new()); - - public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) - => Mappings.Find(type, dataTypeName, options); - - static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings) - { - // macaddr - mappings.AddType(DataTypeNames.MacAddr, - static (options, mapping, _) => mapping.CreateInfo(options, new MacaddrConverter(macaddr8: false)), isDefault: true); - mappings.AddType(DataTypeNames.MacAddr8, - static (options, mapping, _) => mapping.CreateInfo(options, new MacaddrConverter(macaddr8: true)), - mapping => mapping with { MatchRequirement = MatchRequirement.DataTypeName }); - - // inet - // This is one of the rare mappings that force us to use reflection for a lack of any alternative. - // There are certain IPAddress values like Loopback or Any that return a *private* derived type (see https://github.com/dotnet/runtime/issues/27870). - // However we still need to be able to resolve an exactly typed converter for those values. - // We do so by wrapping our converter in a casting converter constructed over the derived type. - // Finally we add a custom predicate to be able to match any type which values are assignable to IPAddress. 
- mappings.AddType(DataTypeNames.Inet, - CreateInfo, mapping => mapping with { MatchRequirement = MatchRequirement.Single, TypeMatchPredicate = type => type is null || typeof(IPAddress).IsAssignableFrom(type) }); - mappings.AddStructType(DataTypeNames.Inet, - static (options, mapping, _) => mapping.CreateInfo(options, new NpgsqlInetConverter())); - - // cidr - mappings.AddStructType(DataTypeNames.Cidr, - static (options, mapping, _) => mapping.CreateInfo(options, new NpgsqlCidrConverter()), isDefault: true); - - // Code is split out to a local method as suppression attributes on lambdas aren't properly handled by the ILLink analyzer yet. - [UnconditionalSuppressMessage("AotAnalysis", "IL3050", Justification = "MakeGenericType is safe because the target will only ever be a reference type.")] - static PgTypeInfo CreateInfo(PgSerializerOptions options, TypeInfoMapping resolvedMapping, bool _) - { - var derivedType = resolvedMapping.Type != typeof(IPAddress); - PgConverter converter = new IPAddressConverter(); - if (derivedType) - // There is not much more we can do, the deriving type IPAddress+ReadOnlyIPAddress isn't public. - converter = (PgConverter)Activator.CreateInstance(typeof(CastingConverter<>).MakeGenericType(resolvedMapping.Type), converter)!; - - return resolvedMapping.CreateInfo(options, converter); - } - - return mappings; - } - - protected static TypeInfoMappingCollection AddArrayInfos(TypeInfoMappingCollection mappings) - { - // macaddr - mappings.AddArrayType(DataTypeNames.MacAddr); - mappings.AddArrayType(DataTypeNames.MacAddr8); - - // inet - mappings.AddArrayType(DataTypeNames.Inet); - mappings.AddStructArrayType(DataTypeNames.Inet); - - // cidr - mappings.AddStructArrayType(DataTypeNames.Cidr); - - return mappings; - } -} - -sealed class NetworkArrayTypeInfoResolver : NetworkTypeInfoResolver, IPgTypeInfoResolver -{ - TypeInfoMappingCollection? 
_mappings; - new TypeInfoMappingCollection Mappings => _mappings ??= AddArrayInfos(new(base.Mappings)); - - public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) - => Mappings.Find(type, dataTypeName, options); -} diff --git a/src/Npgsql/Internal/Resolvers/RecordTypeInfoResolvers.cs b/src/Npgsql/Internal/Resolvers/RecordTypeInfoResolvers.cs deleted file mode 100644 index 7b2920e799..0000000000 --- a/src/Npgsql/Internal/Resolvers/RecordTypeInfoResolvers.cs +++ /dev/null @@ -1,143 +0,0 @@ -using System; -using System.Diagnostics.CodeAnalysis; -using System.Reflection; -using Npgsql.Internal.Converters; -using Npgsql.Internal.Postgres; -using Npgsql.Properties; - -namespace Npgsql.Internal.Resolvers; - -class RecordTypeInfoResolver : IPgTypeInfoResolver -{ - TypeInfoMappingCollection? _mappings; - protected TypeInfoMappingCollection Mappings => _mappings ??= AddInfos(new()); - - public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) - => Mappings.Find(type, dataTypeName, options); - - static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings) - { - mappings.AddType(DataTypeNames.Record, static (options, mapping, _) => - mapping.CreateInfo(options, new RecordConverter(options), supportsWriting: false), - MatchRequirement.DataTypeName); - - return mappings; - } - - protected static TypeInfoMappingCollection AddArrayInfos(TypeInfoMappingCollection mappings) - { - mappings.AddArrayType(DataTypeNames.Record); - - return mappings; - } - - public static void CheckUnsupported(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) - { - if (type != typeof(object) && dataTypeName == DataTypeNames.Record) - { - throw new NotSupportedException( - string.Format( - NpgsqlStrings.RecordsNotEnabled, - nameof(INpgsqlTypeMapperExtensions.EnableRecordsAsTuples), - typeof(TBuilder).Name, - nameof(NpgsqlSlimDataSourceBuilder.EnableRecords))); - } - } -} - -sealed class RecordArrayTypeInfoResolver : RecordTypeInfoResolver, IPgTypeInfoResolver -{ - TypeInfoMappingCollection? _mappings; - new TypeInfoMappingCollection Mappings => _mappings ??= AddArrayInfos(new(base.Mappings)); - - public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) - => Mappings.Find(type, dataTypeName, options); -} - -[RequiresUnreferencedCode("Tupled record resolver may perform reflection on trimmed tuple types.")] -[RequiresDynamicCode("Tupled records need to construct a generic converter for a statically unknown (value)tuple type.")] -class TupledRecordTypeInfoResolver : IPgTypeInfoResolver -{ - TypeInfoMappingCollection? _mappings; - protected TypeInfoMappingCollection Mappings => _mappings ??= AddInfos(new()); - - public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) - => Mappings.Find(type, dataTypeName, options); - - // Stand-in type, type match predicate does the actual work. 
- static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings) - { - mappings.AddType>(DataTypeNames.Record, Factory, - mapping => mapping with - { - MatchRequirement = MatchRequirement.DataTypeName, - TypeMatchPredicate = type => type is { IsConstructedGenericType: true, FullName: not null } - && type.FullName.StartsWith("System.Tuple", StringComparison.Ordinal) - }); - - mappings.AddStructType>(DataTypeNames.Record, Factory, - mapping => mapping with - { - MatchRequirement = MatchRequirement.DataTypeName, - TypeMatchPredicate = type => type is { IsConstructedGenericType: true, FullName: not null } - && type.FullName.StartsWith("System.ValueTuple", StringComparison.Ordinal) - }); - - return mappings; - } - - protected static TypeInfoMappingCollection AddArrayInfos(TypeInfoMappingCollection mappings) - { - mappings.AddArrayType>(DataTypeNames.Record, suppressObjectMapping: true); - mappings.AddStructArrayType>(DataTypeNames.Record, suppressObjectMapping: true); - - return mappings; - } - - static readonly TypeInfoFactory Factory = static (options, mapping, _) => - { - var constructors = mapping.Type.GetConstructors(); - ConstructorInfo? constructor = null; - if (constructors.Length is 1) - constructor = constructors[0]; - else - { - var args = mapping.Type.GenericTypeArguments.Length; - foreach (var ctor in constructors) - if (ctor.GetParameters().Length == args) - { - constructor = ctor; - break; - } - } - - if (constructor is null) - throw new InvalidOperationException($"Couldn't find a suitable constructor for record type: {mapping.Type.FullName}"); - - var factory = typeof(TupledRecordTypeInfoResolver).GetMethod(nameof(CreateFactory), BindingFlags.Static | BindingFlags.NonPublic)! 
- .MakeGenericMethod(mapping.Type) - .Invoke(null, new object[] { constructor, constructor.GetParameters().Length }); - - var converterType = typeof(RecordConverter<>).MakeGenericType(mapping.Type); - var converter = (PgConverter)Activator.CreateInstance(converterType, options, factory)!; - return mapping.CreateInfo(options, converter, supportsWriting: false); - }; - - static Func CreateFactory(ConstructorInfo constructor, int constructorParameters) => array => - { - if (array.Length != constructorParameters) - throw new InvalidCastException($"Cannot read record type with {array.Length} fields as {typeof(T)}"); - return (T)constructor.Invoke(array); - }; -} - -[RequiresUnreferencedCode("Tupled record resolver may perform reflection on trimmed tuple types.")] -[RequiresDynamicCode("Tupled records need to construct a generic converter for a statically unknown (value)tuple type.")] -sealed class TupledRecordArrayTypeInfoResolver : TupledRecordTypeInfoResolver, IPgTypeInfoResolver -{ - TypeInfoMappingCollection? _mappings; - new TypeInfoMappingCollection Mappings => _mappings ??= AddArrayInfos(new(base.Mappings)); - - public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) - => Mappings.Find(type, dataTypeName, options); -} diff --git a/src/Npgsql/Internal/Resolvers/UnmappedEnumTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/UnmappedEnumTypeInfoResolver.cs deleted file mode 100644 index 5c6d19db9b..0000000000 --- a/src/Npgsql/Internal/Resolvers/UnmappedEnumTypeInfoResolver.cs +++ /dev/null @@ -1,50 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Diagnostics.CodeAnalysis; -using System.Reflection; -using Npgsql.Internal.Converters; -using Npgsql.Internal.Postgres; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.Internal.Resolvers; - -[RequiresUnreferencedCode("Unmapped enum resolver may perform reflection on types with fields that were trimmed if not referenced directly.")] -[RequiresDynamicCode("Unmapped enums need to construct a generic converter for a statically unknown enum type.")] -class UnmappedEnumTypeInfoResolver : DynamicTypeInfoResolver -{ - protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) - { - if (type is null || !IsTypeOrNullableOfType(type, static type => type.IsEnum, out var matchedType) || options.DatabaseInfo.GetPostgresType(dataTypeName) is not PostgresEnumType) - return null; - - return CreateCollection().AddMapping(matchedType, dataTypeName, static (options, mapping, _) => - { - var enumToLabel = new Dictionary(); - var labelToEnum = new Dictionary(); - foreach (var field in mapping.Type.GetFields(BindingFlags.Static | BindingFlags.Public)) - { - var attribute = (PgNameAttribute?)field.GetCustomAttribute(typeof(PgNameAttribute), false); - var enumName = attribute?.PgName ?? 
options.DefaultNameTranslator.TranslateMemberName(field.Name); - var enumValue = (Enum)field.GetValue(null)!; - - enumToLabel[enumValue] = enumName; - labelToEnum[enumName] = enumValue; - } - - return mapping.CreateInfo(options, (PgConverter)Activator.CreateInstance(typeof(EnumConverter<>).MakeGenericType(mapping.Type), - enumToLabel, labelToEnum, - options.TextEncoding)!); - }); - } -} - -[RequiresUnreferencedCode("Unmapped enum resolver may perform reflection on types with fields that were trimmed if not referenced directly.")] -[RequiresDynamicCode("Unmapped enums need to construct a generic converter for a statically unknown enum type")] -sealed class UnmappedEnumArrayTypeInfoResolver : UnmappedEnumTypeInfoResolver -{ - protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) - => type is not null && IsArrayLikeType(type, out var elementType) && IsArrayDataTypeName(dataTypeName, options, out var elementDataTypeName) - ? 
base.GetMappings(elementType, elementDataTypeName, options)?.AddArrayMapping(elementType, elementDataTypeName) - : null; -} diff --git a/src/Npgsql/Internal/Resolvers/UnmappedMultirangeTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/UnmappedMultirangeTypeInfoResolver.cs deleted file mode 100644 index 33a76c0749..0000000000 --- a/src/Npgsql/Internal/Resolvers/UnmappedMultirangeTypeInfoResolver.cs +++ /dev/null @@ -1,59 +0,0 @@ -using System; -using System.Diagnostics.CodeAnalysis; -using Npgsql.Internal.Converters; -using Npgsql.Internal.Postgres; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.Internal.Resolvers; - -[RequiresUnreferencedCode("A dynamic type info resolver may perform reflection on types that were trimmed if not referenced directly.")] -[RequiresDynamicCode("A dynamic type info resolver may need to construct a generic converter for a statically unknown type.")] -class UnmappedMultirangeTypeInfoResolver : DynamicTypeInfoResolver -{ - protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) - { - Type? elementType = null; - if (type is not null && !IsArrayLikeType(type, out elementType) - || elementType is not null && !IsTypeOrNullableOfType(elementType, - static type => type.IsConstructedGenericType && type.GetGenericTypeDefinition() == typeof(NpgsqlRange<>), out _) - || options.DatabaseInfo.GetPostgresType(dataTypeName) is not PostgresMultirangeType multirangeType) - return null; - - var subInfo = - elementType is null - ? options.GetDefaultTypeInfo(multirangeType.Subrange) - : options.GetTypeInfo(elementType, multirangeType.Subrange); - - // We have no generic MultirangeConverterResolver so we would not know how to compose a range mapping for such infos. 
- // See https://github.com/npgsql/npgsql/issues/5268 - if (subInfo is not { IsResolverInfo: false }) - return null; - - subInfo = subInfo.ToNonBoxing(); - - type ??= subInfo.Type.MakeArrayType(); - - return CreateCollection().AddMapping(type, dataTypeName, - (options, mapping, _) => mapping.CreateInfo(options, - (PgConverter)Activator.CreateInstance(typeof(MultirangeConverter<,>).MakeGenericType(type, subInfo.Type), subInfo.GetResolution().Converter)!, - preferredFormat: subInfo.PreferredFormat, supportsWriting: subInfo.SupportsWriting), - mapping => mapping with { MatchRequirement = MatchRequirement.Single }); - } -} - -[RequiresUnreferencedCode("A dynamic type info resolver may perform reflection on types that were trimmed if not referenced directly.")] -[RequiresDynamicCode("A dynamic type info resolver may need to construct a generic converter for a statically unknown type.")] -sealed class UnmappedMultirangeArrayTypeInfoResolver : UnmappedMultirangeTypeInfoResolver -{ - protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) - { - Type? elementType = null; - if (!((type is null || IsArrayLikeType(type, out elementType)) && IsArrayDataTypeName(dataTypeName, options, out var elementDataTypeName))) - return null; - - var mappings = base.GetMappings(elementType, elementDataTypeName, options); - elementType ??= mappings?.Find(null, elementDataTypeName, options)?.Type; // Try to get the default mapping. - return elementType is null ? 
null : mappings?.AddArrayMapping(elementType, elementDataTypeName); - } -} diff --git a/src/Npgsql/Internal/Resolvers/UnmappedRangeTypeInfoResolver.cs b/src/Npgsql/Internal/Resolvers/UnmappedRangeTypeInfoResolver.cs deleted file mode 100644 index 69211df68b..0000000000 --- a/src/Npgsql/Internal/Resolvers/UnmappedRangeTypeInfoResolver.cs +++ /dev/null @@ -1,59 +0,0 @@ -using System; -using System.Diagnostics.CodeAnalysis; -using Npgsql.Internal.Converters; -using Npgsql.Internal.Postgres; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.Internal.Resolvers; - -[RequiresUnreferencedCode("A dynamic type info resolver may perform reflection on types that were trimmed if not referenced directly.")] -[RequiresDynamicCode("A dynamic type info resolver may need to construct a generic converter for a statically unknown type.")] -class UnmappedRangeTypeInfoResolver : DynamicTypeInfoResolver -{ - protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) - { - var matchedType = type; - if (type is not null && !IsTypeOrNullableOfType(type, - static type => type.IsConstructedGenericType && type.GetGenericTypeDefinition() == typeof(NpgsqlRange<>), out matchedType) - || options.DatabaseInfo.GetPostgresType(dataTypeName) is not PostgresRangeType rangeType) - return null; - - var subInfo = - matchedType is null - ? options.GetDefaultTypeInfo(rangeType.Subtype) - // Input matchedType here as we don't want an NpgsqlRange over Nullable (it has its own nullability tracking, for better or worse) - : options.GetTypeInfo(matchedType.GetGenericArguments()[0], rangeType.Subtype); - - // We have no generic RangeConverterResolver so we would not know how to compose a range mapping for such infos. 
- // See https://github.com/npgsql/npgsql/issues/5268 - if (subInfo is not { IsResolverInfo: false }) - return null; - - subInfo = subInfo.ToNonBoxing(); - - matchedType ??= typeof(NpgsqlRange<>).MakeGenericType(subInfo.Type); - - return CreateCollection().AddMapping(matchedType, dataTypeName, - (options, mapping, _) => mapping.CreateInfo(options, - (PgConverter)Activator.CreateInstance(typeof(RangeConverter<>).MakeGenericType(subInfo.Type), subInfo.GetResolution().Converter)!, - preferredFormat: subInfo.PreferredFormat, supportsWriting: subInfo.SupportsWriting), - mapping => mapping with { MatchRequirement = MatchRequirement.Single }); - } -} - -[RequiresUnreferencedCode("A dynamic type info resolver may perform reflection on types that were trimmed if not referenced directly.")] -[RequiresDynamicCode("A dynamic type info resolver may need to construct a generic converter for a statically unknown type.")] -sealed class UnmappedRangeArrayTypeInfoResolver : UnmappedRangeTypeInfoResolver -{ - protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) - { - Type? elementType = null; - if (!((type is null || IsArrayLikeType(type, out elementType)) && IsArrayDataTypeName(dataTypeName, options, out var elementDataTypeName))) - return null; - - var mappings = base.GetMappings(elementType, elementDataTypeName, options); - elementType ??= mappings?.Find(null, elementDataTypeName, options)?.Type; // Try to get the default mapping. - return elementType is null ? 
null : mappings?.AddArrayMapping(elementType, elementDataTypeName); - } -} diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index b2eb8dc18a..9ca100e0db 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -11,7 +11,7 @@ using System.Transactions; using Microsoft.Extensions.Logging; using Npgsql.Internal; -using Npgsql.Internal.Resolvers; +using Npgsql.Internal.ResolverFactories; using Npgsql.Properties; using Npgsql.Util; @@ -244,7 +244,7 @@ internal async Task Bootstrap( new(PostgresMinimalDatabaseInfo.DefaultTypeCatalog) { TextEncoding = connector.TextEncoding, - TypeInfoResolver = AdoTypeInfoResolver.Instance + TypeInfoResolver = AdoTypeInfoResolverFactory.Instance.CreateResolver() }; NpgsqlDatabaseInfo databaseInfo; diff --git a/src/Npgsql/NpgsqlDataSourceBuilder.cs b/src/Npgsql/NpgsqlDataSourceBuilder.cs index d3b392156e..5b92677e4a 100644 --- a/src/Npgsql/NpgsqlDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlDataSourceBuilder.cs @@ -1,5 +1,4 @@ using System; -using System.Collections.Generic; using System.Diagnostics.CodeAnalysis; using System.Net.Security; using System.Security.Cryptography.X509Certificates; @@ -7,7 +6,7 @@ using System.Threading.Tasks; using Microsoft.Extensions.Logging; using Npgsql.Internal; -using Npgsql.Internal.Resolvers; +using Npgsql.Internal.ResolverFactories; using Npgsql.TypeMapping; namespace Npgsql; @@ -48,28 +47,23 @@ public INpgsqlNameTranslator DefaultNameTranslator public string ConnectionString => _internalBuilder.ConnectionString; internal static void ResetGlobalMappings(bool overwrite) - => GlobalTypeMapper.Instance.AddGlobalTypeMappingResolvers(new IPgTypeInfoResolver[] + => GlobalTypeMapper.Instance.AddGlobalTypeMappingResolvers(new PgTypeInfoResolverFactory[] { - overwrite ? 
new AdoTypeInfoResolver() : AdoTypeInfoResolver.Instance, - new ExtraConversionsResolver(), - new JsonTypeInfoResolver(), - new RangeTypeInfoResolver(), - new RecordTypeInfoResolver(), - new FullTextSearchTypeInfoResolver(), - new NetworkTypeInfoResolver(), - new GeometricTypeInfoResolver(), - new LTreeTypeInfoResolver(), - - // Arrays - new AdoArrayTypeInfoResolver(), - new ExtraConversionsArrayTypeInfoResolver(), - new JsonArrayTypeInfoResolver(), - new RangeArrayTypeInfoResolver(), - new RecordArrayTypeInfoResolver(), - new FullTextSearchArrayTypeInfoResolver(), - new NetworkArrayTypeInfoResolver(), - new GeometricArrayTypeInfoResolver(), - new LTreeArrayTypeInfoResolver() + overwrite ? new AdoTypeInfoResolverFactory() : AdoTypeInfoResolverFactory.Instance, + new ExtraConversionResolverFactory(), + new JsonTypeInfoResolverFactory(), + new RecordTypeInfoResolverFactory(), + new FullTextSearchTypeInfoResolverFactory(), + new NetworkTypeInfoResolverFactory(), + new GeometricTypeInfoResolverFactory(), + new LTreeTypeInfoResolverFactory(), + }, static () => + { + var builder = new PgTypeInfoResolverChainBuilder(); + builder.EnableRanges(); + builder.EnableMultiranges(); + builder.EnableArrays(); + return builder; }, overwrite); static NpgsqlDataSourceBuilder() @@ -81,41 +75,13 @@ static NpgsqlDataSourceBuilder() public NpgsqlDataSourceBuilder(string? connectionString = null) { _internalBuilder = new(new NpgsqlConnectionStringBuilder(connectionString)); - AddDefaultFeatures(); - - void AddDefaultFeatures() - { - _internalBuilder.EnableTransportSecurity(); - _internalBuilder.EnableIntegratedSecurity(); - AddTypeInfoResolver(UnsupportedTypeInfoResolver); - - // Reverse order arrays. 
- AddTypeInfoResolver(new LTreeArrayTypeInfoResolver()); - AddTypeInfoResolver(new GeometricArrayTypeInfoResolver()); - AddTypeInfoResolver(new NetworkArrayTypeInfoResolver()); - AddTypeInfoResolver(new FullTextSearchArrayTypeInfoResolver()); - AddTypeInfoResolver(new RecordArrayTypeInfoResolver()); - AddTypeInfoResolver(new RangeArrayTypeInfoResolver()); - AddTypeInfoResolver(new JsonArrayTypeInfoResolver()); - AddTypeInfoResolver(new ExtraConversionsArrayTypeInfoResolver()); - AddTypeInfoResolver(new AdoArrayTypeInfoResolver()); - - // Reverse order. - AddTypeInfoResolver(new LTreeTypeInfoResolver()); - AddTypeInfoResolver(new GeometricTypeInfoResolver()); - AddTypeInfoResolver(new NetworkTypeInfoResolver()); - AddTypeInfoResolver(new FullTextSearchTypeInfoResolver()); - AddTypeInfoResolver(new RecordTypeInfoResolver()); - AddTypeInfoResolver(new RangeTypeInfoResolver()); - AddTypeInfoResolver(new JsonTypeInfoResolver()); - AddTypeInfoResolver(new ExtraConversionsResolver()); - AddTypeInfoResolver(AdoTypeInfoResolver.Instance); - - var plugins = new List(GlobalTypeMapper.Instance.GetPluginResolvers()); - plugins.Reverse(); - foreach (var plugin in plugins) - AddTypeInfoResolver(plugin); - } + _internalBuilder.EnableTransportSecurity(); + _internalBuilder.EnableIntegratedSecurity(); + _internalBuilder.ConfigureResolverChain = chain => chain.Add(UnsupportedTypeInfoResolver); + _internalBuilder.EnableRanges(); + _internalBuilder.EnableMultiranges(); + _internalBuilder.EnableArrays(); + ResetResolverFactories(); } /// @@ -271,12 +237,24 @@ public NpgsqlDataSourceBuilder UsePeriodicPasswordProvider( #region Type mapping /// - public void AddTypeInfoResolver(IPgTypeInfoResolver resolver) - => _internalBuilder.AddTypeInfoResolver(resolver); + public void AddTypeInfoResolverFactory(PgTypeInfoResolverFactory factory) + => _internalBuilder.AddTypeInfoResolverFactory(factory); /// void INpgsqlTypeMapper.Reset() - => _internalBuilder.ResetTypeMappings(); + => 
ResetResolverFactories(); + + void ResetResolverFactories() + { + _internalBuilder.ResetResolverFactories(); + _internalBuilder.AppendResolverFactory(new ExtraConversionResolverFactory()); + _internalBuilder.AppendResolverFactory(new JsonTypeInfoResolverFactory()); + _internalBuilder.AppendResolverFactory(new RecordTypeInfoResolverFactory()); + _internalBuilder.AppendResolverFactory(new FullTextSearchTypeInfoResolverFactory()); + _internalBuilder.AppendResolverFactory(new NetworkTypeInfoResolverFactory()); + _internalBuilder.AppendResolverFactory(new GeometricTypeInfoResolverFactory()); + _internalBuilder.AppendResolverFactory(new LTreeTypeInfoResolverFactory()); + } /// public INpgsqlTypeMapper MapEnum<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] TEnum>(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index 9b0b650003..da5feedee9 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -7,7 +7,7 @@ using System.Threading.Tasks; using Microsoft.Extensions.Logging; using Npgsql.Internal; -using Npgsql.Internal.Resolvers; +using Npgsql.Internal.ResolverFactories; using Npgsql.Properties; using Npgsql.TypeMapping; @@ -36,7 +36,8 @@ public sealed class NpgsqlSlimDataSourceBuilder : INpgsqlTypeMapper Func>? _periodicPasswordProvider; TimeSpan _periodicPasswordSuccessRefreshInterval, _periodicPasswordFailureRefreshInterval; - readonly List _resolverChain = new(); + PgTypeInfoResolverChainBuilder _resolverChainBuilder = new(); // mutable struct, don't make readonly. + readonly UserTypeMapper _userTypeMapper; Action? 
_syncConnectionInitializer; @@ -53,10 +54,7 @@ public sealed class NpgsqlSlimDataSourceBuilder : INpgsqlTypeMapper public string ConnectionString => ConnectionStringBuilder.ToString(); static NpgsqlSlimDataSourceBuilder() - => GlobalTypeMapper.Instance.AddGlobalTypeMappingResolvers(new [] - { - AdoTypeInfoResolver.Instance - }); + => GlobalTypeMapper.Instance.AddGlobalTypeMappingResolvers(new PgTypeInfoResolverFactory[] { new AdoTypeInfoResolverFactory() }); /// /// A diagnostics name used by Npgsql when generating tracing, logging and metrics. @@ -68,23 +66,14 @@ static NpgsqlSlimDataSourceBuilder() /// . /// public NpgsqlSlimDataSourceBuilder(string? connectionString = null) - { - ConnectionStringBuilder = new NpgsqlConnectionStringBuilder(connectionString); - _userTypeMapper = new() { DefaultNameTranslator = GlobalTypeMapper.Instance.DefaultNameTranslator }; - // Reverse order - AddTypeInfoResolver(UnsupportedTypeInfoResolver); - AddTypeInfoResolver(new AdoTypeInfoResolver()); - // When used publicly we start off with our slim defaults. - var plugins = new List(GlobalTypeMapper.Instance.GetPluginResolvers()); - plugins.Reverse(); - foreach (var plugin in plugins) - AddTypeInfoResolver(plugin); - } + : this(new NpgsqlConnectionStringBuilder(connectionString)) + => ResetResolverFactories(); internal NpgsqlSlimDataSourceBuilder(NpgsqlConnectionStringBuilder connectionStringBuilder) { ConnectionStringBuilder = connectionStringBuilder; - _userTypeMapper = new(); + _userTypeMapper = new() { DefaultNameTranslator = GlobalTypeMapper.Instance.DefaultNameTranslator }; + ConfigureResolverChain = chain => chain.Add(UnsupportedTypeInfoResolver); } /// @@ -318,32 +307,26 @@ public bool UnmapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMember Type clrType, string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) => _userTypeMapper.UnmapComposite(clrType, pgName, nameTranslator); - /// - /// Adds a type info resolver which can add or modify support for PostgreSQL types. - /// Typically used by plugins. - /// - /// The type resolver to be added. - public void AddTypeInfoResolver(IPgTypeInfoResolver resolver) - { - var type = resolver.GetType(); - - for (var i = 0; i < _resolverChain.Count; i++) - if (_resolverChain[i].GetType() == type) - { - _resolverChain.RemoveAt(i); - break; - } - _resolverChain.Insert(0, resolver); - } + /// + public void AddTypeInfoResolverFactory(PgTypeInfoResolverFactory factory) => _resolverChainBuilder.PrependResolverFactory(factory); void INpgsqlTypeMapper.Reset() - => ResetTypeMappings(); + => ResetResolverFactories(); + + internal Action> ConfigureResolverChain { get; set; } + internal void AppendResolverFactory(PgTypeInfoResolverFactory factory) => _resolverChainBuilder.AppendResolverFactory(factory); - internal void ResetTypeMappings() + internal void ResetResolverFactories() { - _resolverChain.Clear(); - _resolverChain.AddRange(GlobalTypeMapper.Instance.GetPluginResolvers()); + _resolverChainBuilder.Clear(); + // When used publicly we start off with our slim defaults. + _resolverChainBuilder.AppendResolverFactory(_userTypeMapper); + if (GlobalTypeMapper.Instance.GetUserMappingsResolverFactory() is { } userMappingsResolverFactory) + _resolverChainBuilder.AppendResolverFactory(userMappingsResolverFactory); + foreach (var factory in GlobalTypeMapper.Instance.GetPluginResolverFactories()) + _resolverChainBuilder.AppendResolverFactory(factory); + _resolverChainBuilder.AppendResolverFactory(new AdoTypeInfoResolverFactory()); } #endregion Type mapping @@ -356,14 +339,7 @@ internal void ResetTypeMappings() /// The same builder instance so that multiple calls can be chained. 
public NpgsqlSlimDataSourceBuilder EnableArrays() { - AddTypeInfoResolver(new LTreeArrayTypeInfoResolver()); - AddTypeInfoResolver(new GeometricArrayTypeInfoResolver()); - AddTypeInfoResolver(new NetworkArrayTypeInfoResolver()); - AddTypeInfoResolver(new FullTextSearchArrayTypeInfoResolver()); - AddTypeInfoResolver(new RecordArrayTypeInfoResolver()); - AddTypeInfoResolver(new RangeArrayTypeInfoResolver()); - AddTypeInfoResolver(new ExtraConversionsArrayTypeInfoResolver()); - AddTypeInfoResolver(new AdoArrayTypeInfoResolver()); + _resolverChainBuilder.EnableArrays(); return this; } @@ -373,7 +349,7 @@ public NpgsqlSlimDataSourceBuilder EnableArrays() /// The same builder instance so that multiple calls can be chained. public NpgsqlSlimDataSourceBuilder EnableRanges() { - AddTypeInfoResolver(new RangeTypeInfoResolver()); + _resolverChainBuilder.EnableRanges(); return this; } @@ -383,7 +359,7 @@ public NpgsqlSlimDataSourceBuilder EnableRanges() /// The same builder instance so that multiple calls can be chained. public NpgsqlSlimDataSourceBuilder EnableMultiranges() { - AddTypeInfoResolver(new RangeTypeInfoResolver()); + _resolverChainBuilder.EnableMultiranges(); return this; } @@ -393,7 +369,7 @@ public NpgsqlSlimDataSourceBuilder EnableMultiranges() /// The same builder instance so that multiple calls can be chained. public NpgsqlSlimDataSourceBuilder EnableRecords() { - AddTypeInfoResolver(new RecordTypeInfoResolver()); + AddTypeInfoResolverFactory(new RecordTypeInfoResolverFactory()); return this; } @@ -403,7 +379,7 @@ public NpgsqlSlimDataSourceBuilder EnableRecords() /// The same builder instance so that multiple calls can be chained. 
public NpgsqlSlimDataSourceBuilder EnableFullTextSearch() { - AddTypeInfoResolver(new FullTextSearchTypeInfoResolver()); + AddTypeInfoResolverFactory(new FullTextSearchTypeInfoResolverFactory()); return this; } @@ -413,7 +389,7 @@ public NpgsqlSlimDataSourceBuilder EnableFullTextSearch() /// The same builder instance so that multiple calls can be chained. public NpgsqlSlimDataSourceBuilder EnableLTree() { - AddTypeInfoResolver(new LTreeTypeInfoResolver()); + AddTypeInfoResolverFactory(new LTreeTypeInfoResolverFactory()); return this; } @@ -423,7 +399,7 @@ public NpgsqlSlimDataSourceBuilder EnableLTree() /// The same builder instance so that multiple calls can be chained. public NpgsqlSlimDataSourceBuilder EnableExtraConversions() { - AddTypeInfoResolver(new ExtraConversionsResolver()); + AddTypeInfoResolverFactory(new ExtraConversionResolverFactory()); return this; } @@ -546,27 +522,12 @@ _loggerFactory is null _periodicPasswordProvider, _periodicPasswordSuccessRefreshInterval, _periodicPasswordFailureRefreshInterval, - Resolvers(), + _resolverChainBuilder.Build(ConfigureResolverChain), HackyEnumMappings(), DefaultNameTranslator, _syncConnectionInitializer, _asyncConnectionInitializer); - IEnumerable Resolvers() - { - var resolvers = new List(); - - if (_userTypeMapper.Items.Count > 0) - resolvers.Add(_userTypeMapper.Build()); - - if (GlobalTypeMapper.Instance.GetUserMappingsResolver() is { } globalUserTypeMapper) - resolvers.Add(globalUserTypeMapper); - - resolvers.AddRange(_resolverChain); - - return resolvers; - } - List HackyEnumMappings() { var mappings = new List(); diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index bbf6d1e341..d294068b78 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -10,7 +10,7 @@ Npgsql.NpgsqlConnectionStringBuilder.ChannelBinding.get -> Npgsql.ChannelBinding Npgsql.NpgsqlConnectionStringBuilder.ChannelBinding.set -> void 
Npgsql.NpgsqlBinaryImporter.WriteRow(params object?[]! values) -> void Npgsql.NpgsqlBinaryImporter.WriteRowAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken), params object?[]! values) -> System.Threading.Tasks.Task! -Npgsql.NpgsqlDataSourceBuilder.AddTypeInfoResolver(Npgsql.Internal.IPgTypeInfoResolver! resolver) -> void +Npgsql.NpgsqlDataSourceBuilder.AddTypeInfoResolverFactory(Npgsql.Internal.PgTypeInfoResolverFactory! factory) -> void Npgsql.NpgsqlDataSourceBuilder.MapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! Npgsql.NpgsqlDataSourceBuilder.Name.get -> string? Npgsql.NpgsqlDataSourceBuilder.Name.set -> void @@ -18,7 +18,7 @@ Npgsql.NpgsqlDataSourceBuilder.UnmapEnum(System.Type! clrType, string? pgName = Npgsql.NpgsqlDataSourceBuilder.UseRootCertificate(System.Security.Cryptography.X509Certificates.X509Certificate2? rootCertificate) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.UseRootCertificateCallback(System.Func? rootCertificateCallback) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder -Npgsql.NpgsqlSlimDataSourceBuilder.AddTypeInfoResolver(Npgsql.Internal.IPgTypeInfoResolver! resolver) -> void +Npgsql.NpgsqlSlimDataSourceBuilder.AddTypeInfoResolverFactory(Npgsql.Internal.PgTypeInfoResolverFactory! factory) -> void Npgsql.NpgsqlSlimDataSourceBuilder.Build() -> Npgsql.NpgsqlDataSource! Npgsql.NpgsqlSlimDataSourceBuilder.BuildMultiHost() -> Npgsql.NpgsqlMultiHostDataSource! Npgsql.NpgsqlSlimDataSourceBuilder.ConnectionString.get -> string! @@ -61,7 +61,7 @@ Npgsql.Replication.PhysicalReplicationConnection.StartReplication(Npgsql.Replica Npgsql.Replication.PhysicalReplicationConnection.StartReplication(NpgsqlTypes.NpgsqlLogSequenceNumber walLocation, System.Threading.CancellationToken cancellationToken, uint timeline = 0) -> System.Collections.Generic.IAsyncEnumerable! 
Npgsql.Replication.PhysicalReplicationSlot.PhysicalReplicationSlot(string! slotName, NpgsqlTypes.NpgsqlLogSequenceNumber? restartLsn = null, uint? restartTimeline = null) -> void Npgsql.Replication.PhysicalReplicationSlot.RestartTimeline.get -> uint? -Npgsql.TypeMapping.INpgsqlTypeMapper.AddTypeInfoResolver(Npgsql.Internal.IPgTypeInfoResolver! resolver) -> void +Npgsql.TypeMapping.INpgsqlTypeMapper.AddTypeInfoResolverFactory(Npgsql.Internal.PgTypeInfoResolverFactory! factory) -> void Npgsql.TypeMapping.INpgsqlTypeMapper.MapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! Npgsql.TypeMapping.INpgsqlTypeMapper.UnmapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool Npgsql.TypeMapping.UserTypeMapping diff --git a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs index cad4b469e4..4725265735 100644 --- a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs +++ b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs @@ -12,19 +12,19 @@ namespace Npgsql.TypeMapping; sealed class GlobalTypeMapper : INpgsqlTypeMapper { readonly UserTypeMapper _userTypeMapper = new(); - readonly List _pluginResolvers = new(); + readonly List _pluginResolverFactories = new(); readonly ReaderWriterLockSlim _lock = new(); - IPgTypeInfoResolver[] _typeMappingResolvers = Array.Empty(); + PgTypeInfoResolverFactory[] _typeMappingResolvers = Array.Empty(); internal List HackyEnumTypeMappings { get; } = new(); - internal IEnumerable GetPluginResolvers() + internal IEnumerable GetPluginResolverFactories() { - var resolvers = new List(); + var resolvers = new List(); _lock.EnterReadLock(); try { - resolvers.AddRange(_pluginResolvers); + resolvers.AddRange(_pluginResolverFactories); } finally { @@ -34,12 +34,12 @@ internal IEnumerable GetPluginResolvers() return resolvers; } - internal IPgTypeInfoResolver? 
GetUserMappingsResolver() + internal PgTypeInfoResolverFactory? GetUserMappingsResolverFactory() { _lock.EnterReadLock(); try { - return _userTypeMapper.Items.Count > 0 ? _userTypeMapper.Build() : null; + return _userTypeMapper.Items.Count > 0 ? _userTypeMapper : null; } finally { @@ -47,12 +47,13 @@ internal IEnumerable GetPluginResolvers() } } - internal void AddGlobalTypeMappingResolvers(IPgTypeInfoResolver[] resolvers, bool overwrite = false) + internal void AddGlobalTypeMappingResolvers(PgTypeInfoResolverFactory[] factories, Func? builderFactory = null, bool overwrite = false) { // Good enough logic to prevent SlimBuilder overriding the normal Builder. - if (overwrite || resolvers.Length > _typeMappingResolvers.Length) + if (overwrite || factories.Length > _typeMappingResolvers.Length) { - _typeMappingResolvers = resolvers; + _builderFactory = builderFactory; + _typeMappingResolvers = factories; ResetTypeMappingCache(); } } @@ -60,6 +61,7 @@ internal void AddGlobalTypeMappingResolvers(IPgTypeInfoResolver[] resolvers, boo void ResetTypeMappingCache() => _typeMappingOptions = null; PgSerializerOptions? _typeMappingOptions; + Func? _builderFactory; PgSerializerOptions TypeMappingOptions { @@ -71,17 +73,19 @@ PgSerializerOptions TypeMappingOptions _lock.EnterReadLock(); try { - var resolvers = new List(); - resolvers.Add(_userTypeMapper.Build()); - resolvers.AddRange(_pluginResolvers); - resolvers.AddRange(_typeMappingResolvers); + var builder = _builderFactory?.Invoke() ?? new(); + builder.AppendResolverFactory(_userTypeMapper); + foreach (var factory in _pluginResolverFactories) + builder.AppendResolverFactory(factory); + foreach (var factory in _typeMappingResolvers) + builder.AppendResolverFactory(factory); return _typeMappingOptions = new(PostgresMinimalDatabaseInfo.DefaultTypeCatalog) { // This means we don't ever have a missing oid for a datatypename as our canonical format is datatypenames. 
PortableTypeIds = true, // Don't throw if our catalog doesn't know the datatypename. IntrospectionMode = true, - TypeInfoResolver = new TypeInfoResolverChain(resolvers) + TypeInfoResolver = new TypeInfoResolverChain(builder.Build()) }; } finally @@ -114,32 +118,28 @@ PgSerializerOptions TypeMappingOptions static GlobalTypeMapper() => Instance = new GlobalTypeMapper(); - /// - /// Adds a type info resolver which can add or modify support for PostgreSQL types. - /// Typically used by plugins. - /// - /// The type resolver to be added. - public void AddTypeInfoResolver(IPgTypeInfoResolver resolver) + /// + public void AddTypeInfoResolverFactory(PgTypeInfoResolverFactory factory) { _lock.EnterWriteLock(); try { - var type = resolver.GetType(); + var type = factory.GetType(); // Since EFCore.PG plugins (and possibly other users) repeatedly call NpgsqlConnection.GlobalTypeMapper.UseNodaTime, // we replace an existing resolver of the same CLR type. - if (_pluginResolvers.Count > 0 && _pluginResolvers[0].GetType() == type) - _pluginResolvers[0] = resolver; - for (var i = 0; i < _pluginResolvers.Count; i++) + if (_pluginResolverFactories.Count > 0 && _pluginResolverFactories[0].GetType() == type) + _pluginResolverFactories[0] = factory; + for (var i = 0; i < _pluginResolverFactories.Count; i++) { - if (_pluginResolvers[i].GetType() == type) + if (_pluginResolverFactories[i].GetType() == type) { - _pluginResolvers.RemoveAt(i); + _pluginResolverFactories.RemoveAt(i); break; } } - _pluginResolvers.Insert(0, resolver); + _pluginResolverFactories.Insert(0, factory); ResetTypeMappingCache(); } finally @@ -154,7 +154,7 @@ public void Reset() _lock.EnterWriteLock(); try { - _pluginResolvers.Clear(); + _pluginResolverFactories.Clear(); _userTypeMapper.Items.Clear(); HackyEnumTypeMappings.Clear(); } diff --git a/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs b/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs index 8e20a5f03e..678b61fd15 100644 --- 
a/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs +++ b/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs @@ -1,7 +1,5 @@ using System; using System.Diagnostics.CodeAnalysis; -using System.Text.Json; -using System.Text.Json.Nodes; using Npgsql.Internal; using Npgsql.NameTranslation; using NpgsqlTypes; @@ -192,11 +190,11 @@ bool UnmapComposite( INpgsqlNameTranslator? nameTranslator = null); /// - /// Adds a type info resolver which can add or modify support for PostgreSQL types. + /// Adds a type info resolver factory which can add or modify support for PostgreSQL types. /// Typically used by plugins. /// - /// The type resolver to be added. - void AddTypeInfoResolver(IPgTypeInfoResolver resolver); + /// The type resolver factory to be added. + void AddTypeInfoResolverFactory(PgTypeInfoResolverFactory factory); /// /// Resets all mapping changes performed on this type mapper and reverts it to its original, starting state. diff --git a/src/Npgsql/TypeMapping/INpgsqlTypeMapperExtensions.cs b/src/Npgsql/TypeMapping/INpgsqlTypeMapperExtensions.cs index c5f574960f..0efacd7687 100644 --- a/src/Npgsql/TypeMapping/INpgsqlTypeMapperExtensions.cs +++ b/src/Npgsql/TypeMapping/INpgsqlTypeMapperExtensions.cs @@ -2,7 +2,7 @@ using System.Diagnostics.CodeAnalysis; using System.Text.Json; using System.Text.Json.Nodes; -using Npgsql.Internal.Resolvers; +using Npgsql.Internal.ResolverFactories; using Npgsql.TypeMapping; using NpgsqlTypes; @@ -38,8 +38,7 @@ public static T EnableDynamicJson( Type[]? 
jsonClrTypes = null) where T : INpgsqlTypeMapper { - mapper.AddTypeInfoResolver(new JsonDynamicTypeInfoResolver(jsonbClrTypes, jsonClrTypes, serializerOptions)); - mapper.AddTypeInfoResolver(new JsonDynamicArrayTypeInfoResolver(jsonbClrTypes, jsonClrTypes, serializerOptions)); + mapper.AddTypeInfoResolverFactory(new JsonDynamicTypeInfoResolverFactory(jsonbClrTypes, jsonClrTypes, serializerOptions)); return mapper; } @@ -51,8 +50,7 @@ public static T EnableDynamicJson( [RequiresDynamicCode("The mapping of PostgreSQL records as .NET tuples requires dynamic code usage which is incompatible with NativeAOT.")] public static T EnableRecordsAsTuples(this T mapper) where T : INpgsqlTypeMapper { - mapper.AddTypeInfoResolver(new TupledRecordTypeInfoResolver()); - mapper.AddTypeInfoResolver(new TupledRecordArrayTypeInfoResolver()); + mapper.AddTypeInfoResolverFactory(new TupledRecordTypeInfoResolverFactory()); return mapper; } @@ -64,14 +62,7 @@ public static T EnableRecordsAsTuples(this T mapper) where T : INpgsqlTypeMap [RequiresDynamicCode("The use of unmapped enums, ranges or multiranges requires dynamic code usage which is incompatible with NativeAOT.")] public static T EnableUnmappedTypes(this T mapper) where T : INpgsqlTypeMapper { - mapper.AddTypeInfoResolver(new UnmappedEnumTypeInfoResolver()); - mapper.AddTypeInfoResolver(new UnmappedRangeTypeInfoResolver()); - mapper.AddTypeInfoResolver(new UnmappedMultirangeTypeInfoResolver()); - - mapper.AddTypeInfoResolver(new UnmappedEnumArrayTypeInfoResolver()); - mapper.AddTypeInfoResolver(new UnmappedRangeArrayTypeInfoResolver()); - mapper.AddTypeInfoResolver(new UnmappedMultirangeArrayTypeInfoResolver()); - + mapper.AddTypeInfoResolverFactory(new UnmappedTypeInfoResolverFactory()); return mapper; } } diff --git a/src/Npgsql/TypeMapping/UserTypeMapper.cs b/src/Npgsql/TypeMapping/UserTypeMapper.cs index e3c03040fe..627ecbd37b 100644 --- a/src/Npgsql/TypeMapping/UserTypeMapper.cs +++ b/src/Npgsql/TypeMapping/UserTypeMapper.cs 
@@ -29,10 +29,11 @@ public abstract class UserTypeMapping internal UserTypeMapping(string pgTypeName, Type type) => (PgTypeName, ClrType) = (pgTypeName, type); - internal abstract void Build(TypeInfoMappingCollection mappings); + internal abstract void AddMapping(TypeInfoMappingCollection mappings); + internal abstract void AddArrayMapping(TypeInfoMappingCollection mappings); } -sealed class UserTypeMapper +sealed class UserTypeMapper : PgTypeInfoResolverFactory { readonly List _mappings; public IList Items => _mappings; @@ -144,21 +145,46 @@ static string GetPgName(Type type, INpgsqlNameTranslator nameTranslator) => type.GetCustomAttribute()?.PgName ?? nameTranslator.TranslateTypeName(type.Name); - public IPgTypeInfoResolver Build() + public override IPgTypeInfoResolver CreateResolver() => new Resolver(new(_mappings)); + public override IPgTypeInfoResolver CreateArrayResolver() => new ArrayResolver(new(_mappings)); + + class Resolver : IPgTypeInfoResolver { - var infoMappings = new TypeInfoMappingCollection(); - foreach (var mapping in _mappings) - mapping.Build(infoMappings); + protected readonly List _userTypeMappings; + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new()); + + public Resolver(List userTypeMappings) => _userTypeMappings = userTypeMappings; - return new UserMappingResolver(infoMappings); + PgTypeInfo? IPgTypeInfoResolver.GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + foreach (var userTypeMapping in _userTypeMappings) + userTypeMapping.AddMapping(mappings); + + return mappings; + } } - sealed class UserMappingResolver : IPgTypeInfoResolver + sealed class ArrayResolver : Resolver, IPgTypeInfoResolver { - readonly TypeInfoMappingCollection _mappings; - public UserMappingResolver(TypeInfoMappingCollection mappings) => _mappings = mappings; + TypeInfoMappingCollection? _mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); + + public ArrayResolver(List userTypeMappings) : base(userTypeMappings) { } + PgTypeInfo? IPgTypeInfoResolver.GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) - => _mappings.Find(type, dataTypeName, options); + => Mappings.Find(type, dataTypeName, options); + + TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + foreach (var userTypeMapping in _userTypeMappings) + userTypeMapping.AddArrayMapping(mappings); + + return mappings; + } } [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. 
This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] @@ -170,7 +196,7 @@ public CompositeMapping(string pgTypeName, INpgsqlNameTranslator nameTranslator) : base(pgTypeName, typeof(T)) => _nameTranslator = nameTranslator; - internal override void Build(TypeInfoMappingCollection mappings) + internal override void AddMapping(TypeInfoMappingCollection mappings) { mappings.AddType(PgTypeName, (options, mapping, _) => { @@ -181,9 +207,9 @@ internal override void Build(TypeInfoMappingCollection mappings) return mapping.CreateInfo(options, new CompositeConverter( ReflectionCompositeInfoFactory.CreateCompositeInfo(compositeType, _nameTranslator, options))); }, isDefault: true); - // TODO this should be split out so we can enjoy EnableArray trimming. - mappings.AddArrayType(PgTypeName); } + + internal override void AddArrayMapping(TypeInfoMappingCollection mappings) => mappings.AddArrayType(PgTypeName); } [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] @@ -195,7 +221,7 @@ public StructCompositeMapping(string pgTypeName, INpgsqlNameTranslator nameTrans : base(pgTypeName, typeof(T)) => _nameTranslator = nameTranslator; - internal override void Build(TypeInfoMappingCollection mappings) + internal override void AddMapping(TypeInfoMappingCollection mappings) { mappings.AddStructType(PgTypeName, (options, mapping, dataTypeNameMatch) => { @@ -206,9 +232,9 @@ internal override void Build(TypeInfoMappingCollection mappings) return mapping.CreateInfo(options, new CompositeConverter( ReflectionCompositeInfoFactory.CreateCompositeInfo(compositeType, _nameTranslator, options))); }, isDefault: true); - // TODO this should be split out so we can enjoy EnableArray trimming. 
- mappings.AddStructArrayType(PgTypeName); } + + internal override void AddArrayMapping(TypeInfoMappingCollection mappings) => mappings.AddStructArrayType(PgTypeName); } internal abstract class EnumMapping : UserTypeMapping @@ -242,14 +268,11 @@ public EnumMapping(string pgTypeName, INpgsqlNameTranslator nameTranslator) } } - internal override void Build(TypeInfoMappingCollection mappings) - { - mappings.AddStructType(PgTypeName, (options, mapping, _) => + internal override void AddMapping(TypeInfoMappingCollection mappings) + => mappings.AddStructType(PgTypeName, (options, mapping, _) => mapping.CreateInfo(options, new EnumConverter(_enumToLabel, _labelToEnum, options.TextEncoding), preferredFormat: DataFormat.Text), isDefault: true); - // TODO this should be split out so we can enjoy EnableArray trimming. - mappings.AddStructArrayType(PgTypeName); - } + internal override void AddArrayMapping(TypeInfoMappingCollection mappings) => mappings.AddStructArrayType(PgTypeName); } } diff --git a/test/Npgsql.Tests/GlobalTypeMapperTests.cs b/test/Npgsql.Tests/GlobalTypeMapperTests.cs index 29ab55b4c1..a5c75e41bf 100644 --- a/test/Npgsql.Tests/GlobalTypeMapperTests.cs +++ b/test/Npgsql.Tests/GlobalTypeMapperTests.cs @@ -103,7 +103,7 @@ public async Task Reset() public void Reset_and_add_resolver() { NpgsqlConnection.GlobalTypeMapper.Reset(); - NpgsqlConnection.GlobalTypeMapper.AddTypeInfoResolver(new DummyResolver()); + NpgsqlConnection.GlobalTypeMapper.AddTypeInfoResolverFactory(new DummyResolverFactory()); } [TearDown] @@ -112,8 +112,14 @@ public void Teardown() enum Mood { Sad, Ok, Happy } - class DummyResolver : IPgTypeInfoResolver + class DummyResolverFactory : PgTypeInfoResolverFactory { - public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) => null; + public override IPgTypeInfoResolver CreateResolver() => new DummyResolver(); + public override IPgTypeInfoResolver? 
CreateArrayResolver() => null; + + class DummyResolver : IPgTypeInfoResolver + { + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) => null; + } } } diff --git a/test/Npgsql.Tests/ReaderTests.cs b/test/Npgsql.Tests/ReaderTests.cs index 2f59075a10..0ef71bb1a8 100644 --- a/test/Npgsql.Tests/ReaderTests.cs +++ b/test/Npgsql.Tests/ReaderTests.cs @@ -1746,7 +1746,7 @@ public async Task SafeReadException() { var dataSourceBuilder = CreateDataSourceBuilder(); // Temporarily reroute integer to go to a type handler which generates SafeReadExceptions - dataSourceBuilder.AddTypeInfoResolver(new ExplodingTypeHandlerResolver(safe: true)); + dataSourceBuilder.AddTypeInfoResolverFactory(new ExplodingTypeHandlerResolverFactory(safe: true)); await using var dataSource = dataSourceBuilder.Build(); await using var connection = await dataSource.OpenConnectionAsync(); @@ -1763,7 +1763,7 @@ public async Task Non_SafeReadException() { var dataSourceBuilder = CreateDataSourceBuilder(); // Temporarily reroute integer to go to a type handler which generates some exception - dataSourceBuilder.AddTypeInfoResolver(new ExplodingTypeHandlerResolver(safe: false)); + dataSourceBuilder.AddTypeInfoResolverFactory(new ExplodingTypeHandlerResolverFactory(safe: false)); await using var dataSource = dataSourceBuilder.Build(); await using var connection = await dataSource.OpenConnectionAsync(); @@ -2249,17 +2249,26 @@ public ReaderTests(MultiplexingMode multiplexingMode, CommandBehavior behavior) #region Mock Type Handlers -class ExplodingTypeHandlerResolver : IPgTypeInfoResolver +sealed class ExplodingTypeHandlerResolverFactory : PgTypeInfoResolverFactory { readonly bool _safe; - public ExplodingTypeHandlerResolver(bool safe) => _safe = safe; + public ExplodingTypeHandlerResolverFactory(bool safe) => _safe = safe; - public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + public override IPgTypeInfoResolver CreateResolver() => new Resolver(_safe); + public override IPgTypeInfoResolver? CreateArrayResolver() => null; + + sealed class Resolver : IPgTypeInfoResolver { - if (dataTypeName == DataTypeNames.Int4 && (type == typeof(int) || type is null)) - return new(options, new ExplodingTypeHandler(_safe), DataTypeNames.Int4); + readonly bool _safe; + public Resolver(bool safe) => _safe = safe; + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + { + if (dataTypeName == DataTypeNames.Int4 && (type == typeof(int) || type is null)) + return new(options, new ExplodingTypeHandler(_safe), DataTypeNames.Int4); - return null; + return null; + } } } diff --git a/test/Npgsql.Tests/TypeMapperTests.cs b/test/Npgsql.Tests/TypeMapperTests.cs index 92db0bdea1..d0d1e36587 100644 --- a/test/Npgsql.Tests/TypeMapperTests.cs +++ b/test/Npgsql.Tests/TypeMapperTests.cs @@ -49,7 +49,7 @@ public async Task String_to_citext() await EnsureExtensionAsync(adminConnection, "citext"); var dataSourceBuilder = CreateDataSourceBuilder(); - dataSourceBuilder.AddTypeInfoResolver(new CitextToStringTypeHandlerResolverFactory()); + dataSourceBuilder.AddTypeInfoResolverFactory(new CitextToStringTypeHandlerResolverFactory()); await using var dataSource = dataSourceBuilder.Build(); await using var connection = await dataSource.OpenConnectionAsync(); @@ -90,16 +90,23 @@ await conn.ExecuteNonQueryAsync(@$" #region Support - class CitextToStringTypeHandlerResolverFactory : IPgTypeInfoResolver + class CitextToStringTypeHandlerResolverFactory : PgTypeInfoResolverFactory { - public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) - { - if (type == typeof(string) || dataTypeName?.UnqualifiedName == "citext") - if (options.DatabaseInfo.TryGetPostgresTypeByName("citext", out var pgType)) - return new(options, new StringTextConverter(options.TextEncoding), options.ToCanonicalTypeId(pgType)); + public override IPgTypeInfoResolver CreateResolver() => new Resolver(); + public override IPgTypeInfoResolver? CreateArrayResolver() => null; - return null; + sealed class Resolver : IPgTypeInfoResolver + { + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + { + if (type == typeof(string) || dataTypeName?.UnqualifiedName == "citext") + if (options.DatabaseInfo.TryGetPostgresTypeByName("citext", out var pgType)) + return new(options, new StringTextConverter(options.TextEncoding), options.ToCanonicalTypeId(pgType)); + + return null; + } } + } enum Mood { Sad, Ok, Happy } diff --git a/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs b/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs index 0aa88a456d..5016525268 100644 --- a/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs +++ b/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs @@ -1,7 +1,7 @@ using System; using System.Data; using System.Threading.Tasks; -using Npgsql.Internal.Resolvers; +using Npgsql.Internal.ResolverFactories; using NpgsqlTypes; using NUnit.Framework; using static Npgsql.Util.Statics; @@ -60,8 +60,7 @@ public void Setup() _dataSource = CreateDataSource(builder => { // Can't use the static AdoTypeInfoResolver instance, it already captured the feature flag. 
- builder.AddTypeInfoResolver(new AdoTypeInfoResolver()); - builder.AddTypeInfoResolver(new AdoArrayTypeInfoResolver()); + builder.AddTypeInfoResolverFactory(new AdoTypeInfoResolverFactory()); builder.ConnectionStringBuilder.Timezone = "Europe/Berlin"; }); NpgsqlDataSourceBuilder.ResetGlobalMappings(overwrite: true); From f262fd1601b08c51e64d35effd3ec6db69dd45dd Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Tue, 14 Nov 2023 21:30:15 +0300 Subject: [PATCH 297/761] Optimize NpgsqlConnectionStringBuilder AOT size (#4943) --- .../NpgsqlConnectionStringBuilder.snbtxt | 132 +++++++----------- ...lConnectionStringBuilderSourceGenerator.cs | 33 ++++- src/Npgsql/NpgsqlConnectionStringBuilder.cs | 56 +++++--- 3 files changed, 113 insertions(+), 108 deletions(-) diff --git a/src/Npgsql.SourceGenerators/NpgsqlConnectionStringBuilder.snbtxt b/src/Npgsql.SourceGenerators/NpgsqlConnectionStringBuilder.snbtxt index fae3d00e6c..9ad343124c 100644 --- a/src/Npgsql.SourceGenerators/NpgsqlConnectionStringBuilder.snbtxt +++ b/src/Npgsql.SourceGenerators/NpgsqlConnectionStringBuilder.snbtxt @@ -4,7 +4,7 @@ using System.Collections.Generic; #nullable disable #pragma warning disable CS1591 // Missing XML comment for publicly visible type or member #pragma warning disable RS0016 // Add public types and members to the declared API -#pragma warning disable 618 // Member is obsolete +#pragma warning disable CS0618 // Member is obsolete namespace Npgsql { @@ -13,18 +13,18 @@ namespace Npgsql private partial int Init() { // Set the strongly-typed properties to their default values - {{ + {{~ for p in properties if p.is_obsolete continue end if (p.default_value != null) - }} + ~}} {{ p.name }} = {{ p.default_value }}; - {{ + {{~ end - end }} + end ~}} // Setting the strongly-typed properties here also set the string-based properties in the base class. 
// Clear them (default settings = empty connection string) @@ -33,91 +33,59 @@ namespace Npgsql return 0; } - private partial int GeneratedSetter(string keyword, object value) + private partial bool GeneratedActions(GeneratedAction action, string keyword, ref object value) { switch (keyword) { - {{ for kv in properties_by_keyword }} + {{~ for kv in properties_by_keyword ~}} case "{{ kv.key }}": - {{ p = kv.value }} - {{ if p.is_enum }} + {{~ for alternative in kv.value.alternatives ~}} + case "{{ alternative }}": + {{~ end ~}} { - {{ p.name }} = value is string s - ? ({{ p.type_name }})Enum.Parse(typeof({{ p.type_name }}), s, ignoreCase: true) - : ({{ p.type_name }})Convert.ChangeType(value, typeof({{ p.type_name }})); - } - {{ else }} - {{ p.name }} = ({{ p.type_name }})Convert.ChangeType(value, typeof({{ p.type_name }})); - {{ end }} - break; - {{ end }} - - default: - throw new KeyNotFoundException(); - } - - return 0; - } - - private partial bool TryGetValueGenerated(string keyword, out object value) - { - switch (keyword) - { - {{ for kv in properties_by_keyword }} - case "{{ kv.key }}": - {{ p = kv.value }} - value = (object){{ p.name }} ?? ""; + {{~ p = kv.value ~}} + const string canonicalName = "{{ p.canonical_name }}"; + switch(action) + { + case GeneratedAction.Remove: + var removed = base.ContainsKey(canonicalName); + {{~ if p.default_value == null ~}} + {{ p.name }} = default; + {{~ else ~}} + {{ p.name }} = {{ p.default_value }}; + {{~ end ~}} + {{~ if p.type_name != "String" ~}} + base.Remove(canonicalName); + {{~ else ~}} + // String property setters call SetValue, which itself calls base.Remove(). + {{~ end ~}} + return removed; + case GeneratedAction.Set: + {{~ if p.is_enum ~}} + {{ p.name }} = ({{ p.type_name }})GetValue(typeof({{ p.type_name }}), value); + {{~ else ~}} + {{ p.name }} = ({{ p.type_name }})Convert.ChangeType(value, typeof({{ p.type_name }})); + {{~ end ~}} + break; + case GeneratedAction.Get: + value = (object){{ p.name }} ?? 
""; + break; + case GeneratedAction.GetCanonical: + value = canonicalName; + break; + } return true; - {{ end }} } - - value = null; - return false; - } - - private partial bool ContainsKeyGenerated(string keyword) - => keyword switch - { - {{ for kv in properties_by_keyword }} - "{{ kv.key }}" => true, - {{ end }} - - _ => false - }; - - private partial bool RemoveGenerated(string keyword) - { - switch (keyword) - { - {{ for kv in properties_by_keyword }} - case "{{ kv.key }}": - { - {{ p = kv.value }} - var removed = base.ContainsKey("{{ p.canonical_name }}"); - // Note that string property setters call SetValue, which itself calls base.Remove(). - {{ if p.default_value == null }} - {{ p.name }} = default; - {{ else }} - {{ p.name }} = {{ p.default_value }}; - {{ end }} - base.Remove("{{ p.canonical_name }}"); - return removed; - } - {{ end }} - - default: - throw new KeyNotFoundException(); + {{~ end ~}} } + if (action is GeneratedAction.Get or GeneratedAction.GetCanonical) + return false; + throw new KeyNotFoundException(); + + static object GetValue(Type type, object value) + => value is string s + ? 
Enum.Parse(type, s, ignoreCase: true) + : Convert.ChangeType(value, type); } - - private partial string ToCanonicalKeyword(string keyword) - => keyword switch - { - {{ for kv in properties_by_keyword }} - "{{ kv.key }}" => "{{ kv.value.canonical_name }}", - {{ end }} - - _ => throw new KeyNotFoundException() - }; } } diff --git a/src/Npgsql.SourceGenerators/NpgsqlConnectionStringBuilderSourceGenerator.cs b/src/Npgsql.SourceGenerators/NpgsqlConnectionStringBuilderSourceGenerator.cs index a25495a40a..665789e74e 100644 --- a/src/Npgsql.SourceGenerators/NpgsqlConnectionStringBuilderSourceGenerator.cs +++ b/src/Npgsql.SourceGenerators/NpgsqlConnectionStringBuilderSourceGenerator.cs @@ -1,3 +1,4 @@ +using System; using System.Collections.Generic; using System.Linq; using System.Text; @@ -89,11 +90,24 @@ public void Execute(GeneratorExecutionContext context) propertiesByKeyword[displayName.ToUpperInvariant()] = propertyDetails; if (property.Name != displayName) - propertiesByKeyword[property.Name.ToUpperInvariant()] = propertyDetails; + { + var propertyName = property.Name.ToUpperInvariant(); + if (!propertiesByKeyword.ContainsKey(propertyName)) + propertyDetails.Alternatives.Add(propertyName); + } + if (propertyAttribute.ConstructorArguments.Length == 1) + { foreach (var synonymArg in propertyAttribute.ConstructorArguments[0].Values) + { if (synonymArg.Value is string synonym) - propertiesByKeyword[synonym.ToUpperInvariant()] = propertyDetails; + { + var synonymName = synonym.ToUpperInvariant(); + if (!propertiesByKeyword.ContainsKey(synonymName)) + propertyDetails.Alternatives.Add(synonymName); + } + } + } } var template = Template.Parse(EmbeddedResource.GetContent("NpgsqlConnectionStringBuilder.snbtxt"), "NpgsqlConnectionStringBuilder.snbtxt"); @@ -115,5 +129,18 @@ sealed class PropertyDetails public bool IsEnum { get; set; } public bool IsObsolete { get; set; } public object? 
DefaultValue { get; set; } + + public HashSet Alternatives { get; } = new(StringComparer.Ordinal); + + public PropertyDetails Clone() + => new() + { + Name = Name, + CanonicalName = CanonicalName, + TypeName = TypeName, + IsEnum = IsEnum, + IsObsolete = IsObsolete, + DefaultValue = DefaultValue + }; } -} \ No newline at end of file +} diff --git a/src/Npgsql/NpgsqlConnectionStringBuilder.cs b/src/Npgsql/NpgsqlConnectionStringBuilder.cs index 86f0edaa28..b4b038ed24 100644 --- a/src/Npgsql/NpgsqlConnectionStringBuilder.cs +++ b/src/Npgsql/NpgsqlConnectionStringBuilder.cs @@ -63,6 +63,19 @@ public NpgsqlConnectionStringBuilder(string? connectionString) // Method fake-returns an int only to make sure it's code-generated private partial int Init(); + /// + /// GeneratedAction and GeneratedActions exist to be able to produce a streamlined binary footprint for NativeAOT. + /// An idiomatic approach where each action has its own method would double the binary size of NpgsqlConnectionStringBuilder. + /// + enum GeneratedAction + { + Set, + Get, + Remove, + GetCanonical + } + private partial bool GeneratedActions(GeneratedAction action, string keyword, ref object? value); + #endregion #region Non-static property handling @@ -91,7 +104,8 @@ public override object this[string keyword] try { - GeneratedSetter(keyword.ToUpperInvariant(), value); + var val = value; + GeneratedActions(GeneratedAction.Set, keyword.ToUpperInvariant(), ref val); } catch (Exception e) { @@ -100,9 +114,6 @@ public override object this[string keyword] } } - // Method fake-returns an int only to make sure it's code-generated - private partial int GeneratedSetter(string keyword, object? value); - object? IDictionary.this[string keyword] { get => this[keyword]; @@ -125,9 +136,10 @@ public void Add(KeyValuePair item) /// The key of the key/value pair to be removed from the connection string in this DbConnectionStringBuilder. 
/// true if the key existed within the connection string and was removed; false if the key did not exist. public override bool Remove(string keyword) - => RemoveGenerated(keyword.ToUpperInvariant()); - - private partial bool RemoveGenerated(string keyword); + { + object? value = null; + return GeneratedActions(GeneratedAction.Remove, keyword.ToUpperInvariant(), ref value); + } /// /// Removes the entry from the DbConnectionStringBuilder instance. @@ -153,11 +165,10 @@ public override void Clear() /// The key to locate in the . /// true if the contains an entry with the specified key; otherwise false. public override bool ContainsKey(string keyword) - => keyword is null - ? throw new ArgumentNullException(nameof(keyword)) - : ContainsKeyGenerated(keyword.ToUpperInvariant()); - - private partial bool ContainsKeyGenerated(string keyword); + { + object? value = null; + return GeneratedActions(GeneratedAction.GetCanonical, (keyword ?? throw new ArgumentNullException(nameof(keyword))).ToUpperInvariant(), ref value); + } /// /// Determines whether the contains a specific key-value pair. @@ -176,25 +187,24 @@ public bool Contains(KeyValuePair item) /// true if keyword was found within the connection string, false otherwise. public override bool TryGetValue(string keyword, [NotNullWhen(true)] out object? value) { - if (keyword == null) - throw new ArgumentNullException(nameof(keyword)); - - return TryGetValueGenerated(keyword.ToUpperInvariant(), out value); + object? v = null; + var result = GeneratedActions(GeneratedAction.Get, (keyword ?? throw new ArgumentNullException(nameof(keyword))).ToUpperInvariant(), ref v); + value = v; + return result; } - private partial bool TryGetValueGenerated(string keyword, [NotNullWhen(true)] out object? value); - void SetValue(string propertyName, object? value) { - var canonicalKeyword = ToCanonicalKeyword(propertyName.ToUpperInvariant()); + object? 
canonicalKeyword = null; + var result = GeneratedActions(GeneratedAction.GetCanonical, (propertyName ?? throw new ArgumentNullException(nameof(propertyName))).ToUpperInvariant(), ref canonicalKeyword); + if (!result) + throw new KeyNotFoundException(); if (value == null) - base.Remove(canonicalKeyword); + base.Remove((string)canonicalKeyword!); else - base[canonicalKeyword] = value; + base[(string)canonicalKeyword!] = value; } - private partial string ToCanonicalKeyword(string keyword); - #endregion #region Properties - Connection From dd2ed815b3a76f6f7e955b08d801dac5104af836 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Tue, 14 Nov 2023 20:28:55 +0100 Subject: [PATCH 298/761] Bump SDK and dependencies to 8.0 GA (#5397) --- .github/workflows/build.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/native-aot.yml | 2 +- .github/workflows/rich-code-nav.yml | 2 +- Directory.Packages.props | 2 +- global.json | 2 +- src/Npgsql.GeoJSON/Internal/GeoJSONConverter.cs | 3 ++- 7 files changed, 8 insertions(+), 7 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index fef4b3176c..6bf4ed6070 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -15,7 +15,7 @@ concurrency: cancel-in-progress: true env: - dotnet_sdk_version: '8.0.100-rc.2.23502.2' + dotnet_sdk_version: '8.0.100' postgis_version: 3 DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true # Windows comes with PG pre-installed, and defines the PGPASSWORD environment variable. 
Remove it as it interferes diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index ce5b43bac7..0e721ebc22 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -32,7 +32,7 @@ concurrency: cancel-in-progress: true env: - dotnet_sdk_version: '8.0.100-rc.2.23502.2' + dotnet_sdk_version: '8.0.100' DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true jobs: diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index 5ac384bf1c..6ff04ffa5f 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -15,7 +15,7 @@ concurrency: cancel-in-progress: true env: - dotnet_sdk_version: '8.0.100-rc.2.23502.2' + dotnet_sdk_version: '8.0.100' DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true # Uncomment and edit the following to use nightly/preview builds # nuget_config: | diff --git a/.github/workflows/rich-code-nav.yml b/.github/workflows/rich-code-nav.yml index 1fd1f31349..1990c8ff78 100644 --- a/.github/workflows/rich-code-nav.yml +++ b/.github/workflows/rich-code-nav.yml @@ -9,7 +9,7 @@ on: - '*' env: - dotnet_sdk_version: '8.0.100-rc.2.23502.2' + dotnet_sdk_version: '8.0.100' DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true jobs: diff --git a/Directory.Packages.props b/Directory.Packages.props index 80082c149c..7134d16d4b 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -1,6 +1,6 @@ - 8.0.0-rc.2.23479.6 + 8.0.0 $(SystemVersion) diff --git a/global.json b/global.json index cfe92c4218..c4fc1c4611 100644 --- a/global.json +++ b/global.json @@ -2,6 +2,6 @@ "sdk": { "version": "8.0.100", "rollForward": "latestMajor", - "allowPrerelease": "true" + "allowPrerelease": "false" } } diff --git a/src/Npgsql.GeoJSON/Internal/GeoJSONConverter.cs b/src/Npgsql.GeoJSON/Internal/GeoJSONConverter.cs index 6384ec748c..8e7692384e 100644 --- a/src/Npgsql.GeoJSON/Internal/GeoJSONConverter.cs +++ b/src/Npgsql.GeoJSON/Internal/GeoJSONConverter.cs @@ -294,7 +294,8 @@ double 
ReadDouble(bool littleEndian) { if (littleEndian) { - var value = BinaryPrimitives.ReverseEndianness(Unsafe.As(ref Unsafe.AsRef(reader.ReadDouble()))); + var doubleValue = reader.ReadDouble(); + var value = BinaryPrimitives.ReverseEndianness(Unsafe.As(ref doubleValue)); return Unsafe.As(ref value); } From c047ecacd6b711d20dcfa5e82cb12e77f8b0acf0 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Wed, 15 Nov 2023 17:05:57 +0100 Subject: [PATCH 299/761] Fix typo in RDC explanation (#5398) --- src/Npgsql/NpgsqlDataSourceBuilder.cs | 8 ++++---- src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 8 ++++---- src/Npgsql/TypeMapping/GlobalTypeMapper.cs | 8 ++++---- src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs | 8 ++++---- src/Npgsql/TypeMapping/UserTypeMapper.cs | 10 +++++----- 5 files changed, 21 insertions(+), 21 deletions(-) diff --git a/src/Npgsql/NpgsqlDataSourceBuilder.cs b/src/Npgsql/NpgsqlDataSourceBuilder.cs index 5b92677e4a..0ea6fe733d 100644 --- a/src/Npgsql/NpgsqlDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlDataSourceBuilder.cs @@ -281,7 +281,7 @@ public bool UnmapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes => _internalBuilder.UnmapEnum(clrType, pgName, nameTranslator); /// - [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] public INpgsqlTypeMapper MapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>( string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) { @@ -290,7 +290,7 @@ public bool UnmapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes } /// - [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] public INpgsqlTypeMapper MapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) { @@ -299,13 +299,13 @@ public INpgsqlTypeMapper MapComposite([DynamicallyAccessedMembers(DynamicallyAcc } /// - [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] public bool UnmapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>( string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) => _internalBuilder.UnmapComposite(pgName, nameTranslator); /// - [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. 
This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] public bool UnmapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) => _internalBuilder.UnmapComposite(clrType, pgName, nameTranslator); diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index da5feedee9..fc04078871 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -278,7 +278,7 @@ public bool UnmapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes => _userTypeMapper.UnmapEnum(clrType, pgName, nameTranslator); /// - [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] public INpgsqlTypeMapper MapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>( string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) { @@ -287,13 +287,13 @@ public bool UnmapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes } /// - [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] public bool UnmapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>( string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) => _userTypeMapper.UnmapComposite(typeof(T), pgName, nameTranslator); /// - [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] public INpgsqlTypeMapper MapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) { @@ -302,7 +302,7 @@ public INpgsqlTypeMapper MapComposite([DynamicallyAccessedMembers(DynamicallyAcc } /// - [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. 
This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] public bool UnmapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) => _userTypeMapper.UnmapComposite(clrType, pgName, nameTranslator); diff --git a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs index 4725265735..8d07d73335 100644 --- a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs +++ b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs @@ -251,17 +251,17 @@ public bool UnmapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes } /// - [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] public INpgsqlTypeMapper MapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>(string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) => MapComposite(typeof(T), pgName, nameTranslator); /// - [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] public bool UnmapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) => UnmapComposite(typeof(T), pgName, nameTranslator); /// - [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] public INpgsqlTypeMapper MapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) { @@ -279,7 +279,7 @@ public INpgsqlTypeMapper MapComposite([DynamicallyAccessedMembers(DynamicallyAcc } /// - [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. 
This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] public bool UnmapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) { diff --git a/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs b/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs index 678b61fd15..23642121d8 100644 --- a/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs +++ b/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs @@ -125,7 +125,7 @@ bool UnmapEnum( /// Defaults to . /// /// The .NET type to be mapped - [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] INpgsqlTypeMapper MapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>( string? pgName = null, INpgsqlNameTranslator? nameTranslator = null); @@ -141,7 +141,7 @@ bool UnmapEnum( /// A component which will be used to translate CLR names (e.g. SomeClass) into database names (e.g. some_class). 
/// Defaults to /// - [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] bool UnmapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>( string? pgName = null, INpgsqlNameTranslator? nameTranslator = null); @@ -165,7 +165,7 @@ bool UnmapEnum( /// A component which will be used to translate CLR names (e.g. SomeClass) into database names (e.g. some_class). /// Defaults to . /// - [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] INpgsqlTypeMapper MapComposite( [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] Type clrType, string? pgName = null, @@ -183,7 +183,7 @@ INpgsqlTypeMapper MapComposite( /// A component which will be used to translate CLR names (e.g. SomeClass) into database names (e.g. some_class). /// Defaults to . /// - [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. 
This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] bool UnmapComposite( [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] Type clrType, string? pgName = null, diff --git a/src/Npgsql/TypeMapping/UserTypeMapper.cs b/src/Npgsql/TypeMapping/UserTypeMapper.cs index 627ecbd37b..35fabb90fe 100644 --- a/src/Npgsql/TypeMapping/UserTypeMapper.cs +++ b/src/Npgsql/TypeMapping/UserTypeMapper.cs @@ -79,7 +79,7 @@ public bool UnmapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes return Unmap(clrType, out _, pgName, nameTranslator ?? DefaultNameTranslator); } - [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] public UserTypeMapper MapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicProperties)] T>( string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) where T : class { @@ -88,7 +88,7 @@ public bool UnmapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes return this; } - [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] public UserTypeMapper MapStructComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicProperties)] T>( string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) where T : struct { @@ -98,7 +98,7 @@ public bool UnmapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes } [UnconditionalSuppressMessage("Trimming", "IL2111", Justification = "MapStructComposite and MapComposite have identical DAM annotations to clrType.")] - [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] public UserTypeMapper MapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] Type clrType, string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) { @@ -187,7 +187,7 @@ TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) } } - [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] sealed class CompositeMapping<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicProperties)] T> : UserTypeMapping where T : class { readonly INpgsqlNameTranslator _nameTranslator; @@ -212,7 +212,7 @@ internal override void AddMapping(TypeInfoMappingCollection mappings) internal override void AddArrayMapping(TypeInfoMappingCollection mappings) => mappings.AddArrayType(PgTypeName); } - [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types, requiring require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. 
This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] sealed class StructCompositeMapping<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicProperties)] T> : UserTypeMapping where T : struct { readonly INpgsqlNameTranslator _nameTranslator; From aeb44f9b8fdfd57ec729ec14852c35dad710c305 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Wed, 15 Nov 2023 22:14:47 +0100 Subject: [PATCH 300/761] Cleanup fully qualified name lookup code (#5311) --- src/Npgsql/Internal/TypeInfoMapping.cs | 95 +++++++++++++++++--------- 1 file changed, 61 insertions(+), 34 deletions(-) diff --git a/src/Npgsql/Internal/TypeInfoMapping.cs b/src/Npgsql/Internal/TypeInfoMapping.cs index 8a4d4bc2e6..75af8ec41d 100644 --- a/src/Npgsql/Internal/TypeInfoMapping.cs +++ b/src/Npgsql/Internal/TypeInfoMapping.cs @@ -129,28 +129,39 @@ public TypeInfoMappingCollection(IEnumerable items) var typeMatch = type is not null && looseTypeMatch; var dataTypeMatch = dataTypeName is not null && mapping.DataTypeNameEquals(dataTypeName.Value.Value); - switch (mapping.MatchRequirement) + var matchRequirement = mapping.MatchRequirement; + if (dataTypeMatch && typeMatch + || matchRequirement is not MatchRequirement.All && dataTypeMatch && looseTypeMatch + || matchRequirement is MatchRequirement.Single && dataTypeName is null && typeMatch) { - case var _ when dataTypeMatch && typeMatch: - case not MatchRequirement.All when dataTypeMatch && looseTypeMatch: - case MatchRequirement.Single when dataTypeName is null && looseTypeMatch: - var resolvedMapping = mapping with - { - Type = type ?? mapping.Type, - // Make sure plugins (which match on unqualified names) and resolvers get the fully qualified name to canonicalize. - DataTypeName = dataTypeName is not null ? 
dataTypeName.GetValueOrDefault().Value : mapping.DataTypeName - }; - return resolvedMapping.Factory(options, resolvedMapping, dataTypeName is not null); - // DataTypeName is explicitly requiring dataTypeName so it won't be used for a fallback, Single would have matched above already. - case MatchRequirement.All when fallback is null && dataTypeName is null && typeMatch: - fallback = mapping.TypeMatchPredicate is not null ? mapping with { Type = type! } : mapping; - break; - default: - continue; + var resolvedDataTypeName = ResolveFullyQualifiedDataTypeName(dataTypeName, mapping.DataTypeName, options); + return mapping.Factory(options, mapping with { Type = type ?? mapping.Type, DataTypeName = resolvedDataTypeName }, dataTypeName is not null); } + + // DataTypeName is explicitly requiring dataTypeName so it won't be used for a fallback, Single would have matched above already. + if (matchRequirement is MatchRequirement.All && fallback is null && dataTypeName is null && typeMatch) + fallback = mapping; + } + + if (fallback is { } fbMapping) + { + var resolvedDataTypeName = ResolveFullyQualifiedDataTypeName(dataTypeName, fbMapping.DataTypeName, options); + return fbMapping.Factory(options, fbMapping with { Type = type!, DataTypeName = resolvedDataTypeName }, dataTypeName is not null); } - return fallback?.Factory(options, fallback.Value, dataTypeName is not null); + return null; + + static string ResolveFullyQualifiedDataTypeName(DataTypeName? dataTypeName, string mappingDataTypeName, PgSerializerOptions options) + { + // Make sure plugins (which match on unqualified names) and converter resolvers get the fully qualified name to canonicalize. 
+ if (dataTypeName is not null) + return dataTypeName.GetValueOrDefault().Value; + + if (TypeInfoMappingHelpers.TryResolveFullyQualifiedName(options, mappingDataTypeName, out var fqDataTypeName)) + return fqDataTypeName.Value; + + throw new NotSupportedException($"Cannot resolve '{mappingDataTypeName}' to a fully qualified datatype name. The datatype was not found in the current database info."); + } } bool TryGetMapping(Type type, string dataTypeName, out TypeInfoMapping value) @@ -177,13 +188,17 @@ TypeInfoMapping GetMapping(Type type, string dataTypeName) static TypeInfoFactory CreateComposedFactory(Type mappingType, TypeInfoMapping innerMapping, Func mapper, bool copyPreferredFormat = false, bool supportsWriting = true) => (options, mapping, dataTypeNameMatch) => { - var innerInfo = innerMapping.Factory(options, innerMapping, dataTypeNameMatch); + var resolvedInnerMapping = innerMapping; + if (!DataTypeName.IsFullyQualified(innerMapping.DataTypeName.AsSpan())) + resolvedInnerMapping = innerMapping with { DataTypeName = new DataTypeName(mapping.DataTypeName).Schema + "." + innerMapping.DataTypeName }; + + var innerInfo = innerMapping.Factory(options, resolvedInnerMapping, dataTypeNameMatch); var converter = mapper(mapping, innerInfo); var preferredFormat = copyPreferredFormat ? 
innerInfo.PreferredFormat : null; var writingSupported = supportsWriting && innerInfo.SupportsWriting; var unboxedType = ComputeUnboxedType(defaultType: mappingType, converter.TypeToConvert, mapping.Type); - return new PgTypeInfo(options, converter, TypeInfoMappingHelpers.ResolveFullyQualifiedName(options, mapping.DataTypeName), unboxedType) + return new PgTypeInfo(options, converter, options.GetCanonicalTypeId(new DataTypeName(mapping.DataTypeName)), unboxedType) { PreferredFormat = preferredFormat, SupportsWriting = writingSupported @@ -194,7 +209,11 @@ static TypeInfoFactory CreateComposedFactory(Type mappingType, TypeInfoMapping i static TypeInfoFactory CreateComposedFactory(Type mappingType, TypeInfoMapping innerMapping, Func mapper, bool copyPreferredFormat = false, bool supportsWriting = true) => (options, mapping, dataTypeNameMatch) => { - var innerInfo = (PgResolverTypeInfo)innerMapping.Factory(options, innerMapping, dataTypeNameMatch); + var resolvedInnerMapping = innerMapping; + if (!DataTypeName.IsFullyQualified(innerMapping.DataTypeName.AsSpan())) + resolvedInnerMapping = innerMapping with { DataTypeName = new DataTypeName(mapping.DataTypeName).Schema + "." + innerMapping.DataTypeName }; + + var innerInfo = (PgResolverTypeInfo)innerMapping.Factory(options, resolvedInnerMapping, dataTypeNameMatch); var resolver = mapper(mapping, innerInfo); var preferredFormat = copyPreferredFormat ? innerInfo.PreferredFormat : null; var writingSupported = supportsWriting && innerInfo.SupportsWriting; @@ -202,7 +221,7 @@ static TypeInfoFactory CreateComposedFactory(Type mappingType, TypeInfoMapping i // We include the data type name if the inner info did so as well. // This way we can rely on its logic around resolvedDataTypeName, including when it ignores that flag. PgTypeId? pgTypeId = innerInfo.PgTypeId is not null - ? TypeInfoMappingHelpers.ResolveFullyQualifiedName(options, mapping.DataTypeName) + ? 
options.GetCanonicalTypeId(new DataTypeName(mapping.DataTypeName)) : null; return new PgResolverTypeInfo(options, resolver, pgTypeId, unboxedType) { @@ -689,30 +708,38 @@ static void ThrowBoxingNotSupported(bool resolver) public static class TypeInfoMappingHelpers { - internal static PgTypeId ResolveFullyQualifiedName(PgSerializerOptions options, string dataTypeName) - => !DataTypeName.IsFullyQualified(dataTypeName.AsSpan()) - ? options.ToCanonicalTypeId(options.DatabaseInfo.GetPostgresType(dataTypeName)) - : new(new DataTypeName(dataTypeName)); + internal static bool TryResolveFullyQualifiedName(PgSerializerOptions options, string dataTypeName, out DataTypeName fqDataTypeName) + { + if (DataTypeName.IsFullyQualified(dataTypeName.AsSpan())) + { + fqDataTypeName = new DataTypeName(dataTypeName); + return true; + } + + if (options.DatabaseInfo.TryGetPostgresTypeByName(dataTypeName, out var pgType)) + { + fqDataTypeName = pgType.DataTypeName; + return true; + } + + fqDataTypeName = default; + return false; + } internal static PostgresType GetPgType(this TypeInfoMapping mapping, PgSerializerOptions options) - => !DataTypeName.IsFullyQualified(mapping.DataTypeName.AsSpan()) - ? options.DatabaseInfo.GetPostgresType(mapping.DataTypeName) - : options.DatabaseInfo.GetPostgresType(new DataTypeName(mapping.DataTypeName)); + => options.DatabaseInfo.GetPostgresType(new DataTypeName(mapping.DataTypeName)); public static PgTypeInfo CreateInfo(this TypeInfoMapping mapping, PgSerializerOptions options, PgConverter converter, DataFormat? 
preferredFormat = null, bool supportsWriting = true) - => new(options, converter, ResolveFullyQualifiedName(options, mapping.DataTypeName)) + => new(options, converter, new DataTypeName(mapping.DataTypeName)) { PreferredFormat = preferredFormat, SupportsWriting = supportsWriting }; public static PgResolverTypeInfo CreateInfo(this TypeInfoMapping mapping, PgSerializerOptions options, PgConverterResolver resolver, bool includeDataTypeName = true, DataFormat? preferredFormat = null, bool supportsWriting = true) - { - PgTypeId? pgTypeId = includeDataTypeName ? ResolveFullyQualifiedName(options, mapping.DataTypeName) : null; - return new(options, resolver, pgTypeId) + => new(options, resolver, includeDataTypeName ? new DataTypeName(mapping.DataTypeName) : null) { PreferredFormat = preferredFormat, SupportsWriting = supportsWriting }; - } } From a3a635036db477072f040ca3e7cd03be7b8480c5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 15 Nov 2023 21:32:32 +0000 Subject: [PATCH 301/761] Bump Microsoft.SourceLink.GitHub from 1.1.1 to 8.0.0 (#5401) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 7134d16d4b..e0905f2e6b 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -25,7 +25,7 @@ - + From 7b16e9d8c5e7a398b97eb28149943d688b44a2c9 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Thu, 16 Nov 2023 02:15:40 +0100 Subject: [PATCH 302/761] Add missed ShouldBuffer (#5403) --- src/Npgsql/Internal/Converters/ArrayConverter.cs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/Npgsql/Internal/Converters/ArrayConverter.cs b/src/Npgsql/Internal/Converters/ArrayConverter.cs index c9dc2add87..71c9c60fe3 100644 --- a/src/Npgsql/Internal/Converters/ArrayConverter.cs +++ b/src/Npgsql/Internal/Converters/ArrayConverter.cs @@ -135,6 +135,9 @@ sealed class WriteState : MultiWriteState public async 
ValueTask Read(bool async, PgReader reader, CancellationToken cancellationToken = default) { + if (reader.ShouldBuffer(sizeof(int) + sizeof(int) + sizeof(uint))) + await reader.Buffer(async, sizeof(int) + sizeof(int) + sizeof(uint), cancellationToken).ConfigureAwait(false); + var dimensions = reader.ReadInt32(); var containsNulls = reader.ReadInt32() is 1; _ = reader.ReadUInt32(); // Element OID. From 23a0b73f2a96d7e0443b4ed49d4a4fb84cb16fea Mon Sep 17 00:00:00 2001 From: Mark Pflug Date: Wed, 15 Nov 2023 17:32:57 -0800 Subject: [PATCH 303/761] Fix GetColumnSchema when called via IDbColumnSchemaGenerator interface. (#5404) --- src/Npgsql/NpgsqlDataReader.cs | 2 +- test/Npgsql.Tests/ReaderNewSchemaTests.cs | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 5d9b83c5a6..5bed6bf176 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -1792,7 +1792,7 @@ ReadOnlyCollection IDbColumnSchemaGenerator.GetColumnSchema() var columns = GetColumnSchema(); var result = new DbColumn[columns.Count]; var i = 0; - foreach (var column in result) + foreach (var column in columns) result[i++] = column; return new ReadOnlyCollection(result); diff --git a/test/Npgsql.Tests/ReaderNewSchemaTests.cs b/test/Npgsql.Tests/ReaderNewSchemaTests.cs index 7489bae711..01e46cdd06 100644 --- a/test/Npgsql.Tests/ReaderNewSchemaTests.cs +++ b/test/Npgsql.Tests/ReaderNewSchemaTests.cs @@ -1,5 +1,6 @@ using System.Collections.ObjectModel; using System.Data; +using System.Data.Common; using System.Linq; using System.Threading.Tasks; using Npgsql.PostgresTypes; @@ -742,6 +743,25 @@ public async Task With_parameter_without_value() Assert.That(columns[0].ColumnName, Is.EqualTo("foo")); } + [Test] + public async Task GetColumnSchema_via_interface() + { + await using var conn = await OpenConnectionAsync(); + var table = await CreateTempTable(conn, "foo INTEGER"); + + using 
var cmd = new NpgsqlCommand($"SELECT foo FROM {table} WHERE foo > @p", conn) + { + Parameters = { new() { ParameterName = "p", NpgsqlDbType = NpgsqlTypes.NpgsqlDbType.Integer } } + }; + await using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SchemaOnly | CommandBehavior.KeyInfo); + + var iface = (IDbColumnSchemaGenerator)reader; + var schema = iface.GetColumnSchema(); + Assert.NotNull(schema); + Assert.AreEqual(1, schema.Count); + Assert.NotNull(schema[0]); + } + #region Not supported [Test] From b4cd2972339e55682dee6c0b73926f6b75f5d1e5 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Thu, 16 Nov 2023 21:05:33 +0100 Subject: [PATCH 304/761] Use RowDescription.Count in favor of _numColumns (#5408) We were accessing _numColumns which won't be set if a DataRow was not processed before ending on NextResult Fixes #5406 --- src/Npgsql/NpgsqlDataReader.cs | 36 +++++++++++++++------------------- 1 file changed, 16 insertions(+), 20 deletions(-) diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 5bed6bf176..9e15dd2257 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -64,11 +64,6 @@ public sealed class NpgsqlDataReader : DbDataReader, IDbColumnSchemaGenerator /// internal int StatementIndex { get; private set; } - /// - /// The number of columns in the current row - /// - int _numColumns; - /// /// Records, for each column, its starting offset and length in the current row. /// Used only in non-sequential mode. @@ -101,6 +96,8 @@ public sealed class NpgsqlDataReader : DbDataReader, IDbColumnSchemaGenerator /// internal RowDescriptionMessage? RowDescription; + int ColumnCount => RowDescription!.Count; + /// /// Stores the last converter info resolved by column, to speed up repeated reading. 
/// @@ -353,7 +350,7 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo if (statementIndex >= 0) { if (RowDescription is { } description && statements[statementIndex].IsPrepared && ColumnInfoCache is { } cache) - description.SetColumnInfoCache(new(cache, 0, _numColumns)); + description.SetColumnInfoCache(new(cache, 0, ColumnCount)); if (statementIndex is 0 && _behavior.HasFlag(CommandBehavior.SingleResult) && !isConsuming) { @@ -417,13 +414,13 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo if (RowDescription is not null) { - if (ColumnInfoCache?.Length >= RowDescription.Count) - Array.Clear(ColumnInfoCache, 0, RowDescription.Count); + if (ColumnInfoCache?.Length >= ColumnCount) + Array.Clear(ColumnInfoCache, 0, ColumnCount); else { if (ColumnInfoCache is { } cache) ArrayPool.Shared.Return(cache, clearArray: true); - ColumnInfoCache = ArrayPool.Shared.Rent(RowDescription.Count); + ColumnInfoCache = ArrayPool.Shared.Rent(ColumnCount); } if (statement.IsPrepared) RowDescription.LoadColumnInfoCache(Connector.SerializerOptions, ColumnInfoCache); @@ -580,7 +577,7 @@ async ValueTask ConsumeResultSet(bool async) var statement = _statements[StatementIndex]; if (statement.IsPrepared && ColumnInfoCache is not null) - RowDescription!.SetColumnInfoCache(new(ColumnInfoCache, 0, _numColumns)); + RowDescription!.SetColumnInfoCache(new(ColumnInfoCache, 0, ColumnCount)); if (statement.AppendErrorBarrier ?? 
Command.EnableErrorBarriers) Expect(await Connector.ReadMessage(async).ConfigureAwait(false), Connector); @@ -734,13 +731,13 @@ async Task NextResultSchemaOnly(bool async, bool isConsuming = false, Canc // Found a resultset if (RowDescription is not null) { - if (ColumnInfoCache?.Length >= RowDescription.Count) - Array.Clear(ColumnInfoCache, 0, RowDescription.Count); + if (ColumnInfoCache?.Length >= ColumnCount) + Array.Clear(ColumnInfoCache, 0, ColumnCount); else { if (ColumnInfoCache is { } cache) ArrayPool.Shared.Return(cache, clearArray: true); - ColumnInfoCache = ArrayPool.Shared.Rent(RowDescription.Count); + ColumnInfoCache = ArrayPool.Shared.Rent(ColumnCount); } return true; } @@ -807,8 +804,8 @@ internal void ProcessMessage(IBackendMessage msg) Buffer = Connector.ReadBuffer; // We assume that the row's number of columns is identical to the description's var numColumns = Buffer.ReadInt16(); - Debug.Assert(numColumns == RowDescription!.Count, - $"Row's number of columns ({numColumns}) differs from the row description's ({RowDescription.Count})"); + if (ColumnCount != numColumns) + ThrowHelper.ThrowArgumentException($"Row's number of columns ({numColumns}) differs from the row description's ({ColumnCount})"); var readPosition = Buffer.ReadPosition; var msgRemainder = dataRow.Length - sizeof(short); @@ -823,7 +820,6 @@ internal void ProcessMessage(IBackendMessage msg) switch (State) { case ReaderState.BetweenResults: - _numColumns = numColumns; _hasRows = true; State = ReaderState.BeforeResult; break; @@ -1810,7 +1806,7 @@ public Task> GetColumnSchemaAsync(Cancellatio => GetColumnSchema(async: true, cancellationToken); Task> GetColumnSchema(bool async, CancellationToken cancellationToken = default) - => RowDescription == null || RowDescription.Count == 0 + => RowDescription == null || ColumnCount == 0 ? 
Task.FromResult(new List().AsReadOnly()) : new DbColumnSchemaGenerator(_connection!, RowDescription, _behavior.HasFlag(CommandBehavior.KeyInfo)) .GetColumnSchema(async, cancellationToken); @@ -2124,7 +2120,7 @@ async Task ConsumeRowSequential(bool async) else PgReader.Commit(resuming: false); // Skip over the remaining columns in the row - for (; _column < _numColumns - 1; _column++) + for (; _column < ColumnCount - 1; _column++) { await Buffer.Ensure(4, async).ConfigureAwait(false); var len = Buffer.ReadInt32(); @@ -2183,12 +2179,12 @@ T DbNullValueOrThrow(int ordinal) DataFormat GetInfo(int ordinal, Type type, out PgConverter converter, out Size bufferRequirement, out bool asObject) { var state = State; - if (state is not ReaderState.InResult || (uint)ordinal > (uint)_numColumns) + if (state is not ReaderState.InResult || (uint)ordinal > (uint)ColumnCount) { Unsafe.SkipInit(out converter); Unsafe.SkipInit(out bufferRequirement); Unsafe.SkipInit(out asObject); - HandleInvalidState(state, _numColumns); + HandleInvalidState(state, ColumnCount); Debug.Fail("Should never get here"); } From 69ebb3b8eb8704bf7eb04d974f892e193b340fda Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Fri, 17 Nov 2023 18:50:42 +0100 Subject: [PATCH 305/761] Add uncached password provider support (#5399) Closes #5186 --- src/Npgsql/NpgsqlDataSource.cs | 53 ++++++++++++++----- src/Npgsql/NpgsqlDataSourceBuilder.cs | 24 +++++++++ src/Npgsql/NpgsqlDataSourceConfiguration.cs | 2 + src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 52 +++++++++++++++--- .../Properties/NpgsqlStrings.Designer.cs | 12 +++++ src/Npgsql/Properties/NpgsqlStrings.resx | 6 +++ src/Npgsql/PublicAPI.Unshipped.txt | 2 + test/Npgsql.Tests/AuthenticationTests.cs | 43 +++++++++++++++ 8 files changed, 173 insertions(+), 21 deletions(-) diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index 9ca100e0db..90cb82c1a5 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -3,7 +3,6 @@ 
using System.Data.Common; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; -using System.Linq; using System.Net.Security; using System.Security.Cryptography.X509Certificates; using System.Threading; @@ -44,6 +43,8 @@ public abstract class NpgsqlDataSource : DbDataSource internal RemoteCertificateValidationCallback? UserCertificateValidationCallback { get; } internal Action? ClientCertificatesCallback { get; } + readonly Func? _passwordProvider; + readonly Func>? _passwordProviderAsync; readonly Func>? _periodicPasswordProvider; readonly TimeSpan _periodicPasswordSuccessRefreshInterval, _periodicPasswordFailureRefreshInterval; @@ -52,7 +53,7 @@ public abstract class NpgsqlDataSource : DbDataSource internal Action? ConnectionInitializer { get; } internal Func? ConnectionInitializerAsync { get; } - readonly Timer? _passwordProviderTimer; + readonly Timer? _periodicPasswordProviderTimer; readonly CancellationTokenSource? _timerPasswordProviderCancellationTokenSource; readonly Task _passwordRefreshTask = null!; string? _password; @@ -101,6 +102,8 @@ internal NpgsqlDataSource( IntegratedSecurityHandler, UserCertificateValidationCallback, ClientCertificatesCallback, + _passwordProvider, + _passwordProviderAsync, _periodicPasswordProvider, _periodicPasswordSuccessRefreshInterval, _periodicPasswordFailureRefreshInterval, @@ -112,6 +115,8 @@ internal NpgsqlDataSource( = dataSourceConfig; _connectionLogger = LoggingConfiguration.ConnectionLogger; + Debug.Assert(_passwordProvider is null || _passwordProviderAsync is not null); + // TODO probably want this on the options so it can devirt unconditionally. _resolver = new TypeInfoResolverChain(resolverChain); _password = settings.Password; @@ -123,7 +128,7 @@ internal NpgsqlDataSource( _timerPasswordProviderCancellationTokenSource = new(); // Create the timer, but don't start it; the manual run below will will schedule the first refresh. 
- _passwordProviderTimer = new Timer(state => _ = RefreshPassword(), null, Timeout.InfiniteTimeSpan, Timeout.InfiniteTimeSpan); + _periodicPasswordProviderTimer = new Timer(state => _ = RefreshPassword(), null, Timeout.InfiniteTimeSpan, Timeout.InfiniteTimeSpan); // Trigger the first refresh attempt right now, outside the timer; this allows us to capture the Task so it can be observed // in GetPasswordAsync. _passwordRefreshTask = Task.Run(RefreshPassword); @@ -293,28 +298,48 @@ public string Password { set { - if (_periodicPasswordProvider is not null) + if (_passwordProvider is not null || _periodicPasswordProvider is not null) throw new NotSupportedException(NpgsqlStrings.CannotSetBothPasswordProviderAndPassword); _password = value; } } - internal async ValueTask GetPassword(bool async, CancellationToken cancellationToken = default) + internal ValueTask GetPassword(bool async, CancellationToken cancellationToken = default) { + if (_passwordProvider is not null) + return GetPassword(async, cancellationToken); + // A periodic password provider is configured, but the first refresh hasn't completed yet (race condition). - // Wait until it completes. if (_password is null && _periodicPasswordProvider is not null) + return GetInitialPeriodicPassword(async); + + return new(_password); + + async ValueTask GetInitialPeriodicPassword(bool async) { if (async) await _passwordRefreshTask.ConfigureAwait(false); else _passwordRefreshTask.GetAwaiter().GetResult(); - Debug.Assert(_password is not null); + + return _password; } - return _password; + async ValueTask GetPassword(bool async, CancellationToken cancellationToken) + { + try + { + return async ? 
await _passwordProviderAsync!(Settings, cancellationToken).ConfigureAwait(false) : _passwordProvider(Settings); + } + catch (Exception e) + { + _connectionLogger.LogError(e, "Password provider threw an exception"); + + throw new NpgsqlException("An exception was thrown from the password provider", e); + } + } } async Task RefreshPassword() @@ -323,13 +348,13 @@ async Task RefreshPassword() { _password = await _periodicPasswordProvider!(Settings, _timerPasswordProviderCancellationTokenSource!.Token).ConfigureAwait(false); - _passwordProviderTimer!.Change(_periodicPasswordSuccessRefreshInterval, Timeout.InfiniteTimeSpan); + _periodicPasswordProviderTimer!.Change(_periodicPasswordSuccessRefreshInterval, Timeout.InfiniteTimeSpan); } catch (Exception e) { _connectionLogger.LogError(e, "Periodic password provider threw an exception"); - _passwordProviderTimer!.Change(_periodicPasswordFailureRefreshInterval, Timeout.InfiniteTimeSpan); + _periodicPasswordProviderTimer!.Change(_periodicPasswordFailureRefreshInterval, Timeout.InfiniteTimeSpan); throw new NpgsqlException("An exception was thrown from the periodic password provider", e); } @@ -448,7 +473,7 @@ protected virtual void DisposeBase() cancellationTokenSource.Dispose(); } - _passwordProviderTimer?.Dispose(); + _periodicPasswordProviderTimer?.Dispose(); _setupMappingsSemaphore.Dispose(); MetricsReporter.Dispose(); // TODO: This is probably too early, dispose only when all connections have been closed? 
@@ -475,12 +500,12 @@ protected virtual async ValueTask DisposeAsyncBase() cancellationTokenSource.Dispose(); } - if (_passwordProviderTimer is not null) + if (_periodicPasswordProviderTimer is not null) { #if NET5_0_OR_GREATER - await _passwordProviderTimer.DisposeAsync().ConfigureAwait(false); + await _periodicPasswordProviderTimer.DisposeAsync().ConfigureAwait(false); #else - _passwordProviderTimer.Dispose(); + _periodicPasswordProviderTimer.Dispose(); #endif } diff --git a/src/Npgsql/NpgsqlDataSourceBuilder.cs b/src/Npgsql/NpgsqlDataSourceBuilder.cs index 0ea6fe733d..6aa9d7ef44 100644 --- a/src/Npgsql/NpgsqlDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlDataSourceBuilder.cs @@ -232,6 +232,30 @@ public NpgsqlDataSourceBuilder UsePeriodicPasswordProvider( return this; } + /// + /// Configures a password provider, which is called by the data source when opening connections. + /// + /// + /// A callback that may be invoked during which returns the password to be sent to PostgreSQL. + /// + /// + /// A callback that may be invoked during which returns the password to be sent to PostgreSQL. + /// + /// The same builder instance so that multiple calls can be chained. + /// + /// + /// The provided callback is invoked when opening connections. Therefore its important the callback internally depends on cached + /// data or returns quickly otherwise. Any unnecessary delay will affect connection opening time. + /// + /// + public NpgsqlDataSourceBuilder UsePasswordProvider( + Func? passwordProvider, + Func>? 
passwordProviderAsync) + { + _internalBuilder.UsePasswordProvider(passwordProvider, passwordProviderAsync); + return this; + } + #endregion Authentication #region Type mapping diff --git a/src/Npgsql/NpgsqlDataSourceConfiguration.cs b/src/Npgsql/NpgsqlDataSourceConfiguration.cs index 9fbfdb94f5..eae6a3c19d 100644 --- a/src/Npgsql/NpgsqlDataSourceConfiguration.cs +++ b/src/Npgsql/NpgsqlDataSourceConfiguration.cs @@ -14,6 +14,8 @@ sealed record NpgsqlDataSourceConfiguration(string? Name, IntegratedSecurityHandler userCertificateValidationCallback, RemoteCertificateValidationCallback? UserCertificateValidationCallback, Action? ClientCertificatesCallback, + Func? PasswordProvider, + Func>? PasswordProviderAsync, Func>? PeriodicPasswordProvider, TimeSpan PeriodicPasswordSuccessRefreshInterval, TimeSpan PeriodicPasswordFailureRefreshInterval, diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index fc04078871..38bbd4a3e7 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -33,6 +33,9 @@ public sealed class NpgsqlSlimDataSourceBuilder : INpgsqlTypeMapper IntegratedSecurityHandler _integratedSecurityHandler = new(); + Func? _passwordProvider; + Func>? _passwordProviderAsync; + Func>? _periodicPasswordProvider; TimeSpan _periodicPasswordSuccessRefreshInterval, _periodicPasswordFailureRefreshInterval; @@ -40,8 +43,8 @@ public sealed class NpgsqlSlimDataSourceBuilder : INpgsqlTypeMapper readonly UserTypeMapper _userTypeMapper; - Action? _syncConnectionInitializer; - Func? _asyncConnectionInitializer; + Action? _connectionInitializer; + Func? _connectionInitializerAsync; /// /// A connection string builder that can be used to configured the connection string on the builder. @@ -239,6 +242,34 @@ public NpgsqlSlimDataSourceBuilder UsePeriodicPasswordProvider( return this; } + /// + /// Configures a password provider, which is called by the data source when opening connections. 
+ /// + /// + /// A callback that may be invoked during which returns the password to be sent to PostgreSQL. + /// + /// + /// A callback that may be invoked during which returns the password to be sent to PostgreSQL. + /// + /// The same builder instance so that multiple calls can be chained. + /// + /// + /// The provided callback is invoked when opening connections. Therefore its important the callback internally depends on cached + /// data or returns quickly otherwise. Any unnecessary delay will affect connection opening time. + /// + /// + public NpgsqlSlimDataSourceBuilder UsePasswordProvider( + Func? passwordProvider, + Func>? passwordProviderAsync) + { + if (passwordProvider is null != passwordProviderAsync is null) + throw new ArgumentException(NpgsqlStrings.SyncAndAsyncPasswordProvidersRequired); + + _passwordProvider = passwordProvider; + _passwordProviderAsync = passwordProviderAsync; + return this; + } + #endregion Authentication #region Type mapping @@ -455,8 +486,8 @@ public NpgsqlSlimDataSourceBuilder UsePhysicalConnectionInitializer( if (connectionInitializer is null != connectionInitializerAsync is null) throw new ArgumentException(NpgsqlStrings.SyncAndAsyncConnectionInitializersRequired); - _syncConnectionInitializer = connectionInitializer; - _asyncConnectionInitializer = connectionInitializerAsync; + _connectionInitializer = connectionInitializer; + _connectionInitializerAsync = connectionInitializerAsync; return this; } @@ -504,7 +535,12 @@ NpgsqlDataSourceConfiguration PrepareConfiguration() throw new InvalidOperationException(NpgsqlStrings.TransportSecurityDisabled); } - if (_periodicPasswordProvider is not null && + if (_passwordProvider is not null && _periodicPasswordProvider is not null) + { + throw new NotSupportedException(NpgsqlStrings.CannotSetMultiplePasswordProviderKinds); + } + + if ((_passwordProvider is not null || _periodicPasswordProvider is not null) && (ConnectionStringBuilder.Password is not null || 
ConnectionStringBuilder.Passfile is not null)) { throw new NotSupportedException(NpgsqlStrings.CannotSetBothPasswordProviderAndPassword); @@ -519,14 +555,16 @@ _loggerFactory is null _integratedSecurityHandler, _userCertificateValidationCallback, _clientCertificatesCallback, + _passwordProvider, + _passwordProviderAsync, _periodicPasswordProvider, _periodicPasswordSuccessRefreshInterval, _periodicPasswordFailureRefreshInterval, _resolverChainBuilder.Build(ConfigureResolverChain), HackyEnumMappings(), DefaultNameTranslator, - _syncConnectionInitializer, - _asyncConnectionInitializer); + _connectionInitializer, + _connectionInitializerAsync); List HackyEnumMappings() { diff --git a/src/Npgsql/Properties/NpgsqlStrings.Designer.cs b/src/Npgsql/Properties/NpgsqlStrings.Designer.cs index e9eb4810a4..d09510d783 100644 --- a/src/Npgsql/Properties/NpgsqlStrings.Designer.cs +++ b/src/Npgsql/Properties/NpgsqlStrings.Designer.cs @@ -93,6 +93,18 @@ internal static string CannotSetBothPasswordProviderAndPassword { } } + internal static string CannotSetMultiplePasswordProviderKinds { + get { + return ResourceManager.GetString("CannotSetMultiplePasswordProviderKinds", resourceCulture); + } + } + + internal static string SyncAndAsyncPasswordProvidersRequired { + get { + return ResourceManager.GetString("SyncAndAsyncPasswordProvidersRequired", resourceCulture); + } + } + internal static string PasswordProviderMissing { get { return ResourceManager.GetString("PasswordProviderMissing", resourceCulture); diff --git a/src/Npgsql/Properties/NpgsqlStrings.resx b/src/Npgsql/Properties/NpgsqlStrings.resx index 375b516576..df2642e85e 100644 --- a/src/Npgsql/Properties/NpgsqlStrings.resx +++ b/src/Npgsql/Properties/NpgsqlStrings.resx @@ -42,6 +42,12 @@ When registering a password provider, a password or password file may not be set. + + Multiple kinds of password providers were found, only one kind may be configured per DbDataSource. 
+ + + Both sync and async password providers must be provided. + The right type of password provider (sync or async) was not found. diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index d294068b78..6837192430 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -15,6 +15,7 @@ Npgsql.NpgsqlDataSourceBuilder.MapEnum(System.Type! clrType, string? pgName = nu Npgsql.NpgsqlDataSourceBuilder.Name.get -> string? Npgsql.NpgsqlDataSourceBuilder.Name.set -> void Npgsql.NpgsqlDataSourceBuilder.UnmapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool +Npgsql.NpgsqlDataSourceBuilder.UsePasswordProvider(System.Func? passwordProvider, System.Func>? passwordProviderAsync) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.UseRootCertificate(System.Security.Cryptography.X509Certificates.X509Certificate2? rootCertificate) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.UseRootCertificateCallback(System.Func? rootCertificateCallback) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder @@ -50,6 +51,7 @@ Npgsql.NpgsqlSlimDataSourceBuilder.UseClientCertificate(System.Security.Cryptogr Npgsql.NpgsqlSlimDataSourceBuilder.UseClientCertificates(System.Security.Cryptography.X509Certificates.X509CertificateCollection? clientCertificates) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UseClientCertificatesCallback(System.Action? clientCertificatesCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UseLoggerFactory(Microsoft.Extensions.Logging.ILoggerFactory? loggerFactory) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.UsePasswordProvider(System.Func? passwordProvider, System.Func>? passwordProviderAsync) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UsePeriodicPasswordProvider(System.Func>? 
passwordProvider, System.TimeSpan successRefreshInterval, System.TimeSpan failureRefreshInterval) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UsePhysicalConnectionInitializer(System.Action? connectionInitializer, System.Func? connectionInitializerAsync) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UseRootCertificate(System.Security.Cryptography.X509Certificates.X509Certificate2? rootCertificate) -> Npgsql.NpgsqlSlimDataSourceBuilder! diff --git a/test/Npgsql.Tests/AuthenticationTests.cs b/test/Npgsql.Tests/AuthenticationTests.cs index 487bc5457c..5a041a7aca 100644 --- a/test/Npgsql.Tests/AuthenticationTests.cs +++ b/test/Npgsql.Tests/AuthenticationTests.cs @@ -47,6 +47,38 @@ public async Task Set_Password_on_NpgsqlDataSource() await using var connection2 = dataSource.OpenConnection(); } + [Test] + public async Task Password_provider([Values]bool async) + { + var dataSourceBuilder = GetPasswordlessDataSourceBuilder(); + var password = new NpgsqlConnectionStringBuilder(TestUtil.ConnectionString).Password!; + var syncProviderCalled = false; + var asyncProviderCalled = false; + dataSourceBuilder.UsePasswordProvider(_ => + { + syncProviderCalled = true; + return password; + }, (_,_) => + { + asyncProviderCalled = true; + return new(password); + }); + + using var dataSource = dataSourceBuilder.Build(); + using var conn = async ? await dataSource.OpenConnectionAsync() : dataSource.OpenConnection(); + Assert.True(async ? 
asyncProviderCalled : syncProviderCalled, "Password_provider not used"); + } + + [Test] + public void Password_provider_exception() + { + var dataSourceBuilder = GetPasswordlessDataSourceBuilder(); + dataSourceBuilder.UsePasswordProvider(_ => throw new Exception(), (_,_) => throw new Exception()); + + using var dataSource = dataSourceBuilder.Build(); + Assert.ThrowsAsync(async () => await dataSource.OpenConnectionAsync()); + } + [Test] public async Task Periodic_password_provider() { @@ -129,6 +161,17 @@ public void Both_password_and_password_provider_is_not_supported() .With.Message.EqualTo(NpgsqlStrings.CannotSetBothPasswordProviderAndPassword)); } + [Test] + public void Multiple_password_providers_is_not_supported() + { + var dataSourceBuilder = new NpgsqlDataSourceBuilder(TestUtil.ConnectionString); + dataSourceBuilder + .UsePeriodicPasswordProvider((_, _) => new("foo"), TimeSpan.FromMinutes(1), TimeSpan.FromSeconds(10)) + .UsePasswordProvider(_ => "foo", (_,_) => new("foo")); + Assert.That(() => dataSourceBuilder.Build(), Throws.Exception.TypeOf() + .With.Message.EqualTo(NpgsqlStrings.CannotSetMultiplePasswordProviderKinds)); + } + #region pgpass [Test] From edfd082cbb0531f59cfd0ee3b7eb0ed29cd72875 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Sat, 18 Nov 2023 01:20:41 +0100 Subject: [PATCH 306/761] Remove single use of IReadOnlyCollection implementation of RowDescription (#5410) --- .../BackendMessages/RowDescriptionMessage.cs | 37 +------------------ src/Npgsql/Schema/DbColumnSchemaGenerator.cs | 3 +- 2 files changed, 3 insertions(+), 37 deletions(-) diff --git a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs index c9986da85c..b364d3f934 100644 --- a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs +++ b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs @@ -32,7 +32,7 @@ public ColumnInfo(PgConverterInfo converterInfo, DataFormat dataFormat, bool asO /// /// See 
https://www.postgresql.org/docs/current/static/protocol-message-formats.html /// -sealed class RowDescriptionMessage : IBackendMessage, IReadOnlyList +sealed class RowDescriptionMessage : IBackendMessage { readonly bool _connectorOwned; FieldDescription?[] _fields; @@ -157,9 +157,6 @@ internal void LoadColumnInfoCache(PgSerializerOptions options, ColumnInfo[] valu public int Count { get; private set; } - public IEnumerator GetEnumerator() => new Enumerator(this); - IEnumerator IEnumerable.GetEnumerator() => GetEnumerator(); - /// /// Given a string name, returns the field's ordinal index in the row. /// @@ -212,38 +209,6 @@ public bool Equals(string? x, string? y) public int GetHashCode(string o) => CompareInfo.GetSortKey(o, CompareOptions.IgnoreWidth | CompareOptions.IgnoreCase | CompareOptions.IgnoreKanaType).GetHashCode(); } - - sealed class Enumerator : IEnumerator - { - readonly RowDescriptionMessage _rowDescription; - int _pos = -1; - - public Enumerator(RowDescriptionMessage rowDescription) - => _rowDescription = rowDescription; - - public FieldDescription Current - { - get - { - if (_pos < 0) - ThrowHelper.ThrowInvalidOperationException(); - return _rowDescription[_pos]; - } - } - - object IEnumerator.Current => Current; - - public bool MoveNext() - { - if (_pos == _rowDescription.Count - 1) - return false; - _pos++; - return true; - } - - public void Reset() => _pos = -1; - public void Dispose() { } - } } /// diff --git a/src/Npgsql/Schema/DbColumnSchemaGenerator.cs b/src/Npgsql/Schema/DbColumnSchemaGenerator.cs index d587c8a6b7..300001e72d 100644 --- a/src/Npgsql/Schema/DbColumnSchemaGenerator.cs +++ b/src/Npgsql/Schema/DbColumnSchemaGenerator.cs @@ -112,8 +112,9 @@ internal async Task> GetColumnSchema(bool asy // the backend (if fetchAdditionalInfo is true), for the latter we only have the RowDescription var filters = new List(); - foreach (var f in _rowDescription) + for (var index = 0; index < _rowDescription.Count; index++) { + var f = 
_rowDescription[index]; // Only column fields if (f.TableOID != 0) filters.Add($"(attr.attrelid={f.TableOID} AND attr.attnum={f.ColumnAttributeNumber})"); From b0f7b5efbf150ef0efdcc226a1e95c5457f29ac1 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Sun, 19 Nov 2023 19:21:25 +0100 Subject: [PATCH 307/761] Remove display classes (#5417) --- src/Npgsql/BackendMessages/RowDescriptionMessage.cs | 4 ++-- src/Npgsql/Internal/TypeInfoCache.cs | 4 ++-- src/Npgsql/NpgsqlBinaryExporter.cs | 6 +++--- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs index b364d3f934..1b7ea8a2a6 100644 --- a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs +++ b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs @@ -364,10 +364,10 @@ internal void GetInfo(Type? type, ref ColumnInfo lastColumnInfo) } } - GetInfoSlow(out lastColumnInfo); + GetInfoSlow(type, out lastColumnInfo); [MethodImpl(MethodImplOptions.NoInlining)] - void GetInfoSlow(out ColumnInfo lastColumnInfo) + void GetInfoSlow(Type? type, out ColumnInfo lastColumnInfo) { var typeInfo = AdoSerializerHelpers.GetTypeInfoForReading(type ?? typeof(object), PostgresType, _serializerOptions); PgConverterInfo converterInfo; diff --git a/src/Npgsql/Internal/TypeInfoCache.cs b/src/Npgsql/Internal/TypeInfoCache.cs index 2fce6eb585..5c72463d03 100644 --- a/src/Npgsql/Internal/TypeInfoCache.cs +++ b/src/Npgsql/Internal/TypeInfoCache.cs @@ -48,7 +48,7 @@ public TypeInfoCache(PgSerializerOptions options, bool validatePgTypeIds = true) if (FindMatch(type, infos, defaultTypeFallback) is { } info) return info; - return AddEntryById(id, infos, defaultTypeFallback); + return AddEntryById(type, id, infos, defaultTypeFallback); } if (type is not null) @@ -91,7 +91,7 @@ public TypeInfoCache(PgSerializerOptions options, bool validatePgTypeIds = true) : _cacheByClrType[type]; } - PgTypeInfo? AddEntryById(TPgTypeId pgTypeId, (Type? 
Type, PgTypeInfo? Info)[]? infos, bool defaultTypeFallback) + PgTypeInfo? AddEntryById(Type? type, TPgTypeId pgTypeId, (Type? Type, PgTypeInfo? Info)[]? infos, bool defaultTypeFallback) { // We cache negatives (null info) to allow 'object or default' checks to never hit the resolvers after the first lookup. var info = CreateInfo(type, pgTypeId, _options, defaultTypeFallback, _validatePgTypeIds); diff --git a/src/Npgsql/NpgsqlBinaryExporter.cs b/src/Npgsql/NpgsqlBinaryExporter.cs index 4941e35530..5794b73b94 100644 --- a/src/Npgsql/NpgsqlBinaryExporter.cs +++ b/src/Npgsql/NpgsqlBinaryExporter.cs @@ -294,7 +294,7 @@ async ValueTask Read(bool async, NpgsqlDbType? type, CancellationToken can if (!PgReader.Initialized || !PgReader.Resumable || PgReader.CurrentRemaining != PgReader.FieldSize) { await Commit(async, resumableOp: false).ConfigureAwait(false); - info = GetInfo(out asObject); + info = GetInfo(type, out asObject); // We need to get info after potential I/O as we don't know beforehand at what column we're at. var columnLen = await ReadColumnLenIfNeeded(async, resumableOp: false).ConfigureAwait(false); @@ -306,7 +306,7 @@ async ValueTask Read(bool async, NpgsqlDbType? type, CancellationToken can } else - info = GetInfo(out asObject); + info = GetInfo(type, out asObject); T result; if (async) @@ -328,7 +328,7 @@ async ValueTask Read(bool async, NpgsqlDbType? type, CancellationToken can return result; - PgConverterInfo GetInfo(out bool asObject) + PgConverterInfo GetInfo(NpgsqlDbType? type, out bool asObject) { ref var cachedInfo = ref _columnInfoCache[_column]; var converterInfo = cachedInfo.IsDefault ? 
cachedInfo = CreateConverterInfo(typeof(T), type) : cachedInfo; From f25ff29e0979f89b9da2af8f5210cda3d5a6dd05 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Sun, 19 Nov 2023 19:33:18 +0100 Subject: [PATCH 308/761] Remove ordinal comparer passed into Dictionary (#5400) --- src/Npgsql/NpgsqlParameterCollection.cs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Npgsql/NpgsqlParameterCollection.cs b/src/Npgsql/NpgsqlParameterCollection.cs index 58c0315753..a10f9dceb0 100644 --- a/src/Npgsql/NpgsqlParameterCollection.cs +++ b/src/Npgsql/NpgsqlParameterCollection.cs @@ -404,7 +404,7 @@ public override int IndexOf(string parameterName) void BuildLookup() { if (TwoPassCompatMode) - _caseSensitiveLookup = new Dictionary(InternalList.Count, StringComparer.Ordinal); + _caseSensitiveLookup = new Dictionary(InternalList.Count); _caseInsensitiveLookup = new Dictionary(InternalList.Count, StringComparer.OrdinalIgnoreCase); @@ -674,7 +674,7 @@ internal void CloneTo(NpgsqlParameterCollection other) if (TwoPassCompatMode) { Debug.Assert(_caseSensitiveLookup is not null); - other._caseSensitiveLookup = new Dictionary(_caseSensitiveLookup, StringComparer.Ordinal); + other._caseSensitiveLookup = new Dictionary(_caseSensitiveLookup); } } } From 6a96dac7ff146df8acf963dda001746d31a14ec8 Mon Sep 17 00:00:00 2001 From: Calvin Spring <73836053+spcalvin@users.noreply.github.com> Date: Sun, 19 Nov 2023 15:26:21 -0500 Subject: [PATCH 309/761] Bump devcontainer dotnet SDK and add PostGIS (#5416) --- .devcontainer/db/Dockerfile | 6 +++--- .devcontainer/db/init-db.sh | 11 +++++++---- .devcontainer/docker-compose.yml | 2 +- 3 files changed, 11 insertions(+), 8 deletions(-) diff --git a/.devcontainer/db/Dockerfile b/.devcontainer/db/Dockerfile index 76eb48a2fa..64cc3febb1 100644 --- a/.devcontainer/db/Dockerfile +++ b/.devcontainer/db/Dockerfile @@ -1,3 +1,3 @@ -FROM postgres:alpine -RUN apk update && \ - apk add --no-cache openssl +FROM postgres +RUN apt-get update && \ 
+ apt-get install -y --no-install-recommends openssl postgresql-16-postgis-3 diff --git a/.devcontainer/db/init-db.sh b/.devcontainer/db/init-db.sh index 24804402fe..b4ccb371e9 100644 --- a/.devcontainer/db/init-db.sh +++ b/.devcontainer/db/init-db.sh @@ -19,7 +19,10 @@ echo "Configuring md5 authentication in $PGDATA/pg_hba.conf" echo 'local all all trust' > $PGDATA/pg_hba.conf echo "host all all all md5" >> $PGDATA/pg_hba.conf -# Standard test account for Npgsql -psql -U postgres -c "CREATE USER npgsql_tests SUPERUSER PASSWORD 'npgsql_tests'" -psql -U postgres -c "CREATE DATABASE npgsql_tests OWNER npgsql_tests" -psql -U postgres -c "CREATE EXTENSION ltree" npgsql_tests +# Standard test account for Npgsql and enable extensions +psql -U postgres < Date: Sun, 19 Nov 2023 21:26:57 +0100 Subject: [PATCH 310/761] Reintroduce limited IList support (#5418) --- .../Internal/Converters/ArrayConverter.cs | 38 +++++++++++-------- src/Npgsql/Internal/TypeInfoMapping.cs | 38 +++++++++++-------- test/Npgsql.Tests/Types/ArrayTests.cs | 20 +++++++--- 3 files changed, 60 insertions(+), 36 deletions(-) diff --git a/src/Npgsql/Internal/Converters/ArrayConverter.cs b/src/Npgsql/Internal/Converters/ArrayConverter.cs index 71c9c60fe3..e5fc7f10ca 100644 --- a/src/Npgsql/Internal/Converters/ArrayConverter.cs +++ b/src/Npgsql/Internal/Converters/ArrayConverter.cs @@ -1,6 +1,5 @@ using System; using System.Buffers; -using System.Collections; using System.Collections.Generic; using System.Collections.Concurrent; using System.Diagnostics; @@ -368,7 +367,7 @@ public Continuation(object handle, delegate* continua } } -sealed class ArrayBasedArrayConverter : ArrayConverter, IElementOperations where T : class, IList +sealed class ArrayBasedArrayConverter : ArrayConverter, IElementOperations where T : class { readonly PgConverter _elemConverter; @@ -470,7 +469,7 @@ ValueTask IElementOperations.Write(bool async, PgWriter writer, object collectio } } -sealed class ListBasedArrayConverter : 
ArrayConverter, IElementOperations where T : class, IList +sealed class ListBasedArrayConverter : ArrayConverter, IElementOperations where T : class { readonly PgConverter _elemConverter; @@ -481,15 +480,15 @@ public ListBasedArrayConverter(PgConverterResolution elemResolution, int pgLower [MethodImpl(MethodImplOptions.AggressiveInlining)] static TElement? GetValue(object collection, int index) { - Debug.Assert(collection is List); - return Unsafe.As>(collection)[index]; + Debug.Assert(collection is IList); + return Unsafe.As>(collection)[index]; } [MethodImpl(MethodImplOptions.AggressiveInlining)] static void SetValue(object collection, int index, TElement? value) { - Debug.Assert(collection is List); - var list = Unsafe.As>(collection); + Debug.Assert(collection is IList); + var list = Unsafe.As>(collection); list.Insert(index, value); } @@ -498,9 +497,9 @@ object IElementOperations.CreateCollection(int[] lengths) int IElementOperations.GetCollectionCount(object collection, out int[]? lengths) { - Debug.Assert(collection is List); + Debug.Assert(collection is IList); lengths = null; - return Unsafe.As>(collection).Count; + return Unsafe.As>(collection).Count; } Size? IElementOperations.GetSizeOrDbNull(SizeContext context, object collection, int[] indices, ref object? 
writeState) @@ -543,7 +542,7 @@ ValueTask IElementOperations.Write(bool async, PgWriter writer, object collectio } } -sealed class ArrayConverterResolver : PgComposingConverterResolver where T : class, IList +sealed class ArrayConverterResolver : PgComposingConverterResolver where T : class { readonly Type _effectiveType; @@ -558,12 +557,12 @@ public ArrayConverterResolver(PgResolverTypeInfo elementTypeInfo, Type effective protected override PgConverter CreateConverter(PgConverterResolution effectiveResolution) { - if (typeof(T).IsConstructedGenericType && typeof(T).GetGenericTypeDefinition() == typeof(List<>)) - return new ListBasedArrayConverter(effectiveResolution); - if (typeof(T) == typeof(Array) || typeof(T).IsArray) return new ArrayBasedArrayConverter(effectiveResolution, _effectiveType); + if (typeof(T).IsConstructedGenericType && typeof(T).GetGenericTypeDefinition() == typeof(IList<>)) + return new ListBasedArrayConverter(effectiveResolution); + throw new NotSupportedException($"Unknown type T: {typeof(T).FullName}"); } @@ -585,6 +584,13 @@ protected override PgConverter CreateConverter(PgConverterResolution effectiv resolution ??= result; } break; + case List list: + foreach (var value in list) + { + var result = EffectiveTypeInfo.GetResolution(value, resolution?.PgTypeId ?? expectedEffectivePgTypeId); + resolution ??= result; + } + break; case IList list: foreach (var value in list) { @@ -592,13 +598,15 @@ protected override PgConverter CreateConverter(PgConverterResolution effectiv resolution ??= result; } break; - default: - foreach (var value in values) + case Array array: + foreach (var value in array) { var result = EffectiveTypeInfo.GetResolutionAsObject(value, resolution?.PgTypeId ?? 
expectedEffectivePgTypeId); resolution ??= result; } break; + default: + throw new NotSupportedException(); } } diff --git a/src/Npgsql/Internal/TypeInfoMapping.cs b/src/Npgsql/Internal/TypeInfoMapping.cs index 75af8ec41d..e2d8927527 100644 --- a/src/Npgsql/Internal/TypeInfoMapping.cs +++ b/src/Npgsql/Internal/TypeInfoMapping.cs @@ -274,7 +274,8 @@ Func GetDefaultConfigure(MatchRequirement matc Func GetArrayTypeMatchPredicate(Func elementTypeMatchPredicate) => type => type is null ? elementTypeMatchPredicate(null) : type.IsArray && elementTypeMatchPredicate.Invoke(type.GetElementType()!); Func GetListTypeMatchPredicate(Func elementTypeMatchPredicate) - => type => type is null ? elementTypeMatchPredicate(null) : type.IsConstructedGenericType && type.GetGenericTypeDefinition() == typeof(List<>) + => type => type is null ? elementTypeMatchPredicate(null) : type.IsConstructedGenericType && type.GetGenericTypeDefinition() is { } def + && (def == typeof(List<>) || def == typeof(IList<>)) && elementTypeMatchPredicate(type.GetGenericArguments()[0]); public void AddType(string dataTypeName, TypeInfoFactory createInfo, bool isDefault = false) where T : class @@ -329,12 +330,12 @@ public void AddArrayType(TypeInfoMapping elementMapping, bool suppress { // Always use a predicate to match all dimensions. var arrayTypeMatchPredicate = GetArrayTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? (static type => type is null || type == typeof(TElement))); - var listTypeMatchPredicate = elementMapping.TypeMatchPredicate is not null ? GetListTypeMatchPredicate(elementMapping.TypeMatchPredicate) : null; + var listTypeMatchPredicate = GetListTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? 
(static type => type is null || type == typeof(TElement))); var arrayDataTypeName = GetArrayDataTypeName(elementMapping.DataTypeName); AddArrayType(elementMapping, typeof(TElement[]), CreateArrayBasedConverter, arrayTypeMatchPredicate, suppressObjectMapping: suppressObjectMapping || TryGetMapping(typeof(object), arrayDataTypeName, out _)); - AddArrayType(elementMapping, typeof(List), CreateListBasedConverter, listTypeMatchPredicate, suppressObjectMapping: true); + AddArrayType(elementMapping, typeof(IList), CreateListBasedConverter, listTypeMatchPredicate, suppressObjectMapping: true); void AddArrayType(TypeInfoMapping elementMapping, Type type, Func converter, Func? typeMatchPredicate = null, bool suppressObjectMapping = false) { @@ -369,12 +370,12 @@ public void AddResolverArrayType(TypeInfoMapping elementMapping, bool { // Always use a predicate to match all dimensions. var arrayTypeMatchPredicate = GetArrayTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? (static type => type is null || type == typeof(TElement))); - var listTypeMatchPredicate = elementMapping.TypeMatchPredicate is not null ? GetListTypeMatchPredicate(elementMapping.TypeMatchPredicate) : null; + var listTypeMatchPredicate = GetListTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? 
(static type => type is null || type == typeof(TElement))); var arrayDataTypeName = GetArrayDataTypeName(elementMapping.DataTypeName); AddResolverArrayType(elementMapping, typeof(TElement[]), CreateArrayBasedConverterResolver, arrayTypeMatchPredicate, suppressObjectMapping: suppressObjectMapping || TryGetMapping(typeof(object), arrayDataTypeName, out _)); - AddResolverArrayType(elementMapping, typeof(List), CreateListBasedConverterResolver, listTypeMatchPredicate, suppressObjectMapping: true); + AddResolverArrayType(elementMapping, typeof(IList), CreateListBasedConverterResolver, listTypeMatchPredicate, suppressObjectMapping: true); void AddResolverArrayType(TypeInfoMapping elementMapping, Type type, Func converter, Func? typeMatchPredicate = null, bool suppressObjectMapping = false) { @@ -448,8 +449,9 @@ public void AddStructArrayType(TypeInfoMapping elementMapping, TypeInf var arrayTypeMatchPredicate = GetArrayTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? (static type => type is null || type == typeof(TElement))); var nullableArrayTypeMatchPredicate = GetArrayTypeMatchPredicate(nullableElementMapping.TypeMatchPredicate ?? (static type => type is null || (Nullable.GetUnderlyingType(type) is { } underlying && underlying == typeof(TElement)))); - var listTypeMatchPredicate = elementMapping.TypeMatchPredicate is not null ? GetListTypeMatchPredicate(elementMapping.TypeMatchPredicate) : null; - var nullableListTypeMatchPredicate = nullableElementMapping.TypeMatchPredicate is not null ? GetListTypeMatchPredicate(nullableElementMapping.TypeMatchPredicate) : null; + var listTypeMatchPredicate = GetListTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? (static type => type is null || type == typeof(TElement))); + var nullableListTypeMatchPredicate = GetListTypeMatchPredicate(nullableElementMapping.TypeMatchPredicate ?? 
(static type => + type is null || (Nullable.GetUnderlyingType(type) is { } underlying && underlying == typeof(TElement)))); var arrayDataTypeName = GetArrayDataTypeName(elementMapping.DataTypeName); @@ -458,7 +460,7 @@ public void AddStructArrayType(TypeInfoMapping elementMapping, TypeInf arrayTypeMatchPredicate, nullableArrayTypeMatchPredicate, suppressObjectMapping: suppressObjectMapping || TryGetMapping(typeof(object), arrayDataTypeName, out _)); // Don't add the object converter for the list based converter. - AddStructArrayType(elementMapping, nullableElementMapping, typeof(List), typeof(List), + AddStructArrayType(elementMapping, nullableElementMapping, typeof(IList), typeof(IList), CreateListBasedConverter, CreateListBasedConverter, listTypeMatchPredicate, nullableListTypeMatchPredicate, suppressObjectMapping: true); } @@ -564,7 +566,9 @@ public void AddResolverStructArrayType(TypeInfoMapping elementMapping, var arrayTypeMatchPredicate = GetArrayTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? (static type => type is null || type == typeof(TElement))); var nullableArrayTypeMatchPredicate = GetArrayTypeMatchPredicate(nullableElementMapping.TypeMatchPredicate ?? (static type => type is null || (Nullable.GetUnderlyingType(type) is { } underlying && underlying == typeof(TElement)))); - var listTypeMatchPredicate = elementMapping.TypeMatchPredicate is not null ? GetListTypeMatchPredicate(elementMapping.TypeMatchPredicate) : null; + var listTypeMatchPredicate = GetListTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? (static type => type is null || type == typeof(TElement))); + var nullableListTypeMatchPredicate = GetListTypeMatchPredicate(nullableElementMapping.TypeMatchPredicate ?? 
(static type => + type is null || (Nullable.GetUnderlyingType(type) is { } underlying && underlying == typeof(TElement)))); var arrayDataTypeName = GetArrayDataTypeName(elementMapping.DataTypeName); @@ -573,9 +577,9 @@ public void AddResolverStructArrayType(TypeInfoMapping elementMapping, CreateArrayBasedConverterResolver, suppressObjectMapping: suppressObjectMapping || TryGetMapping(typeof(object), arrayDataTypeName, out _), arrayTypeMatchPredicate, nullableArrayTypeMatchPredicate); // Don't add the object converter for the list based converter. - AddResolverStructArrayType(elementMapping, nullableElementMapping, typeof(List), typeof(List), + AddResolverStructArrayType(elementMapping, nullableElementMapping, typeof(IList), typeof(IList), CreateListBasedConverterResolver, - CreateListBasedConverterResolver, suppressObjectMapping: true, listTypeMatchPredicate, nullableArrayTypeMatchPredicate); + CreateListBasedConverterResolver, suppressObjectMapping: true, listTypeMatchPredicate, nullableListTypeMatchPredicate); } // Lives outside to prevent capture of TElement. @@ -653,7 +657,9 @@ public static bool IsArrayLikeType(Type type, [NotNullWhen(true)] out Type? 
elem elementType = type switch { { IsArray: true } => type.GetElementType(), - { IsConstructedGenericType: true } when type.GetGenericTypeDefinition() == typeof(List<>) => type.GetGenericArguments()[0], + { IsConstructedGenericType: true } when type.GetGenericTypeDefinition() is { } def && + (def == typeof(List<>) || def == typeof(IList<>)) + => type.GetGenericArguments()[0], _ => null }; @@ -674,10 +680,10 @@ static ArrayBasedArrayConverter CreateArrayBasedConverter, TElement> CreateListBasedConverter(TypeInfoMapping mapping, PgTypeInfo elemInfo) + static ListBasedArrayConverter, TElement> CreateListBasedConverter(TypeInfoMapping mapping, PgTypeInfo elemInfo) { if (!elemInfo.IsBoxing) - return new ListBasedArrayConverter, TElement>(elemInfo.GetResolution()); + return new ListBasedArrayConverter, TElement>(elemInfo.GetResolution()); ThrowBoxingNotSupported(resolver: false); return default; @@ -692,10 +698,10 @@ static ArrayConverterResolver CreateArrayBasedConverterResolver return default; } - static ArrayConverterResolver, TElement> CreateListBasedConverterResolver(TypeInfoMapping mapping, PgResolverTypeInfo elemInfo) + static ArrayConverterResolver, TElement> CreateListBasedConverterResolver(TypeInfoMapping mapping, PgResolverTypeInfo elemInfo) { if (!elemInfo.IsBoxing) - return new ArrayConverterResolver, TElement>(elemInfo, mapping.Type); + return new ArrayConverterResolver, TElement>(elemInfo, mapping.Type); ThrowBoxingNotSupported(resolver: true); return default; diff --git a/test/Npgsql.Tests/Types/ArrayTests.cs b/test/Npgsql.Tests/Types/ArrayTests.cs index aab21df1a7..e27d7bc2b1 100644 --- a/test/Npgsql.Tests/Types/ArrayTests.cs +++ b/test/Npgsql.Tests/Types/ArrayTests.cs @@ -1,5 +1,6 @@ using System; using System.Collections.Generic; +using System.Collections.Immutable; using System.Data; using System.Linq; using System.Text; @@ -154,6 +155,20 @@ public async Task Generic_List() => await AssertType( new List { 1, 2, 3 }, "{1,2,3}", "integer[]", 
NpgsqlDbType.Integer | NpgsqlDbType.Array, isDefaultForReading: false); + [Test] + public async Task Generic_IList() + { + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT @p1", conn); + + var expected = ImmutableArray.Create(1,2,3); + cmd.Parameters.Add(new NpgsqlParameter>("p1", NpgsqlDbType.Array | NpgsqlDbType.Integer) { TypedValue = expected }); + + var reader = await cmd.ExecuteReaderAsync(); + reader.Read(); + Assert.AreEqual(expected, reader.GetFieldValue(0)); + } + [Test, Description("Verifies that an InvalidOperationException is thrown when the returned array has a different number of dimensions from what was requested.")] public async Task Wrong_array_dimensions_throws() { @@ -382,10 +397,5 @@ public async Task NpgsqlSlimSourceBuilder_EnableArrays() await AssertType(dataSource, new[] { 1, 2, 3 }, "{1,2,3}", "integer[]", NpgsqlDbType.Integer | NpgsqlDbType.Array); } - class IntList : List { } - // ReSharper disable UnusedTypeParameter - class MisleadingIntList : List { } - // ReSharper restore UnusedTypeParameter - public ArrayTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} } From cb4a2805d825f10175f376563b286167cd1caa2d Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Mon, 20 Nov 2023 19:35:17 +0100 Subject: [PATCH 311/761] Fix breaking change di (#5420) --- .../Npgsql.DependencyInjection.csproj | 4 + .../NpgsqlServiceCollectionExtensions.cs | 203 +++++++++++++++++- .../PublicAPI.Shipped.txt | 1 + .../PublicAPI.Unshipped.txt | 18 ++ 4 files changed, 225 insertions(+), 1 deletion(-) create mode 100644 src/Npgsql.DependencyInjection/PublicAPI.Shipped.txt create mode 100644 src/Npgsql.DependencyInjection/PublicAPI.Unshipped.txt diff --git a/src/Npgsql.DependencyInjection/Npgsql.DependencyInjection.csproj b/src/Npgsql.DependencyInjection/Npgsql.DependencyInjection.csproj index 8910b7fad9..b3b92f69c6 100644 --- a/src/Npgsql.DependencyInjection/Npgsql.DependencyInjection.csproj +++ 
b/src/Npgsql.DependencyInjection/Npgsql.DependencyInjection.csproj @@ -13,6 +13,10 @@ + + + + diff --git a/src/Npgsql.DependencyInjection/NpgsqlServiceCollectionExtensions.cs b/src/Npgsql.DependencyInjection/NpgsqlServiceCollectionExtensions.cs index c1841ece60..755d6b1357 100644 --- a/src/Npgsql.DependencyInjection/NpgsqlServiceCollectionExtensions.cs +++ b/src/Npgsql.DependencyInjection/NpgsqlServiceCollectionExtensions.cs @@ -1,4 +1,5 @@ using System; +using System.ComponentModel; using System.Data.Common; using Microsoft.Extensions.DependencyInjection.Extensions; using Microsoft.Extensions.Logging; @@ -43,8 +44,10 @@ public static IServiceCollection AddNpgsqlDataSource( /// Registers an and an in the . /// /// The to add services to. - /// The of the data source. /// An Npgsql connection string. + /// + /// An action to configure the for further customizations of the . + /// /// /// The lifetime with which to register the in the container. /// Defaults to . @@ -54,6 +57,30 @@ public static IServiceCollection AddNpgsqlDataSource( /// Defaults to . /// /// The same service collection so that multiple calls can be chained. + [EditorBrowsable(EditorBrowsableState.Never), Obsolete("Defined for binary compatibility with 7.0")] + public static IServiceCollection AddNpgsqlDataSource( + this IServiceCollection serviceCollection, + string connectionString, + Action dataSourceBuilderAction, + ServiceLifetime connectionLifetime, + ServiceLifetime dataSourceLifetime) + => AddNpgsqlDataSourceCore(serviceCollection, serviceKey: null, connectionString, dataSourceBuilderAction, connectionLifetime, dataSourceLifetime); + + /// + /// Registers an and an in the . + /// + /// The to add services to. + /// An Npgsql connection string. + /// + /// The lifetime with which to register the in the container. + /// Defaults to . + /// + /// + /// The lifetime with which to register the service in the container. + /// Defaults to . + /// + /// The of the data source. 
+ /// The same service collection so that multiple calls can be chained. public static IServiceCollection AddNpgsqlDataSource( this IServiceCollection serviceCollection, string connectionString, @@ -63,6 +90,29 @@ public static IServiceCollection AddNpgsqlDataSource( => AddNpgsqlDataSourceCore( serviceCollection, serviceKey, connectionString, dataSourceBuilderAction: null, connectionLifetime, dataSourceLifetime); + /// + /// Registers an and an in the . + /// + /// The to add services to. + /// An Npgsql connection string. + /// + /// The lifetime with which to register the in the container. + /// Defaults to . + /// + /// + /// The lifetime with which to register the service in the container. + /// Defaults to . + /// + /// The same service collection so that multiple calls can be chained. + [EditorBrowsable(EditorBrowsableState.Never), Obsolete("Defined for binary compatibility with 7.0")] + public static IServiceCollection AddNpgsqlDataSource( + this IServiceCollection serviceCollection, + string connectionString, + ServiceLifetime connectionLifetime, + ServiceLifetime dataSourceLifetime) + => AddNpgsqlDataSourceCore( + serviceCollection, serviceKey: null, connectionString, dataSourceBuilderAction: null, connectionLifetime, dataSourceLifetime); + /// /// Registers an and an in the . /// @@ -90,6 +140,32 @@ public static IServiceCollection AddNpgsqlSlimDataSource( object? serviceKey = null) => AddNpgsqlSlimDataSourceCore(serviceCollection, serviceKey, connectionString, dataSourceBuilderAction, connectionLifetime, dataSourceLifetime); + /// + /// Registers an and an in the . + /// + /// The to add services to. + /// An Npgsql connection string. + /// + /// An action to configure the for further customizations of the . + /// + /// + /// The lifetime with which to register the in the container. + /// Defaults to . + /// + /// + /// The lifetime with which to register the service in the container. + /// Defaults to . 
+ /// + /// The same service collection so that multiple calls can be chained. + [EditorBrowsable(EditorBrowsableState.Never), Obsolete("Defined for binary compatibility with 7.0")] + public static IServiceCollection AddNpgsqlSlimDataSource( + this IServiceCollection serviceCollection, + string connectionString, + Action dataSourceBuilderAction, + ServiceLifetime connectionLifetime, + ServiceLifetime dataSourceLifetime) + => AddNpgsqlSlimDataSourceCore(serviceCollection, serviceKey: null, connectionString, dataSourceBuilderAction, connectionLifetime, dataSourceLifetime); + /// /// Registers an and an in the . /// @@ -114,6 +190,29 @@ public static IServiceCollection AddNpgsqlSlimDataSource( => AddNpgsqlSlimDataSourceCore( serviceCollection, serviceKey, connectionString, dataSourceBuilderAction: null, connectionLifetime, dataSourceLifetime); + /// + /// Registers an and an in the . + /// + /// The to add services to. + /// An Npgsql connection string. + /// + /// The lifetime with which to register the in the container. + /// Defaults to . + /// + /// + /// The lifetime with which to register the service in the container. + /// Defaults to . + /// + /// The same service collection so that multiple calls can be chained. 
+ [EditorBrowsable(EditorBrowsableState.Never), Obsolete("Defined for binary compatibility with 7.0")] + public static IServiceCollection AddNpgsqlSlimDataSource( + this IServiceCollection serviceCollection, + string connectionString, + ServiceLifetime connectionLifetime, + ServiceLifetime dataSourceLifetime) + => AddNpgsqlSlimDataSourceCore( + serviceCollection, serviceKey: null, connectionString, dataSourceBuilderAction: null, connectionLifetime, dataSourceLifetime); + /// /// Registers an and an in the /// @@ -142,6 +241,33 @@ public static IServiceCollection AddMultiHostNpgsqlDataSource( => AddMultiHostNpgsqlDataSourceCore( serviceCollection, serviceKey, connectionString, dataSourceBuilderAction, connectionLifetime, dataSourceLifetime); + /// + /// Registers an and an in the + /// + /// The to add services to. + /// An Npgsql connection string. + /// + /// An action to configure the for further customizations of the . + /// + /// + /// The lifetime with which to register the in the container. + /// Defaults to . + /// + /// + /// The lifetime with which to register the service in the container. + /// Defaults to . + /// + /// The same service collection so that multiple calls can be chained. + [EditorBrowsable(EditorBrowsableState.Never), Obsolete("Defined for binary compatibility with 7.0")] + public static IServiceCollection AddMultiHostNpgsqlDataSource( + this IServiceCollection serviceCollection, + string connectionString, + Action dataSourceBuilderAction, + ServiceLifetime connectionLifetime, + ServiceLifetime dataSourceLifetime) + => AddMultiHostNpgsqlDataSourceCore( + serviceCollection, serviceKey: null, connectionString, dataSourceBuilderAction, connectionLifetime, dataSourceLifetime); + /// /// Registers an and an in the /// . 
@@ -167,6 +293,30 @@ public static IServiceCollection AddMultiHostNpgsqlDataSource( => AddMultiHostNpgsqlDataSourceCore( serviceCollection, serviceKey, connectionString, dataSourceBuilderAction: null, connectionLifetime, dataSourceLifetime); + /// + /// Registers an and an in the + /// . + /// + /// The to add services to. + /// An Npgsql connection string. + /// + /// The lifetime with which to register the in the container. + /// Defaults to . + /// + /// + /// The lifetime with which to register the service in the container. + /// Defaults to . + /// + /// The same service collection so that multiple calls can be chained. + [EditorBrowsable(EditorBrowsableState.Never), Obsolete("Defined for binary compatibility with 7.0")] + public static IServiceCollection AddMultiHostNpgsqlDataSource( + this IServiceCollection serviceCollection, + string connectionString, + ServiceLifetime connectionLifetime, + ServiceLifetime dataSourceLifetime) + => AddMultiHostNpgsqlDataSourceCore( + serviceCollection, serviceKey: null, connectionString, dataSourceBuilderAction: null, connectionLifetime, dataSourceLifetime); + /// /// Registers an and an in the /// @@ -195,6 +345,33 @@ public static IServiceCollection AddMultiHostNpgsqlSlimDataSource( => AddMultiHostNpgsqlSlimDataSourceCore( serviceCollection, serviceKey, connectionString, dataSourceBuilderAction, connectionLifetime, dataSourceLifetime); + /// + /// Registers an and an in the + /// + /// The to add services to. + /// An Npgsql connection string. + /// + /// An action to configure the for further customizations of the . + /// + /// + /// The lifetime with which to register the in the container. + /// Defaults to . + /// + /// + /// The lifetime with which to register the service in the container. + /// Defaults to . + /// + /// The same service collection so that multiple calls can be chained. 
+ [EditorBrowsable(EditorBrowsableState.Never), Obsolete("Defined for binary compatibility with 7.0")] + public static IServiceCollection AddMultiHostNpgsqlSlimDataSource( + this IServiceCollection serviceCollection, + string connectionString, + Action dataSourceBuilderAction, + ServiceLifetime connectionLifetime, + ServiceLifetime dataSourceLifetime) + => AddMultiHostNpgsqlSlimDataSourceCore( + serviceCollection, serviceKey: null, connectionString, dataSourceBuilderAction, connectionLifetime, dataSourceLifetime); + /// /// Registers an and an in the /// . @@ -220,6 +397,30 @@ public static IServiceCollection AddMultiHostNpgsqlSlimDataSource( => AddMultiHostNpgsqlSlimDataSourceCore( serviceCollection, serviceKey, connectionString, dataSourceBuilderAction: null, connectionLifetime, dataSourceLifetime); + /// + /// Registers an and an in the + /// . + /// + /// The to add services to. + /// An Npgsql connection string. + /// + /// The lifetime with which to register the in the container. + /// Defaults to . + /// + /// + /// The lifetime with which to register the service in the container. + /// Defaults to . + /// + /// The same service collection so that multiple calls can be chained. + [EditorBrowsable(EditorBrowsableState.Never), Obsolete("Defined for binary compatibility with 7.0")] + public static IServiceCollection AddMultiHostNpgsqlSlimDataSource( + this IServiceCollection serviceCollection, + string connectionString, + ServiceLifetime connectionLifetime, + ServiceLifetime dataSourceLifetime) + => AddMultiHostNpgsqlSlimDataSourceCore( + serviceCollection, serviceKey: null, connectionString, dataSourceBuilderAction: null, connectionLifetime, dataSourceLifetime); + static IServiceCollection AddNpgsqlDataSourceCore( this IServiceCollection serviceCollection, object? 
serviceKey, diff --git a/src/Npgsql.DependencyInjection/PublicAPI.Shipped.txt b/src/Npgsql.DependencyInjection/PublicAPI.Shipped.txt new file mode 100644 index 0000000000..ab058de62d --- /dev/null +++ b/src/Npgsql.DependencyInjection/PublicAPI.Shipped.txt @@ -0,0 +1 @@ +#nullable enable diff --git a/src/Npgsql.DependencyInjection/PublicAPI.Unshipped.txt b/src/Npgsql.DependencyInjection/PublicAPI.Unshipped.txt new file mode 100644 index 0000000000..4066bf5273 --- /dev/null +++ b/src/Npgsql.DependencyInjection/PublicAPI.Unshipped.txt @@ -0,0 +1,18 @@ +#nullable enable +Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! 
dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! 
+static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! 
connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! 
+static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! 
From 81ace5c0da072bcd211bf5814692fd76caf746d3 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Mon, 20 Nov 2023 19:52:30 +0100 Subject: [PATCH 312/761] Implement ConfigureJsonOptions (#5419) Closes #5412 --- .../RecordTypeInfoResolverFactory.cs | 2 +- .../UnsupportedTypeInfoResolver.cs | 8 +- src/Npgsql/NpgsqlDataSourceBuilder.cs | 90 +++++++++++++++---- src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 79 ++++++++++++++-- src/Npgsql/PublicAPI.Unshipped.txt | 12 ++- src/Npgsql/TypeMapping/GlobalTypeMapper.cs | 89 ++++++++++++++++++ .../INpgsqlTypeMapperExtensions.cs | 68 -------------- test/Npgsql.Tests/Types/EnumTests.cs | 2 +- test/Npgsql.Tests/Types/JsonDynamicTests.cs | 14 +-- test/Npgsql.Tests/Types/MultirangeTests.cs | 2 +- test/Npgsql.Tests/Types/RangeTests.cs | 2 +- test/Npgsql.Tests/Types/RecordTests.cs | 4 +- 12 files changed, 262 insertions(+), 110 deletions(-) delete mode 100644 src/Npgsql/TypeMapping/INpgsqlTypeMapperExtensions.cs diff --git a/src/Npgsql/Internal/ResolverFactories/RecordTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/RecordTypeInfoResolverFactory.cs index b6f143be03..7a26e168b9 100644 --- a/src/Npgsql/Internal/ResolverFactories/RecordTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/RecordTypeInfoResolverFactory.cs @@ -17,7 +17,7 @@ public static void CheckUnsupported(Type? type, DataTypeName? 
dataType throw new NotSupportedException( string.Format( NpgsqlStrings.RecordsNotEnabled, - nameof(INpgsqlTypeMapperExtensions.EnableRecordsAsTuples), + nameof(NpgsqlSlimDataSourceBuilder.EnableRecordsAsTuples), typeof(TBuilder).Name, nameof(NpgsqlSlimDataSourceBuilder.EnableRecords))); } diff --git a/src/Npgsql/Internal/ResolverFactories/UnsupportedTypeInfoResolver.cs b/src/Npgsql/Internal/ResolverFactories/UnsupportedTypeInfoResolver.cs index 7f5755963b..b8fdaa0030 100644 --- a/src/Npgsql/Internal/ResolverFactories/UnsupportedTypeInfoResolver.cs +++ b/src/Npgsql/Internal/ResolverFactories/UnsupportedTypeInfoResolver.cs @@ -32,28 +32,28 @@ sealed class UnsupportedTypeInfoResolver : IPgTypeInfoResolver string.Format( NpgsqlStrings.DynamicJsonNotEnabled, type == typeof(object) ? "" : type.Name, - nameof(INpgsqlTypeMapperExtensions.EnableDynamicJson), + nameof(NpgsqlSlimDataSourceBuilder.EnableDynamicJson), typeof(TBuilder).Name)); case not null when options.DatabaseInfo.GetPostgresType(dataTypeName) is PostgresEnumType: throw new NotSupportedException( string.Format( NpgsqlStrings.UnmappedEnumsNotEnabled, - nameof(INpgsqlTypeMapperExtensions.EnableUnmappedTypes), + nameof(NpgsqlSlimDataSourceBuilder.EnableUnmappedTypes), typeof(TBuilder).Name)); case not null when options.DatabaseInfo.GetPostgresType(dataTypeName) is PostgresRangeType: throw new NotSupportedException( string.Format( NpgsqlStrings.UnmappedRangesNotEnabled, - nameof(INpgsqlTypeMapperExtensions.EnableUnmappedTypes), + nameof(NpgsqlSlimDataSourceBuilder.EnableUnmappedTypes), typeof(TBuilder).Name)); case not null when options.DatabaseInfo.GetPostgresType(dataTypeName) is PostgresMultirangeType: throw new NotSupportedException( string.Format( NpgsqlStrings.UnmappedRangesNotEnabled, - nameof(INpgsqlTypeMapperExtensions.EnableUnmappedTypes), + nameof(NpgsqlSlimDataSourceBuilder.EnableUnmappedTypes), typeof(TBuilder).Name)); } } diff --git a/src/Npgsql/NpgsqlDataSourceBuilder.cs 
b/src/Npgsql/NpgsqlDataSourceBuilder.cs index 6aa9d7ef44..f5746ec38b 100644 --- a/src/Npgsql/NpgsqlDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlDataSourceBuilder.cs @@ -2,12 +2,15 @@ using System.Diagnostics.CodeAnalysis; using System.Net.Security; using System.Security.Cryptography.X509Certificates; +using System.Text.Json; +using System.Text.Json.Nodes; using System.Threading; using System.Threading.Tasks; using Microsoft.Extensions.Logging; using Npgsql.Internal; using Npgsql.Internal.ResolverFactories; using Npgsql.TypeMapping; +using NpgsqlTypes; namespace Npgsql; @@ -75,13 +78,23 @@ static NpgsqlDataSourceBuilder() public NpgsqlDataSourceBuilder(string? connectionString = null) { _internalBuilder = new(new NpgsqlConnectionStringBuilder(connectionString)); + _internalBuilder.ConfigureDefaultFactories = static instance => + { + instance.AppendDefaultFactories(); + instance.AppendResolverFactory(new ExtraConversionResolverFactory()); + instance.AppendResolverFactory(new JsonTypeInfoResolverFactory(instance.JsonSerializerOptions)); + instance.AppendResolverFactory(new RecordTypeInfoResolverFactory()); + instance.AppendResolverFactory(new FullTextSearchTypeInfoResolverFactory()); + instance.AppendResolverFactory(new NetworkTypeInfoResolverFactory()); + instance.AppendResolverFactory(new GeometricTypeInfoResolverFactory()); + instance.AppendResolverFactory(new LTreeTypeInfoResolverFactory()); + }; + _internalBuilder.ConfigureResolverChain = static chain => chain.Add(UnsupportedTypeInfoResolver); _internalBuilder.EnableTransportSecurity(); _internalBuilder.EnableIntegratedSecurity(); - _internalBuilder.ConfigureResolverChain = chain => chain.Add(UnsupportedTypeInfoResolver); _internalBuilder.EnableRanges(); _internalBuilder.EnableMultiranges(); _internalBuilder.EnableArrays(); - ResetResolverFactories(); } /// @@ -108,6 +121,64 @@ public NpgsqlDataSourceBuilder EnableParameterLogging(bool parameterLoggingEnabl return this; } + /// + /// Configures the JSON serializer 
options used when reading and writing all System.Text.Json data. + /// + /// Options to customize JSON serialization and deserialization. + /// + public NpgsqlDataSourceBuilder ConfigureJsonOptions(JsonSerializerOptions serializerOptions) + { + _internalBuilder.ConfigureJsonOptions(serializerOptions); + return this; + } + + /// + /// Sets up dynamic System.Text.Json mappings. This allows mapping arbitrary .NET types to PostgreSQL json and jsonb + /// types, as well as and its derived types. + /// + /// + /// A list of CLR types to map to PostgreSQL jsonb (no need to specify ). + /// + /// + /// A list of CLR types to map to PostgreSQL json (no need to specify ). + /// + /// + /// Due to the dynamic nature of these mappings, they are not compatible with NativeAOT or trimming. + /// + [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] + [RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] + public NpgsqlDataSourceBuilder EnableDynamicJson( + Type[]? jsonbClrTypes = null, + Type[]? jsonClrTypes = null) + { + _internalBuilder.EnableDynamicJson(jsonbClrTypes, jsonClrTypes); + return this; + } + + /// + /// Sets up mappings for the PostgreSQL record type as a .NET or . + /// + /// The same builder instance so that multiple calls can be chained. + [RequiresUnreferencedCode("The mapping of PostgreSQL records as .NET tuples requires reflection usage which is incompatible with trimming.")] + [RequiresDynamicCode("The mapping of PostgreSQL records as .NET tuples requires dynamic code usage which is incompatible with NativeAOT.")] + public NpgsqlDataSourceBuilder EnableRecordsAsTuples() + { + AddTypeInfoResolverFactory(new TupledRecordTypeInfoResolverFactory()); + return this; + } + + /// + /// Sets up mappings allowing the use of unmapped enum, range and multirange types. 
+ /// + /// The same builder instance so that multiple calls can be chained. + [RequiresUnreferencedCode("The use of unmapped enums, ranges or multiranges requires reflection usage which is incompatible with trimming.")] + [RequiresDynamicCode("The use of unmapped enums, ranges or multiranges requires dynamic code usage which is incompatible with NativeAOT.")] + public NpgsqlDataSourceBuilder EnableUnmappedTypes() + { + AddTypeInfoResolverFactory(new UnmappedTypeInfoResolverFactory()); + return this; + } + #region Authentication /// @@ -265,20 +336,7 @@ public void AddTypeInfoResolverFactory(PgTypeInfoResolverFactory factory) => _internalBuilder.AddTypeInfoResolverFactory(factory); /// - void INpgsqlTypeMapper.Reset() - => ResetResolverFactories(); - - void ResetResolverFactories() - { - _internalBuilder.ResetResolverFactories(); - _internalBuilder.AppendResolverFactory(new ExtraConversionResolverFactory()); - _internalBuilder.AppendResolverFactory(new JsonTypeInfoResolverFactory()); - _internalBuilder.AppendResolverFactory(new RecordTypeInfoResolverFactory()); - _internalBuilder.AppendResolverFactory(new FullTextSearchTypeInfoResolverFactory()); - _internalBuilder.AppendResolverFactory(new NetworkTypeInfoResolverFactory()); - _internalBuilder.AppendResolverFactory(new GeometricTypeInfoResolverFactory()); - _internalBuilder.AppendResolverFactory(new LTreeTypeInfoResolverFactory()); - } + void INpgsqlTypeMapper.Reset() => ((INpgsqlTypeMapper)_internalBuilder).Reset(); /// public INpgsqlTypeMapper MapEnum<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] TEnum>(string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index 38bbd4a3e7..cbf515b0b8 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -3,6 +3,8 @@ using System.Diagnostics.CodeAnalysis; using System.Net.Security; using System.Security.Cryptography.X509Certificates; +using System.Text.Json; +using System.Text.Json.Nodes; using System.Threading; using System.Threading.Tasks; using Microsoft.Extensions.Logging; @@ -10,6 +12,7 @@ using Npgsql.Internal.ResolverFactories; using Npgsql.Properties; using Npgsql.TypeMapping; +using NpgsqlTypes; namespace Npgsql; @@ -46,6 +49,10 @@ public sealed class NpgsqlSlimDataSourceBuilder : INpgsqlTypeMapper Action? _connectionInitializer; Func? _connectionInitializerAsync; + internal JsonSerializerOptions? JsonSerializerOptions { get; private set; } + + internal Action ConfigureDefaultFactories { get; set; } + /// /// A connection string builder that can be used to configured the connection string on the builder. /// @@ -70,13 +77,14 @@ static NpgsqlSlimDataSourceBuilder() /// public NpgsqlSlimDataSourceBuilder(string? 
connectionString = null) : this(new NpgsqlConnectionStringBuilder(connectionString)) - => ResetResolverFactories(); + {} internal NpgsqlSlimDataSourceBuilder(NpgsqlConnectionStringBuilder connectionStringBuilder) { ConnectionStringBuilder = connectionStringBuilder; _userTypeMapper = new() { DefaultNameTranslator = GlobalTypeMapper.Instance.DefaultNameTranslator }; - ConfigureResolverChain = chain => chain.Add(UnsupportedTypeInfoResolver); + ConfigureDefaultFactories = static instance => instance.AppendDefaultFactories(); + ConfigureResolverChain = static chain => chain.Add(UnsupportedTypeInfoResolver); } /// @@ -103,6 +111,17 @@ public NpgsqlSlimDataSourceBuilder EnableParameterLogging(bool parameterLoggingE return this; } + /// + /// Configures the JSON serializer options used when reading and writing all System.Text.Json data. + /// + /// Options to customize JSON serialization and deserialization. + /// + public NpgsqlSlimDataSourceBuilder ConfigureJsonOptions(JsonSerializerOptions serializerOptions) + { + JsonSerializerOptions = serializerOptions; + return this; + } + #region Authentication /// @@ -342,15 +361,14 @@ public bool UnmapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMember /// public void AddTypeInfoResolverFactory(PgTypeInfoResolverFactory factory) => _resolverChainBuilder.PrependResolverFactory(factory); - void INpgsqlTypeMapper.Reset() - => ResetResolverFactories(); + /// + void INpgsqlTypeMapper.Reset() => _resolverChainBuilder.Clear(); internal Action> ConfigureResolverChain { get; set; } internal void AppendResolverFactory(PgTypeInfoResolverFactory factory) => _resolverChainBuilder.AppendResolverFactory(factory); - internal void ResetResolverFactories() + internal void AppendDefaultFactories() { - _resolverChainBuilder.Clear(); // When used publicly we start off with our slim defaults. 
_resolverChainBuilder.AppendResolverFactory(_userTypeMapper); if (GlobalTypeMapper.Instance.GetUserMappingsResolverFactory() is { } userMappingsResolverFactory) @@ -456,6 +474,53 @@ public NpgsqlSlimDataSourceBuilder EnableIntegratedSecurity() return this; } + /// + /// Sets up dynamic System.Text.Json mappings. This allows mapping arbitrary .NET types to PostgreSQL json and jsonb + /// types, as well as and its derived types. + /// + /// + /// A list of CLR types to map to PostgreSQL jsonb (no need to specify ). + /// + /// + /// A list of CLR types to map to PostgreSQL json (no need to specify ). + /// + /// + /// Due to the dynamic nature of these mappings, they are not compatible with NativeAOT or trimming. + /// + [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] + [RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] + public NpgsqlSlimDataSourceBuilder EnableDynamicJson( + Type[]? jsonbClrTypes = null, + Type[]? jsonClrTypes = null) + { + _resolverChainBuilder.AppendResolverFactory(new JsonDynamicTypeInfoResolverFactory(jsonbClrTypes, jsonClrTypes, JsonSerializerOptions)); + return this; + } + + /// + /// Sets up mappings for the PostgreSQL record type as a .NET or . + /// + /// The same builder instance so that multiple calls can be chained. + [RequiresUnreferencedCode("The mapping of PostgreSQL records as .NET tuples requires reflection usage which is incompatible with trimming.")] + [RequiresDynamicCode("The mapping of PostgreSQL records as .NET tuples requires dynamic code usage which is incompatible with NativeAOT.")] + public NpgsqlSlimDataSourceBuilder EnableRecordsAsTuples() + { + AddTypeInfoResolverFactory(new TupledRecordTypeInfoResolverFactory()); + return this; + } + + /// + /// Sets up mappings allowing the use of unmapped enum, range and multirange types. 
+ /// + /// The same builder instance so that multiple calls can be chained. + [RequiresUnreferencedCode("The use of unmapped enums, ranges or multiranges requires reflection usage which is incompatible with trimming.")] + [RequiresDynamicCode("The use of unmapped enums, ranges or multiranges requires dynamic code usage which is incompatible with NativeAOT.")] + public NpgsqlSlimDataSourceBuilder EnableUnmappedTypes() + { + AddTypeInfoResolverFactory(new UnmappedTypeInfoResolverFactory()); + return this; + } + #endregion Optional opt-ins /// @@ -546,6 +611,8 @@ NpgsqlDataSourceConfiguration PrepareConfiguration() throw new NotSupportedException(NpgsqlStrings.CannotSetBothPasswordProviderAndPassword); } + ConfigureDefaultFactories(this); + return new( Name, _loggerFactory is null diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index 6837192430..caec564d37 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -4,13 +4,16 @@ Npgsql.ChannelBinding Npgsql.ChannelBinding.Disable = 0 -> Npgsql.ChannelBinding Npgsql.ChannelBinding.Prefer = 1 -> Npgsql.ChannelBinding Npgsql.ChannelBinding.Require = 2 -> Npgsql.ChannelBinding -Npgsql.INpgsqlTypeMapperExtensions Npgsql.NpgsqlBatch.CreateBatchCommand() -> Npgsql.NpgsqlBatchCommand! Npgsql.NpgsqlConnectionStringBuilder.ChannelBinding.get -> Npgsql.ChannelBinding Npgsql.NpgsqlConnectionStringBuilder.ChannelBinding.set -> void Npgsql.NpgsqlBinaryImporter.WriteRow(params object?[]! values) -> void Npgsql.NpgsqlBinaryImporter.WriteRowAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken), params object?[]! values) -> System.Threading.Tasks.Task! Npgsql.NpgsqlDataSourceBuilder.AddTypeInfoResolverFactory(Npgsql.Internal.PgTypeInfoResolverFactory! factory) -> void +Npgsql.NpgsqlDataSourceBuilder.ConfigureJsonOptions(System.Text.Json.JsonSerializerOptions! serializerOptions) -> Npgsql.NpgsqlDataSourceBuilder! 
+Npgsql.NpgsqlDataSourceBuilder.EnableDynamicJson(System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.EnableRecordsAsTuples() -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.EnableUnmappedTypes() -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.MapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! Npgsql.NpgsqlDataSourceBuilder.Name.get -> string? Npgsql.NpgsqlDataSourceBuilder.Name.set -> void @@ -22,11 +25,13 @@ Npgsql.NpgsqlSlimDataSourceBuilder Npgsql.NpgsqlSlimDataSourceBuilder.AddTypeInfoResolverFactory(Npgsql.Internal.PgTypeInfoResolverFactory! factory) -> void Npgsql.NpgsqlSlimDataSourceBuilder.Build() -> Npgsql.NpgsqlDataSource! Npgsql.NpgsqlSlimDataSourceBuilder.BuildMultiHost() -> Npgsql.NpgsqlMultiHostDataSource! +Npgsql.NpgsqlSlimDataSourceBuilder.ConfigureJsonOptions(System.Text.Json.JsonSerializerOptions! serializerOptions) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.ConnectionString.get -> string! Npgsql.NpgsqlSlimDataSourceBuilder.ConnectionStringBuilder.get -> Npgsql.NpgsqlConnectionStringBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.DefaultNameTranslator.get -> Npgsql.INpgsqlNameTranslator! Npgsql.NpgsqlSlimDataSourceBuilder.DefaultNameTranslator.set -> void Npgsql.NpgsqlSlimDataSourceBuilder.EnableArrays() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableDynamicJson(System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableExtraConversions() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableFullTextSearch() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableIntegratedSecurity() -> Npgsql.NpgsqlSlimDataSourceBuilder! 
@@ -35,7 +40,9 @@ Npgsql.NpgsqlSlimDataSourceBuilder.EnableMultiranges() -> Npgsql.NpgsqlSlimDataS Npgsql.NpgsqlSlimDataSourceBuilder.EnableParameterLogging(bool parameterLoggingEnabled = true) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableRanges() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableRecords() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableRecordsAsTuples() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableTransportSecurity() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableUnmappedTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.MapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! Npgsql.NpgsqlSlimDataSourceBuilder.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! Npgsql.NpgsqlSlimDataSourceBuilder.MapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! @@ -121,9 +128,6 @@ override NpgsqlTypes.NpgsqlCidr.ToString() -> string! *REMOVED*static NpgsqlTypes.NpgsqlInet.ToIPAddress(NpgsqlTypes.NpgsqlInet inet) -> System.Net.IPAddress! *REMOVED*static NpgsqlTypes.NpgsqlInet.ToNpgsqlInet(System.Net.IPAddress? ip) -> NpgsqlTypes.NpgsqlInet *REMOVED*Npgsql.NpgsqlDataSourceBuilder.AddTypeResolverFactory(Npgsql.Internal.TypeHandling.TypeHandlerResolverFactory! resolverFactory) -> void -static Npgsql.INpgsqlTypeMapperExtensions.EnableDynamicJson(this T mapper, System.Text.Json.JsonSerializerOptions? serializerOptions = null, System.Type![]? jsonbClrTypes = null, System.Type![]? 
jsonClrTypes = null) -> T -static Npgsql.INpgsqlTypeMapperExtensions.EnableRecordsAsTuples(this T mapper) -> T -static Npgsql.INpgsqlTypeMapperExtensions.EnableUnmappedTypes(this T mapper) -> T static NpgsqlTypes.NpgsqlCidr.explicit operator System.Net.IPAddress!(NpgsqlTypes.NpgsqlCidr cidr) -> System.Net.IPAddress! static NpgsqlTypes.NpgsqlCidr.implicit operator NpgsqlTypes.NpgsqlInet(NpgsqlTypes.NpgsqlCidr cidr) -> NpgsqlTypes.NpgsqlInet *REMOVED*Npgsql.NpgsqlConnection.IntegratedSecurity.get -> bool diff --git a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs index 8d07d73335..ba31c7dcdf 100644 --- a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs +++ b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs @@ -2,9 +2,13 @@ using System.Collections.Generic; using System.Diagnostics.CodeAnalysis; using System.Linq; +using System.Text.Json; +using System.Text.Json.Nodes; using System.Threading; using Npgsql.Internal; using Npgsql.Internal.Postgres; +using Npgsql.Internal.ResolverFactories; +using NpgsqlTypes; namespace Npgsql.TypeMapping; @@ -62,6 +66,7 @@ internal void AddGlobalTypeMappingResolvers(PgTypeInfoResolverFactory[] factorie PgSerializerOptions? _typeMappingOptions; Func? _builderFactory; + JsonSerializerOptions? 
_jsonSerializerOptions; PgSerializerOptions TypeMappingOptions { @@ -148,6 +153,30 @@ public void AddTypeInfoResolverFactory(PgTypeInfoResolverFactory factory) } } + void ReplaceTypeInfoResolverFactory(PgTypeInfoResolverFactory factory) + { + _lock.EnterWriteLock(); + try + { + var type = factory.GetType(); + + for (var i = 0; i < _pluginResolverFactories.Count; i++) + { + if (_pluginResolverFactories[i].GetType() == type) + { + _pluginResolverFactories[i] = factory; + break; + } + } + + ResetTypeMappingCache(); + } + finally + { + _lock.ExitWriteLock(); + } + } + /// public void Reset() { @@ -171,6 +200,66 @@ public INpgsqlNameTranslator DefaultNameTranslator set => _userTypeMapper.DefaultNameTranslator = value; } + /// + /// Configures the JSON serializer options used when reading and writing all System.Text.Json data. + /// + /// Options to customize JSON serialization and deserialization. + /// + public INpgsqlTypeMapper ConfigureJsonOptions(JsonSerializerOptions serializerOptions) + { + _jsonSerializerOptions = serializerOptions; + // If JsonTypeInfoResolverFactory exists we replace it with a configured instance on the same index of the array. + ReplaceTypeInfoResolverFactory(new JsonTypeInfoResolverFactory(serializerOptions)); + return this; + } + + /// + /// Sets up dynamic System.Text.Json mappings. This allows mapping arbitrary .NET types to PostgreSQL json and jsonb + /// types, as well as and its derived types. + /// + /// + /// A list of CLR types to map to PostgreSQL jsonb (no need to specify ). + /// + /// + /// A list of CLR types to map to PostgreSQL json (no need to specify ). + /// + /// + /// Due to the dynamic nature of these mappings, they are not compatible with NativeAOT or trimming. + /// + [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] + [RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. 
This may not work when AOT compiling.")] + public INpgsqlTypeMapper EnableDynamicJson( + Type[]? jsonbClrTypes = null, + Type[]? jsonClrTypes = null) + { + AddTypeInfoResolverFactory(new JsonDynamicTypeInfoResolverFactory(jsonbClrTypes, jsonClrTypes, _jsonSerializerOptions)); + return this; + } + + /// + /// Sets up mappings for the PostgreSQL record type as a .NET or . + /// + /// The same builder instance so that multiple calls can be chained. + [RequiresUnreferencedCode("The mapping of PostgreSQL records as .NET tuples requires reflection usage which is incompatible with trimming.")] + [RequiresDynamicCode("The mapping of PostgreSQL records as .NET tuples requires dynamic code usage which is incompatible with NativeAOT.")] + public INpgsqlTypeMapper EnableRecordsAsTuples() + { + AddTypeInfoResolverFactory(new TupledRecordTypeInfoResolverFactory()); + return this; + } + + /// + /// Sets up mappings allowing the use of unmapped enum, range and multirange types. + /// + /// The same builder instance so that multiple calls can be chained. + [RequiresUnreferencedCode("The use of unmapped enums, ranges or multiranges requires reflection usage which is incompatible with trimming.")] + [RequiresDynamicCode("The use of unmapped enums, ranges or multiranges requires dynamic code usage which is incompatible with NativeAOT.")] + public INpgsqlTypeMapper EnableUnmappedTypes() + { + AddTypeInfoResolverFactory(new UnmappedTypeInfoResolverFactory()); + return this; + } + /// public INpgsqlTypeMapper MapEnum<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] TEnum>(string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) where TEnum : struct, Enum { diff --git a/src/Npgsql/TypeMapping/INpgsqlTypeMapperExtensions.cs b/src/Npgsql/TypeMapping/INpgsqlTypeMapperExtensions.cs deleted file mode 100644 index 0efacd7687..0000000000 --- a/src/Npgsql/TypeMapping/INpgsqlTypeMapperExtensions.cs +++ /dev/null @@ -1,68 +0,0 @@ -using System; -using System.Diagnostics.CodeAnalysis; -using System.Text.Json; -using System.Text.Json.Nodes; -using Npgsql.Internal.ResolverFactories; -using Npgsql.TypeMapping; -using NpgsqlTypes; - -// ReSharper disable once CheckNamespace -namespace Npgsql; - -/// -/// Extension methods over . -/// -public static class INpgsqlTypeMapperExtensions -{ - /// - /// Sets up dynamic System.Text.Json mappings. This allows mapping arbitrary .NET types to PostgreSQL json and jsonb - /// types, as well as and its derived types. - /// - /// The type mapper. - /// Options to customize JSON serialization and deserialization. - /// - /// A list of CLR types to map to PostgreSQL jsonb (no need to specify ). - /// - /// - /// A list of CLR types to map to PostgreSQL json (no need to specify ). - /// - /// - /// Due to the dynamic nature of these mappings, they are not compatible with NativeAOT or trimming. - /// - [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] - [RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] - public static T EnableDynamicJson( - this T mapper, - JsonSerializerOptions? serializerOptions = null, - Type[]? jsonbClrTypes = null, - Type[]? jsonClrTypes = null) - where T : INpgsqlTypeMapper - { - mapper.AddTypeInfoResolverFactory(new JsonDynamicTypeInfoResolverFactory(jsonbClrTypes, jsonClrTypes, serializerOptions)); - return mapper; - } - - /// - /// Sets up mappings for the PostgreSQL record type as a .NET or . 
- /// - /// The same builder instance so that multiple calls can be chained. - [RequiresUnreferencedCode("The mapping of PostgreSQL records as .NET tuples requires reflection usage which is incompatible with trimming.")] - [RequiresDynamicCode("The mapping of PostgreSQL records as .NET tuples requires dynamic code usage which is incompatible with NativeAOT.")] - public static T EnableRecordsAsTuples(this T mapper) where T : INpgsqlTypeMapper - { - mapper.AddTypeInfoResolverFactory(new TupledRecordTypeInfoResolverFactory()); - return mapper; - } - - /// - /// Sets up mappings allowing the use of unmapped enum, range and multirange types. - /// - /// The same builder instance so that multiple calls can be chained. - [RequiresUnreferencedCode("The use of unmapped enums, ranges or multiranges requires reflection usage which is incompatible with trimming.")] - [RequiresDynamicCode("The use of unmapped enums, ranges or multiranges requires dynamic code usage which is incompatible with NativeAOT.")] - public static T EnableUnmappedTypes(this T mapper) where T : INpgsqlTypeMapper - { - mapper.AddTypeInfoResolverFactory(new UnmappedTypeInfoResolverFactory()); - return mapper; - } -} diff --git a/test/Npgsql.Tests/Types/EnumTests.cs b/test/Npgsql.Tests/Types/EnumTests.cs index 652cf45576..c36514d6d3 100644 --- a/test/Npgsql.Tests/Types/EnumTests.cs +++ b/test/Npgsql.Tests/Types/EnumTests.cs @@ -166,7 +166,7 @@ public async Task Unmapped_enum_as_clr_enum_supported_only_with_EnableUnmappedTy var errorMessage = string.Format( NpgsqlStrings.UnmappedEnumsNotEnabled, - nameof(INpgsqlTypeMapperExtensions.EnableUnmappedTypes), + nameof(NpgsqlSlimDataSourceBuilder.EnableUnmappedTypes), nameof(NpgsqlDataSourceBuilder)); var exception = await AssertTypeUnsupportedWrite(Mood.Happy, enumType); diff --git a/test/Npgsql.Tests/Types/JsonDynamicTests.cs b/test/Npgsql.Tests/Types/JsonDynamicTests.cs index 304d4db50f..73b0965d12 100644 --- a/test/Npgsql.Tests/Types/JsonDynamicTests.cs +++ 
b/test/Npgsql.Tests/Types/JsonDynamicTests.cs @@ -121,7 +121,7 @@ public async Task As_poco_supported_only_with_EnableDynamicJson() var errorMessage = string.Format( NpgsqlStrings.DynamicJsonNotEnabled, nameof(WeatherForecast), - nameof(INpgsqlTypeMapperExtensions.EnableDynamicJson), + nameof(NpgsqlSlimDataSourceBuilder.EnableDynamicJson), nameof(NpgsqlDataSourceBuilder)); var exception = await AssertTypeUnsupportedWrite( @@ -151,8 +151,9 @@ public async Task As_poco_supported_only_with_EnableDynamicJson() [Test] public async Task Poco_does_not_stomp_GetValue_string() { - var dataSourceBuilder = CreateDataSourceBuilder(); - var dataSource = dataSourceBuilder.EnableDynamicJson(null, new[] {typeof(WeatherForecast)}, new[] {typeof(WeatherForecast)}).Build(); + var dataSource = CreateDataSourceBuilder() + .EnableDynamicJson(new[] {typeof(WeatherForecast)}, new[] {typeof(WeatherForecast)}) + .Build(); var sqlLiteral = IsJsonb ? """{"Date": "2019-09-01T00:00:00", "Summary": "Partly cloudy", "TemperatureC": 10}""" @@ -168,9 +169,10 @@ public async Task Poco_does_not_stomp_GetValue_string() [Test] public async Task Custom_JsonSerializerOptions() { - var dataSourceBuilder = CreateDataSourceBuilder(); - dataSourceBuilder.EnableDynamicJson(new JsonSerializerOptions { PropertyNamingPolicy = JsonNamingPolicy.CamelCase }); - await using var dataSource = dataSourceBuilder.Build(); + await using var dataSource = CreateDataSourceBuilder() + .ConfigureJsonOptions(new JsonSerializerOptions { PropertyNamingPolicy = JsonNamingPolicy.CamelCase }) + .EnableDynamicJson() + .Build(); await AssertTypeWrite( dataSource, diff --git a/test/Npgsql.Tests/Types/MultirangeTests.cs b/test/Npgsql.Tests/Types/MultirangeTests.cs index 461177d080..92f27bb50f 100644 --- a/test/Npgsql.Tests/Types/MultirangeTests.cs +++ b/test/Npgsql.Tests/Types/MultirangeTests.cs @@ -142,7 +142,7 @@ public async Task Unmapped_multirange_supported_only_with_EnableUnmappedTypes() var errorMessage = string.Format( 
NpgsqlStrings.UnmappedRangesNotEnabled, - nameof(INpgsqlTypeMapperExtensions.EnableUnmappedTypes), + nameof(NpgsqlSlimDataSourceBuilder.EnableUnmappedTypes), nameof(NpgsqlDataSourceBuilder)); var exception = await AssertTypeUnsupportedWrite( diff --git a/test/Npgsql.Tests/Types/RangeTests.cs b/test/Npgsql.Tests/Types/RangeTests.cs index 74c417216c..75c11e04ce 100644 --- a/test/Npgsql.Tests/Types/RangeTests.cs +++ b/test/Npgsql.Tests/Types/RangeTests.cs @@ -204,7 +204,7 @@ public async Task Unmapped_range_supported_only_with_EnableUnmappedTypes() var errorMessage = string.Format( NpgsqlStrings.UnmappedRangesNotEnabled, - nameof(INpgsqlTypeMapperExtensions.EnableUnmappedTypes), + nameof(NpgsqlSlimDataSourceBuilder.EnableUnmappedTypes), nameof(NpgsqlDataSourceBuilder)); var exception = await AssertTypeUnsupportedWrite(new NpgsqlRange("bar", "foo"), rangeType); diff --git a/test/Npgsql.Tests/Types/RecordTests.cs b/test/Npgsql.Tests/Types/RecordTests.cs index 95e95dc33e..268a7027aa 100644 --- a/test/Npgsql.Tests/Types/RecordTests.cs +++ b/test/Npgsql.Tests/Types/RecordTests.cs @@ -94,7 +94,7 @@ public async Task As_ValueTuple_supported_only_with_EnableRecordsAsTuples() var errorMessage = string.Format( NpgsqlStrings.RecordsNotEnabled, - nameof(INpgsqlTypeMapperExtensions.EnableRecordsAsTuples), + nameof(NpgsqlSlimDataSourceBuilder.EnableRecordsAsTuples), nameof(NpgsqlDataSourceBuilder), nameof(NpgsqlSlimDataSourceBuilder.EnableRecords)); @@ -118,7 +118,7 @@ public async Task Records_not_supported_by_default_on_NpgsqlSlimSourceBuilder() var errorMessage = string.Format( NpgsqlStrings.RecordsNotEnabled, - nameof(INpgsqlTypeMapperExtensions.EnableRecordsAsTuples), + nameof(NpgsqlSlimDataSourceBuilder.EnableRecordsAsTuples), nameof(NpgsqlSlimDataSourceBuilder), nameof(NpgsqlSlimDataSourceBuilder.EnableRecords)); From 52734da52740e63b7d7feb02baefa31f239f353a Mon Sep 17 00:00:00 2001 From: Bruce Bowyer-Smyth Date: Tue, 21 Nov 2023 05:02:04 +1000 Subject: [PATCH 313/761] Avoid 
duplicating stream in bytea parameters, special case MemoryStream (#5414) Closes #5299 --- .../Converters/Primitive/ByteaConverters.cs | 47 +++++++++++++++---- src/Npgsql/Internal/PgWriter.cs | 2 +- test/Npgsql.Tests/Types/ByteaTests.cs | 18 +++++++ 3 files changed, 58 insertions(+), 9 deletions(-) diff --git a/src/Npgsql/Internal/Converters/Primitive/ByteaConverters.cs b/src/Npgsql/Internal/Converters/Primitive/ByteaConverters.cs index 1d2b1ce531..1ff6131f88 100644 --- a/src/Npgsql/Internal/Converters/Primitive/ByteaConverters.cs +++ b/src/Npgsql/Internal/Converters/Primitive/ByteaConverters.cs @@ -101,7 +101,10 @@ public override ValueTask ReadAsync(PgReader reader, CancellationToken c public override Size GetSize(SizeContext context, Stream value, ref object? writeState) { - var memoryStream = new MemoryStream(value.CanSeek ? (int)(value.Length - value.Position) : 0); + if (value.CanSeek) + return checked((int)(value.Length - value.Position)); + + var memoryStream = new MemoryStream(); value.CopyTo(memoryStream); writeState = memoryStream; return checked((int)memoryStream.Length); @@ -109,16 +112,44 @@ public override Size GetSize(SizeContext context, Stream value, ref object? 
writ public override void Write(PgWriter writer, Stream value) { - if (!((MemoryStream)writer.Current.WriteState!).TryGetBuffer(out var segment)) - throw new InvalidOperationException(); - writer.WriteBytes(segment.AsSpan()); + if (writer.Current.WriteState is not null) + { + if (!((MemoryStream)writer.Current.WriteState!).TryGetBuffer(out var writeStateSegment)) + throw new InvalidOperationException(); + + writer.WriteBytes(writeStateSegment.AsSpan()); + return; + } + + // Non-derived MemoryStream fast path + if (value.GetType() == typeof(MemoryStream) && ((MemoryStream)value).TryGetBuffer(out var segment)) + writer.WriteBytes(segment.AsSpan((int)value.Position)); + else + value.CopyTo(writer.GetStream()); } public override ValueTask WriteAsync(PgWriter writer, Stream value, CancellationToken cancellationToken = default) { - if (!((MemoryStream)writer.Current.WriteState!).TryGetBuffer(out var segment)) - throw new InvalidOperationException(); - - return writer.WriteBytesAsync(segment.AsMemory(), cancellationToken); + if (writer.Current.WriteState is not null) + { + if (!((MemoryStream)writer.Current.WriteState!).TryGetBuffer(out var writeStateSegment)) + throw new InvalidOperationException(); + + return writer.WriteBytesAsync(writeStateSegment.AsMemory(), cancellationToken); + } + + // Non-derived MemoryStream fast path + if (value.GetType() == typeof(MemoryStream) && ((MemoryStream)value).TryGetBuffer(out var segment)) + { + return writer.WriteBytesAsync(segment.AsMemory((int)value.Position), cancellationToken); + } + else + { +#if NETSTANDARD2_0 + return new ValueTask(value.CopyToAsync(writer.GetStream())); +#else + return new ValueTask(value.CopyToAsync(writer.GetStream(), cancellationToken)); +#endif + } } } diff --git a/src/Npgsql/Internal/PgWriter.cs b/src/Npgsql/Internal/PgWriter.cs index 4321c10efa..2325683b68 100644 --- a/src/Npgsql/Internal/PgWriter.cs +++ b/src/Npgsql/Internal/PgWriter.cs @@ -517,7 +517,7 @@ Task Write(bool async, byte[] buffer, int 
offset, int count, CancellationToken c if (cancellationToken.IsCancellationRequested) return Task.FromCanceled(cancellationToken); - return _writer.WriteBytesAsync(_allowMixedIO, buffer, cancellationToken).AsTask(); + return _writer.WriteBytesAsync(_allowMixedIO, buffer.AsMemory(offset, count), cancellationToken).AsTask(); } _writer.WriteBytes(_allowMixedIO, new Span(buffer, offset, count)); diff --git a/test/Npgsql.Tests/Types/ByteaTests.cs b/test/Npgsql.Tests/Types/ByteaTests.cs index 1926f8f4e2..c34bce04ff 100644 --- a/test/Npgsql.Tests/Types/ByteaTests.cs +++ b/test/Npgsql.Tests/Types/ByteaTests.cs @@ -70,6 +70,24 @@ public Task Write_as_MemoryStream_truncated() msFactory, "\\x020304", "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false); } + [Test] + public Task Write_as_MemoryStream_exposableArray() + { + var msFactory = () => + { + var ms = new MemoryStream(20); + ms.WriteByte(1); + ms.WriteByte(2); + ms.WriteByte(3); + ms.WriteByte(4); + ms.Position = 1; + return ms; + }; + + return AssertTypeWrite( + msFactory, "\\x020304", "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false); + } + [Test] public async Task Write_as_MemoryStream_long() { From 2df3b4972479943e8ff0ee72c2f69e43d7f7a2c1 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Mon, 20 Nov 2023 20:03:17 +0100 Subject: [PATCH 314/761] Allow all MemoryStream derived types to use the fast path too --- src/Npgsql/Internal/Converters/Primitive/ByteaConverters.cs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Npgsql/Internal/Converters/Primitive/ByteaConverters.cs b/src/Npgsql/Internal/Converters/Primitive/ByteaConverters.cs index 1ff6131f88..f7760f836c 100644 --- a/src/Npgsql/Internal/Converters/Primitive/ByteaConverters.cs +++ b/src/Npgsql/Internal/Converters/Primitive/ByteaConverters.cs @@ -122,7 +122,7 @@ public override void Write(PgWriter writer, Stream value) } // Non-derived MemoryStream fast path - if (value.GetType() == typeof(MemoryStream) && 
((MemoryStream)value).TryGetBuffer(out var segment)) + if (value is MemoryStream memoryStream && memoryStream.TryGetBuffer(out var segment)) writer.WriteBytes(segment.AsSpan((int)value.Position)); else value.CopyTo(writer.GetStream()); @@ -139,7 +139,7 @@ public override ValueTask WriteAsync(PgWriter writer, Stream value, Cancellation } // Non-derived MemoryStream fast path - if (value.GetType() == typeof(MemoryStream) && ((MemoryStream)value).TryGetBuffer(out var segment)) + if (value is MemoryStream memoryStream && memoryStream.TryGetBuffer(out var segment)) { return writer.WriteBytesAsync(segment.AsMemory((int)value.Position), cancellationToken); } From 5893c6a9837fa774ab31a5beb4dc5b9f879f1d56 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Mon, 20 Nov 2023 22:18:51 +0300 Subject: [PATCH 315/761] Obsolete InternalCommandTimeout (#4856) Related to #3045 --- src/Npgsql/Internal/NpgsqlConnector.cs | 43 ++--------------- src/Npgsql/NpgsqlBinaryExporter.cs | 7 +-- src/Npgsql/NpgsqlBinaryImporter.cs | 3 +- src/Npgsql/NpgsqlConnectionStringBuilder.cs | 47 ++++++++++--------- src/Npgsql/NpgsqlRawCopyStream.cs | 7 +-- .../Replication/ReplicationConnection.cs | 4 +- src/Npgsql/Util/NpgsqlTimeout.cs | 5 -- 7 files changed, 35 insertions(+), 81 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 05120ac589..cf10f2a081 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -214,13 +214,6 @@ internal void FlagAsWritableForMultiplexing() throw new Exception("Multiplexing lock was not taken when releasing. Please report a bug."); } - /// - /// The timeout for reading messages that are part of the user's command - /// (i.e. which aren't internal prepended commands). 
- /// - /// Precision is milliseconds - internal int UserTimeout { private get; set; } - /// /// A lock that's taken while a cancellation is being delivered; new queries are blocked until the /// cancellation is delivered. This reduces the chance that a cancellation meant for a previous @@ -414,26 +407,6 @@ internal NpgsqlConnector(NpgsqlDataSource dataSource, NpgsqlConnection conn) string KerberosServiceName => Settings.KerberosServiceName; int ConnectionTimeout => Settings.Timeout; - /// - /// The actual command timeout value that gets set on internal commands. - /// - /// Precision is milliseconds - int InternalCommandTimeout - { - get - { - var internalTimeout = Settings.InternalCommandTimeout; - if (internalTimeout == -1) - return Math.Max(Settings.CommandTimeout, MinimumInternalCommandTimeout) * 1000; - - // Todo: Decide what we really want here - // This assertion can easily fail if InternalCommandTimeout is set to 1 or 2 in the connection string - // We probably don't want to allow these values but in that case a Debug.Assert is the wrong way to enforce it. - Debug.Assert(internalTimeout == 0 || internalTimeout >= MinimumInternalCommandTimeout); - return internalTimeout * 1000; - } - } - #endregion Configuration settings #region State management @@ -1328,7 +1301,6 @@ internal ValueTask ReadMessage( try { // TODO: There could be room for optimization here, rather than the async call(s) - ReadBuffer.Timeout = TimeSpan.FromMilliseconds(InternalCommandTimeout); for (; PendingPrependedResponses > 0; PendingPrependedResponses--) await ReadMessageLong(async, DataRowLoadingMode.Skip, readingNotifications: false, isReadingPrependedMessage: true).ConfigureAwait(false); // We've read all the prepended response. 
@@ -1348,8 +1320,6 @@ internal ValueTask ReadMessage( try { - ReadBuffer.Timeout = TimeSpan.FromMilliseconds(UserTimeout); - while (true) { await ReadBuffer.Ensure(5, async, readingNotifications).ConfigureAwait(false); @@ -1788,7 +1758,6 @@ internal void PerformUserCancellation() { if (cancellationTimeout > 0) { - UserTimeout = cancellationTimeout; ReadBuffer.Timeout = TimeSpan.FromMilliseconds(cancellationTimeout); ReadBuffer.Cts.CancelAfter(cancellationTimeout); } @@ -1797,7 +1766,6 @@ internal void PerformUserCancellation() } } - UserTimeout = -1; ReadBuffer.Timeout = _cancelImmediatelyTimeout; ReadBuffer.Cts.Cancel(); } @@ -2482,10 +2450,10 @@ UserAction DoStartUserAction(ConnectorState newState, NpgsqlCommand? command, StartCancellableOperation(cancellationToken, attemptPgCancellation); - // We reset the UserTimeout for every user action, so it wouldn't leak from the previous query or action + // We reset the ReadBuffer.Timeout for every user action, so it wouldn't leak from the previous query or action // For example, we might have successfully cancelled the previous query (so the connection is not broken) // But the next time, we call the Prepare, which doesn't set it's own timeout - UserTimeout = (command?.CommandTimeout ?? Settings.CommandTimeout) * 1000; + ReadBuffer.Timeout = TimeSpan.FromSeconds(command?.CommandTimeout ?? Settings.CommandTimeout); return new UserAction(this); } @@ -2584,9 +2552,8 @@ void PerformKeepAlive(object? 
state) LogMessages.SendingKeepalive(ConnectionLogger, Id); AttemptPostgresCancellation = false; - var timeout = InternalCommandTimeout; - WriteBuffer.Timeout = TimeSpan.FromSeconds(timeout); - UserTimeout = timeout; + var timeout = Math.Max(Settings.CommandTimeout, MinimumInternalCommandTimeout); + ReadBuffer.Timeout = WriteBuffer.Timeout = TimeSpan.FromSeconds(timeout); WriteSync(async: false).GetAwaiter().GetResult(); Flush(); SkipUntil(BackendMessageCode.ReadyForQuery); @@ -2628,7 +2595,7 @@ internal async Task Wait(bool async, int timeout, CancellationToken cancel cancellationToken.ThrowIfCancellationRequested(); var timeoutForKeepalive = _isKeepAliveEnabled && (timeout <= 0 || keepaliveMs < timeout); - UserTimeout = timeoutForKeepalive ? keepaliveMs : timeout; + ReadBuffer.Timeout = TimeSpan.FromMilliseconds(timeoutForKeepalive ? keepaliveMs : timeout); try { var msg = await ReadMessageWithNotifications(async).ConfigureAwait(false); diff --git a/src/Npgsql/NpgsqlBinaryExporter.cs b/src/Npgsql/NpgsqlBinaryExporter.cs index 5794b73b94..248198418e 100644 --- a/src/Npgsql/NpgsqlBinaryExporter.cs +++ b/src/Npgsql/NpgsqlBinaryExporter.cs @@ -46,12 +46,7 @@ public sealed class NpgsqlBinaryExporter : ICancelable /// public TimeSpan Timeout { - set - { - _buf.Timeout = value; - // While calling Complete(), we're using the connector, which overwrites the buffer's timeout with it's own - _connector.UserTimeout = (int)value.TotalMilliseconds; - } + set => _buf.Timeout = value; } #endregion diff --git a/src/Npgsql/NpgsqlBinaryImporter.cs b/src/Npgsql/NpgsqlBinaryImporter.cs index afb0065508..b644b2e710 100644 --- a/src/Npgsql/NpgsqlBinaryImporter.cs +++ b/src/Npgsql/NpgsqlBinaryImporter.cs @@ -52,8 +52,7 @@ public TimeSpan Timeout set { _buf.Timeout = value; - // While calling Complete(), we're using the connector, which overwrites the buffer's timeout with it's own - _connector.UserTimeout = (int)value.TotalMilliseconds; + _connector.ReadBuffer.Timeout = value; } } diff 
--git a/src/Npgsql/NpgsqlConnectionStringBuilder.cs b/src/Npgsql/NpgsqlConnectionStringBuilder.cs index b4b038ed24..30a2c44d87 100644 --- a/src/Npgsql/NpgsqlConnectionStringBuilder.cs +++ b/src/Npgsql/NpgsqlConnectionStringBuilder.cs @@ -844,29 +844,6 @@ public int CommandTimeout } int _commandTimeout; - /// - /// The time to wait (in seconds) while trying to execute a an internal command before terminating the attempt and generating an error. - /// - [Category("Timeouts")] - [Description("The time to wait (in seconds) while trying to execute a an internal command before terminating the attempt and generating an error. -1 uses CommandTimeout, 0 means no timeout.")] - [DisplayName("Internal Command Timeout")] - [NpgsqlConnectionStringProperty] - [DefaultValue(-1)] - public int InternalCommandTimeout - { - get => _internalCommandTimeout; - set - { - if (value != 0 && value != -1 && value < NpgsqlConnector.MinimumInternalCommandTimeout) - throw new ArgumentOutOfRangeException(nameof(value), value, - $"InternalCommandTimeout must be >= {NpgsqlConnector.MinimumInternalCommandTimeout}, 0 (infinite) or -1 (use CommandTimeout)"); - - _internalCommandTimeout = value; - SetValue(nameof(InternalCommandTimeout), value); - } - } - int _internalCommandTimeout; - /// /// The time to wait (in milliseconds) while trying to read a response for a cancellation request for a timed out or cancelled query, before terminating the attempt and generating an error. /// Zero for infinity, -1 to skip the wait. @@ -1422,6 +1399,30 @@ public bool TrustServerCertificate } bool _trustServerCertificate; + /// + /// The time to wait (in seconds) while trying to execute a an internal command before terminating the attempt and generating an error. + /// + [Category("Obsolete")] + [Description("The time to wait (in seconds) while trying to execute a an internal command before terminating the attempt and generating an error. 
-1 uses CommandTimeout, 0 means no timeout.")] + [DisplayName("Internal Command Timeout")] + [NpgsqlConnectionStringProperty] + [DefaultValue(-1)] + [Obsolete("The InternalCommandTimeout parameter is no longer needed and does nothing.")] + public int InternalCommandTimeout + { + get => _internalCommandTimeout; + set + { + if (value != 0 && value != -1 && value < NpgsqlConnector.MinimumInternalCommandTimeout) + throw new ArgumentOutOfRangeException(nameof(value), value, + $"InternalCommandTimeout must be >= {NpgsqlConnector.MinimumInternalCommandTimeout}, 0 (infinite) or -1 (use CommandTimeout)"); + + _internalCommandTimeout = value; + SetValue(nameof(InternalCommandTimeout), value); + } + } + int _internalCommandTimeout; + #endregion #region Misc diff --git a/src/Npgsql/NpgsqlRawCopyStream.cs b/src/Npgsql/NpgsqlRawCopyStream.cs index 5656e9c865..d963e411c8 100644 --- a/src/Npgsql/NpgsqlRawCopyStream.cs +++ b/src/Npgsql/NpgsqlRawCopyStream.cs @@ -47,12 +47,7 @@ public override int WriteTimeout public override int ReadTimeout { get => (int) _readBuf.Timeout.TotalMilliseconds; - set - { - _readBuf.Timeout = TimeSpan.FromMilliseconds(value); - // While calling the connector it will overwrite our read buffer timeout - _connector.UserTimeout = value; - } + set => _readBuf.Timeout = TimeSpan.FromMilliseconds(value); } /// diff --git a/src/Npgsql/Replication/ReplicationConnection.cs b/src/Npgsql/Replication/ReplicationConnection.cs index c30892760c..762d45db87 100644 --- a/src/Npgsql/Replication/ReplicationConnection.cs +++ b/src/Npgsql/Replication/ReplicationConnection.cs @@ -915,7 +915,9 @@ static byte[] ParseByteaEscape(ReadOnlySpan inBytes) void SetTimeouts(TimeSpan readTimeout, TimeSpan writeTimeout) { var connector = Connector; - connector.UserTimeout = readTimeout > TimeSpan.Zero ? (int)readTimeout.TotalMilliseconds : 0; + var readBuffer = connector.ReadBuffer; + if (readBuffer != null) + readBuffer.Timeout = readTimeout > TimeSpan.Zero ? 
readTimeout : TimeSpan.Zero; var writeBuffer = connector.WriteBuffer; if (writeBuffer != null) diff --git a/src/Npgsql/Util/NpgsqlTimeout.cs b/src/Npgsql/Util/NpgsqlTimeout.cs index eb4fb06aed..79c44d6c4b 100644 --- a/src/Npgsql/Util/NpgsqlTimeout.cs +++ b/src/Npgsql/Util/NpgsqlTimeout.cs @@ -34,11 +34,6 @@ internal void CheckAndApply(NpgsqlConnector connector) var timeLeft = CheckAndGetTimeLeft(); // Set the remaining timeout on the read and write buffers connector.ReadBuffer.Timeout = connector.WriteBuffer.Timeout = timeLeft; - - // Note that we set UserTimeout as well, otherwise the read timeout will get overwritten in ReadMessage - // Note also that we must set the read buffer's timeout directly (above), since the SSL handshake - // reads data directly from the buffer, without going through ReadMessage. - connector.UserTimeout = (int) Math.Ceiling(timeLeft.TotalMilliseconds); } internal bool IsSet => _expiration != DateTime.MaxValue; From b35cbcbf275a3041bf55abf770e5035c7efd682f Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Mon, 20 Nov 2023 20:25:03 +0100 Subject: [PATCH 316/761] Add keyed DI services to the Npgsql.DependencyInjection docs (#5421) --- src/Npgsql.DependencyInjection/README.md | 25 +++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/src/Npgsql.DependencyInjection/README.md b/src/Npgsql.DependencyInjection/README.md index fc5063bea6..7b22c8d15d 100644 --- a/src/Npgsql.DependencyInjection/README.md +++ b/src/Npgsql.DependencyInjection/README.md @@ -42,7 +42,7 @@ app.MapGet("/", async (NpgsqlDataSource dataSource) => }); ``` -Finally, the `AddNpgsqlDataSource` method also accepts a lambda parameter allowing you to configure aspects of Npgsql beyond the connection string, e.g. to configure `UseLoggerFactory` and `UseNetTopologySuite`: +The `AddNpgsqlDataSource` method also accepts a lambda parameter allowing you to configure aspects of Npgsql beyond the connection string, e.g. 
to configure `UseLoggerFactory` and `UseNetTopologySuite`: ```csharp var builder = WebApplication.CreateBuilder(args); @@ -54,4 +54,27 @@ builder.Services.AddNpgsqlDataSource( .UseNetTopologySuite()); ``` +Finally, starting with Npgsql and .NET 8.0, you can now register multiple data sources (and connections), using a service key to distinguish between them: + +```c# +var builder = WebApplication.CreateBuilder(args); + +builder.Services + .AddNpgsqlDataSource("Host=localhost;Database=CustomersDB;Username=test;Password=test", serviceKey: DatabaseType.CustomerDb) + .AddNpgsqlDataSource("Host=localhost;Database=OrdersDB;Username=test;Password=test", serviceKey: DatabaseType.OrdersDb); + +var app = builder.Build(); + +app.MapGet("/", async ([FromKeyedServices(DatabaseType.OrdersDb)] NpgsqlConnection connection) + => connection.ConnectionString); + +app.Run(); + +enum DatabaseType +{ + CustomerDb, + OrdersDb +} +``` + For more information, [see the Npgsql documentation](https://www.npgsql.org/doc/index.html). From 6375ac0559bf61012b53c6a19e55fee1f2991a4f Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Mon, 20 Nov 2023 20:58:20 +0100 Subject: [PATCH 317/761] Remove old EF6 connection string parameters (#5422) --- src/Npgsql/NpgsqlConnection.cs | 6 --- src/Npgsql/NpgsqlConnectionStringBuilder.cs | 54 ++----------------- src/Npgsql/Properties/AssemblyInfo.cs | 14 ----- src/Npgsql/PublicAPI.Unshipped.txt | 4 ++ .../ConnectionStringBuilderTests.cs | 9 ---- 5 files changed, 8 insertions(+), 79 deletions(-) diff --git a/src/Npgsql/NpgsqlConnection.cs b/src/Npgsql/NpgsqlConnection.cs index e55a9ba38c..638cd602e7 100644 --- a/src/Npgsql/NpgsqlConnection.cs +++ b/src/Npgsql/NpgsqlConnection.cs @@ -458,12 +458,6 @@ public override string ConnectionString /// public string? UserName => Settings.Username; - // The following two lines are here for backwards compatibility with the EF6 provider - // ReSharper disable UnusedMember.Global - internal string? 
EntityTemplateDatabase => Settings.EntityTemplateDatabase; - internal string? EntityAdminDatabase => Settings.EntityAdminDatabase; - // ReSharper restore UnusedMember.Global - #endregion Configuration settings #region State management diff --git a/src/Npgsql/NpgsqlConnectionStringBuilder.cs b/src/Npgsql/NpgsqlConnectionStringBuilder.cs index 30a2c44d87..77f397c8e1 100644 --- a/src/Npgsql/NpgsqlConnectionStringBuilder.cs +++ b/src/Npgsql/NpgsqlConnectionStringBuilder.cs @@ -270,10 +270,10 @@ public string? Database string? _database; /// - /// The username to connect with. Not required if using GSS/SSPI/Kerberos. + /// The username to connect with. /// [Category("Connection")] - [Description("The username to connect with. Not required if using IntegratedSecurity.")] + [Description("The username to connect with.")] [DisplayName("Username")] [NpgsqlConnectionStringProperty("User Name", "UserId", "User Id", "UID")] public string? Username @@ -288,10 +288,10 @@ public string? Username string? _username; /// - /// The password to connect with. Not required if using GSS/SSPI/Kerberos. + /// The password to connect with. /// [Category("Connection")] - [Description("The password to connect with. Not required if using IntegratedSecurity.")] + [Description("The password to connect with.")] [PasswordPropertyText(true)] [DisplayName("Password")] [NpgsqlConnectionStringProperty("PSW", "PWD")] @@ -959,52 +959,6 @@ public int HostRecheckSeconds #endregion Properties - Failover and load balancing - #region Properties - Entity Framework - - /// - /// The database template to specify when creating a database in Entity Framework. If not specified, - /// PostgreSQL defaults to "template1". - /// - /// - /// https://www.postgresql.org/docs/current/static/manage-ag-templatedbs.html - /// - [Category("Entity Framework")] - [Description("The database template to specify when creating a database in Entity Framework. 
If not specified, PostgreSQL defaults to \"template1\".")] - [DisplayName("EF Template Database")] - [NpgsqlConnectionStringProperty] - public string? EntityTemplateDatabase - { - get => _entityTemplateDatabase; - set - { - _entityTemplateDatabase = value; - SetValue(nameof(EntityTemplateDatabase), value); - } - } - string? _entityTemplateDatabase; - - /// - /// The database admin to specify when creating and dropping a database in Entity Framework. This is needed because - /// Npgsql needs to connect to a database in order to send the create/drop database command. - /// If not specified, defaults to "template1". Check NpgsqlServices.UsingPostgresDBConnection for more information. - /// - [Category("Entity Framework")] - [Description("The database admin to specify when creating and dropping a database in Entity Framework. If not specified, defaults to \"template1\".")] - [DisplayName("EF Admin Database")] - [NpgsqlConnectionStringProperty] - public string? EntityAdminDatabase - { - get => _entityAdminDatabase; - set - { - _entityAdminDatabase = value; - SetValue(nameof(EntityAdminDatabase), value); - } - } - string? 
_entityAdminDatabase; - - #endregion - #region Properties - Advanced /// diff --git a/src/Npgsql/Properties/AssemblyInfo.cs b/src/Npgsql/Properties/AssemblyInfo.cs index 666ee3f170..80500e0028 100644 --- a/src/Npgsql/Properties/AssemblyInfo.cs +++ b/src/Npgsql/Properties/AssemblyInfo.cs @@ -12,20 +12,6 @@ [module: SkipLocalsInit] #endif -[assembly: InternalsVisibleTo("EntityFramework6.Npgsql, PublicKey=" + -"0024000004800000940000000602000000240000525341310004000001000100" + -"2b3c590b2a4e3d347e6878dc0ff4d21eb056a50420250c6617044330701d35c9" + -"8078a5df97a62d83c9a2db2d072523a8fc491398254c6b89329b8c1dcef43a1e" + -"7aa16153bcea2ae9a471145624826f60d7c8e71cd025b554a0177bd935a78096" + -"29f0a7afc778ebb4ad033e1bf512c1a9c6ceea26b077bc46cac93800435e77ee")] - -[assembly: InternalsVisibleTo("EntityFramework5.Npgsql, PublicKey=" + -"0024000004800000940000000602000000240000525341310004000001000100" + -"2b3c590b2a4e3d347e6878dc0ff4d21eb056a50420250c6617044330701d35c9" + -"8078a5df97a62d83c9a2db2d072523a8fc491398254c6b89329b8c1dcef43a1e" + -"7aa16153bcea2ae9a471145624826f60d7c8e71cd025b554a0177bd935a78096" + -"29f0a7afc778ebb4ad033e1bf512c1a9c6ceea26b077bc46cac93800435e77ee")] - [assembly: InternalsVisibleTo("Npgsql.Tests, PublicKey=" + "0024000004800000940000000602000000240000525341310004000001000100" + "2b3c590b2a4e3d347e6878dc0ff4d21eb056a50420250c6617044330701d35c9" + diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index caec564d37..0955094090 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -141,6 +141,10 @@ static NpgsqlTypes.NpgsqlCidr.implicit operator NpgsqlTypes.NpgsqlInet(NpgsqlTyp *REMOVED*Npgsql.NpgsqlConnectionStringBuilder.ContinuousProcessing.set -> void *REMOVED*Npgsql.NpgsqlConnectionStringBuilder.ConvertInfinityDateTime.get -> bool *REMOVED*Npgsql.NpgsqlConnectionStringBuilder.ConvertInfinityDateTime.set -> void 
+*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.EntityAdminDatabase.get -> string? +*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.EntityAdminDatabase.set -> void +*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.EntityTemplateDatabase.get -> string? +*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.EntityTemplateDatabase.set -> void *REMOVED*Npgsql.NpgsqlConnectionStringBuilder.IncludeErrorDetails.get -> bool *REMOVED*Npgsql.NpgsqlConnectionStringBuilder.IncludeErrorDetails.set -> void *REMOVED*Npgsql.NpgsqlConnectionStringBuilder.IntegratedSecurity.get -> bool diff --git a/test/Npgsql.Tests/ConnectionStringBuilderTests.cs b/test/Npgsql.Tests/ConnectionStringBuilderTests.cs index 58c2ba4bd2..6e2d2e3a04 100644 --- a/test/Npgsql.Tests/ConnectionStringBuilderTests.cs +++ b/test/Npgsql.Tests/ConnectionStringBuilderTests.cs @@ -20,15 +20,6 @@ public void Basic() Assert.That(builder.Count, Is.EqualTo(0)); } - [Test] - public void From_string() - { - var builder = new NpgsqlConnectionStringBuilder(); - builder.ConnectionString = "Host=myhost;EF Template Database=foo"; - Assert.That(builder.Host, Is.EqualTo("myhost")); - Assert.That(builder.EntityTemplateDatabase, Is.EqualTo("foo")); - } - [Test] public void TryGetValue() { From ad35829bd985f97de4dbc7ee9c13a0ee050325f5 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Mon, 20 Nov 2023 22:11:07 +0100 Subject: [PATCH 318/761] Use configured bcl comparer (#5411) --- .../BackendMessages/RowDescriptionMessage.cs | 30 +++++-------------- 1 file changed, 8 insertions(+), 22 deletions(-) diff --git a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs index 1b7ea8a2a6..9688fa7fc7 100644 --- a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs +++ b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs @@ -1,5 +1,4 @@ using System; -using System.Collections; using System.Collections.Generic; using System.Diagnostics; using System.Globalization; @@ -34,6 +33,12 @@ public 
ColumnInfo(PgConverterInfo converterInfo, DataFormat dataFormat, bool asO /// sealed class RowDescriptionMessage : IBackendMessage { + // We should really have CompareOptions.IgnoreKanaType here, but see + // https://github.com/dotnet/corefx/issues/12518#issuecomment-389658716 + static readonly StringComparer InvariantIgnoreCaseAndKanaWidthComparer = + CultureInfo.InvariantCulture.CompareInfo.GetStringComparer( + CompareOptions.IgnoreWidth | CompareOptions.IgnoreCase | CompareOptions.IgnoreKanaType); + readonly bool _connectorOwned; FieldDescription?[] _fields; readonly Dictionary _nameIndex; @@ -55,7 +60,7 @@ internal RowDescriptionMessage(bool connectorOwned, int numFields = 10) _fields[i] = source._fields[i]!.Clone(); _nameIndex = new Dictionary(source._nameIndex); if (source._insensitiveIndex?.Count > 0) - _insensitiveIndex = new Dictionary(source._insensitiveIndex); + _insensitiveIndex = new Dictionary(source._insensitiveIndex, InvariantIgnoreCaseAndKanaWidthComparer); } internal RowDescriptionMessage Load(NpgsqlReadBuffer buf, PgSerializerOptions options) @@ -178,7 +183,7 @@ internal bool TryGetFieldIndex(string name, out int fieldIndex) if (_insensitiveIndex is null || _insensitiveIndex.Count == 0) { if (_insensitiveIndex == null) - _insensitiveIndex = new Dictionary(InsensitiveComparer.Instance); + _insensitiveIndex = new Dictionary(InvariantIgnoreCaseAndKanaWidthComparer); foreach (var kv in _nameIndex) _insensitiveIndex.TryAdd(kv.Key, kv.Value); @@ -190,25 +195,6 @@ internal bool TryGetFieldIndex(string name, out int fieldIndex) public BackendMessageCode Code => BackendMessageCode.RowDescription; internal RowDescriptionMessage Clone() => new(this); - - /// - /// Comparer that's case-insensitive and Kana width-insensitive - /// - sealed class InsensitiveComparer : IEqualityComparer - { - public static readonly InsensitiveComparer Instance = new(); - static readonly CompareInfo CompareInfo = CultureInfo.InvariantCulture.CompareInfo; - - 
InsensitiveComparer() { } - - // We should really have CompareOptions.IgnoreKanaType here, but see - // https://github.com/dotnet/corefx/issues/12518#issuecomment-389658716 - public bool Equals(string? x, string? y) - => CompareInfo.Compare(x, y, CompareOptions.IgnoreWidth | CompareOptions.IgnoreCase | CompareOptions.IgnoreKanaType) == 0; - - public int GetHashCode(string o) - => CompareInfo.GetSortKey(o, CompareOptions.IgnoreWidth | CompareOptions.IgnoreCase | CompareOptions.IgnoreKanaType).GetHashCode(); - } } /// From 0ac9e0b8b6755dd22c73c70fb3c6dcde718f66d3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Nov 2023 22:46:37 +0100 Subject: [PATCH 319/761] Bump xunit from 2.6.1 to 2.6.2 (#5423) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index e0905f2e6b..13ad03619b 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -38,7 +38,7 @@ - + From f876bb445b7f7c87d5dedc618f49dc94179d4b7e Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Mon, 20 Nov 2023 23:55:03 +0100 Subject: [PATCH 320/761] Add opt-in granular read side bounds checks (#5405) --- src/Npgsql/Internal/NpgsqlReadBuffer.cs | 40 +++++++++++++++---------- src/Npgsql/Internal/PgReader.cs | 12 ++++++-- src/Npgsql/Util/Statics.cs | 5 +++- 3 files changed, 37 insertions(+), 20 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlReadBuffer.cs b/src/Npgsql/Internal/NpgsqlReadBuffer.cs index dc585b2e49..a78679e383 100644 --- a/src/Npgsql/Internal/NpgsqlReadBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlReadBuffer.cs @@ -5,7 +5,6 @@ using System.IO; using System.Net.Sockets; using System.Runtime.CompilerServices; -using System.Runtime.InteropServices; using System.Text; using System.Threading; using System.Threading.Tasks; @@ -22,6 +21,12 @@ sealed partial class NpgsqlReadBuffer : IDisposable { #region Fields and Properties +#if DEBUG + 
internal static readonly bool BufferBoundsChecks = true; +#else + internal static readonly bool BufferBoundsChecks = Statics.EnableDiagnostics; +#endif + public NpgsqlConnection Connection => Connector.Connection!; internal readonly NpgsqlConnector Connector; internal Stream Underlying { private get; set; } @@ -471,7 +476,7 @@ public async Task Skip(int len, bool async) [MethodImpl(MethodImplOptions.AggressiveInlining)] public byte ReadByte() { - CheckBounds(); + CheckBounds(sizeof(byte)); var result = Buffer[ReadPosition]; ReadPosition += sizeof(byte); return result; @@ -480,7 +485,7 @@ public byte ReadByte() [MethodImpl(MethodImplOptions.AggressiveInlining)] public short ReadInt16() { - CheckBounds(); + CheckBounds(sizeof(short)); var result = BitConverter.IsLittleEndian ? BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref Buffer[ReadPosition])) : Unsafe.ReadUnaligned(ref Buffer[ReadPosition]); @@ -491,7 +496,7 @@ public short ReadInt16() [MethodImpl(MethodImplOptions.AggressiveInlining)] public ushort ReadUInt16() { - CheckBounds(); + CheckBounds(sizeof(ushort)); var result = BitConverter.IsLittleEndian ? BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref Buffer[ReadPosition])) : Unsafe.ReadUnaligned(ref Buffer[ReadPosition]); @@ -502,7 +507,7 @@ public ushort ReadUInt16() [MethodImpl(MethodImplOptions.AggressiveInlining)] public int ReadInt32() { - CheckBounds(); + CheckBounds(sizeof(int)); var result = BitConverter.IsLittleEndian ? BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref Buffer[ReadPosition])) : Unsafe.ReadUnaligned(ref Buffer[ReadPosition]); @@ -513,7 +518,7 @@ public int ReadInt32() [MethodImpl(MethodImplOptions.AggressiveInlining)] public uint ReadUInt32() { - CheckBounds(); + CheckBounds(sizeof(uint)); var result = BitConverter.IsLittleEndian ? 
BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref Buffer[ReadPosition])) : Unsafe.ReadUnaligned(ref Buffer[ReadPosition]); @@ -524,7 +529,7 @@ public uint ReadUInt32() [MethodImpl(MethodImplOptions.AggressiveInlining)] public long ReadInt64() { - CheckBounds(); + CheckBounds(sizeof(long)); var result = BitConverter.IsLittleEndian ? BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref Buffer[ReadPosition])) : Unsafe.ReadUnaligned(ref Buffer[ReadPosition]); @@ -535,7 +540,7 @@ public long ReadInt64() [MethodImpl(MethodImplOptions.AggressiveInlining)] public ulong ReadUInt64() { - CheckBounds(); + CheckBounds(sizeof(ulong)); var result = BitConverter.IsLittleEndian ? BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref Buffer[ReadPosition])) : Unsafe.ReadUnaligned(ref Buffer[ReadPosition]); @@ -547,7 +552,7 @@ public ulong ReadUInt64() [MethodImpl(MethodImplOptions.AggressiveInlining)] public float ReadSingle() { - CheckBounds(); + CheckBounds(sizeof(float)); float result; if (BitConverter.IsLittleEndian) { @@ -563,7 +568,7 @@ public float ReadSingle() [MethodImpl(MethodImplOptions.AggressiveInlining)] public double ReadDouble() { - CheckBounds(); + CheckBounds(sizeof(double)); double result; if (BitConverter.IsLittleEndian) { @@ -576,14 +581,17 @@ public double ReadDouble() return result; } - [Conditional("DEBUG")] - unsafe void CheckBounds() where T : unmanaged + void CheckBounds(int count) { - if (sizeof(T) > ReadBytesLeft) - ThrowNoSpaceLeft(); + if (BufferBoundsChecks) + Core(count); - static void ThrowNoSpaceLeft() - => ThrowHelper.ThrowInvalidOperationException("There is not enough space left in the buffer."); + [MethodImpl(MethodImplOptions.NoInlining)] + void Core(int count) + { + if (count > ReadBytesLeft) + ThrowHelper.ThrowInvalidOperationException("There is not enough data left in the buffer."); + } } public string ReadString(int byteLen) diff --git a/src/Npgsql/Internal/PgReader.cs b/src/Npgsql/Internal/PgReader.cs index 
2bf4456b9c..18a5544677 100644 --- a/src/Npgsql/Internal/PgReader.cs +++ b/src/Npgsql/Internal/PgReader.cs @@ -87,11 +87,17 @@ internal void Revert(int size, int startPos, Size bufferRequirement) _currentSize = size; } - [Conditional("DEBUG")] void CheckBounds(int count) { - if (count > FieldRemaining) - ThrowHelper.ThrowInvalidOperationException("Attempt to read past the end of the field."); + if (NpgsqlReadBuffer.BufferBoundsChecks) + Core(count); + + [MethodImpl(MethodImplOptions.NoInlining)] + void Core(int count) + { + if (count > FieldRemaining) + ThrowHelper.ThrowInvalidOperationException("Attempt to read past the end of the field."); + } } public byte ReadByte() diff --git a/src/Npgsql/Util/Statics.cs b/src/Npgsql/Util/Statics.cs index 982ab00c17..d25df2086a 100644 --- a/src/Npgsql/Util/Statics.cs +++ b/src/Npgsql/Util/Statics.cs @@ -10,16 +10,19 @@ namespace Npgsql.Util; static class Statics { #if DEBUG + internal static bool EnableDiagnostics; internal static bool LegacyTimestampBehavior; internal static bool DisableDateTimeInfinityConversions; #else + internal static readonly bool EnableDiagnostics; internal static readonly bool LegacyTimestampBehavior; internal static readonly bool DisableDateTimeInfinityConversions; #endif static Statics() { - LegacyTimestampBehavior = AppContext.TryGetSwitch("Npgsql.EnableLegacyTimestampBehavior", out var enabled) && enabled; + EnableDiagnostics = AppContext.TryGetSwitch("Npgsql.EnableDiagnostics", out var enabled) && enabled; + LegacyTimestampBehavior = AppContext.TryGetSwitch("Npgsql.EnableLegacyTimestampBehavior", out enabled) && enabled; DisableDateTimeInfinityConversions = AppContext.TryGetSwitch("Npgsql.DisableDateTimeInfinityConversions", out enabled) && enabled; } From 8979836e18d736eff6fb6e3e4aad971afd13f9fd Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Tue, 21 Nov 2023 00:36:08 +0100 Subject: [PATCH 321/761] Expose opt-in methods via INpgsqlTypeMapper (#5424) --- src/Npgsql/NpgsqlDataSourceBuilder.cs | 
27 ++++++++++-- src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 23 ++++++++++ src/Npgsql/PublicAPI.Unshipped.txt | 4 ++ src/Npgsql/TypeMapping/GlobalTypeMapper.cs | 30 ++----------- src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs | 48 +++++++++++++++++++++ 5 files changed, 103 insertions(+), 29 deletions(-) diff --git a/src/Npgsql/NpgsqlDataSourceBuilder.cs b/src/Npgsql/NpgsqlDataSourceBuilder.cs index f5746ec38b..b6e8972f96 100644 --- a/src/Npgsql/NpgsqlDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlDataSourceBuilder.cs @@ -147,9 +147,7 @@ public NpgsqlDataSourceBuilder ConfigureJsonOptions(JsonSerializerOptions serial /// [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] [RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] - public NpgsqlDataSourceBuilder EnableDynamicJson( - Type[]? jsonbClrTypes = null, - Type[]? jsonClrTypes = null) + public NpgsqlDataSourceBuilder EnableDynamicJson(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null) { _internalBuilder.EnableDynamicJson(jsonbClrTypes, jsonClrTypes); return this; @@ -434,4 +432,27 @@ public NpgsqlDataSource Build() /// public NpgsqlMultiHostDataSource BuildMultiHost() => _internalBuilder.BuildMultiHost(); + + INpgsqlTypeMapper INpgsqlTypeMapper.ConfigureJsonOptions(JsonSerializerOptions serializerOptions) + => ConfigureJsonOptions(serializerOptions); + + [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] + [RequiresDynamicCode( + "Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] + INpgsqlTypeMapper INpgsqlTypeMapper.EnableDynamicJson(Type[]? jsonbClrTypes, Type[]? 
jsonClrTypes) + => EnableDynamicJson(jsonbClrTypes, jsonClrTypes); + + [RequiresUnreferencedCode( + "The mapping of PostgreSQL records as .NET tuples requires reflection usage which is incompatible with trimming.")] + [RequiresDynamicCode( + "The mapping of PostgreSQL records as .NET tuples requires dynamic code usage which is incompatible with NativeAOT.")] + INpgsqlTypeMapper INpgsqlTypeMapper.EnableRecordsAsTuples() + => EnableRecordsAsTuples(); + + [RequiresUnreferencedCode( + "The use of unmapped enums, ranges or multiranges requires reflection usage which is incompatible with trimming.")] + [RequiresDynamicCode( + "The use of unmapped enums, ranges or multiranges requires dynamic code usage which is incompatible with NativeAOT.")] + INpgsqlTypeMapper INpgsqlTypeMapper.EnableUnmappedTypes() + => EnableUnmappedTypes(); } diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index cbf515b0b8..3dc2b0b52c 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -658,4 +658,27 @@ void ValidateMultiHost() if (ConnectionStringBuilder.ReplicationMode != ReplicationMode.Off) throw new NotSupportedException("Replication is not supported with multiple hosts"); } + + INpgsqlTypeMapper INpgsqlTypeMapper.ConfigureJsonOptions(JsonSerializerOptions serializerOptions) + => ConfigureJsonOptions(serializerOptions); + + [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] + [RequiresDynamicCode( + "Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] + INpgsqlTypeMapper INpgsqlTypeMapper.EnableDynamicJson(Type[]? jsonbClrTypes, Type[]? 
jsonClrTypes) + => EnableDynamicJson(jsonbClrTypes, jsonClrTypes); + + [RequiresUnreferencedCode( + "The mapping of PostgreSQL records as .NET tuples requires reflection usage which is incompatible with trimming.")] + [RequiresDynamicCode( + "The mapping of PostgreSQL records as .NET tuples requires dynamic code usage which is incompatible with NativeAOT.")] + INpgsqlTypeMapper INpgsqlTypeMapper.EnableRecordsAsTuples() + => EnableRecordsAsTuples(); + + [RequiresUnreferencedCode( + "The use of unmapped enums, ranges or multiranges requires reflection usage which is incompatible with trimming.")] + [RequiresDynamicCode( + "The use of unmapped enums, ranges or multiranges requires dynamic code usage which is incompatible with NativeAOT.")] + INpgsqlTypeMapper INpgsqlTypeMapper.EnableUnmappedTypes() + => EnableUnmappedTypes(); } diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index 0955094090..786c7241c9 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -71,6 +71,10 @@ Npgsql.Replication.PhysicalReplicationConnection.StartReplication(NpgsqlTypes.Np Npgsql.Replication.PhysicalReplicationSlot.PhysicalReplicationSlot(string! slotName, NpgsqlTypes.NpgsqlLogSequenceNumber? restartLsn = null, uint? restartTimeline = null) -> void Npgsql.Replication.PhysicalReplicationSlot.RestartTimeline.get -> uint? Npgsql.TypeMapping.INpgsqlTypeMapper.AddTypeInfoResolverFactory(Npgsql.Internal.PgTypeInfoResolverFactory! factory) -> void +Npgsql.TypeMapping.INpgsqlTypeMapper.ConfigureJsonOptions(System.Text.Json.JsonSerializerOptions! serializerOptions) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +Npgsql.TypeMapping.INpgsqlTypeMapper.EnableDynamicJson(System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +Npgsql.TypeMapping.INpgsqlTypeMapper.EnableRecordsAsTuples() -> Npgsql.TypeMapping.INpgsqlTypeMapper! 
+Npgsql.TypeMapping.INpgsqlTypeMapper.EnableUnmappedTypes() -> Npgsql.TypeMapping.INpgsqlTypeMapper! Npgsql.TypeMapping.INpgsqlTypeMapper.MapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! Npgsql.TypeMapping.INpgsqlTypeMapper.UnmapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool Npgsql.TypeMapping.UserTypeMapping diff --git a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs index ba31c7dcdf..c2c01a1887 100644 --- a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs +++ b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs @@ -200,11 +200,7 @@ public INpgsqlNameTranslator DefaultNameTranslator set => _userTypeMapper.DefaultNameTranslator = value; } - /// - /// Configures the JSON serializer options used when reading and writing all System.Text.Json data. - /// - /// Options to customize JSON serialization and deserialization. - /// + /// public INpgsqlTypeMapper ConfigureJsonOptions(JsonSerializerOptions serializerOptions) { _jsonSerializerOptions = serializerOptions; @@ -213,19 +209,7 @@ public INpgsqlTypeMapper ConfigureJsonOptions(JsonSerializerOptions serializerOp return this; } - /// - /// Sets up dynamic System.Text.Json mappings. This allows mapping arbitrary .NET types to PostgreSQL json and jsonb - /// types, as well as and its derived types. - /// - /// - /// A list of CLR types to map to PostgreSQL jsonb (no need to specify ). - /// - /// - /// A list of CLR types to map to PostgreSQL json (no need to specify ). - /// - /// - /// Due to the dynamic nature of these mappings, they are not compatible with NativeAOT or trimming. - /// + /// [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] [RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. 
This may not work when AOT compiling.")] public INpgsqlTypeMapper EnableDynamicJson( @@ -236,10 +220,7 @@ public INpgsqlTypeMapper EnableDynamicJson( return this; } - /// - /// Sets up mappings for the PostgreSQL record type as a .NET or . - /// - /// The same builder instance so that multiple calls can be chained. + /// [RequiresUnreferencedCode("The mapping of PostgreSQL records as .NET tuples requires reflection usage which is incompatible with trimming.")] [RequiresDynamicCode("The mapping of PostgreSQL records as .NET tuples requires dynamic code usage which is incompatible with NativeAOT.")] public INpgsqlTypeMapper EnableRecordsAsTuples() @@ -248,10 +229,7 @@ public INpgsqlTypeMapper EnableRecordsAsTuples() return this; } - /// - /// Sets up mappings allowing the use of unmapped enum, range and multirange types. - /// - /// The same builder instance so that multiple calls can be chained. + /// [RequiresUnreferencedCode("The use of unmapped enums, ranges or multiranges requires reflection usage which is incompatible with trimming.")] [RequiresDynamicCode("The use of unmapped enums, ranges or multiranges requires dynamic code usage which is incompatible with NativeAOT.")] public INpgsqlTypeMapper EnableUnmappedTypes() diff --git a/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs b/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs index 23642121d8..83728785d6 100644 --- a/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs +++ b/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs @@ -1,6 +1,9 @@ using System; using System.Diagnostics.CodeAnalysis; +using System.Text.Json; +using System.Text.Json.Nodes; using Npgsql.Internal; +using Npgsql.Internal.ResolverFactories; using Npgsql.NameTranslation; using NpgsqlTypes; @@ -196,6 +199,51 @@ bool UnmapComposite( /// The type resolver factory to be added. void AddTypeInfoResolverFactory(PgTypeInfoResolverFactory factory); + /// + /// Configures the JSON serializer options used when reading and writing all System.Text.Json data. 
+ /// + /// Options to customize JSON serialization and deserialization. + /// + INpgsqlTypeMapper ConfigureJsonOptions(JsonSerializerOptions serializerOptions); + + /// + /// Sets up dynamic System.Text.Json mappings. This allows mapping arbitrary .NET types to PostgreSQL json and jsonb + /// types, as well as and its derived types. + /// + /// + /// A list of CLR types to map to PostgreSQL jsonb (no need to specify ). + /// + /// + /// A list of CLR types to map to PostgreSQL json (no need to specify ). + /// + /// + /// Due to the dynamic nature of these mappings, they are not compatible with NativeAOT or trimming. + /// + [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] + [RequiresDynamicCode( + "Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] + INpgsqlTypeMapper EnableDynamicJson(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null); + + /// + /// Sets up mappings for the PostgreSQL record type as a .NET or . + /// + /// The same builder instance so that multiple calls can be chained. + [RequiresUnreferencedCode( + "The mapping of PostgreSQL records as .NET tuples requires reflection usage which is incompatible with trimming.")] + [RequiresDynamicCode( + "The mapping of PostgreSQL records as .NET tuples requires dynamic code usage which is incompatible with NativeAOT.")] + INpgsqlTypeMapper EnableRecordsAsTuples(); + + /// + /// Sets up mappings allowing the use of unmapped enum, range and multirange types. + /// + /// The same builder instance so that multiple calls can be chained. 
+ [RequiresUnreferencedCode( + "The use of unmapped enums, ranges or multiranges requires reflection usage which is incompatible with trimming.")] + [RequiresDynamicCode( + "The use of unmapped enums, ranges or multiranges requires dynamic code usage which is incompatible with NativeAOT.")] + INpgsqlTypeMapper EnableUnmappedTypes(); + /// /// Resets all mapping changes performed on this type mapper and reverts it to its original, starting state. /// From 507e2d9760dff4090ab71326a7d8b9ae721db553 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Tue, 21 Nov 2023 00:52:32 +0100 Subject: [PATCH 322/761] Bump version to 8.0.0 --- Directory.Build.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Build.props b/Directory.Build.props index f145b53aa1..c3f00e9d91 100644 --- a/Directory.Build.props +++ b/Directory.Build.props @@ -1,6 +1,6 @@  - 8.0.0-rtm + 8.0.0 latest true enable From 0e8e36933a020a7966afbae9c01861eea8c3b8e1 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Tue, 21 Nov 2023 01:35:42 +0100 Subject: [PATCH 323/761] Move all Unshipped public APIs to Shipped and resort --- .../PublicAPI.Shipped.txt | 17 ++ .../PublicAPI.Unshipped.txt | 17 -- src/Npgsql.GeoJSON/PublicAPI.Shipped.txt | 7 +- src/Npgsql.GeoJSON/PublicAPI.Unshipped.txt | 6 +- src/Npgsql.Json.NET/PublicAPI.Shipped.txt | 2 +- src/Npgsql.Json.NET/PublicAPI.Unshipped.txt | 2 - src/Npgsql/PublicAPI.Shipped.txt | 178 ++++++++++-------- src/Npgsql/PublicAPI.Unshipped.txt | 176 ----------------- 8 files changed, 125 insertions(+), 280 deletions(-) diff --git a/src/Npgsql.DependencyInjection/PublicAPI.Shipped.txt b/src/Npgsql.DependencyInjection/PublicAPI.Shipped.txt index ab058de62d..4066bf5273 100644 --- a/src/Npgsql.DependencyInjection/PublicAPI.Shipped.txt +++ b/src/Npgsql.DependencyInjection/PublicAPI.Shipped.txt @@ -1 +1,18 @@ #nullable enable +Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions +static 
Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! 
dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! 
+static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! 
dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! 
+static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! diff --git a/src/Npgsql.DependencyInjection/PublicAPI.Unshipped.txt b/src/Npgsql.DependencyInjection/PublicAPI.Unshipped.txt index 4066bf5273..ab058de62d 100644 --- a/src/Npgsql.DependencyInjection/PublicAPI.Unshipped.txt +++ b/src/Npgsql.DependencyInjection/PublicAPI.Unshipped.txt @@ -1,18 +1 @@ #nullable enable -Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions -static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! 
connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! -static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! -static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! -static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! 
-static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! -static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! -static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! -static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! 
dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! -static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! -static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! -static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! 
-static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! -static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! -static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! -static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! 
dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! -static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! diff --git a/src/Npgsql.GeoJSON/PublicAPI.Shipped.txt b/src/Npgsql.GeoJSON/PublicAPI.Shipped.txt index a5e3b621d4..7f92ef111d 100644 --- a/src/Npgsql.GeoJSON/PublicAPI.Shipped.txt +++ b/src/Npgsql.GeoJSON/PublicAPI.Shipped.txt @@ -1,8 +1,13 @@ #nullable enable +Npgsql.GeoJSON.CrsMap +Npgsql.GeoJSON.CrsMapExtensions Npgsql.GeoJSONOptions Npgsql.GeoJSONOptions.BoundingBox = 1 -> Npgsql.GeoJSONOptions Npgsql.GeoJSONOptions.LongCRS = 4 -> Npgsql.GeoJSONOptions Npgsql.GeoJSONOptions.None = 0 -> Npgsql.GeoJSONOptions Npgsql.GeoJSONOptions.ShortCRS = 2 -> Npgsql.GeoJSONOptions Npgsql.NpgsqlGeoJSONExtensions -static Npgsql.NpgsqlGeoJSONExtensions.UseGeoJson(this Npgsql.TypeMapping.INpgsqlTypeMapper! mapper, Npgsql.GeoJSONOptions options = Npgsql.GeoJSONOptions.None, bool geographyAsDefault = false) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +static Npgsql.GeoJSON.CrsMapExtensions.GetCrsMap(this Npgsql.NpgsqlDataSource! dataSource) -> Npgsql.GeoJSON.CrsMap! +static Npgsql.GeoJSON.CrsMapExtensions.GetCrsMapAsync(this Npgsql.NpgsqlDataSource! dataSource) -> System.Threading.Tasks.Task! 
+static Npgsql.NpgsqlGeoJSONExtensions.UseGeoJson(this Npgsql.TypeMapping.INpgsqlTypeMapper! mapper, Npgsql.GeoJSON.CrsMap! crsMap, Npgsql.GeoJSONOptions options = Npgsql.GeoJSONOptions.None, bool geographyAsDefault = false) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +static Npgsql.NpgsqlGeoJSONExtensions.UseGeoJson(this Npgsql.TypeMapping.INpgsqlTypeMapper! mapper, Npgsql.GeoJSONOptions options = Npgsql.GeoJSONOptions.None, bool geographyAsDefault = false) -> Npgsql.TypeMapping.INpgsqlTypeMapper! \ No newline at end of file diff --git a/src/Npgsql.GeoJSON/PublicAPI.Unshipped.txt b/src/Npgsql.GeoJSON/PublicAPI.Unshipped.txt index be72efeb37..ab058de62d 100644 --- a/src/Npgsql.GeoJSON/PublicAPI.Unshipped.txt +++ b/src/Npgsql.GeoJSON/PublicAPI.Unshipped.txt @@ -1,5 +1 @@ -Npgsql.GeoJSON.CrsMap -Npgsql.GeoJSON.CrsMapExtensions -static Npgsql.GeoJSON.CrsMapExtensions.GetCrsMap(this Npgsql.NpgsqlDataSource! dataSource) -> Npgsql.GeoJSON.CrsMap! -static Npgsql.GeoJSON.CrsMapExtensions.GetCrsMapAsync(this Npgsql.NpgsqlDataSource! dataSource) -> System.Threading.Tasks.Task! -static Npgsql.NpgsqlGeoJSONExtensions.UseGeoJson(this Npgsql.TypeMapping.INpgsqlTypeMapper! mapper, Npgsql.GeoJSON.CrsMap! crsMap, Npgsql.GeoJSONOptions options = Npgsql.GeoJSONOptions.None, bool geographyAsDefault = false) -> Npgsql.TypeMapping.INpgsqlTypeMapper! \ No newline at end of file +#nullable enable diff --git a/src/Npgsql.Json.NET/PublicAPI.Shipped.txt b/src/Npgsql.Json.NET/PublicAPI.Shipped.txt index dd615d73a6..912eb76bcb 100644 --- a/src/Npgsql.Json.NET/PublicAPI.Shipped.txt +++ b/src/Npgsql.Json.NET/PublicAPI.Shipped.txt @@ -1,3 +1,3 @@ #nullable enable Npgsql.NpgsqlJsonNetExtensions -static Npgsql.NpgsqlJsonNetExtensions.UseJsonNet(this Npgsql.TypeMapping.INpgsqlTypeMapper! mapper, System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null, Newtonsoft.Json.JsonSerializerSettings? settings = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! 
+static Npgsql.NpgsqlJsonNetExtensions.UseJsonNet(this Npgsql.TypeMapping.INpgsqlTypeMapper! mapper, Newtonsoft.Json.JsonSerializerSettings? settings = null, System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! diff --git a/src/Npgsql.Json.NET/PublicAPI.Unshipped.txt b/src/Npgsql.Json.NET/PublicAPI.Unshipped.txt index 6372f0638d..ab058de62d 100644 --- a/src/Npgsql.Json.NET/PublicAPI.Unshipped.txt +++ b/src/Npgsql.Json.NET/PublicAPI.Unshipped.txt @@ -1,3 +1 @@ #nullable enable -static Npgsql.NpgsqlJsonNetExtensions.UseJsonNet(this Npgsql.TypeMapping.INpgsqlTypeMapper! mapper, Newtonsoft.Json.JsonSerializerSettings? settings = null, System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! -*REMOVED*static Npgsql.NpgsqlJsonNetExtensions.UseJsonNet(this Npgsql.TypeMapping.INpgsqlTypeMapper! mapper, System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null, Newtonsoft.Json.JsonSerializerSettings? settings = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! diff --git a/src/Npgsql/PublicAPI.Shipped.txt b/src/Npgsql/PublicAPI.Shipped.txt index a3ebbabc6b..220210350c 100644 --- a/src/Npgsql/PublicAPI.Shipped.txt +++ b/src/Npgsql/PublicAPI.Shipped.txt @@ -89,6 +89,7 @@ const Npgsql.PostgresErrorCodes.ForeignKeyViolation = "23503" -> string! const Npgsql.PostgresErrorCodes.FunctionExecutedNoReturnStatementSqlRoutineException = "2F005" -> string! const Npgsql.PostgresErrorCodes.GroupingError = "42803" -> string! const Npgsql.PostgresErrorCodes.HeldCursorRequiresSameIsolationLevel = "25008" -> string! +const Npgsql.PostgresErrorCodes.IdleSessionTimeout = "57P05" -> string! const Npgsql.PostgresErrorCodes.ImplicitZeroBitPaddingWarning = "01008" -> string! const Npgsql.PostgresErrorCodes.InappropriateAccessModeForBranchTransaction = "25003" -> string! 
const Npgsql.PostgresErrorCodes.InappropriateIsolationLevelForBranchTransaction = "25004" -> string! @@ -248,6 +249,10 @@ Npgsql.BackendMessages.FieldDescription.TypeModifier.get -> int Npgsql.BackendMessages.FieldDescription.TypeModifier.set -> void Npgsql.BackendMessages.FieldDescription.TypeSize.get -> short Npgsql.BackendMessages.FieldDescription.TypeSize.set -> void +Npgsql.ChannelBinding +Npgsql.ChannelBinding.Disable = 0 -> Npgsql.ChannelBinding +Npgsql.ChannelBinding.Prefer = 1 -> Npgsql.ChannelBinding +Npgsql.ChannelBinding.Require = 2 -> Npgsql.ChannelBinding Npgsql.INpgsqlNameTranslator Npgsql.INpgsqlNameTranslator.TranslateMemberName(string! clrName) -> string! Npgsql.INpgsqlNameTranslator.TranslateTypeName(string! clrName) -> string! @@ -256,8 +261,8 @@ Npgsql.NameTranslation.NpgsqlNullNameTranslator.NpgsqlNullNameTranslator() -> vo Npgsql.NameTranslation.NpgsqlNullNameTranslator.TranslateMemberName(string! clrName) -> string! Npgsql.NameTranslation.NpgsqlNullNameTranslator.TranslateTypeName(string! clrName) -> string! Npgsql.NameTranslation.NpgsqlSnakeCaseNameTranslator -Npgsql.NameTranslation.NpgsqlSnakeCaseNameTranslator.NpgsqlSnakeCaseNameTranslator(System.Globalization.CultureInfo? culture = null) -> void Npgsql.NameTranslation.NpgsqlSnakeCaseNameTranslator.NpgsqlSnakeCaseNameTranslator(bool legacyMode, System.Globalization.CultureInfo? culture = null) -> void +Npgsql.NameTranslation.NpgsqlSnakeCaseNameTranslator.NpgsqlSnakeCaseNameTranslator(System.Globalization.CultureInfo? culture = null) -> void Npgsql.NameTranslation.NpgsqlSnakeCaseNameTranslator.TranslateMemberName(string! clrName) -> string! Npgsql.NameTranslation.NpgsqlSnakeCaseNameTranslator.TranslateTypeName(string! clrName) -> string! Npgsql.NoticeEventHandler @@ -266,6 +271,7 @@ Npgsql.NpgsqlBatch Npgsql.NpgsqlBatch.BatchCommands.get -> Npgsql.NpgsqlBatchCommandCollection! Npgsql.NpgsqlBatch.Connection.get -> Npgsql.NpgsqlConnection? 
Npgsql.NpgsqlBatch.Connection.set -> void +Npgsql.NpgsqlBatch.CreateBatchCommand() -> Npgsql.NpgsqlBatchCommand! Npgsql.NpgsqlBatch.EnableErrorBarriers.get -> bool Npgsql.NpgsqlBatch.EnableErrorBarriers.set -> void Npgsql.NpgsqlBatch.ExecuteReader(System.Data.CommandBehavior behavior = System.Data.CommandBehavior.Default) -> Npgsql.NpgsqlDataReader! @@ -325,8 +331,8 @@ Npgsql.NpgsqlBinaryImporter.WriteAsync(T value, string! dataTypeName, System. Npgsql.NpgsqlBinaryImporter.WriteAsync(T value, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! Npgsql.NpgsqlBinaryImporter.WriteNull() -> void Npgsql.NpgsqlBinaryImporter.WriteNullAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -Npgsql.NpgsqlBinaryImporter.WriteRow(params object![]! values) -> void -Npgsql.NpgsqlBinaryImporter.WriteRowAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken), params object![]! values) -> System.Threading.Tasks.Task! +Npgsql.NpgsqlBinaryImporter.WriteRow(params object?[]! values) -> void +Npgsql.NpgsqlBinaryImporter.WriteRowAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken), params object?[]! values) -> System.Threading.Tasks.Task! Npgsql.NpgsqlCommand Npgsql.NpgsqlCommand.AllResultTypesAreUnknown.get -> bool Npgsql.NpgsqlCommand.AllResultTypesAreUnknown.set -> void @@ -382,7 +388,6 @@ Npgsql.NpgsqlConnection.Disposed -> System.EventHandler? Npgsql.NpgsqlConnection.FullState.get -> System.Data.ConnectionState Npgsql.NpgsqlConnection.HasIntegerDateTimes.get -> bool Npgsql.NpgsqlConnection.Host.get -> string? -Npgsql.NpgsqlConnection.IntegratedSecurity.get -> bool Npgsql.NpgsqlConnection.Notice -> Npgsql.NoticeEventHandler? Npgsql.NpgsqlConnection.Notification -> Npgsql.NotificationEventHandler? 
Npgsql.NpgsqlConnection.NpgsqlConnection() -> void @@ -417,16 +422,12 @@ Npgsql.NpgsqlConnectionStringBuilder.ArrayNullabilityMode.get -> Npgsql.ArrayNul Npgsql.NpgsqlConnectionStringBuilder.ArrayNullabilityMode.set -> void Npgsql.NpgsqlConnectionStringBuilder.AutoPrepareMinUsages.get -> int Npgsql.NpgsqlConnectionStringBuilder.AutoPrepareMinUsages.set -> void -Npgsql.NpgsqlConnectionStringBuilder.BackendTimeouts.get -> bool -Npgsql.NpgsqlConnectionStringBuilder.BackendTimeouts.set -> void Npgsql.NpgsqlConnectionStringBuilder.CancellationTimeout.get -> int Npgsql.NpgsqlConnectionStringBuilder.CancellationTimeout.set -> void +Npgsql.NpgsqlConnectionStringBuilder.ChannelBinding.get -> Npgsql.ChannelBinding +Npgsql.NpgsqlConnectionStringBuilder.ChannelBinding.set -> void Npgsql.NpgsqlConnectionStringBuilder.CheckCertificateRevocation.get -> bool Npgsql.NpgsqlConnectionStringBuilder.CheckCertificateRevocation.set -> void -Npgsql.NpgsqlConnectionStringBuilder.ClientCertificate.get -> string? -Npgsql.NpgsqlConnectionStringBuilder.ClientCertificate.set -> void -Npgsql.NpgsqlConnectionStringBuilder.ClientCertificateKey.get -> string? -Npgsql.NpgsqlConnectionStringBuilder.ClientCertificateKey.set -> void Npgsql.NpgsqlConnectionStringBuilder.ClientEncoding.get -> string? 
Npgsql.NpgsqlConnectionStringBuilder.ClientEncoding.set -> void Npgsql.NpgsqlConnectionStringBuilder.CommandTimeout.get -> int @@ -438,10 +439,6 @@ Npgsql.NpgsqlConnectionStringBuilder.ConnectionLifetime.set -> void Npgsql.NpgsqlConnectionStringBuilder.ConnectionPruningInterval.get -> int Npgsql.NpgsqlConnectionStringBuilder.ConnectionPruningInterval.set -> void Npgsql.NpgsqlConnectionStringBuilder.Contains(System.Collections.Generic.KeyValuePair item) -> bool -Npgsql.NpgsqlConnectionStringBuilder.ContinuousProcessing.get -> bool -Npgsql.NpgsqlConnectionStringBuilder.ContinuousProcessing.set -> void -Npgsql.NpgsqlConnectionStringBuilder.ConvertInfinityDateTime.get -> bool -Npgsql.NpgsqlConnectionStringBuilder.ConvertInfinityDateTime.set -> void Npgsql.NpgsqlConnectionStringBuilder.CopyTo(System.Collections.Generic.KeyValuePair[]! array, int arrayIndex) -> void Npgsql.NpgsqlConnectionStringBuilder.Database.get -> string? Npgsql.NpgsqlConnectionStringBuilder.Database.set -> void @@ -449,10 +446,6 @@ Npgsql.NpgsqlConnectionStringBuilder.Encoding.get -> string! Npgsql.NpgsqlConnectionStringBuilder.Encoding.set -> void Npgsql.NpgsqlConnectionStringBuilder.Enlist.get -> bool Npgsql.NpgsqlConnectionStringBuilder.Enlist.set -> void -Npgsql.NpgsqlConnectionStringBuilder.EntityAdminDatabase.get -> string? -Npgsql.NpgsqlConnectionStringBuilder.EntityAdminDatabase.set -> void -Npgsql.NpgsqlConnectionStringBuilder.EntityTemplateDatabase.get -> string? -Npgsql.NpgsqlConnectionStringBuilder.EntityTemplateDatabase.set -> void Npgsql.NpgsqlConnectionStringBuilder.GetEnumerator() -> System.Collections.Generic.IEnumerator>! Npgsql.NpgsqlConnectionStringBuilder.Host.get -> string? 
Npgsql.NpgsqlConnectionStringBuilder.Host.set -> void @@ -460,12 +453,8 @@ Npgsql.NpgsqlConnectionStringBuilder.HostRecheckSeconds.get -> int Npgsql.NpgsqlConnectionStringBuilder.HostRecheckSeconds.set -> void Npgsql.NpgsqlConnectionStringBuilder.IncludeErrorDetail.get -> bool Npgsql.NpgsqlConnectionStringBuilder.IncludeErrorDetail.set -> void -Npgsql.NpgsqlConnectionStringBuilder.IncludeErrorDetails.get -> bool -Npgsql.NpgsqlConnectionStringBuilder.IncludeErrorDetails.set -> void Npgsql.NpgsqlConnectionStringBuilder.IncludeRealm.get -> bool Npgsql.NpgsqlConnectionStringBuilder.IncludeRealm.set -> void -Npgsql.NpgsqlConnectionStringBuilder.IntegratedSecurity.get -> bool -Npgsql.NpgsqlConnectionStringBuilder.IntegratedSecurity.set -> void Npgsql.NpgsqlConnectionStringBuilder.InternalCommandTimeout.get -> int Npgsql.NpgsqlConnectionStringBuilder.InternalCommandTimeout.set -> void Npgsql.NpgsqlConnectionStringBuilder.KeepAlive.get -> int @@ -504,8 +493,6 @@ Npgsql.NpgsqlConnectionStringBuilder.Pooling.get -> bool Npgsql.NpgsqlConnectionStringBuilder.Pooling.set -> void Npgsql.NpgsqlConnectionStringBuilder.Port.get -> int Npgsql.NpgsqlConnectionStringBuilder.Port.set -> void -Npgsql.NpgsqlConnectionStringBuilder.PreloadReader.get -> bool -Npgsql.NpgsqlConnectionStringBuilder.PreloadReader.set -> void Npgsql.NpgsqlConnectionStringBuilder.ReadBufferSize.get -> int Npgsql.NpgsqlConnectionStringBuilder.ReadBufferSize.set -> void Npgsql.NpgsqlConnectionStringBuilder.Remove(System.Collections.Generic.KeyValuePair item) -> bool @@ -541,14 +528,8 @@ Npgsql.NpgsqlConnectionStringBuilder.Timezone.get -> string? 
Npgsql.NpgsqlConnectionStringBuilder.Timezone.set -> void Npgsql.NpgsqlConnectionStringBuilder.TrustServerCertificate.get -> bool Npgsql.NpgsqlConnectionStringBuilder.TrustServerCertificate.set -> void -Npgsql.NpgsqlConnectionStringBuilder.UseExtendedTypes.get -> bool -Npgsql.NpgsqlConnectionStringBuilder.UseExtendedTypes.set -> void -Npgsql.NpgsqlConnectionStringBuilder.UsePerfCounters.get -> bool -Npgsql.NpgsqlConnectionStringBuilder.UsePerfCounters.set -> void Npgsql.NpgsqlConnectionStringBuilder.Username.get -> string? Npgsql.NpgsqlConnectionStringBuilder.Username.set -> void -Npgsql.NpgsqlConnectionStringBuilder.UseSslStream.get -> bool -Npgsql.NpgsqlConnectionStringBuilder.UseSslStream.set -> void Npgsql.NpgsqlConnectionStringBuilder.Values.get -> System.Collections.Generic.ICollection! Npgsql.NpgsqlConnectionStringBuilder.WriteBufferSize.get -> int Npgsql.NpgsqlConnectionStringBuilder.WriteBufferSize.set -> void @@ -597,27 +578,38 @@ Npgsql.NpgsqlDataSource.OpenConnection() -> Npgsql.NpgsqlConnection! Npgsql.NpgsqlDataSource.OpenConnectionAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.ValueTask Npgsql.NpgsqlDataSource.Password.set -> void Npgsql.NpgsqlDataSourceBuilder -Npgsql.NpgsqlDataSourceBuilder.AddTypeResolverFactory(Npgsql.Internal.TypeHandling.TypeHandlerResolverFactory! resolverFactory) -> void +Npgsql.NpgsqlDataSourceBuilder.AddTypeInfoResolverFactory(Npgsql.Internal.PgTypeInfoResolverFactory! factory) -> void Npgsql.NpgsqlDataSourceBuilder.Build() -> Npgsql.NpgsqlDataSource! Npgsql.NpgsqlDataSourceBuilder.BuildMultiHost() -> Npgsql.NpgsqlMultiHostDataSource! +Npgsql.NpgsqlDataSourceBuilder.ConfigureJsonOptions(System.Text.Json.JsonSerializerOptions! serializerOptions) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.ConnectionString.get -> string! Npgsql.NpgsqlDataSourceBuilder.ConnectionStringBuilder.get -> Npgsql.NpgsqlConnectionStringBuilder! 
Npgsql.NpgsqlDataSourceBuilder.DefaultNameTranslator.get -> Npgsql.INpgsqlNameTranslator! Npgsql.NpgsqlDataSourceBuilder.DefaultNameTranslator.set -> void +Npgsql.NpgsqlDataSourceBuilder.EnableDynamicJson(System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.EnableParameterLogging(bool parameterLoggingEnabled = true) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.EnableRecordsAsTuples() -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.EnableUnmappedTypes() -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.MapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! Npgsql.NpgsqlDataSourceBuilder.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +Npgsql.NpgsqlDataSourceBuilder.MapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! Npgsql.NpgsqlDataSourceBuilder.MapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +Npgsql.NpgsqlDataSourceBuilder.Name.get -> string? +Npgsql.NpgsqlDataSourceBuilder.Name.set -> void Npgsql.NpgsqlDataSourceBuilder.NpgsqlDataSourceBuilder(string? connectionString = null) -> void Npgsql.NpgsqlDataSourceBuilder.UnmapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool Npgsql.NpgsqlDataSourceBuilder.UnmapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool +Npgsql.NpgsqlDataSourceBuilder.UnmapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool Npgsql.NpgsqlDataSourceBuilder.UnmapEnum(string? 
pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool Npgsql.NpgsqlDataSourceBuilder.UseClientCertificate(System.Security.Cryptography.X509Certificates.X509Certificate? clientCertificate) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.UseClientCertificates(System.Security.Cryptography.X509Certificates.X509CertificateCollection? clientCertificates) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.UseClientCertificatesCallback(System.Action? clientCertificatesCallback) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.UseLoggerFactory(Microsoft.Extensions.Logging.ILoggerFactory? loggerFactory) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.UsePasswordProvider(System.Func? passwordProvider, System.Func>? passwordProviderAsync) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.UsePeriodicPasswordProvider(System.Func>? passwordProvider, System.TimeSpan successRefreshInterval, System.TimeSpan failureRefreshInterval) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.UsePhysicalConnectionInitializer(System.Action? connectionInitializer, System.Func? connectionInitializerAsync) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.UseRootCertificate(System.Security.Cryptography.X509Certificates.X509Certificate2? rootCertificate) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.UseRootCertificateCallback(System.Func? rootCertificateCallback) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.UseUserCertificateValidationCallback(System.Net.Security.RemoteCertificateValidationCallback! userCertificateValidationCallback) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlException Npgsql.NpgsqlException.BatchCommand.get -> Npgsql.NpgsqlBatchCommand? 
@@ -662,9 +654,7 @@ Npgsql.NpgsqlNestedDataReader.GetData(int ordinal) -> Npgsql.NpgsqlNestedDataRea Npgsql.NpgsqlNoticeEventArgs Npgsql.NpgsqlNoticeEventArgs.Notice.get -> Npgsql.PostgresNotice! Npgsql.NpgsqlNotificationEventArgs -Npgsql.NpgsqlNotificationEventArgs.AdditionalInformation.get -> string! Npgsql.NpgsqlNotificationEventArgs.Channel.get -> string! -Npgsql.NpgsqlNotificationEventArgs.Condition.get -> string! Npgsql.NpgsqlNotificationEventArgs.Payload.get -> string! Npgsql.NpgsqlNotificationEventArgs.PID.get -> int Npgsql.NpgsqlOperationInProgressException @@ -674,8 +664,6 @@ Npgsql.NpgsqlParameter Npgsql.NpgsqlParameter.Clone() -> Npgsql.NpgsqlParameter! Npgsql.NpgsqlParameter.Collection.get -> Npgsql.NpgsqlParameterCollection? Npgsql.NpgsqlParameter.Collection.set -> void -Npgsql.NpgsqlParameter.ConvertedValue.get -> object? -Npgsql.NpgsqlParameter.ConvertedValue.set -> void Npgsql.NpgsqlParameter.DataTypeName.get -> string? Npgsql.NpgsqlParameter.DataTypeName.set -> void Npgsql.NpgsqlParameter.NpgsqlDbType.get -> NpgsqlTypes.NpgsqlDbType @@ -736,13 +724,55 @@ Npgsql.NpgsqlRowUpdatedEventHandler Npgsql.NpgsqlRowUpdatingEventArgs Npgsql.NpgsqlRowUpdatingEventArgs.NpgsqlRowUpdatingEventArgs(System.Data.DataRow! dataRow, System.Data.IDbCommand? command, System.Data.StatementType statementType, System.Data.Common.DataTableMapping! tableMapping) -> void Npgsql.NpgsqlRowUpdatingEventHandler +Npgsql.NpgsqlSlimDataSourceBuilder +Npgsql.NpgsqlSlimDataSourceBuilder.AddTypeInfoResolverFactory(Npgsql.Internal.PgTypeInfoResolverFactory! factory) -> void +Npgsql.NpgsqlSlimDataSourceBuilder.Build() -> Npgsql.NpgsqlDataSource! +Npgsql.NpgsqlSlimDataSourceBuilder.BuildMultiHost() -> Npgsql.NpgsqlMultiHostDataSource! +Npgsql.NpgsqlSlimDataSourceBuilder.ConfigureJsonOptions(System.Text.Json.JsonSerializerOptions! serializerOptions) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.ConnectionString.get -> string! 
+Npgsql.NpgsqlSlimDataSourceBuilder.ConnectionStringBuilder.get -> Npgsql.NpgsqlConnectionStringBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.DefaultNameTranslator.get -> Npgsql.INpgsqlNameTranslator! +Npgsql.NpgsqlSlimDataSourceBuilder.DefaultNameTranslator.set -> void +Npgsql.NpgsqlSlimDataSourceBuilder.EnableArrays() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableDynamicJson(System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableExtraConversions() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableFullTextSearch() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableIntegratedSecurity() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableLTree() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableMultiranges() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableParameterLogging(bool parameterLoggingEnabled = true) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableRanges() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableRecords() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableRecordsAsTuples() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableTransportSecurity() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableUnmappedTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.MapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +Npgsql.NpgsqlSlimDataSourceBuilder.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! 
+Npgsql.NpgsqlSlimDataSourceBuilder.MapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +Npgsql.NpgsqlSlimDataSourceBuilder.MapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +Npgsql.NpgsqlSlimDataSourceBuilder.Name.get -> string? +Npgsql.NpgsqlSlimDataSourceBuilder.Name.set -> void +Npgsql.NpgsqlSlimDataSourceBuilder.NpgsqlSlimDataSourceBuilder(string? connectionString = null) -> void +Npgsql.NpgsqlSlimDataSourceBuilder.UnmapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool +Npgsql.NpgsqlSlimDataSourceBuilder.UnmapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool +Npgsql.NpgsqlSlimDataSourceBuilder.UnmapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool +Npgsql.NpgsqlSlimDataSourceBuilder.UnmapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool +Npgsql.NpgsqlSlimDataSourceBuilder.UseClientCertificate(System.Security.Cryptography.X509Certificates.X509Certificate? clientCertificate) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.UseClientCertificates(System.Security.Cryptography.X509Certificates.X509CertificateCollection? clientCertificates) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.UseClientCertificatesCallback(System.Action? clientCertificatesCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.UseLoggerFactory(Microsoft.Extensions.Logging.ILoggerFactory? loggerFactory) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.UsePasswordProvider(System.Func? passwordProvider, System.Func>? passwordProviderAsync) -> Npgsql.NpgsqlSlimDataSourceBuilder! 
+Npgsql.NpgsqlSlimDataSourceBuilder.UsePeriodicPasswordProvider(System.Func>? passwordProvider, System.TimeSpan successRefreshInterval, System.TimeSpan failureRefreshInterval) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.UsePhysicalConnectionInitializer(System.Action? connectionInitializer, System.Func? connectionInitializerAsync) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.UseRootCertificate(System.Security.Cryptography.X509Certificates.X509Certificate2? rootCertificate) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.UseRootCertificateCallback(System.Func? rootCertificateCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.UseUserCertificateValidationCallback(System.Net.Security.RemoteCertificateValidationCallback! userCertificateValidationCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlTracingOptions Npgsql.NpgsqlTracingOptions.NpgsqlTracingOptions() -> void Npgsql.NpgsqlTransaction Npgsql.NpgsqlTransaction.Connection.get -> Npgsql.NpgsqlConnection? Npgsql.PostgresErrorCodes Npgsql.PostgresException -Npgsql.PostgresException.Code.get -> string! Npgsql.PostgresException.ColumnName.get -> string? Npgsql.PostgresException.ConstraintName.get -> string? Npgsql.PostgresException.DataTypeName.get -> string? @@ -763,7 +793,6 @@ Npgsql.PostgresException.Severity.get -> string! Npgsql.PostgresException.TableName.get -> string? Npgsql.PostgresException.Where.get -> string? Npgsql.PostgresNotice -Npgsql.PostgresNotice.Code.get -> string! Npgsql.PostgresNotice.ColumnName.get -> string? Npgsql.PostgresNotice.ColumnName.set -> void Npgsql.PostgresNotice.ConstraintName.get -> string? @@ -803,9 +832,9 @@ Npgsql.PostgresNotice.Where.get -> string? Npgsql.PostgresNotice.Where.set -> void Npgsql.PostgresTypes.PostgresArrayType Npgsql.PostgresTypes.PostgresArrayType.Element.get -> Npgsql.PostgresTypes.PostgresType! 
-Npgsql.PostgresTypes.PostgresArrayType.PostgresArrayType(string! ns, string! internalName, uint oid, Npgsql.PostgresTypes.PostgresType! elementPostgresType) -> void +Npgsql.PostgresTypes.PostgresArrayType.PostgresArrayType(string! ns, string! name, uint oid, Npgsql.PostgresTypes.PostgresType! elementPostgresType) -> void Npgsql.PostgresTypes.PostgresBaseType -Npgsql.PostgresTypes.PostgresBaseType.PostgresBaseType(string! ns, string! internalName, uint oid) -> void +Npgsql.PostgresTypes.PostgresBaseType.PostgresBaseType(string! ns, string! name, uint oid) -> void Npgsql.PostgresTypes.PostgresCompositeType Npgsql.PostgresTypes.PostgresCompositeType.Field Npgsql.PostgresTypes.PostgresCompositeType.Field.Name.get -> string! @@ -833,8 +862,6 @@ Npgsql.PostgresTypes.PostgresType.InternalName.get -> string! Npgsql.PostgresTypes.PostgresType.Name.get -> string! Npgsql.PostgresTypes.PostgresType.Namespace.get -> string! Npgsql.PostgresTypes.PostgresType.OID.get -> uint -Npgsql.PostgresTypes.PostgresType.PostgresType(string! ns, string! name, string! internalName, uint oid) -> void -Npgsql.PostgresTypes.PostgresType.PostgresType(string! ns, string! name, uint oid) -> void Npgsql.PostgresTypes.PostgresType.Range.get -> Npgsql.PostgresTypes.PostgresRangeType? Npgsql.PostgresTypes.UnknownBackendType Npgsql.ProvideClientCertificatesCallback @@ -873,7 +900,6 @@ Npgsql.Replication.PgOutput.Messages.CommitPreparedMessage.TransactionCommitTime Npgsql.Replication.PgOutput.Messages.DefaultUpdateMessage Npgsql.Replication.PgOutput.Messages.DeleteMessage Npgsql.Replication.PgOutput.Messages.DeleteMessage.Relation.get -> Npgsql.Replication.PgOutput.Messages.RelationMessage! -Npgsql.Replication.PgOutput.Messages.DeleteMessage.RelationId.get -> uint Npgsql.Replication.PgOutput.Messages.FullDeleteMessage Npgsql.Replication.PgOutput.Messages.FullDeleteMessage.OldRow.get -> Npgsql.Replication.PgOutput.ReplicationTuple! 
Npgsql.Replication.PgOutput.Messages.FullUpdateMessage @@ -883,7 +909,6 @@ Npgsql.Replication.PgOutput.Messages.IndexUpdateMessage.Key.get -> Npgsql.Replic Npgsql.Replication.PgOutput.Messages.InsertMessage Npgsql.Replication.PgOutput.Messages.InsertMessage.NewRow.get -> Npgsql.Replication.PgOutput.ReplicationTuple! Npgsql.Replication.PgOutput.Messages.InsertMessage.Relation.get -> Npgsql.Replication.PgOutput.Messages.RelationMessage! -Npgsql.Replication.PgOutput.Messages.InsertMessage.RelationId.get -> uint Npgsql.Replication.PgOutput.Messages.KeyDeleteMessage Npgsql.Replication.PgOutput.Messages.KeyDeleteMessage.Key.get -> Npgsql.Replication.PgOutput.ReplicationTuple! Npgsql.Replication.PgOutput.Messages.LogicalDecodingMessage @@ -973,7 +998,6 @@ Npgsql.Replication.PgOutput.Messages.TypeMessage.Namespace.get -> string! Npgsql.Replication.PgOutput.Messages.TypeMessage.TypeId.get -> uint Npgsql.Replication.PgOutput.Messages.UpdateMessage Npgsql.Replication.PgOutput.Messages.UpdateMessage.Relation.get -> Npgsql.Replication.PgOutput.Messages.RelationMessage! -Npgsql.Replication.PgOutput.Messages.UpdateMessage.RelationId.get -> uint Npgsql.Replication.PgOutput.PgOutputReplicationOptions Npgsql.Replication.PgOutput.PgOutputReplicationOptions.Binary.get -> bool? Npgsql.Replication.PgOutput.PgOutputReplicationOptions.Equals(Npgsql.Replication.PgOutput.PgOutputReplicationOptions? other) -> bool @@ -1014,12 +1038,12 @@ Npgsql.Replication.PhysicalReplicationConnection.PhysicalReplicationConnection() Npgsql.Replication.PhysicalReplicationConnection.PhysicalReplicationConnection(string? connectionString) -> void Npgsql.Replication.PhysicalReplicationConnection.ReadReplicationSlot(string! slotName, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! Npgsql.Replication.PhysicalReplicationConnection.StartReplication(Npgsql.Replication.PhysicalReplicationSlot! 
slot, System.Threading.CancellationToken cancellationToken) -> System.Collections.Generic.IAsyncEnumerable! -Npgsql.Replication.PhysicalReplicationConnection.StartReplication(Npgsql.Replication.PhysicalReplicationSlot? slot, NpgsqlTypes.NpgsqlLogSequenceNumber walLocation, System.Threading.CancellationToken cancellationToken, ulong timeline = 0) -> System.Collections.Generic.IAsyncEnumerable! -Npgsql.Replication.PhysicalReplicationConnection.StartReplication(NpgsqlTypes.NpgsqlLogSequenceNumber walLocation, System.Threading.CancellationToken cancellationToken, ulong timeline = 0) -> System.Collections.Generic.IAsyncEnumerable! +Npgsql.Replication.PhysicalReplicationConnection.StartReplication(Npgsql.Replication.PhysicalReplicationSlot? slot, NpgsqlTypes.NpgsqlLogSequenceNumber walLocation, System.Threading.CancellationToken cancellationToken, uint timeline = 0) -> System.Collections.Generic.IAsyncEnumerable! +Npgsql.Replication.PhysicalReplicationConnection.StartReplication(NpgsqlTypes.NpgsqlLogSequenceNumber walLocation, System.Threading.CancellationToken cancellationToken, uint timeline = 0) -> System.Collections.Generic.IAsyncEnumerable! Npgsql.Replication.PhysicalReplicationSlot -Npgsql.Replication.PhysicalReplicationSlot.PhysicalReplicationSlot(string! slotName, NpgsqlTypes.NpgsqlLogSequenceNumber? restartLsn = null, ulong? restartTimeline = null) -> void +Npgsql.Replication.PhysicalReplicationSlot.PhysicalReplicationSlot(string! slotName, NpgsqlTypes.NpgsqlLogSequenceNumber? restartLsn = null, uint? restartTimeline = null) -> void Npgsql.Replication.PhysicalReplicationSlot.RestartLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber? -Npgsql.Replication.PhysicalReplicationSlot.RestartTimeline.get -> ulong? +Npgsql.Replication.PhysicalReplicationSlot.RestartTimeline.get -> uint? 
Npgsql.Replication.ReplicationConnection Npgsql.Replication.ReplicationConnection.CommandTimeout.get -> System.TimeSpan Npgsql.Replication.ReplicationConnection.CommandTimeout.set -> void @@ -1164,16 +1188,25 @@ Npgsql.StatementType.Select = 1 -> Npgsql.StatementType Npgsql.StatementType.Unknown = 0 -> Npgsql.StatementType Npgsql.StatementType.Update = 4 -> Npgsql.StatementType Npgsql.TypeMapping.INpgsqlTypeMapper -Npgsql.TypeMapping.INpgsqlTypeMapper.AddTypeResolverFactory(Npgsql.Internal.TypeHandling.TypeHandlerResolverFactory! resolverFactory) -> void +Npgsql.TypeMapping.INpgsqlTypeMapper.AddTypeInfoResolverFactory(Npgsql.Internal.PgTypeInfoResolverFactory! factory) -> void +Npgsql.TypeMapping.INpgsqlTypeMapper.ConfigureJsonOptions(System.Text.Json.JsonSerializerOptions! serializerOptions) -> Npgsql.TypeMapping.INpgsqlTypeMapper! Npgsql.TypeMapping.INpgsqlTypeMapper.DefaultNameTranslator.get -> Npgsql.INpgsqlNameTranslator! Npgsql.TypeMapping.INpgsqlTypeMapper.DefaultNameTranslator.set -> void +Npgsql.TypeMapping.INpgsqlTypeMapper.EnableDynamicJson(System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +Npgsql.TypeMapping.INpgsqlTypeMapper.EnableRecordsAsTuples() -> Npgsql.TypeMapping.INpgsqlTypeMapper! +Npgsql.TypeMapping.INpgsqlTypeMapper.EnableUnmappedTypes() -> Npgsql.TypeMapping.INpgsqlTypeMapper! Npgsql.TypeMapping.INpgsqlTypeMapper.MapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! Npgsql.TypeMapping.INpgsqlTypeMapper.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +Npgsql.TypeMapping.INpgsqlTypeMapper.MapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! Npgsql.TypeMapping.INpgsqlTypeMapper.MapEnum(string? 
pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! Npgsql.TypeMapping.INpgsqlTypeMapper.Reset() -> void Npgsql.TypeMapping.INpgsqlTypeMapper.UnmapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool Npgsql.TypeMapping.INpgsqlTypeMapper.UnmapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool +Npgsql.TypeMapping.INpgsqlTypeMapper.UnmapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool Npgsql.TypeMapping.INpgsqlTypeMapper.UnmapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool +Npgsql.TypeMapping.UserTypeMapping +Npgsql.TypeMapping.UserTypeMapping.ClrType.get -> System.Type! +Npgsql.TypeMapping.UserTypeMapping.PgTypeName.get -> string! Npgsql.Util.NpgsqlTimeout Npgsql.Util.NpgsqlTimeout.NpgsqlTimeout() -> void NpgsqlTypes.NpgsqlBox @@ -1192,6 +1225,13 @@ NpgsqlTypes.NpgsqlBox.Top.get -> double NpgsqlTypes.NpgsqlBox.UpperRight.get -> NpgsqlTypes.NpgsqlPoint NpgsqlTypes.NpgsqlBox.UpperRight.set -> void NpgsqlTypes.NpgsqlBox.Width.get -> double +NpgsqlTypes.NpgsqlCidr +NpgsqlTypes.NpgsqlCidr.Address.get -> System.Net.IPAddress! +NpgsqlTypes.NpgsqlCidr.Deconstruct(out System.Net.IPAddress! address, out byte netmask) -> void +NpgsqlTypes.NpgsqlCidr.Netmask.get -> byte +NpgsqlTypes.NpgsqlCidr.NpgsqlCidr() -> void +NpgsqlTypes.NpgsqlCidr.NpgsqlCidr(string! addr) -> void +NpgsqlTypes.NpgsqlCidr.NpgsqlCidr(System.Net.IPAddress! 
address, byte netmask) -> void NpgsqlTypes.NpgsqlCircle NpgsqlTypes.NpgsqlCircle.Center.get -> NpgsqlTypes.NpgsqlPoint NpgsqlTypes.NpgsqlCircle.Center.set -> void @@ -1268,11 +1308,9 @@ NpgsqlTypes.NpgsqlDbType.Time = 20 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.Timestamp = 21 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.TimestampMultirange = 536870933 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.TimestampRange = 1073741845 -> NpgsqlTypes.NpgsqlDbType -NpgsqlTypes.NpgsqlDbType.TimestampTZ = 26 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.TimestampTz = 26 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.TimestampTzMultirange = 536870938 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.TimestampTzRange = 1073741850 -> NpgsqlTypes.NpgsqlDbType -NpgsqlTypes.NpgsqlDbType.TimeTZ = 31 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.TimeTz = 31 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.TsQuery = 46 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.TsVector = 45 -> NpgsqlTypes.NpgsqlDbType @@ -1285,15 +1323,12 @@ NpgsqlTypes.NpgsqlDbType.Xid8 = 64 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.Xml = 28 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlInet NpgsqlTypes.NpgsqlInet.Address.get -> System.Net.IPAddress! -NpgsqlTypes.NpgsqlInet.Address.set -> void -NpgsqlTypes.NpgsqlInet.Deconstruct(out System.Net.IPAddress! address, out int netmask) -> void -NpgsqlTypes.NpgsqlInet.Equals(NpgsqlTypes.NpgsqlInet other) -> bool -NpgsqlTypes.NpgsqlInet.Netmask.get -> int -NpgsqlTypes.NpgsqlInet.Netmask.set -> void +NpgsqlTypes.NpgsqlInet.Deconstruct(out System.Net.IPAddress! address, out byte netmask) -> void +NpgsqlTypes.NpgsqlInet.Netmask.get -> byte NpgsqlTypes.NpgsqlInet.NpgsqlInet() -> void NpgsqlTypes.NpgsqlInet.NpgsqlInet(string! addr) -> void NpgsqlTypes.NpgsqlInet.NpgsqlInet(System.Net.IPAddress! address) -> void -NpgsqlTypes.NpgsqlInet.NpgsqlInet(System.Net.IPAddress! 
address, int netmask) -> void +NpgsqlTypes.NpgsqlInet.NpgsqlInet(System.Net.IPAddress! address, byte netmask) -> void NpgsqlTypes.NpgsqlInterval NpgsqlTypes.NpgsqlInterval.Days.get -> int NpgsqlTypes.NpgsqlInterval.Equals(NpgsqlTypes.NpgsqlInterval other) -> bool @@ -1421,9 +1456,9 @@ NpgsqlTypes.NpgsqlTsQueryBinOp.Right.set -> void NpgsqlTypes.NpgsqlTsQueryEmpty NpgsqlTypes.NpgsqlTsQueryEmpty.NpgsqlTsQueryEmpty() -> void NpgsqlTypes.NpgsqlTsQueryFollowedBy -NpgsqlTypes.NpgsqlTsQueryFollowedBy.Distance.get -> int +NpgsqlTypes.NpgsqlTsQueryFollowedBy.Distance.get -> short NpgsqlTypes.NpgsqlTsQueryFollowedBy.Distance.set -> void -NpgsqlTypes.NpgsqlTsQueryFollowedBy.NpgsqlTsQueryFollowedBy(NpgsqlTypes.NpgsqlTsQuery! left, int distance, NpgsqlTypes.NpgsqlTsQuery! right) -> void +NpgsqlTypes.NpgsqlTsQueryFollowedBy.NpgsqlTsQueryFollowedBy(NpgsqlTypes.NpgsqlTsQuery! left, short distance, NpgsqlTypes.NpgsqlTsQuery! right) -> void NpgsqlTypes.NpgsqlTsQueryLexeme NpgsqlTypes.NpgsqlTsQueryLexeme.IsPrefixSearch.get -> bool NpgsqlTypes.NpgsqlTsQueryLexeme.IsPrefixSearch.set -> void @@ -1482,6 +1517,7 @@ override Npgsql.NpgsqlBatch.DbConnection.get -> System.Data.Common.DbConnection? override Npgsql.NpgsqlBatch.DbConnection.set -> void override Npgsql.NpgsqlBatch.DbTransaction.get -> System.Data.Common.DbTransaction? override Npgsql.NpgsqlBatch.DbTransaction.set -> void +override Npgsql.NpgsqlBatch.Dispose() -> void override Npgsql.NpgsqlBatch.ExecuteDbDataReader(System.Data.CommandBehavior behavior) -> System.Data.Common.DbDataReader! override Npgsql.NpgsqlBatch.ExecuteDbDataReaderAsync(System.Data.CommandBehavior behavior, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! 
override Npgsql.NpgsqlBatch.ExecuteNonQuery() -> int @@ -1492,10 +1528,12 @@ override Npgsql.NpgsqlBatch.Prepare() -> void override Npgsql.NpgsqlBatch.PrepareAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! override Npgsql.NpgsqlBatch.Timeout.get -> int override Npgsql.NpgsqlBatch.Timeout.set -> void +override Npgsql.NpgsqlBatchCommand.CanCreateParameter.get -> bool override Npgsql.NpgsqlBatchCommand.CommandText.get -> string! override Npgsql.NpgsqlBatchCommand.CommandText.set -> void override Npgsql.NpgsqlBatchCommand.CommandType.get -> System.Data.CommandType override Npgsql.NpgsqlBatchCommand.CommandType.set -> void +override Npgsql.NpgsqlBatchCommand.CreateParameter() -> Npgsql.NpgsqlParameter! override Npgsql.NpgsqlBatchCommand.RecordsAffected.get -> int override Npgsql.NpgsqlBatchCommand.ToString() -> string! override Npgsql.NpgsqlBatchCommandCollection.Add(System.Data.Common.DbBatchCommand! item) -> void @@ -1595,9 +1633,6 @@ override Npgsql.NpgsqlDataReader.GetInt32(int ordinal) -> int override Npgsql.NpgsqlDataReader.GetInt64(int ordinal) -> long override Npgsql.NpgsqlDataReader.GetName(int ordinal) -> string! override Npgsql.NpgsqlDataReader.GetOrdinal(string! name) -> int -override Npgsql.NpgsqlDataReader.GetProviderSpecificFieldType(int ordinal) -> System.Type! -override Npgsql.NpgsqlDataReader.GetProviderSpecificValue(int ordinal) -> object! -override Npgsql.NpgsqlDataReader.GetProviderSpecificValues(object![]! values) -> int override Npgsql.NpgsqlDataReader.GetSchemaTable() -> System.Data.DataTable? override Npgsql.NpgsqlDataReader.GetSchemaTableAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! override Npgsql.NpgsqlDataReader.GetStream(int ordinal) -> System.IO.Stream! 
@@ -1668,9 +1703,6 @@ override Npgsql.NpgsqlNestedDataReader.GetInt32(int ordinal) -> int override Npgsql.NpgsqlNestedDataReader.GetInt64(int ordinal) -> long override Npgsql.NpgsqlNestedDataReader.GetName(int ordinal) -> string! override Npgsql.NpgsqlNestedDataReader.GetOrdinal(string! name) -> int -override Npgsql.NpgsqlNestedDataReader.GetProviderSpecificFieldType(int ordinal) -> System.Type! -override Npgsql.NpgsqlNestedDataReader.GetProviderSpecificValue(int ordinal) -> object! -override Npgsql.NpgsqlNestedDataReader.GetProviderSpecificValues(object![]! values) -> int override Npgsql.NpgsqlNestedDataReader.GetString(int ordinal) -> string! override Npgsql.NpgsqlNestedDataReader.GetValue(int ordinal) -> object! override Npgsql.NpgsqlNestedDataReader.GetValues(object![]! values) -> int @@ -1761,11 +1793,10 @@ override Npgsql.Schema.NpgsqlDbColumn.this[string! propertyName].get -> object? override NpgsqlTypes.NpgsqlBox.Equals(object? obj) -> bool override NpgsqlTypes.NpgsqlBox.GetHashCode() -> int override NpgsqlTypes.NpgsqlBox.ToString() -> string! +override NpgsqlTypes.NpgsqlCidr.ToString() -> string! override NpgsqlTypes.NpgsqlCircle.Equals(object? obj) -> bool override NpgsqlTypes.NpgsqlCircle.GetHashCode() -> int override NpgsqlTypes.NpgsqlCircle.ToString() -> string! -override NpgsqlTypes.NpgsqlInet.Equals(object? obj) -> bool -override NpgsqlTypes.NpgsqlInet.GetHashCode() -> int override NpgsqlTypes.NpgsqlInet.ToString() -> string! override NpgsqlTypes.NpgsqlInterval.Equals(object? obj) -> bool override NpgsqlTypes.NpgsqlInterval.GetHashCode() -> int @@ -1853,19 +1884,14 @@ static Npgsql.Replication.TestDecodingConnectionExtensions.CreateTestDecodingRep static Npgsql.Replication.TestDecodingConnectionExtensions.StartReplication(this Npgsql.Replication.LogicalReplicationConnection! connection, Npgsql.Replication.TestDecoding.TestDecodingReplicationSlot! 
slot, System.Threading.CancellationToken cancellationToken, Npgsql.Replication.TestDecoding.TestDecodingOptions? options = null, NpgsqlTypes.NpgsqlLogSequenceNumber? walLocation = null) -> System.Collections.Generic.IAsyncEnumerable! static NpgsqlTypes.NpgsqlBox.operator !=(NpgsqlTypes.NpgsqlBox x, NpgsqlTypes.NpgsqlBox y) -> bool static NpgsqlTypes.NpgsqlBox.operator ==(NpgsqlTypes.NpgsqlBox x, NpgsqlTypes.NpgsqlBox y) -> bool -static NpgsqlTypes.NpgsqlBox.Parse(string! s) -> NpgsqlTypes.NpgsqlBox +static NpgsqlTypes.NpgsqlCidr.explicit operator System.Net.IPAddress!(NpgsqlTypes.NpgsqlCidr cidr) -> System.Net.IPAddress! +static NpgsqlTypes.NpgsqlCidr.implicit operator NpgsqlTypes.NpgsqlInet(NpgsqlTypes.NpgsqlCidr cidr) -> NpgsqlTypes.NpgsqlInet static NpgsqlTypes.NpgsqlCircle.operator !=(NpgsqlTypes.NpgsqlCircle x, NpgsqlTypes.NpgsqlCircle y) -> bool static NpgsqlTypes.NpgsqlCircle.operator ==(NpgsqlTypes.NpgsqlCircle x, NpgsqlTypes.NpgsqlCircle y) -> bool -static NpgsqlTypes.NpgsqlCircle.Parse(string! s) -> NpgsqlTypes.NpgsqlCircle static NpgsqlTypes.NpgsqlInet.explicit operator System.Net.IPAddress!(NpgsqlTypes.NpgsqlInet inet) -> System.Net.IPAddress! static NpgsqlTypes.NpgsqlInet.implicit operator NpgsqlTypes.NpgsqlInet(System.Net.IPAddress! ip) -> NpgsqlTypes.NpgsqlInet -static NpgsqlTypes.NpgsqlInet.operator !=(NpgsqlTypes.NpgsqlInet x, NpgsqlTypes.NpgsqlInet y) -> bool -static NpgsqlTypes.NpgsqlInet.operator ==(NpgsqlTypes.NpgsqlInet x, NpgsqlTypes.NpgsqlInet y) -> bool -static NpgsqlTypes.NpgsqlInet.ToIPAddress(NpgsqlTypes.NpgsqlInet inet) -> System.Net.IPAddress! -static NpgsqlTypes.NpgsqlInet.ToNpgsqlInet(System.Net.IPAddress? ip) -> NpgsqlTypes.NpgsqlInet static NpgsqlTypes.NpgsqlLine.operator !=(NpgsqlTypes.NpgsqlLine x, NpgsqlTypes.NpgsqlLine y) -> bool static NpgsqlTypes.NpgsqlLine.operator ==(NpgsqlTypes.NpgsqlLine x, NpgsqlTypes.NpgsqlLine y) -> bool -static NpgsqlTypes.NpgsqlLine.Parse(string! 
s) -> NpgsqlTypes.NpgsqlLine static NpgsqlTypes.NpgsqlLogSequenceNumber.explicit operator NpgsqlTypes.NpgsqlLogSequenceNumber(ulong value) -> NpgsqlTypes.NpgsqlLogSequenceNumber static NpgsqlTypes.NpgsqlLogSequenceNumber.explicit operator ulong(NpgsqlTypes.NpgsqlLogSequenceNumber value) -> ulong static NpgsqlTypes.NpgsqlLogSequenceNumber.Larger(NpgsqlTypes.NpgsqlLogSequenceNumber value1, NpgsqlTypes.NpgsqlLogSequenceNumber value2) -> NpgsqlTypes.NpgsqlLogSequenceNumber @@ -1885,16 +1911,12 @@ static NpgsqlTypes.NpgsqlLogSequenceNumber.TryParse(string! s, out NpgsqlTypes.N static NpgsqlTypes.NpgsqlLogSequenceNumber.TryParse(System.ReadOnlySpan s, out NpgsqlTypes.NpgsqlLogSequenceNumber result) -> bool static NpgsqlTypes.NpgsqlLSeg.operator !=(NpgsqlTypes.NpgsqlLSeg x, NpgsqlTypes.NpgsqlLSeg y) -> bool static NpgsqlTypes.NpgsqlLSeg.operator ==(NpgsqlTypes.NpgsqlLSeg x, NpgsqlTypes.NpgsqlLSeg y) -> bool -static NpgsqlTypes.NpgsqlLSeg.Parse(string! s) -> NpgsqlTypes.NpgsqlLSeg static NpgsqlTypes.NpgsqlPath.operator !=(NpgsqlTypes.NpgsqlPath x, NpgsqlTypes.NpgsqlPath y) -> bool static NpgsqlTypes.NpgsqlPath.operator ==(NpgsqlTypes.NpgsqlPath x, NpgsqlTypes.NpgsqlPath y) -> bool -static NpgsqlTypes.NpgsqlPath.Parse(string! s) -> NpgsqlTypes.NpgsqlPath static NpgsqlTypes.NpgsqlPoint.operator !=(NpgsqlTypes.NpgsqlPoint x, NpgsqlTypes.NpgsqlPoint y) -> bool static NpgsqlTypes.NpgsqlPoint.operator ==(NpgsqlTypes.NpgsqlPoint x, NpgsqlTypes.NpgsqlPoint y) -> bool -static NpgsqlTypes.NpgsqlPoint.Parse(string! s) -> NpgsqlTypes.NpgsqlPoint static NpgsqlTypes.NpgsqlPolygon.operator !=(NpgsqlTypes.NpgsqlPolygon x, NpgsqlTypes.NpgsqlPolygon y) -> bool static NpgsqlTypes.NpgsqlPolygon.operator ==(NpgsqlTypes.NpgsqlPolygon x, NpgsqlTypes.NpgsqlPolygon y) -> bool -static NpgsqlTypes.NpgsqlPolygon.Parse(string! 
s) -> NpgsqlTypes.NpgsqlPolygon static NpgsqlTypes.NpgsqlRange.operator !=(NpgsqlTypes.NpgsqlRange x, NpgsqlTypes.NpgsqlRange y) -> bool static NpgsqlTypes.NpgsqlRange.operator ==(NpgsqlTypes.NpgsqlRange x, NpgsqlTypes.NpgsqlRange y) -> bool static NpgsqlTypes.NpgsqlRange.Parse(string! value) -> NpgsqlTypes.NpgsqlRange diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index 786c7241c9..ab058de62d 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -1,177 +1 @@ #nullable enable -const Npgsql.PostgresErrorCodes.IdleSessionTimeout = "57P05" -> string! -Npgsql.ChannelBinding -Npgsql.ChannelBinding.Disable = 0 -> Npgsql.ChannelBinding -Npgsql.ChannelBinding.Prefer = 1 -> Npgsql.ChannelBinding -Npgsql.ChannelBinding.Require = 2 -> Npgsql.ChannelBinding -Npgsql.NpgsqlBatch.CreateBatchCommand() -> Npgsql.NpgsqlBatchCommand! -Npgsql.NpgsqlConnectionStringBuilder.ChannelBinding.get -> Npgsql.ChannelBinding -Npgsql.NpgsqlConnectionStringBuilder.ChannelBinding.set -> void -Npgsql.NpgsqlBinaryImporter.WriteRow(params object?[]! values) -> void -Npgsql.NpgsqlBinaryImporter.WriteRowAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken), params object?[]! values) -> System.Threading.Tasks.Task! -Npgsql.NpgsqlDataSourceBuilder.AddTypeInfoResolverFactory(Npgsql.Internal.PgTypeInfoResolverFactory! factory) -> void -Npgsql.NpgsqlDataSourceBuilder.ConfigureJsonOptions(System.Text.Json.JsonSerializerOptions! serializerOptions) -> Npgsql.NpgsqlDataSourceBuilder! -Npgsql.NpgsqlDataSourceBuilder.EnableDynamicJson(System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> Npgsql.NpgsqlDataSourceBuilder! -Npgsql.NpgsqlDataSourceBuilder.EnableRecordsAsTuples() -> Npgsql.NpgsqlDataSourceBuilder! -Npgsql.NpgsqlDataSourceBuilder.EnableUnmappedTypes() -> Npgsql.NpgsqlDataSourceBuilder! -Npgsql.NpgsqlDataSourceBuilder.MapEnum(System.Type! clrType, string? 
pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! -Npgsql.NpgsqlDataSourceBuilder.Name.get -> string? -Npgsql.NpgsqlDataSourceBuilder.Name.set -> void -Npgsql.NpgsqlDataSourceBuilder.UnmapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool -Npgsql.NpgsqlDataSourceBuilder.UsePasswordProvider(System.Func? passwordProvider, System.Func>? passwordProviderAsync) -> Npgsql.NpgsqlDataSourceBuilder! -Npgsql.NpgsqlDataSourceBuilder.UseRootCertificate(System.Security.Cryptography.X509Certificates.X509Certificate2? rootCertificate) -> Npgsql.NpgsqlDataSourceBuilder! -Npgsql.NpgsqlDataSourceBuilder.UseRootCertificateCallback(System.Func? rootCertificateCallback) -> Npgsql.NpgsqlDataSourceBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder -Npgsql.NpgsqlSlimDataSourceBuilder.AddTypeInfoResolverFactory(Npgsql.Internal.PgTypeInfoResolverFactory! factory) -> void -Npgsql.NpgsqlSlimDataSourceBuilder.Build() -> Npgsql.NpgsqlDataSource! -Npgsql.NpgsqlSlimDataSourceBuilder.BuildMultiHost() -> Npgsql.NpgsqlMultiHostDataSource! -Npgsql.NpgsqlSlimDataSourceBuilder.ConfigureJsonOptions(System.Text.Json.JsonSerializerOptions! serializerOptions) -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder.ConnectionString.get -> string! -Npgsql.NpgsqlSlimDataSourceBuilder.ConnectionStringBuilder.get -> Npgsql.NpgsqlConnectionStringBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder.DefaultNameTranslator.get -> Npgsql.INpgsqlNameTranslator! -Npgsql.NpgsqlSlimDataSourceBuilder.DefaultNameTranslator.set -> void -Npgsql.NpgsqlSlimDataSourceBuilder.EnableArrays() -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder.EnableDynamicJson(System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder.EnableExtraConversions() -> Npgsql.NpgsqlSlimDataSourceBuilder! 
-Npgsql.NpgsqlSlimDataSourceBuilder.EnableFullTextSearch() -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder.EnableIntegratedSecurity() -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder.EnableLTree() -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder.EnableMultiranges() -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder.EnableParameterLogging(bool parameterLoggingEnabled = true) -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder.EnableRanges() -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder.EnableRecords() -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder.EnableRecordsAsTuples() -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder.EnableTransportSecurity() -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder.EnableUnmappedTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder.MapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! -Npgsql.NpgsqlSlimDataSourceBuilder.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! -Npgsql.NpgsqlSlimDataSourceBuilder.MapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! -Npgsql.NpgsqlSlimDataSourceBuilder.MapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! -Npgsql.NpgsqlSlimDataSourceBuilder.Name.get -> string? -Npgsql.NpgsqlSlimDataSourceBuilder.Name.set -> void -Npgsql.NpgsqlSlimDataSourceBuilder.NpgsqlSlimDataSourceBuilder(string? connectionString = null) -> void -Npgsql.NpgsqlSlimDataSourceBuilder.UnmapComposite(System.Type! clrType, string? 
pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool -Npgsql.NpgsqlSlimDataSourceBuilder.UnmapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool -Npgsql.NpgsqlSlimDataSourceBuilder.UnmapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool -Npgsql.NpgsqlSlimDataSourceBuilder.UnmapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool -Npgsql.NpgsqlSlimDataSourceBuilder.UseClientCertificate(System.Security.Cryptography.X509Certificates.X509Certificate? clientCertificate) -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder.UseClientCertificates(System.Security.Cryptography.X509Certificates.X509CertificateCollection? clientCertificates) -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder.UseClientCertificatesCallback(System.Action? clientCertificatesCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder.UseLoggerFactory(Microsoft.Extensions.Logging.ILoggerFactory? loggerFactory) -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder.UsePasswordProvider(System.Func? passwordProvider, System.Func>? passwordProviderAsync) -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder.UsePeriodicPasswordProvider(System.Func>? passwordProvider, System.TimeSpan successRefreshInterval, System.TimeSpan failureRefreshInterval) -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder.UsePhysicalConnectionInitializer(System.Action? connectionInitializer, System.Func? connectionInitializerAsync) -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder.UseRootCertificate(System.Security.Cryptography.X509Certificates.X509Certificate2? rootCertificate) -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder.UseRootCertificateCallback(System.Func? 
rootCertificateCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder.UseUserCertificateValidationCallback(System.Net.Security.RemoteCertificateValidationCallback! userCertificateValidationCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.PostgresTypes.PostgresArrayType.PostgresArrayType(string! ns, string! name, uint oid, Npgsql.PostgresTypes.PostgresType! elementPostgresType) -> void -Npgsql.PostgresTypes.PostgresBaseType.PostgresBaseType(string! ns, string! name, uint oid) -> void -Npgsql.Replication.PhysicalReplicationConnection.StartReplication(Npgsql.Replication.PhysicalReplicationSlot? slot, NpgsqlTypes.NpgsqlLogSequenceNumber walLocation, System.Threading.CancellationToken cancellationToken, uint timeline = 0) -> System.Collections.Generic.IAsyncEnumerable! -Npgsql.Replication.PhysicalReplicationConnection.StartReplication(NpgsqlTypes.NpgsqlLogSequenceNumber walLocation, System.Threading.CancellationToken cancellationToken, uint timeline = 0) -> System.Collections.Generic.IAsyncEnumerable! -Npgsql.Replication.PhysicalReplicationSlot.PhysicalReplicationSlot(string! slotName, NpgsqlTypes.NpgsqlLogSequenceNumber? restartLsn = null, uint? restartTimeline = null) -> void -Npgsql.Replication.PhysicalReplicationSlot.RestartTimeline.get -> uint? -Npgsql.TypeMapping.INpgsqlTypeMapper.AddTypeInfoResolverFactory(Npgsql.Internal.PgTypeInfoResolverFactory! factory) -> void -Npgsql.TypeMapping.INpgsqlTypeMapper.ConfigureJsonOptions(System.Text.Json.JsonSerializerOptions! serializerOptions) -> Npgsql.TypeMapping.INpgsqlTypeMapper! -Npgsql.TypeMapping.INpgsqlTypeMapper.EnableDynamicJson(System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! -Npgsql.TypeMapping.INpgsqlTypeMapper.EnableRecordsAsTuples() -> Npgsql.TypeMapping.INpgsqlTypeMapper! -Npgsql.TypeMapping.INpgsqlTypeMapper.EnableUnmappedTypes() -> Npgsql.TypeMapping.INpgsqlTypeMapper! 
-Npgsql.TypeMapping.INpgsqlTypeMapper.MapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! -Npgsql.TypeMapping.INpgsqlTypeMapper.UnmapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> bool -Npgsql.TypeMapping.UserTypeMapping -Npgsql.TypeMapping.UserTypeMapping.ClrType.get -> System.Type! -Npgsql.TypeMapping.UserTypeMapping.PgTypeName.get -> string! -NpgsqlTypes.NpgsqlCidr -NpgsqlTypes.NpgsqlCidr.Address.get -> System.Net.IPAddress! -NpgsqlTypes.NpgsqlCidr.Deconstruct(out System.Net.IPAddress! address, out byte netmask) -> void -NpgsqlTypes.NpgsqlCidr.Netmask.get -> byte -NpgsqlTypes.NpgsqlCidr.NpgsqlCidr() -> void -NpgsqlTypes.NpgsqlCidr.NpgsqlCidr(string! addr) -> void -NpgsqlTypes.NpgsqlCidr.NpgsqlCidr(System.Net.IPAddress! address, byte netmask) -> void -NpgsqlTypes.NpgsqlInet.Deconstruct(out System.Net.IPAddress! address, out byte netmask) -> void -NpgsqlTypes.NpgsqlInet.NpgsqlInet(System.Net.IPAddress! address, byte netmask) -> void -NpgsqlTypes.NpgsqlInet.Netmask.get -> byte -NpgsqlTypes.NpgsqlTsQueryFollowedBy.Distance.get -> short -NpgsqlTypes.NpgsqlTsQueryFollowedBy.NpgsqlTsQueryFollowedBy(NpgsqlTypes.NpgsqlTsQuery! left, short distance, NpgsqlTypes.NpgsqlTsQuery! right) -> void -override Npgsql.NpgsqlBatch.Dispose() -> void -*REMOVED*static NpgsqlTypes.NpgsqlBox.Parse(string! s) -> NpgsqlTypes.NpgsqlBox -*REMOVED*static NpgsqlTypes.NpgsqlCircle.Parse(string! s) -> NpgsqlTypes.NpgsqlCircle -*REMOVED*static NpgsqlTypes.NpgsqlLine.Parse(string! s) -> NpgsqlTypes.NpgsqlLine -*REMOVED*static NpgsqlTypes.NpgsqlLSeg.Parse(string! s) -> NpgsqlTypes.NpgsqlLSeg -*REMOVED*static NpgsqlTypes.NpgsqlPath.Parse(string! s) -> NpgsqlTypes.NpgsqlPath -*REMOVED*static NpgsqlTypes.NpgsqlPoint.Parse(string! s) -> NpgsqlTypes.NpgsqlPoint -*REMOVED*static NpgsqlTypes.NpgsqlPolygon.Parse(string! 
s) -> NpgsqlTypes.NpgsqlPolygon -*REMOVED*NpgsqlTypes.NpgsqlDbType.TimestampTZ = 26 -> NpgsqlTypes.NpgsqlDbType -*REMOVED*NpgsqlTypes.NpgsqlDbType.TimeTZ = 31 -> NpgsqlTypes.NpgsqlDbType -*REMOVED*NpgsqlTypes.NpgsqlInet.Deconstruct(out System.Net.IPAddress! address, out int netmask) -> void -*REMOVED*NpgsqlTypes.NpgsqlInet.NpgsqlInet(System.Net.IPAddress! address, int netmask) -> void -*REMOVED*NpgsqlTypes.NpgsqlInet.Address.set -> void -*REMOVED*NpgsqlTypes.NpgsqlInet.Equals(NpgsqlTypes.NpgsqlInet other) -> bool -*REMOVED*NpgsqlTypes.NpgsqlInet.Netmask.get -> int -*REMOVED*NpgsqlTypes.NpgsqlInet.Netmask.set -> void -*REMOVED*NpgsqlTypes.NpgsqlTsQueryFollowedBy.Distance.get -> int -*REMOVED*NpgsqlTypes.NpgsqlTsQueryFollowedBy.NpgsqlTsQueryFollowedBy(NpgsqlTypes.NpgsqlTsQuery! left, int distance, NpgsqlTypes.NpgsqlTsQuery! right) -> void -*REMOVED*Npgsql.NpgsqlBinaryImporter.WriteRow(params object![]! values) -> void -*REMOVED*Npgsql.NpgsqlBinaryImporter.WriteRowAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken), params object![]! values) -> System.Threading.Tasks.Task! -*REMOVED*override Npgsql.NpgsqlDataReader.GetProviderSpecificFieldType(int ordinal) -> System.Type! -*REMOVED*override Npgsql.NpgsqlDataReader.GetProviderSpecificValue(int ordinal) -> object! -*REMOVED*override Npgsql.NpgsqlDataReader.GetProviderSpecificValues(object![]! values) -> int -*REMOVED*override Npgsql.NpgsqlNestedDataReader.GetProviderSpecificFieldType(int ordinal) -> System.Type! -*REMOVED*override Npgsql.NpgsqlNestedDataReader.GetProviderSpecificValue(int ordinal) -> object! -*REMOVED*override Npgsql.NpgsqlNestedDataReader.GetProviderSpecificValues(object![]! values) -> int -*REMOVED*override NpgsqlTypes.NpgsqlInet.Equals(object? obj) -> bool -*REMOVED*override NpgsqlTypes.NpgsqlInet.GetHashCode() -> int -*REMOVED*Npgsql.Replication.PhysicalReplicationConnection.StartReplication(Npgsql.Replication.PhysicalReplicationSlot? 
slot, NpgsqlTypes.NpgsqlLogSequenceNumber walLocation, System.Threading.CancellationToken cancellationToken, ulong timeline = 0) -> System.Collections.Generic.IAsyncEnumerable! -*REMOVED*Npgsql.Replication.PhysicalReplicationConnection.StartReplication(NpgsqlTypes.NpgsqlLogSequenceNumber walLocation, System.Threading.CancellationToken cancellationToken, ulong timeline = 0) -> System.Collections.Generic.IAsyncEnumerable! -*REMOVED*Npgsql.Replication.PhysicalReplicationSlot.PhysicalReplicationSlot(string! slotName, NpgsqlTypes.NpgsqlLogSequenceNumber? restartLsn = null, ulong? restartTimeline = null) -> void -*REMOVED*Npgsql.Replication.PhysicalReplicationSlot.RestartTimeline.get -> ulong? -override Npgsql.NpgsqlBatchCommand.CanCreateParameter.get -> bool -override Npgsql.NpgsqlBatchCommand.CreateParameter() -> Npgsql.NpgsqlParameter! -override NpgsqlTypes.NpgsqlCidr.ToString() -> string! -*REMOVED*static NpgsqlTypes.NpgsqlInet.operator !=(NpgsqlTypes.NpgsqlInet x, NpgsqlTypes.NpgsqlInet y) -> bool -*REMOVED*static NpgsqlTypes.NpgsqlInet.operator ==(NpgsqlTypes.NpgsqlInet x, NpgsqlTypes.NpgsqlInet y) -> bool -*REMOVED*static NpgsqlTypes.NpgsqlInet.ToIPAddress(NpgsqlTypes.NpgsqlInet inet) -> System.Net.IPAddress! -*REMOVED*static NpgsqlTypes.NpgsqlInet.ToNpgsqlInet(System.Net.IPAddress? ip) -> NpgsqlTypes.NpgsqlInet -*REMOVED*Npgsql.NpgsqlDataSourceBuilder.AddTypeResolverFactory(Npgsql.Internal.TypeHandling.TypeHandlerResolverFactory! resolverFactory) -> void -static NpgsqlTypes.NpgsqlCidr.explicit operator System.Net.IPAddress!(NpgsqlTypes.NpgsqlCidr cidr) -> System.Net.IPAddress! 
-static NpgsqlTypes.NpgsqlCidr.implicit operator NpgsqlTypes.NpgsqlInet(NpgsqlTypes.NpgsqlCidr cidr) -> NpgsqlTypes.NpgsqlInet -*REMOVED*Npgsql.NpgsqlConnection.IntegratedSecurity.get -> bool -*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.BackendTimeouts.get -> bool -*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.BackendTimeouts.set -> void -*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.ClientCertificate.get -> string? -*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.ClientCertificate.set -> void -*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.ClientCertificateKey.get -> string? -*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.ClientCertificateKey.set -> void -*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.ContinuousProcessing.get -> bool -*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.ContinuousProcessing.set -> void -*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.ConvertInfinityDateTime.get -> bool -*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.ConvertInfinityDateTime.set -> void -*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.EntityAdminDatabase.get -> string? -*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.EntityAdminDatabase.set -> void -*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.EntityTemplateDatabase.get -> string? 
-*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.EntityTemplateDatabase.set -> void -*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.IncludeErrorDetails.get -> bool -*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.IncludeErrorDetails.set -> void -*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.IntegratedSecurity.get -> bool -*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.IntegratedSecurity.set -> void -*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.PreloadReader.get -> bool -*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.PreloadReader.set -> void -*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.UseExtendedTypes.get -> bool -*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.UseExtendedTypes.set -> void -*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.UsePerfCounters.get -> bool -*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.UsePerfCounters.set -> void -*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.UseSslStream.get -> bool -*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.UseSslStream.set -> void -*REMOVED*Npgsql.NpgsqlNotificationEventArgs.AdditionalInformation.get -> string! -*REMOVED*Npgsql.NpgsqlNotificationEventArgs.Condition.get -> string! -*REMOVED*Npgsql.NpgsqlParameter.ConvertedValue.get -> object? -*REMOVED*Npgsql.NpgsqlParameter.ConvertedValue.set -> void -*REMOVED*Npgsql.PostgresException.Code.get -> string! -*REMOVED*Npgsql.PostgresNotice.Code.get -> string! -*REMOVED*Npgsql.PostgresTypes.PostgresArrayType.PostgresArrayType(string! ns, string! internalName, uint oid, Npgsql.PostgresTypes.PostgresType! elementPostgresType) -> void -*REMOVED*Npgsql.PostgresTypes.PostgresBaseType.PostgresBaseType(string! ns, string! internalName, uint oid) -> void -*REMOVED*Npgsql.PostgresTypes.PostgresType.PostgresType(string! ns, string! name, string! internalName, uint oid) -> void -*REMOVED*Npgsql.PostgresTypes.PostgresType.PostgresType(string! ns, string! 
name, uint oid) -> void -*REMOVED*Npgsql.Replication.PgOutput.Messages.DeleteMessage.RelationId.get -> uint -*REMOVED*Npgsql.Replication.PgOutput.Messages.InsertMessage.RelationId.get -> uint -*REMOVED*Npgsql.Replication.PgOutput.Messages.UpdateMessage.RelationId.get -> uint -*REMOVED*Npgsql.TypeMapping.INpgsqlTypeMapper.AddTypeResolverFactory(Npgsql.Internal.TypeHandling.TypeHandlerResolverFactory! resolverFactory) -> void From f5cd070ac4691a95d461bf7645fd2ea231ad97c0 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Tue, 21 Nov 2023 01:46:48 +0100 Subject: [PATCH 324/761] Bump version to 9.0.0-preview.1 --- Directory.Build.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Build.props b/Directory.Build.props index c3f00e9d91..2b4dfb1b41 100644 --- a/Directory.Build.props +++ b/Directory.Build.props @@ -1,6 +1,6 @@  - 8.0.0 + 9.0.0-preview.1 latest true enable From aebf620f7d75683a6ffebe84af238222bee917ad Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Tue, 21 Nov 2023 15:15:39 +0100 Subject: [PATCH 325/761] Reuse parameter index for all case permutations (#5432) Fixes #5428 --- src/Npgsql/SqlQueryParser.cs | 2 +- test/Npgsql.Tests/NpgsqlParameterCollectionTests.cs | 9 +++++++++ test/Npgsql.Tests/NpgsqlParameterTests.cs | 10 ++++++++++ 3 files changed, 20 insertions(+), 1 deletion(-) diff --git a/src/Npgsql/SqlQueryParser.cs b/src/Npgsql/SqlQueryParser.cs index 16c9992c14..2a76755f0b 100644 --- a/src/Npgsql/SqlQueryParser.cs +++ b/src/Npgsql/SqlQueryParser.cs @@ -9,7 +9,7 @@ sealed class SqlQueryParser { static NpgsqlParameterCollection EmptyParameters { get; } = new(); - readonly Dictionary _paramIndexMap = new(); + readonly Dictionary _paramIndexMap = new(StringComparer.OrdinalIgnoreCase); readonly StringBuilder _rewrittenSql = new(); /// diff --git a/test/Npgsql.Tests/NpgsqlParameterCollectionTests.cs b/test/Npgsql.Tests/NpgsqlParameterCollectionTests.cs index 87875117d2..6c09b7b708 100644 --- 
a/test/Npgsql.Tests/NpgsqlParameterCollectionTests.cs +++ b/test/Npgsql.Tests/NpgsqlParameterCollectionTests.cs @@ -227,6 +227,15 @@ public void Positional_parameter_lookup_returns_first_match([Values(LookupThresh Assert.That(command.Parameters.IndexOf(""), Is.EqualTo(0)); } + [Test] + public void Throw_multiple_positions_same_instance() + { + using var cmd = new NpgsqlCommand("SELECT $1, $2"); + var p = new NpgsqlParameter("", "Hello world"); + cmd.Parameters.Add(p); + Assert.Throws(() => cmd.Parameters.Add(p)); + } + [Test] public void IndexOf_falls_back_to_first_insensitive_match([Values] bool manyParams) { diff --git a/test/Npgsql.Tests/NpgsqlParameterTests.cs b/test/Npgsql.Tests/NpgsqlParameterTests.cs index 1678b3b37e..59f26cbe26 100644 --- a/test/Npgsql.Tests/NpgsqlParameterTests.cs +++ b/test/Npgsql.Tests/NpgsqlParameterTests.cs @@ -3,6 +3,7 @@ using System; using System.Data; using System.Data.Common; +using System.Threading.Tasks; namespace Npgsql.Tests; @@ -435,6 +436,15 @@ public void Parameter_type() Assert.AreEqual(NpgsqlDbType.Bytea, p.NpgsqlDbType, "#J4"); } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5428")] + public async Task Match_param_index_case_insensitively() + { + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT @p,@P", conn); + cmd.Parameters.AddWithValue("p", "Hello world"); + await cmd.ExecuteNonQueryAsync(); + } + [Test] [Ignore("")] public void ParameterName() From 63aed3570fa2b04eaf26807c12e5cfc551e7aedd Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Tue, 21 Nov 2023 18:10:47 +0100 Subject: [PATCH 326/761] Actually rename switch to EnableAssertions (#5433) --- src/Npgsql/Internal/NpgsqlReadBuffer.cs | 2 +- src/Npgsql/Util/Statics.cs | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlReadBuffer.cs b/src/Npgsql/Internal/NpgsqlReadBuffer.cs index a78679e383..3578688200 100644 --- a/src/Npgsql/Internal/NpgsqlReadBuffer.cs 
+++ b/src/Npgsql/Internal/NpgsqlReadBuffer.cs @@ -24,7 +24,7 @@ sealed partial class NpgsqlReadBuffer : IDisposable #if DEBUG internal static readonly bool BufferBoundsChecks = true; #else - internal static readonly bool BufferBoundsChecks = Statics.EnableDiagnostics; + internal static readonly bool BufferBoundsChecks = Statics.EnableAssertions; #endif public NpgsqlConnection Connection => Connector.Connection!; diff --git a/src/Npgsql/Util/Statics.cs b/src/Npgsql/Util/Statics.cs index d25df2086a..2b1101171b 100644 --- a/src/Npgsql/Util/Statics.cs +++ b/src/Npgsql/Util/Statics.cs @@ -9,19 +9,18 @@ namespace Npgsql.Util; static class Statics { + internal static readonly bool EnableAssertions; #if DEBUG - internal static bool EnableDiagnostics; internal static bool LegacyTimestampBehavior; internal static bool DisableDateTimeInfinityConversions; #else - internal static readonly bool EnableDiagnostics; internal static readonly bool LegacyTimestampBehavior; internal static readonly bool DisableDateTimeInfinityConversions; #endif static Statics() { - EnableDiagnostics = AppContext.TryGetSwitch("Npgsql.EnableDiagnostics", out var enabled) && enabled; + EnableAssertions = AppContext.TryGetSwitch("Npgsql.EnableAssertions", out var enabled) && enabled; LegacyTimestampBehavior = AppContext.TryGetSwitch("Npgsql.EnableLegacyTimestampBehavior", out enabled) && enabled; DisableDateTimeInfinityConversions = AppContext.TryGetSwitch("Npgsql.DisableDateTimeInfinityConversions", out enabled) && enabled; } From 0ad0d615a055b81f0f170abcd509299bf7a6e267 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Tue, 21 Nov 2023 23:44:41 +0300 Subject: [PATCH 327/761] Drop NETSTANDARD (#5434) Closes #5296 It's time :fire: --- .github/workflows/build.yml | 4 - .../Npgsql.DependencyInjection.csproj | 8 +- .../Properties/AssemblyInfo.cs | 2 - .../Internal/GeoJSONConverter.cs | 19 +- src/Npgsql.GeoJSON/Npgsql.GeoJSON.csproj | 2 +- src/Npgsql.GeoJSON/Properties/AssemblyInfo.cs | 2 - 
src/Npgsql.Json.NET/Npgsql.Json.NET.csproj | 4 +- .../Properties/AssemblyInfo.cs | 2 - .../Npgsql.NetTopologySuite.csproj | 5 +- .../Properties/AssemblyInfo.cs | 2 - src/Npgsql.NodaTime/Npgsql.NodaTime.csproj | 7 +- .../Properties/AssemblyInfo.cs | 2 - .../Npgsql.OpenTelemetry.csproj | 2 +- .../Properties/AssemblyInfo.cs | 2 - .../Composites/Metadata/CompositeFieldInfo.cs | 5 +- .../ReflectionCompositeInfoFactory.cs | 2 - .../Internal/Converters/ArrayConverter.cs | 2 - .../Internal/Converters/AsyncHelpers.cs | 5 +- .../Internal/Converters/JsonConverter.cs | 14 +- .../Networking/NpgsqlInetConverter.cs | 4 - .../Converters/Primitive/ByteaConverters.cs | 6 - .../Internal/Converters/Primitive/PgMoney.cs | 44 ---- .../Converters/Primitive/PgNumeric.cs | 72 +----- .../Converters/Primitive/TextConverters.cs | 14 +- .../Converters/Temporal/DateConverters.cs | 2 - .../Converters/Temporal/TimeConverters.cs | 2 - src/Npgsql/Internal/NpgsqlConnector.Auth.cs | 2 - .../Internal/NpgsqlConnector.OldAuth.cs | 23 -- src/Npgsql/Internal/NpgsqlConnector.cs | 57 +---- .../Internal/NpgsqlReadBuffer.Stream.cs | 15 -- src/Npgsql/Internal/NpgsqlReadBuffer.cs | 36 +-- src/Npgsql/Internal/PgStreamingConverter.cs | 2 - src/Npgsql/Internal/PgWriter.cs | 13 +- .../AdoTypeInfoResolverFactory.Multirange.cs | 31 +-- .../AdoTypeInfoResolverFactory.Range.cs | 9 +- .../AdoTypeInfoResolverFactory.cs | 8 - src/Npgsql/KerberosUsernameProvider.cs | 9 +- src/Npgsql/MetricsReporter.cs | 18 -- src/Npgsql/Npgsql.csproj | 2 +- src/Npgsql/NpgsqlCommand.cs | 4 - src/Npgsql/NpgsqlConnection.cs | 40 +--- src/Npgsql/NpgsqlConnectionStringBuilder.cs | 3 +- src/Npgsql/NpgsqlDataReader.cs | 20 +- src/Npgsql/NpgsqlDataSource.cs | 6 - src/Npgsql/NpgsqlEventSource.cs | 7 - src/Npgsql/NpgsqlException.cs | 12 - src/Npgsql/NpgsqlFactory.cs | 4 - src/Npgsql/NpgsqlMultiHostDataSource.cs | 4 - src/Npgsql/NpgsqlRawCopyStream.cs | 28 --- src/Npgsql/NpgsqlSchema.cs | 4 - src/Npgsql/NpgsqlTransaction.cs | 40 ---- 
.../NpgsqlTypes/NpgsqlLogSequenceNumber.cs | 7 +- src/Npgsql/PostgresException.cs | 4 - src/Npgsql/PreparedTextReader.cs | 10 +- src/Npgsql/Properties/AssemblyInfo.cs | 2 - .../PgOutput/PgOutputReplicationOptions.cs | 15 +- .../Replication/ReplicationConnection.cs | 21 -- .../TestDecoding/TestDecodingOptions.cs | 17 +- src/Npgsql/Shims/Batching.cs | 130 ----------- .../Shims/ConcurrentDictionaryExtensions.cs | 15 -- src/Npgsql/Shims/DbDataReaderExtensions.cs | 22 -- src/Npgsql/Shims/DictonaryExtensions.cs | 19 -- src/Npgsql/Shims/EncodingExtensions.cs | 219 ------------------ .../Shims/ReadOnlySequenceExtensions.cs | 13 -- .../Shims/ReadOnlySpanOfCharExtensions.cs | 15 -- src/Npgsql/Shims/ReferenceEqualityComparer.cs | 48 ---- .../Shims/RequiresPreviewFeaturesAttribute.cs | 48 ---- src/Npgsql/Shims/StreamExtensions.cs | 65 +----- src/Npgsql/Shims/StringBuilderExtensions.cs | 33 --- src/Npgsql/Shims/TaskExtensions.cs | 65 ------ src/Npgsql/Shims/UnixDomainSocketEndPoint.cs | 89 ------- src/Npgsql/Shims/UnreachableException.cs | 2 - src/Npgsql/Shims/WaitHandleExtensions.cs | 42 ---- src/Npgsql/Util/SubReadStream.cs | 23 -- src/Shared/CodeAnalysis.cs | 165 +------------ test/Directory.Build.props | 3 +- .../Npgsql.DependencyInjection.Tests.csproj | 5 - .../Npgsql.NativeAotTests.csproj | 2 - test/Npgsql.PluginTests/NodaTimeTests.cs | 4 - test/Npgsql.Tests/BatchTests.cs | 2 - test/Npgsql.Tests/CopyTests.cs | 2 - test/Npgsql.Tests/TestUtil.cs | 17 -- .../Types/DateTimeInfinityTests.cs | 2 - test/Npgsql.Tests/Types/DateTimeTests.cs | 4 - test/Npgsql.Tests/Types/JsonDynamicTests.cs | 2 - test/Npgsql.Tests/Types/MultirangeTests.cs | 2 - 86 files changed, 57 insertions(+), 1709 deletions(-) delete mode 100644 src/Npgsql/Shims/Batching.cs delete mode 100644 src/Npgsql/Shims/ConcurrentDictionaryExtensions.cs delete mode 100644 src/Npgsql/Shims/DbDataReaderExtensions.cs delete mode 100644 src/Npgsql/Shims/DictonaryExtensions.cs delete mode 100644 
src/Npgsql/Shims/EncodingExtensions.cs delete mode 100644 src/Npgsql/Shims/ReadOnlySequenceExtensions.cs delete mode 100644 src/Npgsql/Shims/ReadOnlySpanOfCharExtensions.cs delete mode 100644 src/Npgsql/Shims/ReferenceEqualityComparer.cs delete mode 100644 src/Npgsql/Shims/RequiresPreviewFeaturesAttribute.cs delete mode 100644 src/Npgsql/Shims/StringBuilderExtensions.cs delete mode 100644 src/Npgsql/Shims/TaskExtensions.cs delete mode 100644 src/Npgsql/Shims/UnixDomainSocketEndPoint.cs delete mode 100644 src/Npgsql/Shims/WaitHandleExtensions.cs diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 6bf4ed6070..c1e63484ab 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -38,10 +38,6 @@ jobs: pg_major: 16 config: Debug test_tfm: net8.0 - - os: ubuntu-22.04 - pg_major: 16 - config: Release - test_tfm: netcoreapp3.1 - os: macos-12 pg_major: 14 config: Release diff --git a/src/Npgsql.DependencyInjection/Npgsql.DependencyInjection.csproj b/src/Npgsql.DependencyInjection/Npgsql.DependencyInjection.csproj index b3b92f69c6..357003cf07 100644 --- a/src/Npgsql.DependencyInjection/Npgsql.DependencyInjection.csproj +++ b/src/Npgsql.DependencyInjection/Npgsql.DependencyInjection.csproj @@ -3,7 +3,8 @@ Shay Rojansky - netstandard2.0;net7.0 + + net6.0;net8.0 net8.0 npgsql;postgresql;postgres;ado;ado.net;database;sql;di;dependency injection README.md @@ -12,13 +13,10 @@ - - - - + diff --git a/src/Npgsql.DependencyInjection/Properties/AssemblyInfo.cs b/src/Npgsql.DependencyInjection/Properties/AssemblyInfo.cs index 1a340b1a15..d128ee1ec1 100644 --- a/src/Npgsql.DependencyInjection/Properties/AssemblyInfo.cs +++ b/src/Npgsql.DependencyInjection/Properties/AssemblyInfo.cs @@ -1,5 +1,3 @@ using System.Runtime.CompilerServices; -#if NET5_0_OR_GREATER [module: SkipLocalsInit] -#endif diff --git a/src/Npgsql.GeoJSON/Internal/GeoJSONConverter.cs b/src/Npgsql.GeoJSON/Internal/GeoJSONConverter.cs index 8e7692384e..755c8acc19 100644 --- 
a/src/Npgsql.GeoJSON/Internal/GeoJSONConverter.cs +++ b/src/Npgsql.GeoJSON/Internal/GeoJSONConverter.cs @@ -51,9 +51,9 @@ public override ValueTask WriteAsync(PgWriter writer, T value, CancellationToken if (crsType == GeoJSONOptions.None) return null; -#if NETSTANDARD2_0 - return cachedCrs.GetOrAdd(srid, srid => + return cachedCrs.GetOrAdd(srid, static (srid, state) => { + var (crsMap, crsType) = state; var authority = crsMap.GetAuthority(srid); return authority is null @@ -61,20 +61,7 @@ public override ValueTask WriteAsync(PgWriter writer, T value, CancellationToken : new NamedCRS(crsType == GeoJSONOptions.LongCRS ? "urn:ogc:def:crs:" + authority + "::" + srid : authority + ":" + srid); - }); -#else - return cachedCrs.GetOrAdd(srid, static (srid, state) => - { - var (crsMap, crsType) = state; - var authority = crsMap.GetAuthority(srid); - - return authority is null - ? throw new InvalidOperationException($"SRID {srid} unknown in spatial_ref_sys table") - : new NamedCRS(crsType == GeoJSONOptions.LongCRS - ? "urn:ogc:def:crs:" + authority + "::" + srid - : authority + ":" + srid); - }, (crsMap, crsType)); -#endif + }, (crsMap, crsType)); }; } diff --git a/src/Npgsql.GeoJSON/Npgsql.GeoJSON.csproj b/src/Npgsql.GeoJSON/Npgsql.GeoJSON.csproj index 6441951b97..a802ca5653 100644 --- a/src/Npgsql.GeoJSON/Npgsql.GeoJSON.csproj +++ b/src/Npgsql.GeoJSON/Npgsql.GeoJSON.csproj @@ -3,7 +3,7 @@ Yoh Deadfall;Shay Rojansky GeoJSON plugin for Npgsql, allowing mapping of PostGIS geometry types to GeoJSON types. 
npgsql;postgresql;postgres;postgis;geojson;spatial;ado;ado.net;database;sql - netstandard2.0 + net6.0 net8.0 diff --git a/src/Npgsql.GeoJSON/Properties/AssemblyInfo.cs b/src/Npgsql.GeoJSON/Properties/AssemblyInfo.cs index 1a340b1a15..d128ee1ec1 100644 --- a/src/Npgsql.GeoJSON/Properties/AssemblyInfo.cs +++ b/src/Npgsql.GeoJSON/Properties/AssemblyInfo.cs @@ -1,5 +1,3 @@ using System.Runtime.CompilerServices; -#if NET5_0_OR_GREATER [module: SkipLocalsInit] -#endif diff --git a/src/Npgsql.Json.NET/Npgsql.Json.NET.csproj b/src/Npgsql.Json.NET/Npgsql.Json.NET.csproj index 0c740dca34..49707eb02f 100644 --- a/src/Npgsql.Json.NET/Npgsql.Json.NET.csproj +++ b/src/Npgsql.Json.NET/Npgsql.Json.NET.csproj @@ -3,8 +3,8 @@ Shay Rojansky Json.NET plugin for Npgsql, allowing transparent serialization/deserialization of JSON objects directly to and from the database. npgsql;postgresql;json;postgres;ado;ado.net;database;sql - netstandard2.0;net6.0 - net8.0 + net6.0 + net8.0 enable diff --git a/src/Npgsql.Json.NET/Properties/AssemblyInfo.cs b/src/Npgsql.Json.NET/Properties/AssemblyInfo.cs index 1a340b1a15..d128ee1ec1 100644 --- a/src/Npgsql.Json.NET/Properties/AssemblyInfo.cs +++ b/src/Npgsql.Json.NET/Properties/AssemblyInfo.cs @@ -1,5 +1,3 @@ using System.Runtime.CompilerServices; -#if NET5_0_OR_GREATER [module: SkipLocalsInit] -#endif diff --git a/src/Npgsql.NetTopologySuite/Npgsql.NetTopologySuite.csproj b/src/Npgsql.NetTopologySuite/Npgsql.NetTopologySuite.csproj index c36aec8652..fd2342614b 100644 --- a/src/Npgsql.NetTopologySuite/Npgsql.NetTopologySuite.csproj +++ b/src/Npgsql.NetTopologySuite/Npgsql.NetTopologySuite.csproj @@ -4,7 +4,7 @@ NetTopologySuite plugin for Npgsql, allowing mapping of PostGIS geometry types to NetTopologySuite types. 
npgsql;postgresql;postgres;postgis;spatial;nettopologysuite;nts;ado;ado.net;database;sql README.md - netstandard2.0 + net6.0 net8.0 $(NoWarn);NU5104 @@ -12,9 +12,6 @@ - - - diff --git a/src/Npgsql.NetTopologySuite/Properties/AssemblyInfo.cs b/src/Npgsql.NetTopologySuite/Properties/AssemblyInfo.cs index 1a340b1a15..d128ee1ec1 100644 --- a/src/Npgsql.NetTopologySuite/Properties/AssemblyInfo.cs +++ b/src/Npgsql.NetTopologySuite/Properties/AssemblyInfo.cs @@ -1,5 +1,3 @@ using System.Runtime.CompilerServices; -#if NET5_0_OR_GREATER [module: SkipLocalsInit] -#endif diff --git a/src/Npgsql.NodaTime/Npgsql.NodaTime.csproj b/src/Npgsql.NodaTime/Npgsql.NodaTime.csproj index a6b65d79c5..4ac9e068fa 100644 --- a/src/Npgsql.NodaTime/Npgsql.NodaTime.csproj +++ b/src/Npgsql.NodaTime/Npgsql.NodaTime.csproj @@ -4,15 +4,12 @@ NodaTime plugin for Npgsql, allowing mapping of PostgreSQL date/time types to NodaTime types. npgsql;postgresql;postgres;nodatime;date;time;ado;ado;net;database;sql README.md - netstandard2.0;net6.0 - net8.0 + net6.0 + net8.0 - - - diff --git a/src/Npgsql.NodaTime/Properties/AssemblyInfo.cs b/src/Npgsql.NodaTime/Properties/AssemblyInfo.cs index a03d5a93d6..2582e7fb33 100644 --- a/src/Npgsql.NodaTime/Properties/AssemblyInfo.cs +++ b/src/Npgsql.NodaTime/Properties/AssemblyInfo.cs @@ -1,8 +1,6 @@ using System.Runtime.CompilerServices; -#if NET5_0_OR_GREATER [module: SkipLocalsInit] -#endif [assembly: InternalsVisibleTo("Npgsql.PluginTests, PublicKey=" + "0024000004800000940000000602000000240000525341310004000001000100" + diff --git a/src/Npgsql.OpenTelemetry/Npgsql.OpenTelemetry.csproj b/src/Npgsql.OpenTelemetry/Npgsql.OpenTelemetry.csproj index d2b8e620a7..7aff759251 100644 --- a/src/Npgsql.OpenTelemetry/Npgsql.OpenTelemetry.csproj +++ b/src/Npgsql.OpenTelemetry/Npgsql.OpenTelemetry.csproj @@ -2,7 +2,7 @@ Shay Rojansky - netstandard2.0 + net6.0 net8.0 npgsql;postgresql;postgres;ado;ado.net;database;sql;opentelemetry;tracing;diagnostics;instrumentation README.md 
diff --git a/src/Npgsql.OpenTelemetry/Properties/AssemblyInfo.cs b/src/Npgsql.OpenTelemetry/Properties/AssemblyInfo.cs index 1a340b1a15..d128ee1ec1 100644 --- a/src/Npgsql.OpenTelemetry/Properties/AssemblyInfo.cs +++ b/src/Npgsql.OpenTelemetry/Properties/AssemblyInfo.cs @@ -1,5 +1,3 @@ using System.Runtime.CompilerServices; -#if NET5_0_OR_GREATER [module: SkipLocalsInit] -#endif diff --git a/src/Npgsql/Internal/Composites/Metadata/CompositeFieldInfo.cs b/src/Npgsql/Internal/Composites/Metadata/CompositeFieldInfo.cs index 1e6f321f6b..31a1b0e5f4 100644 --- a/src/Npgsql/Internal/Composites/Metadata/CompositeFieldInfo.cs +++ b/src/Npgsql/Internal/Composites/Metadata/CompositeFieldInfo.cs @@ -79,9 +79,8 @@ protected ValueTask ReadAsObject(bool async, PgConverter converter, CompositeBui else AddValue(builder, converter.ReadAsObject(reader)); return new(); -#if NET6_0_OR_GREATER + [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder))] -#endif async ValueTask Core(CompositeBuilder builder, ValueTask task) { builder.AddValue(await task.ConfigureAwait(false)); @@ -221,9 +220,7 @@ public override ValueTask Read(bool async, PgConverter converter, CompositeBuild builder.AddValue(((PgConverter)converter).Read(reader)); return new(); -#if NET6_0_OR_GREATER [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder))] -#endif async ValueTask Core(CompositeBuilder builder, ValueTask task) { builder.AddValue(await task.ConfigureAwait(false)); diff --git a/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs b/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs index c0e0e25b94..3e81867c0e 100644 --- a/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs +++ b/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs @@ -151,10 +151,8 @@ static Delegate CreateSetter(PropertyInfo info) static Expression UnboxAny(Expression expression, Type type) => type.IsValueType ? 
Expression.Unbox(expression, type) : Expression.Convert(expression, type, null); -#if !NETSTANDARD [DynamicDependency("TypedValue", typeof(StrongBox<>))] [DynamicDependency("Length", typeof(StrongBox[]))] -#endif [UnconditionalSuppressMessage("Trimming", "IL2026", Justification = "DynamicDependencies in place for the System.Linq.Expression.Property calls")] static Func CreateStrongBoxConstructor(ConstructorInfo constructorInfo) { diff --git a/src/Npgsql/Internal/Converters/ArrayConverter.cs b/src/Npgsql/Internal/Converters/ArrayConverter.cs index e5fc7f10ca..5c2ff9133f 100644 --- a/src/Npgsql/Internal/Converters/ArrayConverter.cs +++ b/src/Npgsql/Internal/Converters/ArrayConverter.cs @@ -326,9 +326,7 @@ public override ValueTask WriteAsync(PgWriter writer, T values, CancellationToke // The alternatives are: // 1. Add a virtual method and make AwaitTask call into it (bloating the vtable of all derived types). // 2. Using a delegate, meaning we add a static field + an alloc per T + metadata, slightly slower dispatch perf so overall strictly worse as well. 
-#if NET6_0_OR_GREATER [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder))] -#endif private protected static async ValueTask AwaitTask(Task task, Continuation continuation, object collection, int[] indices) { await task.ConfigureAwait(false); diff --git a/src/Npgsql/Internal/Converters/AsyncHelpers.cs b/src/Npgsql/Internal/Converters/AsyncHelpers.cs index 6661ffed58..ae824019b3 100644 --- a/src/Npgsql/Internal/Converters/AsyncHelpers.cs +++ b/src/Npgsql/Internal/Converters/AsyncHelpers.cs @@ -32,11 +32,8 @@ abstract class CompletionSource sealed class CompletionSource : CompletionSource { -#if NETSTANDARD - AsyncValueTaskMethodBuilder _amb = AsyncValueTaskMethodBuilder.Create(); -#else PoolingAsyncValueTaskMethodBuilder _amb = PoolingAsyncValueTaskMethodBuilder.Create(); -#endif + public ValueTask Task => _amb.Task; public void SetResult(T value) diff --git a/src/Npgsql/Internal/Converters/JsonConverter.cs b/src/Npgsql/Internal/Converters/JsonConverter.cs index 7e89489674..3591bbacda 100644 --- a/src/Npgsql/Internal/Converters/JsonConverter.cs +++ b/src/Npgsql/Internal/Converters/JsonConverter.cs @@ -114,21 +114,11 @@ public static bool TryReadStream(bool jsonb, Encoding encoding, PgReader reader, var isUtf8 = encoding.CodePage == Encoding.UTF8.CodePage; byteCount = reader.CurrentRemaining; // We always fall back to buffers on older targets - if (isUtf8 -#if !NETSTANDARD - || byteCount >= StreamingThreshold -#endif - ) + if (isUtf8 || byteCount >= StreamingThreshold) { - stream = -#if !NETSTANDARD - !isUtf8 + stream = !isUtf8 ? 
Encoding.CreateTranscodingStream(reader.GetStream(), encoding, Encoding.UTF8) : reader.GetStream(); -#else - reader.GetStream(); - Debug.Assert(isUtf8); -#endif } else stream = null; diff --git a/src/Npgsql/Internal/Converters/Networking/NpgsqlInetConverter.cs b/src/Npgsql/Internal/Converters/Networking/NpgsqlInetConverter.cs index f3af04e80a..26ce7cfa96 100644 --- a/src/Npgsql/Internal/Converters/Networking/NpgsqlInetConverter.cs +++ b/src/Npgsql/Internal/Converters/Networking/NpgsqlInetConverter.cs @@ -44,11 +44,7 @@ internal static (IPAddress Address, byte Netmask) ReadImpl(PgReader reader, bool var numBytes = reader.ReadByte(); Span bytes = stackalloc byte[numBytes]; reader.Read(bytes); -#if NETSTANDARD2_0 - return (new IPAddress(bytes.ToArray()), mask); -#else return (new IPAddress(bytes), mask); -#endif } protected override void WriteCore(PgWriter writer, NpgsqlInet value) diff --git a/src/Npgsql/Internal/Converters/Primitive/ByteaConverters.cs b/src/Npgsql/Internal/Converters/Primitive/ByteaConverters.cs index f7760f836c..fce9fa93bd 100644 --- a/src/Npgsql/Internal/Converters/Primitive/ByteaConverters.cs +++ b/src/Npgsql/Internal/Converters/Primitive/ByteaConverters.cs @@ -26,9 +26,7 @@ public override void Write(PgWriter writer, T value) public override ValueTask WriteAsync(PgWriter writer, T value, CancellationToken cancellationToken = default) => writer.WriteBytesAsync(ConvertTo(value), cancellationToken); -#if NET6_0_OR_GREATER [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder<>))] -#endif async ValueTask Read(bool async, PgReader reader, CancellationToken cancellationToken) { var bytes = new byte[reader.CurrentRemaining]; @@ -145,11 +143,7 @@ public override ValueTask WriteAsync(PgWriter writer, Stream value, Cancellation } else { -#if NETSTANDARD2_0 - return new ValueTask(value.CopyToAsync(writer.GetStream())); -#else return new ValueTask(value.CopyToAsync(writer.GetStream(), cancellationToken)); -#endif } } } diff --git 
a/src/Npgsql/Internal/Converters/Primitive/PgMoney.cs b/src/Npgsql/Internal/Converters/Primitive/PgMoney.cs index 495e2a8aba..dc8755de1f 100644 --- a/src/Npgsql/Internal/Converters/Primitive/PgMoney.cs +++ b/src/Npgsql/Internal/Converters/Primitive/PgMoney.cs @@ -50,55 +50,11 @@ static void GetDecimalBits(decimal value, Span destination, out short scal { Debug.Assert(destination.Length >= DecimalBits); -#if NETSTANDARD - var raw = new DecimalRaw(value); - destination[0] = raw.Low; - destination[1] = raw.Mid; - destination[2] = raw.High; - destination[3] = (uint)raw.Flags; - scale = raw.Scale; -#else decimal.GetBits(value, MemoryMarshal.Cast(destination)); -#endif #if NET7_0_OR_GREATER scale = value.Scale; #else scale = (byte)(destination[3] >> 16); #endif } - -#if NETSTANDARD - // Zero-alloc access to the decimal bits on netstandard. - [StructLayout(LayoutKind.Explicit)] - readonly struct DecimalRaw - { - const int ScaleMask = 0x00FF0000; - const int ScaleShift = 16; - - // Do not change the order in which these fields are declared. It - // should be same as in the System.Decimal.DecCalc struct. - [FieldOffset(0)] - readonly decimal _value; - [FieldOffset(0)] - readonly int _flags; - [FieldOffset(4)] - readonly uint _high; - [FieldOffset(8)] - readonly ulong _low64; - - // Convenience aliased fields but their usage needs to take endianness into account. - [FieldOffset(8)] - readonly uint _low; - [FieldOffset(12)] - readonly uint _mid; - - public DecimalRaw(decimal value) : this() => _value = value; - - public uint High => _high; - public uint Mid => BitConverter.IsLittleEndian ? _mid : _low; - public uint Low => BitConverter.IsLittleEndian ? 
_low : _mid; - public int Flags => _flags; - public short Scale => (short)((_flags & ScaleMask) >> ScaleShift); - } -#endif } diff --git a/src/Npgsql/Internal/Converters/Primitive/PgNumeric.cs b/src/Npgsql/Internal/Converters/Primitive/PgNumeric.cs index fad0fd50a9..fd01b6a4f1 100644 --- a/src/Npgsql/Internal/Converters/Primitive/PgNumeric.cs +++ b/src/Npgsql/Internal/Converters/Primitive/PgNumeric.cs @@ -38,16 +38,8 @@ static void GetDecimalBits(decimal value, Span destination, out short scal { Debug.Assert(destination.Length >= DecimalBits); -#if NETSTANDARD - var raw = new DecimalRaw(value); - destination[0] = raw.Low; - destination[1] = raw.Mid; - destination[2] = raw.High; - destination[3] = (uint)raw.Flags; - scale = raw.Scale; -#else decimal.GetBits(value, MemoryMarshal.Cast(destination)); -#endif + #if NET7_0_OR_GREATER scale = value.Scale; #else @@ -65,16 +57,9 @@ public static int GetDigitCount(decimal value) public static int GetDigitCount(BigInteger value) { -# if NETSTANDARD2_0 - var bits = value.ToByteArray().AsSpan(); - // Detect the presence of a padding byte and slice it away (as we don't have isUnsigned: true overloads on ns2.0). - if (value.Sign == 1 && bits.Length > 2 && (bits[bits.Length - 2] & 0x80) != 0 && bits[bits.Length - 1] == 0) - bits = bits.Slice(0, bits.Length - 1); - var uintRoundedByteCount = (bits.Length + (sizeof(uint) - 1)) / sizeof(uint) * sizeof(uint); -# else var absValue = BigInteger.Abs(value); // isUnsigned: true fails for negative values. var uintRoundedByteCount = (absValue.GetByteCount(isUnsigned: true) + (sizeof(uint) - 1)) / sizeof(uint) * sizeof(uint); -#endif + byte[]? uintRoundedBitsFromPool = null; var uintRoundedBits = (uintRoundedByteCount <= StackAllocByteThreshold ? stackalloc byte[StackAllocByteThreshold] @@ -83,12 +68,9 @@ public static int GetDigitCount(BigInteger value) // Fill the last uint worth of bytes as it may only be partially written to. 
uintRoundedBits.Slice(uintRoundedBits.Length - sizeof(uint)).Fill(0); -#if NETSTANDARD2_0 - bits.CopyTo(uintRoundedBits); -#else var success = absValue.TryWriteBytes(uintRoundedBits, out _, isUnsigned: true); Debug.Assert(success); -#endif + var uintBits = MemoryMarshal.Cast(uintRoundedBits); if (!BitConverter.IsLittleEndian) for (var i = 0; i < uintBits.Length; i++) @@ -220,16 +202,9 @@ public Builder(decimal value, Span destination) /// If the destination ends up being too small the builder allocates instead public Builder(BigInteger value, Span destination) { -# if NETSTANDARD2_0 - var bits = value.ToByteArray().AsSpan(); - // Detect the presence of a padding byte and slice it away (as we don't have isUnsigned: true overloads on ns2.0). - if (value.Sign == 1 && bits.Length > 2 && (bits[bits.Length - 2] & 0x80) != 0 && bits[bits.Length - 1] == 0) - bits = bits.Slice(0, bits.Length - 1); - var uintRoundedByteCount = (bits.Length + (sizeof(uint) - 1)) / sizeof(uint) * sizeof(uint); -# else var absValue = BigInteger.Abs(value); // isUnsigned: true fails for negative values. var uintRoundedByteCount = (absValue.GetByteCount(isUnsigned: true) + (sizeof(uint) - 1)) / sizeof(uint) * sizeof(uint); -#endif + byte[]? uintRoundedBitsFromPool = null; var uintRoundedBits = (uintRoundedByteCount <= StackAllocByteThreshold ? stackalloc byte[StackAllocByteThreshold] @@ -238,12 +213,8 @@ public Builder(BigInteger value, Span destination) // Fill the last uint worth of bytes as it may only be partially written to. uintRoundedBits.Slice(uintRoundedBits.Length - sizeof(uint)).Fill(0); -#if NETSTANDARD2_0 - bits.CopyTo(uintRoundedBits); -#else var success = absValue.TryWriteBytes(uintRoundedBits, out _, isUnsigned: true); Debug.Assert(success); -#endif var uintBits = MemoryMarshal.Cast(uintRoundedBits); // Our calculations are all done in little endian, meaning the least significant *uint* is first, just like in BigInteger. 
@@ -424,39 +395,4 @@ internal static BigInteger ToBigInteger(short weight, ushort sign, Span d return sign == SignNegative ? -result : result; } } - -#if NETSTANDARD - // Zero-alloc access to the decimal bits on netstandard. - [StructLayout(LayoutKind.Explicit)] - readonly struct DecimalRaw - { - const int ScaleMask = 0x00FF0000; - const int ScaleShift = 16; - - // Do not change the order in which these fields are declared. It - // should be same as in the System.Decimal.DecCalc struct. - [FieldOffset(0)] - readonly decimal _value; - [FieldOffset(0)] - readonly int _flags; - [FieldOffset(4)] - readonly uint _high; - [FieldOffset(8)] - readonly ulong _low64; - - // Convenience aliased fields but their usage needs to take endianness into account. - [FieldOffset(8)] - readonly uint _low; - [FieldOffset(12)] - readonly uint _mid; - - public DecimalRaw(decimal value) : this() => _value = value; - - public uint High => _high; - public uint Mid => BitConverter.IsLittleEndian ? _mid : _low; - public uint Low => BitConverter.IsLittleEndian ? _low : _mid; - public int Flags => _flags; - public short Scale => (short)((_flags & ScaleMask) >> ScaleShift); - } -#endif } diff --git a/src/Npgsql/Internal/Converters/Primitive/TextConverters.cs b/src/Npgsql/Internal/Converters/Primitive/TextConverters.cs index 8fc04f1360..b0e3a1b5bd 100644 --- a/src/Npgsql/Internal/Converters/Primitive/TextConverters.cs +++ b/src/Npgsql/Internal/Converters/Primitive/TextConverters.cs @@ -46,9 +46,7 @@ ValueTask Read(bool async, PgReader reader, Encoding encoding, CancellationTo ? 
ReadAsync(reader, encoding, cancellationToken) : new(ConvertFrom(encoding.GetString(reader.ReadBytes(reader.CurrentRemaining)))); -#if NET6_0_OR_GREATER [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder<>))] -#endif async ValueTask ReadAsync(PgReader reader, Encoding encoding, CancellationToken cancellationToken) => ConvertFrom(encoding.GetString(await reader.ReadBytesAsync(reader.CurrentRemaining, cancellationToken).ConfigureAwait(false))); } @@ -100,9 +98,7 @@ ValueTask Read(bool async, PgReader reader, Encoding encoding) { return async ? ReadAsync(reader, encoding) : new(ConvertFrom(GetSegment(reader.ReadBytes(reader.CurrentRemaining), encoding))); -#if NET6_0_OR_GREATER [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder<>))] -#endif async ValueTask ReadAsync(PgReader reader, Encoding encoding) => ConvertFrom(GetSegment(await reader.ReadBytesAsync(reader.CurrentRemaining).ConfigureAwait(false), encoding)); @@ -157,7 +153,7 @@ protected override char ReadCore(PgReader reader) { var byteSeq = reader.ReadBytes(Math.Min(_oneCharMaxByteCount.Value, reader.CurrentRemaining)); Debug.Assert(byteSeq.IsSingleSegment); - var bytes = byteSeq.GetFirstSpan(); + var bytes = byteSeq.FirstSpan; var chars = _encoding.GetCharCount(bytes); if (chars < 1) @@ -259,21 +255,13 @@ static int ConsumeChars(TextReader reader, int? count) return 0; const int maxStackAlloc = 512; -#if NETSTANDARD - var tempCharBuf = new char[maxStackAlloc]; -#else Span tempCharBuf = stackalloc char[maxStackAlloc]; -#endif var totalRead = 0; var fin = false; while (!fin) { var toRead = count is null ? 
maxStackAlloc : Math.Min(maxStackAlloc, count.Value - totalRead); -#if NETSTANDARD - var read = reader.ReadBlock(tempCharBuf, 0, toRead); -#else var read = reader.ReadBlock(tempCharBuf.Slice(0, toRead)); -#endif totalRead += read; if (count is not null && read is 0) throw new EndOfStreamException(); diff --git a/src/Npgsql/Internal/Converters/Temporal/DateConverters.cs b/src/Npgsql/Internal/Converters/Temporal/DateConverters.cs index 261d305439..79aabf1d58 100644 --- a/src/Npgsql/Internal/Converters/Temporal/DateConverters.cs +++ b/src/Npgsql/Internal/Converters/Temporal/DateConverters.cs @@ -52,7 +52,6 @@ protected override void WriteCore(PgWriter writer, DateTime value) } } -#if NET6_0_OR_GREATER sealed class DateOnlyDateConverter : PgBufferedConverter { readonly bool _dateTimeInfinityConversions; @@ -100,4 +99,3 @@ protected override void WriteCore(PgWriter writer, DateOnly value) writer.WriteInt32(value.DayNumber - BaseValue.DayNumber); } } -#endif diff --git a/src/Npgsql/Internal/Converters/Temporal/TimeConverters.cs b/src/Npgsql/Internal/Converters/Temporal/TimeConverters.cs index e756a03b85..b93a878032 100644 --- a/src/Npgsql/Internal/Converters/Temporal/TimeConverters.cs +++ b/src/Npgsql/Internal/Converters/Temporal/TimeConverters.cs @@ -14,7 +14,6 @@ public override bool CanConvert(DataFormat format, out BufferRequirements buffer protected override void WriteCore(PgWriter writer, TimeSpan value) => writer.WriteInt64(value.Ticks / 10); } -#if NET6_0_OR_GREATER sealed class TimeOnlyTimeConverter : PgBufferedConverter { public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) @@ -25,7 +24,6 @@ public override bool CanConvert(DataFormat format, out BufferRequirements buffer protected override TimeOnly ReadCore(PgReader reader) => new(reader.ReadInt64() * 10); protected override void WriteCore(PgWriter writer, TimeOnly value) => writer.WriteInt64(value.Ticks / 10); } -#endif sealed class DateTimeOffsetTimeTzConverter : 
PgBufferedConverter { diff --git a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs index 6eeb0fa44b..8fe1bfe402 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs @@ -250,10 +250,8 @@ internal void AuthenticateSASLSha256Plus(ref string mechanism, ref string cbindF } } -#if NET6_0_OR_GREATER static byte[] Hi(string str, byte[] salt, int count) => Rfc2898DeriveBytes.Pbkdf2(str, salt, count, HashAlgorithmName.SHA256, 256 / 8); -#endif static byte[] Xor(byte[] buffer1, byte[] buffer2) { diff --git a/src/Npgsql/Internal/NpgsqlConnector.OldAuth.cs b/src/Npgsql/Internal/NpgsqlConnector.OldAuth.cs index e750e730cb..6d60251773 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.OldAuth.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.OldAuth.cs @@ -14,29 +14,6 @@ namespace Npgsql.Internal; partial class NpgsqlConnector { -#if !NET6_0_OR_GREATER - static byte[] Hi(string str, byte[] salt, int count) - { - using var hmac = new HMACSHA256(Encoding.UTF8.GetBytes(str)); - var salt1 = new byte[salt.Length + 4]; - byte[] hi, u1; - - Buffer.BlockCopy(salt, 0, salt1, 0, salt.Length); - salt1[salt1.Length - 1] = 1; - - hi = u1 = hmac.ComputeHash(salt1); - - for (var i = 1; i < count; i++) - { - var u2 = hmac.ComputeHash(u1); - NpgsqlConnector.Xor(hi, u2); - u1 = u2; - } - - return hi; - } -#endif - #if !NET7_0_OR_GREATER internal async Task AuthenticateGSS(bool async) { diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index cf10f2a081..4f644d0b6a 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -806,7 +806,6 @@ internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, X509Certificate2? cert = null; if (Path.GetExtension(certPath).ToUpperInvariant() != ".PFX") { -#if NET5_0_OR_GREATER // It's PEM time var keyPath = Settings.SslKey ?? PostgresEnvironment.SslKey ?? 
PostgresEnvironment.SslKeyDefault; cert = string.IsNullOrEmpty(password) @@ -819,13 +818,6 @@ internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, using var previousCert = cert; cert = new X509Certificate2(cert.Export(X509ContentType.Pkcs12)); } - -#else - // Technically PEM certificates are supported as of .NET 5 but we don't build for the net5.0 - // TFM anymore since .NET 5 is out of support - // This is a breaking change for .NET 5 as of Npgsql 8! - throw new NotSupportedException("PEM certificates are only supported with .NET 6 and higher"); -#endif } cert ??= new X509Certificate2(certPath, password); @@ -884,16 +876,10 @@ internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, { var sslStream = new SslStream(_stream, leaveInnerStreamOpen: false, certificateValidationCallback); - var sslProtocols = SslProtocols.None; -#if NETSTANDARD2_0 - // On .NET Framework SslProtocols.None can be disabled, see #3718 - sslProtocols = SslProtocols.Tls | SslProtocols.Tls11 | SslProtocols.Tls12; -#endif - if (async) - await sslStream.AuthenticateAsClientAsync(Host, clientCertificates, sslProtocols, checkCertificateRevocation).ConfigureAwait(false); + await sslStream.AuthenticateAsClientAsync(Host, clientCertificates, SslProtocols.None, checkCertificateRevocation).ConfigureAwait(false); else - sslStream.AuthenticateAsClient(Host, clientCertificates, sslProtocols, checkCertificateRevocation); + sslStream.AuthenticateAsClient(Host, clientCertificates, SslProtocols.None, checkCertificateRevocation); _stream = sslStream; } @@ -987,11 +973,7 @@ void Connect(NpgsqlTimeout timeout) async Task ConnectAsync(NpgsqlTimeout timeout, CancellationToken cancellationToken) { Task GetHostAddressesAsync(CancellationToken ct) => -#if NET6_0_OR_GREATER Dns.GetHostAddressesAsync(Host, ct); -#else - Dns.GetHostAddressesAsync(Host); -#endif // Whether the framework and/or the OS platform support Dns.GetHostAddressesAsync cancellation API or they do 
not, // we always fake-cancel the operation with the help of TaskTimeoutAndCancellation.ExecuteAsync. It stops waiting @@ -1057,11 +1039,7 @@ static Task OpenSocketConnectionAsync(Socket socket, EndPoint endpoint, NpgsqlTi // we always fake-cancel the operation with the help of TaskTimeoutAndCancellation.ExecuteAsync. It stops waiting // and raises the exception, while the actual task may be left running. Task ConnectAsync(CancellationToken ct) => -#if NET5_0_OR_GREATER socket.ConnectAsync(endpoint, ct).AsTask(); -#else - socket.ConnectAsync(endpoint); -#endif return TaskTimeoutAndCancellation.ExecuteAsync(ConnectAsync, perIpTimeout, cancellationToken); } } @@ -1094,34 +1072,9 @@ void SetSocketOptions(Socket socket) ? Settings.TcpKeepAliveInterval : Settings.TcpKeepAliveTime; -#if NETSTANDARD2_0 || NETSTANDARD2_1 - var timeMilliseconds = timeSeconds * 1000; - var intervalMilliseconds = intervalSeconds * 1000; - - // For the following see https://msdn.microsoft.com/en-us/library/dd877220.aspx - var uintSize = Marshal.SizeOf(typeof(uint)); - var inOptionValues = new byte[uintSize * 3]; - BitConverter.GetBytes((uint)1).CopyTo(inOptionValues, 0); - BitConverter.GetBytes((uint)timeMilliseconds).CopyTo(inOptionValues, uintSize); - BitConverter.GetBytes((uint)intervalMilliseconds).CopyTo(inOptionValues, uintSize * 2); - var result = 0; - try - { - result = socket.IOControl(IOControlCode.KeepAliveValues, inOptionValues, null); - } - catch (PlatformNotSupportedException) - { - throw new PlatformNotSupportedException("Setting TCP Keepalive Time and TCP Keepalive Interval is supported only on Windows, Mono and .NET Core 3.1+. 
" + - "TCP keepalives can still be used on other systems but are enabled via the TcpKeepAlive option or configured globally for the machine, see the relevant docs."); - } - - if (result != 0) - throw new NpgsqlException($"Got non-zero value when trying to set TCP keepalive: {result}"); -#else socket.SetSocketOption(SocketOptionLevel.Socket, SocketOptionName.KeepAlive, true); socket.SetSocketOption(SocketOptionLevel.Tcp, SocketOptionName.TcpKeepAliveTime, timeSeconds); socket.SetSocketOption(SocketOptionLevel.Tcp, SocketOptionName.TcpKeepAliveInterval, intervalSeconds); -#endif } } @@ -1286,9 +1239,7 @@ internal ValueTask ReadMessage( return new ValueTask(ParseServerMessage(ReadBuffer, messageCode, len, false))!; } -#if NET6_0_OR_GREATER [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder<>))] -#endif async ValueTask ReadMessageLong( bool async, DataRowLoadingMode dataRowLoadingMode, @@ -1686,19 +1637,15 @@ static RemoteCertificateValidationCallback SslRootValidation(bool verifyFull, st else { Debug.Assert(caCertificate is null); -#if NET5_0_OR_GREATER if (Path.GetExtension(certRootPath).ToUpperInvariant() != ".PFX") certs.ImportFromPemFile(certRootPath); -#endif if (certs.Count == 0) certs.Add(new X509Certificate2(certRootPath)); } -#if NET5_0_OR_GREATER chain.ChainPolicy.CustomTrustStore.AddRange(certs); chain.ChainPolicy.TrustMode = X509ChainTrustMode.CustomRootTrust; -#endif chain.ChainPolicy.ExtraStore.AddRange(certs); diff --git a/src/Npgsql/Internal/NpgsqlReadBuffer.Stream.cs b/src/Npgsql/Internal/NpgsqlReadBuffer.Stream.cs index e99b77fa1b..78e17d4a82 100644 --- a/src/Npgsql/Internal/NpgsqlReadBuffer.Stream.cs +++ b/src/Npgsql/Internal/NpgsqlReadBuffer.Stream.cs @@ -9,9 +9,6 @@ namespace Npgsql.Internal; sealed partial class NpgsqlReadBuffer { internal sealed class ColumnStream : Stream -#if NETSTANDARD2_0 - , IAsyncDisposable -#endif { readonly NpgsqlConnector _connector; readonly NpgsqlReadBuffer _buf; @@ -154,11 +151,7 @@ public override Task 
ReadAsync(byte[] buffer, int offset, int count, Cancel return ReadAsync(new Memory(buffer, offset, count), cancellationToken).AsTask(); } -#if NETSTANDARD2_0 - public int Read(Span span) -#else public override int Read(Span span) -#endif { CheckDisposed(); @@ -173,11 +166,7 @@ public override int Read(Span span) return read; } -#if NETSTANDARD2_0 - public ValueTask ReadAsync(Memory buffer, CancellationToken cancellationToken = default) -#else public override ValueTask ReadAsync(Memory buffer, CancellationToken cancellationToken = default) -#endif { CheckDisposed(); @@ -208,11 +197,7 @@ void CheckDisposed() protected override void Dispose(bool disposing) => DisposeAsync(disposing, async: false).GetAwaiter().GetResult(); -#if NETSTANDARD2_0 - public ValueTask DisposeAsync() -#else public override ValueTask DisposeAsync() -#endif => DisposeAsync(disposing: true, async: true); async ValueTask DisposeAsync(bool disposing, bool async) diff --git a/src/Npgsql/Internal/NpgsqlReadBuffer.cs b/src/Npgsql/Internal/NpgsqlReadBuffer.cs index 3578688200..e9dee8dfc4 100644 --- a/src/Npgsql/Internal/NpgsqlReadBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlReadBuffer.cs @@ -161,21 +161,12 @@ int ReadWithTimeout(Span buffer) case IOException e when (e.InnerException as SocketException)?.SocketErrorCode == (Type.GetType("Mono.Runtime") == null ? SocketError.TimedOut : SocketError.WouldBlock): { - var isStreamBroken = false; -#if NETSTANDARD2_0 - // SslStream on .NET Framework treats any IOException (including timeouts) as fatal and may - // return garbage if reused. To prevent this, we flow down and break the connection immediately. - // See #4305. - isStreamBroken = connector.IsSecure && ex is IOException; -#endif - // If we should attempt PostgreSQL cancellation, do it the first time we get a timeout. 
// TODO: As an optimization, we can still attempt to send a cancellation request, but after // that immediately break the connection if (connector.AttemptPostgresCancellation && !connector.PostgresCancellationPerformed && - connector.PerformPostgresCancellation() && - !isStreamBroken) + connector.PerformPostgresCancellation()) { // Note that if the cancellation timeout is negative, we flow down and break the // connection immediately. @@ -229,20 +220,13 @@ async ValueTask ReadWithTimeoutAsync(Memory buffer, CancellationToken (Type.GetType("Mono.Runtime") == null ? SocketError.TimedOut : SocketError.WouldBlock): { Debug.Assert(ex is OperationCanceledException); - var isStreamBroken = false; -#if NETSTANDARD2_0 - // SslStream on .NET Framework treats any IOException (including timeouts) as fatal and may - // return garbage if reused. To prevent this, we flow down and break the connection immediately. - // See #4305. - isStreamBroken = connector.IsSecure && ex is IOException; -#endif + // If we should attempt PostgreSQL cancellation, do it the first time we get a timeout. // TODO: As an optimization, we can still attempt to send a cancellation request, but after // that immediately break the connection if (connector.AttemptPostgresCancellation && !connector.PostgresCancellationPerformed && - connector.PerformPostgresCancellation() && - !isStreamBroken) + connector.PerformPostgresCancellation()) { // Note that if the cancellation timeout is negative, we flow down and break the // connection immediately. @@ -287,9 +271,7 @@ internal ValueTask Ensure(int count, bool async, bool readingNotifications) { return count <= ReadBytesLeft ? new() : EnsureLong(this, count, async, readingNotifications); -#if NET6_0_OR_GREATER [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder))] -#endif static async ValueTask EnsureLong( NpgsqlReadBuffer buffer, int count, @@ -360,17 +342,10 @@ static async ValueTask EnsureLong( { Debug.Assert(e is OperationCanceledException ? 
async : !async); - var isStreamBroken = false; -#if NETSTANDARD2_0 - // SslStream on .NET Framework treats any IOException (including timeouts) as fatal and may - // return garbage if reused. To prevent this, we flow down and break the connection immediately. - // See #4305. - isStreamBroken = connector.IsSecure && e is IOException; -#endif // When reading notifications (Wait), just throw TimeoutException or // OperationCanceledException immediately. // Nothing to cancel, and no breaking of the connection. - if (readingNotifications && !isStreamBroken) + if (readingNotifications) throw CreateException(connector); // If we should attempt PostgreSQL cancellation, do it the first time we get a timeout. @@ -378,8 +353,7 @@ static async ValueTask EnsureLong( // that immediately break the connection if (connector.AttemptPostgresCancellation && !connector.PostgresCancellationPerformed && - connector.PerformPostgresCancellation() && - !isStreamBroken) + connector.PerformPostgresCancellation()) { // Note that if the cancellation timeout is negative, we flow down and break the // connection immediately. diff --git a/src/Npgsql/Internal/PgStreamingConverter.cs b/src/Npgsql/Internal/PgStreamingConverter.cs index 70a45026db..7a9a6ce72d 100644 --- a/src/Npgsql/Internal/PgStreamingConverter.cs +++ b/src/Npgsql/Internal/PgStreamingConverter.cs @@ -68,9 +68,7 @@ static class PgStreamingConverterHelpers { // Split out from the generic class to amortize the huge size penalty per async state machine, which would otherwise be per // instantiation. 
-#if NET6_0_OR_GREATER [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder<>))] -#endif public static async ValueTask AwaitTask(Task task, Continuation continuation) { await task.ConfigureAwait(false); diff --git a/src/Npgsql/Internal/PgWriter.cs b/src/Npgsql/Internal/PgWriter.cs index 2325683b68..a0fa545718 100644 --- a/src/Npgsql/Internal/PgWriter.cs +++ b/src/Npgsql/Internal/PgWriter.cs @@ -259,24 +259,16 @@ public void WriteUInt64(ulong value) public void WriteFloat(float value) { -#if NET5_0_OR_GREATER Ensure(sizeof(float)); BinaryPrimitives.WriteSingleBigEndian(Span, value); Advance(sizeof(float)); -#else - WriteUInt32(Unsafe.As(ref value)); -#endif } public void WriteDouble(double value) { -#if NET5_0_OR_GREATER Ensure(sizeof(double)); BinaryPrimitives.WriteDoubleBigEndian(Span, value); Advance(sizeof(double)); -#else - WriteUInt64(Unsafe.As(ref value)); -#endif } public void WriteChars(ReadOnlySpan data, Encoding encoding) @@ -467,9 +459,8 @@ internal ValueTask BeginNestedWrite(bool async, Size bufferReq _current = new() { Format = _current.Format, Size = byteCount, BufferRequirement = bufferRequirement, WriteState = state }; return new(new NestedWriteScope()); -#if NET6_0_OR_GREATER + [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder<>))] -#endif async ValueTask Core(bool async, Size bufferRequirement, int byteCount, object? 
state, CancellationToken cancellationToken) { await Flush(async, cancellationToken).ConfigureAwait(false); @@ -524,7 +515,6 @@ Task Write(bool async, byte[] buffer, int offset, int count, CancellationToken c return Task.CompletedTask; } -#if !NETSTANDARD2_0 public override void Write(ReadOnlySpan buffer) => _writer.WriteBytes(_allowMixedIO, buffer); public override ValueTask WriteAsync(ReadOnlyMemory buffer, CancellationToken cancellationToken = default) @@ -534,7 +524,6 @@ public override ValueTask WriteAsync(ReadOnlyMemory buffer, CancellationTo return _writer.WriteBytesAsync(buffer, cancellationToken); } -#endif public override void Flush() => _writer.Flush(); diff --git a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Multirange.cs b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Multirange.cs index 753f216e53..ada1c9a3b5 100644 --- a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Multirange.cs +++ b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Multirange.cs @@ -62,10 +62,7 @@ public static void ThrowIfMultirangeUnsupported(Type? type, DataTypeNa var matchingArguments = new[] { - typeof(int), typeof(long), typeof(decimal), typeof(DateTime), -# if NET6_0_OR_GREATER - typeof(DateOnly) -#endif + typeof(int), typeof(long), typeof(decimal), typeof(DateTime), typeof(DateOnly) }; // If we don't know more than the clr type, default to a Multirange kind over Array as they share the same types. 
@@ -235,17 +232,15 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) static (options, mapping, _) => mapping.CreateInfo(options, CreateListMultirangeConverter( CreateRangeConverter(new DateTimeDateConverter(options.EnableDateTimeInfinityConversions), options), options))); - #if NET6_0_OR_GREATER - mappings.AddType[]>(DataTypeNames.DateMultirange, - static (options, mapping, _) => - mapping.CreateInfo(options, CreateArrayMultirangeConverter( - CreateRangeConverter(new DateOnlyDateConverter(options.EnableDateTimeInfinityConversions), options), options)), - isDefault: true); - mappings.AddType>>(DataTypeNames.DateMultirange, - static (options, mapping, _) => - mapping.CreateInfo(options, CreateListMultirangeConverter( - CreateRangeConverter(new DateOnlyDateConverter(options.EnableDateTimeInfinityConversions), options), options))); - #endif + mappings.AddType[]>(DataTypeNames.DateMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateArrayMultirangeConverter( + CreateRangeConverter(new DateOnlyDateConverter(options.EnableDateTimeInfinityConversions), options), options)), + isDefault: true); + mappings.AddType>>(DataTypeNames.DateMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateListMultirangeConverter( + CreateRangeConverter(new DateOnlyDateConverter(options.EnableDateTimeInfinityConversions), options), options))); return mappings; } @@ -310,10 +305,8 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) // datemultirange mappings.AddArrayType[]>(DataTypeNames.DateMultirange); mappings.AddArrayType>>(DataTypeNames.DateMultirange); - #if NET6_0_OR_GREATER - mappings.AddArrayType[]>(DataTypeNames.DateMultirange); - mappings.AddArrayType>>(DataTypeNames.DateMultirange); - #endif + mappings.AddArrayType[]>(DataTypeNames.DateMultirange); + mappings.AddArrayType>>(DataTypeNames.DateMultirange); return mappings; } diff --git 
a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Range.cs b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Range.cs index 5e77275da5..ae4d1a294f 100644 --- a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Range.cs +++ b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Range.cs @@ -62,10 +62,7 @@ public static void ThrowIfRangeUnsupported(Type? type, DataTypeName? d var matchingArguments = new[] { - typeof(int), typeof(long), typeof(decimal), typeof(DateTime), -# if NET6_0_OR_GREATER - typeof(DateOnly) -#endif + typeof(int), typeof(long), typeof(decimal), typeof(DateTime), typeof(DateOnly) }; // If we don't know more than the clr type, default to a Multirange kind over Array as they share the same types. @@ -160,11 +157,9 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) isDefault: true); mappings.AddStructType>(DataTypeNames.DateRange, static (options, mapping, _) => mapping.CreateInfo(options, CreateRangeConverter(new Int4Converter(), options))); - #if NET6_0_OR_GREATER mappings.AddStructType>(DataTypeNames.DateRange, static (options, mapping, _) => mapping.CreateInfo(options, CreateRangeConverter(new DateOnlyDateConverter(options.EnableDateTimeInfinityConversions), options))); - #endif return mappings; } @@ -209,9 +204,7 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) // daterange mappings.AddStructArrayType>(DataTypeNames.DateRange); mappings.AddStructArrayType>(DataTypeNames.DateRange); -#if NET6_0_OR_GREATER mappings.AddStructArrayType>(DataTypeNames.DateRange); -#endif return mappings; } diff --git a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs index d0c4fde8fe..65b54efde1 100644 --- a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs @@ 
-231,10 +231,8 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) MatchRequirement.DataTypeName); mappings.AddStructType(DataTypeNames.Date, static (options, mapping, _) => mapping.CreateInfo(options, new Int4Converter())); - #if NET6_0_OR_GREATER mappings.AddStructType(DataTypeNames.Date, static (options, mapping, _) => mapping.CreateInfo(options, new DateOnlyDateConverter(options.EnableDateTimeInfinityConversions))); - #endif // Interval mappings.AddStructType(DataTypeNames.Interval, @@ -247,10 +245,8 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) static (options, mapping, _) => mapping.CreateInfo(options, new TimeSpanTimeConverter()), isDefault: true); mappings.AddStructType(DataTypeNames.Time, static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); - #if NET6_0_OR_GREATER mappings.AddStructType(DataTypeNames.Time, static (options, mapping, _) => mapping.CreateInfo(options, new TimeOnlyTimeConverter())); - #endif // TimeTz mappings.AddStructType(DataTypeNames.TimeTz, @@ -443,9 +439,7 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) // Date mappings.AddStructArrayType(DataTypeNames.Date); mappings.AddStructArrayType(DataTypeNames.Date); - #if NET6_0_OR_GREATER mappings.AddStructArrayType(DataTypeNames.Date); - #endif // Interval mappings.AddStructArrayType(DataTypeNames.Interval); @@ -454,9 +448,7 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) // Time mappings.AddStructArrayType(DataTypeNames.Time); mappings.AddStructArrayType(DataTypeNames.Time); - #if NET6_0_OR_GREATER mappings.AddStructArrayType(DataTypeNames.Time); - #endif // TimeTz mappings.AddStructArrayType(DataTypeNames.TimeTz); diff --git a/src/Npgsql/KerberosUsernameProvider.cs b/src/Npgsql/KerberosUsernameProvider.cs index a962a6fdc2..d36f2b06c9 100644 --- a/src/Npgsql/KerberosUsernameProvider.cs +++ 
b/src/Npgsql/KerberosUsernameProvider.cs @@ -48,16 +48,11 @@ sealed class KerberosUsernameProvider async ValueTask GetUsernameAsyncInternal() #pragma warning restore CS1998 { -#if NET5_0_OR_GREATER if (async) await process.WaitForExitAsync(cancellationToken).ConfigureAwait(false); else // ReSharper disable once MethodHasAsyncOverloadWithCancellation process.WaitForExit(); -#else - // ReSharper disable once MethodHasAsyncOverload - process.WaitForExit(); -#endif if (process.ExitCode != 0) { @@ -70,10 +65,8 @@ sealed class KerberosUsernameProvider // ReSharper disable once MethodHasAsyncOverload #if NET7_0_OR_GREATER if ((line = async ? await process.StandardOutput.ReadLineAsync(cancellationToken).ConfigureAwait(false) : process.StandardOutput.ReadLine()) == null) -#elif NET5_0_OR_GREATER - if ((line = async ? await process.StandardOutput.ReadLineAsync().ConfigureAwait(false) : process.StandardOutput.ReadLine()) == null) #else - if ((line = process.StandardOutput.ReadLine()) == null) + if ((line = async ? 
await process.StandardOutput.ReadLineAsync().ConfigureAwait(false) : process.StandardOutput.ReadLine()) == null) #endif { connectionLogger.LogDebug("Unexpected output from klist, aborting Kerberos username detection"); diff --git a/src/Npgsql/MetricsReporter.cs b/src/Npgsql/MetricsReporter.cs index f29f0c47e2..f806e44852 100644 --- a/src/Npgsql/MetricsReporter.cs +++ b/src/Npgsql/MetricsReporter.cs @@ -2,7 +2,6 @@ namespace Npgsql; -#if NET6_0_OR_GREATER using System.Collections.Generic; using System.Diagnostics; using System.Diagnostics.Metrics; @@ -256,20 +255,3 @@ public void Dispose() static readonly double StopWatchTickFrequency = (double)TicksPerSecond / Stopwatch.Frequency; #endif } -#else -sealed class MetricsReporter : IDisposable -{ - public MetricsReporter(NpgsqlDataSource _) {} - internal long ReportCommandStart() => 0; - internal void ReportCommandStop(long startTimestamp) {} - internal void CommandStartPrepared() {} - internal void ReportCommandFailed() {} - internal void ReportBytesWritten(long bytesWritten) {} - internal void ReportBytesRead(long bytesRead) {} - internal void ReportConnectionPoolTimeout() {} - internal void ReportPendingConnectionRequestStart() {} - internal void ReportPendingConnectionRequestStop() {} - internal void ReportConnectionCreateTime(TimeSpan duration) {} - public void Dispose() {} -} -#endif diff --git a/src/Npgsql/Npgsql.csproj b/src/Npgsql/Npgsql.csproj index 52806c98dc..5e6b6e3c89 100644 --- a/src/Npgsql/Npgsql.csproj +++ b/src/Npgsql/Npgsql.csproj @@ -5,7 +5,7 @@ Npgsql is the open source .NET data provider for PostgreSQL. 
npgsql;postgresql;postgres;ado;ado.net;database;sql README.md - netstandard2.0;netstandard2.1;net6.0;net7.0;net8.0 + net6.0;net8.0 net8.0 $(NoWarn);CA2017 diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index 940e4b8ae4..593be24efe 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -637,11 +637,7 @@ void DeriveParametersForQuery(NpgsqlConnector connector) /// /// An optional token to cancel the asynchronous operation. The default value is . /// -#if NETSTANDARD2_0 - public virtual Task PrepareAsync(CancellationToken cancellationToken = default) -#else public override Task PrepareAsync(CancellationToken cancellationToken = default) -#endif => Prepare(async: true, cancellationToken); Task Prepare(bool async, CancellationToken cancellationToken = default) diff --git a/src/Npgsql/NpgsqlConnection.cs b/src/Npgsql/NpgsqlConnection.cs index 638cd602e7..da8262636c 100644 --- a/src/Npgsql/NpgsqlConnection.cs +++ b/src/Npgsql/NpgsqlConnection.cs @@ -579,7 +579,6 @@ public override ConnectionState State /// internal NpgsqlBatch? CachedBatch { get; set; } -#if NET6_0_OR_GREATER /// public override bool CanCreateBatch => true; @@ -600,13 +599,6 @@ public override ConnectionState State return NpgsqlBatch.CreateCachedBatch(this); } -#else - /// - /// Creates and returns a object associated with the . - /// - /// A object. - public NpgsqlBatch CreateBatch() => new(this); -#endif #endregion Command / Batch creation @@ -673,7 +665,6 @@ async ValueTask BeginTransaction(bool async, IsolationLevel l } } -#if !NETSTANDARD2_0 /// /// Asynchronously begins a database transaction. /// @@ -715,7 +706,6 @@ protected override async ValueTask BeginDbTransactionAsync(Isolat /// public new ValueTask BeginTransactionAsync(IsolationLevel level, CancellationToken cancellationToken = default) => BeginTransaction(async: true, level, cancellationToken); -#endif /// /// Enlist transaction. 
@@ -783,11 +773,7 @@ public override void EnlistTransaction(Transaction? transaction) /// Releases the connection. If the connection is pooled, it will be returned to the pool and made available for re-use. /// If it is non-pooled, the physical connection will be closed. /// -#if NETSTANDARD2_0 - public Task CloseAsync() -#else public override Task CloseAsync() -#endif => Close(async: true); internal bool TakeCloseLock() => Interlocked.Exchange(ref _closing, 1) == 0; @@ -941,11 +927,7 @@ protected override void Dispose(bool disposing) /// /// Releases all resources used by the . /// -#if NETSTANDARD2_0 - public async ValueTask DisposeAsync() -#else public override async ValueTask DisposeAsync() -#endif { if (_disposed) return; @@ -1407,14 +1389,8 @@ async Task BeginRawBinaryCopy(bool async, string copyComman } } - static bool IsValidCopyCommand(string copyCommand) - { - #if NET6_0_OR_GREATER || NETSTANDARD2_1 - return copyCommand.AsSpan().TrimStart().StartsWith("COPY", StringComparison.OrdinalIgnoreCase); - #else - return copyCommand.TrimStart().StartsWith("COPY", StringComparison.OrdinalIgnoreCase); - #endif - } + static bool IsValidCopyCommand(string copyCommand) => copyCommand.AsSpan().TrimStart().StartsWith("COPY", StringComparison.OrdinalIgnoreCase); + #endregion #region Wait @@ -1719,11 +1695,7 @@ public override DataTable GetSchema(string? collectionName, string?[]? restricti /// An optional token to cancel the asynchronous operation. The default value is . /// /// The collection specified. -#if NET5_0_OR_GREATER public override Task GetSchemaAsync(CancellationToken cancellationToken = default) -#else - public Task GetSchemaAsync(CancellationToken cancellationToken = default) -#endif => GetSchemaAsync("MetaDataCollections", null, cancellationToken); /// @@ -1734,11 +1706,7 @@ public Task GetSchemaAsync(CancellationToken cancellationToken = defa /// An optional token to cancel the asynchronous operation. The default value is . 
/// /// The collection specified. -#if NET5_0_OR_GREATER public override Task GetSchemaAsync(string collectionName, CancellationToken cancellationToken = default) -#else - public Task GetSchemaAsync(string collectionName, CancellationToken cancellationToken = default) -#endif => GetSchemaAsync(collectionName, null, cancellationToken); /// @@ -1753,11 +1721,7 @@ public Task GetSchemaAsync(string collectionName, CancellationToken c /// An optional token to cancel the asynchronous operation. The default value is . /// /// The collection specified. -#if NET5_0_OR_GREATER public override Task GetSchemaAsync(string collectionName, string?[]? restrictions, CancellationToken cancellationToken = default) -#else - public Task GetSchemaAsync(string collectionName, string?[]? restrictions, CancellationToken cancellationToken = default) -#endif { return NpgsqlSchema.GetSchema(async: true, this, collectionName, restrictions, cancellationToken); } diff --git a/src/Npgsql/NpgsqlConnectionStringBuilder.cs b/src/Npgsql/NpgsqlConnectionStringBuilder.cs index 77f397c8e1..166a93ea68 100644 --- a/src/Npgsql/NpgsqlConnectionStringBuilder.cs +++ b/src/Npgsql/NpgsqlConnectionStringBuilder.cs @@ -7,7 +7,6 @@ using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using Npgsql.Internal; -using Npgsql.Netstandard20; using Npgsql.Replication; namespace Npgsql; @@ -1434,7 +1433,7 @@ internal static bool TrySplitHostPort(ReadOnlySpan originalHost, [NotNullW var ipv6End = originalHost.LastIndexOf(']'); if (otherColon == -1 || portSeparator > ipv6End && otherColon < ipv6End) { - port = originalHost.Slice(portSeparator + 1).ParseInt(); + port = int.Parse(originalHost.Slice(portSeparator + 1)); host = originalHost.Slice(0, portSeparator).ToString(); return true; } diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 9e15dd2257..681fc8c94a 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -1031,11 +1031,7 @@ protected override 
void Dispose(bool disposing) /// /// Releases the resources used by the . /// -#if NETSTANDARD2_0 - public async ValueTask DisposeAsync() -#else public override async ValueTask DisposeAsync() -#endif { try { @@ -1076,11 +1072,7 @@ static bool AllPostgresExceptions(ReadOnlyCollection collection) /// /// Closes the reader, allowing a new command to be executed. /// -#if NETSTANDARD2_0 - public Task CloseAsync() -#else public override Task CloseAsync() -#endif => Close(async: true, connectionClosing: false, isDisposing: false); internal async Task Close(bool async, bool connectionClosing, bool isDisposing) @@ -1798,11 +1790,7 @@ ReadOnlyCollection IDbColumnSchemaGenerator.GetColumnSchema() /// Asynchronously returns schema information for the columns in the current resultset. /// /// -#if NET5_0_OR_GREATER public new Task> GetColumnSchemaAsync(CancellationToken cancellationToken = default) -#else - public Task> GetColumnSchemaAsync(CancellationToken cancellationToken = default) -#endif => GetColumnSchema(async: true, cancellationToken); Task> GetColumnSchema(bool async, CancellationToken cancellationToken = default) @@ -1828,14 +1816,10 @@ Task> GetColumnSchema(bool async, Cancellatio /// [UnconditionalSuppressMessage( "Composite type mapping currently isn't trimming-safe, and warnings are generated at the MapComposite level.", "IL2026")] -#if NET5_0_OR_GREATER public override Task GetSchemaTableAsync(CancellationToken cancellationToken = default) -#else - public Task GetSchemaTableAsync(CancellationToken cancellationToken = default) -#endif => GetSchemaTable(async: true, cancellationToken); - [UnconditionalSuppressMessage("Trimming", "IL2111", Justification = "typeof(Type).TypeInitializer is not used.")] + [UnconditionalSuppressMessage("Trimming", "IL2111", Justification = "typeof(Type).TypeInitializer is not used.")] async Task GetSchemaTable(bool async, CancellationToken cancellationToken = default) { if (FieldCount == 0) // No resultset @@ -2024,9 +2008,7 @@ 
ValueTask SeekToColumnSequential(bool async, int ordinal, DataFormat dataFo return Core(async, !committed, ordinal, dataFormat, resumableOp); -#if NET6_0_OR_GREATER [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder<>))] -#endif async ValueTask Core(bool async, bool commit, int ordinal, DataFormat dataFormat, bool resumableOp) { if (commit) diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index 90cb82c1a5..3281359ab9 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -501,13 +501,7 @@ protected virtual async ValueTask DisposeAsyncBase() } if (_periodicPasswordProviderTimer is not null) - { -#if NET5_0_OR_GREATER await _periodicPasswordProviderTimer.DisposeAsync().ConfigureAwait(false); -#else - _periodicPasswordProviderTimer.Dispose(); -#endif - } _setupMappingsSemaphore.Dispose(); diff --git a/src/Npgsql/NpgsqlEventSource.cs b/src/Npgsql/NpgsqlEventSource.cs index 1e9b82c9c5..d50979bb64 100644 --- a/src/Npgsql/NpgsqlEventSource.cs +++ b/src/Npgsql/NpgsqlEventSource.cs @@ -15,7 +15,6 @@ sealed class NpgsqlEventSource : EventSource internal const int CommandStartId = 3; internal const int CommandStopId = 4; -#if !NETSTANDARD2_0 IncrementingPollingCounter? _bytesWrittenPerSecondCounter; IncrementingPollingCounter? _bytesReadPerSecondCounter; @@ -31,7 +30,6 @@ sealed class NpgsqlEventSource : EventSource PollingCounter? _multiplexingAverageCommandsPerBatchCounter; PollingCounter? 
_multiplexingAverageWriteTimePerBatchCounter; -#endif long _bytesWritten; long _bytesRead; @@ -97,12 +95,10 @@ internal void CommandFailed() internal void DataSourceCreated(NpgsqlDataSource dataSource) { -#if !NETSTANDARD2_0 lock (_dataSourcesLock) { _dataSources.Add(dataSource, null); } -#endif } internal void MultiplexingBatchSent(int numCommands, Stopwatch stopwatch) @@ -116,7 +112,6 @@ internal void MultiplexingBatchSent(int numCommands, Stopwatch stopwatch) } } -#if !NETSTANDARD2_0 double GetDataSourceCount() { lock (_dataSourcesLock) @@ -226,6 +221,4 @@ protected override void OnEventCommand(EventCommandEventArgs command) } } } - -#endif } diff --git a/src/Npgsql/NpgsqlException.cs b/src/Npgsql/NpgsqlException.cs index 57c47a514c..91eb84adef 100644 --- a/src/Npgsql/NpgsqlException.cs +++ b/src/Npgsql/NpgsqlException.cs @@ -42,26 +42,14 @@ public NpgsqlException(string? message) /// Specifies whether the exception is considered transient, that is, whether retrying the operation could /// succeed (e.g. a network error or a timeout). /// -#if NET5_0_OR_GREATER public override bool IsTransient -#else - public virtual bool IsTransient -#endif => InnerException is IOException or SocketException or TimeoutException or NpgsqlException { IsTransient: true }; -#if NET6_0_OR_GREATER /// public new NpgsqlBatchCommand? BatchCommand { get; set; } /// protected override DbBatchCommand? DbBatchCommand => BatchCommand; -#else - /// - /// If the exception was thrown as a result of executing a , references the within - /// the batch which triggered the exception. Otherwise . - /// - public NpgsqlBatchCommand? 
BatchCommand { get; set; } -#endif #region Serialization diff --git a/src/Npgsql/NpgsqlFactory.cs b/src/Npgsql/NpgsqlFactory.cs index 7d21a917a0..6df62a9cdc 100644 --- a/src/Npgsql/NpgsqlFactory.cs +++ b/src/Npgsql/NpgsqlFactory.cs @@ -48,7 +48,6 @@ public sealed class NpgsqlFactory : DbProviderFactory, IServiceProvider /// public override DbDataAdapter CreateDataAdapter() => new NpgsqlDataAdapter(); -#if !NETSTANDARD2_0 /// /// Specifies whether the specific supports the class. /// @@ -58,9 +57,7 @@ public sealed class NpgsqlFactory : DbProviderFactory, IServiceProvider /// Specifies whether the specific supports the class. /// public override bool CanCreateCommandBuilder => true; -#endif -#if NET6_0_OR_GREATER /// public override bool CanCreateBatch => true; @@ -69,7 +66,6 @@ public sealed class NpgsqlFactory : DbProviderFactory, IServiceProvider /// public override DbBatchCommand CreateBatchCommand() => new NpgsqlBatchCommand(); -#endif #if NET7_0_OR_GREATER /// diff --git a/src/Npgsql/NpgsqlMultiHostDataSource.cs b/src/Npgsql/NpgsqlMultiHostDataSource.cs index 813460b557..4b8731e5b6 100644 --- a/src/Npgsql/NpgsqlMultiHostDataSource.cs +++ b/src/Npgsql/NpgsqlMultiHostDataSource.cs @@ -53,11 +53,7 @@ internal NpgsqlMultiHostDataSource(NpgsqlConnectionStringBuilder settings, Npgsq : new UnpooledDataSource(poolSettings, dataSourceConfig); } -#if NETSTANDARD - var targetSessionAttributeValues = Enum.GetValues(typeof(TargetSessionAttributes)).Cast().ToArray(); -#else var targetSessionAttributeValues = Enum.GetValues().ToArray(); -#endif var highestValue = 0; foreach (var value in targetSessionAttributeValues) if ((int)value > highestValue) diff --git a/src/Npgsql/NpgsqlRawCopyStream.cs b/src/Npgsql/NpgsqlRawCopyStream.cs index d963e411c8..ffae8e9fc4 100644 --- a/src/Npgsql/NpgsqlRawCopyStream.cs +++ b/src/Npgsql/NpgsqlRawCopyStream.cs @@ -120,11 +120,7 @@ public override Task WriteAsync(byte[] buffer, int offset, int count, Cancellati return WriteAsync(new 
Memory(buffer, offset, count), cancellationToken).AsTask(); } -#if NETSTANDARD2_0 - public void Write(ReadOnlySpan buffer) -#else public override void Write(ReadOnlySpan buffer) -#endif { CheckDisposed(); if (!CanWrite) @@ -151,11 +147,7 @@ public override void Write(ReadOnlySpan buffer) _writeBuf.DirectWrite(buffer); } -#if NETSTANDARD2_0 - public ValueTask WriteAsync(ReadOnlyMemory buffer, CancellationToken cancellationToken = default) -#else public override ValueTask WriteAsync(ReadOnlyMemory buffer, CancellationToken cancellationToken = default) -#endif { CheckDisposed(); if (!CanWrite) @@ -221,11 +213,7 @@ public override Task ReadAsync(byte[] buffer, int offset, int count, Cancel return ReadAsync(new Memory(buffer, offset, count), cancellationToken).AsTask(); } -#if NETSTANDARD2_0 - public int Read(Span span) -#else public override int Read(Span span) -#endif { CheckDisposed(); if (!CanRead) @@ -237,11 +225,7 @@ public override int Read(Span span) return count; } -#if NETSTANDARD2_0 - public ValueTask ReadAsync(Memory buffer, CancellationToken cancellationToken) -#else public override ValueTask ReadAsync(Memory buffer, CancellationToken cancellationToken) -#endif { CheckDisposed(); if (!CanRead) @@ -365,11 +349,7 @@ async Task Cancel(bool async) protected override void Dispose(bool disposing) => DisposeAsync(disposing, false).GetAwaiter().GetResult(); -#if NETSTANDARD2_0 - public ValueTask DisposeAsync() -#else public override ValueTask DisposeAsync() -#endif => DisposeAsync(disposing: true, async: true); @@ -507,14 +487,6 @@ public void Cancel() /// Cancels and terminates an ongoing import. Any data already written will be discarded. 
/// public Task CancelAsync() => ((NpgsqlRawCopyStream)BaseStream).CancelAsync(); - -#if NETSTANDARD2_0 - public ValueTask DisposeAsync() - { - Dispose(); - return default; - } -#endif } /// diff --git a/src/Npgsql/NpgsqlSchema.cs b/src/Npgsql/NpgsqlSchema.cs index f9688744ec..ba18c0acc7 100644 --- a/src/Npgsql/NpgsqlSchema.cs +++ b/src/Npgsql/NpgsqlSchema.cs @@ -1134,11 +1134,7 @@ static async Task ParseResults(bool async, NpgsqlCommand command, Dat { if (reader is not null) await reader.DisposeAsync().ConfigureAwait(false); -#if NETSTANDARD2_0 - command.Dispose(); -#else await command.DisposeAsync().ConfigureAwait(false); -#endif } else { diff --git a/src/Npgsql/NpgsqlTransaction.cs b/src/Npgsql/NpgsqlTransaction.cs index 5ca2821cfd..6481e185af 100644 --- a/src/Npgsql/NpgsqlTransaction.cs +++ b/src/Npgsql/NpgsqlTransaction.cs @@ -143,11 +143,7 @@ async Task Commit(bool async, CancellationToken cancellationToken = default) /// /// An optional token to cancel the asynchronous operation. The default value is . /// -#if NETSTANDARD2_0 - public Task CommitAsync(CancellationToken cancellationToken = default) -#else public override Task CommitAsync(CancellationToken cancellationToken = default) -#endif => Commit(async: true, cancellationToken); #endregion @@ -179,11 +175,7 @@ async Task Rollback(bool async, CancellationToken cancellationToken = default) /// /// An optional token to cancel the asynchronous operation. The default value is . /// -#if NETSTANDARD2_0 - public Task RollbackAsync(CancellationToken cancellationToken = default) -#else public override Task RollbackAsync(CancellationToken cancellationToken = default) -#endif => Rollback(async: true, cancellationToken); #endregion @@ -198,11 +190,7 @@ public override Task RollbackAsync(CancellationToken cancellationToken = default /// This method does not cause a database roundtrip to be made. The savepoint creation statement will instead be sent along with /// the next command. 
/// -#if NET5_0_OR_GREATER public override void Save(string name) -#else - public void Save(string name) -#endif { if (name == null) throw new ArgumentNullException(nameof(name)); @@ -249,11 +237,7 @@ public void Save(string name) /// This method does not cause a database roundtrip to be made, and will therefore always complete synchronously. /// The savepoint creation statement will instead be sent along with the next command. /// -#if NET5_0_OR_GREATER public override Task SaveAsync(string name, CancellationToken cancellationToken = default) -#else - public Task SaveAsync(string name, CancellationToken cancellationToken = default) -#endif { Save(name); return Task.CompletedTask; @@ -281,11 +265,7 @@ async Task Rollback(bool async, string name, CancellationToken cancellationToken /// Rolls back a transaction from a pending savepoint state. /// /// The name of the savepoint. -#if NET5_0_OR_GREATER public override void Rollback(string name) -#else - public void Rollback(string name) -#endif => Rollback(async: false, name).GetAwaiter().GetResult(); /// @@ -295,11 +275,7 @@ public void Rollback(string name) /// /// An optional token to cancel the asynchronous operation. The default value is . /// -#if NET5_0_OR_GREATER public override Task RollbackAsync(string name, CancellationToken cancellationToken = default) -#else - public Task RollbackAsync(string name, CancellationToken cancellationToken = default) -#endif => Rollback(async: true, name, cancellationToken); async Task Release(bool async, string name, CancellationToken cancellationToken = default) @@ -324,11 +300,7 @@ async Task Release(bool async, string name, CancellationToken cancellationToken /// Releases a transaction from a pending savepoint state. /// /// The name of the savepoint. 
-#if NET5_0_OR_GREATER public override void Release(string name) -#else - public void Release(string name) -#endif => Release(async: false, name).GetAwaiter().GetResult(); /// @@ -338,21 +310,13 @@ public void Release(string name) /// /// An optional token to cancel the asynchronous operation. The default value is . /// -#if NET5_0_OR_GREATER public override Task ReleaseAsync(string name, CancellationToken cancellationToken = default) -#else - public Task ReleaseAsync(string name, CancellationToken cancellationToken = default) -#endif => Release(async: false, name, cancellationToken); /// /// Indicates whether this transaction supports database savepoints. /// -#if NET5_0_OR_GREATER public override bool SupportsSavepoints -#else - public bool SupportsSavepoints -#endif { get => _connector.DatabaseInfo.SupportsTransactions; } @@ -392,11 +356,7 @@ protected override void Dispose(bool disposing) /// /// Disposes the transaction, rolling it back if it is still pending. /// -#if NETSTANDARD2_0 - public ValueTask DisposeAsync() -#else public override ValueTask DisposeAsync() -#endif { if (!IsDisposed) { diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlLogSequenceNumber.cs b/src/Npgsql/NpgsqlTypes/NpgsqlLogSequenceNumber.cs index 00ff4131e4..b9fc6da358 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlLogSequenceNumber.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlLogSequenceNumber.cs @@ -150,13 +150,8 @@ public static bool TryParse(ReadOnlySpan s, out NpgsqlLogSequenceNumber re { if (s[i] != '/') continue; -#if NETSTANDARD2_0 - var firstPart = s.Slice(0, i).ToString(); - var secondPart = s.Slice(++i).ToString(); -#else var firstPart = s.Slice(0, i); var secondPart = s.Slice(++i); -#endif if (!uint.TryParse(firstPart, NumberStyles.AllowHexSpecifier, null, out var first)) { @@ -337,4 +332,4 @@ public static NpgsqlLogSequenceNumber Smaller(NpgsqlLogSequenceNumber value1, Np => double.IsNaN(nbytes) || double.IsInfinity(nbytes) ? 
throw new NotFiniteNumberException($"Cannot add {nbytes} to {nameof(NpgsqlLogSequenceNumber)}", nbytes) : new NpgsqlLogSequenceNumber(checked((ulong)(lsn._value + nbytes))); -} \ No newline at end of file +} diff --git a/src/Npgsql/PostgresException.cs b/src/Npgsql/PostgresException.cs index 51b8e1e543..a157f0ab87 100644 --- a/src/Npgsql/PostgresException.cs +++ b/src/Npgsql/PostgresException.cs @@ -263,11 +263,7 @@ public override bool IsTransient /// Constants are defined in . /// See https://www.postgresql.org/docs/current/static/errcodes-appendix.html /// -#if NET5_0_OR_GREATER public override string SqlState { get; } -#else - public string SqlState { get; } -#endif /// /// The primary human-readable error message. This should be accurate but terse. diff --git a/src/Npgsql/PreparedTextReader.cs b/src/Npgsql/PreparedTextReader.cs index 8862daa3e7..4831850684 100644 --- a/src/Npgsql/PreparedTextReader.cs +++ b/src/Npgsql/PreparedTextReader.cs @@ -42,11 +42,7 @@ public override int Read() : -1; } -#if NETSTANDARD2_0 - public int Read(Span buffer) -#else public override int Read(Span buffer) -#endif { CheckDisposed(); @@ -80,11 +76,7 @@ public override int Read(char[] buffer, int index, int count) public override Task ReadAsync(char[] buffer, int index, int count) => Task.FromResult(Read(buffer, index, count)); - public -#if !NETSTANDARD2_0 - override -#endif - ValueTask ReadAsync(Memory buffer, CancellationToken cancellationToken = default) => new(Read(buffer.Span)); + public override ValueTask ReadAsync(Memory buffer, CancellationToken cancellationToken = default) => new(Read(buffer.Span)); public override Task ReadLineAsync() => Task.FromResult(ReadLine()); diff --git a/src/Npgsql/Properties/AssemblyInfo.cs b/src/Npgsql/Properties/AssemblyInfo.cs index 80500e0028..464a017b15 100644 --- a/src/Npgsql/Properties/AssemblyInfo.cs +++ b/src/Npgsql/Properties/AssemblyInfo.cs @@ -8,9 +8,7 @@ [assembly: AssemblyTrademark("")] [assembly: 
SecurityRules(SecurityRuleSet.Level1)] -#if NET5_0_OR_GREATER [module: SkipLocalsInit] -#endif [assembly: InternalsVisibleTo("Npgsql.Tests, PublicKey=" + "0024000004800000940000000602000000240000525341310004000001000100" + diff --git a/src/Npgsql/Replication/PgOutput/PgOutputReplicationOptions.cs b/src/Npgsql/Replication/PgOutput/PgOutputReplicationOptions.cs index 93039fdf25..5835b88ee2 100644 --- a/src/Npgsql/Replication/PgOutput/PgOutputReplicationOptions.cs +++ b/src/Npgsql/Replication/PgOutput/PgOutputReplicationOptions.cs @@ -125,18 +125,5 @@ public override bool Equals(object? obj) => obj is PgOutputReplicationOptions other && other.Equals(this); /// - public override int GetHashCode() - { -#if NETSTANDARD2_0 - var hashCode = ProtocolVersion.GetHashCode(); - hashCode = (hashCode * 397) ^ PublicationNames.GetHashCode(); - hashCode = (hashCode * 397) ^ Binary.GetHashCode(); - hashCode = (hashCode * 397) ^ Streaming.GetHashCode(); - hashCode = (hashCode * 397) ^ Messages.GetHashCode(); - hashCode = (hashCode * 397) ^ TwoPhase.GetHashCode(); - return hashCode; -#else - return HashCode.Combine(ProtocolVersion, PublicationNames, Binary, Streaming, Messages, TwoPhase); -#endif - } + public override int GetHashCode() => HashCode.Combine(ProtocolVersion, PublicationNames, Binary, Streaming, Messages, TwoPhase); } diff --git a/src/Npgsql/Replication/ReplicationConnection.cs b/src/Npgsql/Replication/ReplicationConnection.cs index 762d45db87..283ffd6d3f 100644 --- a/src/Npgsql/Replication/ReplicationConnection.cs +++ b/src/Npgsql/Replication/ReplicationConnection.cs @@ -541,31 +541,10 @@ internal async IAsyncEnumerator StartReplicationInternal( if (columnStream != null && !bypassingStream && !_replicationCancellationTokenSource.Token.IsCancellationRequested) await columnStream.DisposeAsync().ConfigureAwait(false); -#if NETSTANDARD2_0 - if (_sendFeedbackTimer != null) - { - var mre = new ManualResetEvent(false); - var actuallyDisposed = _sendFeedbackTimer.Dispose(mre); - 
Debug.Assert(actuallyDisposed, $"{nameof(_sendFeedbackTimer)} had already been disposed when completing replication"); - if (actuallyDisposed) - await mre.WaitOneAsync(cancellationToken).ConfigureAwait(false); - } - - if (_requestFeedbackTimer != null) - { - var mre = new ManualResetEvent(false); - var actuallyDisposed = _requestFeedbackTimer.Dispose(mre); - Debug.Assert(actuallyDisposed, $"{nameof(_requestFeedbackTimer)} had already been disposed when completing replication"); - if (actuallyDisposed) - await mre.WaitOneAsync(cancellationToken).ConfigureAwait(false); - } -#else - if (_sendFeedbackTimer != null) await _sendFeedbackTimer.DisposeAsync().ConfigureAwait(false); if (_requestFeedbackTimer != null) await _requestFeedbackTimer.DisposeAsync().ConfigureAwait(false); -#endif _sendFeedbackTimer = null; _requestFeedbackTimer = null; diff --git a/src/Npgsql/Replication/TestDecoding/TestDecodingOptions.cs b/src/Npgsql/Replication/TestDecoding/TestDecodingOptions.cs index b0887a3885..12372cb793 100644 --- a/src/Npgsql/Replication/TestDecoding/TestDecodingOptions.cs +++ b/src/Npgsql/Replication/TestDecoding/TestDecodingOptions.cs @@ -97,18 +97,5 @@ public override bool Equals(object? 
obj) /// public override int GetHashCode() - { -#if NETSTANDARD2_0 - var hashCode = IncludeXids.GetHashCode(); - hashCode = (hashCode * 397) ^ IncludeTimestamp.GetHashCode(); - hashCode = (hashCode * 397) ^ ForceBinary.GetHashCode(); - hashCode = (hashCode * 397) ^ SkipEmptyXacts.GetHashCode(); - hashCode = (hashCode * 397) ^ OnlyLocal.GetHashCode(); - hashCode = (hashCode * 397) ^ IncludeRewrites.GetHashCode(); - hashCode = (hashCode * 397) ^ StreamChanges.GetHashCode(); - return hashCode; -#else - return HashCode.Combine(IncludeXids, IncludeTimestamp, ForceBinary, SkipEmptyXacts, OnlyLocal, IncludeRewrites, StreamChanges); -#endif - } -} \ No newline at end of file + => HashCode.Combine(IncludeXids, IncludeTimestamp, ForceBinary, SkipEmptyXacts, OnlyLocal, IncludeRewrites, StreamChanges); +} diff --git a/src/Npgsql/Shims/Batching.cs b/src/Npgsql/Shims/Batching.cs deleted file mode 100644 index c8e7ddec1c..0000000000 --- a/src/Npgsql/Shims/Batching.cs +++ /dev/null @@ -1,130 +0,0 @@ -#if !NET6_0_OR_GREATER -using System.Collections; -using System.Collections.Generic; -using System.Threading; -using System.Threading.Tasks; - -#pragma warning disable 1591,RS0016 - -// ReSharper disable once CheckNamespace -namespace System.Data.Common -{ - public abstract class DbBatch : IDisposable, IAsyncDisposable - { - public DbBatchCommandCollection BatchCommands => DbBatchCommands; - - protected abstract DbBatchCommandCollection DbBatchCommands { get; } - - public abstract int Timeout { get; set; } - - public DbConnection? Connection - { - get => DbConnection; - set => DbConnection = value; - } - - protected abstract DbConnection? DbConnection { get; set; } - - public DbTransaction? Transaction - { - get => DbTransaction; - set => DbTransaction = value; - } - - protected abstract DbTransaction? 
DbTransaction { get; set; } - - public DbDataReader ExecuteReader(CommandBehavior behavior = CommandBehavior.Default) - => ExecuteDbDataReader(behavior); - - protected abstract DbDataReader ExecuteDbDataReader(CommandBehavior behavior); - - public Task ExecuteReaderAsync(CancellationToken cancellationToken = default) - => ExecuteDbDataReaderAsync(CommandBehavior.Default, cancellationToken); - - public Task ExecuteReaderAsync( - CommandBehavior behavior, - CancellationToken cancellationToken = default) - => ExecuteDbDataReaderAsync(behavior, cancellationToken); - - protected abstract Task ExecuteDbDataReaderAsync( - CommandBehavior behavior, - CancellationToken cancellationToken); - - public abstract int ExecuteNonQuery(); - - public abstract Task ExecuteNonQueryAsync(CancellationToken cancellationToken = default); - - public abstract object? ExecuteScalar(); - - public abstract Task ExecuteScalarAsync(CancellationToken cancellationToken = default); - - public abstract void Prepare(); - - public abstract Task PrepareAsync(CancellationToken cancellationToken = default); - - public abstract void Cancel(); - - public DbBatchCommand CreateBatchCommand() => CreateDbBatchCommand(); - - protected abstract DbBatchCommand CreateDbBatchCommand(); - - public virtual void Dispose() {} - - public virtual ValueTask DisposeAsync() - { - Dispose(); - return default; - } - } - - public abstract class DbBatchCommand - { - public abstract string CommandText { get; set; } - - public abstract CommandType CommandType { get; set; } - - public abstract int RecordsAffected { get; } - - public DbParameterCollection Parameters => DbParameterCollection; - - protected abstract DbParameterCollection DbParameterCollection { get; } - } - - public abstract class DbBatchCommandCollection : IList - { - public abstract IEnumerator GetEnumerator(); - - IEnumerator IEnumerable.GetEnumerator() => GetEnumerator(); - - public abstract void Add(DbBatchCommand item); - - public abstract void Clear(); - - 
public abstract bool Contains(DbBatchCommand item); - - public abstract void CopyTo(DbBatchCommand[] array, int arrayIndex); - - public abstract bool Remove(DbBatchCommand item); - - public abstract int Count { get; } - - public abstract bool IsReadOnly { get; } - - public abstract int IndexOf(DbBatchCommand item); - - public abstract void Insert(int index, DbBatchCommand item); - - public abstract void RemoveAt(int index); - - public DbBatchCommand this[int index] - { - get => GetBatchCommand(index); - set => SetBatchCommand(index, value); - } - - protected abstract DbBatchCommand GetBatchCommand(int index); - - protected abstract void SetBatchCommand(int index, DbBatchCommand batchCommand); - } -} -#endif diff --git a/src/Npgsql/Shims/ConcurrentDictionaryExtensions.cs b/src/Npgsql/Shims/ConcurrentDictionaryExtensions.cs deleted file mode 100644 index 02f5c2077c..0000000000 --- a/src/Npgsql/Shims/ConcurrentDictionaryExtensions.cs +++ /dev/null @@ -1,15 +0,0 @@ -namespace System.Collections.Concurrent; - -#if NETSTANDARD2_0 -static class ConcurrentDictionaryExtensions -{ - public static TValue GetOrAdd(this ConcurrentDictionary instance, TKey key, - Func valueFactory, TArg factoryArgument) - { - // The actual closure capture exists in a local function to prevent a display class allocation at the start of the method. - return instance.TryGetValue(key, out var value) ? 
value : GetOrAddWithClosure(instance, key, valueFactory, factoryArgument); - - static TValue GetOrAddWithClosure(ConcurrentDictionary instance, TKey key, Func valuefactory, TArg factoryargument) => instance.GetOrAdd(key, key => valuefactory(key, factoryargument)); - } -} -#endif diff --git a/src/Npgsql/Shims/DbDataReaderExtensions.cs b/src/Npgsql/Shims/DbDataReaderExtensions.cs deleted file mode 100644 index 5b56c31f55..0000000000 --- a/src/Npgsql/Shims/DbDataReaderExtensions.cs +++ /dev/null @@ -1,22 +0,0 @@ -#if NETSTANDARD2_0 - -#pragma warning disable 1591 - -using System.Data.Common; - -// ReSharper disable once CheckNamespace -namespace System.Data -{ - static class DataReaderExtensions - { - public static char GetChar(this DbDataReader reader, string name) - => reader.GetChar(reader.GetOrdinal(name)); - - public static string GetString(this DbDataReader reader, string name) - => reader.GetString(reader.GetOrdinal(name)); - - public static bool IsDBNull(this DbDataReader reader, string name) - => reader.IsDBNull(reader.GetOrdinal(name)); - } -} -#endif diff --git a/src/Npgsql/Shims/DictonaryExtensions.cs b/src/Npgsql/Shims/DictonaryExtensions.cs deleted file mode 100644 index a13397a39e..0000000000 --- a/src/Npgsql/Shims/DictonaryExtensions.cs +++ /dev/null @@ -1,19 +0,0 @@ -#if NETSTANDARD2_0 -// ReSharper disable once CheckNamespace -namespace System.Collections.Generic; - -// Helpers for Dictionary before netstandard 2.1 -static class DictonaryExtensions -{ - public static bool TryAdd(this Dictionary dictionary, TKey key, TValue value) - { - if (!dictionary.ContainsKey(key)) - { - dictionary.Add(key, value); - return true; - } - - return false; - } -} -#endif diff --git a/src/Npgsql/Shims/EncodingExtensions.cs b/src/Npgsql/Shims/EncodingExtensions.cs deleted file mode 100644 index bc5cb8651a..0000000000 --- a/src/Npgsql/Shims/EncodingExtensions.cs +++ /dev/null @@ -1,219 +0,0 @@ -// ReSharper disable RedundantUsingDirective -using System.Buffers; -using 
System.Collections.Generic; -using System.Runtime.InteropServices; -// ReSharper restore RedundantUsingDirective - -// ReSharper disable once CheckNamespace -namespace System.Text; - -static class EncodingExtensions -{ -#if NETSTANDARD2_0 - public static unsafe int GetByteCount(this Encoding encoding, ReadOnlySpan chars) - { - fixed (char* charsPtr = chars) - { - return encoding.GetByteCount(charsPtr, chars.Length); - } - } - - public static unsafe int GetBytes(this Encoding encoding, ReadOnlySpan chars, Span bytes) - { - fixed (char* charsPtr = chars) - fixed (byte* bytesPtr = bytes) - { - return encoding.GetBytes(charsPtr, chars.Length, bytesPtr, bytes.Length); - } - } - - public static unsafe int GetCharCount(this Encoding encoding, ReadOnlySpan bytes) - { - fixed (byte* bytesPtr = bytes) - { - return encoding.GetCharCount(bytesPtr, bytes.Length); - } - } - - public static unsafe int GetCharCount(this Decoder encoding, ReadOnlySpan bytes, bool flush) - { - fixed (byte* bytesPtr = bytes) - { - return encoding.GetCharCount(bytesPtr, bytes.Length, flush); - } - } - - public static unsafe int GetChars(this Decoder encoding, ReadOnlySpan bytes, Span chars, bool flush) - { - fixed (byte* bytesPtr = bytes) - fixed (char* charsPtr = chars) - { - return encoding.GetChars(bytesPtr, bytes.Length, charsPtr, chars.Length, flush); - } - } - - public static unsafe int GetChars(this Encoding encoding, ReadOnlySpan bytes, Span chars) - { - fixed (byte* bytesPtr = bytes) - fixed (char* charsPtr = chars) - { - return encoding.GetChars(bytesPtr, bytes.Length, charsPtr, chars.Length); - } - } - - public static unsafe void Convert(this Encoder encoder, ReadOnlySpan chars, Span bytes, bool flush, out int charsUsed, out int bytesUsed, out bool completed) - { - fixed (char* charsPtr = chars) - fixed (byte* bytesPtr = bytes) - { - encoder.Convert(charsPtr, chars.Length, bytesPtr, bytes.Length, flush, out charsUsed, out bytesUsed, out completed); - } - } - - public static unsafe void 
Convert(this Decoder encoder, ReadOnlySpan bytes, Span chars, bool flush, out int bytesUsed, out int charsUsed, out bool completed) - { - fixed (byte* bytesPtr = bytes) - fixed (char* charsPtr = chars) - { - encoder.Convert(bytesPtr, bytes.Length, charsPtr, chars.Length, flush, out bytesUsed, out charsUsed, out completed); - } - } -#endif - -#if NETSTANDARD - /// - /// Decodes the specified to s using the specified - /// and outputs the result to . - /// - /// The which represents how the data in is encoded. - /// The to decode to characters. - /// The destination buffer to which the decoded characters will be written. - /// The number of chars written to . - /// Thrown if is not large enough to contain the encoded form of . - /// Thrown if contains data that cannot be decoded and is configured - /// to throw an exception when such data is seen. - public static int GetChars(this Encoding encoding, in ReadOnlySequence bytes, Span chars) - { - if (encoding is null) - throw new ArgumentNullException(nameof(encoding)); - - if (bytes.IsSingleSegment) - { - // If the incoming sequence is single-segment, one-shot this. - - return encoding.GetChars(bytes.First.Span, chars); - } - else - { - // If the incoming sequence is multi-segment, create a stateful Decoder - // and use it as the workhorse. On the final iteration we'll pass flush=true. 
- - var remainingBytes = bytes; - var originalCharsLength = chars.Length; - var decoder = encoding.GetDecoder(); - bool isFinalSegment; - - do - { - var firstSpan = remainingBytes.First.Span; - var next = remainingBytes.GetPosition(firstSpan.Length); - isFinalSegment = remainingBytes.IsSingleSegment; - - var charsWrittenJustNow = decoder.GetChars(firstSpan, chars, flush: isFinalSegment); - chars = chars.Slice(charsWrittenJustNow); - remainingBytes = remainingBytes.Slice(next); - } while (!isFinalSegment); - - return originalCharsLength - chars.Length; // total number of chars we wrote - } - } - - public static string GetString(this Encoding encoding, in ReadOnlySequence bytes) - { - if (encoding is null) - throw new ArgumentNullException(nameof(encoding)); - - // If the incoming sequence is single-segment, one-shot this. - if (bytes.IsSingleSegment) - { -#if NETSTANDARD2_1 - return encoding.GetString(bytes.First.Span); -#else - var rented = false; - byte[] arr; - var offset = 0; - var memory = bytes.First; - if (MemoryMarshal.TryGetArray(memory, out var segment)) - { - arr = segment.Array!; - offset = segment.Offset; - } - else - { - rented = true; - arr = ArrayPool.Shared.Rent(memory.Length); - bytes.First.Span.CopyTo(arr); - } - var ret = encoding.GetString(arr, offset, memory.Length); - if (rented) - ArrayPool.Shared.Return(arr); - return ret; -#endif - } - - // If the incoming sequence is multi-segment, create a stateful Decoder - // and use it as the workhorse. On the final iteration we'll pass flush=true. - - var decoder = encoding.GetDecoder(); - - // Maintain a list of all the segments we'll need to concat together. - // These will be released back to the pool at the end of the method. 
- - var listOfSegments = new List<(char[], int)>(); - var totalCharCount = 0; - - var remainingBytes = bytes; - bool isFinalSegment; - - do - { - var firstSpan = remainingBytes.First.Span; - var next = remainingBytes.GetPosition(firstSpan.Length); - isFinalSegment = remainingBytes.IsSingleSegment; - - var charCountThisIteration = decoder.GetCharCount(firstSpan, flush: isFinalSegment); // could throw ArgumentException if overflow would occur - var rentedArray = ArrayPool.Shared.Rent(charCountThisIteration); - var actualCharsWrittenThisIteration = decoder.GetChars(firstSpan, rentedArray, flush: isFinalSegment); - listOfSegments.Add((rentedArray, actualCharsWrittenThisIteration)); - - totalCharCount += actualCharsWrittenThisIteration; - if (totalCharCount < 0) - { - // If we overflowed, call string.Create, passing int.MaxValue. - // This will end up throwing the expected OutOfMemoryException - // since strings are limited to under int.MaxValue elements in length. - - totalCharCount = int.MaxValue; - break; - } - - remainingBytes = remainingBytes.Slice(next); - } while (!isFinalSegment); - - // Now build up the string to return, then release all of our scratch buffers - // back to the shared pool. 
- var chars = ArrayPool.Shared.Rent(totalCharCount); - var span = chars.AsSpan(); - foreach (var (array, length) in listOfSegments) - { - array.AsSpan(0, length).CopyTo(span); - ArrayPool.Shared.Return(array); - span = span.Slice(length); - } - - var str = new string(chars); - ArrayPool.Shared.Return(chars); - return str; - } -#endif -} diff --git a/src/Npgsql/Shims/ReadOnlySequenceExtensions.cs b/src/Npgsql/Shims/ReadOnlySequenceExtensions.cs deleted file mode 100644 index 0370285a7d..0000000000 --- a/src/Npgsql/Shims/ReadOnlySequenceExtensions.cs +++ /dev/null @@ -1,13 +0,0 @@ -namespace System.Buffers; - -static class ReadOnlySequenceExtensions -{ - public static ReadOnlySpan GetFirstSpan(this ReadOnlySequence sequence) - { -#if NETSTANDARD - return sequence.First.Span; -# else - return sequence.FirstSpan; -#endif - } -} diff --git a/src/Npgsql/Shims/ReadOnlySpanOfCharExtensions.cs b/src/Npgsql/Shims/ReadOnlySpanOfCharExtensions.cs deleted file mode 100644 index 11a70c9793..0000000000 --- a/src/Npgsql/Shims/ReadOnlySpanOfCharExtensions.cs +++ /dev/null @@ -1,15 +0,0 @@ -using System; -using System.Runtime.CompilerServices; - -namespace Npgsql.Netstandard20; - -static class ReadOnlySpanOfCharExtensions -{ - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static int ParseInt(this ReadOnlySpan span) - => int.Parse(span -#if NETSTANDARD2_0 - .ToString() -#endif - ); -} \ No newline at end of file diff --git a/src/Npgsql/Shims/ReferenceEqualityComparer.cs b/src/Npgsql/Shims/ReferenceEqualityComparer.cs deleted file mode 100644 index 38515ed90f..0000000000 --- a/src/Npgsql/Shims/ReferenceEqualityComparer.cs +++ /dev/null @@ -1,48 +0,0 @@ -using System.Runtime.CompilerServices; - -namespace System.Collections.Generic; - -#if NETSTANDARD -sealed class ReferenceEqualityComparer : IEqualityComparer, IEqualityComparer -{ - ReferenceEqualityComparer() { } - - /// - /// Gets the singleton instance. 
- /// - public static ReferenceEqualityComparer Instance { get; } = new(); - - /// - /// Determines whether two object references refer to the same object instance. - /// - /// The first object to compare. - /// The second object to compare. - /// - /// if both and refer to the same object instance - /// or if both are ; otherwise, . - /// - /// - /// This API is a wrapper around . - /// It is not necessarily equivalent to calling . - /// - public new bool Equals(object? x, object? y) => ReferenceEquals(x, y); - - /// - /// Returns a hash code for the specified object. The returned hash code is based on the object - /// identity, not on the contents of the object. - /// - /// The object for which to retrieve the hash code. - /// A hash code for the identity of . - /// - /// This API is a wrapper around . - /// It is not necessarily equivalent to calling . - /// - public int GetHashCode(object? obj) - { - // Depending on target framework, RuntimeHelpers.GetHashCode might not be annotated - // with the proper nullability attribute. We'll suppress any warning that might - // result. 
- return RuntimeHelpers.GetHashCode(obj!); - } -} -#endif diff --git a/src/Npgsql/Shims/RequiresPreviewFeaturesAttribute.cs b/src/Npgsql/Shims/RequiresPreviewFeaturesAttribute.cs deleted file mode 100644 index 4f7673959f..0000000000 --- a/src/Npgsql/Shims/RequiresPreviewFeaturesAttribute.cs +++ /dev/null @@ -1,48 +0,0 @@ -#if !NET6_0_OR_GREATER - -// ReSharper disable once CheckNamespace -namespace System.Runtime.Versioning; - -#pragma warning disable CS1591 // Missing XML comment for publicly visible type or member -#pragma warning disable RS0016 // Add public types and members to the declared API - -[AttributeUsage(AttributeTargets.Assembly | - AttributeTargets.Module | - AttributeTargets.Class | - AttributeTargets.Interface | - AttributeTargets.Delegate | - AttributeTargets.Struct | - AttributeTargets.Enum | - AttributeTargets.Constructor | - AttributeTargets.Method | - AttributeTargets.Property | - AttributeTargets.Field | - AttributeTargets.Event, Inherited = false)] -public sealed class RequiresPreviewFeaturesAttribute : Attribute -{ - /// - /// Initializes a new instance of the class. - /// - public RequiresPreviewFeaturesAttribute() { } - - /// - /// Initializes a new instance of the class with the specified message. - /// - /// An optional message associated with this attribute instance. - public RequiresPreviewFeaturesAttribute(string? message) - { - Message = message; - } - - /// - /// Returns the optional message associated with this attribute instance. - /// - public string? Message { get; } - - /// - /// Returns the optional URL associated with this attribute instance. - /// - public string? 
Url { get; set; } -} - -#endif \ No newline at end of file diff --git a/src/Npgsql/Shims/StreamExtensions.cs b/src/Npgsql/Shims/StreamExtensions.cs index 6a6a54231b..60dbf9ca3b 100644 --- a/src/Npgsql/Shims/StreamExtensions.cs +++ b/src/Npgsql/Shims/StreamExtensions.cs @@ -1,9 +1,6 @@ -#if NETSTANDARD2_0 || !NET7_0_OR_GREATER -using System.Buffers; -using System.Diagnostics; +#if !NET7_0_OR_GREATER using System.Threading; using System.Threading.Tasks; -using Npgsql; // ReSharper disable once CheckNamespace namespace System.IO @@ -36,66 +33,6 @@ public static async ValueTask ReadExactlyAsync(this Stream stream, Memory totalRead += read; } } - -#if NETSTANDARD2_0 - public static int Read(this Stream stream, Span buffer) - { - var sharedBuffer = ArrayPool.Shared.Rent(buffer.Length); - try - { - var numRead = stream.Read(sharedBuffer, 0, buffer.Length); - new Span(sharedBuffer, 0, numRead).CopyTo(buffer); - return numRead; - } - finally - { - ArrayPool.Shared.Return(sharedBuffer); - } - } - - public static async ValueTask ReadAsync(this Stream stream, Memory buffer, CancellationToken cancellationToken = default) - { - var sharedBuffer = ArrayPool.Shared.Rent(buffer.Length); - try - { - var result = await stream.ReadAsync(sharedBuffer, 0, buffer.Length, cancellationToken).ConfigureAwait(false); - new Span(sharedBuffer, 0, result).CopyTo(buffer.Span); - return result; - } - finally - { - ArrayPool.Shared.Return(sharedBuffer); - } - } - - public static void Write(this Stream stream, ReadOnlySpan buffer) - { - var sharedBuffer = ArrayPool.Shared.Rent(buffer.Length); - try - { - buffer.CopyTo(sharedBuffer); - stream.Write(sharedBuffer, 0, buffer.Length); - } - finally - { - ArrayPool.Shared.Return(sharedBuffer); - } - } - - public static async ValueTask WriteAsync(this Stream stream, ReadOnlyMemory buffer, CancellationToken cancellationToken = default) - { - var sharedBuffer = ArrayPool.Shared.Rent(buffer.Length); - buffer.Span.CopyTo(sharedBuffer); - try - { - await 
stream.WriteAsync(sharedBuffer, 0, buffer.Length, cancellationToken).ConfigureAwait(false); - } - finally - { - ArrayPool.Shared.Return(sharedBuffer); - } - } -#endif } } #endif diff --git a/src/Npgsql/Shims/StringBuilderExtensions.cs b/src/Npgsql/Shims/StringBuilderExtensions.cs deleted file mode 100644 index 8d152be563..0000000000 --- a/src/Npgsql/Shims/StringBuilderExtensions.cs +++ /dev/null @@ -1,33 +0,0 @@ -#if NETSTANDARD2_0 - -// ReSharper disable once CheckNamespace -namespace System.Text -{ - /// - /// A set of extension methods to to allow runtime compatibility. - /// - static class StringBuilderExtensions - { - /// - /// Appends the provided to the . - /// - /// The to append to. - /// The to append. - public static StringBuilder Append(this StringBuilder stringBuilder, ReadOnlySpan span) - { - if (span.Length > 0) - { - unsafe - { - fixed (char* value = &span.GetPinnableReference()) - { - return stringBuilder.Append(value, span.Length); - } - } - } - - return stringBuilder; - } - } -} -#endif diff --git a/src/Npgsql/Shims/TaskExtensions.cs b/src/Npgsql/Shims/TaskExtensions.cs deleted file mode 100644 index a7d56948e9..0000000000 --- a/src/Npgsql/Shims/TaskExtensions.cs +++ /dev/null @@ -1,65 +0,0 @@ -#if !NET6_0_OR_GREATER -using System.Collections.Generic; - -namespace System.Threading.Tasks; - -static class TaskExtensions -{ - /// - /// Gets a that will complete when this completes, when the specified timeout expires, or when the specified has cancellation requested. - /// - /// The representing the asynchronous wait. - /// The timeout after which the should be faulted with a if it hasn't otherwise completed. - /// The to monitor for a cancellation request. - /// The representing the asynchronous wait. - /// This method reproduces new to the .NET 6.0 API .WaitAsync. - public static async Task WaitAsync(this Task task, TimeSpan timeout, CancellationToken cancellationToken) - { - var tasks = new List(3); - - Task? 
cancellationTask = default; - CancellationTokenRegistration registration = default; - if (cancellationToken.CanBeCanceled) - { - var tcs = new TaskCompletionSource(); - registration = cancellationToken.Register(s => ((TaskCompletionSource)s!).TrySetResult(true), tcs); - cancellationTask = tcs.Task; - tasks.Add(cancellationTask); - } - - Task? delayTask = default; - CancellationTokenSource? delayCts = default; - if (timeout != Timeout.InfiniteTimeSpan) - { - var timeLeft = timeout; - delayCts = new CancellationTokenSource(); - delayTask = Task.Delay(timeLeft, delayCts.Token); - tasks.Add(delayTask); - } - - try - { - if (tasks.Count != 0) - { - tasks.Add(task); - var result = await Task.WhenAny(tasks).ConfigureAwait(false); - if (result == cancellationTask) - { - task = Task.FromCanceled(cancellationToken); - } - else if (result == delayTask) - { - task = Task.FromException(new TimeoutException()); - } - } - await task.ConfigureAwait(false); - } - finally - { - delayCts?.Cancel(); - delayCts?.Dispose(); - registration.Dispose(); - } - } -} -#endif diff --git a/src/Npgsql/Shims/UnixDomainSocketEndPoint.cs b/src/Npgsql/Shims/UnixDomainSocketEndPoint.cs deleted file mode 100644 index 6135590493..0000000000 --- a/src/Npgsql/Shims/UnixDomainSocketEndPoint.cs +++ /dev/null @@ -1,89 +0,0 @@ -#if NETSTANDARD2_0 -using System.Net.Sockets; -using System.Text; - -// ReSharper disable once CheckNamespace -namespace System.Net -{ - // Copied and adapted from https://github.com/mono/mono/blob/master/mcs/class/Mono.Posix/Mono.Unix/UnixEndPoint.cs - sealed class UnixDomainSocketEndPoint : EndPoint - { - string _filename; - - public UnixDomainSocketEndPoint (string filename) - { - if (filename == null) - throw new ArgumentNullException(nameof(filename)); - if (filename == "") - throw new ArgumentException ("Cannot be empty.", nameof(filename)); - _filename = filename; - } - - public string Filename { - get => _filename; - set => _filename = value; - } - - public override 
AddressFamily AddressFamily => AddressFamily.Unix; - - public override EndPoint Create(SocketAddress socketAddress) - { - /* - * Should also check this - * - int addr = (int) AddressFamily.Unix; - if (socketAddress [0] != (addr & 0xFF)) - throw new ArgumentException ("socketAddress is not a unix socket address."); - if (socketAddress [1] != ((addr & 0xFF00) >> 8)) - throw new ArgumentException ("socketAddress is not a unix socket address."); - */ - - if (socketAddress.Size == 2) { - // Empty filename. - // Probably from RemoteEndPoint which on linux does not return the file name. - return new UnixDomainSocketEndPoint("a") { _filename = "" }; - } - var size = socketAddress.Size - 2; - var bytes = new byte[size]; - for (var i = 0; i < bytes.Length; i++) { - bytes[i] = socketAddress[i + 2]; - // There may be junk after the null terminator, so ignore it all. - if (bytes[i] == 0) { - size = i; - break; - } - } - - var name = Encoding.UTF8.GetString(bytes, 0, size); - return new UnixDomainSocketEndPoint(name); - } - - public override SocketAddress Serialize() - { - var bytes = Encoding.UTF8.GetBytes(_filename); - var sa = new SocketAddress(AddressFamily, 2 + bytes.Length + 1); - // sa [0] -> family low byte, sa [1] -> family high byte - for (var i = 0; i < bytes.Length; i++) - sa[2 + i] = bytes[i]; - - //NULL suffix for non-abstract path - sa[2 + bytes.Length] = 0; - - return sa; - } - - public override string ToString() => _filename; - - public override int GetHashCode () => _filename.GetHashCode(); - - public override bool Equals(object? 
o) - { - var other = o as UnixDomainSocketEndPoint; - if (other == null) - return false; - - return (other._filename == _filename); - } - } -} -#endif diff --git a/src/Npgsql/Shims/UnreachableException.cs b/src/Npgsql/Shims/UnreachableException.cs index c45f3fd1d8..f75989df13 100644 --- a/src/Npgsql/Shims/UnreachableException.cs +++ b/src/Npgsql/Shims/UnreachableException.cs @@ -1,6 +1,4 @@ #if !NET7_0_OR_GREATER -using System; - namespace System.Diagnostics; /// diff --git a/src/Npgsql/Shims/WaitHandleExtensions.cs b/src/Npgsql/Shims/WaitHandleExtensions.cs deleted file mode 100644 index 5f746cc296..0000000000 --- a/src/Npgsql/Shims/WaitHandleExtensions.cs +++ /dev/null @@ -1,42 +0,0 @@ -#if NETSTANDARD2_0 -using System.Threading.Tasks; - -// ReSharper disable once CheckNamespace -namespace System.Threading -{ - // https://thomaslevesque.com/2015/06/04/async-and-cancellation-support-for-wait-handles/ - static class WaitHandleExtensions - { - internal static async Task WaitOneAsync( - this WaitHandle handle, int millisecondsTimeout, CancellationToken cancellationToken = default) - { - var tcs = new TaskCompletionSource(); - using var tokenRegistration = - cancellationToken.Register(state => ((TaskCompletionSource)state!).TrySetCanceled(), tcs); - - RegisteredWaitHandle? 
registeredHandle = null; - try - { - registeredHandle = ThreadPool.RegisterWaitForSingleObject( - handle, - (state, timedOut) => ((TaskCompletionSource)state!).TrySetResult(!timedOut), - state: tcs, - millisecondsTimeout, - executeOnlyOnce: true); - return await tcs.Task.ConfigureAwait(false); - } - finally - { - registeredHandle?.Unregister(null); - } - } - - internal static Task WaitOneAsync(this WaitHandle handle, TimeSpan timeout, CancellationToken cancellationToken = default) - => handle.WaitOneAsync((int)timeout.TotalMilliseconds, cancellationToken); - - internal static Task WaitOneAsync(this WaitHandle handle, CancellationToken cancellationToken = default) - => handle.WaitOneAsync(Timeout.Infinite, cancellationToken); - } -} - -#endif diff --git a/src/Npgsql/Util/SubReadStream.cs b/src/Npgsql/Util/SubReadStream.cs index 6aaee9651a..9f0176b631 100644 --- a/src/Npgsql/Util/SubReadStream.cs +++ b/src/Npgsql/Util/SubReadStream.cs @@ -108,11 +108,7 @@ public override int Read(byte[] buffer, int offset, int count) return ret; } -#if !NETSTANDARD2_0 public override int Read(Span destination) -#else - int Read(Span destination) -#endif { // parameter validation sent to _superStream.Read var origCount = destination.Length; @@ -147,11 +143,7 @@ public override Task ReadAsync(byte[] buffer, int offset, int count, Cancel return ReadAsync(new Memory(buffer, offset, count), cancellationToken).AsTask(); } -#if !NETSTANDARD2_0 public override ValueTask ReadAsync(Memory buffer, CancellationToken cancellationToken = default) -#else - ValueTask ReadAsync(Memory buffer, CancellationToken cancellationToken = default) -#endif { ThrowIfDisposed(); ThrowIfCantRead(); @@ -209,19 +201,4 @@ protected override void Dispose(bool disposing) } base.Dispose(disposing); } - -#if NETSTANDARD - void ValidateBufferArguments(byte[]? 
buffer, int offset, int count) - { - if (buffer is null) - ThrowHelper.ThrowArgumentNullException(nameof(buffer)); - - if (offset < 0) - ThrowHelper.ThrowArgumentOutOfRangeException(nameof(offset), "Offset is less than 0"); - - if ((uint)count > buffer.Length - offset) - ThrowHelper.ThrowArgumentOutOfRangeException(nameof(count), "Count larger than buffer minus offset"); - - } -#endif } diff --git a/src/Shared/CodeAnalysis.cs b/src/Shared/CodeAnalysis.cs index 8e8e3b3d9e..75e3055189 100644 --- a/src/Shared/CodeAnalysis.cs +++ b/src/Shared/CodeAnalysis.cs @@ -53,173 +53,10 @@ sealed class UnscopedRefAttribute : Attribute public UnscopedRefAttribute() { } } #endif -#if NETSTANDARD2_0 - [AttributeUsageAttribute(AttributeTargets.Field | AttributeTargets.Parameter | AttributeTargets.Property)] - sealed class AllowNullAttribute : Attribute - { - } - - [AttributeUsageAttribute(AttributeTargets.Field | AttributeTargets.Parameter | AttributeTargets.Property)] - sealed class DisallowNullAttribute : Attribute - { - } - - [AttributeUsageAttribute(AttributeTargets.Method)] - sealed class DoesNotReturnAttribute : Attribute - { - } - - [AttributeUsageAttribute(AttributeTargets.Parameter)] - sealed class DoesNotReturnIfAttribute : Attribute - { - public DoesNotReturnIfAttribute(bool parameterValue) => ParameterValue = parameterValue; - public bool ParameterValue { get; } - } - - [AttributeUsageAttribute(AttributeTargets.Assembly | AttributeTargets.Class | AttributeTargets.Constructor | AttributeTargets.Event | AttributeTargets.Method | AttributeTargets.Property | AttributeTargets.Struct, AllowMultiple = false)] - sealed class ExcludeFromCodeCoverageAttribute : Attribute - { - } - - [AttributeUsageAttribute(AttributeTargets.Field | AttributeTargets.Parameter | AttributeTargets.Property | AttributeTargets.ReturnValue)] - sealed class MaybeNullAttribute : Attribute - { - } - - [AttributeUsageAttribute(AttributeTargets.Parameter)] - sealed class MaybeNullWhenAttribute : Attribute - { 
- public MaybeNullWhenAttribute(bool returnValue) => ReturnValue = returnValue; - public bool ReturnValue { get; } - } - - [AttributeUsageAttribute(AttributeTargets.Field | AttributeTargets.Parameter | AttributeTargets.Property | AttributeTargets.ReturnValue)] - sealed class NotNullAttribute : Attribute - { - } - - [AttributeUsageAttribute(AttributeTargets.Parameter | AttributeTargets.Property | AttributeTargets.ReturnValue, AllowMultiple = true)] - sealed class NotNullIfNotNullAttribute : Attribute - { - public NotNullIfNotNullAttribute(string parameterName) => ParameterName = parameterName; - public string ParameterName { get; } - } - - [AttributeUsageAttribute(AttributeTargets.Parameter)] - sealed class NotNullWhenAttribute : Attribute - { - public NotNullWhenAttribute(bool returnValue) => ReturnValue = returnValue; - public bool ReturnValue { get; } - } -#endif - -#if !NET5_0_OR_GREATER - [AttributeUsage(AttributeTargets.Method | AttributeTargets.Property, AllowMultiple = true, Inherited = false)] - sealed class MemberNotNullAttribute : Attribute - { - public MemberNotNullAttribute(string member) => Members = new string[] - { - member - }; - - public MemberNotNullAttribute(params string[] members) => Members = members; - - public string[] Members { get; } - } - - [AttributeUsage(AttributeTargets.Method | AttributeTargets.Property, AllowMultiple = true, Inherited = false)] - sealed class MemberNotNullWhenAttribute : Attribute - { - public MemberNotNullWhenAttribute(bool returnValue, string member) - { - ReturnValue = returnValue; - Members = new string[1] { member }; - } - - public MemberNotNullWhenAttribute(bool returnValue, params string[] members) - { - ReturnValue = returnValue; - Members = members; - } - - public bool ReturnValue { get; } - - public string[] Members { get; } - } - - [AttributeUsage(AttributeTargets.Method | AttributeTargets.Constructor | AttributeTargets.Class, Inherited = false)] - sealed class RequiresUnreferencedCodeAttribute : Attribute 
- { - public RequiresUnreferencedCodeAttribute(string message) - { - Message = message; - } - - public string Message { get; } - - public string? Url { get; set; } - } - - [AttributeUsage( - AttributeTargets.Field | AttributeTargets.ReturnValue | AttributeTargets.GenericParameter | - AttributeTargets.Parameter | AttributeTargets.Property | AttributeTargets.Method | - AttributeTargets.Class | AttributeTargets.Interface | AttributeTargets.Struct, - Inherited = false)] - sealed class DynamicallyAccessedMembersAttribute : Attribute - { - public DynamicallyAccessedMembersAttribute(DynamicallyAccessedMemberTypes memberTypes) - { - MemberTypes = memberTypes; - } - - public DynamicallyAccessedMemberTypes MemberTypes { get; } - } - - [Flags] - enum DynamicallyAccessedMemberTypes - { - None = 0, - PublicParameterlessConstructor = 0x0001, - PublicConstructors = 0x0002 | PublicParameterlessConstructor, - NonPublicConstructors = 0x0004, - PublicMethods = 0x0008, - NonPublicMethods = 0x0010, - PublicFields = 0x0020, - NonPublicFields = 0x0040, - PublicNestedTypes = 0x0080, - NonPublicNestedTypes = 0x0100, - PublicProperties = 0x0200, - NonPublicProperties = 0x0400, - PublicEvents = 0x0800, - NonPublicEvents = 0x1000, - Interfaces = 0x2000, - All = ~None - } - - [AttributeUsage(AttributeTargets.All, Inherited = false, AllowMultiple = true)] - sealed class UnconditionalSuppressMessageAttribute : Attribute - { - public UnconditionalSuppressMessageAttribute(string category, string checkId) - { - Category = category; - CheckId = checkId; - } - - public string Category { get; } - public string CheckId { get; } - public string? Scope { get; set; } - public string? Target { get; set; } - public string? MessageId { get; set; } - public string? 
Justification { get; set; } - } -#endif } namespace System.Runtime.CompilerServices { -#if !NET5_0_OR_GREATER - static class IsExternalInit {} -#endif #if !NET7_0_OR_GREATER [AttributeUsage(AttributeTargets.Class | AttributeTargets.Struct | AttributeTargets.Field | AttributeTargets.Property, AllowMultiple = false, Inherited = false)] sealed class RequiredMemberAttribute : Attribute @@ -241,7 +78,7 @@ public CompilerFeatureRequiredAttribute(string featureName) /// /// If true, the compiler can choose to allow access to the location where this attribute is applied if it does not understand . /// - public bool IsOptional { get; init; } + public bool IsOptional { get; set; } /// /// The used for the ref structs C# feature. diff --git a/test/Directory.Build.props b/test/Directory.Build.props index 59f7665837..7fefd32afd 100644 --- a/test/Directory.Build.props +++ b/test/Directory.Build.props @@ -2,8 +2,7 @@ - net8.0;netcoreapp3.1 - net8.0 + net8.0 false diff --git a/test/Npgsql.DependencyInjection.Tests/Npgsql.DependencyInjection.Tests.csproj b/test/Npgsql.DependencyInjection.Tests/Npgsql.DependencyInjection.Tests.csproj index 9637e56366..5f0006d79c 100644 --- a/test/Npgsql.DependencyInjection.Tests/Npgsql.DependencyInjection.Tests.csproj +++ b/test/Npgsql.DependencyInjection.Tests/Npgsql.DependencyInjection.Tests.csproj @@ -1,9 +1,4 @@ - - - net8.0 - - diff --git a/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj b/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj index bc680c3052..f384594eb3 100644 --- a/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj +++ b/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj @@ -2,8 +2,6 @@ exe true - - net8.0 true true true diff --git a/test/Npgsql.PluginTests/NodaTimeTests.cs b/test/Npgsql.PluginTests/NodaTimeTests.cs index adccd163cc..a6af632723 100644 --- a/test/Npgsql.PluginTests/NodaTimeTests.cs +++ b/test/Npgsql.PluginTests/NodaTimeTests.cs @@ -586,7 +586,6 @@ await AssertType( 
isNpgsqlDbTypeInferredFromClrType: false); } -#if NET6_0_OR_GREATER [Test] public Task Date_as_DateOnly() => AssertType(new DateOnly(2020, 10, 1), "2020-10-01", "date", NpgsqlDbType.Date, DbType.Date, isDefaultForReading: false); @@ -618,7 +617,6 @@ await AssertType( "datemultirange", NpgsqlDbType.DateMultirange, isDefault: false, skipArrayCheck: true); } -#endif [Test] public async Task Daterange_array_as_array_of_DateInterval() @@ -673,7 +671,6 @@ public Task Time_as_TimeSpan() DbType.Time, isDefault: false); -#if NET6_0_OR_GREATER [Test] public Task Time_as_TimeOnly() => AssertType( @@ -683,7 +680,6 @@ public Task Time_as_TimeOnly() NpgsqlDbType.Time, DbType.Time, isDefaultForReading: false); -#endif #endregion Time diff --git a/test/Npgsql.Tests/BatchTests.cs b/test/Npgsql.Tests/BatchTests.cs index bd38ea2e87..977bb5f98f 100644 --- a/test/Npgsql.Tests/BatchTests.cs +++ b/test/Npgsql.Tests/BatchTests.cs @@ -781,7 +781,6 @@ public async Task Batch_with_auto_prepare_reuse() } } -#if NET6_0_OR_GREATER // no batch reuse until 6.0 [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5239")] public async Task Batch_dispose_reuse() { @@ -814,7 +813,6 @@ public async Task Batch_dispose_reuse() Assert.That(await batch.ExecuteScalarAsync(), Is.EqualTo(3)); } } -#endif #endregion Miscellaneous diff --git a/test/Npgsql.Tests/CopyTests.cs b/test/Npgsql.Tests/CopyTests.cs index 764c3db808..64406b6b96 100644 --- a/test/Npgsql.Tests/CopyTests.cs +++ b/test/Npgsql.Tests/CopyTests.cs @@ -598,9 +598,7 @@ public async Task Export_long_string() { var str = reader.Read(); Assert.That(str.Length, Is.EqualTo(len)); -#if NET6_0_OR_GREATER Assert.True(str.AsSpan().IndexOfAnyExcept('x') is -1); -#endif } } Assert.That(row, Is.EqualTo(100)); diff --git a/test/Npgsql.Tests/TestUtil.cs b/test/Npgsql.Tests/TestUtil.cs index b2e61317ad..81b1140f48 100644 --- a/test/Npgsql.Tests/TestUtil.cs +++ b/test/Npgsql.Tests/TestUtil.cs @@ -538,20 +538,3 @@ public enum PooledOrNot Pooled, Unpooled } 
- -#if NETSTANDARD2_0 - static class QueueExtensions - { - public static bool TryDequeue(this Queue queue, out T result) - { - if (queue.Count == 0) - { - result = default; - return false; - } - - result = queue.Dequeue(); - return true; - } - } -#endif diff --git a/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs b/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs index 8508979b31..e1ccad4445 100644 --- a/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs +++ b/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs @@ -85,7 +85,6 @@ public Task Date_DateTime(DateTime dateTime, string sqlLiteral, string infinityC "date", NpgsqlDbType.Date, DbType.Date, isDefault: false); -#if NET6_0_OR_GREATER static readonly TestCaseData[] DateOnlyDateTimeValues = { new TestCaseData(DateOnly.MinValue.AddYears(1), "0002-01-01", "0002-01-01") @@ -101,7 +100,6 @@ public Task Date_DateOnly(DateOnly dateTime, string sqlLiteral, string infinityC => AssertType(dateTime, DisableDateTimeInfinityConversions ? sqlLiteral : infinityConvertedSqlLiteral, "date", NpgsqlDbType.Date, DbType.Date, isDefault: false); -#endif NpgsqlDataSource? 
_dataSource; protected override NpgsqlDataSource DataSource => _dataSource ??= CreateDataSource(csb => csb.Timezone = "UTC"); diff --git a/test/Npgsql.Tests/Types/DateTimeTests.cs b/test/Npgsql.Tests/Types/DateTimeTests.cs index 434b87705f..ec91148f2c 100644 --- a/test/Npgsql.Tests/Types/DateTimeTests.cs +++ b/test/Npgsql.Tests/Types/DateTimeTests.cs @@ -53,7 +53,6 @@ await AssertType( isDefaultForWriting: false); } -#if NET6_0_OR_GREATER [Test] public Task Date_as_DateOnly() => AssertType(new DateOnly(2020, 10, 1), "2020-10-01", "date", NpgsqlDbType.Date, DbType.Date, isDefaultForReading: false); @@ -98,7 +97,6 @@ await AssertType( NpgsqlDbType.DateMultirange, isDefaultForReading: false); } -#endif #endregion @@ -114,7 +112,6 @@ public Task Time_as_TimeSpan() DbType.Time, isDefaultForWriting: false); -#if NET6_0_OR_GREATER [Test] public Task Time_as_TimeOnly() => AssertType( @@ -124,7 +121,6 @@ public Task Time_as_TimeOnly() NpgsqlDbType.Time, DbType.Time, isDefaultForReading: false); -#endif #endregion diff --git a/test/Npgsql.Tests/Types/JsonDynamicTests.cs b/test/Npgsql.Tests/Types/JsonDynamicTests.cs index 73b0965d12..3c948a4816 100644 --- a/test/Npgsql.Tests/Types/JsonDynamicTests.cs +++ b/test/Npgsql.Tests/Types/JsonDynamicTests.cs @@ -15,7 +15,6 @@ namespace Npgsql.Tests.Types; [TestFixture(MultiplexingMode.Multiplexing, NpgsqlDbType.Jsonb)] public class JsonDynamicTests : MultiplexingTestBase { -#if NET6_0_OR_GREATER [Test] public Task Roundtrip_JsonObject() => AssertType( @@ -73,7 +72,6 @@ public async Task Write_jsonobject_array_without_npgsqldbtype() cmd.Parameters.Add(new("p", new[] { jsonObject1, jsonObject2 })); await cmd.ExecuteNonQueryAsync(); } -#endif [Test] public async Task As_poco() diff --git a/test/Npgsql.Tests/Types/MultirangeTests.cs b/test/Npgsql.Tests/Types/MultirangeTests.cs index 92f27bb50f..1eeb95f5ac 100644 --- a/test/Npgsql.Tests/Types/MultirangeTests.cs +++ b/test/Npgsql.Tests/Types/MultirangeTests.cs @@ -75,7 +75,6 @@ public class 
MultirangeTests : TestBase """{["2020-01-01 01:00:00+01","2020-01-05 01:00:00+01"),["2020-01-10 01:00:00+01",)}""", "tstzmultirange", NpgsqlDbType.TimestampTzMultirange, true, true, default(NpgsqlRange)) .SetName("DateTime TimestampTzMultirange"), -#if NET6_0_OR_GREATER new TestCaseData( new NpgsqlRange[] { @@ -84,7 +83,6 @@ public class MultirangeTests : TestBase }, "{[2020-01-01,2020-01-05),[2020-01-10,)}", "datemultirange", NpgsqlDbType.DateMultirange, false, false, default(NpgsqlRange)) .SetName("DateOnly"), -#endif }; [Test, TestCaseSource(nameof(MultirangeTestCases))] From f36116723482287ca03ee3d3976512a4b9965e27 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 21 Nov 2023 21:15:26 +0000 Subject: [PATCH 328/761] Bump xunit.runner.visualstudio from 2.4.5 to 2.5.4 (#5435) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 13ad03619b..00a81aefa3 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -39,7 +39,7 @@ - + From 1663e381938f82a87916e1ee39f3ce245435b4fb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emmanuel=20Andr=C3=A9?= <2341261+manandre@users.noreply.github.com> Date: Wed, 22 Nov 2023 00:02:45 +0100 Subject: [PATCH 329/761] Additional cleanup after netstandard drop (#5436) --- .github/workflows/build.yml | 1 - Directory.Build.props | 7 ------- Directory.Packages.props | 3 --- src/Npgsql/Internal/Converters/JsonConverter.cs | 5 ++--- src/Npgsql/Internal/HackyEnumTypeMapping.cs | 1 - src/Npgsql/Internal/NpgsqlConnector.cs | 6 +----- src/Npgsql/Internal/NpgsqlWriteBuffer.cs | 1 - src/Npgsql/KerberosUsernameProvider.cs | 2 -- src/Npgsql/Npgsql.csproj | 10 +--------- src/Npgsql/NpgsqlBatchCommand.cs | 2 -- src/Npgsql/NpgsqlDataReader.cs | 1 - src/Npgsql/NpgsqlDataSource.cs | 2 -- src/Npgsql/NpgsqlFactory.cs | 1 - src/Npgsql/NpgsqlParameter.cs | 4 ---- 
src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs | 3 --- src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs | 1 - src/Npgsql/PostgresTypes/PostgresUnknownType.cs | 2 -- src/Npgsql/PregeneratedMessages.cs | 2 -- src/Shared/CodeAnalysis.cs | 4 ---- test/Directory.Build.props | 11 +---------- test/Npgsql.Tests/ConnectionTests.cs | 6 +----- test/Npgsql.Tests/ExceptionTests.cs | 3 --- test/Npgsql.Tests/MultipleHostsTests.cs | 4 ---- test/Npgsql.Tests/ReadBufferTests.cs | 2 -- test/Npgsql.Tests/TypesTests.cs | 2 -- test/Npgsql.Tests/WriteBufferTests.cs | 2 -- 26 files changed, 6 insertions(+), 82 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index c1e63484ab..d1ab46b71b 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -69,7 +69,6 @@ jobs: with: dotnet-version: | ${{ env.dotnet_sdk_version }} - 3.1.x - name: Build run: dotnet build -c ${{ matrix.config }} diff --git a/Directory.Build.props b/Directory.Build.props index 2b4dfb1b41..57494750c7 100644 --- a/Directory.Build.props +++ b/Directory.Build.props @@ -19,16 +19,9 @@ true snupkg true - $(NoWarn);NETSDK1138 true - - - disable - $(NoWarn);CS8632;CS8600 - - diff --git a/Directory.Packages.props b/Directory.Packages.props index 00a81aefa3..b2535c4bfc 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -10,11 +10,8 @@ - - - diff --git a/src/Npgsql/Internal/Converters/JsonConverter.cs b/src/Npgsql/Internal/Converters/JsonConverter.cs index 3591bbacda..fa654c9c64 100644 --- a/src/Npgsql/Internal/Converters/JsonConverter.cs +++ b/src/Npgsql/Internal/Converters/JsonConverter.cs @@ -113,12 +113,11 @@ public static bool TryReadStream(bool jsonb, Encoding encoding, PgReader reader, var isUtf8 = encoding.CodePage == Encoding.UTF8.CodePage; byteCount = reader.CurrentRemaining; - // We always fall back to buffers on older targets if (isUtf8 || byteCount >= StreamingThreshold) { stream = !isUtf8 - ? 
Encoding.CreateTranscodingStream(reader.GetStream(), encoding, Encoding.UTF8) - : reader.GetStream(); + ? Encoding.CreateTranscodingStream(reader.GetStream(), encoding, Encoding.UTF8) + : reader.GetStream(); } else stream = null; diff --git a/src/Npgsql/Internal/HackyEnumTypeMapping.cs b/src/Npgsql/Internal/HackyEnumTypeMapping.cs index de50c40fad..1aa4b27554 100644 --- a/src/Npgsql/Internal/HackyEnumTypeMapping.cs +++ b/src/Npgsql/Internal/HackyEnumTypeMapping.cs @@ -8,7 +8,6 @@ namespace Npgsql.Internal; -#pragma warning disable CS1591 // Missing XML comment for publicly visible type or member /// /// Hacky temporary measure used by EFCore.PG to extract user-configured enum mappings. Accessed via reflection only. diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 4f644d0b6a..8b65d0f4a7 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -1035,7 +1035,7 @@ Task GetHostAddressesAsync(CancellationToken ct) => static Task OpenSocketConnectionAsync(Socket socket, EndPoint endpoint, NpgsqlTimeout perIpTimeout, CancellationToken cancellationToken) { - // Whether the framework and/or the OS platform support Socket.ConnectAsync cancellation API or they do not, + // Whether the OS platform supports Socket.ConnectAsync cancellation API or not, // we always fake-cancel the operation with the help of TaskTimeoutAndCancellation.ExecuteAsync. It stops waiting // and raises the exception, while the actual task may be left running. Task ConnectAsync(CancellationToken ct) => @@ -2484,7 +2484,6 @@ internal void EndUserAction() #region Keepalive -#pragma warning disable CA1801 // Review unused parameters void PerformKeepAlive(object? state) { Debug.Assert(_isKeepAliveEnabled); @@ -2523,7 +2522,6 @@ void PerformKeepAlive(object? 
state) Monitor.Exit(SyncObj); } } -#pragma warning restore CA1801 // Review unused parameters #endregion @@ -2792,9 +2790,7 @@ enum ConnectorState Replication, } -#pragma warning disable CA1717 enum TransactionStatus : byte -#pragma warning restore CA1717 { /// /// Currently not in a transaction block diff --git a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs index bb46ea0765..316495eaa8 100644 --- a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs @@ -10,7 +10,6 @@ using Npgsql.Util; using static System.Threading.Timeout; -#pragma warning disable CS1591 // Missing XML comment for publicly visible type or member namespace Npgsql.Internal; /// diff --git a/src/Npgsql/KerberosUsernameProvider.cs b/src/Npgsql/KerberosUsernameProvider.cs index d36f2b06c9..0395bca337 100644 --- a/src/Npgsql/KerberosUsernameProvider.cs +++ b/src/Npgsql/KerberosUsernameProvider.cs @@ -44,9 +44,7 @@ sealed class KerberosUsernameProvider return GetUsernameAsyncInternal(); -#pragma warning disable CS1998 async ValueTask GetUsernameAsyncInternal() -#pragma warning restore CS1998 { if (async) await process.WaitForExitAsync(cancellationToken).ConfigureAwait(false); diff --git a/src/Npgsql/Npgsql.csproj b/src/Npgsql/Npgsql.csproj index 5e6b6e3c89..ecae24940a 100644 --- a/src/Npgsql/Npgsql.csproj +++ b/src/Npgsql/Npgsql.csproj @@ -22,16 +22,8 @@ - - - - - - - - - + diff --git a/src/Npgsql/NpgsqlBatchCommand.cs b/src/Npgsql/NpgsqlBatchCommand.cs index 4123a91506..8175afa614 100644 --- a/src/Npgsql/NpgsqlBatchCommand.cs +++ b/src/Npgsql/NpgsqlBatchCommand.cs @@ -41,7 +41,6 @@ public override string CommandText /// public new NpgsqlParameterCollection Parameters => _parameters ??= new(); -#pragma warning disable CA1822 // Mark members as static #if NET8_0_OR_GREATER /// @@ -66,7 +65,6 @@ public bool CanCreateParameter #endif => true; -#pragma warning restore CA1822 // Mark members as static /// /// Appends an error barrier 
after this batch command. Defaults to the value of on the diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 681fc8c94a..3f2b7fcb80 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -21,7 +21,6 @@ using NpgsqlTypes; using static Npgsql.Util.Statics; -#pragma warning disable CA2222 // Do not decrease inherited member visibility namespace Npgsql; /// diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index 3281359ab9..b26dd62b61 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -489,7 +489,6 @@ protected sealed override ValueTask DisposeAsyncCore() return default; } -#pragma warning disable CS1998 /// protected virtual async ValueTask DisposeAsyncBase() { @@ -508,7 +507,6 @@ protected virtual async ValueTask DisposeAsyncBase() // TODO: async Clear, #4499 Clear(); } -#pragma warning restore CS1998 private protected void CheckDisposed() { diff --git a/src/Npgsql/NpgsqlFactory.cs b/src/Npgsql/NpgsqlFactory.cs index 6df62a9cdc..15a1cd431e 100644 --- a/src/Npgsql/NpgsqlFactory.cs +++ b/src/Npgsql/NpgsqlFactory.cs @@ -1,6 +1,5 @@ using System; using System.Data.Common; -using System.Diagnostics.CodeAnalysis; namespace Npgsql; diff --git a/src/Npgsql/NpgsqlParameter.cs b/src/Npgsql/NpgsqlParameter.cs index e7b0cf8def..997eb6f56d 100644 --- a/src/Npgsql/NpgsqlParameter.cs +++ b/src/Npgsql/NpgsqlParameter.cs @@ -418,7 +418,6 @@ public string? DataTypeName [Category("Data")] public sealed override ParameterDirection Direction { get; set; } -#pragma warning disable CS0109 /// /// Gets or sets the maximum number of digits used to represent the property. /// @@ -444,7 +443,6 @@ public string? 
DataTypeName get => _scale; set => _scale = value; } -#pragma warning restore CS0109 /// [DefaultValue(0)] @@ -478,12 +476,10 @@ public sealed override string SourceColumn /// public sealed override bool SourceColumnNullMapping { get; set; } -#pragma warning disable CA2227 /// /// The collection to which this parameter belongs, if any. /// public NpgsqlParameterCollection? Collection { get; set; } -#pragma warning restore CA2227 /// /// The PostgreSQL data type, such as int4 or text, as discovered from pg_type. diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs b/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs index 9d49dc7c3a..eb1dfbb86b 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs @@ -2,7 +2,6 @@ using System.Collections.Generic; using System.Text; -#pragma warning disable CA1034 // ReSharper disable once CheckNamespace namespace NpgsqlTypes; @@ -470,10 +469,8 @@ public NpgsqlTsQueryLexeme(string text, Weight weights, bool isPrefixSearch) /// /// Weight enum, can be OR'ed together. /// -#pragma warning disable CA1714 [Flags] public enum Weight -#pragma warning restore CA1714 { /// /// None diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs b/src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs index 76f097f0ac..2ec4c66afe 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs @@ -3,7 +3,6 @@ using System.Collections.Generic; using System.Text; -#pragma warning disable CA1040, CA1034 // ReSharper disable once CheckNamespace namespace NpgsqlTypes; diff --git a/src/Npgsql/PostgresTypes/PostgresUnknownType.cs b/src/Npgsql/PostgresTypes/PostgresUnknownType.cs index bbe952726d..2955295000 100644 --- a/src/Npgsql/PostgresTypes/PostgresUnknownType.cs +++ b/src/Npgsql/PostgresTypes/PostgresUnknownType.cs @@ -10,7 +10,5 @@ public sealed class UnknownBackendType : PostgresType /// /// Constructs a the unknown backend type. 
/// -#pragma warning disable CA2222 // Do not decrease inherited member visibility UnknownBackendType() : base("", "", 0) { } -#pragma warning restore CA2222 // Do not decrease inherited member visibility } diff --git a/src/Npgsql/PregeneratedMessages.cs b/src/Npgsql/PregeneratedMessages.cs index b6d2e4dd02..54c736b64c 100644 --- a/src/Npgsql/PregeneratedMessages.cs +++ b/src/Npgsql/PregeneratedMessages.cs @@ -9,11 +9,9 @@ static class PregeneratedMessages { static PregeneratedMessages() { -#pragma warning disable CS8625 // This is the only use of a write buffer without a connector, for in-memory construction of // pregenerated messages. using var buf = new NpgsqlWriteBuffer(null, new MemoryStream(), null, NpgsqlWriteBuffer.MinimumSize, Encoding.ASCII); -#pragma warning restore CS8625 BeginTransRepeatableRead = Generate(buf, "BEGIN ISOLATION LEVEL REPEATABLE READ"); BeginTransSerializable = Generate(buf, "BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE"); diff --git a/src/Shared/CodeAnalysis.cs b/src/Shared/CodeAnalysis.cs index 75e3055189..0e98a03210 100644 --- a/src/Shared/CodeAnalysis.cs +++ b/src/Shared/CodeAnalysis.cs @@ -1,7 +1,3 @@ -using System; -using System.Diagnostics.CodeAnalysis; - -#pragma warning disable 1591 namespace System.Diagnostics.CodeAnalysis { diff --git a/test/Directory.Build.props b/test/Directory.Build.props index 7fefd32afd..b51b1c04ba 100644 --- a/test/Directory.Build.props +++ b/test/Directory.Build.props @@ -1,4 +1,4 @@ - + @@ -9,15 +9,6 @@ $(NoWarn);CA2252 - - - true - - diff --git a/test/Npgsql.Tests/ConnectionTests.cs b/test/Npgsql.Tests/ConnectionTests.cs index 497cb888a2..d0fb7f827b 100644 --- a/test/Npgsql.Tests/ConnectionTests.cs +++ b/test/Npgsql.Tests/ConnectionTests.cs @@ -720,12 +720,8 @@ public async Task Reload_types_keepalive_concurrent() var startTimestamp = Stopwatch.GetTimestamp(); // Give a few seconds for a KeepAlive to possibly perform - while (GetElapsedTime(startTimestamp).TotalSeconds < 2) + while 
(Stopwatch.GetElapsedTime(startTimestamp).TotalSeconds < 2) Assert.DoesNotThrow(conn.ReloadTypes); - - // dotnet 3.1 doesn't have Stopwatch.GetElapsedTime method. - static TimeSpan GetElapsedTime(long startingTimestamp) => - new((long)((Stopwatch.GetTimestamp() - startingTimestamp) * ((double)10000000 / Stopwatch.Frequency))); } #region ChangeDatabase diff --git a/test/Npgsql.Tests/ExceptionTests.cs b/test/Npgsql.Tests/ExceptionTests.cs index f9f8821c4d..ac87ef2b0e 100644 --- a/test/Npgsql.Tests/ExceptionTests.cs +++ b/test/Npgsql.Tests/ExceptionTests.cs @@ -250,7 +250,6 @@ PostgresException CreateWithSqlState(string sqlState) #pragma warning disable SYSLIB0011 #pragma warning disable SYSLIB0050 -#pragma warning disable 618 [Test] public void Serialization() { @@ -286,9 +285,7 @@ public void Serialization() } SerializationInfo CreateSerializationInfo() => new(typeof(PostgresException), new FormatterConverter()); -#pragma warning restore 618 #pragma warning restore SYSLIB0011 -#pragma warning disable SYSLIB0050 #pragma warning disable SYSLIB0051 [Test] diff --git a/test/Npgsql.Tests/MultipleHostsTests.cs b/test/Npgsql.Tests/MultipleHostsTests.cs index bbd2064504..f4f2dfffb7 100644 --- a/test/Npgsql.Tests/MultipleHostsTests.cs +++ b/test/Npgsql.Tests/MultipleHostsTests.cs @@ -161,10 +161,6 @@ public async Task Valid_host_not_found(TargetSessionAttributes targetSessionAttr [Test, Platform(Exclude = "MacOsX", Reason = "#3786")] public void All_hosts_are_down() { - // Different exception raised in .NET Core 3.1, skip (NUnit doesn't seem to support detecting .NET Core versions) - if (RuntimeInformation.FrameworkDescription.StartsWith(".NET Core 3.1")) - return; - var endpoint = new IPEndPoint(IPAddress.Loopback, 0); using var socket1 = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp); diff --git a/test/Npgsql.Tests/ReadBufferTests.cs b/test/Npgsql.Tests/ReadBufferTests.cs index b9ace59606..7d33bf68e1 100644 --- 
a/test/Npgsql.Tests/ReadBufferTests.cs +++ b/test/Npgsql.Tests/ReadBufferTests.cs @@ -84,7 +84,6 @@ public async Task ReadNullTerminatedString_with_io() Assert.That(ReadBuffer.ReadNullTerminatedString(), Is.EqualTo("bar")); } -#pragma warning disable CS8625 [SetUp] public void SetUp() { @@ -92,7 +91,6 @@ public void SetUp() ReadBuffer = new NpgsqlReadBuffer(null, stream, null, NpgsqlReadBuffer.DefaultSize, NpgsqlWriteBuffer.UTF8Encoding, NpgsqlWriteBuffer.RelaxedUTF8Encoding); Writer = stream.Writer; } -#pragma warning restore CS8625 // ReSharper disable once InconsistentNaming NpgsqlReadBuffer ReadBuffer = default!; diff --git a/test/Npgsql.Tests/TypesTests.cs b/test/Npgsql.Tests/TypesTests.cs index 30bae390d3..610d640a02 100644 --- a/test/Npgsql.Tests/TypesTests.cs +++ b/test/Npgsql.Tests/TypesTests.cs @@ -202,7 +202,6 @@ public void Bug1011018() var o = p.Value; } -#pragma warning disable 618 [Test] [IssueLink("https://github.com/npgsql/npgsql/issues/750")] public void NpgsqlInet() @@ -210,5 +209,4 @@ public void NpgsqlInet() var v = new NpgsqlInet(IPAddress.Parse("2001:1db8:85a3:1142:1000:8a2e:1370:7334"), 32); Assert.That(v.ToString(), Is.EqualTo("2001:1db8:85a3:1142:1000:8a2e:1370:7334/32")); } -#pragma warning restore 618 } diff --git a/test/Npgsql.Tests/WriteBufferTests.cs b/test/Npgsql.Tests/WriteBufferTests.cs index d1fbe68071..99e5626b75 100644 --- a/test/Npgsql.Tests/WriteBufferTests.cs +++ b/test/Npgsql.Tests/WriteBufferTests.cs @@ -107,14 +107,12 @@ public void Chunked_char_array_encoding_fits_with_surrogates() Assert.That(completed, Is.False); } -#pragma warning disable CS8625 [SetUp] public void SetUp() { Underlying = new MemoryStream(); WriteBuffer = new NpgsqlWriteBuffer(null, Underlying, null, NpgsqlReadBuffer.DefaultSize, NpgsqlWriteBuffer.UTF8Encoding); } -#pragma warning restore CS8625 // ReSharper disable once InconsistentNaming NpgsqlWriteBuffer WriteBuffer = default!; From 01f92e08672b090b5ec6c985fc3482499f406713 Mon Sep 17 00:00:00 2001 
From: Nino Floris Date: Wed, 22 Nov 2023 16:31:25 +0100 Subject: [PATCH 330/761] Remove EOL PostgreSQL version 11 from CI (#5441) --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index d1ab46b71b..1136792994 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -30,7 +30,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-22.04, windows-2022] - pg_major: [16, 15, 14, 13, 12, 11] + pg_major: [16, 15, 14, 13, 12] config: [Release] test_tfm: [net8.0] include: From 7b515c7da1cb6913dfc2487b5aa0adcc8c6c8ba1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 24 Nov 2023 23:19:14 +0100 Subject: [PATCH 331/761] Bump Microsoft.CodeAnalysis.CSharp from 4.7.0 to 4.8.0 (#5444) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index b2535c4bfc..c9f6dd6409 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -24,7 +24,7 @@ - + From 9e3a8dffe7dbdde13767e38a674bdeb0b3828d11 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Mon, 27 Nov 2023 21:28:27 +0300 Subject: [PATCH 332/761] Fix explicit batch preparation reset (#5459) Fixes #5458 --- src/Npgsql/NpgsqlCommand.cs | 1 + test/Npgsql.Tests/PrepareTests.cs | 126 ++++++++++++++++++++++ test/Npgsql.Tests/Support/PgServerMock.cs | 14 +++ 3 files changed, 141 insertions(+) diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index 593be24efe..eaf11d51ff 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -659,6 +659,7 @@ Task Prepare(bool async, CancellationToken cancellationToken = default) ProcessRawQuery(connector.SqlQueryParser, connector.UseConformingStrings, batchCommand); needToPrepare = batchCommand.ExplicitPrepare(connector) || needToPrepare; + batchCommand.ConnectorPreparedOn = 
connector; } if (logger.IsEnabled(LogLevel.Debug) && needToPrepare) diff --git a/test/Npgsql.Tests/PrepareTests.cs b/test/Npgsql.Tests/PrepareTests.cs index c0814daa3f..1d9c6dde85 100644 --- a/test/Npgsql.Tests/PrepareTests.cs +++ b/test/Npgsql.Tests/PrepareTests.cs @@ -5,6 +5,9 @@ using System.Text; using System.Threading; using System.Threading.Tasks; +using Npgsql.BackendMessages; +using Npgsql.Internal.Postgres; +using Npgsql.Tests.Support; using NpgsqlTypes; using NUnit.Framework; using static Npgsql.Tests.TestUtil; @@ -13,6 +16,8 @@ namespace Npgsql.Tests; public class PrepareTests: TestBase { + static uint Int4Oid => PostgresMinimalDatabaseInfo.DefaultTypeCatalog.GetOid(DataTypeNames.Int4).Value; + [Test] public void Basic() { @@ -771,6 +776,127 @@ public async Task Explicit_prepare_unprepare_many_queries() await cmd.UnprepareAsync(); } + [Test] + public async Task Explicitly_prepared_batch_sends_prepared_queries() + { + await using var postmaster = PgPostmasterMock.Start(ConnectionString); + await using var dataSource = CreateDataSource(postmaster.ConnectionString); + + await using var conn = await dataSource.OpenConnectionAsync(); + var server = await postmaster.WaitForServerConnection(); + + await using var batch = new NpgsqlBatch(conn) + { + BatchCommands = { new("SELECT 1"), new("SELECT 2") } + }; + + var prepareTask = batch.PrepareAsync(); + + await server.ExpectMessages( + FrontendMessageCode.Parse, FrontendMessageCode.Describe, + FrontendMessageCode.Parse, FrontendMessageCode.Describe, + FrontendMessageCode.Sync); + + await server + .WriteParseComplete() + .WriteParameterDescription(new FieldDescription(Int4Oid)) + .WriteRowDescription(new FieldDescription(Int4Oid)) + .WriteParseComplete() + .WriteParameterDescription(new FieldDescription(Int4Oid)) + .WriteRowDescription(new FieldDescription(Int4Oid)) + .WriteReadyForQuery() + .FlushAsync(); + + await prepareTask; + + for (var i = 0; i < 2; i++) + await ExecutePreparedBatch(batch, server); + + async 
Task ExecutePreparedBatch(NpgsqlBatch batch, PgServerMock server) + { + var executeBatchTask = batch.ExecuteNonQueryAsync(); + + await server.ExpectMessages( + FrontendMessageCode.Bind, FrontendMessageCode.Execute, + FrontendMessageCode.Bind, FrontendMessageCode.Execute, + FrontendMessageCode.Sync); + + await server + .WriteBindComplete() + .WriteCommandComplete() + .WriteBindComplete() + .WriteCommandComplete() + .WriteReadyForQuery() + .FlushAsync(); + + await executeBatchTask; + } + } + + [Test] + public async Task Auto_prepared_batch_sends_prepared_queries() + { + var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + { + AutoPrepareMinUsages = 1, + MaxAutoPrepare = 10 + }; + await using var postmaster = PgPostmasterMock.Start(csb.ConnectionString); + await using var dataSource = CreateDataSource(postmaster.ConnectionString); + + await using var conn = await dataSource.OpenConnectionAsync(); + var server = await postmaster.WaitForServerConnection(); + + await using var batch = new NpgsqlBatch(conn) + { + BatchCommands = { new("SELECT 1"), new("SELECT 2") } + }; + + var firstBatchExecuteTask = batch.ExecuteNonQueryAsync(); + + await server.ExpectMessages( + FrontendMessageCode.Parse, FrontendMessageCode.Bind, FrontendMessageCode.Describe, FrontendMessageCode.Execute, + FrontendMessageCode.Parse, FrontendMessageCode.Bind, FrontendMessageCode.Describe, FrontendMessageCode.Execute, + FrontendMessageCode.Sync); + + await server + .WriteParseComplete() + .WriteBindComplete() + .WriteRowDescription(new FieldDescription(Int4Oid)) + .WriteCommandComplete() + .WriteParseComplete() + .WriteBindComplete() + .WriteRowDescription(new FieldDescription(Int4Oid)) + .WriteCommandComplete() + .WriteReadyForQuery() + .FlushAsync(); + + await firstBatchExecuteTask; + + for (var i = 0; i < 2; i++) + await ExecutePreparedBatch(batch, server); + + async Task ExecutePreparedBatch(NpgsqlBatch batch, PgServerMock server) + { + var executeBatchTask = batch.ExecuteNonQueryAsync(); 
+ + await server.ExpectMessages( + FrontendMessageCode.Bind, FrontendMessageCode.Execute, + FrontendMessageCode.Bind, FrontendMessageCode.Execute, + FrontendMessageCode.Sync); + + await server + .WriteBindComplete() + .WriteCommandComplete() + .WriteBindComplete() + .WriteCommandComplete() + .WriteReadyForQuery() + .FlushAsync(); + + await executeBatchTask; + } + } + NpgsqlConnection OpenConnectionAndUnprepare() { var conn = OpenConnection(); diff --git a/test/Npgsql.Tests/Support/PgServerMock.cs b/test/Npgsql.Tests/Support/PgServerMock.cs index 0b81b40021..c34a9315c8 100644 --- a/test/Npgsql.Tests/Support/PgServerMock.cs +++ b/test/Npgsql.Tests/Support/PgServerMock.cs @@ -228,6 +228,20 @@ internal PgServerMock WriteRowDescription(params FieldDescription[] fields) return this; } + internal PgServerMock WriteParameterDescription(params FieldDescription[] fields) + { + CheckDisposed(); + + _writeBuffer.WriteByte((byte)BackendMessageCode.ParameterDescription); + _writeBuffer.WriteInt32(1 + 4 + 2 + fields.Length * 4); + _writeBuffer.WriteUInt16((ushort)fields.Length); + + foreach (var field in fields) + _writeBuffer.WriteUInt32(field.TypeOID); + + return this; + } + internal PgServerMock WriteNoData() { CheckDisposed(); From 69ee3a4dacfdf2a87adb8391671190861d9b33da Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Tue, 28 Nov 2023 00:24:25 +0100 Subject: [PATCH 333/761] Fix timestamp to be returned as Unspecified DateTime in legacy mode (#5466) Fixes #5465 --- .../Converters/Temporal/LegacyDateTimeConverter.cs | 7 ++++++- test/Npgsql.Tests/Types/LegacyDateTimeTests.cs | 8 ++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/src/Npgsql/Internal/Converters/Temporal/LegacyDateTimeConverter.cs b/src/Npgsql/Internal/Converters/Temporal/LegacyDateTimeConverter.cs index 5e6306da56..a6d53a54a2 100644 --- a/src/Npgsql/Internal/Converters/Temporal/LegacyDateTimeConverter.cs +++ b/src/Npgsql/Internal/Converters/Temporal/LegacyDateTimeConverter.cs @@ -21,8 
+21,13 @@ public override bool CanConvert(DataFormat format, out BufferRequirements buffer protected override DateTime ReadCore(PgReader reader) { + if (_timestamp) + { + return PgTimestamp.Decode(reader.ReadInt64(), DateTimeKind.Unspecified, _dateTimeInfinityConversions); + } + var dateTime = PgTimestamp.Decode(reader.ReadInt64(), DateTimeKind.Utc, _dateTimeInfinityConversions); - return !_timestamp && (!_dateTimeInfinityConversions || dateTime != DateTime.MaxValue && dateTime != DateTime.MinValue) + return !_dateTimeInfinityConversions || dateTime != DateTime.MaxValue && dateTime != DateTime.MinValue ? dateTime.ToLocalTime() : dateTime; } diff --git a/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs b/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs index 5016525268..78460c27db 100644 --- a/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs +++ b/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs @@ -21,6 +21,14 @@ public Task Timestamp_with_all_DateTime_kinds([Values] DateTimeKind kind) NpgsqlDbType.Timestamp, DbType.DateTime); + [Test] + public async Task Timestamp_read_as_Unspecified_DateTime() + { + await using var command = DataSource.CreateCommand("SELECT '2020-03-01T10:30:00'::timestamp"); + var dateTime = (DateTime)(await command.ExecuteScalarAsync())!; + Assert.That(dateTime.Kind, Is.EqualTo(DateTimeKind.Unspecified)); + } + [Test] [TestCase(DateTimeKind.Utc, TestName = "Timestamptz_write_utc_DateTime_does_not_convert")] [TestCase(DateTimeKind.Unspecified, TestName = "Timestamptz_write_unspecified_DateTime_does_not_convert")] From 5ba15925fc7379b983632d50eb749f1f69e16a2c Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Tue, 28 Nov 2023 23:39:27 +0100 Subject: [PATCH 334/761] Respect max name length (#5463) Fixes #5460 --- src/Npgsql/Internal/Postgres/DataTypeName.cs | 7 +++--- test/Npgsql.Tests/DataTypeNameTests.cs | 26 ++++++++++++++++++++ 2 files changed, 30 insertions(+), 3 deletions(-) create mode 100644 test/Npgsql.Tests/DataTypeNameTests.cs diff --git 
a/src/Npgsql/Internal/Postgres/DataTypeName.cs b/src/Npgsql/Internal/Postgres/DataTypeName.cs index 96e7da067f..c95ea7684b 100644 --- a/src/Npgsql/Internal/Postgres/DataTypeName.cs +++ b/src/Npgsql/Internal/Postgres/DataTypeName.cs @@ -16,7 +16,7 @@ namespace Npgsql.Internal.Postgres; /// We need to respect this to get to valid names when deriving them (for multirange/arrays etc). /// This does not include the namespace. /// - const int NAMEDATALEN = 64 - 1; // Minus null terminator. + internal const int NAMEDATALEN = 64 - 1; // Minus null terminator. readonly string _value; @@ -32,10 +32,11 @@ namespace Npgsql.Internal.Postgres; if (fullyQualifiedDataTypeName.AsSpan(schemaEndIndex).EndsWith("[]".AsSpan())) fullyQualifiedDataTypeName = NormalizeName(fullyQualifiedDataTypeName); - var typeNameLength = fullyQualifiedDataTypeName.Length - schemaEndIndex + 1; + var typeNameLength = fullyQualifiedDataTypeName.Length - (schemaEndIndex + 1); if (typeNameLength > NAMEDATALEN) throw new ArgumentException( - $"Name is too long and would be truncated to: {fullyQualifiedDataTypeName.Substring(0, fullyQualifiedDataTypeName.Length - typeNameLength + NAMEDATALEN)}"); + $"Name is too long and would be truncated to: {fullyQualifiedDataTypeName.Substring(0, + fullyQualifiedDataTypeName.Length - typeNameLength + NAMEDATALEN)}"); } _value = fullyQualifiedDataTypeName; diff --git a/test/Npgsql.Tests/DataTypeNameTests.cs b/test/Npgsql.Tests/DataTypeNameTests.cs new file mode 100644 index 0000000000..fd366d8258 --- /dev/null +++ b/test/Npgsql.Tests/DataTypeNameTests.cs @@ -0,0 +1,26 @@ +using System; +using Npgsql.Internal.Postgres; +using NUnit.Framework; + +namespace Npgsql.Tests; + +public class DataTypeNameTests +{ + [Test] + public void MaxLengthDataTypeName() + { + var name = new string('a', DataTypeName.NAMEDATALEN); + var fullyQualifiedDataTypeName= $"public.{name}"; + Assert.DoesNotThrow(() => new DataTypeName(fullyQualifiedDataTypeName)); + Assert.AreEqual(new 
DataTypeName(fullyQualifiedDataTypeName).Value, fullyQualifiedDataTypeName); + } + + [Test] + public void TooLongDataTypeName() + { + var name = new string('a', DataTypeName.NAMEDATALEN + 1); + var fullyQualifiedDataTypeName= $"public.{name}"; + var exception = Assert.Throws(() => new DataTypeName(fullyQualifiedDataTypeName)); + Assert.That(exception!.Message, Does.EndWith($": public.{new string('a', DataTypeName.NAMEDATALEN)}")); + } +} From 6cfd3992de3d9ad64a3da57cefced3d1a88fc48a Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Wed, 29 Nov 2023 00:08:16 +0100 Subject: [PATCH 335/761] Fix sequential buffered seek (#5440) Fixes #5439 --- src/Npgsql/NpgsqlDataReader.cs | 45 +++++++++++++++++++++----------- test/Npgsql.Tests/ReaderTests.cs | 16 ++++++++++++ 2 files changed, 46 insertions(+), 15 deletions(-) diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 3f2b7fcb80..8d7beb4128 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -1921,10 +1921,13 @@ int SeekToColumnNonSequential(int ordinal, DataFormat dataFormat, bool resumable Debug.Assert(ordinal != currentColumn); if (ordinal > currentColumn) { - for (; currentColumn < ordinal - 1; currentColumn++) + // Written as a while to be able to increment _column directly after reading into it. 
+ while (_column < ordinal - 1) { columnLength = buffer.ReadInt32(); - if (columnLength is not -1) + _column++; + Debug.Assert(columnLength >= -1); + if (columnLength > 0) buffer.Skip(columnLength); } columnLength = buffer.ReadInt32(); @@ -1962,7 +1965,7 @@ int SeekBackwards() for (var lastColumnRead = _columns.Count; ordinal >= lastColumnRead; lastColumnRead++) { (Buffer.ReadPosition, var lastLen) = _columns[lastColumnRead - 1]; - if (lastLen is not -1) + if (lastLen > 0) buffer.Skip(lastLen); var len = Buffer.ReadInt32(); _columns.Add((Buffer.ReadPosition, len)); @@ -2026,17 +2029,21 @@ async ValueTask Core(bool async, bool commit, int ordinal, DataFormat dataF } // Seek to the requested column + int columnLength; var buffer = Buffer; - for (; _column < ordinal - 1; _column++) + // Written as a while to be able to increment _column directly after reading into it. + while (_column < ordinal - 1) { await buffer.Ensure(4, async).ConfigureAwait(false); - var len = buffer.ReadInt32(); - if (len != -1) - await buffer.Skip(len, async).ConfigureAwait(false); + columnLength = buffer.ReadInt32(); + _column++; + Debug.Assert(columnLength >= -1); + if (columnLength > 0) + await buffer.Skip(columnLength, async).ConfigureAwait(false); } await buffer.Ensure(4, async).ConfigureAwait(false); - var columnLength = buffer.ReadInt32(); + columnLength = buffer.ReadInt32(); _column = ordinal; PgReader.Init(columnLength, dataFormat, resumableOp); @@ -2054,11 +2061,14 @@ bool TrySeekBuffered(int ordinal, out int columnLength) // Skip over unwanted fields columnLength = -1; var buffer = Buffer; - for (; _column < ordinal - 1; _column++) + // Written as a while to be able to increment _column directly after reading into it. 
+ while (_column < ordinal - 1) { if (buffer.ReadBytesLeft < 4) return false; columnLength = buffer.ReadInt32(); + _column++; + Debug.Assert(columnLength >= -1); if (columnLength > 0) { if (buffer.ReadBytesLeft < columnLength) @@ -2085,7 +2095,7 @@ bool TrySeekBuffered(int ordinal, out int columnLength) Task ConsumeRow(bool async) { - Debug.Assert(State == ReaderState.InResult || State == ReaderState.BeforeResult); + Debug.Assert(State is ReaderState.InResult or ReaderState.BeforeResult); if (!_canConsumeRowNonSequentially) return ConsumeRowSequential(async); @@ -2100,13 +2110,18 @@ async Task ConsumeRowSequential(bool async) await PgReader.CommitAsync(resuming: false).ConfigureAwait(false); else PgReader.Commit(resuming: false); + // Skip over the remaining columns in the row - for (; _column < ColumnCount - 1; _column++) + var buffer = Buffer; + // Written as a while to be able to increment _column directly after reading into it. + while (_column < ColumnCount - 1) { - await Buffer.Ensure(4, async).ConfigureAwait(false); - var len = Buffer.ReadInt32(); - if (len != -1) - await Buffer.Skip(len, async).ConfigureAwait(false); + await buffer.Ensure(4, async).ConfigureAwait(false); + var columnLength = buffer.ReadInt32(); + _column++; + Debug.Assert(columnLength >= -1); + if (columnLength > 0) + await buffer.Skip(columnLength, async).ConfigureAwait(false); } } } diff --git a/test/Npgsql.Tests/ReaderTests.cs b/test/Npgsql.Tests/ReaderTests.cs index 0ef71bb1a8..aad79bc2bf 100644 --- a/test/Npgsql.Tests/ReaderTests.cs +++ b/test/Npgsql.Tests/ReaderTests.cs @@ -894,6 +894,22 @@ public async Task Interval_as_TimeSpan() var ts = dr.GetTimeSpan(0); } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5439")] + public async Task SequentialBufferedSeek() + { + await using var conn = await OpenConnectionAsync(); + using var cmd = conn.CreateCommand(); + cmd.CommandText = """select v.i, jsonb_build_object(), current_timestamp + make_interval(0, 0, 0, 0, 0, 0, v.i), 
null::jsonb, '{"value": 42}'::jsonb from generate_series(1, 1000) as v(i)"""; + var rdr = await cmd.ExecuteReaderAsync(CommandBehavior.SequentialAccess); + while (await rdr.ReadAsync()) { + var v1 = rdr[0]; + var v2 = rdr[1]; + //_ = rdr[2]; // uncomment line for successful execution + var v3 = rdr[3]; + var v4 = rdr[4]; + } + } + [Test] public async Task Close_connection_in_middle_of_row() { From 42ea28fda69dc7d54cff1df679942d8c1f49ef0e Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Wed, 29 Nov 2023 00:32:07 +0100 Subject: [PATCH 336/761] Fix EndRead throwing exceptions when a stream is active (#5454) Fixes #5450 --- src/Npgsql/Internal/PgReader.cs | 14 ++++++++------ test/Npgsql.Tests/ReaderTests.cs | 29 +++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+), 6 deletions(-) diff --git a/src/Npgsql/Internal/PgReader.cs b/src/Npgsql/Internal/PgReader.cs index 18a5544677..1d4e4221e1 100644 --- a/src/Npgsql/Internal/PgReader.cs +++ b/src/Npgsql/Internal/PgReader.cs @@ -363,7 +363,7 @@ public void Rewind(int count) /// The stream length, if any async ValueTask DisposeUserActiveStream(bool async) { - if (_userActiveStream is { IsDisposed: false }) + if (StreamActive) { if (async) await _userActiveStream.DisposeAsync().ConfigureAwait(false); @@ -486,7 +486,7 @@ internal void EndRead() return; } - if (FieldOffset != FieldSize) + if (FieldOffset != FieldSize && !StreamActive) ThrowNotConsumedExactly(); _fieldConsumed = true; @@ -501,7 +501,7 @@ internal ValueTask EndReadAsync() if (_fieldBufferRequirement is { Kind: SizeKind.UpperBound }) return ConsumeAsync(FieldRemaining); - if (FieldOffset != FieldSize) + if (FieldOffset != FieldSize && !StreamActive) ThrowNotConsumedExactly(); _fieldConsumed = true; @@ -560,9 +560,11 @@ internal async ValueTask Consume(bool async, int? count = null, CancellationToke public void Consume(int? count = null) => Consume(async: false, count).GetAwaiter().GetResult(); public ValueTask ConsumeAsync(int? 
count = null, CancellationToken cancellationToken = default) => Consume(async: true, count, cancellationToken); + [MemberNotNullWhen(true, nameof(_userActiveStream))] + bool StreamActive => _userActiveStream is { IsDisposed: false }; internal void ThrowIfStreamActive() { - if (_userActiveStream is { IsDisposed: false}) + if (StreamActive) ThrowHelper.ThrowInvalidOperationException("A stream is already open for this reader"); } @@ -612,7 +614,7 @@ void CommitSlow() // Shut down any streaming and pooling going on on the column. if (_requiresCleanup) { - if (_userActiveStream is { IsDisposed: false }) + if (StreamActive) DisposeUserActiveStream(async: false).GetAwaiter().GetResult(); if (_pooledArray is not null) @@ -693,7 +695,7 @@ async ValueTask CommitSlow() // Shut down any streaming and pooling going on on the column. if (_requiresCleanup) { - if (_userActiveStream is { IsDisposed: false }) + if (StreamActive) await DisposeUserActiveStream(async: true).ConfigureAwait(false); if (_pooledArray is not null) diff --git a/test/Npgsql.Tests/ReaderTests.cs b/test/Npgsql.Tests/ReaderTests.cs index aad79bc2bf..9cfc948d71 100644 --- a/test/Npgsql.Tests/ReaderTests.cs +++ b/test/Npgsql.Tests/ReaderTests.cs @@ -1757,6 +1757,35 @@ public async Task GetTextReader_in_middle_of_column_throws([Values] bool async) #endregion GetChars / GetTextReader + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5450")] + public async Task EndRead_StreamActive([Values]bool async) + { + if (IsMultiplexing) + return; + + const int columnLength = 1; + + await using var conn = await OpenConnectionAsync(); + var buffer = conn.Connector!.ReadBuffer; + buffer.FilledBytes += columnLength; + var reader = buffer.PgReader; + reader.Init(columnLength, DataFormat.Binary, resumable: false); + if (async) + await reader.StartReadAsync(Size.Unknown, CancellationToken.None); + else + reader.StartRead(Size.Unknown); + + await using (var _ = reader.GetStream()) + { + if (async) + 
Assert.DoesNotThrowAsync(async () => await reader.EndReadAsync()); + else + Assert.DoesNotThrow(() => reader.EndRead()); + } + + reader.Commit(resuming: false); + } + [Test, Description("Tests that everything goes well when a type handler generates a NpgsqlSafeReadException")] public async Task SafeReadException() { From b21c41c58002d6320bb554b442387d6108124546 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Wed, 29 Nov 2023 00:32:53 +0100 Subject: [PATCH 337/761] Fix sync writing of hstore values (#5452) fixes #5445 --- src/Npgsql/Internal/Converters/HstoreConverter.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Npgsql/Internal/Converters/HstoreConverter.cs b/src/Npgsql/Internal/Converters/HstoreConverter.cs index 5f99fd128c..e2e8762d8e 100644 --- a/src/Npgsql/Internal/Converters/HstoreConverter.cs +++ b/src/Npgsql/Internal/Converters/HstoreConverter.cs @@ -147,7 +147,7 @@ async ValueTask Write(bool async, PgWriter writer, T value, CancellationToken ca if (async) await writer.WriteCharsAsync(kv.Value.AsMemory(), _encoding, cancellationToken).ConfigureAwait(false); else - writer.WriteChars(kv.Key.AsSpan(), _encoding); + writer.WriteChars(kv.Value.AsSpan(), _encoding); } i += 2; } From 7a288d88552923f847e3e69947c3ba89f0ad28be Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Sun, 3 Dec 2023 18:57:40 +0100 Subject: [PATCH 338/761] Use row buffered status in GetFieldValueAsync and IsDBNullAsync (#5470) --- src/Npgsql/NpgsqlDataReader.cs | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 8d7beb4128..c503a359e1 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -88,7 +88,7 @@ public sealed class NpgsqlDataReader : DbDataReader, IDbColumnSchemaGenerator /// Mostly useful for a sequential mode, when the row is already in the buffer. /// Should always be true for the non-sequential mode. 
/// - bool _canConsumeRowNonSequentially; + bool _isRowBuffered; /// /// The RowDescription message for the current resultset being processed @@ -194,7 +194,7 @@ public override Task ReadAsync(CancellationToken cancellationToken) } // We have a special case path for SingleRow. - if (_behavior.HasFlag(CommandBehavior.SingleRow) || !_canConsumeRowNonSequentially) + if (_behavior.HasFlag(CommandBehavior.SingleRow) || !_isRowBuffered) return null; ConsumeRowNonSequential(); @@ -810,7 +810,8 @@ internal void ProcessMessage(IBackendMessage msg) var msgRemainder = dataRow.Length - sizeof(short); _dataMsgEnd = readPosition + msgRemainder; _columnsStartPos = readPosition; - _canConsumeRowNonSequentially = msgRemainder <= Buffer.FilledBytes - readPosition; + _isRowBuffered = msgRemainder <= Buffer.FilledBytes - readPosition; + Debug.Assert(_isRowBuffered || _isSequential); _column = -1; if (_columns.Count > 0) @@ -1537,8 +1538,8 @@ public Task GetTextReaderAsync(int ordinal, CancellationToken cancel /// public override Task GetFieldValueAsync(int ordinal, CancellationToken cancellationToken) { - // In non-sequential, we know that the column is already buffered - no I/O will take place - if (!_isSequential) + // As the row is buffered we know the column is too - no I/O will take place + if (_isRowBuffered) return Task.FromResult(GetFieldValueCore(ordinal)); // The only statically mapped converter, it always exists. @@ -1693,7 +1694,7 @@ public override bool IsDBNull(int ordinal) /// true if the specified column value is equivalent to otherwise false. public override Task IsDBNullAsync(int ordinal, CancellationToken cancellationToken) { - if (!_isSequential) + if (_isRowBuffered) return IsDBNull(ordinal) ? 
TrueTask : FalseTask; return Core(ordinal, cancellationToken); @@ -2097,7 +2098,7 @@ Task ConsumeRow(bool async) { Debug.Assert(State is ReaderState.InResult or ReaderState.BeforeResult); - if (!_canConsumeRowNonSequentially) + if (!_isRowBuffered) return ConsumeRowSequential(async); // We get here, if we're in a non-sequential mode (or the row is already in the buffer) From cdf984108767bffb06a6479a0334d81d0450d8dd Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Sun, 3 Dec 2023 21:36:36 +0100 Subject: [PATCH 339/761] Redo binary exporter column reading (#5464) Fixes #5457 --- src/Npgsql/Internal/PgReader.cs | 8 +- src/Npgsql/NpgsqlBinaryExporter.cs | 153 +++++++++++++++-------------- test/Npgsql.Tests/CopyTests.cs | 67 +++++++++++++ 3 files changed, 148 insertions(+), 80 deletions(-) diff --git a/src/Npgsql/Internal/PgReader.cs b/src/Npgsql/Internal/PgReader.cs index 1d4e4221e1..524680a352 100644 --- a/src/Npgsql/Internal/PgReader.cs +++ b/src/Npgsql/Internal/PgReader.cs @@ -12,6 +12,9 @@ namespace Npgsql.Internal; public class PgReader { + // We don't want to add a ton of memory pressure for large strings. + internal const int MaxPreparedTextReaderSize = 1024 * 64; + readonly NpgsqlReadBuffer _buffer; bool _resumable; @@ -210,11 +213,8 @@ public ValueTask GetTextReaderAsync(Encoding encoding, CancellationT async ValueTask GetTextReader(bool async, Encoding encoding, CancellationToken cancellationToken) { - // We don't want to add a ton of memory pressure for large strings. 
- const int maxPreparedSize = 1024 * 64; - _requiresCleanup = true; - if (CurrentRemaining > _buffer.ReadBytesLeft || CurrentRemaining > maxPreparedSize) + if (CurrentRemaining > _buffer.ReadBytesLeft || CurrentRemaining > MaxPreparedTextReaderSize) return new StreamReader(GetColumnStream(), encoding, detectEncodingFromByteOrderMarks: false); if (_preparedTextReader is { IsDisposed: false }) diff --git a/src/Npgsql/NpgsqlBinaryExporter.cs b/src/Npgsql/NpgsqlBinaryExporter.cs index 248198418e..d910e70a62 100644 --- a/src/Npgsql/NpgsqlBinaryExporter.cs +++ b/src/Npgsql/NpgsqlBinaryExporter.cs @@ -35,7 +35,7 @@ public sealed class NpgsqlBinaryExporter : ICancelable /// /// The number of columns, as returned from the backend in the CopyInResponse. /// - internal int NumColumns { get; private set; } + int NumColumns { get; set; } PgConverterInfo[] _columnInfoCache; @@ -140,8 +140,7 @@ async Task ReadHeader(bool async) async ValueTask StartRow(bool async, CancellationToken cancellationToken = default) { - - CheckDisposed(); + ThrowIfDisposed(); if (_isConsumed) return -1; @@ -149,7 +148,10 @@ async ValueTask StartRow(bool async, CancellationToken cancellationToken = // Consume and advance any active column. if (_column >= 0) - await Commit(async, resumableOp: false).ConfigureAwait(false); + { + await Commit(async).ConfigureAwait(false); + _column++; + } // The very first row (i.e. _column == -1) is included in the header's CopyData message. // Otherwise we need to read in a new CopyData row (the docs specify that there's a CopyData @@ -210,29 +212,6 @@ public ValueTask ReadAsync(CancellationToken cancellationToken = default) ValueTask Read(bool async, CancellationToken cancellationToken = default) => Read(async, null, cancellationToken); - PgConverterInfo CreateConverterInfo(Type type, NpgsqlDbType? npgsqlDbType = null) - { - var options = _connector.SerializerOptions; - PgTypeId? 
pgTypeId = null; - if (npgsqlDbType.HasValue) - { - pgTypeId = npgsqlDbType.Value.ToDataTypeName() is { } name - ? options.GetCanonicalTypeId(name) - // Handle plugin types via lookup. - : GetRepresentationalOrDefault(npgsqlDbType.Value.ToUnqualifiedDataTypeNameOrThrow()); - } - var info = options.GetTypeInfo(type, pgTypeId) - ?? throw new NotSupportedException($"Reading is not supported for type '{type}'{(npgsqlDbType is null ? "" : $" and NpgsqlDbType '{npgsqlDbType}'")}"); - // Binary export has no type info so we only do caller-directed interpretation of data. - return info.Bind(new Field("?", info.PgTypeId!.Value, -1), DataFormat.Binary); - - PgTypeId GetRepresentationalOrDefault(string dataTypeName) - { - var type = options.DatabaseInfo.GetPostgresType(dataTypeName); - return options.ToCanonicalTypeId(type.GetRepresentationalType()); - } - } - /// /// Reads the current column, returns its value according to and /// moves ahead to the next column. @@ -269,39 +248,22 @@ public ValueTask ReadAsync(NpgsqlDbType type, CancellationToken cancellati async ValueTask Read(bool async, NpgsqlDbType? type, CancellationToken cancellationToken) { - CheckDisposed(); - if (_column is BeforeRow) - ThrowHelper.ThrowInvalidOperationException("Not reading a row"); + ThrowIfNotOnRow(); using var registration = _connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); - // Allow one more read if the field is a db null. - // We cannot allow endless rereads otherwise it becomes quite unclear when a column advance happens. - if (PgReader is { Initialized: true, Resumable: true, FieldSize: -1 }) - { - await Commit(async, resumableOp: false).ConfigureAwait(false); - return DbNullOrThrow(); - } + if (!IsInitializedAndAtStart) + await MoveNextColumn(async, resumableOp: false).ConfigureAwait(false); - // We must commit the current column before reading the next one unless it was an IsNull call. 
- PgConverterInfo info; - bool asObject; - if (!PgReader.Initialized || !PgReader.Resumable || PgReader.CurrentRemaining != PgReader.FieldSize) + if (PgReader.FieldSize is (-1 or 0) and var fieldSize) { - await Commit(async, resumableOp: false).ConfigureAwait(false); - info = GetInfo(type, out asObject); - - // We need to get info after potential I/O as we don't know beforehand at what column we're at. - var columnLen = await ReadColumnLenIfNeeded(async, resumableOp: false).ConfigureAwait(false); - if (_column == NumColumns) - ThrowHelper.ThrowInvalidOperationException("No more columns left in the current row"); - - if (columnLen is -1) + // Commit, otherwise we'll have no way of knowing this column is finished. + await Commit(async).ConfigureAwait(false); + if (fieldSize is -1) return DbNullOrThrow(); - } - else - info = GetInfo(type, out asObject); + + var info = GetInfo(type, out var asObject); T result; if (async) @@ -323,6 +285,14 @@ async ValueTask Read(bool async, NpgsqlDbType? type, CancellationToken can return result; + static T DbNullOrThrow() + { + // When T is a Nullable, we support returning null + if (default(T) is null && typeof(T).IsValueType) + return default!; + throw new InvalidCastException("Column is null"); + } + PgConverterInfo GetInfo(NpgsqlDbType? type, out bool asObject) { ref var cachedInfo = ref _columnInfoCache[_column]; @@ -331,12 +301,27 @@ PgConverterInfo GetInfo(NpgsqlDbType? type, out bool asObject) return converterInfo; } - T DbNullOrThrow() + PgConverterInfo CreateConverterInfo(Type type, NpgsqlDbType? npgsqlDbType = null) { - // When T is a Nullable, we support returning null - if (default(T) is null && typeof(T).IsValueType) - return default!; - throw new InvalidCastException("Column is null"); + var options = _connector.SerializerOptions; + PgTypeId? pgTypeId = null; + if (npgsqlDbType.HasValue) + { + pgTypeId = npgsqlDbType.Value.ToDataTypeName() is { } name + ? 
options.GetCanonicalTypeId(name) + // Handle plugin types via lookup. + : GetRepresentationalOrDefault(npgsqlDbType.Value.ToUnqualifiedDataTypeNameOrThrow()); + } + var info = options.GetTypeInfo(type, pgTypeId) + ?? throw new NotSupportedException($"Reading is not supported for type '{type}'{(npgsqlDbType is null ? "" : $" and NpgsqlDbType '{npgsqlDbType}'")}"); + // Binary export has no type info so we only do caller-directed interpretation of data. + return info.Bind(new Field("?", info.PgTypeId!.Value, -1), DataFormat.Binary); + + PgTypeId GetRepresentationalOrDefault(string dataTypeName) + { + var type = options.DatabaseInfo.GetPostgresType(dataTypeName); + return options.ToCanonicalTypeId(type.GetRepresentationalType()); + } } } @@ -347,8 +332,11 @@ public bool IsNull { get { - Commit(async: false, resumableOp: true); - return ReadColumnLenIfNeeded(async: false, resumableOp: true).GetAwaiter().GetResult() is -1; + ThrowIfNotOnRow(); + if (!IsInitializedAndAtStart) + return MoveNextColumn(async: false, resumableOp: true).GetAwaiter().GetResult() is -1; + + return PgReader.FieldSize is - 1; } } @@ -365,46 +353,59 @@ public Task SkipAsync(CancellationToken cancellationToken = default) async Task Skip(bool async, CancellationToken cancellationToken = default) { - CheckDisposed(); + ThrowIfNotOnRow(); using var registration = _connector.StartNestedCancellableOperation(cancellationToken); - // We allow IsNull to have been called before skip. - if (PgReader.Initialized && PgReader is not { Resumable: true, FieldSize: -1 }) - await Commit(async, resumableOp: false).ConfigureAwait(false); - await ReadColumnLenIfNeeded(async, resumableOp: false).ConfigureAwait(false); + if (!IsInitializedAndAtStart) + await MoveNextColumn(async, resumableOp: false).ConfigureAwait(false); + await PgReader.Consume(async, cancellationToken: cancellationToken).ConfigureAwait(false); + + // Commit, otherwise we'll have no way of knowing this column is finished. 
+ if (PgReader.FieldSize is -1 or 0) + await Commit(async).ConfigureAwait(false); } #endregion #region Utilities - ValueTask Commit(bool async, bool resumableOp) - { - var resuming = PgReader is { Initialized: true, Resumable: true } && resumableOp; - if (!resuming) - _column++; + bool IsInitializedAndAtStart => PgReader.Initialized && (PgReader.FieldSize is -1 || PgReader.FieldOffset is 0); + ValueTask Commit(bool async) + { if (async) - return PgReader.CommitAsync(resuming); + return PgReader.CommitAsync(resuming: false); - PgReader.Commit(resuming); + PgReader.Commit(resuming: false); return new(); } - async ValueTask ReadColumnLenIfNeeded(bool async, bool resumableOp) + async ValueTask MoveNextColumn(bool async, bool resumableOp) { - if (PgReader is { Initialized: true, Resumable: true, FieldSize: -1 }) - return -1; + if (async) + await PgReader.CommitAsync(resuming: false).ConfigureAwait(false); + else + PgReader.Commit(resuming: false); + if (_column + 1 == NumColumns) + ThrowHelper.ThrowInvalidOperationException("No more columns left in the current row"); + _column++; await _buf.Ensure(4, async).ConfigureAwait(false); var columnLen = _buf.ReadInt32(); PgReader.Init(columnLen, DataFormat.Binary, resumableOp); return PgReader.FieldSize; } - void CheckDisposed() + void ThrowIfNotOnRow() + { + ThrowIfDisposed(); + if (_column is BeforeRow) + ThrowHelper.ThrowInvalidOperationException("Not reading a row"); + } + + void ThrowIfDisposed() { if (_isDisposed) ThrowHelper.ThrowObjectDisposedException(nameof(NpgsqlBinaryExporter), "The COPY operation has already ended."); diff --git a/test/Npgsql.Tests/CopyTests.cs b/test/Npgsql.Tests/CopyTests.cs index 64406b6b96..f7a18a2884 100644 --- a/test/Npgsql.Tests/CopyTests.cs +++ b/test/Npgsql.Tests/CopyTests.cs @@ -510,6 +510,73 @@ public async Task Wrong_table_definition_binary_export() Assert.That(await conn.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1)); } + [Test, 
IssueLink("https://github.com/npgsql/npgsql/issues/5457")] + public async Task MixedOperations() + { + if (IsMultiplexing) + Assert.Ignore("Multiplexing: fails"); + using var conn = await OpenConnectionAsync(); + + var reader = conn.BeginBinaryExport(""" + COPY (values ('foo', 1), ('bar', null), (null, 2)) TO STDOUT BINARY + """); + while(reader.StartRow() != -1) + { + string? col1 = null; + if (reader.IsNull) + reader.Skip(); + else + col1 = reader.Read(); + int? col2 = null; + if (reader.IsNull) + reader.Skip(); + else + col2 = reader.Read(); + } + } + + [Test] + public async Task ReadMoreColumnsThanExist() + { + if (IsMultiplexing) + Assert.Ignore("Multiplexing: fails"); + using var conn = await OpenConnectionAsync(); + + var reader = conn.BeginBinaryExport(""" + COPY (values ('foo', 1), ('bar', null), (null, 2)) TO STDOUT BINARY + """); + while(reader.StartRow() != -1) + { + string? col1 = null; + if (reader.IsNull) + reader.Skip(); + else + col1 = reader.Read(); + int? col2 = null; + if (reader.IsNull) + reader.Skip(); + else + col2 = reader.Read(); + + Assert.Throws(() => _ = reader.IsNull); + } + } + + [Test] + public async Task StreamingRead() + { + if (IsMultiplexing) + Assert.Ignore("Multiplexing: fails"); + using var conn = await OpenConnectionAsync(); + + var str = new string('a', PgReader.MaxPreparedTextReaderSize + 1); + var reader = conn.BeginBinaryExport($"""COPY (values ('{str}')) TO STDOUT BINARY"""); + while (reader.StartRow() != -1) + { + using var _ = reader.Read(NpgsqlDbType.Text); + } + } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/2330")] public async Task Wrong_format_binary_export() { From 074703eb3596eb2451f7858bf9ea04b6f3e2e720 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Dec 2023 22:40:40 +0100 Subject: [PATCH 340/761] Bump actions/setup-dotnet from 3.2.0 to 4.0.0 (#5474) --- .github/workflows/build.yml | 6 +++--- .github/workflows/codeql-analysis.yml | 
2 +- .github/workflows/native-aot.yml | 2 +- .github/workflows/rich-code-nav.yml | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 1136792994..deb7d29a96 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -65,7 +65,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v3.2.0 + uses: actions/setup-dotnet@v4.0.0 with: dotnet-version: | ${{ env.dotnet_sdk_version }} @@ -345,7 +345,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v3.2.0 + uses: actions/setup-dotnet@v4.0.0 with: dotnet-version: ${{ env.dotnet_sdk_version }} @@ -379,7 +379,7 @@ jobs: uses: actions/checkout@v4 - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v3.2.0 + uses: actions/setup-dotnet@v4.0.0 with: dotnet-version: ${{ env.dotnet_sdk_version }} diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 0e721ebc22..f7f5a7ffdc 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -66,7 +66,7 @@ jobs: # queries: ./path/to/local/query, your-org/your-repo/queries@main - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v3.2.0 + uses: actions/setup-dotnet@v4.0.0 with: dotnet-version: ${{ env.dotnet_sdk_version }} diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index 6ff04ffa5f..b4c1385506 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -64,7 +64,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v3.2.0 + uses: actions/setup-dotnet@v4.0.0 with: dotnet-version: | ${{ env.dotnet_sdk_version }} diff --git a/.github/workflows/rich-code-nav.yml b/.github/workflows/rich-code-nav.yml index 1990c8ff78..ddda4e816d 100644 --- a/.github/workflows/rich-code-nav.yml +++ b/.github/workflows/rich-code-nav.yml @@ -29,7 +29,7 @@ 
jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v3.2.0 + uses: actions/setup-dotnet@v4.0.0 with: dotnet-version: ${{ env.dotnet_sdk_version }} From dbac4593b799a7aae30a1121034dfdc0e34006f7 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Thu, 7 Dec 2023 16:55:50 +0100 Subject: [PATCH 341/761] Pick the task constructor by using the right ValueTask type (#5487) Fixes #5484 --- src/Npgsql/Internal/PgStreamingConverter.cs | 4 +++- test/Npgsql.Tests/ReaderTests.cs | 14 ++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/src/Npgsql/Internal/PgStreamingConverter.cs b/src/Npgsql/Internal/PgStreamingConverter.cs index 7a9a6ce72d..ff9c6b5eb2 100644 --- a/src/Npgsql/Internal/PgStreamingConverter.cs +++ b/src/Npgsql/Internal/PgStreamingConverter.cs @@ -43,7 +43,9 @@ internal sealed override unsafe ValueTask ReadAsObject( static object BoxResult(Task task) { Debug.Assert(task is Task); - return new ValueTask(Unsafe.As>(task)).Result; + // We're using ValueTask.Result here to avoid rooting any TaskAwaiter or ValueTaskAwaiter types. + // On ValueTask calling .Result is equivalent to GetAwaiter().GetResult() w.r.t. exception wrapping. 
+ return new ValueTask(task: Unsafe.As>(task)).Result!; } } diff --git a/test/Npgsql.Tests/ReaderTests.cs b/test/Npgsql.Tests/ReaderTests.cs index 9cfc948d71..661d94de2c 100644 --- a/test/Npgsql.Tests/ReaderTests.cs +++ b/test/Npgsql.Tests/ReaderTests.cs @@ -910,6 +910,20 @@ public async Task SequentialBufferedSeek() } } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5484")] + public async Task GetFieldValueAsync_AsyncRead() + { + await using var conn = await OpenConnectionAsync(); + using var cmd = conn.CreateCommand(); + var expected = new string('a', conn.Settings.ReadBufferSize + 1); + cmd.CommandText = $"""select repeat('a', {conn.Settings.ReadBufferSize+1}) from generate_series(1, 1000)"""; + var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SequentialAccess); + while (await reader.ReadAsync()) + { + Assert.AreEqual(expected, await reader.GetFieldValueAsync(0)); + } + } + [Test] public async Task Close_connection_in_middle_of_row() { From 2933e92bc62698d501e3be406c8e40851e1c8dac Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 8 Dec 2023 22:38:05 +0100 Subject: [PATCH 342/761] Bump BenchmarkDotNet from 0.13.10 to 0.13.11 (#5490) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index c9f6dd6409..a94e3018a6 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -42,7 +42,7 @@ - + From 3fe4d6ee874c28ee4d6c5a1b66d1493b98a19488 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Sun, 10 Dec 2023 20:05:09 +0100 Subject: [PATCH 343/761] Use PG mock for testing async field read (#5488) --- test/Npgsql.Tests/ReaderTests.cs | 31 +++++++++++++++++++++++++------ 1 file changed, 25 insertions(+), 6 deletions(-) diff --git a/test/Npgsql.Tests/ReaderTests.cs b/test/Npgsql.Tests/ReaderTests.cs index 661d94de2c..7b871c8ab0 100644 --- a/test/Npgsql.Tests/ReaderTests.cs +++ 
b/test/Npgsql.Tests/ReaderTests.cs @@ -913,14 +913,33 @@ public async Task SequentialBufferedSeek() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5484")] public async Task GetFieldValueAsync_AsyncRead() { - await using var conn = await OpenConnectionAsync(); - using var cmd = conn.CreateCommand(); - var expected = new string('a', conn.Settings.ReadBufferSize + 1); - cmd.CommandText = $"""select repeat('a', {conn.Settings.ReadBufferSize+1}) from generate_series(1, 1000)"""; - var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SequentialAccess); + if (!IsSequential) + return; + + await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); + + var expected = new byte[10000]; + expected.AsSpan().Fill(1); + + var pgMock = await postmasterMock.WaitForServerConnection(); + await pgMock + .WriteParseComplete() + .WriteBindComplete() + .WriteRowDescription(new FieldDescription(ByteaOid)) + .WriteDataRowWithFlush(expected); + + using var cmd = new NpgsqlCommand("irrelevant", conn); + var reader = await cmd.ExecuteReaderAsync(Behavior); while (await reader.ReadAsync()) { - Assert.AreEqual(expected, await reader.GetFieldValueAsync(0)); + var task = reader.GetFieldValueAsync(0); + await pgMock + .WriteCommandComplete() + .WriteReadyForQuery() + .FlushAsync(); + Assert.AreEqual(expected, await task); } } From b509f5376440e40cb70101ff3fe16ce207164fbc Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Tue, 12 Dec 2023 13:37:09 +0200 Subject: [PATCH 344/761] Fix reading infinity as DateTimeOffset in legacy mode (#5496) Fixes #5492 --- .../Temporal/LegacyDateTimeConverter.cs | 19 +++++++++++++------ .../Npgsql.Tests/Types/LegacyDateTimeTests.cs | 17 +++++++++++++++++ 2 files changed, 30 insertions(+), 6 deletions(-) diff --git a/src/Npgsql/Internal/Converters/Temporal/LegacyDateTimeConverter.cs 
b/src/Npgsql/Internal/Converters/Temporal/LegacyDateTimeConverter.cs index a6d53a54a2..99ad4ed599 100644 --- a/src/Npgsql/Internal/Converters/Temporal/LegacyDateTimeConverter.cs +++ b/src/Npgsql/Internal/Converters/Temporal/LegacyDateTimeConverter.cs @@ -27,9 +27,9 @@ protected override DateTime ReadCore(PgReader reader) } var dateTime = PgTimestamp.Decode(reader.ReadInt64(), DateTimeKind.Utc, _dateTimeInfinityConversions); - return !_dateTimeInfinityConversions || dateTime != DateTime.MaxValue && dateTime != DateTime.MinValue - ? dateTime.ToLocalTime() - : dateTime; + return (dateTime == DateTime.MinValue || dateTime == DateTime.MaxValue) && _dateTimeInfinityConversions + ? dateTime + : dateTime.ToLocalTime(); } protected override void WriteCore(PgWriter writer, DateTime value) @@ -57,9 +57,16 @@ public override bool CanConvert(DataFormat format, out BufferRequirements buffer protected override DateTimeOffset ReadCore(PgReader reader) { var dateTime = PgTimestamp.Decode(reader.ReadInt64(), DateTimeKind.Utc, _dateTimeInfinityConversions); - return !_dateTimeInfinityConversions || dateTime != DateTime.MaxValue && dateTime != DateTime.MinValue - ? 
dateTime.ToLocalTime() - : dateTime; + + if (_dateTimeInfinityConversions) + { + if (dateTime == DateTime.MinValue) + return DateTimeOffset.MinValue; + if (dateTime == DateTime.MaxValue) + return DateTimeOffset.MaxValue; + } + + return dateTime.ToLocalTime(); } protected override void WriteCore(PgWriter writer, DateTimeOffset value) diff --git a/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs b/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs index 78460c27db..c500324986 100644 --- a/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs +++ b/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs @@ -29,6 +29,23 @@ public async Task Timestamp_read_as_Unspecified_DateTime() Assert.That(dateTime.Kind, Is.EqualTo(DateTimeKind.Unspecified)); } + [Test] + public async Task Timestamptz_negative_infinity() + { + var dto = await AssertType(DateTimeOffset.MinValue, "-infinity", "timestamp with time zone", NpgsqlDbType.TimestampTz, + DbType.DateTimeOffset, isDefaultForReading: false); + Assert.That(dto.Offset, Is.EqualTo(TimeSpan.Zero)); + } + + [Test] + public async Task Timestamptz_infinity() + { + var dto = await AssertType( + DateTimeOffset.MaxValue, "infinity", "timestamp with time zone", NpgsqlDbType.TimestampTz, DbType.DateTimeOffset, + isDefaultForReading: false); + Assert.That(dto.Offset, Is.EqualTo(TimeSpan.Zero)); + } + [Test] [TestCase(DateTimeKind.Utc, TestName = "Timestamptz_write_utc_DateTime_does_not_convert")] [TestCase(DateTimeKind.Unspecified, TestName = "Timestamptz_write_unspecified_DateTime_does_not_convert")] From 56e8344da93b03ec84ba24fae9ed8db69caa5e83 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 16 Dec 2023 10:16:17 +0200 Subject: [PATCH 345/761] Bump actions/upload-artifact from 3 to 4 (#5499) --- .github/workflows/build.yml | 4 ++-- .github/workflows/native-aot.yml | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 
deb7d29a96..a57ccf366f 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -353,7 +353,7 @@ jobs: run: dotnet pack Npgsql.sln --configuration Release --property:PackageOutputPath="$PWD/nupkgs" --version-suffix "ci.$(date -u +%Y%m%dT%H%M%S)+sha.${GITHUB_SHA:0:9}" -p:ContinuousIntegrationBuild=true - name: Upload artifacts (nupkg) - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: Npgsql.CI path: nupkgs @@ -387,7 +387,7 @@ jobs: run: dotnet pack Npgsql.sln --configuration Release --property:PackageOutputPath="$PWD/nupkgs" -p:ContinuousIntegrationBuild=true - name: Upload artifacts - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: Npgsql.Release path: nupkgs diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index b4c1385506..77c75f343d 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -103,21 +103,21 @@ jobs: run: dotnet run --project test/MStatDumper/MStatDumper.csproj -c release -f net8.0 -- "test/Npgsql.NativeAotTests/obj/Release/net8.0/linux-x64/native/Npgsql.NativeAotTests.mstat" md >> $GITHUB_STEP_SUMMARY - name: Upload mstat - uses: actions/upload-artifact@v3.1.2 + uses: actions/upload-artifact@v4 with: name: npgsql.mstat path: "test/Npgsql.NativeAotTests/obj/Release/net8.0/linux-x64/native/Npgsql.NativeAotTests.mstat" retention-days: 3 - name: Upload codedgen dgml - uses: actions/upload-artifact@v3.1.2 + uses: actions/upload-artifact@v4 with: name: npgsql.codegen.dgml.xml path: "test/Npgsql.NativeAotTests/obj/Release/net8.0/linux-x64/native/Npgsql.NativeAotTests.codegen.dgml.xml" retention-days: 3 - name: Upload scan dgml - uses: actions/upload-artifact@v3.1.2 + uses: actions/upload-artifact@v4 with: name: npgsql.scan.dgml.xml path: "test/Npgsql.NativeAotTests/obj/Release/net8.0/linux-x64/native/Npgsql.NativeAotTests.scan.dgml.xml" From 5726af1c00de1f8ab48555966ba418c879ceec0f Mon Sep 17 00:00:00 2001 
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 17 Dec 2023 00:10:13 +0200 Subject: [PATCH 346/761] Bump github/codeql-action from 2 to 3 (#5498) --- .github/workflows/codeql-analysis.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index f7f5a7ffdc..868ea2418b 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -57,7 +57,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@v2 + uses: github/codeql-action/init@v3 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -90,4 +90,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v2 + uses: github/codeql-action/analyze@v3 From d0a858782d4004fdafe4c887661995afbce9b038 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 17 Dec 2023 00:12:33 +0200 Subject: [PATCH 347/761] Bump xunit from 2.6.2 to 2.6.3 (#5495) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index a94e3018a6..472bc18b2e 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -35,7 +35,7 @@ - + From 4e398678b4b5ffc52b7d81ba7e7412414bc27a65 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Dec 2023 09:31:43 +0200 Subject: [PATCH 348/761] Bump NodaTime from 3.1.9 to 3.1.10 (#5506) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 472bc18b2e..36ddf0559e 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -17,7 +17,7 @@ - + From 
77277187ad8099753433199dabbd9b2ef8a3d8a2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Dec 2023 09:32:04 +0200 Subject: [PATCH 349/761] Bump xunit from 2.6.3 to 2.6.4 (#5505) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 36ddf0559e..d0cc3d90e3 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -35,7 +35,7 @@ - + From df0f1def70314021fac636b8f6f1d1df01f9bbbe Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Dec 2023 09:32:59 +0200 Subject: [PATCH 350/761] Bump xunit.runner.visualstudio from 2.5.4 to 2.5.6 (#5504) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index d0cc3d90e3..01c03a730d 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -36,7 +36,7 @@ - + From 19a98bbb5eca5750744718979b883bcfaaa5f826 Mon Sep 17 00:00:00 2001 From: Noelle Caldwell Date: Wed, 3 Jan 2024 10:40:59 -0800 Subject: [PATCH 351/761] Turn of Code Index (rich-code-nav) (#5507) We've noticed you're indexing this repository regularly, but this service is not currently supported in GitHub scenarios; to save you build time, we recommend turning off this additional pipeline until the service is integrated into GitHub. 
--- .github/workflows/rich-code-nav.yml | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/.github/workflows/rich-code-nav.yml b/.github/workflows/rich-code-nav.yml index ddda4e816d..26fc0e7f8a 100644 --- a/.github/workflows/rich-code-nav.yml +++ b/.github/workflows/rich-code-nav.yml @@ -1,12 +1,7 @@ name: Rich Code Navigation on: - push: - branches: - - main - - stable - tags: - - '*' + workflow_dispatch: env: dotnet_sdk_version: '8.0.100' @@ -38,7 +33,7 @@ jobs: shell: bash - name: Rich Navigation Indexing - uses: microsoft/RichCodeNavIndexer@v0.1 + uses: microsoft/RichCodeNavIndexer@v0 with: languages: csharp repo-token: ${{ github.token }} From 0fde265e1f39afb5ce8335e2dd928455c84a955c Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Fri, 5 Jan 2024 14:50:37 +0300 Subject: [PATCH 352/761] Fix memory leak while asynchronously disposing NpgsqlDataSource (#5514) Fixes #5512 --- src/Npgsql/NpgsqlDataSource.cs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index b26dd62b61..47f44c74f5 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -475,7 +475,7 @@ protected virtual void DisposeBase() _periodicPasswordProviderTimer?.Dispose(); _setupMappingsSemaphore.Dispose(); - MetricsReporter.Dispose(); // TODO: This is probably too early, dispose only when all connections have been closed? 
+ MetricsReporter.Dispose(); Clear(); } @@ -503,6 +503,7 @@ protected virtual async ValueTask DisposeAsyncBase() await _periodicPasswordProviderTimer.DisposeAsync().ConfigureAwait(false); _setupMappingsSemaphore.Dispose(); + MetricsReporter.Dispose(); // TODO: async Clear, #4499 Clear(); From 2856631c6473fb4c861109f8a46910d9483a43f9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 6 Jan 2024 02:08:43 +0200 Subject: [PATCH 353/761] Bump xunit from 2.6.4 to 2.6.5 (#5515) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 01c03a730d..379a2db5d2 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -35,7 +35,7 @@ - + From 093ba7006302d75c86fae13f25c914395626efd2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 6 Jan 2024 02:09:05 +0200 Subject: [PATCH 354/761] Bump BenchmarkDotNet from 0.13.11 to 0.13.12 (#5516) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 379a2db5d2..84608f8ef5 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -42,7 +42,7 @@ - + From 0a5c7666aab8897bddb3231ceb160e0557991aa0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 10 Jan 2024 09:16:16 +0200 Subject: [PATCH 355/761] Bump Microsoft.Data.SqlClient from 5.1.2 to 5.1.4 (#5519) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 84608f8ef5..122f757ce0 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -43,7 +43,7 @@ - + From 9c3d3874908498f3a2cf5e45c89a3d5c24081945 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Jan 2024 23:33:29 +0200 Subject: [PATCH 356/761] Bump xunit from 2.6.5 to 2.6.6 (#5527) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 122f757ce0..8176504e3d 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -35,7 +35,7 @@ - + From 062e82026f37f74af3c3fba3110a550780814f59 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Wed, 17 Jan 2024 20:38:59 +0100 Subject: [PATCH 357/761] StreamActive check should apply before any other EndRead checks (#5479) --- src/Npgsql/Internal/PgReader.cs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Npgsql/Internal/PgReader.cs b/src/Npgsql/Internal/PgReader.cs index 524680a352..2de9086c9e 100644 --- a/src/Npgsql/Internal/PgReader.cs +++ b/src/Npgsql/Internal/PgReader.cs @@ -476,7 +476,7 @@ internal ValueTask StartReadAsync(Size bufferRequirement, CancellationToken canc internal void EndRead() { - if (_resumable) + if (_resumable || StreamActive) return; // If it was upper bound we should consume. @@ -486,7 +486,7 @@ internal void EndRead() return; } - if (FieldOffset != FieldSize && !StreamActive) + if (FieldOffset != FieldSize) ThrowNotConsumedExactly(); _fieldConsumed = true; @@ -494,14 +494,14 @@ internal void EndRead() internal ValueTask EndReadAsync() { - if (_resumable) + if (_resumable || StreamActive) return new(); // If it was upper bound we should consume. 
if (_fieldBufferRequirement is { Kind: SizeKind.UpperBound }) return ConsumeAsync(FieldRemaining); - if (FieldOffset != FieldSize && !StreamActive) + if (FieldOffset != FieldSize) ThrowNotConsumedExactly(); _fieldConsumed = true; From b10cc19d78ae00ff7fea5cffb6bcd92eac78e775 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Wed, 17 Jan 2024 20:39:23 +0100 Subject: [PATCH 358/761] Default to JToken instead of JObject (#5477) Fixes #5475 --- .../JsonNetTypeInfoResolverFactory.cs | 7 ++-- test/Npgsql.PluginTests/JsonNetTests.cs | 38 ++++++++++++++++--- 2 files changed, 36 insertions(+), 9 deletions(-) diff --git a/src/Npgsql.Json.NET/Internal/JsonNetTypeInfoResolverFactory.cs b/src/Npgsql.Json.NET/Internal/JsonNetTypeInfoResolverFactory.cs index 4a125c5ab8..1f07bf0252 100644 --- a/src/Npgsql.Json.NET/Internal/JsonNetTypeInfoResolverFactory.cs +++ b/src/Npgsql.Json.NET/Internal/JsonNetTypeInfoResolverFactory.cs @@ -33,11 +33,10 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings, foreach (var dataTypeName in new[] { "jsonb", "json" }) { var jsonb = dataTypeName == "jsonb"; - mappings.AddType(dataTypeName, (options, mapping, _) => - mapping.CreateInfo(options, new JsonNetJsonConverter(jsonb, options.TextEncoding, settings)), - isDefault: true); mappings.AddType(dataTypeName, (options, mapping, _) => - mapping.CreateInfo(options, new JsonNetJsonConverter(jsonb, options.TextEncoding, settings))); + mapping.CreateInfo(options, new JsonNetJsonConverter(jsonb, options.TextEncoding, settings)), isDefault: true); + mappings.AddType(dataTypeName, (options, mapping, _) => + mapping.CreateInfo(options, new JsonNetJsonConverter(jsonb, options.TextEncoding, settings))); mappings.AddType(dataTypeName, (options, mapping, _) => mapping.CreateInfo(options, new JsonNetJsonConverter(jsonb, options.TextEncoding, settings))); mappings.AddType(dataTypeName, (options, mapping, _) => diff --git a/test/Npgsql.PluginTests/JsonNetTests.cs 
b/test/Npgsql.PluginTests/JsonNetTests.cs index c3da30a386..b3fb1e26bb 100644 --- a/test/Npgsql.PluginTests/JsonNetTests.cs +++ b/test/Npgsql.PluginTests/JsonNetTests.cs @@ -194,14 +194,42 @@ public class Bug3464Class public string? SomeString { get; set; } } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5475")] + public async Task Read_jarray_from_get_value() + { + await using var conn = await JsonDataSource.OpenConnectionAsync(); + + await using var cmd = new NpgsqlCommand { Connection = conn }; + + var json = new JArray(new JObject { { "name", "value1" } }); + + cmd.CommandText = $"SELECT @p"; + cmd.Parameters.Add(new("p", json)); + await cmd.ExecuteScalarAsync(); + } [Test] - [IssueLink("https://github.com/npgsql/npgsql/issues/4537")] - public async Task Write_jobject_array_without_npgsqldbtype() + public async Task Write_jobject_without_npgsqldbtype() { - // By default we map JObject to jsonb - if (!IsJsonb) - return; + await using var conn = await JsonDataSource.OpenConnectionAsync(); + var tableName = await TestUtil.CreateTempTable(conn, "key SERIAL PRIMARY KEY, ingredients json"); + await using var cmd = new NpgsqlCommand { Connection = conn }; + + var jsonObject = new JObject + { + { "name", "value1" }, + { "amount", 1 }, + { "unit", "ml" } + }; + + cmd.CommandText = $"INSERT INTO {tableName} (ingredients) VALUES (@p)"; + cmd.Parameters.Add(new("p", jsonObject)); + await cmd.ExecuteNonQueryAsync(); + } + + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4537")] + public async Task Write_jobject_array_without_npgsqldbtype() + { await using var conn = await JsonDataSource.OpenConnectionAsync(); var tableName = await TestUtil.CreateTempTable(conn, "key SERIAL PRIMARY KEY, ingredients json[]"); From 4f1120aa4814bc4620707cd6e03fdf3399d42580 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Wed, 17 Jan 2024 21:04:32 +0100 Subject: [PATCH 359/761] Fix duplicate field check (#5531) Closes #5517 --- 
.../Internal/Composites/ReflectionCompositeInfoFactory.cs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs b/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs index 3e81867c0e..8445cf40ec 100644 --- a/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs +++ b/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs @@ -200,7 +200,7 @@ static CompositeFieldInfo CreateCompositeFieldInfo(string name, Type type, PgTyp return new KeyValuePair(name, x); }).ToArray(); - var duplicates = propertiesAndNames.Except(propertiesAndNames.Distinct()).ToArray(); + var duplicates = propertiesAndNames.GroupBy(x => x.Key).Where(g => g.Count() > 1).ToArray(); if (duplicates.Length > 0) throw new AmbiguousMatchException($"Multiple properties are mapped to the '{duplicates[0].Key}' field."); @@ -228,7 +228,7 @@ static CompositeFieldInfo CreateCompositeFieldInfo(string name, Type type, PgTyp return new KeyValuePair(name, x); }).ToArray(); - var duplicates = clrFieldsAndNames.Except(clrFieldsAndNames.Distinct()).ToArray(); + var duplicates = clrFieldsAndNames.GroupBy(x => x.Key).Where(g => g.Count() > 1).ToArray(); if (duplicates.Length > 0) throw new AmbiguousMatchException($"Multiple properties are mapped to the '{duplicates[0].Key}' field."); @@ -283,9 +283,9 @@ static CompositeFieldInfo CreateCompositeFieldInfo(string name, Type type, PgTyp } } - var duplicates = parametersMap.Except(parametersMap.Distinct()).ToArray(); + var duplicates = parametersMap.GroupBy(x => x).Where(g => g.Count() > 1).ToArray(); if (duplicates.Length > 0) - throw new AmbiguousMatchException($"Multiple constructor parameters are mapped to the '{fields[duplicates[0]].Name}' field."); + throw new AmbiguousMatchException($"Multiple constructor parameters are mapped to the '{fields[duplicates[0].Key].Name}' field."); if (parametersMapped == parameters.Length) return (constructor, 
parametersMap); From 6ef2c357ad86108efb0f3a99d3082004369de07d Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Wed, 17 Jan 2024 21:26:22 +0100 Subject: [PATCH 360/761] Add LinearRing and simplify resolver code (#5532) Fixes #5509 --- ...NetTopologySuiteTypeInfoResolverFactory.cs | 113 ++++++------------ .../NetTopologySuiteTests.cs | 14 ++- 2 files changed, 51 insertions(+), 76 deletions(-) diff --git a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeInfoResolverFactory.cs b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeInfoResolverFactory.cs index 0509e90158..b9a559c12f 100644 --- a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeInfoResolverFactory.cs +++ b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeInfoResolverFactory.cs @@ -29,7 +29,7 @@ public NetTopologySuiteTypeInfoResolverFactory(CoordinateSequenceFactory? coordi class Resolver : IPgTypeInfoResolver { readonly PostGisReader _gisReader; - readonly bool _geographyAsDefault; + protected readonly bool _geographyAsDefault; TypeInfoMappingCollection? _mappings; protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(), _gisReader, new(), _geographyAsDefault); @@ -54,59 +54,29 @@ public Resolver( static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings, PostGisReader reader, PostGisWriter writer, bool geographyAsDefault) { - // geometry - mappings.AddType("geometry", - (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), - matchRequirement: !geographyAsDefault ? MatchRequirement.Single : MatchRequirement.DataTypeName); - - mappings.AddType("geometry", - (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), - matchRequirement: !geographyAsDefault ? 
MatchRequirement.All : MatchRequirement.DataTypeName); - mappings.AddType("geometry", - (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), - matchRequirement: !geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); - mappings.AddType("geometry", - (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), - matchRequirement: !geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); - mappings.AddType("geometry", - (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), - matchRequirement: !geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); - mappings.AddType("geometry", - (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), - matchRequirement: !geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); - mappings.AddType("geometry", - (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), - matchRequirement: !geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); - mappings.AddType("geometry", - (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), - matchRequirement: !geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); - - // geography - mappings.AddType("geography", - (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), - matchRequirement: geographyAsDefault ? MatchRequirement.Single : MatchRequirement.DataTypeName); - - mappings.AddType("geography", - (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), - matchRequirement: geographyAsDefault ? 
MatchRequirement.All : MatchRequirement.DataTypeName); - mappings.AddType("geography", - (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), - matchRequirement: geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); - mappings.AddType("geography", - (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), - matchRequirement: geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); - mappings.AddType("geography", - (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), - matchRequirement: geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); - mappings.AddType("geography", - (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), - matchRequirement: geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); - mappings.AddType("geography", - (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), - matchRequirement: geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); - mappings.AddType("geography", - (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), - matchRequirement: geographyAsDefault ? MatchRequirement.All : MatchRequirement.DataTypeName); + foreach (var dataTypeName in geographyAsDefault ? 
new[] {"geography", "geometry"} : new[] { "geometry", "geography" }) + { + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), + isDefault: true); + + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer))); + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer))); + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer))); + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer))); + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer))); + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer))); + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer))); + mappings.AddType(dataTypeName, + (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer))); + } return mappings; } @@ -115,7 +85,7 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings, sealed class ArrayResolver : Resolver, IPgTypeInfoResolver { TypeInfoMappingCollection? _mappings; - new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings), _geographyAsDefault); public ArrayResolver(CoordinateSequenceFactory? coordinateSequenceFactory, PrecisionModel? precisionModel, Ordinates handleOrdinates, bool geographyAsDefault) @@ -126,27 +96,20 @@ public ArrayResolver(CoordinateSequenceFactory? 
coordinateSequenceFactory, Preci public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) => Mappings.Find(type, dataTypeName, options); - static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings, bool geographyAsDefault) { - // geometry - mappings.AddArrayType("geometry"); - mappings.AddArrayType("geometry"); - mappings.AddArrayType("geometry"); - mappings.AddArrayType("geometry"); - mappings.AddArrayType("geometry"); - mappings.AddArrayType("geometry"); - mappings.AddArrayType("geometry"); - mappings.AddArrayType("geometry"); - - // geography - mappings.AddArrayType("geography"); - mappings.AddArrayType("geography"); - mappings.AddArrayType("geography"); - mappings.AddArrayType("geography"); - mappings.AddArrayType("geography"); - mappings.AddArrayType("geography"); - mappings.AddArrayType("geography"); - mappings.AddArrayType("geography"); + foreach (var dataTypeName in geographyAsDefault ? 
new[] { "geography", "geometry" } : new[] { "geometry", "geography" }) + { + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + } return mappings; } diff --git a/test/Npgsql.PluginTests/NetTopologySuiteTests.cs b/test/Npgsql.PluginTests/NetTopologySuiteTests.cs index 2fb33f678d..4e225d121c 100644 --- a/test/Npgsql.PluginTests/NetTopologySuiteTests.cs +++ b/test/Npgsql.PluginTests/NetTopologySuiteTests.cs @@ -129,7 +129,19 @@ public class NetTopologySuiteTests : TestBase new DotSpatialAffineCoordinateSequence(new[] { 1d, 2d }, new[] { 3d }, new[] { 4d }), GeometryFactory.Default), "st_makepoint(1,2,3,4)") - .SetName("PointXYZM") + .SetName("PointXYZM"), + + new TestCaseData( + Ordinates.None, + new LinearRing(new[] + { + new Coordinate(1d, 1d), + new Coordinate(2d, 2d), + new Coordinate(3d, 3d), + new Coordinate(1d, 1d) + }), + "st_makeline(ARRAY[st_makepoint(1,1),st_makepoint(2,2),st_makepoint(3,3),st_makepoint(1,1)])") + .SetName("LinearRing") }; [Test, TestCaseSource(nameof(TestCases))] From 17f092683a55efe68dba7ffb3863aef1f6ec0622 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Wed, 17 Jan 2024 21:26:34 +0100 Subject: [PATCH 361/761] Remove delayed commit in exporter (#5530) * Remove delayed commit in exporter Fixes #5522 As the api model doesn't allow for it when there are zero-sized columns * Add test --- src/Npgsql/NpgsqlBinaryExporter.cs | 56 ++++++++++++++++-------------- test/Npgsql.Tests/CopyTests.cs | 32 +++++++++++++++++ 2 files changed, 61 insertions(+), 27 deletions(-) diff --git a/src/Npgsql/NpgsqlBinaryExporter.cs b/src/Npgsql/NpgsqlBinaryExporter.cs index d910e70a62..0a05ac2337 100644 --- a/src/Npgsql/NpgsqlBinaryExporter.cs +++ 
b/src/Npgsql/NpgsqlBinaryExporter.cs @@ -255,36 +255,42 @@ async ValueTask Read(bool async, NpgsqlDbType? type, CancellationToken can if (!IsInitializedAndAtStart) await MoveNextColumn(async, resumableOp: false).ConfigureAwait(false); - if (PgReader.FieldSize is (-1 or 0) and var fieldSize) + try { - // Commit, otherwise we'll have no way of knowing this column is finished. - await Commit(async).ConfigureAwait(false); - if (fieldSize is -1) + var reader = PgReader; + if (reader.FieldSize is -1) return DbNullOrThrow(); - } - var info = GetInfo(type, out var asObject); + var info = GetInfo(type, out var asObject); - T result; - if (async) - { - await PgReader.StartReadAsync(info.BufferRequirement, cancellationToken).ConfigureAwait(false); - result = asObject - ? (T)await info.Converter.ReadAsObjectAsync(PgReader, cancellationToken).ConfigureAwait(false) - : await info.GetConverter().ReadAsync(PgReader, cancellationToken).ConfigureAwait(false); - await PgReader.EndReadAsync().ConfigureAwait(false); + T result; + if (async) + { + await reader.StartReadAsync(info.BufferRequirement, cancellationToken).ConfigureAwait(false); + result = asObject + ? (T)await info.Converter.ReadAsObjectAsync(reader, cancellationToken).ConfigureAwait(false) + : await info.GetConverter().ReadAsync(reader, cancellationToken).ConfigureAwait(false); + await reader.EndReadAsync().ConfigureAwait(false); + } + else + { + reader.StartRead(info.BufferRequirement); + result = asObject + ? (T)info.Converter.ReadAsObject(reader) + : info.GetConverter().Read(reader); + reader.EndRead(); + } + + return result; } - else + finally { - PgReader.StartRead(info.BufferRequirement); - result = asObject - ? (T)info.Converter.ReadAsObject(PgReader) - : info.GetConverter().Read(PgReader); - PgReader.EndRead(); + // Don't delay committing the current column, just do it immediately (as opposed to on the next action: Read, IsNull, Skip). 
+ // Zero length columns would otherwise create an edge-case where we'd have to immediately commit as we won't know whether we're at the end. + // To guarantee the commit happens in that case we would still need this try finally, at which point it's just better to be consistent. + await Commit(async).ConfigureAwait(false); } - return result; - static T DbNullOrThrow() { // When T is a Nullable, we support returning null @@ -360,11 +366,7 @@ async Task Skip(bool async, CancellationToken cancellationToken = default) if (!IsInitializedAndAtStart) await MoveNextColumn(async, resumableOp: false).ConfigureAwait(false); - await PgReader.Consume(async, cancellationToken: cancellationToken).ConfigureAwait(false); - - // Commit, otherwise we'll have no way of knowing this column is finished. - if (PgReader.FieldSize is -1 or 0) - await Commit(async).ConfigureAwait(false); + await Commit(async).ConfigureAwait(false); } #endregion diff --git a/test/Npgsql.Tests/CopyTests.cs b/test/Npgsql.Tests/CopyTests.cs index f7a18a2884..6ca991a54c 100644 --- a/test/Npgsql.Tests/CopyTests.cs +++ b/test/Npgsql.Tests/CopyTests.cs @@ -562,6 +562,38 @@ public async Task ReadMoreColumnsThanExist() } } + [Test] + public async Task ReadZeroSizedColumns() + { + if (IsMultiplexing) + Assert.Ignore("Multiplexing: fails"); + using var conn = await OpenConnectionAsync(); + + var reader = conn.BeginBinaryExport(""" + COPY (values (1, '', ''), (2, null, ''), (3, '', null)) TO STDOUT BINARY + """); + while(reader.StartRow() != -1) + { + int? col1 = null; + if (reader.IsNull) + reader.Skip(); + else + col1 = reader.Read(); + + string? col2 = null; + if (reader.IsNull) + reader.Skip(); + else + col2 = reader.Read(); + + string? 
col3 = null; + if (reader.IsNull) + reader.Skip(); + else + col3 = reader.Read(); + } + } + [Test] public async Task StreamingRead() { From 38e05d6adba9b8dd7d009a2a7be98bfafeb0df7c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 17 Jan 2024 21:13:23 +0000 Subject: [PATCH 362/761] Bump actions/cache from 3 to 4 (#5534) --- .github/workflows/build.yml | 4 ++-- .github/workflows/native-aot.yml | 2 +- .github/workflows/rich-code-nav.yml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index a57ccf366f..78cf887e9d 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -57,7 +57,7 @@ jobs: uses: actions/checkout@v4 - name: NuGet Cache - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: ~/.nuget/packages key: ${{ runner.os }}-nuget-${{ hashFiles('**/Directory.Build.targets') }} @@ -337,7 +337,7 @@ jobs: uses: actions/checkout@v4 - name: NuGet Cache - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: ~/.nuget/packages key: ${{ runner.os }}-nuget-${{ hashFiles('**/Directory.Build.targets') }} diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index 77c75f343d..97cf2878e7 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -56,7 +56,7 @@ jobs: uses: actions/checkout@v4 - name: NuGet Cache - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: ~/.nuget/packages key: ${{ runner.os }}-nuget-${{ hashFiles('**/Directory.Build.targets') }} diff --git a/.github/workflows/rich-code-nav.yml b/.github/workflows/rich-code-nav.yml index 26fc0e7f8a..7ee82bfeb9 100644 --- a/.github/workflows/rich-code-nav.yml +++ b/.github/workflows/rich-code-nav.yml @@ -16,7 +16,7 @@ jobs: uses: actions/checkout@v4 - name: NuGet Cache - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: ~/.nuget/packages key: ${{ runner.os }}-nuget-${{ 
hashFiles('**/Directory.Build.targets') }} From bccdbdac20a191af8736ee143ead753005392e87 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Wed, 17 Jan 2024 22:39:09 +0100 Subject: [PATCH 363/761] Move byte and sbyte from extra conversions (#5533) Fixes #5521 --- .../ResolverFactories/AdoTypeInfoResolverFactory.cs | 13 +++++++++++-- .../ExtraConversionsTypeInfoResolverFactory.cs | 6 ------ test/Npgsql.Tests/Types/NumericTypeTests.cs | 4 +++- 3 files changed, 14 insertions(+), 9 deletions(-) diff --git a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs index 65b54efde1..9b024a5e09 100644 --- a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs @@ -56,6 +56,11 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) // Numeric mappings.AddStructType(DataTypeNames.Int2, static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter()), isDefault: true); + // Clr byte/sbyte maps to 'int2' as there is no byte type in PostgreSQL. 
+ mappings.AddStructType(DataTypeNames.Int2, + static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter()), isDefault: true); + mappings.AddStructType(DataTypeNames.Int2, + static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter()), isDefault: true); mappings.AddStructType(DataTypeNames.Int4, static (options, mapping, _) => mapping.CreateInfo(options, new Int4Converter()), isDefault: true); mappings.AddStructType(DataTypeNames.Int8, @@ -286,7 +291,8 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) static (options, mapping, _) => mapping.CreateInfo(options, new InternalCharConverter()), MatchRequirement.DataTypeName); mappings.AddStructType(DataTypeNames.Char, - static (options, mapping, _) => mapping.CreateInfo(options, new InternalCharConverter())); + static (options, mapping, _) => mapping.CreateInfo(options, new InternalCharConverter()), + MatchRequirement.DataTypeName); // Xid8 mappings.AddStructType(DataTypeNames.Xid8, @@ -317,7 +323,8 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) static (options, mapping, _) => mapping.CreateInfo(options, new PgLsnConverter()), MatchRequirement.DataTypeName); mappings.AddStructType(DataTypeNames.PgLsn, - static (options, mapping, _) => mapping.CreateInfo(options, new UInt64Converter())); + static (options, mapping, _) => mapping.CreateInfo(options, new UInt64Converter()), + MatchRequirement.DataTypeName); return mappings; } @@ -350,6 +357,8 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) // Numeric mappings.AddStructArrayType(DataTypeNames.Int2); + mappings.AddStructArrayType(DataTypeNames.Int2); + mappings.AddStructArrayType(DataTypeNames.Int2); mappings.AddStructArrayType(DataTypeNames.Int4); mappings.AddStructArrayType(DataTypeNames.Int8); mappings.AddStructArrayType(DataTypeNames.Float4); diff --git 
a/src/Npgsql/Internal/ResolverFactories/ExtraConversionsTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/ExtraConversionsTypeInfoResolverFactory.cs index 1614b6f1e4..c92d687d8e 100644 --- a/src/Npgsql/Internal/ResolverFactories/ExtraConversionsTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/ExtraConversionsTypeInfoResolverFactory.cs @@ -26,10 +26,6 @@ static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings) static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter())); mappings.AddStructType(DataTypeNames.Int2, static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter())); - mappings.AddStructType(DataTypeNames.Int2, - static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter())); - mappings.AddStructType(DataTypeNames.Int2, - static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter())); mappings.AddStructType(DataTypeNames.Int2, static (options, mapping, _) => mapping.CreateInfo(options, new Int2Converter())); mappings.AddStructType(DataTypeNames.Int2, @@ -161,8 +157,6 @@ static TypeInfoMappingCollection AddArrayInfos(TypeInfoMappingCollection mapping // Int2 mappings.AddStructArrayType(DataTypeNames.Int2); mappings.AddStructArrayType(DataTypeNames.Int2); - mappings.AddStructArrayType(DataTypeNames.Int2); - mappings.AddStructArrayType(DataTypeNames.Int2); mappings.AddStructArrayType(DataTypeNames.Int2); mappings.AddStructArrayType(DataTypeNames.Int2); mappings.AddStructArrayType(DataTypeNames.Int2); diff --git a/test/Npgsql.Tests/Types/NumericTypeTests.cs b/test/Npgsql.Tests/Types/NumericTypeTests.cs index 78dc2f7fa7..9fcd5b695b 100644 --- a/test/Npgsql.Tests/Types/NumericTypeTests.cs +++ b/test/Npgsql.Tests/Types/NumericTypeTests.cs @@ -20,10 +20,12 @@ public class NumericTypeTests : MultiplexingTestBase public async Task Int16() { await AssertType((short)8, "8", "smallint", NpgsqlDbType.Smallint, DbType.Int16); + // Clr 
byte/sbyte maps to 'int2' as there is no byte type in PostgreSQL, byte[] maps to bytea however. + await AssertType((byte)8, "8", "smallint", NpgsqlDbType.Smallint, DbType.Int16, isDefaultForReading: false, skipArrayCheck: true); + await AssertType((sbyte)8, "8", "smallint", NpgsqlDbType.Smallint, DbType.Int16, isDefaultForReading: false); await AssertType(8, "8", "smallint", NpgsqlDbType.Smallint, DbType.Int16, isDefault: false); await AssertType(8L, "8", "smallint", NpgsqlDbType.Smallint, DbType.Int16, isDefault: false); - await AssertType((byte)8, "8", "smallint", NpgsqlDbType.Smallint, DbType.Int16, isDefault: false); await AssertType(8F, "8", "smallint", NpgsqlDbType.Smallint, DbType.Int16, isDefault: false); await AssertType(8D, "8", "smallint", NpgsqlDbType.Smallint, DbType.Int16, isDefault: false); await AssertType(8M, "8", "smallint", NpgsqlDbType.Smallint, DbType.Int16, isDefault: false); From eb050c0a0fd4b70f9d11c6d41027076e9e4e01c1 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Mon, 22 Jan 2024 09:00:20 +0200 Subject: [PATCH 364/761] Correct JsonDocument deserialization for JSON null (#5541) Fixes #5540 --- .../Internal/Converters/JsonConverter.cs | 40 +++++++++++-------- test/Npgsql.Tests/Types/JsonTests.cs | 22 ++++++++++ 2 files changed, 46 insertions(+), 16 deletions(-) diff --git a/src/Npgsql/Internal/Converters/JsonConverter.cs b/src/Npgsql/Internal/Converters/JsonConverter.cs index fa654c9c64..77157875b3 100644 --- a/src/Npgsql/Internal/Converters/JsonConverter.cs +++ b/src/Npgsql/Internal/Converters/JsonConverter.cs @@ -48,28 +48,36 @@ public JsonConverter(bool jsonb, Encoding textEncoding, JsonSerializerOptions se if (JsonConverter.TryReadStream(_jsonb, _textEncoding, reader, out var byteCount, out var stream)) { using var _ = stream; - if (_jsonTypeInfo is JsonTypeInfo typeInfoOfT) - return async + return _jsonTypeInfo switch + { + JsonTypeInfo => (T)(object)(async + ? 
await JsonDocument.ParseAsync(stream, cancellationToken: cancellationToken).ConfigureAwait(false) + : JsonDocument.Parse(stream)), + + JsonTypeInfo typeInfoOfT => async ? await JsonSerializer.DeserializeAsync(stream, typeInfoOfT, cancellationToken).ConfigureAwait(false) - : JsonSerializer.Deserialize(stream, typeInfoOfT); + : JsonSerializer.Deserialize(stream, typeInfoOfT), - return (T?)(async - ? await JsonSerializer.DeserializeAsync(stream, (JsonTypeInfo)_jsonTypeInfo, cancellationToken).ConfigureAwait(false) - : JsonSerializer.Deserialize(stream, (JsonTypeInfo)_jsonTypeInfo)); + _ => (T?)(async + ? await JsonSerializer.DeserializeAsync(stream, (JsonTypeInfo)_jsonTypeInfo, cancellationToken) + .ConfigureAwait(false) + : JsonSerializer.Deserialize(stream, (JsonTypeInfo)_jsonTypeInfo)) + }; } - else + + var (rentedChars, rentedBytes) = await JsonConverter.ReadRentedBuffer(async, _textEncoding, byteCount, reader, cancellationToken).ConfigureAwait(false); + var result = _jsonTypeInfo switch { - var (rentedChars, rentedBytes) = await JsonConverter.ReadRentedBuffer(async, _textEncoding, byteCount, reader, cancellationToken).ConfigureAwait(false); - var result = _jsonTypeInfo is JsonTypeInfo typeInfoOfT - ? JsonSerializer.Deserialize(rentedChars.AsSpan(), typeInfoOfT) - : (T?)JsonSerializer.Deserialize(rentedChars.AsSpan(), (JsonTypeInfo)_jsonTypeInfo); + JsonTypeInfo => (T)(object)JsonDocument.Parse(rentedChars.AsMemory()), + JsonTypeInfo typeInfoOfT => JsonSerializer.Deserialize(rentedChars.AsSpan(), typeInfoOfT), + _ => (T?)JsonSerializer.Deserialize(rentedChars.AsSpan(), (JsonTypeInfo)_jsonTypeInfo) + }; - ArrayPool.Shared.Return(rentedChars.Array!); - if (rentedBytes is not null) - ArrayPool.Shared.Return(rentedBytes); + ArrayPool.Shared.Return(rentedChars.Array!); + if (rentedBytes is not null) + ArrayPool.Shared.Return(rentedBytes); - return result; - } + return result; } public override Size GetSize(SizeContext context, T? value, ref object? 
writeState) diff --git a/test/Npgsql.Tests/Types/JsonTests.cs b/test/Npgsql.Tests/Types/JsonTests.cs index f1f650222c..e7a9b4576e 100644 --- a/test/Npgsql.Tests/Types/JsonTests.cs +++ b/test/Npgsql.Tests/Types/JsonTests.cs @@ -78,6 +78,28 @@ public async Task As_JsonDocument() isDefault: false, comparer: (x, y) => x.RootElement.GetProperty("K").GetString() == y.RootElement.GetProperty("K").GetString()); + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5540")] + public async Task As_JsonDocument_with_null_root() + => await AssertType( + JsonDocument.Parse("null"), + "null", + PostgresType, + NpgsqlDbType, + isDefault: false, + comparer: (x, y) => x.RootElement.ValueKind == y.RootElement.ValueKind, + skipArrayCheck: true); + + [Test] + public async Task As_JsonElement_with_null_root() + => await AssertType( + JsonDocument.Parse("null").RootElement, + "null", + PostgresType, + NpgsqlDbType, + isDefault: false, + comparer: (x, y) => x.ValueKind == y.ValueKind, + skipArrayCheck: true); + [Test] public async Task As_JsonDocument_supported_only_with_SystemTextJson() { From c1574f5004884c641b76d6d05488d20e4ff4c172 Mon Sep 17 00:00:00 2001 From: Brar Piening Date: Wed, 24 Jan 2024 21:54:31 +0100 Subject: [PATCH 365/761] Add test to verify correct behavior of unchanged toasted values (#5489) --- .../Replication/PgOutputReplicationTests.cs | 78 +++++++++++++++++++ 1 file changed, 78 insertions(+) diff --git a/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs b/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs index 3eb3921b79..e3d81a63f5 100644 --- a/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs +++ b/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs @@ -1267,6 +1267,84 @@ await c.ExecuteNonQueryAsync(@$" }, 2); } + [Test(Description = $"Tests whether {nameof(FullUpdateMessage)} instances with unchanged toasted values behave as expected."), Explicit("Massive inserts")] + public Task 
Update_for_full_replica_identity_with_unchanged_toasted_value() + => SafeReplicationTest( + async (slotName, tableName, publicationName) => + { + await using var c = await OpenConnectionAsync(); + await c.ExecuteNonQueryAsync($$""" + CREATE TABLE {{tableName}} (id INT PRIMARY KEY, name JSONB NOT NULL, something_else INT NULL); + ALTER TABLE {{tableName}} REPLICA IDENTITY FULL; + INSERT INTO {{tableName}} SELECT i, ('{"row_' || i::text || '": [{{string.Join(", ", Enumerable.Range(1, 1024))}}]}')::jsonb, NULL FROM generate_series(1, 15000) s(i); + CREATE PUBLICATION {{publicationName}} FOR TABLE {{tableName}}; + """); + await using var rc = await OpenReplicationConnectionAsync(); + var slot = await rc.CreatePgOutputReplicationSlot(slotName); + + await using var tran = await c.BeginTransactionAsync(); + await c.ExecuteNonQueryAsync($""" + UPDATE {tableName} SET name='"val1_updated"' WHERE id = 1; + UPDATE {tableName} SET something_else = id WHERE id > 1 + """); + await tran.CommitAsync(); + + using var streamingCts = new CancellationTokenSource(); + var messages = SkipEmptyTransactions(rc.StartReplication(slot, GetOptions(publicationName), streamingCts.Token)) + .GetAsyncEnumerator(); + + // Begin Transaction + var transactionXid = await AssertTransactionStart(messages); + + // Relation + var relationMsg = await NextMessage(messages); + + // Update of the first row (updating the jsonb column) + var updateMsg = await NextMessage(messages); + Assert.That(updateMsg.TransactionXid, IsStreaming ? 
Is.EqualTo(transactionXid) : Is.Null); + Assert.That(updateMsg.Relation, Is.SameAs(relationMsg)); + + var newRowColumnEnumerator = updateMsg.NewRow.GetAsyncEnumerator(); + Assert.That(await newRowColumnEnumerator.MoveNextAsync(), Is.True); + Assert.That(await newRowColumnEnumerator.MoveNextAsync(), Is.True); + Assert.That(newRowColumnEnumerator.Current.IsUnchangedToastedValue, Is.False); + Assert.That(await newRowColumnEnumerator.Current.Get(), Is.EqualTo("\"val1_updated\"")); + Assert.That(await newRowColumnEnumerator.MoveNextAsync(), Is.True); + Assert.That(newRowColumnEnumerator.Current.IsDBNull, Is.True); + Assert.That(await newRowColumnEnumerator.MoveNextAsync(), Is.False); + + + // Update of the following rows (not updating the jsonb column) + updateMsg = await NextMessage(messages); + Assert.That(updateMsg.TransactionXid, IsStreaming ? Is.EqualTo(transactionXid) : Is.Null); + Assert.That(updateMsg.Relation, Is.SameAs(relationMsg)); + + newRowColumnEnumerator = updateMsg.NewRow.GetAsyncEnumerator(); + Assert.That(await newRowColumnEnumerator.MoveNextAsync(), Is.True); + Assert.That(await newRowColumnEnumerator.MoveNextAsync(), Is.True); + Assert.That(newRowColumnEnumerator.Current.IsUnchangedToastedValue, Is.True); + Assert.That(async () => await newRowColumnEnumerator.Current.Get(), + Throws.TypeOf() + .With.Message.EqualTo("Column 'name' is an unchanged TOASTed value (actual value not sent).")); + Assert.That(await newRowColumnEnumerator.MoveNextAsync(), Is.True); + Assert.That(newRowColumnEnumerator.Current.IsDBNull, Is.False); + Assert.That(await newRowColumnEnumerator.MoveNextAsync(), Is.False); + + // Remaining updates + for (var updateCount = 0; updateCount < 14998; updateCount++) + await NextMessage(messages); + + // Commit Transaction + await AssertTransactionCommit(messages); + + streamingCts.Cancel(); + Assert.That(async () => await messages.MoveNextAsync(), Throws.Exception.AssignableTo() + .With.InnerException.InstanceOf() + 
.And.InnerException.Property(nameof(PostgresException.SqlState)) + .EqualTo(PostgresErrorCodes.QueryCanceled)); + await rc.DropReplicationSlot(slotName, cancellationToken: CancellationToken.None); + }); + #region Non-Test stuff (helper methods, initialization, enums, ...) async Task AssertTransactionStart(IAsyncEnumerator messages) From dffa843e92db988b98ea7af7f4837af917c6f916 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Wed, 24 Jan 2024 22:46:18 +0100 Subject: [PATCH 366/761] Fix another sequential buffered seek exit state issue (#5486) Fixes #5430 --- src/Npgsql/NpgsqlDataReader.cs | 24 ++++++++++-------- test/Npgsql.Tests/ReaderTests.cs | 42 +++++++++++++++++++++++++++++++- 2 files changed, 55 insertions(+), 11 deletions(-) diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index c503a359e1..59293e989b 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -1992,9 +1992,16 @@ ValueTask SeekToColumnSequential(bool async, int ordinal, DataFormat dataFo var committed = false; if (!PgReader.CommitHasIO(reread)) { + var columnLength = PgReader.FieldSize; PgReader.Commit(reread); committed = true; - if (TrySeekBuffered(ordinal, out var columnLength)) + if (reread) + { + PgReader.Init(columnLength, dataFormat, columnLength is -1 || resumableOp); + return new(columnLength); + } + + if (TrySeekBuffered(ordinal, out columnLength)) { PgReader.Init(columnLength, dataFormat, columnLength is -1 || resumableOp); return new(columnLength); @@ -2009,10 +2016,10 @@ ValueTask SeekToColumnSequential(bool async, int ordinal, DataFormat dataFo } } - return Core(async, !committed, ordinal, dataFormat, resumableOp); + return Core(async, reread, !committed, ordinal, dataFormat, resumableOp); [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder<>))] - async ValueTask Core(bool async, bool commit, int ordinal, DataFormat dataFormat, bool resumableOp) + async ValueTask Core(bool async, bool reread, bool commit, int ordinal, 
DataFormat dataFormat, bool resumableOp) { if (commit) { @@ -2023,7 +2030,7 @@ async ValueTask Core(bool async, bool commit, int ordinal, DataFormat dataF PgReader.Commit(reread); } - if (ordinal == _column) + if (reread) { PgReader.Init(PgReader.FieldSize, dataFormat, PgReader.FieldSize is -1 || resumableOp); return PgReader.FieldSize; @@ -2053,12 +2060,6 @@ async ValueTask Core(bool async, bool commit, int ordinal, DataFormat dataF bool TrySeekBuffered(int ordinal, out int columnLength) { - if (ordinal == _column) - { - columnLength = PgReader.FieldSize; - return true; - } - // Skip over unwanted fields columnLength = -1; var buffer = Buffer; @@ -2066,7 +2067,10 @@ bool TrySeekBuffered(int ordinal, out int columnLength) while (_column < ordinal - 1) { if (buffer.ReadBytesLeft < 4) + { + columnLength = -1; return false; + } columnLength = buffer.ReadInt32(); _column++; Debug.Assert(columnLength >= -1); diff --git a/test/Npgsql.Tests/ReaderTests.cs b/test/Npgsql.Tests/ReaderTests.cs index 7b871c8ab0..e37bea6b31 100644 --- a/test/Npgsql.Tests/ReaderTests.cs +++ b/test/Npgsql.Tests/ReaderTests.cs @@ -900,7 +900,7 @@ public async Task SequentialBufferedSeek() await using var conn = await OpenConnectionAsync(); using var cmd = conn.CreateCommand(); cmd.CommandText = """select v.i, jsonb_build_object(), current_timestamp + make_interval(0, 0, 0, 0, 0, 0, v.i), null::jsonb, '{"value": 42}'::jsonb from generate_series(1, 1000) as v(i)"""; - var rdr = await cmd.ExecuteReaderAsync(CommandBehavior.SequentialAccess); + var rdr = await cmd.ExecuteReaderAsync(Behavior); while (await rdr.ReadAsync()) { var v1 = rdr[0]; var v2 = rdr[1]; @@ -910,6 +910,46 @@ public async Task SequentialBufferedSeek() } } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5430")] + public async Task SequentialBufferedSeekLong() + { + await using var conn = await OpenConnectionAsync(); + using var cmd = conn.CreateCommand(); + cmd.CommandText = """select v.i, repeat('1', 10), repeat('2', 
10), repeat('3', 10), repeat('4', 10), 1, 2 from generate_series(1, 1000) as v(i)"""; + var rdr = await cmd.ExecuteReaderAsync(Behavior); + while (await rdr.ReadAsync()) + { + _ = rdr[0]; + _ = rdr[1]; + //_ = rdr[2]; + //_ = rdr[3]; + //_ = rdr[4]; + //_ = rdr[5]; // uncomment lines for successful execution + _ = rdr[6]; + } + } + + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5430")] + public async Task SequentialBufferedSeekReread() + { + await using var conn = await OpenConnectionAsync(); + using var cmd = conn.CreateCommand(); + cmd.CommandText = """select v.i, repeat('1', 10), repeat('2', 10), repeat('3', 10), repeat('4', 10), 1, NULL from generate_series(1, 1000) as v(i)"""; + var rdr = await cmd.ExecuteReaderAsync(Behavior); + while (await rdr.ReadAsync()) + { + _ = rdr[0]; + _ = rdr[1]; + //_ = rdr[2]; + //_ = rdr[3]; + //_ = rdr[4]; + //_ = rdr[5]; // uncomment lines for successful execution + _ = rdr.IsDBNull(6); + _ = rdr[6]; + Assert.True(rdr.IsDBNull(6)); + } + } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5484")] public async Task GetFieldValueAsync_AsyncRead() { From e1165ea7ca94ef7602291ce9f22b21ca61793572 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 24 Jan 2024 22:48:54 +0100 Subject: [PATCH 367/761] Bump OpenTelemetry.API from 1.6.0 to 1.7.0 (#5494) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 8176504e3d..bb54e88a9b 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -7,7 +7,7 @@ - + From d85234eb7eb6acf4078d12e269601dbf57789be0 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Thu, 25 Jan 2024 00:52:48 +0300 Subject: [PATCH 368/761] Remove isFirstAttempt argument from NpgsqlConnector.Open (#5545) Since it's not used anyway --- src/Npgsql/Internal/NpgsqlConnector.cs | 14 ++++++-------- 
src/Npgsql/Internal/TransportSecurityHandler.cs | 6 +++--- 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 8b65d0f4a7..2db090cd46 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -540,10 +540,9 @@ static async Task OpenCore( SslMode sslMode, NpgsqlTimeout timeout, bool async, - CancellationToken cancellationToken, - bool isFirstAttempt = true) + CancellationToken cancellationToken) { - await conn.RawOpen(sslMode, timeout, async, cancellationToken, isFirstAttempt).ConfigureAwait(false); + await conn.RawOpen(sslMode, timeout, async, cancellationToken).ConfigureAwait(false); var username = await conn.GetUsernameAsync(async, cancellationToken).ConfigureAwait(false); @@ -572,8 +571,7 @@ await OpenCore( sslMode == SslMode.Prefer ? SslMode.Disable : SslMode.Require, timeout, async, - cancellationToken, - isFirstAttempt: false).ConfigureAwait(false); + cancellationToken).ConfigureAwait(false); return; } @@ -720,7 +718,7 @@ async ValueTask GetUsernameAsyncInternal() } } - async Task RawOpen(SslMode sslMode, NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken, bool isFirstAttempt = true) + async Task RawOpen(SslMode sslMode, NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken) { try { @@ -769,7 +767,7 @@ async Task RawOpen(SslMode sslMode, NpgsqlTimeout timeout, bool async, Cancellat throw new NpgsqlException("SSL connection requested. 
No SSL enabled connection from this host is configured."); break; case 'S': - await DataSource.TransportSecurityHandler.NegotiateEncryption(async, this, sslMode, timeout, isFirstAttempt).ConfigureAwait(false); + await DataSource.TransportSecurityHandler.NegotiateEncryption(async, this, sslMode, timeout).ConfigureAwait(false); break; } @@ -794,7 +792,7 @@ async Task RawOpen(SslMode sslMode, NpgsqlTimeout timeout, bool async, Cancellat } } - internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, bool async, bool isFirstAttempt) + internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, bool async) { var clientCertificates = new X509Certificate2Collection(); var certPath = Settings.SslCertificate ?? PostgresEnvironment.SslCert ?? PostgresEnvironment.SslCertDefault; diff --git a/src/Npgsql/Internal/TransportSecurityHandler.cs b/src/Npgsql/Internal/TransportSecurityHandler.cs index e34b2444a7..ecb447c6da 100644 --- a/src/Npgsql/Internal/TransportSecurityHandler.cs +++ b/src/Npgsql/Internal/TransportSecurityHandler.cs @@ -16,7 +16,7 @@ public virtual Func? RootCertificateCallback set => throw new NotSupportedException(string.Format(NpgsqlStrings.TransportSecurityDisabled, nameof(NpgsqlSlimDataSourceBuilder.EnableTransportSecurity))); } - public virtual Task NegotiateEncryption(bool async, NpgsqlConnector connector, SslMode sslMode, NpgsqlTimeout timeout, bool isFirstAttempt) + public virtual Task NegotiateEncryption(bool async, NpgsqlConnector connector, SslMode sslMode, NpgsqlTimeout timeout) => throw new NotSupportedException(string.Format(NpgsqlStrings.TransportSecurityDisabled, nameof(NpgsqlSlimDataSourceBuilder.EnableTransportSecurity))); public virtual void AuthenticateSASLSha256Plus(NpgsqlConnector connector, ref string mechanism, ref string cbindFlag, ref string cbind, @@ -30,8 +30,8 @@ sealed class RealTransportSecurityHandler : TransportSecurityHandler public override Func? 
RootCertificateCallback { get; set; } - public override Task NegotiateEncryption(bool async, NpgsqlConnector connector, SslMode sslMode, NpgsqlTimeout timeout, bool isFirstAttempt) - => connector.NegotiateEncryption(sslMode, timeout, async, isFirstAttempt); + public override Task NegotiateEncryption(bool async, NpgsqlConnector connector, SslMode sslMode, NpgsqlTimeout timeout) + => connector.NegotiateEncryption(sslMode, timeout, async); public override void AuthenticateSASLSha256Plus(NpgsqlConnector connector, ref string mechanism, ref string cbindFlag, ref string cbind, ref bool successfulBind) From 010878c6947af8056f188ef1a19eb15992d47a0c Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Wed, 24 Jan 2024 23:58:38 +0100 Subject: [PATCH 369/761] Correctly handle resolver info in binary exporter (#5546) Fixes #5520 --- src/Npgsql/Internal/PgTypeInfo.cs | 6 ++--- src/Npgsql/NpgsqlBinaryExporter.cs | 4 ++- test/Npgsql.Tests/CopyTests.cs | 42 +++++++++++++++++++++++++++--- 3 files changed, 45 insertions(+), 7 deletions(-) diff --git a/src/Npgsql/Internal/PgTypeInfo.cs b/src/Npgsql/Internal/PgTypeInfo.cs index 56d7e0bd0f..0c1f2f4ede 100644 --- a/src/Npgsql/Internal/PgTypeInfo.cs +++ b/src/Npgsql/Internal/PgTypeInfo.cs @@ -150,7 +150,7 @@ internal bool TryBind(Field field, DataFormat format, out PgConverterInfo info) return true; case PgResolverTypeInfo resolverInfo: var resolution = resolverInfo.GetResolution(field); - if (!HasCachedInfo(resolution.Converter) + if (HasCachedInfo(resolution.Converter) ? !CachedCanConvert(format, out bufferRequirements) : !resolution.Converter.CanConvert(format, out bufferRequirements)) { @@ -276,8 +276,8 @@ PgConverterResolution ThrowNotSupportedType(Type? type) public PgConverterResolution GetResolution(Field field) => _converterResolver.GetInternal(this, field); - public PgConverterResolution GetDefaultResolution(PgTypeId? pgTypeId) - => _converterResolver.GetDefaultInternal(ValidateResolution, Options.PortableTypeIds, pgTypeId ?? 
PgTypeId); + public PgConverterResolution GetDefaultResolution(PgTypeId? expectedPgTypeId) + => _converterResolver.GetDefaultInternal(ValidateResolution, Options.PortableTypeIds, expectedPgTypeId ?? PgTypeId); public PgConverterResolver GetConverterResolver() => _converterResolver; } diff --git a/src/Npgsql/NpgsqlBinaryExporter.cs b/src/Npgsql/NpgsqlBinaryExporter.cs index 0a05ac2337..35dea6985d 100644 --- a/src/Npgsql/NpgsqlBinaryExporter.cs +++ b/src/Npgsql/NpgsqlBinaryExporter.cs @@ -320,8 +320,10 @@ PgConverterInfo CreateConverterInfo(Type type, NpgsqlDbType? npgsqlDbType = null } var info = options.GetTypeInfo(type, pgTypeId) ?? throw new NotSupportedException($"Reading is not supported for type '{type}'{(npgsqlDbType is null ? "" : $" and NpgsqlDbType '{npgsqlDbType}'")}"); + // Binary export has no type info so we only do caller-directed interpretation of data. - return info.Bind(new Field("?", info.PgTypeId!.Value, -1), DataFormat.Binary); + return info.Bind(new Field("?", + info.PgTypeId ?? 
((PgResolverTypeInfo)info).GetDefaultResolution(null).PgTypeId, -1), DataFormat.Binary); PgTypeId GetRepresentationalOrDefault(string dataTypeName) { diff --git a/test/Npgsql.Tests/CopyTests.cs b/test/Npgsql.Tests/CopyTests.cs index 6ca991a54c..d84ad7b53a 100644 --- a/test/Npgsql.Tests/CopyTests.cs +++ b/test/Npgsql.Tests/CopyTests.cs @@ -517,7 +517,7 @@ public async Task MixedOperations() Assert.Ignore("Multiplexing: fails"); using var conn = await OpenConnectionAsync(); - var reader = conn.BeginBinaryExport(""" + using var reader = conn.BeginBinaryExport(""" COPY (values ('foo', 1), ('bar', null), (null, 2)) TO STDOUT BINARY """); while(reader.StartRow() != -1) @@ -542,7 +542,7 @@ public async Task ReadMoreColumnsThanExist() Assert.Ignore("Multiplexing: fails"); using var conn = await OpenConnectionAsync(); - var reader = conn.BeginBinaryExport(""" + using var reader = conn.BeginBinaryExport(""" COPY (values ('foo', 1), ('bar', null), (null, 2)) TO STDOUT BINARY """); while(reader.StartRow() != -1) @@ -569,7 +569,7 @@ public async Task ReadZeroSizedColumns() Assert.Ignore("Multiplexing: fails"); using var conn = await OpenConnectionAsync(); - var reader = conn.BeginBinaryExport(""" + using var reader = conn.BeginBinaryExport(""" COPY (values (1, '', ''), (2, null, ''), (3, '', null)) TO STDOUT BINARY """); while(reader.StartRow() != -1) @@ -594,6 +594,42 @@ public async Task ReadZeroSizedColumns() } } + [Test] + public async Task ReadConverterResolverType() + { + if (IsMultiplexing) + Assert.Ignore("Multiplexing: fails"); + using var conn = await OpenConnectionAsync(); + + using (var reader = conn.BeginBinaryExport(""" + COPY (values (NOW()), (NULL)) TO STDOUT BINARY + """)) + { + while (reader.StartRow() != -1) + { + DateTime? 
col1 = null; + if (reader.IsNull) + reader.Skip(); + else + col1 = reader.Read(); + } + } + + using (var reader = conn.BeginBinaryExport(""" + COPY (values (NOW()), (NULL)) TO STDOUT BINARY + """)) + { + while (reader.StartRow() != -1) + { + DateTimeOffset? col1 = null; + if (reader.IsNull) + reader.Skip(); + else + col1 = reader.Read(); + } + } + } + [Test] public async Task StreamingRead() { From 7087812c0182df2177fc98d1e11084684038e9de Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Thu, 25 Jan 2024 16:29:39 +0300 Subject: [PATCH 370/761] Stop sending Host with SslStream if it's an IP address (#5547) Fixes #5543 --- src/Npgsql/Internal/NpgsqlConnector.cs | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 2db090cd46..c3726180a1 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -868,6 +868,18 @@ internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, certificateValidationCallback = SslVerifyFullValidation; } + var host = Host; + +#if !NET8_0_OR_GREATER + // If the host is a valid IP address - replace it with an empty string + // We do that because .NET uses targetHost argument to send SNI to the server + // RFC explicitly prohibits sending an IP address so some servers might fail + // This was already fixed for .NET 8 + // See #5543 for discussion + if (IPAddress.TryParse(host, out _)) + host = string.Empty; +#endif + timeout.CheckAndApply(this); try @@ -875,9 +887,9 @@ internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, var sslStream = new SslStream(_stream, leaveInnerStreamOpen: false, certificateValidationCallback); if (async) - await sslStream.AuthenticateAsClientAsync(Host, clientCertificates, SslProtocols.None, checkCertificateRevocation).ConfigureAwait(false); + await sslStream.AuthenticateAsClientAsync(host, clientCertificates, 
SslProtocols.None, checkCertificateRevocation).ConfigureAwait(false); else - sslStream.AuthenticateAsClient(Host, clientCertificates, SslProtocols.None, checkCertificateRevocation); + sslStream.AuthenticateAsClient(host, clientCertificates, SslProtocols.None, checkCertificateRevocation); _stream = sslStream; } From 02b872632119050b305ddd9ef58a4a34d4cb0792 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 Jan 2024 23:24:30 +0200 Subject: [PATCH 371/761] Bump Microsoft.Data.SqlClient from 5.1.4 to 5.1.5 (#5548) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index bb54e88a9b..b12dc4ae42 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -43,7 +43,7 @@ - + From 43b3d0fc16515d2831bbdee7db808aa41302dcaa Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Wed, 31 Jan 2024 17:15:01 +0100 Subject: [PATCH 372/761] Tupled record array fix (#5551) Fixes #5518 --- .../TupledRecordTypeInfoResolverFactory.cs | 57 +++++-------------- test/Npgsql.Tests/Types/RecordTests.cs | 16 ++++-- 2 files changed, 24 insertions(+), 49 deletions(-) diff --git a/src/Npgsql/Internal/ResolverFactories/TupledRecordTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/TupledRecordTypeInfoResolverFactory.cs index 1265dfc432..189f84a868 100644 --- a/src/Npgsql/Internal/ResolverFactories/TupledRecordTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/TupledRecordTypeInfoResolverFactory.cs @@ -15,37 +15,16 @@ sealed class TupledRecordTypeInfoResolverFactory : PgTypeInfoResolverFactory [RequiresUnreferencedCode("Tupled record resolver may perform reflection on trimmed tuple types.")] [RequiresDynamicCode("Tupled records need to construct a generic converter for a statically unknown (value)tuple type.")] - class Resolver : IPgTypeInfoResolver + class Resolver : DynamicTypeInfoResolver { - 
TypeInfoMappingCollection? _mappings; - protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new()); - - public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) - => Mappings.Find(type, dataTypeName, options); - - // Stand-in type, type match predicate does the actual work. - static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) { - mappings.AddType>(DataTypeNames.Record, Factory, - mapping => mapping with - { - MatchRequirement = MatchRequirement.DataTypeName, - TypeMatchPredicate = type => type is { IsConstructedGenericType: true, FullName: not null } - && type.FullName.StartsWith("System.Tuple", StringComparison.Ordinal) - }); - - mappings.AddStructType>(DataTypeNames.Record, Factory, - mapping => mapping with - { - MatchRequirement = MatchRequirement.DataTypeName, - TypeMatchPredicate = type => type is { IsConstructedGenericType: true, FullName: not null } - && type.FullName.StartsWith("System.ValueTuple", StringComparison.Ordinal) - }); - - return mappings; - } + if (!(dataTypeName == DataTypeNames.Record && type is { IsConstructedGenericType: true, FullName: not null } && ( + type.FullName.StartsWith("System.Tuple", StringComparison.Ordinal) + || type.FullName.StartsWith("System.ValueTuple", StringComparison.Ordinal)))) + return null; - static readonly TypeInfoFactory Factory = static (options, mapping, _) => + return CreateCollection().AddMapping(type, dataTypeName, (options, mapping, _) => { var constructors = mapping.Type.GetConstructors(); ConstructorInfo? 
constructor = null; @@ -72,7 +51,8 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) var converterType = typeof(RecordConverter<>).MakeGenericType(mapping.Type); var converter = (PgConverter)Activator.CreateInstance(converterType, options, factory)!; return mapping.CreateInfo(options, converter, supportsWriting: false); - }; + }); + } static Func CreateFactory(ConstructorInfo constructor, int constructorParameters) => array => { @@ -84,20 +64,11 @@ static Func CreateFactory(ConstructorInfo constructor, int const [RequiresUnreferencedCode("Tupled record resolver may perform reflection on trimmed tuple types.")] [RequiresDynamicCode("Tupled records need to construct a generic converter for a statically unknown (value)tuple type.")] - sealed class ArrayResolver : Resolver, IPgTypeInfoResolver + sealed class ArrayResolver : Resolver { - TypeInfoMappingCollection? _mappings; - new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); - - public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) - => Mappings.Find(type, dataTypeName, options); - - static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) - { - mappings.AddArrayType>(DataTypeNames.Record, suppressObjectMapping: true); - mappings.AddStructArrayType>(DataTypeNames.Record, suppressObjectMapping: true); - - return mappings; - } + protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) + => type is not null && IsArrayLikeType(type, out var elementType) && IsArrayDataTypeName(dataTypeName, options, out var elementDataTypeName) + ? 
base.GetMappings(elementType, elementDataTypeName, options)?.AddArrayMapping(elementType, elementDataTypeName) + : null; } } diff --git a/test/Npgsql.Tests/Types/RecordTests.cs b/test/Npgsql.Tests/Types/RecordTests.cs index 268a7027aa..7aefe1e98d 100644 --- a/test/Npgsql.Tests/Types/RecordTests.cs +++ b/test/Npgsql.Tests/Types/RecordTests.cs @@ -45,10 +45,12 @@ public async Task Read_Record_as_ValueTuple() Assert.That(record.Item1, Is.EqualTo(1)); Assert.That(record.Item2, Is.EqualTo("foo")); - var array = (object[][])reader[1]; + var array = reader.GetFieldValue<(int, string)[]>(1); Assert.That(array.Length, Is.EqualTo(2)); - Assert.That(array[0][0], Is.EqualTo(1)); - Assert.That(array[1][0], Is.EqualTo(1)); + Assert.That(array[0].Item1, Is.EqualTo(1)); + Assert.That(array[0].Item2, Is.EqualTo("foo")); + Assert.That(array[1].Item1, Is.EqualTo(1)); + Assert.That(array[1].Item2, Is.EqualTo("foo")); } [Test] @@ -66,10 +68,12 @@ public async Task Read_Record_as_Tuple() Assert.That(record.Item1, Is.EqualTo(1)); Assert.That(record.Item2, Is.EqualTo("foo")); - var array = (object[][])reader[1]; + var array = reader.GetFieldValue[]>(1); Assert.That(array.Length, Is.EqualTo(2)); - Assert.That(array[0][0], Is.EqualTo(1)); - Assert.That(array[1][0], Is.EqualTo(1)); + Assert.That(array[0].Item1, Is.EqualTo(1)); + Assert.That(array[0].Item2, Is.EqualTo("foo")); + Assert.That(array[1].Item1, Is.EqualTo(1)); + Assert.That(array[1].Item2, Is.EqualTo("foo")); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1238")] From c51e6d0721abb345cc47fac54387232dc8d6f7d2 Mon Sep 17 00:00:00 2001 From: Xavier Fischer Date: Wed, 31 Jan 2024 18:02:59 +0100 Subject: [PATCH 373/761] Decimal scale was lost when dealing with decimal zero (#5544) Fixes #5552 --- .../Internal/Converters/Primitive/PgNumeric.cs | 4 ++-- test/Npgsql.Tests/Types/NumericTests.cs | 15 +++++++++++++++ 2 files changed, 17 insertions(+), 2 deletions(-) diff --git 
a/src/Npgsql/Internal/Converters/Primitive/PgNumeric.cs b/src/Npgsql/Internal/Converters/Primitive/PgNumeric.cs index fd01b6a4f1..19266cda1f 100644 --- a/src/Npgsql/Internal/Converters/Primitive/PgNumeric.cs +++ b/src/Npgsql/Internal/Converters/Primitive/PgNumeric.cs @@ -315,10 +315,11 @@ internal static decimal ToDecimal(short scale, short weight, ushort sign, Span MaxDecimalScale) throw new OverflowException("Numeric value does not fit in a System.Decimal"); + var scaleFactor = new decimal(1, 0, 0, false, (byte)(scale > 0 ? scale : 0)); if (digitCount == 0) return sign switch { - SignPositive or SignNegative => decimal.Zero, + SignPositive or SignNegative => decimal.Zero * scaleFactor, SignNan => throw new InvalidCastException("Numeric NaN not supported by System.Decimal"), SignPinf => throw new InvalidCastException("Numeric Infinity not supported by System.Decimal"), SignNinf => throw new InvalidCastException("Numeric -Infinity not supported by System.Decimal"), @@ -360,7 +361,6 @@ internal static decimal ToDecimal(short scale, short weight, ushort sign, Span 0 ? scale : 0)); result *= scaleFactor; return sign == SignNegative ? 
-result : result; } diff --git a/test/Npgsql.Tests/Types/NumericTests.cs b/test/Npgsql.Tests/Types/NumericTests.cs index b0cf9596f1..43dd846a8c 100644 --- a/test/Npgsql.Tests/Types/NumericTests.cs +++ b/test/Npgsql.Tests/Types/NumericTests.cs @@ -196,5 +196,20 @@ public async Task BigInteger_large() Assert.That(rdr.GetFieldValue(1), Is.EqualTo(num)); } + [Test] + public async Task NumericZero_WithScale() + { + // Scale should not be lost when dealing with 0 + using var conn = await OpenConnectionAsync(); + using var cmd = new NpgsqlCommand("SELECT @p", conn); + var param = new NpgsqlParameter("p", DbType.Decimal, 10, null, ParameterDirection.Input, false, 10, 2, DataRowVersion.Default, 0.00M); + cmd.Parameters.Add(param); + using var rdr = await cmd.ExecuteReaderAsync(); + await rdr.ReadAsync(); + var value = rdr.GetFieldValue(0); + + Assert.That(value.Scale, Is.EqualTo(2)); + } + public NumericTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} } From df80534056f2216a39613a63ce42d3317416856f Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Wed, 31 Jan 2024 22:34:06 +0100 Subject: [PATCH 374/761] Fix nullable read creating an allocation (#5555) --- .../Internal/Converters/AsyncHelpers.cs | 19 ++++++++----------- .../Internal/Converters/CastingConverter.cs | 2 +- .../Internal/Converters/NullableConverter.cs | 2 +- 3 files changed, 10 insertions(+), 13 deletions(-) diff --git a/src/Npgsql/Internal/Converters/AsyncHelpers.cs b/src/Npgsql/Internal/Converters/AsyncHelpers.cs index ae824019b3..ccf8780ca0 100644 --- a/src/Npgsql/Internal/Converters/AsyncHelpers.cs +++ b/src/Npgsql/Internal/Converters/AsyncHelpers.cs @@ -60,32 +60,29 @@ public Continuation(object handle, delegate* conti public void Invoke(Task task, CompletionSource tcs) => _continuation(task, tcs); } - public static unsafe ValueTask ComposingReadAsync(this PgConverter instance, PgConverter effectiveConverter, PgReader reader, CancellationToken cancellationToken) + public static unsafe 
ValueTask ReadAsyncAsNullable(this PgConverter instance, PgConverter effectiveConverter, PgReader reader, CancellationToken cancellationToken) + where T : struct { - if (!typeof(T).IsValueType && !typeof(TEffective).IsValueType) - { - var value = effectiveConverter.ReadAsync(reader, cancellationToken); - return Unsafe.As, ValueTask>(ref value); - } // Easy if we have all the data. var task = effectiveConverter.ReadAsync(reader, cancellationToken); if (task.IsCompletedSuccessfully) - return new((T)(object)task.Result!); + return new(new T?(task.Result)); // Otherwise we do one additional allocation, this allow us to share state machine codegen for all Ts. - var source = new CompletionSource(); + var source = new CompletionSource(); AwaitTask(task.AsTask(), source, new(instance, &UnboxAndComplete)); return source.Task; static void UnboxAndComplete(Task task, CompletionSource completionSource) { + // Justification: unsafe exact cast used to reduce generic duplication cost. Debug.Assert(task is Task); - Debug.Assert(completionSource is CompletionSource); - Unsafe.As>(completionSource).SetResult(new ValueTask(Unsafe.As>(task)).Result); + Debug.Assert(completionSource is CompletionSource); + Unsafe.As>(completionSource).SetResult(new T?(new ValueTask(Unsafe.As>(task)).Result)); } } - public static unsafe ValueTask ComposingReadAsObjectAsync(this PgConverter instance, PgConverter effectiveConverter, PgReader reader, CancellationToken cancellationToken) + public static unsafe ValueTask ReadAsObjectAsyncAsT(this PgConverter instance, PgConverter effectiveConverter, PgReader reader, CancellationToken cancellationToken) { if (!typeof(T).IsValueType) { diff --git a/src/Npgsql/Internal/Converters/CastingConverter.cs b/src/Npgsql/Internal/Converters/CastingConverter.cs index fdf8b9a26e..3fbfc5059d 100644 --- a/src/Npgsql/Internal/Converters/CastingConverter.cs +++ b/src/Npgsql/Internal/Converters/CastingConverter.cs @@ -22,7 +22,7 @@ public override bool CanConvert(DataFormat 
format, out BufferRequirements buffer public override T Read(PgReader reader) => (T)_effectiveConverter.ReadAsObject(reader); public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) - => this.ComposingReadAsObjectAsync(_effectiveConverter, reader, cancellationToken); + => this.ReadAsObjectAsyncAsT(_effectiveConverter, reader, cancellationToken); public override Size GetSize(SizeContext context, T value, ref object? writeState) => _effectiveConverter.GetSizeAsObject(context, value!, ref writeState); diff --git a/src/Npgsql/Internal/Converters/NullableConverter.cs b/src/Npgsql/Internal/Converters/NullableConverter.cs index d16f289a43..292def140a 100644 --- a/src/Npgsql/Internal/Converters/NullableConverter.cs +++ b/src/Npgsql/Internal/Converters/NullableConverter.cs @@ -24,7 +24,7 @@ public override bool CanConvert(DataFormat format, out BufferRequirements buffer => _effectiveConverter.Read(reader); public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) - => this.ComposingReadAsync(_effectiveConverter, reader, cancellationToken); + => this.ReadAsyncAsNullable(_effectiveConverter, reader, cancellationToken); public override Size GetSize(SizeContext context, [DisallowNull]T? value, ref object? 
writeState) => _effectiveConverter.GetSize(context, value.GetValueOrDefault(), ref writeState); From fb2bceb40c3592aa7edecc9ad48fc9b894a13377 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Wed, 31 Jan 2024 22:37:37 +0100 Subject: [PATCH 375/761] Composite fixes (#5553) --- .../Composites/Metadata/CompositeFieldInfo.cs | 2 +- .../Internal/Converters/CompositeConverter.cs | 14 +++------- src/Npgsql/Internal/PgWriter.cs | 9 ++++--- .../Types/CompositeHandlerTests.Read.cs | 6 ++++- .../Types/CompositeHandlerTests.Write.cs | 26 ++++++++++++++----- 5 files changed, 34 insertions(+), 23 deletions(-) diff --git a/src/Npgsql/Internal/Composites/Metadata/CompositeFieldInfo.cs b/src/Npgsql/Internal/Composites/Metadata/CompositeFieldInfo.cs index 31a1b0e5f4..a6cc79e4e9 100644 --- a/src/Npgsql/Internal/Composites/Metadata/CompositeFieldInfo.cs +++ b/src/Npgsql/Internal/Composites/Metadata/CompositeFieldInfo.cs @@ -37,6 +37,7 @@ private protected CompositeFieldInfo(string name, PgTypeInfo typeInfo, PgTypeId return; } _binaryBufferRequirements = bufferRequirements; + Converter = resolution.Converter; } } @@ -48,7 +49,6 @@ public PgConverter GetReadInfo(out Size readRequirement) return Converter; } - // TODO this is effectively static work, we could optimize this away. 
if (!PgTypeInfo.TryBind(new Field(Name, PgTypeInfo.PgTypeId.GetValueOrDefault(), -1), DataFormat.Binary, out var converterInfo)) ThrowHelper.ThrowInvalidOperationException("Converter must support binary format to participate in composite types."); diff --git a/src/Npgsql/Internal/Converters/CompositeConverter.cs b/src/Npgsql/Internal/Converters/CompositeConverter.cs index 04bd0bca60..32eee1357c 100644 --- a/src/Npgsql/Internal/Converters/CompositeConverter.cs +++ b/src/Npgsql/Internal/Converters/CompositeConverter.cs @@ -28,11 +28,7 @@ public CompositeConverter(CompositeInfo composite) writeReq = writeReq.Combine(Size.CreateUpperBound(0)); } - req = req.Combine( - // If a read is Unknown (streaming) we can map it to zero as we just want a minimum buffered size. - readReq is { Kind: SizeKind.Unknown } ? Size.Zero : readReq, - // For writes Unknown means our size is dependent on the value so we can't ignore it. - writeReq); + req = req.Combine(readReq, writeReq); } // We have to put a limit on the requirements we report otherwise smaller buffer sizes won't work. @@ -40,15 +36,11 @@ public CompositeConverter(CompositeInfo composite) _bufferRequirements = req; + // Return unknown if we hit the limit. Size Limit(Size requirement) { const int maxByteCount = 1024; - return requirement switch - { - { Kind: SizeKind.UpperBound } => Size.CreateUpperBound(Math.Min(maxByteCount, requirement.Value)), - { Kind: SizeKind.Exact } => Size.Create(Math.Min(maxByteCount, requirement.Value)), - _ => Size.Unknown - }; + return requirement.GetValueOrDefault() > maxByteCount ? 
requirement.Combine(Size.Unknown) : requirement; } } diff --git a/src/Npgsql/Internal/PgWriter.cs b/src/Npgsql/Internal/PgWriter.cs index a0fa545718..3c5064386c 100644 --- a/src/Npgsql/Internal/PgWriter.cs +++ b/src/Npgsql/Internal/PgWriter.cs @@ -453,18 +453,19 @@ internal ValueTask Flush(bool async, CancellationToken cancellationToken = defau internal ValueTask BeginNestedWrite(bool async, Size bufferRequirement, int byteCount, object? state, CancellationToken cancellationToken) { Debug.Assert(bufferRequirement != -1); - if (ShouldFlush(bufferRequirement)) - return Core(async, bufferRequirement, byteCount, state, cancellationToken); + // ShouldFlush depends on the current size for upper bound requirements, so we must set it beforehand. _current = new() { Format = _current.Format, Size = byteCount, BufferRequirement = bufferRequirement, WriteState = state }; + if (ShouldFlush(bufferRequirement)) + return Core(async, cancellationToken); + return new(new NestedWriteScope()); [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder<>))] - async ValueTask Core(bool async, Size bufferRequirement, int byteCount, object? state, CancellationToken cancellationToken) + async ValueTask Core(bool async, CancellationToken cancellationToken) { await Flush(async, cancellationToken).ConfigureAwait(false); - _current = new() { Format = _current.Format, Size = byteCount, BufferRequirement = bufferRequirement, WriteState = state }; return new(); } } diff --git a/test/Npgsql.Tests/Types/CompositeHandlerTests.Read.cs b/test/Npgsql.Tests/Types/CompositeHandlerTests.Read.cs index bf9cb38241..2188569a49 100644 --- a/test/Npgsql.Tests/Types/CompositeHandlerTests.Read.cs +++ b/test/Npgsql.Tests/Types/CompositeHandlerTests.Read.cs @@ -19,11 +19,15 @@ async Task Read(T composite, Action, T> assert, string? 
schema = null { await using var dataSource = await OpenAndMapComposite(composite, schema, nameof(Read), out var name); await using var connection = await dataSource.OpenConnectionAsync(); - await using var command = new NpgsqlCommand($"SELECT ROW({composite.GetValues()})::{name}", connection); + + var literal = $"ROW({composite.GetValues()})::{name}"; + var arrayLiteral = $"ARRAY[{literal}]::{name}[]"; + await using var command = new NpgsqlCommand($"SELECT {literal}, {arrayLiteral}", connection); await using var reader = command.ExecuteReader(); await reader.ReadAsync(); assert(() => reader.GetFieldValue(0), composite); + assert(() => reader.GetFieldValue(1)[0], composite); } [Test] diff --git a/test/Npgsql.Tests/Types/CompositeHandlerTests.Write.cs b/test/Npgsql.Tests/Types/CompositeHandlerTests.Write.cs index a251cdd4ed..160b037a97 100644 --- a/test/Npgsql.Tests/Types/CompositeHandlerTests.Write.cs +++ b/test/Npgsql.Tests/Types/CompositeHandlerTests.Write.cs @@ -19,14 +19,28 @@ async Task Write(T composite, Action? 
assert = null, str { await using var dataSource = await OpenAndMapComposite(composite, schema, nameof(Write), out var _); await using var connection = await dataSource.OpenConnectionAsync(); - await using var command = new NpgsqlCommand("SELECT (@c).*", connection); + { + await using var command = new NpgsqlCommand("SELECT (@c).*", connection); + + command.Parameters.AddWithValue("c", composite); + await using var reader = await command.ExecuteReaderAsync(); + await reader.ReadAsync(); + + if (assert is not null) + assert(reader, composite); + } + + { + await using var command = new NpgsqlCommand("SELECT (@arrayc)[1].*", connection); + + command.Parameters.AddWithValue("arrayc", new[] { composite }); + await using var reader = await command.ExecuteReaderAsync(); + await reader.ReadAsync(); - command.Parameters.AddWithValue("c", composite); - await using var reader = await command.ExecuteReaderAsync(); - await reader.ReadAsync(); - if (assert is not null) - assert(reader, composite); + if (assert is not null) + assert(reader, composite); + } } [Test] From 5b998e83fc974c1aa7f3ee6a7474e0e4960b21cb Mon Sep 17 00:00:00 2001 From: Anton Date: Thu, 1 Feb 2024 02:38:23 +0500 Subject: [PATCH 376/761] Fix NpgsqlBox behaves incorrectly with negative coordinates (#5502) Fixes #5500 --- src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs | 63 ++++++----------- test/Npgsql.Tests/Types/GeometricTypeTests.cs | 68 +++++++++++++++---- 2 files changed, 73 insertions(+), 58 deletions(-) diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs b/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs index 912d3fc535..b16fe8ccea 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs @@ -128,67 +128,33 @@ public override bool Equals(object? 
obj) /// public struct NpgsqlBox : IEquatable { + NpgsqlPoint _upperRight; public NpgsqlPoint UpperRight { get => _upperRight; set { - if (value.X < _lowerLeft.X) - { - _upperRight.X = _lowerLeft.X; - _lowerLeft.X = value.X; - } - else - { - _upperRight.X = value.X; - } - - if (value.Y < _lowerLeft.Y) - { - _upperRight.Y = _lowerLeft.Y; - _lowerLeft.Y = value.Y; - } - else - { - _upperRight.Y = value.Y; - } + _upperRight = value; + NormalizeBox(); } } - private NpgsqlPoint _upperRight; - + NpgsqlPoint _lowerLeft; public NpgsqlPoint LowerLeft { get => _lowerLeft; set { - if (value.X > _upperRight.X) - { - _lowerLeft.X = _upperRight.X; - _upperRight.X = value.X; - } - else - { - _lowerLeft.X = value.X; - } - - if (value.Y > _upperRight.Y) - { - _lowerLeft.Y = _upperRight.Y; - _upperRight.Y = value.Y; - } - else - { - _lowerLeft.Y = value.Y; - } + _lowerLeft = value; + NormalizeBox(); } } - private NpgsqlPoint _lowerLeft; public NpgsqlBox(NpgsqlPoint upperRight, NpgsqlPoint lowerLeft) : this() { - UpperRight = upperRight; - LowerLeft = lowerLeft; + _upperRight = upperRight; + _lowerLeft = lowerLeft; + NormalizeBox(); } public NpgsqlBox(double top, double right, double bottom, double left) @@ -216,6 +182,17 @@ public override string ToString() public override int GetHashCode() => HashCode.Combine(Top, Right, Bottom, LowerLeft); + + // Swaps corners for isomorphic boxes, to mirror postgres behavior. 
+ // See: https://github.com/postgres/postgres/blob/af2324fabf0020e464b0268be9ef03e8f46ed84b/src/backend/utils/adt/geo_ops.c#L435-L447 + void NormalizeBox() + { + if (_upperRight.X < _lowerLeft.X) + (_upperRight.X, _lowerLeft.X) = (_lowerLeft.X, _upperRight.X); + + if (_upperRight.Y < _lowerLeft.Y) + (_upperRight.Y, _lowerLeft.Y) = (_lowerLeft.Y, _upperRight.Y); + } } /// diff --git a/test/Npgsql.Tests/Types/GeometricTypeTests.cs b/test/Npgsql.Tests/Types/GeometricTypeTests.cs index 67cdebf4b2..c4d8d53b0e 100644 --- a/test/Npgsql.Tests/Types/GeometricTypeTests.cs +++ b/test/Npgsql.Tests/Types/GeometricTypeTests.cs @@ -34,36 +34,74 @@ await AssertType( NpgsqlDbType.Box, skipArrayCheck: true); // Uses semicolon instead of comma as separator + await AssertType( + new NpgsqlBox(top: -10, right: 0, bottom: -20, left: -10), + "(0,-10),(-10,-20)", + "box", + NpgsqlDbType.Box, + skipArrayCheck: true); // Uses semicolon instead of comma as separator + await AssertType( new NpgsqlBox(top: 1, right: 2, bottom: 3, left: 4), "(4,3),(2,1)", "box", NpgsqlDbType.Box, skipArrayCheck: true); // Uses semicolon instead of comma as separator + + var swapped = new NpgsqlBox(top: -20, right: -10, bottom: -10, left: 0); + + await AssertType( + swapped, + "(0,-10),(-10,-20)", + "box", + NpgsqlDbType.Box, + skipArrayCheck: true); // Uses semicolon instead of comma as separator + + await AssertType( + swapped with { UpperRight = new NpgsqlPoint(-20,-10) }, + "(-10,-10),(-20,-20)", + "box", + NpgsqlDbType.Box, + skipArrayCheck: true); // Uses semicolon instead of comma as separator + + await AssertType( + swapped with { LowerLeft = new NpgsqlPoint(10, 10) }, + "(10,10),(0,-10)", + "box", + NpgsqlDbType.Box, + skipArrayCheck: true); // Uses semicolon instead of comma as separator } [Test] public async Task Box_array() { - var boxarr = await AssertType( - new[] - { - new NpgsqlBox(top: 3, right: 4, bottom: 1, left: 2), - new NpgsqlBox(top: 5, right: 6, bottom: 3, left: 4), - }, - 
"{(4,3),(2,1);(6,5),(4,3)}", + var data = new[] + { + new NpgsqlBox(top: 3, right: 4, bottom: 1, left: 2), + new NpgsqlBox(top: 5, right: 6, bottom: 3, left: 4), + new NpgsqlBox(top: -10, right: 0, bottom: -20, left: -10) + }; + + await AssertType( + data, + "{(4,3),(2,1);(6,5),(4,3);(0,-10),(-10,-20)}", "box[]", - NpgsqlDbType.Box | NpgsqlDbType.Array); + NpgsqlDbType.Box | NpgsqlDbType.Array + ); + + var swappedData = new[] + { + new NpgsqlBox(top: 1, right: 2, bottom: 3, left: 4), + new NpgsqlBox(top: 3, right: 4, bottom: 5, left: 6), + new NpgsqlBox(top: -20, right: -10, bottom: -10, left: 0) + }; await AssertType( - new[] - { - new NpgsqlBox(top: 1, right: 2, bottom: 3, left: 4), - new NpgsqlBox(top: 3, right: 4, bottom: 5, left: 6) - }, - "{(4,3),(2,1);(6,5),(4,3)}", + swappedData, + "{(4,3),(2,1);(6,5),(4,3);(0,-10),(-10,-20)}", "box[]", - NpgsqlDbType.Box | NpgsqlDbType.Array); + NpgsqlDbType.Box | NpgsqlDbType.Array + ); } [Test] From 882265627b23532bb4aeda0dc35ed16f795eb7a4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=BDygimantas=20A?= <12134941+azygis@users.noreply.github.com> Date: Wed, 31 Jan 2024 23:45:59 +0200 Subject: [PATCH 377/761] Default to invariant culture when using static NpgsqlSnakeCaseNameTranslator.ConvertToSnakeCase (#5448) --- src/Npgsql/NameTranslation/NpgsqlSnakeCaseNameTranslator.cs | 4 ++-- src/Npgsql/PublicAPI.Shipped.txt | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Npgsql/NameTranslation/NpgsqlSnakeCaseNameTranslator.cs b/src/Npgsql/NameTranslation/NpgsqlSnakeCaseNameTranslator.cs index f2239890ed..760ddb1e5a 100644 --- a/src/Npgsql/NameTranslation/NpgsqlSnakeCaseNameTranslator.cs +++ b/src/Npgsql/NameTranslation/NpgsqlSnakeCaseNameTranslator.cs @@ -81,7 +81,7 @@ IEnumerable LegacyModeMap(string clrName) /// This will be used when converting names to lower case. /// If then will be used. 
/// - public static string ConvertToSnakeCase(string name, CultureInfo culture) + public static string ConvertToSnakeCase(string name, CultureInfo? culture = null) { if (string.IsNullOrEmpty(name)) return name; @@ -115,7 +115,7 @@ public static string ConvertToSnakeCase(string name, CultureInfo culture) builder.Append('_'); } - currentChar = char.ToLower(currentChar, culture); + currentChar = char.ToLower(currentChar, culture ?? CultureInfo.InvariantCulture); break; case UnicodeCategory.LowercaseLetter: diff --git a/src/Npgsql/PublicAPI.Shipped.txt b/src/Npgsql/PublicAPI.Shipped.txt index 220210350c..3ec604ddc0 100644 --- a/src/Npgsql/PublicAPI.Shipped.txt +++ b/src/Npgsql/PublicAPI.Shipped.txt @@ -1868,7 +1868,7 @@ override sealed Npgsql.NpgsqlParameter.SourceColumnNullMapping.get -> bool override sealed Npgsql.NpgsqlParameter.SourceColumnNullMapping.set -> void override sealed Npgsql.NpgsqlParameter.SourceVersion.get -> System.Data.DataRowVersion override sealed Npgsql.NpgsqlParameter.SourceVersion.set -> void -static Npgsql.NameTranslation.NpgsqlSnakeCaseNameTranslator.ConvertToSnakeCase(string! name, System.Globalization.CultureInfo! culture) -> string! +static Npgsql.NameTranslation.NpgsqlSnakeCaseNameTranslator.ConvertToSnakeCase(string! name, System.Globalization.CultureInfo? culture = null) -> string! static Npgsql.NpgsqlCommandBuilder.DeriveParameters(Npgsql.NpgsqlCommand! command) -> void static Npgsql.NpgsqlConnection.ClearAllPools() -> void static Npgsql.NpgsqlConnection.ClearPool(Npgsql.NpgsqlConnection! 
connection) -> void From 7f8c25737ce57b65f1d59aba852b1651b2f5da6c Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Mon, 5 Feb 2024 19:55:20 +0100 Subject: [PATCH 378/761] Bring UnsupportedTypeInfoResolver up-to-date with current mechanics (#5556) Fixes #5550 --- ...olverChain.cs => ChainTypeInfoResolver.cs} | 4 +- src/Npgsql/Internal/PgSerializerOptions.cs | 15 +++- .../PgTypeInfoResolverChainBuilder.cs | 48 ++++++++++-- src/Npgsql/Internal/Postgres/DataTypeName.cs | 3 +- .../AdoTypeInfoResolverFactory.Multirange.cs | 64 --------------- .../AdoTypeInfoResolverFactory.Range.cs | 64 --------------- .../FullTextSearchTypeInfoResolverFactory.cs | 8 +- .../JsonDynamicTypeInfoResolverFactory.cs | 12 +++ .../LTreeTypeInfoResolverFactory.cs | 4 +- .../RecordTypeInfoResolverFactory.cs | 4 +- .../UnmappedTypeInfoResolverFactory.cs | 62 ++++++++------- .../UnsupportedTypeInfoResolver.cs | 78 ++++++++++--------- src/Npgsql/NpgsqlDataSource.cs | 13 ++-- src/Npgsql/NpgsqlDataSourceConfiguration.cs | 2 +- .../Properties/NpgsqlStrings.Designer.cs | 10 +-- src/Npgsql/Properties/NpgsqlStrings.resx | 7 +- src/Npgsql/Shims/MemoryExtensions.cs | 4 +- src/Npgsql/TypeMapping/GlobalTypeMapper.cs | 6 +- test/Npgsql.Tests/Support/TestBase.cs | 44 +++++++++-- test/Npgsql.Tests/Types/MultirangeTests.cs | 5 ++ test/Npgsql.Tests/Types/RangeTests.cs | 4 + 21 files changed, 221 insertions(+), 240 deletions(-) rename src/Npgsql/Internal/{TypeInfoResolverChain.cs => ChainTypeInfoResolver.cs} (82%) diff --git a/src/Npgsql/Internal/TypeInfoResolverChain.cs b/src/Npgsql/Internal/ChainTypeInfoResolver.cs similarity index 82% rename from src/Npgsql/Internal/TypeInfoResolverChain.cs rename to src/Npgsql/Internal/ChainTypeInfoResolver.cs index 36dd8db53c..18c39d80b6 100644 --- a/src/Npgsql/Internal/TypeInfoResolverChain.cs +++ b/src/Npgsql/Internal/ChainTypeInfoResolver.cs @@ -4,11 +4,11 @@ namespace Npgsql.Internal; -sealed class TypeInfoResolverChain : IPgTypeInfoResolver +sealed class 
ChainTypeInfoResolver : IPgTypeInfoResolver { readonly IPgTypeInfoResolver[] _resolvers; - public TypeInfoResolverChain(IEnumerable resolvers) + public ChainTypeInfoResolver(IEnumerable resolvers) => _resolvers = new List(resolvers).ToArray(); public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) diff --git a/src/Npgsql/Internal/PgSerializerOptions.cs b/src/Npgsql/Internal/PgSerializerOptions.cs index 6edb92906f..0057e1f455 100644 --- a/src/Npgsql/Internal/PgSerializerOptions.cs +++ b/src/Npgsql/Internal/PgSerializerOptions.cs @@ -15,11 +15,14 @@ public sealed class PgSerializerOptions [field: ThreadStatic] internal static bool IntrospectionCaller { get; set; } + readonly PgTypeInfoResolverChain _resolverChain; readonly Func? _timeZoneProvider; + IPgTypeInfoResolver? _typeInfoResolver; object? _typeInfoCache; - internal PgSerializerOptions(NpgsqlDatabaseInfo databaseInfo, Func? timeZoneProvider = null) + internal PgSerializerOptions(NpgsqlDatabaseInfo databaseInfo, PgTypeInfoResolverChain? resolverChain = null, Func? timeZoneProvider = null) { + _resolverChain = resolverChain ?? new(); _timeZoneProvider = timeZoneProvider; DatabaseInfo = databaseInfo; UnknownPgType = databaseInfo.GetPostgresType("unknown"); @@ -42,7 +45,11 @@ internal bool IntrospectionMode public string TimeZone => _timeZoneProvider?.Invoke() ?? throw new NotSupportedException("TimeZone was not configured."); public Encoding TextEncoding { get; init; } = Encoding.UTF8; - public required IPgTypeInfoResolver TypeInfoResolver { get; init; } + public IPgTypeInfoResolver TypeInfoResolver + { + get => _typeInfoResolver ??= new ChainTypeInfoResolver(_resolverChain); + internal init => _typeInfoResolver = value; + } public bool EnableDateTimeInfinityConversions { get; init; } = true; public ArrayNullabilityMode ArrayNullabilityMode { get; init; } = ArrayNullabilityMode.Never; @@ -54,6 +61,10 @@ internal bool IntrospectionMode typeof(char), typeof(char?) 
}; + internal bool RangesEnabled => _resolverChain.RangesEnabled; + internal bool MultirangesEnabled => _resolverChain.MultirangesEnabled; + internal bool ArraysEnabled => _resolverChain.ArraysEnabled; + // We don't verify the kind of pgTypeId we get, it'll throw if it's incorrect. // It's up to the caller to call GetCanonicalTypeId if they want to use an oid instead of a DataTypeName. // This also makes it easier to realize it should be a cached value if infos for different CLR types are requested for the same diff --git a/src/Npgsql/Internal/PgTypeInfoResolverChainBuilder.cs b/src/Npgsql/Internal/PgTypeInfoResolverChainBuilder.cs index 2f59344d3e..69fc5ab8fa 100644 --- a/src/Npgsql/Internal/PgTypeInfoResolverChainBuilder.cs +++ b/src/Npgsql/Internal/PgTypeInfoResolverChainBuilder.cs @@ -1,4 +1,5 @@ using System; +using System.Collections; using System.Collections.Generic; namespace Npgsql.Internal; @@ -10,7 +11,7 @@ struct PgTypeInfoResolverChainBuilder Action>? _addMultirangeResolvers; RangeArrayHandler _rangeArrayHandler = RangeArrayHandler.Instance; MultirangeArrayHandler _multirangeArrayHandler = MultirangeArrayHandler.Instance; - Action>? _arrayResolvers; + Action>? _addArrayResolvers; public PgTypeInfoResolverChainBuilder() { @@ -66,7 +67,7 @@ static void AddResolvers(PgTypeInfoResolverChainBuilder instance, List resolvers) { @@ -86,7 +87,7 @@ static void AddResolvers(PgTypeInfoResolverChainBuilder instance, List Build(Action>? configure = null) + public PgTypeInfoResolverChain Build(Action>? configure = null) { var resolvers = new List(); foreach (var factory in _factories) @@ -94,9 +95,14 @@ public IEnumerable Build(Action>? 
var instance = this; _addRangeResolvers?.Invoke(instance, resolvers); _addMultirangeResolvers?.Invoke(instance, resolvers); - _arrayResolvers?.Invoke(instance, resolvers); + _addArrayResolvers?.Invoke(instance, resolvers); configure?.Invoke(resolvers); - return resolvers; + return new( + resolvers, + rangesEnabled: _addRangeResolvers is not null, + multirangesEnabled: _addMultirangeResolvers is not null, + arraysEnabled: _addArrayResolvers is not null + ); } class RangeArrayHandler @@ -127,3 +133,35 @@ sealed class MultirangeArrayHandlerImpl : MultirangeArrayHandler public override IPgTypeInfoResolver? CreateMultirangeArrayResolver(PgTypeInfoResolverFactory factory) => factory.CreateMultirangeArrayResolver(); } } + +readonly struct PgTypeInfoResolverChain : IEnumerable +{ + [Flags] + enum EnabledFlags + { + None = 0, + Ranges = 1, + Multiranges = 2, + Arrays = 4 + } + + readonly EnabledFlags _enabled; + readonly List _resolvers; + + public PgTypeInfoResolverChain(List resolvers, bool rangesEnabled, bool multirangesEnabled, bool arraysEnabled) + { + _enabled = rangesEnabled ? EnabledFlags.Ranges | _enabled : _enabled; + _enabled = multirangesEnabled ? EnabledFlags.Multiranges | _enabled : _enabled; + _enabled = arraysEnabled ? EnabledFlags.Arrays | _enabled : _enabled; + _resolvers = resolvers; + } + + public bool RangesEnabled => _enabled.HasFlag(EnabledFlags.Ranges); + public bool MultirangesEnabled => _enabled.HasFlag(EnabledFlags.Multiranges); + public bool ArraysEnabled => _enabled.HasFlag(EnabledFlags.Arrays); + + public IEnumerator GetEnumerator() + => _resolvers?.GetEnumerator() ?? (IEnumerator)Array.Empty().GetEnumerator(); + IEnumerator IEnumerable.GetEnumerator() + => _resolvers?.GetEnumerator() ?? 
Array.Empty().GetEnumerator(); +} diff --git a/src/Npgsql/Internal/Postgres/DataTypeName.cs b/src/Npgsql/Internal/Postgres/DataTypeName.cs index c95ea7684b..1589e2da91 100644 --- a/src/Npgsql/Internal/Postgres/DataTypeName.cs +++ b/src/Npgsql/Internal/Postgres/DataTypeName.cs @@ -56,8 +56,9 @@ internal static DataTypeName ValidatedName(string fullyQualifiedDataTypeName) public string UnqualifiedDisplayName => ToDisplayName(UnqualifiedNameSpan); + internal ReadOnlySpan SchemaSpan => Value.AsSpan(0, _value.IndexOf('.')); public string Schema => Value.Substring(0, _value.IndexOf('.')); - internal ReadOnlySpan UnqualifiedNameSpan => Value.AsSpan().Slice(_value.IndexOf('.') + 1); + internal ReadOnlySpan UnqualifiedNameSpan => Value.AsSpan(_value.IndexOf('.') + 1); public string UnqualifiedName => Value.Substring(_value.IndexOf('.') + 1); public string Value => _value is null ? ThrowDefaultException() : _value; diff --git a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Multirange.cs b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Multirange.cs index ada1c9a3b5..873e6b9874 100644 --- a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Multirange.cs +++ b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Multirange.cs @@ -2,8 +2,6 @@ using System.Collections.Generic; using Npgsql.Internal.Converters; using Npgsql.Internal.Postgres; -using Npgsql.PostgresTypes; -using Npgsql.Properties; using Npgsql.Util; using NpgsqlTypes; using static Npgsql.Internal.PgConverterFactory; @@ -15,68 +13,6 @@ sealed partial class AdoTypeInfoResolverFactory public override IPgTypeInfoResolver CreateMultirangeResolver() => new MultirangeResolver(); public override IPgTypeInfoResolver CreateMultirangeArrayResolver() => new MultirangeArrayResolver(); - public static void ThrowIfMultirangeUnsupported(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) - { - var kind = CheckMultirangeUnsupported(type, dataTypeName, options); - switch (kind) - { - case PostgresTypeKind.Multirange when kind.Value.HasFlag(PostgresTypeKind.Array): - throw new NotSupportedException( - string.Format(NpgsqlStrings.MultirangeArraysNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableArrays), typeof(TBuilder).Name)); - case PostgresTypeKind.Multirange: - throw new NotSupportedException( - string.Format(NpgsqlStrings.MultirangesNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableMultiranges), typeof(TBuilder).Name)); - default: - return; - } - } - - public static PostgresTypeKind? CheckMultirangeUnsupported(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) - { - // Only trigger on well known data type names. - var npgsqlDbType = dataTypeName?.ToNpgsqlDbType(); - if (type != typeof(object)) - { - if (npgsqlDbType?.HasFlag(NpgsqlDbType.Multirange) != true) - return null; - - return dataTypeName?.IsArray == true - ? PostgresTypeKind.Array | PostgresTypeKind.Multirange - : PostgresTypeKind.Multirange; - } - - if (type == typeof(object)) - return null; - - if (!TypeInfoMappingCollection.IsArrayLikeType(type, out var elementType)) - return null; - - type = elementType; - - if (type is { IsConstructedGenericType: true } && type.GetGenericTypeDefinition() == typeof(Nullable<>)) - type = type.GetGenericArguments()[0]; - - if (type is { IsConstructedGenericType: true } && type.GetGenericTypeDefinition() == typeof(NpgsqlRange<>)) - { - type = type.GetGenericArguments()[0]; - var matchingArguments = - new[] - { - typeof(int), typeof(long), typeof(decimal), typeof(DateTime), typeof(DateOnly) - }; - - // If we don't know more than the clr type, default to a Multirange kind over Array as they share the same types. 
- foreach (var argument in matchingArguments) - if (argument == type) - return PostgresTypeKind.Multirange; - - if (type.AssemblyQualifiedName == "System.Numerics.BigInteger,System.Runtime.Numerics") - return PostgresTypeKind.Multirange; - } - - return null; - } - class MultirangeResolver : IPgTypeInfoResolver { TypeInfoMappingCollection? _mappings; diff --git a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Range.cs b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Range.cs index ae4d1a294f..54ca555cdd 100644 --- a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Range.cs +++ b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Range.cs @@ -2,8 +2,6 @@ using System.Numerics; using Npgsql.Internal.Converters; using Npgsql.Internal.Postgres; -using Npgsql.PostgresTypes; -using Npgsql.Properties; using Npgsql.Util; using NpgsqlTypes; using static Npgsql.Internal.PgConverterFactory; @@ -15,68 +13,6 @@ sealed partial class AdoTypeInfoResolverFactory public override IPgTypeInfoResolver CreateRangeResolver() => new RangeResolver(); public override IPgTypeInfoResolver CreateRangeArrayResolver() => new RangeArrayResolver(); - public static void ThrowIfRangeUnsupported(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) - { - var kind = CheckRangeUnsupported(type, dataTypeName, options); - switch (kind) - { - case PostgresTypeKind.Range when kind.Value.HasFlag(PostgresTypeKind.Array): - throw new NotSupportedException( - string.Format(NpgsqlStrings.RangeArraysNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableArrays), typeof(TBuilder).Name)); - case PostgresTypeKind.Range: - throw new NotSupportedException( - string.Format(NpgsqlStrings.RangesNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableRanges), typeof(TBuilder).Name)); - default: - return; - } - } - - public static PostgresTypeKind? CheckRangeUnsupported(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) - { - // Only trigger on well known data type names. - var npgsqlDbType = dataTypeName?.ToNpgsqlDbType(); - if (type != typeof(object)) - { - if (npgsqlDbType?.HasFlag(NpgsqlDbType.Range) != true && npgsqlDbType?.HasFlag(NpgsqlDbType.Multirange) != true) - return null; - - if (npgsqlDbType.Value.HasFlag(NpgsqlDbType.Range)) - return dataTypeName?.IsArray == true - ? PostgresTypeKind.Array | PostgresTypeKind.Range - : PostgresTypeKind.Range; - - return dataTypeName?.IsArray == true - ? PostgresTypeKind.Array | PostgresTypeKind.Multirange - : PostgresTypeKind.Multirange; - } - - if (type == typeof(object)) - return null; - - if (type is { IsConstructedGenericType: true } && type.GetGenericTypeDefinition() == typeof(Nullable<>)) - type = type.GetGenericArguments()[0]; - - if (type is { IsConstructedGenericType: true } && type.GetGenericTypeDefinition() == typeof(NpgsqlRange<>)) - { - type = type.GetGenericArguments()[0]; - var matchingArguments = - new[] - { - typeof(int), typeof(long), typeof(decimal), typeof(DateTime), typeof(DateOnly) - }; - - // If we don't know more than the clr type, default to a Multirange kind over Array as they share the same types. - foreach (var argument in matchingArguments) - if (argument == type) - return PostgresTypeKind.Range; - - if (type.AssemblyQualifiedName == "System.Numerics.BigInteger,System.Runtime.Numerics") - return PostgresTypeKind.Range; - } - - return null; - } - class RangeResolver : IPgTypeInfoResolver { TypeInfoMappingCollection? 
_mappings; diff --git a/src/Npgsql/Internal/ResolverFactories/FullTextSearchTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/FullTextSearchTypeInfoResolverFactory.cs index 38358ea430..272824ad2d 100644 --- a/src/Npgsql/Internal/ResolverFactories/FullTextSearchTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/FullTextSearchTypeInfoResolverFactory.cs @@ -11,9 +11,9 @@ sealed class FullTextSearchTypeInfoResolverFactory : PgTypeInfoResolverFactory public override IPgTypeInfoResolver CreateResolver() => new Resolver(); public override IPgTypeInfoResolver CreateArrayResolver() => new ArrayResolver(); - public static void CheckUnsupported(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + public static void ThrowIfUnsupported(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) { - if (type != typeof(object) && (dataTypeName == DataTypeNames.TsQuery || dataTypeName == DataTypeNames.TsVector)) + if (dataTypeName is { SchemaSpan: "pg_catalog", UnqualifiedNameSpan: "tsquery" or "_tsquery" or "tsvector" or "_tsvector" }) throw new NotSupportedException( string.Format(NpgsqlStrings.FullTextSearchNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableFullTextSearch), typeof(TBuilder).Name)); @@ -23,8 +23,8 @@ public static void CheckUnsupported(Type? type, DataTypeName? 
dataType if (TypeInfoMappingCollection.IsArrayLikeType(type, out var elementType)) type = elementType; - if (type is { IsConstructedGenericType: true } && type.GetGenericTypeDefinition() == typeof(Nullable<>)) - type = type.GetGenericArguments()[0]; + if (Nullable.GetUnderlyingType(type) is { } underlyingType) + type = underlyingType; if (type == typeof(NpgsqlTsVector) || typeof(NpgsqlTsQuery).IsAssignableFrom(type)) throw new NotSupportedException( diff --git a/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs index b354f3852c..a5ec6da2ed 100644 --- a/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs @@ -6,6 +6,7 @@ using System.Text.Json.Serialization.Metadata; using Npgsql.Internal.Converters; using Npgsql.Internal.Postgres; +using Npgsql.Properties; namespace Npgsql.Internal.ResolverFactories; @@ -27,6 +28,17 @@ public JsonDynamicTypeInfoResolverFactory(Type[]? jsonbClrTypes = null, Type[]? public override IPgTypeInfoResolver CreateResolver() => new Resolver(_jsonbClrTypes, _jsonClrTypes, _serializerOptions); public override IPgTypeInfoResolver CreateArrayResolver() => new ArrayResolver(_jsonbClrTypes, _jsonClrTypes, _serializerOptions); + public static void ThrowIfUnsupported(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + { + if (dataTypeName is { SchemaSpan: "pg_catalog", UnqualifiedNameSpan: "json" or "_json" or "jsonb" or "_jsonb" }) + throw new NotSupportedException( + string.Format( + NpgsqlStrings.DynamicJsonNotEnabled, + type is null || type == typeof(object) ? 
"" : type.Name, + nameof(NpgsqlSlimDataSourceBuilder.EnableDynamicJson), + typeof(TBuilder).Name)); + } + [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] [RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] class Resolver : DynamicTypeInfoResolver, IPgTypeInfoResolver diff --git a/src/Npgsql/Internal/ResolverFactories/LTreeTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/LTreeTypeInfoResolverFactory.cs index 7cb2f75a6a..720d8ee78d 100644 --- a/src/Npgsql/Internal/ResolverFactories/LTreeTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/LTreeTypeInfoResolverFactory.cs @@ -10,9 +10,9 @@ sealed class LTreeTypeInfoResolverFactory : PgTypeInfoResolverFactory public override IPgTypeInfoResolver CreateResolver() => new Resolver(); public override IPgTypeInfoResolver CreateArrayResolver() => new ArrayResolver(); - public static void CheckUnsupported(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + public static void ThrowIfUnsupported(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) { - if (type != typeof(object) && dataTypeName is { UnqualifiedName: "ltree" or "lquery" or "ltxtquery" }) + if (dataTypeName is { UnqualifiedNameSpan: "ltree" or "_ltree" or "lquery" or "_lquery" or "ltxtquery" or "_ltxtquery" }) throw new NotSupportedException( string.Format(NpgsqlStrings.LTreeNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableLTree), typeof(TBuilder).Name)); diff --git a/src/Npgsql/Internal/ResolverFactories/RecordTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/RecordTypeInfoResolverFactory.cs index 7a26e168b9..eb7de18a1f 100644 --- a/src/Npgsql/Internal/ResolverFactories/RecordTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/RecordTypeInfoResolverFactory.cs @@ -10,9 +10,9 @@ sealed class RecordTypeInfoResolverFactory : PgTypeInfoResolverFactory public override IPgTypeInfoResolver CreateResolver() => new Resolver(); public override IPgTypeInfoResolver CreateArrayResolver() => new ArrayResolver(); - public static void CheckUnsupported(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + public static void ThrowIfUnsupported(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) { - if (type != typeof(object) && dataTypeName == DataTypeNames.Record) + if (dataTypeName is { SchemaSpan: "pg_catalog", UnqualifiedNameSpan: "record" or "_record" }) { throw new NotSupportedException( string.Format( diff --git a/src/Npgsql/Internal/ResolverFactories/UnmappedTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/UnmappedTypeInfoResolverFactory.cs index 1fc961e556..a04c3cc111 100644 --- a/src/Npgsql/Internal/ResolverFactories/UnmappedTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/UnmappedTypeInfoResolverFactory.cs @@ -11,7 +11,7 @@ namespace Npgsql.Internal.ResolverFactories; [RequiresUnreferencedCode("The use of unmapped enums, ranges or multiranges requires reflection usage which is incompatible with trimming.")] [RequiresDynamicCode("The use of unmapped enums, ranges or multiranges requires dynamic code usage which is incompatible with NativeAOT.")] -sealed partial class UnmappedTypeInfoResolverFactory : PgTypeInfoResolverFactory +sealed class UnmappedTypeInfoResolverFactory : PgTypeInfoResolverFactory { public override IPgTypeInfoResolver CreateResolver() => new EnumResolver(); public override IPgTypeInfoResolver CreateArrayResolver() => new EnumArrayResolver(); @@ -69,9 +69,9 @@ class RangeResolver : DynamicTypeInfoResolver protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) { var matchedType = type; - if (type is not null && !IsTypeOrNullableOfType(type, + if ((type is not null && type != typeof(object) && !IsTypeOrNullableOfType(type, static type => type.IsConstructedGenericType && type.GetGenericTypeDefinition() == typeof(NpgsqlRange<>), - out matchedType) + out matchedType)) || options.DatabaseInfo.GetPostgresType(dataTypeName) is not PostgresRangeType rangeType) return null; @@ -79,7 +79,7 @@ class RangeResolver : DynamicTypeInfoResolver matchedType is null ? 
options.GetDefaultTypeInfo(rangeType.Subtype) // Input matchedType here as we don't want an NpgsqlRange over Nullable (it has its own nullability tracking, for better or worse) - : options.GetTypeInfo(matchedType.GetGenericArguments()[0], rangeType.Subtype); + : options.GetTypeInfo(matchedType == typeof(object) ? matchedType : matchedType.GetGenericArguments()[0], rangeType.Subtype); // We have no generic RangeConverterResolver so we would not know how to compose a range mapping for such infos. // See https://github.com/npgsql/npgsql/issues/5268 @@ -88,14 +88,18 @@ matchedType is null subInfo = subInfo.ToNonBoxing(); - matchedType ??= typeof(NpgsqlRange<>).MakeGenericType(subInfo.Type); - - return CreateCollection().AddMapping(matchedType, dataTypeName, - (options, mapping, _) => mapping.CreateInfo(options, - (PgConverter)Activator.CreateInstance(typeof(RangeConverter<>).MakeGenericType(subInfo.Type), - subInfo.GetResolution().Converter)!, - preferredFormat: subInfo.PreferredFormat, supportsWriting: subInfo.SupportsWriting), - mapping => mapping with { MatchRequirement = MatchRequirement.Single }); + var converterType = typeof(NpgsqlRange<>).MakeGenericType(subInfo.Type); + + return CreateCollection().AddMapping(matchedType ?? converterType, dataTypeName, + (options, mapping, _) => + new PgTypeInfo( + options, + (PgConverter)Activator.CreateInstance(typeof(RangeConverter<>).MakeGenericType(subInfo.Type), + subInfo.GetResolution().Converter)!, + new DataTypeName(mapping.DataTypeName), + unboxedType: matchedType is not null && matchedType != converterType ? converterType : null + ) { PreferredFormat = subInfo.PreferredFormat, SupportsWriting = subInfo.SupportsWriting }, + mapping => mapping with { MatchRequirement = MatchRequirement.DataTypeName }); } } @@ -106,8 +110,8 @@ sealed class RangeArrayResolver : RangeResolver protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) { Type? 
elementType = null; - if (!((type is null || IsArrayLikeType(type, out elementType)) && - IsArrayDataTypeName(dataTypeName, options, out var elementDataTypeName))) + if ((type is not null && type != typeof(object) && !IsArrayLikeType(type, out elementType)) + || !IsArrayDataTypeName(dataTypeName, options, out var elementDataTypeName)) return null; var mappings = base.GetMappings(elementType, elementDataTypeName, options); @@ -123,16 +127,16 @@ class MultirangeResolver : DynamicTypeInfoResolver protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) { Type? elementType = null; - if (type is not null && !IsArrayLikeType(type, out elementType) + if ((type is not null && type != typeof(object) && !IsArrayLikeType(type, out elementType)) || elementType is not null && !IsTypeOrNullableOfType(elementType, static type => type.IsConstructedGenericType && type.GetGenericTypeDefinition() == typeof(NpgsqlRange<>), out _) || options.DatabaseInfo.GetPostgresType(dataTypeName) is not PostgresMultirangeType multirangeType) return null; var subInfo = - elementType is null + type is null ? options.GetDefaultTypeInfo(multirangeType.Subrange) - : options.GetTypeInfo(elementType, multirangeType.Subrange); + : options.GetTypeInfo(elementType ?? typeof(object), multirangeType.Subrange); // We have no generic MultirangeConverterResolver so we would not know how to compose a range mapping for such infos. 
// See https://github.com/npgsql/npgsql/issues/5268 @@ -141,13 +145,18 @@ elementType is null subInfo = subInfo.ToNonBoxing(); - type ??= subInfo.Type.MakeArrayType(); - - return CreateCollection().AddMapping(type, dataTypeName, - (options, mapping, _) => mapping.CreateInfo(options, - (PgConverter)Activator.CreateInstance(typeof(MultirangeConverter<,>).MakeGenericType(type, subInfo.Type), subInfo.GetResolution().Converter)!, - preferredFormat: subInfo.PreferredFormat, supportsWriting: subInfo.SupportsWriting), - mapping => mapping with { MatchRequirement = MatchRequirement.Single }); + var converterType = subInfo.Type.MakeArrayType(); + + return CreateCollection().AddMapping(type ?? converterType, dataTypeName, + (options, mapping, _) => + new PgTypeInfo( + options, + (PgConverter)Activator.CreateInstance(typeof(MultirangeConverter<,>).MakeGenericType(converterType, subInfo.Type), + subInfo.GetResolution().Converter)!, + new DataTypeName(mapping.DataTypeName), + unboxedType: type is not null && type != converterType ? converterType : null + ) { PreferredFormat = subInfo.PreferredFormat, SupportsWriting = subInfo.SupportsWriting }, + mapping => mapping with { MatchRequirement = MatchRequirement.DataTypeName }); } } @@ -157,8 +166,9 @@ sealed class MultirangeArrayResolver : MultirangeResolver { protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) { - Type? elementType = null; - if (!((type is null || IsArrayLikeType(type, out elementType)) && IsArrayDataTypeName(dataTypeName, options, out var elementDataTypeName))) + var elementType = type == typeof(object) ? 
type : null; + if ((type is not null && type != typeof(object) && !IsArrayLikeType(type, out elementType)) + || !IsArrayDataTypeName(dataTypeName, options, out var elementDataTypeName)) return null; var mappings = base.GetMappings(elementType, elementDataTypeName, options); diff --git a/src/Npgsql/Internal/ResolverFactories/UnsupportedTypeInfoResolver.cs b/src/Npgsql/Internal/ResolverFactories/UnsupportedTypeInfoResolver.cs index b8fdaa0030..f540b6eca8 100644 --- a/src/Npgsql/Internal/ResolverFactories/UnsupportedTypeInfoResolver.cs +++ b/src/Npgsql/Internal/ResolverFactories/UnsupportedTypeInfoResolver.cs @@ -13,56 +13,60 @@ sealed class UnsupportedTypeInfoResolver : IPgTypeInfoResolver if (options.IntrospectionMode) return null; - RecordTypeInfoResolverFactory.CheckUnsupported(type, dataTypeName, options); - AdoTypeInfoResolverFactory.ThrowIfRangeUnsupported(type, dataTypeName, options); - AdoTypeInfoResolverFactory.ThrowIfMultirangeUnsupported(type, dataTypeName, options); - FullTextSearchTypeInfoResolverFactory.CheckUnsupported(type, dataTypeName, options); - LTreeTypeInfoResolverFactory.CheckUnsupported(type, dataTypeName, options); + RecordTypeInfoResolverFactory.ThrowIfUnsupported(type, dataTypeName, options); + FullTextSearchTypeInfoResolverFactory.ThrowIfUnsupported(type, dataTypeName, options); + LTreeTypeInfoResolverFactory.ThrowIfUnsupported(type, dataTypeName, options); - if (type is null) - return null; + // The compiler can't see that these method(s) are completely safe, other methods force the attributes on the type(s). +#pragma warning disable IL3050, IL2026 + JsonDynamicTypeInfoResolverFactory.ThrowIfUnsupported(type, dataTypeName, options); +#pragma warning restore IL3050, IL2026 - // These checks are here because their resolver types have RUC/RDC - if (type != typeof(object)) + switch (dataTypeName is null ? 
null : options.DatabaseInfo.GetPostgresType(dataTypeName.GetValueOrDefault())) { - switch (dataTypeName) - { - case "pg_catalog.json" or "pg_catalog.jsonb": - throw new NotSupportedException( - string.Format( - NpgsqlStrings.DynamicJsonNotEnabled, - type == typeof(object) ? "" : type.Name, - nameof(NpgsqlSlimDataSourceBuilder.EnableDynamicJson), - typeof(TBuilder).Name)); - - case not null when options.DatabaseInfo.GetPostgresType(dataTypeName) is PostgresEnumType: + case PostgresEnumType: + // Unmapped enum types never work on object or default. + if (type is not null && type != typeof(object)) throw new NotSupportedException( string.Format( NpgsqlStrings.UnmappedEnumsNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableUnmappedTypes), typeof(TBuilder).Name)); + break; - case not null when options.DatabaseInfo.GetPostgresType(dataTypeName) is PostgresRangeType: - throw new NotSupportedException( - string.Format( - NpgsqlStrings.UnmappedRangesNotEnabled, - nameof(NpgsqlSlimDataSourceBuilder.EnableUnmappedTypes), - typeof(TBuilder).Name)); + case PostgresRangeType when !options.RangesEnabled: + throw new NotSupportedException( + string.Format(NpgsqlStrings.RangesNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableRanges), typeof(TBuilder).Name)); + case PostgresRangeType: + throw new NotSupportedException( + string.Format( + NpgsqlStrings.UnmappedRangesNotEnabled, + nameof(NpgsqlSlimDataSourceBuilder.EnableUnmappedTypes), + typeof(TBuilder).Name)); - case not null when options.DatabaseInfo.GetPostgresType(dataTypeName) is PostgresMultirangeType: - throw new NotSupportedException( - string.Format( - NpgsqlStrings.UnmappedRangesNotEnabled, - nameof(NpgsqlSlimDataSourceBuilder.EnableUnmappedTypes), - typeof(TBuilder).Name)); - } + case PostgresMultirangeType when !options.MultirangesEnabled: + throw new NotSupportedException( + string.Format(NpgsqlStrings.MultirangesNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableMultiranges), typeof(TBuilder).Name)); + case 
PostgresMultirangeType: + throw new NotSupportedException( + string.Format( + NpgsqlStrings.UnmappedRangesNotEnabled, + nameof(NpgsqlSlimDataSourceBuilder.EnableUnmappedTypes), + typeof(TBuilder).Name)); + + case PostgresArrayType when !options.ArraysEnabled: + throw new NotSupportedException( + string.Format(NpgsqlStrings.ArraysNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableArrays), typeof(TBuilder).Name)); } - if (TypeInfoMappingCollection.IsArrayLikeType(type, out var elementType) && TypeInfoMappingCollection.IsArrayLikeType(elementType, out _)) - throw new NotSupportedException("Writing is not supported for jagged collections, use a multidimensional array instead."); + if (type is not null) + { + if (TypeInfoMappingCollection.IsArrayLikeType(type, out var elementType) && TypeInfoMappingCollection.IsArrayLikeType(elementType, out _)) + throw new NotSupportedException("Writing is not supported for jagged collections, use a multidimensional array instead."); - if (typeof(IEnumerable).IsAssignableFrom(type) && !typeof(IList).IsAssignableFrom(type) && type != typeof(string) && (dataTypeName is null || dataTypeName.Value.IsArray)) - throw new NotSupportedException("Writing is not supported for IEnumerable parameters, use an array or List instead."); + if (typeof(IEnumerable).IsAssignableFrom(type) && !typeof(IList).IsAssignableFrom(type) && type != typeof(string) && (dataTypeName is null || dataTypeName.Value.IsArray)) + throw new NotSupportedException("Writing is not supported for IEnumerable parameters, use an array or List instead."); + } return null; } diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index 47f44c74f5..9415296585 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -31,7 +31,7 @@ public abstract class NpgsqlDataSource : DbDataSource internal NpgsqlDataSourceConfiguration Configuration { get; } internal NpgsqlLoggingConfiguration LoggingConfiguration { get; } - readonly 
IPgTypeInfoResolver _resolver; + readonly PgTypeInfoResolverChain _resolverChain; internal PgSerializerOptions SerializerOptions { get; private set; } = null!; // Initialized at bootstrapping /// @@ -117,8 +117,7 @@ internal NpgsqlDataSource( Debug.Assert(_passwordProvider is null || _passwordProviderAsync is not null); - // TODO probably want this on the options so it can devirt unconditionally. - _resolver = new TypeInfoResolverChain(resolverChain); + _resolverChain = resolverChain; _password = settings.Password; if (_periodicPasswordSuccessRefreshInterval != default) @@ -249,7 +248,7 @@ internal async Task Bootstrap( new(PostgresMinimalDatabaseInfo.DefaultTypeCatalog) { TextEncoding = connector.TextEncoding, - TypeInfoResolver = AdoTypeInfoResolverFactory.Instance.CreateResolver() + TypeInfoResolver = AdoTypeInfoResolverFactory.Instance.CreateResolver(), }; NpgsqlDatabaseInfo databaseInfo; @@ -259,13 +258,13 @@ internal async Task Bootstrap( connector.DatabaseInfo = DatabaseInfo = databaseInfo; connector.SerializerOptions = SerializerOptions = - new(databaseInfo, CreateTimeZoneProvider(connector.Timezone)) + new(databaseInfo, _resolverChain, CreateTimeZoneProvider(connector.Timezone)) { ArrayNullabilityMode = Settings.ArrayNullabilityMode, EnableDateTimeInfinityConversions = !Statics.DisableDateTimeInfinityConversions, TextEncoding = connector.TextEncoding, - TypeInfoResolver = _resolver, - DefaultNameTranslator = _defaultNameTranslator + DefaultNameTranslator = _defaultNameTranslator, + }; IsBootstrapped = true; diff --git a/src/Npgsql/NpgsqlDataSourceConfiguration.cs b/src/Npgsql/NpgsqlDataSourceConfiguration.cs index eae6a3c19d..ec3e5e4611 100644 --- a/src/Npgsql/NpgsqlDataSourceConfiguration.cs +++ b/src/Npgsql/NpgsqlDataSourceConfiguration.cs @@ -19,7 +19,7 @@ sealed record NpgsqlDataSourceConfiguration(string? Name, Func>? 
PeriodicPasswordProvider, TimeSpan PeriodicPasswordSuccessRefreshInterval, TimeSpan PeriodicPasswordFailureRefreshInterval, - IEnumerable ResolverChain, + PgTypeInfoResolverChain ResolverChain, List HackyEnumMappings, INpgsqlNameTranslator DefaultNameTranslator, Action? ConnectionInitializer, diff --git a/src/Npgsql/Properties/NpgsqlStrings.Designer.cs b/src/Npgsql/Properties/NpgsqlStrings.Designer.cs index d09510d783..f00370da48 100644 --- a/src/Npgsql/Properties/NpgsqlStrings.Designer.cs +++ b/src/Npgsql/Properties/NpgsqlStrings.Designer.cs @@ -177,21 +177,15 @@ internal static string RangesNotEnabled { } } - internal static string RangeArraysNotEnabled { - get { - return ResourceManager.GetString("RangeArraysNotEnabled", resourceCulture); - } - } - internal static string MultirangesNotEnabled { get { return ResourceManager.GetString("MultirangesNotEnabled", resourceCulture); } } - internal static string MultirangeArraysNotEnabled { + internal static string ArraysNotEnabled { get { - return ResourceManager.GetString("MultirangeArraysNotEnabled", resourceCulture); + return ResourceManager.GetString("ArraysNotEnabled", resourceCulture); } } diff --git a/src/Npgsql/Properties/NpgsqlStrings.resx b/src/Npgsql/Properties/NpgsqlStrings.resx index df2642e85e..5dbc58acdf 100644 --- a/src/Npgsql/Properties/NpgsqlStrings.resx +++ b/src/Npgsql/Properties/NpgsqlStrings.resx @@ -85,14 +85,11 @@ Ranges aren't enabled; please call {0} on {1} to enable ranges. - - Range arrays aren't enabled; please call {0} on {1} to enable arrays for ranges. - Multiranges aren't enabled; please call {0} on {1} to enable multiranges. - - Multirange arrays aren't enabled; please call {0} on {1} to enable arrays for multiranges. + + Arrays aren't enabled; please call {0} on {1} to enable arrays. Cannot write DateTime with Kind={0} to PostgreSQL type '{1}', only UTC is supported. Note that it's not possible to mix DateTimes with different Kinds in an array, range, or multirange. 
diff --git a/src/Npgsql/Shims/MemoryExtensions.cs b/src/Npgsql/Shims/MemoryExtensions.cs index 6247c6a21e..0da143f3c4 100644 --- a/src/Npgsql/Shims/MemoryExtensions.cs +++ b/src/Npgsql/Shims/MemoryExtensions.cs @@ -1,5 +1,7 @@ #if !NET7_0_OR_GREATER -namespace System; +using System; + +namespace Npgsql; static class MemoryExtensions { diff --git a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs index c2c01a1887..caffde5fc0 100644 --- a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs +++ b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs @@ -84,13 +84,13 @@ PgSerializerOptions TypeMappingOptions builder.AppendResolverFactory(factory); foreach (var factory in _typeMappingResolvers) builder.AppendResolverFactory(factory); - return _typeMappingOptions = new(PostgresMinimalDatabaseInfo.DefaultTypeCatalog) + var chain = builder.Build(); + return _typeMappingOptions = new(PostgresMinimalDatabaseInfo.DefaultTypeCatalog, chain) { // This means we don't ever have a missing oid for a datatypename as our canonical format is datatypenames. PortableTypeIds = true, // Don't throw if our catalog doesn't know the datatypename. - IntrospectionMode = true, - TypeInfoResolver = new TypeInfoResolverChain(builder.Build()) + IntrospectionMode = true }; } finally diff --git a/test/Npgsql.Tests/Support/TestBase.cs b/test/Npgsql.Tests/Support/TestBase.cs index d5bde0f142..463b132d56 100644 --- a/test/Npgsql.Tests/Support/TestBase.cs +++ b/test/Npgsql.Tests/Support/TestBase.cs @@ -368,7 +368,7 @@ public async Task AssertTypeUnsupported(T value, string sqlLiteral, string pg public async Task AssertTypeUnsupportedRead(string sqlLiteral, string pgTypeName, NpgsqlDataSource? dataSource = null) { - dataSource ??= DefaultDataSource; + dataSource ??= DataSource; await using var conn = await dataSource.OpenConnectionAsync(); // Make sure we don't poison the connection with a fault, potentially terminating other perfectly passing tests as well. 
@@ -380,10 +380,26 @@ public async Task AssertTypeUnsupportedRead(string sqlLite return Assert.Throws(() => reader.GetValue(0))!; } - public Task AssertTypeUnsupportedRead(string sqlLiteral, string pgTypeName, NpgsqlDataSource? dataSource = null) + public Task AssertTypeUnsupportedRead(string sqlLiteral, string pgTypeName, + NpgsqlDataSource? dataSource = null, bool skipArrayCheck = false) => AssertTypeUnsupportedRead(sqlLiteral, pgTypeName, dataSource); - public async Task AssertTypeUnsupportedRead(string sqlLiteral, string pgTypeName, NpgsqlDataSource? dataSource = null) + public async Task AssertTypeUnsupportedRead(string sqlLiteral, string pgTypeName, + NpgsqlDataSource? dataSource = null, bool skipArrayCheck = false) + where TException : Exception + { + var result = await AssertTypeUnsupportedReadCore(sqlLiteral, pgTypeName, dataSource); + + // Check the corresponding array type as well + if (!skipArrayCheck && !pgTypeName.EndsWith("[]", StringComparison.Ordinal)) + { + await AssertTypeUnsupportedReadCore(ArrayLiteral(sqlLiteral), pgTypeName + "[]", dataSource); + } + + return result; + } + + async Task AssertTypeUnsupportedReadCore(string sqlLiteral, string pgTypeName, NpgsqlDataSource? dataSource = null) where TException : Exception { dataSource ??= DataSource; @@ -398,10 +414,26 @@ public async Task AssertTypeUnsupportedRead(string sq return Assert.Throws(() => reader.GetFieldValue(0))!; } - public Task AssertTypeUnsupportedWrite(T value, string? pgTypeName = null, NpgsqlDataSource? dataSource = null) - => AssertTypeUnsupportedWrite(value, pgTypeName, dataSource); + public Task AssertTypeUnsupportedWrite(T value, string? pgTypeName = null, NpgsqlDataSource? dataSource = null, + bool skipArrayCheck = false) + => AssertTypeUnsupportedWrite(value, pgTypeName, dataSource, skipArrayCheck: false); + + public async Task AssertTypeUnsupportedWrite(T value, string? pgTypeName = null, + NpgsqlDataSource? 
dataSource = null, bool skipArrayCheck = false) + where TException : Exception + { + var result = await AssertTypeUnsupportedWriteCore(value, pgTypeName, dataSource); + + // Check the corresponding array type as well + if (!skipArrayCheck && !pgTypeName?.EndsWith("[]", StringComparison.Ordinal) == true) + { + await AssertTypeUnsupportedWriteCore(new[] { value, value }, pgTypeName + "[]", dataSource); + } + + return result; + } - public async Task AssertTypeUnsupportedWrite(T value, string? pgTypeName = null, NpgsqlDataSource? dataSource = null) + async Task AssertTypeUnsupportedWriteCore(T value, string? pgTypeName = null, NpgsqlDataSource? dataSource = null) where TException : Exception { dataSource ??= DataSource; diff --git a/test/Npgsql.Tests/Types/MultirangeTests.cs b/test/Npgsql.Tests/Types/MultirangeTests.cs index 1eeb95f5ac..d01dc6e408 100644 --- a/test/Npgsql.Tests/Types/MultirangeTests.cs +++ b/test/Npgsql.Tests/Types/MultirangeTests.cs @@ -153,6 +153,11 @@ public async Task Unmapped_multirange_supported_only_with_EnableUnmappedTypes() Assert.IsInstanceOf(exception.InnerException); Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); + exception = await AssertTypeUnsupportedRead("""{["bar","foo"],["moo","zoo"]}""", + multirangeTypeName); + Assert.IsInstanceOf(exception.InnerException); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); + exception = await AssertTypeUnsupportedRead>( """{["bar","foo"],["moo","zoo"]}""", multirangeTypeName); diff --git a/test/Npgsql.Tests/Types/RangeTests.cs b/test/Npgsql.Tests/Types/RangeTests.cs index 75c11e04ce..38449d30a2 100644 --- a/test/Npgsql.Tests/Types/RangeTests.cs +++ b/test/Npgsql.Tests/Types/RangeTests.cs @@ -211,6 +211,10 @@ public async Task Unmapped_range_supported_only_with_EnableUnmappedTypes() Assert.IsInstanceOf(exception.InnerException); Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); + exception = await 
AssertTypeUnsupportedRead("""["bar","foo"]""", rangeType); + Assert.IsInstanceOf(exception.InnerException); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); + exception = await AssertTypeUnsupportedRead>("""["bar","foo"]""", rangeType); Assert.IsInstanceOf(exception.InnerException); Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); From dd05935f030590b4e0da50e525a441a1ad5d4502 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 7 Feb 2024 00:16:47 +0200 Subject: [PATCH 379/761] Bump Microsoft.NET.Test.Sdk from 17.8.0 to 17.9.0 (#5565) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index b12dc4ae42..89c656d2f1 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -33,7 +33,7 @@ - + From 1ad0e416ffab96ce9c9757c4004cec6ba2c4b3d9 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Wed, 7 Feb 2024 18:26:02 +0100 Subject: [PATCH 380/761] Improve no IO expected message details (#5568) --- src/Npgsql/Internal/PgBufferedConverter.cs | 4 ++-- src/Npgsql/Internal/PgConverter.cs | 4 ++-- src/Npgsql/Internal/Size.cs | 15 ++++++++------- 3 files changed, 12 insertions(+), 11 deletions(-) diff --git a/src/Npgsql/Internal/PgBufferedConverter.cs b/src/Npgsql/Internal/PgBufferedConverter.cs index 7faf7bb0c4..f3bdcc1413 100644 --- a/src/Npgsql/Internal/PgBufferedConverter.cs +++ b/src/Npgsql/Internal/PgBufferedConverter.cs @@ -19,7 +19,7 @@ public sealed override T Read(PgReader reader) { // We check IsAtStart first to speed up primitive reads. 
if (!reader.IsAtStart && reader.ShouldBufferCurrent()) - ThrowIORequired(); + ThrowIORequired(reader.Current.BufferRequirement); return ReadCore(reader); } @@ -33,7 +33,7 @@ internal sealed override ValueTask ReadAsObject(bool async, PgReader rea public sealed override void Write(PgWriter writer, T value) { if (!writer.BufferingWrite && writer.ShouldFlush(writer.CurrentBufferRequirement)) - ThrowIORequired(); + ThrowIORequired(writer.Current.BufferRequirement); WriteCore(writer, value); } diff --git a/src/Npgsql/Internal/PgConverter.cs b/src/Npgsql/Internal/PgConverter.cs index db6796c1ff..8f44dfc295 100644 --- a/src/Npgsql/Internal/PgConverter.cs +++ b/src/Npgsql/Internal/PgConverter.cs @@ -76,8 +76,8 @@ internal enum DbNullPredicate : byte } [DoesNotReturn] - private protected static void ThrowIORequired() - => throw new InvalidOperationException("Buffer requirements for format not respected, expected no IO to be required."); + private protected void ThrowIORequired(Size bufferRequirement) + => throw new InvalidOperationException($"Buffer requirement '{bufferRequirement}' not respected for converter '{GetType().FullName}', expected no IO to be required."); private protected static bool ThrowInvalidNullValue() => throw new ArgumentNullException("value", "Null value given for non-nullable type converter"); diff --git a/src/Npgsql/Internal/Size.cs b/src/Npgsql/Internal/Size.cs index 0d494b6dbd..7f5e52a1f1 100644 --- a/src/Npgsql/Internal/Size.cs +++ b/src/Npgsql/Internal/Size.cs @@ -54,17 +54,18 @@ public Size Combine(Size result) public static implicit operator Size(int value) => Create(value); - string DebuggerDisplay - => _kind switch - { - SizeKind.Exact or SizeKind.UpperBound => $"{_value} ({_kind})", - SizeKind.Unknown => "Unknown", - _ => throw new ArgumentOutOfRangeException() - }; + string DebuggerDisplay => ToString(); public bool Equals(Size other) => _value == other._value && _kind == other.Kind; public override bool Equals(object? 
obj) => obj is Size other && Equals(other); public override int GetHashCode() => HashCode.Combine(_value, (int)_kind); public static bool operator ==(Size left, Size right) => left.Equals(right); public static bool operator !=(Size left, Size right) => !left.Equals(right); + + public override string ToString() => _kind switch + { + SizeKind.Exact or SizeKind.UpperBound => $"{_value} ({_kind.ToString()})", + SizeKind.Unknown => nameof(SizeKind.Unknown), + _ => throw new ArgumentOutOfRangeException() + }; } From 4f80dd1898fa2727f5df921b345810d34d44d580 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Wed, 7 Feb 2024 18:43:38 +0100 Subject: [PATCH 381/761] Remove tiny bit of code bloat --- src/Npgsql/Internal/PgBufferedConverter.cs | 4 ++-- src/Npgsql/Internal/PgReader.cs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Npgsql/Internal/PgBufferedConverter.cs b/src/Npgsql/Internal/PgBufferedConverter.cs index f3bdcc1413..2bed7ffa3c 100644 --- a/src/Npgsql/Internal/PgBufferedConverter.cs +++ b/src/Npgsql/Internal/PgBufferedConverter.cs @@ -19,7 +19,7 @@ public sealed override T Read(PgReader reader) { // We check IsAtStart first to speed up primitive reads. 
if (!reader.IsAtStart && reader.ShouldBufferCurrent()) - ThrowIORequired(reader.Current.BufferRequirement); + ThrowIORequired(reader.CurrentBufferRequirement); return ReadCore(reader); } @@ -33,7 +33,7 @@ internal sealed override ValueTask ReadAsObject(bool async, PgReader rea public sealed override void Write(PgWriter writer, T value) { if (!writer.BufferingWrite && writer.ShouldFlush(writer.CurrentBufferRequirement)) - ThrowIORequired(writer.Current.BufferRequirement); + ThrowIORequired(writer.CurrentBufferRequirement); WriteCore(writer, value); } diff --git a/src/Npgsql/Internal/PgReader.cs b/src/Npgsql/Internal/PgReader.cs index 2de9086c9e..54672f92e8 100644 --- a/src/Npgsql/Internal/PgReader.cs +++ b/src/Npgsql/Internal/PgReader.cs @@ -65,7 +65,7 @@ internal PgReader(NpgsqlReadBuffer buffer) public ValueMetadata Current => new() { Size = CurrentSize, Format = _fieldFormat, BufferRequirement = CurrentBufferRequirement }; public int CurrentRemaining => HasCurrent ? _currentSize - CurrentOffset : FieldRemaining; - Size CurrentBufferRequirement => HasCurrent ? _currentBufferRequirement : _fieldBufferRequirement; + internal Size CurrentBufferRequirement => HasCurrent ? 
_currentBufferRequirement : _fieldBufferRequirement; int CurrentOffset => FieldOffset - _currentStartPos; internal bool IsAtStart => FieldOffset is 0; From f309c3f1af2cedfba534d649a7da31122472f1ab Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 9 Feb 2024 01:01:41 +0200 Subject: [PATCH 382/761] Bump NodaTime from 3.1.10 to 3.1.11 (#5571) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 89c656d2f1..101406b447 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -17,7 +17,7 @@ - + From 786ce89e18a768c72c8b73707671cc5e75f3e1cb Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Fri, 9 Feb 2024 18:58:07 +0100 Subject: [PATCH 383/761] Support writing IList impls again (#5558) --- .../Converters/MultirangeConverter.cs | 3 - .../AdoTypeInfoResolverFactory.cs | 6 +- .../UnsupportedTypeInfoResolver.cs | 2 +- src/Npgsql/Internal/TypeInfoMapping.cs | 73 ++++++++++++------- test/Npgsql.Tests/Types/ArrayTests.cs | 23 +++++- 5 files changed, 71 insertions(+), 36 deletions(-) diff --git a/src/Npgsql/Internal/Converters/MultirangeConverter.cs b/src/Npgsql/Internal/Converters/MultirangeConverter.cs index 524901977b..36ae35a11c 100644 --- a/src/Npgsql/Internal/Converters/MultirangeConverter.cs +++ b/src/Npgsql/Internal/Converters/MultirangeConverter.cs @@ -14,9 +14,6 @@ sealed class MultirangeConverter : PgStreamingConverter readonly PgConverter _rangeConverter; readonly BufferRequirements _rangeRequirements; - static MultirangeConverter() - => Debug.Assert(typeof(T).IsArray || typeof(T).IsGenericType && typeof(T).GetGenericTypeDefinition() == typeof(List<>)); - public MultirangeConverter(PgConverter rangeConverter) { if (!rangeConverter.CanConvert(DataFormat.Binary, out var bufferRequirements)) diff --git a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs 
b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs index 9b024a5e09..8c0d6e9eaf 100644 --- a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs @@ -340,9 +340,9 @@ sealed class ArrayResolver : Resolver, IPgTypeInfoResolver var info = Mappings.Find(type, dataTypeName, options); Type? elementType = null; - if (info is null && dataTypeName is not null && - (type is null || type == typeof(object) || TypeInfoMappingCollection.IsArrayLikeType(type, out elementType)) - && options.DatabaseInfo.GetPostgresType(dataTypeName) is PostgresArrayType { Element: var pgElementType }) + if (info is null && dataTypeName is not null + && options.DatabaseInfo.GetPostgresType(dataTypeName) is PostgresArrayType { Element: var pgElementType } + && (type is null || type == typeof(object) || TypeInfoMappingCollection.IsArrayLikeType(type, out elementType))) { info = GetEnumArrayTypeInfo(elementType, pgElementType, type, dataTypeName.GetValueOrDefault(), options) ?? 
GetObjectArrayTypeInfo(elementType, pgElementType, type, dataTypeName.GetValueOrDefault(), options); diff --git a/src/Npgsql/Internal/ResolverFactories/UnsupportedTypeInfoResolver.cs b/src/Npgsql/Internal/ResolverFactories/UnsupportedTypeInfoResolver.cs index f540b6eca8..45d4568a26 100644 --- a/src/Npgsql/Internal/ResolverFactories/UnsupportedTypeInfoResolver.cs +++ b/src/Npgsql/Internal/ResolverFactories/UnsupportedTypeInfoResolver.cs @@ -65,7 +65,7 @@ sealed class UnsupportedTypeInfoResolver : IPgTypeInfoResolver throw new NotSupportedException("Writing is not supported for jagged collections, use a multidimensional array instead."); if (typeof(IEnumerable).IsAssignableFrom(type) && !typeof(IList).IsAssignableFrom(type) && type != typeof(string) && (dataTypeName is null || dataTypeName.Value.IsArray)) - throw new NotSupportedException("Writing is not supported for IEnumerable parameters, use an array or List instead."); + throw new NotSupportedException("Writing is not supported for IEnumerable parameters, use an array or some implementation of IList instead."); } return null; diff --git a/src/Npgsql/Internal/TypeInfoMapping.cs b/src/Npgsql/Internal/TypeInfoMapping.cs index e2d8927527..00b9ba18ee 100644 --- a/src/Npgsql/Internal/TypeInfoMapping.cs +++ b/src/Npgsql/Internal/TypeInfoMapping.cs @@ -272,11 +272,12 @@ Func GetDefaultConfigure(MatchRequirement matc }; Func GetArrayTypeMatchPredicate(Func elementTypeMatchPredicate) - => type => type is null ? elementTypeMatchPredicate(null) : type.IsArray && elementTypeMatchPredicate.Invoke(type.GetElementType()!); - Func GetListTypeMatchPredicate(Func elementTypeMatchPredicate) - => type => type is null ? elementTypeMatchPredicate(null) : type.IsConstructedGenericType && type.GetGenericTypeDefinition() is { } def - && (def == typeof(List<>) || def == typeof(IList<>)) - && elementTypeMatchPredicate(type.GetGenericArguments()[0]); + => type => type is null ? 
elementTypeMatchPredicate(null) : type.IsArray && elementTypeMatchPredicate(type.GetElementType()!); + Func GetListTypeMatchPredicate(Func elementTypeMatchPredicate) + => type => type is null ? elementTypeMatchPredicate(null) + // We anti-constrain on IsArray to avoid matching byte/sbyte, short/ushort int/uint + // with the list mapping of the earlier type when an exact match is probably available. + : !type.IsArray && typeof(IList).IsAssignableFrom(type) && elementTypeMatchPredicate(typeof(TElement)); public void AddType(string dataTypeName, TypeInfoFactory createInfo, bool isDefault = false) where T : class => AddType(dataTypeName, createInfo, GetDefaultConfigure(isDefault)); @@ -330,7 +331,7 @@ public void AddArrayType(TypeInfoMapping elementMapping, bool suppress { // Always use a predicate to match all dimensions. var arrayTypeMatchPredicate = GetArrayTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? (static type => type is null || type == typeof(TElement))); - var listTypeMatchPredicate = GetListTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? (static type => type is null || type == typeof(TElement))); + var listTypeMatchPredicate = GetListTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? (static type => type is null || type == typeof(TElement))); var arrayDataTypeName = GetArrayDataTypeName(elementMapping.DataTypeName); @@ -370,7 +371,7 @@ public void AddResolverArrayType(TypeInfoMapping elementMapping, bool { // Always use a predicate to match all dimensions. var arrayTypeMatchPredicate = GetArrayTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? (static type => type is null || type == typeof(TElement))); - var listTypeMatchPredicate = GetListTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? (static type => type is null || type == typeof(TElement))); + var listTypeMatchPredicate = GetListTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? 
(static type => type is null || type == typeof(TElement))); var arrayDataTypeName = GetArrayDataTypeName(elementMapping.DataTypeName); @@ -427,9 +428,9 @@ void AddStructType(Type type, Type nullableType, string dataTypeName, TypeInfoFa { MatchRequirement = mapping.MatchRequirement, TypeMatchPredicate = mapping.TypeMatchPredicate is not null - ? type => type is null + ? matchType => matchType is null ? mapping.TypeMatchPredicate(null) - : Nullable.GetUnderlyingType(type) is { } underlying && mapping.TypeMatchPredicate(underlying) + : matchType == nullableType && mapping.TypeMatchPredicate(type) : null }); } @@ -448,10 +449,10 @@ public void AddStructArrayType(TypeInfoMapping elementMapping, TypeInf // Always use a predicate to match all dimensions. var arrayTypeMatchPredicate = GetArrayTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? (static type => type is null || type == typeof(TElement))); var nullableArrayTypeMatchPredicate = GetArrayTypeMatchPredicate(nullableElementMapping.TypeMatchPredicate ?? (static type => - type is null || (Nullable.GetUnderlyingType(type) is { } underlying && underlying == typeof(TElement)))); - var listTypeMatchPredicate = GetListTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? (static type => type is null || type == typeof(TElement))); - var nullableListTypeMatchPredicate = GetListTypeMatchPredicate(nullableElementMapping.TypeMatchPredicate ?? (static type => - type is null || (Nullable.GetUnderlyingType(type) is { } underlying && underlying == typeof(TElement)))); + type is null || type == typeof(TElement?))); + var listTypeMatchPredicate = GetListTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? (static type => type is null || type == typeof(TElement))); + var nullableListTypeMatchPredicate = GetListTypeMatchPredicate(nullableElementMapping.TypeMatchPredicate ?? 
(static type => + type is null || type == typeof(TElement?))); var arrayDataTypeName = GetArrayDataTypeName(elementMapping.DataTypeName); @@ -544,9 +545,9 @@ void AddResolverStructType(Type type, Type nullableType, string dataTypeName, Ty { MatchRequirement = mapping.MatchRequirement, TypeMatchPredicate = mapping.TypeMatchPredicate is not null - ? type => type is null + ? matchType => matchType is null ? mapping.TypeMatchPredicate(null) - : Nullable.GetUnderlyingType(type) is { } underlying && mapping.TypeMatchPredicate(underlying) + : matchType == nullableType && mapping.TypeMatchPredicate(type) : null }); } @@ -565,10 +566,10 @@ public void AddResolverStructArrayType(TypeInfoMapping elementMapping, // Always use a predicate to match all dimensions. var arrayTypeMatchPredicate = GetArrayTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? (static type => type is null || type == typeof(TElement))); var nullableArrayTypeMatchPredicate = GetArrayTypeMatchPredicate(nullableElementMapping.TypeMatchPredicate ?? (static type => - type is null || (Nullable.GetUnderlyingType(type) is { } underlying && underlying == typeof(TElement)))); - var listTypeMatchPredicate = GetListTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? (static type => type is null || type == typeof(TElement))); - var nullableListTypeMatchPredicate = GetListTypeMatchPredicate(nullableElementMapping.TypeMatchPredicate ?? (static type => - type is null || (Nullable.GetUnderlyingType(type) is { } underlying && underlying == typeof(TElement)))); + type is null || type == typeof(TElement?))); + var listTypeMatchPredicate = GetListTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? (static type => type is null || type == typeof(TElement))); + var nullableListTypeMatchPredicate = GetListTypeMatchPredicate(nullableElementMapping.TypeMatchPredicate ?? 
(static type => + type is null || type == typeof(TElement?))); var arrayDataTypeName = GetArrayDataTypeName(elementMapping.DataTypeName); @@ -652,18 +653,36 @@ void AddPolymorphicResolverArrayType(TypeInfoMapping elementMapping, Type type, } /// Returns whether type matches any of the types we register pg arrays as. + [UnconditionalSuppressMessage("Trimming", "IL2070", + Justification = "Checking for IList implementing types requires interface list enumeration which isn't compatible with trimming. " + + "However as long as a concrete IList is rooted somewhere in the app, for instance through an `AddArrayType(...)` mapping, every implementation must keep it.")] + // We care about IList implementations if the instantiation is actually rooted by us through an Array mapping. + // Dynamic resolvers are a notable counterexample, but they are all correctly marked with RequiresUnreferencedCode. public static bool IsArrayLikeType(Type type, [NotNullWhen(true)] out Type? elementType) { - elementType = type switch + if (type.GetElementType() is { } t) { - { IsArray: true } => type.GetElementType(), - { IsConstructedGenericType: true } when type.GetGenericTypeDefinition() is { } def && - (def == typeof(List<>) || def == typeof(IList<>)) - => type.GetGenericArguments()[0], - _ => null - }; + elementType = t; + return true; + } + + if (type.IsConstructedGenericType && type.GetGenericTypeDefinition() is var def && (def == typeof(List<>) || def == typeof(IList<>))) + { + elementType = type.GetGenericArguments()[0]; + return true; + } - return elementType is not null; + foreach (var inf in type.GetInterfaces()) + { + if (inf.IsConstructedGenericType && inf.GetGenericTypeDefinition() == typeof(IList<>)) + { + elementType = inf.GetGenericArguments()[0]; + return true; + } + } + + elementType = null; + return false; } static string GetArrayDataTypeName(string dataTypeName) diff --git a/test/Npgsql.Tests/Types/ArrayTests.cs b/test/Npgsql.Tests/Types/ArrayTests.cs index 
e27d7bc2b1..a567e4891e 100644 --- a/test/Npgsql.Tests/Types/ArrayTests.cs +++ b/test/Npgsql.Tests/Types/ArrayTests.cs @@ -1,4 +1,5 @@ using System; +using System.Collections; using System.Collections.Generic; using System.Collections.Immutable; using System.Data; @@ -155,6 +156,18 @@ public async Task Generic_List() => await AssertType( new List { 1, 2, 3 }, "{1,2,3}", "integer[]", NpgsqlDbType.Integer | NpgsqlDbType.Array, isDefaultForReading: false); + [Test] + public async Task Write_IList_implementation() + => await AssertTypeWrite( + ImmutableArray.Create(1, 2, 3), "{1,2,3}", "integer[]", NpgsqlDbType.Integer | NpgsqlDbType.Array); + + [Test] + public void Read_IList_implementation_throws() + { + Assert.ThrowsAsync(() => + AssertTypeRead("{1,2,3}", "integer[]", ImmutableArray.Create(1, 2, 3), isDefault: false)); + } + [Test] public async Task Generic_IList() { @@ -283,8 +296,14 @@ public async Task Writing_IEnumerable_is_not_supported() { await using var conn = await OpenConnectionAsync(); await using var cmd = new NpgsqlCommand("SELECT @p1", conn); - cmd.Parameters.AddWithValue("p1", Enumerable.Range(1, 3)); - Assert.That(async () => await cmd.ExecuteScalarAsync(), Throws.Exception.TypeOf().With.Property("InnerException").Message.Contains("array or List")); + cmd.Parameters.AddWithValue("p1", new EnumerableOnly()); + Assert.That(async () => await cmd.ExecuteScalarAsync(), Throws.Exception.TypeOf().With.Property("InnerException").Message.Contains("array or some implementation of IList")); + } + + class EnumerableOnly : IEnumerable + { + public IEnumerator GetEnumerator() => throw new NotImplementedException(); + IEnumerator IEnumerable.GetEnumerator() => GetEnumerator(); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/960")] From d137ac60b8789bc291343a69d178d5fb0e962aab Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Fri, 9 Feb 2024 19:24:32 +0100 Subject: [PATCH 384/761] Add a factory mechanism for the factories called by the chain builder 
(#5561) Fixes #5562 --- .../PgTypeInfoResolverChainBuilder.cs | 47 ++++++++++++------ src/Npgsql/NpgsqlDataSourceBuilder.cs | 2 +- src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 7 ++- test/Npgsql.Tests/DataSourceTests.cs | 49 +++++++++++++++++++ 4 files changed, 88 insertions(+), 17 deletions(-) diff --git a/src/Npgsql/Internal/PgTypeInfoResolverChainBuilder.cs b/src/Npgsql/Internal/PgTypeInfoResolverChainBuilder.cs index 69fc5ab8fa..548d236096 100644 --- a/src/Npgsql/Internal/PgTypeInfoResolverChainBuilder.cs +++ b/src/Npgsql/Internal/PgTypeInfoResolverChainBuilder.cs @@ -6,7 +6,7 @@ namespace Npgsql.Internal; struct PgTypeInfoResolverChainBuilder { - readonly List _factories = new(); + readonly List<(Type ImplementationType, object)> _factories = new(); Action>? _addRangeResolvers; Action>? _addMultirangeResolvers; RangeArrayHandler _rangeArrayHandler = RangeArrayHandler.Instance; @@ -19,24 +19,43 @@ public PgTypeInfoResolverChainBuilder() public void Clear() => _factories.Clear(); - public void AppendResolverFactory(PgTypeInfoResolverFactory factory) => AddResolverFactory(factory); - public void PrependResolverFactory(PgTypeInfoResolverFactory factory) => AddResolverFactory(factory, prepend: true); + public void AppendResolverFactory(PgTypeInfoResolverFactory factory) + => AddResolverFactory(factory.GetType(), factory); + public void AppendResolverFactory(Func factory) where T : PgTypeInfoResolverFactory + => AddResolverFactory(typeof(T), Memoize(factory)); - void AddResolverFactory(PgTypeInfoResolverFactory factory, bool prepend = false) + public void PrependResolverFactory(PgTypeInfoResolverFactory factory) + => AddResolverFactory(factory.GetType(), factory, prepend: true); + public void PrependResolverFactory(Func factory) where T : PgTypeInfoResolverFactory + => AddResolverFactory(typeof(T), Memoize(factory), prepend: true); + + // Memoize the caller factory so all our actions (_addArrayResolvers etc.) call into the same instance. 
+ static Func Memoize(Func factory) { - var type = factory.GetType(); + PgTypeInfoResolverFactory? instance = null; + return () => instance ??= factory(); + } + static PgTypeInfoResolverFactory GetInstance((Type, object Instance) factory) => factory.Instance switch + { + PgTypeInfoResolverFactory f => f, + Func f => f(), + _ => throw new ArgumentOutOfRangeException(nameof(factory), factory, null) + }; + + void AddResolverFactory(Type type, object factory, bool prepend = false) + { for (var i = 0; i < _factories.Count; i++) - if (_factories[i].GetType() == type) + if (_factories[i].ImplementationType == type) { _factories.RemoveAt(i); break; } if (prepend) - _factories.Insert(0, factory); + _factories.Insert(0, (type, factory)); else - _factories.Add(factory); + _factories.Add((type, factory)); } public void EnableRanges() @@ -47,7 +66,7 @@ public void EnableRanges() static void AddResolvers(PgTypeInfoResolverChainBuilder instance, List resolvers) { foreach (var factory in instance._factories) - if (factory.CreateRangeResolver() is { } resolver) + if (GetInstance(factory).CreateRangeResolver() is { } resolver) resolvers.Add(resolver); } } @@ -60,7 +79,7 @@ public void EnableMultiranges() static void AddResolvers(PgTypeInfoResolverChainBuilder instance, List resolvers) { foreach (var factory in instance._factories) - if (factory.CreateMultirangeResolver() is { } resolver) + if (GetInstance(factory).CreateMultirangeResolver() is { } resolver) resolvers.Add(resolver); } } @@ -72,17 +91,17 @@ public void EnableArrays() static void AddResolvers(PgTypeInfoResolverChainBuilder instance, List resolvers) { foreach (var factory in instance._factories) - if (factory.CreateArrayResolver() is { } resolver) + if (GetInstance(factory).CreateArrayResolver() is { } resolver) resolvers.Add(resolver); if (instance._addRangeResolvers is not null) foreach (var factory in instance._factories) - if (instance._rangeArrayHandler.CreateRangeArrayResolver(factory) is { } resolver) + if 
(instance._rangeArrayHandler.CreateRangeArrayResolver(GetInstance(factory)) is { } resolver) resolvers.Add(resolver); if (instance._addMultirangeResolvers is not null) foreach (var factory in instance._factories) - if (instance._multirangeArrayHandler.CreateMultirangeArrayResolver(factory) is { } resolver) + if (instance._multirangeArrayHandler.CreateMultirangeArrayResolver(GetInstance(factory)) is { } resolver) resolvers.Add(resolver); } } @@ -91,7 +110,7 @@ public PgTypeInfoResolverChain Build(Action>? configur { var resolvers = new List(); foreach (var factory in _factories) - resolvers.Add(factory.CreateResolver()); + resolvers.Add(GetInstance(factory).CreateResolver()); var instance = this; _addRangeResolvers?.Invoke(instance, resolvers); _addMultirangeResolvers?.Invoke(instance, resolvers); diff --git a/src/Npgsql/NpgsqlDataSourceBuilder.cs b/src/Npgsql/NpgsqlDataSourceBuilder.cs index b6e8972f96..e304a559cc 100644 --- a/src/Npgsql/NpgsqlDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlDataSourceBuilder.cs @@ -82,7 +82,7 @@ public NpgsqlDataSourceBuilder(string? 
connectionString = null) { instance.AppendDefaultFactories(); instance.AppendResolverFactory(new ExtraConversionResolverFactory()); - instance.AppendResolverFactory(new JsonTypeInfoResolverFactory(instance.JsonSerializerOptions)); + instance.AppendResolverFactory(() => new JsonTypeInfoResolverFactory(instance.JsonSerializerOptions)); instance.AppendResolverFactory(new RecordTypeInfoResolverFactory()); instance.AppendResolverFactory(new FullTextSearchTypeInfoResolverFactory()); instance.AppendResolverFactory(new NetworkTypeInfoResolverFactory()); diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index 3dc2b0b52c..72cfeb4949 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -365,7 +365,10 @@ public bool UnmapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMember void INpgsqlTypeMapper.Reset() => _resolverChainBuilder.Clear(); internal Action> ConfigureResolverChain { get; set; } - internal void AppendResolverFactory(PgTypeInfoResolverFactory factory) => _resolverChainBuilder.AppendResolverFactory(factory); + internal void AppendResolverFactory(PgTypeInfoResolverFactory factory) + => _resolverChainBuilder.AppendResolverFactory(factory); + internal void AppendResolverFactory(Func factory) where T : PgTypeInfoResolverFactory + => _resolverChainBuilder.AppendResolverFactory(factory); internal void AppendDefaultFactories() { @@ -493,7 +496,7 @@ public NpgsqlSlimDataSourceBuilder EnableDynamicJson( Type[]? jsonbClrTypes = null, Type[]? 
jsonClrTypes = null) { - _resolverChainBuilder.AppendResolverFactory(new JsonDynamicTypeInfoResolverFactory(jsonbClrTypes, jsonClrTypes, JsonSerializerOptions)); + _resolverChainBuilder.AppendResolverFactory(() => new JsonDynamicTypeInfoResolverFactory(jsonbClrTypes, jsonClrTypes, JsonSerializerOptions)); return this; } diff --git a/test/Npgsql.Tests/DataSourceTests.cs b/test/Npgsql.Tests/DataSourceTests.cs index d2d8c78d21..639e83a795 100644 --- a/test/Npgsql.Tests/DataSourceTests.cs +++ b/test/Npgsql.Tests/DataSourceTests.cs @@ -1,6 +1,8 @@ using System; using System.Data; using System.Data.Common; +using System.Text.Json; +using System.Text.Json.Serialization; using System.Threading.Tasks; using NUnit.Framework; @@ -307,4 +309,51 @@ public async Task Connection_string_builder_settings_are_frozen_on_Build() await using var command = dataSource.CreateCommand("SHOW application_name"); Assert.That(await command.ExecuteScalarAsync(), Is.EqualTo("foo")); } + + class Test + { + public int Id { get; set; } + } + + [Test] + public async Task ConfigureJsonOptions_is_order_independent() + { + // Expect failure, no options + { + var builder = CreateDataSourceBuilder(); + builder.EnableDynamicJson(); + await using var dataSource = builder.Build(); + + await using var command = dataSource.CreateCommand("SELECT '{\"id\": 1}'::json;"); + using var reader = await command.ExecuteReaderAsync(); + reader.Read(); + Assert.That(reader.GetFieldValue(0).Id, Is.EqualTo(default(int))); + } + + // Expect success, ConfigureJsonOptions before EnableDynamicJson + { + var builder = CreateDataSourceBuilder(); + builder.ConfigureJsonOptions(new JsonSerializerOptions { PropertyNamingPolicy = JsonNamingPolicy.CamelCase }); + builder.EnableDynamicJson(); + await using var dataSource = builder.Build(); + + await using var command = dataSource.CreateCommand("SELECT '{\"id\": 1}'::json;"); + using var reader = await command.ExecuteReaderAsync(); + reader.Read(); + 
Assert.That(reader.GetFieldValue(0).Id, Is.EqualTo(1)); + } + + // Expect success, EnableDynamicJson before ConfigureJsonOptions + { + var builder = CreateDataSourceBuilder(); + builder.EnableDynamicJson(); + builder.ConfigureJsonOptions(new JsonSerializerOptions { PropertyNamingPolicy = JsonNamingPolicy.CamelCase }); + await using var dataSource = builder.Build(); + + await using var command = dataSource.CreateCommand("SELECT '{\"id\": 1}'::json;"); + using var reader = await command.ExecuteReaderAsync(); + reader.Read(); + Assert.That(reader.GetFieldValue(0).Id, Is.EqualTo(1)); + } + } } From 1b138216b02d902b516c3fc0e9bdd36d59a405ee Mon Sep 17 00:00:00 2001 From: Eric Erhardt Date: Wed, 14 Feb 2024 17:58:17 -0600 Subject: [PATCH 385/761] Address trimming/AOT warnings (#5578) Fix #5577 --- .../JsonDynamicTypeInfoResolverFactory.cs | 21 +++++++++++-------- .../UnsupportedTypeInfoResolver.cs | 5 +---- src/Npgsql/NpgsqlConnectionStringBuilder.cs | 4 ++++ 3 files changed, 17 insertions(+), 13 deletions(-) diff --git a/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs index a5ec6da2ed..6e1d291bd3 100644 --- a/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs @@ -28,15 +28,19 @@ public JsonDynamicTypeInfoResolverFactory(Type[]? jsonbClrTypes = null, Type[]? public override IPgTypeInfoResolver CreateResolver() => new Resolver(_jsonbClrTypes, _jsonClrTypes, _serializerOptions); public override IPgTypeInfoResolver CreateArrayResolver() => new ArrayResolver(_jsonbClrTypes, _jsonClrTypes, _serializerOptions); - public static void ThrowIfUnsupported(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + // Split into a nested class to avoid erroneous trimming/AOT warnings because the JsonDynamicTypeInfoResolverFactory is marked as incompatible. 
+ internal static class Support { - if (dataTypeName is { SchemaSpan: "pg_catalog", UnqualifiedNameSpan: "json" or "_json" or "jsonb" or "_jsonb" }) - throw new NotSupportedException( - string.Format( - NpgsqlStrings.DynamicJsonNotEnabled, - type is null || type == typeof(object) ? "" : type.Name, - nameof(NpgsqlSlimDataSourceBuilder.EnableDynamicJson), - typeof(TBuilder).Name)); + public static void ThrowIfUnsupported(Type? type, DataTypeName? dataTypeName) + { + if (dataTypeName is { SchemaSpan: "pg_catalog", UnqualifiedNameSpan: "json" or "_json" or "jsonb" or "_jsonb" }) + throw new NotSupportedException( + string.Format( + NpgsqlStrings.DynamicJsonNotEnabled, + type is null || type == typeof(object) ? "" : type.Name, + nameof(NpgsqlSlimDataSourceBuilder.EnableDynamicJson), + typeof(TBuilder).Name)); + } } [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] @@ -182,4 +186,3 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings, } } } - diff --git a/src/Npgsql/Internal/ResolverFactories/UnsupportedTypeInfoResolver.cs b/src/Npgsql/Internal/ResolverFactories/UnsupportedTypeInfoResolver.cs index 45d4568a26..2d47f86807 100644 --- a/src/Npgsql/Internal/ResolverFactories/UnsupportedTypeInfoResolver.cs +++ b/src/Npgsql/Internal/ResolverFactories/UnsupportedTypeInfoResolver.cs @@ -17,10 +17,7 @@ sealed class UnsupportedTypeInfoResolver : IPgTypeInfoResolver FullTextSearchTypeInfoResolverFactory.ThrowIfUnsupported(type, dataTypeName, options); LTreeTypeInfoResolverFactory.ThrowIfUnsupported(type, dataTypeName, options); - // The compiler can't see that these method(s) are completely safe, other methods force the attributes on the type(s). 
-#pragma warning disable IL3050, IL2026 - JsonDynamicTypeInfoResolverFactory.ThrowIfUnsupported(type, dataTypeName, options); -#pragma warning restore IL3050, IL2026 + JsonDynamicTypeInfoResolverFactory.Support.ThrowIfUnsupported(type, dataTypeName); switch (dataTypeName is null ? null : options.DatabaseInfo.GetPostgresType(dataTypeName.GetValueOrDefault())) { diff --git a/src/Npgsql/NpgsqlConnectionStringBuilder.cs b/src/Npgsql/NpgsqlConnectionStringBuilder.cs index 166a93ea68..88f3043fc6 100644 --- a/src/Npgsql/NpgsqlConnectionStringBuilder.cs +++ b/src/Npgsql/NpgsqlConnectionStringBuilder.cs @@ -15,6 +15,10 @@ namespace Npgsql; /// Provides a simple way to create and manage the contents of connection strings used by /// the class. /// +[UnconditionalSuppressMessage("ReflectionAnalysis", "IL2112:ReflectionToRequiresUnreferencedCode", + Justification = "Suppressing the same warnings as suppressed in the base DbConnectionStringBuilder. See https://github.com/dotnet/runtime/issues/97057")] +[UnconditionalSuppressMessage("ReflectionAnalysis", "IL2113:ReflectionToRequiresUnreferencedCode", + Justification = "Suppressing the same warnings as suppressed in the base DbConnectionStringBuilder. 
See https://github.com/dotnet/runtime/issues/97057")] public sealed partial class NpgsqlConnectionStringBuilder : DbConnectionStringBuilder, IDictionary { #region Fields From 727532dd2b612059ec72957346e90472e79c4543 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 17 Feb 2024 10:04:48 +0200 Subject: [PATCH 386/761] Bump xunit from 2.6.6 to 2.7.0 (#5585) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 101406b447..65481eff83 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -35,7 +35,7 @@ - + From 11c8229ed80470ec890333e9289b465cbbb471a2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 17 Feb 2024 11:01:20 +0200 Subject: [PATCH 387/761] Bump xunit.runner.visualstudio from 2.5.6 to 2.5.7 (#5584) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 65481eff83..e808aa7878 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -36,7 +36,7 @@ - + From 1a1dd66aebe5c3326af165e9386ef027d6ad8d57 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Mon, 26 Feb 2024 19:17:36 +0100 Subject: [PATCH 388/761] Use typeof(T) instead of the member type in exception message (#5597) --- .../Internal/Composites/ReflectionCompositeInfoFactory.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs b/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs index 8445cf40ec..857580cbfe 100644 --- a/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs +++ b/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs @@ -94,7 +94,7 @@ static class ReflectionCompositeInfoFactory return new CompositeInfo(compositeFields!, 
constructorInfo is null ? 0 : constructorParameters.Length, constructor); static NotSupportedException NotSupportedField(PostgresCompositeType composite, PostgresCompositeType.Field field, bool isField, string name, Type type) - => new($"No resolution could be found for ('{type.FullName}', '{field.Type.FullName}'). Mapping: CLR {(isField ? "field" : "property")} '{type.Name}.{name}' <-> Composite field '{composite.Name}.{field.Name}'"); + => new($"No mapping could be found for ('{type.FullName}', '{field.Type.FullName}'). Mapping: CLR {(isField ? "field" : "property")} '{typeof(T).FullName}.{name}' <-> Composite field '{composite.Name}.{field.Name}'"); } static Delegate CreateGetter(FieldInfo info) From b48677e16fd7e56bec206afa26702e0a2cc06998 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Mon, 26 Feb 2024 19:20:06 +0100 Subject: [PATCH 389/761] Correct composite buffer test before read (#5583) Fixes #5582 --- src/Npgsql/Internal/Converters/CompositeConverter.cs | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/Npgsql/Internal/Converters/CompositeConverter.cs b/src/Npgsql/Internal/Converters/CompositeConverter.cs index 32eee1357c..24f3d36329 100644 --- a/src/Npgsql/Internal/Converters/CompositeConverter.cs +++ b/src/Npgsql/Internal/Converters/CompositeConverter.cs @@ -58,13 +58,16 @@ public override ValueTask ReadAsync(PgReader reader, CancellationToken cancel async ValueTask Read(bool async, PgReader reader, CancellationToken cancellationToken) { + if (reader.ShouldBuffer(sizeof(int))) + await reader.Buffer(async, sizeof(int), cancellationToken).ConfigureAwait(false); + // TODO we can make a nice thread-static cache for this. 
using var builder = new CompositeBuilder(_composite); + var count = reader.ReadInt32(); if (count != _composite.Fields.Count) - throw new InvalidOperationException("Cannot read composite type with mismatched number of fields"); - if (reader.ShouldBuffer(sizeof(int))) - await reader.Buffer(async, sizeof(int), cancellationToken).ConfigureAwait(false); + throw new InvalidOperationException("Cannot read composite type with mismatched number of fields."); + foreach (var field in _composite.Fields) { if (reader.ShouldBuffer(sizeof(uint) + sizeof(int))) @@ -79,7 +82,7 @@ async ValueTask Read(bool async, PgReader reader, CancellationToken cancellat // We could remove this requirement by storing a dictionary of CompositeInfos keyed by backend. throw new InvalidCastException( $"Cannot read oid {oid} into composite field {field.Name} with oid {field.PgTypeId}. " + - $"This could be caused by a DDL change after this DataSource loaded its types, or a difference between column order of table composites between backends make sure these line up identically."); + $"This could be caused by a DDL change after this DataSource loaded its types, or a difference between column order of table composites between backends, make sure these line up identically."); if (length is -1) field.ReadDbNull(builder); From a356e286f958e2b8d129678ad5859c4a2b780d9f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 29 Feb 2024 01:43:08 +0200 Subject: [PATCH 390/761] Bump Microsoft.Data.SqlClient from 5.1.5 to 5.2.0 (#5603) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index e808aa7878..132bbd43e8 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -43,7 +43,7 @@ - + From 709e762d7a1ec92d42027f679bdd83e44385fbb0 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Tue, 5 Mar 2024 13:52:43 +0100 Subject: [PATCH 391/761] Improve 
AsObject/T implementation symmetry and remove downcast method (#5608) --- src/Npgsql/Internal/PgConverter.cs | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/src/Npgsql/Internal/PgConverter.cs b/src/Npgsql/Internal/PgConverter.cs index 8f44dfc295..3317361516 100644 --- a/src/Npgsql/Internal/PgConverter.cs +++ b/src/Npgsql/Internal/PgConverter.cs @@ -1,7 +1,7 @@ using System; using System.Buffers; +using System.Diagnostics; using System.Diagnostics.CodeAnalysis; -using System.Runtime.CompilerServices; using System.Threading; using System.Threading.Tasks; @@ -33,7 +33,8 @@ internal bool IsDbNullAsObject([NotNullWhen(false)] object? value, ref object? w DbNullPredicate.None => false, DbNullPredicate.PolymorphicNull => value is null or DBNull, // We do the null check to keep the NotNullWhen(false) invariant. - _ => IsDbNullValueAsObject(value, ref writeState) || (value is null && ThrowInvalidNullValue()) + DbNullPredicate.Custom => IsDbNullValueAsObject(value, ref writeState) || (value is null && ThrowInvalidNullValue()), + _ => ThrowDbNullPredicateOutOfRange() }; private protected abstract bool IsDbNullValueAsObject(object? value, ref object? writeState); @@ -82,6 +83,9 @@ private protected void ThrowIORequired(Size bufferRequirement) private protected static bool ThrowInvalidNullValue() => throw new ArgumentNullException("value", "Null value given for non-nullable type converter"); + private protected bool ThrowDbNullPredicateOutOfRange() + => throw new UnreachableException($"Unknown case {DbNullPredicateKind.ToString()}"); + protected bool CanConvertBufferedDefault(DataFormat format, out BufferRequirements bufferRequirements) { bufferRequirements = BufferRequirements.Value; @@ -99,23 +103,19 @@ private protected PgConverter(bool customDbNullPredicate) // Object null semantics as follows, if T is a struct (so excluding nullable) report false for null values, don't throw on the cast. 
// As a result this creates symmetry with IsDbNull when we're dealing with a struct T, as it cannot be passed null at all. private protected override bool IsDbNullValueAsObject(object? value, ref object? writeState) - => (default(T) is null || value is not null) && IsDbNullValue(Downcast(value), ref writeState); + => (default(T) is null || value is not null) && IsDbNullValue((T?)value, ref writeState); public bool IsDbNull([NotNullWhen(false)] T? value, ref object? writeState) - { - return DbNullPredicateKind switch + => DbNullPredicateKind switch { DbNullPredicate.Null => value is null, DbNullPredicate.None => false, DbNullPredicate.PolymorphicNull => value is null or DBNull, // We do the null check to keep the NotNullWhen(false) invariant. DbNullPredicate.Custom => IsDbNullValue(value, ref writeState) || (value is null && ThrowInvalidNullValue()), - _ => ThrowOutOfRange() + _ => ThrowDbNullPredicateOutOfRange() }; - bool ThrowOutOfRange() => throw new ArgumentOutOfRangeException(nameof(DbNullPredicateKind), "Unknown case", DbNullPredicateKind.ToString()); - } - public abstract T Read(PgReader reader); public abstract ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default); @@ -126,11 +126,7 @@ public bool IsDbNull([NotNullWhen(false)] T? value, ref object? writeState) internal sealed override Type TypeToConvert => typeof(T); internal sealed override Size GetSizeAsObject(SizeContext context, object value, ref object? writeState) - => GetSize(context, Downcast(value), ref writeState); - - [MethodImpl(MethodImplOptions.NoInlining)] - [return: NotNullIfNotNull(nameof(value))] - static T? Downcast(object? 
value) => (T?)value; + => GetSize(context, (T)value, ref writeState); } static class PgConverterExtensions From b9811a2f8f2072211396d067916878e9a21bb5cd Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Tue, 5 Mar 2024 14:31:31 +0100 Subject: [PATCH 392/761] Remove unknown pgtype dependency (#5596) Fixes #5503 --- .../BackendMessages/RowDescriptionMessage.cs | 2 +- src/Npgsql/Internal/NpgsqlDatabaseInfo.cs | 5 ++ src/Npgsql/Internal/PgSerializerOptions.cs | 8 ++- src/Npgsql/Internal/Postgres/DataTypeName.cs | 10 +-- src/Npgsql/Internal/Postgres/Oid.cs | 1 + src/Npgsql/Internal/Postgres/PgTypeId.cs | 6 +- src/Npgsql/NpgsqlParameter.cs | 40 +++++++---- src/Npgsql/NpgsqlParameter`.cs | 2 +- test/Npgsql.Tests/NpgsqlParameterTests.cs | 66 +++++++++++++++++++ 9 files changed, 117 insertions(+), 23 deletions(-) diff --git a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs index 9688fa7fc7..1dd1045e21 100644 --- a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs +++ b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs @@ -368,7 +368,7 @@ void GetInfoSlow(Type? type, out ColumnInfo lastColumnInfo) // For text we'll fall back to any available text converter for the expected clr type or throw. if (!typeInfo.TryBind(Field, DataFormat, out converterInfo)) { - typeInfo = AdoSerializerHelpers.GetTypeInfoForReading(type ?? typeof(string), _serializerOptions.UnknownPgType, _serializerOptions); + typeInfo = AdoSerializerHelpers.GetTypeInfoForReading(type ?? 
typeof(string), _serializerOptions.TextPgType, _serializerOptions); converterInfo = typeInfo.Bind(Field, DataFormat); lastColumnInfo = new(converterInfo, DataFormat, type != converterInfo.TypeToConvert || converterInfo.IsBoxingConverter); } diff --git a/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs b/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs index a05547ea1b..fed3f8c165 100644 --- a/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs +++ b/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs @@ -228,6 +228,11 @@ public bool TryGetPostgresTypeByName(string pgName, [NotNullWhen(true)] out Post internal void ProcessTypes() { + var unspecified = new PostgresBaseType(DataTypeName.Unspecified, Oid.Unspecified); + ByOID[Oid.Unspecified.Value] = unspecified; + ByFullName[unspecified.DataTypeName.Value] = unspecified; + ByName[unspecified.InternalName] = unspecified; + foreach (var type in GetTypes()) { ByOID[type.OID] = type; diff --git a/src/Npgsql/Internal/PgSerializerOptions.cs b/src/Npgsql/Internal/PgSerializerOptions.cs index 0057e1f455..9285d295e6 100644 --- a/src/Npgsql/Internal/PgSerializerOptions.cs +++ b/src/Npgsql/Internal/PgSerializerOptions.cs @@ -25,11 +25,13 @@ internal PgSerializerOptions(NpgsqlDatabaseInfo databaseInfo, PgTypeInfoResolver _resolverChain = resolverChain ?? new(); _timeZoneProvider = timeZoneProvider; DatabaseInfo = databaseInfo; - UnknownPgType = databaseInfo.GetPostgresType("unknown"); + UnspecifiedDBNullTypeInfo = new(this, new Converters.Internal.VoidConverter(), DataTypeName.Unspecified, unboxedType: typeof(DBNull)); } - // Represents the 'unknown' type, which can be used for reading and writing arbitrary text values. - public PostgresType UnknownPgType { get; } + internal PgTypeInfo UnspecifiedDBNullTypeInfo { get; } + + PostgresType? _textPgType; + internal PostgresType TextPgType => _textPgType ??= DatabaseInfo.GetPostgresType(DataTypeNames.Text); // Used purely for type mapping, where we don't have a full set of types but resolvers might know enough. 
readonly bool _introspectionInstance; diff --git a/src/Npgsql/Internal/Postgres/DataTypeName.cs b/src/Npgsql/Internal/Postgres/DataTypeName.cs index 1589e2da91..d20e479f85 100644 --- a/src/Npgsql/Internal/Postgres/DataTypeName.cs +++ b/src/Npgsql/Internal/Postgres/DataTypeName.cs @@ -48,9 +48,9 @@ public DataTypeName(string fullyQualifiedDataTypeName) internal static DataTypeName ValidatedName(string fullyQualifiedDataTypeName) => new(fullyQualifiedDataTypeName, validated: true); - // Includes schema unless it's pg_catalog. + // Includes schema unless it's pg_catalog or the name is unspecified. public string DisplayName => - Value.StartsWith("pg_catalog", StringComparison.Ordinal) + Value.StartsWith("pg_catalog", StringComparison.Ordinal) || Value == Unspecified ? UnqualifiedDisplayName : Schema + "." + UnqualifiedDisplayName; @@ -63,11 +63,13 @@ internal static DataTypeName ValidatedName(string fullyQualifiedDataTypeName) public string Value => _value is null ? ThrowDefaultException() : _value; static string ThrowDefaultException() => - throw new InvalidOperationException($"This operation cannot be performed on a default instance of {nameof(DataTypeName)}."); + throw new InvalidOperationException($"This operation cannot be performed on a default value of {nameof(DataTypeName)}."); public static implicit operator string(DataTypeName value) => value.Value; - public bool IsDefault => _value is null; + // This contains two invalid sql identifiers (schema and name are both separate identifiers, and would both have to be quoted to be valid). + // Given this is an invalid name it's fine for us to represent a fully qualified 'unspecified' name with it. 
+ public static DataTypeName Unspecified => new("-.-", validated: true); public bool IsArray => UnqualifiedNameSpan.StartsWith("_".AsSpan(), StringComparison.Ordinal); diff --git a/src/Npgsql/Internal/Postgres/Oid.cs b/src/Npgsql/Internal/Postgres/Oid.cs index ac9577609d..e6fcad6f4a 100644 --- a/src/Npgsql/Internal/Postgres/Oid.cs +++ b/src/Npgsql/Internal/Postgres/Oid.cs @@ -9,6 +9,7 @@ namespace Npgsql.Internal.Postgres; public static explicit operator uint(Oid oid) => oid.Value; public static implicit operator Oid(uint oid) => new(oid); public uint Value { get; init; } + public static Oid Unspecified => new(0); public override string ToString() => Value.ToString(); public bool Equals(Oid other) => Value == other.Value; diff --git a/src/Npgsql/Internal/Postgres/PgTypeId.cs b/src/Npgsql/Internal/Postgres/PgTypeId.cs index 37be0d2066..c5a40d22ca 100644 --- a/src/Npgsql/Internal/Postgres/PgTypeId.cs +++ b/src/Npgsql/Internal/Postgres/PgTypeId.cs @@ -15,8 +15,8 @@ namespace Npgsql.Internal.Postgres; public PgTypeId(Oid oid) => _oid = oid; [MemberNotNullWhen(true, nameof(_dataTypeName))] - public bool IsDataTypeName => !_dataTypeName.IsDefault; - public bool IsOid => _dataTypeName.IsDefault; + public bool IsDataTypeName => _dataTypeName != default; + public bool IsOid => _dataTypeName == default; public DataTypeName DataTypeName => IsDataTypeName ? _dataTypeName : throw new InvalidOperationException("This value does not describe a DataTypeName."); @@ -42,4 +42,6 @@ public bool Equals(PgTypeId other) public override int GetHashCode() => IsOid ? 
_oid.GetHashCode() : _dataTypeName.GetHashCode(); public static bool operator ==(PgTypeId left, PgTypeId right) => left.Equals(right); public static bool operator !=(PgTypeId left, PgTypeId right) => !left.Equals(right); + + internal bool IsUnspecified => IsOid && _oid == Oid.Unspecified || _dataTypeName == DataTypeName.Unspecified; } diff --git a/src/Npgsql/NpgsqlParameter.cs b/src/Npgsql/NpgsqlParameter.cs index 997eb6f56d..d1dba6af5d 100644 --- a/src/Npgsql/NpgsqlParameter.cs +++ b/src/Npgsql/NpgsqlParameter.cs @@ -40,10 +40,10 @@ public class NpgsqlParameter : DbParameter, IDbDataParameter, ICloneable internal string TrimmedName { get; private protected set; } = PositionalName; internal const string PositionalName = ""; - internal PgTypeInfo? TypeInfo { get; private set; } + private protected PgTypeInfo? TypeInfo { get; private set; } internal PgTypeId PgTypeId { get; private set; } - internal PgConverter? Converter { get; private set; } + private protected PgConverter? Converter { get; private set; } internal DataFormat Format { get; private protected set; } private protected Size? WriteSize { get; set; } @@ -278,7 +278,7 @@ public override object? Value get => _value; set { - if (value is null || _value?.GetType() != value.GetType()) + if (ShouldResetObjectTypeInfo(value)) ResetTypeInfo(); else ResetBindingInfo(); @@ -497,6 +497,17 @@ public sealed override string SourceColumn Type? GetValueType(Type staticValueType) => staticValueType != typeof(object) ? staticValueType : Value?.GetType(); + internal bool ShouldResetObjectTypeInfo(object? value) + { + var currentType = TypeInfo?.Type; + if (currentType is null || value is null) + return false; + + var valueType = value.GetType(); + // We don't want to reset the type info when the value is a DBNull, we're able to write it out with any type info. + return valueType != typeof(DBNull) && currentType != valueType; + } + internal void GetResolutionInfo(out PgTypeInfo? typeInfo, out PgConverter? 
converter, out PgTypeId pgTypeId) { typeInfo = TypeInfo; @@ -540,6 +551,7 @@ _npgsqlDbType is { } npgsqlDbType pgTypeId = options.ToCanonicalTypeId(pgType.GetRepresentationalType()); } + var unspecifiedDBNull = false; var valueType = StaticValueType; if (valueType == typeof(object)) { @@ -551,14 +563,23 @@ _npgsqlDbType is { } npgsqlDbType } // We treat object typed DBNull values as default info. + // Unless we don't have a pgTypeId either, at which point we'll use an 'unspecified' PgTypeInfo to help us write a NULL. if (valueType == typeof(DBNull)) { - valueType = null; - pgTypeId ??= options.ToCanonicalTypeId(options.UnknownPgType); + if (pgTypeId is null) + { + unspecifiedDBNull = true; + typeInfo = options.UnspecifiedDBNullTypeInfo; + } + else + valueType = null; } } - TypeInfo = typeInfo = AdoSerializerHelpers.GetTypeInfoForWriting(valueType, pgTypeId, options, _npgsqlDbType); + if (!unspecifiedDBNull) + typeInfo = AdoSerializerHelpers.GetTypeInfoForWriting(valueType, pgTypeId, options, _npgsqlDbType); + + TypeInfo = typeInfo; } // This step isn't part of BindValue because we need to know the PgTypeId beforehand for things like SchemaOnly with null values. @@ -566,7 +587,7 @@ _npgsqlDbType is { } npgsqlDbType // TODO we could expose a property on a Converter/TypeInfo to indicate whether it's immutable, at that point we can reuse. if (!previouslyResolved || typeInfo!.IsResolverInfo) { - ResetBindingInfo(); // No need for ResetConverterResolution as we'll mutate those fields directly afterwards. 
+ ResetBindingInfo(); var resolution = ResolveConverter(typeInfo!); Converter = resolution.Converter; PgTypeId = resolution.PgTypeId; @@ -735,11 +756,6 @@ public override void ResetDbType() private protected void ResetTypeInfo() { TypeInfo = null; - ResetConverterResolution(); - } - - void ResetConverterResolution() - { _asObject = false; Converter = null; PgTypeId = default; diff --git a/src/Npgsql/NpgsqlParameter`.cs b/src/Npgsql/NpgsqlParameter`.cs index 94705aaae4..a749734643 100644 --- a/src/Npgsql/NpgsqlParameter`.cs +++ b/src/Npgsql/NpgsqlParameter`.cs @@ -26,7 +26,7 @@ public T? TypedValue get => _typedValue; set { - if (typeof(T) == typeof(object) && (value is null || _typedValue?.GetType() != value.GetType())) + if (typeof(T) == typeof(object) && ShouldResetObjectTypeInfo(value)) ResetTypeInfo(); else ResetBindingInfo(); diff --git a/test/Npgsql.Tests/NpgsqlParameterTests.cs b/test/Npgsql.Tests/NpgsqlParameterTests.cs index 59f26cbe26..4f8d89a9f0 100644 --- a/test/Npgsql.Tests/NpgsqlParameterTests.cs +++ b/test/Npgsql.Tests/NpgsqlParameterTests.cs @@ -4,6 +4,7 @@ using System.Data; using System.Data.Common; using System.Threading.Tasks; +using Npgsql.Internal.Postgres; namespace Npgsql.Tests; @@ -693,6 +694,71 @@ public void Null_value_with_nullable_type() Assert.That(reader.GetFieldValue(0), Is.Null); } + [Test] + public void DBNull_reuses_type_info([Values]bool generic) + { + // Bootstrap datasource. + using (var _ = OpenConnection()) {} + + var param = generic ? new NpgsqlParameter { Value = "value" } : new NpgsqlParameter { Value = "value" }; + param.ResolveTypeInfo(DataSource.SerializerOptions); + param.GetResolutionInfo(out var typeInfo, out _, out _); + Assert.That(typeInfo, Is.Not.Null); + + // Make sure we don't reset the type info when setting DBNull. 
+ param.Value = DBNull.Value; + param.GetResolutionInfo(out var secondTypeInfo, out _, out _); + Assert.That(typeInfo, Is.SameAs(secondTypeInfo)); + + // Make sure we don't resolve a different type info either. + param.ResolveTypeInfo(DataSource.SerializerOptions); + param.GetResolutionInfo(out var thirdTypeInfo, out _, out _); + Assert.That(secondTypeInfo, Is.SameAs(thirdTypeInfo)); + } + + [Test] + public void DBNull_followed_by_non_null_reresolves([Values]bool generic) + { + // Bootstrap datasource. + using (var _ = OpenConnection()) {} + + var param = generic ? new NpgsqlParameter { Value = DBNull.Value } : new NpgsqlParameter { Value = DBNull.Value }; + param.ResolveTypeInfo(DataSource.SerializerOptions); + param.GetResolutionInfo(out var typeInfo, out _, out var pgTypeId); + Assert.That(typeInfo, Is.Not.Null); + Assert.That(pgTypeId.IsUnspecified, Is.True); + + param.Value = "value"; + param.GetResolutionInfo(out var secondTypeInfo, out _, out _); + Assert.That(secondTypeInfo, Is.Null); + + // Make sure we don't resolve the same type info either. + param.ResolveTypeInfo(DataSource.SerializerOptions); + param.GetResolutionInfo(out var thirdTypeInfo, out _, out _); + Assert.That(typeInfo, Is.Not.SameAs(thirdTypeInfo)); + } + + [Test] + public void Changing_value_type_reresolves([Values]bool generic) + { + // Bootstrap datasource. + using (var _ = OpenConnection()) {} + + var param = generic ? new NpgsqlParameter { Value = "value" } : new NpgsqlParameter { Value = "value" }; + param.ResolveTypeInfo(DataSource.SerializerOptions); + param.GetResolutionInfo(out var typeInfo, out _, out _); + Assert.That(typeInfo, Is.Not.Null); + + param.Value = 1; + param.GetResolutionInfo(out var secondTypeInfo, out _, out _); + Assert.That(secondTypeInfo, Is.Null); + + // Make sure we don't resolve a different type info either. 
+ param.ResolveTypeInfo(DataSource.SerializerOptions); + param.GetResolutionInfo(out var thirdTypeInfo, out _, out _); + Assert.That(typeInfo, Is.Not.SameAs(thirdTypeInfo)); + } + #if NeedsPorting [Test] [Category ("NotWorking")] From da5cc3789651a81d95b31a96a3751c1df7b7845f Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Tue, 5 Mar 2024 14:36:02 +0100 Subject: [PATCH 393/761] Address feedback of #5596 --- test/Npgsql.Tests/NpgsqlParameterTests.cs | 24 +++++++++++------------ 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/test/Npgsql.Tests/NpgsqlParameterTests.cs b/test/Npgsql.Tests/NpgsqlParameterTests.cs index 4f8d89a9f0..9a4610aadd 100644 --- a/test/Npgsql.Tests/NpgsqlParameterTests.cs +++ b/test/Npgsql.Tests/NpgsqlParameterTests.cs @@ -697,9 +697,6 @@ public void Null_value_with_nullable_type() [Test] public void DBNull_reuses_type_info([Values]bool generic) { - // Bootstrap datasource. - using (var _ = OpenConnection()) {} - var param = generic ? new NpgsqlParameter { Value = "value" } : new NpgsqlParameter { Value = "value" }; param.ResolveTypeInfo(DataSource.SerializerOptions); param.GetResolutionInfo(out var typeInfo, out _, out _); @@ -708,20 +705,17 @@ public void DBNull_reuses_type_info([Values]bool generic) // Make sure we don't reset the type info when setting DBNull. param.Value = DBNull.Value; param.GetResolutionInfo(out var secondTypeInfo, out _, out _); - Assert.That(typeInfo, Is.SameAs(secondTypeInfo)); + Assert.That(secondTypeInfo, Is.SameAs(typeInfo)); // Make sure we don't resolve a different type info either. param.ResolveTypeInfo(DataSource.SerializerOptions); param.GetResolutionInfo(out var thirdTypeInfo, out _, out _); - Assert.That(secondTypeInfo, Is.SameAs(thirdTypeInfo)); + Assert.That(thirdTypeInfo, Is.SameAs(secondTypeInfo)); } [Test] public void DBNull_followed_by_non_null_reresolves([Values]bool generic) { - // Bootstrap datasource. - using (var _ = OpenConnection()) {} - var param = generic ? 
new NpgsqlParameter { Value = DBNull.Value } : new NpgsqlParameter { Value = DBNull.Value }; param.ResolveTypeInfo(DataSource.SerializerOptions); param.GetResolutionInfo(out var typeInfo, out _, out var pgTypeId); @@ -735,15 +729,12 @@ public void DBNull_followed_by_non_null_reresolves([Values]bool generic) // Make sure we don't resolve the same type info either. param.ResolveTypeInfo(DataSource.SerializerOptions); param.GetResolutionInfo(out var thirdTypeInfo, out _, out _); - Assert.That(typeInfo, Is.Not.SameAs(thirdTypeInfo)); + Assert.That(thirdTypeInfo, Is.Not.SameAs(typeInfo)); } [Test] public void Changing_value_type_reresolves([Values]bool generic) { - // Bootstrap datasource. - using (var _ = OpenConnection()) {} - var param = generic ? new NpgsqlParameter { Value = "value" } : new NpgsqlParameter { Value = "value" }; param.ResolveTypeInfo(DataSource.SerializerOptions); param.GetResolutionInfo(out var typeInfo, out _, out _); @@ -756,7 +747,7 @@ public void Changing_value_type_reresolves([Values]bool generic) // Make sure we don't resolve a different type info either. param.ResolveTypeInfo(DataSource.SerializerOptions); param.GetResolutionInfo(out var thirdTypeInfo, out _, out _); - Assert.That(typeInfo, Is.Not.SameAs(thirdTypeInfo)); + Assert.That(thirdTypeInfo, Is.Not.SameAs(typeInfo)); } #if NeedsPorting @@ -871,4 +862,11 @@ public void LocaleId () Assert.AreEqual(15, parameter.LocaleId, "#2"); } #endif + + [OneTimeSetUp] + public async Task Bootstrap() + { + // Bootstrap datasource. 
+ await using (var _ = await OpenConnectionAsync()) {} + } } From 3ceb33f9b887459d6e4045fd4b20f19909855792 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Sun, 10 Mar 2024 20:58:50 +0100 Subject: [PATCH 394/761] Fix composite type constructor selection (#5615) Fixes #5614 --- .../ReflectionCompositeInfoFactory.cs | 42 +++--- test/Npgsql.Tests/Types/CompositeTests.cs | 123 ++++++++++++++++++ 2 files changed, 143 insertions(+), 22 deletions(-) diff --git a/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs b/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs index 857580cbfe..d6c51b8344 100644 --- a/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs +++ b/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs @@ -140,7 +140,7 @@ static Delegate CreateSetter(PropertyInfo info) var invalidOpExceptionMessageConstructor = typeof(InvalidOperationException).GetConstructor(new []{ typeof(string) })!; var body = info.SetMethod is null || !info.SetMethod.IsPublic ? (Expression)Expression.Throw(Expression.New(invalidOpExceptionMessageConstructor, - Expression.Constant($"No (public) getter for '{info}' on type {typeof(T)}")), info.PropertyType) + Expression.Constant($"No (public) setter for '{info}' on type {typeof(T)}")), info.PropertyType) : Expression.Call(UnboxAny(instance, typeof(T)), info.SetMethod, value); return Expression @@ -249,20 +249,15 @@ static CompositeFieldInfo CreateCompositeFieldInfo(string name, Type type, PgTyp static (ConstructorInfo? ConstructorInfo, int[] ParameterFieldMap) MapBestMatchingConstructor<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors)] T>(IReadOnlyList fields, INpgsqlNameTranslator nameTranslator) { ConstructorInfo? clrDefaultConstructor = null; + Exception? 
duplicatesException = null; foreach (var constructor in typeof(T).GetConstructors().OrderByDescending(x => x.GetParameters().Length)) { var parameters = constructor.GetParameters(); - if (parameters.Length != fields.Count) - { - if (parameters.Length == 0) - clrDefaultConstructor = constructor; + if (parameters.Length == 0) + clrDefaultConstructor = constructor; - continue; - } - - var parametersMapped = 0; var parametersMap = new int[parameters.Length]; - + Array.Fill(parametersMap, -1); for (var i = 0; i < parameters.Length; i++) { var clrParameter = parameters[i]; @@ -273,27 +268,30 @@ static CompositeFieldInfo CreateCompositeFieldInfo(string name, Type type, PgTyp for (var pgFieldIndex = 0; pgFieldIndex < fields.Count; pgFieldIndex++) { - var pgField = fields[pgFieldIndex]; - if (pgField.Name != name) - continue; - - parametersMapped++; - parametersMap[i] = pgFieldIndex; - break; + if (fields[pgFieldIndex].Name == name) + { + parametersMap[i] = pgFieldIndex; + break; + } } } - var duplicates = parametersMap.GroupBy(x => x).Where(g => g.Count() > 1).ToArray(); - if (duplicates.Length > 0) - throw new AmbiguousMatchException($"Multiple constructor parameters are mapped to the '{fields[duplicates[0].Key].Name}' field."); + if (parametersMap.Any(x => x is -1)) + continue; - if (parametersMapped == parameters.Length) + var duplicates = parametersMap.GroupBy(x => x).Where(g => g.Count() > 1).ToArray(); + if (duplicates.Length is 0) return (constructor, parametersMap); + + duplicatesException = new AmbiguousMatchException($"Multiple parameters are mapped to the field '{fields[duplicates[0].Key].Name}' in constructor: {constructor}."); } + if (duplicatesException is not null) + throw duplicatesException; + if (clrDefaultConstructor is null && !typeof(T).IsValueType) throw new InvalidOperationException($"No parameterless constructor defined for type '{typeof(T)}'."); - return (clrDefaultConstructor, Array.Empty()); + return (clrDefaultConstructor, []); } } diff --git 
a/test/Npgsql.Tests/Types/CompositeTests.cs b/test/Npgsql.Tests/Types/CompositeTests.cs index 96e7dac0fa..36257e126a 100644 --- a/test/Npgsql.Tests/Types/CompositeTests.cs +++ b/test/Npgsql.Tests/Types/CompositeTests.cs @@ -1,5 +1,6 @@ using System; using System.Linq; +using System.Reflection; using System.Threading.Tasks; using Npgsql.PostgresTypes; using NpgsqlTypes; @@ -504,8 +505,130 @@ await connection.ExecuteNonQueryAsync(@$" Assert.That(elemType, Is.SameAs(comp1Type)); } + [Test] + public async Task DuplicateConstructorParameters() + { + await using var adminConnection = await OpenConnectionAsync(); + var type = await GetTempTypeName(adminConnection); + + await adminConnection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS (long int8, boolean bool)"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.MapComposite(type); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + var ex = Assert.ThrowsAsync(async () => await AssertType( + connection, + new DuplicateOneLongOneBool(true, 1), + "(1,t)", + type, + npgsqlDbType: null)); + Assert.That(ex!.InnerException, Is.TypeOf()); + } + + [Test] + public async Task PartialConstructorMissingSetter() + { + await using var adminConnection = await OpenConnectionAsync(); + var type = await GetTempTypeName(adminConnection); + + await adminConnection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS (long int8, boolean bool)"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.MapComposite(type); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + var ex = Assert.ThrowsAsync(async () => await AssertTypeRead( + connection, + "(1,t)", + type, + new MissingSetterOneLongOneBool(true, 1))); + Assert.That(ex, Is.TypeOf().With.Message.Contains("No (public) setter for")); + } + + [Test] + public async Task PartialConstructorWorks() 
+ { + await using var adminConnection = await OpenConnectionAsync(); + var type = await GetTempTypeName(adminConnection); + + await adminConnection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS (long int8, boolean bool)"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.MapComposite(type); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + await AssertType( + connection, + new OneLongOneBool(1) { BooleanValue = true }, + "(1,t)", + type, + npgsqlDbType: null); + } + #region Test Types + readonly struct DuplicateOneLongOneBool + { + public DuplicateOneLongOneBool(bool boolean, [PgName("boolean")]int @bool) + { + } + + [PgName("long")] + public long LongValue { get; } + + [PgName("boolean")] + public bool BooleanValue { get; } + } + + readonly struct MissingSetterOneLongOneBool + { + public MissingSetterOneLongOneBool(long @long) + { + LongValue = @long; + } + + public MissingSetterOneLongOneBool(bool boolean, [PgName("boolean")]int @bool) + { + } + + [PgName("long")] + public long LongValue { get; } + + [PgName("boolean")] + public bool BooleanValue { get; } + } + + struct OneLongOneBool + { + public OneLongOneBool(bool boolean, [PgName("boolean")]int @bool) + { + } + + public OneLongOneBool(long @long) + { + LongValue = @long; + } + + public OneLongOneBool(double other) + { + } + + public OneLongOneBool(int boolean, [PgName("boolean")]bool @bool) + { + } + + [PgName("long")] + public long LongValue { get; } + + [PgName("boolean")] + public bool BooleanValue { get; set; } + } + + record SomeComposite { public int X { get; set; } From 76ada6b85a18592e36f866043a088ad0086d0316 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Mon, 11 Mar 2024 18:03:53 +0100 Subject: [PATCH 395/761] Sync well known text types to actual mapping list (#5617) Fixes #5599 --- .../JsonNetPocoTypeInfoResolverFactory.cs | 2 +- src/Npgsql/Internal/PgSerializerOptions.cs | 15 
++++++++++----- .../AdoTypeInfoResolverFactory.cs | 1 + .../ExtraConversionsTypeInfoResolverFactory.cs | 1 + .../JsonDynamicTypeInfoResolverFactory.cs | 2 +- 5 files changed, 14 insertions(+), 7 deletions(-) diff --git a/src/Npgsql.Json.NET/Internal/JsonNetPocoTypeInfoResolverFactory.cs b/src/Npgsql.Json.NET/Internal/JsonNetPocoTypeInfoResolverFactory.cs index 7f4b4219f0..27f719deca 100644 --- a/src/Npgsql.Json.NET/Internal/JsonNetPocoTypeInfoResolverFactory.cs +++ b/src/Npgsql.Json.NET/Internal/JsonNetPocoTypeInfoResolverFactory.cs @@ -73,7 +73,7 @@ static void AddUserMappings(TypeInfoMappingCollection mappings, bool jsonb, Type protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) { // Match all types except null, object and text types as long as DataTypeName (json/jsonb) is present. - if (type is null || type == typeof(object) || Array.IndexOf(PgSerializerOptions.WellKnownTextTypes, type) != -1 + if (type is null || type == typeof(object) || PgSerializerOptions.IsWellKnownTextType(type) || dataTypeName != JsonbDataTypeName && dataTypeName != JsonDataTypeName) return null; diff --git a/src/Npgsql/Internal/PgSerializerOptions.cs b/src/Npgsql/Internal/PgSerializerOptions.cs index 9285d295e6..405d1d11da 100644 --- a/src/Npgsql/Internal/PgSerializerOptions.cs +++ b/src/Npgsql/Internal/PgSerializerOptions.cs @@ -1,4 +1,5 @@ using System; +using System.IO; using System.Runtime.CompilerServices; using System.Text; using Npgsql.Internal.Postgres; @@ -57,11 +58,15 @@ public IPgTypeInfoResolver TypeInfoResolver public ArrayNullabilityMode ArrayNullabilityMode { get; init; } = ArrayNullabilityMode.Never; public INpgsqlNameTranslator DefaultNameTranslator { get; init; } = NpgsqlSnakeCaseNameTranslator.Instance; - public static Type[] WellKnownTextTypes { get; } = { - typeof(string), typeof(char[]), typeof(byte[]), - typeof(ArraySegment), typeof(ArraySegment?), - typeof(char), typeof(char?) 
- }; + public static bool IsWellKnownTextType(Type type) + { + type = type.IsValueType ? Nullable.GetUnderlyingType(type) ?? type : type; + return Array.IndexOf([ + typeof(string), typeof(char), + typeof(char[]), typeof(ReadOnlyMemory), typeof(ArraySegment), + typeof(byte[]), typeof(ReadOnlyMemory) + ], type) != -1 || typeof(Stream).IsAssignableFrom(type); + } internal bool RangesEnabled => _resolverChain.RangesEnabled; internal bool MultirangesEnabled => _resolverChain.MultirangesEnabled; diff --git a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs index 8c0d6e9eaf..61f1bbc2f3 100644 --- a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs @@ -75,6 +75,7 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) static (options, mapping, _) => mapping.CreateInfo(options, new MoneyConverter()), MatchRequirement.DataTypeName); // Text + // Update PgSerializerOptions.IsWellKnownTextType(Type) after any changes to this list. 
mappings.AddType(DataTypeNames.Text, static (options, mapping, _) => mapping.CreateInfo(options, new StringTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text), isDefault: true); mappings.AddStructType(DataTypeNames.Text, diff --git a/src/Npgsql/Internal/ResolverFactories/ExtraConversionsTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/ExtraConversionsTypeInfoResolverFactory.cs index c92d687d8e..9b5de89736 100644 --- a/src/Npgsql/Internal/ResolverFactories/ExtraConversionsTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/ExtraConversionsTypeInfoResolverFactory.cs @@ -104,6 +104,7 @@ static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings) static (options, mapping, _) => mapping.CreateInfo(options, new StringBitStringConverter())); // Text + // Update PgSerializerOptions.IsWellKnownTextType(Type) after any changes to this list. mappings.AddType(DataTypeNames.Text, static (options, mapping, _) => mapping.CreateInfo(options, new CharArrayTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text)); mappings.AddStructType>(DataTypeNames.Text, diff --git a/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs index 6e1d291bd3..2515cf9a5b 100644 --- a/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs @@ -121,7 +121,7 @@ void AddUserMappings(bool jsonb, Type[] clrTypes) protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) { // Match all types except null, object and text types as long as DataTypeName (json/jsonb) is present. 
- if (type is null || type == typeof(object) || Array.IndexOf(PgSerializerOptions.WellKnownTextTypes, type) != -1 + if (type is null || type == typeof(object) || PgSerializerOptions.IsWellKnownTextType(type) || dataTypeName != DataTypeNames.Jsonb && dataTypeName != DataTypeNames.Json) return null; From f8ad54c68a13a4c1973a192ec0b66265e95bf1e5 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Tue, 12 Mar 2024 18:42:27 +0100 Subject: [PATCH 396/761] Improve binary import perf (#5598) --- src/Npgsql/NpgsqlBinaryImporter.cs | 276 +++++++++++++---------------- src/Npgsql/ThrowHelper.cs | 17 -- 2 files changed, 120 insertions(+), 173 deletions(-) diff --git a/src/Npgsql/NpgsqlBinaryImporter.cs b/src/Npgsql/NpgsqlBinaryImporter.cs index b644b2e710..7a9caa5595 100644 --- a/src/Npgsql/NpgsqlBinaryImporter.cs +++ b/src/Npgsql/NpgsqlBinaryImporter.cs @@ -1,4 +1,5 @@ using System; +using System.Runtime.CompilerServices; using System.Threading; using System.Threading.Tasks; using Microsoft.Extensions.Logging; @@ -35,7 +36,7 @@ public sealed class NpgsqlBinaryImporter : ICancelable /// /// The number of columns, as returned from the backend in the CopyInResponse. 
/// - internal int NumColumns { get; private set; } + int NumColumns => _params.Length; bool InMiddleOfRow => _column != -1 && _column != NumColumns; @@ -98,8 +99,7 @@ internal async Task Init(string copyFromCommand, bool async, CancellationToken c throw _connector.UnexpectedMessageReceived(msg.Code); } - NumColumns = copyInResponse.NumColumns; - _params = new NpgsqlParameter[NumColumns]; + _params = new NpgsqlParameter[copyInResponse.NumColumns]; _rowsImported = 0; _buf.StartCopyMode(); WriteHeader(); @@ -132,9 +132,8 @@ async Task StartRow(bool async, CancellationToken cancellationToken = default) { CheckReady(); cancellationToken.ThrowIfCancellationRequested(); - - if (_column != -1 && _column != NumColumns) - ThrowHelper.ThrowInvalidOperationException_BinaryImportParametersMismatch(NumColumns, _column); + if (_column is not -1 && _column != NumColumns) + ThrowColumnMismatch(); if (_buf.WriteSpaceLeft < 2) await _buf.Flush(async, cancellationToken).ConfigureAwait(false); @@ -154,7 +153,8 @@ async Task StartRow(bool async, CancellationToken cancellationToken = default) /// corruption will occur. If in doubt, use to manually /// specify the type. /// - public void Write(T value) => Write(async: false, value).GetAwaiter().GetResult(); + public void Write(T value) + => Write(async: false, value, npgsqlDbType: null, dataTypeName: null).GetAwaiter().GetResult(); /// /// Writes a single column in the current row. @@ -168,37 +168,8 @@ async Task StartRow(bool async, CancellationToken cancellationToken = default) /// corruption will occur. If in doubt, use to manually /// specify the type. 
/// - public Task WriteAsync(T value, CancellationToken cancellationToken = default) => Write(async: true, value, cancellationToken); - - Task Write(bool async, T value, CancellationToken cancellationToken = default) - { - CheckColumnIndex(); - if (cancellationToken.IsCancellationRequested) - return Task.FromCanceled(cancellationToken); - - // First row, create the parameter object - ref var p = ref _params[_column]; - if (p is not NpgsqlParameter typedParam) - typedParam = new NpgsqlParameter(); - - // We only report previous values if anything actually changed, this saves some checks during the write. - // For object typed parameters when we don't have any other data we always have to pass the previousParam. - // In such cases the runtime type will define the entire postgres type lookup. - PgTypeInfo? previousTypeInfo = null; - PgConverter? previousConverter = null; - PgTypeId previousTypeId = default; - if (p is not null && (typeof(T) == typeof(object) || p._npgsqlDbType is not null || p._dataTypeName is not null)) - { - p.GetResolutionInfo(out previousTypeInfo, out previousConverter, out previousTypeId); - if (ReferenceEquals(p, typedParam)) - p.ResetDbType(); - } - - if (!ReferenceEquals(p, typedParam)) - p = typedParam; - - return Write(async, value, typedParam, previousTypeInfo, previousConverter, previousTypeId, cancellationToken); - } + public Task WriteAsync(T value, CancellationToken cancellationToken = default) + => Write(async: true, value, npgsqlDbType: null, dataTypeName: null, cancellationToken); /// /// Writes a single column in the current row as type . @@ -212,7 +183,7 @@ Task Write(bool async, T value, CancellationToken cancellationToken = default /// /// The .NET type of the column to be written. 
public void Write(T value, NpgsqlDbType npgsqlDbType) => - Write(async: false, value, npgsqlDbType).GetAwaiter().GetResult(); + Write(async: false, value, npgsqlDbType, dataTypeName: null).GetAwaiter().GetResult(); /// /// Writes a single column in the current row as type . @@ -229,38 +200,7 @@ public void Write(T value, NpgsqlDbType npgsqlDbType) => /// /// The .NET type of the column to be written. public Task WriteAsync(T value, NpgsqlDbType npgsqlDbType, CancellationToken cancellationToken = default) - => Write(async: true, value, npgsqlDbType, cancellationToken); - - Task Write(bool async, T value, NpgsqlDbType npgsqlDbType, CancellationToken cancellationToken = default) - { - CheckColumnIndex(); - if (cancellationToken.IsCancellationRequested) - return Task.FromCanceled(cancellationToken); - - // First row, create the parameter objects - ref var p = ref _params[_column]; - if (p is not NpgsqlParameter typedParam) - typedParam = new NpgsqlParameter { NpgsqlDbType = npgsqlDbType }; - - // We only report previous values if anything actually changed, this saves some checks during the write. - PgTypeInfo? previousTypeInfo = null; - PgConverter? previousConverter = null; - PgTypeId previousTypeId = default; - if (p is not null && (p._npgsqlDbType != npgsqlDbType || p._dataTypeName is not null)) - { - p.GetResolutionInfo(out previousTypeInfo, out previousConverter, out previousTypeId); - if (ReferenceEquals(p, typedParam)) - { - p.ResetDbType(); - p.NpgsqlDbType = npgsqlDbType; - } - } - - if (!ReferenceEquals(p, typedParam)) - p = typedParam; - - return Write(async, value, typedParam, previousTypeInfo, previousConverter, previousTypeId, cancellationToken); - } + => Write(async: true, value, npgsqlDbType, dataTypeName: null, cancellationToken); /// /// Writes a single column in the current row as type . @@ -272,7 +212,7 @@ Task Write(bool async, T value, NpgsqlDbType npgsqlDbType, CancellationToken /// /// The .NET type of the column to be written. 
public void Write(T value, string dataTypeName) => - Write(async: false, value, dataTypeName).GetAwaiter().GetResult(); + Write(async: false, value, npgsqlDbType: null, dataTypeName).GetAwaiter().GetResult(); /// /// Writes a single column in the current row as type . @@ -287,76 +227,90 @@ public void Write(T value, string dataTypeName) => /// /// The .NET type of the column to be written. public Task WriteAsync(T value, string dataTypeName, CancellationToken cancellationToken = default) - => Write(async: true, value, dataTypeName, cancellationToken); + => Write(async: true, value, npgsqlDbType: null, dataTypeName, cancellationToken); - Task Write(bool async, T value, string dataTypeName, CancellationToken cancellationToken = default) + Task Write(bool async, T value, NpgsqlDbType? npgsqlDbType, string? dataTypeName, CancellationToken cancellationToken = default) { - CheckColumnIndex(); - if (cancellationToken.IsCancellationRequested) - return Task.FromCanceled(cancellationToken); - - // First row, create the parameter objects - ref var p = ref _params[_column]; - if (p is not NpgsqlParameter typedParam) - typedParam = new NpgsqlParameter { DataTypeName = dataTypeName }; - - // We only report previous values if anything actually changed, this saves some checks during the write. - PgTypeInfo? previousTypeInfo = null; - PgConverter? previousConverter = null; - PgTypeId previousTypeId = default; - if (p is not null && (p._npgsqlDbType is not null || p._dataTypeName != dataTypeName)) + // Statically handle DBNull for backwards compatibility, generic parameters where T = DBNull normally won't find a mapping. + // Also handle null values for object typed parameters, as parameters only accept DBNull.Value when T = object. 
+ if (typeof(T) == typeof(DBNull) || (typeof(T) == typeof(object) && value is null)) + return WriteNull(async, cancellationToken); + + return Core(async, value, npgsqlDbType, dataTypeName, cancellationToken); + + async Task Core(bool async, T value, NpgsqlDbType? npgsqlDbType, string? dataTypeName, CancellationToken cancellationToken = default) { - p.GetResolutionInfo(out previousTypeInfo, out previousConverter, out previousTypeId); - if (ReferenceEquals(p, typedParam)) + CheckReady(); + cancellationToken.ThrowIfCancellationRequested(); + CheckColumnIndex(); + + // Create the parameter objects for the first row or if the value type changes. + var newParam = false; + if (_params[_column] is not NpgsqlParameter param) { - p.ResetDbType(); - p.DataTypeName = dataTypeName; + newParam = true; + param = new NpgsqlParameter(); + if (npgsqlDbType is not null) + param._npgsqlDbType = npgsqlDbType; + if (dataTypeName is not null) + param._dataTypeName = dataTypeName; } - } - if (!ReferenceEquals(p, typedParam)) - p = typedParam; + // We only retrieve previous values if anything actually changed. + // For object typed parameters we must do so whenever setting NpgsqlParameter.Value would reset the type info. + PgTypeInfo? previousTypeInfo = null; + PgConverter? 
previousConverter = null; + PgTypeId previousTypeId = default; + if (!newParam && ( + (typeof(T) == typeof(object) && param.ShouldResetObjectTypeInfo(value)) + || param._npgsqlDbType != npgsqlDbType + || param._dataTypeName != dataTypeName)) + { + param.GetResolutionInfo(out previousTypeInfo, out previousConverter, out previousTypeId); + if (!newParam) + { + param.ResetDbType(); + if (npgsqlDbType is not null) + param._npgsqlDbType = npgsqlDbType; + if (dataTypeName is not null) + param._dataTypeName = dataTypeName; + } + } - return Write(async, value, typedParam, previousTypeInfo, previousConverter, previousTypeId, cancellationToken); - } + // These actions can reset or change the type info, we'll check afterwards whether we're still consistent with the original values. + param.TypedValue = value; + param.ResolveTypeInfo(_connector.SerializerOptions); - async Task Write(bool async, T value, NpgsqlParameter param, PgTypeInfo? previousTypeInfo, PgConverter? previousConverter, PgTypeId previousTypeId, CancellationToken cancellationToken = default) - { - CheckReady(); - if (_column == -1) - throw new InvalidOperationException("A row hasn't been started"); + if (previousTypeInfo is not null && previousConverter is not null && param.PgTypeId != previousTypeId) + { + var currentPgTypeId = param.PgTypeId; + // We should only rollback values when the stored instance was used. We'll throw before writing the new instance back anyway. + // Also always rolling back could set PgTypeInfos that were resolved for a type that doesn't match the T of the NpgsqlParameter. + if (!newParam) + param.SetResolutionInfo(previousTypeInfo, previousConverter, previousTypeId); + throw new InvalidOperationException($"Write for column {_column} resolves to a different PostgreSQL type: {currentPgTypeId} than the first row resolved to ({previousTypeId}). " + + $"Please make sure to use clr types that resolve to the same PostgreSQL type across rows. 
" + + $"Alternatively pass the same NpgsqlDbType or DataTypeName to ensure the PostgreSQL type ends up to be identical." ); + } - // Statically map any DBNull value during importing, generic parameters when T = DBNull normally won't find any mapping. - // Also allow null values for object typed parameters, parameters exclusively accept DBNull.Value when T = object. - if (typeof(T) == typeof(DBNull) || (typeof(T) == typeof(object) && (value == null || value is DBNull))) - { - await WriteNull(async, cancellationToken).ConfigureAwait(false); - return; - } + if (newParam) + _params[_column] = param; - param.TypedValue = value; - param.ResolveTypeInfo(_connector.SerializerOptions); + param.Bind(out _, out _); - if (previousTypeInfo is not null && previousConverter is not null && param.PgTypeId != previousTypeId) - { - var currentPgTypeId = param.PgTypeId; - param.SetResolutionInfo(previousTypeInfo, previousConverter, previousTypeId); - throw new InvalidOperationException($"Write for column {_column} resolves to a different PostgreSQL type: {currentPgTypeId} than the first row resolved to ({previousTypeId}). " + - $"Please make sure to use clr types that resolve to the same PostgreSQL type across rows. " + - $"Alternatively pass the same NpgsqlDbType or DataTypeName to ensure the PostgreSQL type ends up to be identical." ); - } + try + { + await param.Write(async, _pgWriter.WithFlushMode(async ? FlushMode.NonBlocking : FlushMode.Blocking), cancellationToken) + .ConfigureAwait(false); + } + catch (Exception ex) + { + _connector.Break(ex); + throw; + } - param.Bind(out _, out _); - try - { - await param.Write(async, _pgWriter.WithFlushMode(async ? FlushMode.NonBlocking : FlushMode.Blocking), cancellationToken).ConfigureAwait(false); + _column++; } - catch (Exception ex) - { - _connector.Break(ex); - throw; - } - _column++; } /// @@ -372,9 +326,9 @@ async Task Write(bool async, T value, NpgsqlParameter param, PgTypeInfo? 
p async Task WriteNull(bool async, CancellationToken cancellationToken = default) { CheckReady(); - cancellationToken.ThrowIfCancellationRequested(); - if (_column == -1) - throw new InvalidOperationException("A row hasn't been started"); + if (cancellationToken.IsCancellationRequested) + cancellationToken.ThrowIfCancellationRequested(); + CheckColumnIndex(); if (_buf.WriteSpaceLeft < 4) await _buf.Flush(async, cancellationToken).ConfigureAwait(false); @@ -408,13 +362,23 @@ async Task WriteRow(bool async, CancellationToken cancellationToken = default, p { await StartRow(async, cancellationToken).ConfigureAwait(false); foreach (var value in values) - await Write(async, value, cancellationToken).ConfigureAwait(false); + await Write(async, value, npgsqlDbType: null, dataTypeName: null, cancellationToken).ConfigureAwait(false); } void CheckColumnIndex() { - if (_column >= NumColumns) - ThrowHelper.ThrowInvalidOperationException_BinaryImportParametersMismatch(NumColumns, _column + 1); + if (_column is -1 || _column >= NumColumns) + Throw(); + + [MethodImpl(MethodImplOptions.NoInlining)] + void Throw() + { + if (_column is -1) + throw new InvalidOperationException("A row hasn't been started"); + + if (_column >= NumColumns) + ThrowColumnMismatch(); + } } #endregion @@ -445,7 +409,11 @@ async ValueTask Complete(bool async, CancellationToken cancellationToken try { - await WriteTrailer(async, cancellationToken).ConfigureAwait(false); + // Write trailer + if (_buf.WriteSpaceLeft < 2) + await _buf.Flush(async, cancellationToken).ConfigureAwait(false); + _buf.WriteInt16(-1); + await _buf.Flush(async, cancellationToken).ConfigureAwait(false); _buf.EndCopyMode(); await _connector.WriteCopyDone(async, cancellationToken).ConfigureAwait(false); @@ -574,28 +542,21 @@ void Cleanup() } #pragma warning restore CS8625 - async Task WriteTrailer(bool async, CancellationToken cancellationToken = default) - { - if (_buf.WriteSpaceLeft < 2) - await _buf.Flush(async, 
cancellationToken).ConfigureAwait(false); - _buf.WriteInt16(-1); - } - void CheckReady() { - switch (_state) - { - case ImporterState.Ready: - return; - case ImporterState.Disposed: - throw new ObjectDisposedException(GetType().FullName, "The COPY operation has already ended."); - case ImporterState.Cancelled: - throw new InvalidOperationException("The COPY operation has already been cancelled."); - case ImporterState.Committed: - throw new InvalidOperationException("The COPY operation has already been committed."); - default: - throw new Exception("Invalid state: " + _state); - } + if (_state is not ImporterState.Ready and var state) + Throw(state); + + [MethodImpl(MethodImplOptions.NoInlining)] + static void Throw(ImporterState state) + => throw (state switch + { + ImporterState.Disposed => new ObjectDisposedException(typeof(NpgsqlBinaryImporter).FullName, + "The COPY operation has already ended."), + ImporterState.Cancelled => new InvalidOperationException("The COPY operation has already been cancelled."), + ImporterState.Committed => new InvalidOperationException("The COPY operation has already been committed."), + _ => new Exception("Invalid state: " + state) + }); } #endregion @@ -611,4 +572,7 @@ enum ImporterState } #endregion Enums + + void ThrowColumnMismatch() + => throw new InvalidOperationException($"The binary import operation was started with {NumColumns} column(s), but {_column + 1} value(s) were provided."); } diff --git a/src/Npgsql/ThrowHelper.cs b/src/Npgsql/ThrowHelper.cs index 4012511787..f20dac780c 100644 --- a/src/Npgsql/ThrowHelper.cs +++ b/src/Npgsql/ThrowHelper.cs @@ -1,7 +1,6 @@ using Npgsql.BackendMessages; using System; using System.Diagnostics.CodeAnalysis; -using System.Reflection; using Npgsql.Internal; namespace Npgsql; @@ -60,22 +59,6 @@ internal static void ThrowInvalidCastException(string message) => internal static void ThrowInvalidCastException_NoValue() => throw new InvalidCastException("Field is null."); - [DoesNotReturn] - 
internal static void ThrowArgumentOutOfRange_OutOfColumnBounds(string paramName, int columnLength) => - throw new ArgumentOutOfRangeException(paramName, $"The value is out of bounds from the column data, dataOffset must be between 0 and {columnLength}"); - - [DoesNotReturn] - internal static void ThrowInvalidOperationException_NoPropertyGetter(Type type, MemberInfo property) => - throw new InvalidOperationException($"Composite type '{type}' cannot be written because the '{property}' property has no getter."); - - [DoesNotReturn] - internal static void ThrowInvalidOperationException_NoPropertySetter(Type type, MemberInfo property) => - throw new InvalidOperationException($"Composite type '{type}' cannot be read because the '{property}' property has no setter."); - - [DoesNotReturn] - internal static void ThrowInvalidOperationException_BinaryImportParametersMismatch(int columnCount, int valueCount) => - throw new InvalidOperationException($"The binary import operation was started with {columnCount} column(s), but {valueCount} value(s) were provided."); - [DoesNotReturn] internal static void ThrowNpgsqlException(string message) => throw new NpgsqlException(message); From fbbb28245ed3475d0857888622939ba7f13cb04f Mon Sep 17 00:00:00 2001 From: Ken Bailey <12869442+kenbailey@users.noreply.github.com> Date: Fri, 15 Mar 2024 04:43:33 -0600 Subject: [PATCH 397/761] Add missing EFCore.PG Enum hack in non-generic MapEnum method (#5627) Previous fix appears to only have been applied to the generic MapEnum method. Looking for same behavior when using the non generic MapEnum method. 
--- src/Npgsql/TypeMapping/GlobalTypeMapper.cs | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs index caffde5fc0..4ef1313adf 100644 --- a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs +++ b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs @@ -291,6 +291,11 @@ public INpgsqlTypeMapper MapEnum([DynamicallyAccessedMembers(DynamicallyAccessed try { _userTypeMapper.MapEnum(clrType, pgName, nameTranslator); + + // Temporary hack for EFCore.PG enum mapping compat + if (_userTypeMapper.Items.FirstOrDefault(i => i.ClrType == clrType) is UserTypeMapping userTypeMapping) + HackyEnumTypeMappings.Add(new(clrType, userTypeMapping.PgTypeName, nameTranslator ?? DefaultNameTranslator)); + ResetTypeMappingCache(); return this; } @@ -307,9 +312,14 @@ public bool UnmapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes _lock.EnterWriteLock(); try { - var result = _userTypeMapper.UnmapEnum(clrType, pgName, nameTranslator); + var removed = _userTypeMapper.UnmapEnum(clrType, pgName, nameTranslator); + + // Temporary hack for EFCore.PG enum mapping compat + if (removed && ((List)_userTypeMapper.Items).FindIndex(m => m.ClrType == clrType) is > -1 and var index) + HackyEnumTypeMappings.RemoveAt(index); + ResetTypeMappingCache(); - return result; + return removed; } finally { From 1d13ccd2ffc0796cf5405a8258998f1f1e779a54 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Sat, 16 Mar 2024 16:28:09 +0100 Subject: [PATCH 398/761] Fix NodaTime legacy TimestampTz default mapping (#5629) Fixes #5628 --- .../NodaTimeTypeInfoResolverFactory.cs | 75 +++++++++---------- .../Npgsql.PluginTests/LegacyNodaTimeTests.cs | 2 +- 2 files changed, 35 insertions(+), 42 deletions(-) diff --git a/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolverFactory.cs b/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolverFactory.cs index dce258b453..de5548a569 100644 --- 
a/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolverFactory.cs +++ b/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolverFactory.cs @@ -31,47 +31,47 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) // timestamp and timestamptz, legacy and non-legacy modes if (LegacyTimestampBehavior) { + // timestamp is the default for writing an Instant. + + // timestamp + mappings.AddStructType(TimestampDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, new InstantConverter(options.EnableDateTimeInfinityConversions)), isDefault: true); + mappings.AddStructType(TimestampDataTypeName, + static (options, mapping, _) => + mapping.CreateInfo(options, new LocalDateTimeConverter(options.EnableDateTimeInfinityConversions))); + // timestamptz - mappings.AddStructType(new DataTypeName("pg_catalog.timestamptz"), + mappings.AddStructType(TimestampTzDataTypeName, static (options, mapping, _) => - mapping.CreateInfo(options, new InstantConverter(options.EnableDateTimeInfinityConversions)), isDefault: false); - mappings.AddStructType(new DataTypeName("pg_catalog.timestamptz"), + mapping.CreateInfo(options, new InstantConverter(options.EnableDateTimeInfinityConversions)), isDefault: true); + mappings.AddStructType(TimestampTzDataTypeName, static (options, mapping, _) => mapping.CreateInfo(options, new LegacyTimestampTzZonedDateTimeConverter( DateTimeZoneProviders.Tzdb[options.TimeZone], options.EnableDateTimeInfinityConversions))); - mappings.AddStructType(new DataTypeName("pg_catalog.timestamptz"), + mappings.AddStructType(TimestampTzDataTypeName, static (options, mapping, _) => mapping.CreateInfo(options, new LegacyTimestampTzOffsetDateTimeConverter( DateTimeZoneProviders.Tzdb[options.TimeZone], options.EnableDateTimeInfinityConversions))); - + } + else + { // timestamp - mappings.AddStructType(TimestampDataTypeName, - static (options, mapping, _) => - mapping.CreateInfo(options, new 
InstantConverter(options.EnableDateTimeInfinityConversions)), - isDefault: true); mappings.AddStructType(TimestampDataTypeName, static (options, mapping, _) => mapping.CreateInfo(options, new LocalDateTimeConverter(options.EnableDateTimeInfinityConversions)), - isDefault: false); - } - else - { + isDefault: true); + // timestamptz mappings.AddStructType(TimestampTzDataTypeName, static (options, mapping, _) => mapping.CreateInfo(options, new InstantConverter(options.EnableDateTimeInfinityConversions)), isDefault: true); - mappings.AddStructType(new DataTypeName("pg_catalog.timestamptz"), + mappings.AddStructType(TimestampTzDataTypeName, static (options, mapping, _) => mapping.CreateInfo(options, new ZonedDateTimeConverter(options.EnableDateTimeInfinityConversions))); - mappings.AddStructType(new DataTypeName("pg_catalog.timestamptz"), + mappings.AddStructType(TimestampTzDataTypeName, static (options, mapping, _) => mapping.CreateInfo(options, new OffsetDateTimeConverter(options.EnableDateTimeInfinityConversions))); - - // timestamp - mappings.AddStructType(TimestampDataTypeName, - static (options, mapping, _) => - mapping.CreateInfo(options, new LocalDateTimeConverter(options.EnableDateTimeInfinityConversions)), - isDefault: true); } // date @@ -107,34 +107,27 @@ sealed class ArrayResolver : Resolver, IPgTypeInfoResolver static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) { - // timestamptz - mappings.AddStructArrayType(TimestampTzDataTypeName); - mappings.AddStructArrayType(TimestampTzDataTypeName); - mappings.AddStructArrayType(TimestampTzDataTypeName); - - // timestamp if (LegacyTimestampBehavior) { + // timestamp mappings.AddStructArrayType(TimestampDataTypeName); + mappings.AddStructArrayType(TimestampDataTypeName); - mappings.AddStructType(TimestampDataTypeName, - static (options, mapping, _) => - mapping.CreateInfo(options, new InstantConverter(options.EnableDateTimeInfinityConversions)), - isDefault: true); - 
mappings.AddStructType(TimestampDataTypeName, - static (options, mapping, _) => - mapping.CreateInfo(options, new LocalDateTimeConverter(options.EnableDateTimeInfinityConversions)), - isDefault: false); + // timestamptz + mappings.AddStructArrayType(TimestampTzDataTypeName); + mappings.AddStructArrayType(TimestampTzDataTypeName); + mappings.AddStructArrayType(TimestampTzDataTypeName); } else { - mappings.AddStructType(TimestampDataTypeName, - static (options, mapping, _) => - mapping.CreateInfo(options, new LocalDateTimeConverter(options.EnableDateTimeInfinityConversions)), - isDefault: true); - } + // timestamp + mappings.AddStructArrayType(TimestampDataTypeName); - mappings.AddStructArrayType(TimestampDataTypeName); + // timestamptz + mappings.AddStructArrayType(TimestampTzDataTypeName); + mappings.AddStructArrayType(TimestampTzDataTypeName); + mappings.AddStructArrayType(TimestampTzDataTypeName); + } // other mappings.AddStructArrayType(DateDataTypeName); diff --git a/test/Npgsql.PluginTests/LegacyNodaTimeTests.cs b/test/Npgsql.PluginTests/LegacyNodaTimeTests.cs index 6af0afec24..3f5eb05177 100644 --- a/test/Npgsql.PluginTests/LegacyNodaTimeTests.cs +++ b/test/Npgsql.PluginTests/LegacyNodaTimeTests.cs @@ -55,7 +55,7 @@ public Task Timestamptz_as_Instant() "timestamp with time zone", NpgsqlDbType.TimestampTz, DbType.DateTimeOffset, - isDefault: false, + isDefaultForWriting: false, isNpgsqlDbTypeInferredFromClrType: false); [Test] From cb9d849dbf6506ec916560878318d51d42787835 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Sat, 16 Mar 2024 18:24:49 +0100 Subject: [PATCH 399/761] Add DI methods accepting a service provider action (#5572) Closes #4822 --- ...sqlServiceCollectionExtensions.Obsolete.cs | 220 +++++++++++++++++ .../NpgsqlServiceCollectionExtensions.cs | 229 +++++++----------- .../PublicAPI.Unshipped.txt | 4 + 3 files changed, 305 insertions(+), 148 deletions(-) create mode 100644 
src/Npgsql.DependencyInjection/NpgsqlServiceCollectionExtensions.Obsolete.cs diff --git a/src/Npgsql.DependencyInjection/NpgsqlServiceCollectionExtensions.Obsolete.cs b/src/Npgsql.DependencyInjection/NpgsqlServiceCollectionExtensions.Obsolete.cs new file mode 100644 index 0000000000..6e2b4e7d4f --- /dev/null +++ b/src/Npgsql.DependencyInjection/NpgsqlServiceCollectionExtensions.Obsolete.cs @@ -0,0 +1,220 @@ +using System; +using System.ComponentModel; +using Npgsql; + +namespace Microsoft.Extensions.DependencyInjection; + +public static partial class NpgsqlServiceCollectionExtensions +{ + /// + /// Registers an and an in the . + /// + /// The to add services to. + /// An Npgsql connection string. + /// + /// The lifetime with which to register the in the container. + /// Defaults to . + /// + /// + /// The lifetime with which to register the service in the container. + /// Defaults to . + /// + /// The same service collection so that multiple calls can be chained. + [EditorBrowsable(EditorBrowsableState.Never), Obsolete("Defined for binary compatibility with 7.0")] + public static IServiceCollection AddNpgsqlDataSource( + this IServiceCollection serviceCollection, + string connectionString, + ServiceLifetime connectionLifetime, + ServiceLifetime dataSourceLifetime) + => AddNpgsqlDataSourceCore( + serviceCollection, serviceKey: null, connectionString, dataSourceBuilderAction: null, + connectionLifetime, dataSourceLifetime, state: null); + + /// + /// Registers an and an in the . + /// + /// The to add services to. + /// An Npgsql connection string. + /// + /// An action to configure the for further customizations of the . + /// + /// + /// The lifetime with which to register the in the container. + /// Defaults to . + /// + /// + /// The lifetime with which to register the service in the container. + /// Defaults to . + /// + /// The same service collection so that multiple calls can be chained. 
+ [EditorBrowsable(EditorBrowsableState.Never), Obsolete("Defined for binary compatibility with 7.0")] + public static IServiceCollection AddNpgsqlDataSource( + this IServiceCollection serviceCollection, + string connectionString, + Action dataSourceBuilderAction, + ServiceLifetime connectionLifetime, + ServiceLifetime dataSourceLifetime) + => AddNpgsqlDataSourceCore(serviceCollection, serviceKey: null, connectionString, + static (_, builder, state) => ((Action)state!)(builder), + connectionLifetime, dataSourceLifetime, state: dataSourceBuilderAction); + + /// + /// Registers an and an in the . + /// + /// The to add services to. + /// An Npgsql connection string. + /// + /// The lifetime with which to register the in the container. + /// Defaults to . + /// + /// + /// The lifetime with which to register the service in the container. + /// Defaults to . + /// + /// The same service collection so that multiple calls can be chained. + [EditorBrowsable(EditorBrowsableState.Never), Obsolete("Defined for binary compatibility with 7.0")] + public static IServiceCollection AddNpgsqlSlimDataSource( + this IServiceCollection serviceCollection, + string connectionString, + ServiceLifetime connectionLifetime, + ServiceLifetime dataSourceLifetime) + => AddNpgsqlSlimDataSourceCore( + serviceCollection, serviceKey: null, connectionString, dataSourceBuilderAction: null, + connectionLifetime, dataSourceLifetime, state: null); + + /// + /// Registers an and an in the . + /// + /// The to add services to. + /// An Npgsql connection string. + /// + /// An action to configure the for further customizations of the . + /// + /// + /// The lifetime with which to register the in the container. + /// Defaults to . + /// + /// + /// The lifetime with which to register the service in the container. + /// Defaults to . + /// + /// The same service collection so that multiple calls can be chained. 
+ [EditorBrowsable(EditorBrowsableState.Never), Obsolete("Defined for binary compatibility with 7.0")] + public static IServiceCollection AddNpgsqlSlimDataSource( + this IServiceCollection serviceCollection, + string connectionString, + Action dataSourceBuilderAction, + ServiceLifetime connectionLifetime, + ServiceLifetime dataSourceLifetime) + => AddNpgsqlSlimDataSourceCore(serviceCollection, serviceKey: null, connectionString, + static (_, builder, state) => ((Action)state!)(builder), + connectionLifetime, dataSourceLifetime, state: dataSourceBuilderAction); + + /// + /// Registers an and an in the + /// . + /// + /// The to add services to. + /// An Npgsql connection string. + /// + /// The lifetime with which to register the in the container. + /// Defaults to . + /// + /// + /// The lifetime with which to register the service in the container. + /// Defaults to . + /// + /// The same service collection so that multiple calls can be chained. + [EditorBrowsable(EditorBrowsableState.Never), Obsolete("Defined for binary compatibility with 7.0")] + public static IServiceCollection AddMultiHostNpgsqlDataSource( + this IServiceCollection serviceCollection, + string connectionString, + ServiceLifetime connectionLifetime, + ServiceLifetime dataSourceLifetime) + => AddMultiHostNpgsqlDataSourceCore( + serviceCollection, serviceKey: null, connectionString, dataSourceBuilderAction: null, + connectionLifetime, dataSourceLifetime, state: null); + + /// + /// Registers an and an in the + /// + /// The to add services to. + /// An Npgsql connection string. + /// + /// An action to configure the for further customizations of the . + /// + /// + /// The lifetime with which to register the in the container. + /// Defaults to . + /// + /// + /// The lifetime with which to register the service in the container. + /// Defaults to . + /// + /// The same service collection so that multiple calls can be chained. 
+ [EditorBrowsable(EditorBrowsableState.Never), Obsolete("Defined for binary compatibility with 7.0")] + public static IServiceCollection AddMultiHostNpgsqlDataSource( + this IServiceCollection serviceCollection, + string connectionString, + Action dataSourceBuilderAction, + ServiceLifetime connectionLifetime, + ServiceLifetime dataSourceLifetime) + => AddMultiHostNpgsqlDataSourceCore( + serviceCollection, serviceKey: null, connectionString, + static (_, builder, state) => ((Action)state!)(builder), + connectionLifetime, dataSourceLifetime, state: dataSourceBuilderAction); + + /// + /// Registers an and an in the + /// . + /// + /// The to add services to. + /// An Npgsql connection string. + /// + /// The lifetime with which to register the in the container. + /// Defaults to . + /// + /// + /// The lifetime with which to register the service in the container. + /// Defaults to . + /// + /// The same service collection so that multiple calls can be chained. + [EditorBrowsable(EditorBrowsableState.Never), Obsolete("Defined for binary compatibility with 7.0")] + public static IServiceCollection AddMultiHostNpgsqlSlimDataSource( + this IServiceCollection serviceCollection, + string connectionString, + ServiceLifetime connectionLifetime, + ServiceLifetime dataSourceLifetime) + => AddMultiHostNpgsqlSlimDataSourceCore( + serviceCollection, serviceKey: null, connectionString, dataSourceBuilderAction: null, + connectionLifetime, dataSourceLifetime, state: null); + + /// + /// Registers an and an in the + /// + /// The to add services to. + /// An Npgsql connection string. + /// + /// An action to configure the for further customizations of the . + /// + /// + /// The lifetime with which to register the in the container. + /// Defaults to . + /// + /// + /// The lifetime with which to register the service in the container. + /// Defaults to . + /// + /// The same service collection so that multiple calls can be chained. 
+ [EditorBrowsable(EditorBrowsableState.Never), Obsolete("Defined for binary compatibility with 7.0")] + public static IServiceCollection AddMultiHostNpgsqlSlimDataSource( + this IServiceCollection serviceCollection, + string connectionString, + Action dataSourceBuilderAction, + ServiceLifetime connectionLifetime, + ServiceLifetime dataSourceLifetime) + => AddMultiHostNpgsqlSlimDataSourceCore( + serviceCollection, serviceKey: null, connectionString, + static (_, builder, state) => ((Action)state!)(builder), + connectionLifetime, dataSourceLifetime, state: dataSourceBuilderAction); +} diff --git a/src/Npgsql.DependencyInjection/NpgsqlServiceCollectionExtensions.cs b/src/Npgsql.DependencyInjection/NpgsqlServiceCollectionExtensions.cs index 755d6b1357..7e22029a40 100644 --- a/src/Npgsql.DependencyInjection/NpgsqlServiceCollectionExtensions.cs +++ b/src/Npgsql.DependencyInjection/NpgsqlServiceCollectionExtensions.cs @@ -1,5 +1,4 @@ using System; -using System.ComponentModel; using System.Data.Common; using Microsoft.Extensions.DependencyInjection.Extensions; using Microsoft.Extensions.Logging; @@ -11,16 +10,13 @@ namespace Microsoft.Extensions.DependencyInjection; /// /// Extension method for setting up Npgsql services in an . /// -public static class NpgsqlServiceCollectionExtensions +public static partial class NpgsqlServiceCollectionExtensions { /// /// Registers an and an in the . /// /// The to add services to. /// An Npgsql connection string. - /// - /// An action to configure the for further customizations of the . - /// /// /// The lifetime with which to register the in the container. /// Defaults to . @@ -34,11 +30,12 @@ public static class NpgsqlServiceCollectionExtensions public static IServiceCollection AddNpgsqlDataSource( this IServiceCollection serviceCollection, string connectionString, - Action dataSourceBuilderAction, ServiceLifetime connectionLifetime = ServiceLifetime.Transient, ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton, object? 
serviceKey = null) - => AddNpgsqlDataSourceCore(serviceCollection, serviceKey, connectionString, dataSourceBuilderAction, connectionLifetime, dataSourceLifetime); + => AddNpgsqlDataSourceCore( + serviceCollection, serviceKey, connectionString, dataSourceBuilderAction: null, + connectionLifetime, dataSourceLifetime, state: null); /// /// Registers an and an in the . @@ -56,21 +53,27 @@ public static IServiceCollection AddNpgsqlDataSource( /// The lifetime with which to register the service in the container. /// Defaults to . /// + /// The of the data source. /// The same service collection so that multiple calls can be chained. - [EditorBrowsable(EditorBrowsableState.Never), Obsolete("Defined for binary compatibility with 7.0")] public static IServiceCollection AddNpgsqlDataSource( this IServiceCollection serviceCollection, string connectionString, Action dataSourceBuilderAction, - ServiceLifetime connectionLifetime, - ServiceLifetime dataSourceLifetime) - => AddNpgsqlDataSourceCore(serviceCollection, serviceKey: null, connectionString, dataSourceBuilderAction, connectionLifetime, dataSourceLifetime); + ServiceLifetime connectionLifetime = ServiceLifetime.Transient, + ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton, + object? serviceKey = null) + => AddNpgsqlDataSourceCore(serviceCollection, serviceKey, connectionString, + static (_, builder, state) => ((Action)state!)(builder) + , connectionLifetime, dataSourceLifetime, state: dataSourceBuilderAction); /// /// Registers an and an in the . /// /// The to add services to. /// An Npgsql connection string. + /// + /// An action to configure the for further customizations of the . + /// /// /// The lifetime with which to register the in the container. /// Defaults to . 
@@ -84,11 +87,13 @@ public static IServiceCollection AddNpgsqlDataSource( public static IServiceCollection AddNpgsqlDataSource( this IServiceCollection serviceCollection, string connectionString, + Action dataSourceBuilderAction, ServiceLifetime connectionLifetime = ServiceLifetime.Transient, ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton, object? serviceKey = null) - => AddNpgsqlDataSourceCore( - serviceCollection, serviceKey, connectionString, dataSourceBuilderAction: null, connectionLifetime, dataSourceLifetime); + => AddNpgsqlDataSourceCore(serviceCollection, serviceKey, connectionString, + static (sp, builder, state) => ((Action)state!)(sp, builder), + connectionLifetime, dataSourceLifetime, state: dataSourceBuilderAction); /// /// Registers an and an in the . @@ -103,15 +108,17 @@ public static IServiceCollection AddNpgsqlDataSource( /// The lifetime with which to register the service in the container. /// Defaults to . /// + /// The of the data source. /// The same service collection so that multiple calls can be chained. - [EditorBrowsable(EditorBrowsableState.Never), Obsolete("Defined for binary compatibility with 7.0")] - public static IServiceCollection AddNpgsqlDataSource( + public static IServiceCollection AddNpgsqlSlimDataSource( this IServiceCollection serviceCollection, string connectionString, - ServiceLifetime connectionLifetime, - ServiceLifetime dataSourceLifetime) - => AddNpgsqlDataSourceCore( - serviceCollection, serviceKey: null, connectionString, dataSourceBuilderAction: null, connectionLifetime, dataSourceLifetime); + ServiceLifetime connectionLifetime = ServiceLifetime.Transient, + ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton, + object? serviceKey = null) + => AddNpgsqlSlimDataSourceCore( + serviceCollection, serviceKey, connectionString, dataSourceBuilderAction: null, + connectionLifetime, dataSourceLifetime, state: null); /// /// Registers an and an in the . 
@@ -138,7 +145,9 @@ public static IServiceCollection AddNpgsqlSlimDataSource( ServiceLifetime connectionLifetime = ServiceLifetime.Transient, ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton, object? serviceKey = null) - => AddNpgsqlSlimDataSourceCore(serviceCollection, serviceKey, connectionString, dataSourceBuilderAction, connectionLifetime, dataSourceLifetime); + => AddNpgsqlSlimDataSourceCore(serviceCollection, serviceKey, connectionString, + static (_, builder, state) => ((Action)state!)(builder), + connectionLifetime, dataSourceLifetime, state: dataSourceBuilderAction); /// /// Registers an and an in the . @@ -156,71 +165,25 @@ public static IServiceCollection AddNpgsqlSlimDataSource( /// The lifetime with which to register the service in the container. /// Defaults to . /// - /// The same service collection so that multiple calls can be chained. - [EditorBrowsable(EditorBrowsableState.Never), Obsolete("Defined for binary compatibility with 7.0")] - public static IServiceCollection AddNpgsqlSlimDataSource( - this IServiceCollection serviceCollection, - string connectionString, - Action dataSourceBuilderAction, - ServiceLifetime connectionLifetime, - ServiceLifetime dataSourceLifetime) - => AddNpgsqlSlimDataSourceCore(serviceCollection, serviceKey: null, connectionString, dataSourceBuilderAction, connectionLifetime, dataSourceLifetime); - - /// - /// Registers an and an in the . - /// - /// The to add services to. - /// An Npgsql connection string. - /// - /// The lifetime with which to register the in the container. - /// Defaults to . - /// - /// - /// The lifetime with which to register the service in the container. - /// Defaults to . - /// /// The of the data source. /// The same service collection so that multiple calls can be chained. 
public static IServiceCollection AddNpgsqlSlimDataSource( this IServiceCollection serviceCollection, string connectionString, + Action dataSourceBuilderAction, ServiceLifetime connectionLifetime = ServiceLifetime.Transient, ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton, object? serviceKey = null) - => AddNpgsqlSlimDataSourceCore( - serviceCollection, serviceKey, connectionString, dataSourceBuilderAction: null, connectionLifetime, dataSourceLifetime); - - /// - /// Registers an and an in the . - /// - /// The to add services to. - /// An Npgsql connection string. - /// - /// The lifetime with which to register the in the container. - /// Defaults to . - /// - /// - /// The lifetime with which to register the service in the container. - /// Defaults to . - /// - /// The same service collection so that multiple calls can be chained. - [EditorBrowsable(EditorBrowsableState.Never), Obsolete("Defined for binary compatibility with 7.0")] - public static IServiceCollection AddNpgsqlSlimDataSource( - this IServiceCollection serviceCollection, - string connectionString, - ServiceLifetime connectionLifetime, - ServiceLifetime dataSourceLifetime) - => AddNpgsqlSlimDataSourceCore( - serviceCollection, serviceKey: null, connectionString, dataSourceBuilderAction: null, connectionLifetime, dataSourceLifetime); + => AddNpgsqlSlimDataSourceCore(serviceCollection, serviceKey, connectionString, + static (sp, builder, state) => ((Action)state!)(sp, builder), + connectionLifetime, dataSourceLifetime, state: dataSourceBuilderAction); /// /// Registers an and an in the + /// . /// /// The to add services to. /// An Npgsql connection string. - /// - /// An action to configure the for further customizations of the . - /// /// /// The lifetime with which to register the in the container. /// Defaults to . 
@@ -234,12 +197,12 @@ public static IServiceCollection AddNpgsqlSlimDataSource( public static IServiceCollection AddMultiHostNpgsqlDataSource( this IServiceCollection serviceCollection, string connectionString, - Action dataSourceBuilderAction, ServiceLifetime connectionLifetime = ServiceLifetime.Transient, ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton, object? serviceKey = null) => AddMultiHostNpgsqlDataSourceCore( - serviceCollection, serviceKey, connectionString, dataSourceBuilderAction, connectionLifetime, dataSourceLifetime); + serviceCollection, serviceKey, connectionString, dataSourceBuilderAction: null, + connectionLifetime, dataSourceLifetime, state: null); /// /// Registers an and an in the @@ -257,23 +220,28 @@ public static IServiceCollection AddMultiHostNpgsqlDataSource( /// The lifetime with which to register the service in the container. /// Defaults to . /// + /// The of the data source. /// The same service collection so that multiple calls can be chained. - [EditorBrowsable(EditorBrowsableState.Never), Obsolete("Defined for binary compatibility with 7.0")] public static IServiceCollection AddMultiHostNpgsqlDataSource( this IServiceCollection serviceCollection, string connectionString, Action dataSourceBuilderAction, - ServiceLifetime connectionLifetime, - ServiceLifetime dataSourceLifetime) + ServiceLifetime connectionLifetime = ServiceLifetime.Transient, + ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton, + object? serviceKey = null) => AddMultiHostNpgsqlDataSourceCore( - serviceCollection, serviceKey: null, connectionString, dataSourceBuilderAction, connectionLifetime, dataSourceLifetime); + serviceCollection, serviceKey, connectionString, + static (_, builder, state) => ((Action)state!)(builder), + connectionLifetime, dataSourceLifetime, state: dataSourceBuilderAction); /// /// Registers an and an in the - /// . /// /// The to add services to. /// An Npgsql connection string. 
+ /// + /// An action to configure the for further customizations of the . + /// /// /// The lifetime with which to register the in the container. /// Defaults to . @@ -287,11 +255,14 @@ public static IServiceCollection AddMultiHostNpgsqlDataSource( public static IServiceCollection AddMultiHostNpgsqlDataSource( this IServiceCollection serviceCollection, string connectionString, + Action dataSourceBuilderAction, ServiceLifetime connectionLifetime = ServiceLifetime.Transient, ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton, object? serviceKey = null) => AddMultiHostNpgsqlDataSourceCore( - serviceCollection, serviceKey, connectionString, dataSourceBuilderAction: null, connectionLifetime, dataSourceLifetime); + serviceCollection, serviceKey, connectionString, + static (sp, builder, state) => ((Action)state!)(sp, builder), + connectionLifetime, dataSourceLifetime, state: dataSourceBuilderAction); /// /// Registers an and an in the @@ -307,15 +278,17 @@ public static IServiceCollection AddMultiHostNpgsqlDataSource( /// The lifetime with which to register the service in the container. /// Defaults to . /// + /// The of the data source. /// The same service collection so that multiple calls can be chained. - [EditorBrowsable(EditorBrowsableState.Never), Obsolete("Defined for binary compatibility with 7.0")] - public static IServiceCollection AddMultiHostNpgsqlDataSource( + public static IServiceCollection AddMultiHostNpgsqlSlimDataSource( this IServiceCollection serviceCollection, string connectionString, - ServiceLifetime connectionLifetime, - ServiceLifetime dataSourceLifetime) - => AddMultiHostNpgsqlDataSourceCore( - serviceCollection, serviceKey: null, connectionString, dataSourceBuilderAction: null, connectionLifetime, dataSourceLifetime); + ServiceLifetime connectionLifetime = ServiceLifetime.Transient, + ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton, + object? 
serviceKey = null) + => AddMultiHostNpgsqlSlimDataSourceCore( + serviceCollection, serviceKey, connectionString, dataSourceBuilderAction: null, + connectionLifetime, dataSourceLifetime, state: null); /// /// Registers an and an in the @@ -343,7 +316,9 @@ public static IServiceCollection AddMultiHostNpgsqlSlimDataSource( ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton, object? serviceKey = null) => AddMultiHostNpgsqlSlimDataSourceCore( - serviceCollection, serviceKey, connectionString, dataSourceBuilderAction, connectionLifetime, dataSourceLifetime); + serviceCollection, serviceKey, connectionString, + static (_, builder, state) => ((Action)state!)(builder), + connectionLifetime, dataSourceLifetime, state: dataSourceBuilderAction); /// /// Registers an and an in the @@ -361,73 +336,28 @@ public static IServiceCollection AddMultiHostNpgsqlSlimDataSource( /// The lifetime with which to register the service in the container. /// Defaults to . /// - /// The same service collection so that multiple calls can be chained. - [EditorBrowsable(EditorBrowsableState.Never), Obsolete("Defined for binary compatibility with 7.0")] - public static IServiceCollection AddMultiHostNpgsqlSlimDataSource( - this IServiceCollection serviceCollection, - string connectionString, - Action dataSourceBuilderAction, - ServiceLifetime connectionLifetime, - ServiceLifetime dataSourceLifetime) - => AddMultiHostNpgsqlSlimDataSourceCore( - serviceCollection, serviceKey: null, connectionString, dataSourceBuilderAction, connectionLifetime, dataSourceLifetime); - - /// - /// Registers an and an in the - /// . - /// - /// The to add services to. - /// An Npgsql connection string. - /// - /// The lifetime with which to register the in the container. - /// Defaults to . - /// - /// - /// The lifetime with which to register the service in the container. - /// Defaults to . - /// /// The of the data source. /// The same service collection so that multiple calls can be chained. 
public static IServiceCollection AddMultiHostNpgsqlSlimDataSource( this IServiceCollection serviceCollection, string connectionString, + Action dataSourceBuilderAction, ServiceLifetime connectionLifetime = ServiceLifetime.Transient, ServiceLifetime dataSourceLifetime = ServiceLifetime.Singleton, object? serviceKey = null) => AddMultiHostNpgsqlSlimDataSourceCore( - serviceCollection, serviceKey, connectionString, dataSourceBuilderAction: null, connectionLifetime, dataSourceLifetime); - - /// - /// Registers an and an in the - /// . - /// - /// The to add services to. - /// An Npgsql connection string. - /// - /// The lifetime with which to register the in the container. - /// Defaults to . - /// - /// - /// The lifetime with which to register the service in the container. - /// Defaults to . - /// - /// The same service collection so that multiple calls can be chained. - [EditorBrowsable(EditorBrowsableState.Never), Obsolete("Defined for binary compatibility with 7.0")] - public static IServiceCollection AddMultiHostNpgsqlSlimDataSource( - this IServiceCollection serviceCollection, - string connectionString, - ServiceLifetime connectionLifetime, - ServiceLifetime dataSourceLifetime) - => AddMultiHostNpgsqlSlimDataSourceCore( - serviceCollection, serviceKey: null, connectionString, dataSourceBuilderAction: null, connectionLifetime, dataSourceLifetime); + serviceCollection, serviceKey, connectionString, + static (sp, builder, state) => ((Action)state!)(sp, builder), + connectionLifetime, dataSourceLifetime, state: dataSourceBuilderAction); static IServiceCollection AddNpgsqlDataSourceCore( this IServiceCollection serviceCollection, object? serviceKey, string connectionString, - Action? dataSourceBuilderAction, + Action? dataSourceBuilderAction, ServiceLifetime connectionLifetime, - ServiceLifetime dataSourceLifetime) + ServiceLifetime dataSourceLifetime, + object? 
state) { serviceCollection.TryAdd( new ServiceDescriptor( @@ -437,7 +367,7 @@ static IServiceCollection AddNpgsqlDataSourceCore( { var dataSourceBuilder = new NpgsqlDataSourceBuilder(connectionString); dataSourceBuilder.UseLoggerFactory(sp.GetService()); - dataSourceBuilderAction?.Invoke(dataSourceBuilder); + dataSourceBuilderAction?.Invoke(sp, dataSourceBuilder, state); return dataSourceBuilder.Build(); }, dataSourceLifetime)); @@ -451,9 +381,10 @@ static IServiceCollection AddNpgsqlSlimDataSourceCore( this IServiceCollection serviceCollection, object? serviceKey, string connectionString, - Action? dataSourceBuilderAction, + Action? dataSourceBuilderAction, ServiceLifetime connectionLifetime, - ServiceLifetime dataSourceLifetime) + ServiceLifetime dataSourceLifetime, + object? state) { serviceCollection.TryAdd( new ServiceDescriptor( @@ -463,7 +394,7 @@ static IServiceCollection AddNpgsqlSlimDataSourceCore( { var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(connectionString); dataSourceBuilder.UseLoggerFactory(sp.GetService()); - dataSourceBuilderAction?.Invoke(dataSourceBuilder); + dataSourceBuilderAction?.Invoke(sp, dataSourceBuilder, state); return dataSourceBuilder.Build(); }, dataSourceLifetime)); @@ -477,9 +408,10 @@ static IServiceCollection AddMultiHostNpgsqlDataSourceCore( this IServiceCollection serviceCollection, object? serviceKey, string connectionString, - Action? dataSourceBuilderAction, + Action? dataSourceBuilderAction, ServiceLifetime connectionLifetime, - ServiceLifetime dataSourceLifetime) + ServiceLifetime dataSourceLifetime, + object? 
state) { serviceCollection.TryAdd( new ServiceDescriptor( @@ -489,7 +421,7 @@ static IServiceCollection AddMultiHostNpgsqlDataSourceCore( { var dataSourceBuilder = new NpgsqlDataSourceBuilder(connectionString); dataSourceBuilder.UseLoggerFactory(sp.GetService()); - dataSourceBuilderAction?.Invoke(dataSourceBuilder); + dataSourceBuilderAction?.Invoke(sp, dataSourceBuilder, state); return dataSourceBuilder.BuildMultiHost(); }, dataSourceLifetime)); @@ -522,9 +454,10 @@ static IServiceCollection AddMultiHostNpgsqlSlimDataSourceCore( this IServiceCollection serviceCollection, object? serviceKey, string connectionString, - Action? dataSourceBuilderAction, + Action? dataSourceBuilderAction, ServiceLifetime connectionLifetime, - ServiceLifetime dataSourceLifetime) + ServiceLifetime dataSourceLifetime, + object? state) { serviceCollection.TryAdd( new ServiceDescriptor( @@ -534,7 +467,7 @@ static IServiceCollection AddMultiHostNpgsqlSlimDataSourceCore( { var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(connectionString); dataSourceBuilder.UseLoggerFactory(sp.GetService()); - dataSourceBuilderAction?.Invoke(dataSourceBuilder); + dataSourceBuilderAction?.Invoke(sp, dataSourceBuilder, state); return dataSourceBuilder.BuildMultiHost(); }, dataSourceLifetime)); diff --git a/src/Npgsql.DependencyInjection/PublicAPI.Unshipped.txt b/src/Npgsql.DependencyInjection/PublicAPI.Unshipped.txt index ab058de62d..34f2d889e9 100644 --- a/src/Npgsql.DependencyInjection/PublicAPI.Unshipped.txt +++ b/src/Npgsql.DependencyInjection/PublicAPI.Unshipped.txt @@ -1 +1,5 @@ #nullable enable +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! 
dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! 
dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! From bc878a2c6b39f0ecf538c94364729c8bfdba999e Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Sun, 17 Mar 2024 18:40:55 +0100 Subject: [PATCH 400/761] Remove CI PG version matrix on Windows (#5632) Closes #4568 --- .github/workflows/build.yml | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 78cf887e9d..ce4cebfaa4 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -29,7 +29,7 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-22.04, windows-2022] + os: [ubuntu-22.04] pg_major: [16, 15, 14, 13, 12] config: [Release] test_tfm: [net8.0] @@ -39,7 +39,11 @@ jobs: config: Debug test_tfm: net8.0 - os: macos-12 - pg_major: 14 + pg_major: 16 + config: Release + test_tfm: net8.0 + - os: windows-2022 + pg_major: 16 config: Release test_tfm: net8.0 # - os: ubuntu-22.04 @@ -232,13 +236,15 @@ jobs: - name: Start PostgreSQL ${{ matrix.pg_major }} (MacOS) if: startsWith(matrix.os, 'macos') run: | + brew install postgresql@${{ matrix.pg_major }} + PGDATA=/usr/local/var/postgresql@${{ matrix.pg_major }} sudo sed -i '' 's/#ssl = off/ssl = on/' $PGDATA/postgresql.conf cp $GITHUB_WORKSPACE/.build/{server.crt,server.key} $PGDATA chmod 600 $PGDATA/{server.crt,server.key} - postgreService=$(brew services list | grep -oe "postgresql\S*") + postgreService=$(brew services list | grep -oe "postgresql@${{ matrix.pg_major }}\S*") brew services start $postgreService echo "Check PostgreSQL service is running" From d868089829af8b11ff69590e57fa2b6e92e86cc9 Mon Sep 17 
00:00:00 2001 From: Nikita Kazmin Date: Sun, 17 Mar 2024 21:32:29 +0300 Subject: [PATCH 401/761] Upgrade macos to 14 (#5549) Co-authored-by: Nino Floris --- .github/workflows/build.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index ce4cebfaa4..0e7398e744 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -38,7 +38,7 @@ jobs: pg_major: 16 config: Debug test_tfm: net8.0 - - os: macos-12 + - os: macos-14 pg_major: 16 config: Release test_tfm: net8.0 @@ -238,7 +238,7 @@ jobs: run: | brew install postgresql@${{ matrix.pg_major }} - PGDATA=/usr/local/var/postgresql@${{ matrix.pg_major }} + PGDATA=/opt/homebrew/var/postgresql@${{ matrix.pg_major }} sudo sed -i '' 's/#ssl = off/ssl = on/' $PGDATA/postgresql.conf cp $GITHUB_WORKSPACE/.build/{server.crt,server.key} $PGDATA @@ -247,6 +247,7 @@ jobs: postgreService=$(brew services list | grep -oe "postgresql@${{ matrix.pg_major }}\S*") brew services start $postgreService + export PATH="/opt/homebrew/opt/postgresql@16/bin:$PATH" echo "Check PostgreSQL service is running" i=5 COMMAND='pg_isready' From 4b7bf6b20c5f7a0b1fe48bbb2242e1e610e85bc8 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Sun, 17 Mar 2024 22:16:46 +0100 Subject: [PATCH 402/761] Improve binary exporter perf (#5630) Fixes #4944 --- src/Npgsql/Internal/NpgsqlReadBuffer.cs | 3 + src/Npgsql/Internal/PgReader.cs | 2 +- src/Npgsql/NpgsqlBinaryExporter.cs | 224 +++++++++++++----------- test/Npgsql.Tests/ReadBufferTests.cs | 12 +- 4 files changed, 135 insertions(+), 106 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlReadBuffer.cs b/src/Npgsql/Internal/NpgsqlReadBuffer.cs index e9dee8dfc4..f7807edc2d 100644 --- a/src/Npgsql/Internal/NpgsqlReadBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlReadBuffer.cs @@ -134,6 +134,9 @@ internal NpgsqlReadBuffer( #region I/O + public void Ensure(int count) + => Ensure(count, async: false, readingNotifications: 
false).GetAwaiter().GetResult(); + public ValueTask Ensure(int count, bool async) => Ensure(count, async, readingNotifications: false); diff --git a/src/Npgsql/Internal/PgReader.cs b/src/Npgsql/Internal/PgReader.cs index 54672f92e8..bd4de75124 100644 --- a/src/Npgsql/Internal/PgReader.cs +++ b/src/Npgsql/Internal/PgReader.cs @@ -783,7 +783,7 @@ static void ThrowArgumentOutOfRangeOfValue() public void Buffer(Size bufferRequirement) => Buffer(GetBufferRequirementByteCount(bufferRequirement)); - public void Buffer(int byteCount) => _buffer.Ensure(byteCount, async: false).GetAwaiter().GetResult(); + public void Buffer(int byteCount) => _buffer.Ensure(byteCount); public ValueTask BufferAsync(Size bufferRequirement, CancellationToken cancellationToken) => BufferAsync(GetBufferRequirementByteCount(bufferRequirement), cancellationToken); diff --git a/src/Npgsql/NpgsqlBinaryExporter.cs b/src/Npgsql/NpgsqlBinaryExporter.cs index 35dea6985d..9d82900364 100644 --- a/src/Npgsql/NpgsqlBinaryExporter.cs +++ b/src/Npgsql/NpgsqlBinaryExporter.cs @@ -149,7 +149,10 @@ async ValueTask StartRow(bool async, CancellationToken cancellationToken = // Consume and advance any active column. if (_column >= 0) { - await Commit(async).ConfigureAwait(false); + if (async) + await PgReader.CommitAsync(resuming: false).ConfigureAwait(false); + else + PgReader.Commit(resuming: false); _column++; } @@ -194,7 +197,8 @@ async ValueTask StartRow(bool async, CancellationToken cancellationToken = /// specify the type. /// /// The value of the column - public T Read() => Read(async: false).GetAwaiter().GetResult(); + public T Read() + => Read(null); /// /// Reads the current column, returns its value and moves ahead to the next column. 
@@ -207,10 +211,7 @@ async ValueTask StartRow(bool async, CancellationToken cancellationToken = /// /// The value of the column public ValueTask ReadAsync(CancellationToken cancellationToken = default) - => Read(async: true, cancellationToken); - - ValueTask Read(bool async, CancellationToken cancellationToken = default) - => Read(async, null, cancellationToken); + => ReadAsync(null, cancellationToken); /// /// Reads the current column, returns its value according to and @@ -225,7 +226,8 @@ ValueTask Read(bool async, CancellationToken cancellationToken = default) /// /// The .NET type of the column to be read. /// The value of the column - public T Read(NpgsqlDbType type) => Read(async: false, type, CancellationToken.None).GetAwaiter().GetResult(); + public T Read(NpgsqlDbType type) + => Read((NpgsqlDbType?)type); /// /// Reads the current column, returns its value according to and @@ -244,42 +246,28 @@ ValueTask Read(bool async, CancellationToken cancellationToken = default) /// The .NET type of the column to be read. /// The value of the column public ValueTask ReadAsync(NpgsqlDbType type, CancellationToken cancellationToken = default) - => Read(async: true, type, cancellationToken); + => ReadAsync((NpgsqlDbType?)type, cancellationToken); - async ValueTask Read(bool async, NpgsqlDbType? type, CancellationToken cancellationToken) + T Read(NpgsqlDbType? 
type) { ThrowIfNotOnRow(); - using var registration = _connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); - if (!IsInitializedAndAtStart) - await MoveNextColumn(async, resumableOp: false).ConfigureAwait(false); + MoveNextColumn(resumableOp: false); + var reader = PgReader; try { - var reader = PgReader; if (reader.FieldSize is -1) - return DbNullOrThrow(); + return DbNullOrThrow(); - var info = GetInfo(type, out var asObject); + var info = GetInfo(typeof(T), type, out var asObject); - T result; - if (async) - { - await reader.StartReadAsync(info.BufferRequirement, cancellationToken).ConfigureAwait(false); - result = asObject - ? (T)await info.Converter.ReadAsObjectAsync(reader, cancellationToken).ConfigureAwait(false) - : await info.GetConverter().ReadAsync(reader, cancellationToken).ConfigureAwait(false); - await reader.EndReadAsync().ConfigureAwait(false); - } - else - { - reader.StartRead(info.BufferRequirement); - result = asObject - ? (T)info.Converter.ReadAsObject(reader) - : info.GetConverter().Read(reader); - reader.EndRead(); - } + reader.StartRead(info.BufferRequirement); + var result = asObject + ? (T)info.Converter.ReadAsObject(reader) + : info.GetConverter().Read(reader); + reader.EndRead(); return result; } @@ -288,48 +276,82 @@ async ValueTask Read(bool async, NpgsqlDbType? type, CancellationToken can // Don't delay committing the current column, just do it immediately (as opposed to on the next action: Read, IsNull, Skip). // Zero length columns would otherwise create an edge-case where we'd have to immediately commit as we won't know whether we're at the end. // To guarantee the commit happens in that case we would still need this try finally, at which point it's just better to be consistent. - await Commit(async).ConfigureAwait(false); + reader.Commit(resuming: false); } + } + + async ValueTask ReadAsync(NpgsqlDbType? 
type, CancellationToken cancellationToken) + { + ThrowIfNotOnRow(); + + using var registration = _connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); - static T DbNullOrThrow() + if (!IsInitializedAndAtStart) + await MoveNextColumnAsync(resumableOp: false).ConfigureAwait(false); + + var reader = PgReader; + try { - // When T is a Nullable, we support returning null - if (default(T) is null && typeof(T).IsValueType) - return default!; - throw new InvalidCastException("Column is null"); - } + if (reader.FieldSize is -1) + return DbNullOrThrow(); - PgConverterInfo GetInfo(NpgsqlDbType? type, out bool asObject) + var info = GetInfo(typeof(T), type, out var asObject); + + await reader.StartReadAsync(info.BufferRequirement, cancellationToken).ConfigureAwait(false); + var result = asObject + ? (T)await info.Converter.ReadAsObjectAsync(reader, cancellationToken).ConfigureAwait(false) + : await info.GetConverter().ReadAsync(reader, cancellationToken).ConfigureAwait(false); + await reader.EndReadAsync().ConfigureAwait(false); + + return result; + } + finally { - ref var cachedInfo = ref _columnInfoCache[_column]; - var converterInfo = cachedInfo.IsDefault ? cachedInfo = CreateConverterInfo(typeof(T), type) : cachedInfo; - asObject = converterInfo.IsBoxingConverter; - return converterInfo; + // Don't delay committing the current column, just do it immediately (as opposed to on the next action: Read, IsNull, Skip). + // Zero length columns would otherwise create an edge-case where we'd have to immediately commit as we won't know whether we're at the end. + // To guarantee the commit happens in that case we would still need this try finally, at which point it's just better to be consistent. 
+ await reader.CommitAsync(resuming: false).ConfigureAwait(false); } + } + + static T DbNullOrThrow() + { + // When T is a Nullable, we support returning null + if (default(T) is null && typeof(T).IsValueType) + return default!; + throw new InvalidCastException("Column is null"); + } - PgConverterInfo CreateConverterInfo(Type type, NpgsqlDbType? npgsqlDbType = null) + PgConverterInfo GetInfo(Type type, NpgsqlDbType? npgsqlDbType, out bool asObject) + { + ref var cachedInfo = ref _columnInfoCache[_column]; + var converterInfo = cachedInfo.IsDefault ? cachedInfo = CreateConverterInfo(type, npgsqlDbType) : cachedInfo; + asObject = converterInfo.IsBoxingConverter; + return converterInfo; + } + + PgConverterInfo CreateConverterInfo(Type type, NpgsqlDbType? npgsqlDbType = null) + { + var options = _connector.SerializerOptions; + PgTypeId? pgTypeId = null; + if (npgsqlDbType.HasValue) { - var options = _connector.SerializerOptions; - PgTypeId? pgTypeId = null; - if (npgsqlDbType.HasValue) - { - pgTypeId = npgsqlDbType.Value.ToDataTypeName() is { } name - ? options.GetCanonicalTypeId(name) - // Handle plugin types via lookup. - : GetRepresentationalOrDefault(npgsqlDbType.Value.ToUnqualifiedDataTypeNameOrThrow()); - } - var info = options.GetTypeInfo(type, pgTypeId) - ?? throw new NotSupportedException($"Reading is not supported for type '{type}'{(npgsqlDbType is null ? "" : $" and NpgsqlDbType '{npgsqlDbType}'")}"); + pgTypeId = npgsqlDbType.Value.ToDataTypeName() is { } name + ? options.GetCanonicalTypeId(name) + // Handle plugin types via lookup. + : GetRepresentationalOrDefault(npgsqlDbType.Value.ToUnqualifiedDataTypeNameOrThrow()); + } + var info = options.GetTypeInfo(type, pgTypeId) + ?? throw new NotSupportedException($"Reading is not supported for type '{type}'{(npgsqlDbType is null ? "" : $" and NpgsqlDbType '{npgsqlDbType}'")}"); - // Binary export has no type info so we only do caller-directed interpretation of data. 
- return info.Bind(new Field("?", - info.PgTypeId ?? ((PgResolverTypeInfo)info).GetDefaultResolution(null).PgTypeId, -1), DataFormat.Binary); + // Binary export has no type info so we only do caller-directed interpretation of data. + return info.Bind(new Field("?", + info.PgTypeId ?? ((PgResolverTypeInfo)info).GetDefaultResolution(null).PgTypeId, -1), DataFormat.Binary); - PgTypeId GetRepresentationalOrDefault(string dataTypeName) - { - var type = options.DatabaseInfo.GetPostgresType(dataTypeName); - return options.ToCanonicalTypeId(type.GetRepresentationalType()); - } + PgTypeId GetRepresentationalOrDefault(string dataTypeName) + { + var type = options.DatabaseInfo.GetPostgresType(dataTypeName); + return options.ToCanonicalTypeId(type.GetRepresentationalType()); } } @@ -342,7 +364,7 @@ public bool IsNull { ThrowIfNotOnRow(); if (!IsInitializedAndAtStart) - return MoveNextColumn(async: false, resumableOp: true).GetAwaiter().GetResult() is -1; + return MoveNextColumn(resumableOp: true) is -1; return PgReader.FieldSize is - 1; } @@ -351,24 +373,29 @@ public bool IsNull /// /// Skips the current column without interpreting its value. /// - public void Skip() => Skip(async: false).GetAwaiter().GetResult(); + public void Skip() + { + ThrowIfNotOnRow(); + + if (!IsInitializedAndAtStart) + MoveNextColumn(resumableOp: false); + + PgReader.Commit(resuming: false); + } /// /// Skips the current column without interpreting its value. 
/// - public Task SkipAsync(CancellationToken cancellationToken = default) - => Skip(true, cancellationToken); - - async Task Skip(bool async, CancellationToken cancellationToken = default) + public async Task SkipAsync(CancellationToken cancellationToken = default) { ThrowIfNotOnRow(); using var registration = _connector.StartNestedCancellableOperation(cancellationToken); if (!IsInitializedAndAtStart) - await MoveNextColumn(async, resumableOp: false).ConfigureAwait(false); + await MoveNextColumnAsync(resumableOp: false).ConfigureAwait(false); - await Commit(async).ConfigureAwait(false); + await PgReader.CommitAsync(resuming: false).ConfigureAwait(false); } #endregion @@ -377,26 +404,27 @@ async Task Skip(bool async, CancellationToken cancellationToken = default) bool IsInitializedAndAtStart => PgReader.Initialized && (PgReader.FieldSize is -1 || PgReader.FieldOffset is 0); - ValueTask Commit(bool async) + int MoveNextColumn(bool resumableOp) { - if (async) - return PgReader.CommitAsync(resuming: false); - PgReader.Commit(resuming: false); - return new(); + + if (_column + 1 == NumColumns) + ThrowHelper.ThrowInvalidOperationException("No more columns left in the current row"); + _column++; + _buf.Ensure(sizeof(int)); + var columnLen = _buf.ReadInt32(); + PgReader.Init(columnLen, DataFormat.Binary, resumableOp); + return PgReader.FieldSize; } - async ValueTask MoveNextColumn(bool async, bool resumableOp) + async ValueTask MoveNextColumnAsync(bool resumableOp) { - if (async) - await PgReader.CommitAsync(resuming: false).ConfigureAwait(false); - else - PgReader.Commit(resuming: false); + await PgReader.CommitAsync(resuming: false).ConfigureAwait(false); if (_column + 1 == NumColumns) ThrowHelper.ThrowInvalidOperationException("No more columns left in the current row"); _column++; - await _buf.Ensure(4, async).ConfigureAwait(false); + await _buf.Ensure(sizeof(int), async: true).ConfigureAwait(false); var columnLen = _buf.ReadInt32(); PgReader.Init(columnLen, 
DataFormat.Binary, resumableOp); return PgReader.FieldSize; @@ -471,7 +499,7 @@ async ValueTask DisposeAsync(bool async) Expect(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); Expect(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); } - catch (OperationCanceledException e) when (e.InnerException is PostgresException pg && pg.SqlState == PostgresErrorCodes.QueryCanceled) + catch (OperationCanceledException e) when (e.InnerException is PostgresException { SqlState: PostgresErrorCodes.QueryCanceled }) { LogMessages.CopyOperationCancelled(_copyLogger, _connector.Id); } @@ -483,25 +511,23 @@ async ValueTask DisposeAsync(bool async) _connector.EndUserAction(); Cleanup(); - } - -#pragma warning disable CS8625 - void Cleanup() - { - Debug.Assert(!_isDisposed); - var connector = _connector; - if (connector != null) + void Cleanup() { - connector.CurrentCopyOperation = null; - _connector.Connection?.EndBindingScope(ConnectorBindingScope.Copy); - _connector = null; - } + Debug.Assert(!_isDisposed); + var connector = _connector; - _buf = null; - _isDisposed = true; + if (!ReferenceEquals(connector, null)) + { + connector.CurrentCopyOperation = null; + _connector.Connection?.EndBindingScope(ConnectorBindingScope.Copy); + _connector = null!; + } + + _buf = null!; + _isDisposed = true; + } } -#pragma warning restore CS8625 #endregion } diff --git a/test/Npgsql.Tests/ReadBufferTests.cs b/test/Npgsql.Tests/ReadBufferTests.cs index 7d33bf68e1..13c20f3d41 100644 --- a/test/Npgsql.Tests/ReadBufferTests.cs +++ b/test/Npgsql.Tests/ReadBufferTests.cs @@ -16,14 +16,14 @@ public void Skip() for (byte i = 0; i < 50; i++) Writer.WriteByte(i); - ReadBuffer.Ensure(10, async: false).GetAwaiter().GetResult(); + ReadBuffer.Ensure(10); ReadBuffer.Skip(7); Assert.That(ReadBuffer.ReadByte(), Is.EqualTo(7)); ReadBuffer.Skip(10); - ReadBuffer.Ensure(1, async: false).GetAwaiter().GetResult(); + ReadBuffer.Ensure(1); Assert.That(ReadBuffer.ReadByte(), 
Is.EqualTo(18)); ReadBuffer.Skip(20); - ReadBuffer.Ensure(1, async: false).GetAwaiter().GetResult(); + ReadBuffer.Ensure(1); Assert.That(ReadBuffer.ReadByte(), Is.EqualTo(39)); } @@ -35,7 +35,7 @@ public void ReadSingle() Array.Reverse(bytes); Writer.Write(bytes); - ReadBuffer.Ensure(4, async: false).GetAwaiter().GetResult(); + ReadBuffer.Ensure(4); Assert.That(ReadBuffer.ReadSingle(), Is.EqualTo(expected)); } @@ -47,7 +47,7 @@ public void ReadDouble() Array.Reverse(bytes); Writer.Write(bytes); - ReadBuffer.Ensure(8, async: false).GetAwaiter().GetResult(); + ReadBuffer.Ensure(8); Assert.That(ReadBuffer.ReadDouble(), Is.EqualTo(expected)); } @@ -60,7 +60,7 @@ public void ReadNullTerminatedString_buffered_only() .Write(NpgsqlWriteBuffer.UTF8Encoding.GetBytes(new string("bar"))) .WriteByte(0); - ReadBuffer.Ensure(1, async: false); + ReadBuffer.Ensure(1); Assert.That(ReadBuffer.ReadNullTerminatedString(), Is.EqualTo("foo")); Assert.That(ReadBuffer.ReadNullTerminatedString(), Is.EqualTo("bar")); From 56ee2f0c82bb424fc1da137df0c7ae6c18fd605f Mon Sep 17 00:00:00 2001 From: Alexander Vagner Date: Fri, 22 Mar 2024 20:49:09 +0700 Subject: [PATCH 403/761] Fix ipv6 string NpsqlInet constructor (#5639) Fixes #5638 --- src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs | 31 +++++++++++++++------ test/Npgsql.Tests/TypesTests.cs | 39 +++++++++++++++++++++++++++ 2 files changed, 62 insertions(+), 8 deletions(-) diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs b/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs index b16fe8ccea..753f0f0919 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs @@ -448,9 +448,7 @@ public readonly record struct NpgsqlInet public NpgsqlInet(IPAddress address, byte netmask) { - if (address.AddressFamily != AddressFamily.InterNetwork && address.AddressFamily != AddressFamily.InterNetworkV6) - throw new ArgumentException("Only IPAddress of InterNetwork or InterNetworkV6 address families are accepted", nameof(address)); - + 
CheckAddressFamily(address); Address = address; Netmask = netmask; } @@ -461,12 +459,23 @@ public NpgsqlInet(IPAddress address) } public NpgsqlInet(string addr) - => (Address, Netmask) = addr.Split('/') switch + { + switch (addr.Split('/')) { - { Length: 2 } segments => (IPAddress.Parse(segments[0]), byte.Parse(segments[1])), - { Length: 1 } segments => (IPAddress.Parse(segments[0]), (byte)32), - _ => throw new FormatException("Invalid number of parts in CIDR specification") - }; + case { Length: 2 } segments: + (Address, Netmask) = (IPAddress.Parse(segments[0]), byte.Parse(segments[1])); + break; + case { Length: 1 } segments: + var ipAddr = IPAddress.Parse(segments[0]); + CheckAddressFamily(ipAddr); + (Address, Netmask) = ( + ipAddr, + ipAddr.AddressFamily == AddressFamily.InterNetworkV6 ? (byte)128 : (byte)32); + break; + default: + throw new FormatException("Invalid number of parts in CIDR specification"); + } + } public override string ToString() => (Address.AddressFamily == AddressFamily.InterNetwork && Netmask == 32) || @@ -485,6 +494,12 @@ public void Deconstruct(out IPAddress address, out byte netmask) address = Address; netmask = Netmask; } + + static void CheckAddressFamily(IPAddress address) + { + if (address.AddressFamily != AddressFamily.InterNetwork && address.AddressFamily != AddressFamily.InterNetworkV6) + throw new ArgumentException("Only IPAddress of InterNetwork or InterNetworkV6 address families are accepted", nameof(address)); + } } /// diff --git a/test/Npgsql.Tests/TypesTests.cs b/test/Npgsql.Tests/TypesTests.cs index 610d640a02..0b80062de6 100644 --- a/test/Npgsql.Tests/TypesTests.cs +++ b/test/Npgsql.Tests/TypesTests.cs @@ -209,4 +209,43 @@ public void NpgsqlInet() var v = new NpgsqlInet(IPAddress.Parse("2001:1db8:85a3:1142:1000:8a2e:1370:7334"), 32); Assert.That(v.ToString(), Is.EqualTo("2001:1db8:85a3:1142:1000:8a2e:1370:7334/32")); } + + [Test] + public void NpgsqlInet_parse_ipv4() + { + var ipv4 = new NpgsqlInet("192.168.1.1/8"); + 
Assert.That(ipv4.Address, Is.EqualTo(IPAddress.Parse("192.168.1.1"))); + Assert.That(ipv4.Netmask, Is.EqualTo(8)); + + ipv4 = new NpgsqlInet("192.168.1.1/32"); + Assert.That(ipv4.Address, Is.EqualTo(IPAddress.Parse("192.168.1.1"))); + Assert.That(ipv4.Netmask, Is.EqualTo(32)); + } + + [Test] + [IssueLink("https://github.com/npgsql/npgsql/issues/5638")] + public void NpgsqlInet_parse_ipv6() + { + var ipv6 = new NpgsqlInet("2001:0000:130F:0000:0000:09C0:876A:130B/32"); + Assert.That(ipv6.Address, Is.EqualTo(IPAddress.Parse("2001:0000:130F:0000:0000:09C0:876A:130B"))); + Assert.That(ipv6.Netmask, Is.EqualTo(32)); + + ipv6 = new NpgsqlInet("2001:0000:130F:0000:0000:09C0:876A:130B"); + Assert.That(ipv6.Address, Is.EqualTo(IPAddress.Parse("2001:0000:130F:0000:0000:09C0:876A:130B"))); + Assert.That(ipv6.Netmask, Is.EqualTo(128)); + } + + [Test] + public void NpgsqlInet_ToString_ipv4() + { + Assert.That(new NpgsqlInet("192.168.1.1/8").ToString(), Is.EqualTo("192.168.1.1/8")); + Assert.That(new NpgsqlInet("192.168.1.1/32").ToString(), Is.EqualTo("192.168.1.1")); + } + + [Test] + public void NpgsqlInet_ToString_ipv6() + { + Assert.That(new NpgsqlInet("2001:0:130f::9c0:876a:130b/32").ToString(), Is.EqualTo("2001:0:130f::9c0:876a:130b/32")); + Assert.That(new NpgsqlInet("2001:0:130f::9c0:876a:130b/128").ToString(), Is.EqualTo("2001:0:130f::9c0:876a:130b")); + } } From 4c9ea33aea6fb635dd47f3d4ff095ca207001985 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Mon, 25 Mar 2024 17:02:01 +0100 Subject: [PATCH 404/761] Fix batch command type (#5643) Fixes #5642 --- src/Npgsql/NpgsqlCommand.cs | 8 ++++---- test/Npgsql.Tests/CommandTests.cs | 28 ++++++++++++++++++++++++++++ 2 files changed, 32 insertions(+), 4 deletions(-) diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index eaf11d51ff..7df8a71bf5 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -655,7 +655,7 @@ Task Prepare(bool async, CancellationToken cancellationToken = default) 
{ foreach (var batchCommand in InternalBatchCommands) { - batchCommand._parameters?.ProcessParameters(connector.SerializerOptions, validateValues: false, CommandType); + batchCommand._parameters?.ProcessParameters(connector.SerializerOptions, validateValues: false, batchCommand.CommandType); ProcessRawQuery(connector.SqlQueryParser, connector.UseConformingStrings, batchCommand); needToPrepare = batchCommand.ExplicitPrepare(connector) || needToPrepare; @@ -1396,7 +1396,7 @@ internal virtual async ValueTask ExecuteReader(bool async, Com goto case false; } - batchCommand._parameters?.ProcessParameters(connector.SerializerOptions, validateParameterValues, CommandType); + batchCommand._parameters?.ProcessParameters(connector.SerializerOptions, validateParameterValues, batchCommand.CommandType); } } else @@ -1425,7 +1425,7 @@ internal virtual async ValueTask ExecuteReader(bool async, Com { var batchCommand = InternalBatchCommands[i]; - batchCommand._parameters?.ProcessParameters(connector.SerializerOptions, validateParameterValues, CommandType); + batchCommand._parameters?.ProcessParameters(connector.SerializerOptions, validateParameterValues, batchCommand.CommandType); ProcessRawQuery(connector.SqlQueryParser, connector.UseConformingStrings, batchCommand); if (connector.Settings.MaxAutoPrepare > 0 && batchCommand.TryAutoPrepare(connector)) @@ -1531,7 +1531,7 @@ internal virtual async ValueTask ExecuteReader(bool async, Com { foreach (var batchCommand in InternalBatchCommands) { - batchCommand._parameters?.ProcessParameters(dataSource.SerializerOptions, validateValues: true, CommandType); + batchCommand._parameters?.ProcessParameters(dataSource.SerializerOptions, validateValues: true, batchCommand.CommandType); ProcessRawQuery(null, standardConformingStrings: true, batchCommand); } } diff --git a/test/Npgsql.Tests/CommandTests.cs b/test/Npgsql.Tests/CommandTests.cs index 9c42da9c0c..9bb2b3c6f7 100644 --- a/test/Npgsql.Tests/CommandTests.cs +++ 
b/test/Npgsql.Tests/CommandTests.cs @@ -547,6 +547,34 @@ public async Task CloseConnection_with_exception() #endregion + [Test] + public async Task StoredProcedure_positional_parameters_works() + { + if (IsMultiplexing) + return; + + await using var connection = await DataSource.OpenConnectionAsync(); + await using var transaction = await connection.BeginTransactionAsync(IsolationLevel.Serializable); + await using var batch = new NpgsqlBatch(connection, transaction) + { + BatchCommands = + { + new("unknown_procedure") + { + CommandType = CommandType.StoredProcedure, + Parameters = + { + new() { Value = "" }, + new() { DbType = DbType.Int64, Direction = ParameterDirection.Output } + } + }, + new ("COMMIT") + } + }; + + Assert.ThrowsAsync(() => batch.ExecuteNonQueryAsync()); + } + [Test] public async Task SingleRow([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepared)] PrepareOrNot prepare) { From cda190b34b78d8436f1de27ed4311ad1d576e612 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Mon, 25 Mar 2024 22:11:16 +0100 Subject: [PATCH 405/761] Consistently check disposed, closed and in result status (#5471) --- .../BackendMessages/RowDescriptionMessage.cs | 13 +- src/Npgsql/NpgsqlDataReader.cs | 255 ++++++++---------- src/Npgsql/ThrowHelper.cs | 4 + 3 files changed, 123 insertions(+), 149 deletions(-) diff --git a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs index 1dd1045e21..87636ca039 100644 --- a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs +++ b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs @@ -1,6 +1,7 @@ using System; using System.Collections.Generic; using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; using System.Globalization; using System.Runtime.CompilerServices; using System.Threading; @@ -126,15 +127,19 @@ internal static RowDescriptionMessage CreateForReplication( return msg; } - public FieldDescription this[int index] + public FieldDescription this[int ordinal] { 
[MethodImpl(MethodImplOptions.AggressiveInlining)] get { - Debug.Assert(index < Count); - Debug.Assert(_fields[index] != null); + if ((uint)ordinal < (uint)Count) + { + Debug.Assert(_fields[ordinal] != null); + return _fields[ordinal]!; + } - return _fields[index]!; + ThrowHelper.ThrowIndexOutOfRangeException("Ordinal must be between 0 and " + (Count - 1)); + return default!; } } diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 59293e989b..56d0fd35c4 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -160,7 +160,7 @@ internal void Init( /// public override bool Read() { - CheckClosedOrDisposed(); + ThrowIfClosedOrDisposed(); return TryRead()?.Result ?? Read(false).GetAwaiter().GetResult(); } @@ -173,7 +173,7 @@ public override bool Read() /// A task representing the asynchronous operation. public override Task ReadAsync(CancellationToken cancellationToken) { - CheckClosedOrDisposed(); + ThrowIfClosedOrDisposed(); return TryRead() ?? Read(async: true, cancellationToken); } @@ -197,7 +197,7 @@ public override Task ReadAsync(CancellationToken cancellationToken) if (_behavior.HasFlag(CommandBehavior.SingleRow) || !_isRowBuffered) return null; - ConsumeRowNonSequential(); + ConsumeBufferedRow(); const int headerSize = sizeof(byte) + sizeof(int); var buffer = Buffer; @@ -307,8 +307,12 @@ static async ValueTask ReadMessageSequential(NpgsqlConnector co /// Advances the reader to the next result when reading the results of a batch of statements. /// /// - public override bool NextResult() => (_isSchemaOnly ? NextResultSchemaOnly(false) : NextResult(false)) - .GetAwaiter().GetResult(); + public override bool NextResult() + { + ThrowIfClosedOrDisposed(); + return (_isSchemaOnly ? NextResultSchemaOnly(false) : NextResult(false)) + .GetAwaiter().GetResult(); + } /// /// This is the asynchronous version of NextResult. @@ -318,9 +322,12 @@ public override bool NextResult() => (_isSchemaOnly ? 
NextResultSchemaOnly(false /// /// A task representing the asynchronous operation. public override Task NextResultAsync(CancellationToken cancellationToken) - => _isSchemaOnly + { + ThrowIfClosedOrDisposed(); + return _isSchemaOnly ? NextResultSchemaOnly(async: true, cancellationToken: cancellationToken) : NextResult(async: true, cancellationToken: cancellationToken); + } /// /// Internal implementation of NextResult @@ -328,8 +335,6 @@ public override Task NextResultAsync(CancellationToken cancellationToken) async Task NextResult(bool async, bool isConsuming = false, CancellationToken cancellationToken = default) { Debug.Assert(!_isSchemaOnly); - CheckClosedOrDisposed(); - if (State is ReaderState.Consumed) return false; @@ -609,7 +614,7 @@ void PopulateOutputParameters() var pending = new Queue(); var taken = new List(); - for (var i = 0; i < FieldCount; i++) + for (var i = 0; i < ColumnCount; i++) { if (Command.Parameters.TryGetValue(GetName(i), out var p) && p.IsOutputDirection) { @@ -647,26 +652,13 @@ void PopulateOutputParameters() async Task NextResultSchemaOnly(bool async, bool isConsuming = false, CancellationToken cancellationToken = default) { Debug.Assert(_isSchemaOnly); + if (State is ReaderState.Consumed) + return false; using var registration = isConsuming ? default : Connector.StartNestedCancellableOperation(cancellationToken); try { - switch (State) - { - case ReaderState.BeforeResult: - case ReaderState.InResult: - case ReaderState.BetweenResults: - break; - case ReaderState.Consumed: - case ReaderState.Closed: - case ReaderState.Disposed: - return false; - default: - ThrowHelper.ThrowArgumentOutOfRangeException(); - return false; - } - for (StatementIndex++; StatementIndex < _statements.Count; StatementIndex++) { var statement = _statements[StatementIndex]; @@ -876,7 +868,7 @@ void HandleUncommon(IBackendMessage msg) /// /// Gets a value indicating whether the data reader is closed. 
/// - public override bool IsClosed => State == ReaderState.Closed || State == ReaderState.Disposed; + public override bool IsClosed => State is ReaderState.Closed or ReaderState.Disposed; /// /// Gets the number of rows changed, inserted, or deleted by execution of the SQL statement. @@ -912,18 +904,26 @@ public override int RecordsAffected /// which exposes an aggregation across all statements. /// [Obsolete("Use the new DbBatch API")] - public IReadOnlyList Statements => _statements.AsReadOnly(); + public IReadOnlyList Statements + { + get + { + ThrowIfClosedOrDisposed(); + return _statements.AsReadOnly(); + } + } /// /// Gets a value that indicates whether this DbDataReader contains one or more rows. /// public override bool HasRows - => State switch + { + get { - ReaderState.Closed => throw new InvalidOperationException("Invalid attempt to call HasRows when reader is closed."), - ReaderState.Disposed => throw new ObjectDisposedException(nameof(NpgsqlDataReader)), - _ => _hasRows - }; + ThrowIfClosedOrDisposed(); + return _hasRows; + } + } /// /// Indicates whether the reader is currently positioned on a row, i.e. whether reading a @@ -932,7 +932,14 @@ public override bool HasRows /// return true even if attempting to read a column will fail, e.g. before /// has been called /// - public bool IsOnRow => State == ReaderState.InResult; + public bool IsOnRow + { + get + { + ThrowIfClosedOrDisposed(); + return State is ReaderState.InResult; + } + } /// /// Gets the name of the column, given the zero-based column ordinal. @@ -948,7 +955,7 @@ public override int FieldCount { get { - CheckClosedOrDisposed(); + ThrowIfClosedOrDisposed(); return RowDescription?.Count ?? 0; } } @@ -1314,11 +1321,10 @@ internal async Task Cleanup(bool async, bool connectionClosing = false, bool isD /// The number of instances of in the array. 
public override int GetValues(object[] values) { - if (values == null) - throw new ArgumentNullException(nameof(values)); - CheckResultSet(); + ThrowIfNotInResult(); + ArgumentNullException.ThrowIfNull(values); - var count = Math.Min(FieldCount, values.Length); + var count = Math.Min(ColumnCount, values.Length); for (var i = 0; i < count; i++) values[i] = GetValue(i); return count; @@ -1360,16 +1366,17 @@ public override int GetValues(object[] values) /// A data reader. public new NpgsqlNestedDataReader GetData(int ordinal) { + ThrowIfNotInResult(); + var field = RowDescription[ordinal]; if (_isSequential) - throw new NotSupportedException("GetData() not supported in sequential mode."); + ThrowHelper.ThrowNotSupportedException("GetData() not supported in sequential mode."); - var field = CheckRowAndGetField(ordinal); var type = field.PostgresType; var isArray = type is PostgresArrayType; var elementType = isArray ? ((PostgresArrayType)type).Element : type; var compositeType = elementType as PostgresCompositeType; if (field.DataFormat is DataFormat.Text || (elementType.InternalName != "record" && compositeType == null)) - throw new InvalidCastException("GetData() not supported for type " + field.TypeDisplayName); + ThrowHelper.ThrowInvalidCastException("GetData() not supported for type " + field.TypeDisplayName); var columnLength = SeekToColumn(async: false, ordinal, field.DataFormat, resumableOp: true).GetAwaiter().GetResult(); if (columnLength is -1) @@ -1410,14 +1417,16 @@ public override int GetValues(object[] values) /// The actual number of bytes read. public override long GetBytes(int ordinal, long dataOffset, byte[]? 
buffer, int bufferOffset, int length) { + ThrowIfNotInResult(); + var field = RowDescription[ordinal]; + if (dataOffset is < 0 or > int.MaxValue) - throw new ArgumentOutOfRangeException(nameof(dataOffset), dataOffset, $"dataOffset must be between {0} and {int.MaxValue}"); + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(dataOffset), "dataOffset must be between 0 and {0}", int.MaxValue); if (buffer != null && (bufferOffset < 0 || bufferOffset >= buffer.Length + 1)) - throw new IndexOutOfRangeException($"bufferOffset must be between 0 and {buffer.Length}"); + ThrowHelper.ThrowIndexOutOfRangeException("bufferOffset must be between 0 and {0}", buffer.Length); if (buffer != null && (length < 0 || length > buffer.Length - bufferOffset)) - throw new IndexOutOfRangeException($"length must be between 0 and {buffer.Length - bufferOffset}"); + ThrowHelper.ThrowIndexOutOfRangeException("bufferOffset must be between 0 and {0}", buffer.Length - bufferOffset); - var field = CheckRowAndGetField(ordinal); var columnLength = SeekToColumn(async: false, ordinal, field.DataFormat, resumableOp: true).GetAwaiter().GetResult(); if (columnLength == -1) ThrowHelper.ThrowInvalidCastException_NoValue(field); @@ -1471,21 +1480,22 @@ public Task GetStreamAsync(int ordinal, CancellationToken cancellationTo /// The actual number of characters read. public override long GetChars(int ordinal, long dataOffset, char[]? 
buffer, int bufferOffset, int length) { - if (dataOffset is < 0 or > int.MaxValue) - throw new ArgumentOutOfRangeException(nameof(dataOffset), dataOffset, $"dataOffset must be between 0 and {int.MaxValue}"); - if (buffer != null && (bufferOffset < 0 || bufferOffset >= buffer.Length + 1)) - throw new IndexOutOfRangeException($"bufferOffset must be between 0 and {buffer.Length}"); - if (buffer != null && (length < 0 || length > buffer.Length - bufferOffset)) - throw new IndexOutOfRangeException($"length must be between 0 and {buffer.Length - bufferOffset}"); - + ThrowIfNotInResult(); // Check whether we can do resumable reads. var field = GetInfo(ordinal, typeof(GetChars), out var converter, out var bufferRequirement, out var asObject); if (converter is not IResumableRead { Supported: true }) - throw new NotSupportedException("The GetChars method is not supported for this column type"); + ThrowHelper.ThrowNotSupportedException("The GetChars method is not supported for this column type"); + + if (dataOffset is < 0 or > int.MaxValue) + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(dataOffset), "dataOffset must be between 0 and {0}", int.MaxValue); + if (buffer != null && (bufferOffset < 0 || bufferOffset >= buffer.Length + 1)) + ThrowHelper.ThrowIndexOutOfRangeException("bufferOffset must be between 0 and {0}", buffer.Length); + if (buffer != null && (length < 0 || length > buffer.Length - bufferOffset)) + ThrowHelper.ThrowIndexOutOfRangeException("bufferOffset must be between 0 and {0}", buffer.Length - bufferOffset); var columnLength = SeekToColumn(async: false, ordinal, field, resumableOp: true).GetAwaiter().GetResult(); if (columnLength == -1) - ThrowHelper.ThrowInvalidCastException_NoValue(CheckRowAndGetField(ordinal)); + ThrowHelper.ThrowInvalidCastException_NoValue(RowDescription[ordinal]); dataOffset = buffer is null ? 
0 : dataOffset; PgReader.InitCharsRead(checked((int)dataOffset), @@ -1550,10 +1560,10 @@ public override Task GetFieldValueAsync(int ordinal, CancellationToken can async ValueTask Core(int ordinal, CancellationToken cancellationToken) { - using var registration = Connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); - + ThrowIfNotInResult(); var field = GetInfo(ordinal, typeof(T), out var converter, out var bufferRequirement, out var asObject); + using var registration = Connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); var columnLength = await SeekToColumn(async: true, ordinal, field).ConfigureAwait(false); if (columnLength is -1) return DbNullValueOrThrow(ordinal); @@ -1596,6 +1606,8 @@ async Task GetStream(int ordinal, CancellationToken cancellationToken) T GetFieldValueCore(int ordinal) { + ThrowIfNotInResult(); + // The only statically mapped converter, it always exists. if (typeof(T) == typeof(Stream)) return GetStream(ordinal); @@ -1649,6 +1661,7 @@ T GetStream(int ordinal) /// The value of the specified column. public override object GetValue(int ordinal) { + ThrowIfNotInResult(); var field = GetDefaultInfo(ordinal, out var converter, out var bufferRequirement); var columnLength = _isSequential @@ -1681,7 +1694,11 @@ public override object GetValue(int ordinal) /// The zero-based column ordinal. /// true if the specified column is equivalent to ; otherwise false. public override bool IsDBNull(int ordinal) - => SeekToColumn(async: false, ordinal, CheckRowAndGetField(ordinal).DataFormat, resumableOp: true).GetAwaiter().GetResult() is -1; + { + ThrowIfNotInResult(); + return SeekToColumn(async: false, ordinal, RowDescription[ordinal].DataFormat, resumableOp: true).GetAwaiter() + .GetResult() is -1; + } /// /// An asynchronous version of , which gets a value that indicates whether the column contains non-existent or missing values. 
@@ -1701,8 +1718,9 @@ public override Task IsDBNullAsync(int ordinal, CancellationToken cancella async Task Core(int ordinal, CancellationToken cancellationToken) { + ThrowIfNotInResult(); using var registration = Connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); - return await SeekToColumn(async: true, ordinal, CheckRowAndGetField(ordinal).DataFormat, resumableOp: true).ConfigureAwait(false) is -1; + return await SeekToColumn(async: true, ordinal, RowDescription[ordinal].DataFormat, resumableOp: true).ConfigureAwait(false) is -1; } } @@ -1717,9 +1735,9 @@ async Task Core(int ordinal, CancellationToken cancellationToken) /// The zero-based column ordinal. public override int GetOrdinal(string name) { + ThrowIfClosedOrDisposed(); if (string.IsNullOrEmpty(name)) ThrowHelper.ThrowArgumentException($"{nameof(name)} cannot be empty", nameof(name)); - CheckClosedOrDisposed(); if (RowDescription is null) ThrowHelper.ThrowInvalidOperationException("No resultset is currently being traversed"); return RowDescription.GetFieldIndex(name); @@ -2106,7 +2124,7 @@ Task ConsumeRow(bool async) return ConsumeRowSequential(async); // We get here, if we're in a non-sequential mode (or the row is already in the buffer) - ConsumeRowNonSequential(); + ConsumeBufferedRow(); return Task.CompletedTask; async Task ConsumeRowSequential(bool async) @@ -2132,7 +2150,7 @@ async Task ConsumeRowSequential(bool async) } [MethodImpl(MethodImplOptions.AggressiveInlining)] - void ConsumeRowNonSequential() + void ConsumeBufferedRow() { Debug.Assert(State is ReaderState.InResult or ReaderState.BeforeResult); PgReader.Commit(resuming: false); @@ -2143,25 +2161,6 @@ void ConsumeRowNonSequential() #region Checks - void CheckResultSet() - { - switch (State) - { - case ReaderState.BeforeResult: - case ReaderState.InResult: - return; - case ReaderState.Closed: - ThrowHelper.ThrowInvalidOperationException("The reader is closed"); - return; - case ReaderState.Disposed: - 
ThrowHelper.ThrowObjectDisposedException(nameof(NpgsqlDataReader)); - return; - default: - ThrowHelper.ThrowInvalidOperationException("No resultset is currently being traversed"); - return; - } - } - [MethodImpl(MethodImplOptions.NoInlining)] T DbNullValueOrThrow(int ordinal) { @@ -2172,22 +2171,15 @@ T DbNullValueOrThrow(int ordinal) if (typeof(T) == typeof(object)) return (T)(object)DBNull.Value; - ThrowHelper.ThrowInvalidCastException_NoValue(CheckRowAndGetField(ordinal)); + ThrowHelper.ThrowInvalidCastException_NoValue(RowDescription![ordinal]); return default; } [MethodImpl(MethodImplOptions.AggressiveInlining)] DataFormat GetInfo(int ordinal, Type type, out PgConverter converter, out Size bufferRequirement, out bool asObject) { - var state = State; - if (state is not ReaderState.InResult || (uint)ordinal > (uint)ColumnCount) - { - Unsafe.SkipInit(out converter); - Unsafe.SkipInit(out bufferRequirement); - Unsafe.SkipInit(out asObject); - HandleInvalidState(state, ColumnCount); - Debug.Fail("Should never get here"); - } + if ((uint)ordinal > (uint)ColumnCount) + ThrowHelper.ThrowIndexOutOfRangeException("Ordinal must be between 0 and " + (ColumnCount - 1)); ref var info = ref ColumnInfoCache![ordinal]; @@ -2206,7 +2198,7 @@ DataFormat GetInfo(int ordinal, Type type, out PgConverter converter, out Size b [MethodImpl(MethodImplOptions.NoInlining)] DataFormat Slow(ref ColumnInfo info, out PgConverter converter, out Size bufferRequirement, out bool asObject) { - var field = CheckRowAndGetField(ordinal); + var field = RowDescription![ordinal]; field.GetInfo(type, ref info); converter = info.ConverterInfo.Converter; bufferRequirement = info.ConverterInfo.BufferRequirement; @@ -2218,33 +2210,47 @@ DataFormat Slow(ref ColumnInfo info, out PgConverter converter, out Size bufferR [MethodImpl(MethodImplOptions.AggressiveInlining)] DataFormat GetDefaultInfo(int ordinal, out PgConverter converter, out Size bufferRequirement) { - var field = CheckRowAndGetField(ordinal); + 
var field = RowDescription![ordinal]; converter = field.ObjectOrDefaultInfo.Converter; bufferRequirement = field.ObjectOrDefaultInfo.BufferRequirement; return field.DataFormat; } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - FieldDescription CheckRowAndGetField(int column) + /// + /// Checks that we have a RowDescription, but not necessary an actual resultset + /// (for operations which work in SchemaOnly mode. + /// + FieldDescription GetField(int ordinal) + { + ThrowIfClosedOrDisposed(); + if (RowDescription is { } columns) + return columns[ordinal]; + + ThrowHelper.ThrowInvalidOperationException("No resultset is currently being traversed"); + return default!; + } + + void ThrowIfClosedOrDisposed() + { + if (State is (ReaderState.Closed or ReaderState.Disposed) and var state) + ThrowInvalidState(state); + } + + [MemberNotNull(nameof(RowDescription))] + void ThrowIfNotInResult() { - var columns = RowDescription; - var state = State; - if (state is ReaderState.InResult && (uint)column < (uint)columns!.Count) - return columns[column]; + if (State is not ReaderState.InResult and var state) + ThrowInvalidState(state); - return HandleInvalidState(state, columns?.Count ?? 
0); + Debug.Assert(RowDescription is not null); } - [DoesNotReturn] [MethodImpl(MethodImplOptions.NoInlining)] - static FieldDescription HandleInvalidState(ReaderState state, int maxColumns) + static void ThrowInvalidState(ReaderState state) { switch (state) { - case ReaderState.InResult: - ThrowColumnOutOfRange(maxColumns); - break; case ReaderState.Closed: ThrowHelper.ThrowInvalidOperationException("The reader is closed"); break; @@ -2252,52 +2258,11 @@ static FieldDescription HandleInvalidState(ReaderState state, int maxColumns) ThrowHelper.ThrowObjectDisposedException(nameof(NpgsqlDataReader)); break; default: - ThrowHelper.ThrowInvalidOperationException("No row is available"); - break; - } - return default!; - } - - /// - /// Checks that we have a RowDescription, but not necessary an actual resultset - /// (for operations which work in SchemaOnly mode. - /// - FieldDescription GetField(int column) - { - if (RowDescription is null) ThrowHelper.ThrowInvalidOperationException("No resultset is currently being traversed"); - - var columns = RowDescription; - if (column < 0 || column >= columns.Count) - ThrowColumnOutOfRange(columns.Count); - - return columns[column]; - } - - void CheckClosedOrDisposed() - { - if (State is (ReaderState.Closed or ReaderState.Disposed) and var state) - Throw(state); - - [MethodImpl(MethodImplOptions.NoInlining)] - static void Throw(ReaderState state) - { - switch (state) - { - case ReaderState.Closed: - ThrowHelper.ThrowInvalidOperationException("The reader is closed"); - return; - case ReaderState.Disposed: - ThrowHelper.ThrowObjectDisposedException(nameof(NpgsqlDataReader)); - return; - } + break; } } - [DoesNotReturn] - static void ThrowColumnOutOfRange(int maxIndex) => - throw new IndexOutOfRangeException($"Column must be between {0} and {maxIndex - 1}"); - #endregion #region Misc diff --git a/src/Npgsql/ThrowHelper.cs b/src/Npgsql/ThrowHelper.cs index f20dac780c..63f5647740 100644 --- a/src/Npgsql/ThrowHelper.cs +++ 
b/src/Npgsql/ThrowHelper.cs @@ -95,6 +95,10 @@ internal static void ThrowArgumentNullException(string message, string paramName internal static void ThrowIndexOutOfRangeException(string message) => throw new IndexOutOfRangeException(message); + [DoesNotReturn] + internal static void ThrowIndexOutOfRangeException(string message, object argument) + => throw new IndexOutOfRangeException(string.Format(message, argument)); + [DoesNotReturn] internal static void ThrowNotSupportedException(string? message = null) => throw new NotSupportedException(message); From 6754cdc7a7f9ad0cc63246c43f9107895e8bcd6b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Mar 2024 22:36:25 +0100 Subject: [PATCH 406/761] Bump Microsoft.CodeAnalysis.CSharp from 4.8.0 to 4.9.2 (#5610) --- .github/workflows/build.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/native-aot.yml | 2 +- .github/workflows/rich-code-nav.yml | 2 +- Directory.Packages.props | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 0e7398e744..46273efc41 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -15,7 +15,7 @@ concurrency: cancel-in-progress: true env: - dotnet_sdk_version: '8.0.100' + dotnet_sdk_version: '8.0.203' postgis_version: 3 DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true # Windows comes with PG pre-installed, and defines the PGPASSWORD environment variable. 
Remove it as it interferes diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 868ea2418b..c171e62e26 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -32,7 +32,7 @@ concurrency: cancel-in-progress: true env: - dotnet_sdk_version: '8.0.100' + dotnet_sdk_version: '8.0.203' DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true jobs: diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index 97cf2878e7..c3db4812b1 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -15,7 +15,7 @@ concurrency: cancel-in-progress: true env: - dotnet_sdk_version: '8.0.100' + dotnet_sdk_version: '8.0.203' DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true # Uncomment and edit the following to use nightly/preview builds # nuget_config: | diff --git a/.github/workflows/rich-code-nav.yml b/.github/workflows/rich-code-nav.yml index 7ee82bfeb9..11686cdab3 100644 --- a/.github/workflows/rich-code-nav.yml +++ b/.github/workflows/rich-code-nav.yml @@ -4,7 +4,7 @@ on: workflow_dispatch: env: - dotnet_sdk_version: '8.0.100' + dotnet_sdk_version: '8.0.203' DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true jobs: diff --git a/Directory.Packages.props b/Directory.Packages.props index 132bbd43e8..39eeb13026 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -24,7 +24,7 @@ - + From f6d8b1bea9c3fe2fae59b2188c596cd91ee7acef Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Wed, 27 Mar 2024 21:09:03 +0300 Subject: [PATCH 407/761] Add a fast path for PreparedStatementManager.TryGetAutoPrepared (#5395) --- src/Npgsql/PreparedStatementManager.cs | 267 +++++++++++++------------ 1 file changed, 142 insertions(+), 125 deletions(-) diff --git a/src/Npgsql/PreparedStatementManager.cs b/src/Npgsql/PreparedStatementManager.cs index ef72879c6d..e798ec28c2 100644 --- a/src/Npgsql/PreparedStatementManager.cs +++ b/src/Npgsql/PreparedStatementManager.cs @@ -17,8 +17,6 @@ sealed 
class PreparedStatementManager readonly PreparedStatement?[] _candidates; - static readonly List EmptyParameters = new(); - /// /// Total number of current prepared statements (whether explicit or automatic). /// @@ -98,161 +96,180 @@ internal PreparedStatementManager(NpgsqlConnector connector) internal PreparedStatement? TryGetAutoPrepared(NpgsqlBatchCommand batchCommand) { var sql = batchCommand.FinalCommandText!; - if (!BySql.TryGetValue(sql, out var pStatement)) + // We could also test for PreparedState.BeingPrepared as it's handled the exact same way as PreparedState.Prepared + // But since it's so rare we'll just go through the slow path + if (!BySql.TryGetValue(sql, out var pStatement) || pStatement.State != PreparedState.Prepared) + return TryGetAutoPreparedSlow(batchCommand, pStatement); + + // The statement has already been prepared (explicitly or automatically) + // We just need to check that the parameter types correspond, since prepared statements are + // only keyed by SQL (to prevent pointless allocations). If we have a mismatch, simply run unprepared. + if (!pStatement.DoParametersMatch(batchCommand.CurrentParametersReadOnly)) + return null; + // Prevent this statement from being replaced within this batch + pStatement.LastUsed = long.MaxValue; + return pStatement; + + PreparedStatement? TryGetAutoPreparedSlow(NpgsqlBatchCommand batchCommand, PreparedStatement? pStatement) { - // New candidate. Find an empty candidate slot or eject a least-used one. 
- int slotIndex = -1, leastUsages = int.MaxValue; - var lastUsed = long.MaxValue; - for (var i = 0; i < _candidates.Length; i++) + var sql = batchCommand.FinalCommandText!; + if (pStatement is null) { - var candidate = _candidates[i]; - // ReSharper disable once ConditionIsAlwaysTrueOrFalse - // ReSharper disable HeuristicUnreachableCode - if (candidate == null) // Found an unused candidate slot, return immediately - { - slotIndex = i; - break; - } - // ReSharper restore HeuristicUnreachableCode - if (candidate.Usages < leastUsages) + // New candidate. Find an empty candidate slot or eject a least-used one. + int slotIndex = -1, leastUsages = int.MaxValue; + var lastUsed = long.MaxValue; + for (var i = 0; i < _candidates.Length; i++) { - leastUsages = candidate.Usages; - slotIndex = i; - lastUsed = candidate.LastUsed; - } - else if (candidate.Usages == leastUsages && candidate.LastUsed < lastUsed) - { - slotIndex = i; - lastUsed = candidate.LastUsed; + var candidate = _candidates[i]; + // ReSharper disable once ConditionIsAlwaysTrueOrFalse + // ReSharper disable HeuristicUnreachableCode + if (candidate == null) // Found an unused candidate slot, return immediately + { + slotIndex = i; + break; + } + // ReSharper restore HeuristicUnreachableCode + if (candidate.Usages < leastUsages) + { + leastUsages = candidate.Usages; + slotIndex = i; + lastUsed = candidate.LastUsed; + } + else if (candidate.Usages == leastUsages && candidate.LastUsed < lastUsed) + { + slotIndex = i; + lastUsed = candidate.LastUsed; + } } + + var leastUsed = _candidates[slotIndex]; + // ReSharper disable once ConditionIsAlwaysTrueOrFalse + if (leastUsed != null) + BySql.Remove(leastUsed.Sql); + pStatement = BySql[sql] = _candidates[slotIndex] = PreparedStatement.CreateAutoPrepareCandidate(this, sql); } - var leastUsed = _candidates[slotIndex]; - // ReSharper disable once ConditionIsAlwaysTrueOrFalse - if (leastUsed != null) - BySql.Remove(leastUsed.Sql); - pStatement = BySql[sql] = 
_candidates[slotIndex] = PreparedStatement.CreateAutoPrepareCandidate(this, sql); - } + switch (pStatement.State) + { + case PreparedState.NotPrepared: + case PreparedState.Invalidated: + break; - switch (pStatement.State) - { - case PreparedState.NotPrepared: - case PreparedState.Invalidated: - break; - - case PreparedState.Prepared: - case PreparedState.BeingPrepared: - // The statement has already been prepared (explicitly or automatically), or has been selected - // for preparation (earlier identical statement in the same command). - // We just need to check that the parameter types correspond, since prepared statements are - // only keyed by SQL (to prevent pointless allocations). If we have a mismatch, simply run unprepared. - if (!pStatement.DoParametersMatch(batchCommand.CurrentParametersReadOnly)) + // We shouldn't ever get PreparedState.Prepared since it's handled above but handle it here just in case + case PreparedState.Prepared: + case PreparedState.BeingPrepared: + // The statement has already been prepared (explicitly or automatically), or has been selected + // for preparation (earlier identical statement in the same command). + // We just need to check that the parameter types correspond, since prepared statements are + // only keyed by SQL (to prevent pointless allocations). If we have a mismatch, simply run unprepared. + if (!pStatement.DoParametersMatch(batchCommand.CurrentParametersReadOnly)) + return null; + // Prevent this statement from being replaced within this batch + pStatement.LastUsed = long.MaxValue; + return pStatement; + + case PreparedState.BeingUnprepared: + // The statement is being replaced by an earlier statement in this same batch. return null; - // Prevent this statement from being replaced within this batch - pStatement.LastUsed = long.MaxValue; - return pStatement; - - case PreparedState.BeingUnprepared: - // The statement is being replaced by an earlier statement in this same batch. 
- return null; - - default: - Debug.Fail($"Unexpected {nameof(PreparedState)} in auto-preparation: {pStatement.State}"); - break; - } - if (++pStatement.Usages < UsagesBeforePrepare) - { - // Statement still hasn't passed the usage threshold, no automatic preparation. - // Return null for unprepared execution. - pStatement.RefreshLastUsed(); - return null; - } - - // Bingo, we've just passed the usage threshold, statement should get prepared - LogMessages.AutoPreparingStatement(_commandLogger, sql, _connector.Id); - - // Look for either an empty autoprepare slot, or the least recently used prepared statement which we'll replace it. - var oldestLastUsed = long.MaxValue; - var selectedIndex = -1; - for (var i = 0; i < AutoPrepared.Length; i++) - { - var slot = AutoPrepared[i]; + default: + Debug.Fail($"Unexpected {nameof(PreparedState)} in auto-preparation: {pStatement.State}"); + break; + } - if (slot is null or { State: PreparedState.Invalidated }) + if (++pStatement.Usages < UsagesBeforePrepare) { - // We found a free or invalidated slot, exit the loop immediately - selectedIndex = i; - break; + // Statement still hasn't passed the usage threshold, no automatic preparation. + // Return null for unprepared execution. + pStatement.RefreshLastUsed(); + return null; } - switch (slot.State) + // Bingo, we've just passed the usage threshold, statement should get prepared + LogMessages.AutoPreparingStatement(_commandLogger, sql, _connector.Id); + + // Look for either an empty autoprepare slot, or the least recently used prepared statement which we'll replace it. 
+ var oldestLastUsed = long.MaxValue; + var selectedIndex = -1; + for (var i = 0; i < AutoPrepared.Length; i++) { - case PreparedState.Prepared: - if (slot.LastUsed < oldestLastUsed) + var slot = AutoPrepared[i]; + + if (slot is null or { State: PreparedState.Invalidated }) { + // We found a free or invalidated slot, exit the loop immediately selectedIndex = i; - oldestLastUsed = slot.LastUsed; + break; } - break; - case PreparedState.BeingPrepared: - // Slot has already been selected for preparation by an earlier statement in this batch. Skip it. - continue; + switch (slot.State) + { + case PreparedState.Prepared: + if (slot.LastUsed < oldestLastUsed) + { + selectedIndex = i; + oldestLastUsed = slot.LastUsed; + } + break; - default: - ThrowHelper.ThrowInvalidOperationException($"Invalid {nameof(PreparedState)} state {slot.State} encountered when scanning prepared statement slots"); - return null; + case PreparedState.BeingPrepared: + // Slot has already been selected for preparation by an earlier statement in this batch. Skip it. + continue; + + default: + ThrowHelper.ThrowInvalidOperationException($"Invalid {nameof(PreparedState)} state {slot.State} encountered when scanning prepared statement slots"); + return null; + } } - } - if (selectedIndex == -1) - { - // We're here if we couldn't find a free slot or a prepared statement to replace - this means all slots are taken by - // statements being prepared in this batch. - return null; - } + if (selectedIndex < 0) + { + // We're here if we couldn't find a free slot or a prepared statement to replace - this means all slots are taken by + // statements being prepared in this batch. 
+ return null; + } - if (pStatement.State != PreparedState.Invalidated) - RemoveCandidate(pStatement); + if (pStatement.State != PreparedState.Invalidated) + RemoveCandidate(pStatement); - var oldPreparedStatement = AutoPrepared[selectedIndex]; + var oldPreparedStatement = AutoPrepared[selectedIndex]; - if (oldPreparedStatement is null) - { - pStatement.Name = Encoding.ASCII.GetBytes("_auto" + selectedIndex); - } - else - { - // When executing an invalidated prepared statement, the old and the new statements are the same instance. - // Create a copy so that we have two distinct instances with their own states. - if (oldPreparedStatement == pStatement) + if (oldPreparedStatement is null) { - oldPreparedStatement = new PreparedStatement(this, oldPreparedStatement.Sql, isExplicit: false) - { - Name = oldPreparedStatement.Name - }; + pStatement.Name = Encoding.ASCII.GetBytes("_auto" + selectedIndex); } + else + { + // When executing an invalidated prepared statement, the old and the new statements are the same instance. + // Create a copy so that we have two distinct instances with their own states. 
+ if (oldPreparedStatement == pStatement) + { + oldPreparedStatement = new PreparedStatement(this, oldPreparedStatement.Sql, isExplicit: false) + { + Name = oldPreparedStatement.Name + }; + } - pStatement.Name = oldPreparedStatement.Name; - pStatement.State = PreparedState.NotPrepared; - pStatement.StatementBeingReplaced = oldPreparedStatement; - oldPreparedStatement.State = PreparedState.BeingUnprepared; - } + pStatement.Name = oldPreparedStatement.Name; + pStatement.State = PreparedState.NotPrepared; + pStatement.StatementBeingReplaced = oldPreparedStatement; + oldPreparedStatement.State = PreparedState.BeingUnprepared; + } - pStatement.AutoPreparedSlotIndex = selectedIndex; - AutoPrepared[selectedIndex] = pStatement; + pStatement.AutoPreparedSlotIndex = selectedIndex; + AutoPrepared[selectedIndex] = pStatement; - // Make sure this statement isn't replaced by a later statement in the same batch. - pStatement.LastUsed = long.MaxValue; + // Make sure this statement isn't replaced by a later statement in the same batch. + pStatement.LastUsed = long.MaxValue; - // Note that the parameter types are only set at the moment of preparation - in the candidate phase - // there's no differentiation between overloaded statements, which are a pretty rare case, saving - // allocations. - pStatement.SetParamTypes(batchCommand.CurrentParametersReadOnly); + // Note that the parameter types are only set at the moment of preparation - in the candidate phase + // there's no differentiation between overloaded statements, which are a pretty rare case, saving + // allocations. 
+ pStatement.SetParamTypes(batchCommand.CurrentParametersReadOnly); - return pStatement; + return pStatement; + } } void RemoveCandidate(PreparedStatement candidate) From 5f24dc61157f4d96be740b5bd627adb901184e88 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Wed, 27 Mar 2024 20:15:05 +0100 Subject: [PATCH 408/761] Return boxing info for derived IPAddress types instead of using reflection (#5649) --- .../NetworkTypeInfoResolverFactory.cs | 26 +++++-------------- 1 file changed, 6 insertions(+), 20 deletions(-) diff --git a/src/Npgsql/Internal/ResolverFactories/NetworkTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/NetworkTypeInfoResolverFactory.cs index da738f54d0..eca3dfec64 100644 --- a/src/Npgsql/Internal/ResolverFactories/NetworkTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/NetworkTypeInfoResolverFactory.cs @@ -1,5 +1,4 @@ using System; -using System.Diagnostics.CodeAnalysis; using System.Net; using System.Net.NetworkInformation; using Npgsql.Internal.Converters; @@ -31,13 +30,15 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) mapping => mapping with { MatchRequirement = MatchRequirement.DataTypeName }); // inet - // This is one of the rare mappings that force us to use reflection for a lack of any alternative. // There are certain IPAddress values like Loopback or Any that return a *private* derived type (see https://github.com/dotnet/runtime/issues/27870). - // However we still need to be able to resolve an exactly typed converter for those values. - // We do so by wrapping our converter in a casting converter constructed over the derived type. + // However we still need to be able to resolve some typed converter for those values. + // We do so by returning a boxing info when we deal with a derived type, as a result we don't need an exact typed converter. + // For arrays users can't actually reference the private type so we'll only see some version of ArrayType. 
+ // For reads we'll only see the public type so we never surface an InvalidCastException trying to cast IPAddress to ReadOnlyIPAddress. // Finally we add a custom predicate to be able to match any type which values are assignable to IPAddress. mappings.AddType(DataTypeNames.Inet, - CreateInfo, + static (options, mapping, _) => new PgTypeInfo(options, new IPAddressConverter(), + new DataTypeName(mapping.DataTypeName), unboxedType: mapping.Type == typeof(IPAddress) ? null : mapping.Type), mapping => mapping with { MatchRequirement = MatchRequirement.Single, @@ -50,21 +51,6 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) mappings.AddStructType(DataTypeNames.Cidr, static (options, mapping, _) => mapping.CreateInfo(options, new NpgsqlCidrConverter()), isDefault: true); - // Code is split out to a local method as suppression attributes on lambdas aren't properly handled by the ILLink analyzer yet. - [UnconditionalSuppressMessage("AotAnalysis", "IL3050", - Justification = "MakeGenericType is safe because the target will only ever be a reference type.")] - static PgTypeInfo CreateInfo(PgSerializerOptions options, TypeInfoMapping resolvedMapping, bool _) - { - var derivedType = resolvedMapping.Type != typeof(IPAddress); - PgConverter converter = new IPAddressConverter(); - if (derivedType) - // There is not much more we can do, the deriving type IPAddress+ReadOnlyIPAddress isn't public. 
- converter = (PgConverter)Activator.CreateInstance(typeof(CastingConverter<>).MakeGenericType(resolvedMapping.Type), - converter)!; - - return resolvedMapping.CreateInfo(options, converter); - } - return mappings; } } From 132a42dc0cc920a3bc9dcf398d3e6dd56f73f579 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Wed, 27 Mar 2024 20:25:31 +0100 Subject: [PATCH 409/761] Reduce use of Unsafe.As (#5554) --- .../Internal/GeoJSONConverter.cs | 13 ++----- .../Internal/Converters/ArrayConverter.cs | 36 ++++++++++--------- .../Internal/Converters/AsyncHelpers.cs | 5 ++- src/Npgsql/Internal/NpgsqlReadBuffer.cs | 23 ++++-------- src/Npgsql/Internal/NpgsqlWriteBuffer.cs | 31 ---------------- src/Npgsql/Internal/PgConverter.cs | 8 +++++ src/Npgsql/Internal/PgSerializerOptions.cs | 4 +-- src/Npgsql/Internal/PgStreamingConverter.cs | 3 +- src/Npgsql/Internal/PgTypeInfo.cs | 2 -- src/Npgsql/Internal/TypeInfoCache.cs | 4 +-- src/Npgsql/NpgsqlBinaryExporter.cs | 4 +-- src/Npgsql/NpgsqlDataReader.cs | 4 +-- src/Npgsql/NpgsqlNestedDataReader.cs | 2 +- src/Npgsql/NpgsqlParameter`.cs | 8 ++--- .../Replication/PgOutput/ReplicationValue.cs | 2 +- src/Npgsql/Schema/NpgsqlDbColumn.cs | 5 ++- 16 files changed, 57 insertions(+), 97 deletions(-) diff --git a/src/Npgsql.GeoJSON/Internal/GeoJSONConverter.cs b/src/Npgsql.GeoJSON/Internal/GeoJSONConverter.cs index 755c8acc19..f1a633724f 100644 --- a/src/Npgsql.GeoJSON/Internal/GeoJSONConverter.cs +++ b/src/Npgsql.GeoJSON/Internal/GeoJSONConverter.cs @@ -278,16 +278,9 @@ static Position ReadPosition(PgReader reader, EwkbGeometryType type, bool little return position; double ReadDouble(bool littleEndian) - { - if (littleEndian) - { - var doubleValue = reader.ReadDouble(); - var value = BinaryPrimitives.ReverseEndianness(Unsafe.As(ref doubleValue)); - return Unsafe.As(ref value); - } - - return reader.ReadDouble(); - } + => littleEndian + ? 
BitConverter.Int64BitsToDouble(BinaryPrimitives.ReverseEndianness(BitConverter.DoubleToInt64Bits(reader.ReadDouble()))) + : reader.ReadDouble(); } } diff --git a/src/Npgsql/Internal/Converters/ArrayConverter.cs b/src/Npgsql/Internal/Converters/ArrayConverter.cs index 5c2ff9133f..2255100e95 100644 --- a/src/Npgsql/Internal/Converters/ArrayConverter.cs +++ b/src/Npgsql/Internal/Converters/ArrayConverter.cs @@ -285,7 +285,7 @@ public async ValueTask Write(bool async, PgWriter writer, object values, Cancell } } -// Class constraint exists to make Unsafe.As, ValueTask> safe, don't remove unless that unsafe cast is also removed. +// Class constraint exists to make ValueTask to ValueTask reinterpretation safe, don't remove unless that is also removed. abstract class ArrayConverter : PgStreamingConverter where T : class { protected PgConverterResolution ElemResolution { get; } @@ -309,6 +309,8 @@ private protected ArrayConverter(int? expectedDimensions, PgConverterResolution public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) { var value = _pgArrayConverter.Read(async: true, reader, cancellationToken); + // Justification: elides the async method bloat/perf cost to transition from object to T (where T : class) + Debug.Assert(typeof(T).IsClass); return Unsafe.As, ValueTask>(ref value); } @@ -381,9 +383,11 @@ public ArrayBasedArrayConverter(PgConverterResolution elemResolution, Type? effe switch (indices.Length) { case 1: + // Justification: avoid the cast overhead for per element calls. Debug.Assert(collection is TElement?[]); return Unsafe.As(collection)[indices[0]]; default: + // Justification: avoid the cast overhead for per element calls. Debug.Assert(collection is Array); return (TElement?)Unsafe.As(collection).GetValue(indices); } @@ -395,10 +399,12 @@ static void SetValue(object collection, int[] indices, TElement? value) switch (indices.Length) { case 1: + // Justification: avoid the cast overhead for per element calls. 
Debug.Assert(collection is TElement?[]); Unsafe.As(collection)[indices[0]] = value; break; default: + // Justification: avoid the cast overhead for per element calls. Debug.Assert(collection is Array); Unsafe.As(collection).SetValue(value, indices); break; @@ -411,20 +417,19 @@ object IElementOperations.CreateCollection(int[] lengths) 0 => Array.Empty(), 1 when lengths[0] == 0 => Array.Empty(), 1 => new TElement?[lengths[0]], - 2 => new TElement?[lengths[0],lengths[1]], - 3 => new TElement?[lengths[0],lengths[1], lengths[2]], - 4 => new TElement?[lengths[0],lengths[1], lengths[2], lengths[3]], - 5 => new TElement?[lengths[0],lengths[1], lengths[2], lengths[3], lengths[4]], - 6 => new TElement?[lengths[0],lengths[1], lengths[2], lengths[3], lengths[4], lengths[5]], - 7 => new TElement?[lengths[0],lengths[1], lengths[2], lengths[3], lengths[4], lengths[5], lengths[6]], - 8 => new TElement?[lengths[0],lengths[1], lengths[2], lengths[3], lengths[4], lengths[5], lengths[6], lengths[7]], + 2 => new TElement?[lengths[0], lengths[1]], + 3 => new TElement?[lengths[0], lengths[1], lengths[2]], + 4 => new TElement?[lengths[0], lengths[1], lengths[2], lengths[3]], + 5 => new TElement?[lengths[0], lengths[1], lengths[2], lengths[3], lengths[4]], + 6 => new TElement?[lengths[0], lengths[1], lengths[2], lengths[3], lengths[4], lengths[5]], + 7 => new TElement?[lengths[0], lengths[1], lengths[2], lengths[3], lengths[4], lengths[5], lengths[6]], + 8 => new TElement?[lengths[0], lengths[1], lengths[2], lengths[3], lengths[4], lengths[5], lengths[6], lengths[7]], _ => throw new InvalidOperationException("Postgres arrays can have at most 8 dimensions.") }; int IElementOperations.GetCollectionCount(object collection, out int[]? 
lengths) { - Debug.Assert(collection is Array); - var array = Unsafe.As(collection); + var array = (Array)collection; lengths = GetLengths(array); return array.Length; } @@ -452,8 +457,7 @@ unsafe ValueTask ReadAsync(PgStreamingConverter converter, PgReader re // Using .Result on ValueTask is equivalent to GetAwaiter().GetResult(), this removes TaskAwaiter rooting. static void SetResult(Task task, object collection, int[] indices) { - Debug.Assert(task is Task); - SetValue(collection, indices, new ValueTask(Unsafe.As>(task)).Result); + SetValue(collection, indices, new ValueTask((Task)task).Result); } } @@ -478,6 +482,7 @@ public ListBasedArrayConverter(PgConverterResolution elemResolution, int pgLower [MethodImpl(MethodImplOptions.AggressiveInlining)] static TElement? GetValue(object collection, int index) { + // Justification: avoid the cast overhead for per element calls. Debug.Assert(collection is IList); return Unsafe.As>(collection)[index]; } @@ -485,6 +490,7 @@ public ListBasedArrayConverter(PgConverterResolution elemResolution, int pgLower [MethodImpl(MethodImplOptions.AggressiveInlining)] static void SetValue(object collection, int index, TElement? value) { + // Justification: avoid the cast overhead for per element calls. Debug.Assert(collection is IList); var list = Unsafe.As>(collection); list.Insert(index, value); @@ -495,9 +501,8 @@ object IElementOperations.CreateCollection(int[] lengths) int IElementOperations.GetCollectionCount(object collection, out int[]? lengths) { - Debug.Assert(collection is IList); lengths = null; - return Unsafe.As>(collection).Count; + return ((IList)collection).Count; } Size? IElementOperations.GetSizeOrDbNull(SizeContext context, object collection, int[] indices, ref object? writeState) @@ -524,8 +529,7 @@ unsafe ValueTask ReadAsync(PgStreamingConverter converter, PgReader re // Using .Result on ValueTask is equivalent to GetAwaiter().GetResult(), this removes TaskAwaiter rooting. 
static void SetResult(Task task, object collection, int[] indices) { - Debug.Assert(task is Task); - SetValue(collection, indices[0], new ValueTask(Unsafe.As>(task)).Result); + SetValue(collection, indices[0], new ValueTask((Task)task).Result); } } diff --git a/src/Npgsql/Internal/Converters/AsyncHelpers.cs b/src/Npgsql/Internal/Converters/AsyncHelpers.cs index ccf8780ca0..534e9f10ef 100644 --- a/src/Npgsql/Internal/Converters/AsyncHelpers.cs +++ b/src/Npgsql/Internal/Converters/AsyncHelpers.cs @@ -75,7 +75,7 @@ public Continuation(object handle, delegate* conti static void UnboxAndComplete(Task task, CompletionSource completionSource) { - // Justification: unsafe exact cast used to reduce generic duplication cost. + // Justification: exact type Unsafe.As used to reduce generic duplication cost. Debug.Assert(task is Task); Debug.Assert(completionSource is CompletionSource); Unsafe.As>(completionSource).SetResult(new T?(new ValueTask(Unsafe.As>(task)).Result)); @@ -87,6 +87,8 @@ public static unsafe ValueTask ReadAsObjectAsyncAsT(this PgConverter in if (!typeof(T).IsValueType) { var value = effectiveConverter.ReadAsObjectAsync(reader, cancellationToken); + // Justification: elides the async method bloat/perf cost to transition from object to T (where T : class) + Debug.Assert(typeof(T).IsClass); return Unsafe.As, ValueTask>(ref value); } @@ -102,6 +104,7 @@ public static unsafe ValueTask ReadAsObjectAsyncAsT(this PgConverter in static void UnboxAndComplete(Task task, CompletionSource completionSource) { + // Justification: exact type Unsafe.As used to reduce generic duplication cost. 
Debug.Assert(task is Task); Debug.Assert(completionSource is CompletionSource); Unsafe.As>(completionSource).SetResult((T)new ValueTask(Unsafe.As>(task)).Result); diff --git a/src/Npgsql/Internal/NpgsqlReadBuffer.cs b/src/Npgsql/Internal/NpgsqlReadBuffer.cs index f7807edc2d..b0aba8aae0 100644 --- a/src/Npgsql/Internal/NpgsqlReadBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlReadBuffer.cs @@ -525,19 +525,13 @@ public ulong ReadUInt64() return result; } - [MethodImpl(MethodImplOptions.AggressiveInlining)] public float ReadSingle() { CheckBounds(sizeof(float)); - float result; - if (BitConverter.IsLittleEndian) - { - var value = BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref Buffer[ReadPosition])); - result = Unsafe.As(ref value); - } - else - result = Unsafe.ReadUnaligned(ref Buffer[ReadPosition]); + var result = BitConverter.IsLittleEndian + ? BitConverter.Int32BitsToSingle(BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref Buffer[ReadPosition]))) + : Unsafe.ReadUnaligned(ref Buffer[ReadPosition]); ReadPosition += sizeof(float); return result; } @@ -546,14 +540,9 @@ public float ReadSingle() public double ReadDouble() { CheckBounds(sizeof(double)); - double result; - if (BitConverter.IsLittleEndian) - { - var value = BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref Buffer[ReadPosition])); - result = Unsafe.As(ref value); - } - else - result = Unsafe.ReadUnaligned(ref Buffer[ReadPosition]); + var result = BitConverter.IsLittleEndian + ? 
BitConverter.Int64BitsToDouble(BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref Buffer[ReadPosition]))) + : Unsafe.ReadUnaligned(ref Buffer[ReadPosition]); ReadPosition += sizeof(double); return result; } diff --git a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs index 316495eaa8..9a2cd5eead 100644 --- a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs @@ -306,37 +306,6 @@ public void WriteInt64(long value) WritePosition += sizeof(long); } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public void WriteUInt64(ulong value) - { - CheckBounds(); - Unsafe.WriteUnaligned(ref Buffer[WritePosition], BitConverter.IsLittleEndian ? BinaryPrimitives.ReverseEndianness(value) : value); - WritePosition += sizeof(ulong); - } - - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public void WriteSingle(float value) - { - CheckBounds(); - if (BitConverter.IsLittleEndian) - Unsafe.WriteUnaligned(ref Buffer[WritePosition], BinaryPrimitives.ReverseEndianness(Unsafe.As(ref value))); - else - Unsafe.WriteUnaligned(ref Buffer[WritePosition], value); - WritePosition += sizeof(float); - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public void WriteDouble(double value) - { - CheckBounds(); - if (BitConverter.IsLittleEndian) - Unsafe.WriteUnaligned(ref Buffer[WritePosition], BinaryPrimitives.ReverseEndianness(Unsafe.As(ref value))); - else - Unsafe.WriteUnaligned(ref Buffer[WritePosition], value); - WritePosition += sizeof(double); - } - [Conditional("DEBUG")] unsafe void CheckBounds() where T : unmanaged { diff --git a/src/Npgsql/Internal/PgConverter.cs b/src/Npgsql/Internal/PgConverter.cs index 3317361516..a99c5b4991 100644 --- a/src/Npgsql/Internal/PgConverter.cs +++ b/src/Npgsql/Internal/PgConverter.cs @@ -2,6 +2,7 @@ using System.Buffers; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; using System.Threading; using 
System.Threading.Tasks; @@ -176,6 +177,13 @@ static class PgConverterExtensions return size; } + + internal static PgConverter UnsafeDowncast(this PgConverter converter) + { + // Justification: avoid perf cost of casting to a known base class type per read/write, see callers. + Debug.Assert(converter is PgConverter); + return Unsafe.As>(converter); + } } interface IResumableRead diff --git a/src/Npgsql/Internal/PgSerializerOptions.cs b/src/Npgsql/Internal/PgSerializerOptions.cs index 405d1d11da..193e193826 100644 --- a/src/Npgsql/Internal/PgSerializerOptions.cs +++ b/src/Npgsql/Internal/PgSerializerOptions.cs @@ -79,8 +79,8 @@ public static bool IsWellKnownTextType(Type type) // for. PgTypeInfo? GetTypeInfoCore(Type? type, PgTypeId? pgTypeId, bool defaultTypeFallback) => PortableTypeIds - ? Unsafe.As>(_typeInfoCache ??= new TypeInfoCache(this)).GetOrAddInfo(type, pgTypeId?.DataTypeName, defaultTypeFallback) - : Unsafe.As>(_typeInfoCache ??= new TypeInfoCache(this)).GetOrAddInfo(type, pgTypeId?.Oid, defaultTypeFallback); + ? ((TypeInfoCache)(_typeInfoCache ??= new TypeInfoCache(this))).GetOrAddInfo(type, pgTypeId?.DataTypeName, defaultTypeFallback) + : ((TypeInfoCache)(_typeInfoCache ??= new TypeInfoCache(this))).GetOrAddInfo(type, pgTypeId?.Oid, defaultTypeFallback); public PgTypeInfo? GetDefaultTypeInfo(PostgresType pgType) => GetTypeInfoCore(null, ToCanonicalTypeId(pgType), false); diff --git a/src/Npgsql/Internal/PgStreamingConverter.cs b/src/Npgsql/Internal/PgStreamingConverter.cs index ff9c6b5eb2..3d69889b3b 100644 --- a/src/Npgsql/Internal/PgStreamingConverter.cs +++ b/src/Npgsql/Internal/PgStreamingConverter.cs @@ -42,10 +42,9 @@ internal sealed override unsafe ValueTask ReadAsObject( static object BoxResult(Task task) { - Debug.Assert(task is Task); // We're using ValueTask.Result here to avoid rooting any TaskAwaiter or ValueTaskAwaiter types. // On ValueTask calling .Result is equivalent to GetAwaiter().GetResult() w.r.t. exception wrapping. 
- return new ValueTask(task: Unsafe.As>(task)).Result!; + return new ValueTask(task: (Task)task).Result!; } } diff --git a/src/Npgsql/Internal/PgTypeInfo.cs b/src/Npgsql/Internal/PgTypeInfo.cs index 0c1f2f4ede..b72497775e 100644 --- a/src/Npgsql/Internal/PgTypeInfo.cs +++ b/src/Npgsql/Internal/PgTypeInfo.cs @@ -325,6 +325,4 @@ public PgConverterInfo(PgTypeInfo pgTypeInfo, PgConverter converter, Size buffer /// Whether Converter.TypeToConvert matches PgTypeInfo.Type, if it doesn't object apis should be used. public bool IsBoxingConverter => _typeInfo.IsBoxing; - - public PgConverter GetConverter() => (PgConverter)Converter; } diff --git a/src/Npgsql/Internal/TypeInfoCache.cs b/src/Npgsql/Internal/TypeInfoCache.cs index 5c72463d03..df570ca825 100644 --- a/src/Npgsql/Internal/TypeInfoCache.cs +++ b/src/Npgsql/Internal/TypeInfoCache.cs @@ -161,8 +161,8 @@ public TypeInfoCache(PgSerializerOptions options, bool validatePgTypeIds = true) static PgTypeId? AsPgTypeId(TPgTypeId? pgTypeId) => pgTypeId switch { - { } id when typeof(TPgTypeId) == typeof(DataTypeName) => new PgTypeId(Unsafe.As(ref id)), - { } id => new PgTypeId(Unsafe.As(ref id)), + { } id when typeof(TPgTypeId) == typeof(DataTypeName) => new((DataTypeName)(object)id), + { } id => new((Oid)(object)id), null => null }; } diff --git a/src/Npgsql/NpgsqlBinaryExporter.cs b/src/Npgsql/NpgsqlBinaryExporter.cs index 9d82900364..406962d837 100644 --- a/src/Npgsql/NpgsqlBinaryExporter.cs +++ b/src/Npgsql/NpgsqlBinaryExporter.cs @@ -266,7 +266,7 @@ T Read(NpgsqlDbType? type) reader.StartRead(info.BufferRequirement); var result = asObject ? (T)info.Converter.ReadAsObject(reader) - : info.GetConverter().Read(reader); + : info.Converter.UnsafeDowncast().Read(reader); reader.EndRead(); return result; @@ -300,7 +300,7 @@ async ValueTask ReadAsync(NpgsqlDbType? type, CancellationToken cancellati await reader.StartReadAsync(info.BufferRequirement, cancellationToken).ConfigureAwait(false); var result = asObject ? 
(T)await info.Converter.ReadAsObjectAsync(reader, cancellationToken).ConfigureAwait(false) - : await info.GetConverter().ReadAsync(reader, cancellationToken).ConfigureAwait(false); + : await info.Converter.UnsafeDowncast().ReadAsync(reader, cancellationToken).ConfigureAwait(false); await reader.EndReadAsync().ConfigureAwait(false); return result; diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 56d0fd35c4..88c1131de9 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -1575,7 +1575,7 @@ async ValueTask Core(int ordinal, CancellationToken cancellationToken) await PgReader.StartReadAsync(bufferRequirement, cancellationToken).ConfigureAwait(false); var result = asObject ? (T)await converter.ReadAsObjectAsync(PgReader, cancellationToken).ConfigureAwait(false) - : await Unsafe.As>(converter).ReadAsync(PgReader, cancellationToken).ConfigureAwait(false); + : await converter.UnsafeDowncast().ReadAsync(PgReader, cancellationToken).ConfigureAwait(false); await PgReader.EndReadAsync().ConfigureAwait(false); return result; } @@ -1628,7 +1628,7 @@ T GetFieldValueCore(int ordinal) PgReader.StartRead(bufferRequirement); var result = asObject ? (T)converter.ReadAsObject(PgReader) - : Unsafe.As>(converter).Read(PgReader); + : converter.UnsafeDowncast().Read(PgReader); PgReader.EndRead(); return result; diff --git a/src/Npgsql/NpgsqlNestedDataReader.cs b/src/Npgsql/NpgsqlNestedDataReader.cs index 1d499585f8..d3b6e37bfd 100644 --- a/src/Npgsql/NpgsqlNestedDataReader.cs +++ b/src/Npgsql/NpgsqlNestedDataReader.cs @@ -353,7 +353,7 @@ public override T GetFieldValue(int ordinal) using var _ = PgReader.BeginNestedRead(columnLength, info.BufferRequirement); return asObject ? (T)info.Converter.ReadAsObject(PgReader)! 
- : info.GetConverter().Read(PgReader); + : info.Converter.UnsafeDowncast().Read(PgReader); } /// diff --git a/src/Npgsql/NpgsqlParameter`.cs b/src/Npgsql/NpgsqlParameter`.cs index a749734643..5edccfeb1b 100644 --- a/src/Npgsql/NpgsqlParameter`.cs +++ b/src/Npgsql/NpgsqlParameter`.cs @@ -101,8 +101,7 @@ private protected override void BindCore(bool allowNullReference = false) } var value = TypedValue; - Debug.Assert(Converter is PgConverter); - if (TypeInfo!.Bind(Unsafe.As>(Converter), value, out var size, out _writeState, out var dataFormat) is { } info) + if (TypeInfo!.Bind(Converter!.UnsafeDowncast(), value, out var size, out _writeState, out var dataFormat) is { } info) { WriteSize = size; _bufferRequirement = info.BufferRequirement; @@ -120,11 +119,10 @@ private protected override ValueTask WriteValue(bool async, PgWriter writer, Can if (_asObject) return base.WriteValue(async, writer, cancellationToken); - Debug.Assert(Converter is PgConverter); if (async) - return Unsafe.As>(Converter!).WriteAsync(writer, TypedValue!, cancellationToken); + return Converter!.UnsafeDowncast().WriteAsync(writer, TypedValue!, cancellationToken); - Unsafe.As>(Converter!).Write(writer, TypedValue!); + Converter!.UnsafeDowncast().Write(writer, TypedValue!); return new(); } diff --git a/src/Npgsql/Replication/PgOutput/ReplicationValue.cs b/src/Npgsql/Replication/PgOutput/ReplicationValue.cs index c918325840..aed44411d7 100644 --- a/src/Npgsql/Replication/PgOutput/ReplicationValue.cs +++ b/src/Npgsql/Replication/PgOutput/ReplicationValue.cs @@ -115,7 +115,7 @@ public async ValueTask Get(CancellationToken cancellationToken = default) await reader.StartReadAsync(info.ConverterInfo.BufferRequirement, cancellationToken).ConfigureAwait(false); var result = info.AsObject ? 
(T)await info.ConverterInfo.Converter.ReadAsObjectAsync(reader, cancellationToken).ConfigureAwait(false) - : await info.ConverterInfo.GetConverter().ReadAsync(reader, cancellationToken).ConfigureAwait(false); + : await info.ConverterInfo.Converter.UnsafeDowncast().ReadAsync(reader, cancellationToken).ConfigureAwait(false); await reader.EndReadAsync().ConfigureAwait(false); return result; } diff --git a/src/Npgsql/Schema/NpgsqlDbColumn.cs b/src/Npgsql/Schema/NpgsqlDbColumn.cs index 4b118e97f6..e4597e3d86 100644 --- a/src/Npgsql/Schema/NpgsqlDbColumn.cs +++ b/src/Npgsql/Schema/NpgsqlDbColumn.cs @@ -1,6 +1,5 @@ using System; using System.Data.Common; -using System.Runtime.CompilerServices; using Npgsql.PostgresTypes; using NpgsqlTypes; @@ -32,7 +31,7 @@ public NpgsqlDbColumn() } internal NpgsqlDbColumn Clone() => - Unsafe.As(MemberwiseClone()); + (NpgsqlDbColumn)MemberwiseClone(); #region Standard fields // ReSharper disable once InconsistentNaming @@ -232,4 +231,4 @@ public override object? 
this[string propertyName] }; #endregion Npgsql-specific fields -} \ No newline at end of file +} From 5fc494edb757b26af0c15141987d8baaeb1f835f Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Thu, 28 Mar 2024 17:40:54 +0300 Subject: [PATCH 410/761] Start loading composite types before ranges (#5651) Fixes #5650 --- src/Npgsql/PostgresDatabaseInfo.cs | 6 ++-- test/Npgsql.Tests/Types/CompositeTests.cs | 36 +++++++++++++++++++++++ 2 files changed, 39 insertions(+), 3 deletions(-) diff --git a/src/Npgsql/PostgresDatabaseInfo.cs b/src/Npgsql/PostgresDatabaseInfo.cs index 0d2397eac3..4d793238b6 100644 --- a/src/Npgsql/PostgresDatabaseInfo.cs +++ b/src/Npgsql/PostgresDatabaseInfo.cs @@ -152,9 +152,9 @@ elemtyptype IN ('b', 'r', 'm', 'e', 'd') OR -- Array of base, range, multirange, )) ORDER BY CASE WHEN typtype IN ('b', 'e', 'p') THEN 0 -- First base types, enums, pseudo-types - WHEN typtype = 'r' THEN 1 -- Ranges after - WHEN typtype = 'm' THEN 2 -- Multiranges after - WHEN typtype = 'c' THEN 3 -- Composites after + WHEN typtype = 'c' THEN 1 -- Composites after (fields loaded later in 2nd pass) + WHEN typtype = 'r' THEN 2 -- Ranges after + WHEN typtype = 'm' THEN 3 -- Multiranges after WHEN typtype = 'd' AND elemtyptype <> 'a' THEN 4 -- Domains over non-arrays after WHEN typtype = 'a' THEN 5 -- Arrays after WHEN typtype = 'd' AND elemtyptype = 'a' THEN 6 -- Domains over arrays last diff --git a/test/Npgsql.Tests/Types/CompositeTests.cs b/test/Npgsql.Tests/Types/CompositeTests.cs index 36257e126a..713d5220a3 100644 --- a/test/Npgsql.Tests/Types/CompositeTests.cs +++ b/test/Npgsql.Tests/Types/CompositeTests.cs @@ -569,6 +569,42 @@ await AssertType( npgsqlDbType: null); } + [Test] + public async Task CompositeOverRange() + { + await using var adminConnection = await OpenConnectionAsync(); + var type = await GetTempTypeName(adminConnection); + var rangeType = await GetTempTypeName(adminConnection); + + await adminConnection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS (x 
int, some_text text); CREATE TYPE {rangeType} AS RANGE(subtype={type})"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.MapComposite(type); + dataSourceBuilder.EnableUnmappedTypes(); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + var composite1 = new SomeComposite + { + SomeText = "foo", + X = 8 + }; + + var composite2 = new SomeComposite + { + SomeText = "bar", + X = 42 + }; + + await AssertType( + connection, + new NpgsqlRange(composite1, composite2), + "[\"(8,foo)\",\"(42,bar)\"]", + rangeType, + npgsqlDbType: null, + isDefaultForWriting: false); + } + #region Test Types readonly struct DuplicateOneLongOneBool From f3205151b535ca7a11b21fce9023f326f2e6121c Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Thu, 28 Mar 2024 19:32:39 +0100 Subject: [PATCH 411/761] Require binary format in binary importer (#5648) --- src/Npgsql/Internal/PgTypeInfo.cs | 55 +++++++++++++++--------------- src/Npgsql/NpgsqlBinaryImporter.cs | 2 +- src/Npgsql/NpgsqlParameter.cs | 22 ++++++------ src/Npgsql/NpgsqlParameter`.cs | 7 ++-- 4 files changed, 43 insertions(+), 43 deletions(-) diff --git a/src/Npgsql/Internal/PgTypeInfo.cs b/src/Npgsql/Internal/PgTypeInfo.cs index b72497775e..a949ee8db5 100644 --- a/src/Npgsql/Internal/PgTypeInfo.cs +++ b/src/Npgsql/Internal/PgTypeInfo.cs @@ -114,24 +114,27 @@ internal PgConverterResolution GetResolution() return new(Converter, PgTypeId.GetValueOrDefault()); } - bool CachedCanConvert(DataFormat format, out BufferRequirements bufferRequirements) + bool CanConvert(PgConverter converter, DataFormat format, out BufferRequirements bufferRequirements) { - if (format is DataFormat.Binary) + if (HasCachedInfo(converter)) { - bufferRequirements = _binaryBufferRequirements; - return _canBinaryConvert; + switch (format) + { + case DataFormat.Binary: + bufferRequirements = _binaryBufferRequirements; + return _canBinaryConvert; + case 
DataFormat.Text: + bufferRequirements = _textBufferRequirements; + return _canTextConvert; + } } - bufferRequirements = _textBufferRequirements; - return _canTextConvert; + return converter.CanConvert(format, out bufferRequirements); } public BufferRequirements? GetBufferRequirements(PgConverter converter, DataFormat format) { - var success = HasCachedInfo(converter) - ? CachedCanConvert(format, out var bufferRequirements) - : converter.CanConvert(format, out bufferRequirements); - + var success = CanConvert(converter, format, out var bufferRequirements); return success ? bufferRequirements : null; } @@ -141,7 +144,7 @@ internal bool TryBind(Field field, DataFormat format, out PgConverterInfo info) switch (this) { case { IsResolverInfo: false }: - if (!CachedCanConvert(format, out var bufferRequirements)) + if (!CanConvert(Converter, format, out var bufferRequirements)) { info = default; return false; @@ -150,9 +153,7 @@ internal bool TryBind(Field field, DataFormat format, out PgConverterInfo info) return true; case PgResolverTypeInfo resolverInfo: var resolution = resolverInfo.GetResolution(field); - if (HasCachedInfo(resolution.Converter) - ? !CachedCanConvert(format, out bufferRequirements) - : !resolution.Converter.CanConvert(format, out bufferRequirements)) + if (!CanConvert(resolution.Converter, format, out bufferRequirements)) { info = default; return false; @@ -217,27 +218,25 @@ internal PgConverterInfo Bind(Field field, DataFormat format) return new(this, converter, bufferRequirements.Write); } - // If we don't have a converter stored we must ask the retrieved one. DataFormat ResolveFormat(PgConverter converter, out BufferRequirements bufferRequirements, DataFormat? formatPreference = null) { + // First try to check for preferred support. switch (formatPreference) { - // The common case, no preference means we default to binary if supported. - case null or DataFormat.Binary when HasCachedInfo(converter) ? 
CachedCanConvert(DataFormat.Binary, out bufferRequirements) : converter.CanConvert(DataFormat.Binary, out bufferRequirements): + case DataFormat.Binary when CanConvert(converter, DataFormat.Binary, out bufferRequirements): return DataFormat.Binary; - // In this case we either prefer text or we have no preference and our converter doesn't support binary. - case null or DataFormat.Text: - var canTextConvert = HasCachedInfo(converter) ? CachedCanConvert(DataFormat.Text, out bufferRequirements) : converter.CanConvert(DataFormat.Text, out bufferRequirements); - if (!canTextConvert) - { - if (formatPreference is null) - throw new InvalidOperationException("Converter doesn't support any data format."); - // Rerun without preference. - return ResolveFormat(converter, out bufferRequirements); - } + case DataFormat.Text when CanConvert(converter, DataFormat.Text, out bufferRequirements): return DataFormat.Text; default: - throw new ArgumentOutOfRangeException(); + // The common case, no preference given (or no match) means we default to binary if supported. + if (CanConvert(converter, DataFormat.Binary, out bufferRequirements)) + return DataFormat.Binary; + if (CanConvert(converter, DataFormat.Text, out bufferRequirements)) + return DataFormat.Text; + + ThrowHelper.ThrowInvalidOperationException("Converter doesn't support any data format."); + bufferRequirements = default; + return default; } } } diff --git a/src/Npgsql/NpgsqlBinaryImporter.cs b/src/Npgsql/NpgsqlBinaryImporter.cs index 7a9caa5595..f80807af3e 100644 --- a/src/Npgsql/NpgsqlBinaryImporter.cs +++ b/src/Npgsql/NpgsqlBinaryImporter.cs @@ -296,7 +296,7 @@ async Task Core(bool async, T value, NpgsqlDbType? npgsqlDbType, string? 
dataTyp if (newParam) _params[_column] = param; - param.Bind(out _, out _); + param.Bind(out _, out _, requiredFormat: DataFormat.Binary); try { diff --git a/src/Npgsql/NpgsqlParameter.cs b/src/Npgsql/NpgsqlParameter.cs index d1dba6af5d..6273a9617a 100644 --- a/src/Npgsql/NpgsqlParameter.cs +++ b/src/Npgsql/NpgsqlParameter.cs @@ -613,7 +613,7 @@ private protected virtual PgConverterResolution ResolveConverter(PgTypeInfo type } /// Bind the current value to the type info, truncate (if applicable), take its size, and do any final validation before writing. - internal void Bind(out DataFormat format, out Size size) + internal void Bind(out DataFormat format, out Size size, DataFormat? requiredFormat = null) { if (TypeInfo is null) ThrowHelper.ThrowInvalidOperationException($"Missing type info, {nameof(ResolveTypeInfo)} needs to be called before {nameof(Bind)}."); @@ -622,19 +622,18 @@ internal void Bind(out DataFormat format, out Size size) ThrowHelper.ThrowNotSupportedException($"Cannot write values for parameters of type '{TypeInfo.Type}' and postgres type '{TypeInfo.Options.DatabaseInfo.GetDataTypeName(PgTypeId).DisplayName}'."); // We might call this twice, once during validation and once during WriteBind, only compute things once. - if (WriteSize is not null) + if (WriteSize is null) { - format = Format; - size = WriteSize.Value; - return; - } + if (_size > 0) + HandleSizeTruncation(); - if (_size > 0) - HandleSizeTruncation(); + BindCore(requiredFormat); + } - BindCore(); format = Format; size = WriteSize!.Value; + if (requiredFormat is not null && format != requiredFormat) + ThrowHelper.ThrowNotSupportedException($"Parameter '{ParameterName}' must be written in {requiredFormat} format, but does not support this format."); // Handle Size truncate behavior for a predetermined set of types and pg types. // Doesn't matter if we 'box' Value, all supported types are reference types. 
@@ -674,7 +673,7 @@ void HandleSizeTruncation() } } - private protected virtual void BindCore(bool allowNullReference = false) + private protected virtual void BindCore(DataFormat? formatPreference, bool allowNullReference = false) { // Pull from Value so we also support object typed generic params. var value = Value; @@ -684,7 +683,7 @@ private protected virtual void BindCore(bool allowNullReference = false) if (_useSubStream && value is not null) value = _subStream = new SubReadStream((Stream)value, _size); - if (TypeInfo!.BindObject(Converter!, value, out var size, out _writeState, out var dataFormat) is { } info) + if (TypeInfo!.BindObject(Converter!, value, out var size, out _writeState, out var dataFormat, formatPreference) is { } info) { WriteSize = size; _bufferRequirement = info.BufferRequirement; @@ -694,6 +693,7 @@ private protected virtual void BindCore(bool allowNullReference = false) WriteSize = -1; _bufferRequirement = default; } + Format = dataFormat; } diff --git a/src/Npgsql/NpgsqlParameter`.cs b/src/Npgsql/NpgsqlParameter`.cs index 5edccfeb1b..e50618a510 100644 --- a/src/Npgsql/NpgsqlParameter`.cs +++ b/src/Npgsql/NpgsqlParameter`.cs @@ -91,17 +91,17 @@ private protected override PgConverterResolution ResolveConverter(PgTypeInfo typ } // We ignore allowNullReference, it's just there to control the base implementation. - private protected override void BindCore(bool allowNullReference = false) + private protected override void BindCore(DataFormat? formatPreference, bool allowNullReference = false) { if (_asObject) { // If we're object typed we should not support null. 
- base.BindCore(typeof(T) != typeof(object)); + base.BindCore(formatPreference, typeof(T) != typeof(object)); return; } var value = TypedValue; - if (TypeInfo!.Bind(Converter!.UnsafeDowncast(), value, out var size, out _writeState, out var dataFormat) is { } info) + if (TypeInfo!.Bind(Converter!.UnsafeDowncast(), value, out var size, out _writeState, out var dataFormat, formatPreference) is { } info) { WriteSize = size; _bufferRequirement = info.BufferRequirement; @@ -111,6 +111,7 @@ private protected override void BindCore(bool allowNullReference = false) WriteSize = -1; _bufferRequirement = default; } + Format = dataFormat; } From 74178a484e70c9465379c188af3a161c40c65e50 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Thu, 28 Mar 2024 19:46:44 +0100 Subject: [PATCH 412/761] GetChars fix and infra cleanup (#5618) Fixes #5616 --- .../Converters/Primitive/TextConverters.cs | 30 +++---- .../VersionPrefixedTextConverter.cs | 4 +- src/Npgsql/Internal/PgConverter.cs | 5 -- src/Npgsql/Internal/PgReader.cs | 84 ++++++++----------- src/Npgsql/NpgsqlDataReader.cs | 25 +++--- test/Npgsql.Tests/ReaderTests.cs | 24 ++++++ 6 files changed, 87 insertions(+), 85 deletions(-) diff --git a/src/Npgsql/Internal/Converters/Primitive/TextConverters.cs b/src/Npgsql/Internal/Converters/Primitive/TextConverters.cs index b0e3a1b5bd..56c5ee7bae 100644 --- a/src/Npgsql/Internal/Converters/Primitive/TextConverters.cs +++ b/src/Npgsql/Internal/Converters/Primitive/TextConverters.cs @@ -206,13 +206,13 @@ readonly struct GetChars public GetChars(int read) => Read = read; } -sealed class GetCharsTextConverter : PgStreamingConverter, IResumableRead +sealed class GetCharsTextConverter : PgStreamingConverter { readonly Encoding _encoding; public GetCharsTextConverter(Encoding encoding) => _encoding = encoding; public override GetChars Read(PgReader reader) - => reader.IsCharsRead + => reader.CharsReadActive ? 
ResumableRead(reader) : throw new NotSupportedException(); @@ -226,27 +226,25 @@ public override ValueTask ReadAsync(PgReader reader, CancellationToken GetChars ResumableRead(PgReader reader) { reader.GetCharsReadInfo(_encoding, out var charsRead, out var textReader, out var charsOffset, out var buffer); - if (charsOffset < charsRead || (buffer is null && charsRead > 0)) + + // With variable length encodings, moving backwards based on bytes means we have to start over. + if (charsRead > charsOffset) { - // With variable length encodings, moving backwards based on bytes means we have to start over. - reader.ResetCharsRead(out charsRead); + reader.RestartCharsRead(); + charsRead = 0; } // First seek towards the charsOffset. // If buffer is null read the entire thing and report the length, see sql client remarks. // https://learn.microsoft.com/en-us/dotnet/api/system.data.sqlclient.sqldatareader.getchars - int read; + var read = ConsumeChars(textReader, buffer is null ? null : charsOffset - charsRead); + Debug.Assert(buffer is null || read == charsOffset - charsRead); + reader.AdvanceCharsRead(read); if (buffer is null) - { - read = ConsumeChars(textReader, null); - } - else - { - var consumed = ConsumeChars(textReader, charsOffset - charsRead); - Debug.Assert(consumed == charsOffset - charsRead); - read = textReader.ReadBlock(buffer.GetValueOrDefault().Array!, buffer.GetValueOrDefault().Offset, buffer.GetValueOrDefault().Count); - } + return new(read); + read = textReader.ReadBlock(buffer.GetValueOrDefault().Array!, buffer.GetValueOrDefault().Offset, buffer.GetValueOrDefault().Count); + reader.AdvanceCharsRead(read); return new(read); static int ConsumeChars(TextReader reader, int? count) @@ -271,8 +269,6 @@ static int ConsumeChars(TextReader reader, int? count) return totalRead; } } - - bool IResumableRead.Supported => true; } // Moved out for code size/sharing. 
diff --git a/src/Npgsql/Internal/Converters/VersionPrefixedTextConverter.cs b/src/Npgsql/Internal/Converters/VersionPrefixedTextConverter.cs index ff3a985a66..ccb4f2041e 100644 --- a/src/Npgsql/Internal/Converters/VersionPrefixedTextConverter.cs +++ b/src/Npgsql/Internal/Converters/VersionPrefixedTextConverter.cs @@ -5,7 +5,7 @@ namespace Npgsql.Internal.Converters; -sealed class VersionPrefixedTextConverter : PgStreamingConverter, IResumableRead +sealed class VersionPrefixedTextConverter : PgStreamingConverter { readonly byte _versionPrefix; readonly PgConverter _textConverter; @@ -52,8 +52,6 @@ async ValueTask Write(bool async, PgWriter writer, [DisallowNull]T value, Cancel else _textConverter.Write(writer, value); } - - bool IResumableRead.Supported => _textConverter is IResumableRead { Supported: true }; } static class VersionPrefixedTextConverter diff --git a/src/Npgsql/Internal/PgConverter.cs b/src/Npgsql/Internal/PgConverter.cs index a99c5b4991..462030f2b7 100644 --- a/src/Npgsql/Internal/PgConverter.cs +++ b/src/Npgsql/Internal/PgConverter.cs @@ -186,11 +186,6 @@ internal static PgConverter UnsafeDowncast(this PgConverter converter) } } -interface IResumableRead -{ - bool Supported { get; } -} - public readonly struct SizeContext { [SetsRequiredMembers] diff --git a/src/Npgsql/Internal/PgReader.cs b/src/Npgsql/Internal/PgReader.cs index bd4de75124..d3bd7d3260 100644 --- a/src/Npgsql/Internal/PgReader.cs +++ b/src/Npgsql/Internal/PgReader.cs @@ -74,9 +74,6 @@ internal PgReader(NpgsqlReadBuffer buffer) ArrayPool ArrayPool => ArrayPool.Shared; - [MemberNotNullWhen(true, nameof(_charsReadReader))] - internal bool IsCharsRead => _charsReadOffset is not null; - // Here for testing purposes internal void BreakConnection() => throw _buffer.Connector.Break(new Exception("Broken")); @@ -193,7 +190,7 @@ public string ReadNullTerminatedString(Encoding encoding) NpgsqlReadBuffer.ColumnStream GetColumnStream(bool canSeek = false, int? 
length = null) { if (length > CurrentRemaining) - throw new ArgumentOutOfRangeException(nameof(length), "Length is larger than the current remaining value size"); + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(length), "Length is larger than the current remaining value size"); _requiresCleanup = true; // This will cause any previously handed out StreamReaders etc to throw, as intended. @@ -348,10 +345,10 @@ public void Rewind(int count) DisposeUserActiveStream(async: false).GetAwaiter().GetResult(); if (_buffer.ReadPosition < count) - throw new ArgumentOutOfRangeException("Cannot rewind further than the buffer start"); + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(count), "Cannot rewind further than the buffer start"); if (CurrentOffset < count) - throw new ArgumentOutOfRangeException("Cannot rewind further than the current field offset"); + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(count), "Cannot rewind further than the current field offset"); _buffer.ReadPosition -= count; } @@ -374,32 +371,24 @@ async ValueTask DisposeUserActiveStream(bool async) _userActiveStream = null; } - internal bool GetCharsReadInfo(Encoding encoding, out int charsRead, out TextReader reader, out int charsOffset, out ArraySegment? buffer) - { - if (!IsCharsRead) - throw new InvalidOperationException("No active chars read"); + internal int CharsRead => _charsRead; + internal bool CharsReadActive => _charsReadOffset is not null; - if (_charsReadReader is null) - { - charsRead = 0; - reader = _charsReadReader = GetTextReader(encoding); - charsOffset = _charsReadOffset ??= 0; - buffer = _charsReadBuffer; - return true; - } + internal void GetCharsReadInfo(Encoding encoding, out int charsRead, out TextReader reader, out int charsOffset, out ArraySegment? 
buffer) + { + if (!CharsReadActive) + ThrowHelper.ThrowInvalidOperationException("No active chars read"); charsRead = _charsRead; - reader = _charsReadReader; - charsOffset = _charsReadOffset!.Value; + reader = _charsReadReader ??= GetTextReader(encoding); + charsOffset = _charsReadOffset ?? 0; buffer = _charsReadBuffer; - - return false; } - internal void ResetCharsRead(out int charsRead) + internal void RestartCharsRead() { - if (!IsCharsRead) - throw new InvalidOperationException("No active chars read"); + if (!CharsReadActive) + ThrowHelper.ThrowInvalidOperationException("No active chars read"); switch (_charsReadReader) { @@ -411,26 +400,32 @@ internal void ResetCharsRead(out int charsRead) reader.DiscardBufferedData(); break; } - _charsRead = charsRead = 0; + _charsRead = 0; } - internal void AdvanceCharsRead(int charsRead) - { - _charsRead += charsRead; - _charsReadOffset = null; - _charsReadBuffer = null; - } + internal void AdvanceCharsRead(int charsRead) => _charsRead += charsRead; - internal void InitCharsRead(int dataOffset, ArraySegment? buffer, out int? charsRead) + internal void StartCharsRead(int dataOffset, ArraySegment? buffer) { if (!Resumable) - throw new InvalidOperationException("Wasn't initialized as resumed"); + ThrowHelper.ThrowInvalidOperationException("Wasn't initialized as resumed"); - charsRead = _charsReadReader is null ? 
null : _charsRead; _charsReadOffset = dataOffset; _charsReadBuffer = buffer; } + internal void EndCharsRead() + { + if (!Resumable) + ThrowHelper.ThrowInvalidOperationException("Wasn't initialized as resumed"); + + if (!CharsReadActive) + ThrowHelper.ThrowInvalidOperationException("No active chars read"); + + _charsReadOffset = null; + _charsReadBuffer = null; + } + internal PgReader Init(int fieldLength, DataFormat format, bool resumable = false) { if (Initialized) @@ -511,10 +506,10 @@ internal ValueTask EndReadAsync() internal async ValueTask BeginNestedRead(bool async, int size, Size bufferRequirement, CancellationToken cancellationToken = default) { if (size > CurrentRemaining) - throw new ArgumentOutOfRangeException(nameof(size), "Cannot begin a read for a larger size than the current remaining size."); + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(size), "Cannot begin a read for a larger size than the current remaining size."); if (size < 0) - throw new ArgumentOutOfRangeException(nameof(size), "Cannot be negative"); + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(size), "Cannot be negative"); var previousSize = CurrentSize; var previousStartPos = _currentStartPos; @@ -766,19 +761,14 @@ public bool ShouldBuffer(int byteCount) bool ShouldBufferSlow() { if (byteCount > _buffer.Size) - ThrowArgumentOutOfRange(); + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(byteCount), + "Buffer requirement is larger than the buffer size, this can never succeed by buffering data but requires a larger buffer size instead."); if (byteCount > CurrentRemaining) - ThrowArgumentOutOfRangeOfValue(); + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(byteCount), + "Buffer requirement is larger than the remaining length of the value, make sure the value is always at least this size or use an upper bound requirement instead."); return true; } - - static void ThrowArgumentOutOfRange() - => throw new ArgumentOutOfRangeException(nameof(byteCount), - "Buffer 
requirement is larger than the buffer size, this can never succeed by buffering data but requires a larger buffer size instead."); - static void ThrowArgumentOutOfRangeOfValue() - => throw new ArgumentOutOfRangeException(nameof(byteCount), - "Buffer requirement is larger than the remaining length of the value, make sure the value is always at least this size or use an upper bound requirement instead."); } public void Buffer(Size bufferRequirement) @@ -828,7 +818,7 @@ internal NestedReadScope(bool async, PgReader reader, int previousSize, int prev public void Dispose() { if (_async) - throw new InvalidOperationException("Cannot synchronously dispose async scopes, call DisposeAsync instead."); + ThrowHelper.ThrowInvalidOperationException("Cannot synchronously dispose async scopes, call DisposeAsync instead."); DisposeAsync().GetAwaiter().GetResult(); } diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 88c1131de9..d682eaef7c 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -1481,10 +1481,8 @@ public Task GetStreamAsync(int ordinal, CancellationToken cancellationTo public override long GetChars(int ordinal, long dataOffset, char[]? buffer, int bufferOffset, int length) { ThrowIfNotInResult(); - // Check whether we can do resumable reads. + // Check whether we have a GetChars implementation for this column type. var field = GetInfo(ordinal, typeof(GetChars), out var converter, out var bufferRequirement, out var asObject); - if (converter is not IResumableRead { Supported: true }) - ThrowHelper.ThrowNotSupportedException("The GetChars method is not supported for this column type"); if (dataOffset is < 0 or > int.MaxValue) ThrowHelper.ThrowArgumentOutOfRangeException(nameof(dataOffset), "dataOffset must be between 0 and {0}", int.MaxValue); @@ -1497,20 +1495,21 @@ public override long GetChars(int ordinal, long dataOffset, char[]? 
buffer, int if (columnLength == -1) ThrowHelper.ThrowInvalidCastException_NoValue(RowDescription[ordinal]); + var reader = PgReader; dataOffset = buffer is null ? 0 : dataOffset; - PgReader.InitCharsRead(checked((int)dataOffset), - buffer is not null ? new ArraySegment(buffer, bufferOffset, length) : (ArraySegment?)null, - out var previousDataOffset); - - if (_isSequential && previousDataOffset > dataOffset) + if (_isSequential && reader.CharsRead > dataOffset) ThrowHelper.ThrowInvalidOperationException("Attempt to read a position in the column which has already been read"); - PgReader.StartRead(bufferRequirement); + reader.StartCharsRead(checked((int)dataOffset), + buffer is not null ? new ArraySegment(buffer, bufferOffset, length) : (ArraySegment?)null); + + reader.StartRead(bufferRequirement); var result = asObject - ? (GetChars)converter.ReadAsObject(PgReader) - : ((PgConverter)converter).Read(PgReader); - PgReader.AdvanceCharsRead(result.Read); - PgReader.EndRead(); + ? (GetChars)converter.ReadAsObject(reader) + : ((PgConverter)converter).Read(reader); + reader.EndRead(); + + reader.EndCharsRead(); return result.Read; } diff --git a/test/Npgsql.Tests/ReaderTests.cs b/test/Npgsql.Tests/ReaderTests.cs index e37bea6b31..33a5127d6f 100644 --- a/test/Npgsql.Tests/ReaderTests.cs +++ b/test/Npgsql.Tests/ReaderTests.cs @@ -1668,6 +1668,30 @@ public async Task GetChars() reader.GetChars(6, 0, actual, 0, 2); } + [Test] + public async Task GetChars_AdvanceConsumed() + { + const string value = "01234567"; + + using var conn = await OpenConnectionAsync(); + using var cmd = new NpgsqlCommand($"SELECT '{value}'", conn); + using var reader = await cmd.ExecuteReaderAsync(Behavior); + reader.Read(); + + var buffer = new char[2]; + // Don't start at the beginning of the column. + reader.GetChars(0, 2, buffer, 0, 2); + reader.GetChars(0, 4, buffer, 0, 2); + reader.GetChars(0, 6, buffer, 0, 2); + + // Ask for data past the start and the previous point, exercising restart logic. 
+ if (!IsSequential) + { + reader.GetChars(0, 4, buffer, 0, 2); + reader.GetChars(0, 6, buffer, 0, 2); + } + } + [Test] public async Task GetTextReader([Values(true, false)] bool isAsync) { From 5be5aeab7b9c3abe713de97a6f658889c82e9588 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Fri, 29 Mar 2024 22:43:42 +0100 Subject: [PATCH 413/761] Remove mono socket error workaround (#5653) Fixes #3135 --- src/Npgsql/Internal/NpgsqlReadBuffer.cs | 16 +++++----------- src/Npgsql/Internal/NpgsqlWriteBuffer.cs | 18 ++++++++---------- 2 files changed, 13 insertions(+), 21 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlReadBuffer.cs b/src/Npgsql/Internal/NpgsqlReadBuffer.cs index b0aba8aae0..6cfa68e16a 100644 --- a/src/Npgsql/Internal/NpgsqlReadBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlReadBuffer.cs @@ -158,18 +158,13 @@ int ReadWithTimeout(Span buffer) catch (Exception ex) { var connector = Connector; - switch (ex) - { - // Note that mono throws SocketException with the wrong error (see #1330) - case IOException e when (e.InnerException as SocketException)?.SocketErrorCode == - (Type.GetType("Mono.Runtime") == null ? SocketError.TimedOut : SocketError.WouldBlock): + if (ex is IOException { InnerException: SocketException { SocketErrorCode: SocketError.TimedOut } }) { // If we should attempt PostgreSQL cancellation, do it the first time we get a timeout. // TODO: As an optimization, we can still attempt to send a cancellation request, but after // that immediately break the connection - if (connector.AttemptPostgresCancellation && - !connector.PostgresCancellationPerformed && - connector.PerformPostgresCancellation()) + if (connector is { AttemptPostgresCancellation: true, PostgresCancellationPerformed: false } + && connector.PerformPostgresCancellation()) { // Note that if the cancellation timeout is negative, we flow down and break the // connection immediately. 
@@ -187,9 +182,8 @@ int ReadWithTimeout(Span buffer) // Break the connection, bubbling up the correct exception type (cancellation or timeout) throw connector.Break(CreateCancelException(connector)); } - default: - throw connector.Break(new NpgsqlException("Exception while reading from stream", ex)); - } + + throw connector.Break(new NpgsqlException("Exception while reading from stream", ex)); } } } diff --git a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs index 9a2cd5eead..06b49d9c71 100644 --- a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs @@ -151,28 +151,26 @@ public async Task Flush(bool async, CancellationToken cancellationToken = defaul Underlying.Flush(); } } - catch (Exception e) + catch (Exception ex) { // Stopping twice (in case the previous Stop() call succeeded) doesn't hurt. // Not stopping will cause an assertion failure in debug mode when we call Start() the next time. // We can't stop in a finally block because Connector.Break() will dispose the buffer and the contained // _timeoutCts _timeoutCts.Stop(); - switch (e) + switch (ex) { // User requested the cancellation - case OperationCanceledException _ when (cancellationToken.IsCancellationRequested): - throw Connector.Break(e); + case OperationCanceledException when cancellationToken.IsCancellationRequested: + throw Connector.Break(ex); // Read timeout - case OperationCanceledException _: - // Note that mono throws SocketException with the wrong error (see #1330) - case IOException _ when (e.InnerException as SocketException)?.SocketErrorCode == - (Type.GetType("Mono.Runtime") == null ? SocketError.TimedOut : SocketError.WouldBlock): - Debug.Assert(e is OperationCanceledException ? async : !async); + case OperationCanceledException: + case IOException { InnerException: SocketException { SocketErrorCode: SocketError.TimedOut } }: + Debug.Assert(ex is OperationCanceledException ? 
async : !async); throw Connector.Break(new NpgsqlException("Exception while writing to stream", new TimeoutException("Timeout during writing attempt"))); } - throw Connector.Break(new NpgsqlException("Exception while writing to stream", e)); + throw Connector.Break(new NpgsqlException("Exception while writing to stream", ex)); } NpgsqlEventSource.Log.BytesWritten(WritePosition); _metricsReporter?.ReportBytesWritten(WritePosition); From c86e5ef4f585a19c1389219597c0443bf09552f5 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Fri, 12 Apr 2024 21:10:00 +0300 Subject: [PATCH 414/761] Add [Experimental] to converter-related APIs (#5668) Closes #5620 --- src/Npgsql.GeoJSON/Npgsql.GeoJSON.csproj | 1 + src/Npgsql.Json.NET/Npgsql.Json.NET.csproj | 1 + .../Npgsql.NetTopologySuite.csproj | 1 + src/Npgsql.NodaTime/Npgsql.NodaTime.csproj | 1 + src/Npgsql/Internal/BufferRequirements.cs | 2 ++ src/Npgsql/Internal/DataFormat.cs | 2 ++ .../Internal/DynamicTypeInfoResolver.cs | 1 + src/Npgsql/Internal/HackyEnumTypeMapping.cs | 3 ++- .../Internal/INpgsqlDatabaseInfoFactory.cs | 6 ++++-- src/Npgsql/Internal/IPgTypeInfoResolver.cs | 2 ++ src/Npgsql/Internal/NpgsqlConnector.cs | 2 ++ src/Npgsql/Internal/NpgsqlDatabaseInfo.cs | 1 + src/Npgsql/Internal/NpgsqlReadBuffer.cs | 2 ++ src/Npgsql/Internal/PgBufferedConverter.cs | 1 + src/Npgsql/Internal/PgConverter.cs | 1 + src/Npgsql/Internal/PgConverterResolver.cs | 2 ++ src/Npgsql/Internal/PgReader.cs | 1 + src/Npgsql/Internal/PgSerializerOptions.cs | 2 ++ src/Npgsql/Internal/PgStreamingConverter.cs | 2 ++ src/Npgsql/Internal/PgTypeInfo.cs | 1 + .../Internal/PgTypeInfoResolverFactory.cs | 3 +++ src/Npgsql/Internal/PgWriter.cs | 3 +++ src/Npgsql/Internal/Postgres/DataTypeName.cs | 2 ++ src/Npgsql/Internal/Postgres/Field.cs | 3 +++ src/Npgsql/Internal/Postgres/Oid.cs | 2 ++ src/Npgsql/Internal/Postgres/PgTypeId.cs | 1 + src/Npgsql/Internal/Size.cs | 3 +++ src/Npgsql/Internal/TypeInfoMapping.cs | 6 ++++++ src/Npgsql/Internal/ValueMetadata.cs | 
3 +++ src/Npgsql/Npgsql.csproj | 2 ++ src/Npgsql/NpgsqlDiagnostics.cs | 7 +++++++ src/Npgsql/Shims/ExperimentalAttribute.cs | 21 +++++++++++++++++++ .../Npgsql.Benchmarks.csproj | 1 + test/Npgsql.Tests/Npgsql.Tests.csproj | 2 ++ 34 files changed, 91 insertions(+), 3 deletions(-) create mode 100644 src/Npgsql/NpgsqlDiagnostics.cs create mode 100644 src/Npgsql/Shims/ExperimentalAttribute.cs diff --git a/src/Npgsql.GeoJSON/Npgsql.GeoJSON.csproj b/src/Npgsql.GeoJSON/Npgsql.GeoJSON.csproj index a802ca5653..072feabea3 100644 --- a/src/Npgsql.GeoJSON/Npgsql.GeoJSON.csproj +++ b/src/Npgsql.GeoJSON/Npgsql.GeoJSON.csproj @@ -5,6 +5,7 @@ npgsql;postgresql;postgres;postgis;geojson;spatial;ado;ado.net;database;sql net6.0 net8.0 + $(NoWarn);NPG9001 diff --git a/src/Npgsql.Json.NET/Npgsql.Json.NET.csproj b/src/Npgsql.Json.NET/Npgsql.Json.NET.csproj index 49707eb02f..67109a48da 100644 --- a/src/Npgsql.Json.NET/Npgsql.Json.NET.csproj +++ b/src/Npgsql.Json.NET/Npgsql.Json.NET.csproj @@ -6,6 +6,7 @@ net6.0 net8.0 enable + $(NoWarn);NPG9001 diff --git a/src/Npgsql.NetTopologySuite/Npgsql.NetTopologySuite.csproj b/src/Npgsql.NetTopologySuite/Npgsql.NetTopologySuite.csproj index fd2342614b..214f4bd72e 100644 --- a/src/Npgsql.NetTopologySuite/Npgsql.NetTopologySuite.csproj +++ b/src/Npgsql.NetTopologySuite/Npgsql.NetTopologySuite.csproj @@ -7,6 +7,7 @@ net6.0 net8.0 $(NoWarn);NU5104 + $(NoWarn);NPG9001 diff --git a/src/Npgsql.NodaTime/Npgsql.NodaTime.csproj b/src/Npgsql.NodaTime/Npgsql.NodaTime.csproj index 4ac9e068fa..3e4d826188 100644 --- a/src/Npgsql.NodaTime/Npgsql.NodaTime.csproj +++ b/src/Npgsql.NodaTime/Npgsql.NodaTime.csproj @@ -6,6 +6,7 @@ README.md net6.0 net8.0 + $(NoWarn);NPG9001 diff --git a/src/Npgsql/Internal/BufferRequirements.cs b/src/Npgsql/Internal/BufferRequirements.cs index cd32c0cbd1..14ffabc52b 100644 --- a/src/Npgsql/Internal/BufferRequirements.cs +++ b/src/Npgsql/Internal/BufferRequirements.cs @@ -1,7 +1,9 @@ using System; +using System.Diagnostics.CodeAnalysis; 
namespace Npgsql.Internal; +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] public readonly struct BufferRequirements : IEquatable { readonly Size _read; diff --git a/src/Npgsql/Internal/DataFormat.cs b/src/Npgsql/Internal/DataFormat.cs index c9950ea417..c52b418b7d 100644 --- a/src/Npgsql/Internal/DataFormat.cs +++ b/src/Npgsql/Internal/DataFormat.cs @@ -1,8 +1,10 @@ using System; using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; namespace Npgsql.Internal; +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] public enum DataFormat : byte { Binary, diff --git a/src/Npgsql/Internal/DynamicTypeInfoResolver.cs b/src/Npgsql/Internal/DynamicTypeInfoResolver.cs index 637c337321..421de703f5 100644 --- a/src/Npgsql/Internal/DynamicTypeInfoResolver.cs +++ b/src/Npgsql/Internal/DynamicTypeInfoResolver.cs @@ -6,6 +6,7 @@ namespace Npgsql.Internal; +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] [RequiresDynamicCode("A dynamic type info resolver may need to construct a generic converter for a statically unknown type.")] public abstract class DynamicTypeInfoResolver : IPgTypeInfoResolver { diff --git a/src/Npgsql/Internal/HackyEnumTypeMapping.cs b/src/Npgsql/Internal/HackyEnumTypeMapping.cs index 1aa4b27554..8b3d5255cc 100644 --- a/src/Npgsql/Internal/HackyEnumTypeMapping.cs +++ b/src/Npgsql/Internal/HackyEnumTypeMapping.cs @@ -1,5 +1,6 @@ using System; using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; using System.Linq; using System.Reflection; using Npgsql.Internal; @@ -8,10 +9,10 @@ namespace Npgsql.Internal; - /// /// Hacky temporary measure used by EFCore.PG to extract user-configured enum mappings. Accessed via reflection only. 
/// +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] public sealed class HackyEnumTypeMapping { public HackyEnumTypeMapping(Type enumClrType, string pgTypeName, INpgsqlNameTranslator nameTranslator) diff --git a/src/Npgsql/Internal/INpgsqlDatabaseInfoFactory.cs b/src/Npgsql/Internal/INpgsqlDatabaseInfoFactory.cs index ccdb7a8477..ea3f0ad525 100644 --- a/src/Npgsql/Internal/INpgsqlDatabaseInfoFactory.cs +++ b/src/Npgsql/Internal/INpgsqlDatabaseInfoFactory.cs @@ -1,4 +1,5 @@ -using System.Threading.Tasks; +using System.Diagnostics.CodeAnalysis; +using System.Threading.Tasks; using Npgsql.Util; namespace Npgsql.Internal; @@ -8,6 +9,7 @@ namespace Npgsql.Internal; /// and the types it contains. When first connecting to a database, Npgsql will attempt to load information /// about it via this factory. /// +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] public interface INpgsqlDatabaseInfoFactory { /// @@ -19,4 +21,4 @@ public interface INpgsqlDatabaseInfoFactory /// database isn't of the correct type and isn't handled by this factory. /// Task Load(NpgsqlConnector conn, NpgsqlTimeout timeout, bool async); -} \ No newline at end of file +} diff --git a/src/Npgsql/Internal/IPgTypeInfoResolver.cs b/src/Npgsql/Internal/IPgTypeInfoResolver.cs index 62955446eb..b7b3ddc9ec 100644 --- a/src/Npgsql/Internal/IPgTypeInfoResolver.cs +++ b/src/Npgsql/Internal/IPgTypeInfoResolver.cs @@ -1,4 +1,5 @@ using System; +using System.Diagnostics.CodeAnalysis; using Npgsql.Internal.Postgres; namespace Npgsql.Internal; @@ -6,6 +7,7 @@ namespace Npgsql.Internal; /// /// An Npgsql resolver for type info. Used by Npgsql to read and write values to PostgreSQL. 
/// +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] public interface IPgTypeInfoResolver { /// diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index c3726180a1..adfa7cb5d8 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -3,6 +3,7 @@ using System.Collections.Generic; using System.Data; using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; using System.IO; using System.Net; using System.Net.Security; @@ -29,6 +30,7 @@ namespace Npgsql.Internal; /// Represents a connection to a PostgreSQL backend. Unlike NpgsqlConnection objects, which are /// exposed to users, connectors are internal to Npgsql and are recycled by the connection pool. /// +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] public sealed partial class NpgsqlConnector { #region Fields and Properties diff --git a/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs b/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs index fed3f8c165..7fd3fe95e9 100644 --- a/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs +++ b/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs @@ -12,6 +12,7 @@ namespace Npgsql.Internal; /// Base class for implementations which provide information about PostgreSQL and PostgreSQL-like databases /// (e.g. type definitions, capabilities...). 
/// +[Experimental(NpgsqlDiagnostics.DatabaseInfoExperimental)] public abstract class NpgsqlDatabaseInfo { #region Fields diff --git a/src/Npgsql/Internal/NpgsqlReadBuffer.cs b/src/Npgsql/Internal/NpgsqlReadBuffer.cs index 6cfa68e16a..139594e25a 100644 --- a/src/Npgsql/Internal/NpgsqlReadBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlReadBuffer.cs @@ -2,6 +2,7 @@ using System.Buffers; using System.Buffers.Binary; using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; using System.IO; using System.Net.Sockets; using System.Runtime.CompilerServices; @@ -17,6 +18,7 @@ namespace Npgsql.Internal; /// A buffer used by Npgsql to read data from the socket efficiently. /// Provides methods which decode different values types and tracks the current position. /// +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] sealed partial class NpgsqlReadBuffer : IDisposable { #region Fields and Properties diff --git a/src/Npgsql/Internal/PgBufferedConverter.cs b/src/Npgsql/Internal/PgBufferedConverter.cs index 2bed7ffa3c..d7b673fb7c 100644 --- a/src/Npgsql/Internal/PgBufferedConverter.cs +++ b/src/Npgsql/Internal/PgBufferedConverter.cs @@ -5,6 +5,7 @@ namespace Npgsql.Internal; +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] public abstract class PgBufferedConverter : PgConverter { protected PgBufferedConverter(bool customDbNullPredicate = false) : base(customDbNullPredicate) { } diff --git a/src/Npgsql/Internal/PgConverter.cs b/src/Npgsql/Internal/PgConverter.cs index 462030f2b7..323c572e0a 100644 --- a/src/Npgsql/Internal/PgConverter.cs +++ b/src/Npgsql/Internal/PgConverter.cs @@ -8,6 +8,7 @@ namespace Npgsql.Internal; +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] public abstract class PgConverter { internal DbNullPredicate DbNullPredicateKind { get; } diff --git a/src/Npgsql/Internal/PgConverterResolver.cs b/src/Npgsql/Internal/PgConverterResolver.cs index baee09d58e..5fbe699017 100644 --- a/src/Npgsql/Internal/PgConverterResolver.cs +++ 
b/src/Npgsql/Internal/PgConverterResolver.cs @@ -1,8 +1,10 @@ using System; +using System.Diagnostics.CodeAnalysis; using Npgsql.Internal.Postgres; namespace Npgsql.Internal; +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] public abstract class PgConverterResolver { private protected PgConverterResolver() { } diff --git a/src/Npgsql/Internal/PgReader.cs b/src/Npgsql/Internal/PgReader.cs index d3bd7d3260..90f5b53e14 100644 --- a/src/Npgsql/Internal/PgReader.cs +++ b/src/Npgsql/Internal/PgReader.cs @@ -10,6 +10,7 @@ namespace Npgsql.Internal; +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] public class PgReader { // We don't want to add a ton of memory pressure for large strings. diff --git a/src/Npgsql/Internal/PgSerializerOptions.cs b/src/Npgsql/Internal/PgSerializerOptions.cs index 193e193826..b79b5757ec 100644 --- a/src/Npgsql/Internal/PgSerializerOptions.cs +++ b/src/Npgsql/Internal/PgSerializerOptions.cs @@ -1,4 +1,5 @@ using System; +using System.Diagnostics.CodeAnalysis; using System.IO; using System.Runtime.CompilerServices; using System.Text; @@ -8,6 +9,7 @@ namespace Npgsql.Internal; +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] public sealed class PgSerializerOptions { /// diff --git a/src/Npgsql/Internal/PgStreamingConverter.cs b/src/Npgsql/Internal/PgStreamingConverter.cs index 3d69889b3b..f0a32156d3 100644 --- a/src/Npgsql/Internal/PgStreamingConverter.cs +++ b/src/Npgsql/Internal/PgStreamingConverter.cs @@ -1,11 +1,13 @@ using System; using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; using System.Runtime.CompilerServices; using System.Threading; using System.Threading.Tasks; namespace Npgsql.Internal; +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] public abstract class PgStreamingConverter : PgConverter { protected PgStreamingConverter(bool customDbNullPredicate = false) : base(customDbNullPredicate) { } diff --git a/src/Npgsql/Internal/PgTypeInfo.cs b/src/Npgsql/Internal/PgTypeInfo.cs index 
a949ee8db5..d83c5dfa36 100644 --- a/src/Npgsql/Internal/PgTypeInfo.cs +++ b/src/Npgsql/Internal/PgTypeInfo.cs @@ -4,6 +4,7 @@ namespace Npgsql.Internal; +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] public class PgTypeInfo { readonly bool _canBinaryConvert; diff --git a/src/Npgsql/Internal/PgTypeInfoResolverFactory.cs b/src/Npgsql/Internal/PgTypeInfoResolverFactory.cs index f30059c7ec..9392e2c840 100644 --- a/src/Npgsql/Internal/PgTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/PgTypeInfoResolverFactory.cs @@ -1,5 +1,8 @@ +using System.Diagnostics.CodeAnalysis; + namespace Npgsql.Internal; +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] public abstract class PgTypeInfoResolverFactory { public abstract IPgTypeInfoResolver CreateResolver(); diff --git a/src/Npgsql/Internal/PgWriter.cs b/src/Npgsql/Internal/PgWriter.cs index 3c5064386c..6fe6ed0e4c 100644 --- a/src/Npgsql/Internal/PgWriter.cs +++ b/src/Npgsql/Internal/PgWriter.cs @@ -2,6 +2,7 @@ using System.Buffers; using System.Buffers.Binary; using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; using System.IO; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; @@ -92,6 +93,7 @@ public ValueTask FlushAsync(CancellationToken cancellationToken = default) => new(_buffer.Flush(async: true, cancellationToken)); } +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] public sealed class PgWriter { readonly IBufferWriter _writer; @@ -557,6 +559,7 @@ public override long Seek(long offset, SeekOrigin origin) } // No-op for now. 
+[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] public struct NestedWriteScope : IDisposable { public void Dispose() diff --git a/src/Npgsql/Internal/Postgres/DataTypeName.cs b/src/Npgsql/Internal/Postgres/DataTypeName.cs index d20e479f85..c5b223f866 100644 --- a/src/Npgsql/Internal/Postgres/DataTypeName.cs +++ b/src/Npgsql/Internal/Postgres/DataTypeName.cs @@ -1,11 +1,13 @@ using System; using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; namespace Npgsql.Internal.Postgres; /// /// Represents the fully-qualified name of a PostgreSQL type. /// +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] [DebuggerDisplay("{DisplayName,nq}")] public readonly struct DataTypeName : IEquatable { diff --git a/src/Npgsql/Internal/Postgres/Field.cs b/src/Npgsql/Internal/Postgres/Field.cs index f6a261c103..cb2879f998 100644 --- a/src/Npgsql/Internal/Postgres/Field.cs +++ b/src/Npgsql/Internal/Postgres/Field.cs @@ -1,6 +1,9 @@ +using System.Diagnostics.CodeAnalysis; + namespace Npgsql.Internal.Postgres; /// Base field type shared between tables and composites. 
+[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] public readonly struct Field { public Field(string name, PgTypeId pgTypeId, int typeModifier) diff --git a/src/Npgsql/Internal/Postgres/Oid.cs b/src/Npgsql/Internal/Postgres/Oid.cs index e6fcad6f4a..55ede288fe 100644 --- a/src/Npgsql/Internal/Postgres/Oid.cs +++ b/src/Npgsql/Internal/Postgres/Oid.cs @@ -1,7 +1,9 @@ using System; +using System.Diagnostics.CodeAnalysis; namespace Npgsql.Internal.Postgres; +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] public readonly struct Oid: IEquatable { public Oid(uint value) => Value = value; diff --git a/src/Npgsql/Internal/Postgres/PgTypeId.cs b/src/Npgsql/Internal/Postgres/PgTypeId.cs index c5a40d22ca..ee5ffb9d41 100644 --- a/src/Npgsql/Internal/Postgres/PgTypeId.cs +++ b/src/Npgsql/Internal/Postgres/PgTypeId.cs @@ -6,6 +6,7 @@ namespace Npgsql.Internal.Postgres; /// /// A discriminated union of and . /// +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] public readonly struct PgTypeId: IEquatable { readonly DataTypeName _dataTypeName; diff --git a/src/Npgsql/Internal/Size.cs b/src/Npgsql/Internal/Size.cs index 7f5e52a1f1..7cbdd9bb20 100644 --- a/src/Npgsql/Internal/Size.cs +++ b/src/Npgsql/Internal/Size.cs @@ -1,8 +1,10 @@ using System; using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; namespace Npgsql.Internal; +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] public enum SizeKind { Unknown = 0, @@ -10,6 +12,7 @@ public enum SizeKind UpperBound } +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] [DebuggerDisplay("{DebuggerDisplay,nq}")] public readonly struct Size : IEquatable { diff --git a/src/Npgsql/Internal/TypeInfoMapping.cs b/src/Npgsql/Internal/TypeInfoMapping.cs index 00b9ba18ee..753c2bcac3 100644 --- a/src/Npgsql/Internal/TypeInfoMapping.cs +++ b/src/Npgsql/Internal/TypeInfoMapping.cs @@ -19,8 +19,10 @@ namespace Npgsql.Internal; /// /// Signals whether a resolver based TypeInfo can keep its PgTypeId 
undecided or whether it should follow mapping.DataTypeName. /// +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] public delegate PgTypeInfo TypeInfoFactory(PgSerializerOptions options, TypeInfoMapping mapping, bool resolvedDataTypeName); +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] public enum MatchRequirement { /// Match when the clr type and datatype name both match. @@ -33,6 +35,7 @@ public enum MatchRequirement } /// A factory for well-known PgConverters. +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] public static class PgConverterFactory { public static PgConverter CreateArrayMultirangeConverter(PgConverter rangeConverter, PgSerializerOptions options) where T : notnull @@ -55,6 +58,7 @@ public static PgConverter CreatePolymorphicArrayConverter(Func throw new InvalidOperationException($"Boxing converters are not supported, manually construct a mapping over a casting converter{(resolver ? " resolver" : "")} instead."); } +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] public static class TypeInfoMappingHelpers { internal static bool TryResolveFullyQualifiedName(PgSerializerOptions options, string dataTypeName, out DataTypeName fqDataTypeName) diff --git a/src/Npgsql/Internal/ValueMetadata.cs b/src/Npgsql/Internal/ValueMetadata.cs index ff041a3060..b71028c4a1 100644 --- a/src/Npgsql/Internal/ValueMetadata.cs +++ b/src/Npgsql/Internal/ValueMetadata.cs @@ -1,5 +1,8 @@ +using System.Diagnostics.CodeAnalysis; + namespace Npgsql.Internal; +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] public readonly struct ValueMetadata { public required DataFormat Format { get; init; } diff --git a/src/Npgsql/Npgsql.csproj b/src/Npgsql/Npgsql.csproj index ecae24940a..77b65d47b2 100644 --- a/src/Npgsql/Npgsql.csproj +++ b/src/Npgsql/Npgsql.csproj @@ -8,6 +8,8 @@ net6.0;net8.0 net8.0 $(NoWarn);CA2017 + $(NoWarn);NPG9001 + $(NoWarn);NPG9002 diff --git a/src/Npgsql/NpgsqlDiagnostics.cs b/src/Npgsql/NpgsqlDiagnostics.cs new file mode 
100644 index 0000000000..2037fec667 --- /dev/null +++ b/src/Npgsql/NpgsqlDiagnostics.cs @@ -0,0 +1,7 @@ +namespace Npgsql; + +static class NpgsqlDiagnostics +{ + public const string ConvertersExperimental = "NPG9001"; + public const string DatabaseInfoExperimental = "NPG9002"; +} diff --git a/src/Npgsql/Shims/ExperimentalAttribute.cs b/src/Npgsql/Shims/ExperimentalAttribute.cs new file mode 100644 index 0000000000..36ff9ee11d --- /dev/null +++ b/src/Npgsql/Shims/ExperimentalAttribute.cs @@ -0,0 +1,21 @@ +#if !NET8_0_OR_GREATER +namespace System.Diagnostics.CodeAnalysis; + +/// Indicates that an API is experimental and it may change in the future. +[AttributeUsage(AttributeTargets.Assembly | AttributeTargets.Module | AttributeTargets.Class | AttributeTargets.Struct | AttributeTargets.Enum | AttributeTargets.Constructor | AttributeTargets.Method | AttributeTargets.Property | AttributeTargets.Field | AttributeTargets.Event | AttributeTargets.Interface | AttributeTargets.Delegate, Inherited = false)] +public sealed class ExperimentalAttribute : Attribute +{ + /// Initializes a new instance of the class, specifying the ID that the compiler will use when reporting a use of the API the attribute applies to. + /// The ID that the compiler will use when reporting a use of the API the attribute applies to. + public ExperimentalAttribute(string diagnosticId) => this.DiagnosticId = diagnosticId; + + /// Gets the ID that the compiler will use when reporting a use of the API the attribute applies to. + /// The unique diagnostic ID. + public string DiagnosticId { get; } + + /// Gets or sets the URL for corresponding documentation. + /// The API accepts a format string instead of an actual URL, creating a generic URL that includes the diagnostic ID. + /// The format string that represents a URL to corresponding documentation. + public string? 
UrlFormat { get; set; } +} +#endif diff --git a/test/Npgsql.Benchmarks/Npgsql.Benchmarks.csproj b/test/Npgsql.Benchmarks/Npgsql.Benchmarks.csproj index bc51b25561..922d4cbdce 100644 --- a/test/Npgsql.Benchmarks/Npgsql.Benchmarks.csproj +++ b/test/Npgsql.Benchmarks/Npgsql.Benchmarks.csproj @@ -4,6 +4,7 @@ portable Npgsql.Benchmarks Exe + $(NoWarn);NPG9001 diff --git a/test/Npgsql.Tests/Npgsql.Tests.csproj b/test/Npgsql.Tests/Npgsql.Tests.csproj index 980b51d8aa..6b7baca8ad 100644 --- a/test/Npgsql.Tests/Npgsql.Tests.csproj +++ b/test/Npgsql.Tests/Npgsql.Tests.csproj @@ -12,5 +12,7 @@ true + $(NoWarn);NPG9001 + $(NoWarn);NPG9002 From 5b380fb55130b608e4f2a9b670a2f6a187badba1 Mon Sep 17 00:00:00 2001 From: Jelte Fennema-Nio Date: Sat, 13 Apr 2024 20:34:58 +0200 Subject: [PATCH 415/761] Set the default Connection Lifetime to one hour (#5662) Connection Lifetime can also be used to limit memory growth of PostgreSQL connection. Certain caches only grow over time, the most common example is the cache for table metadata: the "relcache". For systems with many tables (often times due to partitioning) this relcache slowly grows larger and larger. By putting a 1 hour limit on a connection lifetime such excessive growth is limited. This same 1 hour limit is used for PgBouncer its equivalent server_lifetime config option. (I'm the maintainer of PgBouncer) --- src/Npgsql/NpgsqlConnectionStringBuilder.cs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/Npgsql/NpgsqlConnectionStringBuilder.cs b/src/Npgsql/NpgsqlConnectionStringBuilder.cs index 88f3043fc6..361232d198 100644 --- a/src/Npgsql/NpgsqlConnectionStringBuilder.cs +++ b/src/Npgsql/NpgsqlConnectionStringBuilder.cs @@ -777,13 +777,17 @@ public int ConnectionPruningInterval /// /// The total maximum lifetime of connections (in seconds). Connections which have exceeded this value will be /// destroyed instead of returned from the pool. 
This is useful in clustered configurations to force load - /// balancing between a running server and a server just brought online. + /// balancing between a running server and a server just brought online. It can also be useful to prevent + /// runaway memory growth of connections at the PostgreSQL server side, because in some cases very long lived + /// connections slowly consume more and more memory over time. + /// Defaults to 3600 seconds (1 hour). /// - /// The time (in seconds) to wait, or 0 to to make connections last indefinitely (the default). + /// The time (in seconds) to wait, or 0 to to make connections last indefinitely. [Category("Pooling")] [Description("The total maximum lifetime of connections (in seconds).")] [DisplayName("Connection Lifetime")] [NpgsqlConnectionStringProperty("Load Balance Timeout")] + [DefaultValue(3600)] public int ConnectionLifetime { get => _connectionLifetime; From 058894067d33229fbef2f3bcafbfa75858fc60fb Mon Sep 17 00:00:00 2001 From: Yoh Deadfall Date: Mon, 15 Apr 2024 10:34:13 +0300 Subject: [PATCH 416/761] Changed year to 2024 (#5670) --- Directory.Build.props | 2 +- LICENSE | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Directory.Build.props b/Directory.Build.props index 57494750c7..298bcc5ac6 100644 --- a/Directory.Build.props +++ b/Directory.Build.props @@ -10,7 +10,7 @@ true true - Copyright 2023 © The Npgsql Development Team + Copyright 2024 © The Npgsql Development Team Npgsql PostgreSQL https://github.com/npgsql/npgsql diff --git a/LICENSE b/LICENSE index efec310cda..a74ee166ce 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2002-2023, Npgsql +Copyright (c) 2002-2024, Npgsql Permission to use, copy, modify, and distribute this software and its documentation for any purpose, without fee, and without a written agreement From 9e70a62a375b6d60d05d39a0e7c3bcd47fe7869e Mon Sep 17 00:00:00 2001 From: John Paul Date: Sun, 21 Apr 2024 18:23:53 -0400 Subject: [PATCH 417/761] Buffer 
over-read with ArrayNullabilityMode.PerInstance (#5675) Fixes #5674 --- src/Npgsql/Internal/Converters/ArrayConverter.cs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/Npgsql/Internal/Converters/ArrayConverter.cs b/src/Npgsql/Internal/Converters/ArrayConverter.cs index 2255100e95..d191b43be6 100644 --- a/src/Npgsql/Internal/Converters/ArrayConverter.cs +++ b/src/Npgsql/Internal/Converters/ArrayConverter.cs @@ -628,6 +628,12 @@ public PolymorphicArrayConverter(PgConverter structElementCollectionConve _nullableElementCollectionConverter = nullableElementCollectionConverter; } + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.Create(read: sizeof(int) + sizeof(int), write: Size.Unknown); + return format is DataFormat.Binary; + } + public override TBase Read(PgReader reader) { _ = reader.ReadInt32(); From 12e5ec24db566a4a0fe93d283f1007a30b4e31aa Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Mon, 22 Apr 2024 01:27:55 +0200 Subject: [PATCH 418/761] Fix output parameter population in batch commands (#5644) Fixes #5642 --- src/Npgsql/NpgsqlCommand.cs | 6 +- src/Npgsql/NpgsqlDataReader.cs | 13 ++-- src/Npgsql/SqlQueryParser.cs | 3 +- test/Npgsql.Tests/BatchTests.cs | 18 ----- test/Npgsql.Tests/CommandTests.cs | 28 -------- test/Npgsql.Tests/StoredProcedureTests.cs | 82 +++++++++++++++++++++++ 6 files changed, 94 insertions(+), 56 deletions(-) diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index 7df8a71bf5..6403673212 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -874,7 +874,10 @@ internal void ProcessRawQuery(SqlQueryParser? 
parser, bool standardConformingStr batchCommand = TruncateStatementsToOne(); batchCommand.FinalCommandText = CommandText; if (parameters is not null) + { batchCommand.PositionalParameters = parameters.InternalList; + batchCommand._parameters = parameters; + } } else { @@ -911,8 +914,6 @@ internal void ProcessRawQuery(SqlQueryParser? parser, bool standardConformingStr else { parser.ParseRawQuery(batchCommand, standardConformingStrings); - if (batchCommand._parameters?.HasOutputParameters == true) - ThrowHelper.ThrowNotSupportedException("Batches cannot cannot have out parameters"); ValidateParameterCount(batchCommand); } @@ -989,6 +990,7 @@ internal void ProcessRawQuery(SqlQueryParser? parser, bool standardConformingStr batchCommand ??= TruncateStatementsToOne(); batchCommand.FinalCommandText = sqlBuilder.ToString(); + batchCommand._parameters = parameters; batchCommand.PositionalParameters.AddRange(inputParameters); ValidateParameterCount(batchCommand); diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index d682eaef7c..98a3f4e662 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -459,15 +459,15 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo continue; } - if (!Command.IsWrappedByBatch && StatementIndex == 0 && Command._parameters?.HasOutputParameters == true) + if ((Command.IsWrappedByBatch || StatementIndex is 0) && Command.InternalBatchCommands[StatementIndex]._parameters?.HasOutputParameters == true) { - // If output parameters are present and this is the first row of the first resultset, + // If output parameters are present and this is the first row of the resultset, // we must always read it in non-sequential mode because it will be traversed twice (once // here for the parameters, then as a regular row). 
msg = await Connector.ReadMessage(async).ConfigureAwait(false); ProcessMessage(msg); if (msg.Code == BackendMessageCode.DataRow) - PopulateOutputParameters(); + PopulateOutputParameters(Command.InternalBatchCommands[StatementIndex]._parameters!); } else { @@ -598,12 +598,11 @@ async ValueTask ConsumeResultSet(bool async) } - void PopulateOutputParameters() + void PopulateOutputParameters(NpgsqlParameterCollection parameters) { // The first row in a stored procedure command that has output parameters needs to be traversed twice - // once for populating the output parameters and once for the actual result set traversal. So in this // case we can't be sequential. - Debug.Assert(StatementIndex == 0); Debug.Assert(RowDescription != null); Debug.Assert(State == ReaderState.BeforeResult); @@ -616,7 +615,7 @@ void PopulateOutputParameters() var taken = new List(); for (var i = 0; i < ColumnCount; i++) { - if (Command.Parameters.TryGetValue(GetName(i), out var p) && p.IsOutputDirection) + if (parameters.TryGetValue(GetName(i), out var p) && p.IsOutputDirection) { p.Value = GetValue(i); taken.Add(p); @@ -628,7 +627,7 @@ void PopulateOutputParameters() // Not sure where this odd behavior comes from: all output parameters which did not get matched by // name now get populated with column values which weren't matched. Keeping this for backwards compat, // opened #2252 for investigation. 
- foreach (var p in (IEnumerable)Command.Parameters) + foreach (var p in (IEnumerable)parameters) { if (!p.IsOutputDirection || taken.Contains(p)) continue; diff --git a/src/Npgsql/SqlQueryParser.cs b/src/Npgsql/SqlQueryParser.cs index 2a76755f0b..2e9e37a010 100644 --- a/src/Npgsql/SqlQueryParser.cs +++ b/src/Npgsql/SqlQueryParser.cs @@ -501,10 +501,11 @@ void MoveToNextBatchCommand() { batchCommand = batchCommands[statementIndex]; batchCommand.Reset(); + batchCommand._parameters = parameters; } else { - batchCommand = new NpgsqlBatchCommand(); + batchCommand = new NpgsqlBatchCommand { _parameters = parameters }; batchCommands.Add(batchCommand); } } diff --git a/test/Npgsql.Tests/BatchTests.cs b/test/Npgsql.Tests/BatchTests.cs index 977bb5f98f..208fc2642d 100644 --- a/test/Npgsql.Tests/BatchTests.cs +++ b/test/Npgsql.Tests/BatchTests.cs @@ -70,24 +70,6 @@ public async Task Positional_parameters() Assert.That(await reader.NextResultAsync(), Is.False); } - [Test] - public async Task Out_parameters_are_not_allowed() - { - await using var conn = await OpenConnectionAsync(); - await using var batch = new NpgsqlBatch(conn) - { - BatchCommands = - { - new("SELECT @p1") - { - Parameters = { new("p", 8) { Direction = ParameterDirection.InputOutput } } - } - } - }; - - Assert.That(() => batch.ExecuteReaderAsync(Behavior), Throws.Exception.TypeOf()); - } - #endregion Parameters #region NpgsqlBatchCommand diff --git a/test/Npgsql.Tests/CommandTests.cs b/test/Npgsql.Tests/CommandTests.cs index 9bb2b3c6f7..9c42da9c0c 100644 --- a/test/Npgsql.Tests/CommandTests.cs +++ b/test/Npgsql.Tests/CommandTests.cs @@ -547,34 +547,6 @@ public async Task CloseConnection_with_exception() #endregion - [Test] - public async Task StoredProcedure_positional_parameters_works() - { - if (IsMultiplexing) - return; - - await using var connection = await DataSource.OpenConnectionAsync(); - await using var transaction = await connection.BeginTransactionAsync(IsolationLevel.Serializable); - await using 
var batch = new NpgsqlBatch(connection, transaction) - { - BatchCommands = - { - new("unknown_procedure") - { - CommandType = CommandType.StoredProcedure, - Parameters = - { - new() { Value = "" }, - new() { DbType = DbType.Int64, Direction = ParameterDirection.Output } - } - }, - new ("COMMIT") - } - }; - - Assert.ThrowsAsync(() => batch.ExecuteNonQueryAsync()); - } - [Test] public async Task SingleRow([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepared)] PrepareOrNot prepare) { diff --git a/test/Npgsql.Tests/StoredProcedureTests.cs b/test/Npgsql.Tests/StoredProcedureTests.cs index 8666740f74..84acb51b36 100644 --- a/test/Npgsql.Tests/StoredProcedureTests.cs +++ b/test/Npgsql.Tests/StoredProcedureTests.cs @@ -129,6 +129,88 @@ LANGUAGE plpgsql Assert.That(reader[1], Is.EqualTo(11)); } + [Test] + public async Task Batch_positional_parameters_works() + { + var tempname = await GetTempProcedureName(DataSource); + await using var connection = await DataSource.OpenConnectionAsync(); + await using var transaction = await connection.BeginTransactionAsync(IsolationLevel.Serializable); + await using var batch = new NpgsqlBatch(connection, transaction) + { + BatchCommands = + { + new(tempname) + { + CommandType = CommandType.StoredProcedure, + Parameters = + { + new() { Value = "" }, + new() { DbType = DbType.Int64, Direction = ParameterDirection.Output } + } + }, + new ("COMMIT") + } + }; + + Assert.ThrowsAsync(() => batch.ExecuteNonQueryAsync()); + } + + [Test] + public async Task Batch_StoredProcedure_output_parameters_works() + { + // Proper OUT params were introduced in PostgreSQL 14 + MinimumPgVersion(DataSource, "14.0", "Stored procedure OUT parameters are only support starting with version 14"); + var sproc = await GetTempProcedureName(DataSource); + + await using var connection = await DataSource.OpenConnectionAsync(); + await using var transaction = await connection.BeginTransactionAsync(IsolationLevel.Serializable); + var c = connection.CreateCommand(); + 
c.CommandText = $""" + CREATE OR REPLACE PROCEDURE {sproc} + ( + p_username TEXT, + OUT p_user_id BIGINT + ) + LANGUAGE plpgsql + AS $$ + BEGIN + p_user_id = 1; + return; + END; + $$; + """; + await c.ExecuteNonQueryAsync(); + + await using var batch = new NpgsqlBatch(connection, transaction) + { + BatchCommands = + { + new(sproc) + { + CommandType = CommandType.StoredProcedure, + Parameters = + { + new() { Value = "" }, + new() { NpgsqlDbType = NpgsqlDbType.Bigint, Direction = ParameterDirection.Output } + } + }, + new(sproc) + { + CommandType = CommandType.StoredProcedure, + Parameters = + { + new() { Value = "" }, + new() { NpgsqlDbType = NpgsqlDbType.Bigint, Direction = ParameterDirection.Output } + } + } + } + }; + + await batch.ExecuteNonQueryAsync(); + Assert.AreEqual(1, batch.BatchCommands[0].Parameters[1].Value); + Assert.AreEqual(1, batch.BatchCommands[1].Parameters[1].Value); + } + #region DeriveParameters [Test, Description("Tests function parameter derivation with IN, OUT and INOUT parameters")] From f7e7ead0702d776a8f551f5786c4cac2d65c4bc6 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Thu, 9 May 2024 13:20:06 +0200 Subject: [PATCH 419/761] Merge pull request from GHSA-x9vc-6hfv-hg8c --- .../NpgsqlConnector.FrontendMessages.cs | 56 ++++-- src/Npgsql/Internal/NpgsqlWriteBuffer.cs | 61 ++++++- src/Npgsql/NpgsqlTransaction.cs | 11 +- test/Npgsql.Tests/CommandTests.cs | 171 ++++++++++++++++++ test/Npgsql.Tests/Support/PgPostmasterMock.cs | 1 + test/Npgsql.Tests/Support/PgServerMock.cs | 1 + test/Npgsql.Tests/WriteBufferTests.cs | 1 + 7 files changed, 272 insertions(+), 30 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.FrontendMessages.cs b/src/Npgsql/Internal/NpgsqlConnector.FrontendMessages.cs index f3e3173124..9e0fd45dd3 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.FrontendMessages.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.FrontendMessages.cs @@ -19,6 +19,7 @@ internal Task WriteDescribe(StatementOrPortal statementOrPortal, 
byte[] asciiNam (asciiName.Length + 1); // Statement/portal name var writeBuffer = WriteBuffer; + writeBuffer.StartMessage(len); if (writeBuffer.WriteSpaceLeft < len) return FlushAndWrite(len, statementOrPortal, asciiName, async, cancellationToken); @@ -48,6 +49,7 @@ internal Task WriteSync(bool async, CancellationToken cancellationToken = defaul sizeof(int); // Length var writeBuffer = WriteBuffer; + writeBuffer.StartMessage(len); if (writeBuffer.WriteSpaceLeft < len) return FlushAndWrite(async, cancellationToken); @@ -79,6 +81,7 @@ internal Task WriteExecute(int maxRows, bool async, CancellationToken cancellati sizeof(int); // Max number of rows var writeBuffer = WriteBuffer; + writeBuffer.StartMessage(len); if (writeBuffer.WriteSpaceLeft < len) return FlushAndWrite(maxRows, async, cancellationToken); @@ -118,9 +121,6 @@ internal async Task WriteParse(string sql, byte[] asciiName, List= headerLength, "Write buffer too small for Bind header"); - await Flush(async, cancellationToken).ConfigureAwait(false); - } - var formatCodesSum = 0; var paramsLength = 0; for (var paramIndex = 0; paramIndex < parameters.Count; paramIndex++) @@ -197,8 +196,15 @@ internal async Task WriteBind( sizeof(short) + // Number of result format codes sizeof(short) * (unknownResultTypeList?.Length ?? 
1); // Result format codes - writeBuffer.WriteByte(FrontendMessageCode.Bind); - writeBuffer.WriteInt32(messageLength - 1); + WriteBuffer.StartMessage(messageLength); + if (WriteBuffer.WriteSpaceLeft < headerLength) + { + Debug.Assert(WriteBuffer.Size >= headerLength, "Write buffer too small for Bind header"); + await Flush(async, cancellationToken).ConfigureAwait(false); + } + + WriteBuffer.WriteByte(FrontendMessageCode.Bind); + WriteBuffer.WriteInt32(messageLength - 1); Debug.Assert(portal == string.Empty); writeBuffer.WriteByte(0); // Portal is always empty @@ -269,6 +275,7 @@ internal Task WriteClose(StatementOrPortal type, byte[] asciiName, bool async, C asciiName.Length + sizeof(byte); // Statement or portal name plus null terminator var writeBuffer = WriteBuffer; + writeBuffer.StartMessage(len); if (writeBuffer.WriteSpaceLeft < len) return FlushAndWrite(len, type, asciiName, async, cancellationToken); @@ -296,14 +303,17 @@ internal async Task WriteQuery(string sql, bool async, CancellationToken cancell { var queryByteLen = TextEncoding.GetByteCount(sql); + var len = sizeof(byte) + + sizeof(int) + // Message length (including self excluding code) + queryByteLen + // Query byte length + sizeof(byte); + + WriteBuffer.StartMessage(len); if (WriteBuffer.WriteSpaceLeft < 1 + 4) await Flush(async, cancellationToken).ConfigureAwait(false); WriteBuffer.WriteByte(FrontendMessageCode.Query); - WriteBuffer.WriteInt32( - sizeof(int) + // Message length (including self excluding code) - queryByteLen + // Query byte length - sizeof(byte)); // Null terminator + WriteBuffer.WriteInt32(len - 1); await WriteBuffer.WriteString(sql, queryByteLen, async, cancellationToken).ConfigureAwait(false); if (WriteBuffer.WriteSpaceLeft < 1) @@ -316,6 +326,7 @@ internal async Task WriteCopyDone(bool async, CancellationToken cancellationToke const int len = sizeof(byte) + // Message code sizeof(int); // Length + WriteBuffer.StartMessage(len); if (WriteBuffer.WriteSpaceLeft < len) await 
Flush(async, cancellationToken).ConfigureAwait(false); @@ -331,6 +342,7 @@ internal async Task WriteCopyFail(bool async, CancellationToken cancellationToke sizeof(int) + // Length sizeof(byte); // Error message is always empty (only a null terminator) + WriteBuffer.StartMessage(len); if (WriteBuffer.WriteSpaceLeft < len) await Flush(async, cancellationToken).ConfigureAwait(false); @@ -348,6 +360,7 @@ internal void WriteCancelRequest(int backendProcessId, int backendSecretKey) Debug.Assert(backendProcessId != 0); + WriteBuffer.StartMessage(len); if (WriteBuffer.WriteSpaceLeft < len) Flush(false).GetAwaiter().GetResult(); @@ -362,6 +375,7 @@ internal void WriteTerminate() const int len = sizeof(byte) + // Message code sizeof(int); // Length + WriteBuffer.StartMessage(len); if (WriteBuffer.WriteSpaceLeft < len) Flush(false).GetAwaiter().GetResult(); @@ -374,6 +388,7 @@ internal void WriteSslRequest() const int len = sizeof(int) + // Length sizeof(int); // SSL request code + WriteBuffer.StartMessage(len); if (WriteBuffer.WriteSpaceLeft < len) Flush(false).GetAwaiter().GetResult(); @@ -394,6 +409,7 @@ internal void WriteStartup(Dictionary parameters) NpgsqlWriteBuffer.UTF8Encoding.GetByteCount(kvp.Value) + 1; // Should really never happen, just in case + WriteBuffer.StartMessage(len); if (len > WriteBuffer.Size) throw new Exception("Startup message bigger than buffer"); @@ -417,8 +433,10 @@ internal void WriteStartup(Dictionary parameters) internal async Task WritePassword(byte[] payload, int offset, int count, bool async, CancellationToken cancellationToken = default) { + WriteBuffer.StartMessage(sizeof(byte) + sizeof(int) + count); if (WriteBuffer.WriteSpaceLeft < sizeof(byte) + sizeof(int)) await WriteBuffer.Flush(async, cancellationToken).ConfigureAwait(false); + WriteBuffer.WriteByte(FrontendMessageCode.Password); WriteBuffer.WriteInt32(sizeof(int) + count); @@ -441,6 +459,7 @@ internal async Task WriteSASLInitialResponse(string mechanism, byte[] initialRes 
sizeof(int) + // Initial response length (initialResponse?.Length ?? 0); // Initial response payload + WriteBuffer.StartMessage(len); if (WriteBuffer.WriteSpaceLeft < len) await WriteBuffer.Flush(async, cancellationToken).ConfigureAwait(false); @@ -464,6 +483,7 @@ internal async Task WriteSASLInitialResponse(string mechanism, byte[] initialRes internal Task WritePregenerated(byte[] data, bool async = false, CancellationToken cancellationToken = default) { + WriteBuffer.StartMessage(data.Length); if (WriteBuffer.WriteSpaceLeft < data.Length) return FlushAndWrite(data, async, cancellationToken); diff --git a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs index 06b49d9c71..821bb7e6b1 100644 --- a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs @@ -28,6 +28,8 @@ sealed class NpgsqlWriteBuffer : IDisposable internal Stream Underlying { private get; set; } readonly Socket? _underlyingSocket; + internal bool MessageLengthValidation { get; set; } = true; + readonly ResettableCancellationTokenSource _timeoutCts; readonly MetricsReporter? _metricsReporter; @@ -76,6 +78,9 @@ internal PgWriter GetWriter(NpgsqlDatabaseInfo typeCatalog, FlushMode flushMode internal int WritePosition; + int _messageBytesFlushed; + int? _messageLength; + bool _disposed; readonly PgWriter _pgWriter; @@ -131,6 +136,8 @@ public async Task Flush(bool async, CancellationToken cancellationToken = defaul WritePosition = pos; } else if (WritePosition == 0) return; + else + AdvanceMessageBytesFlushed(WritePosition); var finalCt = async && Timeout > TimeSpan.Zero ? 
_timeoutCts.Start(cancellationToken) @@ -197,15 +204,19 @@ internal void DirectWrite(ReadOnlySpan buffer) Debug.Assert(WritePosition == 5); WritePosition = 1; - WriteInt32(buffer.Length + 4); + WriteInt32(checked(buffer.Length + 4)); WritePosition = 5; _copyMode = false; + StartMessage(5); Flush(); _copyMode = true; WriteCopyDataHeader(); // And ready the buffer after the direct write completes } else + { Debug.Assert(WritePosition == 0); + AdvanceMessageBytesFlushed(buffer.Length); + } try { @@ -228,15 +239,19 @@ internal async Task DirectWrite(ReadOnlyMemory memory, bool async, Cancell Debug.Assert(WritePosition == 5); WritePosition = 1; - WriteInt32(memory.Length + 4); + WriteInt32(checked(memory.Length + 4)); WritePosition = 5; _copyMode = false; + StartMessage(5); await Flush(async, cancellationToken).ConfigureAwait(false); _copyMode = true; WriteCopyDataHeader(); // And ready the buffer after the direct write completes } else + { Debug.Assert(WritePosition == 0); + AdvanceMessageBytesFlushed(memory.Length); + } try { @@ -534,9 +549,51 @@ public void Dispose() #region Misc + internal void StartMessage(int messageLength) + { + if (!MessageLengthValidation) + return; + + if (_messageLength is not null && _messageBytesFlushed != _messageLength && WritePosition != -_messageBytesFlushed + _messageLength) + Throw(); + + // Add negative WritePosition to compensate for previous message(s) written without flushing. 
+ _messageBytesFlushed = -WritePosition; + _messageLength = messageLength; + + void Throw() + { + throw Connector.Break(new OverflowException("Did not write the amount of bytes the message length specified")); + } + } + + void AdvanceMessageBytesFlushed(int count) + { + if (!MessageLengthValidation) + return; + + if (count < 0 || _messageLength is null || (long)_messageBytesFlushed + count > _messageLength) + Throw(); + + _messageBytesFlushed += count; + + void Throw() + { + if (count < 0) + throw new ArgumentOutOfRangeException(nameof(count), "Can't advance by a negative count"); + + if (_messageLength is null) + throw Connector.Break(new InvalidOperationException("No message was started")); + + if ((long)_messageBytesFlushed + count > _messageLength) + throw Connector.Break(new OverflowException("Tried to write more bytes than the message length specified")); + } + } + internal void Clear() { WritePosition = 0; + _messageLength = null; } /// diff --git a/src/Npgsql/NpgsqlTransaction.cs b/src/Npgsql/NpgsqlTransaction.cs index 6481e185af..7819af479b 100644 --- a/src/Npgsql/NpgsqlTransaction.cs +++ b/src/Npgsql/NpgsqlTransaction.cs @@ -212,16 +212,7 @@ public override void Save(string name) // Note: savepoint names are PostgreSQL identifiers, and so limited by default to 63 characters. // Since we are prepending, we assume below that the statement will always fit in the buffer. 
- _connector.WriteBuffer.WriteByte(FrontendMessageCode.Query); - _connector.WriteBuffer.WriteInt32( - sizeof(int) + // Message length (including self excluding code) - _connector.TextEncoding.GetByteCount("SAVEPOINT ") + - _connector.TextEncoding.GetByteCount(name) + - sizeof(byte)); // Null terminator - - _connector.WriteBuffer.WriteString("SAVEPOINT "); - _connector.WriteBuffer.WriteString(name); - _connector.WriteBuffer.WriteByte(0); + _connector.WriteQuery("SAVEPOINT " + name, async: false).GetAwaiter().GetResult(); _connector.PendingPrependedResponses += 2; } diff --git a/test/Npgsql.Tests/CommandTests.cs b/test/Npgsql.Tests/CommandTests.cs index 9c42da9c0c..51e55885b9 100644 --- a/test/Npgsql.Tests/CommandTests.cs +++ b/test/Npgsql.Tests/CommandTests.cs @@ -852,6 +852,176 @@ public async Task Use_after_reload_types_invalidates_cached_infos() } } + [Test] + public async Task Parameter_overflow_message_length_throws() + { + await using var conn = CreateConnection(); + await conn.OpenAsync(); + await using var cmd = new NpgsqlCommand("SELECT @a, @b, @c, @d, @e, @f, @g, @h", conn); + + var largeParam = new string('A', 1 << 29); + cmd.Parameters.AddWithValue("a", largeParam); + cmd.Parameters.AddWithValue("b", largeParam); + cmd.Parameters.AddWithValue("c", largeParam); + cmd.Parameters.AddWithValue("d", largeParam); + cmd.Parameters.AddWithValue("e", largeParam); + cmd.Parameters.AddWithValue("f", largeParam); + cmd.Parameters.AddWithValue("g", largeParam); + cmd.Parameters.AddWithValue("h", largeParam); + + Assert.ThrowsAsync(() => cmd.ExecuteReaderAsync()); + } + + [Test] + public async Task Composite_overflow_message_length_throws() + { + await using var adminConnection = await OpenConnectionAsync(); + var type = await GetTempTypeName(adminConnection); + + await adminConnection.ExecuteNonQueryAsync( + $"CREATE TYPE {type} AS (a text, b text, c text, d text, e text, f text, g text, h text)"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + 
dataSourceBuilder.MapComposite(type); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + var largeString = new string('A', 1 << 29); + + await using var cmd = connection.CreateCommand(); + cmd.CommandText = "SELECT @a"; + cmd.Parameters.AddWithValue("a", new BigComposite + { + A = largeString, + B = largeString, + C = largeString, + D = largeString, + E = largeString, + F = largeString, + G = largeString, + H = largeString + }); + + Assert.ThrowsAsync(async () => await cmd.ExecuteNonQueryAsync()); + } + + record BigComposite + { + public string A { get; set; } = null!; + public string B { get; set; } = null!; + public string C { get; set; } = null!; + public string D { get; set; } = null!; + public string E { get; set; } = null!; + public string F { get; set; } = null!; + public string G { get; set; } = null!; + public string H { get; set; } = null!; + } + + [Test] + public async Task Array_overflow_message_length_throws() + { + await using var connection = await OpenConnectionAsync(); + + var largeString = new string('A', 1 << 29); + + await using var cmd = connection.CreateCommand(); + cmd.CommandText = "SELECT @a"; + var array = new[] + { + largeString, + largeString, + largeString, + largeString, + largeString, + largeString, + largeString, + largeString + }; + cmd.Parameters.AddWithValue("a", array); + + Assert.ThrowsAsync(async () => await cmd.ExecuteNonQueryAsync()); + } + + [Test] + public async Task Range_overflow_message_length_throws() + { + await using var adminConnection = await OpenConnectionAsync(); + var type = await GetTempTypeName(adminConnection); + var rangeType = await GetTempTypeName(adminConnection); + + await adminConnection.ExecuteNonQueryAsync( + $"CREATE TYPE {type} AS (a text, b text, c text, d text, e text, f text, g text, h text);CREATE TYPE {rangeType} AS RANGE(subtype={type})"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + 
dataSourceBuilder.MapComposite(type); + dataSourceBuilder.EnableUnmappedTypes(); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + var largeString = new string('A', (1 << 28) + 2000000); + + await using var cmd = connection.CreateCommand(); + cmd.CommandText = "SELECT @a"; + var composite = new BigComposite + { + A = largeString, + B = largeString, + C = largeString, + D = largeString + }; + var range = new NpgsqlRange(composite, composite); + cmd.Parameters.Add(new NpgsqlParameter + { + Value = range, + ParameterName = "a", + DataTypeName = rangeType + }); + + Assert.ThrowsAsync(async () => await cmd.ExecuteNonQueryAsync()); + } + + [Test] + public async Task Multirange_overflow_message_length_throws() + { + await using var adminConnection = await OpenConnectionAsync(); + var type = await GetTempTypeName(adminConnection); + var rangeType = await GetTempTypeName(adminConnection); + + await adminConnection.ExecuteNonQueryAsync( + $"CREATE TYPE {type} AS (a text, b text, c text, d text, e text, f text, g text, h text);CREATE TYPE {rangeType} AS RANGE(subtype={type})"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.MapComposite(type); + dataSourceBuilder.EnableUnmappedTypes(); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + var largeString = new string('A', (1 << 28) + 2000000); + + await using var cmd = connection.CreateCommand(); + cmd.CommandText = "SELECT @a"; + var composite = new BigComposite + { + A = largeString + }; + var range = new NpgsqlRange(composite, composite); + var multirange = new[] + { + range, + range, + range, + range + }; + cmd.Parameters.Add(new NpgsqlParameter + { + Value = multirange, + ParameterName = "a", + DataTypeName = rangeType + "_multirange" + }); + + Assert.ThrowsAsync(async () => await cmd.ExecuteNonQueryAsync()); + } + [Test, 
Description("CreateCommand before connection open")] [IssueLink("https://github.com/npgsql/npgsql/issues/565")] public async Task Create_command_before_connection_open() @@ -1027,6 +1197,7 @@ public async Task Too_many_parameters_throws([Values(PrepareOrNot.NotPrepared, P sb.Append('@'); sb.Append(paramName); } + cmd.CommandText = sb.ToString(); if (prepare == PrepareOrNot.Prepared) diff --git a/test/Npgsql.Tests/Support/PgPostmasterMock.cs b/test/Npgsql.Tests/Support/PgPostmasterMock.cs index e45c1a7f28..ab3eeab521 100644 --- a/test/Npgsql.Tests/Support/PgPostmasterMock.cs +++ b/test/Npgsql.Tests/Support/PgPostmasterMock.cs @@ -138,6 +138,7 @@ async Task Accept(bool completeCancellationImmediat var readBuffer = new NpgsqlReadBuffer(null!, stream, clientSocket, ReadBufferSize, Encoding, RelaxedEncoding); var writeBuffer = new NpgsqlWriteBuffer(null!, stream, clientSocket, WriteBufferSize, Encoding); + writeBuffer.MessageLengthValidation = false; await readBuffer.EnsureAsync(4); var len = readBuffer.ReadInt32(); diff --git a/test/Npgsql.Tests/Support/PgServerMock.cs b/test/Npgsql.Tests/Support/PgServerMock.cs index c34a9315c8..9f7a799649 100644 --- a/test/Npgsql.Tests/Support/PgServerMock.cs +++ b/test/Npgsql.Tests/Support/PgServerMock.cs @@ -41,6 +41,7 @@ internal PgServerMock( _stream = stream; _readBuffer = readBuffer; _writeBuffer = writeBuffer; + writeBuffer.MessageLengthValidation = false; } internal async Task Startup(MockState state) diff --git a/test/Npgsql.Tests/WriteBufferTests.cs b/test/Npgsql.Tests/WriteBufferTests.cs index 99e5626b75..ff8d9413ce 100644 --- a/test/Npgsql.Tests/WriteBufferTests.cs +++ b/test/Npgsql.Tests/WriteBufferTests.cs @@ -112,6 +112,7 @@ public void SetUp() { Underlying = new MemoryStream(); WriteBuffer = new NpgsqlWriteBuffer(null, Underlying, null, NpgsqlReadBuffer.DefaultSize, NpgsqlWriteBuffer.UTF8Encoding); + WriteBuffer.MessageLengthValidation = false; } // ReSharper disable once InconsistentNaming From 
1be7b84315b2cc45c19c8324d07632354d7be7fa Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Thu, 9 May 2024 14:30:20 +0200 Subject: [PATCH 420/761] Fix replication message writing --- src/Npgsql/Replication/ReplicationConnection.cs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Npgsql/Replication/ReplicationConnection.cs b/src/Npgsql/Replication/ReplicationConnection.cs index 283ffd6d3f..b7b97e6d12 100644 --- a/src/Npgsql/Replication/ReplicationConnection.cs +++ b/src/Npgsql/Replication/ReplicationConnection.cs @@ -621,6 +621,7 @@ async Task SendFeedback(bool waitOnSemaphore = false, bool requestReply = false, if (buf.WriteSpaceLeft < len) await connector.Flush(async: true, cancellationToken).ConfigureAwait(false); + buf.StartMessage(len); buf.WriteByte(FrontendMessageCode.CopyData); buf.WriteInt32(len - 1); buf.WriteByte((byte)'r'); // TODO: enum/const? From 87aeafbc8c68f9f31d3b80e68b072475ba910768 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Thu, 9 May 2024 14:41:35 +0200 Subject: [PATCH 421/761] Ignore multirange test pre PG 14 --- test/Npgsql.Tests/CommandTests.cs | 1 + 1 file changed, 1 insertion(+) diff --git a/test/Npgsql.Tests/CommandTests.cs b/test/Npgsql.Tests/CommandTests.cs index 51e55885b9..5d3b35b01d 100644 --- a/test/Npgsql.Tests/CommandTests.cs +++ b/test/Npgsql.Tests/CommandTests.cs @@ -984,6 +984,7 @@ await adminConnection.ExecuteNonQueryAsync( public async Task Multirange_overflow_message_length_throws() { await using var adminConnection = await OpenConnectionAsync(); + MinimumPgVersion(adminConnection, "14.0", "Multirange types were introduced in PostgreSQL 14"); var type = await GetTempTypeName(adminConnection); var rangeType = await GetTempTypeName(adminConnection); From dc0d22edb1cd79d465726a431fcab69c84b80ba9 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Sat, 18 May 2024 22:08:28 +0300 Subject: [PATCH 422/761] Fix command text with batching for OpenTelemetry (#5706) Fixes #5660 --- src/Npgsql/Internal/NpgsqlConnector.Auth.cs | 
9 +++++---- src/Npgsql/NpgsqlCommand.cs | 22 ++++++++++++++++++++- 2 files changed, 26 insertions(+), 5 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs index 8fe1bfe402..827bc71485 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs @@ -57,7 +57,7 @@ await AuthenticateSASL(((AuthenticationSASLMessage)msg).Mechanisms, username, as async Task AuthenticateCleartext(string username, bool async, CancellationToken cancellationToken = default) { var passwd = await GetPassword(username, async, cancellationToken).ConfigureAwait(false); - if (passwd == null) + if (string.IsNullOrEmpty(passwd)) throw new NpgsqlException("No password has been provided but the backend requires one (in cleartext)"); var encoded = new byte[Encoding.UTF8.GetByteCount(passwd) + 1]; @@ -114,8 +114,9 @@ async Task AuthenticateSASL(List mechanisms, string username, bool async throw new NpgsqlException("Unable to bind to SCRAM-SHA-256-PLUS, check logs for more information"); } - var passwd = await GetPassword(username, async, cancellationToken).ConfigureAwait(false) ?? 
- throw new NpgsqlException($"No password has been provided but the backend requires one (in SASL/{mechanism})"); + var passwd = await GetPassword(username, async, cancellationToken).ConfigureAwait(false); + if (string.IsNullOrEmpty(passwd)) + throw new NpgsqlException($"No password has been provided but the backend requires one (in SASL/{mechanism})"); // Assumption: the write buffer is big enough to contain all our outgoing messages var clientNonce = GetNonce(); @@ -275,7 +276,7 @@ static byte[] HMAC(byte[] key, string data) async Task AuthenticateMD5(string username, byte[] salt, bool async, CancellationToken cancellationToken = default) { var passwd = await GetPassword(username, async, cancellationToken).ConfigureAwait(false); - if (passwd == null) + if (string.IsNullOrEmpty(passwd)) throw new NpgsqlException("No password has been provided but the backend requires one (in MD5)"); byte[] result; diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index 6403673212..073785514d 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -195,6 +195,26 @@ public override string CommandText } } + string GetBatchFullCommandText() + { + Debug.Assert(IsWrappedByBatch); + if (InternalBatchCommands.Count == 0) + return string.Empty; + if (InternalBatchCommands.Count == 1) + return InternalBatchCommands[0].CommandText; + // TODO: Potentially cache on connector/command? + var sb = new StringBuilder(); + sb.Append(InternalBatchCommands[0].CommandText); + for (var i = 1; i < InternalBatchCommands.Count; i++) + { + sb + .Append(';') + .AppendLine() + .Append(InternalBatchCommands[i].CommandText); + } + return sb.ToString(); + } + /// /// Gets or sets the wait time (in seconds) before terminating the attempt to execute a command and generating an error. 
/// @@ -1683,7 +1703,7 @@ internal void TraceCommandStart(NpgsqlConnector connector) { Debug.Assert(CurrentActivity is null); if (NpgsqlActivitySource.IsEnabled) - CurrentActivity = NpgsqlActivitySource.CommandStart(connector, CommandText, CommandType); + CurrentActivity = NpgsqlActivitySource.CommandStart(connector, IsWrappedByBatch ? GetBatchFullCommandText() : CommandText, CommandType); } internal void TraceReceivedFirstResponse() From 4c06c69404a898acf42f701701681a9db1da637a Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Sun, 19 May 2024 09:44:31 +0300 Subject: [PATCH 423/761] Fix message overflow tests with multiplexing (#5713) --- test/Npgsql.Tests/CommandTests.cs | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/test/Npgsql.Tests/CommandTests.cs b/test/Npgsql.Tests/CommandTests.cs index 5d3b35b01d..2826ea728a 100644 --- a/test/Npgsql.Tests/CommandTests.cs +++ b/test/Npgsql.Tests/CommandTests.cs @@ -855,8 +855,9 @@ public async Task Use_after_reload_types_invalidates_cached_infos() [Test] public async Task Parameter_overflow_message_length_throws() { - await using var conn = CreateConnection(); - await conn.OpenAsync(); + // Create a separate dataSource because of Multiplexing (otherwise we can break unrelated queries) + await using var dataSource = CreateDataSource(); + await using var conn = await dataSource.OpenConnectionAsync(); await using var cmd = new NpgsqlCommand("SELECT @a, @b, @c, @d, @e, @f, @g, @h", conn); var largeParam = new string('A', 1 << 29); @@ -920,11 +921,13 @@ record BigComposite [Test] public async Task Array_overflow_message_length_throws() { - await using var connection = await OpenConnectionAsync(); + // Create a separate dataSource because of Multiplexing (otherwise we can break unrelated queries) + await using var dataSource = CreateDataSource(); + await using var conn = await dataSource.OpenConnectionAsync(); var largeString = new string('A', 1 << 29); - await using var cmd = 
connection.CreateCommand(); + await using var cmd = conn.CreateCommand(); cmd.CommandText = "SELECT @a"; var array = new[] { From 1d68fb99b38b56aa0f4629ada84edb1810eba241 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Sun, 19 May 2024 23:00:24 +0300 Subject: [PATCH 424/761] Reset AllResultTypesAreUnknown and UnknownResultTypeList for cached commands (#5712) Fixes #5690 --- src/Npgsql/NpgsqlCommand.cs | 3 ++- test/Npgsql.Tests/CommandTests.cs | 8 +++++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index 073785514d..0a46e675cf 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -1689,7 +1689,8 @@ internal void Reset() // Can be null if it's owned by batch _parameters?.Clear(); _timeout = null; - _allResultTypesAreUnknown = false; + AllResultTypesAreUnknown = false; + Debug.Assert(_unknownResultTypeList is null); EnableErrorBarriers = false; } diff --git a/test/Npgsql.Tests/CommandTests.cs b/test/Npgsql.Tests/CommandTests.cs index 2826ea728a..36c8744fdf 100644 --- a/test/Npgsql.Tests/CommandTests.cs +++ b/test/Npgsql.Tests/CommandTests.cs @@ -1118,11 +1118,15 @@ public async Task ExecuteReader_Throws_PostgresException([Values] bool async) } [Test] - public void Command_is_recycled() + public void Command_is_recycled([Values] bool allResultTypesAreUnknown) { using var conn = OpenConnection(); var cmd1 = conn.CreateCommand(); cmd1.CommandText = "SELECT @p1"; + if (allResultTypesAreUnknown) + cmd1.AllResultTypesAreUnknown = true; + else + cmd1.UnknownResultTypeList = [true]; var tx = conn.BeginTransaction(); cmd1.Transaction = tx; cmd1.Parameters.AddWithValue("p1", 8); @@ -1135,6 +1139,8 @@ public void Command_is_recycled() Assert.That(cmd2.CommandType, Is.EqualTo(CommandType.Text)); Assert.That(cmd2.Transaction, Is.Null); Assert.That(cmd2.Parameters, Is.Empty); + Assert.That(cmd2.AllResultTypesAreUnknown, Is.False); + Assert.That(cmd2.UnknownResultTypeList, 
Is.Null); // TODO: Leaving this for now, since it'll be replaced by the new batching API // Assert.That(cmd2.Statements, Is.Empty); } From d1c62a1eb3b5817dcbd5c755176fc600b1557626 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Thu, 30 May 2024 17:08:43 +0200 Subject: [PATCH 425/761] Remove hacky enum type mapping introspection (#5711) Following https://github.com/npgsql/efcore.pg/pull/3167, which removes EF's logic for extracting enum mappings from Npgsql. --- src/Npgsql/Internal/HackyEnumTypeMapping.cs | 28 --------------------- src/Npgsql/NpgsqlDataSource.cs | 3 --- src/Npgsql/NpgsqlDataSourceConfiguration.cs | 1 - src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 16 ------------ src/Npgsql/TypeMapping/GlobalTypeMapper.cs | 25 ------------------ 5 files changed, 73 deletions(-) delete mode 100644 src/Npgsql/Internal/HackyEnumTypeMapping.cs diff --git a/src/Npgsql/Internal/HackyEnumTypeMapping.cs b/src/Npgsql/Internal/HackyEnumTypeMapping.cs deleted file mode 100644 index 8b3d5255cc..0000000000 --- a/src/Npgsql/Internal/HackyEnumTypeMapping.cs +++ /dev/null @@ -1,28 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Diagnostics.CodeAnalysis; -using System.Linq; -using System.Reflection; -using Npgsql.Internal; -using Npgsql.PostgresTypes; -using NpgsqlTypes; - -namespace Npgsql.Internal; - -/// -/// Hacky temporary measure used by EFCore.PG to extract user-configured enum mappings. Accessed via reflection only. 
-/// -[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] -public sealed class HackyEnumTypeMapping -{ - public HackyEnumTypeMapping(Type enumClrType, string pgTypeName, INpgsqlNameTranslator nameTranslator) - { - EnumClrType = enumClrType; - PgTypeName = pgTypeName; - NameTranslator = nameTranslator; - } - - public string PgTypeName { get; } - public Type EnumClrType { get; } - public INpgsqlNameTranslator NameTranslator { get; } -} diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index 9415296585..ba5fdf255a 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -83,8 +83,6 @@ private protected readonly Dictionary> _pendi readonly INpgsqlNameTranslator _defaultNameTranslator; - internal List? _hackyEnumTypeMappings; - internal NpgsqlDataSource( NpgsqlConnectionStringBuilder settings, NpgsqlDataSourceConfiguration dataSourceConfig) @@ -108,7 +106,6 @@ internal NpgsqlDataSource( _periodicPasswordSuccessRefreshInterval, _periodicPasswordFailureRefreshInterval, var resolverChain, - _hackyEnumTypeMappings, _defaultNameTranslator, ConnectionInitializer, ConnectionInitializerAsync) diff --git a/src/Npgsql/NpgsqlDataSourceConfiguration.cs b/src/Npgsql/NpgsqlDataSourceConfiguration.cs index ec3e5e4611..075df28aa8 100644 --- a/src/Npgsql/NpgsqlDataSourceConfiguration.cs +++ b/src/Npgsql/NpgsqlDataSourceConfiguration.cs @@ -20,7 +20,6 @@ sealed record NpgsqlDataSourceConfiguration(string? Name, TimeSpan PeriodicPasswordSuccessRefreshInterval, TimeSpan PeriodicPasswordFailureRefreshInterval, PgTypeInfoResolverChain ResolverChain, - List HackyEnumMappings, INpgsqlNameTranslator DefaultNameTranslator, Action? ConnectionInitializer, Func? 
ConnectionInitializerAsync); diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index 72cfeb4949..bc1026b297 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -631,25 +631,9 @@ _loggerFactory is null _periodicPasswordSuccessRefreshInterval, _periodicPasswordFailureRefreshInterval, _resolverChainBuilder.Build(ConfigureResolverChain), - HackyEnumMappings(), DefaultNameTranslator, _connectionInitializer, _connectionInitializerAsync); - - List HackyEnumMappings() - { - var mappings = new List(); - - if (_userTypeMapper.Items.Count > 0) - foreach (var userTypeMapping in _userTypeMapper.Items) - if (userTypeMapping is UserTypeMapper.EnumMapping enumMapping) - mappings.Add(new(enumMapping.ClrType, enumMapping.PgTypeName, enumMapping.NameTranslator)); - - if (GlobalTypeMapper.Instance.HackyEnumTypeMappings.Count > 0) - mappings.AddRange(GlobalTypeMapper.Instance.HackyEnumTypeMappings); - - return mappings; - } } void ValidateMultiHost() diff --git a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs index 4ef1313adf..9abe5acecb 100644 --- a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs +++ b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs @@ -20,8 +20,6 @@ sealed class GlobalTypeMapper : INpgsqlTypeMapper readonly ReaderWriterLockSlim _lock = new(); PgTypeInfoResolverFactory[] _typeMappingResolvers = Array.Empty(); - internal List HackyEnumTypeMappings { get; } = new(); - internal IEnumerable GetPluginResolverFactories() { var resolvers = new List(); @@ -185,7 +183,6 @@ public void Reset() { _pluginResolverFactories.Clear(); _userTypeMapper.Items.Clear(); - HackyEnumTypeMappings.Clear(); } finally { @@ -245,13 +242,7 @@ public INpgsqlTypeMapper EnableUnmappedTypes() try { _userTypeMapper.MapEnum(pgName, nameTranslator); - - // Temporary hack for EFCore.PG enum mapping compat - if (_userTypeMapper.Items.FirstOrDefault(i => i.ClrType == 
typeof(TEnum)) is UserTypeMapping userTypeMapping) - HackyEnumTypeMappings.Add(new(typeof(TEnum), userTypeMapping.PgTypeName, nameTranslator ?? DefaultNameTranslator)); - ResetTypeMappingCache(); - return this; } finally @@ -267,13 +258,7 @@ public INpgsqlTypeMapper EnableUnmappedTypes() try { var removed = _userTypeMapper.UnmapEnum(pgName, nameTranslator); - - // Temporary hack for EFCore.PG enum mapping compat - if (removed && ((List)_userTypeMapper.Items).FindIndex(m => m.ClrType == typeof(TEnum)) is > -1 and var index) - HackyEnumTypeMappings.RemoveAt(index); - ResetTypeMappingCache(); - return removed; } finally @@ -291,11 +276,6 @@ public INpgsqlTypeMapper MapEnum([DynamicallyAccessedMembers(DynamicallyAccessed try { _userTypeMapper.MapEnum(clrType, pgName, nameTranslator); - - // Temporary hack for EFCore.PG enum mapping compat - if (_userTypeMapper.Items.FirstOrDefault(i => i.ClrType == clrType) is UserTypeMapping userTypeMapping) - HackyEnumTypeMappings.Add(new(clrType, userTypeMapping.PgTypeName, nameTranslator ?? 
DefaultNameTranslator)); - ResetTypeMappingCache(); return this; } @@ -313,11 +293,6 @@ public bool UnmapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes try { var removed = _userTypeMapper.UnmapEnum(clrType, pgName, nameTranslator); - - // Temporary hack for EFCore.PG enum mapping compat - if (removed && ((List)_userTypeMapper.Items).FindIndex(m => m.ClrType == clrType) is > -1 and var index) - HackyEnumTypeMappings.RemoveAt(index); - ResetTypeMappingCache(); return removed; } From c0888f503baf056b550ad32aab91529d9f8e91f0 Mon Sep 17 00:00:00 2001 From: Umar Hayat Date: Thu, 6 Jun 2024 03:17:43 +0900 Subject: [PATCH 426/761] Fixed multiple typos (#5742) --- src/Npgsql.GeoJSON/CrsMap.WellKnown.cs | 2 +- src/Npgsql.GeoJSON/CrsMap.cs | 8 ++++---- src/Npgsql.GeoJSON/Internal/CrsMapBuilder.cs | 12 ++++++------ src/Npgsql.GeoJSON/Internal/GeoJSONConverter.cs | 8 ++++---- src/Npgsql.Json.NET/Internal/JsonNetJsonConverter.cs | 2 +- src/Npgsql/MultiplexingDataSource.cs | 2 +- src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs | 2 +- src/Npgsql/Util/ResettableCancellationTokenSource.cs | 2 +- test/Npgsql.Tests/PrepareTests.cs | 4 ++-- test/Npgsql.Tests/SchemaTests.cs | 2 +- test/Npgsql.Tests/TestMetrics.cs | 6 +++--- test/Npgsql.Tests/TransactionTests.cs | 4 ++-- test/Npgsql.Tests/Types/RangeTests.cs | 2 +- 13 files changed, 28 insertions(+), 28 deletions(-) diff --git a/src/Npgsql.GeoJSON/CrsMap.WellKnown.cs b/src/Npgsql.GeoJSON/CrsMap.WellKnown.cs index 14da2f893e..6ad08d6c80 100644 --- a/src/Npgsql.GeoJSON/CrsMap.WellKnown.cs +++ b/src/Npgsql.GeoJSON/CrsMap.WellKnown.cs @@ -5,7 +5,7 @@ public partial class CrsMap /// /// These entries came from spatial_res_sys. They are used to elide memory allocations /// if they are identical to the entries for the current connection. Otherwise, - /// memory allocated for overrided entries only (added, removed, or modified). + /// memory allocated for overridden entries only (added, removed, or modified). 
/// internal static readonly CrsMapEntry[] WellKnown = { diff --git a/src/Npgsql.GeoJSON/CrsMap.cs b/src/Npgsql.GeoJSON/CrsMap.cs index dd556d9b33..602387a911 100644 --- a/src/Npgsql.GeoJSON/CrsMap.cs +++ b/src/Npgsql.GeoJSON/CrsMap.cs @@ -6,13 +6,13 @@ namespace Npgsql.GeoJSON; /// public partial class CrsMap { - readonly CrsMapEntry[]? _overriden; + readonly CrsMapEntry[]? _overridden; - internal CrsMap(CrsMapEntry[]? overriden) - => _overriden = overriden; + internal CrsMap(CrsMapEntry[]? overridden) + => _overridden = overridden; internal string? GetAuthority(int srid) - => GetAuthority(_overriden, srid) ?? GetAuthority(WellKnown, srid); + => GetAuthority(_overridden, srid) ?? GetAuthority(WellKnown, srid); static string? GetAuthority(CrsMapEntry[]? entries, int srid) { diff --git a/src/Npgsql.GeoJSON/Internal/CrsMapBuilder.cs b/src/Npgsql.GeoJSON/Internal/CrsMapBuilder.cs index 44829761c9..95f45d5db3 100644 --- a/src/Npgsql.GeoJSON/Internal/CrsMapBuilder.cs +++ b/src/Npgsql.GeoJSON/Internal/CrsMapBuilder.cs @@ -5,7 +5,7 @@ namespace Npgsql.GeoJSON.Internal; struct CrsMapBuilder { CrsMapEntry[] _overrides; - int _overridenIndex; + int _overriddenIndex; int _wellKnownIndex; internal void Add(in CrsMapEntry entry) @@ -33,21 +33,21 @@ internal void Add(in CrsMapEntry entry) void AddCore(in CrsMapEntry entry) { - var index = _overridenIndex + 1; + var index = _overriddenIndex + 1; if (_overrides == null) _overrides = new CrsMapEntry[4]; else if (_overrides.Length == index) Array.Resize(ref _overrides, _overrides.Length << 1); - _overrides[_overridenIndex] = entry; - _overridenIndex = index; + _overrides[_overriddenIndex] = entry; + _overriddenIndex = index; } internal CrsMap Build() { - if (_overrides != null && _overrides.Length < _overridenIndex) - Array.Resize(ref _overrides, _overridenIndex); + if (_overrides != null && _overrides.Length < _overriddenIndex) + Array.Resize(ref _overrides, _overriddenIndex); return new CrsMap(_overrides); } diff --git 
a/src/Npgsql.GeoJSON/Internal/GeoJSONConverter.cs b/src/Npgsql.GeoJSON/Internal/GeoJSONConverter.cs index f1a633724f..5d54d16194 100644 --- a/src/Npgsql.GeoJSON/Internal/GeoJSONConverter.cs +++ b/src/Npgsql.GeoJSON/Internal/GeoJSONConverter.cs @@ -323,7 +323,7 @@ static Size GetSize(LineString value) { var coordinates = value.Coordinates; if (NotValid(coordinates, out var hasZ)) - throw AllOrNoneCoordiantesMustHaveZ(nameof(LineString)); + throw AllOrNoneCoordinatesMustHaveZ(nameof(LineString)); var length = Size.Create(SizeOfHeaderWithLength + coordinates.Count * SizeOfPoint(hasZ)); if (GetSrid(value.CRS) != 0) @@ -344,12 +344,12 @@ static Size GetSize(Polygon value) { var coordinates = lines[i].Coordinates; if (NotValid(coordinates, out var lineHasZ)) - throw AllOrNoneCoordiantesMustHaveZ(nameof(Polygon)); + throw AllOrNoneCoordinatesMustHaveZ(nameof(Polygon)); if (hasZ != lineHasZ) { if (i == 0) hasZ = lineHasZ; - else throw AllOrNoneCoordiantesMustHaveZ(nameof(LineString)); + else throw AllOrNoneCoordinatesMustHaveZ(nameof(LineString)); } length = length.Combine(coordinates.Count * SizeOfPoint(hasZ)); @@ -678,7 +678,7 @@ static int SizeOfPoint(EwkbGeometryType type) static Exception UnknownPostGisType() => throw new InvalidOperationException("Invalid PostGIS type"); - static Exception AllOrNoneCoordiantesMustHaveZ(string typeName) + static Exception AllOrNoneCoordinatesMustHaveZ(string typeName) => new ArgumentException($"The Z coordinate must be specified for all or none elements of {typeName}"); static int GetSrid(ICRSObject crs) diff --git a/src/Npgsql.Json.NET/Internal/JsonNetJsonConverter.cs b/src/Npgsql.Json.NET/Internal/JsonNetJsonConverter.cs index 42b7c88e0d..c54670d10d 100644 --- a/src/Npgsql.Json.NET/Internal/JsonNetJsonConverter.cs +++ b/src/Npgsql.Json.NET/Internal/JsonNetJsonConverter.cs @@ -38,7 +38,7 @@ public override ValueTask WriteAsync(PgWriter writer, T? 
value, CancellationToke => JsonNetJsonConverter.Write(_jsonb, async: true, writer, cancellationToken); } -// Split out to avoid unneccesary code duplication. +// Split out to avoid unnecessary code duplication. static class JsonNetJsonConverter { public const byte JsonbProtocolVersion = 1; diff --git a/src/Npgsql/MultiplexingDataSource.cs b/src/Npgsql/MultiplexingDataSource.cs index 277bc4e835..24da20183d 100644 --- a/src/Npgsql/MultiplexingDataSource.cs +++ b/src/Npgsql/MultiplexingDataSource.cs @@ -270,7 +270,7 @@ bool WriteCommand(NpgsqlConnector connector, NpgsqlCommand command, ref Multiple } // There's almost certainly more buffered outgoing data for the command, after the flush - // occured. Complete the write, which will flush again (and update statistics). + // occurred. Complete the write, which will flush again (and update statistics). try { Flush(conn, ref clonedStats); diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs b/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs index eb1dfbb86b..616ec83dcf 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs @@ -721,7 +721,7 @@ public override int GetHashCode() } /// -/// Represents an empty tsquery. Shold only be used as top node. +/// Represents an empty tsquery. Should only be used as top node. /// public sealed class NpgsqlTsQueryEmpty : NpgsqlTsQuery { diff --git a/src/Npgsql/Util/ResettableCancellationTokenSource.cs b/src/Npgsql/Util/ResettableCancellationTokenSource.cs index 874d7a40f8..d87fc9a80e 100644 --- a/src/Npgsql/Util/ResettableCancellationTokenSource.cs +++ b/src/Npgsql/Util/ResettableCancellationTokenSource.cs @@ -23,7 +23,7 @@ sealed class ResettableCancellationTokenSource : IDisposable CancellationTokenRegistration? 
_registration; /// - /// Used, so we wouldn't concurently use the cts for the cancellation, while it's being disposed + /// Used, so we wouldn't concurrently use the cts for the cancellation, while it's being disposed /// readonly object lockObject = new(); diff --git a/test/Npgsql.Tests/PrepareTests.cs b/test/Npgsql.Tests/PrepareTests.cs index 1d9c6dde85..3c91b7ad47 100644 --- a/test/Npgsql.Tests/PrepareTests.cs +++ b/test/Npgsql.Tests/PrepareTests.cs @@ -462,7 +462,7 @@ public void Overloaded_sql() // SQL overloading is a pretty rare/exotic scenario. Handling it properly would involve keying // prepared statements not just by SQL but also by the parameter types, which would pointlessly - // increase allocations. Instead, the second execution simply reuns unprepared + // increase allocations. Instead, the second execution simply reruns unprepared AssertNumPreparedStatements(conn, 1); conn.UnprepareAll(); } @@ -659,7 +659,7 @@ public void Same_sql_different_params() using (var conn = OpenConnectionAndUnprepare()) using (var cmd = new NpgsqlCommand("SELECT @p", conn)) { - throw new NotImplementedException("Problem: currentl setting NpgsqlParameter.Value clears/invalidates..."); + throw new NotImplementedException("Problem: current setting NpgsqlParameter.Value clears/invalidates..."); cmd.Parameters.Add(new NpgsqlParameter("p", NpgsqlDbType.Integer)); cmd.Prepare(true); diff --git a/test/Npgsql.Tests/SchemaTests.cs b/test/Npgsql.Tests/SchemaTests.cs index e65fc48cf2..230ae6a90d 100644 --- a/test/Npgsql.Tests/SchemaTests.cs +++ b/test/Npgsql.Tests/SchemaTests.cs @@ -47,7 +47,7 @@ public async Task No_parameter() Assert.That(collections1, Is.EquivalentTo(collections2)); } - [Test, Description("Calling GetSchema(collectionName [, restrictions]) case insensive collectionName can be used")] + [Test, Description("Calling GetSchema(collectionName [, restrictions]) case insensitive collectionName can be used")] public async Task Case_insensitive_collection_name() { await 
using var conn = await OpenConnectionAsync(); diff --git a/test/Npgsql.Tests/TestMetrics.cs b/test/Npgsql.Tests/TestMetrics.cs index 52bf2ed935..d1280c1e79 100644 --- a/test/Npgsql.Tests/TestMetrics.cs +++ b/test/Npgsql.Tests/TestMetrics.cs @@ -46,7 +46,7 @@ public static TestMetrics Start(TimeSpan allowedTime, bool reportOnStop) } /// - /// Incremnent the Iterations value by one. + /// Increment the Iterations value by one. /// public void IncrementIterations() { @@ -102,7 +102,7 @@ public double IterationsPer(TimeSpan timeSpan) /// /// Calculate the number of iterations accumulated per second. - /// Equivelent to calling IterationsPer(new TimeSpan(0, 0, 1)). + /// Equivalent to calling IterationsPer(new TimeSpan(0, 0, 1)). /// /// The number of iterations accumulated per second. public double IterationsPerSecond() @@ -122,7 +122,7 @@ public double IterationsPerCPU(TimeSpan timeSpan) /// /// Calculate the number of iterations accumulated per CPU second. - /// Equivelent to calling IterationsPerCPU(new TimeSpan(0, 0, 1)). + /// Equivalent to calling IterationsPerCPU(new TimeSpan(0, 0, 1)). /// /// /// The number of iterations accumulated per CPU second. 
diff --git a/test/Npgsql.Tests/TransactionTests.cs b/test/Npgsql.Tests/TransactionTests.cs index e0e61f95b4..d5e9a927f8 100644 --- a/test/Npgsql.Tests/TransactionTests.cs +++ b/test/Npgsql.Tests/TransactionTests.cs @@ -14,7 +14,7 @@ namespace Npgsql.Tests; public class TransactionTests : MultiplexingTestBase { - [Test, Description("Basic insert within a commited transaction")] + [Test, Description("Basic insert within a committed transaction")] public async Task Commit([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepared)] PrepareOrNot prepare) { if (prepare == PrepareOrNot.Prepared && IsMultiplexing) @@ -43,7 +43,7 @@ public async Task Commit([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepared Assert.That(() => tx.Connection, Throws.Exception.TypeOf()); } - [Test, Description("Basic insert within a commited transaction")] + [Test, Description("Basic insert within a committed transaction")] public async Task CommitAsync([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepared)] PrepareOrNot prepare) { if (prepare == PrepareOrNot.Prepared && IsMultiplexing) diff --git a/test/Npgsql.Tests/Types/RangeTests.cs b/test/Npgsql.Tests/Types/RangeTests.cs index 38449d30a2..011d66dfdd 100644 --- a/test/Npgsql.Tests/Types/RangeTests.cs +++ b/test/Npgsql.Tests/Types/RangeTests.cs @@ -97,7 +97,7 @@ public void Equality_infinite() { var r1 = new NpgsqlRange(0, false, true, 1, false, false); - //different upper bound (lower bound shoulnd't matter since it is infinite) + //different upper bound (lower bound shouldn't matter since it is infinite) var r2 = new NpgsqlRange(1, false, true, 2, false, false); Assert.IsFalse(r1 == r2); From 663488d1c2bb73c4821163f7ac132b8bddf618fc Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Wed, 19 Jun 2024 17:46:40 +0200 Subject: [PATCH 427/761] Widen the DBNull check so it also applies to object typed DBNull values. 
(#5738) Fixes #5716 --- src/Npgsql/NpgsqlBinaryImporter.cs | 8 +++++--- test/Npgsql.Tests/CopyTests.cs | 20 ++++++++++++++++++++ 2 files changed, 25 insertions(+), 3 deletions(-) diff --git a/src/Npgsql/NpgsqlBinaryImporter.cs b/src/Npgsql/NpgsqlBinaryImporter.cs index f80807af3e..633d1bac15 100644 --- a/src/Npgsql/NpgsqlBinaryImporter.cs +++ b/src/Npgsql/NpgsqlBinaryImporter.cs @@ -231,9 +231,11 @@ public Task WriteAsync(T value, string dataTypeName, CancellationToken cancel Task Write(bool async, T value, NpgsqlDbType? npgsqlDbType, string? dataTypeName, CancellationToken cancellationToken = default) { - // Statically handle DBNull for backwards compatibility, generic parameters where T = DBNull normally won't find a mapping. - // Also handle null values for object typed parameters, as parameters only accept DBNull.Value when T = object. - if (typeof(T) == typeof(DBNull) || (typeof(T) == typeof(object) && value is null)) + // Handle DBNull: + // 1. when T = DBNull for backwards compatibility, DBNull as a type normally won't find a mapping. + // 2. when T = object we resolve oid 0 if DBNull is the first value, later column value oids would needlessly be limited to oid 0. + // Also handle null values for object typed parameters, these parameters require non null values to be seen as set. 
+ if (typeof(T) == typeof(DBNull) || (typeof(T) == typeof(object) && value is null or DBNull)) return WriteNull(async, cancellationToken); return Core(async, value, npgsqlDbType, dataTypeName, cancellationToken); diff --git a/test/Npgsql.Tests/CopyTests.cs b/test/Npgsql.Tests/CopyTests.cs index d84ad7b53a..d6be58290e 100644 --- a/test/Npgsql.Tests/CopyTests.cs +++ b/test/Npgsql.Tests/CopyTests.cs @@ -386,6 +386,26 @@ public async Task Import_string_array() Assert.That(await conn.ExecuteScalarAsync($"SELECT field FROM {table}"), Is.EqualTo(data)); } + [Test] + public async Task Import_DBNull_then_other_object() + { + using var conn = await OpenConnectionAsync(); + var table = await CreateTempTable(conn, "field TEXT"); + + object data = "foo"; + using (var writer = conn.BeginBinaryImport($"COPY {table} (field) FROM STDIN BINARY")) + { + writer.StartRow(); + writer.Write((object?)DBNull.Value); + writer.StartRow(); + writer.Write(data); + var rowsWritten = writer.Complete(); + Assert.That(rowsWritten, Is.EqualTo(2)); + } + + Assert.That(await conn.ExecuteScalarAsync($"SELECT field FROM {table} OFFSET 1"), Is.EqualTo(data)); + } + [Test] public async Task Import_reused_instance_mapping_info_identical_or_throws() { From 81e2f6e6e8083d61eb3c8076296e52a71b5ca129 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Mon, 24 Jun 2024 14:10:58 +0200 Subject: [PATCH 428/761] Fix unsafe cast (#5732) --- .../Internal/Converters/ArrayConverter.cs | 111 ++++++++++-------- .../Internal/Converters/AsyncHelpers.cs | 86 ++++++++------ src/Npgsql/Internal/PgStreamingConverter.cs | 7 +- 3 files changed, 118 insertions(+), 86 deletions(-) diff --git a/src/Npgsql/Internal/Converters/ArrayConverter.cs b/src/Npgsql/Internal/Converters/ArrayConverter.cs index d191b43be6..1c87555ab0 100644 --- a/src/Npgsql/Internal/Converters/ArrayConverter.cs +++ b/src/Npgsql/Internal/Converters/ArrayConverter.cs @@ -283,45 +283,6 @@ public async ValueTask Write(bool async, PgWriter writer, object values, 
Cancell // We can immediately continue if we didn't reach the end of the last dimension. while (++indices[indices.Length - 1] < lastLength || (indices.Length > 1 && CarryIndices(state.Lengths!, indices))); } -} - -// Class constraint exists to make ValueTask to ValueTask reinterpretation safe, don't remove unless that is also removed. -abstract class ArrayConverter : PgStreamingConverter where T : class -{ - protected PgConverterResolution ElemResolution { get; } - protected Type ElemTypeToConvert { get; } - - readonly PgArrayConverter _pgArrayConverter; - - private protected ArrayConverter(int? expectedDimensions, PgConverterResolution elemResolution, int pgLowerBound = 1) - { - if (!elemResolution.Converter.CanConvert(DataFormat.Binary, out var bufferRequirements)) - throw new NotSupportedException("Element converter has to support the binary format to be compatible."); - - ElemResolution = elemResolution; - ElemTypeToConvert = elemResolution.Converter.TypeToConvert; - _pgArrayConverter = new((IElementOperations)this, elemResolution.Converter.IsDbNullable, expectedDimensions, - bufferRequirements, elemResolution.PgTypeId, pgLowerBound); - } - - public override T Read(PgReader reader) => (T)_pgArrayConverter.Read(async: false, reader).Result; - - public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) - { - var value = _pgArrayConverter.Read(async: true, reader, cancellationToken); - // Justification: elides the async method bloat/perf cost to transition from object to T (where T : class) - Debug.Assert(typeof(T).IsClass); - return Unsafe.As, ValueTask>(ref value); - } - - public override Size GetSize(SizeContext context, T values, ref object? 
writeState) - => _pgArrayConverter.GetSize(context, values, ref writeState); - - public override void Write(PgWriter writer, T values) - => _pgArrayConverter.Write(async: false, writer, values, CancellationToken.None).GetAwaiter().GetResult(); - - public override ValueTask WriteAsync(PgWriter writer, T values, CancellationToken cancellationToken = default) - => _pgArrayConverter.Write(async: true, writer, values, cancellationToken); // Using a function pointer here is safe against assembly unloading as the instance reference that the static pointer method lives on is passed along. // As such the instance cannot be collected by the gc which means the entire assembly is prevented from unloading until we're done. @@ -329,7 +290,7 @@ public override ValueTask WriteAsync(PgWriter writer, T values, CancellationToke // 1. Add a virtual method and make AwaitTask call into it (bloating the vtable of all derived types). // 2. Using a delegate, meaning we add a static field + an alloc per T + metadata, slightly slower dispatch perf so overall strictly worse as well. [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder))] - private protected static async ValueTask AwaitTask(Task task, Continuation continuation, object collection, int[] indices) + public static async ValueTask AwaitTask(Task task, Continuation continuation, object collection, int[] indices) { await task.ConfigureAwait(false); continuation.Invoke(task, collection, indices); @@ -338,7 +299,7 @@ private protected static async ValueTask AwaitTask(Task task, Continuation conti } // Split out into a struct as unsafe and async don't mix, while we do want a nicely typed function pointer signature to prevent mistakes. 
- protected readonly unsafe struct Continuation + public readonly unsafe struct Continuation { public object Handle { get; } readonly delegate* _continuation; @@ -353,6 +314,58 @@ public Continuation(object handle, delegate* continua public void Invoke(Task task, object collection, int[] indices) => _continuation(task, collection, indices); } +} + +abstract class ArrayConverter : PgStreamingConverter where T : notnull +{ + readonly PgArrayConverter _pgArrayConverter; + + private protected ArrayConverter(int? expectedDimensions, PgConverterResolution elemResolution, int pgLowerBound = 1) + { + if (!elemResolution.Converter.CanConvert(DataFormat.Binary, out var bufferRequirements)) + throw new NotSupportedException("Element converter has to support the binary format to be compatible."); + + _pgArrayConverter = new((IElementOperations)this, elemResolution.Converter.IsDbNullable, expectedDimensions, + bufferRequirements, elemResolution.PgTypeId, pgLowerBound); + } + + public override T Read(PgReader reader) => (T)_pgArrayConverter.Read(async: false, reader).Result; + + public override unsafe ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + { + // Cheap if we have all the data. + var task = _pgArrayConverter.Read(async: true, reader, cancellationToken); + if (task.IsCompletedSuccessfully) + return new((T)task.Result); + + // Otherwise do these additional allocations (source and task) to allow us to share state machine codegen for all Ts. + // We don't use the PoolingCompletionSource here as it would be backed by an IValueTaskSource. + // Any ReadAsObjectAsync caller would call AsTask() on it immediately, causing another allocation and indirection. 
+ var source = new AsyncHelpers.CompletionSource(); + AsyncHelpers.OnCompletedWithSource(task.AsTask(), source, new(this, &UnboxAndComplete)); + return source.Task; + + static void UnboxAndComplete(Task task, AsyncHelpers.CompletionSource completionSource) + { + // Justification: exact type Unsafe.As used to reduce generic duplication cost when T is a value type (like ReadOnlyMemory). + Debug.Assert(task is Task); + // Using .Result on ValueTask is equivalent to GetAwaiter().GetResult(), this removes TaskAwaiter rooting. + var result = (T)new ValueTask(Unsafe.As>(task)).Result; + + // Justification: exact type Unsafe.As used to reduce generic duplication cost. + Debug.Assert(completionSource is AsyncHelpers.CompletionSource); + Unsafe.As>(completionSource).SetResult(result); + } + } + + public override Size GetSize(SizeContext context, T values, ref object? writeState) + => _pgArrayConverter.GetSize(context, values, ref writeState); + + public override void Write(PgWriter writer, T values) + => _pgArrayConverter.Write(async: false, writer, values, CancellationToken.None).GetAwaiter().GetResult(); + + public override ValueTask WriteAsync(PgWriter writer, T values, CancellationToken cancellationToken = default) + => _pgArrayConverter.Write(async: true, writer, values, cancellationToken); protected static int[]? 
GetLengths(Array array) { @@ -449,15 +462,17 @@ ValueTask IElementOperations.Read(bool async, PgReader reader, bool isDbNull, ob unsafe ValueTask ReadAsync(PgStreamingConverter converter, PgReader reader, object collection, int[] indices, CancellationToken cancellationToken) { if (converter.ReadAsyncAsTask(reader, cancellationToken, out var result) is { } task) - return AwaitTask(task, new(this, &SetResult), collection, indices); + return PgArrayConverter.AwaitTask(task, new(this, &SetResult), collection, indices); SetValue(collection, indices, result); return new(); - // Using .Result on ValueTask is equivalent to GetAwaiter().GetResult(), this removes TaskAwaiter rooting. static void SetResult(Task task, object collection, int[] indices) { - SetValue(collection, indices, new ValueTask((Task)task).Result); + // Justification: exact type Unsafe.As used to reduce generic duplication cost. + Debug.Assert(task is Task); + // Using .Result on ValueTask is equivalent to GetAwaiter().GetResult(), this removes TaskAwaiter rooting. + SetValue(collection, indices, new ValueTask(task: Unsafe.As>(task)).Result); } } @@ -521,15 +536,17 @@ ValueTask IElementOperations.Read(bool async, PgReader reader, bool isDbNull, ob unsafe ValueTask ReadAsync(PgStreamingConverter converter, PgReader reader, object collection, int[] indices, CancellationToken cancellationToken) { if (converter.ReadAsyncAsTask(reader, cancellationToken, out var result) is { } task) - return AwaitTask(task, new(this, &SetResult), collection, indices); + return PgArrayConverter.AwaitTask(task, new(this, &SetResult), collection, indices); SetValue(collection, indices[0], result); return new(); - // Using .Result on ValueTask is equivalent to GetAwaiter().GetResult(), this removes TaskAwaiter rooting. 
static void SetResult(Task task, object collection, int[] indices) { - SetValue(collection, indices[0], new ValueTask((Task)task).Result); + // Justification: exact type Unsafe.As used to reduce generic duplication cost. + Debug.Assert(task is Task); + // Using .Result on ValueTask is equivalent to GetAwaiter().GetResult(), this removes TaskAwaiter rooting. + SetValue(collection, indices[0], new ValueTask(task: Unsafe.As>(task)).Result); } } diff --git a/src/Npgsql/Internal/Converters/AsyncHelpers.cs b/src/Npgsql/Internal/Converters/AsyncHelpers.cs index 534e9f10ef..ddd03a24be 100644 --- a/src/Npgsql/Internal/Converters/AsyncHelpers.cs +++ b/src/Npgsql/Internal/Converters/AsyncHelpers.cs @@ -6,31 +6,49 @@ namespace Npgsql.Internal.Converters; -// Using a function pointer here is safe against assembly unloading as the instance reference that the static pointer method lives on is passed along. -// As such the instance cannot be collected by the gc which means the entire assembly is prevented from unloading until we're done. static class AsyncHelpers { - static async void AwaitTask(Task task, CompletionSource tcs, Continuation continuation) + public static void OnCompletedWithSource(Task task, CompletionSource source, CompletionSourceContinuation continuation) { - try - { - await task.ConfigureAwait(false); - continuation.Invoke(task, tcs); - } - catch (Exception ex) + _ = Core(task, source, continuation); + + // Have our state machine be pooled, but don't return the task, source.Task should be used instead. + [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder))] + async ValueTask Core(Task task, CompletionSource source, CompletionSourceContinuation continuation) { - tcs.SetException(ex); + try + { + await task.ConfigureAwait(false); + continuation.Invoke(task, source); + } + catch (Exception ex) + { + source.SetException(ex); + } + // Guarantee the type stays loaded until the function pointer call is done. 
+ continuation.KeepAlive(); } - // Guarantee the type stays loaded until the function pointer call is done. - GC.KeepAlive(continuation.Handle); } - abstract class CompletionSource + public abstract class CompletionSource { public abstract void SetException(Exception exception); } - sealed class CompletionSource : CompletionSource + public sealed class CompletionSource : CompletionSource + { + AsyncValueTaskMethodBuilder _amb = AsyncValueTaskMethodBuilder.Create(); + + public ValueTask Task => _amb.Task; + + public void SetResult(T value) + => _amb.SetResult(value); + + public override void SetException(Exception exception) + => _amb.SetException(exception); + } + + public sealed class PoolingCompletionSource : CompletionSource { PoolingAsyncValueTaskMethodBuilder _amb = PoolingAsyncValueTaskMethodBuilder.Create(); @@ -43,71 +61,67 @@ public override void SetException(Exception exception) => _amb.SetException(exception); } + // Using a function pointer here is safe against assembly unloading as the instance reference that the static pointer method lives on is passed along. + // As such the instance cannot be collected by the gc which means the entire assembly is prevented from unloading until we're done. // Split out into a struct as unsafe and async don't mix, while we do want a nicely typed function pointer signature to prevent mistakes. - readonly unsafe struct Continuation + public readonly unsafe struct CompletionSourceContinuation { - public object Handle { get; } + readonly object _handle; readonly delegate* _continuation; /// A reference to the type that houses the static method points to. 
/// The continuation - public Continuation(object handle, delegate* continuation) + public CompletionSourceContinuation(object handle, delegate* continuation) { - Handle = handle; + _handle = handle; _continuation = continuation; } + public void KeepAlive() => GC.KeepAlive(_handle); + public void Invoke(Task task, CompletionSource tcs) => _continuation(task, tcs); } public static unsafe ValueTask ReadAsyncAsNullable(this PgConverter instance, PgConverter effectiveConverter, PgReader reader, CancellationToken cancellationToken) where T : struct { - // Easy if we have all the data. + // Cheap if we have all the data. var task = effectiveConverter.ReadAsync(reader, cancellationToken); if (task.IsCompletedSuccessfully) return new(new T?(task.Result)); // Otherwise we do one additional allocation, this allow us to share state machine codegen for all Ts. - var source = new CompletionSource(); - AwaitTask(task.AsTask(), source, new(instance, &UnboxAndComplete)); + var source = new PoolingCompletionSource(); + OnCompletedWithSource(task.AsTask(), source, new(instance, &UnboxAndComplete)); return source.Task; static void UnboxAndComplete(Task task, CompletionSource completionSource) { // Justification: exact type Unsafe.As used to reduce generic duplication cost. 
Debug.Assert(task is Task); - Debug.Assert(completionSource is CompletionSource); - Unsafe.As>(completionSource).SetResult(new T?(new ValueTask(Unsafe.As>(task)).Result)); + Debug.Assert(completionSource is PoolingCompletionSource); + Unsafe.As>(completionSource).SetResult(new T?(new ValueTask(Unsafe.As>(task)).Result)); } } public static unsafe ValueTask ReadAsObjectAsyncAsT(this PgConverter instance, PgConverter effectiveConverter, PgReader reader, CancellationToken cancellationToken) { - if (!typeof(T).IsValueType) - { - var value = effectiveConverter.ReadAsObjectAsync(reader, cancellationToken); - // Justification: elides the async method bloat/perf cost to transition from object to T (where T : class) - Debug.Assert(typeof(T).IsClass); - return Unsafe.As, ValueTask>(ref value); - } - - // Easy if we have all the data. + // Cheap if we have all the data. var task = effectiveConverter.ReadAsObjectAsync(reader, cancellationToken); if (task.IsCompletedSuccessfully) return new((T)task.Result); // Otherwise we do one additional allocation, this allow us to share state machine codegen for all Ts. - var source = new CompletionSource(); - AwaitTask(task.AsTask(), source, new(instance, &UnboxAndComplete)); + var source = new PoolingCompletionSource(); + OnCompletedWithSource(task.AsTask(), source, new(instance, &UnboxAndComplete)); return source.Task; static void UnboxAndComplete(Task task, CompletionSource completionSource) { // Justification: exact type Unsafe.As used to reduce generic duplication cost. 
Debug.Assert(task is Task); - Debug.Assert(completionSource is CompletionSource); - Unsafe.As>(completionSource).SetResult((T)new ValueTask(Unsafe.As>(task)).Result); + Debug.Assert(completionSource is PoolingCompletionSource); + Unsafe.As>(completionSource).SetResult((T)new ValueTask(Unsafe.As>(task)).Result); } } } diff --git a/src/Npgsql/Internal/PgStreamingConverter.cs b/src/Npgsql/Internal/PgStreamingConverter.cs index f0a32156d3..c27d7a320e 100644 --- a/src/Npgsql/Internal/PgStreamingConverter.cs +++ b/src/Npgsql/Internal/PgStreamingConverter.cs @@ -44,9 +44,10 @@ internal sealed override unsafe ValueTask ReadAsObject( static object BoxResult(Task task) { - // We're using ValueTask.Result here to avoid rooting any TaskAwaiter or ValueTaskAwaiter types. - // On ValueTask calling .Result is equivalent to GetAwaiter().GetResult() w.r.t. exception wrapping. - return new ValueTask(task: (Task)task).Result!; + // Justification: exact type Unsafe.As used to reduce generic duplication cost. + Debug.Assert(task is Task); + // Using .Result on ValueTask is equivalent to GetAwaiter().GetResult(), this removes TaskAwaiter rooting. 
+ return new ValueTask(task: Unsafe.As>(task)).Result!; } } From 403729a9c47c18b4a7a63deb5765f7e2dc947f0c Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Mon, 24 Jun 2024 21:37:12 +0200 Subject: [PATCH 429/761] Trimmer warnings test (#5654) Fixes #5579 --- .github/workflows/native-aot.yml | 166 +++++++++++++----- src/Directory.Build.props | 2 +- .../ReflectionCompositeInfoFactory.cs | 8 +- .../Internal/DynamicTypeInfoResolver.cs | 115 ++++++------ .../Npgsql.NativeAotTests.csproj | 2 +- 5 files changed, 192 insertions(+), 101 deletions(-) diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index c3db4812b1..a795054e8c 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -17,32 +17,109 @@ concurrency: env: dotnet_sdk_version: '8.0.203' DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true + AOT_Compat: | + param([string]$targetFramework) + + $publishOutput = dotnet publish test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj -r linux-x64 -c Release -f $targetFramework -p:RootNpgsql=true + + $actualWarningCount = 0 + + foreach ($line in $($publishOutput -split "`r`n")) + { + if ($line -like "*analysis warning IL*") + { + Write-Host $line + + $actualWarningCount += 1 + } + } + + $testPassed = 0 + + $binaryPath = "test/Npgsql.NativeAotTests/bin/Release/$targetFramework/linux-x64/native/" + if (-not (Test-Path -LiteralPath $binaryPath)) + { + $testPassed = 1 + Write-Host "Could not publish app, output was:" + foreach ($line in $($publishOutput -split "`r`n")) + { + Write-Host $line + } + } + + Write-Host "Actual warning count is:", $actualWarningCount + $expectedWarningCount = 0 + + if ($actualWarningCount -ne $expectedWarningCount) + { + $testPassed = 2 + Write-Host "Actual warning count:", $actualWarningCount, "is not as expected. 
Expected warning count is:", $expectedWarningCount + } + + Exit $testPassed # Uncomment and edit the following to use nightly/preview builds -# nuget_config: | -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# + # nuget_config: | + # + # + # + # + # + # + # + # + # + # + # + # + # + # + # + # + # + # + # + # + # + # jobs: - build: + full: + runs-on: ${{ matrix.os }} + + strategy: + fail-fast: false + matrix: + os: [ ubuntu-22.04 ] + pg_major: [ 15 ] + tfm: [ net8.0 ] + + steps: + - name: Checkout + uses: actions/checkout@v4 + + # - name: Setup nuget config + # run: echo "$nuget_config" > NuGet.config + + - name: NuGet Cache + uses: actions/cache@v4 + with: + path: ~/.nuget/packages + key: ${{ runner.os }}-nuget-${{ hashFiles('**/Directory.Build.targets') }} + restore-keys: | + ${{ runner.os }}-nuget- + + - name: Setup .NET Core SDK + uses: actions/setup-dotnet@v4.0.0 + with: + dotnet-version: | + ${{ env.dotnet_sdk_version }} + + - name: Write script + run: echo "$AOT_Compat" > test-aot-compatibility.ps1 + + - name: Publish and check for trimmer warnings + run: ./test-aot-compatibility.ps1 ${{ matrix.tfm }} + shell: pwsh + trimmed: runs-on: ${{ matrix.os }} strategy: @@ -50,11 +127,15 @@ jobs: matrix: os: [ubuntu-22.04] pg_major: [15] + tfm: [ net8.0 ] steps: - name: Checkout uses: actions/checkout@v4 + # - name: Setup nuget config + # run: echo "$nuget_config" > NuGet.config + - name: NuGet Cache uses: actions/cache@v4 with: @@ -69,15 +150,14 @@ jobs: dotnet-version: | ${{ env.dotnet_sdk_version }} -# - name: Setup nuget config -# run: echo "$nuget_config" > NuGet.config - - - name: Setup Native AOT prerequisites - run: sudo apt-get install clang zlib1g-dev - shell: bash - + - name: Start PostgreSQL + run: | + sudo systemctl start postgresql.service + sudo -u postgres psql -c "CREATE USER npgsql_tests SUPERUSER PASSWORD 'npgsql_tests'" + sudo -u postgres psql -c "CREATE DATABASE npgsql_tests OWNER npgsql_tests" + - name: Build - run: dotnet publish 
test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj -r linux-x64 -c Release -f net8.0 -p:OptimizationPreference=Size + run: dotnet publish test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj -r linux-x64 -c Release -f ${{ matrix.tfm }} -p:OptimizationPreference=Size shell: bash # Uncomment the following to SSH into the agent running the build (https://github.com/mxschmitt/action-tmate) @@ -85,14 +165,8 @@ jobs: #- name: Setup tmate session # uses: mxschmitt/action-tmate@v3 - - name: Start PostgreSQL - run: | - sudo systemctl start postgresql.service - sudo -u postgres psql -c "CREATE USER npgsql_tests SUPERUSER PASSWORD 'npgsql_tests'" - sudo -u postgres psql -c "CREATE DATABASE npgsql_tests OWNER npgsql_tests" - - name: Run - run: test/Npgsql.NativeAotTests/bin/Release/net8.0/linux-x64/native/Npgsql.NativeAotTests + run: test/Npgsql.NativeAotTests/bin/Release/${{ matrix.tfm }}/linux-x64/native/Npgsql.NativeAotTests - name: Write binary size to summary run: | @@ -100,35 +174,35 @@ jobs: echo "Binary size is $size bytes ($((size / (1024 * 1024))) mb)" >> $GITHUB_STEP_SUMMARY - name: Dump mstat - run: dotnet run --project test/MStatDumper/MStatDumper.csproj -c release -f net8.0 -- "test/Npgsql.NativeAotTests/obj/Release/net8.0/linux-x64/native/Npgsql.NativeAotTests.mstat" md >> $GITHUB_STEP_SUMMARY + run: dotnet run --project test/MStatDumper/MStatDumper.csproj -c release -f ${{ matrix.tfm }} -- "test/Npgsql.NativeAotTests/obj/Release/net8.0/linux-x64/native/Npgsql.NativeAotTests.mstat" md >> $GITHUB_STEP_SUMMARY - name: Upload mstat uses: actions/upload-artifact@v4 with: name: npgsql.mstat - path: "test/Npgsql.NativeAotTests/obj/Release/net8.0/linux-x64/native/Npgsql.NativeAotTests.mstat" + path: "test/Npgsql.NativeAotTests/obj/Release/${{ matrix.tfm }}/linux-x64/native/Npgsql.NativeAotTests.mstat" retention-days: 3 - name: Upload codedgen dgml uses: actions/upload-artifact@v4 with: name: npgsql.codegen.dgml.xml - path: 
"test/Npgsql.NativeAotTests/obj/Release/net8.0/linux-x64/native/Npgsql.NativeAotTests.codegen.dgml.xml" + path: "test/Npgsql.NativeAotTests/obj/Release/${{ matrix.tfm }}/linux-x64/native/Npgsql.NativeAotTests.codegen.dgml.xml" retention-days: 3 - name: Upload scan dgml uses: actions/upload-artifact@v4 with: name: npgsql.scan.dgml.xml - path: "test/Npgsql.NativeAotTests/obj/Release/net8.0/linux-x64/native/Npgsql.NativeAotTests.scan.dgml.xml" + path: "test/Npgsql.NativeAotTests/obj/Release/${{ matrix.tfm }}/linux-x64/native/Npgsql.NativeAotTests.scan.dgml.xml" retention-days: 3 - name: Assert binary size run: | - size="$(ls -l test/Npgsql.NativeAotTests/bin/Release/net8.0/linux-x64/native/Npgsql.NativeAotTests | cut -d ' ' -f 5)" + size="$(ls -l test/Npgsql.NativeAotTests/bin/Release/${{ matrix.tfm }}/linux-x64/native/Npgsql.NativeAotTests | cut -d ' ' -f 5)" echo "Binary size is $size bytes ($((size / (1024 * 1024))) mb)" - if (( size > 7340032 )); then - echo "Binary size exceeds 7mb threshold" + if (( size > 5242880 )); then + echo "Binary size exceeds 5MB threshold" exit 1 fi diff --git a/src/Directory.Build.props b/src/Directory.Build.props index 169a5988a2..9d07823223 100644 --- a/src/Directory.Build.props +++ b/src/Directory.Build.props @@ -4,7 +4,7 @@ true - true + true diff --git a/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs b/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs index d6c51b8344..522b46acf3 100644 --- a/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs +++ b/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs @@ -151,8 +151,8 @@ static Delegate CreateSetter(PropertyInfo info) static Expression UnboxAny(Expression expression, Type type) => type.IsValueType ? 
Expression.Unbox(expression, type) : Expression.Convert(expression, type, null); - [DynamicDependency("TypedValue", typeof(StrongBox<>))] - [DynamicDependency("Length", typeof(StrongBox[]))] + [DynamicDependency(nameof(StrongBox.TypedValue), typeof(StrongBox<>))] + [DynamicDependency(DynamicallyAccessedMemberTypes.PublicProperties, typeof(StrongBox[]))] [UnconditionalSuppressMessage("Trimming", "IL2026", Justification = "DynamicDependencies in place for the System.Linq.Expression.Property calls")] static Func CreateStrongBoxConstructor(ConstructorInfo constructorInfo) { @@ -165,7 +165,7 @@ static Func CreateStrongBoxConstructor(ConstructorInfo constr .Lambda>( Expression.Block( Expression.IfThen( - Expression.LessThan(Expression.Property(values, "Length"), parameterCount), + Expression.LessThan(Expression.Property(values, nameof(Array.Length)), parameterCount), Expression.Throw(Expression.New(argumentExceptionNameMessageConstructor, Expression.Constant("Passed fewer arguments than there are constructor parameters."), Expression.Constant(values.Name))) @@ -176,7 +176,7 @@ static Func CreateStrongBoxConstructor(ConstructorInfo constr Expression.ArrayIndex(values, Expression.Constant(i)), typeof(StrongBox<>).MakeGenericType(parameter.ParameterType) ), - "TypedValue" + nameof(StrongBox.TypedValue) ) )) ), values) diff --git a/src/Npgsql/Internal/DynamicTypeInfoResolver.cs b/src/Npgsql/Internal/DynamicTypeInfoResolver.cs index 421de703f5..91af319207 100644 --- a/src/Npgsql/Internal/DynamicTypeInfoResolver.cs +++ b/src/Npgsql/Internal/DynamicTypeInfoResolver.cs @@ -1,13 +1,17 @@ using System; using System.Diagnostics.CodeAnalysis; -using System.Reflection; using Npgsql.Internal.Postgres; using Npgsql.PostgresTypes; namespace Npgsql.Internal; [Experimental(NpgsqlDiagnostics.ConvertersExperimental)] +#if NET9_0_OR_GREATER [RequiresDynamicCode("A dynamic type info resolver may need to construct a generic converter for a statically unknown type.")] +#else 
+[RequiresUnreferencedCode("A dynamic type info resolver may need to construct a generic converter for a statically unknown type.")] +[RequiresDynamicCode("A dynamic type info resolver may need to construct a generic converter for a statically unknown type.")] +#endif public abstract class DynamicTypeInfoResolver : IPgTypeInfoResolver { public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) @@ -43,84 +47,97 @@ protected static bool IsArrayDataTypeName(DataTypeName dataTypeName, PgSerialize protected abstract DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options); +#if NET9_0_OR_GREATER + [RequiresDynamicCode("A dynamic type info resolver may need to construct a generic converter for a statically unknown type.")] +#else + [RequiresUnreferencedCode("A dynamic type info resolver may need to construct a generic converter for a statically unknown type.")] [RequiresDynamicCode("A dynamic type info resolver may need to construct a generic converter for a statically unknown type.")] +#endif protected class DynamicMappingCollection { TypeInfoMappingCollection? _mappings; - static readonly MethodInfo AddTypeMethodInfo = typeof(TypeInfoMappingCollection).GetMethod(nameof(TypeInfoMappingCollection.AddType), - new[] { typeof(string), typeof(TypeInfoFactory), typeof(Func) }) ?? throw new NullReferenceException(); - - static readonly MethodInfo AddArrayTypeMethodInfo = typeof(TypeInfoMappingCollection) - .GetMethod(nameof(TypeInfoMappingCollection.AddArrayType), new[] { typeof(string) }) ?? throw new NullReferenceException(); - - static readonly MethodInfo AddStructTypeMethodInfo = typeof(TypeInfoMappingCollection).GetMethod(nameof(TypeInfoMappingCollection.AddStructType), - new[] { typeof(string), typeof(TypeInfoFactory), typeof(Func) }) ?? 
throw new NullReferenceException(); - - static readonly MethodInfo AddStructArrayTypeMethodInfo = typeof(TypeInfoMappingCollection) - .GetMethod(nameof(TypeInfoMappingCollection.AddStructArrayType), new[] { typeof(string) }) ?? throw new NullReferenceException(); - - static readonly MethodInfo AddResolverTypeMethodInfo = typeof(TypeInfoMappingCollection).GetMethod( - nameof(TypeInfoMappingCollection.AddResolverType), - new[] { typeof(string), typeof(TypeInfoFactory), typeof(Func) }) ?? throw new NullReferenceException(); - - static readonly MethodInfo AddResolverArrayTypeMethodInfo = typeof(TypeInfoMappingCollection) - .GetMethod(nameof(TypeInfoMappingCollection.AddResolverArrayType), new[] { typeof(string) }) ?? throw new NullReferenceException(); - - static readonly MethodInfo AddResolverStructTypeMethodInfo = typeof(TypeInfoMappingCollection).GetMethod( - nameof(TypeInfoMappingCollection.AddResolverStructType), - new[] { typeof(string), typeof(TypeInfoFactory), typeof(Func) }) ?? throw new NullReferenceException(); - - static readonly MethodInfo AddResolverStructArrayTypeMethodInfo = typeof(TypeInfoMappingCollection) - .GetMethod(nameof(TypeInfoMappingCollection.AddResolverStructArrayType), new[] { typeof(string) }) ?? throw new NullReferenceException(); - internal DynamicMappingCollection(TypeInfoMappingCollection? baseCollection = null) { if (baseCollection is not null) _mappings = new(baseCollection); } - public DynamicMappingCollection AddMapping(Type type, string dataTypeName, TypeInfoFactory factory, Func? configureMapping = null) + public DynamicMappingCollection AddMapping([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)]Type type, string dataTypeName, TypeInfoFactory factory, Func? 
configureMapping = null) { if (type.IsValueType && Nullable.GetUnderlyingType(type) is not null) throw new NotSupportedException("Mapping nullable types is not supported, map its underlying type instead to get both."); - (type.IsValueType ? AddStructTypeMethodInfo : AddTypeMethodInfo) - .MakeGenericMethod(type).Invoke(_mappings ??= new(), new object?[] - { - dataTypeName, - factory, - configureMapping - }); + if (type.IsValueType) + typeof(TypeInfoMappingCollection) + .GetMethod(nameof(TypeInfoMappingCollection.AddStructType), [typeof(string), typeof(TypeInfoFactory), typeof(Func)])! + .MakeGenericMethod(type).Invoke(_mappings ??= new(), + [ + dataTypeName, + factory, + configureMapping + ]); + else + typeof(TypeInfoMappingCollection) + .GetMethod(nameof(TypeInfoMappingCollection.AddType), [typeof(string), typeof(TypeInfoFactory), typeof(Func)])! + .MakeGenericMethod(type).Invoke(_mappings ??= new(), + [ + dataTypeName, + factory, + configureMapping + ]); return this; } - public DynamicMappingCollection AddArrayMapping(Type elementType, string dataTypeName) + public DynamicMappingCollection AddArrayMapping([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)]Type elementType, string dataTypeName) { - (elementType.IsValueType ? AddStructArrayTypeMethodInfo : AddArrayTypeMethodInfo) - .MakeGenericMethod(elementType).Invoke(_mappings ??= new(), new object?[] { dataTypeName }); + if (elementType.IsValueType) + typeof(TypeInfoMappingCollection) + .GetMethod(nameof(TypeInfoMappingCollection.AddStructArrayType), [typeof(string)])! + .MakeGenericMethod(elementType).Invoke(_mappings ??= new(), [dataTypeName]); + else + typeof(TypeInfoMappingCollection) + .GetMethod(nameof(TypeInfoMappingCollection.AddArrayType), [typeof(string)])! + .MakeGenericMethod(elementType).Invoke(_mappings ??= new(), [dataTypeName]); return this; } - public DynamicMappingCollection AddResolverMapping(Type type, string dataTypeName, TypeInfoFactory factory, Func? 
configureMapping = null) + public DynamicMappingCollection AddResolverMapping([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)]Type type, string dataTypeName, TypeInfoFactory factory, Func? configureMapping = null) { if (type.IsValueType && Nullable.GetUnderlyingType(type) is not null) throw new NotSupportedException("Mapping nullable types is not supported"); - (type.IsValueType ? AddResolverStructTypeMethodInfo : AddResolverTypeMethodInfo) - .MakeGenericMethod(type).Invoke(_mappings ??= new(), new object?[] - { - dataTypeName, - factory, - configureMapping - }); + if (type.IsValueType) + typeof(TypeInfoMappingCollection) + .GetMethod(nameof(TypeInfoMappingCollection.AddResolverStructType), [typeof(string), typeof(TypeInfoFactory), typeof(Func)])! + .MakeGenericMethod(type).Invoke(_mappings ??= new(), + [ + dataTypeName, + factory, + configureMapping + ]); + else + typeof(TypeInfoMappingCollection) + .GetMethod(nameof(TypeInfoMappingCollection.AddResolverType), [typeof(string), typeof(TypeInfoFactory), typeof(Func)])! + .MakeGenericMethod(type).Invoke(_mappings ??= new(), + [ + dataTypeName, + factory, + configureMapping + ]); return this; } - public DynamicMappingCollection AddResolverArrayMapping(Type elementType, string dataTypeName) + public DynamicMappingCollection AddResolverArrayMapping([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)]Type elementType, string dataTypeName) { - (elementType.IsValueType ? AddResolverStructArrayTypeMethodInfo : AddResolverArrayTypeMethodInfo) - .MakeGenericMethod(elementType).Invoke(_mappings ??= new(), new object?[] { dataTypeName }); + if (elementType.IsValueType) + typeof(TypeInfoMappingCollection) + .GetMethod(nameof(TypeInfoMappingCollection.AddResolverStructArrayType), [typeof(string)])! 
+ .MakeGenericMethod(elementType).Invoke(_mappings ??= new(), [dataTypeName]); + else + typeof(TypeInfoMappingCollection) + .GetMethod(nameof(TypeInfoMappingCollection.AddResolverArrayType), [typeof(string)])! + .MakeGenericMethod(elementType).Invoke(_mappings ??= new(), [dataTypeName]); return this; } diff --git a/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj b/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj index f384594eb3..31831faded 100644 --- a/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj +++ b/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj @@ -9,10 +9,10 @@ true false true - Size + From 80acce2ab7e96a0b0ac1fc035b18835833e6cdfb Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Tue, 25 Jun 2024 01:22:39 +0200 Subject: [PATCH 430/761] Throw instead of saturate on Size overflow (#5735) Fixes #5722 --- src/Npgsql/Internal/Size.cs | 4 +-- test/Npgsql.Tests/SizeTests.cs | 59 ++++++++++++++++++++++++++++++++++ 2 files changed, 61 insertions(+), 2 deletions(-) create mode 100644 test/Npgsql.Tests/SizeTests.cs diff --git a/src/Npgsql/Internal/Size.cs b/src/Npgsql/Internal/Size.cs index 7cbdd9bb20..79fe38b5c4 100644 --- a/src/Npgsql/Internal/Size.cs +++ b/src/Npgsql/Internal/Size.cs @@ -50,9 +50,9 @@ public Size Combine(Size result) return Unknown; if (_kind is SizeKind.UpperBound || result._kind is SizeKind.UpperBound) - return CreateUpperBound((int)Math.Min((long)(_value + result._value), int.MaxValue)); + return CreateUpperBound(checked(_value + result._value)); - return Create((int)Math.Min((long)(_value + result._value), int.MaxValue)); + return Create(checked(_value + result._value)); } public static implicit operator Size(int value) => Create(value); diff --git a/test/Npgsql.Tests/SizeTests.cs b/test/Npgsql.Tests/SizeTests.cs new file mode 100644 index 0000000000..93bd3b8d29 --- /dev/null +++ b/test/Npgsql.Tests/SizeTests.cs @@ -0,0 +1,59 @@ +using System; +using NUnit.Framework; +using Npgsql.Internal; + +namespace 
Npgsql.Tests; + +public class SizeTests +{ + [Test] + public void UnknownKind() => Assert.That(Size.Unknown.Kind, Is.EqualTo(SizeKind.Unknown)); + + [Test] + public void UnknownThrowsOnValue() => Assert.Throws(() => _ = Size.Unknown.Value); + + [Test] + public void Exact() + { + Assert.That(Size.Create(1).Value, Is.EqualTo(1)); + Assert.That(Size.Create(1).Kind, Is.EqualTo(SizeKind.Exact)); + } + + [Test] + public void ZeroIsExactKind() => Assert.That(Size.Zero.Kind, Is.EqualTo(SizeKind.Exact)); + + [Test] + public void UpperBound() + { + Assert.That(Size.CreateUpperBound(1).Value, Is.EqualTo(1)); + Assert.That(Size.CreateUpperBound(1).Kind, Is.EqualTo(SizeKind.UpperBound)); + } + + [Test] + public void CombineThrowsOnOverflow() => Assert.Throws(() => Size.Create(1).Combine(int.MaxValue)); + + [Test] + public void CombineExactWorks() => Assert.That(Size.Create(1).Combine(1), Is.EqualTo(Size.Create(2))); + + [Test] + public void CombineUpperBoundWorks() => Assert.That(Size.CreateUpperBound(1).Combine(1), Is.EqualTo(Size.CreateUpperBound(2))); + + [Test] + public void CombineUnknownWithAnyGivesUnknown() + { + Assert.That(Size.Unknown.Combine(Size.Unknown), Is.EqualTo(Size.Unknown)); + + Assert.That(Size.Create(1).Combine(Size.Unknown), Is.EqualTo(Size.Unknown)); + Assert.That(Size.Unknown.Combine(Size.Create(1)), Is.EqualTo(Size.Unknown)); + + Assert.That(Size.Unknown.Combine(Size.CreateUpperBound(1)), Is.EqualTo(Size.Unknown)); + Assert.That(Size.CreateUpperBound(1).Combine(Size.Unknown), Is.EqualTo(Size.Unknown)); + } + + [Test] + public void CombineUpperBoundWithExactGivesUpperBound() + { + Assert.That(Size.Create(1).Combine(Size.CreateUpperBound(1)), Is.EqualTo(Size.CreateUpperBound(2))); + Assert.That(Size.CreateUpperBound(1).Combine(Size.Create(1)), Is.EqualTo(Size.CreateUpperBound(2))); + } +} From 0bbdc3ccbaee5644ebda5f52335c4643c3f7f918 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Fri, 28 Jun 2024 14:51:11 +0200 Subject: [PATCH 431/761] Remove indices 
alloc from ArrayConverter (#5736) --- .../Internal/Converters/ArrayConverter.cs | 240 +++++++++++------- test/Npgsql.Tests/Types/ArrayTests.cs | 2 +- 2 files changed, 145 insertions(+), 97 deletions(-) diff --git a/src/Npgsql/Internal/Converters/ArrayConverter.cs b/src/Npgsql/Internal/Converters/ArrayConverter.cs index 1c87555ab0..daf05aceb1 100644 --- a/src/Npgsql/Internal/Converters/ArrayConverter.cs +++ b/src/Npgsql/Internal/Converters/ArrayConverter.cs @@ -10,18 +10,56 @@ namespace Npgsql.Internal.Converters; +struct Indices +{ + // Public field to be able to return it by ref in GetItem. + public int One; + public int[]? Many { get; private init; } + public int Count { get; private init; } + + public static Indices Create(int dimensions) + => dimensions switch + { + 0 => new() { Count = dimensions, One = -1 }, + 1 => new() { Count = dimensions }, + _ => new() { Count = dimensions, Many = new int[dimensions] } + }; +} + +static class IndicesExtensions +{ + // Workaround for lack of ref returns on struct fields. + public static ref int GetItem(this ref Indices indices, int index) + { + switch (indices.Count) + { + case 0: + ThrowHelper.ThrowIndexOutOfRangeException("Cannot index into a 0-dimensional array."); + return ref Unsafe.NullRef(); + case 1: + Debug.Assert(index is 0); + Debug.Assert(indices.Many is null); + return ref indices.One; + default: + return ref indices.Many![index]; + } + } +} + interface IElementOperations { - object CreateCollection(int[] lengths); + object CreateCollection(ReadOnlySpan lengths); int GetCollectionCount(object collection, out int[]? lengths); - Size? GetSizeOrDbNull(SizeContext context, object collection, int[] indices, ref object? writeState); - ValueTask Read(bool async, PgReader reader, bool isDbNull, object collection, int[] indices, CancellationToken cancellationToken = default); - ValueTask Write(bool async, PgWriter writer, object collection, int[] indices, CancellationToken cancellationToken = default); + Size? 
GetSizeOrDbNull(SizeContext context, object collection, Indices indices, ref object? writeState); + ValueTask Read(bool async, PgReader reader, bool isDbNull, object collection, Indices indices, CancellationToken cancellationToken = default); + ValueTask Write(bool async, PgWriter writer, object collection, Indices indices, CancellationToken cancellationToken = default); } readonly struct PgArrayConverter { - internal const string ReadNonNullableCollectionWithNullsExceptionMessage = "Cannot read a non-nullable collection of elements because the returned array contains nulls. Call GetFieldValue with a nullable collection type instead."; + public const string ReadNonNullableCollectionWithNullsExceptionMessage = + "Cannot read a non-nullable collection of elements because the returned array contains nulls. Call GetFieldValue with a nullable collection type instead."; + public const int MaxDimensions = 8; readonly IElementOperations _elemOps; readonly int? _expectedDimensions; @@ -40,20 +78,20 @@ public PgArrayConverter(IElementOperations elemOps, bool elemTypeDbNullable, int _bufferRequirements = bufferRequirements; } - bool IsDbNull(object values, int[] indices) + bool IsDbNull(object values, Indices indices) { object? state = null; return _elemOps.GetSizeOrDbNull(new(DataFormat.Binary, _bufferRequirements.Write), values, indices, ref state) is null; } - Size GetElemsSize(object values, (Size, object?)[] elemStates, out bool anyElementState, DataFormat format, int count, int[] indices, int[]? lengths = null) + Size GetElemsSize(object values, (Size, object?)[] elemStates, out bool anyElementState, DataFormat format, int count, Indices indices, int[]? lengths = null) { Debug.Assert(elemStates.Length >= count); var totalSize = Size.Zero; var context = new SizeContext(format, _bufferRequirements.Write); anyElementState = false; - var lastLength = lengths?[lengths.Length - 1] ?? 
count; - ref var lastIndex = ref indices[indices.Length - 1]; + var lastLength = lengths?[^1] ?? count; + ref var lastIndex = ref indices.GetItem(indices.Count - 1); var i = 0; do { @@ -65,16 +103,16 @@ Size GetElemsSize(object values, (Size, object?)[] elemStates, out bool anyEleme totalSize = totalSize.Combine(size ?? 0); } // We can immediately continue if we didn't reach the end of the last dimension. - while (++lastIndex < lastLength || (indices.Length > 1 && CarryIndices(lengths!, indices))); + while (++lastIndex < lastLength || (indices.Count > 1 && CarryIndices(lengths!, indices))); return totalSize; } - Size GetFixedElemsSize(Size elemSize, object values, int count, int[] indices, int[]? lengths = null) + Size GetFixedElemsSize(Size elemSize, object values, int count, Indices indices, int[]? lengths = null) { var nulls = 0; - var lastLength = lengths?[lengths.Length - 1] ?? count; - ref var lastIndex = ref indices[indices.Length - 1]; + var lastLength = lengths?[^1] ?? count; + ref var lastIndex = ref indices.GetItem(indices.Count - 1); if (ElemTypeDbNullable) do { @@ -82,7 +120,7 @@ Size GetFixedElemsSize(Size elemSize, object values, int count, int[] indices, i nulls++; } // We can immediately continue if we didn't reach the end of the last dimension. - while (++lastIndex < lastLength || (indices.Length > 1 && CarryIndices(lengths!, indices))); + while (++lastIndex < lastLength || (indices.Count > 1 && CarryIndices(lengths!, indices))); return (count - nulls) * elemSize.Value; } @@ -98,15 +136,15 @@ public Size GetSize(SizeContext context, object values, ref object? writeState) { var count = _elemOps.GetCollectionCount(values, out var lengths); var dimensions = lengths?.Length ?? 
1; - if (dimensions > 8) - throw new ArgumentException(nameof(values), "Postgres arrays can have at most 8 dimensions."); + if (dimensions > MaxDimensions) + ThrowHelper.ThrowArgumentException($"Postgres arrays can have at most {MaxDimensions} dimensions.", nameof(values)); var formatSize = Size.Create(GetFormatSize(count, dimensions)); if (count is 0) return formatSize; Size elemsSize; - var indices = new int[dimensions]; + var indices = Indices.Create(dimensions); if (_bufferRequirements.Write is { Kind: SizeKind.Exact } req) { elemsSize = GetFixedElemsSize(req, values, count, indices, lengths); @@ -128,16 +166,37 @@ public Size GetSize(SizeContext context, object values, ref object? writeState) sealed class WriteState : MultiWriteState { public required int Count { get; init; } - public required int[] Indices { get; init; } + public required Indices Indices { get; init; } public required int[]? Lengths { get; init; } } + unsafe object ReadDimsAndCreateCollection(PgReader reader, int dimensions, out int lastDimLength) + { + Debug.Assert(!reader.ShouldBuffer((sizeof(int) + sizeof(int)) * dimensions)); + + Span dimLengths = stackalloc int[MaxDimensions]; + lastDimLength = 0; + for (var i = 0; i < dimensions; i++) + { + lastDimLength = reader.ReadInt32(); + _ = reader.ReadInt32(); // Lower bound + dimLengths[i] = lastDimLength; + } + + var collection = _elemOps.CreateCollection(dimLengths.Slice(0, dimensions)); + Debug.Assert(dimensions <= 1 || collection is Array a && a.Rank == dimensions); + return collection; + } + public async ValueTask Read(bool async, PgReader reader, CancellationToken cancellationToken = default) { if (reader.ShouldBuffer(sizeof(int) + sizeof(int) + sizeof(uint))) await reader.Buffer(async, sizeof(int) + sizeof(int) + sizeof(uint), cancellationToken).ConfigureAwait(false); var dimensions = reader.ReadInt32(); + if (dimensions > MaxDimensions) + ThrowHelper.ThrowInvalidOperationException($"Postgres arrays can have at most {MaxDimensions} 
dimensions."); + var containsNulls = reader.ReadInt32() is 1; _ = reader.ReadUInt32(); // Element OID. @@ -154,32 +213,13 @@ public async ValueTask Read(bool async, PgReader reader, CancellationTok if (reader.ShouldBuffer((sizeof(int) + sizeof(int)) * dimensions)) await reader.Buffer(async, (sizeof(int) + sizeof(int)) * dimensions, cancellationToken).ConfigureAwait(false); - var dimLengths = new int[_expectedDimensions ?? dimensions]; - var lastDimLength = 0; - for (var i = 0; i < dimensions; i++) - { - lastDimLength = reader.ReadInt32(); - reader.ReadInt32(); // Lower bound - if (dimLengths.Length is 0) - break; - dimLengths[i] = lastDimLength; - } - - var collection = _elemOps.CreateCollection(dimLengths); - Debug.Assert(dimensions <= 1 || collection is Array a && a.Rank == dimensions); - + var collection = ReadDimsAndCreateCollection(reader, dimensions, out var lastDimLength); if (dimensions is 0 || lastDimLength is 0) return collection; - int[] indices; - // Reuse array for dim <= 1 - if (dimensions == 1) - { - dimLengths[0] = 0; - indices = dimLengths; - } - else - indices = new int[dimensions]; + _ = _elemOps.GetCollectionCount(collection, out var dimLengths); + var indices = Indices.Create(dimensions); + do { if (reader.ShouldBuffer(sizeof(int))) @@ -206,23 +246,24 @@ public async ValueTask Read(bool async, PgReader reader, CancellationTok await _elemOps.Read(async, reader, isDbNull, collection, indices, cancellationToken).ConfigureAwait(false); } // We can immediately continue if we didn't reach the end of the last dimension. 
- while (++indices[indices.Length - 1] < lastDimLength || (dimensions > 1 && CarryIndices(dimLengths, indices))); + while (++indices.GetItem(indices.Count - 1) < lastDimLength || (dimLengths is not null && CarryIndices(dimLengths, indices))); return collection; } - static bool CarryIndices(int[] lengths, int[] indices) + static bool CarryIndices(int[] lengths, Indices indices) { Debug.Assert(lengths.Length > 1); + Debug.Assert(indices.Count > 1); // Find the first dimension from the end that isn't at or past its length, increment it and bring all previous dimensions to zero. - for (var dim = indices.Length - 1; dim >= 0; dim--) + for (var dim = indices.Count - 1; dim >= 0; dim--) { - if (indices[dim] >= lengths[dim] - 1) + if (indices.GetItem(dim) >= lengths[dim] - 1) continue; - indices.AsSpan().Slice(dim + 1).Clear(); - indices[dim]++; + indices.Many.AsSpan().Slice(dim + 1).Clear(); + indices.GetItem(dim)++; return true; } @@ -259,8 +300,9 @@ public async ValueTask Write(bool async, PgWriter writer, object values, Cancell var elemData = state.Data.Array; var indices = state.Indices; - Array.Clear(indices, 0 , indices.Length); - var lastLength = state.Lengths?[state.Lengths.Length - 1] ?? state.Count; + if (indices.Many is not null) + Array.Clear(indices.Many, 0 , indices.Many.Length); + var lastLength = state.Lengths?[^1] ?? state.Count; var i = state.Data.Offset; do { @@ -281,7 +323,7 @@ public async ValueTask Write(bool async, PgWriter writer, object values, Cancell } } // We can immediately continue if we didn't reach the end of the last dimension. - while (++indices[indices.Length - 1] < lastLength || (indices.Length > 1 && CarryIndices(state.Lengths!, indices))); + while (++indices.GetItem(indices.Count - 1) < lastLength || (state.Lengths is not null && CarryIndices(state.Lengths, indices))); } // Using a function pointer here is safe against assembly unloading as the instance reference that the static pointer method lives on is passed along. 
@@ -290,7 +332,7 @@ public async ValueTask Write(bool async, PgWriter writer, object values, Cancell // 1. Add a virtual method and make AwaitTask call into it (bloating the vtable of all derived types). // 2. Using a delegate, meaning we add a static field + an alloc per T + metadata, slightly slower dispatch perf so overall strictly worse as well. [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder))] - public static async ValueTask AwaitTask(Task task, Continuation continuation, object collection, int[] indices) + public static async ValueTask AwaitTask(Task task, Continuation continuation, object collection, Indices indices) { await task.ConfigureAwait(false); continuation.Invoke(task, collection, indices); @@ -302,17 +344,17 @@ public static async ValueTask AwaitTask(Task task, Continuation continuation, ob public readonly unsafe struct Continuation { public object Handle { get; } - readonly delegate* _continuation; + readonly delegate* _continuation; /// A reference to the type that houses the static method points to. /// The continuation - public Continuation(object handle, delegate* continuation) + public Continuation(object handle, delegate* continuation) { Handle = handle; _continuation = continuation; } - public void Invoke(Task task, object collection, int[] indices) => _continuation(task, collection, indices); + public void Invoke(Task task, object collection, Indices indices) => _continuation(task, collection, indices); } } @@ -367,16 +409,23 @@ public override void Write(PgWriter writer, T values) public override ValueTask WriteAsync(PgWriter writer, T values, CancellationToken cancellationToken = default) => _pgArrayConverter.Write(async: true, writer, values, cancellationToken); - protected static int[]? GetLengths(Array array) + protected static int GetLengths(Array array, out int[]? 
lengths) { - if (array.Rank == 1) - return null; + var dimensions = array.Rank; + + if (dimensions is 1) + { + lengths = null; + return array.Length; + } - var lengths = new int[array.Rank]; + lengths = new int[dimensions]; for (var i = 0; i < lengths.Length; i++) lengths[i] = array.GetLength(i); - return lengths; + // If we have a multidim array it may throw an overflow exception for large arrays (LongLength exists for these cases) + // however anything over int.MaxValue wouldn't fit in a parameter anyway so easier to throw here than deal with a long. + return array.Length; } } @@ -391,40 +440,42 @@ public ArrayBasedArrayConverter(PgConverterResolution elemResolution, Type? effe => _elemConverter = elemResolution.GetConverter(); [MethodImpl(MethodImplOptions.AggressiveInlining)] - static TElement? GetValue(object collection, int[] indices) + static TElement? GetValue(object collection, Indices indices) { - switch (indices.Length) + Debug.Assert(indices.Count > 0); + switch (indices.Count) { case 1: - // Justification: avoid the cast overhead for per element calls. + // Justification: exact type Unsafe.As used to avoid the cast overhead for per element calls. Debug.Assert(collection is TElement?[]); - return Unsafe.As(collection)[indices[0]]; + return Unsafe.As(collection)[indices.One]; default: - // Justification: avoid the cast overhead for per element calls. + // Justification: exact type Unsafe.As used to avoid the cast overhead for per element calls. Debug.Assert(collection is Array); - return (TElement?)Unsafe.As(collection).GetValue(indices); + return (TElement?)Unsafe.As(collection).GetValue(indices.Many!); } } [MethodImpl(MethodImplOptions.AggressiveInlining)] - static void SetValue(object collection, int[] indices, TElement? value) + static void SetValue(object collection, Indices indices, TElement? 
value) { - switch (indices.Length) + Debug.Assert(indices.Count > 0); + switch (indices.Count) { case 1: - // Justification: avoid the cast overhead for per element calls. + // Justification: exact type Unsafe.As used to avoid the cast overhead for per element calls. Debug.Assert(collection is TElement?[]); - Unsafe.As(collection)[indices[0]] = value; + Unsafe.As(collection)[indices.One] = value; break; default: - // Justification: avoid the cast overhead for per element calls. + // Justification: exact type Unsafe.As used to avoid the cast overhead for per element calls. Debug.Assert(collection is Array); - Unsafe.As(collection).SetValue(value, indices); + Unsafe.As(collection).SetValue(value, indices.Many!); break; } } - object IElementOperations.CreateCollection(int[] lengths) + object IElementOperations.CreateCollection(ReadOnlySpan lengths) => lengths.Length switch { 0 => Array.Empty(), @@ -441,16 +492,12 @@ object IElementOperations.CreateCollection(int[] lengths) }; int IElementOperations.GetCollectionCount(object collection, out int[]? lengths) - { - var array = (Array)collection; - lengths = GetLengths(array); - return array.Length; - } + => GetLengths((Array)collection, out lengths); - Size? IElementOperations.GetSizeOrDbNull(SizeContext context, object collection, int[] indices, ref object? writeState) + Size? IElementOperations.GetSizeOrDbNull(SizeContext context, object collection, Indices indices, ref object? 
writeState) => _elemConverter.GetSizeOrDbNull(context.Format, context.BufferRequirement, GetValue(collection, indices), ref writeState); - ValueTask IElementOperations.Read(bool async, PgReader reader, bool isDbNull, object collection, int[] indices, CancellationToken cancellationToken) + ValueTask IElementOperations.Read(bool async, PgReader reader, bool isDbNull, object collection, Indices indices, CancellationToken cancellationToken) { if (!isDbNull && async && _elemConverter is PgStreamingConverter streamingConverter) return ReadAsync(streamingConverter, reader, collection, indices, cancellationToken); @@ -459,7 +506,7 @@ ValueTask IElementOperations.Read(bool async, PgReader reader, bool isDbNull, ob return new(); } - unsafe ValueTask ReadAsync(PgStreamingConverter converter, PgReader reader, object collection, int[] indices, CancellationToken cancellationToken) + unsafe ValueTask ReadAsync(PgStreamingConverter converter, PgReader reader, object collection, Indices indices, CancellationToken cancellationToken) { if (converter.ReadAsyncAsTask(reader, cancellationToken, out var result) is { } task) return PgArrayConverter.AwaitTask(task, new(this, &SetResult), collection, indices); @@ -467,7 +514,7 @@ unsafe ValueTask ReadAsync(PgStreamingConverter converter, PgReader re SetValue(collection, indices, result); return new(); - static void SetResult(Task task, object collection, int[] indices) + static void SetResult(Task task, object collection, Indices indices) { // Justification: exact type Unsafe.As used to reduce generic duplication cost. 
Debug.Assert(task is Task); @@ -476,7 +523,7 @@ static void SetResult(Task task, object collection, int[] indices) } } - ValueTask IElementOperations.Write(bool async, PgWriter writer, object collection, int[] indices, CancellationToken cancellationToken) + ValueTask IElementOperations.Write(bool async, PgWriter writer, object collection, Indices indices, CancellationToken cancellationToken) { if (async) return _elemConverter.WriteAsync(writer, GetValue(collection, indices)!, cancellationToken); @@ -511,7 +558,7 @@ static void SetValue(object collection, int index, TElement? value) list.Insert(index, value); } - object IElementOperations.CreateCollection(int[] lengths) + object IElementOperations.CreateCollection(ReadOnlySpan lengths) => new List(lengths.Length is 0 ? 0 : lengths[0]); int IElementOperations.GetCollectionCount(object collection, out int[]? lengths) @@ -520,43 +567,44 @@ int IElementOperations.GetCollectionCount(object collection, out int[]? lengths) return ((IList)collection).Count; } - Size? IElementOperations.GetSizeOrDbNull(SizeContext context, object collection, int[] indices, ref object? writeState) - => _elemConverter.GetSizeOrDbNull(context.Format, context.BufferRequirement, GetValue(collection, indices[0]), ref writeState); + Size? IElementOperations.GetSizeOrDbNull(SizeContext context, object collection, Indices indices, ref object? 
writeState) + => _elemConverter.GetSizeOrDbNull(context.Format, context.BufferRequirement, GetValue(collection, indices.One), ref writeState); - ValueTask IElementOperations.Read(bool async, PgReader reader, bool isDbNull, object collection, int[] indices, CancellationToken cancellationToken) + ValueTask IElementOperations.Read(bool async, PgReader reader, bool isDbNull, object collection, Indices indices, CancellationToken cancellationToken) { - Debug.Assert(indices.Length is 1); + Debug.Assert(indices.Count is 1); if (!isDbNull && async && _elemConverter is PgStreamingConverter streamingConverter) return ReadAsync(streamingConverter, reader, collection, indices, cancellationToken); - SetValue(collection, indices[0], isDbNull ? default : _elemConverter.Read(reader)); + SetValue(collection, indices.One, isDbNull ? default : _elemConverter.Read(reader)); return new(); } - unsafe ValueTask ReadAsync(PgStreamingConverter converter, PgReader reader, object collection, int[] indices, CancellationToken cancellationToken) + unsafe ValueTask ReadAsync(PgStreamingConverter converter, PgReader reader, object collection, Indices indices, CancellationToken cancellationToken) { + Debug.Assert(indices.Count is 1); if (converter.ReadAsyncAsTask(reader, cancellationToken, out var result) is { } task) return PgArrayConverter.AwaitTask(task, new(this, &SetResult), collection, indices); - SetValue(collection, indices[0], result); + SetValue(collection, indices.One, result); return new(); - static void SetResult(Task task, object collection, int[] indices) + static void SetResult(Task task, object collection, Indices indices) { // Justification: exact type Unsafe.As used to reduce generic duplication cost. Debug.Assert(task is Task); // Using .Result on ValueTask is equivalent to GetAwaiter().GetResult(), this removes TaskAwaiter rooting. 
- SetValue(collection, indices[0], new ValueTask(task: Unsafe.As>(task)).Result); + SetValue(collection, indices.One, new ValueTask(task: Unsafe.As>(task)).Result); } } - ValueTask IElementOperations.Write(bool async, PgWriter writer, object collection, int[] indices, CancellationToken cancellationToken) + ValueTask IElementOperations.Write(bool async, PgWriter writer, object collection, Indices indices, CancellationToken cancellationToken) { - Debug.Assert(indices.Length is 1); + Debug.Assert(indices.Count is 1); if (async) - return _elemConverter.WriteAsync(writer, GetValue(collection, indices[0])!, cancellationToken); + return _elemConverter.WriteAsync(writer, GetValue(collection, indices.One)!, cancellationToken); - _elemConverter.Write(writer, GetValue(collection, indices[0])!); + _elemConverter.Write(writer, GetValue(collection, indices.One)!); return new(); } } diff --git a/test/Npgsql.Tests/Types/ArrayTests.cs b/test/Npgsql.Tests/Types/ArrayTests.cs index a567e4891e..187c71d646 100644 --- a/test/Npgsql.Tests/Types/ArrayTests.cs +++ b/test/Npgsql.Tests/Types/ArrayTests.cs @@ -65,7 +65,7 @@ public async Task Throws_too_many_dimensions() cmd.Parameters.AddWithValue("p", new int[1, 1, 1, 1, 1, 1, 1, 1, 1]); // 9 dimensions Assert.That( () => cmd.ExecuteScalarAsync(), - Throws.Exception.TypeOf().With.Message.EqualTo("values (Parameter 'Postgres arrays can have at most 8 dimensions.')")); + Throws.Exception.TypeOf().With.Message.EqualTo("Postgres arrays can have at most 8 dimensions. 
(Parameter 'values')")); } [Test, Description("Checks that PG arrays containing nulls are returned as set via ValueTypeArrayMode.")] From 450f9002bacce6b388b4735cb0354543001b912a Mon Sep 17 00:00:00 2001 From: DruAtNexee <168309909+DruAtNexee@users.noreply.github.com> Date: Fri, 28 Jun 2024 09:06:59 -0400 Subject: [PATCH 432/761] Improve Schema retrieval performance by eliminating the use of view (#5685) --- src/Npgsql/Schema/DbColumnSchemaGenerator.cs | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/src/Npgsql/Schema/DbColumnSchemaGenerator.cs b/src/Npgsql/Schema/DbColumnSchemaGenerator.cs index 300001e72d..b8b957041c 100644 --- a/src/Npgsql/Schema/DbColumnSchemaGenerator.cs +++ b/src/Npgsql/Schema/DbColumnSchemaGenerator.cs @@ -35,7 +35,12 @@ static string GenerateColumnsQuery(Version pgVersion, string columnFieldFilter) {(pgVersion.IsGreaterOrEqual(10) ? "attidentity != ''" : "FALSE")} AS isidentity, CASE WHEN typ.typtype = 'd' THEN typ.typtypmod ELSE atttypmod END AS typmod, CASE WHEN atthasdef THEN (SELECT pg_get_expr(adbin, cls.oid) FROM pg_attrdef WHERE adrelid = cls.oid AND adnum = attr.attnum) ELSE NULL END AS default, - CASE WHEN col.is_updatable = 'YES' THEN true ELSE false END AS is_updatable, + CASE WHEN ((cls.relkind = ANY (ARRAY['r'::""char"", 'p'::""char""])) + OR ((cls.relkind = ANY (ARRAY['v'::""char"", 'f'::""char""])) + AND pg_column_is_updatable((cls.oid)::regclass, attr.attnum, false))) + AND attr.attidentity NOT IN ('a') THEN 'true'::boolean + ELSE 'false'::boolean + END AS is_updatable, EXISTS ( SELECT * FROM pg_index WHERE pg_index.indrelid = cls.oid AND @@ -53,9 +58,6 @@ FROM pg_attribute AS attr JOIN pg_type AS typ ON attr.atttypid = typ.oid JOIN pg_class AS cls ON cls.oid = attr.attrelid JOIN pg_namespace AS ns ON ns.oid = cls.relnamespace -LEFT OUTER JOIN information_schema.columns AS col ON col.table_schema = nspname AND - col.table_name = relname AND - col.column_name = attname WHERE atttypid <> 0 AND 
relkind IN ('r', 'v', 'm') AND @@ -80,9 +82,6 @@ FROM pg_attribute AS attr JOIN pg_type AS typ ON attr.atttypid = typ.oid JOIN pg_class AS cls ON cls.oid = attr.attrelid JOIN pg_namespace AS ns ON ns.oid = cls.relnamespace -LEFT OUTER JOIN information_schema.columns AS col ON col.table_schema = nspname AND - col.table_name = relname AND - col.column_name = attname WHERE atttypid <> 0 AND relkind IN ('r', 'v', 'm') AND From a1459d5117aa2d3ef417230f43056e0f639ac98a Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Fri, 28 Jun 2024 15:29:36 +0200 Subject: [PATCH 433/761] Add missing EnableX methods to slim builder (#5682) Fixes #5733 --- src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 34 +++++++++++++++++++++++ src/Npgsql/PublicAPI.Unshipped.txt | 3 ++ 2 files changed, 37 insertions(+) diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index bc1026b297..31a82dc04d 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -477,6 +477,39 @@ public NpgsqlSlimDataSourceBuilder EnableIntegratedSecurity() return this; } + /// + /// Sets up network mappings. This allows mapping PhysicalAddress, IPAddress, NpgsqlInet and NpgsqlCidr types + /// to PostgreSQL macaddr, macaddr8, inet and cidr types. + /// + /// The same builder instance so that multiple calls can be chained. + public NpgsqlSlimDataSourceBuilder EnableNetworkTypes() + { + _resolverChainBuilder.AppendResolverFactory(new NetworkTypeInfoResolverFactory()); + return this; + } + + /// + /// Sets up network mappings. This allows mapping types like NpgsqlPoint and NpgsqlPath + /// to PostgreSQL point, path and so on types. + /// + /// The same builder instance so that multiple calls can be chained. + public NpgsqlSlimDataSourceBuilder EnableGeometricTypes() + { + _resolverChainBuilder.AppendResolverFactory(new GeometricTypeInfoResolverFactory()); + return this; + } + + /// + /// Sets up System.Text.Json mappings. 
This allows mapping JsonDocument and JsonElement types to PostgreSQL json and jsonb + /// types. + /// + /// The same builder instance so that multiple calls can be chained. + public NpgsqlSlimDataSourceBuilder EnableJsonTypes() + { + _resolverChainBuilder.AppendResolverFactory(() => new JsonTypeInfoResolverFactory(JsonSerializerOptions)); + return this; + } + /// /// Sets up dynamic System.Text.Json mappings. This allows mapping arbitrary .NET types to PostgreSQL json and jsonb /// types, as well as and its derived types. @@ -490,6 +523,7 @@ public NpgsqlSlimDataSourceBuilder EnableIntegratedSecurity() /// /// Due to the dynamic nature of these mappings, they are not compatible with NativeAOT or trimming. /// + /// The same builder instance so that multiple calls can be chained. [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] [RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] public NpgsqlSlimDataSourceBuilder EnableDynamicJson( diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index ab058de62d..e152ada722 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -1 +1,4 @@ #nullable enable +Npgsql.NpgsqlSlimDataSourceBuilder.EnableGeometricTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableJsonTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableNetworkTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! 
From 447387cdb0281224a85be385fe9b75820a5b6f18 Mon Sep 17 00:00:00 2001 From: Ede Meijer Date: Fri, 28 Jun 2024 15:37:52 +0200 Subject: [PATCH 434/761] Fix connect timeout when resolving to multiple IPs (#5739) --- src/Npgsql/Internal/NpgsqlConnector.cs | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index adfa7cb5d8..fe852b6a46 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -995,17 +995,16 @@ Task GetHostAddressesAsync(CancellationToken ct) => : IPAddressesToEndpoints(await TaskTimeoutAndCancellation.ExecuteAsync(GetHostAddressesAsync, timeout, cancellationToken).ConfigureAwait(false), Port); - // Give each IP an equal share of the remaining time - var perIpTimespan = default(TimeSpan); - var perIpTimeout = timeout; + // Give each endpoint an equal share of the remaining time + var perEndpointTimeout = default(TimeSpan); if (timeout.IsSet) - { - perIpTimespan = new TimeSpan(timeout.CheckAndGetTimeLeft().Ticks / endpoints.Length); - perIpTimeout = new NpgsqlTimeout(perIpTimespan); - } + perEndpointTimeout = timeout.CheckAndGetTimeLeft() / endpoints.Length; for (var i = 0; i < endpoints.Length; i++) { + var endpointTimeout = timeout.IsSet ? 
new NpgsqlTimeout(perEndpointTimeout) : timeout; + Debug.Assert(timeout.IsSet == endpointTimeout.IsSet); + var endpoint = endpoints[i]; ConnectionLogger.LogTrace("Attempting to connect to {Endpoint}", endpoint); var protocolType = @@ -1016,7 +1015,7 @@ Task GetHostAddressesAsync(CancellationToken ct) => var socket = new Socket(endpoint.AddressFamily, SocketType.Stream, protocolType); try { - await OpenSocketConnectionAsync(socket, endpoint, perIpTimeout, cancellationToken).ConfigureAwait(false); + await OpenSocketConnectionAsync(socket, endpoint, endpointTimeout, cancellationToken).ConfigureAwait(false); SetSocketOptions(socket); _socket = socket; ConnectedEndPoint = endpoint; From 3592cee555af62aee78752e7cd5a6c694929d2db Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Fri, 28 Jun 2024 16:37:27 +0200 Subject: [PATCH 435/761] Column seeking rework (#5476) --- src/Npgsql/Internal/NpgsqlConnector.cs | 2 +- .../Internal/NpgsqlReadBuffer.Stream.cs | 19 +- src/Npgsql/Internal/NpgsqlReadBuffer.cs | 29 +- src/Npgsql/Internal/PgBufferedConverter.cs | 4 +- src/Npgsql/Internal/PgReader.cs | 352 +++++++++--------- src/Npgsql/NpgsqlBinaryExporter.cs | 38 +- src/Npgsql/NpgsqlDataReader.cs | 311 ++++++---------- src/Npgsql/NpgsqlNestedDataReader.cs | 14 +- src/Npgsql/NpgsqlRawCopyStream.cs | 2 +- .../Replication/PgOutput/ReplicationValue.cs | 18 +- .../Replication/ReplicationConnection.cs | 2 +- test/Npgsql.Tests/ReaderTests.cs | 2 +- 12 files changed, 358 insertions(+), 435 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index fe852b6a46..72a2d571f2 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -1295,7 +1295,7 @@ internal ValueTask ReadMessage( { if (dataRowLoadingMode == DataRowLoadingMode.Skip) { - await ReadBuffer.Skip(len, async).ConfigureAwait(false); + await ReadBuffer.Skip(async, len).ConfigureAwait(false); continue; } } diff --git 
a/src/Npgsql/Internal/NpgsqlReadBuffer.Stream.cs b/src/Npgsql/Internal/NpgsqlReadBuffer.Stream.cs index 78e17d4a82..95b6c712f8 100644 --- a/src/Npgsql/Internal/NpgsqlReadBuffer.Stream.cs +++ b/src/Npgsql/Internal/NpgsqlReadBuffer.Stream.cs @@ -17,6 +17,7 @@ internal sealed class ColumnStream : Stream int _read; bool _canSeek; bool _commandScoped; + bool _consumeOnDispose; /// Does not throw ODE. internal int CurrentLength { get; private set; } internal bool IsDisposed { get; private set; } @@ -28,7 +29,7 @@ internal ColumnStream(NpgsqlConnector connector) IsDisposed = true; } - internal void Init(int len, bool canSeek, bool commandScoped) + internal void Init(int len, bool canSeek, bool commandScoped, bool consumeOnDispose = true) { Debug.Assert(!canSeek || _buf.ReadBytesLeft >= len, "Seekable stream constructed but not all data is in buffer (sequential)"); @@ -41,6 +42,7 @@ internal void Init(int len, bool canSeek, bool commandScoped) _read = 0; _commandScoped = commandScoped; + _consumeOnDispose = consumeOnDispose; IsDisposed = false; } @@ -195,22 +197,25 @@ void CheckDisposed() } protected override void Dispose(bool disposing) - => DisposeAsync(disposing, async: false).GetAwaiter().GetResult(); + { + if (disposing) + DisposeCore(async: false).GetAwaiter().GetResult(); + } public override ValueTask DisposeAsync() - => DisposeAsync(disposing: true, async: true); + => DisposeCore(async: true); - async ValueTask DisposeAsync(bool disposing, bool async) + async ValueTask DisposeCore(bool async) { - if (IsDisposed || !disposing) + if (IsDisposed) return; - if (!_connector.IsBroken) + if (_consumeOnDispose && !_connector.IsBroken) { var pos = _buf.CumulativeReadPosition - _startPos; var remaining = checked((int)(CurrentLength - pos)); if (remaining > 0) - await _buf.Skip(remaining, async).ConfigureAwait(false); + await _buf.Skip(async, remaining).ConfigureAwait(false); } IsDisposed = true; diff --git a/src/Npgsql/Internal/NpgsqlReadBuffer.cs 
b/src/Npgsql/Internal/NpgsqlReadBuffer.cs index 139594e25a..bd878ed4b0 100644 --- a/src/Npgsql/Internal/NpgsqlReadBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlReadBuffer.cs @@ -411,8 +411,29 @@ internal NpgsqlReadBuffer AllocateOversize(int count) } /// - /// Does not perform any I/O - assuming that the bytes to be skipped are in the memory buffer. + /// Skip a given number of bytes. /// + internal void Skip(int len, bool allowIO) + { + Debug.Assert(len >= 0); + + if (allowIO && len > ReadBytesLeft) + { + len -= ReadBytesLeft; + while (len > Size) + { + ResetPosition(); + Ensure(Size); + len -= Size; + } + ResetPosition(); + Ensure(len); + } + + Debug.Assert(ReadBytesLeft >= len); + ReadPosition += len; + } + internal void Skip(int len) { Debug.Assert(ReadBytesLeft >= len); @@ -422,7 +443,7 @@ internal void Skip(int len) /// /// Skip a given number of bytes. /// - public async Task Skip(int len, bool async) + public async Task Skip(bool async, int len) { Debug.Assert(len >= 0); @@ -658,11 +679,11 @@ static async ValueTask ReadAsyncLong(NpgsqlReadBuffer buffer, bool commandS } ColumnStream? _lastStream; - public ColumnStream CreateStream(int len, bool canSeek) + public ColumnStream CreateStream(int len, bool canSeek, bool consumeOnDispose = true) { if (_lastStream is not { IsDisposed: true }) _lastStream = new ColumnStream(Connector); - _lastStream.Init(len, canSeek, !Connector.LongRunningConnection); + _lastStream.Init(len, canSeek, !Connector.LongRunningConnection, consumeOnDispose); return _lastStream; } diff --git a/src/Npgsql/Internal/PgBufferedConverter.cs b/src/Npgsql/Internal/PgBufferedConverter.cs index d7b673fb7c..9fd4644c56 100644 --- a/src/Npgsql/Internal/PgBufferedConverter.cs +++ b/src/Npgsql/Internal/PgBufferedConverter.cs @@ -18,8 +18,8 @@ public override Size GetSize(SizeContext context, T value, ref object? writeStat public sealed override T Read(PgReader reader) { - // We check IsAtStart first to speed up primitive reads. 
- if (!reader.IsAtStart && reader.ShouldBufferCurrent()) + // We check FieldAtStart to speed up simple value reads, as field level buffering was handled by reader.StartRead() already. + if (!reader.FieldAtStart && reader.ShouldBufferCurrent()) ThrowIORequired(reader.CurrentBufferRequirement); return ReadCore(reader); diff --git a/src/Npgsql/Internal/PgReader.cs b/src/Npgsql/Internal/PgReader.cs index 90f5b53e14..7fbaa695cd 100644 --- a/src/Npgsql/Internal/PgReader.cs +++ b/src/Npgsql/Internal/PgReader.cs @@ -13,6 +13,8 @@ namespace Npgsql.Internal; [Experimental(NpgsqlDiagnostics.ConvertersExperimental)] public class PgReader { + const int UninitializedSentinel = -1; + // We don't want to add a ton of memory pressure for large strings. internal const int MaxPreparedTextReaderSize = 1024 * 64; @@ -50,28 +52,36 @@ public class PgReader internal PgReader(NpgsqlReadBuffer buffer) { _buffer = buffer; - _fieldStartPos = -1; - _currentSize = -1; + _fieldStartPos = UninitializedSentinel; + _currentSize = UninitializedSentinel; } - internal long FieldStartPos => _fieldStartPos; - internal int FieldSize => _fieldSize; - internal bool Initialized => _fieldStartPos is not -1; - internal int FieldOffset => (int)(_buffer.CumulativeReadPosition - _fieldStartPos); - internal int FieldRemaining => FieldSize - FieldOffset; + internal bool Initialized => _fieldStartPos is not UninitializedSentinel; + int FieldOffset => (int)(_buffer.CumulativeReadPosition - _fieldStartPos); + int FieldSize => _fieldSize; + int FieldRemaining => FieldSize - FieldOffset; + + internal bool FieldIsDbNull => FieldSize is -1; + internal bool FieldAtStart => FieldOffset is 0; + + internal bool IsFieldConsumed(int offset) => FieldOffset > offset; - bool HasCurrent => _currentSize is not -1; - int CurrentSize => HasCurrent ? 
_currentSize : _fieldSize; + // TODO refactor out + internal long GetFieldStartPos(NpgsqlNestedDataReader nestedDataReader) => _fieldStartPos; + // TODO refactor out + internal int GetFieldOffset(NpgsqlNestedDataReader nestedDataReader) => FieldOffset; + + internal bool NestedInitialized => _currentSize is not UninitializedSentinel; + int CurrentSize => NestedInitialized ? _currentSize : _fieldSize; public ValueMetadata Current => new() { Size = CurrentSize, Format = _fieldFormat, BufferRequirement = CurrentBufferRequirement }; - public int CurrentRemaining => HasCurrent ? _currentSize - CurrentOffset : FieldRemaining; + public int CurrentRemaining => NestedInitialized ? _currentSize - CurrentOffset : FieldRemaining; - internal Size CurrentBufferRequirement => HasCurrent ? _currentBufferRequirement : _fieldBufferRequirement; + internal Size CurrentBufferRequirement => NestedInitialized ? _currentBufferRequirement : _fieldBufferRequirement; int CurrentOffset => FieldOffset - _currentStartPos; - internal bool IsAtStart => FieldOffset is 0; internal bool Resumable => _resumable; - public bool IsResumed => Resumable && CurrentSize != CurrentRemaining; + public bool IsResumed => Resumable && CurrentOffset > 0; ArrayPool ArrayPool => ArrayPool.Shared; @@ -96,8 +106,8 @@ void CheckBounds(int count) [MethodImpl(MethodImplOptions.NoInlining)] void Core(int count) { - if (count > FieldRemaining) - ThrowHelper.ThrowInvalidOperationException("Attempt to read past the end of the field."); + if (count > CurrentRemaining) + ThrowHelper.ThrowIndexOutOfRangeException("Attempt to read past the end of the current field size."); } } @@ -200,7 +210,7 @@ NpgsqlReadBuffer.ColumnStream GetColumnStream(bool canSeek = false, int? 
length length ??= CurrentRemaining; CheckBounds(length.GetValueOrDefault()); - return _userActiveStream = _buffer.CreateStream(length.GetValueOrDefault(), canSeek && length <= _buffer.ReadBytesLeft); + return _userActiveStream = _buffer.CreateStream(length.GetValueOrDefault(), canSeek && length <= _buffer.ReadBytesLeft, consumeOnDispose: false); } public TextReader GetTextReader(Encoding encoding) @@ -342,14 +352,15 @@ public async ValueTask> ReadBytesAsync(int count, Cancell public void Rewind(int count) { - // Shut down any streaming going on on the column - DisposeUserActiveStream(async: false).GetAwaiter().GetResult(); + if (CurrentOffset < count) + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(count), "Attempt to rewind past the current field start."); if (_buffer.ReadPosition < count) - ThrowHelper.ThrowArgumentOutOfRangeException(nameof(count), "Cannot rewind further than the buffer start"); + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(count), "Attempt to rewind past the buffer start, some of this data is no longer part of the underlying buffer."); - if (CurrentOffset < count) - ThrowHelper.ThrowArgumentOutOfRangeException(nameof(count), "Cannot rewind further than the current field offset"); + // Shut down any streaming going on on the column + if (StreamActive) + DisposeUserActiveStream(async: false).GetAwaiter().GetResult(); _buffer.ReadPosition -= count; } @@ -361,14 +372,10 @@ public void Rewind(int count) /// The stream length, if any async ValueTask DisposeUserActiveStream(bool async) { - if (StreamActive) - { - if (async) - await _userActiveStream.DisposeAsync().ConfigureAwait(false); - else - _userActiveStream.Dispose(); - } - + if (async) + await (_userActiveStream?.DisposeAsync() ?? new()).ConfigureAwait(false); + else + _userActiveStream?.Dispose(); _userActiveStream = null; } @@ -409,7 +416,7 @@ internal void RestartCharsRead() internal void StartCharsRead(int dataOffset, ArraySegment? 
buffer) { if (!Resumable) - ThrowHelper.ThrowInvalidOperationException("Wasn't initialized as resumed"); + ThrowHelper.ThrowInvalidOperationException("Reader was not initialized as resumable"); _charsReadOffset = dataOffset; _charsReadBuffer = buffer; @@ -427,32 +434,16 @@ internal void EndCharsRead() _charsReadBuffer = null; } - internal PgReader Init(int fieldLength, DataFormat format, bool resumable = false) + internal void Init(int fieldSize, DataFormat fieldFormat, bool resumable = false) { if (Initialized) - { - if (resumable) - { - if (Resumable) - return this; - _resumable = true; - } - else - { - if (!IsAtStart) - ThrowHelper.ThrowInvalidOperationException("Cannot be initialized to be non-resumable until a commit is issued."); - _resumable = false; - } - } - - Debug.Assert(!_requiresCleanup, "Reader wasn't properly committed before next init"); + ThrowHelper.ThrowInvalidOperationException("Already initialized"); _fieldStartPos = _buffer.CumulativeReadPosition; - _fieldFormat = format; - _fieldSize = fieldLength; - _resumable = resumable; _fieldConsumed = false; - return this; + _fieldSize = fieldSize; + _fieldFormat = fieldFormat; + _resumable = resumable; } internal void StartRead(Size bufferRequirement) @@ -460,7 +451,11 @@ internal void StartRead(Size bufferRequirement) Debug.Assert(FieldSize >= 0); _fieldBufferRequirement = bufferRequirement; if (ShouldBuffer(bufferRequirement)) - Buffer(bufferRequirement); + BufferNoInlined(bufferRequirement); + + [MethodImpl(MethodImplOptions.NoInlining)] + void BufferNoInlined(Size bufferRequirement) + => Buffer(bufferRequirement); } internal ValueTask StartReadAsync(Size bufferRequirement, CancellationToken cancellationToken) @@ -529,32 +524,60 @@ public NestedReadScope BeginNestedRead(int size, Size bufferRequirement) public ValueTask BeginNestedReadAsync(int size, Size bufferRequirement, CancellationToken cancellationToken = default) => BeginNestedRead(async: true, size, bufferRequirement, cancellationToken); - 
internal void Seek(int offset) + /// Seek origin is the start of Current, e.g. Seek(0) rewinds to the start. + internal int Seek(int offset) { if (CurrentOffset > offset) Rewind(CurrentOffset - offset); else if (CurrentOffset < offset) Consume(offset - CurrentOffset); + + return FieldRemaining; } - internal async ValueTask Consume(bool async, int? count = null, CancellationToken cancellationToken = default) + public void Consume(int? count = null) { if (count <= 0 || FieldSize < 0 || FieldRemaining == 0) return; - var remaining = count ?? CurrentRemaining; - CheckBounds(remaining); + var currentRemaining = CurrentRemaining; + var remaining = count ?? currentRemaining; + + if (count > currentRemaining) + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(count), "Attempt to read past the end of the current field size."); + + if (StreamActive) + DisposeUserActiveStream(async: false).GetAwaiter().GetResult(); var origOffset = FieldOffset; // A breaking exception unwind from a nested scope should not try to consume its remaining data. if (!_buffer.Connector.IsBroken) - await _buffer.Skip(remaining, async).ConfigureAwait(false); + _buffer.Skip(remaining, allowIO: true); Debug.Assert(FieldRemaining == FieldSize - origOffset - remaining); } - public void Consume(int? count = null) => Consume(async: false, count).GetAwaiter().GetResult(); - public ValueTask ConsumeAsync(int? count = null, CancellationToken cancellationToken = default) => Consume(async: true, count, cancellationToken); + public async ValueTask ConsumeAsync(int? count = null, CancellationToken cancellationToken = default) + { + if (count <= 0 || FieldSize < 0 || FieldRemaining == 0) + return; + + var currentRemaining = CurrentRemaining; + var remaining = count ?? 
currentRemaining; + + if (count > currentRemaining) + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(count), "Attempt to read past the end of the current field size."); + + if (StreamActive) + await DisposeUserActiveStream(async: true).ConfigureAwait(false); + + var origOffset = FieldOffset; + // A breaking exception unwind from a nested scope should not try to consume its remaining data. + if (!_buffer.Connector.IsBroken) + await _buffer.Skip(async:true, remaining).ConfigureAwait(false); + + Debug.Assert(FieldRemaining == FieldSize - origOffset - remaining); + } [MemberNotNullWhen(true, nameof(_userActiveStream))] bool StreamActive => _userActiveStream is { IsDisposed: false }; @@ -564,169 +587,134 @@ internal void ThrowIfStreamActive() ThrowHelper.ThrowInvalidOperationException("A stream is already open for this reader"); } - internal bool CommitHasIO(bool resuming) => Initialized && !resuming && FieldRemaining > 0; + [MethodImpl(MethodImplOptions.NoInlining)] + void Cleanup() + { + if (StreamActive) + DisposeUserActiveStream(async: false).GetAwaiter().GetResult(); - [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal void Commit(bool resuming) + if (_pooledArray is not null) + { + ArrayPool.Return(_pooledArray); + _pooledArray = null; + } + + if (_charsReadReader is not null) + { + _charsReadReader.Dispose(); + _charsReadReader = null; + _charsRead = default; + } + + _requiresCleanup = false; + } + + void ResetCurrent() + { + _currentStartPos = 0; + _currentBufferRequirement = default; + _currentSize = UninitializedSentinel; + } + + internal int Restart(bool resumable) { if (!Initialized) - return; + ThrowHelper.ThrowInvalidOperationException("Cannot restart a non-initialized reader."); - if (resuming) + // We resume if the reader was initialized as resumable and we're not explicitly restarting as non-resumable. + // When the field size is DbNullFieldSize (i.e. -1) we're always restarting as resumable, to allow rereading null values endlessly. 
+ if ((Resumable && resumable) || FieldIsDbNull) { - if (!Resumable) - ThrowHelper.ThrowInvalidOperationException("Cannot resume a non-resumable read."); - return; + _resumable = resumable || FieldIsDbNull; + return FieldSize; } - // We don't rely on CurrentRemaining, just to make sure we consume fully in the event of a nested scope not being disposed. - // Also shut down any streaming, pooled arrays etc. - if (_requiresCleanup || (!_fieldConsumed && FieldRemaining > 0)) - { - CommitSlow(); + // From this point on we're not resuming, we're resetting any remaining state and rewinding our position. + + // Shut down any streaming and pooling going on on the column. + if (_requiresCleanup) + Cleanup(); + + if (NestedInitialized) + ResetCurrent(); + + _fieldConsumed = false; + _resumable = resumable; + Seek(0); + + Debug.Assert(Initialized); + return FieldSize; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal void Commit() + { + if (!Initialized) return; - } - _fieldStartPos = -1; + // Shut down any streaming and pooling going on on the column. + if (_requiresCleanup) + Cleanup(); + + if (NestedInitialized) + ResetCurrent(); + + // We make sure to fuly consume any FieldRemaining in the event of an exception or a nested scope not being disposed. + Debug.Assert(!NestedInitialized); + if (!_fieldConsumed && FieldRemaining > 0) + Consume(); + + _fieldStartPos = UninitializedSentinel; Debug.Assert(!Initialized); // These will always be re-initialized by Init() // _fieldSize = default; // _fieldFormat = default; // _resumable = default; - // _fieldCompleted = default; - - if (HasCurrent) - { - _currentStartPos = 0; - _currentBufferRequirement = default; - _currentSize = -1; - Debug.Assert(!HasCurrent); - } - - [MethodImpl(MethodImplOptions.NoInlining)] - void CommitSlow() - { - // Shut down any streaming and pooling going on on the column. 
- if (_requiresCleanup) - { - if (StreamActive) - DisposeUserActiveStream(async: false).GetAwaiter().GetResult(); - - if (_pooledArray is not null) - { - ArrayPool.Return(_pooledArray); - _pooledArray = null; - } - - if (_charsReadReader is not null) - { - _charsReadReader.Dispose(); - _charsReadReader = null; - _charsRead = default; - } - _requiresCleanup = false; - } - - Consume(async: false, count: FieldRemaining).GetAwaiter().GetResult(); - - _fieldStartPos = -1; - Debug.Assert(!Initialized); - - // These will always be re-initialized by Init() - // _fieldSize = default; - // _fieldFormat = default; - // _resumable = default; - // _fieldCompleted = default; - - if (HasCurrent) - { - _currentStartPos = 0; - _currentBufferRequirement = default; - _currentSize = -1; - Debug.Assert(!HasCurrent); - } - } + // _fieldConsumed = default; } [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal ValueTask CommitAsync(bool resuming) + internal ValueTask CommitAsync() { if (!Initialized) return new(); - if (resuming) - { - if (!Resumable) - ThrowHelper.ThrowInvalidOperationException("Cannot resume a non-resumable read."); - return new(); - } + // Shut down any streaming and pooling going on on the column. + if (_requiresCleanup) + Cleanup(); + + if (NestedInitialized) + ResetCurrent(); - // We don't rely on CurrentRemaining, just to make sure we consume fully in the event of a nested scope not being disposed. - // Also shut down any streaming, pooled arrays etc. - if (_requiresCleanup || (!_fieldConsumed && FieldRemaining > 0)) - return CommitSlow(); + // We make sure to fuly consume any FieldRemaining in the event of an exception or a nested scope not being disposed. 
+ Debug.Assert(!NestedInitialized); + if (!_fieldConsumed && FieldRemaining > 0) + return CommitAsync(); - _fieldStartPos = -1; + _fieldStartPos = UninitializedSentinel; Debug.Assert(!Initialized); // These will always be re-initialized by Init() // _fieldSize = default; // _fieldFormat = default; // _resumable = default; - // _fieldCompleted = default; - - if (HasCurrent) - { - _currentStartPos = 0; - _currentBufferRequirement = default; - _currentSize = -1; - Debug.Assert(!HasCurrent); - } + // _fieldConsumed = default; return new(); - async ValueTask CommitSlow() + async ValueTask CommitAsync() { - // Shut down any streaming and pooling going on on the column. - if (_requiresCleanup) - { - if (StreamActive) - await DisposeUserActiveStream(async: true).ConfigureAwait(false); - - if (_pooledArray is not null) - { - ArrayPool.Return(_pooledArray); - _pooledArray = null; - } - - if (_charsReadReader is not null) - { - _charsReadReader.Dispose(); - _charsReadReader = null; - _charsRead = default; - } - _requiresCleanup = false; - } - - await Consume(async: true, count: FieldRemaining).ConfigureAwait(false); - - _fieldStartPos = -1; + await ConsumeAsync().ConfigureAwait(false); + + _fieldStartPos = UninitializedSentinel; Debug.Assert(!Initialized); // These will always be re-initialized by Init() // _fieldSize = default; // _fieldFormat = default; // _resumable = default; - // _fieldCompleted = default; - - if (HasCurrent) - { - _currentStartPos = 0; - _currentBufferRequirement = default; - _currentSize = -1; - Debug.Assert(!HasCurrent); - } + // _fieldConsumed = default; } } diff --git a/src/Npgsql/NpgsqlBinaryExporter.cs b/src/Npgsql/NpgsqlBinaryExporter.cs index 406962d837..171d233af5 100644 --- a/src/Npgsql/NpgsqlBinaryExporter.cs +++ b/src/Npgsql/NpgsqlBinaryExporter.cs @@ -150,9 +150,9 @@ async ValueTask StartRow(bool async, CancellationToken cancellationToken = if (_column >= 0) { if (async) - await PgReader.CommitAsync(resuming: false).ConfigureAwait(false); + 
await PgReader.CommitAsync().ConfigureAwait(false); else - PgReader.Commit(resuming: false); + PgReader.Commit(); _column++; } @@ -258,7 +258,7 @@ T Read(NpgsqlDbType? type) var reader = PgReader; try { - if (reader.FieldSize is -1) + if (reader.FieldIsDbNull) return DbNullOrThrow(); var info = GetInfo(typeof(T), type, out var asObject); @@ -276,7 +276,7 @@ T Read(NpgsqlDbType? type) // Don't delay committing the current column, just do it immediately (as opposed to on the next action: Read, IsNull, Skip). // Zero length columns would otherwise create an edge-case where we'd have to immediately commit as we won't know whether we're at the end. // To guarantee the commit happens in that case we would still need this try finally, at which point it's just better to be consistent. - reader.Commit(resuming: false); + reader.Commit(); } } @@ -292,7 +292,7 @@ async ValueTask ReadAsync(NpgsqlDbType? type, CancellationToken cancellati var reader = PgReader; try { - if (reader.FieldSize is -1) + if (reader.FieldIsDbNull) return DbNullOrThrow(); var info = GetInfo(typeof(T), type, out var asObject); @@ -310,7 +310,7 @@ async ValueTask ReadAsync(NpgsqlDbType? type, CancellationToken cancellati // Don't delay committing the current column, just do it immediately (as opposed to on the next action: Read, IsNull, Skip). // Zero length columns would otherwise create an edge-case where we'd have to immediately commit as we won't know whether we're at the end. // To guarantee the commit happens in that case we would still need this try finally, at which point it's just better to be consistent. 
- await reader.CommitAsync(resuming: false).ConfigureAwait(false); + await reader.CommitAsync().ConfigureAwait(false); } } @@ -364,9 +364,9 @@ public bool IsNull { ThrowIfNotOnRow(); if (!IsInitializedAndAtStart) - return MoveNextColumn(resumableOp: true) is -1; + MoveNextColumn(resumableOp: true); - return PgReader.FieldSize is - 1; + return PgReader.FieldIsDbNull; } } @@ -380,7 +380,7 @@ public void Skip() if (!IsInitializedAndAtStart) MoveNextColumn(resumableOp: false); - PgReader.Commit(resuming: false); + PgReader.Commit(); } /// @@ -395,18 +395,18 @@ public async Task SkipAsync(CancellationToken cancellationToken = default) if (!IsInitializedAndAtStart) await MoveNextColumnAsync(resumableOp: false).ConfigureAwait(false); - await PgReader.CommitAsync(resuming: false).ConfigureAwait(false); + await PgReader.CommitAsync().ConfigureAwait(false); } #endregion #region Utilities - bool IsInitializedAndAtStart => PgReader.Initialized && (PgReader.FieldSize is -1 || PgReader.FieldOffset is 0); + bool IsInitializedAndAtStart => PgReader.Initialized && (PgReader.FieldIsDbNull || PgReader.FieldAtStart); - int MoveNextColumn(bool resumableOp) + void MoveNextColumn(bool resumableOp) { - PgReader.Commit(resuming: false); + PgReader.Commit(); if (_column + 1 == NumColumns) ThrowHelper.ThrowInvalidOperationException("No more columns left in the current row"); @@ -414,12 +414,11 @@ int MoveNextColumn(bool resumableOp) _buf.Ensure(sizeof(int)); var columnLen = _buf.ReadInt32(); PgReader.Init(columnLen, DataFormat.Binary, resumableOp); - return PgReader.FieldSize; } - async ValueTask MoveNextColumnAsync(bool resumableOp) + async ValueTask MoveNextColumnAsync(bool resumableOp) { - await PgReader.CommitAsync(resuming: false).ConfigureAwait(false); + await PgReader.CommitAsync().ConfigureAwait(false); if (_column + 1 == NumColumns) ThrowHelper.ThrowInvalidOperationException("No more columns left in the current row"); @@ -427,7 +426,6 @@ async ValueTask MoveNextColumnAsync(bool 
resumableOp) await _buf.Ensure(sizeof(int), async: true).ConfigureAwait(false); var columnLen = _buf.ReadInt32(); PgReader.Init(columnLen, DataFormat.Binary, resumableOp); - return PgReader.FieldSize; } void ThrowIfNotOnRow() @@ -488,11 +486,11 @@ async ValueTask DisposeAsync(bool async) using var registration = _connector.StartNestedCancellableOperation(attemptPgCancellation: false); // Be sure to commit the reader. if (async) - await PgReader.CommitAsync(resuming: false).ConfigureAwait(false); + await PgReader.CommitAsync().ConfigureAwait(false); else - PgReader.Commit(resuming: false); + PgReader.Commit(); // Finish the current CopyData message - await _buf.Skip(checked((int)(_endOfMessagePos - _buf.CumulativeReadPosition)), async).ConfigureAwait(false); + await _buf.Skip(async, checked((int)(_endOfMessagePos - _buf.CumulativeReadPosition))).ConfigureAwait(false); // Read to the end _connector.SkipUntil(BackendMessageCode.CopyDone); // We intentionally do not pass a CancellationToken since we don't want to cancel cleanup diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 98a3f4e662..f85aa3d4a5 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -637,7 +637,7 @@ void PopulateOutputParameters(NpgsqlParameterCollection parameters) p.Value = pending.Dequeue(); } - PgReader.Commit(resuming: false); + PgReader.Commit(); State = ReaderState.BeforeResult; // Set the state back Buffer.ReadPosition = currentPosition; // Restore position @@ -1377,12 +1377,11 @@ public override int GetValues(object[] values) if (field.DataFormat is DataFormat.Text || (elementType.InternalName != "record" && compositeType == null)) ThrowHelper.ThrowInvalidCastException("GetData() not supported for type " + field.TypeDisplayName); - var columnLength = SeekToColumn(async: false, ordinal, field.DataFormat, resumableOp: true).GetAwaiter().GetResult(); - if (columnLength is -1) + if (SeekToColumn(ordinal, field.DataFormat, 
resumableOp: true) is -1) ThrowHelper.ThrowInvalidCastException_NoValue(field); - if (PgReader.FieldOffset > 0) - PgReader.Rewind(PgReader.FieldOffset); + Debug.Assert(!PgReader.NestedInitialized, "Unexpected nested read active, Seek(0) would seek to the start of the nested data."); + PgReader.Seek(0); var reader = CachedFreeNestedDataReader; if (reader != null) @@ -1426,21 +1425,22 @@ public override long GetBytes(int ordinal, long dataOffset, byte[]? buffer, int if (buffer != null && (length < 0 || length > buffer.Length - bufferOffset)) ThrowHelper.ThrowIndexOutOfRangeException("bufferOffset must be between 0 and {0}", buffer.Length - bufferOffset); - var columnLength = SeekToColumn(async: false, ordinal, field.DataFormat, resumableOp: true).GetAwaiter().GetResult(); - if (columnLength == -1) + if (SeekToColumn(ordinal, field.DataFormat, resumableOp: true) is var columnLength && columnLength is -1) ThrowHelper.ThrowInvalidCastException_NoValue(field); if (buffer is null) return columnLength; - // Move to offset - if (_isSequential && PgReader.FieldOffset > dataOffset) + // Check whether any sequential seek is contractually sound (even though we might be able to satisfy rewinds we make sure we won't). + if (_isSequential && PgReader.IsFieldConsumed((int)dataOffset)) ThrowHelper.ThrowInvalidOperationException("Attempt to read a position in the column which has already been read"); - PgReader.Seek((int)dataOffset); + // Move to offset + Debug.Assert(!PgReader.NestedInitialized, "Unexpected nested read active, Seek(0) would seek to the start of the nested data."); + var remaining = PgReader.Seek((int)dataOffset); // At offset, read into buffer. 
- length = Math.Min(length, PgReader.FieldRemaining); + length = Math.Min(length, remaining); PgReader.ReadBytes(new Span(buffer, bufferOffset, length)); return length; } @@ -1480,6 +1480,7 @@ public Task GetStreamAsync(int ordinal, CancellationToken cancellationTo public override long GetChars(int ordinal, long dataOffset, char[]? buffer, int bufferOffset, int length) { ThrowIfNotInResult(); + // Check whether we have a GetChars implementation for this column type. var field = GetInfo(ordinal, typeof(GetChars), out var converter, out var bufferRequirement, out var asObject); @@ -1490,8 +1491,7 @@ public override long GetChars(int ordinal, long dataOffset, char[]? buffer, int if (buffer != null && (length < 0 || length > buffer.Length - bufferOffset)) ThrowHelper.ThrowIndexOutOfRangeException("bufferOffset must be between 0 and {0}", buffer.Length - bufferOffset); - var columnLength = SeekToColumn(async: false, ordinal, field, resumableOp: true).GetAwaiter().GetResult(); - if (columnLength == -1) + if (SeekToColumn(ordinal, field, resumableOp: true) is -1) ThrowHelper.ThrowInvalidCastException_NoValue(RowDescription[ordinal]); var reader = PgReader; @@ -1562,8 +1562,7 @@ async ValueTask Core(int ordinal, CancellationToken cancellationToken) var field = GetInfo(ordinal, typeof(T), out var converter, out var bufferRequirement, out var asObject); using var registration = Connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); - var columnLength = await SeekToColumn(async: true, ordinal, field).ConfigureAwait(false); - if (columnLength is -1) + if (await SeekToColumnAsync(ordinal, field).ConfigureAwait(false) is -1) return DbNullValueOrThrow(ordinal); if (typeof(T) == typeof(TextReader)) @@ -1585,9 +1584,7 @@ async Task GetStream(int ordinal, CancellationToken cancellationToken) var field = GetDefaultInfo(ordinal, out _, out _); PgReader.ThrowIfStreamActive(); - var columnLength = await SeekToColumn(async: true, ordinal, 
field).ConfigureAwait(false); - - if (columnLength == -1) + if (await SeekToColumnAsync(ordinal, field).ConfigureAwait(false) is -1) return DbNullValueOrThrow(ordinal); return (T)(object)PgReader.GetStream(canSeek: !_isSequential); @@ -1615,11 +1612,7 @@ T GetFieldValueCore(int ordinal) if (typeof(T) == typeof(TextReader)) PgReader.ThrowIfStreamActive(); - var columnLength = - _isSequential - ? SeekToColumnSequential(async: false, ordinal, field).GetAwaiter().GetResult() - : SeekToColumnNonSequential(ordinal, field); - if (columnLength is -1) + if (SeekToColumn(ordinal, field) is -1) return DbNullValueOrThrow(ordinal); Debug.Assert(asObject || converter is PgConverter); @@ -1636,12 +1629,7 @@ T GetStream(int ordinal) var field = GetDefaultInfo(ordinal, out _, out _); PgReader.ThrowIfStreamActive(); - var columnLength = - _isSequential - ? SeekToColumnSequential(async: false, ordinal, field).GetAwaiter().GetResult() - : SeekToColumnNonSequential(ordinal, field); - - if (columnLength == -1) + if (SeekToColumn(ordinal, field) is -1) return DbNullValueOrThrow(ordinal); return (T)(object)PgReader.GetStream(canSeek: !_isSequential); @@ -1661,11 +1649,7 @@ public override object GetValue(int ordinal) { ThrowIfNotInResult(); var field = GetDefaultInfo(ordinal, out var converter, out var bufferRequirement); - var columnLength = - _isSequential - ? 
SeekToColumnSequential(async: false, ordinal, field).GetAwaiter().GetResult() - : SeekToColumnNonSequential(ordinal, field); - if (columnLength == -1) + if (SeekToColumn(ordinal, field) is -1) return DBNull.Value; PgReader.StartRead(bufferRequirement); @@ -1694,8 +1678,7 @@ public override object GetValue(int ordinal) public override bool IsDBNull(int ordinal) { ThrowIfNotInResult(); - return SeekToColumn(async: false, ordinal, RowDescription[ordinal].DataFormat, resumableOp: true).GetAwaiter() - .GetResult() is -1; + return SeekToColumn(ordinal, RowDescription[ordinal].DataFormat, resumableOp: true) is -1; } /// @@ -1718,7 +1701,7 @@ async Task Core(int ordinal, CancellationToken cancellationToken) { ThrowIfNotInResult(); using var registration = Connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); - return await SeekToColumn(async: true, ordinal, RowDescription[ordinal].DataFormat, resumableOp: true).ConfigureAwait(false) is -1; + return await SeekToColumnAsync(ordinal, RowDescription[ordinal].DataFormat, resumableOp: true).ConfigureAwait(false) is -1; } } @@ -1910,203 +1893,127 @@ Task> GetColumnSchema(bool async, Cancellatio #region Seeking - /// - /// Seeks to the given column. The 4-byte length is read and returned. - /// [MethodImpl(MethodImplOptions.AggressiveInlining)] - ValueTask SeekToColumn(bool async, int ordinal, DataFormat dataFormat, bool resumableOp = false) - => _isSequential - ? 
SeekToColumnSequential(async, ordinal, dataFormat, resumableOp) - : new(SeekToColumnNonSequential(ordinal, dataFormat, resumableOp)); - - int SeekToColumnNonSequential(int ordinal, DataFormat dataFormat, bool resumableOp = false) + int SeekToColumn(int ordinal, DataFormat dataFormat, bool resumableOp = false) { - var currentColumn = _column; - var buffer = Buffer; - var pgReader = PgReader; - - // Deals with current column commit and rereads - int columnLength; - if (currentColumn >= 0) - { - if (currentColumn == ordinal) - return HandleReread(pgReader.Resumable && resumableOp); - pgReader.Commit(resuming: false); - } + Debug.Assert(_isRowBuffered || _isSequential); + var reader = PgReader; + var column = _column; - // Deals with forward movement - Debug.Assert(ordinal != currentColumn); - if (ordinal > currentColumn) - { - // Written as a while to be able to increment _column directly after reading into it. - while (_column < ordinal - 1) - { - columnLength = buffer.ReadInt32(); - _column++; - Debug.Assert(columnLength >= -1); - if (columnLength > 0) - buffer.Skip(columnLength); - } - columnLength = buffer.ReadInt32(); - } - else - columnLength = SeekBackwards(); + // Column rereading rules for sequential mode: + // * We never allow rereading if the column didn't get initialized as resumable the previous time + // * If it did get initialized as resumable we only allow rereading when either of the following is true: + // - The op is a resumable one again + // - The op isn't resumable but the field is still entirely unconsumed + if (_isSequential && (column > ordinal || (column == ordinal && (!reader.Resumable || (!resumableOp && !reader.FieldAtStart))))) + ThrowInvalidSequentialSeek(column, ordinal); - pgReader.Init(columnLength, dataFormat, resumableOp); - _column = ordinal; + if (column == ordinal) + return reader.Restart(resumableOp); + reader.Commit(); + var columnLength = BufferSeekToColumn(column, ordinal, !_isRowBuffered); + reader.Init(columnLength, 
dataFormat, resumableOp); return columnLength; - int HandleReread(bool resuming) - { - Debug.Assert(pgReader.Initialized); - var columnLength = pgReader.FieldSize; - pgReader.Commit(resuming); - if (!resuming && columnLength > 0) - buffer.ReadPosition -= columnLength; - pgReader.Init(columnLength, dataFormat, resumableOp); - return columnLength; - } + static void ThrowInvalidSequentialSeek(int column, int ordinal) + => ThrowHelper.ThrowInvalidOperationException( + $"Invalid attempt to read from column ordinal '{ordinal}'. With CommandBehavior.SequentialAccess, " + + $"you may only read from column ordinal '{column}' or greater."); + } - // On the first call to SeekBackwards we'll fill up the columns list as we may need seek positions more than once. - [MethodImpl(MethodImplOptions.NoInlining)] - int SeekBackwards() + ValueTask SeekToColumnAsync(int ordinal, DataFormat dataFormat, bool resumableOp = false) + { + // When the row is buffered or we're rereading previous data no IO will be done. + if (_isRowBuffered || _column >= ordinal) + return new(SeekToColumn(ordinal, dataFormat, resumableOp)); + + return Core(ordinal, dataFormat, resumableOp); + + [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder<>))] + async ValueTask Core(int ordinal, DataFormat dataFormat, bool resumableOp) { - // Backfill the first column. 
- if (_columns.Count is 0) - { - buffer.ReadPosition = _columnsStartPos; - var len = buffer.ReadInt32(); - _columns.Add((buffer.ReadPosition, len)); - } - for (var lastColumnRead = _columns.Count; ordinal >= lastColumnRead; lastColumnRead++) - { - (Buffer.ReadPosition, var lastLen) = _columns[lastColumnRead - 1]; - if (lastLen > 0) - buffer.Skip(lastLen); - var len = Buffer.ReadInt32(); - _columns.Add((Buffer.ReadPosition, len)); - } - (Buffer.ReadPosition, var columnLength) = _columns[ordinal]; + Debug.Assert(!_isRowBuffered && _column < ordinal); + + var reader = PgReader; + await reader.CommitAsync().ConfigureAwait(false); + var columnLength = await BufferSeekToColumnAsync(_column, ordinal, !_isRowBuffered).ConfigureAwait(false); + reader.Init(columnLength, dataFormat, resumableOp); return columnLength; } } - ValueTask SeekToColumnSequential(bool async, int ordinal, DataFormat dataFormat, bool resumableOp = false) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + int BufferSeekToColumn(int column, int ordinal, bool allowIO) { - var reread = _column == ordinal; - // Column rereading rules for sequential mode: - // * We never allow rereading if the column didn't get initialized as resumable the previous time - // * If it did get initialized as resumable we only allow rereading when either of the following is true: - // - The op is a resumable one again - // - The op isn't resumable but the field is still entirely unconsumed - if (ordinal < _column || (reread && (!PgReader.Resumable || (!resumableOp && !PgReader.IsAtStart)))) - ThrowHelper.ThrowInvalidOperationException( - $"Invalid attempt to read from column ordinal '{ordinal}'. 
With CommandBehavior.SequentialAccess, " + - $"you may only read from column ordinal '{_column}' or greater."); + Debug.Assert(column < ordinal || !allowIO); - var committed = false; - if (!PgReader.CommitHasIO(reread)) + if (column >= ordinal) { - var columnLength = PgReader.FieldSize; - PgReader.Commit(reread); - committed = true; - if (reread) - { - PgReader.Init(columnLength, dataFormat, columnLength is -1 || resumableOp); - return new(columnLength); - } + _column = ordinal; + return SeekBackwards(ordinal); + } - if (TrySeekBuffered(ordinal, out columnLength)) - { - PgReader.Init(columnLength, dataFormat, columnLength is -1 || resumableOp); - return new(columnLength); - } + // We know we need at least one iteration, a do while also helps with optimal codegen. + var buffer = Buffer; + var columnLength = 0; + do + { + if (columnLength > 0) + buffer.Skip(columnLength, allowIO); - // If we couldn't consume the column TrySeekBuffered had to stop at, do so now. - if (columnLength > -1) - { - // Resumable: true causes commit to consume without error. - PgReader.Init(columnLength, dataFormat, resumable: true); - committed = false; - } - } + if (allowIO) + buffer.Ensure(sizeof(int)); + columnLength = buffer.ReadInt32(); + Debug.Assert(columnLength >= -1); + } while (++_column < ordinal); - return Core(async, reread, !committed, ordinal, dataFormat, resumableOp); + return columnLength; - [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder<>))] - async ValueTask Core(bool async, bool reread, bool commit, int ordinal, DataFormat dataFormat, bool resumableOp) + // On the first call to SeekBackwards we'll fill up the columns list as we may need seek positions more than once. 
+ [MethodImpl(MethodImplOptions.NoInlining)] + int SeekBackwards(int ordinal) { - if (commit) - { - Debug.Assert(ordinal != _column); - if (async) - await PgReader.CommitAsync(reread).ConfigureAwait(false); - else - PgReader.Commit(reread); - } + var buffer = Buffer; + var columns = _columns; - if (reread) - { - PgReader.Init(PgReader.FieldSize, dataFormat, PgReader.FieldSize is -1 || resumableOp); - return PgReader.FieldSize; - } + (buffer.ReadPosition, var columnLength) = columns.Count is 0 + ? (_columnsStartPos, 0) + : columns[Math.Min(columns.Count -1, ordinal)]; - // Seek to the requested column - int columnLength; - var buffer = Buffer; - // Written as a while to be able to increment _column directly after reading into it. - while (_column < ordinal - 1) + while (columns.Count <= ordinal) { - await buffer.Ensure(4, async).ConfigureAwait(false); - columnLength = buffer.ReadInt32(); - _column++; - Debug.Assert(columnLength >= -1); if (columnLength > 0) - await buffer.Skip(columnLength, async).ConfigureAwait(false); + buffer.Skip(columnLength); + columnLength = buffer.ReadInt32(); + columns.Add((buffer.ReadPosition, columnLength)); } - await buffer.Ensure(4, async).ConfigureAwait(false); - columnLength = buffer.ReadInt32(); - _column = ordinal; - - PgReader.Init(columnLength, dataFormat, resumableOp); return columnLength; } + } + + ValueTask BufferSeekToColumnAsync(int column, int ordinal, bool allowIO) + { + return !allowIO || column >= ordinal ? new(BufferSeekToColumn(column, ordinal, allowIO)) : Core(ordinal); - bool TrySeekBuffered(int ordinal, out int columnLength) + [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder<>))] + async ValueTask Core(int ordinal) { - // Skip over unwanted fields - columnLength = -1; + // We know we need at least one iteration, a do while also helps with optimal codegen. var buffer = Buffer; - // Written as a while to be able to increment _column directly after reading into it. 
- while (_column < ordinal - 1) + var columnLength = 0; + do { - if (buffer.ReadBytesLeft < 4) - { - columnLength = -1; - return false; - } - columnLength = buffer.ReadInt32(); - _column++; - Debug.Assert(columnLength >= -1); if (columnLength > 0) - { - if (buffer.ReadBytesLeft < columnLength) - return false; - buffer.Skip(columnLength); - } - } + await buffer.Skip(async: true, columnLength).ConfigureAwait(false); - if (buffer.ReadBytesLeft < 4) - { - columnLength = -1; - return false; - } + await buffer.EnsureAsync(sizeof(int)).ConfigureAwait(false); + columnLength = buffer.ReadInt32(); + Debug.Assert(columnLength >= -1); + } while (++_column < ordinal); - columnLength = buffer.ReadInt32(); - _column = ordinal; - return true; + return columnLength; } } @@ -2128,9 +2035,9 @@ Task ConsumeRow(bool async) async Task ConsumeRowSequential(bool async) { if (async) - await PgReader.CommitAsync(resuming: false).ConfigureAwait(false); + await PgReader.CommitAsync().ConfigureAwait(false); else - PgReader.Commit(resuming: false); + PgReader.Commit(); // Skip over the remaining columns in the row var buffer = Buffer; @@ -2142,7 +2049,7 @@ async Task ConsumeRowSequential(bool async) _column++; Debug.Assert(columnLength >= -1); if (columnLength > 0) - await buffer.Skip(columnLength, async).ConfigureAwait(false); + await buffer.Skip(async, columnLength).ConfigureAwait(false); } } } @@ -2151,7 +2058,7 @@ async Task ConsumeRowSequential(bool async) void ConsumeBufferedRow() { Debug.Assert(State is ReaderState.InResult or ReaderState.BeforeResult); - PgReader.Commit(resuming: false); + PgReader.Commit(); Buffer.ReadPosition = _dataMsgEnd; } diff --git a/src/Npgsql/NpgsqlNestedDataReader.cs b/src/Npgsql/NpgsqlNestedDataReader.cs index d3b6e37bfd..d6c7e1b7cc 100644 --- a/src/Npgsql/NpgsqlNestedDataReader.cs +++ b/src/Npgsql/NpgsqlNestedDataReader.cs @@ -67,12 +67,12 @@ internal NpgsqlNestedDataReader(NpgsqlDataReader outermostReader, NpgsqlNestedDa _outerNestedReader = 
outerNestedReader; _depth = depth; _compositeType = compositeType; - _startPos = PgReader.FieldStartPos; + _startPos = PgReader.GetFieldStartPos(this); } internal void Init(PostgresCompositeType? compositeType) { - _startPos = PgReader.FieldStartPos; + _startPos = PgReader.GetFieldStartPos(this); _columns.Clear(); _numRows = 0; _nextRowIndex = 0; @@ -102,13 +102,13 @@ internal void InitArray() if (_numRows > 0) PgReader.ReadInt32(); // Length of first row - _nextRowBufferPos = PgReader.FieldOffset; + _nextRowBufferPos = PgReader.GetFieldOffset(this); } internal void InitSingleRow() { _numRows = 1; - _nextRowBufferPos = PgReader.FieldOffset; + _nextRowBufferPos = PgReader.GetFieldOffset(this); } /// @@ -150,7 +150,7 @@ public override bool HasRows /// public override bool IsClosed => _readerState == ReaderState.Closed || _readerState == ReaderState.Disposed - || _outermostReader.IsClosed || PgReader.FieldStartPos != _startPos; + || _outermostReader.IsClosed || PgReader.GetFieldStartPos(this) != _startPos; /// public override int RecordsAffected => -1; @@ -376,7 +376,7 @@ public override bool Read() for (var i = 0; i < numColumns; i++) { var typeOid = PgReader.ReadUInt32(); - var bufferPos = PgReader.FieldOffset; + var bufferPos = PgReader.GetFieldOffset(this); if (i >= _columns.Count) { var pgType = SerializerOptions.DatabaseInfo.GetPostgresType(typeOid); @@ -396,7 +396,7 @@ public override bool Read() } _columns.RemoveRange(numColumns, _columns.Count - numColumns); - _nextRowBufferPos = PgReader.FieldOffset; + _nextRowBufferPos = PgReader.GetFieldOffset(this); _readerState = ReaderState.OnRow; return true; diff --git a/src/Npgsql/NpgsqlRawCopyStream.cs b/src/Npgsql/NpgsqlRawCopyStream.cs index ffae8e9fc4..e91633d053 100644 --- a/src/Npgsql/NpgsqlRawCopyStream.cs +++ b/src/Npgsql/NpgsqlRawCopyStream.cs @@ -379,7 +379,7 @@ async ValueTask DisposeAsync(bool disposing, bool async) { if (_leftToReadInDataMsg > 0) { - await _readBuf.Skip(_leftToReadInDataMsg, 
async).ConfigureAwait(false); + await _readBuf.Skip(async, _leftToReadInDataMsg).ConfigureAwait(false); } _connector.SkipUntil(BackendMessageCode.ReadyForQuery); } diff --git a/src/Npgsql/Replication/PgOutput/ReplicationValue.cs b/src/Npgsql/Replication/PgOutput/ReplicationValue.cs index aed44411d7..c5d1772745 100644 --- a/src/Npgsql/Replication/PgOutput/ReplicationValue.cs +++ b/src/Npgsql/Replication/PgOutput/ReplicationValue.cs @@ -111,7 +111,8 @@ public async ValueTask Get(CancellationToken cancellationToken = default) using var registration = _readBuffer.Connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); - var reader = PgReader.Init(Length, _fieldDescription.DataFormat); + var reader = PgReader; + reader.Init(Length, _fieldDescription.DataFormat); await reader.StartReadAsync(info.ConverterInfo.BufferRequirement, cancellationToken).ConfigureAwait(false); var result = info.AsObject ? (T)await info.ConverterInfo.Converter.ReadAsObjectAsync(reader, cancellationToken).ConfigureAwait(false) @@ -146,7 +147,8 @@ public Stream GetStream() throw new InvalidCastException($"Column '{_fieldDescription.Name}' is an unchanged TOASTed value (actual value not sent)."); } - var reader = _readBuffer.PgReader.Init(Length, _fieldDescription.DataFormat); + var reader = PgReader; + reader.Init(Length, _fieldDescription.DataFormat); return reader.GetStream(canSeek: false); } @@ -170,7 +172,8 @@ public TextReader GetTextReader() throw new InvalidCastException($"Column '{_fieldDescription.Name}' is an unchanged TOASTed value (actual value not sent)."); } - var reader = PgReader.Init(Length, _fieldDescription.DataFormat); + var reader = PgReader; + reader.Init(Length, _fieldDescription.DataFormat); reader.StartRead(info.ConverterInfo.BufferRequirement); var result = (TextReader)info.ConverterInfo.Converter.ReadAsObject(reader); reader.EndRead(); @@ -182,10 +185,11 @@ internal async Task Consume(CancellationToken cancellationToken) if 
(_isConsumed) return; - if (!PgReader.Initialized) - PgReader.Init(Length, _fieldDescription.DataFormat); - await PgReader.ConsumeAsync(cancellationToken: cancellationToken).ConfigureAwait(false); - await PgReader.CommitAsync(resuming: false).ConfigureAwait(false); + var reader = PgReader; + if (!reader.Initialized) + reader.Init(Length, _fieldDescription.DataFormat); + await reader.ConsumeAsync(cancellationToken: cancellationToken).ConfigureAwait(false); + await reader.CommitAsync().ConfigureAwait(false); _isConsumed = true; } diff --git a/src/Npgsql/Replication/ReplicationConnection.cs b/src/Npgsql/Replication/ReplicationConnection.cs index b7b97e6d12..44fa4b4ac8 100644 --- a/src/Npgsql/Replication/ReplicationConnection.cs +++ b/src/Npgsql/Replication/ReplicationConnection.cs @@ -499,7 +499,7 @@ internal async IAsyncEnumerator StartReplicationInternal( // Our consumer may not have read the stream to the end, but it might as well have been us // ourselves bypassing the stream and reading directly from the buffer in StartReplication() if (!columnStream.IsDisposed && columnStream.Position < columnStream.Length && !bypassingStream) - await buf.Skip(checked((int)(columnStream.Length - columnStream.Position)), true).ConfigureAwait(false); + await buf.Skip(async: true, checked((int)(columnStream.Length - columnStream.Position))).ConfigureAwait(false); continue; } diff --git a/test/Npgsql.Tests/ReaderTests.cs b/test/Npgsql.Tests/ReaderTests.cs index 33a5127d6f..126276d2e8 100644 --- a/test/Npgsql.Tests/ReaderTests.cs +++ b/test/Npgsql.Tests/ReaderTests.cs @@ -1880,7 +1880,7 @@ public async Task EndRead_StreamActive([Values]bool async) Assert.DoesNotThrow(() => reader.EndRead()); } - reader.Commit(resuming: false); + reader.Commit(); } [Test, Description("Tests that everything goes well when a type handler generates a NpgsqlSafeReadException")] From d36b2f5bfd5d6ebb07b36087ec882dad380caf3e Mon Sep 17 00:00:00 2001 From: Brar Piening Date: Fri, 28 Jun 2024 18:43:39 +0200 
Subject: [PATCH 436/761] Add the field (column) name to ReplicationValue (#5719) * Add the field (column) name to ReplicationValue and add test for field information accessor methods Closes #5718 --- src/Npgsql/PublicAPI.Unshipped.txt | 1 + .../Replication/PgOutput/ReplicationValue.cs | 6 ++++++ .../Replication/PgOutputReplicationTests.cs | 15 +++++++++++++++ 3 files changed, 22 insertions(+) diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index e152ada722..50b8d6c3da 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -2,3 +2,4 @@ Npgsql.NpgsqlSlimDataSourceBuilder.EnableGeometricTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableJsonTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableNetworkTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.Replication.PgOutput.ReplicationValue.GetFieldName() -> string! diff --git a/src/Npgsql/Replication/PgOutput/ReplicationValue.cs b/src/Npgsql/Replication/PgOutput/ReplicationValue.cs index c5d1772745..5f7d76b418 100644 --- a/src/Npgsql/Replication/PgOutput/ReplicationValue.cs +++ b/src/Npgsql/Replication/PgOutput/ReplicationValue.cs @@ -76,6 +76,12 @@ public bool IsUnchangedToastedValue /// The data type of the specified column. public Type GetFieldType() => _fieldDescription.FieldType; + /// + /// Gets the name of the specified column. + /// + /// The name of the specified column. + public string GetFieldName() => _fieldDescription.Name; + /// /// Gets the value of the specified column as a type. 
/// diff --git a/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs b/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs index e3d81a63f5..a32528452a 100644 --- a/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs +++ b/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs @@ -125,12 +125,27 @@ public Task Insert() Assert.That(insertMsg.Relation, Is.SameAs(relationMsg)); var columnEnumerator = insertMsg.NewRow.GetAsyncEnumerator(); Assert.That(await columnEnumerator.MoveNextAsync(), Is.True); + var postgresType = columnEnumerator.Current.GetPostgresType(); + Assert.That(postgresType.FullName, Is.EqualTo("pg_catalog.integer")); + Assert.That(columnEnumerator.Current.GetDataTypeName(), Is.EqualTo("integer")); + Assert.That(columnEnumerator.Current.GetFieldName(), Is.EqualTo("id")); if (IsBinary) + { + Assert.That(columnEnumerator.Current.GetFieldType(), Is.EqualTo(typeof(int))); Assert.That(await columnEnumerator.Current.Get(), Is.EqualTo(1)); + } else + { + Assert.That(columnEnumerator.Current.GetFieldType(), Is.EqualTo(typeof(string))); Assert.That(await columnEnumerator.Current.Get(), Is.EqualTo("1")); + } Assert.That(await columnEnumerator.MoveNextAsync(), Is.True); + postgresType = columnEnumerator.Current.GetPostgresType(); + Assert.That(postgresType.FullName, Is.EqualTo("pg_catalog.text")); + Assert.That(columnEnumerator.Current.GetDataTypeName(), Is.EqualTo("text")); + Assert.That(columnEnumerator.Current.GetFieldType(), Is.EqualTo(typeof(string))); + Assert.That(columnEnumerator.Current.GetFieldName(), Is.EqualTo("name")); Assert.That(columnEnumerator.Current.IsDBNull, Is.False); Assert.That(await columnEnumerator.Current.Get(), Is.EqualTo("val1")); Assert.That(await columnEnumerator.MoveNextAsync(), Is.False); From ea3b1617057afcfdd3a2eb4338e85be5ef46e6c1 Mon Sep 17 00:00:00 2001 From: Brar Piening Date: Tue, 2 Jul 2024 14:44:28 +0200 Subject: [PATCH 437/761] Implement Logical Streaming Replication Protocol V4 (#5761) Closes #5760 
--- src/Npgsql/PublicAPI.Unshipped.txt | 22 +++++ .../PgOutput/Messages/StreamAbortMessage.cs | 33 ++++++- .../PgOutput/PgOutputAsyncEnumerable.cs | 68 +++++++++++--- .../PgOutput/PgOutputProtocolVersion.cs | 30 +++++++ .../PgOutput/PgOutputReplicationOptions.cs | 77 ++++++++++++---- .../PgOutput/PgOutputStreamingMode.cs | 23 +++++ .../Replication/PgOutputReplicationTests.cs | 89 ++++++++++--------- 7 files changed, 269 insertions(+), 73 deletions(-) create mode 100644 src/Npgsql/Replication/PgOutput/PgOutputProtocolVersion.cs create mode 100644 src/Npgsql/Replication/PgOutput/PgOutputStreamingMode.cs diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index 50b8d6c3da..aa6649d3e6 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -3,3 +3,25 @@ Npgsql.NpgsqlSlimDataSourceBuilder.EnableGeometricTypes() -> Npgsql.NpgsqlSlimDa Npgsql.NpgsqlSlimDataSourceBuilder.EnableJsonTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableNetworkTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.Replication.PgOutput.ReplicationValue.GetFieldName() -> string! 
+Npgsql.Replication.PgOutput.Messages.ParallelStreamAbortMessage +Npgsql.Replication.PgOutput.Messages.ParallelStreamAbortMessage.AbortLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber +Npgsql.Replication.PgOutput.Messages.ParallelStreamAbortMessage.AbortTimestamp.get -> System.DateTime +Npgsql.Replication.PgOutput.PgOutputProtocolVersion +Npgsql.Replication.PgOutput.PgOutputProtocolVersion.V1 = 1 -> Npgsql.Replication.PgOutput.PgOutputProtocolVersion +Npgsql.Replication.PgOutput.PgOutputProtocolVersion.V2 = 2 -> Npgsql.Replication.PgOutput.PgOutputProtocolVersion +Npgsql.Replication.PgOutput.PgOutputProtocolVersion.V3 = 3 -> Npgsql.Replication.PgOutput.PgOutputProtocolVersion +Npgsql.Replication.PgOutput.PgOutputProtocolVersion.V4 = 4 -> Npgsql.Replication.PgOutput.PgOutputProtocolVersion +Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PgOutputReplicationOptions(string! publicationName, Npgsql.Replication.PgOutput.PgOutputProtocolVersion protocolVersion, bool? binary = null, Npgsql.Replication.PgOutput.PgOutputStreamingMode? streamingMode = null, bool? messages = null, bool? twoPhase = null) -> void +Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PgOutputReplicationOptions(string! publicationName, ulong protocolVersion, bool? binary = null, Npgsql.Replication.PgOutput.PgOutputStreamingMode? streamingMode = null, bool? messages = null, bool? twoPhase = null) -> void +*REMOVED*Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PgOutputReplicationOptions(string! publicationName, ulong protocolVersion, bool? binary = null, bool? streaming = null, bool? messages = null, bool? twoPhase = null) -> void +Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PgOutputReplicationOptions(System.Collections.Generic.IEnumerable! publicationNames, Npgsql.Replication.PgOutput.PgOutputProtocolVersion protocolVersion, bool? binary = null, Npgsql.Replication.PgOutput.PgOutputStreamingMode? streamingMode = null, bool? messages = null, bool? 
twoPhase = null) -> void +*REMOVED*Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PgOutputReplicationOptions(System.Collections.Generic.IEnumerable! publicationNames, ulong protocolVersion, bool? binary = null, bool? streaming = null, bool? messages = null, bool? twoPhase = null) -> void +Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PgOutputReplicationOptions(System.Collections.Generic.IEnumerable! publicationNames, ulong protocolVersion, bool? binary = null, Npgsql.Replication.PgOutput.PgOutputStreamingMode? streamingMode = null, bool? messages = null, bool? twoPhase = null) -> void +Npgsql.Replication.PgOutput.PgOutputReplicationOptions.ProtocolVersion.get -> Npgsql.Replication.PgOutput.PgOutputProtocolVersion +*REMOVED*Npgsql.Replication.PgOutput.PgOutputReplicationOptions.ProtocolVersion.get -> ulong +Npgsql.Replication.PgOutput.PgOutputReplicationOptions.StreamingMode.get -> Npgsql.Replication.PgOutput.PgOutputStreamingMode? +*REMOVED*Npgsql.Replication.PgOutput.PgOutputReplicationOptions.Streaming.get -> bool? 
+Npgsql.Replication.PgOutput.PgOutputStreamingMode +Npgsql.Replication.PgOutput.PgOutputStreamingMode.Off = 0 -> Npgsql.Replication.PgOutput.PgOutputStreamingMode +Npgsql.Replication.PgOutput.PgOutputStreamingMode.On = 1 -> Npgsql.Replication.PgOutput.PgOutputStreamingMode +Npgsql.Replication.PgOutput.PgOutputStreamingMode.Parallel = 2 -> Npgsql.Replication.PgOutput.PgOutputStreamingMode diff --git a/src/Npgsql/Replication/PgOutput/Messages/StreamAbortMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/StreamAbortMessage.cs index 23fc2c5a24..20e5c4d2e3 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/StreamAbortMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/StreamAbortMessage.cs @@ -4,9 +4,9 @@ namespace Npgsql.Replication.PgOutput.Messages; /// -/// Logical Replication Protocol stream abort message +/// Logical Replication Protocol stream abort message for Logical Streaming Replication Protocol versions 2-3 /// -public sealed class StreamAbortMessage : TransactionControlMessage +public class StreamAbortMessage : TransactionControlMessage { /// /// Xid of the subtransaction (will be same as xid of the transaction for top-level transactions). @@ -22,4 +22,31 @@ internal StreamAbortMessage Populate(NpgsqlLogSequenceNumber walStart, NpgsqlLog SubtransactionXid = subtransactionXid; return this; } -} \ No newline at end of file +} + +/// +/// Logical Replication Protocol stream abort message for Logical Streaming Replication Protocol versions 4+ +/// +public sealed class ParallelStreamAbortMessage : StreamAbortMessage +{ + /// + /// The LSN of the abort. + /// + public NpgsqlLogSequenceNumber AbortLsn { get; private set; } + + /// + /// Abort timestamp of the transaction. 
+ /// + public DateTime AbortTimestamp { get; private set; } + + internal ParallelStreamAbortMessage() {} + + internal ParallelStreamAbortMessage Populate(NpgsqlLogSequenceNumber walStart, NpgsqlLogSequenceNumber walEnd, DateTime serverClock, + uint transactionXid, uint subtransactionXid, NpgsqlLogSequenceNumber abortLsn, DateTime abortTimestamp) + { + base.Populate(walStart, walEnd, serverClock, transactionXid, subtransactionXid); + AbortLsn = abortLsn; + AbortTimestamp = abortTimestamp; + return this; + } +} diff --git a/src/Npgsql/Replication/PgOutput/PgOutputAsyncEnumerable.cs b/src/Npgsql/Replication/PgOutput/PgOutputAsyncEnumerable.cs index ae26d229f6..d200c780a1 100644 --- a/src/Npgsql/Replication/PgOutput/PgOutputAsyncEnumerable.cs +++ b/src/Npgsql/Replication/PgOutput/PgOutputAsyncEnumerable.cs @@ -13,6 +13,7 @@ namespace Npgsql.Replication.PgOutput; sealed class PgOutputAsyncEnumerable : IAsyncEnumerable { + readonly PgOutputProtocolVersion _protocolVersion; readonly LogicalReplicationConnection _connection; readonly PgOutputReplicationSlot _slot; readonly PgOutputReplicationOptions _options; @@ -38,17 +39,20 @@ sealed class PgOutputAsyncEnumerable : IAsyncEnumerable _truncateMessageRelations = new(); // V2 - readonly StreamStartMessage _streamStartMessage = new(); - readonly StreamStopMessage _streamStopMessage = new(); - readonly StreamCommitMessage _streamCommitMessage = new(); - readonly StreamAbortMessage _streamAbortMessage = new(); + readonly StreamStartMessage _streamStartMessage = null!; + readonly StreamStopMessage _streamStopMessage = null!; + readonly StreamCommitMessage _streamCommitMessage = null!; + readonly StreamAbortMessage _streamAbortMessage = null!; // V3 - readonly BeginPrepareMessage _beginPrepareMessage = new(); - readonly PrepareMessage _prepareMessage = new(); - readonly CommitPreparedMessage _commitPreparedMessage = new(); - readonly RollbackPreparedMessage _rollbackPreparedMessage = new(); - readonly StreamPrepareMessage 
_streamPrepareMessage = new(); + readonly BeginPrepareMessage _beginPrepareMessage = null!; + readonly PrepareMessage _prepareMessage = null!; + readonly CommitPreparedMessage _commitPreparedMessage = null!; + readonly RollbackPreparedMessage _rollbackPreparedMessage = null!; + readonly StreamPrepareMessage _streamPrepareMessage = null!; + + // V4 + readonly ParallelStreamAbortMessage _parallelStreamAbortMessage = null!; #endregion @@ -59,12 +63,38 @@ internal PgOutputAsyncEnumerable( CancellationToken cancellationToken, NpgsqlLogSequenceNumber? walLocation = null) { + _protocolVersion = options.ProtocolVersion; _connection = connection; _slot = slot; _options = options; _baseCancellationToken = cancellationToken; _walLocation = walLocation; + + if (_protocolVersion >= PgOutputProtocolVersion.V2) + { + _streamStartMessage = new(); + _streamStopMessage = new(); + _streamCommitMessage = new(); + } + if (_protocolVersion >= PgOutputProtocolVersion.V3) + { + _beginPrepareMessage = new(); + _prepareMessage = new(); + _commitPreparedMessage = new(); + _rollbackPreparedMessage = new(); + _streamPrepareMessage = new(); + } + + if (_protocolVersion >= PgOutputProtocolVersion.V4) + { + _parallelStreamAbortMessage = new(); + } + else if (_protocolVersion >= PgOutputProtocolVersion.V2) + { + _streamAbortMessage = new(); + } + var connector = _connection.Connector; _insertMessage = new(connector); _defaultUpdateMessage = new(connector); @@ -395,9 +425,23 @@ async IAsyncEnumerator StartReplicationInternal(Canc } case BackendReplicationMessageCode.StreamAbort: { - await buf.EnsureAsync(8).ConfigureAwait(false); - yield return _streamAbortMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, - transactionXid: buf.ReadUInt32(), subtransactionXid: buf.ReadUInt32()); + if (_protocolVersion >= PgOutputProtocolVersion.V4) + { + await buf.EnsureAsync(24).ConfigureAwait(false); + yield return _parallelStreamAbortMessage.Populate(xLogData.WalStart, xLogData.WalEnd, 
xLogData.ServerClock, + transactionXid: buf.ReadUInt32(), + subtransactionXid: buf.ReadUInt32(), + abortLsn: new(buf.ReadUInt64()), + abortTimestamp: PgDateTime.DecodeTimestamp(buf.ReadInt64(), DateTimeKind.Utc)); + + } + else + { + await buf.EnsureAsync(8).ConfigureAwait(false); + yield return _streamAbortMessage.Populate(xLogData.WalStart, xLogData.WalEnd, xLogData.ServerClock, + transactionXid: buf.ReadUInt32(), subtransactionXid: buf.ReadUInt32()); + + } continue; } case BackendReplicationMessageCode.BeginPrepare: diff --git a/src/Npgsql/Replication/PgOutput/PgOutputProtocolVersion.cs b/src/Npgsql/Replication/PgOutput/PgOutputProtocolVersion.cs new file mode 100644 index 0000000000..fd717b6791 --- /dev/null +++ b/src/Npgsql/Replication/PgOutput/PgOutputProtocolVersion.cs @@ -0,0 +1,30 @@ +namespace Npgsql.Replication.PgOutput; + +/// +/// The Logical Streaming Replication Protocol version. +/// +public enum PgOutputProtocolVersion : ulong +{ + /// + /// Version 1 is supported for server version 10 and above. + /// + V1 = 1UL, + + /// + /// Version 2 is supported only for server version 14 and above, and it allows + /// streaming of large in-progress transactions. + /// + V2 = 2UL, + + /// + /// Version 3 is supported only for server version 15 and above, and it allows + /// streaming of two-phase commits. + /// + V3 = 3UL, + + /// + /// Version 4 is supported only for server version 16 and above, and it allows + /// streams of large in-progress transactions to be applied in parallel. 
+ /// + V4 = 4UL +} diff --git a/src/Npgsql/Replication/PgOutput/PgOutputReplicationOptions.cs b/src/Npgsql/Replication/PgOutput/PgOutputReplicationOptions.cs index 5835b88ee2..394758c83a 100644 --- a/src/Npgsql/Replication/PgOutput/PgOutputReplicationOptions.cs +++ b/src/Npgsql/Replication/PgOutput/PgOutputReplicationOptions.cs @@ -15,12 +15,48 @@ public class PgOutputReplicationOptions : IEquatable /// The publication names to include into the stream /// The version of the logical streaming replication protocol /// Send values in binary representation - /// Enable streaming of in-progress transactions + /// Enable streaming of in-progress transactions /// Write logical decoding messages into the replication stream /// Enable streaming of prepared transactions - public PgOutputReplicationOptions(string publicationName, ulong protocolVersion, bool? binary = null, bool? streaming = null, bool? messages = null, bool? twoPhase = null) - : this(new List { publicationName ?? throw new ArgumentNullException(nameof(publicationName)) }, protocolVersion, binary, streaming, messages, twoPhase) - { } + [Obsolete("Please switch to the overloads that take a PgOutputProtocolVersion value instead.")] + public PgOutputReplicationOptions(string publicationName, ulong protocolVersion, bool? binary = null, + PgOutputStreamingMode? streamingMode = null, bool? messages = null, bool? twoPhase = null) + : this([publicationName ?? throw new ArgumentNullException(nameof(publicationName))], (PgOutputProtocolVersion)protocolVersion, + binary, streamingMode, messages, twoPhase) + { + } + + /// + /// Creates a new instance of . 
+ /// + /// The publication names to include into the stream + /// The version of the logical streaming replication protocol + /// Send values in binary representation + /// Enable streaming of in-progress transactions + /// Write logical decoding messages into the replication stream + /// Enable streaming of prepared transactions + public PgOutputReplicationOptions(string publicationName, PgOutputProtocolVersion protocolVersion, bool? binary = null, + PgOutputStreamingMode? streamingMode = null, bool? messages = null, bool? twoPhase = null) + : this([publicationName ?? throw new ArgumentNullException(nameof(publicationName))], protocolVersion, binary, streamingMode, + messages, twoPhase) + { + } + + /// + /// Creates a new instance of . + /// + /// The publication names to include into the stream + /// The version of the logical streaming replication protocol + /// Send values in binary representation + /// Enable streaming of in-progress transactions + /// Write logical decoding messages into the replication stream + /// Enable streaming of prepared transactions + [Obsolete("Please switch to the overloads that take a PgOutputProtocolVersion value instead.")] + public PgOutputReplicationOptions(IEnumerable publicationNames, ulong protocolVersion, bool? binary = null, + PgOutputStreamingMode? streamingMode = null, bool? messages = null, bool? twoPhase = null) + : this(publicationNames, (PgOutputProtocolVersion)protocolVersion, binary, streamingMode, messages, twoPhase) + { + } /// /// Creates a new instance of . 
@@ -28,10 +64,11 @@ public PgOutputReplicationOptions(string publicationName, ulong protocolVersion, /// The publication names to include into the stream /// The version of the logical streaming replication protocol /// Send values in binary representation - /// Enable streaming of in-progress transactions + /// Enable streaming of in-progress transactions /// Write logical decoding messages into the replication stream /// Enable streaming of prepared transactions - public PgOutputReplicationOptions(IEnumerable publicationNames, ulong protocolVersion, bool? binary = null, bool? streaming = null, bool? messages = null, bool? twoPhase = null) + public PgOutputReplicationOptions(IEnumerable publicationNames, PgOutputProtocolVersion protocolVersion, bool? binary = null, + PgOutputStreamingMode? streamingMode = null, bool? messages = null, bool? twoPhase = null) { var publicationNamesList = new List(publicationNames); if (publicationNamesList.Count < 1) @@ -46,7 +83,7 @@ public PgOutputReplicationOptions(IEnumerable publicationNames, ulong pr PublicationNames = publicationNamesList; ProtocolVersion = protocolVersion; Binary = binary; - Streaming = streaming; + StreamingMode = streamingMode; Messages = messages; TwoPhase = twoPhase; } @@ -54,7 +91,7 @@ public PgOutputReplicationOptions(IEnumerable publicationNames, ulong pr /// /// The version of the Logical Streaming Replication Protocol /// - public ulong ProtocolVersion { get; } + public PgOutputProtocolVersion ProtocolVersion { get; } /// /// The publication names to stream @@ -74,10 +111,12 @@ public PgOutputReplicationOptions(IEnumerable publicationNames, ulong pr /// Enable streaming of in-progress transactions /// /// - /// This works as of logical streaming replication protocol version 2 (PostgreSQL 14+) + /// works as of logical streaming replication protocol version 2 (PostgreSQL 14+), + /// works as of logical streaming replication protocol version 4 (PostgreSQL 16+), /// // See: 
https://github.com/postgres/postgres/commit/464824323e57dc4b397e8b05854d779908b55304 - public bool? Streaming { get; } + // and https://github.com/postgres/postgres/commit/216a784829c2c5f03ab0c43e009126cbb819e9b2 + public PgOutputStreamingMode? StreamingMode { get; } /// /// Write logical decoding messages into the replication stream @@ -100,13 +139,21 @@ public PgOutputReplicationOptions(IEnumerable publicationNames, ulong pr internal IEnumerable> GetOptionPairs() { - yield return new KeyValuePair("proto_version", ProtocolVersion.ToString(CultureInfo.InvariantCulture)); + yield return new KeyValuePair("proto_version", ((ulong)ProtocolVersion).ToString(CultureInfo.InvariantCulture)); yield return new KeyValuePair("publication_names", "\"" + string.Join("\",\"", PublicationNames) + "\""); if (Binary != null) yield return new KeyValuePair("binary", Binary.Value ? "on" : "off"); - if (Streaming != null) - yield return new KeyValuePair("streaming", Streaming.Value ? "on" : "off"); + if (StreamingMode != null) + { + yield return new KeyValuePair("streaming", StreamingMode.Value switch + { + PgOutputStreamingMode.Off => "off", + PgOutputStreamingMode.On => "on", + PgOutputStreamingMode.Parallel => "parallel", + _ => throw new ArgumentOutOfRangeException($"Unknown {nameof(PgOutputStreamingMode)} value: {StreamingMode.Value}") + }); + } if (Messages != null) yield return new KeyValuePair("messages", Messages.Value ? "on" : "off"); if (TwoPhase != null) @@ -118,12 +165,12 @@ public bool Equals(PgOutputReplicationOptions? other) => other != null && ( ReferenceEquals(this, other) || ProtocolVersion == other.ProtocolVersion && PublicationNames.Equals(other.PublicationNames) && Binary == other.Binary && - Streaming == other.Streaming && Messages == other.Messages && TwoPhase == other.TwoPhase); + StreamingMode == other.StreamingMode && Messages == other.Messages && TwoPhase == other.TwoPhase); /// public override bool Equals(object? 
obj) => obj is PgOutputReplicationOptions other && other.Equals(this); /// - public override int GetHashCode() => HashCode.Combine(ProtocolVersion, PublicationNames, Binary, Streaming, Messages, TwoPhase); + public override int GetHashCode() => HashCode.Combine(ProtocolVersion, PublicationNames, Binary, StreamingMode, Messages, TwoPhase); } diff --git a/src/Npgsql/Replication/PgOutput/PgOutputStreamingMode.cs b/src/Npgsql/Replication/PgOutput/PgOutputStreamingMode.cs new file mode 100644 index 0000000000..935ad2792c --- /dev/null +++ b/src/Npgsql/Replication/PgOutput/PgOutputStreamingMode.cs @@ -0,0 +1,23 @@ +namespace Npgsql.Replication.PgOutput; + +/// +/// Option to enable streaming of in-progress transactions. +/// Minimum protocol version 2 is required to turn it on. Minimum protocol version 4 is required for the "parallel" option. +/// +public enum PgOutputStreamingMode +{ + /// + /// Disable streaming of in-progress transactions + /// + Off, + + /// + /// Enable streaming of in-progress transactions + /// + On, + + /// + /// Enable streaming of in-progress transactions and enable sending extra information with some messages to be used for parallelisation + /// + Parallel +} diff --git a/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs b/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs index a32528452a..57d137c367 100644 --- a/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs +++ b/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs @@ -16,47 +16,40 @@ namespace Npgsql.Tests.Replication; -[TestFixture(ProtocolVersion.V1, ReplicationDataMode.DefaultReplicationDataMode, TransactionMode.DefaultTransactionMode)] -[TestFixture(ProtocolVersion.V1, ReplicationDataMode.BinaryReplicationDataMode, TransactionMode.DefaultTransactionMode)] -[TestFixture(ProtocolVersion.V2, ReplicationDataMode.DefaultReplicationDataMode, TransactionMode.StreamingTransactionMode)] -[TestFixture(ProtocolVersion.V3, 
ReplicationDataMode.DefaultReplicationDataMode, TransactionMode.DefaultTransactionMode)] -[TestFixture(ProtocolVersion.V3, ReplicationDataMode.DefaultReplicationDataMode, TransactionMode.StreamingTransactionMode)] -// We currently don't execute all possible combinations of settings for efficiency reasons because they don't -// interact in the current implementation. -// Feel free to uncomment some or all of the following lines if the implementation changed or you suspect a -// problem with some combination. -// [TestFixture(ProtocolVersion.V1, ReplicationDataMode.TextReplicationDataMode, TransactionMode.NonStreamingTransactionMode)] -// [TestFixture(ProtocolVersion.V2, ReplicationDataMode.DefaultReplicationDataMode, TransactionMode.DefaultTransactionMode)] -// [TestFixture(ProtocolVersion.V2, ReplicationDataMode.TextReplicationDataMode, TransactionMode.NonStreamingTransactionMode)] -// [TestFixture(ProtocolVersion.V2, ReplicationDataMode.BinaryReplicationDataMode, TransactionMode.DefaultTransactionMode)] -// [TestFixture(ProtocolVersion.V2, ReplicationDataMode.BinaryReplicationDataMode, TransactionMode.StreamingTransactionMode)] -// [TestFixture(ProtocolVersion.V3, ReplicationDataMode.TextReplicationDataMode, TransactionMode.NonStreamingTransactionMode)] -// [TestFixture(ProtocolVersion.V3, ReplicationDataMode.BinaryReplicationDataMode, TransactionMode.DefaultTransactionMode)] -// [TestFixture(ProtocolVersion.V3, ReplicationDataMode.BinaryReplicationDataMode, TransactionMode.StreamingTransactionMode)] +[TestFixture(PgOutputProtocolVersion.V1, ReplicationDataMode.DefaultReplicationDataMode, TransactionMode.DefaultTransactionMode)] +[TestFixture(PgOutputProtocolVersion.V1, ReplicationDataMode.BinaryReplicationDataMode, TransactionMode.DefaultTransactionMode)] +[TestFixture(PgOutputProtocolVersion.V2, ReplicationDataMode.DefaultReplicationDataMode, TransactionMode.StreamingTransactionMode)] +[TestFixture(PgOutputProtocolVersion.V3, 
ReplicationDataMode.DefaultReplicationDataMode, TransactionMode.DefaultTransactionMode)] +[TestFixture(PgOutputProtocolVersion.V3, ReplicationDataMode.DefaultReplicationDataMode, TransactionMode.StreamingTransactionMode)] +[TestFixture(PgOutputProtocolVersion.V4, ReplicationDataMode.DefaultReplicationDataMode, TransactionMode.DefaultTransactionMode)] +[TestFixture(PgOutputProtocolVersion.V4, ReplicationDataMode.DefaultReplicationDataMode, TransactionMode.ParallelStreamingTransactionMode)] [NonParallelizable] // These tests aren't designed to be parallelizable public class PgOutputReplicationTests : SafeReplicationTestBase { - readonly ulong _protocolVersion; + readonly PgOutputProtocolVersion _protocolVersion; readonly bool? _binary; - readonly bool? _streaming; + readonly PgOutputStreamingMode? _streamingMode; bool IsBinary => _binary ?? false; - bool IsStreaming => _streaming ?? false; - ulong Version => _protocolVersion; + bool IsStreaming => _streamingMode.HasValue && _streamingMode.Value != PgOutputStreamingMode.Off; + PgOutputProtocolVersion Version => _protocolVersion; - public PgOutputReplicationTests(ProtocolVersion protocolVersion, ReplicationDataMode dataMode, TransactionMode transactionMode) + public PgOutputReplicationTests(PgOutputProtocolVersion protocolVersion, ReplicationDataMode dataMode, TransactionMode transactionMode) { - _protocolVersion = (ulong)protocolVersion; + _protocolVersion = protocolVersion; _binary = dataMode == ReplicationDataMode.BinaryReplicationDataMode ? true : dataMode == ReplicationDataMode.TextReplicationDataMode ? false : null; - _streaming = transactionMode == TransactionMode.StreamingTransactionMode - ? true - : transactionMode == TransactionMode.NonStreamingTransactionMode - ? 
false - : null; + _streamingMode = transactionMode switch + { + TransactionMode.DefaultTransactionMode => null, + TransactionMode.NonStreamingTransactionMode => PgOutputStreamingMode.Off, + TransactionMode.StreamingTransactionMode => PgOutputStreamingMode.On, + TransactionMode.ParallelStreamingTransactionMode => PgOutputStreamingMode.Parallel, + _ => throw new ArgumentOutOfRangeException(nameof(transactionMode), transactionMode, null) + }; } [Test] @@ -805,7 +798,12 @@ public Task LogicalDecodingMessage(bool writeMessages, bool readMessages) // Rollback Transaction 2 if (IsStreaming) - Assert.That(messages.Current, Is.TypeOf()); + { + Assert.That(messages.Current, + _streamingMode == PgOutputStreamingMode.On + ? Is.TypeOf() + : Is.TypeOf()); + } streamingCts.Cancel(); await AssertReplicationCancellation(messages); @@ -1105,7 +1103,7 @@ public Task TwoPhase([Values]bool commit) { // Streaming of prepared transaction is only supported for // logical streaming replication protocol >= 3 - if (_protocolVersion < 3UL) + if (_protocolVersion < PgOutputProtocolVersion.V3) return Task.CompletedTask; return SafePgOutputReplicationTest( @@ -1185,7 +1183,7 @@ public Task TwoPhase([Values]bool commit) public Task Bug4633() { // We don't need all the various test cases here since the bug gets triggered in any case - if (IsStreaming || IsBinary || Version > 1) + if (IsStreaming || IsBinary || Version > PgOutputProtocolVersion.V1) return Task.CompletedTask; return SafePgOutputReplicationTest( @@ -1468,7 +1466,7 @@ async IAsyncEnumerable SkipEmptyTransactions(IAsyncE } PgOutputReplicationOptions GetOptions(string publicationName, bool? 
messages = null) - => new(publicationName, _protocolVersion, _binary, _streaming, messages); + => new(publicationName, _protocolVersion, _binary, _streamingMode, messages); Task SafePgOutputReplicationTest(Func testAction, [CallerMemberName] string memberName = "") => SafeReplicationTest(testAction, GetObjectName(memberName)); @@ -1482,8 +1480,8 @@ string GetObjectName(string memberName) .Append("_v").Append(_protocolVersion); if (_binary.HasValue) sb.Append("_b_").Append(BoolToChar(_binary.Value)); - if (_streaming.HasValue) - sb.Append("_s_").Append(BoolToChar(_streaming.Value)); + if (_streamingMode.HasValue) + sb.Append("_s_").Append(_streamingMode.Value); return sb.ToString(); } @@ -1498,15 +1496,25 @@ public async Task SetUp() { await using var c = await OpenConnectionAsync(); TestUtil.MinimumPgVersion(c, "10.0", "The Logical Replication Protocol (via pgoutput plugin) was introduced in PostgreSQL 10"); - if (_protocolVersion > 2) + if (_protocolVersion > PgOutputProtocolVersion.V3) + TestUtil.MinimumPgVersion(c, "16.0", "Logical Streaming Replication Protocol version 4 was introduced in PostgreSQL 16"); + if (_protocolVersion > PgOutputProtocolVersion.V2) TestUtil.MinimumPgVersion(c, "15.0", "Logical Streaming Replication Protocol version 3 was introduced in PostgreSQL 15"); - if (_protocolVersion > 1) + if (_protocolVersion > PgOutputProtocolVersion.V1) TestUtil.MinimumPgVersion(c, "14.0", "Logical Streaming Replication Protocol version 2 was introduced in PostgreSQL 14"); if (IsBinary) TestUtil.MinimumPgVersion(c, "14.0", "Sending replication values in binary representation was introduced in PostgreSQL 14"); if (IsStreaming) { - TestUtil.MinimumPgVersion(c, "14.0", "Streaming of in-progress transactions was introduced in PostgreSQL 14"); + switch (_streamingMode) + { + case PgOutputStreamingMode.On: + TestUtil.MinimumPgVersion(c, "14.0", "Streaming of in-progress transactions was introduced in PostgreSQL 14"); + break; + case PgOutputStreamingMode.Parallel: 
+ TestUtil.MinimumPgVersion(c, "16.0", "Parallel streaming of in-progress transactions was introduced in PostgreSQL 16"); + break; + } var logicalDecodingWorkMem = (string)(await c.ExecuteScalarAsync("SHOW logical_decoding_work_mem"))!; if (logicalDecodingWorkMem != "64kB") { @@ -1517,12 +1525,6 @@ public async Task SetUp() } } - public enum ProtocolVersion : ulong - { - V1 = 1UL, - V2 = 2UL, - V3 = 3UL, - } public enum ReplicationDataMode { DefaultReplicationDataMode, @@ -1534,6 +1536,7 @@ public enum TransactionMode DefaultTransactionMode, NonStreamingTransactionMode, StreamingTransactionMode, + ParallelStreamingTransactionMode } #endregion Non-Test stuff (helper methods, initialization, ennums, ...) From 792b5d23ec32ae282e51c22898efba99932c5bcb Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Tue, 2 Jul 2024 17:18:01 +0300 Subject: [PATCH 438/761] Fix tracing activity leak with multiplexing (#5765) Fixes #5764 --- src/Npgsql/MultiplexingDataSource.cs | 2 +- src/Npgsql/NpgsqlActivitySource.cs | 31 +++++++++++++++------------- src/Npgsql/NpgsqlCommand.cs | 17 ++++++++++----- 3 files changed, 30 insertions(+), 20 deletions(-) diff --git a/src/Npgsql/MultiplexingDataSource.cs b/src/Npgsql/MultiplexingDataSource.cs index 24da20183d..1912931575 100644 --- a/src/Npgsql/MultiplexingDataSource.cs +++ b/src/Npgsql/MultiplexingDataSource.cs @@ -183,7 +183,7 @@ async Task MultiplexingWriteLoop() { stats.Reset(); connector.FlagAsNotWritableForMultiplexing(); - command.TraceCommandStart(connector); + command.TraceCommandEnrich(connector); // Read queued commands and write them to the connector's buffer, for as long as we're // under our write threshold and timer delay. 
diff --git a/src/Npgsql/NpgsqlActivitySource.cs b/src/Npgsql/NpgsqlActivitySource.cs index 224bb2e658..6e84ba531f 100644 --- a/src/Npgsql/NpgsqlActivitySource.cs +++ b/src/Npgsql/NpgsqlActivitySource.cs @@ -13,11 +13,9 @@ static class NpgsqlActivitySource internal static bool IsEnabled => Source.HasListeners(); - internal static Activity? CommandStart(NpgsqlConnector connector, string commandText, CommandType commandType) + internal static Activity? CommandStart(NpgsqlConnectionStringBuilder settings, string commandText, CommandType commandType) { - var settings = connector.Settings; - - var dbName = settings.Database ?? connector.InferredUserName; + var dbName = settings.Database ?? "UNKNOWN"; string? dbOperation = null; string? dbSqlTable = null; string activityName; @@ -53,18 +51,25 @@ static class NpgsqlActivitySource if (activity is not { IsAllDataRequested: true }) return activity; + activity.SetTag("db.statement", commandText); + + if (dbOperation != null) + activity.SetTag("db.operation", dbOperation); + if (dbSqlTable != null) + activity.SetTag("db.sql.table", dbSqlTable); + + return activity; + } + + internal static void Enrich(Activity activity, NpgsqlConnector connector) + { activity.SetTag("db.system", "postgresql"); activity.SetTag("db.connection_string", connector.UserFacingConnectionString); activity.SetTag("db.user", connector.InferredUserName); // We trace the actual (maybe inferred) database name we're connected to, even if it // wasn't specified in the connection string - activity.SetTag("db.name", dbName); - activity.SetTag("db.statement", commandText); + activity.SetTag("db.name", connector.Settings.Database ?? 
connector.InferredUserName); activity.SetTag("db.connection_id", connector.Id); - if (dbOperation != null) - activity.SetTag("db.operation", dbOperation); - if (dbSqlTable != null) - activity.SetTag("db.sql.table", dbSqlTable); var endPoint = connector.ConnectedEndPoint; Debug.Assert(endPoint is not null); @@ -75,19 +80,17 @@ static class NpgsqlActivitySource activity.SetTag("net.peer.ip", ipEndPoint.Address.ToString()); if (ipEndPoint.Port != 5432) activity.SetTag("net.peer.port", ipEndPoint.Port); - activity.SetTag("net.peer.name", settings.Host); + activity.SetTag("net.peer.name", connector.Host); break; case UnixDomainSocketEndPoint: activity.SetTag("net.transport", "unix"); - activity.SetTag("net.peer.name", settings.Host); + activity.SetTag("net.peer.name", connector.Host); break; default: throw new ArgumentOutOfRangeException("Invalid endpoint type: " + endPoint.GetType()); } - - return activity; } internal static void ReceivedFirstResponse(Activity activity) diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index 0a46e675cf..2d573360f5 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -1493,7 +1493,8 @@ internal virtual async ValueTask ExecuteReader(bool async, Com NpgsqlEventSource.Log.CommandStart(CommandText); startTimestamp = connector.DataSource.MetricsReporter.ReportCommandStart(); - TraceCommandStart(connector); + TraceCommandStart(connector.Settings); + TraceCommandEnrich(connector); // If a cancellation is in progress, wait for it to "complete" before proceeding (#615) connector.ResetCancellation(); @@ -1565,6 +1566,8 @@ internal virtual async ValueTask ExecuteReader(bool async, Com State = CommandState.InProgress; + TraceCommandStart(conn.Settings); + // TODO: Experiment: do we want to wait on *writing* here, or on *reading*? // Previous behavior was to wait on reading, which throw the exception from ExecuteReader (and not from // the first read). 
But waiting on writing would allow us to do sync writing and async reading. @@ -1700,19 +1703,23 @@ internal void Reset() #region Tracing - internal void TraceCommandStart(NpgsqlConnector connector) + internal void TraceCommandStart(NpgsqlConnectionStringBuilder settings) { Debug.Assert(CurrentActivity is null); if (NpgsqlActivitySource.IsEnabled) - CurrentActivity = NpgsqlActivitySource.CommandStart(connector, IsWrappedByBatch ? GetBatchFullCommandText() : CommandText, CommandType); + CurrentActivity = NpgsqlActivitySource.CommandStart(settings, IsWrappedByBatch ? GetBatchFullCommandText() : CommandText, CommandType); + } + + internal void TraceCommandEnrich(NpgsqlConnector connector) + { + if (CurrentActivity is not null) + NpgsqlActivitySource.Enrich(CurrentActivity, connector); } internal void TraceReceivedFirstResponse() { if (CurrentActivity is not null) - { NpgsqlActivitySource.ReceivedFirstResponse(CurrentActivity); - } } internal void TraceCommandStop() From dcee5ed3757c863aace376280f8f81115176bae9 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Tue, 2 Jul 2024 17:45:47 +0300 Subject: [PATCH 439/761] Do not enrich activity if IsAllDataRequested = false --- src/Npgsql/NpgsqlActivitySource.cs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/Npgsql/NpgsqlActivitySource.cs b/src/Npgsql/NpgsqlActivitySource.cs index 6e84ba531f..b2624fc1ac 100644 --- a/src/Npgsql/NpgsqlActivitySource.cs +++ b/src/Npgsql/NpgsqlActivitySource.cs @@ -63,6 +63,9 @@ static class NpgsqlActivitySource internal static void Enrich(Activity activity, NpgsqlConnector connector) { + if (!activity.IsAllDataRequested) + return; + activity.SetTag("db.system", "postgresql"); activity.SetTag("db.connection_string", connector.UserFacingConnectionString); activity.SetTag("db.user", connector.InferredUserName); @@ -95,6 +98,9 @@ internal static void Enrich(Activity activity, NpgsqlConnector connector) internal static void ReceivedFirstResponse(Activity activity) { + if 
(!activity.IsAllDataRequested) + return; + var activityEvent = new ActivityEvent("received-first-response"); activity.AddEvent(activityEvent); } From 95ef17330be1032c10c4ccae779c54e19b9f0eac Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Thu, 4 Jul 2024 16:37:20 +0200 Subject: [PATCH 440/761] Remove deprecated auth methods and rename enum values (#5777) --- .../BackendMessages/AuthenticationMessages.cs | 57 ++++++------------- src/Npgsql/Internal/NpgsqlConnector.Auth.cs | 20 +++---- .../Internal/NpgsqlConnector.OldAuth.cs | 2 +- src/Npgsql/Internal/NpgsqlConnector.cs | 18 +++--- 4 files changed, 38 insertions(+), 59 deletions(-) diff --git a/src/Npgsql/BackendMessages/AuthenticationMessages.cs b/src/Npgsql/BackendMessages/AuthenticationMessages.cs index b6320e87b8..a447bb632a 100644 --- a/src/Npgsql/BackendMessages/AuthenticationMessages.cs +++ b/src/Npgsql/BackendMessages/AuthenticationMessages.cs @@ -13,23 +13,15 @@ abstract class AuthenticationRequestMessage : IBackendMessage sealed class AuthenticationOkMessage : AuthenticationRequestMessage { - internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.AuthenticationOk; + internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.Ok; internal static readonly AuthenticationOkMessage Instance = new(); AuthenticationOkMessage() { } } -sealed class AuthenticationKerberosV5Message : AuthenticationRequestMessage -{ - internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.AuthenticationKerberosV5; - - internal static readonly AuthenticationKerberosV5Message Instance = new(); - AuthenticationKerberosV5Message() { } -} - sealed class AuthenticationCleartextPasswordMessage : AuthenticationRequestMessage { - internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.AuthenticationCleartextPassword; + internal override AuthenticationRequestType AuthRequestType => 
AuthenticationRequestType.CleartextPassword; internal static readonly AuthenticationCleartextPasswordMessage Instance = new(); AuthenticationCleartextPasswordMessage() { } @@ -37,7 +29,7 @@ sealed class AuthenticationCleartextPasswordMessage : AuthenticationRequestMess sealed class AuthenticationMD5PasswordMessage : AuthenticationRequestMessage { - internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.AuthenticationMD5Password; + internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.MD5Password; internal byte[] Salt { get; } @@ -54,17 +46,9 @@ internal static AuthenticationMD5PasswordMessage Load(NpgsqlReadBuffer buf) } } -sealed class AuthenticationSCMCredentialMessage : AuthenticationRequestMessage -{ - internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.AuthenticationSCMCredential; - - internal static readonly AuthenticationSCMCredentialMessage Instance = new(); - AuthenticationSCMCredentialMessage() { } -} - sealed class AuthenticationGSSMessage : AuthenticationRequestMessage { - internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.AuthenticationGSS; + internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.GSS; internal static readonly AuthenticationGSSMessage Instance = new(); AuthenticationGSSMessage() { } @@ -72,7 +56,7 @@ sealed class AuthenticationGSSMessage : AuthenticationRequestMessage sealed class AuthenticationGSSContinueMessage : AuthenticationRequestMessage { - internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.AuthenticationGSSContinue; + internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.GSSContinue; internal byte[] AuthenticationData { get; } @@ -92,7 +76,7 @@ internal static AuthenticationGSSContinueMessage Load(NpgsqlReadBuffer buf, int sealed class AuthenticationSSPIMessage : 
AuthenticationRequestMessage { - internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.AuthenticationSSPI; + internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.SSPI; internal static readonly AuthenticationSSPIMessage Instance = new(); AuthenticationSSPIMessage() { } @@ -102,7 +86,7 @@ sealed class AuthenticationSSPIMessage : AuthenticationRequestMessage sealed class AuthenticationSASLMessage : AuthenticationRequestMessage { - internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.AuthenticationSASL; + internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.SASL; internal List Mechanisms { get; } = new(); internal AuthenticationSASLMessage(NpgsqlReadBuffer buf) @@ -117,7 +101,7 @@ internal AuthenticationSASLMessage(NpgsqlReadBuffer buf) sealed class AuthenticationSASLContinueMessage : AuthenticationRequestMessage { - internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.AuthenticationSASLContinue; + internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.SASLContinue; internal byte[] Payload { get; } internal AuthenticationSASLContinueMessage(NpgsqlReadBuffer buf, int len) @@ -171,7 +155,7 @@ internal static AuthenticationSCRAMServerFirstMessage Load(byte[] bytes, ILogger sealed class AuthenticationSASLFinalMessage : AuthenticationRequestMessage { - internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.AuthenticationSASLFinal; + internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.SASLFinal; internal byte[] Payload { get; } internal AuthenticationSASLFinalMessage(NpgsqlReadBuffer buf, int len) @@ -210,20 +194,15 @@ internal AuthenticationSCRAMServerFinalMessage(string serverSignature) #endregion SASL -// TODO: Remove Authentication prefix from everything enum AuthenticationRequestType { - 
AuthenticationOk = 0, - AuthenticationKerberosV4 = 1, - AuthenticationKerberosV5 = 2, - AuthenticationCleartextPassword = 3, - AuthenticationCryptPassword = 4, - AuthenticationMD5Password = 5, - AuthenticationSCMCredential = 6, - AuthenticationGSS = 7, - AuthenticationGSSContinue = 8, - AuthenticationSSPI = 9, - AuthenticationSASL = 10, - AuthenticationSASLContinue = 11, - AuthenticationSASLFinal = 12 + Ok = 0, + CleartextPassword = 3, + MD5Password = 5, + GSS = 7, + GSSContinue = 8, + SSPI = 9, + SASL = 10, + SASLContinue = 11, + SASLFinal = 12 } diff --git a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs index 827bc71485..3f73b4ed58 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs @@ -24,28 +24,28 @@ async Task Authenticate(string username, NpgsqlTimeout timeout, bool async, Canc var msg = ExpectAny(await ReadMessage(async).ConfigureAwait(false), this); switch (msg.AuthRequestType) { - case AuthenticationRequestType.AuthenticationOk: + case AuthenticationRequestType.Ok: return; - case AuthenticationRequestType.AuthenticationCleartextPassword: + case AuthenticationRequestType.CleartextPassword: await AuthenticateCleartext(username, async, cancellationToken).ConfigureAwait(false); break; - case AuthenticationRequestType.AuthenticationMD5Password: + case AuthenticationRequestType.MD5Password: await AuthenticateMD5(username, ((AuthenticationMD5PasswordMessage)msg).Salt, async, cancellationToken).ConfigureAwait(false); break; - case AuthenticationRequestType.AuthenticationSASL: + case AuthenticationRequestType.SASL: await AuthenticateSASL(((AuthenticationSASLMessage)msg).Mechanisms, username, async, cancellationToken).ConfigureAwait(false); break; - case AuthenticationRequestType.AuthenticationGSS: - case AuthenticationRequestType.AuthenticationSSPI: + case AuthenticationRequestType.GSS: + case AuthenticationRequestType.SSPI: await 
DataSource.IntegratedSecurityHandler.NegotiateAuthentication(async, this).ConfigureAwait(false); return; - case AuthenticationRequestType.AuthenticationGSSContinue: + case AuthenticationRequestType.GSSContinue: throw new NpgsqlException("Can't start auth cycle with AuthenticationGSSContinue"); default: @@ -125,7 +125,7 @@ async Task AuthenticateSASL(List mechanisms, string username, bool async await Flush(async, cancellationToken).ConfigureAwait(false); var saslContinueMsg = Expect(await ReadMessage(async).ConfigureAwait(false), this); - if (saslContinueMsg.AuthRequestType != AuthenticationRequestType.AuthenticationSASLContinue) + if (saslContinueMsg.AuthRequestType != AuthenticationRequestType.SASLContinue) throw new NpgsqlException("[SASL] AuthenticationSASLContinue message expected"); var firstServerMsg = AuthenticationSCRAMServerFirstMessage.Load(saslContinueMsg.Payload, ConnectionLogger); if (!firstServerMsg.Nonce.StartsWith(clientNonce, StringComparison.Ordinal)) @@ -161,7 +161,7 @@ async Task AuthenticateSASL(List mechanisms, string username, bool async await Flush(async, cancellationToken).ConfigureAwait(false); var saslFinalServerMsg = Expect(await ReadMessage(async).ConfigureAwait(false), this); - if (saslFinalServerMsg.AuthRequestType != AuthenticationRequestType.AuthenticationSASLFinal) + if (saslFinalServerMsg.AuthRequestType != AuthenticationRequestType.SASLFinal) throw new NpgsqlException("[SASL] AuthenticationSASLFinal message expected"); var scramFinalServerMsg = AuthenticationSCRAMServerFinalMessage.Load(saslFinalServerMsg.Payload, ConnectionLogger); @@ -342,7 +342,7 @@ internal async Task AuthenticateGSS(bool async) while (true) { var response = ExpectAny(await ReadMessage(async).ConfigureAwait(false), this); - if (response.AuthRequestType == AuthenticationRequestType.AuthenticationOk) + if (response.AuthRequestType == AuthenticationRequestType.Ok) break; if (response is not AuthenticationGSSContinueMessage gssMsg) throw new 
NpgsqlException($"Received unexpected authentication request message {response.AuthRequestType}"); diff --git a/src/Npgsql/Internal/NpgsqlConnector.OldAuth.cs b/src/Npgsql/Internal/NpgsqlConnector.OldAuth.cs index 6d60251773..91aec10660 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.OldAuth.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.OldAuth.cs @@ -107,7 +107,7 @@ async Task Read(byte[] buffer, int offset, int count, bool async, Cancellat if (_leftToRead == 0) { var response = ExpectAny(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); - if (response.AuthRequestType == AuthenticationRequestType.AuthenticationOk) + if (response.AuthRequestType == AuthenticationRequestType.Ok) throw new AuthenticationCompleteException(); var gssMsg = response as AuthenticationGSSContinueMessage; if (gssMsg == null) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 72a2d571f2..d1be574cf8 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -1462,15 +1462,15 @@ internal ValueTask ReadMessage( var authType = (AuthenticationRequestType)buf.ReadInt32(); return authType switch { - AuthenticationRequestType.AuthenticationOk => AuthenticationOkMessage.Instance, - AuthenticationRequestType.AuthenticationCleartextPassword => AuthenticationCleartextPasswordMessage.Instance, - AuthenticationRequestType.AuthenticationMD5Password => AuthenticationMD5PasswordMessage.Load(buf), - AuthenticationRequestType.AuthenticationGSS => AuthenticationGSSMessage.Instance, - AuthenticationRequestType.AuthenticationSSPI => AuthenticationSSPIMessage.Instance, - AuthenticationRequestType.AuthenticationGSSContinue => AuthenticationGSSContinueMessage.Load(buf, len), - AuthenticationRequestType.AuthenticationSASL => new AuthenticationSASLMessage(buf), - AuthenticationRequestType.AuthenticationSASLContinue => new AuthenticationSASLContinueMessage(buf, len - 4), - 
AuthenticationRequestType.AuthenticationSASLFinal => new AuthenticationSASLFinalMessage(buf, len - 4), + AuthenticationRequestType.Ok => AuthenticationOkMessage.Instance, + AuthenticationRequestType.CleartextPassword => AuthenticationCleartextPasswordMessage.Instance, + AuthenticationRequestType.MD5Password => AuthenticationMD5PasswordMessage.Load(buf), + AuthenticationRequestType.GSS => AuthenticationGSSMessage.Instance, + AuthenticationRequestType.SSPI => AuthenticationSSPIMessage.Instance, + AuthenticationRequestType.GSSContinue => AuthenticationGSSContinueMessage.Load(buf, len), + AuthenticationRequestType.SASL => new AuthenticationSASLMessage(buf), + AuthenticationRequestType.SASLContinue => new AuthenticationSASLContinueMessage(buf, len - 4), + AuthenticationRequestType.SASLFinal => new AuthenticationSASLFinalMessage(buf, len - 4), _ => throw new NotSupportedException($"Authentication method not supported (Received: {authType})") }; From 334c907a8f2f161904623184f5c403e1351df0fb Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Thu, 4 Jul 2024 16:52:54 +0200 Subject: [PATCH 441/761] Support more unknown result types (#5768) Closes #5754 --- .../BackendMessages/RowDescriptionMessage.cs | 50 +++++++++++++------ src/Npgsql/Internal/AdoSerializerHelpers.cs | 13 +++-- src/Npgsql/ThrowHelper.cs | 5 ++ test/Npgsql.Tests/Types/MiscTypeTests.cs | 15 ++++++ 4 files changed, 63 insertions(+), 20 deletions(-) diff --git a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs index 87636ca039..fe91b0cf8f 100644 --- a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs +++ b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs @@ -1,7 +1,6 @@ using System; using System.Collections.Generic; using System.Diagnostics; -using System.Diagnostics.CodeAnalysis; using System.Globalization; using System.Runtime.CompilerServices; using System.Threading; @@ -331,8 +330,11 @@ internal FieldDescription Clone() internal void GetInfo(Type? 
type, ref ColumnInfo lastColumnInfo) { Debug.Assert(lastColumnInfo.ConverterInfo.IsDefault || ( - ReferenceEquals(_serializerOptions, lastColumnInfo.ConverterInfo.TypeInfo.Options) && - lastColumnInfo.ConverterInfo.TypeInfo.PgTypeId == _serializerOptions.ToCanonicalTypeId(PostgresType)), "Cache is bleeding over"); + ReferenceEquals(_serializerOptions, lastColumnInfo.ConverterInfo.TypeInfo.Options) && ( + IsUnknownResultType() && lastColumnInfo.ConverterInfo.TypeInfo.PgTypeId == _serializerOptions.ToCanonicalTypeId(_serializerOptions.TextPgType) || + // Normal resolution + lastColumnInfo.ConverterInfo.TypeInfo.PgTypeId == _serializerOptions.ToCanonicalTypeId(PostgresType)) + ), "Cache is bleeding over"); if (!lastColumnInfo.ConverterInfo.IsDefault && lastColumnInfo.ConverterInfo.TypeToConvert == type) return; @@ -360,25 +362,36 @@ internal void GetInfo(Type? type, ref ColumnInfo lastColumnInfo) [MethodImpl(MethodImplOptions.NoInlining)] void GetInfoSlow(Type? type, out ColumnInfo lastColumnInfo) { - var typeInfo = AdoSerializerHelpers.GetTypeInfoForReading(type ?? typeof(object), PostgresType, _serializerOptions); PgConverterInfo converterInfo; switch (DataFormat) { - case DataFormat.Binary: - // If we don't support binary we'll just throw. + case DataFormat.Text when IsUnknownResultType(): + { + // Try to resolve some 'pg_catalog.text' type info for the expected clr type. + var typeInfo = AdoSerializerHelpers.GetTypeInfoForReading(type ?? typeof(string), _serializerOptions.TextPgType, _serializerOptions); + + // We start binding to DataFormat.Binary as it's the broadest supported format. + // The format however is irrelevant as 'pg_catalog.text' data is identical across either. + // Given we did a resolution against 'pg_catalog.text' and not the actual field type we're in reinterpretation territory anyway. 
+ if (!typeInfo.TryBind(Field, DataFormat.Binary, out converterInfo)) + converterInfo = typeInfo.Bind(Field, DataFormat.Text); + + lastColumnInfo = new(converterInfo, DataFormat, type != converterInfo.TypeToConvert || converterInfo.IsBoxingConverter); + + break; + } + case DataFormat.Binary or DataFormat.Text: + { + var typeInfo = AdoSerializerHelpers.GetTypeInfoForReading(type ?? typeof(object), PostgresType, _serializerOptions); + + // If we don't support the DataFormat we'll just throw. converterInfo = typeInfo.Bind(Field, DataFormat); - lastColumnInfo = new(converterInfo, DataFormat.Binary, typeof(object) == type || converterInfo.IsBoxingConverter); + lastColumnInfo = new(converterInfo, DataFormat, typeof(object) == type || converterInfo.IsBoxingConverter); break; + } default: - // For text we'll fall back to any available text converter for the expected clr type or throw. - if (!typeInfo.TryBind(Field, DataFormat, out converterInfo)) - { - typeInfo = AdoSerializerHelpers.GetTypeInfoForReading(type ?? typeof(string), _serializerOptions.TextPgType, _serializerOptions); - converterInfo = typeInfo.Bind(Field, DataFormat); - lastColumnInfo = new(converterInfo, DataFormat, type != converterInfo.TypeToConvert || converterInfo.IsBoxingConverter); - } - else - lastColumnInfo = new(converterInfo, DataFormat, typeof(object) == type || converterInfo.IsBoxingConverter); + ThrowHelper.ThrowUnreachableException("Unknown data format {0}", DataFormat); + lastColumnInfo = default; break; } @@ -386,7 +399,12 @@ void GetInfoSlow(Type? type, out ColumnInfo lastColumnInfo) // When passed in an unsupported type it allows the error to be more specific, instead of just having object/null to deal with. if (_objectOrDefaultInfo.ConverterInfo.IsDefault && type is not null) _ = ObjectOrDefaultInfo; + } + + // DataFormat.Text today exclusively signals that we executed with an UnknownResultTypeList. 
+ // If we ever want to fully support DataFormat.Text we'll need to flow UnknownResultType status separately. + bool IsUnknownResultType() => DataFormat is DataFormat.Text; } /// diff --git a/src/Npgsql/Internal/AdoSerializerHelpers.cs b/src/Npgsql/Internal/AdoSerializerHelpers.cs index d0ea19c7a8..ed0ea1cc56 100644 --- a/src/Npgsql/Internal/AdoSerializerHelpers.cs +++ b/src/Npgsql/Internal/AdoSerializerHelpers.cs @@ -10,6 +10,12 @@ namespace Npgsql.Internal; static class AdoSerializerHelpers { public static PgTypeInfo GetTypeInfoForReading(Type type, PostgresType postgresType, PgSerializerOptions options) + { + var (typeInfo, exception) = TryGetTypeInfoForReading(type, postgresType, options); + return typeInfo ?? throw exception!; + } + + static (PgTypeInfo? TypeInfo , Exception? Exception) TryGetTypeInfoForReading(Type type, PostgresType postgresType, PgSerializerOptions options) { PgTypeInfo? typeInfo = null; Exception? inner = null; @@ -21,12 +27,11 @@ public static PgTypeInfo GetTypeInfoForReading(Type type, PostgresType postgresT { inner = ex; } - return typeInfo ?? ThrowReadingNotSupported(type, postgresType.DisplayName, inner); + return typeInfo is not null ? (typeInfo, null) : (null, ThrowReadingNotSupported(type, postgresType.DisplayName, inner)); // InvalidCastException thrown to align with ADO.NET convention. - [DoesNotReturn] - static PgTypeInfo ThrowReadingNotSupported(Type? type, string displayName, Exception? inner = null) - => throw new InvalidCastException($"Reading{(type is null ? "" : $" as '{type.FullName}'")} is not supported for fields having DataTypeName '{displayName}'", inner); + static Exception ThrowReadingNotSupported(Type? type, string displayName, Exception? inner = null) + => new InvalidCastException($"Reading{(type is null ? "" : $" as '{type.FullName}'")} is not supported for fields having DataTypeName '{displayName}'", inner); } public static PgTypeInfo GetTypeInfoForWriting(Type? type, PgTypeId? 
pgTypeId, PgSerializerOptions options, NpgsqlDbType? npgsqlDbType = null) diff --git a/src/Npgsql/ThrowHelper.cs b/src/Npgsql/ThrowHelper.cs index 63f5647740..1c754884ab 100644 --- a/src/Npgsql/ThrowHelper.cs +++ b/src/Npgsql/ThrowHelper.cs @@ -1,5 +1,6 @@ using Npgsql.BackendMessages; using System; +using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using Npgsql.Internal; @@ -19,6 +20,10 @@ internal static void ThrowArgumentOutOfRangeException(string paramName, string m internal static void ThrowArgumentOutOfRangeException(string paramName, string message, object argument) => throw new ArgumentOutOfRangeException(paramName, string.Format(message, argument)); + [DoesNotReturn] + internal static void ThrowUnreachableException(string message, object argument) + => throw new UnreachableException(string.Format(message, argument)); + [DoesNotReturn] internal static void ThrowInvalidOperationException() => throw new InvalidOperationException(); diff --git a/test/Npgsql.Tests/Types/MiscTypeTests.cs b/test/Npgsql.Tests/Types/MiscTypeTests.cs index d689a268ef..5291da5a09 100644 --- a/test/Npgsql.Tests/Types/MiscTypeTests.cs +++ b/test/Npgsql.Tests/Types/MiscTypeTests.cs @@ -111,8 +111,23 @@ public async Task UnknownResultTypeList() cmd.UnknownResultTypeList = new[] { true, false }; await using var reader = await cmd.ExecuteReaderAsync(); reader.Read(); + Assert.That(reader.GetFieldType(0), Is.EqualTo(typeof(string))); Assert.That(reader.GetString(0), Is.EqualTo("t")); + Assert.That(reader.GetValue(0), Is.EqualTo("t")); + Assert.That(reader.GetFieldValue(0), Is.EqualTo("t")); + + // Try some alternative text types + Assert.That(reader.GetFieldValue(0), Is.EqualTo("t")); + Assert.That(reader.GetFieldValue(0), Is.EqualTo("t")); + + // Try as async + Assert.That(await reader.GetFieldValueAsync(0), Is.EqualTo("t")); + Assert.That(await reader.GetFieldValueAsync(0), Is.EqualTo("t")); + Assert.That(await reader.GetFieldValueAsync(0), Is.EqualTo("t")); + 
Assert.That(await reader.GetFieldValueAsync(0), Is.EqualTo("t")); + + // Normal binary column Assert.That(reader.GetInt32(1), Is.EqualTo(8)); } From 321d578c26b2b9f77619c12d81cc332117b61c22 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Fri, 5 Jul 2024 15:11:46 +0300 Subject: [PATCH 442/761] Add callback to customize Kerberos authentication options (#5775) Closes #5181 --- src/Npgsql/Internal/NpgsqlConnector.Auth.cs | 5 +++- src/Npgsql/Internal/NpgsqlConnector.cs | 8 ++++++ src/Npgsql/NpgsqlDataSource.cs | 6 +++- src/Npgsql/NpgsqlDataSourceBuilder.cs | 18 ++++++++++++ src/Npgsql/NpgsqlDataSourceConfiguration.cs | 7 +++-- src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 31 +++++++++++++++++++-- src/Npgsql/PublicAPI.Unshipped.txt | 2 ++ 7 files changed, 71 insertions(+), 6 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs index 3f73b4ed58..fad990158f 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs @@ -334,7 +334,10 @@ internal async Task AuthenticateGSS(bool async) { var targetName = $"{KerberosServiceName}/{Host}"; - using var authContext = new NegotiateAuthentication(new NegotiateAuthenticationClientOptions{ TargetName = targetName}); + var clientOptions = new NegotiateAuthenticationClientOptions { TargetName = targetName }; + NegotiateOptionsCallback?.Invoke(clientOptions); + + using var authContext = new NegotiateAuthentication(clientOptions); var data = authContext.GetOutgoingBlob(ReadOnlySpan.Empty, out var statusCode)!; Debug.Assert(statusCode == NegotiateAuthenticationStatusCode.ContinueNeeded); await WritePassword(data, 0, data.Length, async, UserCancellationToken).ConfigureAwait(false); diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index d1be574cf8..b2127d562d 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -61,6 +61,10 @@ public sealed 
partial class NpgsqlConnector ProvidePasswordCallback? ProvidePasswordCallback { get; } #pragma warning restore CS0618 +#if NET7_0_OR_GREATER + Action? NegotiateOptionsCallback { get; } +#endif + public Encoding TextEncoding { get; private set; } = default!; /// @@ -366,6 +370,10 @@ internal NpgsqlConnector(NpgsqlDataSource dataSource, NpgsqlConnection conn) ClientCertificatesCallback = dataSource.ClientCertificatesCallback; UserCertificateValidationCallback = dataSource.UserCertificateValidationCallback; +#if NET7_0_OR_GREATER + NegotiateOptionsCallback = dataSource.Configuration.NegotiateOptionsCallback; +#endif + State = ConnectorState.Closed; TransactionStatus = TransactionStatus.Idle; Settings = dataSource.Settings; diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index ba5fdf255a..67b8c154ac 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -108,7 +108,11 @@ internal NpgsqlDataSource( var resolverChain, _defaultNameTranslator, ConnectionInitializer, - ConnectionInitializerAsync) + ConnectionInitializerAsync +#if NET7_0_OR_GREATER + ,_ +#endif + ) = dataSourceConfig; _connectionLogger = LoggingConfiguration.ConnectionLogger; diff --git a/src/Npgsql/NpgsqlDataSourceBuilder.cs b/src/Npgsql/NpgsqlDataSourceBuilder.cs index e304a559cc..73e13a11f9 100644 --- a/src/Npgsql/NpgsqlDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlDataSourceBuilder.cs @@ -325,6 +325,24 @@ public NpgsqlDataSourceBuilder UsePasswordProvider( return this; } +#if NET7_0_OR_GREATER + /// + /// When using Kerberos, this is a callback that allows customizing default settings for Kerberos authentication. + /// + /// The callback containing logic to customize Kerberos authentication settings. + /// + /// + /// See . + /// + /// + /// The same builder instance so that multiple calls can be chained. + public NpgsqlDataSourceBuilder UseNegotiateOptionsCallback(Action? 
negotiateOptionsCallback) + { + _internalBuilder.UseNegotiateOptionsCallback(negotiateOptionsCallback); + return this; + } +#endif + #endregion Authentication #region Type mapping diff --git a/src/Npgsql/NpgsqlDataSourceConfiguration.cs b/src/Npgsql/NpgsqlDataSourceConfiguration.cs index 075df28aa8..83539bd17b 100644 --- a/src/Npgsql/NpgsqlDataSourceConfiguration.cs +++ b/src/Npgsql/NpgsqlDataSourceConfiguration.cs @@ -1,5 +1,4 @@ using System; -using System.Collections.Generic; using System.Net.Security; using System.Security.Cryptography.X509Certificates; using System.Threading; @@ -22,4 +21,8 @@ sealed record NpgsqlDataSourceConfiguration(string? Name, PgTypeInfoResolverChain ResolverChain, INpgsqlNameTranslator DefaultNameTranslator, Action? ConnectionInitializer, - Func? ConnectionInitializerAsync); + Func? ConnectionInitializerAsync +#if NET7_0_OR_GREATER + ,Action? NegotiateOptionsCallback +#endif + ); diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index 31a82dc04d..3ee4cd80dc 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -34,6 +34,10 @@ public sealed class NpgsqlSlimDataSourceBuilder : INpgsqlTypeMapper RemoteCertificateValidationCallback? _userCertificateValidationCallback; Action? _clientCertificatesCallback; +#if NET7_0_OR_GREATER + Action? _negotiateOptionsCallback; +#endif + IntegratedSecurityHandler _integratedSecurityHandler = new(); Func? _passwordProvider; @@ -289,6 +293,25 @@ public NpgsqlSlimDataSourceBuilder UsePasswordProvider( return this; } +#if NET7_0_OR_GREATER + /// + /// When using Kerberos, this is a callback that allows customizing default settings for Kerberos authentication. + /// + /// The callback containing logic to customize Kerberos authentication settings. + /// + /// + /// See . + /// + /// + /// The same builder instance so that multiple calls can be chained. 
+ public NpgsqlSlimDataSourceBuilder UseNegotiateOptionsCallback(Action? negotiateOptionsCallback) + { + _negotiateOptionsCallback = negotiateOptionsCallback; + + return this; + } +#endif + #endregion Authentication #region Type mapping @@ -602,7 +625,7 @@ public NpgsqlDataSource Build() var config = PrepareConfiguration(); var connectionStringBuilder = ConnectionStringBuilder.Clone(); - if (ConnectionStringBuilder.Host!.Contains(",")) + if (ConnectionStringBuilder.Host!.Contains(',')) { ValidateMultiHost(); @@ -667,7 +690,11 @@ _loggerFactory is null _resolverChainBuilder.Build(ConfigureResolverChain), DefaultNameTranslator, _connectionInitializer, - _connectionInitializerAsync); + _connectionInitializerAsync +#if NET7_0_OR_GREATER + ,_negotiateOptionsCallback +#endif + ); } void ValidateMultiHost() diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index aa6649d3e6..92e625df4c 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -1,7 +1,9 @@ #nullable enable +Npgsql.NpgsqlDataSourceBuilder.UseNegotiateOptionsCallback(System.Action? negotiateOptionsCallback) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableGeometricTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableJsonTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableNetworkTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.UseNegotiateOptionsCallback(System.Action? negotiateOptionsCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.Replication.PgOutput.ReplicationValue.GetFieldName() -> string! 
Npgsql.Replication.PgOutput.Messages.ParallelStreamAbortMessage Npgsql.Replication.PgOutput.Messages.ParallelStreamAbortMessage.AbortLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber From 3cb15adef19b3137e88929bb422c433a66dc8107 Mon Sep 17 00:00:00 2001 From: Brar Piening Date: Mon, 8 Jul 2024 15:20:25 +0200 Subject: [PATCH 443/761] Non-breaking obsoletion for PgOutputReplicationOptions constructors (#5779) Fixup for #5761 --- src/Npgsql/PublicAPI.Unshipped.txt | 4 --- .../PgOutput/PgOutputReplicationOptions.cs | 29 ++++++++++++------- 2 files changed, 18 insertions(+), 15 deletions(-) diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index 92e625df4c..84b9e74092 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -14,11 +14,7 @@ Npgsql.Replication.PgOutput.PgOutputProtocolVersion.V2 = 2 -> Npgsql.Replication Npgsql.Replication.PgOutput.PgOutputProtocolVersion.V3 = 3 -> Npgsql.Replication.PgOutput.PgOutputProtocolVersion Npgsql.Replication.PgOutput.PgOutputProtocolVersion.V4 = 4 -> Npgsql.Replication.PgOutput.PgOutputProtocolVersion Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PgOutputReplicationOptions(string! publicationName, Npgsql.Replication.PgOutput.PgOutputProtocolVersion protocolVersion, bool? binary = null, Npgsql.Replication.PgOutput.PgOutputStreamingMode? streamingMode = null, bool? messages = null, bool? twoPhase = null) -> void -Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PgOutputReplicationOptions(string! publicationName, ulong protocolVersion, bool? binary = null, Npgsql.Replication.PgOutput.PgOutputStreamingMode? streamingMode = null, bool? messages = null, bool? twoPhase = null) -> void -*REMOVED*Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PgOutputReplicationOptions(string! publicationName, ulong protocolVersion, bool? binary = null, bool? streaming = null, bool? messages = null, bool? 
twoPhase = null) -> void Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PgOutputReplicationOptions(System.Collections.Generic.IEnumerable! publicationNames, Npgsql.Replication.PgOutput.PgOutputProtocolVersion protocolVersion, bool? binary = null, Npgsql.Replication.PgOutput.PgOutputStreamingMode? streamingMode = null, bool? messages = null, bool? twoPhase = null) -> void -*REMOVED*Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PgOutputReplicationOptions(System.Collections.Generic.IEnumerable! publicationNames, ulong protocolVersion, bool? binary = null, bool? streaming = null, bool? messages = null, bool? twoPhase = null) -> void -Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PgOutputReplicationOptions(System.Collections.Generic.IEnumerable! publicationNames, ulong protocolVersion, bool? binary = null, Npgsql.Replication.PgOutput.PgOutputStreamingMode? streamingMode = null, bool? messages = null, bool? twoPhase = null) -> void Npgsql.Replication.PgOutput.PgOutputReplicationOptions.ProtocolVersion.get -> Npgsql.Replication.PgOutput.PgOutputProtocolVersion *REMOVED*Npgsql.Replication.PgOutput.PgOutputReplicationOptions.ProtocolVersion.get -> ulong Npgsql.Replication.PgOutput.PgOutputReplicationOptions.StreamingMode.get -> Npgsql.Replication.PgOutput.PgOutputStreamingMode? diff --git a/src/Npgsql/Replication/PgOutput/PgOutputReplicationOptions.cs b/src/Npgsql/Replication/PgOutput/PgOutputReplicationOptions.cs index 394758c83a..b6aedba5d3 100644 --- a/src/Npgsql/Replication/PgOutput/PgOutputReplicationOptions.cs +++ b/src/Npgsql/Replication/PgOutput/PgOutputReplicationOptions.cs @@ -13,16 +13,19 @@ public class PgOutputReplicationOptions : IEquatable /// Creates a new instance of . /// /// The publication names to include into the stream - /// The version of the logical streaming replication protocol + /// The version of the logical streaming replication protocol. + /// Passing in unsupported protocol version numbers may lead to runtime errors. 
/// Send values in binary representation - /// Enable streaming of in-progress transactions + /// Enable streaming of in-progress transactions. + /// Setting this to sets + /// to . /// Write logical decoding messages into the replication stream /// Enable streaming of prepared transactions - [Obsolete("Please switch to the overloads that take a PgOutputProtocolVersion value instead.")] - public PgOutputReplicationOptions(string publicationName, ulong protocolVersion, bool? binary = null, - PgOutputStreamingMode? streamingMode = null, bool? messages = null, bool? twoPhase = null) + [Obsolete("Please switch to the overloads that take PgOutputProtocolVersion and PgOutputStreamingMode values instead.")] + public PgOutputReplicationOptions(string publicationName, ulong protocolVersion, bool? binary = null, bool? streaming = null, + bool? messages = null, bool? twoPhase = null) : this([publicationName ?? throw new ArgumentNullException(nameof(publicationName))], (PgOutputProtocolVersion)protocolVersion, - binary, streamingMode, messages, twoPhase) + binary, streaming.HasValue ? streaming.Value ? PgOutputStreamingMode.On : PgOutputStreamingMode.Off : null, messages, twoPhase) { } @@ -46,15 +49,19 @@ public PgOutputReplicationOptions(string publicationName, PgOutputProtocolVersio /// Creates a new instance of . /// /// The publication names to include into the stream - /// The version of the logical streaming replication protocol + /// The version of the logical streaming replication protocol. + /// Passing in unsupported protocol version numbers may lead to runtime errors. /// Send values in binary representation - /// Enable streaming of in-progress transactions + /// Enable streaming of in-progress transactions. + /// Setting this to sets + /// to . 
/// Write logical decoding messages into the replication stream /// Enable streaming of prepared transactions - [Obsolete("Please switch to the overloads that take a PgOutputProtocolVersion value instead.")] + [Obsolete("Please switch to the overloads that take PgOutputProtocolVersion and PgOutputStreamingMode values instead.")] public PgOutputReplicationOptions(IEnumerable publicationNames, ulong protocolVersion, bool? binary = null, - PgOutputStreamingMode? streamingMode = null, bool? messages = null, bool? twoPhase = null) - : this(publicationNames, (PgOutputProtocolVersion)protocolVersion, binary, streamingMode, messages, twoPhase) + bool? streaming = null, bool? messages = null, bool? twoPhase = null) + : this(publicationNames, (PgOutputProtocolVersion)protocolVersion, binary, + streaming.HasValue ? streaming.Value ? PgOutputStreamingMode.On : PgOutputStreamingMode.Off : null, messages, twoPhase) { } From 2f29e62bbd012d504d5a3858b479b86b1ada1ade Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 10 Jul 2024 13:38:39 +0300 Subject: [PATCH 444/761] Bump actions/setup-dotnet from 4.0.0 to 4.0.1 (#5782) --- .github/workflows/build.yml | 6 +++--- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/native-aot.yml | 4 ++-- .github/workflows/rich-code-nav.yml | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 46273efc41..a789857815 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -69,7 +69,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.0.0 + uses: actions/setup-dotnet@v4.0.1 with: dotnet-version: | ${{ env.dotnet_sdk_version }} @@ -352,7 +352,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.0.0 + uses: actions/setup-dotnet@v4.0.1 with: dotnet-version: ${{ env.dotnet_sdk_version }} @@ -386,7 
+386,7 @@ jobs: uses: actions/checkout@v4 - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.0.0 + uses: actions/setup-dotnet@v4.0.1 with: dotnet-version: ${{ env.dotnet_sdk_version }} diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index c171e62e26..fe38f213f6 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -66,7 +66,7 @@ jobs: # queries: ./path/to/local/query, your-org/your-repo/queries@main - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.0.0 + uses: actions/setup-dotnet@v4.0.1 with: dotnet-version: ${{ env.dotnet_sdk_version }} diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index a795054e8c..3e6e7ce47f 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -108,7 +108,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.0.0 + uses: actions/setup-dotnet@v4.0.1 with: dotnet-version: | ${{ env.dotnet_sdk_version }} @@ -145,7 +145,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.0.0 + uses: actions/setup-dotnet@v4.0.1 with: dotnet-version: | ${{ env.dotnet_sdk_version }} diff --git a/.github/workflows/rich-code-nav.yml b/.github/workflows/rich-code-nav.yml index 11686cdab3..9c629c0801 100644 --- a/.github/workflows/rich-code-nav.yml +++ b/.github/workflows/rich-code-nav.yml @@ -24,7 +24,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.0.0 + uses: actions/setup-dotnet@v4.0.1 with: dotnet-version: ${{ env.dotnet_sdk_version }} From a9bfb4a07c1db1e8c7b711b978c6f5a2aa57a958 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Thu, 11 Jul 2024 19:04:39 +0200 Subject: [PATCH 445/761] Bump System.Text.Json to 8.0.4 (#5787) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props 
b/Directory.Packages.props index 39eeb13026..4799d90a90 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -10,7 +10,7 @@ - + From 30ba2ddd614972c72d8d6997c8fc38ee7c4ea517 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Thu, 11 Jul 2024 19:13:12 +0200 Subject: [PATCH 446/761] Correct docs on NpgsqlParameterCollection (#5786) Fixes #5778 --- src/Npgsql/NpgsqlParameterCollection.cs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Npgsql/NpgsqlParameterCollection.cs b/src/Npgsql/NpgsqlParameterCollection.cs index a10f9dceb0..8031fd7efc 100644 --- a/src/Npgsql/NpgsqlParameterCollection.cs +++ b/src/Npgsql/NpgsqlParameterCollection.cs @@ -228,7 +228,7 @@ internal void ChangeParameterName(NpgsqlParameter parameter, string? value) /// Adds the specified object to the . /// /// The to add to the collection. - /// The index of the new object. + /// The parameter that was added. public NpgsqlParameter Add(NpgsqlParameter value) { if (value is null) @@ -315,7 +315,7 @@ public NpgsqlParameter AddWithValue(NpgsqlDbType parameterType, object value) /// /// The name of the parameter. /// One of the values. - /// The index of the new object. + /// The parameter that was added. public NpgsqlParameter Add(string parameterName, NpgsqlDbType parameterType) => Add(new NpgsqlParameter(parameterName, parameterType)); @@ -326,7 +326,7 @@ public NpgsqlParameter Add(string parameterName, NpgsqlDbType parameterType) /// The name of the parameter. /// One of the values. /// The length of the column. - /// The index of the new object. + /// The parameter that was added. public NpgsqlParameter Add(string parameterName, NpgsqlDbType parameterType, int size) => Add(new NpgsqlParameter(parameterName, parameterType, size)); @@ -338,7 +338,7 @@ public NpgsqlParameter Add(string parameterName, NpgsqlDbType parameterType, int /// One of the values. /// The length of the column. /// The name of the source column. - /// The index of the new object. 
+ /// The parameter that was added. public NpgsqlParameter Add(string parameterName, NpgsqlDbType parameterType, int size, string sourceColumn) => Add(new NpgsqlParameter(parameterName, parameterType, size, sourceColumn)); From 04de96873bc9c4cdc66ff0e4fb48da100467d230 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Fri, 26 Jul 2024 17:09:15 +0300 Subject: [PATCH 447/761] Add a callback for SslClientAuthenticationOptions (#5483) Closes #5478 --- src/Npgsql/Internal/NpgsqlConnector.cs | 106 +++++--- .../Internal/TransportSecurityHandler.cs | 7 +- src/Npgsql/NpgsqlConnection.cs | 24 +- src/Npgsql/NpgsqlDataSource.cs | 7 +- src/Npgsql/NpgsqlDataSourceBuilder.cs | 20 ++ src/Npgsql/NpgsqlDataSourceConfiguration.cs | 3 +- src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 51 +++- .../Properties/NpgsqlStrings.Designer.cs | 240 +++++++++++++----- src/Npgsql/Properties/NpgsqlStrings.resx | 15 +- src/Npgsql/PublicAPI.Unshipped.txt | 4 + test/Npgsql.Tests/ConnectionTests.cs | 25 +- test/Npgsql.Tests/SecurityTests.cs | 40 ++- 12 files changed, 393 insertions(+), 149 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index b2127d562d..7f10a9ed64 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -55,8 +55,8 @@ public sealed partial class NpgsqlConnector /// public NpgsqlConnectionStringBuilder Settings { get; } - Action? ClientCertificatesCallback { get; } - RemoteCertificateValidationCallback? UserCertificateValidationCallback { get; } + Action? SslClientAuthenticationOptionsCallback { get; } + #pragma warning disable CS0618 // ProvidePasswordCallback is obsolete ProvidePasswordCallback? 
ProvidePasswordCallback { get; } #pragma warning restore CS0618 @@ -282,7 +282,11 @@ internal bool PostgresCancellationPerformed internal bool AttemptPostgresCancellation { get; private set; } static readonly TimeSpan _cancelImmediatelyTimeout = TimeSpan.FromMilliseconds(-1); +#pragma warning disable CA1859 + // We're casting to IDisposable to not explicitly reference X509Certificate2 for NativeAOT + // TODO: probably pointless now, needs to be rechecked IDisposable? _certificate; +#pragma warning restore CA1859 internal NpgsqlLoggingConfiguration LoggingConfiguration { get; } @@ -337,12 +341,34 @@ internal bool PostgresCancellationPerformed internal NpgsqlConnector(NpgsqlDataSource dataSource, NpgsqlConnection conn) : this(dataSource) { - if (conn.ProvideClientCertificatesCallback is not null) - ClientCertificatesCallback = certs => conn.ProvideClientCertificatesCallback(certs); - if (conn.UserCertificateValidationCallback is not null) - UserCertificateValidationCallback = conn.UserCertificateValidationCallback; - + var sslClientAuthenticationOptionsCallback = conn.SslClientAuthenticationOptionsCallback; #pragma warning disable CS0618 // Obsolete + var provideClientCertificatesCallback = conn.ProvideClientCertificatesCallback; + var userCertificateValidationCallback = conn.UserCertificateValidationCallback; + if (provideClientCertificatesCallback is not null || + userCertificateValidationCallback is not null) + { + if (sslClientAuthenticationOptionsCallback is not null) + throw new NotSupportedException(NpgsqlStrings.SslClientAuthenticationOptionsCallbackWithOtherCallbacksNotSupported); + + sslClientAuthenticationOptionsCallback = options => + { + if (provideClientCertificatesCallback is not null) + { + options.ClientCertificates ??= new X509Certificate2Collection(); + provideClientCertificatesCallback.Invoke(options.ClientCertificates); + } + + if (userCertificateValidationCallback is not null) + { + options.RemoteCertificateValidationCallback = 
userCertificateValidationCallback; + } + }; + } + + if (sslClientAuthenticationOptionsCallback is not null) + SslClientAuthenticationOptionsCallback = sslClientAuthenticationOptionsCallback; + ProvidePasswordCallback = conn.ProvidePasswordCallback; #pragma warning restore CS0618 } @@ -350,8 +376,7 @@ internal NpgsqlConnector(NpgsqlDataSource dataSource, NpgsqlConnection conn) NpgsqlConnector(NpgsqlConnector connector) : this(connector.DataSource) { - ClientCertificatesCallback = connector.ClientCertificatesCallback; - UserCertificateValidationCallback = connector.UserCertificateValidationCallback; + SslClientAuthenticationOptionsCallback = connector.SslClientAuthenticationOptionsCallback; ProvidePasswordCallback = connector.ProvidePasswordCallback; } @@ -367,8 +392,7 @@ internal NpgsqlConnector(NpgsqlDataSource dataSource, NpgsqlConnection conn) TransactionLogger = LoggingConfiguration.TransactionLogger; CopyLogger = LoggingConfiguration.CopyLogger; - ClientCertificatesCallback = dataSource.ClientCertificatesCallback; - UserCertificateValidationCallback = dataSource.UserCertificateValidationCallback; + SslClientAuthenticationOptionsCallback = dataSource.SslClientAuthenticationOptionsCallback; #if NET7_0_OR_GREATER NegotiateOptionsCallback = dataSource.Configuration.NegotiateOptionsCallback; @@ -777,7 +801,7 @@ async Task RawOpen(SslMode sslMode, NpgsqlTimeout timeout, bool async, Cancellat throw new NpgsqlException("SSL connection requested. 
No SSL enabled connection from this host is configured."); break; case 'S': - await DataSource.TransportSecurityHandler.NegotiateEncryption(async, this, sslMode, timeout).ConfigureAwait(false); + await DataSource.TransportSecurityHandler.NegotiateEncryption(async, this, sslMode, timeout, cancellationToken).ConfigureAwait(false); break; } @@ -802,7 +826,7 @@ async Task RawOpen(SslMode sslMode, NpgsqlTimeout timeout, bool async, Cancellat } } - internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, bool async) + internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken) { var clientCertificates = new X509Certificate2Collection(); var certPath = Settings.SslCertificate ?? PostgresEnvironment.SslCert ?? PostgresEnvironment.SslCertDefault; @@ -812,7 +836,7 @@ internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, var password = Settings.SslPassword; X509Certificate2? cert = null; - if (Path.GetExtension(certPath).ToUpperInvariant() != ".PFX") + if (!string.Equals(Path.GetExtension(certPath), ".pfx", StringComparison.OrdinalIgnoreCase)) { // It's PEM time var keyPath = Settings.SslKey ?? PostgresEnvironment.SslKey ?? PostgresEnvironment.SslKeyDefault; @@ -836,28 +860,13 @@ internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, try { - ClientCertificatesCallback?.Invoke(clientCertificates); - var checkCertificateRevocation = Settings.CheckCertificateRevocation; RemoteCertificateValidationCallback? certificateValidationCallback; X509Certificate2? caCert; string? 
certRootPath = null; - if (UserCertificateValidationCallback is not null) - { - if (sslMode is SslMode.VerifyCA or SslMode.VerifyFull) - throw new ArgumentException(string.Format(NpgsqlStrings.CannotUseSslVerifyWithUserCallback, sslMode)); - - if (Settings.RootCertificate is not null) - throw new ArgumentException(NpgsqlStrings.CannotUseSslRootCertificateWithUserCallback); - - if (DataSource.TransportSecurityHandler.RootCertificateCallback is not null) - throw new ArgumentException(NpgsqlStrings.CannotUseValidationRootCertificateCallbackWithUserCallback); - - certificateValidationCallback = UserCertificateValidationCallback; - } - else if (sslMode is SslMode.Prefer or SslMode.Require) + if (sslMode is SslMode.Prefer or SslMode.Require) { certificateValidationCallback = SslTrustServerValidation; checkCertificateRevocation = false; @@ -892,19 +901,48 @@ internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, timeout.CheckAndApply(this); - try + var sslStream = new SslStream(_stream, leaveInnerStreamOpen: false); + + var sslStreamOptions = new SslClientAuthenticationOptions + { + TargetHost = host, + ClientCertificates = clientCertificates, + EnabledSslProtocols = SslProtocols.None, + CertificateRevocationCheckMode = checkCertificateRevocation ? 
X509RevocationMode.Online : X509RevocationMode.Offline, + RemoteCertificateValidationCallback = certificateValidationCallback + }; + + if (SslClientAuthenticationOptionsCallback is not null) { - var sslStream = new SslStream(_stream, leaveInnerStreamOpen: false, certificateValidationCallback); + SslClientAuthenticationOptionsCallback.Invoke(sslStreamOptions); + // User changed remote certificate validation callback + // Check whether the change doesn't lead to unexpected behavior + if (sslStreamOptions.RemoteCertificateValidationCallback != certificateValidationCallback) + { + if (sslMode is SslMode.VerifyCA or SslMode.VerifyFull) + throw new ArgumentException(string.Format(NpgsqlStrings.CannotUseSslVerifyWithCustomValidationCallback, sslMode)); + + if (Settings.RootCertificate is not null) + throw new ArgumentException(NpgsqlStrings.CannotUseSslRootCertificateWithCustomValidationCallback); + + if (DataSource.TransportSecurityHandler.RootCertificateCallback is not null) + throw new ArgumentException(NpgsqlStrings.CannotUseValidationRootCertificateCallbackWithCustomValidationCallback); + } + } + + try + { if (async) - await sslStream.AuthenticateAsClientAsync(host, clientCertificates, SslProtocols.None, checkCertificateRevocation).ConfigureAwait(false); + await sslStream.AuthenticateAsClientAsync(sslStreamOptions, cancellationToken).ConfigureAwait(false); else - sslStream.AuthenticateAsClient(host, clientCertificates, SslProtocols.None, checkCertificateRevocation); + sslStream.AuthenticateAsClient(sslStreamOptions); _stream = sslStream; } catch (Exception e) { + sslStream.Dispose(); throw new NpgsqlException("Exception while performing SSL handshake", e); } diff --git a/src/Npgsql/Internal/TransportSecurityHandler.cs b/src/Npgsql/Internal/TransportSecurityHandler.cs index ecb447c6da..9945e80534 100644 --- a/src/Npgsql/Internal/TransportSecurityHandler.cs +++ b/src/Npgsql/Internal/TransportSecurityHandler.cs @@ -1,5 +1,6 @@ using System; using 
System.Security.Cryptography.X509Certificates; +using System.Threading; using System.Threading.Tasks; using Npgsql.Properties; using Npgsql.Util; @@ -16,7 +17,7 @@ public virtual Func? RootCertificateCallback set => throw new NotSupportedException(string.Format(NpgsqlStrings.TransportSecurityDisabled, nameof(NpgsqlSlimDataSourceBuilder.EnableTransportSecurity))); } - public virtual Task NegotiateEncryption(bool async, NpgsqlConnector connector, SslMode sslMode, NpgsqlTimeout timeout) + public virtual Task NegotiateEncryption(bool async, NpgsqlConnector connector, SslMode sslMode, NpgsqlTimeout timeout, CancellationToken cancellationToken) => throw new NotSupportedException(string.Format(NpgsqlStrings.TransportSecurityDisabled, nameof(NpgsqlSlimDataSourceBuilder.EnableTransportSecurity))); public virtual void AuthenticateSASLSha256Plus(NpgsqlConnector connector, ref string mechanism, ref string cbindFlag, ref string cbind, @@ -30,8 +31,8 @@ sealed class RealTransportSecurityHandler : TransportSecurityHandler public override Func? 
RootCertificateCallback { get; set; } - public override Task NegotiateEncryption(bool async, NpgsqlConnector connector, SslMode sslMode, NpgsqlTimeout timeout) - => connector.NegotiateEncryption(sslMode, timeout, async); + public override Task NegotiateEncryption(bool async, NpgsqlConnector connector, SslMode sslMode, NpgsqlTimeout timeout, CancellationToken cancellationToken) + => connector.NegotiateEncryption(sslMode, timeout, async, cancellationToken); public override void AuthenticateSASLSha256Plus(NpgsqlConnector connector, ref string mechanism, ref string cbindFlag, ref string cbind, ref bool successfulBind) diff --git a/src/Npgsql/NpgsqlConnection.cs b/src/Npgsql/NpgsqlConnection.cs index da8262636c..d3b7d8f636 100644 --- a/src/Npgsql/NpgsqlConnection.cs +++ b/src/Npgsql/NpgsqlConnection.cs @@ -1017,6 +1017,7 @@ internal void OnNotification(NpgsqlNotificationEventArgs e) /// /// See /// + [Obsolete("Use UseSslClientAuthenticationOptionsCallback")] public ProvideClientCertificatesCallback? ProvideClientCertificatesCallback { get; set; } /// @@ -1032,8 +1033,19 @@ internal void OnNotification(NpgsqlNotificationEventArgs e) /// See . /// /// + [Obsolete("Use UseSslClientAuthenticationOptionsCallback")] public RemoteCertificateValidationCallback? UserCertificateValidationCallback { get; set; } + /// + /// When using SSL/TLS, this is a callback that allows customizing SslStream's authentication options. + /// + /// + /// + /// See . + /// + /// + public Action? SslClientAuthenticationOptionsCallback { get; set; } + #endregion SSL #region Backend version, capabilities, settings @@ -1747,9 +1759,10 @@ object ICloneable.Clone() ? 
_cloningInstantiator!(_connectionString) : _dataSource.CreateConnection(); + conn.SslClientAuthenticationOptionsCallback = SslClientAuthenticationOptionsCallback; +#pragma warning disable CS0618 // Obsolete conn.ProvideClientCertificatesCallback = ProvideClientCertificatesCallback; conn.UserCertificateValidationCallback = UserCertificateValidationCallback; -#pragma warning disable CS0618 // Obsolete conn.ProvidePasswordCallback = ProvidePasswordCallback; #pragma warning restore CS0618 conn._userFacingConnectionString = _userFacingConnectionString; @@ -1773,13 +1786,10 @@ public NpgsqlConnection CloneWith(string connectionString) return new NpgsqlConnection(csb.ToString()) { - ProvideClientCertificatesCallback = - ProvideClientCertificatesCallback ?? - (_dataSource?.ClientCertificatesCallback is { } clientCertificatesCallback - ? (ProvideClientCertificatesCallback)(certs => clientCertificatesCallback(certs)) - : null), - UserCertificateValidationCallback = UserCertificateValidationCallback ?? _dataSource?.UserCertificateValidationCallback, + SslClientAuthenticationOptionsCallback = SslClientAuthenticationOptionsCallback ?? _dataSource?.SslClientAuthenticationOptionsCallback, #pragma warning disable CS0618 // Obsolete + ProvideClientCertificatesCallback = ProvideClientCertificatesCallback, + UserCertificateValidationCallback = UserCertificateValidationCallback, ProvidePasswordCallback = ProvidePasswordCallback, #pragma warning restore CS0618 }; diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index 67b8c154ac..2455194716 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -40,8 +40,8 @@ public abstract class NpgsqlDataSource : DbDataSource internal NpgsqlDatabaseInfo DatabaseInfo { get; private set; } = null!; // Initialized at bootstrapping internal TransportSecurityHandler TransportSecurityHandler { get; } - internal RemoteCertificateValidationCallback? 
UserCertificateValidationCallback { get; } - internal Action? ClientCertificatesCallback { get; } + + internal Action? SslClientAuthenticationOptionsCallback { get; } readonly Func? _passwordProvider; readonly Func>? _passwordProviderAsync; @@ -98,8 +98,7 @@ internal NpgsqlDataSource( LoggingConfiguration, TransportSecurityHandler, IntegratedSecurityHandler, - UserCertificateValidationCallback, - ClientCertificatesCallback, + SslClientAuthenticationOptionsCallback, _passwordProvider, _passwordProviderAsync, _periodicPasswordProvider, diff --git a/src/Npgsql/NpgsqlDataSourceBuilder.cs b/src/Npgsql/NpgsqlDataSourceBuilder.cs index 73e13a11f9..8274f6669a 100644 --- a/src/Npgsql/NpgsqlDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlDataSourceBuilder.cs @@ -194,6 +194,7 @@ public NpgsqlDataSourceBuilder EnableUnmappedTypes() /// /// /// The same builder instance so that multiple calls can be chained. + [Obsolete("Use UseSslClientAuthenticationOptionsCallback")] public NpgsqlDataSourceBuilder UseUserCertificateValidationCallback(RemoteCertificateValidationCallback userCertificateValidationCallback) { _internalBuilder.UseUserCertificateValidationCallback(userCertificateValidationCallback); @@ -205,6 +206,7 @@ public NpgsqlDataSourceBuilder UseUserCertificateValidationCallback(RemoteCertif /// /// The client certificate to be sent to PostgreSQL when opening a connection. /// The same builder instance so that multiple calls can be chained. + [Obsolete("Use UseSslClientAuthenticationOptionsCallback")] public NpgsqlDataSourceBuilder UseClientCertificate(X509Certificate? clientCertificate) { _internalBuilder.UseClientCertificate(clientCertificate); @@ -216,12 +218,29 @@ public NpgsqlDataSourceBuilder UseClientCertificate(X509Certificate? clientCerti /// /// The client certificate collection to be sent to PostgreSQL when opening a connection. /// The same builder instance so that multiple calls can be chained. 
+ [Obsolete("Use UseSslClientAuthenticationOptionsCallback")] public NpgsqlDataSourceBuilder UseClientCertificates(X509CertificateCollection? clientCertificates) { _internalBuilder.UseClientCertificates(clientCertificates); return this; } + /// + /// When using SSL/TLS, this is a callback that allows customizing SslStream's authentication options. + /// + /// The callback to customize SslStream's authentication options. + /// + /// + /// See . + /// + /// + /// The same builder instance so that multiple calls can be chained. + public NpgsqlDataSourceBuilder UseSslClientAuthenticationOptionsCallback(Action? sslClientAuthenticationOptionsCallback) + { + _internalBuilder.UseSslClientAuthenticationOptionsCallback(sslClientAuthenticationOptionsCallback); + return this; + } + /// /// Specifies a callback to modify the collection of SSL/TLS client certificates which Npgsql will send to PostgreSQL for /// certificate-based authentication. This is an advanced API, consider using or @@ -239,6 +258,7 @@ public NpgsqlDataSourceBuilder UseClientCertificates(X509CertificateCollection? /// /// /// The same builder instance so that multiple calls can be chained. + [Obsolete("Use UseSslClientAuthenticationOptionsCallback")] public NpgsqlDataSourceBuilder UseClientCertificatesCallback(Action? clientCertificatesCallback) { _internalBuilder.UseClientCertificatesCallback(clientCertificatesCallback); diff --git a/src/Npgsql/NpgsqlDataSourceConfiguration.cs b/src/Npgsql/NpgsqlDataSourceConfiguration.cs index 83539bd17b..64acebf136 100644 --- a/src/Npgsql/NpgsqlDataSourceConfiguration.cs +++ b/src/Npgsql/NpgsqlDataSourceConfiguration.cs @@ -11,8 +11,7 @@ sealed record NpgsqlDataSourceConfiguration(string? Name, NpgsqlLoggingConfiguration LoggingConfiguration, TransportSecurityHandler TransportSecurityHandler, IntegratedSecurityHandler userCertificateValidationCallback, - RemoteCertificateValidationCallback? UserCertificateValidationCallback, - Action? ClientCertificatesCallback, + Action? 
SslClientAuthenticationOptionsCallback, Func? PasswordProvider, Func>? PasswordProviderAsync, Func>? PeriodicPasswordProvider, diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index 3ee4cd80dc..cd8c556fdb 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -33,6 +33,7 @@ public sealed class NpgsqlSlimDataSourceBuilder : INpgsqlTypeMapper TransportSecurityHandler _transportSecurityHandler = new(); RemoteCertificateValidationCallback? _userCertificateValidationCallback; Action? _clientCertificatesCallback; + Action? _sslClientAuthenticationOptionsCallback; #if NET7_0_OR_GREATER Action? _negotiateOptionsCallback; @@ -143,6 +144,7 @@ public NpgsqlSlimDataSourceBuilder ConfigureJsonOptions(JsonSerializerOptions se /// /// /// The same builder instance so that multiple calls can be chained. + [Obsolete("Use UseSslClientAuthenticationOptionsCallback")] public NpgsqlSlimDataSourceBuilder UseUserCertificateValidationCallback( RemoteCertificateValidationCallback userCertificateValidationCallback) { @@ -156,6 +158,7 @@ public NpgsqlSlimDataSourceBuilder UseUserCertificateValidationCallback( /// /// The client certificate to be sent to PostgreSQL when opening a connection. /// The same builder instance so that multiple calls can be chained. + [Obsolete("Use UseSslClientAuthenticationOptionsCallback")] public NpgsqlSlimDataSourceBuilder UseClientCertificate(X509Certificate? clientCertificate) { if (clientCertificate is null) @@ -170,9 +173,27 @@ public NpgsqlSlimDataSourceBuilder UseClientCertificate(X509Certificate? clientC /// /// The client certificate collection to be sent to PostgreSQL when opening a connection. /// The same builder instance so that multiple calls can be chained. + [Obsolete("Use UseSslClientAuthenticationOptionsCallback")] public NpgsqlSlimDataSourceBuilder UseClientCertificates(X509CertificateCollection? 
clientCertificates) => UseClientCertificatesCallback(clientCertificates is null ? null : certs => certs.AddRange(clientCertificates)); + /// + /// When using SSL/TLS, this is a callback that allows customizing SslStream's authentication options. + /// + /// The callback to customize SslStream's authentication options. + /// + /// + /// See . + /// + /// + /// The same builder instance so that multiple calls can be chained. + public NpgsqlSlimDataSourceBuilder UseSslClientAuthenticationOptionsCallback(Action? sslClientAuthenticationOptionsCallback) + { + _sslClientAuthenticationOptionsCallback = sslClientAuthenticationOptionsCallback; + + return this; + } + /// /// Specifies a callback to modify the collection of SSL/TLS client certificates which Npgsql will send to PostgreSQL for /// certificate-based authentication. This is an advanced API, consider using or @@ -190,6 +211,7 @@ public NpgsqlSlimDataSourceBuilder UseClientCertificates(X509CertificateCollecti /// /// /// The same builder instance so that multiple calls can be chained. + [Obsolete("Use UseSslClientAuthenticationOptionsCallback")] public NpgsqlSlimDataSourceBuilder UseClientCertificatesCallback(Action? 
clientCertificatesCallback) { _clientCertificatesCallback = clientCertificatesCallback; @@ -655,7 +677,31 @@ NpgsqlDataSourceConfiguration PrepareConfiguration() { ConnectionStringBuilder.PostProcessAndValidate(); - if (!_transportSecurityHandler.SupportEncryption && (_userCertificateValidationCallback is not null || _clientCertificatesCallback is not null)) + var sslClientAuthenticationOptionsCallback = _sslClientAuthenticationOptionsCallback; + var hasCertificateCallbacks = _userCertificateValidationCallback is not null || _clientCertificatesCallback is not null; + if (sslClientAuthenticationOptionsCallback is not null && hasCertificateCallbacks) + { + throw new NotSupportedException(NpgsqlStrings.SslClientAuthenticationOptionsCallbackWithOtherCallbacksNotSupported); + } + + if (sslClientAuthenticationOptionsCallback is null && hasCertificateCallbacks) + { + sslClientAuthenticationOptionsCallback = options => + { + if (_clientCertificatesCallback is not null) + { + options.ClientCertificates ??= new X509Certificate2Collection(); + _clientCertificatesCallback.Invoke(options.ClientCertificates); + } + + if (_userCertificateValidationCallback is not null) + { + options.RemoteCertificateValidationCallback = _userCertificateValidationCallback; + } + }; + } + + if (!_transportSecurityHandler.SupportEncryption && sslClientAuthenticationOptionsCallback is not null) { throw new InvalidOperationException(NpgsqlStrings.TransportSecurityDisabled); } @@ -680,8 +726,7 @@ _loggerFactory is null : new NpgsqlLoggingConfiguration(_loggerFactory, _sensitiveDataLoggingEnabled), _transportSecurityHandler, _integratedSecurityHandler, - _userCertificateValidationCallback, - _clientCertificatesCallback, + sslClientAuthenticationOptionsCallback, _passwordProvider, _passwordProviderAsync, _periodicPasswordProvider, diff --git a/src/Npgsql/Properties/NpgsqlStrings.Designer.cs b/src/Npgsql/Properties/NpgsqlStrings.Designer.cs index f00370da48..7f6fca99cd 100644 --- 
a/src/Npgsql/Properties/NpgsqlStrings.Designer.cs +++ b/src/Npgsql/Properties/NpgsqlStrings.Designer.cs @@ -11,32 +11,46 @@ namespace Npgsql.Properties { using System; - [System.CodeDom.Compiler.GeneratedCodeAttribute("System.Resources.Tools.StronglyTypedResourceBuilder", "4.0.0.0")] - [System.Diagnostics.DebuggerNonUserCodeAttribute()] - [System.Runtime.CompilerServices.CompilerGeneratedAttribute()] + /// + /// A strongly-typed resource class, for looking up localized strings, etc. + /// + // This class was auto-generated by the StronglyTypedResourceBuilder + // class via a tool like ResGen or Visual Studio. + // To add or remove a member, edit your .ResX file then rerun ResGen + // with the /str option, or rebuild your VS project. + [global::System.CodeDom.Compiler.GeneratedCodeAttribute("System.Resources.Tools.StronglyTypedResourceBuilder", "4.0.0.0")] + [global::System.Diagnostics.DebuggerNonUserCodeAttribute()] + [global::System.Runtime.CompilerServices.CompilerGeneratedAttribute()] internal class NpgsqlStrings { - private static System.Resources.ResourceManager resourceMan; + private static global::System.Resources.ResourceManager resourceMan; - private static System.Globalization.CultureInfo resourceCulture; + private static global::System.Globalization.CultureInfo resourceCulture; - [System.Diagnostics.CodeAnalysis.SuppressMessageAttribute("Microsoft.Performance", "CA1811:AvoidUncalledPrivateCode")] + [global::System.Diagnostics.CodeAnalysis.SuppressMessageAttribute("Microsoft.Performance", "CA1811:AvoidUncalledPrivateCode")] internal NpgsqlStrings() { } - [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Advanced)] - internal static System.Resources.ResourceManager ResourceManager { + /// + /// Returns the cached ResourceManager instance used by this class. 
+ /// + [global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)] + internal static global::System.Resources.ResourceManager ResourceManager { get { - if (object.Equals(null, resourceMan)) { - System.Resources.ResourceManager temp = new System.Resources.ResourceManager("Npgsql.Properties.NpgsqlStrings", typeof(NpgsqlStrings).Assembly); + if (object.ReferenceEquals(resourceMan, null)) { + global::System.Resources.ResourceManager temp = new global::System.Resources.ResourceManager("Npgsql.Properties.NpgsqlStrings", typeof(NpgsqlStrings).Assembly); resourceMan = temp; } return resourceMan; } } - [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Advanced)] - internal static System.Globalization.CultureInfo Culture { + /// + /// Overrides the current thread's CurrentUICulture property for all + /// resource lookups using this strongly typed resource class. + /// + [global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)] + internal static global::System.Globalization.CultureInfo Culture { get { return resourceCulture; } @@ -45,174 +59,272 @@ internal static System.Globalization.CultureInfo Culture { } } - internal static string CannotUseSslVerifyWithUserCallback { + /// + /// Looks up a localized string similar to '{0}' must be positive.. + /// + internal static string ArgumentMustBePositive { get { - return ResourceManager.GetString("CannotUseSslVerifyWithUserCallback", resourceCulture); + return ResourceManager.GetString("ArgumentMustBePositive", resourceCulture); } } - internal static string CannotUseSslRootCertificateWithUserCallback { + /// + /// Looks up a localized string similar to Arrays aren't enabled; please call {0} on {1} to enable arrays.. 
+ /// + internal static string ArraysNotEnabled { get { - return ResourceManager.GetString("CannotUseSslRootCertificateWithUserCallback", resourceCulture); + return ResourceManager.GetString("ArraysNotEnabled", resourceCulture); } } - internal static string TransportSecurityDisabled { + /// + /// Looks up a localized string similar to Cannot read infinity value since Npgsql.DisableDateTimeInfinityConversions is enabled.. + /// + internal static string CannotReadInfinityValue { get { - return ResourceManager.GetString("TransportSecurityDisabled", resourceCulture); + return ResourceManager.GetString("CannotReadInfinityValue", resourceCulture); } } - internal static string IntegratedSecurityDisabled { + /// + /// Looks up a localized string similar to Cannot read interval values with non-zero months as TimeSpan, since that type doesn't support months. Consider using NodaTime Period which better corresponds to PostgreSQL interval, or read the value as NpgsqlInterval, or transform the interval to not contain months or years in PostgreSQL before reading it.. + /// + internal static string CannotReadIntervalWithMonthsAsTimeSpan { get { - return ResourceManager.GetString("IntegratedSecurityDisabled", resourceCulture); + return ResourceManager.GetString("CannotReadIntervalWithMonthsAsTimeSpan", resourceCulture); } } - internal static string NoMultirangeTypeFound { + /// + /// Looks up a localized string similar to When registering a password provider, a password or password file may not be set.. + /// + internal static string CannotSetBothPasswordProviderAndPassword { get { - return ResourceManager.GetString("NoMultirangeTypeFound", resourceCulture); + return ResourceManager.GetString("CannotSetBothPasswordProviderAndPassword", resourceCulture); } } - internal static string NotSupportedOnDataSourceCommand { + /// + /// Looks up a localized string similar to Multiple kinds of password providers were found, only one kind may be configured per DbDataSource.. 
+ /// + internal static string CannotSetMultiplePasswordProviderKinds { get { - return ResourceManager.GetString("NotSupportedOnDataSourceCommand", resourceCulture); + return ResourceManager.GetString("CannotSetMultiplePasswordProviderKinds", resourceCulture); } } - internal static string NotSupportedOnDataSourceBatch { + /// + /// Looks up a localized string similar to When creating a multi-host data source, TargetSessionAttributes cannot be specified. Create without TargetSessionAttributes, and then obtain DataSource wrappers from it. Consult the docs for more information.. + /// + internal static string CannotSpecifyTargetSessionAttributes { get { - return ResourceManager.GetString("NotSupportedOnDataSourceBatch", resourceCulture); + return ResourceManager.GetString("CannotSpecifyTargetSessionAttributes", resourceCulture); } } - internal static string CannotSetBothPasswordProviderAndPassword { + /// + /// Looks up a localized string similar to RootCertificate cannot be used in conjunction with SslClientAuthenticationOptionsCallback overwriting RemoteCertificateValidationCallback; when registering a validation callback, perform whatever validation you require in that callback.. + /// + internal static string CannotUseSslRootCertificateWithCustomValidationCallback { get { - return ResourceManager.GetString("CannotSetBothPasswordProviderAndPassword", resourceCulture); + return ResourceManager.GetString("CannotUseSslRootCertificateWithCustomValidationCallback", resourceCulture); } } - internal static string CannotSetMultiplePasswordProviderKinds { + /// + /// Looks up a localized string similar to SslMode.{0} cannot be used in conjunction with SslClientAuthenticationOptionsCallback overwriting RemoteCertificateValidationCallback; when registering a validation callback, perform whatever validation you require in that callback.. 
+ /// + internal static string CannotUseSslVerifyWithCustomValidationCallback { get { - return ResourceManager.GetString("CannotSetMultiplePasswordProviderKinds", resourceCulture); + return ResourceManager.GetString("CannotUseSslVerifyWithCustomValidationCallback", resourceCulture); } } - internal static string SyncAndAsyncPasswordProvidersRequired { + /// + /// Looks up a localized string similar to ValidationRootCertificateCallback cannot be used in conjunction with SslClientAuthenticationOptionsCallback overwriting RemoteCertificateValidationCallback; when registering a validation callback, perform whatever validation you require in that callback.. + /// + internal static string CannotUseValidationRootCertificateCallbackWithCustomValidationCallback { get { - return ResourceManager.GetString("SyncAndAsyncPasswordProvidersRequired", resourceCulture); + return ResourceManager.GetString("CannotUseValidationRootCertificateCallbackWithCustomValidationCallback", resourceCulture); } } - internal static string PasswordProviderMissing { + /// + /// Looks up a localized string similar to Type '{0}' required dynamic JSON serialization, which requires an explicit opt-in; call '{1}' on '{2}' or NpgsqlConnection.GlobalTypeMapper (see https://www.npgsql.org/doc/types/json.html and the 8.0 release notes for more details). Alternatively, if you meant to use Newtonsoft JSON.NET instead of System.Text.Json, call UseJsonNet() instead. + ///. + /// + internal static string DynamicJsonNotEnabled { get { - return ResourceManager.GetString("PasswordProviderMissing", resourceCulture); + return ResourceManager.GetString("DynamicJsonNotEnabled", resourceCulture); } } - internal static string ArgumentMustBePositive { + /// + /// Looks up a localized string similar to Full-text search isn't enabled; please call {0} on {1} to enable full-text search.. 
+ /// + internal static string FullTextSearchNotEnabled { get { - return ResourceManager.GetString("ArgumentMustBePositive", resourceCulture); + return ResourceManager.GetString("FullTextSearchNotEnabled", resourceCulture); } } - internal static string CannotSpecifyTargetSessionAttributes { + /// + /// Looks up a localized string similar to Integrated security hasn't been enabled; please call {0} on NpgsqlSlimDataSourceBuilder to enable it.. + /// + internal static string IntegratedSecurityDisabled { get { - return ResourceManager.GetString("CannotSpecifyTargetSessionAttributes", resourceCulture); + return ResourceManager.GetString("IntegratedSecurityDisabled", resourceCulture); } } - internal static string CannotReadIntervalWithMonthsAsTimeSpan { + /// + /// Looks up a localized string similar to Ltree isn't enabled; please call {0} on {1} to enable LTree.. + /// + internal static string LTreeNotEnabled { get { - return ResourceManager.GetString("CannotReadIntervalWithMonthsAsTimeSpan", resourceCulture); + return ResourceManager.GetString("LTreeNotEnabled", resourceCulture); } } - internal static string PositionalParameterAfterNamed { + /// + /// Looks up a localized string similar to Multiranges aren't enabled; please call {0} on {1} to enable multiranges.. + /// + internal static string MultirangesNotEnabled { get { - return ResourceManager.GetString("PositionalParameterAfterNamed", resourceCulture); + return ResourceManager.GetString("MultirangesNotEnabled", resourceCulture); } } - internal static string CannotReadInfinityValue { + /// + /// Looks up a localized string similar to No multirange type could be found in the database for subtype {0}.. 
+ /// + internal static string NoMultirangeTypeFound { get { - return ResourceManager.GetString("CannotReadInfinityValue", resourceCulture); + return ResourceManager.GetString("NoMultirangeTypeFound", resourceCulture); } } - internal static string SyncAndAsyncConnectionInitializersRequired { + /// + /// Looks up a localized string similar to Connection and transaction access is not supported on batches created from DbDataSource.. + /// + internal static string NotSupportedOnDataSourceBatch { get { - return ResourceManager.GetString("SyncAndAsyncConnectionInitializersRequired", resourceCulture); + return ResourceManager.GetString("NotSupportedOnDataSourceBatch", resourceCulture); } } - internal static string CannotUseValidationRootCertificateCallbackWithUserCallback { + /// + /// Looks up a localized string similar to Connection and transaction access is not supported on commands created from DbDataSource.. + /// + internal static string NotSupportedOnDataSourceCommand { get { - return ResourceManager.GetString("CannotUseValidationRootCertificateCallbackWithUserCallback", resourceCulture); + return ResourceManager.GetString("NotSupportedOnDataSourceCommand", resourceCulture); } } - internal static string RecordsNotEnabled { + /// + /// Looks up a localized string similar to The right type of password provider (sync or async) was not found.. + /// + internal static string PasswordProviderMissing { get { - return ResourceManager.GetString("RecordsNotEnabled", resourceCulture); + return ResourceManager.GetString("PasswordProviderMissing", resourceCulture); } } - internal static string FullTextSearchNotEnabled { + /// + /// Looks up a localized string similar to When using CommandType.StoredProcedure, all positional parameters must come before named parameters.. 
+ /// + internal static string PositionalParameterAfterNamed { get { - return ResourceManager.GetString("FullTextSearchNotEnabled", resourceCulture); + return ResourceManager.GetString("PositionalParameterAfterNamed", resourceCulture); } } - internal static string LTreeNotEnabled { + /// + /// Looks up a localized string similar to Ranges aren't enabled; please call {0} on {1} to enable ranges.. + /// + internal static string RangesNotEnabled { get { - return ResourceManager.GetString("LTreeNotEnabled", resourceCulture); + return ResourceManager.GetString("RangesNotEnabled", resourceCulture); } } - internal static string RangesNotEnabled { + /// + /// Looks up a localized string similar to Could not read a PostgreSQL record. If you're attempting to read a record as a .NET tuple, call '{0}' on '{1}' or NpgsqlConnection.GlobalTypeMapper (see https://www.npgsql.org/doc/types/basic.html and the 8.0 release notes for more details). If you're reading a record as a .NET object array using NpgsqlSlimDataSourceBuilder, call '{2}'. + ///. + /// + internal static string RecordsNotEnabled { get { - return ResourceManager.GetString("RangesNotEnabled", resourceCulture); + return ResourceManager.GetString("RecordsNotEnabled", resourceCulture); } } - internal static string MultirangesNotEnabled { + /// + /// Looks up a localized string similar to SslClientAuthenticationOptionsCallback is not supported together with UserCertificateValidationCallback and ClientCertificatesCallback. + /// + internal static string SslClientAuthenticationOptionsCallbackWithOtherCallbacksNotSupported { get { - return ResourceManager.GetString("MultirangesNotEnabled", resourceCulture); + return ResourceManager.GetString("SslClientAuthenticationOptionsCallbackWithOtherCallbacksNotSupported", resourceCulture); } } - internal static string ArraysNotEnabled { + /// + /// Looks up a localized string similar to Both sync and async connection initializers must be provided.. 
+ /// + internal static string SyncAndAsyncConnectionInitializersRequired { get { - return ResourceManager.GetString("ArraysNotEnabled", resourceCulture); + return ResourceManager.GetString("SyncAndAsyncConnectionInitializersRequired", resourceCulture); } } - internal static string TimestampTzNoDateTimeUnspecified { + /// + /// Looks up a localized string similar to Both sync and async password providers must be provided.. + /// + internal static string SyncAndAsyncPasswordProvidersRequired { get { - return ResourceManager.GetString("TimestampTzNoDateTimeUnspecified", resourceCulture); + return ResourceManager.GetString("SyncAndAsyncPasswordProvidersRequired", resourceCulture); } } + /// + /// Looks up a localized string similar to Cannot write DateTime with Kind=UTC to PostgreSQL type '{0}', consider using '{1}'. Note that it's not possible to mix DateTimes with different Kinds in an array, range, or multirange.. + /// internal static string TimestampNoDateTimeUtc { get { return ResourceManager.GetString("TimestampNoDateTimeUtc", resourceCulture); } } - internal static string DynamicJsonNotEnabled { + /// + /// Looks up a localized string similar to Cannot write DateTime with Kind={0} to PostgreSQL type '{1}', only UTC is supported. Note that it's not possible to mix DateTimes with different Kinds in an array, range, or multirange.. + /// + internal static string TimestampTzNoDateTimeUnspecified { get { - return ResourceManager.GetString("DynamicJsonNotEnabled", resourceCulture); + return ResourceManager.GetString("TimestampTzNoDateTimeUnspecified", resourceCulture); + } + } + + /// + /// Looks up a localized string similar to Transport security hasn't been enabled; please call {0} on NpgsqlSlimDataSourceBuilder to enable it.. 
+ /// + internal static string TransportSecurityDisabled { + get { + return ResourceManager.GetString("TransportSecurityDisabled", resourceCulture); } } + /// + /// Looks up a localized string similar to Reading and writing unmapped enums requires an explicit opt-in; call '{0}' on '{1}' or NpgsqlConnection.GlobalTypeMapper (see https://www.npgsql.org/doc/types/enums_and_composites.html and the 8.0 release notes for more details).. + /// internal static string UnmappedEnumsNotEnabled { get { return ResourceManager.GetString("UnmappedEnumsNotEnabled", resourceCulture); } } + /// + /// Looks up a localized string similar to Reading and writing unmapped ranges and multiranges requires an explicit opt-in; call '{0}' on '{1}' or NpgsqlConnection.GlobalTypeMapper (see https://www.npgsql.org/doc/types/ranges.html and the 8.0 release notes for more details).. + /// internal static string UnmappedRangesNotEnabled { get { return ResourceManager.GetString("UnmappedRangesNotEnabled", resourceCulture); diff --git a/src/Npgsql/Properties/NpgsqlStrings.resx b/src/Npgsql/Properties/NpgsqlStrings.resx index 5dbc58acdf..f523cf6eb2 100644 --- a/src/Npgsql/Properties/NpgsqlStrings.resx +++ b/src/Npgsql/Properties/NpgsqlStrings.resx @@ -18,11 +18,11 @@ System.Resources.ResXResourceWriter, System.Windows.Forms, Version=2.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089 - - SslMode.{0} cannot be used in conjunction with UserCertificateValidationCallback; when registering a validation callback, perform whatever validation you require in that callback. + + SslMode.{0} cannot be used in conjunction with SslClientAuthenticationOptionsCallback overwriting RemoteCertificateValidationCallback; when registering a validation callback, perform whatever validation you require in that callback. - - RootCertificate cannot be used in conjunction with UserCertificateValidationCallback; when registering a validation callback, perform whatever validation you require in that callback. 
+ + RootCertificate cannot be used in conjunction with SslClientAuthenticationOptionsCallback overwriting RemoteCertificateValidationCallback; when registering a validation callback, perform whatever validation you require in that callback. Transport security hasn't been enabled; please call {0} on NpgsqlSlimDataSourceBuilder to enable it. @@ -69,8 +69,8 @@ Both sync and async connection initializers must be provided. - - ValidationRootCertificateCallback cannot be used in conjunction with UserCertificateValidationCallback; when registering a validation callback, perform whatever validation you require in that callback. + + ValidationRootCertificateCallback cannot be used in conjunction with SslClientAuthenticationOptionsCallback overwriting RemoteCertificateValidationCallback; when registering a validation callback, perform whatever validation you require in that callback. Could not read a PostgreSQL record. If you're attempting to read a record as a .NET tuple, call '{0}' on '{1}' or NpgsqlConnection.GlobalTypeMapper (see https://www.npgsql.org/doc/types/basic.html and the 8.0 release notes for more details). If you're reading a record as a .NET object array using NpgsqlSlimDataSourceBuilder, call '{2}'. @@ -107,4 +107,7 @@ Reading and writing unmapped ranges and multiranges requires an explicit opt-in; call '{0}' on '{1}' or NpgsqlConnection.GlobalTypeMapper (see https://www.npgsql.org/doc/types/ranges.html and the 8.0 release notes for more details). + + SslClientAuthenticationOptionsCallback is not supported together with UserCertificateValidationCallback and ClientCertificatesCallback + diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index 84b9e74092..a601f6b737 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -1,9 +1,13 @@ #nullable enable +Npgsql.NpgsqlConnection.SslClientAuthenticationOptionsCallback.get -> System.Action? 
+Npgsql.NpgsqlConnection.SslClientAuthenticationOptionsCallback.set -> void Npgsql.NpgsqlDataSourceBuilder.UseNegotiateOptionsCallback(System.Action? negotiateOptionsCallback) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.UseSslClientAuthenticationOptionsCallback(System.Action? sslClientAuthenticationOptionsCallback) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableGeometricTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableJsonTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableNetworkTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UseNegotiateOptionsCallback(System.Action? negotiateOptionsCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.UseSslClientAuthenticationOptionsCallback(System.Action? sslClientAuthenticationOptionsCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.Replication.PgOutput.ReplicationValue.GetFieldName() -> string! 
Npgsql.Replication.PgOutput.Messages.ParallelStreamAbortMessage Npgsql.Replication.PgOutput.Messages.ParallelStreamAbortMessage.AbortLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber diff --git a/test/Npgsql.Tests/ConnectionTests.cs b/test/Npgsql.Tests/ConnectionTests.cs index d0fb7f827b..4b1eeeda8d 100644 --- a/test/Npgsql.Tests/ConnectionTests.cs +++ b/test/Npgsql.Tests/ConnectionTests.cs @@ -994,23 +994,27 @@ public async Task CloneWith_and_data_source_with_auth_callbacks() var (userCertificateValidationCallbackCalled, clientCertificatesCallbackCalled) = (false, false); var dataSourceBuilder = CreateDataSourceBuilder(); - dataSourceBuilder.UseUserCertificateValidationCallback(UserCertificateValidationCallback); - dataSourceBuilder.UseClientCertificatesCallback(ClientCertificatesCallback); + dataSourceBuilder.UseSslClientAuthenticationOptionsCallback(options => + { + ClientCertificatesCallback(options.ClientCertificates); + options.RemoteCertificateValidationCallback = UserCertificateValidationCallback; + }); await using var dataSource = dataSourceBuilder.Build(); await using var connection = dataSource.CreateConnection(); using var _ = CreateTempPool(ConnectionString, out var tempConnectionString); await using var clonedConnection = connection.CloneWith(tempConnectionString); - clonedConnection.UserCertificateValidationCallback!(null!, null, null, SslPolicyErrors.None); - Assert.True(userCertificateValidationCallbackCalled); - clonedConnection.ProvideClientCertificatesCallback!(null!); + var sslClientAuthenticationOptions = new SslClientAuthenticationOptions(); + clonedConnection.SslClientAuthenticationOptionsCallback!(sslClientAuthenticationOptions); Assert.True(clientCertificatesCallbackCalled); + sslClientAuthenticationOptions.RemoteCertificateValidationCallback!(null!, null, null, SslPolicyErrors.None); + Assert.True(userCertificateValidationCallbackCalled); bool UserCertificateValidationCallback(object sender, X509Certificate? certificate, X509Chain? 
chain, SslPolicyErrors errors) => userCertificateValidationCallbackCalled = true; - void ClientCertificatesCallback(X509CertificateCollection certs) + void ClientCertificatesCallback(X509CertificateCollection? certs) => clientCertificatesCallbackCalled = true; } @@ -1023,18 +1027,15 @@ public void Clone() { using var pool = CreateTempPool(ConnectionString, out var connectionString); using var conn = new NpgsqlConnection(connectionString); - ProvideClientCertificatesCallback callback1 = certificates => { }; - conn.ProvideClientCertificatesCallback = callback1; - RemoteCertificateValidationCallback callback2 = (sender, certificate, chain, errors) => true; - conn.UserCertificateValidationCallback = callback2; + Action callback = _ => { }; + conn.SslClientAuthenticationOptionsCallback = callback; conn.Open(); Assert.That(async () => await conn.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1)); using var conn2 = (NpgsqlConnection)((ICloneable)conn).Clone(); Assert.That(conn2.ConnectionString, Is.EqualTo(conn.ConnectionString)); - Assert.That(conn2.ProvideClientCertificatesCallback, Is.SameAs(callback1)); - Assert.That(conn2.UserCertificateValidationCallback, Is.SameAs(callback2)); + Assert.That(conn2.SslClientAuthenticationOptionsCallback, Is.SameAs(callback)); conn2.Open(); Assert.That(async () => await conn2.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1)); } diff --git a/test/Npgsql.Tests/SecurityTests.cs b/test/Npgsql.Tests/SecurityTests.cs index 8600942969..7c44cbc2b6 100644 --- a/test/Npgsql.Tests/SecurityTests.cs +++ b/test/Npgsql.Tests/SecurityTests.cs @@ -294,16 +294,19 @@ public async Task Connect_with_only_non_ssl_allowed_user([Values] bool multiplex } [Test] - public async Task DataSource_UserCertificateValidationCallback_is_invoked([Values] bool acceptCertificate) + public async Task DataSource_SslClientAuthenticationOptionsCallback_is_invoked([Values] bool acceptCertificate) { var callbackWasInvoked = false; var dataSourceBuilder = CreateDataSourceBuilder(); 
dataSourceBuilder.ConnectionStringBuilder.SslMode = SslMode.Require; - dataSourceBuilder.UseUserCertificateValidationCallback((_, _, _, _) => + dataSourceBuilder.UseSslClientAuthenticationOptionsCallback(options => { - callbackWasInvoked = true; - return acceptCertificate; + options.RemoteCertificateValidationCallback = (_, _, _, _) => + { + callbackWasInvoked = true; + return acceptCertificate; + }; }); await using var dataSource = dataSourceBuilder.Build(); await using var connection = dataSource.CreateConnection(); @@ -316,11 +319,11 @@ public async Task DataSource_UserCertificateValidationCallback_is_invoked([Value Assert.That(ex.InnerException, Is.TypeOf()); } - Assert.That(callbackWasInvoked); + Assert.IsTrue(callbackWasInvoked); } [Test] - public async Task Connection_UserCertificateValidationCallback_is_invoked([Values] bool acceptCertificate) + public async Task Connection_SslClientAuthenticationOptionsCallback_is_invoked([Values] bool acceptCertificate) { var callbackWasInvoked = false; @@ -328,10 +331,13 @@ public async Task Connection_UserCertificateValidationCallback_is_invoked([Value dataSourceBuilder.ConnectionStringBuilder.SslMode = SslMode.Require; await using var dataSource = dataSourceBuilder.Build(); await using var connection = dataSource.CreateConnection(); - connection.UserCertificateValidationCallback = (_, _, _, _) => + connection.SslClientAuthenticationOptionsCallback = options => { - callbackWasInvoked = true; - return acceptCertificate; + options.RemoteCertificateValidationCallback = (_, _, _, _) => + { + callbackWasInvoked = true; + return acceptCertificate; + }; }; if (acceptCertificate) @@ -342,7 +348,7 @@ public async Task Connection_UserCertificateValidationCallback_is_invoked([Value Assert.That(ex.InnerException, Is.TypeOf()); } - Assert.That(callbackWasInvoked); + Assert.IsTrue(callbackWasInvoked); } [Test] @@ -350,10 +356,13 @@ public void Connect_with_Verify_and_callback_throws([Values(SslMode.VerifyCA, Ss { using var dataSource 
= CreateDataSource(csb => csb.SslMode = sslMode); using var connection = dataSource.CreateConnection(); - connection.UserCertificateValidationCallback = (_, _, _, _) => true; + connection.SslClientAuthenticationOptionsCallback = options => + { + options.RemoteCertificateValidationCallback = (_, _, _, _) => true; + }; var ex = Assert.ThrowsAsync(async () => await connection.OpenAsync())!; - Assert.That(ex.Message, Is.EqualTo(string.Format(NpgsqlStrings.CannotUseSslVerifyWithUserCallback, sslMode))); + Assert.That(ex.Message, Is.EqualTo(string.Format(NpgsqlStrings.CannotUseSslVerifyWithCustomValidationCallback, sslMode))); } [Test] @@ -365,10 +374,13 @@ public void Connect_with_RootCertificate_and_callback_throws() csb.RootCertificate = "foo"; }); using var connection = dataSource.CreateConnection(); - connection.UserCertificateValidationCallback = (_, _, _, _) => true; + connection.SslClientAuthenticationOptionsCallback = options => + { + options.RemoteCertificateValidationCallback = (_, _, _, _) => true; + }; var ex = Assert.ThrowsAsync(async () => await connection.OpenAsync())!; - Assert.That(ex.Message, Is.EqualTo(string.Format(NpgsqlStrings.CannotUseSslRootCertificateWithUserCallback))); + Assert.That(ex.Message, Is.EqualTo(string.Format(NpgsqlStrings.CannotUseSslRootCertificateWithCustomValidationCallback))); } [Test] From c3b31c393de66a4b03fba0d45708d46a2acb06d2 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Sun, 1 Sep 2024 11:05:44 +0200 Subject: [PATCH 448/761] Run CI tests on PG17 (beta) (#5817) --- .github/workflows/build.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index a789857815..c60c4eb0d2 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -46,11 +46,11 @@ jobs: pg_major: 16 config: Release test_tfm: net8.0 -# - os: ubuntu-22.04 -# pg_major: 17 -# config: Release -# test_tfm: net8.0 -# pg_prerelease: 'PG Prerelease' + - os: 
ubuntu-22.04 + pg_major: 17 + config: Release + test_tfm: net8.0 + pg_prerelease: 'PG Prerelease' outputs: is_release: ${{ steps.analyze_tag.outputs.is_release }} From 28d41bf3f78bd3ff310d8bd3a6a31ea59d39f6b6 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Wed, 4 Sep 2024 20:43:34 +0300 Subject: [PATCH 449/761] Ignore parameters with ParameterDirection.ReturnValue (#5796) --- src/Npgsql/NpgsqlCommand.cs | 3 +++ test/Npgsql.Tests/FunctionTests.cs | 19 +++++++++++++++++++ 2 files changed, 22 insertions(+) diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index 2d573360f5..2520d6221d 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -976,6 +976,9 @@ internal void ProcessRawQuery(SqlQueryParser? parser, bool standardConformingStr if (EnableStoredProcedureCompatMode && parameter.Direction == ParameterDirection.Output) continue; + if (parameter.Direction == ParameterDirection.ReturnValue) + continue; + if (isFirstParam) isFirstParam = false; else diff --git a/test/Npgsql.Tests/FunctionTests.cs b/test/Npgsql.Tests/FunctionTests.cs index 37f203b812..ea85185879 100644 --- a/test/Npgsql.Tests/FunctionTests.cs +++ b/test/Npgsql.Tests/FunctionTests.cs @@ -143,6 +143,25 @@ public async Task Too_many_output_params() Assert.That(command.Parameters["c"].Value, Is.EqualTo(-1)); } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5793")] + public async Task ReturnValue_parameter_ignored() + { + await using var conn = await OpenConnectionAsync(); + var funcName = await GetTempFunctionName(conn); + await conn.ExecuteNonQueryAsync(@$"CREATE FUNCTION {funcName}() RETURNS integer AS 'SELECT 8;' LANGUAGE 'sql'"); + await using var cmd = new NpgsqlCommand(funcName, conn) { CommandType = CommandType.StoredProcedure }; + var param = new NpgsqlParameter + { + ParameterName = "@ReturnValue", + NpgsqlDbType = NpgsqlDbType.Integer, + Direction = ParameterDirection.ReturnValue, + Value = 0 + }; + cmd.Parameters.Add(param); + 
Assert.That(cmd.ExecuteScalar(), Is.EqualTo(8)); + Assert.That(param.Value, Is.EqualTo(0)); + } + [Test] public async Task CommandBehavior_SchemaOnly_support_function_call() { From 3fce77d7ce92b4ac984ce3afb4364b23b791fb05 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Thu, 5 Sep 2024 13:40:26 +0300 Subject: [PATCH 450/761] Fix unpooled connection return with multiple hosts (#5784) Fixes #5783 --- src/Npgsql/Internal/NpgsqlConnector.cs | 3 --- src/Npgsql/MultiplexingDataSource.cs | 5 ++--- src/Npgsql/NpgsqlMultiHostDataSource.cs | 2 +- src/Npgsql/PoolingDataSource.cs | 12 +----------- src/Npgsql/VolatileResourceManager.cs | 8 ++++++-- test/Npgsql.Tests/SystemTransactionTests.cs | 12 +++++++----- 6 files changed, 17 insertions(+), 25 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 7f10a9ed64..3bf0ee3617 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -2016,9 +2016,6 @@ internal void Close() LogMessages.ClosedPhysicalConnection(ConnectionLogger, Host, Port, Database, UserFacingConnectionString, Id); } - internal bool TryRemovePendingEnlistedConnector(Transaction transaction) - => DataSource.TryRemovePendingEnlistedConnector(this, transaction); - internal void Return() => DataSource.Return(this); /// diff --git a/src/Npgsql/MultiplexingDataSource.cs b/src/Npgsql/MultiplexingDataSource.cs index 1912931575..1d228e1f4c 100644 --- a/src/Npgsql/MultiplexingDataSource.cs +++ b/src/Npgsql/MultiplexingDataSource.cs @@ -33,9 +33,8 @@ sealed class MultiplexingDataSource : PoolingDataSource internal MultiplexingDataSource( NpgsqlConnectionStringBuilder settings, - NpgsqlDataSourceConfiguration dataSourceConfig, - NpgsqlMultiHostDataSource? 
parentPool = null) - : base(settings, dataSourceConfig, parentPool) + NpgsqlDataSourceConfiguration dataSourceConfig) + : base(settings, dataSourceConfig) { Debug.Assert(Settings.Multiplexing); diff --git a/src/Npgsql/NpgsqlMultiHostDataSource.cs b/src/Npgsql/NpgsqlMultiHostDataSource.cs index 4b8731e5b6..30a6bed0b0 100644 --- a/src/Npgsql/NpgsqlMultiHostDataSource.cs +++ b/src/Npgsql/NpgsqlMultiHostDataSource.cs @@ -49,7 +49,7 @@ internal NpgsqlMultiHostDataSource(NpgsqlConnectionStringBuilder settings, Npgsq poolSettings.Host = host.ToString(); _pools[i] = settings.Pooling - ? new PoolingDataSource(poolSettings, dataSourceConfig, this) + ? new PoolingDataSource(poolSettings, dataSourceConfig) : new UnpooledDataSource(poolSettings, dataSourceConfig); } diff --git a/src/Npgsql/PoolingDataSource.cs b/src/Npgsql/PoolingDataSource.cs index 192a86c052..7e4c410f69 100644 --- a/src/Npgsql/PoolingDataSource.cs +++ b/src/Npgsql/PoolingDataSource.cs @@ -31,8 +31,6 @@ class PoolingDataSource : NpgsqlDataSource /// private protected readonly NpgsqlConnector?[] Connectors; - readonly NpgsqlMultiHostDataSource? _parentPool; - /// /// Reader side for the idle connector channel. Contains nulls in order to release waiting attempts after /// a connector has been physically closed/broken. @@ -76,15 +74,12 @@ internal sealed override (int Total, int Idle, int Busy) Statistics internal PoolingDataSource( NpgsqlConnectionStringBuilder settings, - NpgsqlDataSourceConfiguration dataSourceConfig, - NpgsqlMultiHostDataSource? 
parentPool = null) + NpgsqlDataSourceConfiguration dataSourceConfig) : base(settings, dataSourceConfig) { if (settings.MaxPoolSize < settings.MinPoolSize) throw new ArgumentException($"Connection can't have 'Max Pool Size' {settings.MaxPoolSize} under 'Min Pool Size' {settings.MinPoolSize}"); - _parentPool = parentPool; - // We enforce Max Pool Size, so no need to to create a bounded channel (which is less efficient) // On the consuming side, we have the multiplexing write loop but also non-multiplexing Rents // On the producing side, we have connections being released back into the pool (both multiplexing and not) @@ -400,11 +395,6 @@ void CloseConnector(NpgsqlConnector connector) UpdatePruningTimer(); } - internal override bool TryRemovePendingEnlistedConnector(NpgsqlConnector connector, Transaction transaction) - => _parentPool is null - ? base.TryRemovePendingEnlistedConnector(connector, transaction) - : _parentPool.TryRemovePendingEnlistedConnector(connector, transaction); - #region Pruning void UpdatePruningTimer() diff --git a/src/Npgsql/VolatileResourceManager.cs b/src/Npgsql/VolatileResourceManager.cs index 239b62fe8e..2e2d698834 100644 --- a/src/Npgsql/VolatileResourceManager.cs +++ b/src/Npgsql/VolatileResourceManager.cs @@ -17,6 +17,7 @@ namespace Npgsql; sealed class VolatileResourceManager : ISinglePhaseNotification { NpgsqlConnector _connector; + NpgsqlDataSource _dataSource; Transaction _transaction; readonly string _txId; NpgsqlTransaction _localTx = null!; @@ -31,6 +32,7 @@ sealed class VolatileResourceManager : ISinglePhaseNotification internal VolatileResourceManager(NpgsqlConnection connection, Transaction transaction) { _connector = connection.Connector!; + _dataSource = connection.NpgsqlDataSource; _transaction = transaction; // _tx gets disposed by System.Transactions at some point, but we want to be able to log its local ID _txId = transaction.TransactionInformation.LocalIdentifier; @@ -277,8 +279,10 @@ void Dispose() { // We're here for 
connections which were closed before their TransactionScope completes. // These need to be closed now. - // We should return the connector to the pool only if we've successfully removed it from the pending list - if (_connector.TryRemovePendingEnlistedConnector(_transaction)) + // We should return the connector to the pool only if we've successfully removed it from the pending list. + // Note that we remove it from the NpgsqlDataSource bound to connection and not to connector + // because of NpgsqlMultiHostDataSource which has its own list to which connection adds connectors. + if (_dataSource.TryRemovePendingEnlistedConnector(_connector, _transaction)) _connector.Return(); } diff --git a/test/Npgsql.Tests/SystemTransactionTests.cs b/test/Npgsql.Tests/SystemTransactionTests.cs index b71c949259..c5bced6bfa 100644 --- a/test/Npgsql.Tests/SystemTransactionTests.cs +++ b/test/Npgsql.Tests/SystemTransactionTests.cs @@ -310,13 +310,15 @@ public void Single_unpooled_connection() scope.Complete(); } - [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4963")] - public void Single_unpooled_closed_connection() + [Test] + [IssueLink("https://github.com/npgsql/npgsql/issues/4963"), IssueLink("https://github.com/npgsql/npgsql/issues/5783")] + public void Single_closed_connection_in_transaction_scope([Values] bool pooling, [Values] bool multipleHosts) { using var dataSource = CreateDataSource(csb => { - csb.Pooling = false; + csb.Pooling = pooling; csb.Enlist = true; + csb.Host = multipleHosts ? "localhost,127.0.0.1" : csb.Host; }); using (var scope = new TransactionScope()) @@ -325,11 +327,11 @@ public void Single_unpooled_closed_connection() { cmd.ExecuteNonQuery(); conn.Close(); - Assert.That(dataSource.Statistics.Total, Is.EqualTo(1)); + Assert.That(pooling ? dataSource.Statistics.Busy : dataSource.Statistics.Total, Is.EqualTo(1)); scope.Complete(); } - Assert.That(dataSource.Statistics.Total, Is.EqualTo(0)); + Assert.That(pooling ? 
dataSource.Statistics.Busy : dataSource.Statistics.Total, Is.EqualTo(0)); } [Test] From 6c867d8f40351b4638eb1c9b4125bbd4c4fecc35 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Tue, 10 Sep 2024 15:28:31 +0200 Subject: [PATCH 451/761] User resolver PgTypeInfo reuse (#5737) --- .../BackendMessages/RowDescriptionMessage.cs | 6 ++--- src/Npgsql/Internal/AdoSerializerHelpers.cs | 24 ++++++++--------- .../ReflectionCompositeInfoFactory.cs | 10 ++++--- .../Internal/Converters/ObjectConverter.cs | 2 +- .../Internal/Converters/RecordConverter.cs | 5 ++-- src/Npgsql/Internal/PgSerializerOptions.cs | 26 ++++++++----------- .../AdoTypeInfoResolverFactory.cs | 2 +- .../UnmappedTypeInfoResolverFactory.cs | 14 ++++------ src/Npgsql/NpgsqlBinaryExporter.cs | 2 +- src/Npgsql/NpgsqlNestedDataReader.cs | 8 +++--- src/Npgsql/NpgsqlSchema.cs | 11 ++++---- src/Npgsql/Schema/DbColumnSchemaGenerator.cs | 6 ++--- src/Npgsql/TypeMapping/GlobalTypeMapper.cs | 2 +- test/Npgsql.Benchmarks/ResolveHandler.cs | 6 ++--- 14 files changed, 60 insertions(+), 64 deletions(-) diff --git a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs index fe91b0cf8f..4bbbd58d67 100644 --- a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs +++ b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs @@ -331,7 +331,7 @@ internal void GetInfo(Type? 
type, ref ColumnInfo lastColumnInfo) { Debug.Assert(lastColumnInfo.ConverterInfo.IsDefault || ( ReferenceEquals(_serializerOptions, lastColumnInfo.ConverterInfo.TypeInfo.Options) && ( - IsUnknownResultType() && lastColumnInfo.ConverterInfo.TypeInfo.PgTypeId == _serializerOptions.ToCanonicalTypeId(_serializerOptions.TextPgType) || + IsUnknownResultType() && lastColumnInfo.ConverterInfo.TypeInfo.PgTypeId == _serializerOptions.TextPgTypeId || // Normal resolution lastColumnInfo.ConverterInfo.TypeInfo.PgTypeId == _serializerOptions.ToCanonicalTypeId(PostgresType)) ), "Cache is bleeding over"); @@ -368,7 +368,7 @@ void GetInfoSlow(Type? type, out ColumnInfo lastColumnInfo) case DataFormat.Text when IsUnknownResultType(): { // Try to resolve some 'pg_catalog.text' type info for the expected clr type. - var typeInfo = AdoSerializerHelpers.GetTypeInfoForReading(type ?? typeof(string), _serializerOptions.TextPgType, _serializerOptions); + var typeInfo = AdoSerializerHelpers.GetTypeInfoForReading(type ?? typeof(string), _serializerOptions.TextPgTypeId, _serializerOptions); // We start binding to DataFormat.Binary as it's the broadest supported format. // The format however is irrelevant as 'pg_catalog.text' data is identical across either. @@ -382,7 +382,7 @@ void GetInfoSlow(Type? type, out ColumnInfo lastColumnInfo) } case DataFormat.Binary or DataFormat.Text: { - var typeInfo = AdoSerializerHelpers.GetTypeInfoForReading(type ?? typeof(object), PostgresType, _serializerOptions); + var typeInfo = AdoSerializerHelpers.GetTypeInfoForReading(type ?? typeof(object), _serializerOptions.ToCanonicalTypeId(PostgresType), _serializerOptions); // If we don't support the DataFormat we'll just throw. 
converterInfo = typeInfo.Bind(Field, DataFormat); diff --git a/src/Npgsql/Internal/AdoSerializerHelpers.cs b/src/Npgsql/Internal/AdoSerializerHelpers.cs index ed0ea1cc56..b83c447c39 100644 --- a/src/Npgsql/Internal/AdoSerializerHelpers.cs +++ b/src/Npgsql/Internal/AdoSerializerHelpers.cs @@ -2,36 +2,34 @@ using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using Npgsql.Internal.Postgres; -using Npgsql.PostgresTypes; using NpgsqlTypes; namespace Npgsql.Internal; static class AdoSerializerHelpers { - public static PgTypeInfo GetTypeInfoForReading(Type type, PostgresType postgresType, PgSerializerOptions options) - { - var (typeInfo, exception) = TryGetTypeInfoForReading(type, postgresType, options); - return typeInfo ?? throw exception!; - } - - static (PgTypeInfo? TypeInfo , Exception? Exception) TryGetTypeInfoForReading(Type type, PostgresType postgresType, PgSerializerOptions options) + public static PgTypeInfo GetTypeInfoForReading(Type type, PgTypeId pgTypeId, PgSerializerOptions options) { PgTypeInfo? typeInfo = null; Exception? inner = null; try { - typeInfo = type == typeof(object) ? options.GetObjectOrDefaultTypeInfo(postgresType) : options.GetTypeInfo(type, postgresType); + typeInfo = type == typeof(object) ? options.GetObjectOrDefaultTypeInfoInternal(pgTypeId) : options.GetTypeInfoInternal(type, pgTypeId); } catch (Exception ex) { inner = ex; } - return typeInfo is not null ? (typeInfo, null) : (null, ThrowReadingNotSupported(type, postgresType.DisplayName, inner)); + return typeInfo ?? ThrowReadingNotSupported(type, options, pgTypeId, inner); // InvalidCastException thrown to align with ADO.NET convention. - static Exception ThrowReadingNotSupported(Type? type, string displayName, Exception? inner = null) - => new InvalidCastException($"Reading{(type is null ? "" : $" as '{type.FullName}'")} is not supported for fields having DataTypeName '{displayName}'", inner); + [DoesNotReturn] + static PgTypeInfo ThrowReadingNotSupported(Type? 
type, PgSerializerOptions options, PgTypeId pgTypeId, Exception? inner = null) + { + throw new InvalidCastException( + $"Reading{(type is null ? "" : $" as '{type.FullName}'")} is not supported for fields having DataTypeName '{options.DatabaseInfo.FindPostgresType(pgTypeId)?.DisplayName ?? "unknown"}'", + inner); + } } public static PgTypeInfo GetTypeInfoForWriting(Type? type, PgTypeId? pgTypeId, PgSerializerOptions options, NpgsqlDbType? npgsqlDbType = null) @@ -42,7 +40,7 @@ public static PgTypeInfo GetTypeInfoForWriting(Type? type, PgTypeId? pgTypeId, P Exception? inner = null; try { - typeInfo = type is null ? options.GetDefaultTypeInfo(pgTypeId!.Value) : options.GetTypeInfo(type, pgTypeId); + typeInfo = options.GetTypeInfoInternal(type, pgTypeId); } catch (Exception ex) { diff --git a/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs b/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs index 522b46acf3..14bba0dd9a 100644 --- a/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs +++ b/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs @@ -34,6 +34,7 @@ static class ReflectionCompositeInfoFactory var fieldIndex = parameterFieldMap[i]; var pgField = pgFields[fieldIndex]; var parameter = constructorParameters[i]; + var reprTypeId = options.ToCanonicalTypeId(pgField.Type.GetRepresentationalType()); PgTypeInfo pgTypeInfo; Delegate getter; if (propertyMap.TryGetValue(fieldIndex, out var property) && property.GetMethod is not null) @@ -41,7 +42,7 @@ static class ReflectionCompositeInfoFactory if (property.PropertyType != parameter.ParameterType) throw new InvalidOperationException($"Could not find a matching getter for constructor parameter {parameter.Name} and type {parameter.ParameterType} mapped to composite field {pgFields[fieldIndex].Name}."); - pgTypeInfo = options.GetTypeInfo(property.PropertyType, pgField.Type.GetRepresentationalType()) ?? 
throw NotSupportedField(pgType, pgField, isField: false, property.Name, property.PropertyType); + pgTypeInfo = options.GetTypeInfoInternal(property.PropertyType, reprTypeId) ?? throw NotSupportedField(pgType, pgField, isField: false, property.Name, property.PropertyType); getter = CreateGetter(property); } else if (fieldMap.TryGetValue(fieldIndex, out var field)) @@ -49,7 +50,7 @@ static class ReflectionCompositeInfoFactory if (field.FieldType != parameter.ParameterType) throw new InvalidOperationException($"Could not find a matching getter for constructor parameter {parameter.Name} and type {parameter.ParameterType} mapped to composite field {pgFields[fieldIndex].Name}."); - pgTypeInfo = options.GetTypeInfo(field.FieldType, pgField.Type.GetRepresentationalType()) ?? throw NotSupportedField(pgType, pgField, isField: true, field.Name, field.FieldType); + pgTypeInfo = options.GetTypeInfoInternal(field.FieldType, reprTypeId) ?? throw NotSupportedField(pgType, pgField, isField: true, field.Name, field.FieldType); getter = CreateGetter(field); } else @@ -65,19 +66,20 @@ static class ReflectionCompositeInfoFactory continue; var pgField = pgFields[fieldIndex]; + var reprTypeId = options.ToCanonicalTypeId(pgField.Type.GetRepresentationalType()); PgTypeInfo pgTypeInfo; Delegate getter; Delegate setter; if (propertyMap.TryGetValue(fieldIndex, out var property)) { - pgTypeInfo = options.GetTypeInfo(property.PropertyType, pgField.Type.GetRepresentationalType()) + pgTypeInfo = options.GetTypeInfoInternal(property.PropertyType, reprTypeId) ?? throw NotSupportedField(pgType, pgField, isField: false, property.Name, property.PropertyType); getter = CreateGetter(property); setter = CreateSetter(property); } else if (fieldMap.TryGetValue(fieldIndex, out var field)) { - pgTypeInfo = options.GetTypeInfo(field.FieldType, pgField.Type.GetRepresentationalType()) + pgTypeInfo = options.GetTypeInfoInternal(field.FieldType, reprTypeId) ?? 
throw NotSupportedField(pgType, pgField, isField: true, field.Name, field.FieldType); getter = CreateGetter(field); setter = CreateSetter(field); diff --git a/src/Npgsql/Internal/Converters/ObjectConverter.cs b/src/Npgsql/Internal/Converters/ObjectConverter.cs index 568fc32c2b..3cc788adf1 100644 --- a/src/Npgsql/Internal/Converters/ObjectConverter.cs +++ b/src/Npgsql/Internal/Converters/ObjectConverter.cs @@ -98,7 +98,7 @@ async ValueTask Write(bool async, PgWriter writer, object value, CancellationTok } PgTypeInfo GetTypeInfo(Type type) - => _options.GetTypeInfo(type, _pgTypeId) + => _options.GetTypeInfoInternal(type, _pgTypeId) ?? throw new NotSupportedException($"Writing values of '{type.FullName}' having DataTypeName '{_options.DatabaseInfo.GetPostgresType(_pgTypeId).DisplayName}' is not supported."); sealed class WriteState diff --git a/src/Npgsql/Internal/Converters/RecordConverter.cs b/src/Npgsql/Internal/Converters/RecordConverter.cs index aabd914b49..89e904099b 100644 --- a/src/Npgsql/Internal/Converters/RecordConverter.cs +++ b/src/Npgsql/Internal/Converters/RecordConverter.cs @@ -44,11 +44,12 @@ async ValueTask Read(bool async, PgReader reader, CancellationToken cancellat _options.DatabaseInfo.GetPostgresType(typeOid).GetRepresentationalType() ?? throw new NotSupportedException($"Reading isn't supported for record field {i} (unknown type OID {typeOid}"); - var typeInfo = _options.GetObjectOrDefaultTypeInfo(postgresType) + var pgTypeId = _options.ToCanonicalTypeId(postgresType); + var typeInfo = _options.GetObjectOrDefaultTypeInfoInternal(pgTypeId) ?? 
throw new NotSupportedException( $"Reading isn't supported for record field {i} (PG type '{postgresType.DisplayName}'"); - var converterInfo = typeInfo.Bind(new Field("?", _options.ToCanonicalTypeId(postgresType), -1), DataFormat.Binary); + var converterInfo = typeInfo.Bind(new Field("?", pgTypeId, -1), DataFormat.Binary); var scope = await reader.BeginNestedRead(async, length, converterInfo.BufferRequirement, cancellationToken).ConfigureAwait(false); try { diff --git a/src/Npgsql/Internal/PgSerializerOptions.cs b/src/Npgsql/Internal/PgSerializerOptions.cs index b79b5757ec..49d7cf7200 100644 --- a/src/Npgsql/Internal/PgSerializerOptions.cs +++ b/src/Npgsql/Internal/PgSerializerOptions.cs @@ -1,7 +1,6 @@ using System; using System.Diagnostics.CodeAnalysis; using System.IO; -using System.Runtime.CompilerServices; using System.Text; using Npgsql.Internal.Postgres; using Npgsql.NameTranslation; @@ -34,7 +33,7 @@ internal PgSerializerOptions(NpgsqlDatabaseInfo databaseInfo, PgTypeInfoResolver internal PgTypeInfo UnspecifiedDBNullTypeInfo { get; } PostgresType? _textPgType; - internal PostgresType TextPgType => _textPgType ??= DatabaseInfo.GetPostgresType(DataTypeNames.Text); + internal PgTypeId TextPgTypeId => ToCanonicalTypeId(_textPgType ??= DatabaseInfo.GetPostgresType(DataTypeNames.Text)); // Used purely for type mapping, where we don't have a full set of types but resolvers might know enough. readonly bool _introspectionInstance; @@ -84,23 +83,20 @@ public static bool IsWellKnownTextType(Type type) ? ((TypeInfoCache)(_typeInfoCache ??= new TypeInfoCache(this))).GetOrAddInfo(type, pgTypeId?.DataTypeName, defaultTypeFallback) : ((TypeInfoCache)(_typeInfoCache ??= new TypeInfoCache(this))).GetOrAddInfo(type, pgTypeId?.Oid, defaultTypeFallback); - public PgTypeInfo? GetDefaultTypeInfo(PostgresType pgType) - => GetTypeInfoCore(null, ToCanonicalTypeId(pgType), false); + internal PgTypeInfo? GetTypeInfoInternal(Type? type, PgTypeId? 
pgTypeId) + => GetTypeInfoCore(type, pgTypeId, false); - public PgTypeInfo? GetDefaultTypeInfo(PgTypeId pgTypeId) - => GetTypeInfoCore(null, pgTypeId, false); + internal PgTypeInfo? GetObjectOrDefaultTypeInfoInternal(PgTypeId pgTypeId) + => GetTypeInfoCore(typeof(object), pgTypeId, true); - public PgTypeInfo? GetTypeInfo(Type type, PostgresType pgType) - => GetTypeInfoCore(type, ToCanonicalTypeId(pgType), false); + public PgTypeInfo? GetDefaultTypeInfo(Type type) + => GetTypeInfoCore(type, null, false); - public PgTypeInfo? GetTypeInfo(Type type, PgTypeId? pgTypeId = null) - => GetTypeInfoCore(type, pgTypeId, false); - - public PgTypeInfo? GetObjectOrDefaultTypeInfo(PostgresType pgType) - => GetTypeInfoCore(typeof(object), ToCanonicalTypeId(pgType), true); + public PgTypeInfo? GetDefaultTypeInfo(PgTypeId pgTypeId) + => GetTypeInfoCore(null, GetCanonicalTypeId(pgTypeId), false); - public PgTypeInfo? GetObjectOrDefaultTypeInfo(PgTypeId pgTypeId) - => GetTypeInfoCore(typeof(object), pgTypeId, true); + public PgTypeInfo? GetTypeInfo(Type type, PgTypeId pgTypeId) + => GetTypeInfoCore(type, GetCanonicalTypeId(pgTypeId), false); // If a given type id is in the opposite form than what was expected it will be mapped according to the requirement. internal PgTypeId GetCanonicalTypeId(PgTypeId pgTypeId) diff --git a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs index 61f1bbc2f3..aa5bbde21f 100644 --- a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs @@ -499,7 +499,7 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) // Probe if there is any mapping at all for this element type. 
var elementId = options.ToCanonicalTypeId(pgElementType); - if (options.GetDefaultTypeInfo(elementId) is null) + if (options.GetTypeInfoInternal(null, elementId) is null) return null; var mappings = new TypeInfoMappingCollection(); diff --git a/src/Npgsql/Internal/ResolverFactories/UnmappedTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/UnmappedTypeInfoResolverFactory.cs index a04c3cc111..d0d52f2168 100644 --- a/src/Npgsql/Internal/ResolverFactories/UnmappedTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/UnmappedTypeInfoResolverFactory.cs @@ -75,11 +75,10 @@ class RangeResolver : DynamicTypeInfoResolver || options.DatabaseInfo.GetPostgresType(dataTypeName) is not PostgresRangeType rangeType) return null; - var subInfo = - matchedType is null - ? options.GetDefaultTypeInfo(rangeType.Subtype) - // Input matchedType here as we don't want an NpgsqlRange over Nullable (it has its own nullability tracking, for better or worse) - : options.GetTypeInfo(matchedType == typeof(object) ? matchedType : matchedType.GetGenericArguments()[0], rangeType.Subtype); + // Input matchedType here as we don't want an NpgsqlRange over Nullable (it has its own nullability tracking, for better or worse) + var subInfo = options.GetTypeInfoInternal( + matchedType is null ? null : matchedType == typeof(object) ? matchedType : matchedType.GetGenericArguments()[0], + options.ToCanonicalTypeId(rangeType.Subtype)); // We have no generic RangeConverterResolver so we would not know how to compose a range mapping for such infos. // See https://github.com/npgsql/npgsql/issues/5268 @@ -133,10 +132,7 @@ class MultirangeResolver : DynamicTypeInfoResolver || options.DatabaseInfo.GetPostgresType(dataTypeName) is not PostgresMultirangeType multirangeType) return null; - var subInfo = - type is null - ? options.GetDefaultTypeInfo(multirangeType.Subrange) - : options.GetTypeInfo(elementType ?? 
typeof(object), multirangeType.Subrange); + var subInfo = options.GetTypeInfoInternal(type is null ? null : elementType ?? typeof(object), options.ToCanonicalTypeId(multirangeType.Subrange)); // We have no generic MultirangeConverterResolver so we would not know how to compose a range mapping for such infos. // See https://github.com/npgsql/npgsql/issues/5268 diff --git a/src/Npgsql/NpgsqlBinaryExporter.cs b/src/Npgsql/NpgsqlBinaryExporter.cs index 171d233af5..9c54a0bba5 100644 --- a/src/Npgsql/NpgsqlBinaryExporter.cs +++ b/src/Npgsql/NpgsqlBinaryExporter.cs @@ -341,7 +341,7 @@ PgConverterInfo CreateConverterInfo(Type type, NpgsqlDbType? npgsqlDbType = null // Handle plugin types via lookup. : GetRepresentationalOrDefault(npgsqlDbType.Value.ToUnqualifiedDataTypeNameOrThrow()); } - var info = options.GetTypeInfo(type, pgTypeId) + var info = options.GetTypeInfoInternal(type, pgTypeId) ?? throw new NotSupportedException($"Reading is not supported for type '{type}'{(npgsqlDbType is null ? "" : $" and NpgsqlDbType '{npgsqlDbType}'")}"); // Binary export has no type info so we only do caller-directed interpretation of data. diff --git a/src/Npgsql/NpgsqlNestedDataReader.cs b/src/Npgsql/NpgsqlNestedDataReader.cs index d6c7e1b7cc..5794054670 100644 --- a/src/Npgsql/NpgsqlNestedDataReader.cs +++ b/src/Npgsql/NpgsqlNestedDataReader.cs @@ -380,14 +380,16 @@ public override bool Read() if (i >= _columns.Count) { var pgType = SerializerOptions.DatabaseInfo.GetPostgresType(typeOid); - _columns.Add(new ColumnInfo(pgType, bufferPos, AdoSerializerHelpers.GetTypeInfoForReading(typeof(object), pgType, SerializerOptions), Format)); + var pgTypeId = SerializerOptions.ToCanonicalTypeId(pgType); + _columns.Add(new ColumnInfo(pgType, bufferPos, AdoSerializerHelpers.GetTypeInfoForReading(typeof(object), pgTypeId, SerializerOptions), Format)); } else { var pgType = _columns[i].PostgresType.OID == typeOid ? 
_columns[i].PostgresType : SerializerOptions.DatabaseInfo.GetPostgresType(typeOid); - _columns[i] = new ColumnInfo(pgType, bufferPos, AdoSerializerHelpers.GetTypeInfoForReading(typeof(object), pgType, SerializerOptions), Format); + var pgTypeId = SerializerOptions.ToCanonicalTypeId(pgType); + _columns[i] = new ColumnInfo(pgType, bufferPos, AdoSerializerHelpers.GetTypeInfoForReading(typeof(object), pgTypeId, SerializerOptions), Format); } var columnLen = PgReader.ReadInt32(); @@ -517,7 +519,7 @@ PgConverterInfo GetOrAddConverterInfo(Type type, ColumnInfo column, int ordinal, } } - var converterInfo = column.Bind(AdoSerializerHelpers.GetTypeInfoForReading(type, column.PostgresType, SerializerOptions)); + var converterInfo = column.Bind(AdoSerializerHelpers.GetTypeInfoForReading(type, SerializerOptions.ToCanonicalTypeId(column.PostgresType), SerializerOptions)); _columns[ordinal] = column with { LastConverterInfo = converterInfo }; asObject = converterInfo.IsBoxingConverter; return converterInfo; diff --git a/src/Npgsql/NpgsqlSchema.cs b/src/Npgsql/NpgsqlSchema.cs index ba18c0acc7..1b52738561 100644 --- a/src/Npgsql/NpgsqlSchema.cs +++ b/src/Npgsql/NpgsqlSchema.cs @@ -794,6 +794,7 @@ static DataTable GetDataTypes(NpgsqlConnection conn) // TODO: Support type name restriction try { + var serializerOptions = connector.SerializerOptions; PgSerializerOptions.IntrospectionCaller = true; var types = new List(); @@ -802,7 +803,7 @@ static DataTable GetDataTypes(NpgsqlConnection conn) types.AddRange(connector.DatabaseInfo.CompositeTypes); foreach (var baseType in types) { - if (connector.SerializerOptions.GetDefaultTypeInfo(baseType) is not { } info) + if (serializerOptions.GetTypeInfoInternal(null, serializerOptions.ToCanonicalTypeId(baseType)) is not { } info) continue; var row = table.Rows.Add(); @@ -817,7 +818,7 @@ static DataTable GetDataTypes(NpgsqlConnection conn) foreach (var arrayType in connector.DatabaseInfo.ArrayTypes) { - if 
(connector.SerializerOptions.GetDefaultTypeInfo(arrayType) is not { } info) + if (serializerOptions.GetTypeInfoInternal(null, serializerOptions.ToCanonicalTypeId(arrayType)) is not { } info) continue; var row = table.Rows.Add(); @@ -836,7 +837,7 @@ static DataTable GetDataTypes(NpgsqlConnection conn) foreach (var rangeType in connector.DatabaseInfo.RangeTypes) { - if (connector.SerializerOptions.GetDefaultTypeInfo(rangeType) is not { } info) + if (serializerOptions.GetTypeInfoInternal(null, serializerOptions.ToCanonicalTypeId(rangeType)) is not { } info) continue; var row = table.Rows.Add(); @@ -856,7 +857,7 @@ static DataTable GetDataTypes(NpgsqlConnection conn) foreach (var multirangeType in connector.DatabaseInfo.MultirangeTypes) { var subtypeType = multirangeType.Subrange.Subtype; - if (connector.SerializerOptions.GetDefaultTypeInfo(multirangeType) is not { } info) + if (serializerOptions.GetTypeInfoInternal(null, serializerOptions.ToCanonicalTypeId(multirangeType)) is not { } info) continue; var row = table.Rows.Add(); @@ -876,7 +877,7 @@ static DataTable GetDataTypes(NpgsqlConnection conn) foreach (var domainType in connector.DatabaseInfo.DomainTypes) { var representationalType = domainType.GetRepresentationalType(); - if (connector.SerializerOptions.GetDefaultTypeInfo(representationalType) is not { } info) + if (serializerOptions.GetTypeInfoInternal(null, serializerOptions.ToCanonicalTypeId(representationalType)) is not { } info) continue; var row = table.Rows.Add(); diff --git a/src/Npgsql/Schema/DbColumnSchemaGenerator.cs b/src/Npgsql/Schema/DbColumnSchemaGenerator.cs index b8b957041c..014b107580 100644 --- a/src/Npgsql/Schema/DbColumnSchemaGenerator.cs +++ b/src/Npgsql/Schema/DbColumnSchemaGenerator.cs @@ -35,9 +35,9 @@ static string GenerateColumnsQuery(Version pgVersion, string columnFieldFilter) {(pgVersion.IsGreaterOrEqual(10) ? 
"attidentity != ''" : "FALSE")} AS isidentity, CASE WHEN typ.typtype = 'd' THEN typ.typtypmod ELSE atttypmod END AS typmod, CASE WHEN atthasdef THEN (SELECT pg_get_expr(adbin, cls.oid) FROM pg_attrdef WHERE adrelid = cls.oid AND adnum = attr.attnum) ELSE NULL END AS default, - CASE WHEN ((cls.relkind = ANY (ARRAY['r'::""char"", 'p'::""char""])) + CASE WHEN ((cls.relkind = ANY (ARRAY['r'::""char"", 'p'::""char""])) OR ((cls.relkind = ANY (ARRAY['v'::""char"", 'f'::""char""])) - AND pg_column_is_updatable((cls.oid)::regclass, attr.attnum, false))) + AND pg_column_is_updatable((cls.oid)::regclass, attr.attnum, false))) AND attr.attidentity NOT IN ('a') THEN 'true'::boolean ELSE 'false'::boolean END AS is_updatable, @@ -260,7 +260,7 @@ void ColumnPostConfig(NpgsqlDbColumn column, int typeModifier) var serializerOptions = _connection.Connector!.SerializerOptions; column.NpgsqlDbType = column.PostgresType.DataTypeName.ToNpgsqlDbType(); - if (serializerOptions.GetObjectOrDefaultTypeInfo(column.PostgresType) is { } typeInfo) + if (serializerOptions.GetObjectOrDefaultTypeInfoInternal(serializerOptions.ToCanonicalTypeId(column.PostgresType)) is { } typeInfo) { column.DataType = typeInfo.Type; column.IsLong = column.PostgresType.DataTypeName == DataTypeNames.Bytea; diff --git a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs index 9abe5acecb..ad83c7bc77 100644 --- a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs +++ b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs @@ -103,7 +103,7 @@ PgSerializerOptions TypeMappingOptions DataTypeName? 
dataTypeName; try { - var typeInfo = TypeMappingOptions.GetTypeInfo(type); + var typeInfo = TypeMappingOptions.GetTypeInfoInternal(type, null); if (typeInfo is PgResolverTypeInfo info) dataTypeName = info.GetObjectResolution(value).PgTypeId.DataTypeName; else diff --git a/test/Npgsql.Benchmarks/ResolveHandler.cs b/test/Npgsql.Benchmarks/ResolveHandler.cs index 86e5d20fbb..e082b81c4e 100644 --- a/test/Npgsql.Benchmarks/ResolveHandler.cs +++ b/test/Npgsql.Benchmarks/ResolveHandler.cs @@ -30,13 +30,13 @@ public void Setup() [Benchmark] public PgTypeInfo? ResolveDefault() - => _serializerOptions.GetDefaultTypeInfo(new Oid(23)); // int4 + => _serializerOptions.GetTypeInfoInternal(null, new Oid(23)); // int4 [Benchmark] public PgTypeInfo? ResolveType() - => _serializerOptions.GetTypeInfo(typeof(int)); + => _serializerOptions.GetTypeInfoInternal(typeof(int), null); [Benchmark] public PgTypeInfo? ResolveBoth() - => _serializerOptions.GetTypeInfo(typeof(int), new Oid(23)); // int4 + => _serializerOptions.GetTypeInfoInternal(typeof(int), new Oid(23)); // int4 } From 8473d729e667a4db860c22335509484c4eb268f8 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Tue, 10 Sep 2024 17:02:03 +0300 Subject: [PATCH 452/761] Fix sending cancellation request if it's requested while reading prepended responses (#5774) Fixes #5191 --- src/Npgsql/Internal/NpgsqlConnector.cs | 87 +++++++++++++++++--------- src/Npgsql/NpgsqlBinaryExporter.cs | 2 +- src/Npgsql/NpgsqlCommand.cs | 9 +-- test/Npgsql.Tests/CommandTests.cs | 44 ++++++++++++- 4 files changed, 107 insertions(+), 35 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 3bf0ee3617..ac5558f476 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -1314,6 +1314,12 @@ internal ValueTask ReadMessage( // We've read all the prepended response. // Allow cancellation to proceed. 
ReadingPrependedMessagesMRE.Set(); + + // User requested cancellation but it hasn't been performed yet. + // This might happen if the cancellation is requested while we're reading prepended responses + // because we shouldn't cancel them and otherwise might deadlock. + if (UserCancellationRequested && !PostgresCancellationPerformed) + PerformDelayedUserCancellation(); } catch (Exception e) { @@ -1724,7 +1730,7 @@ internal void ResetCancellation() } } - internal void PerformUserCancellation() + internal void PerformImmediateUserCancellation() { var connection = Connection; if (connection is null || connection.ConnectorBindingScope == ConnectorBindingScope.Reader || UserCancellationRequested) @@ -1744,34 +1750,43 @@ internal void PerformUserCancellation() try { - // Wait before we've read all responses for the prepended queries - // as we can't gracefully handle their cancellation. - // Break makes sure that it's going to be set even if we fail while reading them. + // Set the flag first before waiting on ReadingPrependedMessagesMRE. + // That way we're making sure that in case we're racing with ReadingPrependedMessagesMRE.Set + // that it's going to read the new value of the flag and request cancellation + _userCancellationRequested = true; + // Check whether we've read all responses for the prepended queries + // as we can't gracefully handle their cancellation. 
// We don't wait indefinitely to avoid deadlocks from synchronous CancellationToken.Register // See #5032 if (!ReadingPrependedMessagesMRE.Wait(0)) return; - _userCancellationRequested = true; - - if (AttemptPostgresCancellation && SupportsPostgresCancellation) - { - var cancellationTimeout = Settings.CancellationTimeout; - if (PerformPostgresCancellation() && cancellationTimeout >= 0) - { - if (cancellationTimeout > 0) - { - ReadBuffer.Timeout = TimeSpan.FromMilliseconds(cancellationTimeout); - ReadBuffer.Cts.CancelAfter(cancellationTimeout); - } + PerformUserCancellationUnsynchronized(); + } + finally + { + Monitor.Exit(CancelLock); + } + } - return; - } - } + void PerformDelayedUserCancellation() + { + // Take the lock first to make sure there is no concurrent Break. + // We should be safe to take it as Break only take it to set the state. + lock (SyncObj) + { + // The connector is dead, exit gracefully. + if (!IsConnected) + return; + // The connector is still alive, take the CancelLock before exiting SingleUseLock. + // If a break will happen after, it's going to wait for the cancellation to complete. + Monitor.Enter(CancelLock); + } - ReadBuffer.Timeout = _cancelImmediatelyTimeout; - ReadBuffer.Cts.Cancel(); + try + { + PerformUserCancellationUnsynchronized(); } finally { @@ -1779,6 +1794,27 @@ internal void PerformUserCancellation() } } + void PerformUserCancellationUnsynchronized() + { + if (AttemptPostgresCancellation && SupportsPostgresCancellation) + { + var cancellationTimeout = Settings.CancellationTimeout; + if (PerformPostgresCancellation() && cancellationTimeout >= 0) + { + if (cancellationTimeout > 0) + { + ReadBuffer.Timeout = TimeSpan.FromMilliseconds(cancellationTimeout); + ReadBuffer.Cts.CancelAfter(cancellationTimeout); + } + + return; + } + } + + ReadBuffer.Timeout = _cancelImmediatelyTimeout; + ReadBuffer.Cts.Cancel(); + } + /// /// Creates another connector and sends a cancel request through it for this connector. 
This method never throws, but returns /// whether the cancellation attempt failed. @@ -1861,7 +1897,7 @@ internal CancellationTokenRegistration StartCancellableOperation( AttemptPostgresCancellation = attemptPgCancellation; return _cancellationTokenRegistration = - cancellationToken.Register(static c => ((NpgsqlConnector)c!).PerformUserCancellation(), this); + cancellationToken.Register(static c => ((NpgsqlConnector)c!).PerformImmediateUserCancellation(), this); } /// @@ -1893,7 +1929,7 @@ internal NestedCancellableScope StartNestedCancellableOperation( var currentAttemptPostgresCancellation = AttemptPostgresCancellation; AttemptPostgresCancellation = attemptPgCancellation; - var registration = cancellationToken.Register(static c => ((NpgsqlConnector)c!).PerformUserCancellation(), this); + var registration = cancellationToken.Register(static c => ((NpgsqlConnector)c!).PerformImmediateUserCancellation(), this); return new(this, registration, currentUserCancellationToken, currentAttemptPostgresCancellation); } @@ -2050,11 +2086,6 @@ internal Exception Break(Exception reason) try { - // If we're broken while reading prepended messages - // the cancellation request might still be waiting on the MRE. - // Unblock it. - ReadingPrependedMessagesMRE.Set(); - LogMessages.BreakingConnection(ConnectionLogger, Id, reason); // Note that we may be reading and writing from the same connector concurrently, so safely set diff --git a/src/Npgsql/NpgsqlBinaryExporter.cs b/src/Npgsql/NpgsqlBinaryExporter.cs index 9c54a0bba5..f221056119 100644 --- a/src/Npgsql/NpgsqlBinaryExporter.cs +++ b/src/Npgsql/NpgsqlBinaryExporter.cs @@ -448,7 +448,7 @@ void ThrowIfDisposed() /// /// Cancels an ongoing export. /// - public void Cancel() => _connector.PerformUserCancellation(); + public void Cancel() => _connector.PerformImmediateUserCancellation(); /// /// Async cancels an ongoing export. 
diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index 2520d6221d..f3900cfb50 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -1484,6 +1484,10 @@ internal virtual async ValueTask ExecuteReader(bool async, Com break; } + // If a cancellation is in progress, wait for it to "complete" before proceeding (#615) + // We do it before changing the state because we only allow sending cancellation request if State == InProgress + connector.ResetCancellation(); + State = CommandState.InProgress; if (logger.IsEnabled(LogLevel.Information)) @@ -1499,9 +1503,6 @@ internal virtual async ValueTask ExecuteReader(bool async, Com TraceCommandStart(connector.Settings); TraceCommandEnrich(connector); - // If a cancellation is in progress, wait for it to "complete" before proceeding (#615) - connector.ResetCancellation(); - // We do not wait for the entire send to complete before proceeding to reading - // the sending continues in parallel with the user's reading. 
Waiting for the // entire send to complete would trigger a deadlock for multi-statement commands, @@ -1662,7 +1663,7 @@ public override void Cancel() if (connector is null) return; - connector.PerformUserCancellation(); + connector.PerformImmediateUserCancellation(); } #endregion Cancel diff --git a/test/Npgsql.Tests/CommandTests.cs b/test/Npgsql.Tests/CommandTests.cs index 36c8744fdf..b4b088f5f8 100644 --- a/test/Npgsql.Tests/CommandTests.cs +++ b/test/Npgsql.Tests/CommandTests.cs @@ -300,7 +300,6 @@ public async Task Prepare_timeout_hard([Values] SyncOrAsync async) #region Cancel [Test, Description("Basic cancellation scenario")] - [Ignore("Flaky, see https://github.com/npgsql/npgsql/issues/5070")] public async Task Cancel() { if (IsMultiplexing) @@ -340,7 +339,6 @@ public async Task Cancel_async_immediately() } [Test, Description("Cancels an async query with the cancellation token, with successful PG cancellation")] - [Explicit("Flaky due to #5033")] public async Task Cancel_async_soft() { if (IsMultiplexing) @@ -361,6 +359,48 @@ public async Task Cancel_async_soft() Assert.That(await conn.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1)); } + [Test, Description("Cancels an async query with the cancellation token and prepended query, with successful PG cancellation")] + [IssueLink("https://github.com/npgsql/npgsql/issues/5191")] + public async Task Cancel_async_soft_with_prepended_query() + { + if (IsMultiplexing) + return; // Multiplexing, cancellation + + await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); + var server = await postmasterMock.WaitForServerConnection(); + + var processId = conn.ProcessID; + + await using var tx = await conn.BeginTransactionAsync(); + await using var cmd = CreateSleepCommand(conn); + using var cancellationSource = new CancellationTokenSource(); + var t = 
cmd.ExecuteNonQueryAsync(cancellationSource.Token); + + await server.ExpectSimpleQuery("BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED"); + cancellationSource.Cancel(); + await server + .WriteCommandComplete() + .WriteReadyForQuery(TransactionStatus.InTransactionBlock) + .FlushAsync(); + + Assert.That((await postmasterMock.WaitForCancellationRequest()).ProcessId, + Is.EqualTo(processId)); + + await server + .WriteErrorResponse(PostgresErrorCodes.QueryCanceled) + .WriteReadyForQuery() + .FlushAsync(); + + var exception = Assert.ThrowsAsync(async () => await t)!; + Assert.That(exception.InnerException, + Is.TypeOf().With.Property(nameof(PostgresException.SqlState)).EqualTo(PostgresErrorCodes.QueryCanceled)); + Assert.That(exception.CancellationToken, Is.EqualTo(cancellationSource.Token)); + + Assert.That(conn.FullState, Is.EqualTo(ConnectionState.Open)); + } + [Test, Description("Cancels an async query with the cancellation token, with unsuccessful PG cancellation (socket break)")] public async Task Cancel_async_hard() { From 6faa690b2eba0642b8b0d84848e7e2d80c64af95 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Fri, 13 Sep 2024 18:53:33 +0300 Subject: [PATCH 453/761] Add method to enable metrics for Npgsql (#5833) Fixes #5832 --- .../MeterProviderBuilderExtensions.cs | 19 +++++++++++++++++++ src/Npgsql/NpgsqlMetricsOptions.cs | 9 +++++++++ src/Npgsql/PublicAPI.Unshipped.txt | 2 ++ 3 files changed, 30 insertions(+) create mode 100644 src/Npgsql.OpenTelemetry/MeterProviderBuilderExtensions.cs create mode 100644 src/Npgsql/NpgsqlMetricsOptions.cs diff --git a/src/Npgsql.OpenTelemetry/MeterProviderBuilderExtensions.cs b/src/Npgsql.OpenTelemetry/MeterProviderBuilderExtensions.cs new file mode 100644 index 0000000000..90f81c4cc3 --- /dev/null +++ b/src/Npgsql.OpenTelemetry/MeterProviderBuilderExtensions.cs @@ -0,0 +1,19 @@ +using System; +using OpenTelemetry.Metrics; + +// ReSharper disable once CheckNamespace +namespace Npgsql; + +/// +/// Extension method for 
setting up Npgsql OpenTelemetry metrics. +/// +public static class MeterProviderBuilderExtensions +{ + /// + /// Subscribes to the Npgsql metrics reporter to enable OpenTelemetry metrics. + /// + public static MeterProviderBuilder AddNpgsqlInstrumentation( + this MeterProviderBuilder builder, + Action? options = null) + => builder.AddMeter("Npgsql"); +} diff --git a/src/Npgsql/NpgsqlMetricsOptions.cs b/src/Npgsql/NpgsqlMetricsOptions.cs new file mode 100644 index 0000000000..b4da63dc7a --- /dev/null +++ b/src/Npgsql/NpgsqlMetricsOptions.cs @@ -0,0 +1,9 @@ +namespace Npgsql; + +/// +/// Options to configure Npgsql's support for OpenTelemetry metrics. +/// Currently no options are available. +/// +public class NpgsqlMetricsOptions +{ +} diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index a601f6b737..dae728da58 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -3,6 +3,8 @@ Npgsql.NpgsqlConnection.SslClientAuthenticationOptionsCallback.get -> System.Act Npgsql.NpgsqlConnection.SslClientAuthenticationOptionsCallback.set -> void Npgsql.NpgsqlDataSourceBuilder.UseNegotiateOptionsCallback(System.Action? negotiateOptionsCallback) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.UseSslClientAuthenticationOptionsCallback(System.Action? sslClientAuthenticationOptionsCallback) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlMetricsOptions +Npgsql.NpgsqlMetricsOptions.NpgsqlMetricsOptions() -> void Npgsql.NpgsqlSlimDataSourceBuilder.EnableGeometricTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableJsonTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableNetworkTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! 
From 7de5d711476af1389cba921103f344f37e8adfb8 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Wed, 25 Sep 2024 13:05:30 +0300 Subject: [PATCH 454/761] Fix sequential read for NpgsqlPath (#5845) Fixes #5844 --- src/Npgsql/Internal/Converters/Geometric/PathConverter.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Npgsql/Internal/Converters/Geometric/PathConverter.cs b/src/Npgsql/Internal/Converters/Geometric/PathConverter.cs index c78ba84013..0481037254 100644 --- a/src/Npgsql/Internal/Converters/Geometric/PathConverter.cs +++ b/src/Npgsql/Internal/Converters/Geometric/PathConverter.cs @@ -32,7 +32,7 @@ async ValueTask Read(bool async, PgReader reader, CancellationToken for (var i = 0; i < numPoints; i++) { if (reader.ShouldBuffer(sizeof(double) * 2)) - await reader.Buffer(async, sizeof(byte) + sizeof(int), cancellationToken).ConfigureAwait(false); + await reader.Buffer(async, sizeof(double) * 2, cancellationToken).ConfigureAwait(false); result.Add(new NpgsqlPoint(reader.ReadDouble(), reader.ReadDouble())); } From 9ca526b85bfb75859fbea018df27cfcdfb125f69 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Wed, 25 Sep 2024 17:22:55 +0300 Subject: [PATCH 455/761] Fix reading numerics with big scale difference (#5851) Fixes #5848 --- .../Internal/Converters/Primitive/PgNumeric.cs | 14 ++++++++++++-- test/Npgsql.Tests/Types/NumericTests.cs | 2 ++ 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/src/Npgsql/Internal/Converters/Primitive/PgNumeric.cs b/src/Npgsql/Internal/Converters/Primitive/PgNumeric.cs index 19266cda1f..799494dda1 100644 --- a/src/Npgsql/Internal/Converters/Primitive/PgNumeric.cs +++ b/src/Npgsql/Internal/Converters/Primitive/PgNumeric.cs @@ -351,14 +351,24 @@ internal static decimal ToDecimal(short scale, short weight, ushort sign, Span 0) { var scaleChunk = Math.Min(MaxUIntScale, scaleDifference); - result *= UIntPowers10[scaleChunk]; + scaleFactor *= UIntPowers10[scaleChunk]; scaleDifference -= scaleChunk; } + 
} } result *= scaleFactor; diff --git a/test/Npgsql.Tests/Types/NumericTests.cs b/test/Npgsql.Tests/Types/NumericTests.cs index 43dd846a8c..44c636984b 100644 --- a/test/Npgsql.Tests/Types/NumericTests.cs +++ b/test/Npgsql.Tests/Types/NumericTests.cs @@ -76,6 +76,8 @@ public class NumericTests : MultiplexingTestBase // Bug 2033 new object[] { "0.0036882500000000000000000000", 0.0036882500000000000000000000M }, + // Bug 5848 + new object[] { "10836968.715000000000000000000000", 10836968.715000000000000000000000M }, new object[] { "936490726837837729197", 936490726837837729197M }, new object[] { "9364907268378377291970000", 9364907268378377291970000M }, From 036bfc461b9492d1f8ca7ff12d88fe1e242c1955 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Fri, 27 Sep 2024 17:57:06 +0200 Subject: [PATCH 456/761] Add NpgsqlConnection.CloneWithAsync (#5856) Fixes #5852 --- src/Npgsql/NpgsqlConnection.cs | 25 +++++++++++++++++++++++++ src/Npgsql/PublicAPI.Unshipped.txt | 1 + test/Npgsql.Tests/ConnectionTests.cs | 22 +++++++++++++++------- 3 files changed, 41 insertions(+), 7 deletions(-) diff --git a/src/Npgsql/NpgsqlConnection.cs b/src/Npgsql/NpgsqlConnection.cs index d3b7d8f636..b8bfbd8d01 100644 --- a/src/Npgsql/NpgsqlConnection.cs +++ b/src/Npgsql/NpgsqlConnection.cs @@ -1795,6 +1795,31 @@ public NpgsqlConnection CloneWith(string connectionString) }; } + /// + /// Clones this connection, replacing its connection string with the given one. + /// This allows creating a new connection with the same security information + /// (password, SSL callbacks) while changing other connection parameters (e.g. + /// database or pooling) + /// + public async ValueTask CloneWithAsync(string connectionString, CancellationToken cancellationToken = default) + { + CheckDisposed(); + var csb = new NpgsqlConnectionStringBuilder(connectionString); + csb.Password ??= _dataSource is null ? 
null : await _dataSource.GetPassword(async: true, cancellationToken).ConfigureAwait(false); + if (csb.PersistSecurityInfo && !Settings.PersistSecurityInfo) + csb.PersistSecurityInfo = false; + + return new NpgsqlConnection(csb.ToString()) + { + SslClientAuthenticationOptionsCallback = SslClientAuthenticationOptionsCallback ?? _dataSource?.SslClientAuthenticationOptionsCallback, +#pragma warning disable CS0618 // Obsolete + ProvideClientCertificatesCallback = ProvideClientCertificatesCallback, + UserCertificateValidationCallback = UserCertificateValidationCallback, + ProvidePasswordCallback = ProvidePasswordCallback, +#pragma warning restore CS0618 + }; + } + /// /// This method changes the current database by disconnecting from the actual /// database and connecting to the specified. diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index dae728da58..c94ae72bca 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -1,4 +1,5 @@ #nullable enable +Npgsql.NpgsqlConnection.CloneWithAsync(string! connectionString, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.ValueTask Npgsql.NpgsqlConnection.SslClientAuthenticationOptionsCallback.get -> System.Action? Npgsql.NpgsqlConnection.SslClientAuthenticationOptionsCallback.set -> void Npgsql.NpgsqlDataSourceBuilder.UseNegotiateOptionsCallback(System.Action? negotiateOptionsCallback) -> Npgsql.NpgsqlDataSourceBuilder! 
diff --git a/test/Npgsql.Tests/ConnectionTests.cs b/test/Npgsql.Tests/ConnectionTests.cs index 4b1eeeda8d..de25a239bb 100644 --- a/test/Npgsql.Tests/ConnectionTests.cs +++ b/test/Npgsql.Tests/ConnectionTests.cs @@ -945,7 +945,7 @@ public void No_password_without_PersistSecurityInfo([Values(true, false)] bool p } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/2725")] - public void Clone_with_PersistSecurityInfo() + public async Task Clone_with_PersistSecurityInfo([Values] bool async) { var builder = new NpgsqlConnectionStringBuilder(ConnectionString) { @@ -958,20 +958,24 @@ public void Clone_with_PersistSecurityInfo() // First un-persist, should work builder.PersistSecurityInfo = false; var connStringWithoutPersist = builder.ToString(); - using var clonedWithoutPersist = connWithPersist.CloneWith(connStringWithoutPersist); + using var clonedWithoutPersist = async + ? await connWithPersist.CloneWithAsync(connStringWithoutPersist) + : connWithPersist.CloneWith(connStringWithoutPersist); clonedWithoutPersist.Open(); Assert.That(clonedWithoutPersist.ConnectionString, Does.Not.Contain("Password=")); // Then attempt to re-persist, should not work - using var clonedConn = clonedWithoutPersist.CloneWith(connStringWithPersist); + using var clonedConn = async + ? 
await clonedWithoutPersist.CloneWithAsync(connStringWithPersist) + : clonedWithoutPersist.CloneWith(connStringWithPersist); clonedConn.Open(); Assert.That(clonedConn.ConnectionString, Does.Not.Contain("Password=")); } [Test] - public async Task CloneWith_and_data_source_with_password() + public async Task CloneWith_and_data_source_with_password([Values] bool async) { var dataSourceBuilder = new NpgsqlDataSourceBuilder(ConnectionString); // Set the password via the data source property later to make sure that's picked up by CloneWith @@ -984,12 +988,14 @@ public async Task CloneWith_and_data_source_with_password() // Test that the up-to-date password gets copied to the clone, as if we opened the original connection instead of cloning it using var _ = CreateTempPool(new NpgsqlConnectionStringBuilder(ConnectionString) { Password = null }, out var tempConnectionString); - await using var clonedConnection = connection.CloneWith(tempConnectionString); + await using var clonedConnection = async + ? await connection.CloneWithAsync(tempConnectionString) + : connection.CloneWith(tempConnectionString); await clonedConnection.OpenAsync(); } [Test] - public async Task CloneWith_and_data_source_with_auth_callbacks() + public async Task CloneWith_and_data_source_with_auth_callbacks([Values] bool async) { var (userCertificateValidationCallbackCalled, clientCertificatesCallbackCalled) = (false, false); @@ -1003,7 +1009,9 @@ public async Task CloneWith_and_data_source_with_auth_callbacks() await using var connection = dataSource.CreateConnection(); using var _ = CreateTempPool(ConnectionString, out var tempConnectionString); - await using var clonedConnection = connection.CloneWith(tempConnectionString); + await using var clonedConnection = async + ? 
await connection.CloneWithAsync(tempConnectionString) + : connection.CloneWith(tempConnectionString); var sslClientAuthenticationOptions = new SslClientAuthenticationOptions(); clonedConnection.SslClientAuthenticationOptionsCallback!(sslClientAuthenticationOptions); From 39e38fd1d5ef354b27cdd70c632877660b24ac87 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Mon, 7 Oct 2024 21:55:22 +0300 Subject: [PATCH 457/761] Fix explicit prepare/unprepare after error 0A000 (#5869) Fixes #5864 --- src/Npgsql/NpgsqlBatchCommand.cs | 6 +----- src/Npgsql/PreparedStatement.cs | 3 ++- src/Npgsql/PreparedStatementManager.cs | 7 +++++-- test/Npgsql.Tests/PrepareTests.cs | 24 +++++++++++++++++++++--- 4 files changed, 29 insertions(+), 11 deletions(-) diff --git a/src/Npgsql/NpgsqlBatchCommand.cs b/src/Npgsql/NpgsqlBatchCommand.cs index 8175afa614..911554d27d 100644 --- a/src/Npgsql/NpgsqlBatchCommand.cs +++ b/src/Npgsql/NpgsqlBatchCommand.cs @@ -288,11 +288,7 @@ internal void ApplyCommandComplete(CommandCompleteMessage msg) OID = msg.OID; } - internal void ResetPreparation() - { - PreparedStatement = null; - ConnectorPreparedOn = null; - } + internal void ResetPreparation() => ConnectorPreparedOn = null; /// /// Returns the . diff --git a/src/Npgsql/PreparedStatement.cs b/src/Npgsql/PreparedStatement.cs index f24905eb41..cda613d1cc 100644 --- a/src/Npgsql/PreparedStatement.cs +++ b/src/Npgsql/PreparedStatement.cs @@ -26,7 +26,8 @@ sealed class PreparedStatement internal PreparedState State { get; set; } - internal bool IsPrepared => State == PreparedState.Prepared; + // Invalidated statement is still prepared and allocated on PG's side + internal bool IsPrepared => State is PreparedState.Prepared or PreparedState.Invalidated; /// /// If true, the user explicitly requested this statement be prepared. 
It does not get closed as part of diff --git a/src/Npgsql/PreparedStatementManager.cs b/src/Npgsql/PreparedStatementManager.cs index e798ec28c2..8f80223753 100644 --- a/src/Npgsql/PreparedStatementManager.cs +++ b/src/Npgsql/PreparedStatementManager.cs @@ -61,7 +61,8 @@ internal PreparedStatementManager(NpgsqlConnector connector) if (BySql.TryGetValue(sql, out var pStatement)) { Debug.Assert(pStatement.State != PreparedState.Unprepared); - if (pStatement.IsExplicit) + // If statement is invalidated, fall through below where we replace it with another + if (pStatement.IsExplicit && pStatement.State != PreparedState.Invalidated) { // Great, we've found an explicit prepared statement. // We just need to check that the parameter types correspond, since prepared statements are @@ -78,8 +79,10 @@ internal PreparedStatementManager(NpgsqlConnector connector) // Found a candidate for autopreparation. Remove it and prepare explicitly. RemoveCandidate(pStatement); break; + // The statement is invalidated. Just replace it with a new one. + case PreparedState.Invalidated: + // The statement has already been autoprepared. We need to "promote" it to explicit. case PreparedState.Prepared: - // The statement has already been autoprepared. We need to "promote" it to explicit. 
statementBeingReplaced = pStatement; break; case PreparedState.Unprepared: diff --git a/test/Npgsql.Tests/PrepareTests.cs b/test/Npgsql.Tests/PrepareTests.cs index 3c91b7ad47..8a1c763e9a 100644 --- a/test/Npgsql.Tests/PrepareTests.cs +++ b/test/Npgsql.Tests/PrepareTests.cs @@ -737,7 +737,7 @@ public void Multiplexing_not_supported() } [Test] - public async Task Explicitly_prepared_statement_invalidation() + public async Task Explicitly_prepared_statement_invalidation([Values] bool prepareAfterError, [Values] bool unprepareAfterError) { await using var dataSource = CreateDataSource(csb => { @@ -755,12 +755,30 @@ public async Task Explicitly_prepared_statement_invalidation() // Since we've changed the table schema, the next execution of the prepared statement will error with 0A000 var exception = Assert.ThrowsAsync(() => command.ExecuteNonQueryAsync())!; Assert.That(exception.SqlState, Is.EqualTo(PostgresErrorCodes.FeatureNotSupported)); // cached plan must not change result type + Assert.IsFalse(command.IsPrepared); + + if (unprepareAfterError) + { + // Just check that calling unprepare after error doesn't break anything + await command.UnprepareAsync(); + Assert.IsFalse(command.IsPrepared); + } + + if (prepareAfterError) + { + // If we explicitly prepare after error, we should replace the previous prepared statement with a new one + await command.PrepareAsync(); + Assert.IsTrue(command.IsPrepared); + } // However, Npgsql should invalidate the prepared statement in this case, so the next execution should work Assert.DoesNotThrowAsync(() => command.ExecuteNonQueryAsync()); - // The command is unprepared, though. It's the user's responsibility to re-prepare if they wish. - Assert.False(command.IsPrepared); + if (!prepareAfterError) + { + // The command is unprepared, though. It's the user's responsibility to re-prepare if they wish. 
+ Assert.False(command.IsPrepared); + } } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4920")] From 7062c5cb3296f3b984abdd92e7c08f5b3ed98ac7 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Mon, 7 Oct 2024 21:58:16 +0300 Subject: [PATCH 458/761] Fix writing non-normalized Nodatime's periods (#5868) Fixes #5867 --- .../Internal/PeriodConverter.cs | 2 ++ test/Npgsql.PluginTests/NodaTimeTests.cs | 25 +++++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/src/Npgsql.NodaTime/Internal/PeriodConverter.cs b/src/Npgsql.NodaTime/Internal/PeriodConverter.cs index 4dbde48dbc..70c0228652 100644 --- a/src/Npgsql.NodaTime/Internal/PeriodConverter.cs +++ b/src/Npgsql.NodaTime/Internal/PeriodConverter.cs @@ -33,6 +33,8 @@ protected override Period ReadCore(PgReader reader) protected override void WriteCore(PgWriter writer, Period value) { + // We have to normalize the value as otherwise we might get a value with 0 everything except for ticks, which we ignore + value = value.Normalize(); // Note that the end result must be long // see #3438 var microsecondsInDay = diff --git a/test/Npgsql.PluginTests/NodaTimeTests.cs b/test/Npgsql.PluginTests/NodaTimeTests.cs index a6af632723..8edccc1ce1 100644 --- a/test/Npgsql.PluginTests/NodaTimeTests.cs +++ b/test/Npgsql.PluginTests/NodaTimeTests.cs @@ -770,6 +770,31 @@ public async Task Bug3438() } } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5867")] + public async Task Normalize_period_on_write() + { + var value = Period.FromTicks(-3675048768766); + var expected = value.Normalize(); + var expectedAfterRoundtripBuilder = expected.ToBuilder(); + // Postgres doesn't support nanoseconds, trim them to microseconds + expectedAfterRoundtripBuilder.Nanoseconds -= expected.Nanoseconds % 1000; + var expectedAfterRoundtrip = expectedAfterRoundtripBuilder.Build(); + + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT $1, $2", conn); + 
cmd.Parameters.AddWithValue(value); + cmd.Parameters.AddWithValue(expected); + + await using var reader = await cmd.ExecuteReaderAsync(); + await reader.ReadAsync(); + + var dbValue = reader.GetFieldValue(0); + var dbExpected = reader.GetFieldValue(1); + + Assert.That(dbValue, Is.EqualTo(dbExpected)); + Assert.That(dbValue, Is.EqualTo(expectedAfterRoundtrip)); + } + #endregion Interval #region Support From 602ef9aa96eeb4699464565da0fb82ed03ef354a Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Tue, 8 Oct 2024 15:03:14 +0300 Subject: [PATCH 459/761] Fix explicit preparation replacing automatically prepared one (#5874) Fixes #5873 --- src/Npgsql/NpgsqlCommand.cs | 9 +++++++-- test/Npgsql.Tests/AutoPrepareTests.cs | 4 ++++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index f3900cfb50..9b4c0e2959 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -731,11 +731,16 @@ static async Task PrepareLong(NpgsqlCommand command, bool async, NpgsqlConnector continue; var pStatement = batchCommand.PreparedStatement!; + var replacedStatement = pStatement.StatementBeingReplaced; - if (pStatement.StatementBeingReplaced != null) + if (replacedStatement != null) { Expect(await connector.ReadMessage(async).ConfigureAwait(false), connector); - pStatement.StatementBeingReplaced.CompleteUnprepare(); + replacedStatement.CompleteUnprepare(); + + if (!replacedStatement.IsExplicit) + connector.PreparedStatementManager.AutoPrepared[replacedStatement.AutoPreparedSlotIndex] = null; + pStatement.StatementBeingReplaced = null; } diff --git a/test/Npgsql.Tests/AutoPrepareTests.cs b/test/Npgsql.Tests/AutoPrepareTests.cs index 14d6997230..00d9455147 100644 --- a/test/Npgsql.Tests/AutoPrepareTests.cs +++ b/test/Npgsql.Tests/AutoPrepareTests.cs @@ -168,6 +168,10 @@ public void Promote_auto_to_explicit() // cmd1's statement is no longer valid (has been closed), make sure it still works (will run 
unprepared) cmd2.ExecuteScalar(); + + // Trigger autoprepare on a different query to confirm we didn't leave replaced statement in a bad state + using var cmd3 = new NpgsqlCommand("SELECT 2", conn); + cmd3.ExecuteNonQuery(); cmd3.ExecuteNonQuery(); } [Test] From 9a0f32c623fe683fa4613d3277ce29e38d439ccf Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Sat, 12 Oct 2024 08:48:16 +0200 Subject: [PATCH 460/761] Bump System.Text.Json to 8.0.5 (CVE) (#5875) See https://github.com/advisories/GHSA-8g4q-xg66-9fp4 --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 4799d90a90..830632c58b 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -10,7 +10,7 @@ - + From e433ce1d3af9307c3e1b287b40b7c0837bd3d40a Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Sat, 12 Oct 2024 20:17:55 +0300 Subject: [PATCH 461/761] Fix NRE with default NpgsqlPolygon and NpgsqlPath (#5860) Fixes #5854 --- src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs | 73 ++++++++++++++------------- test/Npgsql.Tests/TypesTests.cs | 14 +++++ 2 files changed, 53 insertions(+), 34 deletions(-) diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs b/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs index 753f0f0919..c1555603c2 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs @@ -200,7 +200,10 @@ void NormalizeBox() /// public struct NpgsqlPath : IList, IEquatable { - readonly List _points; + List _points; + + List Points => _points ??= new(); + public bool Open { get; set; } public NpgsqlPath() @@ -231,23 +234,23 @@ public NpgsqlPath(int capacity) : this(capacity, false) {} public NpgsqlPoint this[int index] { - get => _points[index]; - set => _points[index] = value; + get => Points[index]; + set => Points[index] = value; } - public int Capacity => _points.Capacity; - public int Count => _points.Count; + public int Capacity => Points.Capacity; + public int Count => 
_points?.Count ?? 0; public bool IsReadOnly => false; - public int IndexOf(NpgsqlPoint item) => _points.IndexOf(item); - public void Insert(int index, NpgsqlPoint item) => _points.Insert(index, item); - public void RemoveAt(int index) => _points.RemoveAt(index); - public void Add(NpgsqlPoint item) => _points.Add(item); - public void Clear() => _points.Clear(); - public bool Contains(NpgsqlPoint item) => _points.Contains(item); - public void CopyTo(NpgsqlPoint[] array, int arrayIndex) => _points.CopyTo(array, arrayIndex); - public bool Remove(NpgsqlPoint item) => _points.Remove(item); - public IEnumerator GetEnumerator() => _points.GetEnumerator(); + public int IndexOf(NpgsqlPoint item) => Points.IndexOf(item); + public void Insert(int index, NpgsqlPoint item) => Points.Insert(index, item); + public void RemoveAt(int index) => Points.RemoveAt(index); + public void Add(NpgsqlPoint item) => Points.Add(item); + public void Clear() => Points.Clear(); + public bool Contains(NpgsqlPoint item) => Points.Contains(item); + public void CopyTo(NpgsqlPoint[] array, int arrayIndex) => Points.CopyTo(array, arrayIndex); + public bool Remove(NpgsqlPoint item) => Points.Remove(item); + public IEnumerator GetEnumerator() => Points.GetEnumerator(); IEnumerator IEnumerable.GetEnumerator() => GetEnumerator(); public bool Equals(NpgsqlPath other) @@ -287,12 +290,12 @@ public override string ToString() var sb = new StringBuilder(); sb.Append(Open ? '[' : '('); int i; - for (i = 0; i < _points.Count; i++) + for (i = 0; i < Count; i++) { var p = _points[i]; sb.AppendFormat(CultureInfo.InvariantCulture, "({0},{1})", p.X, p.Y); if (i < _points.Count - 1) - sb.Append(","); + sb.Append(','); } sb.Append(Open ? ']' : ')'); return sb.ToString(); @@ -302,9 +305,11 @@ public override string ToString() /// /// Represents a PostgreSQL Polygon type. 
/// -public readonly struct NpgsqlPolygon : IList, IEquatable +public struct NpgsqlPolygon : IList, IEquatable { - readonly List _points; + List _points; + + List Points => _points ??= new(); public NpgsqlPolygon() => _points = new(); @@ -319,23 +324,23 @@ public NpgsqlPolygon(int capacity) public NpgsqlPoint this[int index] { - get => _points[index]; - set => _points[index] = value; + get => Points[index]; + set => Points[index] = value; } - public int Capacity => _points.Capacity; - public int Count => _points.Count; + public int Capacity => Points.Capacity; + public int Count => _points?.Count ?? 0; public bool IsReadOnly => false; - public int IndexOf(NpgsqlPoint item) => _points.IndexOf(item); - public void Insert(int index, NpgsqlPoint item) => _points.Insert(index, item); - public void RemoveAt(int index) => _points.RemoveAt(index); - public void Add(NpgsqlPoint item) => _points.Add(item); - public void Clear() => _points.Clear(); - public bool Contains(NpgsqlPoint item) => _points.Contains(item); - public void CopyTo(NpgsqlPoint[] array, int arrayIndex) => _points.CopyTo(array, arrayIndex); - public bool Remove(NpgsqlPoint item) => _points.Remove(item); - public IEnumerator GetEnumerator() => _points.GetEnumerator(); + public int IndexOf(NpgsqlPoint item) => Points.IndexOf(item); + public void Insert(int index, NpgsqlPoint item) => Points.Insert(index, item); + public void RemoveAt(int index) => Points.RemoveAt(index); + public void Add(NpgsqlPoint item) => Points.Add(item); + public void Clear() => Points.Clear(); + public bool Contains(NpgsqlPoint item) => Points.Contains(item); + public void CopyTo(NpgsqlPoint[] array, int arrayIndex) => Points.CopyTo(array, arrayIndex); + public bool Remove(NpgsqlPoint item) => Points.Remove(item); + public IEnumerator GetEnumerator() => Points.GetEnumerator(); IEnumerator IEnumerable.GetEnumerator() => GetEnumerator(); public bool Equals(NpgsqlPolygon other) @@ -374,7 +379,7 @@ public override string ToString() var sb 
= new StringBuilder(); sb.Append('('); int i; - for (i = 0; i < _points.Count; i++) + for (i = 0; i < Count; i++) { var p = _points[i]; sb.AppendFormat(CultureInfo.InvariantCulture, "({0},{1})", p.X, p.Y); @@ -478,8 +483,8 @@ public NpgsqlInet(string addr) } public override string ToString() - => (Address.AddressFamily == AddressFamily.InterNetwork && Netmask == 32) || - (Address.AddressFamily == AddressFamily.InterNetworkV6 && Netmask == 128) + => (Address?.AddressFamily == AddressFamily.InterNetwork && Netmask == 32) || + (Address?.AddressFamily == AddressFamily.InterNetworkV6 && Netmask == 128) ? Address.ToString() : $"{Address}/{Netmask}"; diff --git a/test/Npgsql.Tests/TypesTests.cs b/test/Npgsql.Tests/TypesTests.cs index 0b80062de6..047093df60 100644 --- a/test/Npgsql.Tests/TypesTests.cs +++ b/test/Npgsql.Tests/TypesTests.cs @@ -193,6 +193,20 @@ public void NpgsqlPath_empty() public void NpgsqlPolygon_empty() => Assert.That(new NpgsqlPolygon { new(1, 2) }, Is.EqualTo(new NpgsqlPolygon(new NpgsqlPoint(1, 2)))); + [Test] + public void NpgsqlPath_default() + { + NpgsqlPath defaultPath = default; + Assert.IsFalse(defaultPath.Equals(new NpgsqlPath { new(1, 2) })); + } + + [Test] + public void NpgsqlPolygon_default() + { + NpgsqlPolygon defaultPolygon = default; + Assert.IsFalse(defaultPolygon.Equals(new NpgsqlPolygon { new(1, 2) })); + } + [Test] public void Bug1011018() { From 53b9fa5008aaf8d9a60d3dc8f46435a6ab03977a Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Sun, 13 Oct 2024 07:11:29 +0300 Subject: [PATCH 462/761] Fix writing composites with enum (#5837) Fixes #5835 Co-authored-by: Nino Floris --- .../Internal/Converters/CompositeConverter.cs | 9 +++-- src/Npgsql/Internal/Size.cs | 35 ++++++++++++++--- test/Npgsql.Tests/Types/CompositeTests.cs | 38 +++++++++++++++++++ 3 files changed, 74 insertions(+), 8 deletions(-) diff --git a/src/Npgsql/Internal/Converters/CompositeConverter.cs b/src/Npgsql/Internal/Converters/CompositeConverter.cs index 
24f3d36329..2c985b647c 100644 --- a/src/Npgsql/Internal/Converters/CompositeConverter.cs +++ b/src/Npgsql/Internal/Converters/CompositeConverter.cs @@ -21,14 +21,17 @@ public CompositeConverter(CompositeInfo composite) var readReq = field.BinaryReadRequirement; var writeReq = field.BinaryWriteRequirement; - // If so we cannot depend on its buffer size being fixed. + // If field is nullable we cannot depend on its buffer size being fixed. if (field.IsDbNullable) { readReq = readReq.Combine(Size.CreateUpperBound(0)); writeReq = writeReq.Combine(Size.CreateUpperBound(0)); } - req = req.Combine(readReq, writeReq); + var readSuccess = req.Read.TryCombine(readReq, out readReq); + var writeSuccess = req.Write.TryCombine(writeReq, out writeReq); + // If we fail to combine due to overflow return unknown. + req = BufferRequirements.Create(readSuccess ? readReq : Size.Unknown, writeSuccess ? writeReq : Size.Unknown); } // We have to put a limit on the requirements we report otherwise smaller buffer sizes won't work. @@ -37,7 +40,7 @@ public CompositeConverter(CompositeInfo composite) _bufferRequirements = req; // Return unknown if we hit the limit. - Size Limit(Size requirement) + static Size Limit(Size requirement) { const int maxByteCount = 1024; return requirement.GetValueOrDefault() > maxByteCount ? 
requirement.Combine(Size.Unknown) : requirement; diff --git a/src/Npgsql/Internal/Size.cs b/src/Npgsql/Internal/Size.cs index 79fe38b5c4..299f2bb229 100644 --- a/src/Npgsql/Internal/Size.cs +++ b/src/Npgsql/Internal/Size.cs @@ -44,15 +44,40 @@ public int Value public static Size Unknown { get; } = new(SizeKind.Unknown, 0); public static Size Zero { get; } = new(SizeKind.Exact, 0); - public Size Combine(Size result) + public bool TryCombine(Size other, out Size result) { - if (_kind is SizeKind.Unknown || result._kind is SizeKind.Unknown) + if (_kind is SizeKind.Unknown || other._kind is SizeKind.Unknown) + { + result = Unknown; + return true; + } + + var sum = unchecked(_value + other._value); + if ((_value >= 0 && sum < other._value) || (_value < 0 && sum > other._value)) + { + result = default; + return false; + } + + if (_kind is SizeKind.UpperBound || other._kind is SizeKind.UpperBound) + { + result = CreateUpperBound(sum); + return true; + } + + result = Create(sum); + return true; + } + + public Size Combine(Size other) + { + if (_kind is SizeKind.Unknown || other._kind is SizeKind.Unknown) return Unknown; - if (_kind is SizeKind.UpperBound || result._kind is SizeKind.UpperBound) - return CreateUpperBound(checked(_value + result._value)); + if (_kind is SizeKind.UpperBound || other._kind is SizeKind.UpperBound) + return CreateUpperBound(checked(_value + other._value)); - return Create(checked(_value + result._value)); + return Create(checked(_value + other._value)); } public static implicit operator Size(int value) => Create(value); diff --git a/test/Npgsql.Tests/Types/CompositeTests.cs b/test/Npgsql.Tests/Types/CompositeTests.cs index 713d5220a3..a1553b99a5 100644 --- a/test/Npgsql.Tests/Types/CompositeTests.cs +++ b/test/Npgsql.Tests/Types/CompositeTests.cs @@ -312,6 +312,32 @@ await AssertType( comparer: (actual, expected) => actual.Ints!.SequenceEqual(expected.Ints!)); } + [Test] + public async Task Composite_containing_enum_type() + { + await using var 
adminConnection = await OpenConnectionAsync(); + var enumType = await GetTempTypeName(adminConnection); + var compositeType = await GetTempTypeName(adminConnection); + + await adminConnection.ExecuteNonQueryAsync($@" +CREATE TYPE {enumType} AS enum ('value1', 'value2', 'value3'); +CREATE TYPE {compositeType} AS (enum_value {enumType});"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.MapComposite(compositeType); + dataSourceBuilder.MapEnum(enumType); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + await AssertType( + connection, + new SomeCompositeWithEnum { EnumValue = SomeCompositeWithEnum.TestEnum.Value2 }, + @"(value2)", + compositeType, + npgsqlDbType: null, + comparer: (actual, expected) => actual.EnumValue == expected.EnumValue); + } + [Test] public async Task Composite_containing_converter_resolver_type() { @@ -688,6 +714,18 @@ class SomeCompositeWithArray public int[]? Ints { get; set; } } + class SomeCompositeWithEnum + { + public enum TestEnum + { + Value1, + Value2, + Value3 + } + + public TestEnum EnumValue { get; set; } + } + class SomeCompositeWithConverterResolverType { public DateTime[]? 
DateTimes { get; set; } From 764bfea6cfe807222625a2b9bc9688207f296e74 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Sun, 13 Oct 2024 07:31:04 +0300 Subject: [PATCH 463/761] Fix protocol desync while reading output parameters from a function (#5840) Fixes #5820 --- src/Npgsql/NpgsqlDataReader.cs | 16 +++++++++++++++- test/Npgsql.Tests/FunctionTests.cs | 26 ++++++++++++++++++++++++++ 2 files changed, 41 insertions(+), 1 deletion(-) diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index f85aa3d4a5..3df6affdeb 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -467,7 +467,18 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo msg = await Connector.ReadMessage(async).ConfigureAwait(false); ProcessMessage(msg); if (msg.Code == BackendMessageCode.DataRow) - PopulateOutputParameters(Command.InternalBatchCommands[StatementIndex]._parameters!); + { + try + { + PopulateOutputParameters(Command.InternalBatchCommands[StatementIndex]._parameters!); + } + catch (Exception e) + { + // TODO: ideally we should flow down to global exception filter and consume there + await Consume(async, firstException: e).ConfigureAwait(false); + throw; + } + } } else { @@ -510,6 +521,7 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo // Prevent the command or batch from being recycled (by the connection) when it's disposed. This is important since // the exception is very likely to escape the using statement of the command, and by that time some other user may // already be using the recycled instance. 
+ // TODO: we probably should do than even if it's not PostgresException (error from PopulateOutputParameters) Command.IsCacheable = false; // If the schema of a table changes after a statement is prepared on that table, PostgreSQL errors with @@ -540,6 +552,8 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo // However, if the command has error barrier, we now have to consume results from the commands after it (unless it's the // last one). // Note that Consume calls NextResult (this method) recursively, the isConsuming flag tells us we're in this mode. + // TODO: We might as well call Consume on every command (even the last one) to make sure we do read every single message until RFQ + // in case we get an exception in the middle of NextResult if ((statement.AppendErrorBarrier ?? Command.EnableErrorBarriers) && StatementIndex < _statements.Count - 1) { if (isConsuming) diff --git a/test/Npgsql.Tests/FunctionTests.cs b/test/Npgsql.Tests/FunctionTests.cs index ea85185879..9323dd2349 100644 --- a/test/Npgsql.Tests/FunctionTests.cs +++ b/test/Npgsql.Tests/FunctionTests.cs @@ -177,6 +177,32 @@ public async Task CommandBehavior_SchemaOnly_support_function_call() Assert.AreEqual(0, i); } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5820")] + public async Task Output_param_cast_error() + { + await using var conn = await OpenConnectionAsync(); + var function = await GetTempFunctionName(conn); + await conn.ExecuteNonQueryAsync(@$" +CREATE FUNCTION {function} (INOUT param_in int4, OUT param_out interval) AS $$ +BEGIN + param_out = interval '5 years'; +END +$$ LANGUAGE plpgsql"); + await using var cmd = new NpgsqlCommand(function, conn); + cmd.CommandType = CommandType.StoredProcedure; + cmd.Parameters.Add(new NpgsqlParameter("param_in", DbType.Int32) + { + Direction = ParameterDirection.InputOutput, + Value = 1 + }); + cmd.Parameters.Add(new NpgsqlParameter("param_out", NpgsqlDbType.Interval) + { + Direction = 
ParameterDirection.Output + }); + Assert.ThrowsAsync(cmd.ExecuteNonQueryAsync); + Assert.DoesNotThrowAsync(async () => await conn.ExecuteNonQueryAsync("SELECT 1")); + } + #region DeriveParameters [Test, Description("Tests function parameter derivation with IN, OUT and INOUT parameters")] From cd5c96423871870c597624e0c43b7fb17b14b5db Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Sun, 13 Oct 2024 20:02:05 +0200 Subject: [PATCH 464/761] Switch to dotnet SDK 9.0.0-rc.2 (#5879) --- .github/workflows/build.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- Directory.Packages.props | 21 +++++++------------ global.json | 4 ++-- .../Npgsql.Benchmarks.csproj | 3 +++ test/Npgsql.Tests/BatchTests.cs | 1 - test/Npgsql.Tests/CopyTests.cs | 2 +- test/Npgsql.Tests/LargeObjectTests.cs | 2 +- 8 files changed, 17 insertions(+), 20 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index c60c4eb0d2..bb9e020a23 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -15,7 +15,7 @@ concurrency: cancel-in-progress: true env: - dotnet_sdk_version: '8.0.203' + dotnet_sdk_version: '9.0.100-rc.2.24474.11' postgis_version: 3 DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true # Windows comes with PG pre-installed, and defines the PGPASSWORD environment variable. 
Remove it as it interferes diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index fe38f213f6..f1ed40c3c7 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -32,7 +32,7 @@ concurrency: cancel-in-progress: true env: - dotnet_sdk_version: '8.0.203' + dotnet_sdk_version: '9.0.100-rc.2.24474.11' DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true jobs: diff --git a/Directory.Packages.props b/Directory.Packages.props index 830632c58b..d9319bc357 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -1,24 +1,19 @@ - - 8.0.0 - $(SystemVersion) - - - - + + - + - + @@ -30,8 +25,8 @@ - - + + @@ -39,11 +34,11 @@ - + - + diff --git a/global.json b/global.json index c4fc1c4611..bc9c875848 100644 --- a/global.json +++ b/global.json @@ -1,7 +1,7 @@ { "sdk": { - "version": "8.0.100", + "version": "9.0.100-rc.2.24474.11", "rollForward": "latestMajor", - "allowPrerelease": "false" + "allowPrerelease": "true" } } diff --git a/test/Npgsql.Benchmarks/Npgsql.Benchmarks.csproj b/test/Npgsql.Benchmarks/Npgsql.Benchmarks.csproj index 922d4cbdce..013bfb8a9d 100644 --- a/test/Npgsql.Benchmarks/Npgsql.Benchmarks.csproj +++ b/test/Npgsql.Benchmarks/Npgsql.Benchmarks.csproj @@ -5,6 +5,9 @@ Npgsql.Benchmarks Exe $(NoWarn);NPG9001 + + + NU1901;NU1902;NU1903;NU1904 diff --git a/test/Npgsql.Tests/BatchTests.cs b/test/Npgsql.Tests/BatchTests.cs index 208fc2642d..5fddf0a316 100644 --- a/test/Npgsql.Tests/BatchTests.cs +++ b/test/Npgsql.Tests/BatchTests.cs @@ -723,7 +723,6 @@ await conn.ExecuteNonQueryAsync($@" await using (var reader = await batch.ExecuteReaderAsync(Behavior)) { - var e = Assert.ThrowsAsync(async () => await reader.NextResultAsync())!; Assert.That(e.BatchCommand, Is.SameAs(batch.BatchCommands[1])); } diff --git a/test/Npgsql.Tests/CopyTests.cs b/test/Npgsql.Tests/CopyTests.cs index d6be58290e..5daa935d2f 100644 --- a/test/Npgsql.Tests/CopyTests.cs +++ b/test/Npgsql.Tests/CopyTests.cs @@ 
-977,7 +977,7 @@ public async Task Cancel_raw_binary_export_when_not_consumed_and_then_Dispose() // This must be large enough to cause Postgres to queue up CopyData messages. var stream = conn.BeginRawBinaryCopy("COPY (select md5(random()::text) as id from generate_series(1, 100000)) TO STDOUT BINARY"); var buffer = new byte[32]; - await stream.ReadAsync(buffer, 0, buffer.Length); + await stream.ReadExactlyAsync(buffer, 0, buffer.Length); stream.Cancel(); Assert.DoesNotThrowAsync(async () => await stream.DisposeAsync()); } diff --git a/test/Npgsql.Tests/LargeObjectTests.cs b/test/Npgsql.Tests/LargeObjectTests.cs index fb7179abb4..3d11dfd7b1 100644 --- a/test/Npgsql.Tests/LargeObjectTests.cs +++ b/test/Npgsql.Tests/LargeObjectTests.cs @@ -21,7 +21,7 @@ public void Test() stream.Write(buf, 0, buf.Length); stream.Seek(0, System.IO.SeekOrigin.Begin); var buf2 = new byte[buf.Length]; - stream.Read(buf2, 0, buf2.Length); + stream.ReadExactly(buf2, 0, buf2.Length); Assert.That(buf.SequenceEqual(buf2)); Assert.AreEqual(5, stream.Position); From ef09d55c02033eb8381948ca6fe47b42859a34d8 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Sun, 13 Oct 2024 22:13:36 +0200 Subject: [PATCH 465/761] Configure CI for released PG17 (#5880) --- .github/workflows/build.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index bb9e020a23..636b03a012 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -30,12 +30,12 @@ jobs: fail-fast: false matrix: os: [ubuntu-22.04] - pg_major: [16, 15, 14, 13, 12] + pg_major: [17, 16, 15, 14, 13] config: [Release] test_tfm: [net8.0] include: - os: ubuntu-22.04 - pg_major: 16 + pg_major: 17 config: Debug test_tfm: net8.0 - os: macos-14 @@ -43,14 +43,14 @@ jobs: config: Release test_tfm: net8.0 - os: windows-2022 - pg_major: 16 - config: Release - test_tfm: net8.0 - - os: ubuntu-22.04 pg_major: 17 config: Release test_tfm: net8.0 - 
pg_prerelease: 'PG Prerelease' +# - os: ubuntu-22.04 +# pg_major: 17 +# config: Release +# test_tfm: net8.0 +# pg_prerelease: 'PG Prerelease' outputs: is_release: ${{ steps.analyze_tag.outputs.is_release }} From 588b018b7560572bce055ca15095efa28dae1b56 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Mon, 14 Oct 2024 18:19:14 +0300 Subject: [PATCH 466/761] Add support for direct ssl with pg17 (#5881) Closes #5677 --- src/Npgsql/Internal/NpgsqlConnector.cs | 20 +++++++++-- src/Npgsql/NpgsqlConnectionStringBuilder.cs | 39 +++++++++++++++++++- src/Npgsql/PublicAPI.Unshipped.txt | 5 +++ test/Npgsql.Tests/SecurityTests.cs | 40 +++++++++++++++++++++ 4 files changed, 100 insertions(+), 4 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index ac5558f476..e6427ab3d4 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -20,7 +20,6 @@ using Npgsql.BackendMessages; using Npgsql.Util; using static Npgsql.Util.Statics; -using System.Transactions; using Microsoft.Extensions.Logging; using Npgsql.Properties; @@ -282,6 +281,8 @@ internal bool PostgresCancellationPerformed internal bool AttemptPostgresCancellation { get; private set; } static readonly TimeSpan _cancelImmediatelyTimeout = TimeSpan.FromMilliseconds(-1); + static readonly SslApplicationProtocol _alpnProtocol = new("postgresql"); + #pragma warning disable CA1859 // We're casting to IDisposable to not explicitly reference X509Certificate2 for NativeAOT // TODO: probably pointless now, needs to be rechecked @@ -782,7 +783,16 @@ async Task RawOpen(SslMode sslMode, NpgsqlTimeout timeout, bool async, Cancellat IsSecure = false; - if ((sslMode is SslMode.Prefer && DataSource.TransportSecurityHandler.SupportEncryption) || + if (Settings.SslNegotiation == SslNegotiation.Direct) + { + // We already check that in NpgsqlConnectionStringBuilder.PostProcessAndValidate, but just on the off case + if (Settings.SslMode is not 
SslMode.Require and not SslMode.VerifyCA and not SslMode.VerifyFull) + throw new ArgumentException("SSL Mode has to be Require or higher to be used with direct SSL Negotiation"); + await DataSource.TransportSecurityHandler.NegotiateEncryption(async, this, sslMode, timeout, cancellationToken).ConfigureAwait(false); + if (ReadBuffer.ReadBytesLeft > 0) + throw new NpgsqlException("Additional unencrypted data received after SSL negotiation - this should never happen, and may be an indication of a man-in-the-middle attack."); + } + else if ((sslMode is SslMode.Prefer && DataSource.TransportSecurityHandler.SupportEncryption) || sslMode is SslMode.Require or SslMode.VerifyCA or SslMode.VerifyFull) { WriteSslRequest(); @@ -909,7 +919,11 @@ internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, ClientCertificates = clientCertificates, EnabledSslProtocols = SslProtocols.None, CertificateRevocationCheckMode = checkCertificateRevocation ? X509RevocationMode.Online : X509RevocationMode.Offline, - RemoteCertificateValidationCallback = certificateValidationCallback + RemoteCertificateValidationCallback = certificateValidationCallback, + ApplicationProtocols = new List + { + _alpnProtocol + } }; if (SslClientAuthenticationOptionsCallback is not null) diff --git a/src/Npgsql/NpgsqlConnectionStringBuilder.cs b/src/Npgsql/NpgsqlConnectionStringBuilder.cs index 361232d198..4260343d82 100644 --- a/src/Npgsql/NpgsqlConnectionStringBuilder.cs +++ b/src/Npgsql/NpgsqlConnectionStringBuilder.cs @@ -461,6 +461,26 @@ public SslMode SslMode } SslMode _sslMode; + /// + /// Controls how SSL encryption is negotiated with the server, if SSL is used. 
+ /// + [Category("Security")] + [Description("Controls how SSL encryption is negotiated with the server, if SSL is used.")] + [DisplayName("SSL Negotiation")] + [DefaultValue(SslNegotiation.Postgres)] + [NpgsqlConnectionStringProperty] + public SslNegotiation SslNegotiation + { + get => _sslNegotiation; + set + { + _sslNegotiation = value; + SetValue(nameof(SslNegotiation), value); + } + } + + SslNegotiation _sslNegotiation; + /// /// Location of a client certificate to be sent to the server. /// @@ -1394,8 +1414,10 @@ internal void PostProcessAndValidate() throw new ArgumentException("Host can't be null"); if (Multiplexing && !Pooling) throw new ArgumentException("Pooling must be on to use multiplexing"); + if (SslNegotiation == SslNegotiation.Direct && SslMode is not SslMode.Require and not SslMode.VerifyCA and not SslMode.VerifyFull) + throw new ArgumentException("SSL Mode has to be Require or higher to be used with direct SSL Negotiation"); - if (!Host.Contains(",")) + if (!Host.Contains(',')) { if (TargetSessionAttributesParsed is not null && TargetSessionAttributesParsed != Npgsql.TargetSessionAttributes.Any) @@ -1658,6 +1680,21 @@ public enum SslMode VerifyFull } +/// +/// Specifies how to initialize SSL session. +/// +public enum SslNegotiation +{ + /// + /// Perform PostgreSQL protocol negotiation. + /// + Postgres, + /// + /// Start SSL handshake directly after establishing the TCP/IP connection. + /// + Direct +} + /// /// Specifies how to manage channel binding. /// diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index c94ae72bca..0c4717dac8 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -2,6 +2,8 @@ Npgsql.NpgsqlConnection.CloneWithAsync(string! 
connectionString, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.ValueTask Npgsql.NpgsqlConnection.SslClientAuthenticationOptionsCallback.get -> System.Action? Npgsql.NpgsqlConnection.SslClientAuthenticationOptionsCallback.set -> void +Npgsql.NpgsqlConnectionStringBuilder.SslNegotiation.get -> Npgsql.SslNegotiation +Npgsql.NpgsqlConnectionStringBuilder.SslNegotiation.set -> void Npgsql.NpgsqlDataSourceBuilder.UseNegotiateOptionsCallback(System.Action? negotiateOptionsCallback) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.UseSslClientAuthenticationOptionsCallback(System.Action? sslClientAuthenticationOptionsCallback) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlMetricsOptions @@ -30,3 +32,6 @@ Npgsql.Replication.PgOutput.PgOutputStreamingMode Npgsql.Replication.PgOutput.PgOutputStreamingMode.Off = 0 -> Npgsql.Replication.PgOutput.PgOutputStreamingMode Npgsql.Replication.PgOutput.PgOutputStreamingMode.On = 1 -> Npgsql.Replication.PgOutput.PgOutputStreamingMode Npgsql.Replication.PgOutput.PgOutputStreamingMode.Parallel = 2 -> Npgsql.Replication.PgOutput.PgOutputStreamingMode +Npgsql.SslNegotiation +Npgsql.SslNegotiation.Direct = 1 -> Npgsql.SslNegotiation +Npgsql.SslNegotiation.Postgres = 0 -> Npgsql.SslNegotiation diff --git a/test/Npgsql.Tests/SecurityTests.cs b/test/Npgsql.Tests/SecurityTests.cs index 7c44cbc2b6..daf30fbfcd 100644 --- a/test/Npgsql.Tests/SecurityTests.cs +++ b/test/Npgsql.Tests/SecurityTests.cs @@ -482,6 +482,46 @@ public async Task Bug4305_not_Secure([Values] bool async) Assert.DoesNotThrow(() => cmd.ExecuteNonQuery()); } + [Test] + public async Task Direct_ssl_negotiation() + { + await using var adminConn = await OpenConnectionAsync(); + MinimumPgVersion(adminConn, "17.0"); + + await using var dataSource = CreateDataSource(csb => + { + csb.SslMode = SslMode.Require; + csb.SslNegotiation = SslNegotiation.Direct; + }); + await using var conn = 
await dataSource.OpenConnectionAsync(); + Assert.IsTrue(conn.IsSecure); + } + + [Test] + public void Direct_ssl_requires_correct_sslmode([Values] SslMode sslMode) + { + if (sslMode is SslMode.Disable or SslMode.Allow or SslMode.Prefer) + { + var ex = Assert.Throws(() => + { + using var dataSource = CreateDataSource(csb => + { + csb.SslMode = sslMode; + csb.SslNegotiation = SslNegotiation.Direct; + }); + })!; + Assert.That(ex.Message, Is.EqualTo("SSL Mode has to be Require or higher to be used with direct SSL Negotiation")); + } + else + { + using var dataSource = CreateDataSource(csb => + { + csb.SslMode = sslMode; + csb.SslNegotiation = SslNegotiation.Direct; + }); + } + } + #region Setup / Teardown / Utils [OneTimeSetUp] From ab8e8db61617283603ac0350c1902222d710b92a Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Tue, 15 Oct 2024 12:51:01 +0300 Subject: [PATCH 467/761] Add support for environment variable PGSSLNEGOTIATION (#5882) Related to #5677 Followup to #5881 --- src/Npgsql/Internal/NpgsqlConnector.cs | 18 ++++++++++++-- src/Npgsql/NpgsqlConnectionStringBuilder.cs | 7 +++--- src/Npgsql/PostgresEnvironment.cs | 4 ++- test/Npgsql.Tests/SecurityTests.cs | 27 +++++++++++++++++++++ 4 files changed, 49 insertions(+), 7 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index e6427ab3d4..faa7c0c0d7 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -783,9 +783,9 @@ async Task RawOpen(SslMode sslMode, NpgsqlTimeout timeout, bool async, Cancellat IsSecure = false; - if (Settings.SslNegotiation == SslNegotiation.Direct) + if (GetSslNegotiation(Settings) == SslNegotiation.Direct) { - // We already check that in NpgsqlConnectionStringBuilder.PostProcessAndValidate, but just on the off case + // We already check that in NpgsqlConnectionStringBuilder.PostProcessAndValidate, but since we also allow environment variables... 
if (Settings.SslMode is not SslMode.Require and not SslMode.VerifyCA and not SslMode.VerifyFull) throw new ArgumentException("SSL Mode has to be Require or higher to be used with direct SSL Negotiation"); await DataSource.TransportSecurityHandler.NegotiateEncryption(async, this, sslMode, timeout, cancellationToken).ConfigureAwait(false); @@ -836,6 +836,20 @@ async Task RawOpen(SslMode sslMode, NpgsqlTimeout timeout, bool async, Cancellat } } + static SslNegotiation GetSslNegotiation(NpgsqlConnectionStringBuilder settings) + { + if (settings.UserProvidedSslNegotiation is { } userProvidedSslNegotiation) + return userProvidedSslNegotiation; + + if (PostgresEnvironment.SslNegotiation is { } sslNegotiationEnv) + { + if (Enum.TryParse(sslNegotiationEnv, ignoreCase: true, out var sslNegotiation)) + return sslNegotiation; + } + + return SslNegotiation.Postgres; + } + internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken) { var clientCertificates = new X509Certificate2Collection(); diff --git a/src/Npgsql/NpgsqlConnectionStringBuilder.cs b/src/Npgsql/NpgsqlConnectionStringBuilder.cs index 4260343d82..f2dbe7340e 100644 --- a/src/Npgsql/NpgsqlConnectionStringBuilder.cs +++ b/src/Npgsql/NpgsqlConnectionStringBuilder.cs @@ -467,19 +467,18 @@ public SslMode SslMode [Category("Security")] [Description("Controls how SSL encryption is negotiated with the server, if SSL is used.")] [DisplayName("SSL Negotiation")] - [DefaultValue(SslNegotiation.Postgres)] [NpgsqlConnectionStringProperty] public SslNegotiation SslNegotiation { - get => _sslNegotiation; + get => UserProvidedSslNegotiation ?? SslNegotiation.Postgres; set { - _sslNegotiation = value; + UserProvidedSslNegotiation = value; SetValue(nameof(SslNegotiation), value); } } - SslNegotiation _sslNegotiation; + internal SslNegotiation? UserProvidedSslNegotiation { get; private set; } /// /// Location of a client certificate to be sent to the server. 
diff --git a/src/Npgsql/PostgresEnvironment.cs b/src/Npgsql/PostgresEnvironment.cs index 69036601e5..bacdd9bfde 100644 --- a/src/Npgsql/PostgresEnvironment.cs +++ b/src/Npgsql/PostgresEnvironment.cs @@ -48,6 +48,8 @@ internal static string? SslCertRootDefault internal static string? TargetSessionAttributes => Environment.GetEnvironmentVariable("PGTARGETSESSIONATTRS"); + internal static string? SslNegotiation => Environment.GetEnvironmentVariable("PGSSLNEGOTIATION"); + static string? GetHomeDir() => Environment.GetEnvironmentVariable(RuntimeInformation.IsOSPlatform(OSPlatform.Windows) ? "APPDATA" : "HOME"); @@ -55,4 +57,4 @@ internal static string? SslCertRootDefault => GetHomeDir() is string homedir ? Path.Combine(homedir, RuntimeInformation.IsOSPlatform(OSPlatform.Windows) ? "postgresql" : ".postgresql") : null; -} \ No newline at end of file +} diff --git a/test/Npgsql.Tests/SecurityTests.cs b/test/Npgsql.Tests/SecurityTests.cs index daf30fbfcd..c1af68f515 100644 --- a/test/Npgsql.Tests/SecurityTests.cs +++ b/test/Npgsql.Tests/SecurityTests.cs @@ -522,6 +522,33 @@ public void Direct_ssl_requires_correct_sslmode([Values] SslMode sslMode) } } + [Test] + [NonParallelizable] // Sets environment variable + public async Task Direct_ssl_via_env_requires_correct_sslmode() + { + await using var adminConn = await OpenConnectionAsync(); + MinimumPgVersion(adminConn, "17.0"); + + // NonParallelizable attribute doesn't work with parameters that well + foreach (var sslMode in new[] { SslMode.Disable, SslMode.Allow, SslMode.Prefer, SslMode.Require }) + { + using var _ = SetEnvironmentVariable("PGSSLNEGOTIATION", nameof(SslNegotiation.Direct)); + await using var dataSource = CreateDataSource(csb => + { + csb.SslMode = sslMode; + }); + if (sslMode is SslMode.Disable or SslMode.Allow or SslMode.Prefer) + { + var ex = Assert.ThrowsAsync(async () => await dataSource.OpenConnectionAsync())!; + Assert.That(ex.Message, Is.EqualTo("SSL Mode has to be Require or higher to be used with 
direct SSL Negotiation")); + } + else + { + await using var conn = await dataSource.OpenConnectionAsync(); + } + } + } + #region Setup / Teardown / Utils [OneTimeSetUp] From d8a690bdf50262f700b2728ee95161dc7801d623 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Wed, 16 Oct 2024 17:53:07 +0300 Subject: [PATCH 468/761] Allow to load nested postgres types (like range over domain) (#5885) Fixes #5671 --- .../UnmappedTypeInfoResolverFactory.cs | 2 +- src/Npgsql/PostgresDatabaseInfo.cs | 188 ++++++++++-------- test/Npgsql.Tests/Types/DomainTests.cs | 24 +++ 3 files changed, 135 insertions(+), 79 deletions(-) diff --git a/src/Npgsql/Internal/ResolverFactories/UnmappedTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/UnmappedTypeInfoResolverFactory.cs index d0d52f2168..d3dcabb467 100644 --- a/src/Npgsql/Internal/ResolverFactories/UnmappedTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/UnmappedTypeInfoResolverFactory.cs @@ -78,7 +78,7 @@ class RangeResolver : DynamicTypeInfoResolver // Input matchedType here as we don't want an NpgsqlRange over Nullable (it has its own nullability tracking, for better or worse) var subInfo = options.GetTypeInfoInternal( matchedType is null ? null : matchedType == typeof(object) ? matchedType : matchedType.GetGenericArguments()[0], - options.ToCanonicalTypeId(rangeType.Subtype)); + options.ToCanonicalTypeId(rangeType.Subtype.GetRepresentationalType())); // We have no generic RangeConverterResolver so we would not know how to compose a range mapping for such infos. 
// See https://github.com/npgsql/npgsql/issues/5268 diff --git a/src/Npgsql/PostgresDatabaseInfo.cs b/src/Npgsql/PostgresDatabaseInfo.cs index 4d793238b6..70aadbed3a 100644 --- a/src/Npgsql/PostgresDatabaseInfo.cs +++ b/src/Npgsql/PostgresDatabaseInfo.cs @@ -319,6 +319,7 @@ static string SanitizeForReplicationConnection(string str) // Then load the types Expect(await conn.ReadMessage(async).ConfigureAwait(false), conn); IBackendMessage msg; + var unknownPostgresTypes = new List(); while (true) { msg = await conn.ReadMessage(async).ConfigureAwait(false); @@ -335,93 +336,32 @@ static string SanitizeForReplicationConnection(string str) var len = conn.ReadBuffer.ReadInt32(); var elemtypoid = len == -1 ? 0 : uint.Parse(conn.ReadBuffer.ReadString(len), NumberFormatInfo.InvariantInfo); - switch (typtype) - { - case 'b': // Normal base type - var baseType = new PostgresBaseType(nspname, typname, oid); - byOID[baseType.OID] = baseType; - continue; + var postgresTypeDefinition = new PostgresTypeDefinition(nspname, oid, typname, typtype, typnotnull, elemtypoid); + if (!TryAddPostgresType(postgresTypeDefinition, byOID)) + unknownPostgresTypes.Add(postgresTypeDefinition); + } - case 'a': // Array + while (unknownPostgresTypes.Count > 0) + { + var hasChanges = false; + for (var i = unknownPostgresTypes.Count - 1; i >= 0; i--) { - Debug.Assert(elemtypoid > 0); - if (!byOID.TryGetValue(elemtypoid, out var elementPostgresType)) + var unknownPostgresType = unknownPostgresTypes[i]; + if (TryAddPostgresType(unknownPostgresType, byOID)) { - _connectionLogger.LogTrace("Array type '{ArrayTypeName}' refers to unknown element with OID {ElementTypeOID}, skipping", - typname, elemtypoid); - continue; + unknownPostgresTypes.RemoveAt(i); + hasChanges = true; } - - var arrayType = new PostgresArrayType(nspname, typname, oid, elementPostgresType); - byOID[arrayType.OID] = arrayType; - continue; } - case 'r': // Range + if (!hasChanges) { - Debug.Assert(elemtypoid > 0); - if 
(!byOID.TryGetValue(elemtypoid, out var subtypePostgresType)) - { - _connectionLogger.LogTrace("Range type '{RangeTypeName}' refers to unknown subtype with OID {ElementTypeOID}, skipping", - typname, elemtypoid); - continue; - } - - var rangeType = new PostgresRangeType(nspname, typname, oid, subtypePostgresType); - byOID[rangeType.OID] = rangeType; - continue; - } - - case 'm': // Multirange - Debug.Assert(elemtypoid > 0); - if (!byOID.TryGetValue(elemtypoid, out var type)) - { - _connectionLogger.LogTrace("Multirange type '{MultirangeTypeName}' refers to unknown range with OID {ElementTypeOID}, skipping", - typname, elemtypoid); - continue; - } - - if (type is not PostgresRangeType rangePostgresType) - { - _connectionLogger.LogTrace("Multirange type '{MultirangeTypeName}' refers to non-range type '{TypeName}', skipping", - typname, type.Name); - continue; - } - - var multirangeType = new PostgresMultirangeType(nspname, typname, oid, rangePostgresType); - byOID[multirangeType.OID] = multirangeType; - continue; - - case 'e': // Enum - var enumType = new PostgresEnumType(nspname, typname, oid); - byOID[enumType.OID] = enumType; - continue; - - case 'c': // Composite - var compositeType = new PostgresCompositeType(nspname, typname, oid); - byOID[compositeType.OID] = compositeType; - continue; - - case 'd': // Domain - Debug.Assert(elemtypoid > 0); - if (!byOID.TryGetValue(elemtypoid, out var basePostgresType)) - { - _connectionLogger.LogTrace("Domain type '{DomainTypeName}' refers to unknown base type with OID {ElementTypeOID}, skipping", - typname, elemtypoid); - continue; - } - - var domainType = new PostgresDomainType(nspname, typname, oid, basePostgresType, typnotnull); - byOID[domainType.OID] = domainType; - continue; - - case 'p': // pseudo-type (record, void) - goto case 'b'; // Hack this as a base type - - default: - throw new ArgumentOutOfRangeException($"Unknown typtype for type '{typname}' in pg_type: {typtype}"); + _connectionLogger.LogWarning("Unable to 
load '{UnknownTypeCount}' Postgres types while loading database info.", + unknownPostgresTypes.Count); + break; } } + Expect(msg, conn); if (isReplicationConnection) Expect(await conn.ReadMessage(async).ConfigureAwait(false), conn); @@ -545,5 +485,97 @@ static string SanitizeForReplicationConnection(string str) static string ReadNonNullableString(NpgsqlReadBuffer buffer) => buffer.ReadString(buffer.ReadInt32()); + + bool TryAddPostgresType(PostgresTypeDefinition postgresTypeDefinition, Dictionary byOID) + { + switch (postgresTypeDefinition.Type) + { + case 'b': // Normal base type + var baseType = new PostgresBaseType(postgresTypeDefinition.Namespace, postgresTypeDefinition.Name, postgresTypeDefinition.OID); + byOID[baseType.OID] = baseType; + return true; + + case 'a': // Array + { + Debug.Assert(postgresTypeDefinition.ElemTypeOID > 0); + if (!byOID.TryGetValue(postgresTypeDefinition.ElemTypeOID, out var elementPostgresType)) + { + _connectionLogger.LogTrace("Array type '{ArrayTypeName}' refers to unknown element with OID {ElementTypeOID}, skipping", + postgresTypeDefinition.Name, postgresTypeDefinition.ElemTypeOID); + return false; + } + + var arrayType = new PostgresArrayType(postgresTypeDefinition.Namespace, postgresTypeDefinition.Name, postgresTypeDefinition.OID, elementPostgresType); + byOID[arrayType.OID] = arrayType; + return true; + } + + case 'r': // Range + { + Debug.Assert(postgresTypeDefinition.ElemTypeOID > 0); + if (!byOID.TryGetValue(postgresTypeDefinition.ElemTypeOID, out var subtypePostgresType)) + { + _connectionLogger.LogTrace("Range type '{RangeTypeName}' refers to unknown subtype with OID {ElementTypeOID}, skipping", + postgresTypeDefinition.Name, postgresTypeDefinition.ElemTypeOID); + return false; + } + + var rangeType = new PostgresRangeType(postgresTypeDefinition.Namespace, postgresTypeDefinition.Name, postgresTypeDefinition.OID, subtypePostgresType); + byOID[rangeType.OID] = rangeType; + return true; + } + + case 'm': // Multirange + 
Debug.Assert(postgresTypeDefinition.ElemTypeOID > 0); + if (!byOID.TryGetValue(postgresTypeDefinition.ElemTypeOID, out var type)) + { + _connectionLogger.LogTrace("Multirange type '{MultirangeTypeName}' refers to unknown range with OID {ElementTypeOID}, skipping", + postgresTypeDefinition.Name, postgresTypeDefinition.ElemTypeOID); + return false; + } + + if (type is not PostgresRangeType rangePostgresType) + { + _connectionLogger.LogTrace("Multirange type '{MultirangeTypeName}' refers to non-range type '{TypeName}', skipping", + postgresTypeDefinition.Name, type.Name); + return false; + } + + var multirangeType = new PostgresMultirangeType(postgresTypeDefinition.Namespace, postgresTypeDefinition.Name, postgresTypeDefinition.OID, rangePostgresType); + byOID[multirangeType.OID] = multirangeType; + return true; + + case 'e': // Enum + var enumType = new PostgresEnumType(postgresTypeDefinition.Namespace, postgresTypeDefinition.Name, postgresTypeDefinition.OID); + byOID[enumType.OID] = enumType; + return true; + + case 'c': // Composite + var compositeType = new PostgresCompositeType(postgresTypeDefinition.Namespace, postgresTypeDefinition.Name, postgresTypeDefinition.OID); + byOID[compositeType.OID] = compositeType; + return true; + + case 'd': // Domain + Debug.Assert(postgresTypeDefinition.ElemTypeOID > 0); + if (!byOID.TryGetValue(postgresTypeDefinition.ElemTypeOID, out var basePostgresType)) + { + _connectionLogger.LogTrace("Domain type '{DomainTypeName}' refers to unknown base type with OID {ElementTypeOID}, skipping", + postgresTypeDefinition.Name, postgresTypeDefinition.ElemTypeOID); + return false; + } + + var domainType = new PostgresDomainType(postgresTypeDefinition.Namespace, postgresTypeDefinition.Name, postgresTypeDefinition.OID, basePostgresType, postgresTypeDefinition.NotNull); + byOID[domainType.OID] = domainType; + return true; + + case 'p': // pseudo-type (record, void) + goto case 'b'; // Hack this as a base type + + default: + throw new 
ArgumentOutOfRangeException($"Unknown typtype for type '{postgresTypeDefinition.Name}' in pg_type: {postgresTypeDefinition.Type}"); + } + } } } + +readonly record struct PostgresTypeDefinition(string Namespace, uint OID, string Name, char Type, bool NotNull, uint ElemTypeOID); diff --git a/test/Npgsql.Tests/Types/DomainTests.cs b/test/Npgsql.Tests/Types/DomainTests.cs index 4faaceb212..2e2ff5ae84 100644 --- a/test/Npgsql.Tests/Types/DomainTests.cs +++ b/test/Npgsql.Tests/Types/DomainTests.cs @@ -1,5 +1,6 @@ using System; using System.Threading.Tasks; +using NpgsqlTypes; using NUnit.Framework; using static Npgsql.Tests.TestUtil; @@ -75,5 +76,28 @@ class SomeComposite public string? Value { get; set; } } + [Test] + public async Task Domain_over_range() + { + await using var adminConnection = await OpenConnectionAsync(); + var type = await GetTempTypeName(adminConnection); + var rangeType = await GetTempTypeName(adminConnection); + + await adminConnection.ExecuteNonQueryAsync($"CREATE DOMAIN {type} AS integer; CREATE TYPE {rangeType} AS RANGE(subtype={type})"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.EnableUnmappedTypes(); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + await AssertType( + connection, + new NpgsqlRange(1, 2), + "[1,2]", + rangeType, + npgsqlDbType: null, + isDefaultForWriting: false); + } + public DomainTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} } From ea56478daf3c9fd3523c57189025b8f23c6b5b7c Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Wed, 16 Oct 2024 22:33:21 +0200 Subject: [PATCH 469/761] Bump MacOS to 15 in CI (#5886) --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 636b03a012..b4d5dd137b 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -38,7 +38,7 @@ jobs: 
pg_major: 17 config: Debug test_tfm: net8.0 - - os: macos-14 + - os: macos-15 pg_major: 16 config: Release test_tfm: net8.0 From ec6de9cf1d1c7a58b148bf74295611a9fff7d6bc Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Thu, 17 Oct 2024 22:23:56 +0300 Subject: [PATCH 470/761] Add IPNetwork to cidr mapping (#5889) Closes #5821 --- .../Networking/IPNetworkConverter.cs | 26 +++++++++++++++++++ .../NetworkTypeInfoResolverFactory.cs | 9 +++++++ test/Npgsql.Tests/Types/NetworkTypeTests.cs | 10 +++++++ 3 files changed, 45 insertions(+) create mode 100644 src/Npgsql/Internal/Converters/Networking/IPNetworkConverter.cs diff --git a/src/Npgsql/Internal/Converters/Networking/IPNetworkConverter.cs b/src/Npgsql/Internal/Converters/Networking/IPNetworkConverter.cs new file mode 100644 index 0000000000..0371fb32a9 --- /dev/null +++ b/src/Npgsql/Internal/Converters/Networking/IPNetworkConverter.cs @@ -0,0 +1,26 @@ +#if NET8_0_OR_GREATER + +using System.Net; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class IPNetworkConverter : PgBufferedConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + => CanConvertBufferedDefault(format, out bufferRequirements); + + public override Size GetSize(SizeContext context, IPNetwork value, ref object? 
writeState) + => NpgsqlInetConverter.GetSizeImpl(context, value.BaseAddress, ref writeState); + + protected override IPNetwork ReadCore(PgReader reader) + { + var (ip, netmask) = NpgsqlInetConverter.ReadImpl(reader, shouldBeCidr: true); + return new(ip, netmask); + } + + protected override void WriteCore(PgWriter writer, IPNetwork value) + => NpgsqlInetConverter.WriteImpl(writer, (value.BaseAddress, (byte)value.PrefixLength), isCidr: true); +} + +#endif diff --git a/src/Npgsql/Internal/ResolverFactories/NetworkTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/NetworkTypeInfoResolverFactory.cs index eca3dfec64..0a7ebaaa1b 100644 --- a/src/Npgsql/Internal/ResolverFactories/NetworkTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/NetworkTypeInfoResolverFactory.cs @@ -51,6 +51,11 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) mappings.AddStructType(DataTypeNames.Cidr, static (options, mapping, _) => mapping.CreateInfo(options, new NpgsqlCidrConverter()), isDefault: true); +#if NET8_0_OR_GREATER + mappings.AddStructType(DataTypeNames.Cidr, + static (options, mapping, _) => mapping.CreateInfo(options, new IPNetworkConverter())); +#endif + return mappings; } } @@ -76,6 +81,10 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) // cidr mappings.AddStructArrayType(DataTypeNames.Cidr); +#if NET8_0_OR_GREATER + mappings.AddStructArrayType(DataTypeNames.Cidr); +#endif + return mappings; } } diff --git a/test/Npgsql.Tests/Types/NetworkTypeTests.cs b/test/Npgsql.Tests/Types/NetworkTypeTests.cs index f164b57d75..e09f2814f8 100644 --- a/test/Npgsql.Tests/Types/NetworkTypeTests.cs +++ b/test/Npgsql.Tests/Types/NetworkTypeTests.cs @@ -61,6 +61,16 @@ public Task Cidr() NpgsqlDbType.Cidr, isDefaultForWriting: false); + [Test] + public Task IPNetwork_as_cidr() + => AssertType( + new IPNetwork(IPAddress.Parse("192.168.1.0"), 24), + "192.168.1.0/24", + "cidr", + NpgsqlDbType.Cidr, + 
isDefaultForWriting: false, + isDefaultForReading: false); + [Test] public Task Inet_v4_as_NpgsqlInet() => AssertType( From c59aee06340316e2659b53615345d5034acfceb8 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Fri, 18 Oct 2024 13:22:31 +0300 Subject: [PATCH 471/761] Add support for infinity intervals with PG17 (#5890) Closes #5696 --- Directory.Packages.props | 4 +- .../NodaTimeTypeInfoResolverFactory.cs | 2 +- .../Internal/PeriodConverter.cs | 32 +++++++- .../NodaTimeInfinityTests.cs | 82 ++++++++++++++++++- 4 files changed, 115 insertions(+), 5 deletions(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index d9319bc357..b110da1c1e 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -12,7 +12,7 @@ - + @@ -44,4 +44,4 @@ - + \ No newline at end of file diff --git a/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolverFactory.cs b/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolverFactory.cs index de5548a569..b010ce58a6 100644 --- a/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolverFactory.cs +++ b/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolverFactory.cs @@ -89,7 +89,7 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) // interval mappings.AddType(IntervalDataTypeName, - static (options, mapping, _) => mapping.CreateInfo(options, new PeriodConverter()), isDefault: true); + static (options, mapping, _) => mapping.CreateInfo(options, new PeriodConverter(options.EnableDateTimeInfinityConversions)), isDefault: true); mappings.AddStructType(IntervalDataTypeName, static (options, mapping, _) => mapping.CreateInfo(options, new DurationConverter())); diff --git a/src/Npgsql.NodaTime/Internal/PeriodConverter.cs b/src/Npgsql.NodaTime/Internal/PeriodConverter.cs index 70c0228652..8d6c431439 100644 --- a/src/Npgsql.NodaTime/Internal/PeriodConverter.cs +++ b/src/Npgsql.NodaTime/Internal/PeriodConverter.cs @@ -1,9 +1,11 @@ +using System; using NodaTime; using Npgsql.Internal; +using 
Npgsql.NodaTime.Properties; namespace Npgsql.NodaTime.Internal; -sealed class PeriodConverter : PgBufferedConverter +sealed class PeriodConverter(bool dateTimeInfinityConversions) : PgBufferedConverter { public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) { @@ -17,6 +19,15 @@ protected override Period ReadCore(PgReader reader) var days = reader.ReadInt32(); var totalMonths = reader.ReadInt32(); + if (microsecondsInDay == long.MaxValue && days == int.MaxValue && totalMonths == int.MaxValue) + return dateTimeInfinityConversions + ? Period.MaxValue + : throw new InvalidCastException(NpgsqlNodaTimeStrings.CannotReadInfinityValue); + if (microsecondsInDay == long.MinValue && days == int.MinValue && totalMonths == int.MinValue) + return dateTimeInfinityConversions + ? Period.MinValue + : throw new InvalidCastException(NpgsqlNodaTimeStrings.CannotReadInfinityValue); + // NodaTime will normalize most things (i.e. nanoseconds to milliseconds, seconds...) // but it will not normalize months to years. 
var months = totalMonths % 12; @@ -33,6 +44,25 @@ protected override Period ReadCore(PgReader reader) protected override void WriteCore(PgWriter writer, Period value) { + if (dateTimeInfinityConversions) + { + if (value == Period.MaxValue) + { + writer.WriteInt64(long.MaxValue); // microseconds + writer.WriteInt32(int.MaxValue); // days + writer.WriteInt32(int.MaxValue); // months + return; + } + + if (value == Period.MinValue) + { + writer.WriteInt64(long.MinValue); // microseconds + writer.WriteInt32(int.MinValue); // days + writer.WriteInt32(int.MinValue); // months + return; + } + } + // We have to normalize the value as otherwise we might get a value with 0 everything except for ticks, which we ignore value = value.Normalize(); // Note that the end result must be long diff --git a/test/Npgsql.PluginTests/NodaTimeInfinityTests.cs b/test/Npgsql.PluginTests/NodaTimeInfinityTests.cs index 59f581e7de..75559169f0 100644 --- a/test/Npgsql.PluginTests/NodaTimeInfinityTests.cs +++ b/test/Npgsql.PluginTests/NodaTimeInfinityTests.cs @@ -1,11 +1,11 @@ using System; +using System.Data; using System.Threading.Tasks; using NodaTime; using Npgsql.Tests; using Npgsql.Util; using NpgsqlTypes; using NUnit.Framework; -using static Npgsql.NodaTime.Internal.NodaTimeUtils; namespace Npgsql.PluginTests; @@ -281,6 +281,86 @@ public async Task DateConvertInfinity() } } + [Test] + public async Task Interval_write() + { + await using var conn = await OpenConnectionAsync(); + TestUtil.MinimumPgVersion(conn, "17.0", "Infinity values for intervals were introduced in PostgreSQL 17"); + await using var cmd = new NpgsqlCommand("SELECT $1::text", conn) + { + Parameters = { new() { Value = Period.MinValue, NpgsqlDbType = NpgsqlDbType.Interval } } + }; + + // While Period.MinValue technically isn't outside of supported values by postgres, we can't reasonably convert it + if (Statics.DisableDateTimeInfinityConversions) + { + Assert.That(async () => await cmd.ExecuteScalarAsync(), 
Throws.Exception.TypeOf()); + Assert.That(conn.State, Is.EqualTo(ConnectionState.Closed)); + await conn.OpenAsync(); + } + else + Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo("-infinity")); + + cmd.Parameters[0].Value = Period.MaxValue; + + // While Period.MaxValue technically isn't outside of supported values by postgres, we can't reasonably convert it + if (Statics.DisableDateTimeInfinityConversions) + Assert.That(async () => await cmd.ExecuteScalarAsync(), Throws.Exception.TypeOf()); + else + Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo("infinity")); + } + + [Test] + public async Task Interval_read() + { + await using var conn = await OpenConnectionAsync(); + TestUtil.MinimumPgVersion(conn, "17.0", "Infinity values for intervals were introduced in PostgreSQL 17"); + + await using var cmd = new NpgsqlCommand("SELECT '-infinity'::interval, 'infinity'::interval", conn); + + await using var reader = await cmd.ExecuteReaderAsync(); + await reader.ReadAsync(); + + if (Statics.DisableDateTimeInfinityConversions) + { + Assert.That(() => reader[0], Throws.Exception.TypeOf()); + Assert.That(() => reader[1], Throws.Exception.TypeOf()); + } + else + { + Assert.That(reader[0], Is.EqualTo(Period.MinValue)); + Assert.That(reader[1], Is.EqualTo(Period.MaxValue)); + } + } + + [Test, Description("Makes sure that when ConvertInfinityDateTime is true, infinity values are properly converted")] + public async Task Interval_convert_infinity() + { + if (Statics.DisableDateTimeInfinityConversions) + return; + + await using var conn = await OpenConnectionAsync(); + TestUtil.MinimumPgVersion(conn, "17.0", "Infinity values for intervals were introduced in PostgreSQL 17"); + await conn.ExecuteNonQueryAsync("CREATE TEMP TABLE data (i1 INTERVAL, i2 INTERVAL)"); + + using (var cmd = new NpgsqlCommand("INSERT INTO data VALUES (@p1, @p2)", conn)) + { + cmd.Parameters.AddWithValue("p1", NpgsqlDbType.Interval, Period.MaxValue); + cmd.Parameters.AddWithValue("p2", 
NpgsqlDbType.Interval, Period.MinValue); + await cmd.ExecuteNonQueryAsync(); + } + + using (var cmd = new NpgsqlCommand("SELECT i1::TEXT, i2::TEXT, i1, i2 FROM data", conn)) + using (var reader = await cmd.ExecuteReaderAsync()) + { + await reader.ReadAsync(); + Assert.That(reader.GetValue(0), Is.EqualTo("infinity")); + Assert.That(reader.GetValue(1), Is.EqualTo("-infinity")); + Assert.That(reader.GetFieldValue(2), Is.EqualTo(Period.MaxValue)); + Assert.That(reader.GetFieldValue(3), Is.EqualTo(Period.MinValue)); + } + } + protected override NpgsqlDataSource DataSource { get; } public NodaTimeInfinityTests(bool disableDateTimeInfinityConversions) From e6c166ba51bc1632498c944981e648fa987b9c12 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Mon, 21 Oct 2024 13:22:21 +0300 Subject: [PATCH 472/761] Fix not throwing due to overflow while writing NodaTime's period (#5894) Fixes #5893 --- .../Internal/PeriodConverter.cs | 26 ++++++--- .../NpgsqlNodaTimeStrings.Designer.cs | 53 ++++++++++++++----- .../Properties/NpgsqlNodaTimeStrings.resx | 3 ++ test/Npgsql.PluginTests/NodaTimeTests.cs | 12 +++++ 4 files changed, 74 insertions(+), 20 deletions(-) diff --git a/src/Npgsql.NodaTime/Internal/PeriodConverter.cs b/src/Npgsql.NodaTime/Internal/PeriodConverter.cs index 8d6c431439..1d768109c4 100644 --- a/src/Npgsql.NodaTime/Internal/PeriodConverter.cs +++ b/src/Npgsql.NodaTime/Internal/PeriodConverter.cs @@ -65,14 +65,24 @@ protected override void WriteCore(PgWriter writer, Period value) // We have to normalize the value as otherwise we might get a value with 0 everything except for ticks, which we ignore value = value.Normalize(); - // Note that the end result must be long - // see #3438 - var microsecondsInDay = - (((value.Hours * NodaConstants.MinutesPerHour + value.Minutes) * NodaConstants.SecondsPerMinute + value.Seconds) * NodaConstants.MillisecondsPerSecond + value.Milliseconds) * 1000 + - value.Nanoseconds / 1000; // Take the microseconds, discard the nanosecond remainder 
- writer.WriteInt64(microsecondsInDay); - writer.WriteInt32(value.Weeks * 7 + value.Days); // days - writer.WriteInt32(value.Years * 12 + value.Months); // months + try + { + checked + { + // Note that the end result must be long + // see #3438 + var microsecondsInDay = + (((value.Hours * NodaConstants.MinutesPerHour + value.Minutes) * NodaConstants.SecondsPerMinute + value.Seconds) * NodaConstants.MillisecondsPerSecond + value.Milliseconds) * 1000 + + value.Nanoseconds / 1000; // Take the microseconds, discard the nanosecond remainder + writer.WriteInt64(microsecondsInDay); + writer.WriteInt32(value.Weeks * 7 + value.Days); // days + writer.WriteInt32(value.Years * 12 + value.Months); // months + } + } + catch (OverflowException ex) + { + throw new ArgumentException(NpgsqlNodaTimeStrings.CannotWritePeriodDueToOverflow, ex); + } } } diff --git a/src/Npgsql.NodaTime/Properties/NpgsqlNodaTimeStrings.Designer.cs b/src/Npgsql.NodaTime/Properties/NpgsqlNodaTimeStrings.Designer.cs index bc6511ea9a..ab29289106 100644 --- a/src/Npgsql.NodaTime/Properties/NpgsqlNodaTimeStrings.Designer.cs +++ b/src/Npgsql.NodaTime/Properties/NpgsqlNodaTimeStrings.Designer.cs @@ -11,32 +11,46 @@ namespace Npgsql.NodaTime.Properties { using System; - [System.CodeDom.Compiler.GeneratedCodeAttribute("System.Resources.Tools.StronglyTypedResourceBuilder", "4.0.0.0")] - [System.Diagnostics.DebuggerNonUserCodeAttribute()] - [System.Runtime.CompilerServices.CompilerGeneratedAttribute()] + /// + /// A strongly-typed resource class, for looking up localized strings, etc. + /// + // This class was auto-generated by the StronglyTypedResourceBuilder + // class via a tool like ResGen or Visual Studio. + // To add or remove a member, edit your .ResX file then rerun ResGen + // with the /str option, or rebuild your VS project. 
+ [global::System.CodeDom.Compiler.GeneratedCodeAttribute("System.Resources.Tools.StronglyTypedResourceBuilder", "4.0.0.0")] + [global::System.Diagnostics.DebuggerNonUserCodeAttribute()] + [global::System.Runtime.CompilerServices.CompilerGeneratedAttribute()] internal class NpgsqlNodaTimeStrings { - private static System.Resources.ResourceManager resourceMan; + private static global::System.Resources.ResourceManager resourceMan; - private static System.Globalization.CultureInfo resourceCulture; + private static global::System.Globalization.CultureInfo resourceCulture; - [System.Diagnostics.CodeAnalysis.SuppressMessageAttribute("Microsoft.Performance", "CA1811:AvoidUncalledPrivateCode")] + [global::System.Diagnostics.CodeAnalysis.SuppressMessageAttribute("Microsoft.Performance", "CA1811:AvoidUncalledPrivateCode")] internal NpgsqlNodaTimeStrings() { } - [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Advanced)] - internal static System.Resources.ResourceManager ResourceManager { + /// + /// Returns the cached ResourceManager instance used by this class. 
+ /// + [global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)] + internal static global::System.Resources.ResourceManager ResourceManager { get { - if (object.Equals(null, resourceMan)) { - System.Resources.ResourceManager temp = new System.Resources.ResourceManager("Npgsql.NodaTime.Properties.NpgsqlNodaTimeStrings", typeof(NpgsqlNodaTimeStrings).Assembly); + if (object.ReferenceEquals(resourceMan, null)) { + global::System.Resources.ResourceManager temp = new global::System.Resources.ResourceManager("Npgsql.NodaTime.Properties.NpgsqlNodaTimeStrings", typeof(NpgsqlNodaTimeStrings).Assembly); resourceMan = temp; } return resourceMan; } } - [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Advanced)] - internal static System.Globalization.CultureInfo Culture { + /// + /// Overrides the current thread's CurrentUICulture property for all + /// resource lookups using this strongly typed resource class. + /// + [global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)] + internal static global::System.Globalization.CultureInfo Culture { get { return resourceCulture; } @@ -45,16 +59,31 @@ internal static System.Globalization.CultureInfo Culture { } } + /// + /// Looks up a localized string similar to Cannot read infinity value since Npgsql.DisableDateTimeInfinityConversions is enabled.. + /// internal static string CannotReadInfinityValue { get { return ResourceManager.GetString("CannotReadInfinityValue", resourceCulture); } } + /// + /// Looks up a localized string similar to Cannot read PostgreSQL interval with non-zero months to NodaTime Duration. Try reading as a NodaTime Period instead.. 
+ /// internal static string CannotReadIntervalWithMonthsAsDuration { get { return ResourceManager.GetString("CannotReadIntervalWithMonthsAsDuration", resourceCulture); } } + + /// + /// Looks up a localized string similar to Cannot write NodaTime's Period because it's out of range for the PG interval type.. + /// + internal static string CannotWritePeriodDueToOverflow { + get { + return ResourceManager.GetString("CannotWritePeriodDueToOverflow", resourceCulture); + } + } } } diff --git a/src/Npgsql.NodaTime/Properties/NpgsqlNodaTimeStrings.resx b/src/Npgsql.NodaTime/Properties/NpgsqlNodaTimeStrings.resx index d3329f2a80..f0090afb83 100644 --- a/src/Npgsql.NodaTime/Properties/NpgsqlNodaTimeStrings.resx +++ b/src/Npgsql.NodaTime/Properties/NpgsqlNodaTimeStrings.resx @@ -24,4 +24,7 @@ Cannot read PostgreSQL interval with non-zero months to NodaTime Duration. Try reading as a NodaTime Period instead. + + Cannot write NodaTime's Period because it's out of range for the PG interval type. + diff --git a/test/Npgsql.PluginTests/NodaTimeTests.cs b/test/Npgsql.PluginTests/NodaTimeTests.cs index 8edccc1ce1..6bbc401943 100644 --- a/test/Npgsql.PluginTests/NodaTimeTests.cs +++ b/test/Npgsql.PluginTests/NodaTimeTests.cs @@ -795,6 +795,18 @@ public async Task Normalize_period_on_write() Assert.That(dbValue, Is.EqualTo(expectedAfterRoundtrip)); } + [Test] + public async Task Period_write_throw_on_overflow() + { + var periodBuilder = new PeriodBuilder + { + Years = int.MaxValue + }; + var ex = await AssertTypeUnsupportedWrite(periodBuilder.Build(), "interval"); + Assert.That(ex.Message, Is.EqualTo(NpgsqlNodaTimeStrings.CannotWritePeriodDueToOverflow)); + Assert.That(ex.InnerException, Is.TypeOf()); + } + #endregion Interval #region Support From 32eb7097c81e3dc6e3cdc59c2f19592883f00c77 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 25 Oct 2024 12:59:29 +0300 Subject: [PATCH 473/761] Bump actions/setup-dotnet from 
4.0.1 to 4.1.0 (#5898) --- .github/workflows/build.yml | 6 +++--- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/native-aot.yml | 4 ++-- .github/workflows/rich-code-nav.yml | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index b4d5dd137b..28f84df8e8 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -69,7 +69,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.0.1 + uses: actions/setup-dotnet@v4.1.0 with: dotnet-version: | ${{ env.dotnet_sdk_version }} @@ -352,7 +352,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.0.1 + uses: actions/setup-dotnet@v4.1.0 with: dotnet-version: ${{ env.dotnet_sdk_version }} @@ -386,7 +386,7 @@ jobs: uses: actions/checkout@v4 - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.0.1 + uses: actions/setup-dotnet@v4.1.0 with: dotnet-version: ${{ env.dotnet_sdk_version }} diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index f1ed40c3c7..5300a780e4 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -66,7 +66,7 @@ jobs: # queries: ./path/to/local/query, your-org/your-repo/queries@main - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.0.1 + uses: actions/setup-dotnet@v4.1.0 with: dotnet-version: ${{ env.dotnet_sdk_version }} diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index 3e6e7ce47f..31f5ddd2f1 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -108,7 +108,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.0.1 + uses: actions/setup-dotnet@v4.1.0 with: dotnet-version: | ${{ env.dotnet_sdk_version }} @@ -145,7 +145,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.0.1 + 
uses: actions/setup-dotnet@v4.1.0 with: dotnet-version: | ${{ env.dotnet_sdk_version }} diff --git a/.github/workflows/rich-code-nav.yml b/.github/workflows/rich-code-nav.yml index 9c629c0801..927c1b268f 100644 --- a/.github/workflows/rich-code-nav.yml +++ b/.github/workflows/rich-code-nav.yml @@ -24,7 +24,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.0.1 + uses: actions/setup-dotnet@v4.1.0 with: dotnet-version: ${{ env.dotnet_sdk_version }} From 32fc88f52bcb4e203656defc46239aa967fba076 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Mon, 28 Oct 2024 08:53:56 +0100 Subject: [PATCH 474/761] Expose NpgsqlDataSource.Clear() publicly (#5903) Closes #5902 --- src/Npgsql/MultiHostDataSourceWrapper.cs | 24 ++++++++++-------------- src/Npgsql/NpgsqlDataSource.cs | 8 ++++++-- src/Npgsql/NpgsqlMultiHostDataSource.cs | 3 ++- src/Npgsql/PoolingDataSource.cs | 2 +- src/Npgsql/PublicAPI.Unshipped.txt | 2 ++ src/Npgsql/UnpooledDataSource.cs | 12 +++++------- test/Npgsql.Tests/DataSourceTests.cs | 24 ++++++++++++++++++++++++ 7 files changed, 50 insertions(+), 25 deletions(-) diff --git a/src/Npgsql/MultiHostDataSourceWrapper.cs b/src/Npgsql/MultiHostDataSourceWrapper.cs index 4dcded98cc..3217ec95cf 100644 --- a/src/Npgsql/MultiHostDataSourceWrapper.cs +++ b/src/Npgsql/MultiHostDataSourceWrapper.cs @@ -7,15 +7,12 @@ namespace Npgsql; -sealed class MultiHostDataSourceWrapper : NpgsqlDataSource +sealed class MultiHostDataSourceWrapper(NpgsqlMultiHostDataSource wrappedSource, TargetSessionAttributes targetSessionAttributes) + : NpgsqlDataSource(CloneSettingsForTargetSessionAttributes(wrappedSource.Settings, targetSessionAttributes), wrappedSource.Configuration) { internal override bool OwnsConnectors => false; - readonly NpgsqlMultiHostDataSource _wrappedSource; - - public MultiHostDataSourceWrapper(NpgsqlMultiHostDataSource source, TargetSessionAttributes targetSessionAttributes) - : 
base(CloneSettingsForTargetSessionAttributes(source.Settings, targetSessionAttributes), source.Configuration) - => _wrappedSource = source; + public override void Clear() => wrappedSource.Clear(); static NpgsqlConnectionStringBuilder CloneSettingsForTargetSessionAttributes( NpgsqlConnectionStringBuilder settings, @@ -26,23 +23,22 @@ static NpgsqlConnectionStringBuilder CloneSettingsForTargetSessionAttributes( return clonedSettings; } - internal override (int Total, int Idle, int Busy) Statistics => _wrappedSource.Statistics; + internal override (int Total, int Idle, int Busy) Statistics => wrappedSource.Statistics; - internal override void Clear() => _wrappedSource.Clear(); internal override ValueTask Get(NpgsqlConnection conn, NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken) - => _wrappedSource.Get(conn, timeout, async, cancellationToken); + => wrappedSource.Get(conn, timeout, async, cancellationToken); internal override bool TryGetIdleConnector([NotNullWhen(true)] out NpgsqlConnector? 
connector) => throw new NpgsqlException("Npgsql bug: trying to get an idle connector from " + nameof(MultiHostDataSourceWrapper)); internal override ValueTask OpenNewConnector(NpgsqlConnection conn, NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken) => throw new NpgsqlException("Npgsql bug: trying to open a new connector from " + nameof(MultiHostDataSourceWrapper)); internal override void Return(NpgsqlConnector connector) - => _wrappedSource.Return(connector); + => wrappedSource.Return(connector); internal override void AddPendingEnlistedConnector(NpgsqlConnector connector, Transaction transaction) - => _wrappedSource.AddPendingEnlistedConnector(connector, transaction); + => wrappedSource.AddPendingEnlistedConnector(connector, transaction); internal override bool TryRemovePendingEnlistedConnector(NpgsqlConnector connector, Transaction transaction) - => _wrappedSource.TryRemovePendingEnlistedConnector(connector, transaction); + => wrappedSource.TryRemovePendingEnlistedConnector(connector, transaction); internal override bool TryRentEnlistedPending(Transaction transaction, NpgsqlConnection connection, [NotNullWhen(true)] out NpgsqlConnector? connector) - => _wrappedSource.TryRentEnlistedPending(transaction, connection, out connector); -} \ No newline at end of file + => wrappedSource.TryRentEnlistedPending(transaction, connection, out connector); +} diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index 2455194716..f349291ed6 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -208,6 +208,12 @@ protected override DbBatch CreateDbBatch() public new NpgsqlBatch CreateBatch() => new NpgsqlDataSourceBatch(CreateConnection()); + /// + /// If the data source pools connections, clears any idle connections and flags any busy connections to be closed as soon as they're + /// returned to the pool. + /// + public abstract void Clear(); + /// /// Creates a new for the given . 
/// @@ -371,8 +377,6 @@ internal abstract ValueTask Get( internal abstract void Return(NpgsqlConnector connector); - internal abstract void Clear(); - internal abstract bool OwnsConnectors { get; } #region Database state management diff --git a/src/Npgsql/NpgsqlMultiHostDataSource.cs b/src/Npgsql/NpgsqlMultiHostDataSource.cs index 30a6bed0b0..7236e7bb8b 100644 --- a/src/Npgsql/NpgsqlMultiHostDataSource.cs +++ b/src/Npgsql/NpgsqlMultiHostDataSource.cs @@ -363,7 +363,8 @@ internal override bool TryGetIdleConnector([NotNullWhen(true)] out NpgsqlConnect internal override ValueTask OpenNewConnector(NpgsqlConnection conn, NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken) => throw new NpgsqlException("Npgsql bug: trying to open a new connector from " + nameof(NpgsqlMultiHostDataSource)); - internal override void Clear() + /// + public override void Clear() { foreach (var pool in _pools) pool.Clear(); diff --git a/src/Npgsql/PoolingDataSource.cs b/src/Npgsql/PoolingDataSource.cs index 7e4c410f69..46861a5c5e 100644 --- a/src/Npgsql/PoolingDataSource.cs +++ b/src/Npgsql/PoolingDataSource.cs @@ -335,7 +335,7 @@ internal sealed override void Return(NpgsqlConnector connector) Debug.Assert(written); } - internal override void Clear() + public override void Clear() { Interlocked.Increment(ref _clearCounter); diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index 0c4717dac8..74ee66de05 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -1,4 +1,5 @@ #nullable enable +abstract Npgsql.NpgsqlDataSource.Clear() -> void Npgsql.NpgsqlConnection.CloneWithAsync(string! connectionString, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.ValueTask Npgsql.NpgsqlConnection.SslClientAuthenticationOptionsCallback.get -> System.Action? 
Npgsql.NpgsqlConnection.SslClientAuthenticationOptionsCallback.set -> void @@ -35,3 +36,4 @@ Npgsql.Replication.PgOutput.PgOutputStreamingMode.Parallel = 2 -> Npgsql.Replica Npgsql.SslNegotiation Npgsql.SslNegotiation.Direct = 1 -> Npgsql.SslNegotiation Npgsql.SslNegotiation.Postgres = 0 -> Npgsql.SslNegotiation +override Npgsql.NpgsqlMultiHostDataSource.Clear() -> void diff --git a/src/Npgsql/UnpooledDataSource.cs b/src/Npgsql/UnpooledDataSource.cs index 549a45f9b8..e801f537eb 100644 --- a/src/Npgsql/UnpooledDataSource.cs +++ b/src/Npgsql/UnpooledDataSource.cs @@ -6,13 +6,9 @@ namespace Npgsql; -sealed class UnpooledDataSource : NpgsqlDataSource +sealed class UnpooledDataSource(NpgsqlConnectionStringBuilder settings, NpgsqlDataSourceConfiguration dataSourceConfig) + : NpgsqlDataSource(settings, dataSourceConfig) { - public UnpooledDataSource(NpgsqlConnectionStringBuilder settings, NpgsqlDataSourceConfiguration dataSourceConfig) - : base(settings, dataSourceConfig) - { - } - volatile int _numConnectors; internal override (int Total, int Idle, int Busy) Statistics => (_numConnectors, 0, _numConnectors); @@ -46,5 +42,7 @@ internal override void Return(NpgsqlConnector connector) connector.Close(); } - internal override void Clear() {} + public override void Clear() + { + } } diff --git a/test/Npgsql.Tests/DataSourceTests.cs b/test/Npgsql.Tests/DataSourceTests.cs index 639e83a795..ad412ed5c3 100644 --- a/test/Npgsql.Tests/DataSourceTests.cs +++ b/test/Npgsql.Tests/DataSourceTests.cs @@ -134,6 +134,30 @@ public async Task ExecuteReader_on_connectionless_batch([Values] bool async) Assert.That(dataSource.Statistics, Is.EqualTo((Total: 1, Idle: 1, Busy: 0))); } + [Test] + public void Clear() + { + using var dataSource = NpgsqlDataSource.Create(ConnectionString); + var connection1 = dataSource.OpenConnection(); + var connection2 = dataSource.OpenConnection(); + connection1.Close(); + + Assert.That(dataSource.Statistics, Is.EqualTo((Total: 2, Idle: 1, Busy: 1))); + + 
dataSource.Clear(); + + Assert.That(dataSource.Statistics, Is.EqualTo((Total: 1, Idle: 0, Busy: 1))); + + var connection3 = dataSource.OpenConnection(); + Assert.That(dataSource.Statistics, Is.EqualTo((Total: 2, Idle: 0, Busy: 2))); + + connection2.Close(); + Assert.That(dataSource.Statistics, Is.EqualTo((Total: 1, Idle: 0, Busy: 1))); + + connection3.Close(); + Assert.That(dataSource.Statistics, Is.EqualTo((Total: 1, Idle: 1, Busy: 0))); + } + [Test] public void Dispose() { From 2e568a1f52a51b0cee65d2fb99cbaf5b0467f2bc Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Wed, 30 Oct 2024 16:13:40 +0100 Subject: [PATCH 475/761] Apply collection initializers in solution (#5907) --- src/Npgsql.GeoJSON/CrsMap.WellKnown.cs | 6 +- .../Internal/BoundingBoxBuilder.cs | 4 +- .../JsonNetPocoTypeInfoResolverFactory.cs | 2 +- ...NetTopologySuiteTypeInfoResolverFactory.cs | 4 +- .../BackendMessages/AuthenticationMessages.cs | 2 +- src/Npgsql/BackendMessages/CopyMessages.cs | 2 +- .../ParameterDescriptionMessage.cs | 2 +- .../Composites/Metadata/CompositeInfo.cs | 4 +- .../ReflectionCompositeInfoFactory.cs | 8 +- .../Converters/Primitive/PgNumeric.cs | 5 +- .../Converters/Primitive/TextConverters.cs | 4 +- src/Npgsql/Internal/NpgsqlConnector.cs | 7 +- src/Npgsql/Internal/NpgsqlDatabaseInfo.cs | 25 ++-- .../PgTypeInfoResolverChainBuilder.cs | 2 +- .../JsonDynamicTypeInfoResolverFactory.cs | 4 +- .../TupledRecordTypeInfoResolverFactory.cs | 2 +- src/Npgsql/Internal/TypeInfoCache.cs | 2 +- src/Npgsql/Internal/TypeInfoMapping.cs | 2 +- src/Npgsql/KerberosUsernameProvider.cs | 2 +- src/Npgsql/MetricsReporter.cs | 2 +- src/Npgsql/NpgsqlBatchCommand.cs | 8 +- src/Npgsql/NpgsqlCommand.cs | 8 +- src/Npgsql/NpgsqlConnectionStringBuilder.cs | 2 +- src/Npgsql/NpgsqlDataReader.cs | 4 +- src/Npgsql/NpgsqlDataSourceBuilder.cs | 7 +- src/Npgsql/NpgsqlNestedDataReader.cs | 2 +- src/Npgsql/NpgsqlRawCopyStream.cs | 4 +- src/Npgsql/NpgsqlSchema.cs | 4 +- src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 2 
+- src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs | 8 +- src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs | 14 +- src/Npgsql/PostgresDatabaseInfo.cs | 2 +- src/Npgsql/PostgresErrorCodes.cs | 6 +- .../PostgresTypes/PostgresCompositeType.cs | 2 +- src/Npgsql/PostgresTypes/PostgresEnumType.cs | 2 +- src/Npgsql/PreparedStatement.cs | 2 +- .../PgOutput/ReadonlyArrayBuffer.cs | 2 +- src/Npgsql/SqlQueryParser.cs | 2 +- src/Npgsql/TypeMapping/GlobalTypeMapper.cs | 4 +- src/Npgsql/TypeMapping/UserTypeMapper.cs | 16 +-- .../Npgsql.Benchmarks/TypeHandlers/Numeric.cs | 8 +- .../TypeHandlers/TypeHandlerBenchmarks.cs | 2 +- test/Npgsql.PluginTests/GeoJSONTests.cs | 104 +++++++-------- test/Npgsql.PluginTests/JsonNetTests.cs | 14 +- .../NetTopologySuiteTests.cs | 121 ++++++++---------- test/Npgsql.PluginTests/NodaTimeTests.cs | 8 +- .../NpgsqlSelectValueFixture.cs | 5 +- test/Npgsql.Tests/CommandParameterTests.cs | 2 +- test/Npgsql.Tests/CopyTests.cs | 11 +- test/Npgsql.Tests/MultipleHostsTests.cs | 4 +- test/Npgsql.Tests/NpgsqlEventSourceTests.cs | 2 +- test/Npgsql.Tests/ReaderTests.cs | 2 +- test/Npgsql.Tests/SchemaTests.cs | 26 ++-- .../Npgsql.Tests/Support/ListLoggerFactory.cs | 2 +- test/Npgsql.Tests/Support/PgPostmasterMock.cs | 2 +- test/Npgsql.Tests/Support/TestBase.cs | 2 +- test/Npgsql.Tests/Types/ArrayTests.cs | 10 +- test/Npgsql.Tests/Types/BitStringTests.cs | 2 +- test/Npgsql.Tests/Types/ByteaTests.cs | 22 ++-- test/Npgsql.Tests/Types/CompositeTests.cs | 8 +- .../Types/DateTimeInfinityTests.cs | 30 ++--- test/Npgsql.Tests/Types/DateTimeTests.cs | 32 +++-- .../Npgsql.Tests/Types/FullTextSearchTests.cs | 20 ++- test/Npgsql.Tests/Types/GeometricTypeTests.cs | 4 +- test/Npgsql.Tests/Types/InternalTypeTests.cs | 7 +- test/Npgsql.Tests/Types/JsonDynamicTests.cs | 12 +- test/Npgsql.Tests/Types/JsonPathTests.cs | 8 +- test/Npgsql.Tests/Types/MiscTypeTests.cs | 2 +- test/Npgsql.Tests/Types/MoneyTests.cs | 16 +-- test/Npgsql.Tests/Types/MultirangeTests.cs | 6 +- 
test/Npgsql.Tests/Types/NumericTests.cs | 8 +- test/Npgsql.Tests/Types/RangeTests.cs | 24 ++-- test/Npgsql.Tests/TypesTests.cs | 4 +- 73 files changed, 348 insertions(+), 374 deletions(-) diff --git a/src/Npgsql.GeoJSON/CrsMap.WellKnown.cs b/src/Npgsql.GeoJSON/CrsMap.WellKnown.cs index 6ad08d6c80..dda11bd1d7 100644 --- a/src/Npgsql.GeoJSON/CrsMap.WellKnown.cs +++ b/src/Npgsql.GeoJSON/CrsMap.WellKnown.cs @@ -8,7 +8,7 @@ public partial class CrsMap /// memory allocated for overridden entries only (added, removed, or modified). /// internal static readonly CrsMapEntry[] WellKnown = - { + [ new(2000, 2180, "EPSG"), new(2188, 2217, "EPSG"), new(2219, 2220, "EPSG"), @@ -584,6 +584,6 @@ public partial class CrsMap new(32601, 32667, "EPSG"), new(32701, 32761, "EPSG"), new(32766, 32766, "EPSG"), - new(900913, 900913, "spatialreferencing.org"), - }; + new(900913, 900913, "spatialreferencing.org") + ]; } diff --git a/src/Npgsql.GeoJSON/Internal/BoundingBoxBuilder.cs b/src/Npgsql.GeoJSON/Internal/BoundingBoxBuilder.cs index 7702a7e0b3..c3ea8f271f 100644 --- a/src/Npgsql.GeoJSON/Internal/BoundingBoxBuilder.cs +++ b/src/Npgsql.GeoJSON/Internal/BoundingBoxBuilder.cs @@ -48,6 +48,6 @@ internal void Accumulate(Position position) internal double[] Build() => _hasAltitude - ? new[] { _minLongitude, _minLatitude, _minAltitude, _maxLongitude, _maxLatitude, _maxAltitude } - : new[] { _minLongitude, _minLatitude, _maxLongitude, _maxLatitude }; + ? 
[_minLongitude, _minLatitude, _minAltitude, _maxLongitude, _maxLatitude, _maxAltitude] + : [_minLongitude, _minLatitude, _maxLongitude, _maxLatitude]; } \ No newline at end of file diff --git a/src/Npgsql.Json.NET/Internal/JsonNetPocoTypeInfoResolverFactory.cs b/src/Npgsql.Json.NET/Internal/JsonNetPocoTypeInfoResolverFactory.cs index 27f719deca..57eb05f1c7 100644 --- a/src/Npgsql.Json.NET/Internal/JsonNetPocoTypeInfoResolverFactory.cs +++ b/src/Npgsql.Json.NET/Internal/JsonNetPocoTypeInfoResolverFactory.cs @@ -34,7 +34,7 @@ class Resolver : DynamicTypeInfoResolver, IPgTypeInfoResolver readonly JsonSerializerSettings _serializerSettings; TypeInfoMappingCollection? _mappings; - protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(), _jsonbClrTypes ?? Array.Empty(), _jsonClrTypes ?? Array.Empty(), _serializerSettings); + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(), _jsonbClrTypes ?? [], _jsonClrTypes ?? [], _serializerSettings); const string JsonDataTypeName = "pg_catalog.json"; const string JsonbDataTypeName = "pg_catalog.jsonb"; diff --git a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeInfoResolverFactory.cs b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeInfoResolverFactory.cs index b9a559c12f..7484dc7832 100644 --- a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeInfoResolverFactory.cs +++ b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeInfoResolverFactory.cs @@ -54,7 +54,7 @@ public Resolver( static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings, PostGisReader reader, PostGisWriter writer, bool geographyAsDefault) { - foreach (var dataTypeName in geographyAsDefault ? new[] {"geography", "geometry"} : new[] { "geometry", "geography" }) + foreach (var dataTypeName in geographyAsDefault ? 
["geography", "geometry"] : new[] { "geometry", "geography" }) { mappings.AddType(dataTypeName, (options, mapping, _) => mapping.CreateInfo(options, new NetTopologySuiteConverter(reader, writer)), @@ -98,7 +98,7 @@ public ArrayResolver(CoordinateSequenceFactory? coordinateSequenceFactory, Preci static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings, bool geographyAsDefault) { - foreach (var dataTypeName in geographyAsDefault ? new[] { "geography", "geometry" } : new[] { "geometry", "geography" }) + foreach (var dataTypeName in geographyAsDefault ? ["geography", "geometry"] : new[] { "geometry", "geography" }) { mappings.AddArrayType(dataTypeName); mappings.AddArrayType(dataTypeName); diff --git a/src/Npgsql/BackendMessages/AuthenticationMessages.cs b/src/Npgsql/BackendMessages/AuthenticationMessages.cs index a447bb632a..93d535109d 100644 --- a/src/Npgsql/BackendMessages/AuthenticationMessages.cs +++ b/src/Npgsql/BackendMessages/AuthenticationMessages.cs @@ -87,7 +87,7 @@ sealed class AuthenticationSSPIMessage : AuthenticationRequestMessage sealed class AuthenticationSASLMessage : AuthenticationRequestMessage { internal override AuthenticationRequestType AuthRequestType => AuthenticationRequestType.SASL; - internal List Mechanisms { get; } = new(); + internal List Mechanisms { get; } = []; internal AuthenticationSASLMessage(NpgsqlReadBuffer buf) { diff --git a/src/Npgsql/BackendMessages/CopyMessages.cs b/src/Npgsql/BackendMessages/CopyMessages.cs index 1aa8aec0c2..174768f7a6 100644 --- a/src/Npgsql/BackendMessages/CopyMessages.cs +++ b/src/Npgsql/BackendMessages/CopyMessages.cs @@ -14,7 +14,7 @@ abstract class CopyResponseMessageBase : IBackendMessage internal CopyResponseMessageBase() { - ColumnFormatCodes = new List(); + ColumnFormatCodes = []; } internal void Load(NpgsqlReadBuffer buf) diff --git a/src/Npgsql/BackendMessages/ParameterDescriptionMessage.cs b/src/Npgsql/BackendMessages/ParameterDescriptionMessage.cs index 
ebda485331..9faccd0f50 100644 --- a/src/Npgsql/BackendMessages/ParameterDescriptionMessage.cs +++ b/src/Npgsql/BackendMessages/ParameterDescriptionMessage.cs @@ -10,7 +10,7 @@ sealed class ParameterDescriptionMessage : IBackendMessage internal ParameterDescriptionMessage() { - TypeOIDs = new List(); + TypeOIDs = []; } internal ParameterDescriptionMessage Load(NpgsqlReadBuffer buf) diff --git a/src/Npgsql/Internal/Composites/Metadata/CompositeInfo.cs b/src/Npgsql/Internal/Composites/Metadata/CompositeInfo.cs index 1db91b2052..3c8f564a46 100644 --- a/src/Npgsql/Internal/Composites/Metadata/CompositeInfo.cs +++ b/src/Npgsql/Internal/Composites/Metadata/CompositeInfo.cs @@ -35,7 +35,7 @@ public CompositeInfo(CompositeFieldInfo[] fields, int constructorParameters, Fun throw new InvalidOperationException($"Missing composite fields to map to the required {constructorParameters} constructor parameters."); _fields = fields; - var arguments = constructorParameters is 0 ? Array.Empty() : new CompositeFieldInfo[constructorParameters]; + var arguments = constructorParameters is 0 ? [] : new CompositeFieldInfo[constructorParameters]; foreach (var field in fields) { if (field.ConstructorParameterIndex is { } index) @@ -56,7 +56,7 @@ public CompositeInfo(CompositeFieldInfo[] fields, int constructorParameters, Fun /// public StrongBox[] CreateTempBoxes() { - var valueCache = _lastConstructorFieldIndex + 1 is 0 ? Array.Empty() : new StrongBox[_lastConstructorFieldIndex + 1]; + var valueCache = _lastConstructorFieldIndex + 1 is 0 ? 
[] : new StrongBox[_lastConstructorFieldIndex + 1]; var fields = _fields; for (var i = 0; i < valueCache.Length; i++) diff --git a/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs b/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs index 14bba0dd9a..c520c4fdf9 100644 --- a/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs +++ b/src/Npgsql/Internal/Composites/ReflectionCompositeInfoFactory.cs @@ -27,7 +27,7 @@ static class ReflectionCompositeInfoFactory throw new AmbiguousMatchException($"Property {propertyMap[duplicates[0]].Name} and field {fieldMap[duplicates[0]].Name} map to the same '{pgFields[duplicates[0]].Name}' composite field name."); var (constructorInfo, parameterFieldMap) = MapBestMatchingConstructor(pgFields, nameTranslator); - var constructorParameters = constructorInfo?.GetParameters() ?? Array.Empty(); + var constructorParameters = constructorInfo?.GetParameters() ?? []; var compositeFields = new CompositeFieldInfo?[pgFields.Count]; for (var i = 0; i < parameterFieldMap.Length; i++) { @@ -122,7 +122,7 @@ static Delegate CreateSetter(FieldInfo info) static Delegate CreateGetter(PropertyInfo info) { - var invalidOpExceptionMessageConstructor = typeof(InvalidOperationException).GetConstructor(new []{ typeof(string) })!; + var invalidOpExceptionMessageConstructor = typeof(InvalidOperationException).GetConstructor([typeof(string)])!; var instance = Expression.Parameter(typeof(object), "instance"); var body = info.GetMethod is null || !info.GetMethod.IsPublic ? 
(Expression)Expression.Throw(Expression.New(invalidOpExceptionMessageConstructor, @@ -139,7 +139,7 @@ static Delegate CreateSetter(PropertyInfo info) var instance = Expression.Parameter(typeof(object), "instance"); var value = Expression.Parameter(info.PropertyType, "value"); - var invalidOpExceptionMessageConstructor = typeof(InvalidOperationException).GetConstructor(new []{ typeof(string) })!; + var invalidOpExceptionMessageConstructor = typeof(InvalidOperationException).GetConstructor([typeof(string)])!; var body = info.SetMethod is null || !info.SetMethod.IsPublic ? (Expression)Expression.Throw(Expression.New(invalidOpExceptionMessageConstructor, Expression.Constant($"No (public) setter for '{info}' on type {typeof(T)}")), info.PropertyType) @@ -162,7 +162,7 @@ static Func CreateStrongBoxConstructor(ConstructorInfo constr var parameters = constructorInfo.GetParameters(); var parameterCount = Expression.Constant(parameters.Length); - var argumentExceptionNameMessageConstructor = typeof(ArgumentException).GetConstructor(new []{ typeof(string), typeof(string) })!; + var argumentExceptionNameMessageConstructor = typeof(ArgumentException).GetConstructor([typeof(string), typeof(string)])!; return Expression .Lambda>( Expression.Block( diff --git a/src/Npgsql/Internal/Converters/Primitive/PgNumeric.cs b/src/Npgsql/Internal/Converters/Primitive/PgNumeric.cs index 799494dda1..908bf6fb4d 100644 --- a/src/Npgsql/Internal/Converters/Primitive/PgNumeric.cs +++ b/src/Npgsql/Internal/Converters/Primitive/PgNumeric.cs @@ -101,7 +101,8 @@ public readonly ref struct Builder internal const int MaxDecimalNumericDigits = 8; // Fast access for 10^n where n is 0-9 - static ReadOnlySpan UIntPowers10 => new uint[] { + static ReadOnlySpan UIntPowers10 => + [ 1, 10, 100, @@ -112,7 +113,7 @@ public readonly ref struct Builder 10000000, 100000000, 1000000000 - }; + ]; const int MaxUInt32Scale = 9; const int MaxUInt16Scale = 4; diff --git 
a/src/Npgsql/Internal/Converters/Primitive/TextConverters.cs b/src/Npgsql/Internal/Converters/Primitive/TextConverters.cs index 56c5ee7bae..5bbc1a61f2 100644 --- a/src/Npgsql/Internal/Converters/Primitive/TextConverters.cs +++ b/src/Npgsql/Internal/Converters/Primitive/TextConverters.cs @@ -166,13 +166,13 @@ protected override char ReadCore(PgReader reader) public override Size GetSize(SizeContext context, char value, ref object? writeState) { - Span spanValue = stackalloc char[] { value }; + Span spanValue = [value]; return _encoding.GetByteCount(spanValue); } protected override void WriteCore(PgWriter writer, char value) { - Span spanValue = stackalloc char[] { value }; + Span spanValue = [value]; writer.WriteChars(spanValue, _encoding); } } diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index faa7c0c0d7..cffe1f1755 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -177,7 +177,7 @@ internal string InferredUserName /// /// Holds all run-time parameters in raw, binary format for efficient handling without allocations. /// - readonly List<(byte[] Name, byte[] Value)> _rawParameters = new(); + readonly List<(byte[] Name, byte[] Value)> _rawParameters = []; /// /// If this connector was broken, this contains the exception that caused the break. @@ -934,10 +934,7 @@ internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, EnabledSslProtocols = SslProtocols.None, CertificateRevocationCheckMode = checkCertificateRevocation ? 
X509RevocationMode.Online : X509RevocationMode.Offline, RemoteCertificateValidationCallback = certificateValidationCallback, - ApplicationProtocols = new List - { - _alpnProtocol - } + ApplicationProtocols = [_alpnProtocol] }; if (SslClientAuthenticationOptionsCallback is not null) diff --git a/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs b/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs index 7fd3fe95e9..7e3aebe237 100644 --- a/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs +++ b/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs @@ -17,10 +17,11 @@ public abstract class NpgsqlDatabaseInfo { #region Fields - static volatile INpgsqlDatabaseInfoFactory[] Factories = { + static volatile INpgsqlDatabaseInfoFactory[] Factories = + [ new PostgresMinimalDatabaseInfoFactory(), new PostgresDatabaseInfoFactory() - }; + ]; #endregion Fields @@ -115,13 +116,13 @@ public abstract class NpgsqlDatabaseInfo #region Types - readonly List _baseTypesMutable = new(); - readonly List _arrayTypesMutable = new(); - readonly List _rangeTypesMutable = new(); - readonly List _multirangeTypesMutable = new(); - readonly List _enumTypesMutable = new(); - readonly List _compositeTypesMutable = new(); - readonly List _domainTypesMutable = new(); + readonly List _baseTypesMutable = []; + readonly List _arrayTypesMutable = []; + readonly List _rangeTypesMutable = []; + readonly List _multirangeTypesMutable = []; + readonly List _enumTypesMutable = []; + readonly List _compositeTypesMutable = []; + readonly List _domainTypesMutable = []; internal IReadOnlyList BaseTypes => _baseTypesMutable; internal IReadOnlyList ArrayTypes => _arrayTypesMutable; @@ -339,11 +340,11 @@ internal static async Task Load(NpgsqlConnector conn, Npgsql // For tests internal static void ResetFactories() - => Factories = new INpgsqlDatabaseInfoFactory[] - { + => Factories = + [ new PostgresMinimalDatabaseInfoFactory(), new PostgresDatabaseInfoFactory() - }; + ]; #endregion Factory management diff --git 
a/src/Npgsql/Internal/PgTypeInfoResolverChainBuilder.cs b/src/Npgsql/Internal/PgTypeInfoResolverChainBuilder.cs index 548d236096..f83fa384f4 100644 --- a/src/Npgsql/Internal/PgTypeInfoResolverChainBuilder.cs +++ b/src/Npgsql/Internal/PgTypeInfoResolverChainBuilder.cs @@ -6,7 +6,7 @@ namespace Npgsql.Internal; struct PgTypeInfoResolverChainBuilder { - readonly List<(Type ImplementationType, object)> _factories = new(); + readonly List<(Type ImplementationType, object)> _factories = []; Action>? _addRangeResolvers; Action>? _addMultirangeResolvers; RangeArrayHandler _rangeArrayHandler = RangeArrayHandler.Instance; diff --git a/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs index 2515cf9a5b..c164e40a22 100644 --- a/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs @@ -63,8 +63,8 @@ JsonSerializerOptions SerializerOptions public Resolver(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerOptions? serializerOptions = null) { - _jsonbClrTypes = jsonbClrTypes ?? Array.Empty(); - _jsonClrTypes = jsonClrTypes ?? Array.Empty(); + _jsonbClrTypes = jsonbClrTypes ?? []; + _jsonClrTypes = jsonClrTypes ?? []; _serializerOptions = serializerOptions; } diff --git a/src/Npgsql/Internal/ResolverFactories/TupledRecordTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/TupledRecordTypeInfoResolverFactory.cs index 189f84a868..7ee00d37a7 100644 --- a/src/Npgsql/Internal/ResolverFactories/TupledRecordTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/TupledRecordTypeInfoResolverFactory.cs @@ -46,7 +46,7 @@ class Resolver : DynamicTypeInfoResolver var factory = typeof(Resolver).GetMethod(nameof(CreateFactory), BindingFlags.Static | BindingFlags.NonPublic)! 
.MakeGenericMethod(mapping.Type) - .Invoke(null, new object[] { constructor, constructor.GetParameters().Length }); + .Invoke(null, [constructor, constructor.GetParameters().Length]); var converterType = typeof(RecordConverter<>).MakeGenericType(mapping.Type); var converter = (PgConverter)Activator.CreateInstance(converterType, options, factory)!; diff --git a/src/Npgsql/Internal/TypeInfoCache.cs b/src/Npgsql/Internal/TypeInfoCache.cs index df570ca825..ca646b5d29 100644 --- a/src/Npgsql/Internal/TypeInfoCache.cs +++ b/src/Npgsql/Internal/TypeInfoCache.cs @@ -102,7 +102,7 @@ public TypeInfoCache(PgSerializerOptions options, bool validatePgTypeIds = true) // Also add defaults by their info type to save a future resolver lookup + resize. infos = isDefaultInfo ? new [] { (type, info), (info!.Type, info) } - : new [] { (type, info) }; + : [(type, info)]; if (_cacheByPgTypeId.TryAdd(pgTypeId, infos)) return info; diff --git a/src/Npgsql/Internal/TypeInfoMapping.cs b/src/Npgsql/Internal/TypeInfoMapping.cs index 753c2bcac3..2ece0ae474 100644 --- a/src/Npgsql/Internal/TypeInfoMapping.cs +++ b/src/Npgsql/Internal/TypeInfoMapping.cs @@ -119,7 +119,7 @@ public TypeInfoMappingCollection(TypeInfoMappingCollection baseCollection) : thi => _baseCollection = baseCollection; public TypeInfoMappingCollection(IEnumerable items) - => _items = new(items); + => _items = [..items]; public IReadOnlyList Items => _items; diff --git a/src/Npgsql/KerberosUsernameProvider.cs b/src/Npgsql/KerberosUsernameProvider.cs index 0395bca337..3afb326548 100644 --- a/src/Npgsql/KerberosUsernameProvider.cs +++ b/src/Npgsql/KerberosUsernameProvider.cs @@ -104,7 +104,7 @@ sealed class KerberosUsernameProvider static string? FindInPath(string name) { - foreach (var p in Environment.GetEnvironmentVariable("PATH")?.Split(Path.PathSeparator) ?? Array.Empty()) + foreach (var p in Environment.GetEnvironmentVariable("PATH")?.Split(Path.PathSeparator) ?? 
[]) { var path = Path.Combine(p, name); if (File.Exists(path)) diff --git a/src/Npgsql/MetricsReporter.cs b/src/Npgsql/MetricsReporter.cs index f806e44852..83b804c2f6 100644 --- a/src/Npgsql/MetricsReporter.cs +++ b/src/Npgsql/MetricsReporter.cs @@ -31,7 +31,7 @@ sealed class MetricsReporter : IDisposable readonly NpgsqlDataSource _dataSource; readonly KeyValuePair _poolNameTag; - static readonly List Reporters = new(); + static readonly List Reporters = []; CommandCounters _commandCounters; diff --git a/src/Npgsql/NpgsqlBatchCommand.cs b/src/Npgsql/NpgsqlBatchCommand.cs index 911554d27d..0321ba4369 100644 --- a/src/Npgsql/NpgsqlBatchCommand.cs +++ b/src/Npgsql/NpgsqlBatchCommand.cs @@ -13,7 +13,7 @@ namespace Npgsql; /// public sealed class NpgsqlBatchCommand : DbBatchCommand { - internal static readonly List EmptyParameters = new(); + internal static readonly List EmptyParameters = []; string _commandText; @@ -39,7 +39,7 @@ public override string CommandText internal NpgsqlParameterCollection? _parameters; /// - public new NpgsqlParameterCollection Parameters => _parameters ??= new(); + public new NpgsqlParameterCollection Parameters => _parameters ??= []; #if NET8_0_OR_GREATER @@ -149,7 +149,7 @@ public override int RecordsAffected /// internal List PositionalParameters { - get => _inputParameters ??= _ownedInputParameters ??= new(); + get => _inputParameters ??= _ownedInputParameters ??= []; set => _inputParameters = value; } @@ -198,7 +198,7 @@ internal PreparedStatement? PreparedStatement /// /// Holds the server-side (prepared) ASCII statement name. Empty string for non-prepared statements. /// - internal byte[] StatementName => PreparedStatement?.Name ?? Array.Empty(); + internal byte[] StatementName => PreparedStatement?.Name ?? []; /// /// Whether this statement has already been prepared (including automatic preparation). 
diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index 9b4c0e2959..9719485bd4 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -424,7 +424,7 @@ internal CommandState State /// Gets the . /// /// The parameters of the SQL statement or function (stored procedure). The default is an empty collection. - public new NpgsqlParameterCollection Parameters => _parameters ??= new(); + public new NpgsqlParameterCollection Parameters => _parameters ??= []; #endregion @@ -1083,7 +1083,7 @@ await connector.WriteBind( i == 0 ? UnknownResultTypeList : null, async, cancellationToken).ConfigureAwait(false); - await connector.WriteDescribe(StatementOrPortal.Portal, Array.Empty(), async, cancellationToken).ConfigureAwait(false); + await connector.WriteDescribe(StatementOrPortal.Portal, [], async, cancellationToken).ConfigureAwait(false); } else { @@ -1154,8 +1154,8 @@ async Task SendDeriveParameters(NpgsqlConnector connector, bool async, Cancellat var batchCommand = InternalBatchCommands[i]; - await connector.WriteParse(batchCommand.FinalCommandText!, Array.Empty(), NpgsqlBatchCommand.EmptyParameters, async, cancellationToken).ConfigureAwait(false); - await connector.WriteDescribe(StatementOrPortal.Statement, Array.Empty(), async, cancellationToken).ConfigureAwait(false); + await connector.WriteParse(batchCommand.FinalCommandText!, [], NpgsqlBatchCommand.EmptyParameters, async, cancellationToken).ConfigureAwait(false); + await connector.WriteDescribe(StatementOrPortal.Statement, [], async, cancellationToken).ConfigureAwait(false); } await connector.WriteSync(async, cancellationToken).ConfigureAwait(false); diff --git a/src/Npgsql/NpgsqlConnectionStringBuilder.cs b/src/Npgsql/NpgsqlConnectionStringBuilder.cs index f2dbe7340e..b18a310ab3 100644 --- a/src/Npgsql/NpgsqlConnectionStringBuilder.cs +++ b/src/Npgsql/NpgsqlConnectionStringBuilder.cs @@ -1615,7 +1615,7 @@ sealed class NpgsqlConnectionStringPropertyAttribute : Attribute /// 
Creates a . /// public NpgsqlConnectionStringPropertyAttribute() - => Synonyms = Array.Empty(); + => Synonyms = []; /// /// Creates a . diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 3df6affdeb..37eeb3eda8 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -67,7 +67,7 @@ public sealed class NpgsqlDataReader : DbDataReader, IDbColumnSchemaGenerator /// Records, for each column, its starting offset and length in the current row. /// Used only in non-sequential mode. /// - readonly List<(int Offset, int Length)> _columns = new(); + readonly List<(int Offset, int Length)> _columns = []; int _columnsStartPos; /// @@ -998,7 +998,7 @@ async Task Consume(bool async, Exception? firstException = null) } catch (Exception e) { - exceptions ??= new(); + exceptions ??= []; exceptions.Add(e); } } diff --git a/src/Npgsql/NpgsqlDataSourceBuilder.cs b/src/Npgsql/NpgsqlDataSourceBuilder.cs index 8274f6669a..7a886ba335 100644 --- a/src/Npgsql/NpgsqlDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlDataSourceBuilder.cs @@ -50,8 +50,7 @@ public INpgsqlNameTranslator DefaultNameTranslator public string ConnectionString => _internalBuilder.ConnectionString; internal static void ResetGlobalMappings(bool overwrite) - => GlobalTypeMapper.Instance.AddGlobalTypeMappingResolvers(new PgTypeInfoResolverFactory[] - { + => GlobalTypeMapper.Instance.AddGlobalTypeMappingResolvers([ overwrite ? 
new AdoTypeInfoResolverFactory() : AdoTypeInfoResolverFactory.Instance, new ExtraConversionResolverFactory(), new JsonTypeInfoResolverFactory(), @@ -59,8 +58,8 @@ internal static void ResetGlobalMappings(bool overwrite) new FullTextSearchTypeInfoResolverFactory(), new NetworkTypeInfoResolverFactory(), new GeometricTypeInfoResolverFactory(), - new LTreeTypeInfoResolverFactory(), - }, static () => + new LTreeTypeInfoResolverFactory() + ], static () => { var builder = new PgTypeInfoResolverChainBuilder(); builder.EnableRanges(); diff --git a/src/Npgsql/NpgsqlNestedDataReader.cs b/src/Npgsql/NpgsqlNestedDataReader.cs index 5794054670..f35e635e50 100644 --- a/src/Npgsql/NpgsqlNestedDataReader.cs +++ b/src/Npgsql/NpgsqlNestedDataReader.cs @@ -29,7 +29,7 @@ public sealed class NpgsqlNestedDataReader : DbDataReader int _nextRowBufferPos; ReaderState _readerState; - readonly List _columns = new(); + readonly List _columns = []; long _startPos; DataFormat Format => DataFormat.Binary; diff --git a/src/Npgsql/NpgsqlRawCopyStream.cs b/src/Npgsql/NpgsqlRawCopyStream.cs index e91633d053..1185cd5422 100644 --- a/src/Npgsql/NpgsqlRawCopyStream.cs +++ b/src/Npgsql/NpgsqlRawCopyStream.cs @@ -54,10 +54,10 @@ public override int ReadTimeout /// The copy binary format header signature /// internal static readonly byte[] BinarySignature = - { + [ (byte)'P',(byte)'G',(byte)'C',(byte)'O',(byte)'P',(byte)'Y', (byte)'\n', 255, (byte)'\r', (byte)'\n', 0 - }; + ]; readonly ILogger _copyLogger; diff --git a/src/Npgsql/NpgsqlSchema.cs b/src/Npgsql/NpgsqlSchema.cs index 1b52738561..7ce5f3ec1d 100644 --- a/src/Npgsql/NpgsqlSchema.cs +++ b/src/Npgsql/NpgsqlSchema.cs @@ -1007,7 +1007,7 @@ static DataTable GetReservedWords() /// List of keywords taken from PostgreSQL 9.0 reserved words documentation. 
/// static readonly string[] ReservedKeywords = - { + [ "ALL", "ANALYSE", "ANALYZE", @@ -1107,7 +1107,7 @@ static DataTable GetReservedWords() "WHERE", "WINDOW", "WITH" - }; + ]; #endregion Reserved Keywords diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index cd8c556fdb..8711bd9b63 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -69,7 +69,7 @@ public sealed class NpgsqlSlimDataSourceBuilder : INpgsqlTypeMapper public string ConnectionString => ConnectionStringBuilder.ToString(); static NpgsqlSlimDataSourceBuilder() - => GlobalTypeMapper.Instance.AddGlobalTypeMappingResolvers(new PgTypeInfoResolverFactory[] { new AdoTypeInfoResolverFactory() }); + => GlobalTypeMapper.Instance.AddGlobalTypeMappingResolvers([new AdoTypeInfoResolverFactory()]); /// /// A diagnostics name used by Npgsql when generating tracing, logging and metrics. diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs b/src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs index 2ec4c66afe..3c765ede02 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs @@ -21,7 +21,7 @@ internal NpgsqlTsVector(List lexemes, bool noCheck = false) return; } - _lexemes = new List(lexemes); + _lexemes = [..lexemes]; if (_lexemes.Count == 0) return; @@ -167,7 +167,7 @@ public static NpgsqlTsVector Parse(string value) goto WaitWord; StartPosInfo: - wordEntryPositions = new List(); + wordEntryPositions = []; InPosInfo: var digitPos = pos; @@ -321,7 +321,7 @@ internal Lexeme(string text, List? wordEntryPositions, bool noCopy { Text = text; if (wordEntryPositions != null) - WordEntryPositions = noCopy ? wordEntryPositions : new List(wordEntryPositions); + WordEntryPositions = noCopy ? wordEntryPositions : [..wordEntryPositions]; else WordEntryPositions = null; } @@ -343,7 +343,7 @@ internal Lexeme(string text, List? 
wordEntryPositions, bool noCopy return list; // Don't change the original list, as the user might inspect it later if he holds a reference to the lexeme's list - list = new List(list); + list = [..list]; list.Sort((x, y) => x.Pos.CompareTo(y.Pos)); diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs b/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs index c1555603c2..b861548b83 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs @@ -202,16 +202,16 @@ public struct NpgsqlPath : IList, IEquatable { List _points; - List Points => _points ??= new(); + List Points => _points ??= []; public bool Open { get; set; } public NpgsqlPath() - => _points = new(); + => _points = []; public NpgsqlPath(IEnumerable points, bool open) { - _points = new List(points); + _points = [..points]; Open = open; } @@ -220,7 +220,7 @@ public NpgsqlPath(params NpgsqlPoint[] points) : this(points, false) {} public NpgsqlPath(bool open) : this() { - _points = new List(); + _points = []; Open = open; } @@ -309,13 +309,13 @@ public struct NpgsqlPolygon : IList, IEquatable { List _points; - List Points => _points ??= new(); + List Points => _points ??= []; public NpgsqlPolygon() - => _points = new(); + => _points = []; public NpgsqlPolygon(IEnumerable points) - => _points = new List(points); + => _points = [..points]; public NpgsqlPolygon(params NpgsqlPoint[] points) : this((IEnumerable) points) {} diff --git a/src/Npgsql/PostgresDatabaseInfo.cs b/src/Npgsql/PostgresDatabaseInfo.cs index 70aadbed3a..9daa5060f4 100644 --- a/src/Npgsql/PostgresDatabaseInfo.cs +++ b/src/Npgsql/PostgresDatabaseInfo.cs @@ -481,7 +481,7 @@ static string SanitizeForReplicationConnection(string str) if (!isReplicationConnection) Expect(await conn.ReadMessage(async).ConfigureAwait(false), conn); - return new(byOID.Values); + return [..byOID.Values]; static string ReadNonNullableString(NpgsqlReadBuffer buffer) => buffer.ReadString(buffer.ReadInt32()); diff --git a/src/Npgsql/PostgresErrorCodes.cs 
b/src/Npgsql/PostgresErrorCodes.cs index afeadbf2c6..98d878e12b 100644 --- a/src/Npgsql/PostgresErrorCodes.cs +++ b/src/Npgsql/PostgresErrorCodes.cs @@ -466,15 +466,15 @@ public static class PostgresErrorCodes #endregion Class XX - Internal Error static readonly string[] CriticalFailureCodes = - { + [ "53", // Insufficient resources AdminShutdown, // Self explanatory CrashShutdown, // Self explanatory CannotConnectNow, // Database is starting up "58", // System errors, external to PG (server is dying) "F0", // Configuration file error - "XX", // Internal error (database is dying) - }; + "XX" // Internal error (database is dying) + ]; internal static bool IsCriticalFailure(PostgresException e, bool clusterError = true) { diff --git a/src/Npgsql/PostgresTypes/PostgresCompositeType.cs b/src/Npgsql/PostgresTypes/PostgresCompositeType.cs index 2d53199e6f..1663b01ebd 100644 --- a/src/Npgsql/PostgresTypes/PostgresCompositeType.cs +++ b/src/Npgsql/PostgresTypes/PostgresCompositeType.cs @@ -16,7 +16,7 @@ public class PostgresCompositeType : PostgresType /// public IReadOnlyList Fields => MutableFields; - internal List MutableFields { get; } = new(); + internal List MutableFields { get; } = []; /// /// Constructs a representation of a PostgreSQL array data type. diff --git a/src/Npgsql/PostgresTypes/PostgresEnumType.cs b/src/Npgsql/PostgresTypes/PostgresEnumType.cs index 7e4440252e..2422cb07a2 100644 --- a/src/Npgsql/PostgresTypes/PostgresEnumType.cs +++ b/src/Npgsql/PostgresTypes/PostgresEnumType.cs @@ -16,7 +16,7 @@ public class PostgresEnumType : PostgresType /// public IReadOnlyList Labels => MutableLabels; - internal List MutableLabels { get; } = new(); + internal List MutableLabels { get; } = []; /// /// Constructs a representation of a PostgreSQL enum data type. 
diff --git a/src/Npgsql/PreparedStatement.cs b/src/Npgsql/PreparedStatement.cs index cda613d1cc..5a5a877eb2 100644 --- a/src/Npgsql/PreparedStatement.cs +++ b/src/Npgsql/PreparedStatement.cs @@ -84,7 +84,7 @@ internal void SetParamTypes(List parameters) { if (parameters.Count == 0) { - ConverterParamTypes = Array.Empty(); + ConverterParamTypes = []; return; } diff --git a/src/Npgsql/Replication/PgOutput/ReadonlyArrayBuffer.cs b/src/Npgsql/Replication/PgOutput/ReadonlyArrayBuffer.cs index df910af4d2..3d22b5f5f6 100644 --- a/src/Npgsql/Replication/PgOutput/ReadonlyArrayBuffer.cs +++ b/src/Npgsql/Replication/PgOutput/ReadonlyArrayBuffer.cs @@ -11,7 +11,7 @@ sealed class ReadOnlyArrayBuffer : IReadOnlyList int _size; public ReadOnlyArrayBuffer() - => _items = Array.Empty(); + => _items = []; ReadOnlyArrayBuffer(T[] items) { diff --git a/src/Npgsql/SqlQueryParser.cs b/src/Npgsql/SqlQueryParser.cs index 2e9e37a010..88728a34f7 100644 --- a/src/Npgsql/SqlQueryParser.cs +++ b/src/Npgsql/SqlQueryParser.cs @@ -7,7 +7,7 @@ namespace Npgsql; sealed class SqlQueryParser { - static NpgsqlParameterCollection EmptyParameters { get; } = new(); + static NpgsqlParameterCollection EmptyParameters { get; } = []; readonly Dictionary _paramIndexMap = new(StringComparer.OrdinalIgnoreCase); readonly StringBuilder _rewrittenSql = new(); diff --git a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs index ad83c7bc77..c8b72cb8cd 100644 --- a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs +++ b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs @@ -16,9 +16,9 @@ namespace Npgsql.TypeMapping; sealed class GlobalTypeMapper : INpgsqlTypeMapper { readonly UserTypeMapper _userTypeMapper = new(); - readonly List _pluginResolverFactories = new(); + readonly List _pluginResolverFactories = []; readonly ReaderWriterLockSlim _lock = new(); - PgTypeInfoResolverFactory[] _typeMappingResolvers = Array.Empty(); + PgTypeInfoResolverFactory[] _typeMappingResolvers = []; internal 
IEnumerable GetPluginResolverFactories() { diff --git a/src/Npgsql/TypeMapping/UserTypeMapper.cs b/src/Npgsql/TypeMapping/UserTypeMapper.cs index 35fabb90fe..7c86f5b949 100644 --- a/src/Npgsql/TypeMapping/UserTypeMapper.cs +++ b/src/Npgsql/TypeMapping/UserTypeMapper.cs @@ -40,8 +40,8 @@ sealed class UserTypeMapper : PgTypeInfoResolverFactory public INpgsqlNameTranslator DefaultNameTranslator { get; set; } = NpgsqlSnakeCaseNameTranslator.Instance; - UserTypeMapper(IEnumerable mappings) => _mappings = new List(mappings); - public UserTypeMapper() => _mappings = new(); + UserTypeMapper(IEnumerable mappings) => _mappings = [..mappings]; + public UserTypeMapper() => _mappings = []; public UserTypeMapper Clone() => new(_mappings) { DefaultNameTranslator = DefaultNameTranslator }; @@ -65,9 +65,9 @@ public UserTypeMapper MapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMem if (!clrType.IsEnum || !clrType.IsValueType) throw new ArgumentException("Type must be a concrete Enum", nameof(clrType)); - var openMethod = typeof(UserTypeMapper).GetMethod(nameof(MapEnum), new[] { typeof(string), typeof(INpgsqlNameTranslator) })!; + var openMethod = typeof(UserTypeMapper).GetMethod(nameof(MapEnum), [typeof(string), typeof(INpgsqlNameTranslator)])!; var method = openMethod.MakeGenericMethod(clrType); - method.Invoke(this, new object?[] { pgName, nameTranslator }); + method.Invoke(this, [pgName, nameTranslator]); return this; } @@ -107,11 +107,11 @@ public UserTypeMapper MapComposite([DynamicallyAccessedMembers(DynamicallyAccess var openMethod = typeof(UserTypeMapper).GetMethod( clrType.IsValueType ? 
nameof(MapStructComposite) : nameof(MapComposite), - new[] { typeof(string), typeof(INpgsqlNameTranslator) })!; + [typeof(string), typeof(INpgsqlNameTranslator)])!; var method = openMethod.MakeGenericMethod(clrType); - method.Invoke(this, new object?[] { pgName, nameTranslator }); + method.Invoke(this, [pgName, nameTranslator]); return this; } @@ -145,8 +145,8 @@ static string GetPgName(Type type, INpgsqlNameTranslator nameTranslator) => type.GetCustomAttribute()?.PgName ?? nameTranslator.TranslateTypeName(type.Name); - public override IPgTypeInfoResolver CreateResolver() => new Resolver(new(_mappings)); - public override IPgTypeInfoResolver CreateArrayResolver() => new ArrayResolver(new(_mappings)); + public override IPgTypeInfoResolver CreateResolver() => new Resolver([.._mappings]); + public override IPgTypeInfoResolver CreateArrayResolver() => new ArrayResolver([.._mappings]); class Resolver : IPgTypeInfoResolver { diff --git a/test/Npgsql.Benchmarks/TypeHandlers/Numeric.cs b/test/Npgsql.Benchmarks/TypeHandlers/Numeric.cs index 42f5f3936a..66d3a82aa8 100644 --- a/test/Npgsql.Benchmarks/TypeHandlers/Numeric.cs +++ b/test/Npgsql.Benchmarks/TypeHandlers/Numeric.cs @@ -39,8 +39,8 @@ public class Numeric : TypeHandlerBenchmarks { public Numeric() : base(new DecimalNumericConverter()) { } - protected override IEnumerable ValuesOverride() => new[] - { + protected override IEnumerable ValuesOverride() => + [ 0.0000000000000000000000000001M, 0.000000000000000000000001M, 0.00000000000000000001M, @@ -55,8 +55,8 @@ protected override IEnumerable ValuesOverride() => new[] 10000000000000000M, 100000000000000000000M, 1000000000000000000000000M, - 10000000000000000000000000000M, - }; + 10000000000000000000000000000M + ]; } [Config(typeof(Config))] diff --git a/test/Npgsql.Benchmarks/TypeHandlers/TypeHandlerBenchmarks.cs b/test/Npgsql.Benchmarks/TypeHandlers/TypeHandlerBenchmarks.cs index 994839c219..9bc09dac99 100644 --- 
a/test/Npgsql.Benchmarks/TypeHandlers/TypeHandlerBenchmarks.cs +++ b/test/Npgsql.Benchmarks/TypeHandlers/TypeHandlerBenchmarks.cs @@ -61,7 +61,7 @@ protected TypeHandlerBenchmarks(PgConverter handler) public IEnumerable Values() => ValuesOverride(); - protected virtual IEnumerable ValuesOverride() => new[] { default(T) }; + protected virtual IEnumerable ValuesOverride() => [default(T)]; [ParamsSource(nameof(Values))] public T Value diff --git a/test/Npgsql.PluginTests/GeoJSONTests.cs b/test/Npgsql.PluginTests/GeoJSONTests.cs index 0630eebc8d..0a421eee01 100644 --- a/test/Npgsql.PluginTests/GeoJSONTests.cs +++ b/test/Npgsql.PluginTests/GeoJSONTests.cs @@ -23,89 +23,89 @@ public struct TestData } public static readonly TestData[] Tests = - { + [ new() { Geometry = new Point( new Position(longitude: 1d, latitude: 2d)) - { BoundingBoxes = new[] { 1d, 2d, 1d, 2d } }, + { BoundingBoxes = [1d, 2d, 1d, 2d] }, CommandText = "st_makepoint(1,2)" }, new() { - Geometry = new LineString(new[] { + Geometry = new LineString([ new Position(longitude: 1d, latitude: 1d), new Position(longitude: 1d, latitude: 2d) - }) - { BoundingBoxes = new[] { 1d, 1d, 1d, 2d } }, + ]) + { BoundingBoxes = [1d, 1d, 1d, 2d] }, CommandText = "st_makeline(st_makepoint(1,1), st_makepoint(1,2))" }, new() { - Geometry = new Polygon(new[] { - new LineString(new[] { + Geometry = new Polygon([ + new LineString([ new Position(longitude: 1d, latitude: 1d), new Position(longitude: 2d, latitude: 2d), new Position(longitude: 3d, latitude: 3d), new Position(longitude: 1d, latitude: 1d) - }) - }) - { BoundingBoxes = new[] { 1d, 1d, 3d, 3d } }, + ]) + ]) + { BoundingBoxes = [1d, 1d, 3d, 3d] }, CommandText = "st_makepolygon(st_makeline(ARRAY[st_makepoint(1,1), st_makepoint(2,2), st_makepoint(3,3), st_makepoint(1,1)]))" }, new() { - Geometry = new MultiPoint(new[] { + Geometry = new MultiPoint([ new Point(new Position(longitude: 1d, latitude: 1d)) - }) - { BoundingBoxes = new[] { 1d, 1d, 1d, 1d } }, + ]) + { 
BoundingBoxes = [1d, 1d, 1d, 1d] }, CommandText = "st_multi(st_makepoint(1, 1))" }, new() { - Geometry = new MultiLineString(new[] { - new LineString(new[] { + Geometry = new MultiLineString([ + new LineString([ new Position(longitude: 1d, latitude: 1d), new Position(longitude: 1d, latitude: 2d) - }) - }) - { BoundingBoxes = new[] { 1d, 1d, 1d, 2d } }, + ]) + ]) + { BoundingBoxes = [1d, 1d, 1d, 2d] }, CommandText = "st_multi(st_makeline(st_makepoint(1,1), st_makepoint(1,2)))" }, new() { - Geometry = new MultiPolygon(new[] { - new Polygon(new[] { - new LineString(new[] { + Geometry = new MultiPolygon([ + new Polygon([ + new LineString([ new Position(longitude: 1d, latitude: 1d), new Position(longitude: 2d, latitude: 2d), new Position(longitude: 3d, latitude: 3d), new Position(longitude: 1d, latitude: 1d) - }) - }) - }) - { BoundingBoxes = new[] { 1d, 1d, 3d, 3d } }, + ]) + ]) + ]) + { BoundingBoxes = [1d, 1d, 3d, 3d] }, CommandText = "st_multi(st_makepolygon(st_makeline(ARRAY[st_makepoint(1,1), st_makepoint(2,2), st_makepoint(3,3), st_makepoint(1,1)])))" }, new() { - Geometry = new GeometryCollection(new IGeometryObject[] { + Geometry = new GeometryCollection([ new Point(new Position(longitude: 1d, latitude: 1d)), - new MultiPolygon(new[] { - new Polygon(new[] { - new LineString(new[] { + new MultiPolygon([ + new Polygon([ + new LineString([ new Position(longitude: 1d, latitude: 1d), new Position(longitude: 2d, latitude: 2d), new Position(longitude: 3d, latitude: 3d), new Position(longitude: 1d, latitude: 1d) - }) - }) - }) - }) - { BoundingBoxes = new[] { 1d, 1d, 3d, 3d } }, + ]) + ]) + ]) + ]) + { BoundingBoxes = [1d, 1d, 3d, 3d] }, CommandText = "st_collect(st_makepoint(1,1),st_multi(st_makepolygon(st_makeline(ARRAY[st_makepoint(1,1), st_makepoint(2,2), st_makepoint(3,3), st_makepoint(1,1)]))))" - }, - }; + } + ]; [Test, TestCaseSource(nameof(Tests))] public async Task Read(TestData data) @@ -138,24 +138,24 @@ public async Task IgnoreM() } public static readonly 
TestData[] NotAllZSpecifiedTests = - { + [ new() { - Geometry = new LineString(new[] { + Geometry = new LineString([ new Position(1d, 1d, 0d), new Position(2d, 2d) - }) + ]) }, new() { - Geometry = new LineString(new[] { + Geometry = new LineString([ new Position(1d, 1d, 0d), new Position(2d, 2d), new Position(3d, 3d), new Position(4d, 4d) - }) + ]) } - }; + ]; [Test, TestCaseSource(nameof(NotAllZSpecifiedTests))] public async Task Not_all_Z_specified(TestData data) @@ -315,18 +315,18 @@ public async Task Import_big_geometry() await using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "id text, field geometry"); - var geometry = new MultiLineString(new[] { + var geometry = new MultiLineString([ new LineString( Enumerable.Range(1, 507) .Select(i => new Position(longitude: i, latitude: i)) .Append(new Position(longitude: 1d, latitude: 1d))), - new LineString(new[] { + new LineString([ new Position(longitude: 1d, latitude: 1d), new Position(longitude: 1d, latitude: 2d), new Position(longitude: 1d, latitude: 3d), - new Position(longitude: 1d, latitude: 1d), - }) - }); + new Position(longitude: 1d, latitude: 1d) + ]) + ]); await using (var writer = await conn.BeginBinaryImportAsync($"COPY {table} (id, field) FROM STDIN BINARY")) { @@ -375,18 +375,18 @@ public async Task Export_big_geometry() await using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "id text, field geometry"); - var geometry = new Polygon(new[] { + var geometry = new Polygon([ new LineString( Enumerable.Range(1, 507) .Select(i => new Position(longitude: i, latitude: i)) .Append(new Position(longitude: 1d, latitude: 1d))), - new LineString(new[] { + new LineString([ new Position(longitude: 1d, latitude: 1d), new Position(longitude: 1d, latitude: 2d), new Position(longitude: 1d, latitude: 3d), - new Position(longitude: 1d, latitude: 1d), - }) - }); + new Position(longitude: 1d, latitude: 1d) + ]) + ]); await using (var writer = await 
conn.BeginBinaryImportAsync($"COPY {table} (id, field) FROM STDIN BINARY")) { diff --git a/test/Npgsql.PluginTests/JsonNetTests.cs b/test/Npgsql.PluginTests/JsonNetTests.cs index b3fb1e26bb..a88fed0492 100644 --- a/test/Npgsql.PluginTests/JsonNetTests.cs +++ b/test/Npgsql.PluginTests/JsonNetTests.cs @@ -108,9 +108,9 @@ public async Task Clr_type_mapping() { var dataSourceBuilder = CreateDataSourceBuilder(); if (IsJsonb) - dataSourceBuilder.UseJsonNet(jsonbClrTypes: new[] { typeof(Foo) }); + dataSourceBuilder.UseJsonNet(jsonbClrTypes: [typeof(Foo)]); else - dataSourceBuilder.UseJsonNet(jsonClrTypes: new[] { typeof(Foo) }); + dataSourceBuilder.UseJsonNet(jsonClrTypes: [typeof(Foo)]); await using var dataSource = dataSourceBuilder.Build(); await AssertType( @@ -128,9 +128,9 @@ public async Task Roundtrip_clr_array() { var dataSourceBuilder = CreateDataSourceBuilder(); if (IsJsonb) - dataSourceBuilder.UseJsonNet(jsonbClrTypes: new[] { typeof(int[]) }); + dataSourceBuilder.UseJsonNet(jsonbClrTypes: [typeof(int[])]); else - dataSourceBuilder.UseJsonNet(jsonClrTypes: new[] { typeof(int[]) }); + dataSourceBuilder.UseJsonNet(jsonClrTypes: [typeof(int[])]); await using var dataSource = dataSourceBuilder.Build(); await AssertType( @@ -157,9 +157,9 @@ public async Task Custom_serializer_settings() var dataSourceBuilder = CreateDataSourceBuilder(); if (IsJsonb) - dataSourceBuilder.UseJsonNet(jsonbClrTypes: new[] { typeof(DateWrapper) }, settings: settings); + dataSourceBuilder.UseJsonNet(jsonbClrTypes: [typeof(DateWrapper)], settings: settings); else - dataSourceBuilder.UseJsonNet(jsonClrTypes: new[] { typeof(DateWrapper) }, settings: settings); + dataSourceBuilder.UseJsonNet(jsonClrTypes: [typeof(DateWrapper)], settings: settings); await using var dataSource = dataSourceBuilder.Build(); await AssertType( @@ -176,7 +176,7 @@ await AssertType( public async Task Bug3464() { var dataSourceBuilder = CreateDataSourceBuilder(); - dataSourceBuilder.UseJsonNet(jsonbClrTypes: new[] { 
typeof(Bug3464Class) }); + dataSourceBuilder.UseJsonNet(jsonbClrTypes: [typeof(Bug3464Class)]); await using var dataSource = dataSourceBuilder.Build(); var expected = new Bug3464Class { SomeString = new string('5', 8174) }; diff --git a/test/Npgsql.PluginTests/NetTopologySuiteTests.cs b/test/Npgsql.PluginTests/NetTopologySuiteTests.cs index 4e225d121c..4cece1952c 100644 --- a/test/Npgsql.PluginTests/NetTopologySuiteTests.cs +++ b/test/Npgsql.PluginTests/NetTopologySuiteTests.cs @@ -14,60 +14,55 @@ namespace Npgsql.PluginTests; public class NetTopologySuiteTests : TestBase { static readonly TestCaseData[] TestCases = - { + [ new TestCaseData(Ordinates.None, new Point(1d, 2500d), "st_makepoint(1,2500)") .SetName("Point"), - new TestCaseData(Ordinates.None, new MultiPoint(new[] { new Point(new Coordinate(1d, 1d)) }), "st_multi(st_makepoint(1, 1))") + new TestCaseData(Ordinates.None, new MultiPoint([new Point(new Coordinate(1d, 1d))]), "st_multi(st_makepoint(1, 1))") .SetName("MultiPoint"), new TestCaseData( Ordinates.None, - new LineString(new[] { new Coordinate(1d, 1d), new Coordinate(1d, 2500d) }), + new LineString([new Coordinate(1d, 1d), new Coordinate(1d, 2500d)]), "st_makeline(st_makepoint(1,1),st_makepoint(1,2500))") .SetName("LineString"), new TestCaseData( Ordinates.None, - new MultiLineString(new[] - { - new LineString(new[] - { + new MultiLineString([ + new LineString([ new Coordinate(1d, 1d), new Coordinate(1d, 2500d) - }) - }), + ]) + ]), "st_multi(st_makeline(st_makepoint(1,1),st_makepoint(1,2500)))") .SetName("MultiLineString"), new TestCaseData( Ordinates.None, new Polygon( - new LinearRing(new[] - { + new LinearRing([ new Coordinate(1d, 1d), new Coordinate(2d, 2d), new Coordinate(3d, 3d), new Coordinate(1d, 1d) - }) + ]) ), "st_makepolygon(st_makeline(ARRAY[st_makepoint(1,1),st_makepoint(2,2),st_makepoint(3,3),st_makepoint(1,1)]))") .SetName("Polygon"), new TestCaseData( Ordinates.None, - new MultiPolygon(new[] - { + new MultiPolygon([ new Polygon( - 
new LinearRing(new[] - { + new LinearRing([ new Coordinate(1d, 1d), new Coordinate(2d, 2d), new Coordinate(3d, 3d), new Coordinate(1d, 1d) - }) + ]) ) - }), + ]), "st_multi(st_makepolygon(st_makeline(ARRAY[st_makepoint(1,1),st_makepoint(2,2),st_makepoint(3,3),st_makepoint(1,1)])))") .SetName("MultiPolygon"), @@ -76,47 +71,40 @@ public class NetTopologySuiteTests : TestBase new TestCaseData( Ordinates.None, - new GeometryCollection(new Geometry[] - { + new GeometryCollection([ new Point(new Coordinate(1d, 1d)), - new MultiPolygon(new[] - { + new MultiPolygon([ new Polygon( - new LinearRing(new[] - { + new LinearRing([ new Coordinate(1d, 1d), new Coordinate(2d, 2d), new Coordinate(3d, 3d), new Coordinate(1d, 1d) - }) + ]) ) - }) - }), + ]) + ]), "st_collect(st_makepoint(1,1),st_multi(st_makepolygon(st_makeline(ARRAY[st_makepoint(1,1),st_makepoint(2,2),st_makepoint(3,3),st_makepoint(1,1)]))))") .SetName("Collection"), new TestCaseData( Ordinates.None, - new GeometryCollection(new Geometry[] - { + new GeometryCollection([ new Point(new Coordinate(1d, 1d)), - new GeometryCollection(new Geometry[] - { + new GeometryCollection([ new Point(new Coordinate(1d, 1d)), - new MultiPolygon(new[] - { + new MultiPolygon([ new Polygon( - new LinearRing(new[] - { + new LinearRing([ new Coordinate(1d, 1d), new Coordinate(2d, 2d), new Coordinate(3d, 3d), new Coordinate(1d, 1d) - }) + ]) ) - }) - }) - }), + ]) + ]) + ]), "st_collect(st_makepoint(1,1),st_collect(st_makepoint(1,1),st_multi(st_makepolygon(st_makeline(ARRAY[st_makepoint(1,1),st_makepoint(2,2),st_makepoint(3,3),st_makepoint(1,1)])))))") .SetName("CollectionNested"), @@ -126,23 +114,22 @@ public class NetTopologySuiteTests : TestBase new TestCaseData( Ordinates.XYZM, new Point( - new DotSpatialAffineCoordinateSequence(new[] { 1d, 2d }, new[] { 3d }, new[] { 4d }), + new DotSpatialAffineCoordinateSequence([1d, 2d], [3d], [4d]), GeometryFactory.Default), "st_makepoint(1,2,3,4)") .SetName("PointXYZM"), new TestCaseData( 
Ordinates.None, - new LinearRing(new[] - { - new Coordinate(1d, 1d), + new LinearRing([ + new Coordinate(1d, 1d), new Coordinate(2d, 2d), new Coordinate(3d, 3d), new Coordinate(1d, 1d) - }), + ]), "st_makeline(ARRAY[st_makepoint(1,1),st_makepoint(2,2),st_makepoint(3,3),st_makepoint(1,1)])") .SetName("LinearRing") - }; + ]; [Test, TestCaseSource(nameof(TestCases))] public async Task Read(Ordinates ordinates, Geometry geometry, string sqlRepresentation) @@ -221,53 +208,45 @@ public async Task Concurrency_test() await adminConnection.ExecuteNonQueryAsync($"INSERT INTO {table} DEFAULT VALUES"); var point = new Point(new Coordinate(1d, 1d)); - var lineString = new LineString(new[] { new Coordinate(1d, 1d), new Coordinate(1d, 2500d) }); + var lineString = new LineString([new Coordinate(1d, 1d), new Coordinate(1d, 2500d)]); var polygon = new Polygon( - new LinearRing(new[] - { + new LinearRing([ new Coordinate(1d, 1d), new Coordinate(2d, 2d), new Coordinate(3d, 3d), new Coordinate(1d, 1d) - }) + ]) ); - var multiPoint = new MultiPoint(new[] { new Point(new Coordinate(1d, 1d)) }); - var multiLineString = new MultiLineString(new[] - { - new LineString(new[] - { + var multiPoint = new MultiPoint([new Point(new Coordinate(1d, 1d))]); + var multiLineString = new MultiLineString([ + new LineString([ new Coordinate(1d, 1d), new Coordinate(1d, 2500d) - }) - }); - var multiPolygon = new MultiPolygon(new[] - { + ]) + ]); + var multiPolygon = new MultiPolygon([ new Polygon( - new LinearRing(new[] - { + new LinearRing([ new Coordinate(1d, 1d), new Coordinate(2d, 2d), new Coordinate(3d, 3d), new Coordinate(1d, 1d) - }) + ]) ) - }); - var collection = new GeometryCollection(new Geometry[] - { + ]); + var collection = new GeometryCollection([ new Point(new Coordinate(1d, 1d)), - new MultiPolygon(new[] - { + new MultiPolygon([ new Polygon( - new LinearRing(new[] - { + new LinearRing([ new Coordinate(1d, 1d), new Coordinate(2d, 2d), new Coordinate(3d, 3d), new Coordinate(1d, 1d) - }) + ]) 
) - }) - }); + ]) + ]); await Task.WhenAll(Enumerable.Range(0, 30).Select(i => Task.Run(async () => { diff --git a/test/Npgsql.PluginTests/NodaTimeTests.cs b/test/Npgsql.PluginTests/NodaTimeTests.cs index 6bbc401943..adddf9cf25 100644 --- a/test/Npgsql.PluginTests/NodaTimeTests.cs +++ b/test/Npgsql.PluginTests/NodaTimeTests.cs @@ -18,14 +18,14 @@ public class NodaTimeTests : MultiplexingTestBase, IDisposable #region Timestamp without time zone static readonly TestCaseData[] TimestampValues = - { + [ new TestCaseData(new LocalDateTime(1998, 4, 12, 13, 26, 38, 789), "1998-04-12 13:26:38.789") .SetName("Timestamp_pre2000"), new TestCaseData(new LocalDateTime(2015, 1, 27, 8, 45, 12, 345), "2015-01-27 08:45:12.345") .SetName("Timestamp_post2000"), new TestCaseData(new LocalDateTime(1999, 12, 31, 23, 59, 59, 999).PlusNanoseconds(456000), "1999-12-31 23:59:59.999456") .SetName("Timestamp_with_microseconds") - }; + ]; [Test, TestCaseSource(nameof(TimestampValues))] public Task Timestamp_as_LocalDateTime(LocalDateTime localDateTime, string sqlLiteral) @@ -145,7 +145,7 @@ await AssertType( #region Timestamp with time zone static readonly TestCaseData[] TimestamptzValues = - { + [ new TestCaseData(new LocalDateTime(1998, 4, 12, 13, 26, 38).InUtc().ToInstant(), "1998-04-12 15:26:38+02") .SetName("Timestamptz_pre2000"), new TestCaseData(new LocalDateTime(2015, 1, 27, 8, 45, 12, 345).InUtc().ToInstant(), "2015-01-27 09:45:12.345+01") @@ -154,7 +154,7 @@ await AssertType( .SetName("Timestamptz_write_date_only"), new TestCaseData(new LocalDateTime(1999, 12, 31, 23, 59, 59, 999).PlusNanoseconds(456000).InUtc().ToInstant(), "2000-01-01 00:59:59.999456+01") .SetName("Timestamptz_with_microseconds") - }; + ]; [Test, TestCaseSource(nameof(TimestamptzValues))] public Task Timestamptz_as_Instant(Instant instant, string sqlLiteral) diff --git a/test/Npgsql.Specification.Tests/NpgsqlSelectValueFixture.cs b/test/Npgsql.Specification.Tests/NpgsqlSelectValueFixture.cs index 
67f1d9f1b4..f524a00505 100644 --- a/test/Npgsql.Specification.Tests/NpgsqlSelectValueFixture.cs +++ b/test/Npgsql.Specification.Tests/NpgsqlSelectValueFixture.cs @@ -51,8 +51,7 @@ public string CreateSelectSql(byte[] value) => public string SelectNoRows => "SELECT 1 WHERE 0 = 1;"; - public IReadOnlyCollection SupportedDbTypes { get; } = new ReadOnlyCollection(new[] - { + public IReadOnlyCollection SupportedDbTypes { get; } = new ReadOnlyCollection([ DbType.Binary, DbType.Boolean, DbType.Date, @@ -67,7 +66,7 @@ public string CreateSelectSql(byte[] value) => DbType.Single, DbType.String, DbType.Time - }); + ]); public Type NullValueExceptionType => typeof(InvalidCastException); diff --git a/test/Npgsql.Tests/CommandParameterTests.cs b/test/Npgsql.Tests/CommandParameterTests.cs index 1e4355df4b..6b58fc4518 100644 --- a/test/Npgsql.Tests/CommandParameterTests.cs +++ b/test/Npgsql.Tests/CommandParameterTests.cs @@ -164,7 +164,7 @@ public async Task Generic_parameter() cmd.Parameters.Add(new NpgsqlParameter("p1", 8)); cmd.Parameters.Add(new NpgsqlParameter("p2", 8) { NpgsqlDbType = NpgsqlDbType.Integer }); cmd.Parameters.Add(new NpgsqlParameter("p3", "hello")); - cmd.Parameters.Add(new NpgsqlParameter("p4", new[] { 'f', 'o', 'o' })); + cmd.Parameters.Add(new NpgsqlParameter("p4", ['f', 'o', 'o'])); using var reader = await cmd.ExecuteReaderAsync(); reader.Read(); Assert.That(reader.GetInt32(0), Is.EqualTo(8)); diff --git a/test/Npgsql.Tests/CopyTests.cs b/test/Npgsql.Tests/CopyTests.cs index 5daa935d2f..48d40fa2cc 100644 --- a/test/Npgsql.Tests/CopyTests.cs +++ b/test/Npgsql.Tests/CopyTests.cs @@ -477,10 +477,10 @@ public async Task Import_object_null() } static readonly TestCaseData[] DBNullValues = - { + [ new TestCaseData(DBNull.Value).SetName("DBNull.Value"), new TestCaseData(null).SetName("null") - }; + ]; [Test, TestCaseSource(nameof(DBNullValues))] public async Task Import_dbnull(DBNull? 
value) @@ -773,12 +773,13 @@ await conn.ExecuteNonQueryAsync($@" using var reader = conn.BeginBinaryExport($"COPY {table} (bits, bitvector, bitarray) TO STDIN BINARY"); reader.StartRow(); - Assert.That(reader.Read(), Is.EqualTo(new BitArray(new[] { false, false, false, false, false, false, false, true, true, false, true }))); + Assert.That(reader.Read(), Is.EqualTo(new BitArray([false, false, false, false, false, false, false, true, true, false, true + ]))); Assert.That(reader.Read(), Is.EqualTo(new BitVector32(0b00000001101000000000000000000000))); Assert.That(reader.Read(), Is.EqualTo(new[] { - new BitArray(new[] { true, false, true }), - new BitArray(new[] { true, true, true }) + new BitArray([true, false, true]), + new BitArray([true, true, true]) })); } diff --git a/test/Npgsql.Tests/MultipleHostsTests.cs b/test/Npgsql.Tests/MultipleHostsTests.cs index f4f2dfffb7..18bd56167f 100644 --- a/test/Npgsql.Tests/MultipleHostsTests.cs +++ b/test/Npgsql.Tests/MultipleHostsTests.cs @@ -23,7 +23,7 @@ namespace Npgsql.Tests; public class MultipleHostsTests : TestBase { static readonly object[] MyCases = - { + [ new object[] { TargetSessionAttributes.Standby, new[] { Primary, Standby }, 1 }, new object[] { TargetSessionAttributes.Standby, new[] { PrimaryReadOnly, Standby }, 1 }, new object[] { TargetSessionAttributes.PreferStandby, new[] { Primary, Standby }, 1 }, @@ -41,7 +41,7 @@ public class MultipleHostsTests : TestBase new object[] { TargetSessionAttributes.ReadWrite, new[] { PrimaryReadOnly, Primary }, 1 }, new object[] { TargetSessionAttributes.ReadOnly, new[] { Primary, Standby }, 1 }, new object[] { TargetSessionAttributes.ReadOnly, new[] { PrimaryReadOnly, Standby }, 0 } - }; + ]; [Test] [TestCaseSource(nameof(MyCases))] diff --git a/test/Npgsql.Tests/NpgsqlEventSourceTests.cs b/test/Npgsql.Tests/NpgsqlEventSourceTests.cs index c1659e6fba..6419846fbe 100644 --- a/test/Npgsql.Tests/NpgsqlEventSourceTests.cs +++ b/test/Npgsql.Tests/NpgsqlEventSourceTests.cs @@ 
-44,7 +44,7 @@ public void DisableEventSource() TestEventListener _listener = null!; - readonly List _events = new(); + readonly List _events = []; class TestEventListener : EventListener { diff --git a/test/Npgsql.Tests/ReaderTests.cs b/test/Npgsql.Tests/ReaderTests.cs index 126276d2e8..249b5bf2ce 100644 --- a/test/Npgsql.Tests/ReaderTests.cs +++ b/test/Npgsql.Tests/ReaderTests.cs @@ -1369,7 +1369,7 @@ public async Task GetBytes() var table = await CreateTempTable(conn, "bytes BYTEA"); // TODO: This is too small to actually test any interesting sequential behavior - byte[] expected = { 1, 2, 3, 4, 5 }; + byte[] expected = [1, 2, 3, 4, 5]; var actual = new byte[expected.Length]; await conn.ExecuteNonQueryAsync($"INSERT INTO {table} (bytes) VALUES ({EncodeByteaHex(expected)})"); diff --git a/test/Npgsql.Tests/SchemaTests.cs b/test/Npgsql.Tests/SchemaTests.cs index 230ae6a90d..301cf44d80 100644 --- a/test/Npgsql.Tests/SchemaTests.cs +++ b/test/Npgsql.Tests/SchemaTests.cs @@ -286,7 +286,7 @@ public async Task Precision_and_scale() var table = await CreateTempTable( conn, "explicit_both NUMERIC(10,2), explicit_precision NUMERIC(10), implicit_both NUMERIC, integer INTEGER, text TEXT"); - var dataTable = await GetSchema(conn, "Columns", new[] { null, null, table }); + var dataTable = await GetSchema(conn, "Columns", [null, null, table]); var rows = dataTable.Rows.Cast().ToList(); var explicitBoth = rows.Single(r => (string)r["column_name"] == "explicit_both"); @@ -339,7 +339,7 @@ public async Task GetSchema_tables_with_restrictions() await using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "bar INTEGER"); - var dt = await GetSchema(conn, "Tables", new[] { null, null, table }); + var dt = await GetSchema(conn, "Tables", [null, null, table]); foreach (var row in dt.Rows.OfType()) Assert.That(row["table_name"], Is.EqualTo(table)); } @@ -352,7 +352,7 @@ public async Task GetSchema_views_with_restrictions() await 
conn.ExecuteNonQueryAsync($"CREATE VIEW {view} AS SELECT 8 AS foo"); - var dt = await GetSchema(conn, "Views", new[] { null, null, view }); + var dt = await GetSchema(conn, "Views", [null, null, view]); foreach (var row in dt.Rows.OfType()) Assert.That(row["table_name"], Is.EqualTo(view)); } @@ -365,7 +365,7 @@ public async Task GetSchema_materialized_views_with_restrictions() await conn.ExecuteNonQueryAsync($"CREATE MATERIALIZED VIEW {viewName} AS SELECT 8 AS foo"); - var dt = await GetSchema(conn, "MaterializedViews", new[] { null, viewName, null, null }); + var dt = await GetSchema(conn, "MaterializedViews", [null, viewName, null, null]); foreach (var row in dt.Rows.OfType()) Assert.That(row["table_name"], Is.EqualTo(viewName)); } @@ -376,7 +376,7 @@ public async Task Primary_key() await using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "id INT PRIMARY KEY, f1 INT"); - var dataTable = await GetSchema(conn, "CONSTRAINTCOLUMNS", new[] { null, null, table }); + var dataTable = await GetSchema(conn, "CONSTRAINTCOLUMNS", [null, null, table]); var column = dataTable.Rows.Cast().Single(); Assert.That(column["table_schema"], Is.EqualTo("public")); @@ -391,7 +391,7 @@ public async Task Primary_key_composite() await using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "id1 INT, id2 INT, f1 INT, PRIMARY KEY (id1, id2)"); - var dataTable = await GetSchema(conn, "CONSTRAINTCOLUMNS", new[] { null, null, table }); + var dataTable = await GetSchema(conn, "CONSTRAINTCOLUMNS", [null, null, table]); var columns = dataTable.Rows.Cast().OrderBy(r => r["ordinal_number"]).ToList(); Assert.That(columns.All(r => r["table_schema"].Equals("public"))); @@ -410,7 +410,7 @@ public async Task Unique_constraint() var database = await conn.ExecuteScalarAsync("SELECT current_database()"); - var dataTable = await GetSchema(conn, "CONSTRAINTCOLUMNS", new[] { null, null, table }); + var dataTable = await GetSchema(conn, 
"CONSTRAINTCOLUMNS", [null, null, table]); var columns = dataTable.Rows.Cast().ToList(); Assert.That(columns.All(r => r["constraint_catalog"].Equals(database))); @@ -448,7 +448,7 @@ await conn.ExecuteNonQueryAsync(@$" var database = await conn.ExecuteScalarAsync("SELECT current_database()"); - var dataTable = await GetSchema(conn, "INDEXES", new[] { null, null, table }); + var dataTable = await GetSchema(conn, "INDEXES", [null, null, table]); var index = dataTable.Rows.Cast().Single(); Assert.That(index["table_schema"], Is.EqualTo("public")); @@ -456,7 +456,7 @@ await conn.ExecuteNonQueryAsync(@$" Assert.That(index["index_name"], Is.EqualTo(constraint)); Assert.That(index["type_desc"], Is.EqualTo("")); - string[] indexColumnRestrictions = { null!, null!, table }; + string[] indexColumnRestrictions = [null!, null!, table]; var dataTable2 = await GetSchema(conn, "INDEXCOLUMNS", indexColumnRestrictions); var columns = dataTable2.Rows.Cast().ToList(); @@ -470,7 +470,7 @@ await conn.ExecuteNonQueryAsync(@$" Assert.That(columns[0]["column_name"], Is.EqualTo("f1")); Assert.That(columns[1]["column_name"], Is.EqualTo("f2")); - string[] indexColumnRestrictions3 = { (string) database! , "public", table, constraint, "f1" }; + string[] indexColumnRestrictions3 = [(string) database! 
, "public", table, constraint, "f1"]; var dataTable3 = await GetSchema(conn, "INDEXCOLUMNS", indexColumnRestrictions3); var columns3 = dataTable3.Rows.Cast().ToList(); Assert.That(columns3.Count, Is.EqualTo(1)); @@ -533,7 +533,7 @@ vbit bit varying(5), cid cid"; var table = await CreateTempTable(conn, columnDefinition); - var columnsSchema = await GetSchema(conn, "Columns", new[] { null, null, table }); + var columnsSchema = await GetSchema(conn, "Columns", [null, null, table]); var columns = columnsSchema.Rows.Cast().ToList(); var dataTypes = await GetSchema(conn, DbMetaDataCollectionNames.DataTypes); @@ -554,7 +554,7 @@ await conn.ExecuteNonQueryAsync($@" CREATE TYPE {enumName} AS ENUM ('red', 'yellow', 'blue'); CREATE TABLE {table} (color {enumName});"); - var dataTable = await GetSchema(conn, "Columns", new[] { null, null, table }); + var dataTable = await GetSchema(conn, "Columns", [null, null, table]); var row = dataTable.Rows.Cast().Single(); Assert.That(row["data_type"], Is.EqualTo(enumName)); } @@ -571,7 +571,7 @@ await conn.ExecuteNonQueryAsync($@" CREATE TYPE {schema}.{enumName} AS ENUM ('red', 'yellow', 'blue'); CREATE TABLE {table} (color {schema}.{enumName});"); - var dataTable = await GetSchema(conn, "Columns", new[] { null, null, table }); + var dataTable = await GetSchema(conn, "Columns", [null, null, table]); var row = dataTable.Rows.Cast().Single(); Assert.That(row["data_type"], Is.EqualTo($"{schema}.{enumName}")); } diff --git a/test/Npgsql.Tests/Support/ListLoggerFactory.cs b/test/Npgsql.Tests/Support/ListLoggerFactory.cs index 2852335df8..930bb0cf92 100644 --- a/test/Npgsql.Tests/Support/ListLoggerFactory.cs +++ b/test/Npgsql.Tests/Support/ListLoggerFactory.cs @@ -43,7 +43,7 @@ public ListLogger(ListLoggerProvider provider) => _provider = provider; public List<(LogLevel, EventId, string, object?, Exception?)> LoggedEvents { get; } - = new(); + = []; public void Log(LogLevel logLevel, EventId eventId, TState state, Exception? 
exception, Func formatter) diff --git a/test/Npgsql.Tests/Support/PgPostmasterMock.cs b/test/Npgsql.Tests/Support/PgPostmasterMock.cs index ab3eeab521..2e298be3d3 100644 --- a/test/Npgsql.Tests/Support/PgPostmasterMock.cs +++ b/test/Npgsql.Tests/Support/PgPostmasterMock.cs @@ -21,7 +21,7 @@ class PgPostmasterMock : IAsyncDisposable static readonly Encoding RelaxedEncoding = NpgsqlWriteBuffer.RelaxedUTF8Encoding; readonly Socket _socket; - readonly List _allServers = new(); + readonly List _allServers = []; bool _acceptingClients; Task? _acceptClientsTask; int _processIdCounter; diff --git a/test/Npgsql.Tests/Support/TestBase.cs b/test/Npgsql.Tests/Support/TestBase.cs index 463b132d56..05d2bfea37 100644 --- a/test/Npgsql.Tests/Support/TestBase.cs +++ b/test/Npgsql.Tests/Support/TestBase.cs @@ -427,7 +427,7 @@ public async Task AssertTypeUnsupportedWrite(T value, // Check the corresponding array type as well if (!skipArrayCheck && !pgTypeName?.EndsWith("[]", StringComparison.Ordinal) == true) { - await AssertTypeUnsupportedWriteCore(new[] { value, value }, pgTypeName + "[]", dataSource); + await AssertTypeUnsupportedWriteCore([value, value], pgTypeName + "[]", dataSource); } return result; diff --git a/test/Npgsql.Tests/Types/ArrayTests.cs b/test/Npgsql.Tests/Types/ArrayTests.cs index 187c71d646..6b0daad6cf 100644 --- a/test/Npgsql.Tests/Types/ArrayTests.cs +++ b/test/Npgsql.Tests/Types/ArrayTests.cs @@ -24,16 +24,16 @@ namespace Npgsql.Tests.Types; public class ArrayTests : MultiplexingTestBase { static readonly TestCaseData[] ArrayTestCases = - { + [ new TestCaseData(new[] { 1, 2, 3 }, "{1,2,3}", "integer[]", NpgsqlDbType.Integer | NpgsqlDbType.Array) .SetName("Integer_array"), new TestCaseData(Array.Empty(), "{}", "integer[]", NpgsqlDbType.Integer | NpgsqlDbType.Array) .SetName("Empty_array"), new TestCaseData(new[,] { { 1, 2, 3 }, { 7, 8, 9 } }, "{{1,2,3},{7,8,9}}", "integer[]", NpgsqlDbType.Integer | NpgsqlDbType.Array) .SetName("Two_dimensional_array"), - new 
TestCaseData(new[] { new byte[] { 1, 2 }, new byte[] { 3, 4 } }, """{"\\x0102","\\x0304"}""", "bytea[]", NpgsqlDbType.Bytea | NpgsqlDbType.Array) + new TestCaseData(new[] { [1, 2], new byte[] { 3, 4 } }, """{"\\x0102","\\x0304"}""", "bytea[]", NpgsqlDbType.Bytea | NpgsqlDbType.Array) .SetName("Bytea_array") - }; + ]; [Test, TestCaseSource(nameof(ArrayTestCases))] public Task Arrays(T array, string sqlLiteral, string pgTypeName, NpgsqlDbType? npgsqlDbType) @@ -311,7 +311,7 @@ public async Task Jagged_arrays_not_supported() { await using var conn = await OpenConnectionAsync(); await using var cmd = new NpgsqlCommand("SELECT @p1", conn); - cmd.Parameters.AddWithValue("p1", NpgsqlDbType.Array | NpgsqlDbType.Integer, new[] { new[] { 8 }, new[] { 8, 10 } }); + cmd.Parameters.AddWithValue("p1", NpgsqlDbType.Array | NpgsqlDbType.Integer, new[] { [8], new[] { 8, 10 } }); Assert.That(async () => await cmd.ExecuteNonQueryAsync(), Throws.Exception .TypeOf() .With.Property("InnerException").Message.Contains("jagged")); @@ -403,7 +403,7 @@ public async Task Arrays_not_supported_by_default_on_NpgsqlSlimSourceBuilder() await using var dataSource = dataSourceBuilder.Build(); await AssertTypeUnsupportedRead("{1,2,3}", "integer[]", dataSource); - await AssertTypeUnsupportedWrite(new[] { 1, 2, 3 }, "integer[]", dataSource); + await AssertTypeUnsupportedWrite([1, 2, 3], "integer[]", dataSource); } [Test] diff --git a/test/Npgsql.Tests/Types/BitStringTests.cs b/test/Npgsql.Tests/Types/BitStringTests.cs index 95c81ffb41..7a1b085fa1 100644 --- a/test/Npgsql.Tests/Types/BitStringTests.cs +++ b/test/Npgsql.Tests/Types/BitStringTests.cs @@ -69,7 +69,7 @@ public async Task Array() { using var conn = await OpenConnectionAsync(); using var cmd = new NpgsqlCommand("SELECT @p", conn); - var expected = new[] { new BitArray(new[] { true, false, true }), new BitArray(new[] { false }) }; + var expected = new[] { new BitArray([true, false, true]), new BitArray([false]) }; var p = new 
NpgsqlParameter("p", NpgsqlDbType.Array | NpgsqlDbType.Varbit) { Value = expected }; cmd.Parameters.Add(p); p.Value = expected; diff --git a/test/Npgsql.Tests/Types/ByteaTests.cs b/test/Npgsql.Tests/Types/ByteaTests.cs index c34bce04ff..4f8db3c3dc 100644 --- a/test/Npgsql.Tests/Types/ByteaTests.cs +++ b/test/Npgsql.Tests/Types/ByteaTests.cs @@ -37,31 +37,31 @@ public async Task Bytea_long() [Test] public Task AsMemory() => AssertType( - new Memory(new byte[] { 1, 2, 3 }), "\\x010203", "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false, + new Memory([1, 2, 3]), "\\x010203", "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false, comparer: (left, right) => left.Span.SequenceEqual(right.Span)); [Test] public Task AsReadOnlyMemory() => AssertType( - new ReadOnlyMemory(new byte[] { 1, 2, 3 }), "\\x010203", "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false, + new ReadOnlyMemory([1, 2, 3]), "\\x010203", "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false, comparer: (left, right) => left.Span.SequenceEqual(right.Span)); [Test] public Task AsArraySegment() => AssertType( - new ArraySegment(new byte[] { 1, 2, 3 }), "\\x010203", "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false); + new ArraySegment([1, 2, 3]), "\\x010203", "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false); [Test] public Task Write_as_MemoryStream() => AssertTypeWrite( - () => new MemoryStream(new byte[] { 1, 2, 3 }), "\\x010203", "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false); + () => new MemoryStream([1, 2, 3]), "\\x010203", "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false); [Test] public Task Write_as_MemoryStream_truncated() { var msFactory = () => { - var ms = new MemoryStream(new byte[] { 1, 2, 3, 4 }); + var ms = new MemoryStream([1, 2, 3, 4]); ms.ReadByte(); return ms; }; @@ -107,7 +107,7 @@ public async Task Write_as_FileStream() var fsList = new List(); try { - await File.WriteAllBytesAsync(filePath, new byte[] { 1, 
2, 3 }); + await File.WriteAllBytesAsync(filePath, [1, 2, 3]); await AssertTypeWrite( () => FileStreamFactory(filePath, fsList), "\\x010203", "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false); @@ -191,14 +191,14 @@ public async Task Truncate_array() { await using var conn = await OpenConnectionAsync(); await using var cmd = new NpgsqlCommand("SELECT @p", conn); - byte[] data = { 1, 2, 3, 4, 5, 6 }; + byte[] data = [1, 2, 3, 4, 5, 6]; var p = new NpgsqlParameter("p", data) { Size = 4 }; cmd.Parameters.Add(p); Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(new byte[] { 1, 2, 3, 4 })); Assert.That(p.Value, Is.EqualTo(new byte[] { 1, 2, 3, 4 }), "Truncated parameter value should be persisted on the parameter per DbParameter.Size docs"); // NpgsqlParameter.Size needs to persist when value is changed - byte[] data2 = { 11, 12, 13, 14, 15, 16 }; + byte[] data2 = [11, 12, 13, 14, 15, 16]; p.Value = data2; Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(new byte[] { 11, 12, 13, 14 })); @@ -219,13 +219,13 @@ public async Task Truncate_stream() { await using var conn = await OpenConnectionAsync(); await using var cmd = new NpgsqlCommand("SELECT @p", conn); - byte[] data = { 1, 2, 3, 4, 5, 6 }; + byte[] data = [1, 2, 3, 4, 5, 6]; var p = new NpgsqlParameter("p", new MemoryStream(data)) { Size = 4 }; cmd.Parameters.Add(p); Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(new byte[] { 1, 2, 3, 4 })); // NpgsqlParameter.Size needs to persist when value is changed - byte[] data2 = { 11, 12, 13, 14, 15, 16 }; + byte[] data2 = [11, 12, 13, 14, 15, 16]; p.Value = new MemoryStream(data2); Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(new byte[] { 11, 12, 13, 14 })); @@ -254,7 +254,7 @@ public async Task Write_as_NonSeekable_stream() { await using var conn = await OpenConnectionAsync(); await using var cmd = new NpgsqlCommand("SELECT @p", conn); - byte[] data = { 1, 2, 3, 4, 5, 6 }; + byte[] data = [1, 2, 3, 4, 5, 6]; var p = new 
NpgsqlParameter("p", new NonSeekableStream(data)) { Size = 4 }; cmd.Parameters.Add(p); Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(new byte[] { 1, 2, 3, 4 })); diff --git a/test/Npgsql.Tests/Types/CompositeTests.cs b/test/Npgsql.Tests/Types/CompositeTests.cs index a1553b99a5..a7a5f20191 100644 --- a/test/Npgsql.Tests/Types/CompositeTests.cs +++ b/test/Npgsql.Tests/Types/CompositeTests.cs @@ -305,7 +305,7 @@ await adminConnection.ExecuteNonQueryAsync($@" await AssertType( connection, - new SomeCompositeWithArray { Ints = new[] { 1, 2, 3, 4 } }, + new SomeCompositeWithArray { Ints = [1, 2, 3, 4] }, @"(""{1,2,3,4}"")", compositeType, npgsqlDbType: null, @@ -355,7 +355,9 @@ await adminConnection.ExecuteNonQueryAsync($@" await AssertType( connection, - new SomeCompositeWithConverterResolverType { DateTimes = new [] { new DateTime(DateTime.UnixEpoch.Ticks, DateTimeKind.Unspecified), new DateTime(DateTime.UnixEpoch.Ticks, DateTimeKind.Unspecified).AddDays(1) } }, + new SomeCompositeWithConverterResolverType { DateTimes = [new DateTime(DateTime.UnixEpoch.Ticks, DateTimeKind.Unspecified), new DateTime(DateTime.UnixEpoch.Ticks, DateTimeKind.Unspecified).AddDays(1) + ] + }, """("{""1970-01-01 00:00:00"",""1970-01-02 00:00:00""}")""", compositeType, npgsqlDbType: null, @@ -379,7 +381,7 @@ await adminConnection.ExecuteNonQueryAsync($@" Assert.ThrowsAsync(() => AssertType( connection, - new SomeCompositeWithConverterResolverType { DateTimes = new[] { DateTime.UnixEpoch } }, // UTC DateTime + new SomeCompositeWithConverterResolverType { DateTimes = [DateTime.UnixEpoch] }, // UTC DateTime """("{""1970-01-01 01:00:00"",""1970-01-02 01:00:00""}")""", compositeType, npgsqlDbType: null, diff --git a/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs b/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs index e1ccad4445..7a4876e47c 100644 --- a/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs +++ b/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs @@ -15,44 +15,44 @@ namespace 
Npgsql.Tests.Types; public sealed class DateTimeInfinityTests : TestBase, IDisposable { static readonly TestCaseData[] TimestampDateTimeValues = - { + [ new TestCaseData(DateTime.MinValue.AddYears(1), "0002-01-01 00:00:00", "0002-01-01 00:00:00") .SetName("MinValue_AddYear"), new TestCaseData(DateTime.MinValue, "0001-01-01 00:00:00", "-infinity") .SetName("MinValue"), new TestCaseData(DateTime.MaxValue, "9999-12-31 23:59:59.999999", "infinity") - .SetName("MaxValue"), - }; + .SetName("MaxValue") + ]; static readonly TestCaseData[] TimestampTzDateTimeValues = - { + [ new TestCaseData(DateTime.MinValue.AddYears(1), "0002-01-01 00:00:00+00", "0002-01-01 00:00:00+00") .SetName("MinValue_AddYear"), new TestCaseData(DateTime.MinValue, "0001-01-01 00:00:00+00", "-infinity") .SetName("MinValue"), new TestCaseData(DateTime.MaxValue, "9999-12-31 23:59:59.999999+00", "infinity") - .SetName("MaxValue"), - }; + .SetName("MaxValue") + ]; static readonly TestCaseData[] TimestampTzDateTimeOffsetValues = - { + [ new TestCaseData(DateTimeOffset.MinValue.ToUniversalTime().AddYears(1), "0002-01-01 00:00:00+00", "0002-01-01 00:00:00+00") .SetName("MinValue_AddYear"), new TestCaseData(DateTimeOffset.MinValue, "0001-01-01 00:00:00+00", "-infinity") .SetName("MinValue"), new TestCaseData(DateTimeOffset.MaxValue, "9999-12-31 23:59:59.999999+00", "infinity") - .SetName("MaxValue"), - }; + .SetName("MaxValue") + ]; static readonly TestCaseData[] DateDateTimeValues = - { + [ new TestCaseData(DateTime.MinValue.AddYears(1), "0002-01-01", "0002-01-01") .SetName("MinValue_AddYear"), new TestCaseData(DateTime.MinValue, "0001-01-01", "-infinity") .SetName("MinValue"), new TestCaseData(DateTime.MaxValue, "9999-12-31", "infinity") - .SetName("MaxValue"), - }; + .SetName("MaxValue") + ]; // As we can't roundtrip DateTime.MaxValue due to precision differences with postgres we are lenient with equality for this particular value. 
static readonly Func MaxValuePrecisionLenientComparer = @@ -86,14 +86,14 @@ public Task Date_DateTime(DateTime dateTime, string sqlLiteral, string infinityC isDefault: false); static readonly TestCaseData[] DateOnlyDateTimeValues = - { + [ new TestCaseData(DateOnly.MinValue.AddYears(1), "0002-01-01", "0002-01-01") .SetName("MinValue_AddYear"), new TestCaseData(DateOnly.MinValue, "0001-01-01", "-infinity") .SetName("MinValue"), new TestCaseData(DateOnly.MaxValue, "9999-12-31", "infinity") - .SetName("MaxValue"), - }; + .SetName("MaxValue") + ]; [Test, TestCaseSource(nameof(DateOnlyDateTimeValues))] public Task Date_DateOnly(DateOnly dateTime, string sqlLiteral, string infinityConvertedSqlLiteral) diff --git a/test/Npgsql.Tests/Types/DateTimeTests.cs b/test/Npgsql.Tests/Types/DateTimeTests.cs index ec91148f2c..815514031a 100644 --- a/test/Npgsql.Tests/Types/DateTimeTests.cs +++ b/test/Npgsql.Tests/Types/DateTimeTests.cs @@ -127,7 +127,7 @@ public Task Time_as_TimeOnly() #region Time with timezone static readonly TestCaseData[] TimeTzValues = - { + [ new TestCaseData(new DateTimeOffset(1, 1, 2, 13, 3, 45, 510, TimeSpan.FromHours(2)), "13:03:45.51+02") .SetName("Timezone"), new TestCaseData(new DateTimeOffset(1, 1, 2, 1, 0, 45, 510, TimeSpan.FromHours(-3)), "01:00:45.51-03") @@ -135,8 +135,8 @@ public Task Time_as_TimeOnly() new TestCaseData(new DateTimeOffset(1212720130000, TimeSpan.Zero), "09:41:12.013+00") .SetName("Utc"), new TestCaseData(new DateTimeOffset(1, 1, 2, 1, 0, 0, new TimeSpan(0, 2, 0, 0)), "01:00:00+02") - .SetName("Before_utc_zero"), - }; + .SetName("Before_utc_zero") + ]; [Test, TestCaseSource(nameof(TimeTzValues))] public Task TimeTz_as_DateTimeOffset(DateTimeOffset time, string sqlLiteral) @@ -147,14 +147,14 @@ public Task TimeTz_as_DateTimeOffset(DateTimeOffset time, string sqlLiteral) #region Timestamp static readonly TestCaseData[] TimestampValues = - { + [ new TestCaseData(new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Unspecified), 
"1998-04-12 13:26:38") .SetName("Timestamp_pre2000"), new TestCaseData(new DateTime(2015, 1, 27, 8, 45, 12, 345, DateTimeKind.Unspecified), "2015-01-27 08:45:12.345") .SetName("Timestamp_post2000"), new TestCaseData(new DateTime(2013, 7, 25, 0, 0, 0, DateTimeKind.Unspecified), "2013-07-25 00:00:00") .SetName("Timestamp_date_only") - }; + ]; [Test, TestCaseSource(nameof(TimestampValues))] public async Task Timestamp_as_DateTime(DateTime dateTime, string sqlLiteral) @@ -245,14 +245,14 @@ await AssertType( // Note that the below text representations are local (according to TimeZone, which is set to Europe/Berlin in this test class), // because that's how PG does timestamptz *text* representation. static readonly TestCaseData[] TimestampTzWriteValues = - { + [ new TestCaseData(new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), "1998-04-12 15:26:38+02") .SetName("Timestamptz_write_pre2000"), new TestCaseData(new DateTime(2015, 1, 27, 8, 45, 12, 345, DateTimeKind.Utc), "2015-01-27 09:45:12.345+01") .SetName("Timestamptz_write_post2000"), new TestCaseData(new DateTime(2013, 7, 25, 0, 0, 0, DateTimeKind.Utc), "2013-07-25 02:00:00+02") .SetName("Timestamptz_write_date_only") - }; + ]; [Test, TestCaseSource(nameof(TimestampTzWriteValues))] public async Task Timestamptz_as_DateTime(DateTime dateTime, string sqlLiteral) @@ -391,11 +391,10 @@ await AssertType( [Test] public Task Cannot_mix_DateTime_Kinds_in_array() - => AssertTypeUnsupportedWrite(new[] - { + => AssertTypeUnsupportedWrite([ new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), - new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Local), - }); + new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Local) + ]); [Test] @@ -410,8 +409,7 @@ public async Task Cannot_mix_DateTime_Kinds_in_multirange() await using var conn = await OpenConnectionAsync(); MinimumPgVersion(conn, "14.0", "Multirange types were introduced in PostgreSQL 14"); - await AssertTypeUnsupportedWrite[], ArgumentException>(new[] - { + await 
AssertTypeUnsupportedWrite[], ArgumentException>([ new NpgsqlRange( new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), new DateTime(1998, 4, 12, 15, 26, 38, DateTimeKind.Utc)), @@ -438,8 +436,8 @@ await AssertTypeUnsupportedWrite[], ArgumentException>(new new DateTime(1998, 4, 12, 15, 26, 38, DateTimeKind.Utc)), new NpgsqlRange( new DateTime(1998, 4, 13, 13, 26, 38, DateTimeKind.Local), - new DateTime(1998, 4, 13, 15, 26, 38, DateTimeKind.Local)), - }); + new DateTime(1998, 4, 13, 15, 26, 38, DateTimeKind.Local)) + ]); } [Test] @@ -493,7 +491,7 @@ public async Task Array_of_nullable_timestamptz() #region Interval static readonly TestCaseData[] IntervalValues = - { + [ new TestCaseData(new TimeSpan(0, 2, 3, 4, 5), "02:03:04.005") .SetName("Interval_time_only"), new TestCaseData(new TimeSpan(1, 2, 3, 4, 5), "1 day 02:03:04.005") @@ -502,7 +500,7 @@ public async Task Array_of_nullable_timestamptz() .SetName("Interval_with_many_days"), new TestCaseData(new TimeSpan(new TimeSpan(2, 3, 4).Ticks + 10), "02:03:04.000001") .SetName("Interval_with_microsecond") - }; + ]; [Test, TestCaseSource(nameof(IntervalValues))] public Task Interval_as_TimeSpan(TimeSpan timeSpan, string sqlLiteral) diff --git a/test/Npgsql.Tests/Types/FullTextSearchTests.cs b/test/Npgsql.Tests/Types/FullTextSearchTests.cs index eda874b12a..a3c83a31c9 100644 --- a/test/Npgsql.Tests/Types/FullTextSearchTests.cs +++ b/test/Npgsql.Tests/Types/FullTextSearchTests.cs @@ -24,31 +24,27 @@ public Task TsVector() public static IEnumerable TsQueryTestCases() => new[] { - new object[] - { + [ "'a'", new NpgsqlTsQueryLexeme("a") - }, - new object[] - { + ], + [ "!'a'", new NpgsqlTsQueryNot( new NpgsqlTsQueryLexeme("a")) - }, - new object[] - { + ], + [ "'a' | 'b'", new NpgsqlTsQueryOr( new NpgsqlTsQueryLexeme("a"), new NpgsqlTsQueryLexeme("b")) - }, - new object[] - { + ], + [ "'a' & 'b'", new NpgsqlTsQueryAnd( new NpgsqlTsQueryLexeme("a"), new NpgsqlTsQueryLexeme("b")) - }, + ], new object[] { "'a' <-> 'b'", 
diff --git a/test/Npgsql.Tests/Types/GeometricTypeTests.cs b/test/Npgsql.Tests/Types/GeometricTypeTests.cs index c4d8d53b0e..62b63d3d6d 100644 --- a/test/Npgsql.Tests/Types/GeometricTypeTests.cs +++ b/test/Npgsql.Tests/Types/GeometricTypeTests.cs @@ -107,7 +107,7 @@ await AssertType( [Test] public Task Path_closed() => AssertType( - new NpgsqlPath(new[] { new NpgsqlPoint(1, 2), new NpgsqlPoint(3, 4) }, false), + new NpgsqlPath([new NpgsqlPoint(1, 2), new NpgsqlPoint(3, 4)], false), "((1,2),(3,4))", "path", NpgsqlDbType.Path); @@ -115,7 +115,7 @@ public Task Path_closed() [Test] public Task Path_open() => AssertType( - new NpgsqlPath(new[] { new NpgsqlPoint(1, 2), new NpgsqlPoint(3, 4) }, true), + new NpgsqlPath([new NpgsqlPoint(1, 2), new NpgsqlPoint(3, 4)], true), "[(1,2),(3,4)]", "path", NpgsqlDbType.Path); diff --git a/test/Npgsql.Tests/Types/InternalTypeTests.cs b/test/Npgsql.Tests/Types/InternalTypeTests.cs index a5d69664a4..ad95686825 100644 --- a/test/Npgsql.Tests/Types/InternalTypeTests.cs +++ b/test/Npgsql.Tests/Types/InternalTypeTests.cs @@ -60,13 +60,14 @@ public async Task Tid() #region NpgsqlLogSequenceNumber / PgLsn - static readonly TestCaseData[] EqualsObjectCases = { + static readonly TestCaseData[] EqualsObjectCases = + [ new TestCaseData(new NpgsqlLogSequenceNumber(1ul), null).Returns(false), new TestCaseData(new NpgsqlLogSequenceNumber(1ul), new object()).Returns(false), new TestCaseData(new NpgsqlLogSequenceNumber(1ul), 1ul).Returns(false), // no implicit cast new TestCaseData(new NpgsqlLogSequenceNumber(1ul), "0/0").Returns(false), // no implicit cast/parsing - new TestCaseData(new NpgsqlLogSequenceNumber(1ul), new NpgsqlLogSequenceNumber(1ul)).Returns(true), - }; + new TestCaseData(new NpgsqlLogSequenceNumber(1ul), new NpgsqlLogSequenceNumber(1ul)).Returns(true) + ]; [Test, TestCaseSource(nameof(EqualsObjectCases))] public bool NpgsqlLogSequenceNumber_equals(NpgsqlLogSequenceNumber lsn, object? 
obj) diff --git a/test/Npgsql.Tests/Types/JsonDynamicTests.cs b/test/Npgsql.Tests/Types/JsonDynamicTests.cs index 3c948a4816..07cecc2a62 100644 --- a/test/Npgsql.Tests/Types/JsonDynamicTests.cs +++ b/test/Npgsql.Tests/Types/JsonDynamicTests.cs @@ -150,7 +150,7 @@ public async Task As_poco_supported_only_with_EnableDynamicJson() public async Task Poco_does_not_stomp_GetValue_string() { var dataSource = CreateDataSourceBuilder() - .EnableDynamicJson(new[] {typeof(WeatherForecast)}, new[] {typeof(WeatherForecast)}) + .EnableDynamicJson([typeof(WeatherForecast)], [typeof(WeatherForecast)]) .Build(); var sqlLiteral = IsJsonb @@ -193,9 +193,9 @@ public async Task Poco_default_mapping() { var dataSourceBuilder = CreateDataSourceBuilder(); if (IsJsonb) - dataSourceBuilder.EnableDynamicJson(jsonbClrTypes: new[] { typeof(WeatherForecast) }); + dataSourceBuilder.EnableDynamicJson(jsonbClrTypes: [typeof(WeatherForecast)]); else - dataSourceBuilder.EnableDynamicJson(jsonClrTypes: new[] { typeof(WeatherForecast) }); + dataSourceBuilder.EnableDynamicJson(jsonClrTypes: [typeof(WeatherForecast)]); await using var dataSource = dataSourceBuilder.Build(); await AssertType( @@ -224,7 +224,7 @@ public async Task Poco_polymorphic_mapping() return; var dataSourceBuilder = CreateDataSourceBuilder(); - dataSourceBuilder.EnableDynamicJson(jsonClrTypes: new[] { typeof(WeatherForecast) }); + dataSourceBuilder.EnableDynamicJson(jsonClrTypes: [typeof(WeatherForecast)]); await using var dataSource = dataSourceBuilder.Build(); await AssertType( @@ -251,7 +251,7 @@ public async Task Poco_polymorphic_mapping_read_parents() return; var dataSourceBuilder = CreateDataSourceBuilder(); - dataSourceBuilder.EnableDynamicJson(jsonClrTypes: new[] { typeof(WeatherForecast) }); + dataSourceBuilder.EnableDynamicJson(jsonClrTypes: [typeof(WeatherForecast)]); await using var dataSource = dataSourceBuilder.Build(); var value = new ExtendedDerivedWeatherForecast() @@ -292,7 +292,7 @@ public async Task 
Poco_exact_polymorphic_mapping() return; var dataSourceBuilder = CreateDataSourceBuilder(); - dataSourceBuilder.EnableDynamicJson(jsonClrTypes: new[] { typeof(ExtendedDerivedWeatherForecast) }); + dataSourceBuilder.EnableDynamicJson(jsonClrTypes: [typeof(ExtendedDerivedWeatherForecast)]); await using var dataSource = dataSourceBuilder.Build(); await AssertType( diff --git a/test/Npgsql.Tests/Types/JsonPathTests.cs b/test/Npgsql.Tests/Types/JsonPathTests.cs index de49a631e0..3bc4f07a3b 100644 --- a/test/Npgsql.Tests/Types/JsonPathTests.cs +++ b/test/Npgsql.Tests/Types/JsonPathTests.cs @@ -11,11 +11,11 @@ public class JsonPathTests : MultiplexingTestBase public JsonPathTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) { } - static readonly object[] ReadWriteCases = new[] - { + static readonly object[] ReadWriteCases = + [ new object[] { "'$'", "$" }, - new object[] { "'$\"varname\"'", "$\"varname\"" }, - }; + new object[] { "'$\"varname\"'", "$\"varname\"" } + ]; [Test] [TestCase("$")] diff --git a/test/Npgsql.Tests/Types/MiscTypeTests.cs b/test/Npgsql.Tests/Types/MiscTypeTests.cs index 5291da5a09..6586fd89c3 100644 --- a/test/Npgsql.Tests/Types/MiscTypeTests.cs +++ b/test/Npgsql.Tests/Types/MiscTypeTests.cs @@ -108,7 +108,7 @@ public async Task UnknownResultTypeList() await using var conn = await OpenConnectionAsync(); await using var cmd = new NpgsqlCommand("SELECT TRUE, 8", conn); - cmd.UnknownResultTypeList = new[] { true, false }; + cmd.UnknownResultTypeList = [true, false]; await using var reader = await cmd.ExecuteReaderAsync(); reader.Read(); diff --git a/test/Npgsql.Tests/Types/MoneyTests.cs b/test/Npgsql.Tests/Types/MoneyTests.cs index 4c38f3d111..5f4a31b65b 100644 --- a/test/Npgsql.Tests/Types/MoneyTests.cs +++ b/test/Npgsql.Tests/Types/MoneyTests.cs @@ -7,8 +7,8 @@ namespace Npgsql.Tests.Types; public class MoneyTests : TestBase { - static readonly object[] MoneyValues = new[] - { + static readonly object[] MoneyValues = + [ new object[] { 
"$1.22", 1.22M }, new object[] { "$1,000.22", 1000.22M }, new object[] { "$1,000,000.22", 1000000.22M }, @@ -18,8 +18,8 @@ public class MoneyTests : TestBase new object[] { "$92,233,720,368,547,758.07", +92233720368547758.07M }, new object[] { "-$92,233,720,368,547,758.08", -92233720368547758.08M }, - new object[] { "-$92,233,720,368,547,758.08", -92233720368547758.08M }, - }; + new object[] { "-$92,233,720,368,547,758.08", -92233720368547758.08M } + ]; [Test] [TestCaseSource(nameof(MoneyValues))] @@ -41,11 +41,11 @@ public async Task Non_decimal_types_are_not_supported() await AssertTypeUnsupportedRead("8", "money"); } - static readonly object[] WriteWithLargeScaleCases = new[] - { + static readonly object[] WriteWithLargeScaleCases = + [ new object[] { "0.004::money", 0.004M, 0.00M }, - new object[] { "0.005::money", 0.005M, 0.01M }, - }; + new object[] { "0.005::money", 0.005M, 0.01M } + ]; [Test] [TestCaseSource(nameof(WriteWithLargeScaleCases))] diff --git a/test/Npgsql.Tests/Types/MultirangeTests.cs b/test/Npgsql.Tests/Types/MultirangeTests.cs index d01dc6e408..cf9cdcd8a9 100644 --- a/test/Npgsql.Tests/Types/MultirangeTests.cs +++ b/test/Npgsql.Tests/Types/MultirangeTests.cs @@ -13,7 +13,7 @@ namespace Npgsql.Tests.Types; public class MultirangeTests : TestBase { static readonly TestCaseData[] MultirangeTestCases = - { + [ // int4multirange new TestCaseData( new NpgsqlRange[] @@ -82,8 +82,8 @@ public class MultirangeTests : TestBase new(new(2020, 1, 10), true, false, default, false, true) }, "{[2020-01-01,2020-01-05),[2020-01-10,)}", "datemultirange", NpgsqlDbType.DateMultirange, false, false, default(NpgsqlRange)) - .SetName("DateOnly"), - }; + .SetName("DateOnly") + ]; [Test, TestCaseSource(nameof(MultirangeTestCases))] public Task Multirange_as_array( diff --git a/test/Npgsql.Tests/Types/NumericTests.cs b/test/Npgsql.Tests/Types/NumericTests.cs index 44c636984b..d0e48d8c3a 100644 --- a/test/Npgsql.Tests/Types/NumericTests.cs +++ 
b/test/Npgsql.Tests/Types/NumericTests.cs @@ -10,8 +10,8 @@ namespace Npgsql.Tests.Types; public class NumericTests : MultiplexingTestBase { - static readonly object[] ReadWriteCases = new[] - { + static readonly object[] ReadWriteCases = + [ new object[] { "0.0000000000000000000000000001::numeric", 0.0000000000000000000000000001M }, new object[] { "0.000000000000000000000001::numeric", 0.000000000000000000000001M }, new object[] { "0.00000000000000000001::numeric", 0.00000000000000000001M }, @@ -84,8 +84,8 @@ public class NumericTests : MultiplexingTestBase new object[] { "3649072683783772919700000000", 3649072683783772919700000000M }, new object[] { "1234567844445555.000000000", 1234567844445555.000000000M }, new object[] { "11112222000000000000", 11112222000000000000M }, - new object[] { "0::numeric", 0M }, - }; + new object[] { "0::numeric", 0M } + ]; [Test] [TestCaseSource(nameof(ReadWriteCases))] diff --git a/test/Npgsql.Tests/Types/RangeTests.cs b/test/Npgsql.Tests/Types/RangeTests.cs index 011d66dfdd..a8c67c0d8c 100644 --- a/test/Npgsql.Tests/Types/RangeTests.cs +++ b/test/Npgsql.Tests/Types/RangeTests.cs @@ -15,7 +15,7 @@ namespace Npgsql.Tests.Types; class RangeTests : MultiplexingTestBase { static readonly TestCaseData[] RangeTestCases = - { + [ new TestCaseData(new NpgsqlRange(1, true, 10, false), "[1,10)", "int4range", NpgsqlDbType.IntegerRange) .SetName("IntegerRange"), new TestCaseData(new NpgsqlRange(1, true, 10, false), "[1,10)", "int8range", NpgsqlDbType.BigIntRange) @@ -50,7 +50,7 @@ class RangeTests : MultiplexingTestBase .SetName("InfiniteLowerBound"), new TestCaseData(new NpgsqlRange(1, true, false, 10, false, true), "[1,)", "numrange", NpgsqlDbType.NumericRange) .SetName("InfiniteUpperBound") - }; + ]; // See more test cases in DateTimeTests [Test, TestCaseSource(nameof(RangeTestCases))] @@ -438,28 +438,28 @@ public override object ConvertFrom(ITypeDescriptorContext? context, CultureInfo? 
new object[][] { // (2018-05-17, 2018-05-18) - new object[] { new NpgsqlRange(May_17_2018, false, false, May_18_2018, false, false) }, + [new NpgsqlRange(May_17_2018, false, false, May_18_2018, false, false)], // [2018-05-17, 2018-05-18] - new object[] { new NpgsqlRange(May_17_2018, true, false, May_18_2018, true, false) }, + [new NpgsqlRange(May_17_2018, true, false, May_18_2018, true, false)], // [2018-05-17, 2018-05-18) - new object[] { new NpgsqlRange(May_17_2018, true, false, May_18_2018, false, false) }, + [new NpgsqlRange(May_17_2018, true, false, May_18_2018, false, false)], // (2018-05-17, 2018-05-18] - new object[] { new NpgsqlRange(May_17_2018, false, false, May_18_2018, true, false) }, + [new NpgsqlRange(May_17_2018, false, false, May_18_2018, true, false)], // (,) - new object[] { new NpgsqlRange(default, false, true, default, false, true) }, - new object[] { new NpgsqlRange(May_17_2018, false, true, May_18_2018, false, true) }, + [new NpgsqlRange(default, false, true, default, false, true)], + [new NpgsqlRange(May_17_2018, false, true, May_18_2018, false, true)], // (2018-05-17,) - new object[] { new NpgsqlRange(May_17_2018, false, false, default, false, true) }, - new object[] { new NpgsqlRange(May_17_2018, false, false, May_18_2018, false, true) }, + [new NpgsqlRange(May_17_2018, false, false, default, false, true)], + [new NpgsqlRange(May_17_2018, false, false, May_18_2018, false, true)], // (,2018-05-18) - new object[] { new NpgsqlRange(default, false, true, May_18_2018, false, false) }, - new object[] { new NpgsqlRange(May_17_2018, false, true, May_18_2018, false, false) } + [new NpgsqlRange(default, false, true, May_18_2018, false, false)], + [new NpgsqlRange(May_17_2018, false, true, May_18_2018, false, false)] }; #endregion diff --git a/test/Npgsql.Tests/TypesTests.cs b/test/Npgsql.Tests/TypesTests.cs index 047093df60..113c08b954 100644 --- a/test/Npgsql.Tests/TypesTests.cs +++ b/test/Npgsql.Tests/TypesTests.cs @@ -197,14 +197,14 @@ public 
void NpgsqlPolygon_empty() public void NpgsqlPath_default() { NpgsqlPath defaultPath = default; - Assert.IsFalse(defaultPath.Equals(new NpgsqlPath { new(1, 2) })); + Assert.IsFalse(defaultPath.Equals([new(1, 2)])); } [Test] public void NpgsqlPolygon_default() { NpgsqlPolygon defaultPolygon = default; - Assert.IsFalse(defaultPolygon.Equals(new NpgsqlPolygon { new(1, 2) })); + Assert.IsFalse(defaultPolygon.Equals([new(1, 2)])); } [Test] From 40cc1a170a9b5b7748228cf06961546dc6e71cb8 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Wed, 30 Oct 2024 16:30:08 +0100 Subject: [PATCH 476/761] Apply primary constructors to solution (#5908) --- .../GeoJSONTypeInfoResolverFactory.cs | 42 ++----- .../Internal/JsonNetJsonConverter.cs | 23 +--- .../JsonNetPocoTypeInfoResolverFactory.cs | 46 +++---- .../JsonNetTypeInfoResolverFactory.cs | 24 ++-- ...NetTopologySuiteTypeInfoResolverFactory.cs | 38 +++--- .../Internal/DateIntervalConverter.cs | 24 ++-- .../Internal/IntervalConverter.cs | 17 +-- .../Internal/LegacyConverters.cs | 42 ++----- .../Internal/LocalDateConverter.cs | 13 +- .../Internal/TimestampConverters.cs | 44 ++----- .../BackendMessages/RowDescriptionMessage.cs | 15 +-- src/Npgsql/Internal/ChainTypeInfoResolver.cs | 7 +- .../Composites/Metadata/CompositeBuilder.cs | 29 ++--- .../Internal/Converters/ArrayConverter.cs | 117 +++++++----------- .../Converters/BitStringConverters.cs | 4 +- .../Internal/Converters/CastingConverter.cs | 36 +++--- .../FullTextSearch/TsQueryConverter.cs | 17 +-- .../FullTextSearch/TsVectorConverter.cs | 17 +-- .../Internal/Converters/HstoreConverter.cs | 31 ++--- .../Converters/Networking/MacaddrConverter.cs | 10 +- .../Internal/Converters/NullableConverter.cs | 34 +++-- .../Internal/Converters/ObjectConverter.cs | 16 +-- .../PolymorphicConverterResolver.cs | 6 +- .../Converters/Primitive/PgNumeric.cs | 19 +-- .../Converters/Primitive/TextConverters.cs | 82 +++++------- .../Internal/Converters/RecordConverter.cs | 19 +-- 
.../Converters/Temporal/DateConverters.cs | 26 ++-- .../Converters/Temporal/DateTimeConverters.cs | 25 +--- .../Temporal/LegacyDateTimeConverter.cs | 36 ++---- .../VersionPrefixedTextConverter.cs | 28 ++--- src/Npgsql/Internal/NpgsqlConnector.cs | 28 ++--- src/Npgsql/Internal/PgBufferedConverter.cs | 4 +- src/Npgsql/Internal/PgConverter.cs | 14 +-- src/Npgsql/Internal/PgStreamingConverter.cs | 4 +- src/Npgsql/Internal/PgTypeInfo.cs | 42 +++---- src/Npgsql/Internal/PgWriter.cs | 32 +++-- src/Npgsql/Internal/Postgres/Field.cs | 15 +-- src/Npgsql/Internal/Postgres/Oid.cs | 6 +- .../JsonDynamicTypeInfoResolverFactory.cs | 43 +++---- .../JsonTypeInfoResolverFactory.cs | 17 +-- src/Npgsql/Internal/TypeInfoCache.cs | 16 +-- src/Npgsql/Internal/TypeInfoMapping.cs | 19 +-- src/Npgsql/NpgsqlConnection.cs | 6 +- src/Npgsql/NpgsqlDataAdapter.cs | 26 ++-- src/Npgsql/NpgsqlDataSource.cs | 11 +- src/Npgsql/NpgsqlNestedDataReader.cs | 21 +--- src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs | 12 +- src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs | 61 +++------ src/Npgsql/TypeMapping/UserTypeMapper.cs | 50 ++++---- .../Util/ResettableCancellationTokenSource.cs | 10 +- src/Npgsql/Util/TaskSchedulerAwaitable.cs | 7 +- src/Shared/CodeAnalysis.cs | 9 +- .../Npgsql.Benchmarks/TypeHandlers/Numeric.cs | 34 ++--- test/Npgsql.Benchmarks/TypeHandlers/Text.cs | 4 +- test/Npgsql.Benchmarks/TypeHandlers/Uuid.cs | 5 +- test/Npgsql.PluginTests/JsonNetTests.cs | 35 +++--- .../NpgsqlCommandTests.cs | 7 +- .../NpgsqlConnectionTests.cs | 8 +- .../NpgsqlDataReaderTests.cs | 6 +- test/Npgsql.Tests/AuthenticationTests.cs | 10 +- test/Npgsql.Tests/CommandParameterTests.cs | 6 +- test/Npgsql.Tests/CommandTests.cs | 4 +- test/Npgsql.Tests/ConnectionTests.cs | 4 +- test/Npgsql.Tests/CopyTests.cs | 4 +- .../DistributedTransactionTests.cs | 6 +- test/Npgsql.Tests/MultipleHostsTests.cs | 8 +- test/Npgsql.Tests/NpgsqlEventSourceTests.cs | 6 +- test/Npgsql.Tests/ReadBufferTests.cs | 14 +-- test/Npgsql.Tests/ReaderNewSchemaTests.cs 
| 4 +- test/Npgsql.Tests/ReaderOldSchemaTests.cs | 4 +- test/Npgsql.Tests/ReaderTests.cs | 14 +-- .../Replication/PgOutputReplicationTests.cs | 54 ++++---- test/Npgsql.Tests/SchemaTests.cs | 4 +- .../Npgsql.Tests/Support/ListLoggerFactory.cs | 20 +-- .../Support/PgCancellationRequest.cs | 26 +--- test/Npgsql.Tests/Support/TestBase.cs | 9 +- test/Npgsql.Tests/SyncOrAsyncTestBase.cs | 6 +- test/Npgsql.Tests/TestUtil.cs | 8 +- test/Npgsql.Tests/TransactionTests.cs | 4 +- test/Npgsql.Tests/Types/ArrayTests.cs | 4 +- test/Npgsql.Tests/Types/BitStringTests.cs | 4 +- test/Npgsql.Tests/Types/ByteaTests.cs | 10 +- .../Types/CompositeHandlerTests.cs | 80 ++++++------ test/Npgsql.Tests/Types/CompositeTests.cs | 12 +- test/Npgsql.Tests/Types/DomainTests.cs | 4 +- test/Npgsql.Tests/Types/EnumTests.cs | 4 +- .../Npgsql.Tests/Types/FullTextSearchTests.cs | 5 +- test/Npgsql.Tests/Types/GeometricTypeTests.cs | 4 +- test/Npgsql.Tests/Types/HstoreTests.cs | 4 +- test/Npgsql.Tests/Types/InternalTypeTests.cs | 4 +- test/Npgsql.Tests/Types/JsonPathTests.cs | 5 +- test/Npgsql.Tests/Types/LTreeTests.cs | 4 +- test/Npgsql.Tests/Types/MiscTypeTests.cs | 4 +- test/Npgsql.Tests/Types/NetworkTypeTests.cs | 4 +- test/Npgsql.Tests/Types/NumericTests.cs | 4 +- test/Npgsql.Tests/Types/NumericTypeTests.cs | 4 +- test/Npgsql.Tests/Types/RecordTests.cs | 4 +- test/Npgsql.Tests/Types/TextTests.cs | 4 +- 98 files changed, 602 insertions(+), 1220 deletions(-) diff --git a/src/Npgsql.GeoJSON/Internal/GeoJSONTypeInfoResolverFactory.cs b/src/Npgsql.GeoJSON/Internal/GeoJSONTypeInfoResolverFactory.cs index c25118f1d7..f1b56000f2 100644 --- a/src/Npgsql.GeoJSON/Internal/GeoJSONTypeInfoResolverFactory.cs +++ b/src/Npgsql.GeoJSON/Internal/GeoJSONTypeInfoResolverFactory.cs @@ -6,37 +6,17 @@ namespace Npgsql.GeoJSON.Internal; -sealed class GeoJSONTypeInfoResolverFactory : PgTypeInfoResolverFactory +sealed class GeoJSONTypeInfoResolverFactory(GeoJSONOptions options, bool geographyAsDefault, CrsMap? 
crsMap = null) + : PgTypeInfoResolverFactory { - readonly GeoJSONOptions _options; - readonly bool _geographyAsDefault; - readonly CrsMap? _crsMap; + public override IPgTypeInfoResolver CreateResolver() => new Resolver(options, geographyAsDefault, crsMap); + public override IPgTypeInfoResolver? CreateArrayResolver() => new ArrayResolver(options, geographyAsDefault, crsMap); - public GeoJSONTypeInfoResolverFactory(GeoJSONOptions options, bool geographyAsDefault, CrsMap? crsMap = null) + class Resolver(GeoJSONOptions options, bool geographyAsDefault, CrsMap? crsMap = null) + : IPgTypeInfoResolver { - _options = options; - _geographyAsDefault = geographyAsDefault; - _crsMap = crsMap; - } - - public override IPgTypeInfoResolver CreateResolver() => new Resolver(_options, _geographyAsDefault, _crsMap); - public override IPgTypeInfoResolver? CreateArrayResolver() => new ArrayResolver(_options, _geographyAsDefault, _crsMap); - - class Resolver : IPgTypeInfoResolver - { - readonly GeoJSONOptions _options; - readonly bool _geographyAsDefault; - readonly CrsMap? _crsMap; - TypeInfoMappingCollection? _mappings; - protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(), _options, _geographyAsDefault, _crsMap); - - public Resolver(GeoJSONOptions options, bool geographyAsDefault, CrsMap? crsMap = null) - { - _options = options; - _geographyAsDefault = geographyAsDefault; - _crsMap = crsMap; - } + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(), options, geographyAsDefault, crsMap); public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) => Mappings.Find(type, dataTypeName, options); @@ -83,16 +63,12 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings, } } - sealed class ArrayResolver : Resolver, IPgTypeInfoResolver + sealed class ArrayResolver(GeoJSONOptions options, bool geographyAsDefault, CrsMap? 
crsMap = null) + : Resolver(options, geographyAsDefault, crsMap), IPgTypeInfoResolver { TypeInfoMappingCollection? _mappings; new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); - public ArrayResolver(GeoJSONOptions options, bool geographyAsDefault, CrsMap? crsMap = null) - : base(options, geographyAsDefault, crsMap) - { - } - public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) => Mappings.Find(type, dataTypeName, options); diff --git a/src/Npgsql.Json.NET/Internal/JsonNetJsonConverter.cs b/src/Npgsql.Json.NET/Internal/JsonNetJsonConverter.cs index c54670d10d..5d75568f98 100644 --- a/src/Npgsql.Json.NET/Internal/JsonNetJsonConverter.cs +++ b/src/Npgsql.Json.NET/Internal/JsonNetJsonConverter.cs @@ -10,32 +10,21 @@ namespace Npgsql.Json.NET.Internal; -sealed class JsonNetJsonConverter : PgStreamingConverter +sealed class JsonNetJsonConverter(bool jsonb, Encoding textEncoding, JsonSerializerSettings settings) : PgStreamingConverter { - readonly bool _jsonb; - readonly Encoding _textEncoding; - readonly JsonSerializerSettings _settings; - - public JsonNetJsonConverter(bool jsonb, Encoding textEncoding, JsonSerializerSettings settings) - { - _jsonb = jsonb; - _textEncoding = textEncoding; - _settings = settings; - } - public override T? 
Read(PgReader reader) - => (T?)JsonNetJsonConverter.Read(async: false, _jsonb, reader, typeof(T), _settings, _textEncoding, CancellationToken.None).GetAwaiter().GetResult(); + => (T?)JsonNetJsonConverter.Read(async: false, jsonb, reader, typeof(T), settings, textEncoding, CancellationToken.None).GetAwaiter().GetResult(); public override async ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) - => (T?)await JsonNetJsonConverter.Read(async: true, _jsonb, reader, typeof(T), _settings, _textEncoding, cancellationToken).ConfigureAwait(false); + => (T?)await JsonNetJsonConverter.Read(async: true, jsonb, reader, typeof(T), settings, textEncoding, cancellationToken).ConfigureAwait(false); public override Size GetSize(SizeContext context, T? value, ref object? writeState) - => JsonNetJsonConverter.GetSize(_jsonb, context, typeof(T), _settings, _textEncoding, value, ref writeState); + => JsonNetJsonConverter.GetSize(jsonb, context, typeof(T), settings, textEncoding, value, ref writeState); public override void Write(PgWriter writer, T? value) - => JsonNetJsonConverter.Write(_jsonb, async: false, writer, CancellationToken.None).GetAwaiter().GetResult(); + => JsonNetJsonConverter.Write(jsonb, async: false, writer, CancellationToken.None).GetAwaiter().GetResult(); public override ValueTask WriteAsync(PgWriter writer, T? value, CancellationToken cancellationToken = default) - => JsonNetJsonConverter.Write(_jsonb, async: true, writer, cancellationToken); + => JsonNetJsonConverter.Write(jsonb, async: true, writer, cancellationToken); } // Split out to avoid unnecessary code duplication. 
diff --git a/src/Npgsql.Json.NET/Internal/JsonNetPocoTypeInfoResolverFactory.cs b/src/Npgsql.Json.NET/Internal/JsonNetPocoTypeInfoResolverFactory.cs index 57eb05f1c7..c038f17aab 100644 --- a/src/Npgsql.Json.NET/Internal/JsonNetPocoTypeInfoResolverFactory.cs +++ b/src/Npgsql.Json.NET/Internal/JsonNetPocoTypeInfoResolverFactory.cs @@ -9,43 +9,29 @@ namespace Npgsql.Json.NET.Internal; [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] [RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] -sealed class JsonNetPocoTypeInfoResolverFactory : PgTypeInfoResolverFactory +sealed class JsonNetPocoTypeInfoResolverFactory( + Type[]? jsonbClrTypes = null, + Type[]? jsonClrTypes = null, + JsonSerializerSettings? serializerSettings = null) + : PgTypeInfoResolverFactory { - readonly Type[]? _jsonbClrTypes; - readonly Type[]? _jsonClrTypes; - readonly JsonSerializerSettings? _serializerSettings; - - public JsonNetPocoTypeInfoResolverFactory(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerSettings? serializerSettings = null) - { - _jsonbClrTypes = jsonbClrTypes; - _jsonClrTypes = jsonClrTypes; - _serializerSettings = serializerSettings; - } - - public override IPgTypeInfoResolver CreateResolver() => new Resolver(_jsonbClrTypes, _jsonClrTypes, _serializerSettings); - public override IPgTypeInfoResolver? CreateArrayResolver() => new ArrayResolver(_jsonbClrTypes, _jsonClrTypes, _serializerSettings); + public override IPgTypeInfoResolver CreateResolver() => new Resolver(jsonbClrTypes, jsonClrTypes, serializerSettings); + public override IPgTypeInfoResolver? 
CreateArrayResolver() => new ArrayResolver(jsonbClrTypes, jsonClrTypes, serializerSettings); [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] [RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] - class Resolver : DynamicTypeInfoResolver, IPgTypeInfoResolver + class Resolver(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerSettings? serializerSettings = null) + : DynamicTypeInfoResolver, IPgTypeInfoResolver { - readonly Type[]? _jsonbClrTypes; - readonly Type[]? _jsonClrTypes; - readonly JsonSerializerSettings _serializerSettings; + readonly JsonSerializerSettings _serializerSettings = serializerSettings ?? JsonConvert.DefaultSettings?.Invoke() ?? new JsonSerializerSettings(); TypeInfoMappingCollection? _mappings; - protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(), _jsonbClrTypes ?? [], _jsonClrTypes ?? [], _serializerSettings); + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(), jsonbClrTypes ?? [], jsonClrTypes ?? [], _serializerSettings); const string JsonDataTypeName = "pg_catalog.json"; const string JsonbDataTypeName = "pg_catalog.jsonb"; - public Resolver(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerSettings? serializerSettings = null) - { - _jsonbClrTypes = jsonbClrTypes; - _jsonClrTypes = jsonClrTypes; - // Capture default settings during construction. - _serializerSettings = serializerSettings ?? JsonConvert.DefaultSettings?.Invoke() ?? new JsonSerializerSettings(); - } + // Capture default settings during construction. 
TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings, Type[] jsonbClrTypes, Type[] jsonClrTypes, JsonSerializerSettings serializerSettings) { @@ -96,16 +82,12 @@ static PgConverter CreateConverter(Type valueType, bool jsonb, Encoding textEnco [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] [RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] - sealed class ArrayResolver : Resolver, IPgTypeInfoResolver + sealed class ArrayResolver(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerSettings? serializerSettings = null) + : Resolver(jsonbClrTypes, jsonClrTypes, serializerSettings), IPgTypeInfoResolver { TypeInfoMappingCollection? _mappings; new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings), base.Mappings); - public ArrayResolver(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerSettings? serializerSettings = null) - : base(jsonbClrTypes, jsonClrTypes, serializerSettings) - { - } - public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) => Mappings.Find(type, dataTypeName, options) ?? base.GetTypeInfo(type, dataTypeName, options); diff --git a/src/Npgsql.Json.NET/Internal/JsonNetTypeInfoResolverFactory.cs b/src/Npgsql.Json.NET/Internal/JsonNetTypeInfoResolverFactory.cs index 1f07bf0252..cf2be51cc4 100644 --- a/src/Npgsql.Json.NET/Internal/JsonNetTypeInfoResolverFactory.cs +++ b/src/Npgsql.Json.NET/Internal/JsonNetTypeInfoResolverFactory.cs @@ -6,26 +6,18 @@ namespace Npgsql.Json.NET.Internal; -sealed class JsonNetTypeInfoResolverFactory : PgTypeInfoResolverFactory +sealed class JsonNetTypeInfoResolverFactory(JsonSerializerSettings? settings = null) : PgTypeInfoResolverFactory { - readonly JsonSerializerSettings? 
_settings; + public override IPgTypeInfoResolver CreateResolver() => new Resolver(settings); + public override IPgTypeInfoResolver? CreateArrayResolver() => new ArrayResolver(settings); - public JsonNetTypeInfoResolverFactory(JsonSerializerSettings? settings = null) => _settings = settings; - - public override IPgTypeInfoResolver CreateResolver() => new Resolver(_settings); - public override IPgTypeInfoResolver? CreateArrayResolver() => new ArrayResolver(_settings); - - class Resolver : IPgTypeInfoResolver + class Resolver(JsonSerializerSettings? settings = null) : IPgTypeInfoResolver { TypeInfoMappingCollection? _mappings; - readonly JsonSerializerSettings _serializerSettings; + readonly JsonSerializerSettings _serializerSettings = settings ?? JsonConvert.DefaultSettings?.Invoke() ?? new JsonSerializerSettings(); protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(), _serializerSettings); - public Resolver(JsonSerializerSettings? settings = null) - { - // Capture default settings during construction. - _serializerSettings = settings ?? JsonConvert.DefaultSettings?.Invoke() ?? new JsonSerializerSettings(); - } + // Capture default settings during construction. static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings, JsonSerializerSettings settings) { @@ -50,13 +42,11 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings, => Mappings.Find(type, dataTypeName, options); } - sealed class ArrayResolver : Resolver, IPgTypeInfoResolver + sealed class ArrayResolver(JsonSerializerSettings? settings = null) : Resolver(settings), IPgTypeInfoResolver { TypeInfoMappingCollection? _mappings; new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); - public ArrayResolver(JsonSerializerSettings? settings = null) : base(settings) {} - public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) => Mappings.Find(type, dataTypeName, options); diff --git a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeInfoResolverFactory.cs b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeInfoResolverFactory.cs index 7484dc7832..e533d62207 100644 --- a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeInfoResolverFactory.cs +++ b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeInfoResolverFactory.cs @@ -7,24 +7,15 @@ namespace Npgsql.NetTopologySuite.Internal; -sealed class NetTopologySuiteTypeInfoResolverFactory : PgTypeInfoResolverFactory +sealed class NetTopologySuiteTypeInfoResolverFactory( + CoordinateSequenceFactory? coordinateSequenceFactory, + PrecisionModel? precisionModel, + Ordinates handleOrdinates, + bool geographyAsDefault) + : PgTypeInfoResolverFactory { - readonly CoordinateSequenceFactory? _coordinateSequenceFactory; - readonly PrecisionModel? _precisionModel; - readonly Ordinates _handleOrdinates; - readonly bool _geographyAsDefault; - - public NetTopologySuiteTypeInfoResolverFactory(CoordinateSequenceFactory? coordinateSequenceFactory, PrecisionModel? precisionModel, - Ordinates handleOrdinates, bool geographyAsDefault) - { - _coordinateSequenceFactory = coordinateSequenceFactory; - _precisionModel = precisionModel; - _handleOrdinates = handleOrdinates; - _geographyAsDefault = geographyAsDefault; - } - - public override IPgTypeInfoResolver CreateResolver() => new Resolver(_coordinateSequenceFactory, _precisionModel, _handleOrdinates, _geographyAsDefault); - public override IPgTypeInfoResolver? CreateArrayResolver() => new ArrayResolver(_coordinateSequenceFactory, _precisionModel, _handleOrdinates, _geographyAsDefault); + public override IPgTypeInfoResolver CreateResolver() => new Resolver(coordinateSequenceFactory, precisionModel, handleOrdinates, geographyAsDefault); + public override IPgTypeInfoResolver? 
CreateArrayResolver() => new ArrayResolver(coordinateSequenceFactory, precisionModel, handleOrdinates, geographyAsDefault); class Resolver : IPgTypeInfoResolver { @@ -82,17 +73,16 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings, } } - sealed class ArrayResolver : Resolver, IPgTypeInfoResolver + sealed class ArrayResolver( + CoordinateSequenceFactory? coordinateSequenceFactory, + PrecisionModel? precisionModel, + Ordinates handleOrdinates, + bool geographyAsDefault) + : Resolver(coordinateSequenceFactory, precisionModel, handleOrdinates, geographyAsDefault), IPgTypeInfoResolver { TypeInfoMappingCollection? _mappings; new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings), _geographyAsDefault); - public ArrayResolver(CoordinateSequenceFactory? coordinateSequenceFactory, PrecisionModel? precisionModel, - Ordinates handleOrdinates, bool geographyAsDefault) - : base(coordinateSequenceFactory, precisionModel, handleOrdinates, geographyAsDefault) - { - } - public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) => Mappings.Find(type, dataTypeName, options); diff --git a/src/Npgsql.NodaTime/Internal/DateIntervalConverter.cs b/src/Npgsql.NodaTime/Internal/DateIntervalConverter.cs index 5e25d8bfcc..1bf2d027df 100644 --- a/src/Npgsql.NodaTime/Internal/DateIntervalConverter.cs +++ b/src/Npgsql.NodaTime/Internal/DateIntervalConverter.cs @@ -6,17 +6,9 @@ namespace Npgsql.NodaTime.Internal; -public class DateIntervalConverter : PgStreamingConverter +public class DateIntervalConverter(PgConverter> rangeConverter, bool dateTimeInfinityConversions) + : PgStreamingConverter { - readonly bool _dateTimeInfinityConversions; - readonly PgConverter> _rangeConverter; - - public DateIntervalConverter(PgConverter> rangeConverter, bool dateTimeInfinityConversions) - { - _rangeConverter = rangeConverter; - _dateTimeInfinityConversions = dateTimeInfinityConversions; - } - public override DateInterval Read(PgReader reader) => Read(async: false, reader, CancellationToken.None).GetAwaiter().GetResult(); @@ -26,24 +18,24 @@ public override ValueTask ReadAsync(PgReader reader, CancellationT async ValueTask Read(bool async, PgReader reader, CancellationToken cancellationToken) { var range = async - ? await _rangeConverter.ReadAsync(reader, cancellationToken).ConfigureAwait(false) + ? await rangeConverter.ReadAsync(reader, cancellationToken).ConfigureAwait(false) // ReSharper disable once MethodHasAsyncOverloadWithCancellation - : _rangeConverter.Read(reader); + : rangeConverter.Read(reader); var upperBound = range.UpperBound; - if (upperBound != LocalDate.MaxIsoValue || !_dateTimeInfinityConversions) + if (upperBound != LocalDate.MaxIsoValue || !dateTimeInfinityConversions) upperBound -= Period.FromDays(1); return new(range.LowerBound, upperBound); } public override Size GetSize(SizeContext context, DateInterval value, ref object? 
writeState) - => _rangeConverter.GetSize(context, new NpgsqlRange(value.Start, value.End), ref writeState); + => rangeConverter.GetSize(context, new NpgsqlRange(value.Start, value.End), ref writeState); public override void Write(PgWriter writer, DateInterval value) - => _rangeConverter.Write(writer, new NpgsqlRange(value.Start, value.End)); + => rangeConverter.Write(writer, new NpgsqlRange(value.Start, value.End)); public override ValueTask WriteAsync(PgWriter writer, DateInterval value, CancellationToken cancellationToken = default) - => _rangeConverter.WriteAsync(writer, new NpgsqlRange(value.Start, value.End), cancellationToken); + => rangeConverter.WriteAsync(writer, new NpgsqlRange(value.Start, value.End), cancellationToken); } diff --git a/src/Npgsql.NodaTime/Internal/IntervalConverter.cs b/src/Npgsql.NodaTime/Internal/IntervalConverter.cs index 3ca9ca9ab0..7da4aa401c 100644 --- a/src/Npgsql.NodaTime/Internal/IntervalConverter.cs +++ b/src/Npgsql.NodaTime/Internal/IntervalConverter.cs @@ -6,13 +6,8 @@ namespace Npgsql.NodaTime.Internal; -public class IntervalConverter : PgStreamingConverter +public class IntervalConverter(PgConverter> rangeConverter) : PgStreamingConverter { - readonly PgConverter> _rangeConverter; - - public IntervalConverter(PgConverter> rangeConverter) - => _rangeConverter = rangeConverter; - public override Interval Read(PgReader reader) => Read(async: false, reader, CancellationToken.None).GetAwaiter().GetResult(); @@ -22,9 +17,9 @@ public override ValueTask ReadAsync(PgReader reader, CancellationToken async ValueTask Read(bool async, PgReader reader, CancellationToken cancellationToken) { var range = async - ? await _rangeConverter.ReadAsync(reader, cancellationToken).ConfigureAwait(false) + ? 
await rangeConverter.ReadAsync(reader, cancellationToken).ConfigureAwait(false) // ReSharper disable once MethodHasAsyncOverloadWithCancellation - : _rangeConverter.Read(reader); + : rangeConverter.Read(reader); // NodaTime Interval includes the start instant and excludes the end instant. Instant? start = range.LowerBoundInfinite @@ -42,13 +37,13 @@ async ValueTask Read(bool async, PgReader reader, CancellationToken ca } public override Size GetSize(SizeContext context, Interval value, ref object? writeState) - => _rangeConverter.GetSize(context, IntervalToNpgsqlRange(value), ref writeState); + => rangeConverter.GetSize(context, IntervalToNpgsqlRange(value), ref writeState); public override void Write(PgWriter writer, Interval value) - => _rangeConverter.Write(writer, IntervalToNpgsqlRange(value)); + => rangeConverter.Write(writer, IntervalToNpgsqlRange(value)); public override ValueTask WriteAsync(PgWriter writer, Interval value, CancellationToken cancellationToken = default) - => _rangeConverter.WriteAsync(writer, IntervalToNpgsqlRange(value), cancellationToken); + => rangeConverter.WriteAsync(writer, IntervalToNpgsqlRange(value), cancellationToken); static NpgsqlRange IntervalToNpgsqlRange(Interval interval) => new( diff --git a/src/Npgsql.NodaTime/Internal/LegacyConverters.cs b/src/Npgsql.NodaTime/Internal/LegacyConverters.cs index 54393a4821..c0b4b82268 100644 --- a/src/Npgsql.NodaTime/Internal/LegacyConverters.cs +++ b/src/Npgsql.NodaTime/Internal/LegacyConverters.cs @@ -5,17 +5,9 @@ namespace Npgsql.NodaTime.Internal; -sealed class LegacyTimestampTzZonedDateTimeConverter : PgBufferedConverter +sealed class LegacyTimestampTzZonedDateTimeConverter(DateTimeZone dateTimeZone, bool dateTimeInfinityConversions) + : PgBufferedConverter { - readonly DateTimeZone _dateTimeZone; - readonly bool _dateTimeInfinityConversions; - - public LegacyTimestampTzZonedDateTimeConverter(DateTimeZone dateTimeZone, bool dateTimeInfinityConversions) - { - _dateTimeZone = 
dateTimeZone; - _dateTimeInfinityConversions = dateTimeInfinityConversions; - } - public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) { bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long)); @@ -24,34 +16,26 @@ public override bool CanConvert(DataFormat format, out BufferRequirements buffer protected override ZonedDateTime ReadCore(PgReader reader) { - var instant = DecodeInstant(reader.ReadInt64(), _dateTimeInfinityConversions); - if (_dateTimeInfinityConversions && (instant == Instant.MaxValue || instant == Instant.MinValue)) + var instant = DecodeInstant(reader.ReadInt64(), dateTimeInfinityConversions); + if (dateTimeInfinityConversions && (instant == Instant.MaxValue || instant == Instant.MinValue)) throw new InvalidCastException("Infinity values not supported for timestamp with time zone"); - return instant.InZone(_dateTimeZone); + return instant.InZone(dateTimeZone); } protected override void WriteCore(PgWriter writer, ZonedDateTime value) { var instant = value.ToInstant(); - if (_dateTimeInfinityConversions && (instant == Instant.MaxValue || instant == Instant.MinValue)) + if (dateTimeInfinityConversions && (instant == Instant.MaxValue || instant == Instant.MinValue)) throw new ArgumentException("Infinity values not supported for timestamp with time zone"); - writer.WriteInt64(EncodeInstant(instant, _dateTimeInfinityConversions)); + writer.WriteInt64(EncodeInstant(instant, dateTimeInfinityConversions)); } } -sealed class LegacyTimestampTzOffsetDateTimeConverter : PgBufferedConverter +sealed class LegacyTimestampTzOffsetDateTimeConverter(DateTimeZone dateTimeZone, bool dateTimeInfinityConversions) + : PgBufferedConverter { - readonly bool _dateTimeInfinityConversions; - readonly DateTimeZone _dateTimeZone; - - public LegacyTimestampTzOffsetDateTimeConverter(DateTimeZone dateTimeZone, bool dateTimeInfinityConversions) - { - _dateTimeInfinityConversions = dateTimeInfinityConversions; - _dateTimeZone = 
dateTimeZone; - } - public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) { bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long)); @@ -60,17 +44,17 @@ public override bool CanConvert(DataFormat format, out BufferRequirements buffer protected override OffsetDateTime ReadCore(PgReader reader) { - var instant = DecodeInstant(reader.ReadInt64(), _dateTimeInfinityConversions); - if (_dateTimeInfinityConversions && (instant == Instant.MaxValue || instant == Instant.MinValue)) + var instant = DecodeInstant(reader.ReadInt64(), dateTimeInfinityConversions); + if (dateTimeInfinityConversions && (instant == Instant.MaxValue || instant == Instant.MinValue)) throw new InvalidCastException("Infinity values not supported for timestamp with time zone"); - return instant.InZone(_dateTimeZone).ToOffsetDateTime(); + return instant.InZone(dateTimeZone).ToOffsetDateTime(); } protected override void WriteCore(PgWriter writer, OffsetDateTime value) { var instant = value.ToInstant(); - if (_dateTimeInfinityConversions && (instant == Instant.MaxValue || instant == Instant.MinValue)) + if (dateTimeInfinityConversions && (instant == Instant.MaxValue || instant == Instant.MinValue)) throw new ArgumentException("Infinity values not supported for timestamp with time zone"); writer.WriteInt64(EncodeInstant(instant, true)); diff --git a/src/Npgsql.NodaTime/Internal/LocalDateConverter.cs b/src/Npgsql.NodaTime/Internal/LocalDateConverter.cs index e6be7fe69b..ffaa6e8d45 100644 --- a/src/Npgsql.NodaTime/Internal/LocalDateConverter.cs +++ b/src/Npgsql.NodaTime/Internal/LocalDateConverter.cs @@ -5,13 +5,8 @@ namespace Npgsql.NodaTime.Internal; -sealed class LocalDateConverter : PgBufferedConverter +sealed class LocalDateConverter(bool dateTimeInfinityConversions) : PgBufferedConverter { - readonly bool _dateTimeInfinityConversions; - - public LocalDateConverter(bool dateTimeInfinityConversions) - => _dateTimeInfinityConversions = 
dateTimeInfinityConversions; - public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) { bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(int)); @@ -21,10 +16,10 @@ public override bool CanConvert(DataFormat format, out BufferRequirements buffer protected override LocalDate ReadCore(PgReader reader) => reader.ReadInt32() switch { - int.MaxValue => _dateTimeInfinityConversions + int.MaxValue => dateTimeInfinityConversions ? LocalDate.MaxIsoValue : throw new InvalidCastException(NpgsqlNodaTimeStrings.CannotReadInfinityValue), - int.MinValue => _dateTimeInfinityConversions + int.MinValue => dateTimeInfinityConversions ? LocalDate.MinIsoValue : throw new InvalidCastException(NpgsqlNodaTimeStrings.CannotReadInfinityValue), var value => new LocalDate().PlusDays(value + 730119) @@ -32,7 +27,7 @@ protected override LocalDate ReadCore(PgReader reader) protected override void WriteCore(PgWriter writer, LocalDate value) { - if (_dateTimeInfinityConversions) + if (dateTimeInfinityConversions) { if (value == LocalDate.MaxIsoValue) { diff --git a/src/Npgsql.NodaTime/Internal/TimestampConverters.cs b/src/Npgsql.NodaTime/Internal/TimestampConverters.cs index 6808503638..4ac841c80e 100644 --- a/src/Npgsql.NodaTime/Internal/TimestampConverters.cs +++ b/src/Npgsql.NodaTime/Internal/TimestampConverters.cs @@ -5,13 +5,8 @@ namespace Npgsql.NodaTime.Internal; -sealed class InstantConverter : PgBufferedConverter +sealed class InstantConverter(bool dateTimeInfinityConversions) : PgBufferedConverter { - readonly bool _dateTimeInfinityConversions; - - public InstantConverter(bool dateTimeInfinityConversions) - => _dateTimeInfinityConversions = dateTimeInfinityConversions; - public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) { bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long)); @@ -19,19 +14,14 @@ public override bool CanConvert(DataFormat format, out BufferRequirements buffer } 
protected override Instant ReadCore(PgReader reader) - => DecodeInstant(reader.ReadInt64(), _dateTimeInfinityConversions); + => DecodeInstant(reader.ReadInt64(), dateTimeInfinityConversions); protected override void WriteCore(PgWriter writer, Instant value) - => writer.WriteInt64(EncodeInstant(value, _dateTimeInfinityConversions)); + => writer.WriteInt64(EncodeInstant(value, dateTimeInfinityConversions)); } -sealed class ZonedDateTimeConverter : PgBufferedConverter +sealed class ZonedDateTimeConverter(bool dateTimeInfinityConversions) : PgBufferedConverter { - readonly bool _dateTimeInfinityConversions; - - public ZonedDateTimeConverter(bool dateTimeInfinityConversions) - => _dateTimeInfinityConversions = dateTimeInfinityConversions; - public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) { bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long)); @@ -39,7 +29,7 @@ public override bool CanConvert(DataFormat format, out BufferRequirements buffer } protected override ZonedDateTime ReadCore(PgReader reader) - => DecodeInstant(reader.ReadInt64(), _dateTimeInfinityConversions).InUtc(); + => DecodeInstant(reader.ReadInt64(), dateTimeInfinityConversions).InUtc(); protected override void WriteCore(PgWriter writer, ZonedDateTime value) { @@ -51,17 +41,12 @@ protected override void WriteCore(PgWriter writer, ZonedDateTime value) "See the Npgsql.EnableLegacyTimestampBehavior AppContext switch to enable legacy behavior."); } - writer.WriteInt64(EncodeInstant(value.ToInstant(), _dateTimeInfinityConversions)); + writer.WriteInt64(EncodeInstant(value.ToInstant(), dateTimeInfinityConversions)); } } -sealed class OffsetDateTimeConverter : PgBufferedConverter +sealed class OffsetDateTimeConverter(bool dateTimeInfinityConversions) : PgBufferedConverter { - readonly bool _dateTimeInfinityConversions; - - public OffsetDateTimeConverter(bool dateTimeInfinityConversions) - => _dateTimeInfinityConversions = dateTimeInfinityConversions; 
- public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) { bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long)); @@ -69,7 +54,7 @@ public override bool CanConvert(DataFormat format, out BufferRequirements buffer } protected override OffsetDateTime ReadCore(PgReader reader) - => DecodeInstant(reader.ReadInt64(), _dateTimeInfinityConversions).WithOffset(Offset.Zero); + => DecodeInstant(reader.ReadInt64(), dateTimeInfinityConversions).WithOffset(Offset.Zero); protected override void WriteCore(PgWriter writer, OffsetDateTime value) { @@ -81,17 +66,12 @@ protected override void WriteCore(PgWriter writer, OffsetDateTime value) "See the Npgsql.EnableLegacyTimestampBehavior AppContext switch to enable legacy behavior."); } - writer.WriteInt64(EncodeInstant(value.ToInstant(), _dateTimeInfinityConversions)); + writer.WriteInt64(EncodeInstant(value.ToInstant(), dateTimeInfinityConversions)); } } -sealed class LocalDateTimeConverter : PgBufferedConverter +sealed class LocalDateTimeConverter(bool dateTimeInfinityConversions) : PgBufferedConverter { - readonly bool _dateTimeInfinityConversions; - - public LocalDateTimeConverter(bool dateTimeInfinityConversions) - => _dateTimeInfinityConversions = dateTimeInfinityConversions; - public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) { bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long)); @@ -99,8 +79,8 @@ public override bool CanConvert(DataFormat format, out BufferRequirements buffer } protected override LocalDateTime ReadCore(PgReader reader) - => DecodeInstant(reader.ReadInt64(), _dateTimeInfinityConversions).InUtc().LocalDateTime; + => DecodeInstant(reader.ReadInt64(), dateTimeInfinityConversions).InUtc().LocalDateTime; protected override void WriteCore(PgWriter writer, LocalDateTime value) - => writer.WriteInt64(EncodeInstant(value.InUtc().ToInstant(), _dateTimeInfinityConversions)); + => 
writer.WriteInt64(EncodeInstant(value.InUtc().ToInstant(), dateTimeInfinityConversions)); } diff --git a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs index 4bbbd58d67..8c6883109d 100644 --- a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs +++ b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs @@ -11,18 +11,11 @@ namespace Npgsql.BackendMessages; -readonly struct ColumnInfo +readonly struct ColumnInfo(PgConverterInfo converterInfo, DataFormat dataFormat, bool asObject) { - public ColumnInfo(PgConverterInfo converterInfo, DataFormat dataFormat, bool asObject) - { - ConverterInfo = converterInfo; - DataFormat = dataFormat; - AsObject = asObject; - } - - public PgConverterInfo ConverterInfo { get; } - public DataFormat DataFormat { get; } - public bool AsObject { get; } + public PgConverterInfo ConverterInfo { get; } = converterInfo; + public DataFormat DataFormat { get; } = dataFormat; + public bool AsObject { get; } = asObject; } /// diff --git a/src/Npgsql/Internal/ChainTypeInfoResolver.cs b/src/Npgsql/Internal/ChainTypeInfoResolver.cs index 18c39d80b6..4c7f56e454 100644 --- a/src/Npgsql/Internal/ChainTypeInfoResolver.cs +++ b/src/Npgsql/Internal/ChainTypeInfoResolver.cs @@ -4,12 +4,9 @@ namespace Npgsql.Internal; -sealed class ChainTypeInfoResolver : IPgTypeInfoResolver +sealed class ChainTypeInfoResolver(IEnumerable resolvers) : IPgTypeInfoResolver { - readonly IPgTypeInfoResolver[] _resolvers; - - public ChainTypeInfoResolver(IEnumerable resolvers) - => _resolvers = new List(resolvers).ToArray(); + readonly IPgTypeInfoResolver[] _resolvers = new List(resolvers).ToArray(); public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) { diff --git a/src/Npgsql/Internal/Composites/Metadata/CompositeBuilder.cs b/src/Npgsql/Internal/Composites/Metadata/CompositeBuilder.cs index c51c0dafa0..e12d44bf88 100644 --- a/src/Npgsql/Internal/Composites/Metadata/CompositeBuilder.cs +++ b/src/Npgsql/Internal/Composites/Metadata/CompositeBuilder.cs @@ -4,13 +4,11 @@ namespace Npgsql.Internal.Composites; -abstract class CompositeBuilder +abstract class CompositeBuilder(StrongBox[] tempBoxes) { - protected StrongBox[] _tempBoxes; + protected StrongBox[] _tempBoxes = tempBoxes; protected int _currentField; - protected CompositeBuilder(StrongBox[] tempBoxes) => _tempBoxes = tempBoxes; - protected abstract void Construct(); protected abstract void SetField(TValue value); @@ -35,20 +33,15 @@ public void AddValue(TValue value) } } -sealed class CompositeBuilder : CompositeBuilder, IDisposable +sealed class CompositeBuilder(CompositeInfo compositeInfo) : CompositeBuilder(compositeInfo.CreateTempBoxes()), IDisposable { - readonly CompositeInfo _compositeInfo; T _instance = default!; object? _boxedInstance; - public CompositeBuilder(CompositeInfo compositeInfo) - : base(compositeInfo.CreateTempBoxes()) - => _compositeInfo = compositeInfo; - public T Complete() { - if (_currentField < _compositeInfo.Fields.Count) - throw new InvalidOperationException($"Missing values, expected: {_compositeInfo.Fields.Count} got: {_currentField}"); + if (_currentField < compositeInfo.Fields.Count) + throw new InvalidOperationException($"Missing values, expected: {compositeInfo.Fields.Count} got: {_currentField}"); return (T)(_boxedInstance ?? 
_instance!); } @@ -70,25 +63,25 @@ protected override void Construct() if (_currentField < tempBoxes.Length - 1) throw new InvalidOperationException($"Missing values, expected: {tempBoxes.Length} got: {_currentField + 1}"); - var fields = _compositeInfo.Fields; - var args = ArrayPool.Shared.Rent(_compositeInfo.ConstructorParameters); + var fields = compositeInfo.Fields; + var args = ArrayPool.Shared.Rent(compositeInfo.ConstructorParameters); for (var i = 0; i < tempBoxes.Length; i++) { var field = fields[i]; if (field.ConstructorParameterIndex is { } argIndex) args[argIndex] = tempBoxes[i]; } - _instance = _compositeInfo.Constructor(args)!; + _instance = compositeInfo.Constructor(args)!; ArrayPool.Shared.Return(args); - if (tempBoxes.Length == _compositeInfo.Fields.Count) + if (tempBoxes.Length == compositeInfo.Fields.Count) return; // We're expecting or already have stored more fields, so box the instance once here. _boxedInstance = _instance; for (var i = 0; i < tempBoxes.Length; i++) { - var field = _compositeInfo.Fields[i]; + var field = compositeInfo.Fields[i]; if (field.ConstructorParameterIndex is null) field.Set(_boxedInstance, tempBoxes[i]); } @@ -100,7 +93,7 @@ protected override void SetField(TValue value) ThrowHelper.ThrowInvalidOperationException("Not constructed yet, or no more fields were expected."); var currentField = _currentField; - var fields = _compositeInfo.Fields; + var fields = compositeInfo.Fields; if (currentField > fields.Count - 1) ThrowHelper.ThrowIndexOutOfRangeException($"Cannot set field {value} at position {currentField} - all fields have already been set"); diff --git a/src/Npgsql/Internal/Converters/ArrayConverter.cs b/src/Npgsql/Internal/Converters/ArrayConverter.cs index daf05aceb1..262f748651 100644 --- a/src/Npgsql/Internal/Converters/ArrayConverter.cs +++ b/src/Npgsql/Internal/Converters/ArrayConverter.cs @@ -55,40 +55,31 @@ interface IElementOperations ValueTask Write(bool async, PgWriter writer, object collection, Indices 
indices, CancellationToken cancellationToken = default); } -readonly struct PgArrayConverter +readonly struct PgArrayConverter( + IElementOperations elemOps, + bool elemTypeDbNullable, + int? expectedDimensions, + BufferRequirements bufferRequirements, + PgTypeId elemTypeId, + int pgLowerBound = 1) { public const string ReadNonNullableCollectionWithNullsExceptionMessage = "Cannot read a non-nullable collection of elements because the returned array contains nulls. Call GetFieldValue with a nullable collection type instead."; public const int MaxDimensions = 8; - readonly IElementOperations _elemOps; - readonly int? _expectedDimensions; - readonly BufferRequirements _bufferRequirements; - public bool ElemTypeDbNullable { get; } - readonly int _pgLowerBound; - readonly PgTypeId _elemTypeId; - - public PgArrayConverter(IElementOperations elemOps, bool elemTypeDbNullable, int? expectedDimensions, BufferRequirements bufferRequirements, PgTypeId elemTypeId, int pgLowerBound = 1) - { - _elemTypeId = elemTypeId; - ElemTypeDbNullable = elemTypeDbNullable; - _pgLowerBound = pgLowerBound; - _elemOps = elemOps; - _expectedDimensions = expectedDimensions; - _bufferRequirements = bufferRequirements; - } + public bool ElemTypeDbNullable { get; } = elemTypeDbNullable; bool IsDbNull(object values, Indices indices) { object? state = null; - return _elemOps.GetSizeOrDbNull(new(DataFormat.Binary, _bufferRequirements.Write), values, indices, ref state) is null; + return elemOps.GetSizeOrDbNull(new(DataFormat.Binary, bufferRequirements.Write), values, indices, ref state) is null; } Size GetElemsSize(object values, (Size, object?)[] elemStates, out bool anyElementState, DataFormat format, int count, Indices indices, int[]? 
lengths = null) { Debug.Assert(elemStates.Length >= count); var totalSize = Size.Zero; - var context = new SizeContext(format, _bufferRequirements.Write); + var context = new SizeContext(format, bufferRequirements.Write); anyElementState = false; var lastLength = lengths?[^1] ?? count; ref var lastIndex = ref indices.GetItem(indices.Count - 1); @@ -97,7 +88,7 @@ Size GetElemsSize(object values, (Size, object?)[] elemStates, out bool anyEleme { ref var elemItem = ref elemStates[i++]; var elemState = (object?)null; - var size = _elemOps.GetSizeOrDbNull(context, values, indices, ref elemState); + var size = elemOps.GetSizeOrDbNull(context, values, indices, ref elemState); anyElementState = anyElementState || elemState is not null; elemItem = (size ?? -1, elemState); totalSize = totalSize.Combine(size ?? 0); @@ -134,7 +125,7 @@ int GetFormatSize(int count, int dimensions) public Size GetSize(SizeContext context, object values, ref object? writeState) { - var count = _elemOps.GetCollectionCount(values, out var lengths); + var count = elemOps.GetCollectionCount(values, out var lengths); var dimensions = lengths?.Length ?? 1; if (dimensions > MaxDimensions) ThrowHelper.ThrowArgumentException($"Postgres arrays can have at most {MaxDimensions} dimensions.", nameof(values)); @@ -145,7 +136,7 @@ public Size GetSize(SizeContext context, object values, ref object? 
writeState) Size elemsSize; var indices = Indices.Create(dimensions); - if (_bufferRequirements.Write is { Kind: SizeKind.Exact } req) + if (bufferRequirements.Write is { Kind: SizeKind.Exact } req) { elemsSize = GetFixedElemsSize(req, values, count, indices, lengths); writeState = new WriteState { Count = count, Indices = indices, Lengths = lengths, ArrayPool = null, Data = default, AnyWriteState = false }; @@ -183,7 +174,7 @@ unsafe object ReadDimsAndCreateCollection(PgReader reader, int dimensions, out i dimLengths[i] = lastDimLength; } - var collection = _elemOps.CreateCollection(dimLengths.Slice(0, dimensions)); + var collection = elemOps.CreateCollection(dimLengths.Slice(0, dimensions)); Debug.Assert(dimensions <= 1 || collection is Array a && a.Rank == dimensions); return collection; } @@ -200,10 +191,10 @@ public async ValueTask Read(bool async, PgReader reader, CancellationTok var containsNulls = reader.ReadInt32() is 1; _ = reader.ReadUInt32(); // Element OID. - if (dimensions is not 0 && _expectedDimensions is not null && dimensions != _expectedDimensions) + if (dimensions is not 0 && expectedDimensions is not null && dimensions != expectedDimensions) ThrowHelper.ThrowInvalidCastException( $"Cannot read an array value with {dimensions} dimension{(dimensions == 1 ? "" : "s")} into a " - + $"collection type with {_expectedDimensions} dimension{(_expectedDimensions == 1 ? "" : "s")}. " + + $"collection type with {expectedDimensions} dimension{(expectedDimensions == 1 ? "" : "s")}. 
" + $"Call GetValue or a version of GetFieldValue with the commas being the expected amount of dimensions."); if (containsNulls && !ElemTypeDbNullable) @@ -217,7 +208,7 @@ public async ValueTask Read(bool async, PgReader reader, CancellationTok if (dimensions is 0 || lastDimLength is 0) return collection; - _ = _elemOps.GetCollectionCount(collection, out var dimLengths); + _ = elemOps.GetCollectionCount(collection, out var dimLengths); var indices = Indices.Create(dimensions); do @@ -229,10 +220,10 @@ public async ValueTask Read(bool async, PgReader reader, CancellationTok var isDbNull = length == -1; if (!isDbNull) { - var scope = await reader.BeginNestedRead(async, length, _bufferRequirements.Read, cancellationToken).ConfigureAwait(false); + var scope = await reader.BeginNestedRead(async, length, bufferRequirements.Read, cancellationToken).ConfigureAwait(false); try { - await _elemOps.Read(async, reader, isDbNull, collection, indices, cancellationToken).ConfigureAwait(false); + await elemOps.Read(async, reader, isDbNull, collection, indices, cancellationToken).ConfigureAwait(false); } finally { @@ -243,7 +234,7 @@ public async ValueTask Read(bool async, PgReader reader, CancellationTok } } else - await _elemOps.Read(async, reader, isDbNull, collection, indices, cancellationToken).ConfigureAwait(false); + await elemOps.Read(async, reader, isDbNull, collection, indices, cancellationToken).ConfigureAwait(false); } // We can immediately continue if we didn't reach the end of the last dimension. while (++indices.GetItem(indices.Count - 1) < lastDimLength || (dimLengths is not null && CarryIndices(dimLengths, indices))); @@ -285,11 +276,11 @@ public async ValueTask Write(bool async, PgWriter writer, object values, Cancell writer.WriteInt32(dims); // Dimensions writer.WriteInt32(0); // Flags (not really used) - writer.WriteAsOid(_elemTypeId); + writer.WriteAsOid(elemTypeId); for (var dim = 0; dim < dims; dim++) { writer.WriteInt32(state?.Lengths?[dim] ?? 
count); - writer.WriteInt32(_pgLowerBound); // Lower bound + writer.WriteInt32(pgLowerBound); // Lower bound } // We can stop here for empty collections. @@ -310,7 +301,7 @@ public async ValueTask Write(bool async, PgWriter writer, object values, Cancell await writer.Flush(async, cancellationToken).ConfigureAwait(false); var elem = elemData?[i++]; - var size = elem?.Size ?? (elemTypeDbNullable && IsDbNull(values, indices) ? -1 : _bufferRequirements.Write); + var size = elem?.Size ?? (elemTypeDbNullable && IsDbNull(values, indices) ? -1 : bufferRequirements.Write); if (size.Kind is SizeKind.Unknown) throw new NotImplementedException(); @@ -318,8 +309,8 @@ public async ValueTask Write(bool async, PgWriter writer, object values, Cancell writer.WriteInt32(length); if (length != -1) { - using var _ = await writer.BeginNestedWrite(async, _bufferRequirements.Write, length, elem?.WriteState, cancellationToken).ConfigureAwait(false); - await _elemOps.Write(async, writer, values, indices, cancellationToken).ConfigureAwait(false); + using var _ = await writer.BeginNestedWrite(async, bufferRequirements.Write, length, elem?.WriteState, cancellationToken).ConfigureAwait(false); + await elemOps.Write(async, writer, values, indices, cancellationToken).ConfigureAwait(false); } } // We can immediately continue if we didn't reach the end of the last dimension. @@ -429,15 +420,12 @@ protected static int GetLengths(Array array, out int[]? lengths) } } -sealed class ArrayBasedArrayConverter : ArrayConverter, IElementOperations where T : class +sealed class ArrayBasedArrayConverter(PgConverterResolution elemResolution, Type? effectiveType = null, int pgLowerBound = 1) + : ArrayConverter(expectedDimensions: effectiveType is null ? 1 : effectiveType.IsArray ? effectiveType.GetArrayRank() : null, + elemResolution, pgLowerBound), IElementOperations + where T : class { - readonly PgConverter _elemConverter; - - public ArrayBasedArrayConverter(PgConverterResolution elemResolution, Type? 
effectiveType = null, int pgLowerBound = 1) - : base( - expectedDimensions: effectiveType is null ? 1 : effectiveType.IsArray ? effectiveType.GetArrayRank() : null, - elemResolution, pgLowerBound) - => _elemConverter = elemResolution.GetConverter(); + readonly PgConverter _elemConverter = elemResolution.GetConverter(); [MethodImpl(MethodImplOptions.AggressiveInlining)] static TElement? GetValue(object collection, Indices indices) @@ -533,13 +521,11 @@ ValueTask IElementOperations.Write(bool async, PgWriter writer, object collectio } } -sealed class ListBasedArrayConverter : ArrayConverter, IElementOperations where T : class +sealed class ListBasedArrayConverter(PgConverterResolution elemResolution, int pgLowerBound = 1) + : ArrayConverter(expectedDimensions: 1, elemResolution, pgLowerBound), IElementOperations + where T : class { - readonly PgConverter _elemConverter; - - public ListBasedArrayConverter(PgConverterResolution elemResolution, int pgLowerBound = 1) - : base(expectedDimensions: 1, elemResolution, pgLowerBound) - => _elemConverter = elemResolution.GetConverter(); + readonly PgConverter _elemConverter = elemResolution.GetConverter(); [MethodImpl(MethodImplOptions.AggressiveInlining)] static TElement? GetValue(object collection, int index) @@ -609,14 +595,11 @@ ValueTask IElementOperations.Write(bool async, PgWriter writer, object collectio } } -sealed class ArrayConverterResolver : PgComposingConverterResolver where T : class +sealed class ArrayConverterResolver(PgResolverTypeInfo elementTypeInfo, Type effectiveType) + : PgComposingConverterResolver(elementTypeInfo.PgTypeId is { } id ? elementTypeInfo.Options.GetArrayTypeId(id) : null, + elementTypeInfo) + where T : class { - readonly Type _effectiveType; - - public ArrayConverterResolver(PgResolverTypeInfo elementTypeInfo, Type effectiveType) - : base(elementTypeInfo.PgTypeId is { } id ? 
elementTypeInfo.Options.GetArrayTypeId(id) : null, elementTypeInfo) - => _effectiveType = effectiveType; - PgSerializerOptions Options => EffectiveTypeInfo.Options; protected override PgTypeId GetEffectivePgTypeId(PgTypeId pgTypeId) => Options.GetArrayElementTypeId(pgTypeId); @@ -625,7 +608,7 @@ public ArrayConverterResolver(PgResolverTypeInfo elementTypeInfo, Type effective protected override PgConverter CreateConverter(PgConverterResolution effectiveResolution) { if (typeof(T) == typeof(Array) || typeof(T).IsArray) - return new ArrayBasedArrayConverter(effectiveResolution, _effectiveType); + return new ArrayBasedArrayConverter(effectiveResolution, effectiveType); if (typeof(T).IsConstructedGenericType && typeof(T).GetGenericTypeDefinition() == typeof(IList<>)) return new ListBasedArrayConverter(effectiveResolution); @@ -682,17 +665,11 @@ protected override PgConverter CreateConverter(PgConverterResolution effectiv } // T is Array as we only know what type it will be after reading 'contains nulls'. 
-sealed class PolymorphicArrayConverter : PgStreamingConverter +sealed class PolymorphicArrayConverter( + PgConverter structElementCollectionConverter, + PgConverter nullableElementCollectionConverter) + : PgStreamingConverter { - readonly PgConverter _structElementCollectionConverter; - readonly PgConverter _nullableElementCollectionConverter; - - public PolymorphicArrayConverter(PgConverter structElementCollectionConverter, PgConverter nullableElementCollectionConverter) - { - _structElementCollectionConverter = structElementCollectionConverter; - _nullableElementCollectionConverter = nullableElementCollectionConverter; - } - public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) { bufferRequirements = BufferRequirements.Create(read: sizeof(int) + sizeof(int), write: Size.Unknown); @@ -705,8 +682,8 @@ public override TBase Read(PgReader reader) var containsNulls = reader.ReadInt32() is 1; reader.Rewind(sizeof(int) + sizeof(int)); return containsNulls - ? _nullableElementCollectionConverter.Read(reader) - : _structElementCollectionConverter.Read(reader); + ? nullableElementCollectionConverter.Read(reader) + : structElementCollectionConverter.Read(reader); } public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) @@ -715,8 +692,8 @@ public override ValueTask ReadAsync(PgReader reader, CancellationToken ca var containsNulls = reader.ReadInt32() is 1; reader.Rewind(sizeof(int) + sizeof(int)); return containsNulls - ? _nullableElementCollectionConverter.ReadAsync(reader, cancellationToken) - : _structElementCollectionConverter.ReadAsync(reader, cancellationToken); + ? nullableElementCollectionConverter.ReadAsync(reader, cancellationToken) + : structElementCollectionConverter.ReadAsync(reader, cancellationToken); } public override Size GetSize(SizeContext context, TBase value, ref object? 
writeState) diff --git a/src/Npgsql/Internal/Converters/BitStringConverters.cs b/src/Npgsql/Internal/Converters/BitStringConverters.cs index b7597f96d9..d90f6f5c6e 100644 --- a/src/Npgsql/Internal/Converters/BitStringConverters.cs +++ b/src/Npgsql/Internal/Converters/BitStringConverters.cs @@ -235,13 +235,11 @@ async ValueTask Write(bool async, PgWriter writer, string value, CancellationTok /// Note that for BIT(1), this resolver will return a bool by default, to align with SqlClient /// (see discussion https://github.com/npgsql/npgsql/pull/362#issuecomment-59622101). -sealed class PolymorphicBitStringConverterResolver : PolymorphicConverterResolver +sealed class PolymorphicBitStringConverterResolver(PgTypeId bitString) : PolymorphicConverterResolver(bitString) { BoolBitStringConverter? _boolConverter; BitArrayBitStringConverter? _bitArrayConverter; - public PolymorphicBitStringConverterResolver(PgTypeId bitString) : base(bitString) { } - protected override PgConverter Get(Field? field) => field?.TypeModifier is 1 ? _boolConverter ??= new BoolBitStringConverter() diff --git a/src/Npgsql/Internal/Converters/CastingConverter.cs b/src/Npgsql/Internal/Converters/CastingConverter.cs index 3fbfc5059d..a2b83fd94c 100644 --- a/src/Npgsql/Internal/Converters/CastingConverter.cs +++ b/src/Npgsql/Internal/Converters/CastingConverter.cs @@ -7,53 +7,47 @@ namespace Npgsql.Internal.Converters; /// A converter to map strongly typed apis onto boxed converter results to produce a strongly typed converter over T. -sealed class CastingConverter : PgConverter +sealed class CastingConverter(PgConverter effectiveConverter) + : PgConverter(effectiveConverter.DbNullPredicateKind is DbNullPredicate.Custom) { - readonly PgConverter _effectiveConverter; - public CastingConverter(PgConverter effectiveConverter) - : base(effectiveConverter.DbNullPredicateKind is DbNullPredicate.Custom) - => _effectiveConverter = effectiveConverter; - - protected override bool IsDbNullValue(T? 
value, ref object? writeState) => _effectiveConverter.IsDbNullAsObject(value, ref writeState); + protected override bool IsDbNullValue(T? value, ref object? writeState) => effectiveConverter.IsDbNullAsObject(value, ref writeState); public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) - => _effectiveConverter.CanConvert(format, out bufferRequirements); + => effectiveConverter.CanConvert(format, out bufferRequirements); - public override T Read(PgReader reader) => (T)_effectiveConverter.ReadAsObject(reader); + public override T Read(PgReader reader) => (T)effectiveConverter.ReadAsObject(reader); public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) - => this.ReadAsObjectAsyncAsT(_effectiveConverter, reader, cancellationToken); + => this.ReadAsObjectAsyncAsT(effectiveConverter, reader, cancellationToken); public override Size GetSize(SizeContext context, T value, ref object? writeState) - => _effectiveConverter.GetSizeAsObject(context, value!, ref writeState); + => effectiveConverter.GetSizeAsObject(context, value!, ref writeState); public override void Write(PgWriter writer, T value) - => _effectiveConverter.WriteAsObject(writer, value!); + => effectiveConverter.WriteAsObject(writer, value!); public override ValueTask WriteAsync(PgWriter writer, T value, CancellationToken cancellationToken = default) - => _effectiveConverter.WriteAsObjectAsync(writer, value!, cancellationToken); + => effectiveConverter.WriteAsObjectAsync(writer, value!, cancellationToken); internal override ValueTask ReadAsObject(bool async, PgReader reader, CancellationToken cancellationToken) => async - ? _effectiveConverter.ReadAsObjectAsync(reader, cancellationToken) - : new(_effectiveConverter.ReadAsObject(reader)); + ? 
effectiveConverter.ReadAsObjectAsync(reader, cancellationToken) + : new(effectiveConverter.ReadAsObject(reader)); internal override ValueTask WriteAsObject(bool async, PgWriter writer, object value, CancellationToken cancellationToken) { if (async) - return _effectiveConverter.WriteAsObjectAsync(writer, value, cancellationToken); + return effectiveConverter.WriteAsObjectAsync(writer, value, cancellationToken); - _effectiveConverter.WriteAsObject(writer, value); + effectiveConverter.WriteAsObject(writer, value); return new(); } } // Given there aren't many instantiations of converter resolvers (and it's fairly involved to write a fast one) we use the composing base class. -sealed class CastingConverterResolver : PgComposingConverterResolver +sealed class CastingConverterResolver(PgResolverTypeInfo effectiveResolverTypeInfo) + : PgComposingConverterResolver(effectiveResolverTypeInfo.PgTypeId, effectiveResolverTypeInfo) { - public CastingConverterResolver(PgResolverTypeInfo effectiveResolverTypeInfo) - : base(effectiveResolverTypeInfo.PgTypeId, effectiveResolverTypeInfo) { } - protected override PgTypeId GetEffectivePgTypeId(PgTypeId pgTypeId) => pgTypeId; protected override PgTypeId GetPgTypeId(PgTypeId effectivePgTypeId) => effectivePgTypeId; diff --git a/src/Npgsql/Internal/Converters/FullTextSearch/TsQueryConverter.cs b/src/Npgsql/Internal/Converters/FullTextSearch/TsQueryConverter.cs index 220cc88894..9e88fbe8f1 100644 --- a/src/Npgsql/Internal/Converters/FullTextSearch/TsQueryConverter.cs +++ b/src/Npgsql/Internal/Converters/FullTextSearch/TsQueryConverter.cs @@ -10,14 +10,9 @@ // ReSharper disable once CheckNamespace namespace Npgsql.Internal.Converters; -sealed class TsQueryConverter : PgStreamingConverter +sealed class TsQueryConverter(Encoding encoding) : PgStreamingConverter where T : NpgsqlTsQuery { - readonly Encoding _encoding; - - public TsQueryConverter(Encoding encoding) - => _encoding = encoding; - public override T Read(PgReader reader) => 
(T)Read(async: false, reader, CancellationToken.None).GetAwaiter().GetResult(); @@ -49,8 +44,8 @@ async ValueTask Read(bool async, PgReader reader, CancellationTok var prefix = reader.ReadByte() != 0; var str = async - ? await reader.ReadNullTerminatedStringAsync(_encoding, cancellationToken).ConfigureAwait(false) - : reader.ReadNullTerminatedString(_encoding); + ? await reader.ReadNullTerminatedStringAsync(encoding, cancellationToken).ConfigureAwait(false) + : reader.ReadNullTerminatedString(encoding); InsertInTree(new NpgsqlTsQueryLexeme(str, weight, prefix), nodes, ref value); continue; @@ -134,7 +129,7 @@ public override Size GetSize(SizeContext context, T value, ref object? writeStat int GetNodeLength(NpgsqlTsQuery node) => node.Kind switch { - Lexeme when _encoding.GetByteCount(((NpgsqlTsQueryLexeme)node).Text) is var strLen + Lexeme when encoding.GetByteCount(((NpgsqlTsQueryLexeme)node).Text) is var strLen => strLen > 2046 ? throw new InvalidCastException("Lexeme text too long. Must be at most 2046 encoded bytes.") : 4 + strLen, @@ -185,9 +180,9 @@ async Task WriteCore(NpgsqlTsQuery node) writer.WriteByte(lexemeNode.IsPrefixSearch ? 
(byte)1 : (byte)0); if (async) - await writer.WriteCharsAsync(lexemeNode.Text.AsMemory(), _encoding, cancellationToken).ConfigureAwait(false); + await writer.WriteCharsAsync(lexemeNode.Text.AsMemory(), encoding, cancellationToken).ConfigureAwait(false); else - writer.WriteChars(lexemeNode.Text.AsMemory().Span, _encoding); + writer.WriteChars(lexemeNode.Text.AsMemory().Span, encoding); if (writer.ShouldFlush(sizeof(byte))) await writer.Flush(async, cancellationToken).ConfigureAwait(false); diff --git a/src/Npgsql/Internal/Converters/FullTextSearch/TsVectorConverter.cs b/src/Npgsql/Internal/Converters/FullTextSearch/TsVectorConverter.cs index 2c431fd35b..04b16b80f5 100644 --- a/src/Npgsql/Internal/Converters/FullTextSearch/TsVectorConverter.cs +++ b/src/Npgsql/Internal/Converters/FullTextSearch/TsVectorConverter.cs @@ -8,13 +8,8 @@ // ReSharper disable once CheckNamespace namespace Npgsql.Internal.Converters; -sealed class TsVectorConverter : PgStreamingConverter +sealed class TsVectorConverter(Encoding encoding) : PgStreamingConverter { - readonly Encoding _encoding; - - public TsVectorConverter(Encoding encoding) - => _encoding = encoding; - public override NpgsqlTsVector Read(PgReader reader) => Read(async: false, reader, CancellationToken.None).GetAwaiter().GetResult(); @@ -32,8 +27,8 @@ async ValueTask Read(bool async, PgReader reader, CancellationTo for (var i = 0; i < numLexemes; i++) { var lexemeString = async - ? await reader.ReadNullTerminatedStringAsync(_encoding, cancellationToken).ConfigureAwait(false) - : reader.ReadNullTerminatedString(_encoding); + ? 
await reader.ReadNullTerminatedStringAsync(encoding, cancellationToken).ConfigureAwait(false) + : reader.ReadNullTerminatedString(encoding); if (reader.ShouldBuffer(sizeof(short))) await reader.Buffer(async, sizeof(short), cancellationToken).ConfigureAwait(false); @@ -70,7 +65,7 @@ public override Size GetSize(SizeContext context, NpgsqlTsVector value, ref obje { var size = 4; foreach (var l in value) - size += _encoding.GetByteCount(l.Text) + 1 + 2 + l.Count * 2; + size += encoding.GetByteCount(l.Text) + 1 + 2 + l.Count * 2; return size; } @@ -90,9 +85,9 @@ async ValueTask Write(bool async, PgWriter writer, NpgsqlTsVector value, Cancell foreach (var lexeme in value) { if (async) - await writer.WriteCharsAsync(lexeme.Text.AsMemory(), _encoding, cancellationToken).ConfigureAwait(false); + await writer.WriteCharsAsync(lexeme.Text.AsMemory(), encoding, cancellationToken).ConfigureAwait(false); else - writer.WriteChars(lexeme.Text.AsMemory().Span, _encoding); + writer.WriteChars(lexeme.Text.AsMemory().Span, encoding); if (writer.ShouldFlush(sizeof(byte) + sizeof(short))) await writer.Flush(async, cancellationToken).ConfigureAwait(false); diff --git a/src/Npgsql/Internal/Converters/HstoreConverter.cs b/src/Npgsql/Internal/Converters/HstoreConverter.cs index e2e8762d8e..f9514450f7 100644 --- a/src/Npgsql/Internal/Converters/HstoreConverter.cs +++ b/src/Npgsql/Internal/Converters/HstoreConverter.cs @@ -7,17 +7,10 @@ namespace Npgsql.Internal.Converters; -sealed class HstoreConverter : PgStreamingConverter where T : ICollection> +sealed class HstoreConverter(Encoding encoding, Func>, T>? convert = null) + : PgStreamingConverter + where T : ICollection> { - readonly Encoding _encoding; - readonly Func>, T>? _convert; - - public HstoreConverter(Encoding encoding, Func>, T>? 
convert = null) - { - _encoding = encoding; - _convert = convert; - } - public override T Read(PgReader reader) => Read(async: false, reader, CancellationToken.None).Result; @@ -40,8 +33,8 @@ public override Size GetSize(SizeContext context, T value, ref object? writeStat if (kv.Key is null) throw new ArgumentException("Hstore doesn't support null keys", nameof(value)); - var keySize = _encoding.GetByteCount(kv.Key); - var valueSize = kv.Value is null ? -1 : _encoding.GetByteCount(kv.Value); + var keySize = encoding.GetByteCount(kv.Key); + var valueSize = kv.Value is null ? -1 : encoding.GetByteCount(kv.Value); totalSize += keySize + (valueSize is -1 ? 0 : valueSize); data[i] = (keySize, null); data[i + 1] = (valueSize, null); @@ -78,7 +71,7 @@ async ValueTask Read(bool async, PgReader reader, CancellationToken cancellat if (reader.ShouldBuffer(sizeof(int))) await reader.Buffer(async, sizeof(int), cancellationToken).ConfigureAwait(false); var keySize = reader.ReadInt32(); - var key = _encoding.GetString(async + var key = encoding.GetString(async ? await reader.ReadBytesAsync(keySize, cancellationToken).ConfigureAwait(false) : reader.ReadBytes(keySize) ); @@ -88,7 +81,7 @@ async ValueTask Read(bool async, PgReader reader, CancellationToken cancellat var valueSize = reader.ReadInt32(); string? value = null; if (valueSize is not -1) - value = _encoding.GetString(async + value = encoding.GetString(async ? await reader.ReadBytesAsync(valueSize, cancellationToken).ConfigureAwait(false) : reader.ReadBytes(valueSize) ); @@ -99,7 +92,7 @@ async ValueTask Read(bool async, PgReader reader, CancellationToken cancellat if (typeof(T) == typeof(Dictionary) || typeof(T) == typeof(IDictionary)) return (T)result; - return _convert is null ? throw new NotSupportedException() : _convert(result); + return convert is null ? 
throw new NotSupportedException() : convert(result); } async ValueTask Write(bool async, PgWriter writer, T value, CancellationToken cancellationToken) @@ -129,9 +122,9 @@ async ValueTask Write(bool async, PgWriter writer, T value, CancellationToken ca var length = size.Value; writer.WriteInt32(length); if (async) - await writer.WriteCharsAsync(kv.Key.AsMemory(), _encoding, cancellationToken).ConfigureAwait(false); + await writer.WriteCharsAsync(kv.Key.AsMemory(), encoding, cancellationToken).ConfigureAwait(false); else - writer.WriteChars(kv.Key.AsSpan(), _encoding); + writer.WriteChars(kv.Key.AsSpan(), encoding); if (writer.ShouldFlush(sizeof(int))) await writer.Flush(async, cancellationToken).ConfigureAwait(false); @@ -145,9 +138,9 @@ async ValueTask Write(bool async, PgWriter writer, T value, CancellationToken ca if (valueLength is not -1) { if (async) - await writer.WriteCharsAsync(kv.Value.AsMemory(), _encoding, cancellationToken).ConfigureAwait(false); + await writer.WriteCharsAsync(kv.Value.AsMemory(), encoding, cancellationToken).ConfigureAwait(false); else - writer.WriteChars(kv.Value.AsSpan(), _encoding); + writer.WriteChars(kv.Value.AsSpan(), encoding); } i += 2; } diff --git a/src/Npgsql/Internal/Converters/Networking/MacaddrConverter.cs b/src/Npgsql/Internal/Converters/Networking/MacaddrConverter.cs index dd8aac78bc..d9c2aa46e8 100644 --- a/src/Npgsql/Internal/Converters/Networking/MacaddrConverter.cs +++ b/src/Npgsql/Internal/Converters/Networking/MacaddrConverter.cs @@ -5,15 +5,11 @@ // ReSharper disable once CheckNamespace namespace Npgsql.Internal.Converters; -sealed class MacaddrConverter : PgBufferedConverter +sealed class MacaddrConverter(bool macaddr8) : PgBufferedConverter { - readonly bool _macaddr8; - - public MacaddrConverter(bool macaddr8) => _macaddr8 = macaddr8; - public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) { - bufferRequirements = _macaddr8 ? 
BufferRequirements.Create(Size.CreateUpperBound(8)) : BufferRequirements.CreateFixedSize(6); + bufferRequirements = macaddr8 ? BufferRequirements.Create(Size.CreateUpperBound(8)) : BufferRequirements.CreateFixedSize(6); return format is DataFormat.Binary; } @@ -33,7 +29,7 @@ protected override PhysicalAddress ReadCore(PgReader reader) protected override void WriteCore(PgWriter writer, PhysicalAddress value) { var bytes = value.GetAddressBytes(); - if (!_macaddr8 && bytes.Length is not 6) + if (!macaddr8 && bytes.Length is not 6) throw new ArgumentException("A macaddr value must be 6 bytes long."); writer.WriteBytes(bytes); } diff --git a/src/Npgsql/Internal/Converters/NullableConverter.cs b/src/Npgsql/Internal/Converters/NullableConverter.cs index 292def140a..57a12e005f 100644 --- a/src/Npgsql/Internal/Converters/NullableConverter.cs +++ b/src/Npgsql/Internal/Converters/NullableConverter.cs @@ -7,46 +7,42 @@ namespace Npgsql.Internal.Converters; // NULL writing is always responsibility of the caller writing the length, so there is not much we do here. /// Special value converter to be able to use struct converters as System.Nullable converters, it delegates all behavior to the effective converter. -sealed class NullableConverter : PgConverter where T : struct +sealed class NullableConverter(PgConverter effectiveConverter) + : PgConverter(effectiveConverter.DbNullPredicateKind is DbNullPredicate.Custom) + where T : struct { - readonly PgConverter _effectiveConverter; - public NullableConverter(PgConverter effectiveConverter) - : base(effectiveConverter.DbNullPredicateKind is DbNullPredicate.Custom) - => _effectiveConverter = effectiveConverter; - protected override bool IsDbNullValue(T? value, ref object? 
writeState) - => value is null || _effectiveConverter.IsDbNull(value.GetValueOrDefault(), ref writeState); + => value is null || effectiveConverter.IsDbNull(value.GetValueOrDefault(), ref writeState); public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) - => _effectiveConverter.CanConvert(format, out bufferRequirements); + => effectiveConverter.CanConvert(format, out bufferRequirements); public override T? Read(PgReader reader) - => _effectiveConverter.Read(reader); + => effectiveConverter.Read(reader); public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) - => this.ReadAsyncAsNullable(_effectiveConverter, reader, cancellationToken); + => this.ReadAsyncAsNullable(effectiveConverter, reader, cancellationToken); public override Size GetSize(SizeContext context, [DisallowNull]T? value, ref object? writeState) - => _effectiveConverter.GetSize(context, value.GetValueOrDefault(), ref writeState); + => effectiveConverter.GetSize(context, value.GetValueOrDefault(), ref writeState); public override void Write(PgWriter writer, T? value) - => _effectiveConverter.Write(writer, value.GetValueOrDefault()); + => effectiveConverter.Write(writer, value.GetValueOrDefault()); public override ValueTask WriteAsync(PgWriter writer, T? 
value, CancellationToken cancellationToken = default) - => _effectiveConverter.WriteAsync(writer, value.GetValueOrDefault(), cancellationToken); + => effectiveConverter.WriteAsync(writer, value.GetValueOrDefault(), cancellationToken); internal override ValueTask ReadAsObject(bool async, PgReader reader, CancellationToken cancellationToken) - => _effectiveConverter.ReadAsObject(async, reader, cancellationToken); + => effectiveConverter.ReadAsObject(async, reader, cancellationToken); internal override ValueTask WriteAsObject(bool async, PgWriter writer, object value, CancellationToken cancellationToken) - => _effectiveConverter.WriteAsObject(async, writer, value, cancellationToken); + => effectiveConverter.WriteAsObject(async, writer, value, cancellationToken); } -sealed class NullableConverterResolver : PgComposingConverterResolver where T : struct +sealed class NullableConverterResolver(PgResolverTypeInfo effectiveTypeInfo) + : PgComposingConverterResolver(effectiveTypeInfo.PgTypeId, effectiveTypeInfo) + where T : struct { - public NullableConverterResolver(PgResolverTypeInfo effectiveTypeInfo) - : base(effectiveTypeInfo.PgTypeId, effectiveTypeInfo) { } - protected override PgTypeId GetEffectivePgTypeId(PgTypeId pgTypeId) => pgTypeId; protected override PgTypeId GetPgTypeId(PgTypeId effectivePgTypeId) => effectivePgTypeId; diff --git a/src/Npgsql/Internal/Converters/ObjectConverter.cs b/src/Npgsql/Internal/Converters/ObjectConverter.cs index 3cc788adf1..394f7a8a4c 100644 --- a/src/Npgsql/Internal/Converters/ObjectConverter.cs +++ b/src/Npgsql/Internal/Converters/ObjectConverter.cs @@ -5,18 +5,8 @@ namespace Npgsql.Internal; -sealed class ObjectConverter : PgStreamingConverter +sealed class ObjectConverter(PgSerializerOptions options, PgTypeId pgTypeId) : PgStreamingConverter(customDbNullPredicate: true) { - readonly PgSerializerOptions _options; - readonly PgTypeId _pgTypeId; - - public ObjectConverter(PgSerializerOptions options, PgTypeId pgTypeId) - : 
base(customDbNullPredicate: true) - { - _options = options; - _pgTypeId = pgTypeId; - } - protected override bool IsDbNullValue(object? value, ref object? writeState) { if (value is null or DBNull) @@ -98,8 +88,8 @@ async ValueTask Write(bool async, PgWriter writer, object value, CancellationTok } PgTypeInfo GetTypeInfo(Type type) - => _options.GetTypeInfoInternal(type, _pgTypeId) - ?? throw new NotSupportedException($"Writing values of '{type.FullName}' having DataTypeName '{_options.DatabaseInfo.GetPostgresType(_pgTypeId).DisplayName}' is not supported."); + => options.GetTypeInfoInternal(type, pgTypeId) + ?? throw new NotSupportedException($"Writing values of '{type.FullName}' having DataTypeName '{options.DatabaseInfo.GetPostgresType(pgTypeId).DisplayName}' is not supported."); sealed class WriteState { diff --git a/src/Npgsql/Internal/Converters/PolymorphicConverterResolver.cs b/src/Npgsql/Internal/Converters/PolymorphicConverterResolver.cs index 7c78e34a24..7cf355d103 100644 --- a/src/Npgsql/Internal/Converters/PolymorphicConverterResolver.cs +++ b/src/Npgsql/Internal/Converters/PolymorphicConverterResolver.cs @@ -5,11 +5,9 @@ namespace Npgsql.Internal.Converters; -abstract class PolymorphicConverterResolver : PgConverterResolver +abstract class PolymorphicConverterResolver(PgTypeId pgTypeId) : PgConverterResolver { - protected PolymorphicConverterResolver(PgTypeId pgTypeId) => PgTypeId = pgTypeId; - - protected PgTypeId PgTypeId { get; } + protected PgTypeId PgTypeId { get; } = pgTypeId; protected abstract PgConverter Get(Field? 
field); diff --git a/src/Npgsql/Internal/Converters/Primitive/PgNumeric.cs b/src/Npgsql/Internal/Converters/Primitive/PgNumeric.cs index 908bf6fb4d..299dd9b419 100644 --- a/src/Npgsql/Internal/Converters/Primitive/PgNumeric.cs +++ b/src/Npgsql/Internal/Converters/Primitive/PgNumeric.cs @@ -8,28 +8,21 @@ namespace Npgsql.Internal.Converters; -readonly struct PgNumeric +readonly struct PgNumeric(ArraySegment digits, short weight, short sign, short scale) { // numeric digit count + weight + sign + scale const int StructureByteCount = 4 * sizeof(short); const int DecimalBits = 4; const int StackAllocByteThreshold = 64 * sizeof(uint); - readonly ushort _sign; - - public PgNumeric(ArraySegment digits, short weight, short sign, short scale) - { - Digits = digits; - Weight = weight; - _sign = (ushort)sign; - Scale = scale; - } + readonly ushort _sign = (ushort)sign; /// Big endian array of numeric digits - public ArraySegment Digits { get; } - public short Weight { get; } + public ArraySegment Digits { get; } = digits; + + public short Weight { get; } = weight; public short Sign => (short)_sign; - public short Scale { get; } + public short Scale { get; } = scale; public int GetByteCount() => GetByteCount(Digits.Count); public static int GetByteCount(int digitCount) => StructureByteCount + digitCount * sizeof(short); diff --git a/src/Npgsql/Internal/Converters/Primitive/TextConverters.cs b/src/Npgsql/Internal/Converters/Primitive/TextConverters.cs index 5bbc1a61f2..e1ef7f714a 100644 --- a/src/Npgsql/Internal/Converters/Primitive/TextConverters.cs +++ b/src/Npgsql/Internal/Converters/Primitive/TextConverters.cs @@ -11,25 +11,22 @@ // ReSharper disable once CheckNamespace namespace Npgsql.Internal.Converters; -abstract class StringBasedTextConverter : PgStreamingConverter +abstract class StringBasedTextConverter(Encoding encoding) : PgStreamingConverter { - readonly Encoding _encoding; - protected StringBasedTextConverter(Encoding encoding) => _encoding = encoding; - public 
override T Read(PgReader reader) - => Read(async: false, reader, _encoding).GetAwaiter().GetResult(); + => Read(async: false, reader, encoding).GetAwaiter().GetResult(); public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) - => Read(async: true, reader, _encoding, cancellationToken); + => Read(async: true, reader, encoding, cancellationToken); public override Size GetSize(SizeContext context, T value, ref object? writeState) - => TextConverter.GetSize(ref context, ConvertTo(value), _encoding); + => TextConverter.GetSize(ref context, ConvertTo(value), encoding); public override void Write(PgWriter writer, T value) - => writer.WriteChars(ConvertTo(value).Span, _encoding); + => writer.WriteChars(ConvertTo(value).Span, encoding); public override ValueTask WriteAsync(PgWriter writer, T value, CancellationToken cancellationToken = default) - => writer.WriteCharsAsync(ConvertTo(value), _encoding, cancellationToken); + => writer.WriteCharsAsync(ConvertTo(value), encoding, cancellationToken); public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) { @@ -52,38 +49,33 @@ async ValueTask ReadAsync(PgReader reader, Encoding encoding, CancellationTok } } -sealed class ReadOnlyMemoryTextConverter : StringBasedTextConverter> +sealed class ReadOnlyMemoryTextConverter(Encoding encoding) : StringBasedTextConverter>(encoding) { - public ReadOnlyMemoryTextConverter(Encoding encoding) : base(encoding) { } protected override ReadOnlyMemory ConvertTo(ReadOnlyMemory value) => value; protected override ReadOnlyMemory ConvertFrom(string value) => value.AsMemory(); } -sealed class StringTextConverter : StringBasedTextConverter +sealed class StringTextConverter(Encoding encoding) : StringBasedTextConverter(encoding) { - public StringTextConverter(Encoding encoding) : base(encoding) { } protected override ReadOnlyMemory ConvertTo(string value) => value.AsMemory(); protected override string ConvertFrom(string 
value) => value; } -abstract class ArrayBasedTextConverter : PgStreamingConverter +abstract class ArrayBasedTextConverter(Encoding encoding) : PgStreamingConverter { - readonly Encoding _encoding; - protected ArrayBasedTextConverter(Encoding encoding) => _encoding = encoding; - public override T Read(PgReader reader) - => Read(async: false, reader, _encoding).GetAwaiter().GetResult(); + => Read(async: false, reader, encoding).GetAwaiter().GetResult(); public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) - => Read(async: true, reader, _encoding); + => Read(async: true, reader, encoding); public override Size GetSize(SizeContext context, T value, ref object? writeState) - => TextConverter.GetSize(ref context, ConvertTo(value), _encoding); + => TextConverter.GetSize(ref context, ConvertTo(value), encoding); public override void Write(PgWriter writer, T value) - => writer.WriteChars(ConvertTo(value).AsSpan(), _encoding); + => writer.WriteChars(ConvertTo(value).AsSpan(), encoding); public override ValueTask WriteAsync(PgWriter writer, T value, CancellationToken cancellationToken = default) - => writer.WriteCharsAsync(ConvertTo(value), _encoding, cancellationToken); + => writer.WriteCharsAsync(ConvertTo(value), encoding, cancellationToken); public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) { @@ -110,16 +102,14 @@ static ArraySegment GetSegment(ReadOnlySequence bytes, Encoding enco } } -sealed class CharArraySegmentTextConverter : ArrayBasedTextConverter> +sealed class CharArraySegmentTextConverter(Encoding encoding) : ArrayBasedTextConverter>(encoding) { - public CharArraySegmentTextConverter(Encoding encoding) : base(encoding) { } protected override ArraySegment ConvertTo(ArraySegment value) => value; protected override ArraySegment ConvertFrom(ArraySegment value) => value; } -sealed class CharArrayTextConverter : ArrayBasedTextConverter +sealed class 
CharArrayTextConverter(Encoding encoding) : ArrayBasedTextConverter(encoding) { - public CharArrayTextConverter(Encoding encoding) : base(encoding) { } protected override ArraySegment ConvertTo(char[] value) => new(value, 0, value.Length); protected override char[] ConvertFrom(ArraySegment value) { @@ -132,16 +122,9 @@ protected override char[] ConvertFrom(ArraySegment value) } } -sealed class CharTextConverter : PgBufferedConverter +sealed class CharTextConverter(Encoding encoding) : PgBufferedConverter { - readonly Encoding _encoding; - readonly Size _oneCharMaxByteCount; - - public CharTextConverter(Encoding encoding) - { - _encoding = encoding; - _oneCharMaxByteCount = Size.CreateUpperBound(encoding.GetMaxByteCount(1)); - } + readonly Size _oneCharMaxByteCount = Size.CreateUpperBound(encoding.GetMaxByteCount(1)); public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) { @@ -155,33 +138,30 @@ protected override char ReadCore(PgReader reader) Debug.Assert(byteSeq.IsSingleSegment); var bytes = byteSeq.FirstSpan; - var chars = _encoding.GetCharCount(bytes); + var chars = encoding.GetCharCount(bytes); if (chars < 1) throw new NpgsqlException("Could not read char - string was empty"); Span destination = stackalloc char[chars]; - _encoding.GetChars(bytes, destination); + encoding.GetChars(bytes, destination); return destination[0]; } public override Size GetSize(SizeContext context, char value, ref object? 
writeState) { Span spanValue = [value]; - return _encoding.GetByteCount(spanValue); + return encoding.GetByteCount(spanValue); } protected override void WriteCore(PgWriter writer, char value) { Span spanValue = [value]; - writer.WriteChars(spanValue, _encoding); + writer.WriteChars(spanValue, encoding); } } -sealed class TextReaderTextConverter : PgStreamingConverter +sealed class TextReaderTextConverter(Encoding encoding) : PgStreamingConverter { - readonly Encoding _encoding; - public TextReaderTextConverter(Encoding encoding) => _encoding = encoding; - public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) { bufferRequirements = BufferRequirements.None; @@ -189,10 +169,10 @@ public override bool CanConvert(DataFormat format, out BufferRequirements buffer } public override TextReader Read(PgReader reader) - => reader.GetTextReader(_encoding); + => reader.GetTextReader(encoding); public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) - => reader.GetTextReaderAsync(_encoding, cancellationToken); + => reader.GetTextReaderAsync(encoding, cancellationToken); public override Size GetSize(SizeContext context, TextReader value, ref object? writeState) => throw new NotImplementedException(); public override void Write(PgWriter writer, TextReader value) => throw new NotImplementedException(); @@ -200,17 +180,13 @@ public override ValueTask ReadAsync(PgReader reader, CancellationTok } -readonly struct GetChars +readonly struct GetChars(int read) { - public int Read { get; } - public GetChars(int read) => Read = read; + public int Read { get; } = read; } -sealed class GetCharsTextConverter : PgStreamingConverter +sealed class GetCharsTextConverter(Encoding encoding) : PgStreamingConverter { - readonly Encoding _encoding; - public GetCharsTextConverter(Encoding encoding) => _encoding = encoding; - public override GetChars Read(PgReader reader) => reader.CharsReadActive ? 
ResumableRead(reader) @@ -225,7 +201,7 @@ public override ValueTask ReadAsync(PgReader reader, CancellationToken GetChars ResumableRead(PgReader reader) { - reader.GetCharsReadInfo(_encoding, out var charsRead, out var textReader, out var charsOffset, out var buffer); + reader.GetCharsReadInfo(encoding, out var charsRead, out var textReader, out var charsOffset, out var buffer); // With variable length encodings, moving backwards based on bytes means we have to start over. if (charsRead > charsOffset) diff --git a/src/Npgsql/Internal/Converters/RecordConverter.cs b/src/Npgsql/Internal/Converters/RecordConverter.cs index 89e904099b..a0666abdeb 100644 --- a/src/Npgsql/Internal/Converters/RecordConverter.cs +++ b/src/Npgsql/Internal/Converters/RecordConverter.cs @@ -5,17 +5,8 @@ namespace Npgsql.Internal.Converters; -sealed class RecordConverter : PgStreamingConverter +sealed class RecordConverter(PgSerializerOptions options, Func? factory = null) : PgStreamingConverter { - readonly PgSerializerOptions _options; - readonly Func? _factory; - - public RecordConverter(PgSerializerOptions options, Func? factory = null) - { - _options = options; - _factory = factory; - } - public override T Read(PgReader reader) => Read(async: false, reader, CancellationToken.None).GetAwaiter().GetResult(); @@ -41,11 +32,11 @@ async ValueTask Read(bool async, PgReader reader, CancellationToken cancellat continue; var postgresType = - _options.DatabaseInfo.GetPostgresType(typeOid).GetRepresentationalType() + options.DatabaseInfo.GetPostgresType(typeOid).GetRepresentationalType() ?? throw new NotSupportedException($"Reading isn't supported for record field {i} (unknown type OID {typeOid}"); - var pgTypeId = _options.ToCanonicalTypeId(postgresType); - var typeInfo = _options.GetObjectOrDefaultTypeInfoInternal(pgTypeId) + var pgTypeId = options.ToCanonicalTypeId(postgresType); + var typeInfo = options.GetObjectOrDefaultTypeInfoInternal(pgTypeId) ?? 
throw new NotSupportedException( $"Reading isn't supported for record field {i} (PG type '{postgresType.DisplayName}'"); @@ -64,7 +55,7 @@ async ValueTask Read(bool async, PgReader reader, CancellationToken cancellat } } - return _factory is null ? (T)(object)result : _factory(result); + return factory is null ? (T)(object)result : factory(result); } public override Size GetSize(SizeContext context, T value, ref object? writeState) diff --git a/src/Npgsql/Internal/Converters/Temporal/DateConverters.cs b/src/Npgsql/Internal/Converters/Temporal/DateConverters.cs index 79aabf1d58..807e2528d2 100644 --- a/src/Npgsql/Internal/Converters/Temporal/DateConverters.cs +++ b/src/Npgsql/Internal/Converters/Temporal/DateConverters.cs @@ -4,15 +4,10 @@ // ReSharper disable once CheckNamespace namespace Npgsql.Internal.Converters; -sealed class DateTimeDateConverter : PgBufferedConverter +sealed class DateTimeDateConverter(bool dateTimeInfinityConversions) : PgBufferedConverter { - readonly bool _dateTimeInfinityConversions; - static readonly DateTime BaseValue = new(2000, 1, 1, 0, 0, 0); - public DateTimeDateConverter(bool dateTimeInfinityConversions) - => _dateTimeInfinityConversions = dateTimeInfinityConversions; - public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) { bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(int)); @@ -22,10 +17,10 @@ public override bool CanConvert(DataFormat format, out BufferRequirements buffer protected override DateTime ReadCore(PgReader reader) => reader.ReadInt32() switch { - int.MaxValue => _dateTimeInfinityConversions + int.MaxValue => dateTimeInfinityConversions ? DateTime.MaxValue : throw new InvalidCastException(NpgsqlStrings.CannotReadInfinityValue), - int.MinValue => _dateTimeInfinityConversions + int.MinValue => dateTimeInfinityConversions ? 
DateTime.MinValue : throw new InvalidCastException(NpgsqlStrings.CannotReadInfinityValue), var value => BaseValue + TimeSpan.FromDays(value) @@ -33,7 +28,7 @@ protected override DateTime ReadCore(PgReader reader) protected override void WriteCore(PgWriter writer, DateTime value) { - if (_dateTimeInfinityConversions) + if (dateTimeInfinityConversions) { if (value == DateTime.MaxValue) { @@ -52,15 +47,10 @@ protected override void WriteCore(PgWriter writer, DateTime value) } } -sealed class DateOnlyDateConverter : PgBufferedConverter +sealed class DateOnlyDateConverter(bool dateTimeInfinityConversions) : PgBufferedConverter { - readonly bool _dateTimeInfinityConversions; - static readonly DateOnly BaseValue = new(2000, 1, 1); - public DateOnlyDateConverter(bool dateTimeInfinityConversions) - => _dateTimeInfinityConversions = dateTimeInfinityConversions; - public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) { bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(int)); @@ -70,10 +60,10 @@ public override bool CanConvert(DataFormat format, out BufferRequirements buffer protected override DateOnly ReadCore(PgReader reader) => reader.ReadInt32() switch { - int.MaxValue => _dateTimeInfinityConversions + int.MaxValue => dateTimeInfinityConversions ? DateOnly.MaxValue : throw new InvalidCastException(NpgsqlStrings.CannotReadInfinityValue), - int.MinValue => _dateTimeInfinityConversions + int.MinValue => dateTimeInfinityConversions ? 
DateOnly.MinValue : throw new InvalidCastException(NpgsqlStrings.CannotReadInfinityValue), var value => BaseValue.AddDays(value) @@ -81,7 +71,7 @@ protected override DateOnly ReadCore(PgReader reader) protected override void WriteCore(PgWriter writer, DateOnly value) { - if (_dateTimeInfinityConversions) + if (dateTimeInfinityConversions) { if (value == DateOnly.MaxValue) { diff --git a/src/Npgsql/Internal/Converters/Temporal/DateTimeConverters.cs b/src/Npgsql/Internal/Converters/Temporal/DateTimeConverters.cs index ed744bb099..389c2ec021 100644 --- a/src/Npgsql/Internal/Converters/Temporal/DateTimeConverters.cs +++ b/src/Npgsql/Internal/Converters/Temporal/DateTimeConverters.cs @@ -3,17 +3,8 @@ // ReSharper disable once CheckNamespace namespace Npgsql.Internal.Converters; -sealed class DateTimeConverter : PgBufferedConverter +sealed class DateTimeConverter(bool dateTimeInfinityConversions, DateTimeKind kind) : PgBufferedConverter { - readonly bool _dateTimeInfinityConversions; - readonly DateTimeKind _kind; - - public DateTimeConverter(bool dateTimeInfinityConversions, DateTimeKind kind) - { - _dateTimeInfinityConversions = dateTimeInfinityConversions; - _kind = kind; - } - public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) { bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long)); @@ -21,18 +12,14 @@ public override bool CanConvert(DataFormat format, out BufferRequirements buffer } protected override DateTime ReadCore(PgReader reader) - => PgTimestamp.Decode(reader.ReadInt64(), _kind, _dateTimeInfinityConversions); + => PgTimestamp.Decode(reader.ReadInt64(), kind, dateTimeInfinityConversions); protected override void WriteCore(PgWriter writer, DateTime value) - => writer.WriteInt64(PgTimestamp.Encode(value, _dateTimeInfinityConversions)); + => writer.WriteInt64(PgTimestamp.Encode(value, dateTimeInfinityConversions)); } -sealed class DateTimeOffsetConverter : PgBufferedConverter +sealed class 
DateTimeOffsetConverter(bool dateTimeInfinityConversions) : PgBufferedConverter { - readonly bool _dateTimeInfinityConversions; - public DateTimeOffsetConverter(bool dateTimeInfinityConversions) - => _dateTimeInfinityConversions = dateTimeInfinityConversions; - public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) { bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long)); @@ -40,14 +27,14 @@ public override bool CanConvert(DataFormat format, out BufferRequirements buffer } protected override DateTimeOffset ReadCore(PgReader reader) - => new(PgTimestamp.Decode(reader.ReadInt64(), DateTimeKind.Utc, _dateTimeInfinityConversions), TimeSpan.Zero); + => new(PgTimestamp.Decode(reader.ReadInt64(), DateTimeKind.Utc, dateTimeInfinityConversions), TimeSpan.Zero); protected override void WriteCore(PgWriter writer, DateTimeOffset value) { if (value.Offset != TimeSpan.Zero) throw new ArgumentException($"Cannot write DateTimeOffset with Offset={value.Offset} to PostgreSQL type 'timestamp with time zone', only offset 0 (UTC) is supported. 
", nameof(value)); - writer.WriteInt64(PgTimestamp.Encode(value.DateTime, _dateTimeInfinityConversions)); + writer.WriteInt64(PgTimestamp.Encode(value.DateTime, dateTimeInfinityConversions)); } } diff --git a/src/Npgsql/Internal/Converters/Temporal/LegacyDateTimeConverter.cs b/src/Npgsql/Internal/Converters/Temporal/LegacyDateTimeConverter.cs index 99ad4ed599..8bcca02db1 100644 --- a/src/Npgsql/Internal/Converters/Temporal/LegacyDateTimeConverter.cs +++ b/src/Npgsql/Internal/Converters/Temporal/LegacyDateTimeConverter.cs @@ -2,17 +2,8 @@ namespace Npgsql.Internal.Converters; -sealed class LegacyDateTimeConverter : PgBufferedConverter +sealed class LegacyDateTimeConverter(bool dateTimeInfinityConversions, bool timestamp) : PgBufferedConverter { - readonly bool _dateTimeInfinityConversions; - readonly bool _timestamp; - - public LegacyDateTimeConverter(bool dateTimeInfinityConversions, bool timestamp) - { - _dateTimeInfinityConversions = dateTimeInfinityConversions; - _timestamp = timestamp; - } - public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) { bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long)); @@ -21,33 +12,28 @@ public override bool CanConvert(DataFormat format, out BufferRequirements buffer protected override DateTime ReadCore(PgReader reader) { - if (_timestamp) + if (timestamp) { - return PgTimestamp.Decode(reader.ReadInt64(), DateTimeKind.Unspecified, _dateTimeInfinityConversions); + return PgTimestamp.Decode(reader.ReadInt64(), DateTimeKind.Unspecified, dateTimeInfinityConversions); } - var dateTime = PgTimestamp.Decode(reader.ReadInt64(), DateTimeKind.Utc, _dateTimeInfinityConversions); - return (dateTime == DateTime.MinValue || dateTime == DateTime.MaxValue) && _dateTimeInfinityConversions + var dateTime = PgTimestamp.Decode(reader.ReadInt64(), DateTimeKind.Utc, dateTimeInfinityConversions); + return (dateTime == DateTime.MinValue || dateTime == DateTime.MaxValue) && 
dateTimeInfinityConversions ? dateTime : dateTime.ToLocalTime(); } protected override void WriteCore(PgWriter writer, DateTime value) { - if (!_timestamp && value.Kind is DateTimeKind.Local) + if (!timestamp && value.Kind is DateTimeKind.Local) value = value.ToUniversalTime(); - writer.WriteInt64(PgTimestamp.Encode(value, _dateTimeInfinityConversions)); + writer.WriteInt64(PgTimestamp.Encode(value, dateTimeInfinityConversions)); } } -sealed class LegacyDateTimeOffsetConverter : PgBufferedConverter +sealed class LegacyDateTimeOffsetConverter(bool dateTimeInfinityConversions) : PgBufferedConverter { - readonly bool _dateTimeInfinityConversions; - - public LegacyDateTimeOffsetConverter(bool dateTimeInfinityConversions) - => _dateTimeInfinityConversions = dateTimeInfinityConversions; - public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) { bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long)); @@ -56,9 +42,9 @@ public override bool CanConvert(DataFormat format, out BufferRequirements buffer protected override DateTimeOffset ReadCore(PgReader reader) { - var dateTime = PgTimestamp.Decode(reader.ReadInt64(), DateTimeKind.Utc, _dateTimeInfinityConversions); + var dateTime = PgTimestamp.Decode(reader.ReadInt64(), DateTimeKind.Utc, dateTimeInfinityConversions); - if (_dateTimeInfinityConversions) + if (dateTimeInfinityConversions) { if (dateTime == DateTime.MinValue) return DateTimeOffset.MinValue; @@ -70,5 +56,5 @@ protected override DateTimeOffset ReadCore(PgReader reader) } protected override void WriteCore(PgWriter writer, DateTimeOffset value) - => writer.WriteInt64(PgTimestamp.Encode(value.UtcDateTime, _dateTimeInfinityConversions)); + => writer.WriteInt64(PgTimestamp.Encode(value.UtcDateTime, dateTimeInfinityConversions)); } diff --git a/src/Npgsql/Internal/Converters/VersionPrefixedTextConverter.cs b/src/Npgsql/Internal/Converters/VersionPrefixedTextConverter.cs index ccb4f2041e..8dc981a47e 100644 --- 
a/src/Npgsql/Internal/Converters/VersionPrefixedTextConverter.cs +++ b/src/Npgsql/Internal/Converters/VersionPrefixedTextConverter.cs @@ -5,23 +5,15 @@ namespace Npgsql.Internal.Converters; -sealed class VersionPrefixedTextConverter : PgStreamingConverter +sealed class VersionPrefixedTextConverter(byte versionPrefix, PgConverter textConverter) + : PgStreamingConverter(textConverter.DbNullPredicateKind is DbNullPredicate.Custom) { - readonly byte _versionPrefix; - readonly PgConverter _textConverter; BufferRequirements _innerRequirements; - public VersionPrefixedTextConverter(byte versionPrefix, PgConverter textConverter) - : base(textConverter.DbNullPredicateKind is DbNullPredicate.Custom) - { - _versionPrefix = versionPrefix; - _textConverter = textConverter; - } - - protected override bool IsDbNullValue(T? value, ref object? writeState) => _textConverter.IsDbNull(value, ref writeState); + protected override bool IsDbNullValue(T? value, ref object? writeState) => textConverter.IsDbNull(value, ref writeState); public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) - => VersionPrefixedTextConverter.CanConvert(_textConverter, format, out _innerRequirements, out bufferRequirements); + => VersionPrefixedTextConverter.CanConvert(textConverter, format, out _innerRequirements, out bufferRequirements); public override T Read(PgReader reader) => Read(async: false, reader, CancellationToken.None).Result; @@ -30,7 +22,7 @@ public override ValueTask ReadAsync(PgReader reader, CancellationToken cancel => Read(async: true, reader, cancellationToken); public override Size GetSize(SizeContext context, [DisallowNull]T value, ref object? writeState) - => _textConverter.GetSize(context, value, ref writeState).Combine(context.Format is DataFormat.Binary ? sizeof(byte) : 0); + => textConverter.GetSize(context, value, ref writeState).Combine(context.Format is DataFormat.Binary ? 
sizeof(byte) : 0); public override void Write(PgWriter writer, [DisallowNull]T value) => Write(async: false, writer, value, CancellationToken.None).GetAwaiter().GetResult(); @@ -40,17 +32,17 @@ public override ValueTask WriteAsync(PgWriter writer, [DisallowNull]T value, Can async ValueTask Read(bool async, PgReader reader, CancellationToken cancellationToken) { - await VersionPrefixedTextConverter.ReadVersion(async, _versionPrefix, reader, _innerRequirements.Read, cancellationToken).ConfigureAwait(false); - return async ? await _textConverter.ReadAsync(reader, cancellationToken).ConfigureAwait(false) : _textConverter.Read(reader); + await VersionPrefixedTextConverter.ReadVersion(async, versionPrefix, reader, _innerRequirements.Read, cancellationToken).ConfigureAwait(false); + return async ? await textConverter.ReadAsync(reader, cancellationToken).ConfigureAwait(false) : textConverter.Read(reader); } async ValueTask Write(bool async, PgWriter writer, [DisallowNull]T value, CancellationToken cancellationToken) { - await VersionPrefixedTextConverter.WriteVersion(async, _versionPrefix, writer, cancellationToken).ConfigureAwait(false); + await VersionPrefixedTextConverter.WriteVersion(async, versionPrefix, writer, cancellationToken).ConfigureAwait(false); if (async) - await _textConverter.WriteAsync(writer, value, cancellationToken).ConfigureAwait(false); + await textConverter.WriteAsync(writer, value, cancellationToken).ConfigureAwait(false); else - _textConverter.Write(writer, value); + textConverter.Write(writer, value); } } diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index cffe1f1755..7a60648c71 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -1959,29 +1959,21 @@ internal NestedCancellableScope StartNestedCancellableOperation( return new(this, registration, currentUserCancellationToken, currentAttemptPostgresCancellation); } - internal readonly struct 
NestedCancellableScope : IDisposable + internal readonly struct NestedCancellableScope( + NpgsqlConnector connector, + CancellationTokenRegistration registration, + CancellationToken previousCancellationToken, + bool previousAttemptPostgresCancellation) + : IDisposable { - readonly NpgsqlConnector _connector; - readonly CancellationTokenRegistration _registration; - readonly CancellationToken _previousCancellationToken; - readonly bool _previousAttemptPostgresCancellation; - - public NestedCancellableScope(NpgsqlConnector connector, CancellationTokenRegistration registration, CancellationToken previousCancellationToken, bool previousAttemptPostgresCancellation) - { - _connector = connector; - _registration = registration; - _previousCancellationToken = previousCancellationToken; - _previousAttemptPostgresCancellation = previousAttemptPostgresCancellation; - } - public void Dispose() { - if (_connector is null) + if (connector is null) return; - _connector.UserCancellationToken = _previousCancellationToken; - _connector.AttemptPostgresCancellation = _previousAttemptPostgresCancellation; - _registration.Dispose(); + connector.UserCancellationToken = previousCancellationToken; + connector.AttemptPostgresCancellation = previousAttemptPostgresCancellation; + registration.Dispose(); } } diff --git a/src/Npgsql/Internal/PgBufferedConverter.cs b/src/Npgsql/Internal/PgBufferedConverter.cs index 9fd4644c56..beced6d589 100644 --- a/src/Npgsql/Internal/PgBufferedConverter.cs +++ b/src/Npgsql/Internal/PgBufferedConverter.cs @@ -6,10 +6,8 @@ namespace Npgsql.Internal; [Experimental(NpgsqlDiagnostics.ConvertersExperimental)] -public abstract class PgBufferedConverter : PgConverter +public abstract class PgBufferedConverter(bool customDbNullPredicate = false) : PgConverter(customDbNullPredicate) { - protected PgBufferedConverter(bool customDbNullPredicate = false) : base(customDbNullPredicate) { } - protected abstract T ReadCore(PgReader reader); protected abstract void 
WriteCore(PgWriter writer, T value); diff --git a/src/Npgsql/Internal/PgConverter.cs b/src/Npgsql/Internal/PgConverter.cs index 323c572e0a..627c4dc979 100644 --- a/src/Npgsql/Internal/PgConverter.cs +++ b/src/Npgsql/Internal/PgConverter.cs @@ -187,17 +187,11 @@ internal static PgConverter UnsafeDowncast(this PgConverter converter) } } -public readonly struct SizeContext +[method: SetsRequiredMembers] +public readonly struct SizeContext(DataFormat format, Size bufferRequirement) { - [SetsRequiredMembers] - public SizeContext(DataFormat format, Size bufferRequirement) - { - Format = format; - BufferRequirement = bufferRequirement; - } - - public required Size BufferRequirement { get; init; } - public DataFormat Format { get; } + public required Size BufferRequirement { get; init; } = bufferRequirement; + public DataFormat Format { get; } = format; } class MultiWriteState : IDisposable diff --git a/src/Npgsql/Internal/PgStreamingConverter.cs b/src/Npgsql/Internal/PgStreamingConverter.cs index c27d7a320e..951e940fd8 100644 --- a/src/Npgsql/Internal/PgStreamingConverter.cs +++ b/src/Npgsql/Internal/PgStreamingConverter.cs @@ -8,10 +8,8 @@ namespace Npgsql.Internal; [Experimental(NpgsqlDiagnostics.ConvertersExperimental)] -public abstract class PgStreamingConverter : PgConverter +public abstract class PgStreamingConverter(bool customDbNullPredicate = false) : PgConverter(customDbNullPredicate) { - protected PgStreamingConverter(bool customDbNullPredicate = false) : base(customDbNullPredicate) { } - public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) { bufferRequirements = BufferRequirements.None; diff --git a/src/Npgsql/Internal/PgTypeInfo.cs b/src/Npgsql/Internal/PgTypeInfo.cs index d83c5dfa36..836cd941b8 100644 --- a/src/Npgsql/Internal/PgTypeInfo.cs +++ b/src/Npgsql/Internal/PgTypeInfo.cs @@ -242,17 +242,17 @@ DataFormat ResolveFormat(PgConverter converter, out BufferRequirements bufferReq } } -public sealed class 
PgResolverTypeInfo : PgTypeInfo +public sealed class PgResolverTypeInfo( + PgSerializerOptions options, + PgConverterResolver converterResolver, + PgTypeId? pgTypeId, + Type? unboxedType = null) + : PgTypeInfo(options, + converterResolver.TypeToConvert, + pgTypeId is { } typeId ? ResolveDefaultId(options, converterResolver, typeId) : null, + unboxedType ?? (converterResolver.TypeToConvert == typeof(object) ? typeof(object) : null)) { - readonly PgConverterResolver _converterResolver; - - public PgResolverTypeInfo(PgSerializerOptions options, PgConverterResolver converterResolver, PgTypeId? pgTypeId, Type? unboxedType = null) - : base(options, - converterResolver.TypeToConvert, - pgTypeId is { } typeId ? ResolveDefaultId(options, converterResolver, typeId) : null, - // We always mark resolvers with type object as boxing, as they may freely return converters for any type (see PgConverterResolver.Validate). - unboxedType ?? (converterResolver.TypeToConvert == typeof(object) ? typeof(object) : null)) - => _converterResolver = converterResolver; + // We always mark resolvers with type object as boxing, as they may freely return converters for any type (see PgConverterResolver.Validate). // We'll always validate the default resolution, the info will be re-used so there is no real downside. static PgConverterResolution ResolveDefaultId(PgSerializerOptions options, PgConverterResolver converterResolver, PgTypeId typeId) @@ -260,7 +260,7 @@ static PgConverterResolution ResolveDefaultId(PgSerializerOptions options, PgCon public PgConverterResolution? GetResolution(T? value, PgTypeId? expectedPgTypeId) { - return _converterResolver is PgConverterResolver resolverT + return converterResolver is PgConverterResolver resolverT ? resolverT.GetInternal(this, value, expectedPgTypeId ?? PgTypeId) : ThrowNotSupportedType(typeof(T)); @@ -271,27 +271,21 @@ PgConverterResolution ThrowNotSupportedType(Type? type) } public PgConverterResolution? GetResolutionAsObject(object? 
value, PgTypeId? expectedPgTypeId) - => _converterResolver.GetAsObjectInternal(this, value, expectedPgTypeId ?? PgTypeId); + => converterResolver.GetAsObjectInternal(this, value, expectedPgTypeId ?? PgTypeId); public PgConverterResolution GetResolution(Field field) - => _converterResolver.GetInternal(this, field); + => converterResolver.GetInternal(this, field); public PgConverterResolution GetDefaultResolution(PgTypeId? expectedPgTypeId) - => _converterResolver.GetDefaultInternal(ValidateResolution, Options.PortableTypeIds, expectedPgTypeId ?? PgTypeId); + => converterResolver.GetDefaultInternal(ValidateResolution, Options.PortableTypeIds, expectedPgTypeId ?? PgTypeId); - public PgConverterResolver GetConverterResolver() => _converterResolver; + public PgConverterResolver GetConverterResolver() => converterResolver; } -public readonly struct PgConverterResolution +public readonly struct PgConverterResolution(PgConverter converter, PgTypeId pgTypeId) { - public PgConverterResolution(PgConverter converter, PgTypeId pgTypeId) - { - Converter = converter; - PgTypeId = pgTypeId; - } - - public PgConverter Converter { get; } - public PgTypeId PgTypeId { get; } + public PgConverter Converter { get; } = converter; + public PgTypeId PgTypeId { get; } = pgTypeId; public PgConverter GetConverter() => (PgConverter)Converter; } diff --git a/src/Npgsql/Internal/PgWriter.cs b/src/Npgsql/Internal/PgWriter.cs index 6fe6ed0e4c..8dd0a9ba9f 100644 --- a/src/Npgsql/Internal/PgWriter.cs +++ b/src/Npgsql/Internal/PgWriter.cs @@ -27,40 +27,38 @@ interface IStreamingWriter: IBufferWriter ValueTask FlushAsync(CancellationToken cancellationToken = default); } -sealed class NpgsqlBufferWriter : IStreamingWriter +sealed class NpgsqlBufferWriter(NpgsqlWriteBuffer buffer) : IStreamingWriter { - readonly NpgsqlWriteBuffer _buffer; int? 
_lastBufferSize; - public NpgsqlBufferWriter(NpgsqlWriteBuffer buffer) => _buffer = buffer; public void Advance(int count) { - if (_lastBufferSize < count || _buffer.WriteSpaceLeft < count) + if (_lastBufferSize < count || buffer.WriteSpaceLeft < count) ThrowHelper.ThrowInvalidOperationException("Cannot advance past the end of the current buffer."); _lastBufferSize = null; - _buffer.WritePosition += count; + buffer.WritePosition += count; } public Memory GetMemory(int sizeHint = 0) { - var writePosition = _buffer.WritePosition; - var bufferSize = _buffer.Size - writePosition; + var writePosition = buffer.WritePosition; + var bufferSize = buffer.Size - writePosition; if (sizeHint > bufferSize) ThrowOutOfMemoryException(); _lastBufferSize = bufferSize; - return _buffer.Buffer.AsMemory(writePosition, bufferSize); + return buffer.Buffer.AsMemory(writePosition, bufferSize); } public Span GetSpan(int sizeHint = 0) { - var writePosition = _buffer.WritePosition; - var bufferSize = _buffer.Size - writePosition; + var writePosition = buffer.WritePosition; + var bufferSize = buffer.Size - writePosition; if (sizeHint > bufferSize) ThrowOutOfMemoryException(); _lastBufferSize = bufferSize; - return _buffer.Buffer.AsSpan(writePosition, bufferSize); + return buffer.Buffer.AsSpan(writePosition, bufferSize); } static void ThrowOutOfMemoryException() => throw new OutOfMemoryException("Not enough space left in buffer."); @@ -68,7 +66,7 @@ public Span GetSpan(int sizeHint = 0) public void Flush(TimeSpan timeout = default) { if (timeout == TimeSpan.Zero) - _buffer.Flush(); + buffer.Flush(); else { TimeSpan? 
originalTimeout = null; @@ -76,21 +74,21 @@ public void Flush(TimeSpan timeout = default) { if (timeout != TimeSpan.Zero) { - originalTimeout = _buffer.Timeout; - _buffer.Timeout = timeout; + originalTimeout = buffer.Timeout; + buffer.Timeout = timeout; } - _buffer.Flush(); + buffer.Flush(); } finally { if (originalTimeout is { } value) - _buffer.Timeout = value; + buffer.Timeout = value; } } } public ValueTask FlushAsync(CancellationToken cancellationToken = default) - => new(_buffer.Flush(async: true, cancellationToken)); + => new(buffer.Flush(async: true, cancellationToken)); } [Experimental(NpgsqlDiagnostics.ConvertersExperimental)] diff --git a/src/Npgsql/Internal/Postgres/Field.cs b/src/Npgsql/Internal/Postgres/Field.cs index cb2879f998..abd74a0bc7 100644 --- a/src/Npgsql/Internal/Postgres/Field.cs +++ b/src/Npgsql/Internal/Postgres/Field.cs @@ -4,16 +4,9 @@ namespace Npgsql.Internal.Postgres; /// Base field type shared between tables and composites. [Experimental(NpgsqlDiagnostics.ConvertersExperimental)] -public readonly struct Field +public readonly struct Field(string name, PgTypeId pgTypeId, int typeModifier) { - public Field(string name, PgTypeId pgTypeId, int typeModifier) - { - Name = name; - PgTypeId = pgTypeId; - TypeModifier = typeModifier; - } - - public string Name { get; init; } - public PgTypeId PgTypeId { get; init; } - public int TypeModifier { get; init; } + public string Name { get; init; } = name; + public PgTypeId PgTypeId { get; init; } = pgTypeId; + public int TypeModifier { get; init; } = typeModifier; } diff --git a/src/Npgsql/Internal/Postgres/Oid.cs b/src/Npgsql/Internal/Postgres/Oid.cs index 55ede288fe..8c01e65ff7 100644 --- a/src/Npgsql/Internal/Postgres/Oid.cs +++ b/src/Npgsql/Internal/Postgres/Oid.cs @@ -4,13 +4,11 @@ namespace Npgsql.Internal.Postgres; [Experimental(NpgsqlDiagnostics.ConvertersExperimental)] -public readonly struct Oid: IEquatable +public readonly struct Oid(uint value) : IEquatable { - public Oid(uint value) 
=> Value = value; - public static explicit operator uint(Oid oid) => oid.Value; public static implicit operator Oid(uint oid) => new(oid); - public uint Value { get; init; } + public uint Value { get; init; } = value; public static Oid Unspecified => new(0); public override string ToString() => Value.ToString(); diff --git a/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs index c164e40a22..2aa24ec888 100644 --- a/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs @@ -12,21 +12,14 @@ namespace Npgsql.Internal.ResolverFactories; [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] [RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] -sealed class JsonDynamicTypeInfoResolverFactory : PgTypeInfoResolverFactory +sealed class JsonDynamicTypeInfoResolverFactory( + Type[]? jsonbClrTypes = null, + Type[]? jsonClrTypes = null, + JsonSerializerOptions? serializerOptions = null) + : PgTypeInfoResolverFactory { - readonly Type[]? _jsonbClrTypes; - readonly Type[]? _jsonClrTypes; - readonly JsonSerializerOptions? _serializerOptions; - - public JsonDynamicTypeInfoResolverFactory(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerOptions? 
serializerOptions = null) - { - _jsonbClrTypes = jsonbClrTypes; - _jsonClrTypes = jsonClrTypes; - _serializerOptions = serializerOptions; - } - - public override IPgTypeInfoResolver CreateResolver() => new Resolver(_jsonbClrTypes, _jsonClrTypes, _serializerOptions); - public override IPgTypeInfoResolver CreateArrayResolver() => new ArrayResolver(_jsonbClrTypes, _jsonClrTypes, _serializerOptions); + public override IPgTypeInfoResolver CreateResolver() => new Resolver(jsonbClrTypes, jsonClrTypes, serializerOptions); + public override IPgTypeInfoResolver CreateArrayResolver() => new ArrayResolver(jsonbClrTypes, jsonClrTypes, serializerOptions); // Split into a nested class to avoid erroneous trimming/AOT warnings because the JsonDynamicTypeInfoResolverFactory is marked as incompatible. internal static class Support @@ -45,9 +38,10 @@ public static void ThrowIfUnsupported(Type? type, DataTypeName? dataTy [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] [RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] - class Resolver : DynamicTypeInfoResolver, IPgTypeInfoResolver + class Resolver(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerOptions? serializerOptions = null) + : DynamicTypeInfoResolver, IPgTypeInfoResolver { - JsonSerializerOptions? _serializerOptions; + JsonSerializerOptions? _serializerOptions = serializerOptions; JsonSerializerOptions SerializerOptions #if NET7_0_OR_GREATER => _serializerOptions ??= JsonSerializerOptions.Default; @@ -55,19 +49,12 @@ JsonSerializerOptions SerializerOptions => _serializerOptions ??= new(); #endif - readonly Type[] _jsonbClrTypes; - readonly Type[] _jsonClrTypes; + readonly Type[] _jsonbClrTypes = jsonbClrTypes ?? []; + readonly Type[] _jsonClrTypes = jsonClrTypes ?? []; TypeInfoMappingCollection? 
_mappings; protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(), _jsonbClrTypes, _jsonClrTypes, SerializerOptions); - public Resolver(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerOptions? serializerOptions = null) - { - _jsonbClrTypes = jsonbClrTypes ?? []; - _jsonClrTypes = jsonClrTypes ?? []; - _serializerOptions = serializerOptions; - } - public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) => Mappings.Find(type, dataTypeName, options) ?? base.GetTypeInfo(type, dataTypeName, options); @@ -148,14 +135,12 @@ static PgConverter CreateSystemTextJsonConverter(Type valueType, bool jsonb, Enc [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] [RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] - sealed class ArrayResolver : Resolver, IPgTypeInfoResolver + sealed class ArrayResolver(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerOptions? serializerOptions = null) + : Resolver(jsonbClrTypes, jsonClrTypes, serializerOptions), IPgTypeInfoResolver { TypeInfoMappingCollection? _mappings; new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings), base.Mappings); - public ArrayResolver(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSerializerOptions? serializerOptions = null) - : base(jsonbClrTypes, jsonClrTypes, serializerOptions) { } - public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) => Mappings.Find(type, dataTypeName, options) ?? 
base.GetTypeInfo(type, dataTypeName, options); diff --git a/src/Npgsql/Internal/ResolverFactories/JsonTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/JsonTypeInfoResolverFactory.cs index a94d5d36f8..24903e3f54 100644 --- a/src/Npgsql/Internal/ResolverFactories/JsonTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/JsonTypeInfoResolverFactory.cs @@ -6,14 +6,10 @@ namespace Npgsql.Internal.ResolverFactories; -sealed class JsonTypeInfoResolverFactory : PgTypeInfoResolverFactory +sealed class JsonTypeInfoResolverFactory(JsonSerializerOptions? serializerOptions = null) : PgTypeInfoResolverFactory { - readonly JsonSerializerOptions? _serializerOptions; - - public JsonTypeInfoResolverFactory(JsonSerializerOptions? serializerOptions = null) => _serializerOptions = serializerOptions; - - public override IPgTypeInfoResolver CreateResolver() => new Resolver(_serializerOptions); - public override IPgTypeInfoResolver CreateArrayResolver() => new ArrayResolver(_serializerOptions); + public override IPgTypeInfoResolver CreateResolver() => new Resolver(serializerOptions); + public override IPgTypeInfoResolver CreateArrayResolver() => new ArrayResolver(serializerOptions); class Resolver : IPgTypeInfoResolver { @@ -73,16 +69,11 @@ sealed class BasicJsonTypeInfoResolver : IJsonTypeInfoResolver } } - sealed class ArrayResolver : Resolver, IPgTypeInfoResolver + sealed class ArrayResolver(JsonSerializerOptions? serializerOptions = null) : Resolver(serializerOptions), IPgTypeInfoResolver { TypeInfoMappingCollection? _mappings; new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); - public ArrayResolver(JsonSerializerOptions? serializerOptions = null) - : base(serializerOptions) - { - } - public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) => Mappings.Find(type, dataTypeName, options); diff --git a/src/Npgsql/Internal/TypeInfoCache.cs b/src/Npgsql/Internal/TypeInfoCache.cs index ca646b5d29..7627997822 100644 --- a/src/Npgsql/Internal/TypeInfoCache.cs +++ b/src/Npgsql/Internal/TypeInfoCache.cs @@ -5,11 +5,9 @@ namespace Npgsql.Internal; -sealed class TypeInfoCache where TPgTypeId : struct +sealed class TypeInfoCache(PgSerializerOptions options, bool validatePgTypeIds = true) + where TPgTypeId : struct { - readonly PgSerializerOptions _options; - readonly bool _validatePgTypeIds; - // Mostly used for parameter writing, 8ns readonly ConcurrentDictionary _cacheByClrType = new(); @@ -23,12 +21,6 @@ static TypeInfoCache() throw new InvalidOperationException("Cannot use this type argument."); } - public TypeInfoCache(PgSerializerOptions options, bool validatePgTypeIds = true) - { - _options = options; - _validatePgTypeIds = validatePgTypeIds; - } - /// /// /// @@ -82,7 +74,7 @@ public TypeInfoCache(PgSerializerOptions options, bool validatePgTypeIds = true) PgTypeInfo? AddByType(Type type) { // We don't pass PgTypeId as we're interested in default converters here. - var info = CreateInfo(type, null, _options, defaultTypeFallback: false, _validatePgTypeIds); + var info = CreateInfo(type, null, options, defaultTypeFallback: false, validatePgTypeIds); return info is null ? null @@ -94,7 +86,7 @@ public TypeInfoCache(PgSerializerOptions options, bool validatePgTypeIds = true) PgTypeInfo? AddEntryById(Type? type, TPgTypeId pgTypeId, (Type? Type, PgTypeInfo? Info)[]? infos, bool defaultTypeFallback) { // We cache negatives (null info) to allow 'object or default' checks to never hit the resolvers after the first lookup. 
- var info = CreateInfo(type, pgTypeId, _options, defaultTypeFallback, _validatePgTypeIds); + var info = CreateInfo(type, pgTypeId, options, defaultTypeFallback, validatePgTypeIds); var isDefaultInfo = type is null && info is not null; if (infos is null) diff --git a/src/Npgsql/Internal/TypeInfoMapping.cs b/src/Npgsql/Internal/TypeInfoMapping.cs index 2ece0ae474..1d54101379 100644 --- a/src/Npgsql/Internal/TypeInfoMapping.cs +++ b/src/Npgsql/Internal/TypeInfoMapping.cs @@ -59,22 +59,15 @@ public static PgConverter CreatePolymorphicArrayConverter(Func? TypeMatchPredicate { get; init; } public bool TypeEquals(Type type) => TypeMatchPredicate?.Invoke(type) ?? Type == type; diff --git a/src/Npgsql/NpgsqlConnection.cs b/src/Npgsql/NpgsqlConnection.cs index b8bfbd8d01..e0315b5952 100644 --- a/src/Npgsql/NpgsqlConnection.cs +++ b/src/Npgsql/NpgsqlConnection.cs @@ -1967,11 +1967,9 @@ enum ConnectorBindingScope Temporary } -readonly struct EndScopeDisposable : IDisposable +readonly struct EndScopeDisposable(NpgsqlConnection connection) : IDisposable { - readonly NpgsqlConnection _connection; - public EndScopeDisposable(NpgsqlConnection connection) => _connection = connection; - public void Dispose() => _connection.EndBindingScope(ConnectorBindingScope.Temporary); + public void Dispose() => connection.EndBindingScope(ConnectorBindingScope.Temporary); } #region Delegates diff --git a/src/Npgsql/NpgsqlDataAdapter.cs b/src/Npgsql/NpgsqlDataAdapter.cs index c18773b2d6..f98f4cca61 100644 --- a/src/Npgsql/NpgsqlDataAdapter.cs +++ b/src/Npgsql/NpgsqlDataAdapter.cs @@ -213,18 +213,18 @@ async Task Fill(DataTable dataTable, NpgsqlDataReader dataReader, bool asyn #pragma warning disable 1591 -public class NpgsqlRowUpdatingEventArgs : RowUpdatingEventArgs -{ - public NpgsqlRowUpdatingEventArgs(DataRow dataRow, IDbCommand? 
command, System.Data.StatementType statementType, - DataTableMapping tableMapping) - : base(dataRow, command, statementType, tableMapping) {} -} - -public class NpgsqlRowUpdatedEventArgs : RowUpdatedEventArgs -{ - public NpgsqlRowUpdatedEventArgs(DataRow dataRow, IDbCommand? command, System.Data.StatementType statementType, - DataTableMapping tableMapping) - : base(dataRow, command, statementType, tableMapping) {} -} +public class NpgsqlRowUpdatingEventArgs( + DataRow dataRow, + IDbCommand? command, + System.Data.StatementType statementType, + DataTableMapping tableMapping) + : RowUpdatingEventArgs(dataRow, command, statementType, tableMapping); + +public class NpgsqlRowUpdatedEventArgs( + DataRow dataRow, + IDbCommand? command, + System.Data.StatementType statementType, + DataTableMapping tableMapping) + : RowUpdatedEventArgs(dataRow, command, statementType, tableMapping); #pragma warning restore 1591 diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index f349291ed6..acdaf5f56f 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -520,16 +520,13 @@ private protected void CheckDisposed() #endregion - sealed class DatabaseStateInfo + sealed class DatabaseStateInfo(DatabaseState state, NpgsqlTimeout timeout, DateTime timeStamp) { - internal readonly DatabaseState State; - internal readonly NpgsqlTimeout Timeout; + internal readonly DatabaseState State = state; + internal readonly NpgsqlTimeout Timeout = timeout; // While the TimeStamp is not strictly required, it does lower the risk of overwriting the current state with an old value - internal readonly DateTime TimeStamp; + internal readonly DateTime TimeStamp = timeStamp; public DatabaseStateInfo() : this(default, default, default) { } - - public DatabaseStateInfo(DatabaseState state, NpgsqlTimeout timeout, DateTime timeStamp) - => (State, Timeout, TimeStamp) = (state, timeout, timeStamp); } } diff --git a/src/Npgsql/NpgsqlNestedDataReader.cs 
b/src/Npgsql/NpgsqlNestedDataReader.cs index f35e635e50..b505fe04f0 100644 --- a/src/Npgsql/NpgsqlNestedDataReader.cs +++ b/src/Npgsql/NpgsqlNestedDataReader.cs @@ -34,27 +34,18 @@ public sealed class NpgsqlNestedDataReader : DbDataReader DataFormat Format => DataFormat.Binary; - readonly struct ColumnInfo + readonly struct ColumnInfo(PostgresType postgresType, int bufferPos, PgTypeInfo objectOrDefaultTypeInfo, DataFormat format) { - readonly DataFormat _format; - public PostgresType PostgresType { get; } - public int BufferPos { get; } + public PostgresType PostgresType { get; } = postgresType; + public int BufferPos { get; } = bufferPos; public PgConverterInfo LastConverterInfo { get; init; } - public PgTypeInfo ObjectOrDefaultTypeInfo { get; } - public PgConverterInfo GetObjectOrDefaultInfo() => ObjectOrDefaultTypeInfo.Bind(Field, _format); + public PgTypeInfo ObjectOrDefaultTypeInfo { get; } = objectOrDefaultTypeInfo; + public PgConverterInfo GetObjectOrDefaultInfo() => ObjectOrDefaultTypeInfo.Bind(Field, format); Field Field => new("?", ObjectOrDefaultTypeInfo.Options.PortableTypeIds ? PostgresType.DataTypeName : (Oid)PostgresType.OID, -1); - public PgConverterInfo Bind(PgTypeInfo typeInfo) => typeInfo.Bind(Field, _format); - - public ColumnInfo(PostgresType postgresType, int bufferPos, PgTypeInfo objectOrDefaultTypeInfo, DataFormat format) - { - _format = format; - PostgresType = postgresType; - BufferPos = bufferPos; - ObjectOrDefaultTypeInfo = objectOrDefaultTypeInfo; - } + public PgConverterInfo Bind(PgTypeInfo typeInfo) => typeInfo.Bind(Field, format); } PgReader PgReader => _outermostReader.Buffer.PgReader; diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs b/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs index 616ec83dcf..cbfc201e85 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs @@ -380,16 +380,10 @@ public override bool Equals(object? obj) => left is null ? 
right is not null : !left.Equals(right); } -readonly struct NpgsqlTsQueryOperator +readonly struct NpgsqlTsQueryOperator(char character, short followedByDistance) { - public readonly char Char; - public readonly short FollowedByDistance; - - public NpgsqlTsQueryOperator(char character, short followedByDistance) - { - Char = character; - FollowedByDistance = followedByDistance; - } + public readonly char Char = character; + public readonly short FollowedByDistance = followedByDistance; public static implicit operator NpgsqlTsQueryOperator(char c) => new(c, 0); public static implicit operator char(NpgsqlTsQueryOperator o) => o.Char; diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs b/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs index b861548b83..493be99dea 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs @@ -17,17 +17,10 @@ namespace NpgsqlTypes; /// /// See https://www.postgresql.org/docs/current/static/datatype-geometric.html /// -public struct NpgsqlPoint : IEquatable +public struct NpgsqlPoint(double x, double y) : IEquatable { - public double X { get; set; } - public double Y { get; set; } - - public NpgsqlPoint(double x, double y) - : this() - { - X = x; - Y = y; - } + public double X { get; set; } = x; + public double Y { get; set; } = y; // ReSharper disable CompareOfFloatsByEqualityOperator public bool Equals(NpgsqlPoint other) => X == other.X && Y == other.Y; @@ -53,19 +46,11 @@ public override string ToString() /// /// See https://www.postgresql.org/docs/current/static/datatype-geometric.html /// -public struct NpgsqlLine : IEquatable +public struct NpgsqlLine(double a, double b, double c) : IEquatable { - public double A { get; set; } - public double B { get; set; } - public double C { get; set; } - - public NpgsqlLine(double a, double b, double c) - : this() - { - A = a; - B = b; - C = c; - } + public double A { get; set; } = a; + public double B { get; set; } = b; + public double C { get; set; } = c; public 
override string ToString() => string.Format(CultureInfo.InvariantCulture, "{{{0},{1},{2}}}", A, B, C); @@ -395,25 +380,15 @@ public override string ToString() /// /// Represents a PostgreSQL Circle type. /// -public struct NpgsqlCircle : IEquatable +public struct NpgsqlCircle(double x, double y, double radius) : IEquatable { - public double X { get; set; } - public double Y { get; set; } - public double Radius { get; set; } + public double X { get; set; } = x; + public double Y { get; set; } = y; + public double Radius { get; set; } = radius; public NpgsqlCircle(NpgsqlPoint center, double radius) - : this() - { - X = center.X; - Y = center.Y; - Radius = radius; - } - - public NpgsqlCircle(double x, double y, double radius) : this() + : this(center.X, center.Y, radius) { - X = x; - Y = y; - Radius = radius; } public NpgsqlPoint Center @@ -557,23 +532,17 @@ public void Deconstruct(out IPAddress address, out byte netmask) /// /// https://www.postgresql.org/docs/current/static/datatype-oid.html /// -public readonly struct NpgsqlTid : IEquatable +public readonly struct NpgsqlTid(uint blockNumber, ushort offsetNumber) : IEquatable { /// /// Block number /// - public uint BlockNumber { get; } + public uint BlockNumber { get; } = blockNumber; /// /// Tuple index within block /// - public ushort OffsetNumber { get; } - - public NpgsqlTid(uint blockNumber, ushort offsetNumber) - { - BlockNumber = blockNumber; - OffsetNumber = offsetNumber; - } + public ushort OffsetNumber { get; } = offsetNumber; public bool Equals(NpgsqlTid other) => BlockNumber == other.BlockNumber && OffsetNumber == other.OffsetNumber; diff --git a/src/Npgsql/TypeMapping/UserTypeMapper.cs b/src/Npgsql/TypeMapping/UserTypeMapper.cs index 7c86f5b949..7db06826cb 100644 --- a/src/Npgsql/TypeMapping/UserTypeMapper.cs +++ b/src/Npgsql/TypeMapping/UserTypeMapper.cs @@ -148,14 +148,12 @@ static string GetPgName(Type type, INpgsqlNameTranslator nameTranslator) public override IPgTypeInfoResolver CreateResolver() 
=> new Resolver([.._mappings]); public override IPgTypeInfoResolver CreateArrayResolver() => new ArrayResolver([.._mappings]); - class Resolver : IPgTypeInfoResolver + class Resolver(List userTypeMappings) : IPgTypeInfoResolver { - protected readonly List _userTypeMappings; + protected readonly List _userTypeMappings = userTypeMappings; TypeInfoMappingCollection? _mappings; protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new()); - public Resolver(List userTypeMappings) => _userTypeMappings = userTypeMappings; - PgTypeInfo? IPgTypeInfoResolver.GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) => Mappings.Find(type, dataTypeName, options); @@ -168,13 +166,11 @@ TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) } } - sealed class ArrayResolver : Resolver, IPgTypeInfoResolver + sealed class ArrayResolver(List userTypeMappings) : Resolver(userTypeMappings), IPgTypeInfoResolver { TypeInfoMappingCollection? _mappings; new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); - public ArrayResolver(List userTypeMappings) : base(userTypeMappings) { } - PgTypeInfo? IPgTypeInfoResolver.GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) => Mappings.Find(type, dataTypeName, options); @@ -188,14 +184,12 @@ TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) } [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. 
This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] - sealed class CompositeMapping<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicProperties)] T> : UserTypeMapping where T : class + sealed class CompositeMapping< + [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicFields | + DynamicallyAccessedMemberTypes.PublicProperties)] + T>(string pgTypeName, INpgsqlNameTranslator nameTranslator) : UserTypeMapping(pgTypeName, typeof(T)) + where T : class { - readonly INpgsqlNameTranslator _nameTranslator; - - public CompositeMapping(string pgTypeName, INpgsqlNameTranslator nameTranslator) - : base(pgTypeName, typeof(T)) - => _nameTranslator = nameTranslator; - internal override void AddMapping(TypeInfoMappingCollection mappings) { mappings.AddType(PgTypeName, (options, mapping, _) => @@ -205,7 +199,7 @@ internal override void AddMapping(TypeInfoMappingCollection mappings) throw new InvalidOperationException("Composite mapping must be to a composite type"); return mapping.CreateInfo(options, new CompositeConverter( - ReflectionCompositeInfoFactory.CreateCompositeInfo(compositeType, _nameTranslator, options))); + ReflectionCompositeInfoFactory.CreateCompositeInfo(compositeType, nameTranslator, options))); }, isDefault: true); } @@ -213,14 +207,12 @@ internal override void AddMapping(TypeInfoMappingCollection mappings) } [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. 
This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] - sealed class StructCompositeMapping<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicProperties)] T> : UserTypeMapping where T : struct + sealed class StructCompositeMapping< + [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicFields | + DynamicallyAccessedMemberTypes.PublicProperties)] + T>(string pgTypeName, INpgsqlNameTranslator nameTranslator) : UserTypeMapping(pgTypeName, typeof(T)) + where T : struct { - readonly INpgsqlNameTranslator _nameTranslator; - - public StructCompositeMapping(string pgTypeName, INpgsqlNameTranslator nameTranslator) - : base(pgTypeName, typeof(T)) - => _nameTranslator = nameTranslator; - internal override void AddMapping(TypeInfoMappingCollection mappings) { mappings.AddStructType(PgTypeName, (options, mapping, dataTypeNameMatch) => @@ -230,20 +222,20 @@ internal override void AddMapping(TypeInfoMappingCollection mappings) throw new InvalidOperationException("Composite mapping must be to a composite type"); return mapping.CreateInfo(options, new CompositeConverter( - ReflectionCompositeInfoFactory.CreateCompositeInfo(compositeType, _nameTranslator, options))); + ReflectionCompositeInfoFactory.CreateCompositeInfo(compositeType, nameTranslator, options))); }, isDefault: true); } internal override void AddArrayMapping(TypeInfoMappingCollection mappings) => mappings.AddStructArrayType(PgTypeName); } - internal abstract class EnumMapping : UserTypeMapping + internal abstract class EnumMapping( + string pgTypeName, + [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] Type enumClrType, + INpgsqlNameTranslator nameTranslator) + : UserTypeMapping(pgTypeName, enumClrType) { - internal INpgsqlNameTranslator NameTranslator { get; } - - public 
EnumMapping(string pgTypeName, [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)]Type enumClrType, INpgsqlNameTranslator nameTranslator) - : base(pgTypeName, enumClrType) - => NameTranslator = nameTranslator; + internal INpgsqlNameTranslator NameTranslator { get; } = nameTranslator; } sealed class EnumMapping<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] TEnum> : EnumMapping diff --git a/src/Npgsql/Util/ResettableCancellationTokenSource.cs b/src/Npgsql/Util/ResettableCancellationTokenSource.cs index d87fc9a80e..f4b1652e2a 100644 --- a/src/Npgsql/Util/ResettableCancellationTokenSource.cs +++ b/src/Npgsql/Util/ResettableCancellationTokenSource.cs @@ -13,11 +13,11 @@ namespace Npgsql.Util; /// we need to make sure that an existing cancellation token source hasn't been cancelled, /// every time we start it (see https://github.com/dotnet/runtime/issues/4694). /// -sealed class ResettableCancellationTokenSource : IDisposable +sealed class ResettableCancellationTokenSource(TimeSpan timeout) : IDisposable { bool isDisposed; - public TimeSpan Timeout { get; set; } + public TimeSpan Timeout { get; set; } = timeout; CancellationTokenSource _cts = new(); CancellationTokenRegistration? 
_registration; @@ -31,9 +31,9 @@ sealed class ResettableCancellationTokenSource : IDisposable bool _isRunning; #endif - public ResettableCancellationTokenSource() => Timeout = InfiniteTimeSpan; - - public ResettableCancellationTokenSource(TimeSpan timeout) => Timeout = timeout; + public ResettableCancellationTokenSource() : this(InfiniteTimeSpan) + { + } /// /// Set the timeout on the wrapped diff --git a/src/Npgsql/Util/TaskSchedulerAwaitable.cs b/src/Npgsql/Util/TaskSchedulerAwaitable.cs index be16d8fa55..1b6d2c5647 100644 --- a/src/Npgsql/Util/TaskSchedulerAwaitable.cs +++ b/src/Npgsql/Util/TaskSchedulerAwaitable.cs @@ -6,11 +6,8 @@ namespace Npgsql.Util; -readonly struct TaskSchedulerAwaitable : ICriticalNotifyCompletion +readonly struct TaskSchedulerAwaitable(TaskScheduler scheduler) : ICriticalNotifyCompletion { - readonly TaskScheduler _scheduler; - public TaskSchedulerAwaitable(TaskScheduler scheduler) => _scheduler = scheduler; - public void GetResult() {} public bool IsCompleted => false; @@ -18,7 +15,7 @@ public void OnCompleted(Action continuation) { var task = Task.Factory.StartNew(continuation, CancellationToken.None, TaskCreationOptions.DenyChildAttach, - scheduler: _scheduler); + scheduler: scheduler); // Exceptions should never happen as the continuation should be the async statemachine. // It normally does its own error handling through the returned task unless it's an async void returning method. 
diff --git a/src/Shared/CodeAnalysis.cs b/src/Shared/CodeAnalysis.cs index 0e98a03210..518874c699 100644 --- a/src/Shared/CodeAnalysis.cs +++ b/src/Shared/CodeAnalysis.cs @@ -59,17 +59,12 @@ sealed class RequiredMemberAttribute : Attribute { } [AttributeUsage(AttributeTargets.All, AllowMultiple = true, Inherited = false)] - sealed class CompilerFeatureRequiredAttribute : Attribute + sealed class CompilerFeatureRequiredAttribute(string featureName) : Attribute { - public CompilerFeatureRequiredAttribute(string featureName) - { - FeatureName = featureName; - } - /// /// The name of the compiler feature. /// - public string FeatureName { get; } + public string FeatureName { get; } = featureName; /// /// If true, the compiler can choose to allow access to the location where this attribute is applied if it does not understand . diff --git a/test/Npgsql.Benchmarks/TypeHandlers/Numeric.cs b/test/Npgsql.Benchmarks/TypeHandlers/Numeric.cs index 66d3a82aa8..ae5dbfe0d9 100644 --- a/test/Npgsql.Benchmarks/TypeHandlers/Numeric.cs +++ b/test/Npgsql.Benchmarks/TypeHandlers/Numeric.cs @@ -5,40 +5,23 @@ namespace Npgsql.Benchmarks.TypeHandlers; [Config(typeof(Config))] -public class Int16 : TypeHandlerBenchmarks -{ - public Int16() : base(new Int2Converter()) { } -} +public class Int16() : TypeHandlerBenchmarks(new Int2Converter()); [Config(typeof(Config))] -public class Int32 : TypeHandlerBenchmarks -{ - public Int32() : base(new Int4Converter()) { } -} +public class Int32() : TypeHandlerBenchmarks(new Int4Converter()); [Config(typeof(Config))] -public class Int64 : TypeHandlerBenchmarks -{ - public Int64() : base(new Int8Converter()) { } -} +public class Int64() : TypeHandlerBenchmarks(new Int8Converter()); [Config(typeof(Config))] -public class Single : TypeHandlerBenchmarks -{ - public Single() : base(new RealConverter()) { } -} +public class Single() : TypeHandlerBenchmarks(new RealConverter()); [Config(typeof(Config))] -public class Double : TypeHandlerBenchmarks -{ - public 
Double() : base(new DoubleConverter()) { } -} +public class Double() : TypeHandlerBenchmarks(new DoubleConverter()); [Config(typeof(Config))] -public class Numeric : TypeHandlerBenchmarks +public class Numeric() : TypeHandlerBenchmarks(new DecimalNumericConverter()) { - public Numeric() : base(new DecimalNumericConverter()) { } - protected override IEnumerable ValuesOverride() => [ 0.0000000000000000000000000001M, @@ -60,7 +43,4 @@ protected override IEnumerable ValuesOverride() => } [Config(typeof(Config))] -public class Money : TypeHandlerBenchmarks -{ - public Money() : base(new MoneyConverter()) { } -} +public class Money() : TypeHandlerBenchmarks(new MoneyConverter()); diff --git a/test/Npgsql.Benchmarks/TypeHandlers/Text.cs b/test/Npgsql.Benchmarks/TypeHandlers/Text.cs index 80d5f6ce0c..6216cdc5de 100644 --- a/test/Npgsql.Benchmarks/TypeHandlers/Text.cs +++ b/test/Npgsql.Benchmarks/TypeHandlers/Text.cs @@ -6,10 +6,8 @@ namespace Npgsql.Benchmarks.TypeHandlers; [Config(typeof(Config))] -public class Text : TypeHandlerBenchmarks +public class Text() : TypeHandlerBenchmarks(new StringTextConverter(Encoding.UTF8)) { - public Text() : base(new StringTextConverter(Encoding.UTF8)) { } - protected override IEnumerable ValuesOverride() { for (var i = 1; i <= 10000; i *= 10) diff --git a/test/Npgsql.Benchmarks/TypeHandlers/Uuid.cs b/test/Npgsql.Benchmarks/TypeHandlers/Uuid.cs index 7c229a3b57..a497a0c509 100644 --- a/test/Npgsql.Benchmarks/TypeHandlers/Uuid.cs +++ b/test/Npgsql.Benchmarks/TypeHandlers/Uuid.cs @@ -5,7 +5,4 @@ namespace Npgsql.Benchmarks.TypeHandlers; [Config(typeof(Config))] -public class Uuid : TypeHandlerBenchmarks -{ - public Uuid() : base(new GuidUuidConverter()) { } -} +public class Uuid() : TypeHandlerBenchmarks(new GuidUuidConverter()); diff --git a/test/Npgsql.PluginTests/JsonNetTests.cs b/test/Npgsql.PluginTests/JsonNetTests.cs index a88fed0492..5ab396bdc8 100644 --- a/test/Npgsql.PluginTests/JsonNetTests.cs +++ 
b/test/Npgsql.PluginTests/JsonNetTests.cs @@ -17,7 +17,7 @@ namespace Npgsql.PluginTests; /// [TestFixture(NpgsqlDbType.Jsonb)] [TestFixture(NpgsqlDbType.Json)] -public class JsonNetTests : TestBase +public class JsonNetTests(NpgsqlDbType npgsqlDbType) : TestBase { [Test] public Task Roundtrip_object() @@ -26,7 +26,7 @@ public Task Roundtrip_object() new Foo { Bar = 8 }, IsJsonb ? @"{""Bar"": 8}" : @"{""Bar"":8}", _pgTypeName, - _npgsqlDbType, + npgsqlDbType, isDefault: false, isNpgsqlDbTypeInferredFromClrType: false); @@ -37,7 +37,7 @@ public Task Roundtrip_string() @"{""p"": 1}", @"{""p"": 1}", _pgTypeName, - _npgsqlDbType, + npgsqlDbType, isDefault: false, isNpgsqlDbTypeInferredFromClrType: false); @@ -48,7 +48,7 @@ public Task Roundtrip_char_array() @"{""p"": 1}".ToCharArray(), @"{""p"": 1}", _pgTypeName, - _npgsqlDbType, + npgsqlDbType, isDefault: false, isNpgsqlDbTypeInferredFromClrType: false); @@ -59,7 +59,7 @@ public Task Roundtrip_byte_array() Encoding.ASCII.GetBytes(@"{""p"": 1}"), @"{""p"": 1}", _pgTypeName, - _npgsqlDbType, + npgsqlDbType, isDefault: false, isNpgsqlDbTypeInferredFromClrType: false); @@ -70,7 +70,7 @@ public Task Roundtrip_JObject() new JObject { ["Bar"] = 8 }, IsJsonb ? @"{""Bar"": 8}" : @"{""Bar"":8}", _pgTypeName, - _npgsqlDbType, + npgsqlDbType, // By default we map JObject to jsonb isDefaultForWriting: IsJsonb, isDefaultForReading: false, @@ -83,7 +83,7 @@ public Task Roundtrip_JArray() new JArray(new[] { 1, 2, 3 }), IsJsonb ? "[1, 2, 3]" : "[1,2,3]", _pgTypeName, - _npgsqlDbType, + npgsqlDbType, // By default we map JArray to jsonb isDefaultForWriting: IsJsonb, isDefaultForReading: false, @@ -118,7 +118,7 @@ await AssertType( new Foo { Bar = 8 }, IsJsonb ? @"{""Bar"": 8}" : @"{""Bar"":8}", _pgTypeName, - _npgsqlDbType, + npgsqlDbType, isDefaultForReading: false, isNpgsqlDbTypeInferredFromClrType: false); } @@ -138,7 +138,7 @@ await AssertType( new[] { 1, 2, 3 }, IsJsonb ? 
"[1, 2, 3]" : "[1,2,3]", _pgTypeName, - _npgsqlDbType, + npgsqlDbType, isDefaultForReading: false, isNpgsqlDbTypeInferredFromClrType: false); } @@ -167,7 +167,7 @@ await AssertType( new DateWrapper { Date = new DateTime(2018, 04, 20) }, IsJsonb ? "{\"Date\": \"The 20th of April, 2018\"}" : "{\"Date\":\"The 20th of April, 2018\"}", _pgTypeName, - _npgsqlDbType, + npgsqlDbType, isDefault: false, isNpgsqlDbTypeInferredFromClrType: false); } @@ -183,8 +183,8 @@ public async Task Bug3464() await using var conn = await dataSource.OpenConnectionAsync(); await using var cmd = new NpgsqlCommand(@"SELECT @p1, @p2", conn); - cmd.Parameters.AddWithValue("p1", expected).NpgsqlDbType = _npgsqlDbType; - cmd.Parameters.AddWithValue("p2", expected).NpgsqlDbType = _npgsqlDbType; + cmd.Parameters.AddWithValue("p1", expected).NpgsqlDbType = npgsqlDbType; + cmd.Parameters.AddWithValue("p2", expected).NpgsqlDbType = npgsqlDbType; await using var reader = cmd.ExecuteReader(); } @@ -261,8 +261,7 @@ class Foo public override int GetHashCode() => Bar.GetHashCode(); } - readonly NpgsqlDbType _npgsqlDbType; - readonly string _pgTypeName; + readonly string _pgTypeName = npgsqlDbType.ToString().ToLower(); [OneTimeSetUp] public void SetUp() @@ -276,13 +275,7 @@ public void SetUp() public async Task Teardown() => await JsonDataSource.DisposeAsync(); - public JsonNetTests(NpgsqlDbType npgsqlDbType) - { - _npgsqlDbType = npgsqlDbType; - _pgTypeName = npgsqlDbType.ToString().ToLower(); - } - - bool IsJsonb => _npgsqlDbType == NpgsqlDbType.Jsonb; + bool IsJsonb => npgsqlDbType == NpgsqlDbType.Jsonb; NpgsqlDataSource JsonDataSource = default!; } diff --git a/test/Npgsql.Specification.Tests/NpgsqlCommandTests.cs b/test/Npgsql.Specification.Tests/NpgsqlCommandTests.cs index c92cd069f9..8318435aa9 100644 --- a/test/Npgsql.Specification.Tests/NpgsqlCommandTests.cs +++ b/test/Npgsql.Specification.Tests/NpgsqlCommandTests.cs @@ -2,13 +2,8 @@ namespace Npgsql.Specification.Tests; -public sealed class 
NpgsqlCommandTests : CommandTestBase +public sealed class NpgsqlCommandTests(NpgsqlDbFactoryFixture fixture) : CommandTestBase(fixture) { - public NpgsqlCommandTests(NpgsqlDbFactoryFixture fixture) - : base(fixture) - { - } - // PostgreSQL only supports a single transaction on a given connection at a given time. As a result, // Npgsql completely ignores DbCommand.Transaction. public override void ExecuteReader_throws_when_transaction_required() {} diff --git a/test/Npgsql.Specification.Tests/NpgsqlConnectionTests.cs b/test/Npgsql.Specification.Tests/NpgsqlConnectionTests.cs index fa71ea0f2f..20f5bc2547 100644 --- a/test/Npgsql.Specification.Tests/NpgsqlConnectionTests.cs +++ b/test/Npgsql.Specification.Tests/NpgsqlConnectionTests.cs @@ -2,10 +2,4 @@ namespace Npgsql.Specification.Tests; -public sealed class NpgsqlConnectionTests : ConnectionTestBase -{ - public NpgsqlConnectionTests(NpgsqlDbFactoryFixture fixture) - : base(fixture) - { - } -} \ No newline at end of file +public sealed class NpgsqlConnectionTests(NpgsqlDbFactoryFixture fixture) : ConnectionTestBase(fixture); \ No newline at end of file diff --git a/test/Npgsql.Specification.Tests/NpgsqlDataReaderTests.cs b/test/Npgsql.Specification.Tests/NpgsqlDataReaderTests.cs index 356d1da966..3f3c9021aa 100644 --- a/test/Npgsql.Specification.Tests/NpgsqlDataReaderTests.cs +++ b/test/Npgsql.Specification.Tests/NpgsqlDataReaderTests.cs @@ -2,8 +2,4 @@ namespace Npgsql.Specification.Tests; -public sealed class NpgsqlDataReaderTests : DataReaderTestBase -{ - public NpgsqlDataReaderTests(NpgsqlSelectValueFixture fixture) - : base(fixture) {} -} \ No newline at end of file +public sealed class NpgsqlDataReaderTests(NpgsqlSelectValueFixture fixture) : DataReaderTestBase(fixture); \ No newline at end of file diff --git a/test/Npgsql.Tests/AuthenticationTests.cs b/test/Npgsql.Tests/AuthenticationTests.cs index 5a041a7aca..90bdb79f00 100644 --- a/test/Npgsql.Tests/AuthenticationTests.cs +++ 
b/test/Npgsql.Tests/AuthenticationTests.cs @@ -11,7 +11,7 @@ namespace Npgsql.Tests; -public class AuthenticationTests : MultiplexingTestBase +public class AuthenticationTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) { [Test] [NonParallelizable] // Sets environment variable @@ -338,11 +338,9 @@ public void Password_source_precedence() static DeferDisposable Defer(Action action) => new(action); } - readonly struct DeferDisposable : IDisposable + readonly struct DeferDisposable(Action action) : IDisposable { - readonly Action _action; - public DeferDisposable(Action action) => _action = action; - public void Dispose() => _action(); + public void Dispose() => action(); } [Test, Description("Connects with a bad password to ensure the proper error is thrown")] @@ -531,6 +529,4 @@ NpgsqlDataSourceBuilder GetPasswordlessDataSourceBuilder() Password = null } }; - - public AuthenticationTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} } diff --git a/test/Npgsql.Tests/CommandParameterTests.cs b/test/Npgsql.Tests/CommandParameterTests.cs index 6b58fc4518..6fc042bff9 100644 --- a/test/Npgsql.Tests/CommandParameterTests.cs +++ b/test/Npgsql.Tests/CommandParameterTests.cs @@ -6,7 +6,7 @@ namespace Npgsql.Tests; -public class CommandParameterTests : MultiplexingTestBase +public class CommandParameterTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) { [Test] [TestCase(CommandBehavior.Default)] @@ -209,8 +209,4 @@ public async Task Object_generic_parameter_works() cmd.Parameters.Add(new NpgsqlParameter { NpgsqlDbType = NpgsqlDbType.Integer, Value = 8 }); Assert.That(await cmd.ExecuteScalarAsync(), Is.EqualTo(8)); } - - public CommandParameterTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) - { - } } diff --git a/test/Npgsql.Tests/CommandTests.cs b/test/Npgsql.Tests/CommandTests.cs index b4b088f5f8..f43550320a 100644 --- a/test/Npgsql.Tests/CommandTests.cs +++ 
b/test/Npgsql.Tests/CommandTests.cs @@ -16,7 +16,7 @@ namespace Npgsql.Tests; -public class CommandTests : MultiplexingTestBase +public class CommandTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) { static uint Int4Oid => PostgresMinimalDatabaseInfo.DefaultTypeCatalog.GetOid(DataTypeNames.Int4).Value; static uint TextOid => PostgresMinimalDatabaseInfo.DefaultTypeCatalog.GetOid(DataTypeNames.Text).Value; @@ -1815,6 +1815,4 @@ public async Task Log_ExecuteScalar_multiple_statement_with_parameter_logging_of } #endregion Logging - - public CommandTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} } diff --git a/test/Npgsql.Tests/ConnectionTests.cs b/test/Npgsql.Tests/ConnectionTests.cs index de25a239bb..424fc38830 100644 --- a/test/Npgsql.Tests/ConnectionTests.cs +++ b/test/Npgsql.Tests/ConnectionTests.cs @@ -20,7 +20,7 @@ namespace Npgsql.Tests; -public class ConnectionTests : MultiplexingTestBase +public class ConnectionTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) { [Test, Description("Makes sure the connection goes through the proper state lifecycle")] public async Task Basic_lifecycle() @@ -1774,6 +1774,4 @@ void AssertLoggingConnectionString(NpgsqlConnection connection, object? 
logState } #endregion Logging tests - - public ConnectionTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} } diff --git a/test/Npgsql.Tests/CopyTests.cs b/test/Npgsql.Tests/CopyTests.cs index 48d40fa2cc..73a2591195 100644 --- a/test/Npgsql.Tests/CopyTests.cs +++ b/test/Npgsql.Tests/CopyTests.cs @@ -17,7 +17,7 @@ namespace Npgsql.Tests; -public class CopyTests : MultiplexingTestBase +public class CopyTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) { #region Issue 2257 @@ -1394,6 +1394,4 @@ void StateAssertions(NpgsqlConnection conn) } #endregion - - public CopyTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} } diff --git a/test/Npgsql.Tests/DistributedTransactionTests.cs b/test/Npgsql.Tests/DistributedTransactionTests.cs index e55d6e7bd9..5260e3daef 100644 --- a/test/Npgsql.Tests/DistributedTransactionTests.cs +++ b/test/Npgsql.Tests/DistributedTransactionTests.cs @@ -564,11 +564,9 @@ void Current_TransactionCompleted(object sender, TransactionEventArgs e) } } - public class TransactionEvent + public class TransactionEvent(string message) { - public TransactionEvent(string message) - => Message = $"{message} (TId {Thread.CurrentThread.ManagedThreadId})"; - public string Message { get; } + public string Message { get; } = $"{message} (TId {Thread.CurrentThread.ManagedThreadId})"; } #endregion Utilities diff --git a/test/Npgsql.Tests/MultipleHostsTests.cs b/test/Npgsql.Tests/MultipleHostsTests.cs index 18bd56167f..78cbeded75 100644 --- a/test/Npgsql.Tests/MultipleHostsTests.cs +++ b/test/Npgsql.Tests/MultipleHostsTests.cs @@ -1166,15 +1166,11 @@ public async Task LoadBalancing_is_fair_if_first_host_is_down([Values]TargetSess static string MultipleHosts(params PgPostmasterMock[] postmasters) => string.Join(",", postmasters.Select(p => $"{p.Host}:{p.Port}")); - class DisposableWrapper : IAsyncDisposable + class DisposableWrapper(IEnumerable disposables) : IAsyncDisposable { - readonly IEnumerable 
_disposables; - - public DisposableWrapper(IEnumerable disposables) => _disposables = disposables; - public async ValueTask DisposeAsync() { - foreach (var disposable in _disposables) + foreach (var disposable in disposables) await disposable.DisposeAsync(); } } diff --git a/test/Npgsql.Tests/NpgsqlEventSourceTests.cs b/test/Npgsql.Tests/NpgsqlEventSourceTests.cs index 6419846fbe..1da8c0745d 100644 --- a/test/Npgsql.Tests/NpgsqlEventSourceTests.cs +++ b/test/Npgsql.Tests/NpgsqlEventSourceTests.cs @@ -46,10 +46,8 @@ public void DisableEventSource() readonly List _events = []; - class TestEventListener : EventListener + class TestEventListener(List events) : EventListener { - readonly List _events; - public TestEventListener(List events) => _events = events; - protected override void OnEventWritten(EventWrittenEventArgs eventData) => _events.Add(eventData); + protected override void OnEventWritten(EventWrittenEventArgs eventData) => events.Add(eventData); } } diff --git a/test/Npgsql.Tests/ReadBufferTests.cs b/test/Npgsql.Tests/ReadBufferTests.cs index 13c20f3d41..3169e5366d 100644 --- a/test/Npgsql.Tests/ReadBufferTests.cs +++ b/test/Npgsql.Tests/ReadBufferTests.cs @@ -133,12 +133,8 @@ async Task Read(byte[] buffer, int offset, int count, bool async) return count; } - internal class MockStreamWriter + internal class MockStreamWriter(MockStream stream) { - readonly MockStream _stream; - - public MockStreamWriter(MockStream stream) => _stream = stream; - public MockStreamWriter WriteByte(byte b) { Span bytes = stackalloc byte[1]; @@ -149,11 +145,11 @@ public MockStreamWriter WriteByte(byte b) public MockStreamWriter Write(ReadOnlySpan bytes) { - if (_stream._filled + bytes.Length > Size) + if (stream._filled + bytes.Length > Size) throw new Exception("Mock stream overrun"); - bytes.CopyTo(new Span(_stream._data, _stream._filled, bytes.Length)); - _stream._filled += bytes.Length; - _stream._tcs.TrySetResult(new()); + bytes.CopyTo(new Span(stream._data, stream._filled, 
bytes.Length)); + stream._filled += bytes.Length; + stream._tcs.TrySetResult(new()); return this; } } diff --git a/test/Npgsql.Tests/ReaderNewSchemaTests.cs b/test/Npgsql.Tests/ReaderNewSchemaTests.cs index 01e46cdd06..a9a11d5ce0 100644 --- a/test/Npgsql.Tests/ReaderNewSchemaTests.cs +++ b/test/Npgsql.Tests/ReaderNewSchemaTests.cs @@ -14,7 +14,7 @@ namespace Npgsql.Tests; /// Note that this API is also available on .NET Framework. /// For the old DataTable-based API, see . /// -public class ReaderNewSchemaTests : SyncOrAsyncTestBase +public class ReaderNewSchemaTests(SyncOrAsync syncOrAsync) : SyncOrAsyncTestBase(syncOrAsync) { // ReSharper disable once InconsistentNaming [Test] @@ -793,8 +793,6 @@ class SomeComposite public int Foo { get; set; } } - public ReaderNewSchemaTests(SyncOrAsync syncOrAsync) : base(syncOrAsync) { } - async Task> GetColumnSchema(NpgsqlDataReader reader) => IsAsync ? await reader.GetColumnSchemaAsync() : reader.GetColumnSchema(); } diff --git a/test/Npgsql.Tests/ReaderOldSchemaTests.cs b/test/Npgsql.Tests/ReaderOldSchemaTests.cs index edbeb15842..604af68789 100644 --- a/test/Npgsql.Tests/ReaderOldSchemaTests.cs +++ b/test/Npgsql.Tests/ReaderOldSchemaTests.cs @@ -11,7 +11,7 @@ namespace Npgsql.Tests; /// This tests the .NET Framework DbDataReader schema/metadata API, which returns DataTable. /// For the new CoreCLR API, see . /// -public class ReaderOldSchemaTests : SyncOrAsyncTestBase +public class ReaderOldSchemaTests(SyncOrAsync syncOrAsync) : SyncOrAsyncTestBase(syncOrAsync) { [Test] public async Task Primary_key_composite() @@ -240,7 +240,5 @@ CONSTRAINT PK_test_Cod PRIMARY KEY (Cod) Assert.That(dt.Rows[2]["ColumnName"].ToString(), Is.EqualTo("date")); } - public ReaderOldSchemaTests(SyncOrAsync syncOrAsync) : base(syncOrAsync) { } - async Task GetSchemaTable(NpgsqlDataReader dr) => IsAsync ? 
await dr.GetSchemaTableAsync() : dr.GetSchemaTable(); } diff --git a/test/Npgsql.Tests/ReaderTests.cs b/test/Npgsql.Tests/ReaderTests.cs index 249b5bf2ce..4d6ceab478 100644 --- a/test/Npgsql.Tests/ReaderTests.cs +++ b/test/Npgsql.Tests/ReaderTests.cs @@ -2391,23 +2391,17 @@ public ReaderTests(MultiplexingMode multiplexingMode, CommandBehavior behavior) #region Mock Type Handlers -sealed class ExplodingTypeHandlerResolverFactory : PgTypeInfoResolverFactory +sealed class ExplodingTypeHandlerResolverFactory(bool safe) : PgTypeInfoResolverFactory { - readonly bool _safe; - public ExplodingTypeHandlerResolverFactory(bool safe) => _safe = safe; - - public override IPgTypeInfoResolver CreateResolver() => new Resolver(_safe); + public override IPgTypeInfoResolver CreateResolver() => new Resolver(safe); public override IPgTypeInfoResolver? CreateArrayResolver() => null; - sealed class Resolver : IPgTypeInfoResolver + sealed class Resolver(bool safe) : IPgTypeInfoResolver { - readonly bool _safe; - public Resolver(bool safe) => _safe = safe; - public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) { if (dataTypeName == DataTypeNames.Int4 && (type == typeof(int) || type is null)) - return new(options, new ExplodingTypeHandler(_safe), DataTypeNames.Int4); + return new(options, new ExplodingTypeHandler(safe), DataTypeNames.Int4); return null; } diff --git a/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs b/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs index 57d137c367..0186d4f0d8 100644 --- a/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs +++ b/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs @@ -24,33 +24,29 @@ namespace Npgsql.Tests.Replication; [TestFixture(PgOutputProtocolVersion.V4, ReplicationDataMode.DefaultReplicationDataMode, TransactionMode.DefaultTransactionMode)] [TestFixture(PgOutputProtocolVersion.V4, ReplicationDataMode.DefaultReplicationDataMode, TransactionMode.ParallelStreamingTransactionMode)] [NonParallelizable] // These tests aren't designed to be parallelizable -public class PgOutputReplicationTests : SafeReplicationTestBase +public class PgOutputReplicationTests( + PgOutputProtocolVersion protocolVersion, + PgOutputReplicationTests.ReplicationDataMode dataMode, + PgOutputReplicationTests.TransactionMode transactionMode) + : SafeReplicationTestBase { - readonly PgOutputProtocolVersion _protocolVersion; - readonly bool? _binary; - readonly PgOutputStreamingMode? _streamingMode; + readonly bool? _binary = dataMode == ReplicationDataMode.BinaryReplicationDataMode + ? true + : dataMode == ReplicationDataMode.TextReplicationDataMode + ? false + : null; + readonly PgOutputStreamingMode? 
_streamingMode = transactionMode switch + { + TransactionMode.DefaultTransactionMode => null, + TransactionMode.NonStreamingTransactionMode => PgOutputStreamingMode.Off, + TransactionMode.StreamingTransactionMode => PgOutputStreamingMode.On, + TransactionMode.ParallelStreamingTransactionMode => PgOutputStreamingMode.Parallel, + _ => throw new ArgumentOutOfRangeException(nameof(transactionMode), transactionMode, null) + }; bool IsBinary => _binary ?? false; bool IsStreaming => _streamingMode.HasValue && _streamingMode.Value != PgOutputStreamingMode.Off; - PgOutputProtocolVersion Version => _protocolVersion; - - public PgOutputReplicationTests(PgOutputProtocolVersion protocolVersion, ReplicationDataMode dataMode, TransactionMode transactionMode) - { - _protocolVersion = protocolVersion; - _binary = dataMode == ReplicationDataMode.BinaryReplicationDataMode - ? true - : dataMode == ReplicationDataMode.TextReplicationDataMode - ? false - : null; - _streamingMode = transactionMode switch - { - TransactionMode.DefaultTransactionMode => null, - TransactionMode.NonStreamingTransactionMode => PgOutputStreamingMode.Off, - TransactionMode.StreamingTransactionMode => PgOutputStreamingMode.On, - TransactionMode.ParallelStreamingTransactionMode => PgOutputStreamingMode.Parallel, - _ => throw new ArgumentOutOfRangeException(nameof(transactionMode), transactionMode, null) - }; - } + PgOutputProtocolVersion Version => protocolVersion; [Test] public Task CreatePgOutputReplicationSlot() @@ -1103,7 +1099,7 @@ public Task TwoPhase([Values]bool commit) { // Streaming of prepared transaction is only supported for // logical streaming replication protocol >= 3 - if (_protocolVersion < PgOutputProtocolVersion.V3) + if (protocolVersion < PgOutputProtocolVersion.V3) return Task.CompletedTask; return SafePgOutputReplicationTest( @@ -1466,7 +1462,7 @@ async IAsyncEnumerable SkipEmptyTransactions(IAsyncE } PgOutputReplicationOptions GetOptions(string publicationName, bool? 
messages = null) - => new(publicationName, _protocolVersion, _binary, _streamingMode, messages); + => new(publicationName, protocolVersion, _binary, _streamingMode, messages); Task SafePgOutputReplicationTest(Func testAction, [CallerMemberName] string memberName = "") => SafeReplicationTest(testAction, GetObjectName(memberName)); @@ -1477,7 +1473,7 @@ Task SafePgOutputReplicationTest(Func testAction string GetObjectName(string memberName) { var sb = new StringBuilder(memberName) - .Append("_v").Append(_protocolVersion); + .Append("_v").Append(protocolVersion); if (_binary.HasValue) sb.Append("_b_").Append(BoolToChar(_binary.Value)); if (_streamingMode.HasValue) @@ -1496,11 +1492,11 @@ public async Task SetUp() { await using var c = await OpenConnectionAsync(); TestUtil.MinimumPgVersion(c, "10.0", "The Logical Replication Protocol (via pgoutput plugin) was introduced in PostgreSQL 10"); - if (_protocolVersion > PgOutputProtocolVersion.V3) + if (protocolVersion > PgOutputProtocolVersion.V3) TestUtil.MinimumPgVersion(c, "16.0", "Logical Streaming Replication Protocol version 4 was introduced in PostgreSQL 16"); - if (_protocolVersion > PgOutputProtocolVersion.V2) + if (protocolVersion > PgOutputProtocolVersion.V2) TestUtil.MinimumPgVersion(c, "15.0", "Logical Streaming Replication Protocol version 3 was introduced in PostgreSQL 15"); - if (_protocolVersion > PgOutputProtocolVersion.V1) + if (protocolVersion > PgOutputProtocolVersion.V1) TestUtil.MinimumPgVersion(c, "14.0", "Logical Streaming Replication Protocol version 2 was introduced in PostgreSQL 14"); if (IsBinary) TestUtil.MinimumPgVersion(c, "14.0", "Sending replication values in binary representation was introduced in PostgreSQL 14"); diff --git a/test/Npgsql.Tests/SchemaTests.cs b/test/Npgsql.Tests/SchemaTests.cs index 301cf44d80..49f31eff19 100644 --- a/test/Npgsql.Tests/SchemaTests.cs +++ b/test/Npgsql.Tests/SchemaTests.cs @@ -10,7 +10,7 @@ namespace Npgsql.Tests; -public class SchemaTests : 
SyncOrAsyncTestBase +public class SchemaTests(SyncOrAsync syncOrAsync) : SyncOrAsyncTestBase(syncOrAsync) { [Test] public async Task MetaDataCollections() @@ -584,8 +584,6 @@ public async Task SlimBuilder_introspection_without_unsupported_type_exceptions( Assert.That(() => GetSchema(conn, DbMetaDataCollectionNames.DataTypes), Throws.Nothing); } - public SchemaTests(SyncOrAsync syncOrAsync) : base(syncOrAsync) { } - // ReSharper disable MethodHasAsyncOverload async Task GetSchema(NpgsqlConnection conn) => IsAsync ? await conn.GetSchemaAsync() : conn.GetSchema(); diff --git a/test/Npgsql.Tests/Support/ListLoggerFactory.cs b/test/Npgsql.Tests/Support/ListLoggerFactory.cs index 930bb0cf92..98f94cb8fa 100644 --- a/test/Npgsql.Tests/Support/ListLoggerFactory.cs +++ b/test/Npgsql.Tests/Support/ListLoggerFactory.cs @@ -35,20 +35,15 @@ public void AddProvider(ILoggerProvider provider) public void Dispose() => StopRecording(); - class ListLogger : ILogger + class ListLogger(ListLoggerProvider provider) : ILogger { - readonly ListLoggerProvider _provider; - - public ListLogger(ListLoggerProvider provider) - => _provider = provider; - public List<(LogLevel, EventId, string, object?, Exception?)> LoggedEvents { get; } = []; public void Log(LogLevel logLevel, EventId eventId, TState state, Exception? 
exception, Func formatter) { - if (_provider._recording) + if (provider._recording) { lock (this) { @@ -66,7 +61,7 @@ public void Clear() } } - public bool IsEnabled(LogLevel logLevel) => _provider._recording; + public bool IsEnabled(LogLevel logLevel) => provider._recording; public IDisposable BeginScope(TState state) where TState : notnull => new Scope(); @@ -79,14 +74,9 @@ public void Dispose() } } - class RecordingDisposable : IDisposable + class RecordingDisposable(ListLoggerProvider provider) : IDisposable { - readonly ListLoggerProvider _provider; - - public RecordingDisposable(ListLoggerProvider provider) - => _provider = provider; - public void Dispose() - => _provider.StopRecording(); + => provider.StopRecording(); } } diff --git a/test/Npgsql.Tests/Support/PgCancellationRequest.cs b/test/Npgsql.Tests/Support/PgCancellationRequest.cs index c07f606bb8..6773c55dd2 100644 --- a/test/Npgsql.Tests/Support/PgCancellationRequest.cs +++ b/test/Npgsql.Tests/Support/PgCancellationRequest.cs @@ -3,35 +3,21 @@ namespace Npgsql.Tests.Support; -class PgCancellationRequest +class PgCancellationRequest(NpgsqlReadBuffer readBuffer, NpgsqlWriteBuffer writeBuffer, Stream stream, int processId, int secret) { - readonly NpgsqlReadBuffer _readBuffer; - readonly NpgsqlWriteBuffer _writeBuffer; - readonly Stream _stream; - - public int ProcessId { get; } - public int Secret { get; } + public int ProcessId { get; } = processId; + public int Secret { get; } = secret; bool completed; - public PgCancellationRequest(NpgsqlReadBuffer readBuffer, NpgsqlWriteBuffer writeBuffer, Stream stream, int processId, int secret) - { - _readBuffer = readBuffer; - _writeBuffer = writeBuffer; - _stream = stream; - - ProcessId = processId; - Secret = secret; - } - public void Complete() { if (completed) return; - _readBuffer.Dispose(); - _writeBuffer.Dispose(); - _stream.Dispose(); + readBuffer.Dispose(); + writeBuffer.Dispose(); + stream.Dispose(); completed = true; } diff --git 
a/test/Npgsql.Tests/Support/TestBase.cs b/test/Npgsql.Tests/Support/TestBase.cs index 05d2bfea37..66cdfb6780 100644 --- a/test/Npgsql.Tests/Support/TestBase.cs +++ b/test/Npgsql.Tests/Support/TestBase.cs @@ -452,17 +452,12 @@ async Task AssertTypeUnsupportedWriteCore(T value, st return Assert.ThrowsAsync(() => cmd.ExecuteReaderAsync())!; } - class SimpleComparer : IEqualityComparer + class SimpleComparer(Func comparerDelegate) : IEqualityComparer { - readonly Func _comparerDelegate; - - public SimpleComparer(Func comparerDelegate) - => _comparerDelegate = comparerDelegate; - public bool Equals(T? x, T? y) => x is null ? y is null - : y is not null && _comparerDelegate(x, y); + : y is not null && comparerDelegate(x, y); public int GetHashCode(T obj) => throw new NotSupportedException(); } diff --git a/test/Npgsql.Tests/SyncOrAsyncTestBase.cs b/test/Npgsql.Tests/SyncOrAsyncTestBase.cs index 0eff0c7488..2a676e97cb 100644 --- a/test/Npgsql.Tests/SyncOrAsyncTestBase.cs +++ b/test/Npgsql.Tests/SyncOrAsyncTestBase.cs @@ -4,13 +4,11 @@ namespace Npgsql.Tests; [TestFixture(SyncOrAsync.Sync)] [TestFixture(SyncOrAsync.Async)] -public abstract class SyncOrAsyncTestBase : TestBase +public abstract class SyncOrAsyncTestBase(SyncOrAsync syncOrAsync) : TestBase { protected bool IsAsync => SyncOrAsync == SyncOrAsync.Async; - protected SyncOrAsync SyncOrAsync { get; } - - protected SyncOrAsyncTestBase(SyncOrAsync syncOrAsync) => SyncOrAsync = syncOrAsync; + protected SyncOrAsync SyncOrAsync { get; } = syncOrAsync; } public enum SyncOrAsync diff --git a/test/Npgsql.Tests/TestUtil.cs b/test/Npgsql.Tests/TestUtil.cs index 81b1140f48..1d68c130f5 100644 --- a/test/Npgsql.Tests/TestUtil.cs +++ b/test/Npgsql.Tests/TestUtil.cs @@ -518,13 +518,9 @@ public static void WaitUntilCommandIsInProgress(this NpgsqlCommand command) /// test reproduces the issue) /// [AttributeUsage(AttributeTargets.Method, AllowMultiple = true)] -public class IssueLink : Attribute +public class IssueLink(string 
linkAddress) : Attribute { - public string LinkAddress { get; private set; } - public IssueLink(string linkAddress) - { - LinkAddress = linkAddress; - } + public string LinkAddress { get; private set; } = linkAddress; } public enum PrepareOrNot diff --git a/test/Npgsql.Tests/TransactionTests.cs b/test/Npgsql.Tests/TransactionTests.cs index d5e9a927f8..9836bac8f2 100644 --- a/test/Npgsql.Tests/TransactionTests.cs +++ b/test/Npgsql.Tests/TransactionTests.cs @@ -12,7 +12,7 @@ namespace Npgsql.Tests; -public class TransactionTests : MultiplexingTestBase +public class TransactionTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) { [Test, Description("Basic insert within a committed transaction")] public async Task Commit([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepared)] PrepareOrNot prepare) @@ -746,6 +746,4 @@ public void Bug184_Rollback_fails_on_aborted_transaction() t.Rollback(); } } - - public TransactionTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} } diff --git a/test/Npgsql.Tests/Types/ArrayTests.cs b/test/Npgsql.Tests/Types/ArrayTests.cs index 6b0daad6cf..5917f24eff 100644 --- a/test/Npgsql.Tests/Types/ArrayTests.cs +++ b/test/Npgsql.Tests/Types/ArrayTests.cs @@ -21,7 +21,7 @@ namespace Npgsql.Tests.Types; /// /// https://www.postgresql.org/docs/current/static/arrays.html /// -public class ArrayTests : MultiplexingTestBase +public class ArrayTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) { static readonly TestCaseData[] ArrayTestCases = [ @@ -415,6 +415,4 @@ public async Task NpgsqlSlimSourceBuilder_EnableArrays() await AssertType(dataSource, new[] { 1, 2, 3 }, "{1,2,3}", "integer[]", NpgsqlDbType.Integer | NpgsqlDbType.Array); } - - public ArrayTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} } diff --git a/test/Npgsql.Tests/Types/BitStringTests.cs b/test/Npgsql.Tests/Types/BitStringTests.cs index 7a1b085fa1..fb8377c395 100644 --- 
a/test/Npgsql.Tests/Types/BitStringTests.cs +++ b/test/Npgsql.Tests/Types/BitStringTests.cs @@ -13,7 +13,7 @@ namespace Npgsql.Tests.Types; /// /// https://www.postgresql.org/docs/current/static/datatype-bit.html /// -public class BitStringTests : MultiplexingTestBase +public class BitStringTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) { [Test] [TestCase("10110110", TestName = "BitArray")] @@ -123,6 +123,4 @@ public Task As_string() [Test] public Task Write_as_string_validation() => AssertTypeUnsupportedWrite("001q0", "bit varying"); - - public BitStringTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} } diff --git a/test/Npgsql.Tests/Types/ByteaTests.cs b/test/Npgsql.Tests/Types/ByteaTests.cs index 4f8db3c3dc..60cc2830f3 100644 --- a/test/Npgsql.Tests/Types/ByteaTests.cs +++ b/test/Npgsql.Tests/Types/ByteaTests.cs @@ -14,7 +14,7 @@ namespace Npgsql.Tests.Types; /// /// https://www.postgresql.org/docs/current/static/datatype-binary.html /// -public class ByteaTests : MultiplexingTestBase +public class ByteaTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) { [Test] [TestCase(new byte[] { 1, 2, 3, 4, 5 }, "\\x0102030405", TestName = "Bytea")] @@ -284,14 +284,8 @@ public async Task Array_of_bytea() Assert.AreEqual(inVal[1], retVal[1]); } - sealed class NonSeekableStream : MemoryStream + sealed class NonSeekableStream(byte[] data) : MemoryStream(data) { public override bool CanSeek => false; - - public NonSeekableStream(byte[] data) : base(data) - { - } } - - public ByteaTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} } diff --git a/test/Npgsql.Tests/Types/CompositeHandlerTests.cs b/test/Npgsql.Tests/Types/CompositeHandlerTests.cs index 1df95980a3..cc84efd094 100644 --- a/test/Npgsql.Tests/Types/CompositeHandlerTests.cs +++ b/test/Npgsql.Tests/Types/CompositeHandlerTests.cs @@ -155,10 +155,9 @@ public class TypeWithExplicitPropertyName : SimpleComposite protected 
override string GetValue() => MyValue; } - public class TypeWithExplicitParameterName : SimpleComposite + public class TypeWithExplicitParameterName([PgName("value")] string myValue) : SimpleComposite { - public TypeWithExplicitParameterName([PgName("value")] string myValue) => Value = myValue; - public string Value { get; } + public string Value { get; } = myValue; protected override string GetValue() => Value; } @@ -178,81 +177,72 @@ public class TypeWithLessPropertiesThanAttributes : IComposite public int IntValue { get; set; } } - public class TypeWithMoreParametersThanAttributes : IComposite + public class TypeWithMoreParametersThanAttributes(int intValue, string? stringValue) : IComposite { public string GetAttributes() => "int_value integer"; public string GetValues() => $"{IntValue}"; - public TypeWithMoreParametersThanAttributes(int intValue, string? stringValue) - { - IntValue = intValue; - StringValue = stringValue; - } - - public int IntValue { get; set; } - public string? StringValue { get; set; } + public int IntValue { get; set; } = intValue; + public string? 
StringValue { get; set; } = stringValue; } - public class TypeWithLessParametersThanAttributes : IComposite + public class TypeWithLessParametersThanAttributes(int intValue) : IComposite { public string GetAttributes() => "int_value integer, string_value text"; public string GetValues() => $"{IntValue}, NULL"; - public TypeWithLessParametersThanAttributes(int intValue) => - IntValue = intValue; - - public int IntValue { get; } + public int IntValue { get; } = intValue; } - public class TypeWithOneParameter : IComposite + public class TypeWithOneParameter(int value1) : IComposite { public string GetAttributes() => "value1 integer"; public string GetValues() => $"{Value1}"; - public TypeWithOneParameter(int value1) => Value1 = value1; - public int Value1 { get; } + public int Value1 { get; } = value1; } - public class TypeWithTwoParameters : IComposite + public class TypeWithTwoParameters(int intValue, string stringValue) : IComposite { public string GetAttributes() => "int_value integer, string_value text"; public string GetValues() => $"{IntValue}, '{StringValue}'"; - public TypeWithTwoParameters(int intValue, string stringValue) => - (IntValue, StringValue) = (intValue, stringValue); - - public int IntValue { get; } - public string? StringValue { get; } + public int IntValue { get; } = intValue; + public string? StringValue { get; } = stringValue; } - public class TypeWithTwoParametersReversed : IComposite + public class TypeWithTwoParametersReversed(string stringValue, int intValue) : IComposite { public string GetAttributes() => "int_value integer, string_value text"; public string GetValues() => $"{IntValue}, '{StringValue}'"; - public TypeWithTwoParametersReversed(string stringValue, int intValue) => - (StringValue, IntValue) = (stringValue, intValue); - - public int IntValue { get; } - public string? StringValue { get; } + public int IntValue { get; } = intValue; + public string? 
StringValue { get; } = stringValue; } - public class TypeWithNineParameters : IComposite + public class TypeWithNineParameters( + int value1, + int value2, + int value3, + int value4, + int value5, + int value6, + int value7, + int value8, + int value9) + : IComposite { public string GetAttributes() => "value1 integer, value2 integer, value3 integer, value4 integer, value5 integer, value6 integer, value7 integer, value8 integer, value9 integer"; public string GetValues() => $"{Value1}, {Value2}, {Value3}, {Value4}, {Value5}, {Value6}, {Value7}, {Value8}, {Value9}"; - public TypeWithNineParameters(int value1, int value2, int value3, int value4, int value5, int value6, int value7, int value8, int value9) - => (Value1, Value2, Value3, Value4, Value5, Value6, Value7, Value8, Value9) = (value1, value2, value3, value4, value5, value6, value7, value8, value9); - - public int Value1 { get; } - public int Value2 { get; } - public int Value3 { get; } - public int Value4 { get; } - public int Value5 { get; } - public int Value6 { get; } - public int Value7 { get; } - public int Value8 { get; } - public int Value9 { get; } + public int Value1 { get; } = value1; + public int Value2 { get; } = value2; + public int Value3 { get; } = value3; + public int Value4 { get; } = value4; + public int Value5 { get; } = value5; + public int Value6 { get; } = value6; + public int Value7 { get; } = value7; + public int Value8 { get; } = value8; + public int Value9 { get; } = value9; } } diff --git a/test/Npgsql.Tests/Types/CompositeTests.cs b/test/Npgsql.Tests/Types/CompositeTests.cs index a7a5f20191..bdbdb97d54 100644 --- a/test/Npgsql.Tests/Types/CompositeTests.cs +++ b/test/Npgsql.Tests/Types/CompositeTests.cs @@ -9,7 +9,7 @@ namespace Npgsql.Tests.Types; -public class CompositeTests : MultiplexingTestBase +public class CompositeTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) { [Test] public async Task Basic() @@ -635,18 +635,16 @@ await AssertType( 
#region Test Types - readonly struct DuplicateOneLongOneBool +#pragma warning disable CS9113 + readonly struct DuplicateOneLongOneBool(bool boolean, [PgName("boolean")] int @bool) { - public DuplicateOneLongOneBool(bool boolean, [PgName("boolean")]int @bool) - { - } - [PgName("long")] public long LongValue { get; } [PgName("boolean")] public bool BooleanValue { get; } } +#pragma warning restore CS9113 readonly struct MissingSetterOneLongOneBool { @@ -757,7 +755,5 @@ struct StructWithNullableProperty public int? Foo { get; set; } } - public CompositeTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} - #endregion } diff --git a/test/Npgsql.Tests/Types/DomainTests.cs b/test/Npgsql.Tests/Types/DomainTests.cs index 2e2ff5ae84..117204d307 100644 --- a/test/Npgsql.Tests/Types/DomainTests.cs +++ b/test/Npgsql.Tests/Types/DomainTests.cs @@ -6,7 +6,7 @@ namespace Npgsql.Tests.Types; -public class DomainTests : MultiplexingTestBase +public class DomainTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) { [Test, Description("Resolves a domain type handler via the different pathways")] public async Task Domain_resolution() @@ -98,6 +98,4 @@ await AssertType( npgsqlDbType: null, isDefaultForWriting: false); } - - public DomainTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} } diff --git a/test/Npgsql.Tests/Types/EnumTests.cs b/test/Npgsql.Tests/Types/EnumTests.cs index c36514d6d3..b6e56c632b 100644 --- a/test/Npgsql.Tests/Types/EnumTests.cs +++ b/test/Npgsql.Tests/Types/EnumTests.cs @@ -10,7 +10,7 @@ namespace Npgsql.Tests.Types; -public class EnumTests : MultiplexingTestBase +public class EnumTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) { enum Mood { Sad, Ok, Happy } enum AnotherEnum { Value1, Value2 } @@ -243,6 +243,4 @@ enum TestEnum [PgName("label3")] Label3 } - - public EnumTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} } diff --git 
a/test/Npgsql.Tests/Types/FullTextSearchTests.cs b/test/Npgsql.Tests/Types/FullTextSearchTests.cs index a3c83a31c9..9c5104051a 100644 --- a/test/Npgsql.Tests/Types/FullTextSearchTests.cs +++ b/test/Npgsql.Tests/Types/FullTextSearchTests.cs @@ -9,11 +9,8 @@ namespace Npgsql.Tests.Types; -public class FullTextSearchTests : MultiplexingTestBase +public class FullTextSearchTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) { - public FullTextSearchTests(MultiplexingMode multiplexingMode) - : base(multiplexingMode) { } - [Test] public Task TsVector() => AssertType( diff --git a/test/Npgsql.Tests/Types/GeometricTypeTests.cs b/test/Npgsql.Tests/Types/GeometricTypeTests.cs index 62b63d3d6d..8c858b4f64 100644 --- a/test/Npgsql.Tests/Types/GeometricTypeTests.cs +++ b/test/Npgsql.Tests/Types/GeometricTypeTests.cs @@ -10,7 +10,7 @@ namespace Npgsql.Tests.Types; /// /// https://www.postgresql.org/docs/current/static/datatype-geometric.html /// -class GeometricTypeTests : MultiplexingTestBase +class GeometricTypeTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) { [Test] public Task Point() @@ -135,6 +135,4 @@ public Task Circle() "<(1,2),0.5>", "circle", NpgsqlDbType.Circle); - - public GeometricTypeTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} } diff --git a/test/Npgsql.Tests/Types/HstoreTests.cs b/test/Npgsql.Tests/Types/HstoreTests.cs index 5696cad98b..366b38bb44 100644 --- a/test/Npgsql.Tests/Types/HstoreTests.cs +++ b/test/Npgsql.Tests/Types/HstoreTests.cs @@ -6,7 +6,7 @@ namespace Npgsql.Tests.Types; -public class HstoreTests : MultiplexingTestBase +public class HstoreTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) { [Test] public Task Hstore() @@ -63,6 +63,4 @@ public async Task SetUp() TestUtil.MinimumPgVersion(conn, "9.1", "Hstore introduced in PostgreSQL 9.1"); await TestUtil.EnsureExtensionAsync(conn, "hstore", "9.1"); } - - public 
HstoreTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} } diff --git a/test/Npgsql.Tests/Types/InternalTypeTests.cs b/test/Npgsql.Tests/Types/InternalTypeTests.cs index ad95686825..9c68a66695 100644 --- a/test/Npgsql.Tests/Types/InternalTypeTests.cs +++ b/test/Npgsql.Tests/Types/InternalTypeTests.cs @@ -4,7 +4,7 @@ namespace Npgsql.Tests.Types; -public class InternalTypeTests : MultiplexingTestBase +public class InternalTypeTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) { [Test] public async Task Read_internal_char() @@ -96,6 +96,4 @@ public async Task NpgsqlLogSequenceNumber() } #endregion NpgsqlLogSequenceNumber / PgLsn - - public InternalTypeTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} } \ No newline at end of file diff --git a/test/Npgsql.Tests/Types/JsonPathTests.cs b/test/Npgsql.Tests/Types/JsonPathTests.cs index 3bc4f07a3b..022c8eaaf9 100644 --- a/test/Npgsql.Tests/Types/JsonPathTests.cs +++ b/test/Npgsql.Tests/Types/JsonPathTests.cs @@ -6,11 +6,8 @@ namespace Npgsql.Tests.Types; -public class JsonPathTests : MultiplexingTestBase +public class JsonPathTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) { - public JsonPathTests(MultiplexingMode multiplexingMode) - : base(multiplexingMode) { } - static readonly object[] ReadWriteCases = [ new object[] { "'$'", "$" }, diff --git a/test/Npgsql.Tests/Types/LTreeTests.cs b/test/Npgsql.Tests/Types/LTreeTests.cs index f836b49ca0..c90cc88a20 100644 --- a/test/Npgsql.Tests/Types/LTreeTests.cs +++ b/test/Npgsql.Tests/Types/LTreeTests.cs @@ -5,7 +5,7 @@ namespace Npgsql.Tests.Types; -public class LTreeTests : MultiplexingTestBase +public class LTreeTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) { [Test] public Task LQuery() @@ -63,6 +63,4 @@ public async Task SetUp() TestUtil.MinimumPgVersion(conn, "13.0"); await TestUtil.EnsureExtensionAsync(conn, "ltree"); } - - public 
LTreeTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} } diff --git a/test/Npgsql.Tests/Types/MiscTypeTests.cs b/test/Npgsql.Tests/Types/MiscTypeTests.cs index 6586fd89c3..7f1fe7c0ba 100644 --- a/test/Npgsql.Tests/Types/MiscTypeTests.cs +++ b/test/Npgsql.Tests/Types/MiscTypeTests.cs @@ -9,7 +9,7 @@ namespace Npgsql.Tests.Types; /// /// Tests on PostgreSQL types which don't fit elsewhere /// -class MiscTypeTests : MultiplexingTestBase +class MiscTypeTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) { [Test] public async Task Boolean() @@ -207,6 +207,4 @@ public async Task Unsupported_DbType() Assert.That(() => cmd.Parameters.Add(new NpgsqlParameter("p", DbType.UInt32) { Value = 8u }), Throws.Exception.TypeOf()); } - - public MiscTypeTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} } diff --git a/test/Npgsql.Tests/Types/NetworkTypeTests.cs b/test/Npgsql.Tests/Types/NetworkTypeTests.cs index e09f2814f8..e24446f136 100644 --- a/test/Npgsql.Tests/Types/NetworkTypeTests.cs +++ b/test/Npgsql.Tests/Types/NetworkTypeTests.cs @@ -13,7 +13,7 @@ namespace Npgsql.Tests.Types; /// /// https://www.postgresql.org/docs/current/static/datatype-net-types.html /// -class NetworkTypeTests : MultiplexingTestBase +class NetworkTypeTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) { [Test] public Task Inet_v4_as_IPAddress() @@ -138,6 +138,4 @@ public async Task Macaddr_write_validation() await AssertTypeUnsupportedWrite(PhysicalAddress.Parse("08-00-2B-01-02-03-04-05"), "macaddr"); } - - public NetworkTypeTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} } diff --git a/test/Npgsql.Tests/Types/NumericTests.cs b/test/Npgsql.Tests/Types/NumericTests.cs index d0e48d8c3a..20eed3fa04 100644 --- a/test/Npgsql.Tests/Types/NumericTests.cs +++ b/test/Npgsql.Tests/Types/NumericTests.cs @@ -8,7 +8,7 @@ namespace Npgsql.Tests.Types; -public class NumericTests : MultiplexingTestBase 
+public class NumericTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) { static readonly object[] ReadWriteCases = [ @@ -212,6 +212,4 @@ public async Task NumericZero_WithScale() Assert.That(value.Scale, Is.EqualTo(2)); } - - public NumericTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} } diff --git a/test/Npgsql.Tests/Types/NumericTypeTests.cs b/test/Npgsql.Tests/Types/NumericTypeTests.cs index 9fcd5b695b..5fda011158 100644 --- a/test/Npgsql.Tests/Types/NumericTypeTests.cs +++ b/test/Npgsql.Tests/Types/NumericTypeTests.cs @@ -14,7 +14,7 @@ namespace Npgsql.Tests.Types; /// /// https://www.postgresql.org/docs/current/static/datatype-numeric.html /// -public class NumericTypeTests : MultiplexingTestBase +public class NumericTypeTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) { [Test] public async Task Int16() @@ -108,6 +108,4 @@ public Task Write_overflow(T value, string pgTypeName) [TestCase(0L, long.MaxValue + 1D, "decimal")] public Task Read_overflow(T _, double value, string pgTypeName) => AssertTypeUnsupportedRead(value.ToString(CultureInfo.InvariantCulture), pgTypeName); - - public NumericTypeTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} } diff --git a/test/Npgsql.Tests/Types/RecordTests.cs b/test/Npgsql.Tests/Types/RecordTests.cs index 7aefe1e98d..0823323041 100644 --- a/test/Npgsql.Tests/Types/RecordTests.cs +++ b/test/Npgsql.Tests/Types/RecordTests.cs @@ -7,7 +7,7 @@ namespace Npgsql.Tests.Types; -public class RecordTests : MultiplexingTestBase +public class RecordTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) { [Test] [IssueLink("https://github.com/npgsql/npgsql/issues/724")] @@ -152,6 +152,4 @@ public async Task NpgsqlSlimSourceBuilder_EnableRecords() Assert.That(() => reader.GetValue(0), Throws.Nothing); Assert.That(() => reader.GetFieldValue(0), Throws.Nothing); } - - public RecordTests(MultiplexingMode 
multiplexingMode) : base(multiplexingMode) {} } diff --git a/test/Npgsql.Tests/Types/TextTests.cs b/test/Npgsql.Tests/Types/TextTests.cs index 7e86fb131b..f7e770c088 100644 --- a/test/Npgsql.Tests/Types/TextTests.cs +++ b/test/Npgsql.Tests/Types/TextTests.cs @@ -15,7 +15,7 @@ namespace Npgsql.Tests.Types; /// /// https://www.postgresql.org/docs/current/static/datatype-character.html /// -public class TextTests : MultiplexingTestBase +public class TextTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) { [Test] public Task Text_as_string() @@ -149,6 +149,4 @@ public async Task Internal_char() Assert.AreEqual(testArr2[i], arr2[i]); } } - - public TextTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) {} } From a770d79cf67c1c06a09abb46cc232da8d4ec0e8a Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Wed, 30 Oct 2024 16:26:22 +0100 Subject: [PATCH 477/761] Expression-bodied methods --- .../BackendMessages/AuthenticationMessages.cs | 8 ++--- src/Npgsql/BackendMessages/CopyMessages.cs | 4 +-- .../ParameterDescriptionMessage.cs | 6 ++-- src/Npgsql/NpgsqlConnection.cs | 4 +-- .../NpgsqlOperationInProgressException.cs | 6 ++-- src/Npgsql/NpgsqlRawCopyStream.cs | 10 ++----- src/Npgsql/NpgsqlTransaction.cs | 5 +--- src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs | 4 +-- src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs | 4 +-- src/Npgsql/PostgresMinimalDatabaseInfo.cs | 6 ++-- src/Npgsql/PostgresNotice.cs | 4 +-- src/Npgsql/Replication/ReplicationSlot.cs | 6 ++-- src/Npgsql/TypeMapping/UserTypeMapper.cs | 8 ++--- src/Shared/CodeAnalysis.cs | 4 +-- .../CommandExecuteBenchmarks.cs | 6 ++-- test/Npgsql.Benchmarks/Commit.cs | 6 ++-- .../ConnectionCreationBenchmarks.cs | 6 ++-- .../ConnectionOpenCloseBenchmarks.cs | 6 ++-- test/Npgsql.Benchmarks/Prepare.cs | 6 ++-- .../Npgsql.PluginTests/LegacyNodaTimeTests.cs | 4 +-- .../NpgsqlSelectValueFixture.cs | 6 ++-- test/Npgsql.Tests/BugTests.cs | 4 +-- test/Npgsql.Tests/TestMetrics.cs | 30 
+++++-------------- test/Npgsql.Tests/Types/ArrayTests.cs | 4 +-- test/Npgsql.Tests/Types/CompositeTests.cs | 8 ++--- test/Npgsql.Tests/Types/RangeTests.cs | 8 ++--- 26 files changed, 48 insertions(+), 125 deletions(-) diff --git a/src/Npgsql/BackendMessages/AuthenticationMessages.cs b/src/Npgsql/BackendMessages/AuthenticationMessages.cs index 93d535109d..c52da80d33 100644 --- a/src/Npgsql/BackendMessages/AuthenticationMessages.cs +++ b/src/Npgsql/BackendMessages/AuthenticationMessages.cs @@ -41,9 +41,7 @@ internal static AuthenticationMD5PasswordMessage Load(NpgsqlReadBuffer buf) } AuthenticationMD5PasswordMessage(byte[] salt) - { - Salt = salt; - } + => Salt = salt; } sealed class AuthenticationGSSMessage : AuthenticationRequestMessage @@ -69,9 +67,7 @@ internal static AuthenticationGSSContinueMessage Load(NpgsqlReadBuffer buf, int } AuthenticationGSSContinueMessage(byte[] authenticationData) - { - AuthenticationData = authenticationData; - } + => AuthenticationData = authenticationData; } sealed class AuthenticationSSPIMessage : AuthenticationRequestMessage diff --git a/src/Npgsql/BackendMessages/CopyMessages.cs b/src/Npgsql/BackendMessages/CopyMessages.cs index 174768f7a6..e7d4d6935c 100644 --- a/src/Npgsql/BackendMessages/CopyMessages.cs +++ b/src/Npgsql/BackendMessages/CopyMessages.cs @@ -13,9 +13,7 @@ abstract class CopyResponseMessageBase : IBackendMessage internal List ColumnFormatCodes { get; } internal CopyResponseMessageBase() - { - ColumnFormatCodes = []; - } + => ColumnFormatCodes = []; internal void Load(NpgsqlReadBuffer buf) { diff --git a/src/Npgsql/BackendMessages/ParameterDescriptionMessage.cs b/src/Npgsql/BackendMessages/ParameterDescriptionMessage.cs index 9faccd0f50..16c4687da5 100644 --- a/src/Npgsql/BackendMessages/ParameterDescriptionMessage.cs +++ b/src/Npgsql/BackendMessages/ParameterDescriptionMessage.cs @@ -9,9 +9,7 @@ sealed class ParameterDescriptionMessage : IBackendMessage internal List TypeOIDs { get; } internal 
ParameterDescriptionMessage() - { - TypeOIDs = []; - } + => TypeOIDs = []; internal ParameterDescriptionMessage Load(NpgsqlReadBuffer buf) { @@ -23,4 +21,4 @@ internal ParameterDescriptionMessage Load(NpgsqlReadBuffer buf) } public BackendMessageCode Code => BackendMessageCode.ParameterDescription; -} \ No newline at end of file +} diff --git a/src/Npgsql/NpgsqlConnection.cs b/src/Npgsql/NpgsqlConnection.cs index e0315b5952..a50d9548be 100644 --- a/src/Npgsql/NpgsqlConnection.cs +++ b/src/Npgsql/NpgsqlConnection.cs @@ -1734,9 +1734,7 @@ public override Task GetSchemaAsync(string collectionName, Cancellati /// /// The collection specified. public override Task GetSchemaAsync(string collectionName, string?[]? restrictions, CancellationToken cancellationToken = default) - { - return NpgsqlSchema.GetSchema(async: true, this, collectionName, restrictions, cancellationToken); - } + => NpgsqlSchema.GetSchema(async: true, this, collectionName, restrictions, cancellationToken); #endregion Schema operations diff --git a/src/Npgsql/NpgsqlOperationInProgressException.cs b/src/Npgsql/NpgsqlOperationInProgressException.cs index eb7377afcd..74e7e646ff 100644 --- a/src/Npgsql/NpgsqlOperationInProgressException.cs +++ b/src/Npgsql/NpgsqlOperationInProgressException.cs @@ -16,9 +16,7 @@ public sealed class NpgsqlOperationInProgressException : NpgsqlException /// public NpgsqlOperationInProgressException(NpgsqlCommand command) : base("A command is already in progress: " + command.CommandText) - { - CommandInProgress = command; - } + => CommandInProgress = command; internal NpgsqlOperationInProgressException(ConnectorState state) : base($"The connection is already in state '{state}'") @@ -31,4 +29,4 @@ internal NpgsqlOperationInProgressException(ConnectorState state) /// . /// public NpgsqlCommand? 
CommandInProgress { get; } -} \ No newline at end of file +} diff --git a/src/Npgsql/NpgsqlRawCopyStream.cs b/src/Npgsql/NpgsqlRawCopyStream.cs index 1185cd5422..689e11ba5b 100644 --- a/src/Npgsql/NpgsqlRawCopyStream.cs +++ b/src/Npgsql/NpgsqlRawCopyStream.cs @@ -428,15 +428,9 @@ void CheckDisposed() public override bool CanSeek => false; - public override long Seek(long offset, SeekOrigin origin) - { - throw new NotSupportedException(); - } + public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException(); - public override void SetLength(long value) - { - throw new NotSupportedException(); - } + public override void SetLength(long value) => throw new NotSupportedException(); public override long Length => throw new NotSupportedException(); diff --git a/src/Npgsql/NpgsqlTransaction.cs b/src/Npgsql/NpgsqlTransaction.cs index 7819af479b..1beebd0924 100644 --- a/src/Npgsql/NpgsqlTransaction.cs +++ b/src/Npgsql/NpgsqlTransaction.cs @@ -307,10 +307,7 @@ public override Task ReleaseAsync(string name, CancellationToken cancellationTok /// /// Indicates whether this transaction supports database savepoints. 
/// - public override bool SupportsSavepoints - { - get => _connector.DatabaseInfo.SupportsTransactions; - } + public override bool SupportsSavepoints => _connector.DatabaseInfo.SupportsTransactions; #endregion diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs b/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs index cbfc201e85..bb1629705c 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs @@ -533,9 +533,7 @@ public sealed class NpgsqlTsQueryNot : NpgsqlTsQuery /// public NpgsqlTsQueryNot(NpgsqlTsQuery child) : base(NodeKind.Not) - { - Child = child; - } + => Child = child; internal override void WriteCore(StringBuilder sb, bool first = false) { diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs b/src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs index 3c765ede02..dca621190d 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs @@ -414,9 +414,7 @@ public struct WordEntryPos : IEquatable internal short Value { get; } internal WordEntryPos(short value) - { - Value = value; - } + => Value = value; /// /// Creates a WordEntryPos with a given position and weight. diff --git a/src/Npgsql/PostgresMinimalDatabaseInfo.cs b/src/Npgsql/PostgresMinimalDatabaseInfo.cs index eb90453062..3d07fac236 100644 --- a/src/Npgsql/PostgresMinimalDatabaseInfo.cs +++ b/src/Npgsql/PostgresMinimalDatabaseInfo.cs @@ -118,10 +118,8 @@ protected override IEnumerable GetTypes() internal PostgresMinimalDatabaseInfo(NpgsqlConnector conn) : base(conn) - { - HasIntegerDateTimes = !conn.PostgresParameters.TryGetValue("integer_datetimes", out var intDateTimes) || - intDateTimes == "on"; - } + => HasIntegerDateTimes = !conn.PostgresParameters.TryGetValue("integer_datetimes", out var intDateTimes) || + intDateTimes == "on"; // TODO, split database info and type catalog. 
internal PostgresMinimalDatabaseInfo() diff --git a/src/Npgsql/PostgresNotice.cs b/src/Npgsql/PostgresNotice.cs index ef55ad4e13..6ed9c7f98d 100644 --- a/src/Npgsql/PostgresNotice.cs +++ b/src/Npgsql/PostgresNotice.cs @@ -198,7 +198,5 @@ public sealed class NpgsqlNoticeEventArgs : EventArgs public PostgresNotice Notice { get; } internal NpgsqlNoticeEventArgs(PostgresNotice notice) - { - Notice = notice; - } + => Notice = notice; } diff --git a/src/Npgsql/Replication/ReplicationSlot.cs b/src/Npgsql/Replication/ReplicationSlot.cs index 8790303444..1e9b3473b6 100644 --- a/src/Npgsql/Replication/ReplicationSlot.cs +++ b/src/Npgsql/Replication/ReplicationSlot.cs @@ -6,12 +6,10 @@ public abstract class ReplicationSlot { internal ReplicationSlot(string name) - { - Name = name; - } + => Name = name; /// /// The name of the newly-created replication slot. /// public string Name { get; } -} \ No newline at end of file +} diff --git a/src/Npgsql/TypeMapping/UserTypeMapper.cs b/src/Npgsql/TypeMapping/UserTypeMapper.cs index 7db06826cb..90da77728a 100644 --- a/src/Npgsql/TypeMapping/UserTypeMapper.cs +++ b/src/Npgsql/TypeMapping/UserTypeMapper.cs @@ -191,8 +191,7 @@ sealed class CompositeMapping< where T : class { internal override void AddMapping(TypeInfoMappingCollection mappings) - { - mappings.AddType(PgTypeName, (options, mapping, _) => + => mappings.AddType(PgTypeName, (options, mapping, _) => { var pgType = mapping.GetPgType(options); if (pgType is not PostgresCompositeType compositeType) @@ -201,7 +200,6 @@ internal override void AddMapping(TypeInfoMappingCollection mappings) return mapping.CreateInfo(options, new CompositeConverter( ReflectionCompositeInfoFactory.CreateCompositeInfo(compositeType, nameTranslator, options))); }, isDefault: true); - } internal override void AddArrayMapping(TypeInfoMappingCollection mappings) => mappings.AddArrayType(PgTypeName); } @@ -214,8 +212,7 @@ sealed class StructCompositeMapping< where T : struct { internal override void 
AddMapping(TypeInfoMappingCollection mappings) - { - mappings.AddStructType(PgTypeName, (options, mapping, dataTypeNameMatch) => + => mappings.AddStructType(PgTypeName, (options, mapping, dataTypeNameMatch) => { var pgType = mapping.GetPgType(options); if (pgType is not PostgresCompositeType compositeType) @@ -224,7 +221,6 @@ internal override void AddMapping(TypeInfoMappingCollection mappings) return mapping.CreateInfo(options, new CompositeConverter( ReflectionCompositeInfoFactory.CreateCompositeInfo(compositeType, nameTranslator, options))); }, isDefault: true); - } internal override void AddArrayMapping(TypeInfoMappingCollection mappings) => mappings.AddStructArrayType(PgTypeName); } diff --git a/src/Shared/CodeAnalysis.cs b/src/Shared/CodeAnalysis.cs index 518874c699..091b856c05 100644 --- a/src/Shared/CodeAnalysis.cs +++ b/src/Shared/CodeAnalysis.cs @@ -20,9 +20,7 @@ sealed class RequiresDynamicCodeAttribute : Attribute /// A message that contains information about the usage of dynamic code. /// public RequiresDynamicCodeAttribute(string message) - { - Message = message; - } + => Message = message; /// /// Gets a message that contains information about the usage of dynamic code. 
diff --git a/test/Npgsql.Benchmarks/CommandExecuteBenchmarks.cs b/test/Npgsql.Benchmarks/CommandExecuteBenchmarks.cs index c75febe708..e2e6d4706a 100644 --- a/test/Npgsql.Benchmarks/CommandExecuteBenchmarks.cs +++ b/test/Npgsql.Benchmarks/CommandExecuteBenchmarks.cs @@ -55,8 +55,6 @@ public object ExecuteReader() class Config : ManualConfig { public Config() - { - AddColumn(StatisticColumn.OperationsPerSecond); - } + => AddColumn(StatisticColumn.OperationsPerSecond); } -} \ No newline at end of file +} diff --git a/test/Npgsql.Benchmarks/Commit.cs b/test/Npgsql.Benchmarks/Commit.cs index 96e04ade96..9ab03c11db 100644 --- a/test/Npgsql.Benchmarks/Commit.cs +++ b/test/Npgsql.Benchmarks/Commit.cs @@ -29,8 +29,6 @@ public void Basic() class Config : ManualConfig { public Config() - { - AddColumn(StatisticColumn.OperationsPerSecond); - } + => AddColumn(StatisticColumn.OperationsPerSecond); } -} \ No newline at end of file +} diff --git a/test/Npgsql.Benchmarks/ConnectionCreationBenchmarks.cs b/test/Npgsql.Benchmarks/ConnectionCreationBenchmarks.cs index e63bbba7c6..633445ae0a 100644 --- a/test/Npgsql.Benchmarks/ConnectionCreationBenchmarks.cs +++ b/test/Npgsql.Benchmarks/ConnectionCreationBenchmarks.cs @@ -22,8 +22,6 @@ public class ConnectionCreationBenchmarks class Config : ManualConfig { public Config() - { - AddColumn(StatisticColumn.OperationsPerSecond); - } + => AddColumn(StatisticColumn.OperationsPerSecond); } -} \ No newline at end of file +} diff --git a/test/Npgsql.Benchmarks/ConnectionOpenCloseBenchmarks.cs b/test/Npgsql.Benchmarks/ConnectionOpenCloseBenchmarks.cs index d733ff9c11..ef5e69f62e 100644 --- a/test/Npgsql.Benchmarks/ConnectionOpenCloseBenchmarks.cs +++ b/test/Npgsql.Benchmarks/ConnectionOpenCloseBenchmarks.cs @@ -168,8 +168,6 @@ public void NonPooled() class Config : ManualConfig { public Config() - { - AddColumn(StatisticColumn.OperationsPerSecond); - } + => AddColumn(StatisticColumn.OperationsPerSecond); } -} \ No newline at end of file +} diff 
--git a/test/Npgsql.Benchmarks/Prepare.cs b/test/Npgsql.Benchmarks/Prepare.cs index 6b8d9b06bc..5648e75f98 100644 --- a/test/Npgsql.Benchmarks/Prepare.cs +++ b/test/Npgsql.Benchmarks/Prepare.cs @@ -54,9 +54,7 @@ public void GlobalSetup() [GlobalCleanup] public void GlobalCleanup() - { - _conn.Dispose(); - } + => _conn.Dispose(); public Prepare() { @@ -119,4 +117,4 @@ static string GenerateQuery(int tablesToJoin) .Values .Cast() .ToArray(); -} \ No newline at end of file +} diff --git a/test/Npgsql.PluginTests/LegacyNodaTimeTests.cs b/test/Npgsql.PluginTests/LegacyNodaTimeTests.cs index 3f5eb05177..c6e5f25a9d 100644 --- a/test/Npgsql.PluginTests/LegacyNodaTimeTests.cs +++ b/test/Npgsql.PluginTests/LegacyNodaTimeTests.cs @@ -16,15 +16,13 @@ public class LegacyNodaTimeTests : TestBase, IDisposable [Test] public async Task Timestamp_as_ZonedDateTime() - { - await AssertType( + => await AssertType( new LocalDateTime(1998, 4, 12, 13, 26, 38, 789).InZoneLeniently(DateTimeZoneProviders.Tzdb[TimeZone]), "1998-04-12 13:26:38.789+02", "timestamp with time zone", NpgsqlDbType.TimestampTz, DbType.DateTimeOffset, isNpgsqlDbTypeInferredFromClrType: false, isDefault: false); - } [Test] public Task Timestamp_as_Instant() diff --git a/test/Npgsql.Specification.Tests/NpgsqlSelectValueFixture.cs b/test/Npgsql.Specification.Tests/NpgsqlSelectValueFixture.cs index f524a00505..06bdb837f2 100644 --- a/test/Npgsql.Specification.Tests/NpgsqlSelectValueFixture.cs +++ b/test/Npgsql.Specification.Tests/NpgsqlSelectValueFixture.cs @@ -10,8 +10,7 @@ namespace Npgsql.Specification.Tests; public class NpgsqlSelectValueFixture : NpgsqlDbFactoryFixture, ISelectValueFixture, IDeleteFixture, IDisposable { public NpgsqlSelectValueFixture() - { - Utility.ExecuteNonQuery(this, @" + => Utility.ExecuteNonQuery(this, @" DROP TABLE IF EXISTS select_value; CREATE TABLE select_value ( @@ -39,7 +38,6 @@ INSERT INTO select_value VALUES (4, NULL, false, '0001-01-01', '0001-01-01', '0001-01-01', 0.000000000000001, 
2.23e-308, '33221100-5544-7766-9988-aabbccddeeff', -32768, -2147483648, -9223372036854775808, 1.18e-38, NULL, '00:00:00'), (5, NULL, true, '9999-12-31', '9999-12-31 23:59:59.999', '9999-12-31 23:59:59.999 +14:00', 99999999999999999999.999999999999999, 1.79e308, 'ccddeeff-aabb-8899-7766-554433221100', 32767, 2147483647, 9223372036854775807, 3.40e38, NULL, '23:59:59.999'); "); - } public void Dispose() => Utility.ExecuteNonQuery(this, "DROP TABLE IF EXISTS select_value;"); @@ -71,4 +69,4 @@ public string CreateSelectSql(byte[] value) => public Type NullValueExceptionType => typeof(InvalidCastException); public string DeleteNoRows => "DELETE FROM select_value WHERE 1 = 0"; -} \ No newline at end of file +} diff --git a/test/Npgsql.Tests/BugTests.cs b/test/Npgsql.Tests/BugTests.cs index e3c05dd5fb..2e3dfa97fc 100644 --- a/test/Npgsql.Tests/BugTests.cs +++ b/test/Npgsql.Tests/BugTests.cs @@ -174,8 +174,7 @@ public void Bug1695() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1700")] public void Bug1700() - { - Assert.That(() => + => Assert.That(() => { using var conn = OpenConnection(); using var tx = conn.BeginTransaction(); @@ -197,7 +196,6 @@ public void Bug1700() // Note, we never get here tx.Commit(); }, Throws.InvalidOperationException.With.Message.EqualTo("Some problem parsing the returned data")); - } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1964")] public void Bug1964() diff --git a/test/Npgsql.Tests/TestMetrics.cs b/test/Npgsql.Tests/TestMetrics.cs index d1280c1e79..3b6c11dbda 100644 --- a/test/Npgsql.Tests/TestMetrics.cs +++ b/test/Npgsql.Tests/TestMetrics.cs @@ -41,17 +41,13 @@ private TestMetrics(TimeSpan allowedTime, bool reportOnStop) /// Report metrics to stdout when stopped. /// A new running TestMetrics object. public static TestMetrics Start(TimeSpan allowedTime, bool reportOnStop) - { - return new(allowedTime, reportOnStop); - } + => new(allowedTime, reportOnStop); /// /// Increment the Iterations value by one. 
/// public void IncrementIterations() - { - Iterations++; - } + => Iterations++; /// /// Stop the internal stop watch and record elapsed CPU times. @@ -81,9 +77,7 @@ public void Stop() /// Stop the internal stop watch and record elapsed CPU times. /// public void Dispose() - { - Stop(); - } + => Stop(); /// /// Report whether ElapsedClockTime has met or exceeded the maximum run time. @@ -96,9 +90,7 @@ public void Dispose() /// /// The number of iterations accumulated per the time span provided. public double IterationsPer(TimeSpan timeSpan) - { - return (double)Iterations / ((double)stopwatch.Elapsed.TotalMilliseconds / (double)timeSpan.TotalMilliseconds); - } + => (double)Iterations / ((double)stopwatch.Elapsed.TotalMilliseconds / (double)timeSpan.TotalMilliseconds); /// /// Calculate the number of iterations accumulated per second. @@ -106,9 +98,7 @@ public double IterationsPer(TimeSpan timeSpan) /// /// The number of iterations accumulated per second. public double IterationsPerSecond() - { - return IterationsPer(new TimeSpan(0, 0, 1)); - } + => IterationsPer(new TimeSpan(0, 0, 1)); /// /// Calculate the number of iterations accumulated per the CPU time span provided. @@ -116,9 +106,7 @@ public double IterationsPerSecond() /// /// The number of iterations accumulated per the CPU time span provided. public double IterationsPerCPU(TimeSpan timeSpan) - { - return (double)Iterations / ((double)ElapsedTotalCPUTime.TotalMilliseconds / (double)timeSpan.TotalMilliseconds); - } + => (double)Iterations / ((double)ElapsedTotalCPUTime.TotalMilliseconds / (double)timeSpan.TotalMilliseconds); /// /// Calculate the number of iterations accumulated per CPU second. @@ -127,9 +115,7 @@ public double IterationsPerCPU(TimeSpan timeSpan) /// /// The number of iterations accumulated per CPU second. public double IterationsPerCPUSecond() - { - return IterationsPerCPU(new TimeSpan(0, 0, 1)); - } + => IterationsPerCPU(new TimeSpan(0, 0, 1)); /// /// Elapsed time since start. 
@@ -176,4 +162,4 @@ public TimeSpan ElapsedUserCPUTime /// Elapsed total (system + user) CPU time since start. /// public TimeSpan ElapsedTotalCPUTime => ElapsedSystemCPUTime + ElapsedUserCPUTime; -} \ No newline at end of file +} diff --git a/test/Npgsql.Tests/Types/ArrayTests.cs b/test/Npgsql.Tests/Types/ArrayTests.cs index 5917f24eff..3202bd0ba9 100644 --- a/test/Npgsql.Tests/Types/ArrayTests.cs +++ b/test/Npgsql.Tests/Types/ArrayTests.cs @@ -163,10 +163,8 @@ public async Task Write_IList_implementation() [Test] public void Read_IList_implementation_throws() - { - Assert.ThrowsAsync(() => + => Assert.ThrowsAsync(() => AssertTypeRead("{1,2,3}", "integer[]", ImmutableArray.Create(1, 2, 3), isDefault: false)); - } [Test] public async Task Generic_IList() diff --git a/test/Npgsql.Tests/Types/CompositeTests.cs b/test/Npgsql.Tests/Types/CompositeTests.cs index bdbdb97d54..3928be3515 100644 --- a/test/Npgsql.Tests/Types/CompositeTests.cs +++ b/test/Npgsql.Tests/Types/CompositeTests.cs @@ -649,9 +649,7 @@ readonly struct DuplicateOneLongOneBool(bool boolean, [PgName("boolean")] int @b readonly struct MissingSetterOneLongOneBool { public MissingSetterOneLongOneBool(long @long) - { - LongValue = @long; - } + => LongValue = @long; public MissingSetterOneLongOneBool(bool boolean, [PgName("boolean")]int @bool) { @@ -671,9 +669,7 @@ public OneLongOneBool(bool boolean, [PgName("boolean")]int @bool) } public OneLongOneBool(long @long) - { - LongValue = @long; - } + => LongValue = @long; public OneLongOneBool(double other) { diff --git a/test/Npgsql.Tests/Types/RangeTests.cs b/test/Npgsql.Tests/Types/RangeTests.cs index a8c67c0d8c..f0f9fa6637 100644 --- a/test/Npgsql.Tests/Types/RangeTests.cs +++ b/test/Npgsql.Tests/Types/RangeTests.cs @@ -406,14 +406,10 @@ class SimpleType string? Value { get; } SimpleType(string? value) - { - Value = value; - } + => Value = value; public override string? 
ToString() - { - return Value; - } + => Value; class SimpleTypeConverter : TypeConverter { From 237570718072de782c43ce211bd13198ffc4415e Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Wed, 30 Oct 2024 16:26:35 +0100 Subject: [PATCH 478/761] Merge to patterns cleanup --- src/Npgsql/Internal/NpgsqlConnector.cs | 8 ++++---- src/Npgsql/Internal/NpgsqlReadBuffer.cs | 6 ++---- src/Npgsql/NpgsqlBatchCommand.cs | 2 +- src/Npgsql/NpgsqlCommand.cs | 2 +- src/Npgsql/NpgsqlRawCopyStream.cs | 2 +- src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs | 2 +- test/Npgsql.Tests/CommandTests.cs | 2 +- test/Npgsql.Tests/ConnectionTests.cs | 12 ++++-------- 8 files changed, 15 insertions(+), 21 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 7a60648c71..53438b7bd0 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -504,7 +504,7 @@ internal async Task Open(NpgsqlTimeout timeout, bool async, CancellationToken ca SerializerOptions = DataSource.SerializerOptions; DatabaseInfo = DataSource.DatabaseInfo; - if (Settings.Pooling && !Settings.Multiplexing && !Settings.NoResetOnClose && DatabaseInfo.SupportsDiscard) + if (Settings.Pooling && Settings is { Multiplexing: false, NoResetOnClose: false } && DatabaseInfo.SupportsDiscard) { _sendResetOnClose = true; GenerateResetMessage(); @@ -1145,7 +1145,7 @@ void SetSocketOptions(Socket socket) if (Settings.TcpKeepAlive) socket.SetSocketOption(SocketOptionLevel.Socket, SocketOptionName.KeepAlive, true); - if (Settings.TcpKeepAliveInterval > 0 && Settings.TcpKeepAliveTime == 0) + if (Settings is { TcpKeepAliveInterval: > 0, TcpKeepAliveTime: 0 }) throw new ArgumentException("If TcpKeepAliveInterval is defined, TcpKeepAliveTime must be defined as well"); if (Settings.TcpKeepAliveTime > 0) { @@ -2001,7 +2001,7 @@ internal async Task CloseOngoingOperations(bool async) // therefore vulnerable to the race condition in #615. 
if (copyOperation is NpgsqlBinaryImporter || copyOperation is NpgsqlCopyTextWriter || - copyOperation is NpgsqlRawCopyStream rawCopyStream && rawCopyStream.CanWrite) + copyOperation is NpgsqlRawCopyStream { CanWrite: true }) { try { @@ -2676,7 +2676,7 @@ internal async Task Wait(bool async, int timeout, CancellationToken cancel { msg = await ReadMessageWithNotifications(async).ConfigureAwait(false); } - catch (Exception e) when (e is OperationCanceledException || e is NpgsqlException npgEx && npgEx.InnerException is TimeoutException) + catch (Exception e) when (e is OperationCanceledException || e is NpgsqlException { InnerException: TimeoutException }) { // We're somewhere in the middle of a reading keepalive messages // Breaking the connection, as we've lost protocol sync diff --git a/src/Npgsql/Internal/NpgsqlReadBuffer.cs b/src/Npgsql/Internal/NpgsqlReadBuffer.cs index bd878ed4b0..a9b094efc2 100644 --- a/src/Npgsql/Internal/NpgsqlReadBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlReadBuffer.cs @@ -223,8 +223,7 @@ async ValueTask ReadWithTimeoutAsync(Memory buffer, CancellationToken // If we should attempt PostgreSQL cancellation, do it the first time we get a timeout. // TODO: As an optimization, we can still attempt to send a cancellation request, but after // that immediately break the connection - if (connector.AttemptPostgresCancellation && - !connector.PostgresCancellationPerformed && + if (connector is { AttemptPostgresCancellation: true, PostgresCancellationPerformed: false } && connector.PerformPostgresCancellation()) { // Note that if the cancellation timeout is negative, we flow down and break the @@ -350,8 +349,7 @@ static async ValueTask EnsureLong( // If we should attempt PostgreSQL cancellation, do it the first time we get a timeout. 
// TODO: As an optimization, we can still attempt to send a cancellation request, but after // that immediately break the connection - if (connector.AttemptPostgresCancellation && - !connector.PostgresCancellationPerformed && + if (connector is { AttemptPostgresCancellation: true, PostgresCancellationPerformed: false } && connector.PerformPostgresCancellation()) { // Note that if the cancellation timeout is negative, we flow down and break the diff --git a/src/Npgsql/NpgsqlBatchCommand.cs b/src/Npgsql/NpgsqlBatchCommand.cs index 0321ba4369..f25e8937b4 100644 --- a/src/Npgsql/NpgsqlBatchCommand.cs +++ b/src/Npgsql/NpgsqlBatchCommand.cs @@ -183,7 +183,7 @@ internal RowDescriptionMessage? Description /// internal PreparedStatement? PreparedStatement { - get => _preparedStatement != null && _preparedStatement.State == PreparedState.Unprepared + get => _preparedStatement is { State: PreparedState.Unprepared } ? _preparedStatement = null : _preparedStatement; set => _preparedStatement = value; diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index 9719485bd4..c53c0e5468 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -1031,7 +1031,7 @@ internal void ProcessRawQuery(SqlQueryParser? 
parser, bool standardConformingStr static void ValidateParameterCount(NpgsqlBatchCommand batchCommand) { - if (batchCommand.HasParameters && batchCommand.PositionalParameters.Count > ushort.MaxValue) + if (batchCommand is { HasParameters: true, PositionalParameters.Count: > ushort.MaxValue }) ThrowHelper.ThrowNpgsqlException("A statement cannot have more than 65535 parameters"); } } diff --git a/src/Npgsql/NpgsqlRawCopyStream.cs b/src/Npgsql/NpgsqlRawCopyStream.cs index 689e11ba5b..d7b818679a 100644 --- a/src/Npgsql/NpgsqlRawCopyStream.cs +++ b/src/Npgsql/NpgsqlRawCopyStream.cs @@ -383,7 +383,7 @@ async ValueTask DisposeAsync(bool disposing, bool async) } _connector.SkipUntil(BackendMessageCode.ReadyForQuery); } - catch (OperationCanceledException e) when (e.InnerException is PostgresException pg && pg.SqlState == PostgresErrorCodes.QueryCanceled) + catch (OperationCanceledException e) when (e.InnerException is PostgresException { SqlState: PostgresErrorCodes.QueryCanceled }) { LogMessages.CopyOperationCancelled(_copyLogger, _connector.Id); } diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs b/src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs index dca621190d..2cf1bcb3f7 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs @@ -189,7 +189,7 @@ public static NpgsqlTsVector Parse(string value) if (value[pos] >= 'B' && value[pos] <= 'D' || value[pos] >= 'b' && value[pos] <= 'd') { var weight = value[pos]; - if (weight >= 'b' && weight <= 'd') + if (weight is >= 'b' and <= 'd') weight = (char)(weight - ('b' - 'B')); wordEntryPositions.Add(new Lexeme.WordEntryPos(wordPos, Lexeme.Weight.D + ('D' - weight))); pos++; diff --git a/test/Npgsql.Tests/CommandTests.cs b/test/Npgsql.Tests/CommandTests.cs index f43550320a..c0ec6e45b9 100644 --- a/test/Npgsql.Tests/CommandTests.cs +++ b/test/Npgsql.Tests/CommandTests.cs @@ -243,7 +243,7 @@ public async Task Timeout_from_connection_string() public async Task Timeout_switch_connection() { 
var csb = new NpgsqlConnectionStringBuilder(ConnectionString); - if (csb.CommandTimeout >= 100 && csb.CommandTimeout < 105) + if (csb.CommandTimeout is >= 100 and < 105) IgnoreExceptOnBuildServer("Bad default command timeout"); await using var dataSource1 = CreateDataSource(ConnectionString + ";CommandTimeout=100"); diff --git a/test/Npgsql.Tests/ConnectionTests.cs b/test/Npgsql.Tests/ConnectionTests.cs index 424fc38830..7dd4f9e82b 100644 --- a/test/Npgsql.Tests/ConnectionTests.cs +++ b/test/Npgsql.Tests/ConnectionTests.cs @@ -32,12 +32,10 @@ public async Task Basic_lifecycle() conn.StateChange += (s, e) => { - if (e.OriginalState == ConnectionState.Closed && - e.CurrentState == ConnectionState.Open) + if (e is { OriginalState: ConnectionState.Closed, CurrentState: ConnectionState.Open }) eventOpen = true; - if (e.OriginalState == ConnectionState.Open && - e.CurrentState == ConnectionState.Closed) + if (e is { OriginalState: ConnectionState.Open, CurrentState: ConnectionState.Closed }) eventClosed = true; }; @@ -83,12 +81,10 @@ public async Task Broken_lifecycle([Values] bool openFromClose) conn.StateChange += (s, e) => { - if (e.OriginalState == ConnectionState.Closed && - e.CurrentState == ConnectionState.Open) + if (e is { OriginalState: ConnectionState.Closed, CurrentState: ConnectionState.Open }) eventOpen = true; - if (e.OriginalState == ConnectionState.Open && - e.CurrentState == ConnectionState.Closed) + if (e is { OriginalState: ConnectionState.Open, CurrentState: ConnectionState.Closed }) eventClosed = true; }; From d2ef02f720e4dd0867649c72bd92723a994e7067 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Wed, 30 Oct 2024 16:30:54 +0100 Subject: [PATCH 479/761] Various additional small cleanup --- src/Npgsql/Internal/NpgsqlConnector.Auth.cs | 2 +- src/Npgsql/Internal/NpgsqlConnector.cs | 2 +- src/Npgsql/NpgsqlDataReader.cs | 2 +- src/Npgsql/NpgsqlDataSource.cs | 2 +- src/Npgsql/NpgsqlTypes/NpgsqlRange.cs | 4 ++-- test/Npgsql.Tests/DataAdapterTests.cs | 4 
++-- test/Npgsql.Tests/ReaderTests.cs | 4 ++-- 7 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs index fad990158f..59564f5361 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs @@ -322,7 +322,7 @@ async Task AuthenticateMD5(string username, byte[] salt, bool async, Cancellatio var resultString = sb.ToString(); result = new byte[Encoding.UTF8.GetByteCount(resultString) + 1]; Encoding.UTF8.GetBytes(resultString, 0, resultString.Length, result, 0); - result[result.Length - 1] = 0; + result[^1] = 0; } await WritePassword(result, async, cancellationToken).ConfigureAwait(false); diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 53438b7bd0..8ef246448c 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -2772,7 +2772,7 @@ void ReadParameterStatus(ReadOnlySpan incomingName, ReadOnlySpan inc for (var i = 0; i < _rawParameters.Count; i++) { - (var currentName, var currentValue) = _rawParameters[i]; + var (currentName, currentValue) = _rawParameters[i]; if (incomingName.SequenceEqual(currentName)) { if (incomingValue.SequenceEqual(currentValue)) diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 37eeb3eda8..7446889c05 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -502,7 +502,7 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo } // There are no more queries, we're done. Read the RFQ. - if (_statements.Count is 0 || !(_statements[_statements.Count - 1].AppendErrorBarrier ?? Command.EnableErrorBarriers)) + if (_statements.Count is 0 || !(_statements[^1].AppendErrorBarrier ?? 
Command.EnableErrorBarriers)) Expect(await Connector.ReadMessage(async).ConfigureAwait(false), Connector); State = ReaderState.Consumed; diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index acdaf5f56f..2b7f4e5420 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -447,7 +447,7 @@ internal virtual bool TryRentEnlistedPending(Transaction transaction, NpgsqlConn connector = null; return false; } - connector = list[list.Count - 1]; + connector = list[^1]; list.RemoveAt(list.Count - 1); if (list.Count == 0) _pendingEnlistedConnectors.Remove(transaction); diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlRange.cs b/src/Npgsql/NpgsqlTypes/NpgsqlRange.cs index c260202ce9..b447cb5df7 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlRange.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlRange.cs @@ -395,8 +395,8 @@ public static NpgsqlRange Parse(string value) if (!lowerInclusive && !lowerExclusive) throw new FormatException("Malformed range literal. Missing left parenthesis or bracket."); - var upperInclusive = value[value.Length - 1] == UpperInclusiveBound; - var upperExclusive = value[value.Length - 1] == UpperExclusiveBound; + var upperInclusive = value[^1] == UpperInclusiveBound; + var upperExclusive = value[^1] == UpperExclusiveBound; if (!upperInclusive && !upperExclusive) throw new FormatException("Malformed range literal. 
Missing right parenthesis or bracket."); diff --git a/test/Npgsql.Tests/DataAdapterTests.cs b/test/Npgsql.Tests/DataAdapterTests.cs index 4b413409d7..016e01b6b3 100644 --- a/test/Npgsql.Tests/DataAdapterTests.cs +++ b/test/Npgsql.Tests/DataAdapterTests.cs @@ -304,7 +304,7 @@ public async Task Update_letting_null_field_falue() var dt = ds.Tables[0]; Assert.IsNotNull(dt); - var dr = ds.Tables[0].Rows[ds.Tables[0].Rows.Count - 1]; + var dr = ds.Tables[0].Rows[^1]; dr["field_int2"] = 4; var ds2 = ds.GetChanges()!; @@ -350,7 +350,7 @@ public async Task DoUpdateWithDataSet() var dt = ds.Tables[0]; Assert.IsNotNull(dt); - var dr = ds.Tables[0].Rows[ds.Tables[0].Rows.Count - 1]; + var dr = ds.Tables[0].Rows[^1]; dr["field_int2"] = 4; diff --git a/test/Npgsql.Tests/ReaderTests.cs b/test/Npgsql.Tests/ReaderTests.cs index 4d6ceab478..7ee1aa6e11 100644 --- a/test/Npgsql.Tests/ReaderTests.cs +++ b/test/Npgsql.Tests/ReaderTests.cs @@ -1400,7 +1400,7 @@ public async Task GetBytes() reader.GetBytes(4, 0, actual, 0, 2); Assert.That(reader.GetBytes(4, expected.Length - 1, actual, 0, 2), Is.EqualTo(1), "Length greater than data length"); - Assert.That(actual[0], Is.EqualTo(expected[expected.Length - 1]), "Length greater than data length"); + Assert.That(actual[0], Is.EqualTo(expected[^1]), "Length greater than data length"); Assert.That(() => reader.GetBytes(4, 0, actual, 0, actual.Length + 1), Throws.Exception.TypeOf(), "Length great than output buffer length"); // Close in the middle of a column @@ -1662,7 +1662,7 @@ public async Task GetChars() // Jump to another column from the middle of the column reader.GetChars(5, 0, actual, 0, 2); Assert.That(reader.GetChars(5, expected.Length - 1, actual, 0, 2), Is.EqualTo(1), "Length greater than data length"); - Assert.That(actual[0], Is.EqualTo(expected[expected.Length - 1]), "Length greater than data length"); + Assert.That(actual[0], Is.EqualTo(expected[^1]), "Length greater than data length"); Assert.That(() => reader.GetChars(5, 0, 
actual, 0, actual.Length + 1), Throws.Exception.TypeOf(), "Length great than output buffer length"); // Close in the middle of a column reader.GetChars(6, 0, actual, 0, 2); From 441c1154ee553b649b2579dd9981dc5e352d8623 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Wed, 30 Oct 2024 19:18:19 +0100 Subject: [PATCH 480/761] Remove explicit this --- src/Npgsql/Shims/ExperimentalAttribute.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Npgsql/Shims/ExperimentalAttribute.cs b/src/Npgsql/Shims/ExperimentalAttribute.cs index 36ff9ee11d..ad6dfbf58c 100644 --- a/src/Npgsql/Shims/ExperimentalAttribute.cs +++ b/src/Npgsql/Shims/ExperimentalAttribute.cs @@ -7,7 +7,7 @@ public sealed class ExperimentalAttribute : Attribute { /// Initializes a new instance of the class, specifying the ID that the compiler will use when reporting a use of the API the attribute applies to. /// The ID that the compiler will use when reporting a use of the API the attribute applies to. - public ExperimentalAttribute(string diagnosticId) => this.DiagnosticId = diagnosticId; + public ExperimentalAttribute(string diagnosticId) => DiagnosticId = diagnosticId; /// Gets the ID that the compiler will use when reporting a use of the API the attribute applies to. /// The unique diagnostic ID. 
From 91086e54deb970d828b7518e80b918b80f94323e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bal=C3=A1zs=20M=C3=A9sz=C3=A9get=C5=91?= Date: Fri, 8 Nov 2024 20:52:15 +0100 Subject: [PATCH 481/761] Improve parameter logging: show actual values of array-typed parameters, fixes #5342 (#5673) inspired by: LogValuesFormatter from Microsoft.Extensions.Logging.Abstractions Co-authored-by: Shay Rojansky --- src/Npgsql/NpgsqlCommand.cs | 51 ++++- test/Npgsql.Tests/BatchTests.cs | 94 ---------- test/Npgsql.Tests/CommandTests.cs | 176 ----------------- test/Npgsql.Tests/LoggingTests.cs | 302 ++++++++++++++++++++++++++++++ 4 files changed, 347 insertions(+), 276 deletions(-) create mode 100644 test/Npgsql.Tests/LoggingTests.cs diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index c53c0e5468..8116a73c7f 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -17,6 +17,7 @@ using Microsoft.Extensions.Logging; using Npgsql.Internal; using Npgsql.Properties; +using System.Collections; namespace Npgsql; @@ -1812,7 +1813,7 @@ internal void LogExecutingCompleted(NpgsqlConnector connector, bool executing) LogMessages.ExecutingCommandWithParameters( logger, singleCommand.FinalCommandText!, - ParametersDbNullAsString(singleCommand), + GetParametersForLogging(singleCommand), connector.Id); } else @@ -1820,7 +1821,7 @@ internal void LogExecutingCompleted(NpgsqlConnector connector, bool executing) LogMessages.CommandExecutionCompletedWithParameters( logger, singleCommand.FinalCommandText!, - ParametersDbNullAsString(singleCommand), + GetParametersForLogging(singleCommand), connector.QueryLogStopWatch.ElapsedMilliseconds, connector.Id); } @@ -1839,7 +1840,7 @@ internal void LogExecutingCompleted(NpgsqlConnector connector, bool executing) { var commands = new (string, object[])[InternalBatchCommands.Count]; for (var i = 0; i < InternalBatchCommands.Count; i++) - commands[i] = (InternalBatchCommands[i].FinalCommandText!, 
ParametersDbNullAsString(InternalBatchCommands[i])); + commands[i] = (InternalBatchCommands[i].FinalCommandText!, GetParametersForLogging(InternalBatchCommands[i])); if (executing) LogMessages.ExecutingBatchWithParameters(logger, commands, connector.Id); @@ -1858,15 +1859,53 @@ internal void LogExecutingCompleted(NpgsqlConnector connector, bool executing) } } - object[] ParametersDbNullAsString(NpgsqlBatchCommand c) + static object[] GetParametersForLogging(NpgsqlBatchCommand c) { var positionalParameters = c.CurrentParametersReadOnly; var parameters = new object[positionalParameters.Count]; for (var i = 0; i < positionalParameters.Count; i++) - parameters[i] = positionalParameters[i].Value == DBNull.Value ? "NULL" : positionalParameters[i].Value!; + { + parameters[i] = GetParameterForLogging(positionalParameters[i].Value); + } return parameters; + + object GetParameterForLogging(object? value) + { + return value switch + { + DBNull or null => "NULL", + IEnumerable enumerable and not string => GetEnumerableForLogging(enumerable), + _ => value + }; + + string GetEnumerableForLogging(IEnumerable enumerable) + { + var vsb = new StringBuilder(256); + var count = 0; + vsb.Append('['); + foreach (var e in enumerable) + { + if (count > 9) + { + vsb.Append(", ..."); + break; + } + + if (count > 0) + { + vsb.Append(", "); + } + + vsb.Append(GetParameterForLogging(e)); + count++; + } + + vsb.Append(']'); + return vsb.ToString(); + } + } } - } + } /// /// Create a new command based on this one. 
diff --git a/test/Npgsql.Tests/BatchTests.cs b/test/Npgsql.Tests/BatchTests.cs index 5fddf0a316..4450635edb 100644 --- a/test/Npgsql.Tests/BatchTests.cs +++ b/test/Npgsql.Tests/BatchTests.cs @@ -797,100 +797,6 @@ public async Task Batch_dispose_reuse() #endregion Miscellaneous - #region Logging - - [Test] - public async Task Log_ExecuteScalar_single_statement_without_parameters() - { - await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider); - await using var conn = await dataSource.OpenConnectionAsync(); - await using var cmd = new NpgsqlBatch(conn) - { - BatchCommands = { new("SELECT 1") } - }; - - using (listLoggerProvider.Record()) - { - await cmd.ExecuteScalarAsync(); - } - - var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); - - Assert.That(executingCommandEvent.Message, Does.Contain("Command execution completed").And.Contains("SELECT 1")); - AssertLoggingStateContains(executingCommandEvent, "CommandText", "SELECT 1"); - AssertLoggingStateDoesNotContain(executingCommandEvent, "Parameters"); - - if (!IsMultiplexing) - AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); - } - - [Test] - public async Task Log_ExecuteScalar_multiple_statements_with_parameters() - { - await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider); - await using var conn = await dataSource.OpenConnectionAsync(); - await using var batch = new NpgsqlBatch(conn) - { - BatchCommands = - { - new("SELECT $1") { Parameters = { new() { Value = 8 } } }, - new("SELECT $1, 9") { Parameters = { new() { Value = 9 } } } - } - }; - - using (listLoggerProvider.Record()) - { - await batch.ExecuteScalarAsync(); - } - - var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); - - // Note: the message formatter of Microsoft.Extensions.Logging doesn't seem to handle arrays inside tuples, so we get the - // 
following ugliness (https://github.com/dotnet/runtime/issues/63165). Serilog handles this fine. - Assert.That(executingCommandEvent.Message, Does.Contain("Batch execution completed").And.Contains("[(SELECT $1, System.Object[]), (SELECT $1, 9, System.Object[])]")); - AssertLoggingStateDoesNotContain(executingCommandEvent, "CommandText"); - AssertLoggingStateDoesNotContain(executingCommandEvent, "Parameters"); - - if (!IsMultiplexing) - AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); - - var batchCommands = (IList<(string CommandText, object[] Parameters)>)AssertLoggingStateContains(executingCommandEvent, "BatchCommands"); - Assert.That(batchCommands.Count, Is.EqualTo(2)); - Assert.That(batchCommands[0].CommandText, Is.EqualTo("SELECT $1")); - Assert.That(batchCommands[0].Parameters[0], Is.EqualTo(8)); - Assert.That(batchCommands[1].CommandText, Is.EqualTo("SELECT $1, 9")); - Assert.That(batchCommands[1].Parameters[0], Is.EqualTo(9)); - } - - [Test] - public async Task Log_ExecuteScalar_single_statement_with_parameter_logging_off() - { - await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider, sensitiveDataLoggingEnabled: false); - await using var conn = await dataSource.OpenConnectionAsync(); - await using var batch = new NpgsqlBatch(conn) - { - BatchCommands = - { - new("SELECT $1") { Parameters = { new() { Value = 8 } } }, - new("SELECT $1, 9") { Parameters = { new() { Value = 9 } } } - } - }; - - using (listLoggerProvider.Record()) - { - await batch.ExecuteScalarAsync(); - } - - var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); - Assert.That(executingCommandEvent.Message, Does.Contain("Batch execution completed").And.Contains("[SELECT $1, SELECT $1, 9]")); - var batchCommands = (IList)AssertLoggingStateContains(executingCommandEvent, "BatchCommands"); - Assert.That(batchCommands.Count, Is.EqualTo(2)); - Assert.That(batchCommands[0], 
Is.EqualTo("SELECT $1")); - Assert.That(batchCommands[1], Is.EqualTo("SELECT $1, 9")); - } - - #endregion Logging - #region Initialization / setup / teardown // ReSharper disable InconsistentNaming diff --git a/test/Npgsql.Tests/CommandTests.cs b/test/Npgsql.Tests/CommandTests.cs index c0ec6e45b9..f159d7d97c 100644 --- a/test/Npgsql.Tests/CommandTests.cs +++ b/test/Npgsql.Tests/CommandTests.cs @@ -5,7 +5,6 @@ using NUnit.Framework; using System; using System.Buffers.Binary; -using System.Collections.Generic; using System.Data; using System.Linq; using System.Text; @@ -1640,179 +1639,4 @@ await server Assert.That(connection.PostgresParameters, Contains.Key("SomeKey").WithValue("SomeValue")); } - - #region Logging - - [Test] - public async Task Log_ExecuteScalar_single_statement_without_parameters() - { - await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider); - await using var conn = await dataSource.OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT 1", conn); - - using (listLoggerProvider.Record()) - { - await cmd.ExecuteScalarAsync(); - } - - var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); - Assert.That(executingCommandEvent.Message, Does.Contain("Command execution completed").And.Contains("SELECT 1")); - AssertLoggingStateContains(executingCommandEvent, "CommandText", "SELECT 1"); - AssertLoggingStateDoesNotContain(executingCommandEvent, "Parameters"); - - if (!IsMultiplexing) - AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); - } - - [Test] - public async Task Log_ExecuteScalar_single_statement_with_positional_parameters() - { - await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider); - await using var conn = await dataSource.OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT $1, $2", conn); - cmd.Parameters.Add(new() { Value = 8 }); - cmd.Parameters.Add(new() { 
NpgsqlDbType = NpgsqlDbType.Integer, Value = DBNull.Value }); - - using (listLoggerProvider.Record()) - { - await cmd.ExecuteScalarAsync(); - } - - var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); - Assert.That(executingCommandEvent.Message, Does.Contain("Command execution completed") - .And.Contains("SELECT $1, $2") - .And.Contains("Parameters: [8, NULL]")); - AssertLoggingStateContains(executingCommandEvent, "CommandText", "SELECT $1, $2"); - AssertLoggingStateContains(executingCommandEvent, "Parameters", new object[] { 8, "NULL" }); - - if (!IsMultiplexing) - AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); - } - - [Test] - public async Task Log_ExecuteScalar_single_statement_with_named_parameters() - { - await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider); - await using var conn = await dataSource.OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT @p1, @p2", conn); - cmd.Parameters.Add(new() { ParameterName = "p1", Value = 8 }); - cmd.Parameters.Add(new() { ParameterName = "p2", NpgsqlDbType = NpgsqlDbType.Integer, Value = DBNull.Value }); - - using (listLoggerProvider.Record()) - { - await cmd.ExecuteScalarAsync(); - } - - var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); - Assert.That(executingCommandEvent.Message, Does.Contain("Command execution completed") - .And.Contains("SELECT $1, $2") - .And.Contains("Parameters: [8, NULL]")); - AssertLoggingStateContains(executingCommandEvent, "CommandText", "SELECT $1, $2"); - AssertLoggingStateContains(executingCommandEvent, "Parameters", new object[] { 8, "NULL" }); - - if (!IsMultiplexing) - AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); - } - - [Test] - public async Task Log_ExecuteScalar_single_statement_with_parameter_logging_off() - { - await using var dataSource = 
CreateLoggingDataSource(out var listLoggerProvider, sensitiveDataLoggingEnabled: false); - await using var conn = await dataSource.OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT $1, $2", conn); - cmd.Parameters.Add(new() { Value = 8 }); - cmd.Parameters.Add(new() { Value = 9 }); - - using (listLoggerProvider.Record()) - { - await cmd.ExecuteScalarAsync(); - } - - var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); - Assert.That(executingCommandEvent.Message, Does.Contain("Command execution completed").And.Contains($"SELECT $1, $2")); - AssertLoggingStateContains(executingCommandEvent, "CommandText", "SELECT $1, $2"); - AssertLoggingStateDoesNotContain(executingCommandEvent, "Parameters"); - } - - [Test] - public async Task Log_ExecuteScalar_multiple_statement_without_parameters() - { - await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider); - await using var conn = await dataSource.OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT 1; SELECT 2", conn); - - using (listLoggerProvider.Record()) - { - await cmd.ExecuteScalarAsync(); - } - - var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); - Assert.That(executingCommandEvent.Message, Does.Contain("Batch execution completed").And.Contains("[(SELECT 1, System.Object[]), (SELECT 2, System.Object[])]")); - var batchCommands = (IList<(string CommandText, object[] Parameters)>)AssertLoggingStateContains(executingCommandEvent, "BatchCommands"); - Assert.That(batchCommands.Count, Is.EqualTo(2)); - Assert.That(batchCommands[0].CommandText, Is.EqualTo("SELECT 1")); - Assert.That(batchCommands[0].Parameters, Is.Empty); - Assert.That(batchCommands[1].CommandText, Is.EqualTo("SELECT 2")); - Assert.That(batchCommands[1].Parameters, Is.Empty); - AssertLoggingStateDoesNotContain(executingCommandEvent, "Parameters"); - - if (!IsMultiplexing) 
- AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); - } - - [Test] - public async Task Log_ExecuteScalar_multiple_statement_with_parameters() - { - await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider); - await using var conn = await dataSource.OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT @p1; SELECT @p2", conn); - cmd.Parameters.Add(new() { ParameterName = "p1", Value = 8 }); - cmd.Parameters.Add(new() { ParameterName = "p2", Value = 9 }); - - using (listLoggerProvider.Record()) - { - await cmd.ExecuteScalarAsync(); - } - - var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); - Assert.That(executingCommandEvent.Message, Does.Contain("Batch execution completed").And.Contains("[(SELECT $1, System.Object[]), (SELECT $1, System.Object[])]")); - var batchCommands = (IList<(string CommandText, object[] Parameters)>)AssertLoggingStateContains(executingCommandEvent, "BatchCommands"); - Assert.That(batchCommands.Count, Is.EqualTo(2)); - Assert.That(batchCommands[0].CommandText, Is.EqualTo("SELECT $1")); - Assert.That(batchCommands[0].Parameters[0], Is.EqualTo(8)); - Assert.That(batchCommands[1].CommandText, Is.EqualTo("SELECT $1")); - Assert.That(batchCommands[1].Parameters[0], Is.EqualTo(9)); - AssertLoggingStateDoesNotContain(executingCommandEvent, "Parameters"); - - if (!IsMultiplexing) - AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); - } - - [Test] - public async Task Log_ExecuteScalar_multiple_statement_with_parameter_logging_off() - { - await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider, sensitiveDataLoggingEnabled: false); - await using var conn = await dataSource.OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT @p1; SELECT @p2", conn); - cmd.Parameters.Add(new() { ParameterName = "p1", Value = 8 }); - cmd.Parameters.Add(new() { 
ParameterName = "p2", Value = 9 }); - - using (listLoggerProvider.Record()) - { - await cmd.ExecuteScalarAsync(); - } - - var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); - Assert.That(executingCommandEvent.Message, Does.Contain("Batch execution completed").And.Contains("[SELECT $1, SELECT $1]")); - var batchCommands = (IList)AssertLoggingStateContains(executingCommandEvent, "BatchCommands"); - Assert.That(batchCommands.Count, Is.EqualTo(2)); - Assert.That(batchCommands[0], Is.EqualTo("SELECT $1")); - Assert.That(batchCommands[1], Is.EqualTo("SELECT $1")); - AssertLoggingStateDoesNotContain(executingCommandEvent, "Parameters"); - - if (!IsMultiplexing) - AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); - } - - #endregion Logging } diff --git a/test/Npgsql.Tests/LoggingTests.cs b/test/Npgsql.Tests/LoggingTests.cs new file mode 100644 index 0000000000..b9a566b6a8 --- /dev/null +++ b/test/Npgsql.Tests/LoggingTests.cs @@ -0,0 +1,302 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading.Tasks; +using NpgsqlTypes; +using NUnit.Framework; +using static Npgsql.Tests.TestUtil; + +namespace Npgsql.Tests; + +public class LoggingTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) +{ + [Test] + public async Task Command_ExecuteScalar_single_statement_without_parameters() + { + await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT 1", conn); + + using (listLoggerProvider.Record()) + { + await cmd.ExecuteScalarAsync(); + } + + var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); + Assert.That(executingCommandEvent.Message, Does.Contain("Command execution completed").And.Contains("SELECT 1")); + 
AssertLoggingStateContains(executingCommandEvent, "CommandText", "SELECT 1"); + AssertLoggingStateDoesNotContain(executingCommandEvent, "Parameters"); + + if (!IsMultiplexing) + AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); + } + + [Test] + public async Task Command_ExecuteScalar_single_statement_with_positional_parameters() + { + await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT $1, $2", conn); + cmd.Parameters.Add(new() { Value = 8 }); + cmd.Parameters.Add(new() { NpgsqlDbType = NpgsqlDbType.Integer, Value = DBNull.Value }); + + using (listLoggerProvider.Record()) + { + await cmd.ExecuteScalarAsync(); + } + + var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); + Assert.That(executingCommandEvent.Message, Does.Contain("Command execution completed") + .And.Contains("SELECT $1, $2") + .And.Contains("Parameters: [8, NULL]")); + AssertLoggingStateContains(executingCommandEvent, "CommandText", "SELECT $1, $2"); + AssertLoggingStateContains(executingCommandEvent, "Parameters", new object[] { 8, "NULL" }); + + if (!IsMultiplexing) + AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); + } + + [Test] + public async Task Command_ExecuteScalar_single_statement__Should_unwrap_array_and_truncate_and_write_nulls() + { + await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT $1, $2, $3, $4, $5, $6", conn); + cmd.Parameters.Add(new NpgsqlParameter { TypedValue = 1024 }); + cmd.Parameters.Add(new NpgsqlParameter { TypedValue = [1, 2, 3], NpgsqlDbType = NpgsqlDbType.Array | NpgsqlDbType.Integer }); + cmd.Parameters.Add(new NpgsqlParameter { TypedValue = [1, 2, 3, 4, 5, 6, 
7, 8, 9, 10, 11, 12], NpgsqlDbType = NpgsqlDbType.Array | NpgsqlDbType.Integer }); + cmd.Parameters.Add(new NpgsqlParameter { TypedValue = [1, null], NpgsqlDbType = NpgsqlDbType.Array | NpgsqlDbType.Integer }); + cmd.Parameters.Add(new NpgsqlParameter { TypedValue = null }); + cmd.Parameters.Add(new() { NpgsqlDbType = NpgsqlDbType.Integer, Value = DBNull.Value }); + + using (listLoggerProvider.Record()) + { + await cmd.ExecuteScalarAsync(); + } + + var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); + Assert.That(executingCommandEvent.Message, Does.Contain("Command execution completed") + .And.Contains("SELECT $1, $2, $3, $4, $5, $6") + .And.Contains("Parameters: [1024, [1, 2, 3], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, ...], [1, NULL], NULL, NULL]")); + AssertLoggingStateContains(executingCommandEvent, "CommandText", "SELECT $1, $2, $3, $4, $5, $6"); + AssertLoggingStateContains(executingCommandEvent, "Parameters", new object[] { 1024, "[1, 2, 3]", "[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, ...]", "[1, NULL]", "NULL", "NULL" }); + + if (!IsMultiplexing) + AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); + } + + [Test] + public async Task Command_ExecuteScalar_single_statement_with_named_parameters() + { + await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT @p1, @p2", conn); + cmd.Parameters.Add(new() { ParameterName = "p1", Value = 8 }); + cmd.Parameters.Add(new() { ParameterName = "p2", NpgsqlDbType = NpgsqlDbType.Integer, Value = DBNull.Value }); + + using (listLoggerProvider.Record()) + { + await cmd.ExecuteScalarAsync(); + } + + var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); + Assert.That(executingCommandEvent.Message, Does.Contain("Command execution completed") + 
.And.Contains("SELECT $1, $2") + .And.Contains("Parameters: [8, NULL]")); + AssertLoggingStateContains(executingCommandEvent, "CommandText", "SELECT $1, $2"); + AssertLoggingStateContains(executingCommandEvent, "Parameters", new object[] { 8, "NULL" }); + + if (!IsMultiplexing) + AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); + } + + [Test] + public async Task Command_ExecuteScalar_single_statement_with_parameter_logging_off() + { + await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider, sensitiveDataLoggingEnabled: false); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT $1, $2", conn); + cmd.Parameters.Add(new() { Value = 8 }); + cmd.Parameters.Add(new() { Value = 9 }); + + using (listLoggerProvider.Record()) + { + await cmd.ExecuteScalarAsync(); + } + + var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); + Assert.That(executingCommandEvent.Message, Does.Contain("Command execution completed").And.Contains($"SELECT $1, $2")); + AssertLoggingStateContains(executingCommandEvent, "CommandText", "SELECT $1, $2"); + AssertLoggingStateDoesNotContain(executingCommandEvent, "Parameters"); + } + + [Test] + public async Task Command_ExecuteScalar_multiple_statement_without_parameters() + { + await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT 1; SELECT 2", conn); + + using (listLoggerProvider.Record()) + { + await cmd.ExecuteScalarAsync(); + } + + var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); + Assert.That(executingCommandEvent.Message, Does.Contain("Batch execution completed").And.Contains("[(SELECT 1, System.Object[]), (SELECT 2, System.Object[])]")); + var batchCommands = 
(IList<(string CommandText, object[] Parameters)>)AssertLoggingStateContains(executingCommandEvent, "BatchCommands"); + Assert.That(batchCommands.Count, Is.EqualTo(2)); + Assert.That(batchCommands[0].CommandText, Is.EqualTo("SELECT 1")); + Assert.That(batchCommands[0].Parameters, Is.Empty); + Assert.That(batchCommands[1].CommandText, Is.EqualTo("SELECT 2")); + Assert.That(batchCommands[1].Parameters, Is.Empty); + AssertLoggingStateDoesNotContain(executingCommandEvent, "Parameters"); + + if (!IsMultiplexing) + AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); + } + + [Test] + public async Task Command_ExecuteScalar_multiple_statement_with_parameters() + { + await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT @p1; SELECT @p2", conn); + cmd.Parameters.Add(new() { ParameterName = "p1", Value = 8 }); + cmd.Parameters.Add(new() { ParameterName = "p2", Value = 9 }); + + using (listLoggerProvider.Record()) + { + await cmd.ExecuteScalarAsync(); + } + + var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); + Assert.That(executingCommandEvent.Message, Does.Contain("Batch execution completed").And.Contains("[(SELECT $1, System.Object[]), (SELECT $1, System.Object[])]")); + var batchCommands = (IList<(string CommandText, object[] Parameters)>)AssertLoggingStateContains(executingCommandEvent, "BatchCommands"); + Assert.That(batchCommands.Count, Is.EqualTo(2)); + Assert.That(batchCommands[0].CommandText, Is.EqualTo("SELECT $1")); + Assert.That(batchCommands[0].Parameters[0], Is.EqualTo(8)); + Assert.That(batchCommands[1].CommandText, Is.EqualTo("SELECT $1")); + Assert.That(batchCommands[1].Parameters[0], Is.EqualTo(9)); + AssertLoggingStateDoesNotContain(executingCommandEvent, "Parameters"); + + if (!IsMultiplexing) + 
AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); + } + + [Test] + public async Task Command_ExecuteScalar_multiple_statement_with_parameter_logging_off() + { + await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider, sensitiveDataLoggingEnabled: false); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT @p1; SELECT @p2", conn); + cmd.Parameters.Add(new() { ParameterName = "p1", Value = 8 }); + cmd.Parameters.Add(new() { ParameterName = "p2", Value = 9 }); + + using (listLoggerProvider.Record()) + { + await cmd.ExecuteScalarAsync(); + } + + var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); + Assert.That(executingCommandEvent.Message, Does.Contain("Batch execution completed").And.Contains("[SELECT $1, SELECT $1]")); + var batchCommands = (IList)AssertLoggingStateContains(executingCommandEvent, "BatchCommands"); + Assert.That(batchCommands.Count, Is.EqualTo(2)); + Assert.That(batchCommands[0], Is.EqualTo("SELECT $1")); + Assert.That(batchCommands[1], Is.EqualTo("SELECT $1")); + AssertLoggingStateDoesNotContain(executingCommandEvent, "Parameters"); + + if (!IsMultiplexing) + AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); + } + + [Test] + public async Task Batch_ExecuteScalar_single_statement_without_parameters() + { + await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = new NpgsqlBatch(conn) + { + BatchCommands = { new("SELECT 1") } + }; + + using (listLoggerProvider.Record()) + { + await cmd.ExecuteScalarAsync(); + } + + var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); + + Assert.That(executingCommandEvent.Message, Does.Contain("Command execution 
completed").And.Contains("SELECT 1")); + AssertLoggingStateContains(executingCommandEvent, "CommandText", "SELECT 1"); + AssertLoggingStateDoesNotContain(executingCommandEvent, "Parameters"); + + if (!IsMultiplexing) + AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); + } + + [Test] + public async Task Batch_ExecuteScalar_multiple_statements_with_parameters() + { + await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var batch = new NpgsqlBatch(conn) + { + BatchCommands = + { + new("SELECT $1") { Parameters = { new() { Value = 8 } } }, + new("SELECT $1, 9") { Parameters = { new() { Value = 9 } } } + } + }; + + using (listLoggerProvider.Record()) + { + await batch.ExecuteScalarAsync(); + } + + var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); + + // Note: the message formatter of Microsoft.Extensions.Logging doesn't seem to handle arrays inside tuples, so we get the + // following ugliness (https://github.com/dotnet/runtime/issues/63165). Serilog handles this fine. 
+ Assert.That(executingCommandEvent.Message, Does.Contain("Batch execution completed").And.Contains("[(SELECT $1, System.Object[]), (SELECT $1, 9, System.Object[])]")); + AssertLoggingStateDoesNotContain(executingCommandEvent, "CommandText"); + AssertLoggingStateDoesNotContain(executingCommandEvent, "Parameters"); + + if (!IsMultiplexing) + AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); + + var batchCommands = (IList<(string CommandText, object[] Parameters)>)AssertLoggingStateContains(executingCommandEvent, "BatchCommands"); + Assert.That(batchCommands.Count, Is.EqualTo(2)); + Assert.That(batchCommands[0].CommandText, Is.EqualTo("SELECT $1")); + Assert.That(batchCommands[0].Parameters[0], Is.EqualTo(8)); + Assert.That(batchCommands[1].CommandText, Is.EqualTo("SELECT $1, 9")); + Assert.That(batchCommands[1].Parameters[0], Is.EqualTo(9)); + } + + [Test] + public async Task Batch_ExecuteScalar_single_statement_with_parameter_logging_off() + { + await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider, sensitiveDataLoggingEnabled: false); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var batch = new NpgsqlBatch(conn) + { + BatchCommands = + { + new("SELECT $1") { Parameters = { new() { Value = 8 } } }, + new("SELECT $1, 9") { Parameters = { new() { Value = 9 } } } + } + }; + + using (listLoggerProvider.Record()) + { + await batch.ExecuteScalarAsync(); + } + + var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); + Assert.That(executingCommandEvent.Message, Does.Contain("Batch execution completed").And.Contains("[SELECT $1, SELECT $1, 9]")); + var batchCommands = (IList)AssertLoggingStateContains(executingCommandEvent, "BatchCommands"); + Assert.That(batchCommands.Count, Is.EqualTo(2)); + Assert.That(batchCommands[0], Is.EqualTo("SELECT $1")); + Assert.That(batchCommands[1], Is.EqualTo("SELECT $1, 9")); + } +} From 
e038010ae55e23710056125e9c73f71d03fb5fcd Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Sat, 9 Nov 2024 00:07:42 +0300 Subject: [PATCH 482/761] Make the default value for json with Json.NET a string (#5914) Fixes #5913 --- src/Npgsql.Json.NET/Internal/JsonNetTypeInfoResolverFactory.cs | 2 +- .../Internal/ResolverFactories/JsonTypeInfoResolverFactory.cs | 3 +-- test/Npgsql.PluginTests/JsonNetTests.cs | 2 +- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/src/Npgsql.Json.NET/Internal/JsonNetTypeInfoResolverFactory.cs b/src/Npgsql.Json.NET/Internal/JsonNetTypeInfoResolverFactory.cs index cf2be51cc4..be2e0a3ba7 100644 --- a/src/Npgsql.Json.NET/Internal/JsonNetTypeInfoResolverFactory.cs +++ b/src/Npgsql.Json.NET/Internal/JsonNetTypeInfoResolverFactory.cs @@ -26,7 +26,7 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings, { var jsonb = dataTypeName == "jsonb"; mappings.AddType(dataTypeName, (options, mapping, _) => - mapping.CreateInfo(options, new JsonNetJsonConverter(jsonb, options.TextEncoding, settings)), isDefault: true); + mapping.CreateInfo(options, new JsonNetJsonConverter(jsonb, options.TextEncoding, settings))); mappings.AddType(dataTypeName, (options, mapping, _) => mapping.CreateInfo(options, new JsonNetJsonConverter(jsonb, options.TextEncoding, settings))); mappings.AddType(dataTypeName, (options, mapping, _) => diff --git a/src/Npgsql/Internal/ResolverFactories/JsonTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/JsonTypeInfoResolverFactory.cs index 24903e3f54..250e000022 100644 --- a/src/Npgsql/Internal/ResolverFactories/JsonTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/JsonTypeInfoResolverFactory.cs @@ -43,8 +43,7 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings, var jsonb = dataTypeName == DataTypeNames.Jsonb; mappings.AddType(dataTypeName, (options, mapping, _) => mapping.CreateInfo(options, - new JsonConverter(jsonb, 
options.TextEncoding, serializerOptions)), - isDefault: true); + new JsonConverter(jsonb, options.TextEncoding, serializerOptions))); mappings.AddStructType(dataTypeName, (options, mapping, _) => mapping.CreateInfo(options, new JsonConverter(jsonb, options.TextEncoding, serializerOptions))); diff --git a/test/Npgsql.PluginTests/JsonNetTests.cs b/test/Npgsql.PluginTests/JsonNetTests.cs index 5ab396bdc8..e251bf7249 100644 --- a/test/Npgsql.PluginTests/JsonNetTests.cs +++ b/test/Npgsql.PluginTests/JsonNetTests.cs @@ -38,7 +38,7 @@ public Task Roundtrip_string() @"{""p"": 1}", _pgTypeName, npgsqlDbType, - isDefault: false, + isDefaultForWriting: false, isNpgsqlDbTypeInferredFromClrType: false); [Test, IssueLink("https://github.com/npgsql/npgsql/issues/3085")] From 7949c45e2957da3641d73531e7efe931e2868fe4 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Sat, 9 Nov 2024 09:23:58 +0300 Subject: [PATCH 483/761] Add an ability to filter and enrich tracing activities (#5853) Closes #4192 Closes #4250 Closes #4228 Closes #4245 --- .../TracerProviderBuilderExtensions.cs | 6 +- src/Npgsql/Internal/NpgsqlConnector.cs | 2 +- src/Npgsql/NpgsqlActivitySource.cs | 8 +-- src/Npgsql/NpgsqlBatch.cs | 8 +-- src/Npgsql/NpgsqlCommand.cs | 71 +++++++++++++------ src/Npgsql/NpgsqlDataReader.cs | 2 +- src/Npgsql/NpgsqlDataSource.cs | 1 + src/Npgsql/NpgsqlDataSourceBatch.cs | 2 +- src/Npgsql/NpgsqlDataSourceBuilder.cs | 13 +++- src/Npgsql/NpgsqlDataSourceCommand.cs | 4 +- src/Npgsql/NpgsqlDataSourceConfiguration.cs | 1 + src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 15 +++- src/Npgsql/NpgsqlTracingOptions.cs | 43 ++++++++++- src/Npgsql/PublicAPI.Unshipped.txt | 16 +++++ 14 files changed, 148 insertions(+), 44 deletions(-) diff --git a/src/Npgsql.OpenTelemetry/TracerProviderBuilderExtensions.cs b/src/Npgsql.OpenTelemetry/TracerProviderBuilderExtensions.cs index 0c34138278..1568d2d080 100644 --- a/src/Npgsql.OpenTelemetry/TracerProviderBuilderExtensions.cs +++ 
b/src/Npgsql.OpenTelemetry/TracerProviderBuilderExtensions.cs @@ -12,8 +12,6 @@ public static class TracerProviderBuilderExtensions /// /// Subscribes to the Npgsql activity source to enable OpenTelemetry tracing. /// - public static TracerProviderBuilder AddNpgsql( - this TracerProviderBuilder builder, - Action? options = null) + public static TracerProviderBuilder AddNpgsql(this TracerProviderBuilder builder) => builder.AddSource("Npgsql"); -} \ No newline at end of file +} diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 8ef246448c..4e214d4b48 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -1195,7 +1195,7 @@ async Task MultiplexingReadLoop() // We have a resultset for the command - hand back control to the command (which will // return it to the user) - command.TraceReceivedFirstResponse(); + command.TraceReceivedFirstResponse(DataSource.Configuration.TracingOptions); ReaderCompleted.Reset(); command.ExecutionCompletion.SetResult(this); diff --git a/src/Npgsql/NpgsqlActivitySource.cs b/src/Npgsql/NpgsqlActivitySource.cs index b2624fc1ac..aa1d6b28a5 100644 --- a/src/Npgsql/NpgsqlActivitySource.cs +++ b/src/Npgsql/NpgsqlActivitySource.cs @@ -13,7 +13,7 @@ static class NpgsqlActivitySource internal static bool IsEnabled => Source.HasListeners(); - internal static Activity? CommandStart(NpgsqlConnectionStringBuilder settings, string commandText, CommandType commandType) + internal static Activity? CommandStart(NpgsqlConnectionStringBuilder settings, string commandText, CommandType commandType, string? spanName) { var dbName = settings.Database ?? "UNKNOWN"; string? dbOperation = null; @@ -47,7 +47,7 @@ static class NpgsqlActivitySource throw new ArgumentOutOfRangeException(nameof(commandType), commandType, null); } - var activity = Source.StartActivity(activityName, ActivityKind.Client); + var activity = Source.StartActivity(spanName ?? 
activityName, ActivityKind.Client); if (activity is not { IsAllDataRequested: true }) return activity; @@ -96,9 +96,9 @@ internal static void Enrich(Activity activity, NpgsqlConnector connector) } } - internal static void ReceivedFirstResponse(Activity activity) + internal static void ReceivedFirstResponse(Activity activity, NpgsqlTracingOptions? tracingSettings) { - if (!activity.IsAllDataRequested) + if (!activity.IsAllDataRequested || tracingSettings?.EnableFirstResponseEvent == false) return; var activityEvent = new ActivityEvent("received-first-response"); diff --git a/src/Npgsql/NpgsqlBatch.cs b/src/Npgsql/NpgsqlBatch.cs index 446cb4746f..e692199e2b 100644 --- a/src/Npgsql/NpgsqlBatch.cs +++ b/src/Npgsql/NpgsqlBatch.cs @@ -100,7 +100,7 @@ internal bool AllResultTypesAreUnknown public NpgsqlBatch(NpgsqlConnection? connection = null, NpgsqlTransaction? transaction = null) { GC.SuppressFinalize(this); - Command = new(DefaultBatchCommandsSize); + Command = new(this, DefaultBatchCommandsSize); BatchCommands = new NpgsqlBatchCommandCollection(Command.InternalBatchCommands); Connection = connection; @@ -110,14 +110,14 @@ public NpgsqlBatch(NpgsqlConnection? connection = null, NpgsqlTransaction? 
trans internal NpgsqlBatch(NpgsqlConnector connector) { GC.SuppressFinalize(this); - Command = new(connector, DefaultBatchCommandsSize); + Command = new(this, connector, DefaultBatchCommandsSize); BatchCommands = new NpgsqlBatchCommandCollection(Command.InternalBatchCommands); } - private protected NpgsqlBatch(NpgsqlDataSourceCommand command) + private protected NpgsqlBatch(Func commandFactory, NpgsqlConnection connection) { GC.SuppressFinalize(this); - Command = command; + Command = commandFactory(connection, this); BatchCommands = new NpgsqlBatchCommandCollection(Command.InternalBatchCommands); } diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index 8116a73c7f..d0c2bc43c4 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -47,10 +47,7 @@ public class NpgsqlCommand : DbCommand, ICloneable, IComponent int? _timeout; internal NpgsqlParameterCollection? _parameters; - /// - /// Whether this is wrapped by an . - /// - internal bool IsWrappedByBatch { get; } + internal NpgsqlBatch? WrappingBatch { get; } internal List InternalBatchCommands { get; } @@ -143,13 +140,13 @@ public NpgsqlCommand(string? cmdText, NpgsqlConnection? connection, NpgsqlTransa /// /// Used when this instance is wrapped inside an . /// - internal NpgsqlCommand(int batchCommandCapacity, NpgsqlConnection? connection = null) + internal NpgsqlCommand(NpgsqlBatch batch, int batchCommandCapacity, NpgsqlConnection? connection = null) { GC.SuppressFinalize(this); InternalBatchCommands = new List(batchCommandCapacity); InternalConnection = connection; CommandType = CommandType.Text; - IsWrappedByBatch = true; + WrappingBatch = batch; // These can/should never be used in this mode _commandText = null!; @@ -162,8 +159,8 @@ internal NpgsqlCommand(string? cmdText, NpgsqlConnector connector) : this(cmdTex /// /// Used when this instance is wrapped inside an . 
/// - internal NpgsqlCommand(NpgsqlConnector connector, int batchCommandCapacity) - : this(batchCommandCapacity) + internal NpgsqlCommand(NpgsqlBatch batch, NpgsqlConnector connector, int batchCommandCapacity) + : this(batch, batchCommandCapacity) => _connector = connector; internal static NpgsqlCommand CreateCachedCommand(NpgsqlConnection connection) @@ -184,7 +181,7 @@ public override string CommandText get => _commandText; set { - Debug.Assert(!IsWrappedByBatch); + Debug.Assert(WrappingBatch is null); if (State != CommandState.Idle) ThrowHelper.ThrowInvalidOperationException("An open data reader exists for this command."); @@ -198,7 +195,7 @@ public override string CommandText string GetBatchFullCommandText() { - Debug.Assert(IsWrappedByBatch); + Debug.Assert(WrappingBatch is not null); if (InternalBatchCommands.Count == 0) return string.Empty; if (InternalBatchCommands.Count == 1) @@ -559,7 +556,7 @@ void DeriveParametersForQuery(NpgsqlConnector connector) { LogMessages.DerivingParameters(connector.CommandLogger, CommandText, connector.Id); - if (IsWrappedByBatch) + if (WrappingBatch is not null) foreach (var batchCommand in InternalBatchCommands) connector.SqlQueryParser.ParseRawQuery(batchCommand, connector.UseConformingStrings, deriveParameters: true); else @@ -672,7 +669,7 @@ Task Prepare(bool async, CancellationToken cancellationToken = default) var needToPrepare = false; - if (IsWrappedByBatch) + if (WrappingBatch is not null) { foreach (var batchCommand in InternalBatchCommands) { @@ -1295,7 +1292,7 @@ async Task ExecuteNonQuery(bool async, CancellationToken cancellationToken) async ValueTask ExecuteScalar(bool async, CancellationToken cancellationToken) { var behavior = CommandBehavior.SingleRow; - if (IsWrappedByBatch || _parameters?.HasOutputParameters != true) + if (WrappingBatch is not null || _parameters?.HasOutputParameters != true) behavior |= CommandBehavior.SequentialAccess; var reader = await ExecuteReader(async, behavior, 
cancellationToken).ConfigureAwait(false); @@ -1415,7 +1412,7 @@ internal virtual async ValueTask ExecuteReader(bool async, Com { case true: Debug.Assert(_connectorPreparedOn != null); - if (IsWrappedByBatch) + if (WrappingBatch is not null) { foreach (var batchCommand in InternalBatchCommands) { @@ -1450,7 +1447,7 @@ internal virtual async ValueTask ExecuteReader(bool async, Com case false: var numPrepared = 0; - if (IsWrappedByBatch) + if (WrappingBatch is not null) { for (var i = 0; i < InternalBatchCommands.Count; i++) { @@ -1506,7 +1503,7 @@ internal virtual async ValueTask ExecuteReader(bool async, Com NpgsqlEventSource.Log.CommandStart(CommandText); startTimestamp = connector.DataSource.MetricsReporter.ReportCommandStart(); - TraceCommandStart(connector.Settings); + TraceCommandStart(connector.Settings, connector.DataSource.Configuration.TracingOptions); TraceCommandEnrich(connector); // We do not wait for the entire send to complete before proceeding to reading - @@ -1541,7 +1538,7 @@ internal virtual async ValueTask ExecuteReader(bool async, Com else reader.NextResult(); - TraceReceivedFirstResponse(); + TraceReceivedFirstResponse(connector.DataSource.Configuration.TracingOptions); return reader; } @@ -1560,7 +1557,7 @@ internal virtual async ValueTask ExecuteReader(bool async, Com ThrowHelper.ThrowNotSupportedException("Synchronous command execution is not supported when multiplexing is on"); } - if (IsWrappedByBatch) + if (WrappingBatch is not null) { foreach (var batchCommand in InternalBatchCommands) { @@ -1576,7 +1573,7 @@ internal virtual async ValueTask ExecuteReader(bool async, Com State = CommandState.InProgress; - TraceCommandStart(conn.Settings); + TraceCommandStart(conn.Settings, conn.NpgsqlDataSource.Configuration.TracingOptions); // TODO: Experiment: do we want to wait on *writing* here, or on *reading*? 
// Previous behavior was to wait on reading, which throw the exception from ExecuteReader (and not from @@ -1713,23 +1710,51 @@ internal void Reset() #region Tracing - internal void TraceCommandStart(NpgsqlConnectionStringBuilder settings) + internal void TraceCommandStart(NpgsqlConnectionStringBuilder settings, NpgsqlTracingOptions? tracingSettings) { Debug.Assert(CurrentActivity is null); if (NpgsqlActivitySource.IsEnabled) - CurrentActivity = NpgsqlActivitySource.CommandStart(settings, IsWrappedByBatch ? GetBatchFullCommandText() : CommandText, CommandType); + { + (var enableTracing, string? spanName) = (true, null); + if (tracingSettings is not null) + { + enableTracing = WrappingBatch is not null + ? tracingSettings.FilterBatch?.Invoke(WrappingBatch) ?? true + : tracingSettings.FilterCommand?.Invoke(this) ?? true; + + spanName = WrappingBatch is not null + ? tracingSettings.ProvideSpanNameForBatch?.Invoke(WrappingBatch) + : tracingSettings.ProvideSpanNameForCommand?.Invoke(this); + } + + if (enableTracing) + { + CurrentActivity = NpgsqlActivitySource.CommandStart( + settings, + WrappingBatch is not null ? GetBatchFullCommandText() : CommandText, + CommandType, + spanName); + } + } } internal void TraceCommandEnrich(NpgsqlConnector connector) { if (CurrentActivity is not null) + { NpgsqlActivitySource.Enrich(CurrentActivity, connector); + var tracingSettings = connector.DataSource.Configuration.TracingOptions; + if (WrappingBatch is not null) + tracingSettings?.EnrichWithBatch?.Invoke(CurrentActivity, WrappingBatch); + else + tracingSettings?.EnrichWithCommand?.Invoke(CurrentActivity, this); + } } - internal void TraceReceivedFirstResponse() + internal void TraceReceivedFirstResponse(NpgsqlTracingOptions? 
tracingSettings) { if (CurrentActivity is not null) - NpgsqlActivitySource.ReceivedFirstResponse(CurrentActivity); + NpgsqlActivitySource.ReceivedFirstResponse(CurrentActivity, tracingSettings); } internal void TraceCommandStop() diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 7446889c05..e7ca332a37 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -459,7 +459,7 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo continue; } - if ((Command.IsWrappedByBatch || StatementIndex is 0) && Command.InternalBatchCommands[StatementIndex]._parameters?.HasOutputParameters == true) + if ((Command.WrappingBatch is not null || StatementIndex is 0) && Command.InternalBatchCommands[StatementIndex]._parameters?.HasOutputParameters == true) { // If output parameters are present and this is the first row of the resultset, // we must always read it in non-sequential mode because it will be traversed twice (once diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index 2b7f4e5420..ffde9feda8 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -96,6 +96,7 @@ internal NpgsqlDataSource( (var name, LoggingConfiguration, + _, TransportSecurityHandler, IntegratedSecurityHandler, SslClientAuthenticationOptionsCallback, diff --git a/src/Npgsql/NpgsqlDataSourceBatch.cs b/src/Npgsql/NpgsqlDataSourceBatch.cs index fa239ee8e6..c5b44e9ff6 100644 --- a/src/Npgsql/NpgsqlDataSourceBatch.cs +++ b/src/Npgsql/NpgsqlDataSourceBatch.cs @@ -9,7 +9,7 @@ namespace Npgsql; sealed class NpgsqlDataSourceBatch : NpgsqlBatch { internal NpgsqlDataSourceBatch(NpgsqlConnection connection) - : base(new NpgsqlDataSourceCommand(DefaultBatchCommandsSize, connection)) + : base(static (conn, batch) => new NpgsqlDataSourceCommand(batch, DefaultBatchCommandsSize, conn), connection) { } diff --git a/src/Npgsql/NpgsqlDataSourceBuilder.cs b/src/Npgsql/NpgsqlDataSourceBuilder.cs 
index 7a886ba335..7bbcf274aa 100644 --- a/src/Npgsql/NpgsqlDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlDataSourceBuilder.cs @@ -120,11 +120,22 @@ public NpgsqlDataSourceBuilder EnableParameterLogging(bool parameterLoggingEnabl return this; } + /// + /// Configures tracing options for the DataSource. + /// + /// Tracing options for the DataSource. + /// The same builder instance so that multiple calls can be chained. + public NpgsqlDataSourceBuilder ConfigureTracingOptions(NpgsqlTracingOptions tracingOptions) + { + _internalBuilder.ConfigureTracingOptions(tracingOptions); + return this; + } + /// /// Configures the JSON serializer options used when reading and writing all System.Text.Json data. /// /// Options to customize JSON serialization and deserialization. - /// + /// The same builder instance so that multiple calls can be chained. public NpgsqlDataSourceBuilder ConfigureJsonOptions(JsonSerializerOptions serializerOptions) { _internalBuilder.ConfigureJsonOptions(serializerOptions); diff --git a/src/Npgsql/NpgsqlDataSourceCommand.cs b/src/Npgsql/NpgsqlDataSourceCommand.cs index 3ff565de66..d293194f66 100644 --- a/src/Npgsql/NpgsqlDataSourceCommand.cs +++ b/src/Npgsql/NpgsqlDataSourceCommand.cs @@ -15,8 +15,8 @@ internal NpgsqlDataSourceCommand(NpgsqlConnection connection) } // For NpgsqlBatch only - internal NpgsqlDataSourceCommand(int batchCommandCapacity, NpgsqlConnection connection) - : base(batchCommandCapacity, connection) + internal NpgsqlDataSourceCommand(NpgsqlBatch batch, int batchCommandCapacity, NpgsqlConnection connection) + : base(batch, batchCommandCapacity, connection) { } diff --git a/src/Npgsql/NpgsqlDataSourceConfiguration.cs b/src/Npgsql/NpgsqlDataSourceConfiguration.cs index 64acebf136..2b2d09cded 100644 --- a/src/Npgsql/NpgsqlDataSourceConfiguration.cs +++ b/src/Npgsql/NpgsqlDataSourceConfiguration.cs @@ -9,6 +9,7 @@ namespace Npgsql; sealed record NpgsqlDataSourceConfiguration(string? 
Name, NpgsqlLoggingConfiguration LoggingConfiguration, + NpgsqlTracingOptions? TracingOptions, TransportSecurityHandler TransportSecurityHandler, IntegratedSecurityHandler userCertificateValidationCallback, Action? SslClientAuthenticationOptionsCallback, diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index 8711bd9b63..d9fdab90b4 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -29,6 +29,7 @@ public sealed class NpgsqlSlimDataSourceBuilder : INpgsqlTypeMapper ILoggerFactory? _loggerFactory; bool _sensitiveDataLoggingEnabled; + NpgsqlTracingOptions? _tracingOptions; TransportSecurityHandler _transportSecurityHandler = new(); RemoteCertificateValidationCallback? _userCertificateValidationCallback; @@ -116,11 +117,22 @@ public NpgsqlSlimDataSourceBuilder EnableParameterLogging(bool parameterLoggingE return this; } + /// + /// Configures tracing options for the DataSource. + /// + /// Tracing options for the DataSource. + /// The same builder instance so that multiple calls can be chained. + public NpgsqlSlimDataSourceBuilder ConfigureTracingOptions(NpgsqlTracingOptions tracingOptions) + { + _tracingOptions = tracingOptions; + return this; + } + /// /// Configures the JSON serializer options used when reading and writing all System.Text.Json data. /// /// Options to customize JSON serialization and deserialization. - /// + /// The same builder instance so that multiple calls can be chained. public NpgsqlSlimDataSourceBuilder ConfigureJsonOptions(JsonSerializerOptions serializerOptions) { JsonSerializerOptions = serializerOptions; @@ -724,6 +736,7 @@ NpgsqlDataSourceConfiguration PrepareConfiguration() _loggerFactory is null ? 
NpgsqlLoggingConfiguration.NullConfiguration : new NpgsqlLoggingConfiguration(_loggerFactory, _sensitiveDataLoggingEnabled), + _tracingOptions, _transportSecurityHandler, _integratedSecurityHandler, sslClientAuthenticationOptionsCallback, diff --git a/src/Npgsql/NpgsqlTracingOptions.cs b/src/Npgsql/NpgsqlTracingOptions.cs index 4aa61beec6..7fbf6254dc 100644 --- a/src/Npgsql/NpgsqlTracingOptions.cs +++ b/src/Npgsql/NpgsqlTracingOptions.cs @@ -1,9 +1,48 @@ +using System; +using System.Diagnostics; + namespace Npgsql; /// /// Options to configure Npgsql's support for OpenTelemetry tracing. -/// Currently no options are available. /// public class NpgsqlTracingOptions { -} \ No newline at end of file + /// + /// Gets or sets a filter function that determines whether or not to + /// collect telemetry on a per basis. + /// + public Func? FilterCommand { get; set; } + + /// + /// Gets or sets an action to enrich an with . + /// + public Action? EnrichWithCommand { get; set; } + + /// + /// Gets or sets a function that provides a span's name on a per basis. + /// + public Func? ProvideSpanNameForCommand { get; set; } + + /// + /// Gets or sets a filter function that determines whether or not to + /// collect telemetry on a per basis. + /// + public Func? FilterBatch { get; set; } + + /// + /// Gets or sets an action to enrich an with . + /// + public Action? EnrichWithBatch { get; set; } + + /// + /// Gets or sets a function that provides a span's name on a per basis. + /// + public Func? ProvideSpanNameForBatch { get; set; } + + /// + /// Gets or sets a value indicating whether to enable the "time-to-first-read" event. + /// Default is true to preserve existing behavior. 
+ /// + public bool EnableFirstResponseEvent { get; set; } = true; +} diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index 74ee66de05..cabfe99eaa 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -5,15 +5,31 @@ Npgsql.NpgsqlConnection.SslClientAuthenticationOptionsCallback.get -> System.Act Npgsql.NpgsqlConnection.SslClientAuthenticationOptionsCallback.set -> void Npgsql.NpgsqlConnectionStringBuilder.SslNegotiation.get -> Npgsql.SslNegotiation Npgsql.NpgsqlConnectionStringBuilder.SslNegotiation.set -> void +Npgsql.NpgsqlDataSourceBuilder.ConfigureTracingOptions(Npgsql.NpgsqlTracingOptions! tracingOptions) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.UseNegotiateOptionsCallback(System.Action? negotiateOptionsCallback) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.UseSslClientAuthenticationOptionsCallback(System.Action? sslClientAuthenticationOptionsCallback) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlMetricsOptions Npgsql.NpgsqlMetricsOptions.NpgsqlMetricsOptions() -> void +Npgsql.NpgsqlSlimDataSourceBuilder.ConfigureTracingOptions(Npgsql.NpgsqlTracingOptions! tracingOptions) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableGeometricTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableJsonTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableNetworkTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UseNegotiateOptionsCallback(System.Action? negotiateOptionsCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UseSslClientAuthenticationOptionsCallback(System.Action? sslClientAuthenticationOptionsCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! 
+Npgsql.NpgsqlTracingOptions.EnableFirstResponseEvent.get -> bool +Npgsql.NpgsqlTracingOptions.EnableFirstResponseEvent.set -> void +Npgsql.NpgsqlTracingOptions.EnrichWithBatch.get -> System.Action? +Npgsql.NpgsqlTracingOptions.EnrichWithBatch.set -> void +Npgsql.NpgsqlTracingOptions.EnrichWithCommand.get -> System.Action? +Npgsql.NpgsqlTracingOptions.EnrichWithCommand.set -> void +Npgsql.NpgsqlTracingOptions.FilterBatch.get -> System.Func? +Npgsql.NpgsqlTracingOptions.FilterBatch.set -> void +Npgsql.NpgsqlTracingOptions.FilterCommand.get -> System.Func? +Npgsql.NpgsqlTracingOptions.FilterCommand.set -> void +Npgsql.NpgsqlTracingOptions.ProvideSpanNameForBatch.get -> System.Func? +Npgsql.NpgsqlTracingOptions.ProvideSpanNameForBatch.set -> void +Npgsql.NpgsqlTracingOptions.ProvideSpanNameForCommand.get -> System.Func? +Npgsql.NpgsqlTracingOptions.ProvideSpanNameForCommand.set -> void Npgsql.Replication.PgOutput.ReplicationValue.GetFieldName() -> string! Npgsql.Replication.PgOutput.Messages.ParallelStreamAbortMessage Npgsql.Replication.PgOutput.Messages.ParallelStreamAbortMessage.AbortLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber From 4f2785b6cb8c19a04ea636f4a5b7be5e11851193 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Sun, 10 Nov 2024 12:45:07 +0100 Subject: [PATCH 484/761] Concrete INpgsqlTypeMapper return types (#5918) --- src/Npgsql.GeoJSON/NpgsqlGeoJSONExtensions.cs | 29 ++++ src/Npgsql.GeoJSON/PublicAPI.Unshipped.txt | 2 + .../NpgsqlJsonNetExtensions.cs | 27 ++++ src/Npgsql.Json.NET/PublicAPI.Unshipped.txt | 1 + .../NpgsqlNetTopologySuiteExtensions.cs | 23 +++ .../PublicAPI.Unshipped.txt | 1 + .../NpgsqlNodaTimeExtensions.cs | 11 ++ src/Npgsql.NodaTime/PublicAPI.Unshipped.txt | 1 + src/Npgsql/NpgsqlDataSourceBuilder.cs | 143 ++++++++++++++++-- src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 127 ++++++++++++++-- src/Npgsql/PublicAPI.Unshipped.txt | 16 ++ src/Npgsql/TypeMapping/GlobalTypeMapper.cs | 3 - src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs | 1 - 13 files 
changed, 357 insertions(+), 28 deletions(-) diff --git a/src/Npgsql.GeoJSON/NpgsqlGeoJSONExtensions.cs b/src/Npgsql.GeoJSON/NpgsqlGeoJSONExtensions.cs index b47a9b211f..9651004a86 100644 --- a/src/Npgsql.GeoJSON/NpgsqlGeoJSONExtensions.cs +++ b/src/Npgsql.GeoJSON/NpgsqlGeoJSONExtensions.cs @@ -10,6 +10,7 @@ namespace Npgsql; /// public static class NpgsqlGeoJSONExtensions { + // Note: defined for binary compatibility and NpgsqlConnection.GlobalTypeMapper. /// /// Sets up GeoJSON mappings for the PostGIS types. /// @@ -22,6 +23,7 @@ public static INpgsqlTypeMapper UseGeoJson(this INpgsqlTypeMapper mapper, GeoJSO return mapper; } + // Note: defined for binary compatibility and NpgsqlConnection.GlobalTypeMapper. /// /// Sets up GeoJSON mappings for the PostGIS types. /// @@ -34,4 +36,31 @@ public static INpgsqlTypeMapper UseGeoJson(this INpgsqlTypeMapper mapper, CrsMap mapper.AddTypeInfoResolverFactory(new GeoJSONTypeInfoResolverFactory(options, geographyAsDefault, crsMap)); return mapper; } + + /// + /// Sets up GeoJSON mappings for the PostGIS types. + /// + /// The type mapper to set up (global or connection-specific) + /// Options to use when constructing objects. + /// Specifies that the geography type is used for mapping by default. + public static TMapper UseGeoJson(this TMapper mapper, GeoJSONOptions options = GeoJSONOptions.None, bool geographyAsDefault = false) + where TMapper : INpgsqlTypeMapper + { + mapper.AddTypeInfoResolverFactory(new GeoJSONTypeInfoResolverFactory(options, geographyAsDefault, crsMap: null)); + return mapper; + } + + /// + /// Sets up GeoJSON mappings for the PostGIS types. + /// + /// The type mapper to set up (global or connection-specific) + /// A custom crs map that might contain more or less entries than the default well-known crs map. + /// Options to use when constructing objects. + /// Specifies that the geography type is used for mapping by default. 
+ public static TMapper UseGeoJson(this TMapper mapper, CrsMap crsMap, GeoJSONOptions options = GeoJSONOptions.None, bool geographyAsDefault = false) + where TMapper : INpgsqlTypeMapper + { + mapper.AddTypeInfoResolverFactory(new GeoJSONTypeInfoResolverFactory(options, geographyAsDefault, crsMap)); + return mapper; + } } diff --git a/src/Npgsql.GeoJSON/PublicAPI.Unshipped.txt b/src/Npgsql.GeoJSON/PublicAPI.Unshipped.txt index ab058de62d..34de07f0d7 100644 --- a/src/Npgsql.GeoJSON/PublicAPI.Unshipped.txt +++ b/src/Npgsql.GeoJSON/PublicAPI.Unshipped.txt @@ -1 +1,3 @@ #nullable enable +static Npgsql.NpgsqlGeoJSONExtensions.UseGeoJson(this TMapper mapper, Npgsql.GeoJSON.CrsMap! crsMap, Npgsql.GeoJSONOptions options = Npgsql.GeoJSONOptions.None, bool geographyAsDefault = false) -> TMapper +static Npgsql.NpgsqlGeoJSONExtensions.UseGeoJson(this TMapper mapper, Npgsql.GeoJSONOptions options = Npgsql.GeoJSONOptions.None, bool geographyAsDefault = false) -> TMapper diff --git a/src/Npgsql.Json.NET/NpgsqlJsonNetExtensions.cs b/src/Npgsql.Json.NET/NpgsqlJsonNetExtensions.cs index f2b33933b8..89c8d21603 100644 --- a/src/Npgsql.Json.NET/NpgsqlJsonNetExtensions.cs +++ b/src/Npgsql.Json.NET/NpgsqlJsonNetExtensions.cs @@ -13,6 +13,7 @@ namespace Npgsql; /// public static class NpgsqlJsonNetExtensions { + // Note: defined for binary compatibility and NpgsqlConnection.GlobalTypeMapper. /// /// Sets up JSON.NET mappings for the PostgreSQL json and jsonb types. /// @@ -37,4 +38,30 @@ public static INpgsqlTypeMapper UseJsonNet( mapper.AddTypeInfoResolverFactory(new JsonNetTypeInfoResolverFactory(settings)); return mapper; } + + /// + /// Sets up JSON.NET mappings for the PostgreSQL json and jsonb types. + /// + /// The type mapper to set up. + /// Optional settings to customize JSON serialization. + /// + /// A list of CLR types to map to PostgreSQL jsonb (no need to specify ). + /// + /// + /// A list of CLR types to map to PostgreSQL json (no need to specify ). 
+ /// + [RequiresUnreferencedCode("Json serializer may perform reflection on trimmed types.")] + [RequiresDynamicCode("Serializing arbitrary types to json can require creating new generic types or methods, which requires creating code at runtime. This may not work when AOT compiling.")] + public static TMapper UseJsonNet( + this TMapper mapper, + JsonSerializerSettings? settings = null, + Type[]? jsonbClrTypes = null, + Type[]? jsonClrTypes = null) + where TMapper : INpgsqlTypeMapper + { + // Reverse order + mapper.AddTypeInfoResolverFactory(new JsonNetPocoTypeInfoResolverFactory(jsonbClrTypes, jsonClrTypes, settings)); + mapper.AddTypeInfoResolverFactory(new JsonNetTypeInfoResolverFactory(settings)); + return mapper; + } } diff --git a/src/Npgsql.Json.NET/PublicAPI.Unshipped.txt b/src/Npgsql.Json.NET/PublicAPI.Unshipped.txt index ab058de62d..f4557570e1 100644 --- a/src/Npgsql.Json.NET/PublicAPI.Unshipped.txt +++ b/src/Npgsql.Json.NET/PublicAPI.Unshipped.txt @@ -1 +1,2 @@ #nullable enable +static Npgsql.NpgsqlJsonNetExtensions.UseJsonNet(this TMapper mapper, Newtonsoft.Json.JsonSerializerSettings? settings = null, System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> TMapper diff --git a/src/Npgsql.NetTopologySuite/NpgsqlNetTopologySuiteExtensions.cs b/src/Npgsql.NetTopologySuite/NpgsqlNetTopologySuiteExtensions.cs index a30d023891..76afcf886c 100644 --- a/src/Npgsql.NetTopologySuite/NpgsqlNetTopologySuiteExtensions.cs +++ b/src/Npgsql.NetTopologySuite/NpgsqlNetTopologySuiteExtensions.cs @@ -10,6 +10,7 @@ namespace Npgsql; /// public static class NpgsqlNetTopologySuiteExtensions { + // Note: defined for binary compatibility and NpgsqlConnection.GlobalTypeMapper. /// /// Sets up NetTopologySuite mappings for the PostGIS types. 
/// @@ -30,4 +31,26 @@ public static INpgsqlTypeMapper UseNetTopologySuite( mapper.AddTypeInfoResolverFactory(new NetTopologySuiteTypeInfoResolverFactory(coordinateSequenceFactory, precisionModel, handleOrdinates, geographyAsDefault)); return mapper; } + + /// + /// Sets up NetTopologySuite mappings for the PostGIS types. + /// + /// The type mapper to set up (global or connection-specific). + /// The factory which knows how to build a particular implementation of ICoordinateSequence from an array of Coordinates. + /// Specifies the grid of allowable points. + /// Specifies the ordinates which will be handled. Not specified ordinates will be ignored. + /// If is specified, an actual value will be taken from + /// the property of . + /// Specifies that the geography type is used for mapping by default. + public static TMapper UseNetTopologySuite( + this TMapper mapper, + CoordinateSequenceFactory? coordinateSequenceFactory = null, + PrecisionModel? precisionModel = null, + Ordinates handleOrdinates = Ordinates.None, + bool geographyAsDefault = false) + where TMapper : INpgsqlTypeMapper + { + mapper.AddTypeInfoResolverFactory(new NetTopologySuiteTypeInfoResolverFactory(coordinateSequenceFactory, precisionModel, handleOrdinates, geographyAsDefault)); + return mapper; + } } diff --git a/src/Npgsql.NetTopologySuite/PublicAPI.Unshipped.txt b/src/Npgsql.NetTopologySuite/PublicAPI.Unshipped.txt index ab058de62d..ab78bca1af 100644 --- a/src/Npgsql.NetTopologySuite/PublicAPI.Unshipped.txt +++ b/src/Npgsql.NetTopologySuite/PublicAPI.Unshipped.txt @@ -1 +1,2 @@ #nullable enable +static Npgsql.NpgsqlNetTopologySuiteExtensions.UseNetTopologySuite(this TMapper mapper, NetTopologySuite.Geometries.CoordinateSequenceFactory? coordinateSequenceFactory = null, NetTopologySuite.Geometries.PrecisionModel? 
precisionModel = null, NetTopologySuite.Geometries.Ordinates handleOrdinates = NetTopologySuite.Geometries.Ordinates.None, bool geographyAsDefault = false) -> TMapper diff --git a/src/Npgsql.NodaTime/NpgsqlNodaTimeExtensions.cs b/src/Npgsql.NodaTime/NpgsqlNodaTimeExtensions.cs index 9ebf42e83f..585143f3fe 100644 --- a/src/Npgsql.NodaTime/NpgsqlNodaTimeExtensions.cs +++ b/src/Npgsql.NodaTime/NpgsqlNodaTimeExtensions.cs @@ -9,6 +9,7 @@ namespace Npgsql; /// public static class NpgsqlNodaTimeExtensions { + // Note: defined for binary compatibility and NpgsqlConnection.GlobalTypeMapper. /// /// Sets up NodaTime mappings for the PostgreSQL date/time types. /// @@ -18,4 +19,14 @@ public static INpgsqlTypeMapper UseNodaTime(this INpgsqlTypeMapper mapper) mapper.AddTypeInfoResolverFactory(new NodaTimeTypeInfoResolverFactory()); return mapper; } + + /// + /// Sets up NodaTime mappings for the PostgreSQL date/time types. + /// + /// The type mapper to set up (global or connection-specific) + public static TMapper UseNodaTime(this TMapper mapper) where TMapper : INpgsqlTypeMapper + { + mapper.AddTypeInfoResolverFactory(new NodaTimeTypeInfoResolverFactory()); + return mapper; + } } diff --git a/src/Npgsql.NodaTime/PublicAPI.Unshipped.txt b/src/Npgsql.NodaTime/PublicAPI.Unshipped.txt index ab058de62d..f1ab4e3c0c 100644 --- a/src/Npgsql.NodaTime/PublicAPI.Unshipped.txt +++ b/src/Npgsql.NodaTime/PublicAPI.Unshipped.txt @@ -1 +1,2 @@ #nullable enable +static Npgsql.NpgsqlNodaTimeExtensions.UseNodaTime(this TMapper mapper) -> TMapper diff --git a/src/Npgsql/NpgsqlDataSourceBuilder.cs b/src/Npgsql/NpgsqlDataSourceBuilder.cs index 7bbcf274aa..8bb72ccede 100644 --- a/src/Npgsql/NpgsqlDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlDataSourceBuilder.cs @@ -9,6 +9,7 @@ using Microsoft.Extensions.Logging; using Npgsql.Internal; using Npgsql.Internal.ResolverFactories; +using Npgsql.NameTranslation; using Npgsql.TypeMapping; using NpgsqlTypes; @@ -384,8 +385,27 @@ public void 
AddTypeInfoResolverFactory(PgTypeInfoResolverFactory factory) /// void INpgsqlTypeMapper.Reset() => ((INpgsqlTypeMapper)_internalBuilder).Reset(); - /// - public INpgsqlTypeMapper MapEnum<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] TEnum>(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + /// + /// Maps a CLR enum to a PostgreSQL enum type. + /// + /// + /// CLR enum labels are mapped by name to PostgreSQL enum labels. + /// The translation strategy can be controlled by the parameter, + /// which defaults to . + /// You can also use the on your enum fields to manually specify a PostgreSQL enum label. + /// If there is a discrepancy between the .NET and database labels while an enum is read or written, + /// an exception will be raised. + /// + /// + /// A PostgreSQL type name for the corresponding enum type in the database. + /// If null, the name translator given in will be used. + /// + /// + /// A component which will be used to translate CLR names (e.g. SomeClass) into database names (e.g. some_class). + /// Defaults to . + /// + /// The .NET enum type to be mapped + public NpgsqlDataSourceBuilder MapEnum<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] TEnum>(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) where TEnum : struct, Enum { _internalBuilder.MapEnum(pgName, nameTranslator); @@ -397,41 +417,100 @@ public void AddTypeInfoResolverFactory(PgTypeInfoResolverFactory factory) where TEnum : struct, Enum => _internalBuilder.UnmapEnum(pgName, nameTranslator); - /// + /// + /// Maps a CLR enum to a PostgreSQL enum type. + /// + /// + /// CLR enum labels are mapped by name to PostgreSQL enum labels. + /// The translation strategy can be controlled by the parameter, + /// which defaults to . + /// You can also use the on your enum fields to manually specify a PostgreSQL enum label. 
+ /// If there is a discrepancy between the .NET and database labels while an enum is read or written, + /// an exception will be raised. + /// + /// The .NET enum type to be mapped + /// + /// A PostgreSQL type name for the corresponding enum type in the database. + /// If null, the name translator given in will be used. + /// + /// + /// A component which will be used to translate CLR names (e.g. SomeClass) into database names (e.g. some_class). + /// Defaults to . + /// [RequiresDynamicCode("Calling MapEnum with a Type can require creating new generic types or methods. This may not work when AOT compiling.")] - public INpgsqlTypeMapper MapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)] + public NpgsqlDataSourceBuilder MapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)] Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) - => _internalBuilder.MapEnum(clrType, pgName, nameTranslator); + { + _internalBuilder.MapEnum(clrType, pgName, nameTranslator); + return this; + } /// public bool UnmapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)] Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) => _internalBuilder.UnmapEnum(clrType, pgName, nameTranslator); - /// + /// + /// Maps a CLR type to a PostgreSQL composite type. + /// + /// + /// CLR fields and properties by string to PostgreSQL names. + /// The translation strategy can be controlled by the parameter, + /// which defaults to . + /// You can also use the on your members to manually specify a PostgreSQL name. + /// If there is a discrepancy between the .NET type and database type while a composite is read or written, + /// an exception will be raised. 
+ /// + /// + /// A PostgreSQL type name for the corresponding composite type in the database. + /// If null, the name translator given in will be used. + /// + /// + /// A component which will be used to translate CLR names (e.g. SomeClass) into database names (e.g. some_class). + /// Defaults to . + /// + /// The .NET type to be mapped [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] - public INpgsqlTypeMapper MapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>( + public NpgsqlDataSourceBuilder MapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>( string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) { - _internalBuilder.MapComposite(pgName, nameTranslator); + _internalBuilder.MapComposite(typeof(T), pgName, nameTranslator); return this; } /// [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] - public INpgsqlTypeMapper MapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] + public bool UnmapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>( + string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) + => _internalBuilder.UnmapComposite(typeof(T), pgName, nameTranslator); + + /// + /// Maps a CLR type to a composite type. + /// + /// + /// Maps CLR fields and properties by string to PostgreSQL names. + /// The translation strategy can be controlled by the parameter, + /// which defaults to . + /// If there is a discrepancy between the .NET type and database type while a composite is read or written, + /// an exception will be raised. + /// + /// The .NET type to be mapped. + /// + /// A PostgreSQL type name for the corresponding composite type in the database. + /// If null, the name translator given in will be used. + /// + /// + /// A component which will be used to translate CLR names (e.g. SomeClass) into database names (e.g. some_class). + /// Defaults to . + /// + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + public NpgsqlDataSourceBuilder MapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) { _internalBuilder.MapComposite(clrType, pgName, nameTranslator); return this; } - /// - [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] - public bool UnmapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>( - string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) - => _internalBuilder.UnmapComposite(pgName, nameTranslator); - /// [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] public bool UnmapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] @@ -503,4 +582,38 @@ INpgsqlTypeMapper INpgsqlTypeMapper.EnableRecordsAsTuples() "The use of unmapped enums, ranges or multiranges requires dynamic code usage which is incompatible with NativeAOT.")] INpgsqlTypeMapper INpgsqlTypeMapper.EnableUnmappedTypes() => EnableUnmappedTypes(); + + /// + INpgsqlTypeMapper INpgsqlTypeMapper.MapEnum<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] TEnum>(string? pgName, INpgsqlNameTranslator? nameTranslator) + { + _internalBuilder.MapEnum(pgName, nameTranslator); + return this; + } + + /// + [RequiresDynamicCode("Calling MapEnum with a Type can require creating new generic types or methods. This may not work when AOT compiling.")] + INpgsqlTypeMapper INpgsqlTypeMapper.MapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)] + Type clrType, string? pgName, INpgsqlNameTranslator? nameTranslator) + { + _internalBuilder.MapEnum(clrType, pgName, nameTranslator); + return this; + } + + /// + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. 
This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + INpgsqlTypeMapper INpgsqlTypeMapper.MapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>( + string? pgName, INpgsqlNameTranslator? nameTranslator) + { + _internalBuilder.MapComposite(typeof(T), pgName, nameTranslator); + return this; + } + + /// + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + INpgsqlTypeMapper INpgsqlTypeMapper.MapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] + Type clrType, string? pgName, INpgsqlNameTranslator? nameTranslator) + { + _internalBuilder.MapComposite(clrType, pgName, nameTranslator); + return this; + } } diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index d9fdab90b4..a3097b5089 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -10,6 +10,7 @@ using Microsoft.Extensions.Logging; using Npgsql.Internal; using Npgsql.Internal.ResolverFactories; +using Npgsql.NameTranslation; using Npgsql.Properties; using Npgsql.TypeMapping; using NpgsqlTypes; @@ -357,8 +358,27 @@ public INpgsqlNameTranslator DefaultNameTranslator set => _userTypeMapper.DefaultNameTranslator = value; } - /// - public INpgsqlTypeMapper MapEnum<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] TEnum>(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) + /// + /// Maps a CLR enum to a PostgreSQL enum type. 
+ /// + /// + /// CLR enum labels are mapped by name to PostgreSQL enum labels. + /// The translation strategy can be controlled by the parameter, + /// which defaults to . + /// You can also use the on your enum fields to manually specify a PostgreSQL enum label. + /// If there is a discrepancy between the .NET and database labels while an enum is read or written, + /// an exception will be raised. + /// + /// + /// A PostgreSQL type name for the corresponding enum type in the database. + /// If null, the name translator given in will be used. + /// + /// + /// A component which will be used to translate CLR names (e.g. SomeClass) into database names (e.g. some_class). + /// Defaults to . + /// + /// The .NET enum type to be mapped + public NpgsqlSlimDataSourceBuilder MapEnum<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] TEnum>(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) where TEnum : struct, Enum { _userTypeMapper.MapEnum(pgName, nameTranslator); @@ -370,9 +390,28 @@ public INpgsqlNameTranslator DefaultNameTranslator where TEnum : struct, Enum => _userTypeMapper.UnmapEnum(pgName, nameTranslator); - /// + /// + /// Maps a CLR enum to a PostgreSQL enum type. + /// + /// + /// CLR enum labels are mapped by name to PostgreSQL enum labels. + /// The translation strategy can be controlled by the parameter, + /// which defaults to . + /// You can also use the on your enum fields to manually specify a PostgreSQL enum label. + /// If there is a discrepancy between the .NET and database labels while an enum is read or written, + /// an exception will be raised. + /// + /// The .NET enum type to be mapped + /// + /// A PostgreSQL type name for the corresponding enum type in the database. + /// If null, the name translator given in will be used. + /// + /// + /// A component which will be used to translate CLR names (e.g. SomeClass) into database names (e.g. some_class). + /// Defaults to . 
+ /// [RequiresDynamicCode("Calling MapEnum with a Type can require creating new generic types or methods. This may not work when AOT compiling.")] - public INpgsqlTypeMapper MapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)] + public NpgsqlSlimDataSourceBuilder MapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)] Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) { _userTypeMapper.MapEnum(clrType, pgName, nameTranslator); @@ -384,9 +423,28 @@ public bool UnmapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) => _userTypeMapper.UnmapEnum(clrType, pgName, nameTranslator); - /// + /// + /// Maps a CLR type to a PostgreSQL composite type. + /// + /// + /// CLR fields and properties by string to PostgreSQL names. + /// The translation strategy can be controlled by the parameter, + /// which defaults to . + /// You can also use the on your members to manually specify a PostgreSQL name. + /// If there is a discrepancy between the .NET type and database type while a composite is read or written, + /// an exception will be raised. + /// + /// + /// A PostgreSQL type name for the corresponding composite type in the database. + /// If null, the name translator given in will be used. + /// + /// + /// A component which will be used to translate CLR names (e.g. SomeClass) into database names (e.g. some_class). + /// Defaults to . + /// + /// The .NET type to be mapped [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. 
This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] - public INpgsqlTypeMapper MapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>( + public NpgsqlSlimDataSourceBuilder MapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>( string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) { _userTypeMapper.MapComposite(typeof(T), pgName, nameTranslator); @@ -399,9 +457,27 @@ public bool UnmapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) => _userTypeMapper.UnmapComposite(typeof(T), pgName, nameTranslator); - /// + /// + /// Maps a CLR type to a composite type. + /// + /// + /// Maps CLR fields and properties by string to PostgreSQL names. + /// The translation strategy can be controlled by the parameter, + /// which defaults to . + /// If there is a discrepancy between the .NET type and database type while a composite is read or written, + /// an exception will be raised. + /// + /// The .NET type to be mapped. + /// + /// A PostgreSQL type name for the corresponding composite type in the database. + /// If null, the name translator given in will be used. + /// + /// + /// A component which will be used to translate CLR names (e.g. SomeClass) into database names (e.g. some_class). + /// Defaults to . + /// [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. 
This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] - public INpgsqlTypeMapper MapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] + public NpgsqlSlimDataSourceBuilder MapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) { _userTypeMapper.MapComposite(clrType, pgName, nameTranslator); @@ -414,7 +490,6 @@ public bool UnmapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMember Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) => _userTypeMapper.UnmapComposite(clrType, pgName, nameTranslator); - /// public void AddTypeInfoResolverFactory(PgTypeInfoResolverFactory factory) => _resolverChainBuilder.PrependResolverFactory(factory); @@ -787,4 +862,38 @@ INpgsqlTypeMapper INpgsqlTypeMapper.EnableRecordsAsTuples() "The use of unmapped enums, ranges or multiranges requires dynamic code usage which is incompatible with NativeAOT.")] INpgsqlTypeMapper INpgsqlTypeMapper.EnableUnmappedTypes() => EnableUnmappedTypes(); + + /// + INpgsqlTypeMapper INpgsqlTypeMapper.MapEnum<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] TEnum>(string? pgName, INpgsqlNameTranslator? nameTranslator) + { + _userTypeMapper.MapEnum(pgName, nameTranslator); + return this; + } + + /// + [RequiresDynamicCode("Calling MapEnum with a Type can require creating new generic types or methods. This may not work when AOT compiling.")] + INpgsqlTypeMapper INpgsqlTypeMapper.MapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)] + Type clrType, string? 
pgName, INpgsqlNameTranslator? nameTranslator) + { + _userTypeMapper.MapEnum(clrType, pgName, nameTranslator); + return this; + } + + /// + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + INpgsqlTypeMapper INpgsqlTypeMapper.MapComposite<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] T>( + string? pgName, INpgsqlNameTranslator? nameTranslator) + { + _userTypeMapper.MapComposite(typeof(T), pgName, nameTranslator); + return this; + } + + /// + [RequiresDynamicCode("Mapping composite types involves serializing arbitrary types which can require creating new generic types or methods. This is currently unsupported with NativeAOT, vote on issue #5303 if this is important to you.")] + INpgsqlTypeMapper INpgsqlTypeMapper.MapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] + Type clrType, string? pgName, INpgsqlNameTranslator? nameTranslator) + { + _userTypeMapper.MapComposite(clrType, pgName, nameTranslator); + return this; + } } diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index cabfe99eaa..b7bdea1d8a 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -5,6 +5,10 @@ Npgsql.NpgsqlConnection.SslClientAuthenticationOptionsCallback.get -> System.Act Npgsql.NpgsqlConnection.SslClientAuthenticationOptionsCallback.set -> void Npgsql.NpgsqlConnectionStringBuilder.SslNegotiation.get -> Npgsql.SslNegotiation Npgsql.NpgsqlConnectionStringBuilder.SslNegotiation.set -> void +Npgsql.NpgsqlDataSourceBuilder.MapComposite(System.Type! clrType, string? 
pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.MapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.MapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.ConfigureTracingOptions(Npgsql.NpgsqlTracingOptions! tracingOptions) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.UseNegotiateOptionsCallback(System.Action? negotiateOptionsCallback) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.UseSslClientAuthenticationOptionsCallback(System.Action? sslClientAuthenticationOptionsCallback) -> Npgsql.NpgsqlDataSourceBuilder! @@ -14,6 +18,10 @@ Npgsql.NpgsqlSlimDataSourceBuilder.ConfigureTracingOptions(Npgsql.NpgsqlTracingO Npgsql.NpgsqlSlimDataSourceBuilder.EnableGeometricTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableJsonTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableNetworkTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.MapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.MapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.MapEnum(string? 
pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UseNegotiateOptionsCallback(System.Action? negotiateOptionsCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UseSslClientAuthenticationOptionsCallback(System.Action? sslClientAuthenticationOptionsCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlTracingOptions.EnableFirstResponseEvent.get -> bool @@ -53,3 +61,11 @@ Npgsql.SslNegotiation Npgsql.SslNegotiation.Direct = 1 -> Npgsql.SslNegotiation Npgsql.SslNegotiation.Postgres = 0 -> Npgsql.SslNegotiation override Npgsql.NpgsqlMultiHostDataSource.Clear() -> void +*REMOVED*Npgsql.NpgsqlDataSourceBuilder.MapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +*REMOVED*Npgsql.NpgsqlDataSourceBuilder.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +*REMOVED*Npgsql.NpgsqlDataSourceBuilder.MapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +*REMOVED*Npgsql.NpgsqlDataSourceBuilder.MapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +*REMOVED*Npgsql.NpgsqlSlimDataSourceBuilder.MapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +*REMOVED*Npgsql.NpgsqlSlimDataSourceBuilder.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +*REMOVED*Npgsql.NpgsqlSlimDataSourceBuilder.MapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! 
+*REMOVED*Npgsql.NpgsqlSlimDataSourceBuilder.MapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! diff --git a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs index c8b72cb8cd..2f9b5b3479 100644 --- a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs +++ b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs @@ -1,14 +1,11 @@ using System; using System.Collections.Generic; using System.Diagnostics.CodeAnalysis; -using System.Linq; using System.Text.Json; -using System.Text.Json.Nodes; using System.Threading; using Npgsql.Internal; using Npgsql.Internal.Postgres; using Npgsql.Internal.ResolverFactories; -using NpgsqlTypes; namespace Npgsql.TypeMapping; diff --git a/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs b/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs index 83728785d6..b5f2de7594 100644 --- a/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs +++ b/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs @@ -3,7 +3,6 @@ using System.Text.Json; using System.Text.Json.Nodes; using Npgsql.Internal; -using Npgsql.Internal.ResolverFactories; using Npgsql.NameTranslation; using NpgsqlTypes; From 919ed979c444531ab89706868041b2b9713243b8 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Sun, 10 Nov 2024 13:18:53 +0100 Subject: [PATCH 485/761] Remove multipass resolution (#5559) --- .../BackendMessages/RowDescriptionMessage.cs | 36 +++++----- src/Npgsql/Internal/AdoSerializerHelpers.cs | 2 +- .../Internal/Converters/RecordConverter.cs | 8 ++- src/Npgsql/Internal/PgSerializerOptions.cs | 17 ++--- .../AdoTypeInfoResolverFactory.cs | 12 ++-- src/Npgsql/Internal/TypeInfoCache.cs | 66 +++++++------------ src/Npgsql/NpgsqlDataReader.cs | 4 +- src/Npgsql/Schema/DbColumnSchemaGenerator.cs | 2 +- 8 files changed, 65 insertions(+), 82 deletions(-) diff --git a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs index 8c6883109d..fd04ddfdaf 100644 --- 
a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs +++ b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs @@ -232,7 +232,7 @@ internal FieldDescription(FieldDescription source) DataFormat = source.DataFormat; PostgresType = source.PostgresType; Field = source.Field; - _objectOrDefaultInfo = source._objectOrDefaultInfo; + _objectInfo = source._objectInfo; } internal void Populate( @@ -250,7 +250,7 @@ internal void Populate( DataFormat = dataFormat; PostgresType = _serializerOptions.DatabaseInfo.FindPostgresType((Oid)TypeOID)?.GetRepresentationalType() ?? UnknownBackendType.Instance; Field = new(Name, _serializerOptions.ToCanonicalTypeId(PostgresType), TypeModifier); - _objectOrDefaultInfo = default; + _objectInfo = default; } /// @@ -296,18 +296,18 @@ internal void Populate( internal PostgresType PostgresType { get; private set; } - internal Type FieldType => ObjectOrDefaultInfo.TypeToConvert; + internal Type FieldType => ObjectInfo.TypeToConvert; - ColumnInfo _objectOrDefaultInfo; - internal PgConverterInfo ObjectOrDefaultInfo + ColumnInfo _objectInfo; + internal PgConverterInfo ObjectInfo { get { - if (!_objectOrDefaultInfo.ConverterInfo.IsDefault) - return _objectOrDefaultInfo.ConverterInfo; + if (!_objectInfo.ConverterInfo.IsDefault) + return _objectInfo.ConverterInfo; - ref var info = ref _objectOrDefaultInfo; - GetInfo(null, ref _objectOrDefaultInfo); + ref var info = ref _objectInfo; + GetInfoCore(null, ref _objectInfo); return info.ConverterInfo; } } @@ -320,7 +320,8 @@ internal FieldDescription Clone() return field; } - internal void GetInfo(Type? type, ref ColumnInfo lastColumnInfo) + internal void GetInfo(Type type, ref ColumnInfo lastColumnInfo) => GetInfoCore(type, ref lastColumnInfo); + void GetInfoCore(Type? type, ref ColumnInfo lastColumnInfo) { Debug.Assert(lastColumnInfo.ConverterInfo.IsDefault || ( ReferenceEquals(_serializerOptions, lastColumnInfo.ConverterInfo.TypeInfo.Options) && ( @@ -332,20 +333,20 @@ internal void GetInfo(Type? 
type, ref ColumnInfo lastColumnInfo) if (!lastColumnInfo.ConverterInfo.IsDefault && lastColumnInfo.ConverterInfo.TypeToConvert == type) return; - var odfInfo = DataFormat is DataFormat.Text && type is not null ? ObjectOrDefaultInfo : _objectOrDefaultInfo.ConverterInfo; - if (odfInfo is { IsDefault: false }) + var objectInfo = DataFormat is DataFormat.Text && type is not null ? ObjectInfo : _objectInfo.ConverterInfo; + if (objectInfo is { IsDefault: false }) { if (typeof(object) == type) { - lastColumnInfo = new(odfInfo, DataFormat, true); + lastColumnInfo = new(objectInfo, DataFormat, true); return; } - if (odfInfo.TypeToConvert == type) + if (objectInfo.TypeToConvert == type) { // As TypeInfoMappingCollection is always adding object mappings for // default/datatypename mappings, we'll also check Converter.TypeToConvert. // If we have an exact match we are still able to use e.g. a converter for ints in an unboxed fashion. - lastColumnInfo = new(odfInfo, DataFormat, odfInfo.IsBoxingConverter && odfInfo.Converter.TypeToConvert != type); + lastColumnInfo = new(objectInfo, DataFormat, objectInfo.IsBoxingConverter && objectInfo.Converter.TypeToConvert != type); return; } } @@ -390,9 +391,8 @@ void GetInfoSlow(Type? type, out ColumnInfo lastColumnInfo) // We delay initializing ObjectOrDefaultInfo until after the first lookup (unless it is itself the first lookup). // When passed in an unsupported type it allows the error to be more specific, instead of just having object/null to deal with. - if (_objectOrDefaultInfo.ConverterInfo.IsDefault && type is not null) - _ = ObjectOrDefaultInfo; - + if (_objectInfo.ConverterInfo.IsDefault && type is not null) + _ = ObjectInfo; } // DataFormat.Text today exclusively signals that we executed with an UnknownResultTypeList. 
diff --git a/src/Npgsql/Internal/AdoSerializerHelpers.cs b/src/Npgsql/Internal/AdoSerializerHelpers.cs index b83c447c39..177114f78d 100644 --- a/src/Npgsql/Internal/AdoSerializerHelpers.cs +++ b/src/Npgsql/Internal/AdoSerializerHelpers.cs @@ -14,7 +14,7 @@ public static PgTypeInfo GetTypeInfoForReading(Type type, PgTypeId pgTypeId, PgS Exception? inner = null; try { - typeInfo = type == typeof(object) ? options.GetObjectOrDefaultTypeInfoInternal(pgTypeId) : options.GetTypeInfoInternal(type, pgTypeId); + typeInfo = options.GetTypeInfoInternal(type, pgTypeId); } catch (Exception ex) { diff --git a/src/Npgsql/Internal/Converters/RecordConverter.cs b/src/Npgsql/Internal/Converters/RecordConverter.cs index a0666abdeb..05eabcf7cd 100644 --- a/src/Npgsql/Internal/Converters/RecordConverter.cs +++ b/src/Npgsql/Internal/Converters/RecordConverter.cs @@ -7,6 +7,8 @@ namespace Npgsql.Internal.Converters; sealed class RecordConverter(PgSerializerOptions options, Func? factory = null) : PgStreamingConverter { + static bool IsObjectArrayRecord => typeof(T) == typeof(object[]); + public override T Read(PgReader reader) => Read(async: false, reader, CancellationToken.None).GetAwaiter().GetResult(); @@ -34,9 +36,11 @@ async ValueTask Read(bool async, PgReader reader, CancellationToken cancellat var postgresType = options.DatabaseInfo.GetPostgresType(typeOid).GetRepresentationalType() ?? throw new NotSupportedException($"Reading isn't supported for record field {i} (unknown type OID {typeOid}"); - var pgTypeId = options.ToCanonicalTypeId(postgresType); - var typeInfo = options.GetObjectOrDefaultTypeInfoInternal(pgTypeId) + + // TODO resolve based on types expected by _factory (pass in a Type[] during construcion) + // Only allow object polymorphism for object[] records, valuetuple records are always strongly typed. + var typeInfo = (IsObjectArrayRecord ? options.GetTypeInfo(typeof(object), pgTypeId) : options.GetDefaultTypeInfo(pgTypeId)) ?? 
throw new NotSupportedException( $"Reading isn't supported for record field {i} (PG type '{postgresType.DisplayName}'"); diff --git a/src/Npgsql/Internal/PgSerializerOptions.cs b/src/Npgsql/Internal/PgSerializerOptions.cs index 49d7cf7200..052404da5c 100644 --- a/src/Npgsql/Internal/PgSerializerOptions.cs +++ b/src/Npgsql/Internal/PgSerializerOptions.cs @@ -78,25 +78,22 @@ public static bool IsWellKnownTextType(Type type) // This also makes it easier to realize it should be a cached value if infos for different CLR types are requested for the same // pgTypeId. Effectively it should be 'impossible' to get the wrong kind via any PgConverterOptions api which is what this is mainly // for. - PgTypeInfo? GetTypeInfoCore(Type? type, PgTypeId? pgTypeId, bool defaultTypeFallback) + PgTypeInfo? GetTypeInfoCore(Type? type, PgTypeId? pgTypeId) => PortableTypeIds - ? ((TypeInfoCache)(_typeInfoCache ??= new TypeInfoCache(this))).GetOrAddInfo(type, pgTypeId?.DataTypeName, defaultTypeFallback) - : ((TypeInfoCache)(_typeInfoCache ??= new TypeInfoCache(this))).GetOrAddInfo(type, pgTypeId?.Oid, defaultTypeFallback); + ? ((TypeInfoCache)(_typeInfoCache ??= new TypeInfoCache(this))).GetOrAddInfo(type, pgTypeId?.DataTypeName) + : ((TypeInfoCache)(_typeInfoCache ??= new TypeInfoCache(this))).GetOrAddInfo(type, pgTypeId?.Oid); internal PgTypeInfo? GetTypeInfoInternal(Type? type, PgTypeId? pgTypeId) - => GetTypeInfoCore(type, pgTypeId, false); - - internal PgTypeInfo? GetObjectOrDefaultTypeInfoInternal(PgTypeId pgTypeId) - => GetTypeInfoCore(typeof(object), pgTypeId, true); + => GetTypeInfoCore(type, pgTypeId); public PgTypeInfo? GetDefaultTypeInfo(Type type) - => GetTypeInfoCore(type, null, false); + => GetTypeInfoCore(type, null); public PgTypeInfo? GetDefaultTypeInfo(PgTypeId pgTypeId) - => GetTypeInfoCore(null, GetCanonicalTypeId(pgTypeId), false); + => GetTypeInfoCore(null, GetCanonicalTypeId(pgTypeId)); public PgTypeInfo? 
GetTypeInfo(Type type, PgTypeId pgTypeId) - => GetTypeInfoCore(type, GetCanonicalTypeId(pgTypeId), false); + => GetTypeInfoCore(type, GetCanonicalTypeId(pgTypeId)); // If a given type id is in the opposite form than what was expected it will be mapped according to the requirement. internal PgTypeId GetCanonicalTypeId(PgTypeId pgTypeId) diff --git a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs index aa5bbde21f..768e0873ef 100644 --- a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs @@ -38,13 +38,12 @@ class Resolver : IPgTypeInfoResolver static PgTypeInfo? GetEnumTypeInfo(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) { - if (type is not null && type != typeof(string)) + if (type is not null && type != typeof(object) && type != typeof(string) + || options.DatabaseInfo.GetPostgresType(dataTypeName) is not PostgresEnumType) return null; - if (options.DatabaseInfo.GetPostgresType(dataTypeName) is not PostgresEnumType) - return null; - - return new PgTypeInfo(options, new StringTextConverter(options.TextEncoding), dataTypeName); + return new PgTypeInfo(options, new StringTextConverter(options.TextEncoding), dataTypeName, + unboxedType: type == typeof(object) ? typeof(string) : null); } static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) @@ -511,7 +510,8 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) static PgTypeInfo? GetEnumArrayTypeInfo(Type? elementType, PostgresType pgElementType, Type? 
type, DataTypeName dataTypeName, PgSerializerOptions options) { - if ((type != typeof(object) && elementType is not null && elementType != typeof(string)) || pgElementType is not PostgresEnumType enumType) + if ((type is not null && type != typeof(object) && elementType != typeof(string)) + || pgElementType is not PostgresEnumType enumType) return null; var mappings = new TypeInfoMappingCollection(); diff --git a/src/Npgsql/Internal/TypeInfoCache.cs b/src/Npgsql/Internal/TypeInfoCache.cs index 7627997822..91a6de9295 100644 --- a/src/Npgsql/Internal/TypeInfoCache.cs +++ b/src/Npgsql/Internal/TypeInfoCache.cs @@ -1,6 +1,5 @@ using System; using System.Collections.Concurrent; -using System.Runtime.CompilerServices; using Npgsql.Internal.Postgres; namespace Npgsql.Internal; @@ -13,7 +12,7 @@ sealed class TypeInfoCache(PgSerializerOptions options, bool validate // Used for reading, occasionally for parameter writing where a db type was given. // 8ns, about 10ns total to scan an array with 6, 7 different clr types under one pg type - readonly ConcurrentDictionary _cacheByPgTypeId = new(); + readonly ConcurrentDictionary _cacheByPgTypeId = new(); static TypeInfoCache() { @@ -26,21 +25,17 @@ static TypeInfoCache() /// /// /// - /// - /// When this flag is true, and both type and pgTypeId are non null, a default info for the pgTypeId can be returned if an exact match - /// can't be found. - /// /// /// - public PgTypeInfo? GetOrAddInfo(Type? type, TPgTypeId? pgTypeId, bool defaultTypeFallback = false) + public PgTypeInfo? GetOrAddInfo(Type? type, TPgTypeId? pgTypeId) { if (pgTypeId is { } id) { if (_cacheByPgTypeId.TryGetValue(id, out var infos)) - if (FindMatch(type, infos, defaultTypeFallback) is { } info) + if (FindMatch(type, infos) is { } info) return info; - return AddEntryById(type, id, infos, defaultTypeFallback); + return AddEntryById(type, id, infos); } if (type is not null) @@ -48,33 +43,22 @@ static TypeInfoCache() return null; - PgTypeInfo? FindMatch(Type? 
type, (Type? Type, PgTypeInfo? Info)[] infos, bool defaultTypeFallback) + PgTypeInfo? FindMatch(Type? type, (Type? Type, PgTypeInfo Info)[] infos) { - PgTypeInfo? defaultInfo = null; - var negativeExactMatch = false; for (var i = 0; i < infos.Length; i++) { ref var item = ref infos[i]; if (item.Type == type) - { - if (item.Info is not null || !defaultTypeFallback) - return item.Info; - negativeExactMatch = true; - } - - if (defaultTypeFallback && item.Type is null) - defaultInfo = item.Info; + return item.Info; } - // We can only return default info if we've seen a negative match (type: typeof(object), info: null) - // Otherwise we might return a previously requested default while the resolvers could produce the exact match. - return negativeExactMatch ? defaultInfo : null; + return null; } PgTypeInfo? AddByType(Type type) { // We don't pass PgTypeId as we're interested in default converters here. - var info = CreateInfo(type, null, options, defaultTypeFallback: false, validatePgTypeIds); + var info = CreateInfo(type, null, options, validatePgTypeIds); return info is null ? null @@ -83,18 +67,18 @@ static TypeInfoCache() : _cacheByClrType[type]; } - PgTypeInfo? AddEntryById(Type? type, TPgTypeId pgTypeId, (Type? Type, PgTypeInfo? Info)[]? infos, bool defaultTypeFallback) + PgTypeInfo? AddEntryById(Type? type, TPgTypeId pgTypeId, (Type? Type, PgTypeInfo Info)[]? infos) { - // We cache negatives (null info) to allow 'object or default' checks to never hit the resolvers after the first lookup. - var info = CreateInfo(type, pgTypeId, options, defaultTypeFallback, validatePgTypeIds); + if (CreateInfo(type, pgTypeId, options, validatePgTypeIds) is not { } info) + return null; - var isDefaultInfo = type is null && info is not null; + var isDefaultInfo = type is null; if (infos is null) { // Also add defaults by their info type to save a future resolver lookup + resize. infos = isDefaultInfo - ? new [] { (type, info), (info!.Type, info) } - : [(type, info)]; + ? 
new [] { (type, info), (info.Type, info) } + : new [] { (type, info) }; if (_cacheByPgTypeId.TryAdd(pgTypeId, infos)) return info; @@ -104,7 +88,7 @@ static TypeInfoCache() while (true) { infos = _cacheByPgTypeId[pgTypeId]; - if (FindMatch(type, infos, defaultTypeFallback) is { } racedInfo) + if (FindMatch(type, infos) is { } racedInfo) return racedInfo; // Also add defaults by their info type to save a future resolver lookup + resize. @@ -113,39 +97,37 @@ static TypeInfoCache() if (isDefaultInfo) { foreach (var oldInfo in oldInfos) - if (oldInfo.Type == info!.Type) + if (oldInfo.Type == info.Type) hasExactType = true; } Array.Resize(ref infos, oldInfos.Length + (isDefaultInfo && !hasExactType ? 2 : 1)); infos[oldInfos.Length] = (type, info); if (isDefaultInfo && !hasExactType) - infos[oldInfos.Length + 1] = (info!.Type, info); + infos[oldInfos.Length + 1] = (info.Type, info); if (_cacheByPgTypeId.TryUpdate(pgTypeId, infos, oldInfos)) return info; } } - static PgTypeInfo? CreateInfo(Type? type, TPgTypeId? typeId, PgSerializerOptions options, bool defaultTypeFallback, bool validatePgTypeIds) + static PgTypeInfo? CreateInfo(Type? type, TPgTypeId? typeId, PgSerializerOptions options, bool validatePgTypeIds) { var pgTypeId = AsPgTypeId(typeId); // Validate that we only pass data types that are supported by the backend. var dataTypeName = pgTypeId is { } id ? 
(DataTypeName?)options.DatabaseInfo.GetDataTypeName(id, validate: validatePgTypeIds) : null; var info = options.TypeInfoResolver.GetTypeInfo(type, dataTypeName, options); - if (info is null && defaultTypeFallback) - { - type = null; - info = options.TypeInfoResolver.GetTypeInfo(type, dataTypeName, options); - } - if (info is null) return null; if (pgTypeId is not null && info.PgTypeId != pgTypeId) throw new InvalidOperationException("A Postgres type was passed but the resolved PgTypeInfo does not have an equal PgTypeId."); - if (type is not null && !info.IsBoxing && info.Type != type) - throw new InvalidOperationException($"A CLR type '{type}' was passed but the resolved PgTypeInfo does not have an equal Type: {info.Type}."); + if (type is not null && info.Type != type) + { + // Types were not equal, throw for IsBoxing = false, otherwise we throw when the returned type isn't assignable to the requested type (after unboxing). + if (!info.IsBoxing || !info.Type.IsAssignableTo(type)) + throw new InvalidOperationException($"A CLR type '{type}' was passed but the resolved PgTypeInfo does not have an equal Type: {info.Type}."); + } return info; } diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index e7ca332a37..4add2e970d 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -2131,8 +2131,8 @@ DataFormat GetDefaultInfo(int ordinal, out PgConverter converter, out Size buffe { var field = RowDescription![ordinal]; - converter = field.ObjectOrDefaultInfo.Converter; - bufferRequirement = field.ObjectOrDefaultInfo.BufferRequirement; + converter = field.ObjectInfo.Converter; + bufferRequirement = field.ObjectInfo.BufferRequirement; return field.DataFormat; } diff --git a/src/Npgsql/Schema/DbColumnSchemaGenerator.cs b/src/Npgsql/Schema/DbColumnSchemaGenerator.cs index 014b107580..e42c6f2505 100644 --- a/src/Npgsql/Schema/DbColumnSchemaGenerator.cs +++ b/src/Npgsql/Schema/DbColumnSchemaGenerator.cs @@ -260,7 +260,7 @@ 
void ColumnPostConfig(NpgsqlDbColumn column, int typeModifier) var serializerOptions = _connection.Connector!.SerializerOptions; column.NpgsqlDbType = column.PostgresType.DataTypeName.ToNpgsqlDbType(); - if (serializerOptions.GetObjectOrDefaultTypeInfoInternal(serializerOptions.ToCanonicalTypeId(column.PostgresType)) is { } typeInfo) + if (serializerOptions.GetDefaultTypeInfo(serializerOptions.ToCanonicalTypeId(column.PostgresType)) is { } typeInfo) { column.DataType = typeInfo.Type; column.IsLong = column.PostgresType.DataTypeName == DataTypeNames.Bytea; From 7ef908db1f18d85bdedaf0d04318a6233dd45d08 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emmanuel=20Andr=C3=A9?= <2341261+manandre@users.noreply.github.com> Date: Sun, 10 Nov 2024 14:14:39 +0100 Subject: [PATCH 486/761] Add NpgsqlDataSource.ReloadTypes{Async} (#5919) --- src/Npgsql/NpgsqlConnection.cs | 4 +- src/Npgsql/NpgsqlDataSource.cs | 24 +++++++++- src/Npgsql/PublicAPI.Unshipped.txt | 4 ++ test/Npgsql.Tests/DataSourceTests.cs | 69 ++++++++++++++++++++++++++++ 4 files changed, 98 insertions(+), 3 deletions(-) diff --git a/src/Npgsql/NpgsqlConnection.cs b/src/Npgsql/NpgsqlConnection.cs index a50d9548be..74290d23f9 100644 --- a/src/Npgsql/NpgsqlConnection.cs +++ b/src/Npgsql/NpgsqlConnection.cs @@ -1897,7 +1897,7 @@ public void ReloadTypes() /// Flushes the type cache for this connection's connection string and reloads the types for this connection only. /// Type changes will appear for other connections only after they are re-opened from the pool. 
/// - public async Task ReloadTypesAsync() + public async Task ReloadTypesAsync(CancellationToken cancellationToken = default) { CheckReady(); @@ -1908,7 +1908,7 @@ public async Task ReloadTypesAsync() NpgsqlTimeout.Infinite, forceReload: true, async: true, - CancellationToken.None).ConfigureAwait(false); + cancellationToken).ConfigureAwait(false); } /// diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index ffde9feda8..da5348e8e2 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -4,7 +4,6 @@ using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.Net.Security; -using System.Security.Cryptography.X509Certificates; using System.Threading; using System.Threading.Tasks; using System.Transactions; @@ -227,6 +226,29 @@ public static NpgsqlDataSource Create(string connectionString) public static NpgsqlDataSource Create(NpgsqlConnectionStringBuilder connectionStringBuilder) => Create(connectionStringBuilder.ToString()); + /// + /// Flushes the type cache for this data source. + /// Type changes will appear for connections only after they are re-opened from the pool. + /// + public void ReloadTypes() + { + using var connection = OpenConnection(); + connection.ReloadTypes(); + } + + /// + /// Flushes the type cache for this data source. + /// Type changes will appear for connections only after they are re-opened from the pool. 
+ /// + public async Task ReloadTypesAsync(CancellationToken cancellationToken = default) + { + var connection = await OpenConnectionAsync(cancellationToken).ConfigureAwait(false); + await using (connection.ConfigureAwait(false)) + { + await connection.ReloadTypesAsync(cancellationToken).ConfigureAwait(false); + } + } + internal async Task Bootstrap( NpgsqlConnector connector, NpgsqlTimeout timeout, diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index b7bdea1d8a..f05c3fdb3f 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -61,6 +61,10 @@ Npgsql.SslNegotiation Npgsql.SslNegotiation.Direct = 1 -> Npgsql.SslNegotiation Npgsql.SslNegotiation.Postgres = 0 -> Npgsql.SslNegotiation override Npgsql.NpgsqlMultiHostDataSource.Clear() -> void +Npgsql.NpgsqlDataSource.ReloadTypes() -> void +Npgsql.NpgsqlDataSource.ReloadTypesAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +Npgsql.NpgsqlConnection.ReloadTypesAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +*REMOVED*Npgsql.NpgsqlConnection.ReloadTypesAsync() -> System.Threading.Tasks.Task! *REMOVED*Npgsql.NpgsqlDataSourceBuilder.MapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! *REMOVED*Npgsql.NpgsqlDataSourceBuilder.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! *REMOVED*Npgsql.NpgsqlDataSourceBuilder.MapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! 
diff --git a/test/Npgsql.Tests/DataSourceTests.cs b/test/Npgsql.Tests/DataSourceTests.cs index ad412ed5c3..7e33d00991 100644 --- a/test/Npgsql.Tests/DataSourceTests.cs +++ b/test/Npgsql.Tests/DataSourceTests.cs @@ -5,6 +5,7 @@ using System.Text.Json.Serialization; using System.Threading.Tasks; using NUnit.Framework; +using static Npgsql.Tests.TestUtil; // ReSharper disable MethodHasAsyncOverload @@ -380,4 +381,72 @@ public async Task ConfigureJsonOptions_is_order_independent() Assert.That(reader.GetFieldValue(0).Id, Is.EqualTo(1)); } } + + [Test] + public async Task ReloadTypes([Values] bool async) + { + await using var adminConnection = await OpenConnectionAsync(); + var type = await GetTempTypeName(adminConnection); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.MapEnum(type); + await using var dataSource = dataSourceBuilder.Build(); + + await using var connection = await dataSource.OpenConnectionAsync(); + await connection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS ENUM ('sad', 'ok', 'happy')"); + + if (async) + await dataSource.ReloadTypesAsync(); + else + dataSource.ReloadTypes(); + + Assert.ThrowsAsync(async () => await connection.ExecuteScalarAsync($"SELECT 'happy'::{type}")); + + // Close connection and reopen to make sure it picks up the new type and mapping from the data source + await connection.CloseAsync(); + await connection.OpenAsync(); + + Assert.DoesNotThrowAsync(async () => await connection.ExecuteScalarAsync($"SELECT 'happy'::{type}")); + } + + [Test] + public async Task ReloadTypes_across_data_sources([Values] bool async) + { + await using var adminConnection = await OpenConnectionAsync(); + var type = await GetTempTypeName(adminConnection); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.MapEnum(type); + await using var dataSource1 = dataSourceBuilder.Build(); + await using var connection1 = await dataSource1.OpenConnectionAsync(); + + await using var dataSource2 = 
dataSourceBuilder.Build(); + await using var connection2 = await dataSource2.OpenConnectionAsync(); + + await connection1.ExecuteNonQueryAsync($"CREATE TYPE {type} AS ENUM ('sad', 'ok', 'happy')"); + + if (async) + await dataSource1.ReloadTypesAsync(); + else + dataSource1.ReloadTypes(); + + Assert.ThrowsAsync(async () => await connection1.ExecuteScalarAsync($"SELECT 'happy'::{type}")); + Assert.ThrowsAsync(async () => await connection2.ExecuteScalarAsync($"SELECT 'happy'::{type}")); + + // Close connection and reopen to check that the new type and mapping is not available in dataSource2 + await connection2.CloseAsync(); + await connection2.OpenAsync(); + + Assert.ThrowsAsync(async () => await connection2.ExecuteScalarAsync($"SELECT 'happy'::{type}")); + + await dataSource2.ReloadTypesAsync(); + + // Close connection2 and reopen to make sure it picks up the new type and mapping from dataSource2 + await connection2.CloseAsync(); + await connection2.OpenAsync(); + + Assert.DoesNotThrowAsync(async () => await connection2.ExecuteScalarAsync($"SELECT 'happy'::{type}")); + } + + enum Mood { Sad, Ok, Happy } } From 06f5ba3c9036d91e0f46a0297c35201fb7cc0eb5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emmanuel=20Andr=C3=A9?= <2341261+manandre@users.noreply.github.com> Date: Sun, 10 Nov 2024 16:39:53 +0100 Subject: [PATCH 487/761] Devcontainer update (#5922) * Update devcontainer * Install the .NET 8 runtime * Update to postgres 17 --- .devcontainer/db/Dockerfile | 4 ++-- .devcontainer/devcontainer.json | 38 ++++++++++++++++++-------------- .devcontainer/docker-compose.yml | 3 +-- .devcontainer/dotnet/Dockerfile | 5 +++++ .vscode/settings.json | 4 ++-- 5 files changed, 31 insertions(+), 23 deletions(-) create mode 100644 .devcontainer/dotnet/Dockerfile diff --git a/.devcontainer/db/Dockerfile b/.devcontainer/db/Dockerfile index 64cc3febb1..85efd91832 100644 --- a/.devcontainer/db/Dockerfile +++ b/.devcontainer/db/Dockerfile @@ -1,3 +1,3 @@ -FROM postgres +FROM postgres:17 RUN 
apt-get update && \ - apt-get install -y --no-install-recommends openssl postgresql-16-postgis-3 + apt-get install -y --no-install-recommends openssl postgresql-17-postgis-3 diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 69115b9ad3..1cdf7d0550 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -5,25 +5,29 @@ "workspaceFolder": "/workspace", - "settings": { - "terminal.integrated.profiles.linux": { - "bash": { - "path": "/bin/bash" - } - }, - "terminal.integrated.defaultProfile.linux": "bash", - "remote.extensionKind": { - "ms-azuretools.vscode-docker": "workspace" + "customizations": { + "vscode": { + "settings": { + "terminal.integrated.profiles.linux": { + "bash": { + "path": "/bin/bash" + } + }, + "terminal.integrated.defaultProfile.linux": "bash", + "remote.extensionKind": { + "ms-azuretools.vscode-docker": "workspace" + } + }, + + "extensions": [ + "ms-dotnettools.csharp", + "formulahendry.dotnet-test-explorer", + "ms-azuretools.vscode-docker", + "mutantdino.resourcemonitor" + ] } }, - - "extensions": [ - "ms-dotnettools.csharp", - "formulahendry.dotnet-test-explorer", - "ms-azuretools.vscode-docker", - "mutantdino.resourcemonitor" - ], - + "forwardPorts": [5432, 5050], "remoteEnv": { diff --git a/.devcontainer/docker-compose.yml b/.devcontainer/docker-compose.yml index e956e66c24..99d9177380 100644 --- a/.devcontainer/docker-compose.yml +++ b/.devcontainer/docker-compose.yml @@ -2,8 +2,7 @@ version: '3' services: npgsql-dev: - # Source for tags: https://mcr.microsoft.com/v2/dotnet/sdk/tags/list - image: mcr.microsoft.com/dotnet/sdk:8.0.100 + build: ./dotnet volumes: - ..:/workspace:cached tty: true diff --git a/.devcontainer/dotnet/Dockerfile b/.devcontainer/dotnet/Dockerfile new file mode 100644 index 0000000000..66aaa421c9 --- /dev/null +++ b/.devcontainer/dotnet/Dockerfile @@ -0,0 +1,5 @@ +# Source for tags: https://mcr.microsoft.com/v2/dotnet/sdk/tags/list +FROM 
mcr.microsoft.com/dotnet/sdk:9.0 + +# "install" the .NET 8 runtime +COPY --from=mcr.microsoft.com/dotnet/sdk:8.0 /usr/share/dotnet/shared /usr/share/dotnet/shared \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json index 3f641af41f..4e57c5f1c2 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,4 +1,4 @@ { - "omnisharp.defaultLaunchSolution": "Npgsql.sln", - "dotnet-test-explorer.testProjectPath": "**/*.Tests.csproj" + "dotnet-test-explorer.testProjectPath": "**/*.Tests.csproj", + "dotnet.defaultSolution": "Npgsql.sln" } \ No newline at end of file From d95db6427bd8aa53a72619cd343dfed40fcc67cd Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Mon, 11 Nov 2024 10:02:20 +0100 Subject: [PATCH 488/761] Refactor bitstring (#5924) --- .../Converters/BitStringConverters.cs | 40 +++++++------------ 1 file changed, 15 insertions(+), 25 deletions(-) diff --git a/src/Npgsql/Internal/Converters/BitStringConverters.cs b/src/Npgsql/Internal/Converters/BitStringConverters.cs index d90f6f5c6e..d0d6327a20 100644 --- a/src/Npgsql/Internal/Converters/BitStringConverters.cs +++ b/src/Npgsql/Internal/Converters/BitStringConverters.cs @@ -11,19 +11,16 @@ namespace Npgsql.Internal.Converters; -static class BitStringHelpers +file static class BitStringHelpers { - public static int GetByteLengthFromBits(int n) + public static int GetByteCountFromBitCount(int n) { const int BitShiftPerByte = 3; Debug.Assert(n >= 0); // Due to sign extension, we don't need to special case for n == 0, since ((n - 1) >> 3) + 1 = 0 // This doesn't hold true for ((n - 1) / 8) + 1, which equals 1. 
- return (int)((uint)(n - 1 + (1 << BitShiftPerByte)) >> BitShiftPerByte); + return (n - 1 + (1 << BitShiftPerByte)) >>> BitShiftPerByte; } - - // http://graphics.stanford.edu/~seander/bithacks.html#ReverseByteWith64Bits - public static byte ReverseBits(byte b) => (byte)(((b * 0x80200802UL) & 0x0884422110UL) * 0x0101010101UL >> 32); } sealed class BitArrayBitStringConverter : PgStreamingConverter @@ -34,7 +31,7 @@ public override BitArray Read(PgReader reader) reader.Buffer(sizeof(int)); var bits = reader.ReadInt32(); - var bytes = new byte[GetByteLengthFromBits(bits)]; + var bytes = new byte[GetByteCountFromBitCount(bits)]; reader.ReadBytes(bytes); return ReadValue(bytes, bits); } @@ -44,7 +41,7 @@ public override async ValueTask ReadAsync(PgReader reader, Cancellatio await reader.BufferAsync(sizeof(int), cancellationToken).ConfigureAwait(false); var bits = reader.ReadInt32(); - var bytes = new byte[GetByteLengthFromBits(bits)]; + var bytes = new byte[GetByteCountFromBitCount(bits)]; await reader.ReadBytesAsync(bytes, cancellationToken).ConfigureAwait(false); return ReadValue(bytes, bits); } @@ -58,10 +55,13 @@ internal static BitArray ReadValue(byte[] bytes, int bits) } return new(bytes) { Length = bits }; + + // https://graphics.stanford.edu/~seander/bithacks.html#ReverseByteWith64Bits + static byte ReverseBits(byte b) => (byte)(((b * 0x80200802UL) & 0x0884422110UL) * 0x0101010101UL >> 32); } public override Size GetSize(SizeContext context, BitArray value, ref object? 
writeState) - => sizeof(int) + GetByteLengthFromBits(value.Length); + => sizeof(int) + GetByteCountFromBitCount(value.Length); public override void Write(PgWriter writer, BitArray value) => Write(async: false, writer, value, CancellationToken.None).GetAwaiter().GetResult(); @@ -97,11 +97,9 @@ async ValueTask Write(bool async, PgWriter writer, BitArray value, CancellationT sealed class BitVector32BitStringConverter : PgBufferedConverter { - static int MaxSize => sizeof(int) + sizeof(int); - public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) { - bufferRequirements = BufferRequirements.Create(Size.CreateUpperBound(MaxSize)); + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(int) + sizeof(int)); return format is DataFormat.Binary; } @@ -111,7 +109,7 @@ protected override BitVector32 ReadCore(PgReader reader) throw new InvalidCastException("Can't read a BIT(N) with more than 32 bits to BitVector32, only up to BIT(32)."); var bits = reader.ReadInt32(); - return GetByteLengthFromBits(bits) switch + return GetByteCountFromBitCount(bits) switch { 4 => new(reader.ReadInt32()), 3 => new((reader.ReadInt16() << 8) + reader.ReadByte()), @@ -121,18 +119,10 @@ protected override BitVector32 ReadCore(PgReader reader) }; } - public override Size GetSize(SizeContext context, BitVector32 value, ref object? writeState) - => value.Data is 0 ? 
4 : MaxSize; - protected override void WriteCore(PgWriter writer, BitVector32 value) { - if (value.Data == 0) - writer.WriteInt32(0); - else - { - writer.WriteInt32(32); - writer.WriteInt32(value.Data); - } + writer.WriteInt32(32); + writer.WriteInt32(value.Data); } } @@ -179,7 +169,7 @@ async ValueTask Read(bool async, PgReader reader, CancellationToken canc await reader.Buffer(async, sizeof(int), cancellationToken).ConfigureAwait(false); var bits = reader.ReadInt32(); - var bytes = new byte[GetByteLengthFromBits(bits)]; + var bytes = new byte[GetByteCountFromBitCount(bits)]; if (async) await reader.ReadBytesAsync(bytes, cancellationToken).ConfigureAwait(false); else @@ -198,7 +188,7 @@ public override Size GetSize(SizeContext context, string value, ref object? writ if (value.AsSpan().IndexOfAnyExcept('0', '1') is not -1 and var index) throw new ArgumentException($"Invalid bitstring character '{value[index]}' at index: {index}", nameof(value)); - return sizeof(int) + GetByteLengthFromBits(value.Length); + return sizeof(int) + GetByteCountFromBitCount(value.Length); } public override void Write(PgWriter writer, string value) From 196f40589bfa89463874a3beec1c98b235669b50 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Tue, 12 Nov 2024 15:52:41 +0100 Subject: [PATCH 489/761] Obsolete ServerCompatibilityLevel.Redshift (#5927) Closes #5926 --- src/Npgsql/NpgsqlConnectionStringBuilder.cs | 3 +++ src/Npgsql/NpgsqlDataSource.cs | 3 +-- src/Npgsql/PostgresDatabaseInfo.cs | 9 +++++--- test/Npgsql.Tests/ReaderNewSchemaTests.cs | 24 ++++++++++----------- test/Npgsql.Tests/ReaderOldSchemaTests.cs | 6 +++--- test/Npgsql.Tests/TestUtil.cs | 7 ++++-- 6 files changed, 30 insertions(+), 22 deletions(-) diff --git a/src/Npgsql/NpgsqlConnectionStringBuilder.cs b/src/Npgsql/NpgsqlConnectionStringBuilder.cs index b18a310ab3..8639fc1029 100644 --- a/src/Npgsql/NpgsqlConnectionStringBuilder.cs +++ b/src/Npgsql/NpgsqlConnectionStringBuilder.cs @@ -1637,10 +1637,13 @@ public enum 
ServerCompatibilityMode /// No special server compatibility mode is active /// None, + /// /// The server is an Amazon Redshift instance. /// + [Obsolete("ServerCompatibilityMode.Redshift no longer does anything and can be safely removed.")] Redshift, + /// /// The server is doesn't support full type loading from the PostgreSQL catalogs, support the basic set /// of types via information hardcoded inside Npgsql. diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index da5348e8e2..e897b181d3 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -292,8 +292,7 @@ internal async Task Bootstrap( ArrayNullabilityMode = Settings.ArrayNullabilityMode, EnableDateTimeInfinityConversions = !Statics.DisableDateTimeInfinityConversions, TextEncoding = connector.TextEncoding, - DefaultNameTranslator = _defaultNameTranslator, - + DefaultNameTranslator = _defaultNameTranslator }; IsBootstrapped = true; diff --git a/src/Npgsql/PostgresDatabaseInfo.cs b/src/Npgsql/PostgresDatabaseInfo.cs index 9daa5060f4..1670954a9c 100644 --- a/src/Npgsql/PostgresDatabaseInfo.cs +++ b/src/Npgsql/PostgresDatabaseInfo.cs @@ -44,19 +44,23 @@ class PostgresDatabaseInfo : NpgsqlDatabaseInfo /// List? _types; + bool? _isRedshift; + /// protected override IEnumerable GetTypes() => _types ?? (IEnumerable)Array.Empty(); /// /// The PostgreSQL version string as returned by the version() function. Populated during loading. /// - public string LongVersion { get; set; } = default!; + public string LongVersion { get; set; } = ""; /// /// True if the backend is Amazon Redshift; otherwise, false. /// - public bool IsRedshift { get; private set; } + public bool IsRedshift => _isRedshift ??= LongVersion.Contains("redshift", StringComparison.OrdinalIgnoreCase); + // Note that UNLISTEN is only needed for the reset message, but those don't get generated for Redshift anyway because e.g. DISCARD + // isn't supported there either. 
So the IsRedshift check isn't actually used, but is here for completeness. /// public override bool SupportsUnlisten => Version.IsGreaterOrEqual(6, 4) && !IsRedshift; @@ -97,7 +101,6 @@ internal async Task LoadPostgresInfo(NpgsqlConnector conn, NpgsqlTimeout timeout conn.PostgresParameters.TryGetValue("integer_datetimes", out var intDateTimes) && intDateTimes == "on"; - IsRedshift = conn.Settings.ServerCompatibilityMode == ServerCompatibilityMode.Redshift; _types = await LoadBackendTypes(conn, timeout, async).ConfigureAwait(false); } diff --git a/test/Npgsql.Tests/ReaderNewSchemaTests.cs b/test/Npgsql.Tests/ReaderNewSchemaTests.cs index a9a11d5ce0..79b4b38ddb 100644 --- a/test/Npgsql.Tests/ReaderNewSchemaTests.cs +++ b/test/Npgsql.Tests/ReaderNewSchemaTests.cs @@ -204,7 +204,7 @@ public async Task ColumnAttributeNumber() public async Task ColumnSize() { using var conn = await OpenConnectionAsync(); - IgnoreOnRedshift(conn, "Column size is never unlimited on Redshift"); + await IgnoreOnRedshift(conn, "Column size is never unlimited on Redshift"); var table = await CreateTempTable(conn, "bounded VARCHAR(30), unbounded VARCHAR"); using var cmd = new NpgsqlCommand($"SELECT bounded,unbounded,'a'::VARCHAR(10),'b'::VARCHAR FROM {table}", conn); @@ -220,7 +220,7 @@ public async Task ColumnSize() public async Task IsAutoIncrement() { await using var conn = await OpenConnectionAsync(); - IgnoreOnRedshift(conn, "Serial columns not support on Redshift"); + await IgnoreOnRedshift(conn, "Serial columns not support on Redshift"); var table = await CreateTempTable(conn, "serial SERIAL, int INT"); @@ -236,7 +236,7 @@ public async Task IsAutoIncrement() public async Task IsAutoIncrement_identity() { await using var conn = await OpenConnectionAsync(); - IgnoreOnRedshift(conn, "Identity columns not support on Redshift"); + await IgnoreOnRedshift(conn, "Identity columns not support on Redshift"); MinimumPgVersion(conn, "10.0", "IDENTITY introduced in PostgreSQL 10"); var table = @@ 
-253,7 +253,7 @@ public async Task IsAutoIncrement_identity() public async Task IsIdentity() { await using var conn = await OpenConnectionAsync(); - IgnoreOnRedshift(conn, "Identity columns not support on Redshift"); + await IgnoreOnRedshift(conn, "Identity columns not support on Redshift"); MinimumPgVersion(conn, "10.0", "IDENTITY introduced in PostgreSQL 10"); var table = await CreateTempTable( conn, @@ -273,7 +273,7 @@ public async Task IsIdentity() public async Task IsKey() { using var conn = await OpenConnectionAsync(); - IgnoreOnRedshift(conn, "Key not supported in reader schema on Redshift"); + await IgnoreOnRedshift(conn, "Key not supported in reader schema on Redshift"); var table = await CreateTempTable(conn, "id INT PRIMARY KEY, non_id INT, uniq INT UNIQUE"); using var cmd = new NpgsqlCommand($"SELECT id,non_id,uniq,8 FROM {table}", conn); @@ -294,7 +294,7 @@ public async Task IsKey() public async Task IsKey_composite() { using var conn = await OpenConnectionAsync(); - IgnoreOnRedshift(conn, "Key not supported in reader schema on Redshift"); + await IgnoreOnRedshift(conn, "Key not supported in reader schema on Redshift"); var table = await CreateTempTable(conn, "id1 INT, id2 INT, PRIMARY KEY (id1, id2)"); using var cmd = new NpgsqlCommand($"SELECT id1,id2 FROM {table}", conn); @@ -308,7 +308,7 @@ public async Task IsKey_composite() public async Task IsLong() { using var conn = await OpenConnectionAsync(); - IgnoreOnRedshift(conn, "bytea not supported on Redshift"); + await IgnoreOnRedshift(conn, "bytea not supported on Redshift"); var table = await CreateTempTable(conn, "long BYTEA, non_long INT"); using var cmd = new NpgsqlCommand($"SELECT long, non_long, 8 FROM {table}", conn); @@ -351,7 +351,7 @@ public async Task IsReadOnly_on_non_column() public async Task IsUnique() { using var conn = await OpenConnectionAsync(); - IgnoreOnRedshift(conn, "Unique not supported in reader schema on Redshift"); + await IgnoreOnRedshift(conn, "Unique not supported in 
reader schema on Redshift"); var table = await GetTempTableName(conn); await conn.ExecuteNonQueryAsync($@" @@ -373,7 +373,7 @@ await conn.ExecuteNonQueryAsync($@" public async Task NumericPrecision() { using var conn = await OpenConnectionAsync(); - IgnoreOnRedshift(conn, "Precision is never unlimited on Redshift"); + await IgnoreOnRedshift(conn, "Precision is never unlimited on Redshift"); var table = await CreateTempTable(conn, "a NUMERIC(8), b NUMERIC, c INTEGER"); using var cmd = new NpgsqlCommand($"SELECT a,b,c,8.3::NUMERIC(8) FROM {table}", conn); @@ -389,7 +389,7 @@ public async Task NumericPrecision() public async Task NumericScale() { using var conn = await OpenConnectionAsync(); - IgnoreOnRedshift(conn, "Scale is never unlimited on Redshift"); + await IgnoreOnRedshift(conn, "Scale is never unlimited on Redshift"); var table = await CreateTempTable(conn, "a NUMERIC(8,5), b NUMERIC, c INTEGER"); using var cmd = new NpgsqlCommand($"SELECT a,b,c,8.3::NUMERIC(8,5) FROM {table}", conn); @@ -431,7 +431,7 @@ public async Task DataType_unknown_type() public async Task DataType_with_composite() { await using var adminConnection = await OpenConnectionAsync(); - IgnoreOnRedshift(adminConnection, "Composite types not support on Redshift"); + await IgnoreOnRedshift(adminConnection, "Composite types not support on Redshift"); var type = await GetTempTypeName(adminConnection); await adminConnection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS (foo int)"); var tableName = await CreateTempTable(adminConnection, $"comp {type}"); @@ -657,7 +657,7 @@ public async Task Domain_type() // if (IsMultiplexing) // Assert.Ignore("Multiplexing: ReloadTypes"); using var conn = await OpenConnectionAsync(); - IgnoreOnRedshift(conn, "Domain types not support on Redshift"); + await IgnoreOnRedshift(conn, "Domain types not support on Redshift"); const string domainTypeName = "my_domain"; var schema = await CreateTempSchema(conn); diff --git a/test/Npgsql.Tests/ReaderOldSchemaTests.cs 
b/test/Npgsql.Tests/ReaderOldSchemaTests.cs index 604af68789..43ac627f46 100644 --- a/test/Npgsql.Tests/ReaderOldSchemaTests.cs +++ b/test/Npgsql.Tests/ReaderOldSchemaTests.cs @@ -55,7 +55,7 @@ public async Task Primary_key() public async Task IsAutoIncrement() { await using var conn = await OpenConnectionAsync(); - IgnoreOnRedshift(conn, "Serial columns not supported on Redshift"); + await IgnoreOnRedshift(conn, "Serial columns not supported on Redshift"); var table = await CreateTempTable(conn, "serial SERIAL, int INT"); @@ -72,7 +72,7 @@ public async Task IsAutoIncrement() public async Task IsAutoIncrement_identity() { await using var conn = await OpenConnectionAsync(); - IgnoreOnRedshift(conn, "Serial columns not supported on Redshift"); + await IgnoreOnRedshift(conn, "Serial columns not supported on Redshift"); MinimumPgVersion(conn, "10.0", "IDENTITY introduced in PostgreSQL 10"); var table = @@ -90,7 +90,7 @@ public async Task IsAutoIncrement_identity() public async Task IsIdentity() { await using var conn = await OpenConnectionAsync(); - IgnoreOnRedshift(conn, "Identity columns not support on Redshift"); + await IgnoreOnRedshift(conn, "Identity columns not support on Redshift"); MinimumPgVersion(conn, "10.0", "IDENTITY introduced in PostgreSQL 10"); var table = await CreateTempTable( conn, diff --git a/test/Npgsql.Tests/TestUtil.cs b/test/Npgsql.Tests/TestUtil.cs index 1d68c130f5..c751ea0018 100644 --- a/test/Npgsql.Tests/TestUtil.cs +++ b/test/Npgsql.Tests/TestUtil.cs @@ -86,9 +86,12 @@ public static void MaximumPgVersionExclusive(NpgsqlConnection conn, string maxVe static readonly Version MinCreateExtensionVersion = new(9, 1); - public static void IgnoreOnRedshift(NpgsqlConnection conn, string? ignoreText = null) + public static async Task IgnoreOnRedshift(NpgsqlConnection conn, string? 
ignoreText = null) { - if (new NpgsqlConnectionStringBuilder(conn.ConnectionString).ServerCompatibilityMode == ServerCompatibilityMode.Redshift) + await using var command = conn.CreateCommand(); + command.CommandText = "SELECT version()"; + var version = (string)(await command.ExecuteScalarAsync())!; + if (version.Contains("redshift", StringComparison.OrdinalIgnoreCase)) { var msg = "Test ignored on Redshift"; if (ignoreText != null) From 3efc720ede9604df33dd170af973764150c9e4d3 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Tue, 12 Nov 2024 20:35:36 +0100 Subject: [PATCH 490/761] Use dotnet SDK 9.0.100 GA (#5929) --- .github/workflows/build.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/native-aot.yml | 2 +- .github/workflows/rich-code-nav.yml | 2 +- Directory.Build.props | 2 +- global.json | 4 ++-- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 28f84df8e8..20c6b93478 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -15,7 +15,7 @@ concurrency: cancel-in-progress: true env: - dotnet_sdk_version: '9.0.100-rc.2.24474.11' + dotnet_sdk_version: '9.0.100' postgis_version: 3 DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true # Windows comes with PG pre-installed, and defines the PGPASSWORD environment variable. 
Remove it as it interferes diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 5300a780e4..013421b14d 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -32,7 +32,7 @@ concurrency: cancel-in-progress: true env: - dotnet_sdk_version: '9.0.100-rc.2.24474.11' + dotnet_sdk_version: '9.0.100' DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true jobs: diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index 31f5ddd2f1..2f1b94a0d5 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -15,7 +15,7 @@ concurrency: cancel-in-progress: true env: - dotnet_sdk_version: '8.0.203' + dotnet_sdk_version: '9.0.100' DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true AOT_Compat: | param([string]$targetFramework) diff --git a/.github/workflows/rich-code-nav.yml b/.github/workflows/rich-code-nav.yml index 927c1b268f..278b05c95a 100644 --- a/.github/workflows/rich-code-nav.yml +++ b/.github/workflows/rich-code-nav.yml @@ -4,7 +4,7 @@ on: workflow_dispatch: env: - dotnet_sdk_version: '8.0.203' + dotnet_sdk_version: '9.0.100' DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true jobs: diff --git a/Directory.Build.props b/Directory.Build.props index 298bcc5ac6..0117142918 100644 --- a/Directory.Build.props +++ b/Directory.Build.props @@ -1,6 +1,6 @@  - 9.0.0-preview.1 + 9.0.0 latest true enable diff --git a/global.json b/global.json index bc9c875848..67db748a1b 100644 --- a/global.json +++ b/global.json @@ -1,7 +1,7 @@ { "sdk": { - "version": "9.0.100-rc.2.24474.11", + "version": "9.0.100", "rollForward": "latestMajor", - "allowPrerelease": "true" + "allowPrerelease": "false" } } From 6fb947ff68025e9a7ef0a6381a5fc6bd066656d3 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Sat, 16 Nov 2024 10:34:27 +0200 Subject: [PATCH 491/761] Refactor tracing configuration API (#5928) Continues #5853 Co-authored-by: Nikita Kazmin Co-authored-by: Nino Floris --- 
src/Npgsql/NpgsqlActivitySource.cs | 4 +- src/Npgsql/NpgsqlCommand.cs | 29 +++-- src/Npgsql/NpgsqlDataSourceBuilder.cs | 7 +- src/Npgsql/NpgsqlDataSourceConfiguration.cs | 2 +- src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 17 +-- src/Npgsql/NpgsqlTracingOptions.cs | 48 --------- src/Npgsql/NpgsqlTracingOptionsBuilder.cs | 112 ++++++++++++++++++++ src/Npgsql/PublicAPI.Shipped.txt | 2 - src/Npgsql/PublicAPI.Unshipped.txt | 26 ++--- 9 files changed, 152 insertions(+), 95 deletions(-) delete mode 100644 src/Npgsql/NpgsqlTracingOptions.cs create mode 100644 src/Npgsql/NpgsqlTracingOptionsBuilder.cs diff --git a/src/Npgsql/NpgsqlActivitySource.cs b/src/Npgsql/NpgsqlActivitySource.cs index aa1d6b28a5..667728a89a 100644 --- a/src/Npgsql/NpgsqlActivitySource.cs +++ b/src/Npgsql/NpgsqlActivitySource.cs @@ -96,9 +96,9 @@ internal static void Enrich(Activity activity, NpgsqlConnector connector) } } - internal static void ReceivedFirstResponse(Activity activity, NpgsqlTracingOptions? tracingSettings) + internal static void ReceivedFirstResponse(Activity activity, NpgsqlTracingOptions tracingOptions) { - if (!activity.IsAllDataRequested || tracingSettings?.EnableFirstResponseEvent == false) + if (!activity.IsAllDataRequested || !tracingOptions.EnableFirstResponseEvent) return; var activityEvent = new ActivityEvent("received-first-response"); diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index d0c2bc43c4..012ce4cf56 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -1710,22 +1710,19 @@ internal void Reset() #region Tracing - internal void TraceCommandStart(NpgsqlConnectionStringBuilder settings, NpgsqlTracingOptions? tracingSettings) + internal void TraceCommandStart(NpgsqlConnectionStringBuilder settings, NpgsqlTracingOptions tracingOptions) { Debug.Assert(CurrentActivity is null); + if (NpgsqlActivitySource.IsEnabled) { - (var enableTracing, string? 
spanName) = (true, null); - if (tracingSettings is not null) - { - enableTracing = WrappingBatch is not null - ? tracingSettings.FilterBatch?.Invoke(WrappingBatch) ?? true - : tracingSettings.FilterCommand?.Invoke(this) ?? true; + var enableTracing = WrappingBatch is not null + ? tracingOptions.BatchFilter?.Invoke(WrappingBatch) ?? true + : tracingOptions.CommandFilter?.Invoke(this) ?? true; - spanName = WrappingBatch is not null - ? tracingSettings.ProvideSpanNameForBatch?.Invoke(WrappingBatch) - : tracingSettings.ProvideSpanNameForCommand?.Invoke(this); - } + var spanName = WrappingBatch is not null + ? tracingOptions.BatchSpanNameProvider?.Invoke(WrappingBatch) + : tracingOptions.CommandSpanNameProvider?.Invoke(this); if (enableTracing) { @@ -1743,18 +1740,18 @@ internal void TraceCommandEnrich(NpgsqlConnector connector) if (CurrentActivity is not null) { NpgsqlActivitySource.Enrich(CurrentActivity, connector); - var tracingSettings = connector.DataSource.Configuration.TracingOptions; + var tracingOptions = connector.DataSource.Configuration.TracingOptions; if (WrappingBatch is not null) - tracingSettings?.EnrichWithBatch?.Invoke(CurrentActivity, WrappingBatch); + tracingOptions.BatchEnrichmentCallback?.Invoke(CurrentActivity, WrappingBatch); else - tracingSettings?.EnrichWithCommand?.Invoke(CurrentActivity, this); + tracingOptions.CommandEnrichmentCallback?.Invoke(CurrentActivity, this); } } - internal void TraceReceivedFirstResponse(NpgsqlTracingOptions? 
tracingSettings) + internal void TraceReceivedFirstResponse(NpgsqlTracingOptions tracingOptions) { if (CurrentActivity is not null) - NpgsqlActivitySource.ReceivedFirstResponse(CurrentActivity, tracingSettings); + NpgsqlActivitySource.ReceivedFirstResponse(CurrentActivity, tracingOptions); } internal void TraceCommandStop() diff --git a/src/Npgsql/NpgsqlDataSourceBuilder.cs b/src/Npgsql/NpgsqlDataSourceBuilder.cs index 8bb72ccede..8e2003786f 100644 --- a/src/Npgsql/NpgsqlDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlDataSourceBuilder.cs @@ -122,13 +122,12 @@ public NpgsqlDataSourceBuilder EnableParameterLogging(bool parameterLoggingEnabl } /// - /// Configures tracing options for the DataSource. + /// Configures OpenTelemetry tracing options. /// - /// Tracing options for the DataSource. /// The same builder instance so that multiple calls can be chained. - public NpgsqlDataSourceBuilder ConfigureTracingOptions(NpgsqlTracingOptions tracingOptions) + public NpgsqlDataSourceBuilder ConfigureTracing(Action configureAction) { - _internalBuilder.ConfigureTracingOptions(tracingOptions); + _internalBuilder.ConfigureTracing(configureAction); return this; } diff --git a/src/Npgsql/NpgsqlDataSourceConfiguration.cs b/src/Npgsql/NpgsqlDataSourceConfiguration.cs index 2b2d09cded..981831d595 100644 --- a/src/Npgsql/NpgsqlDataSourceConfiguration.cs +++ b/src/Npgsql/NpgsqlDataSourceConfiguration.cs @@ -9,7 +9,7 @@ namespace Npgsql; sealed record NpgsqlDataSourceConfiguration(string? Name, NpgsqlLoggingConfiguration LoggingConfiguration, - NpgsqlTracingOptions? TracingOptions, + NpgsqlTracingOptions TracingOptions, TransportSecurityHandler TransportSecurityHandler, IntegratedSecurityHandler userCertificateValidationCallback, Action? 
SslClientAuthenticationOptionsCallback, diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index a3097b5089..2930c29d63 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -30,7 +30,7 @@ public sealed class NpgsqlSlimDataSourceBuilder : INpgsqlTypeMapper ILoggerFactory? _loggerFactory; bool _sensitiveDataLoggingEnabled; - NpgsqlTracingOptions? _tracingOptions; + List>? _tracingOptionsBuilderCallbacks; TransportSecurityHandler _transportSecurityHandler = new(); RemoteCertificateValidationCallback? _userCertificateValidationCallback; @@ -119,13 +119,13 @@ public NpgsqlSlimDataSourceBuilder EnableParameterLogging(bool parameterLoggingE } /// - /// Configures tracing options for the DataSource. + /// Configures OpenTelemetry tracing options. /// - /// Tracing options for the DataSource. /// The same builder instance so that multiple calls can be chained. - public NpgsqlSlimDataSourceBuilder ConfigureTracingOptions(NpgsqlTracingOptions tracingOptions) + public NpgsqlSlimDataSourceBuilder ConfigureTracing(Action configureAction) { - _tracingOptions = tracingOptions; + _tracingOptionsBuilderCallbacks ??= new(); + _tracingOptionsBuilderCallbacks.Add(configureAction); return this; } @@ -806,12 +806,17 @@ NpgsqlDataSourceConfiguration PrepareConfiguration() ConfigureDefaultFactories(this); + var tracingOptionsBuilder = new NpgsqlTracingOptionsBuilder(); + foreach (var callback in _tracingOptionsBuilderCallbacks ?? (IEnumerable>)[]) + callback.Invoke(tracingOptionsBuilder); + var tracingOptions = tracingOptionsBuilder.Build(); + return new( Name, _loggerFactory is null ? 
NpgsqlLoggingConfiguration.NullConfiguration : new NpgsqlLoggingConfiguration(_loggerFactory, _sensitiveDataLoggingEnabled), - _tracingOptions, + tracingOptions, _transportSecurityHandler, _integratedSecurityHandler, sslClientAuthenticationOptionsCallback, diff --git a/src/Npgsql/NpgsqlTracingOptions.cs b/src/Npgsql/NpgsqlTracingOptions.cs deleted file mode 100644 index 7fbf6254dc..0000000000 --- a/src/Npgsql/NpgsqlTracingOptions.cs +++ /dev/null @@ -1,48 +0,0 @@ -using System; -using System.Diagnostics; - -namespace Npgsql; - -/// -/// Options to configure Npgsql's support for OpenTelemetry tracing. -/// -public class NpgsqlTracingOptions -{ - /// - /// Gets or sets a filter function that determines whether or not to - /// collect telemetry on a per basis. - /// - public Func? FilterCommand { get; set; } - - /// - /// Gets or sets an action to enrich an with . - /// - public Action? EnrichWithCommand { get; set; } - - /// - /// Gets or sets a function that provides a span's name on a per basis. - /// - public Func? ProvideSpanNameForCommand { get; set; } - - /// - /// Gets or sets a filter function that determines whether or not to - /// collect telemetry on a per basis. - /// - public Func? FilterBatch { get; set; } - - /// - /// Gets or sets an action to enrich an with . - /// - public Action? EnrichWithBatch { get; set; } - - /// - /// Gets or sets a function that provides a span's name on a per basis. - /// - public Func? ProvideSpanNameForBatch { get; set; } - - /// - /// Gets or sets a value indicating whether to enable the "time-to-first-read" event. - /// Default is true to preserve existing behavior. 
- /// - public bool EnableFirstResponseEvent { get; set; } = true; -} diff --git a/src/Npgsql/NpgsqlTracingOptionsBuilder.cs b/src/Npgsql/NpgsqlTracingOptionsBuilder.cs new file mode 100644 index 0000000000..1da344553f --- /dev/null +++ b/src/Npgsql/NpgsqlTracingOptionsBuilder.cs @@ -0,0 +1,112 @@ +using System; +using System.Diagnostics; + +namespace Npgsql; + +/// +/// A builder to configure Npgsql's support for OpenTelemetry tracing. +/// +public sealed class NpgsqlTracingOptionsBuilder +{ + Func? _commandFilter; + Func? _batchFilter; + Action? _commandEnrichmentCallback; + Action? _batchEnrichmentCallback; + Func? _commandSpanNameProvider; + Func? _batchSpanNameProvider; + bool _enableFirstResponseEvent = true; + + internal NpgsqlTracingOptionsBuilder() + { + } + + /// + /// Configures a filter function that determines whether to emit tracing information for an . + /// By default, tracing information is emitted for all commands. + /// + public NpgsqlTracingOptionsBuilder ConfigureCommandFilter(Func? commandFilter) + { + _commandFilter = commandFilter; + return this; + } + + /// + /// Configures a filter function that determines whether to emit tracing information for an . + /// By default, tracing information is emitted for all batches. + /// + public NpgsqlTracingOptionsBuilder ConfigureBatchFilter(Func? batchFilter) + { + _batchFilter = batchFilter; + return this; + } + + /// + /// Configures a callback that can enrich the emitted for the given . + /// + public NpgsqlTracingOptionsBuilder ConfigureCommandEnrichmentCallback(Action? commandEnrichmentCallback) + { + _commandEnrichmentCallback = commandEnrichmentCallback; + return this; + } + + /// + /// Configures a callback that can enrich the emitted for the given . + /// + public NpgsqlTracingOptionsBuilder ConfigureBatchEnrichmentCallback(Action? 
batchEnrichmentCallback) + { + _batchEnrichmentCallback = batchEnrichmentCallback; + return this; + } + + /// + /// Configures a callback that provides the tracing span's name for an . If null, the default standard + /// span name is used, which is the database name. + /// + public NpgsqlTracingOptionsBuilder ConfigureCommandSpanNameProvider(Func? commandSpanNameProvider) + { + _commandSpanNameProvider = commandSpanNameProvider; + return this; + } + + /// + /// Configures a callback that provides the tracing span's name for an . If null, the default standard + /// span name is used, which is the database name. + /// + public NpgsqlTracingOptionsBuilder ConfigureBatchSpanNameProvider(Func? batchSpanNameProvider) + { + _batchSpanNameProvider = batchSpanNameProvider; + return this; + } + + /// + /// Configures whether to emit the "time-to-first-read" event. + /// Enabled by default to preserve existing behavior. + /// + public NpgsqlTracingOptionsBuilder EnableFirstResponseEvent(bool enable = true) + { + _enableFirstResponseEvent = enable; + return this; + } + + internal NpgsqlTracingOptions Build() => new() + { + CommandFilter = _commandFilter, + BatchFilter = _batchFilter, + CommandEnrichmentCallback = _commandEnrichmentCallback, + BatchEnrichmentCallback = _batchEnrichmentCallback, + CommandSpanNameProvider = _commandSpanNameProvider, + BatchSpanNameProvider = _batchSpanNameProvider, + EnableFirstResponseEvent = _enableFirstResponseEvent + }; +} + +sealed class NpgsqlTracingOptions +{ + internal Func? CommandFilter { get; init; } + internal Func? BatchFilter { get; init; } + internal Action? CommandEnrichmentCallback { get; init; } + internal Action? BatchEnrichmentCallback { get; init; } + internal Func? CommandSpanNameProvider { get; init; } + internal Func? 
BatchSpanNameProvider { get; init; } + internal bool EnableFirstResponseEvent { get; init; } +} diff --git a/src/Npgsql/PublicAPI.Shipped.txt b/src/Npgsql/PublicAPI.Shipped.txt index 3ec604ddc0..3318b574b1 100644 --- a/src/Npgsql/PublicAPI.Shipped.txt +++ b/src/Npgsql/PublicAPI.Shipped.txt @@ -767,8 +767,6 @@ Npgsql.NpgsqlSlimDataSourceBuilder.UsePhysicalConnectionInitializer(System.Actio Npgsql.NpgsqlSlimDataSourceBuilder.UseRootCertificate(System.Security.Cryptography.X509Certificates.X509Certificate2? rootCertificate) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UseRootCertificateCallback(System.Func? rootCertificateCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UseUserCertificateValidationCallback(System.Net.Security.RemoteCertificateValidationCallback! userCertificateValidationCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlTracingOptions -Npgsql.NpgsqlTracingOptions.NpgsqlTracingOptions() -> void Npgsql.NpgsqlTransaction Npgsql.NpgsqlTransaction.Connection.get -> Npgsql.NpgsqlConnection? Npgsql.PostgresErrorCodes diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index f05c3fdb3f..731bfe82fe 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -9,12 +9,12 @@ Npgsql.NpgsqlDataSourceBuilder.MapComposite(System.Type! clrType, string? pgName Npgsql.NpgsqlDataSourceBuilder.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.MapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.MapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlDataSourceBuilder! -Npgsql.NpgsqlDataSourceBuilder.ConfigureTracingOptions(Npgsql.NpgsqlTracingOptions! 
tracingOptions) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.ConfigureTracing(System.Action! configureAction) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.UseNegotiateOptionsCallback(System.Action? negotiateOptionsCallback) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.UseSslClientAuthenticationOptionsCallback(System.Action? sslClientAuthenticationOptionsCallback) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlMetricsOptions Npgsql.NpgsqlMetricsOptions.NpgsqlMetricsOptions() -> void -Npgsql.NpgsqlSlimDataSourceBuilder.ConfigureTracingOptions(Npgsql.NpgsqlTracingOptions! tracingOptions) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.ConfigureTracing(System.Action! configureAction) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableGeometricTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableJsonTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableNetworkTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! @@ -24,20 +24,14 @@ Npgsql.NpgsqlSlimDataSourceBuilder.MapEnum(System.Type! clrType, string? pgName Npgsql.NpgsqlSlimDataSourceBuilder.MapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UseNegotiateOptionsCallback(System.Action? negotiateOptionsCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UseSslClientAuthenticationOptionsCallback(System.Action? sslClientAuthenticationOptionsCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlTracingOptions.EnableFirstResponseEvent.get -> bool -Npgsql.NpgsqlTracingOptions.EnableFirstResponseEvent.set -> void -Npgsql.NpgsqlTracingOptions.EnrichWithBatch.get -> System.Action? 
-Npgsql.NpgsqlTracingOptions.EnrichWithBatch.set -> void -Npgsql.NpgsqlTracingOptions.EnrichWithCommand.get -> System.Action? -Npgsql.NpgsqlTracingOptions.EnrichWithCommand.set -> void -Npgsql.NpgsqlTracingOptions.FilterBatch.get -> System.Func? -Npgsql.NpgsqlTracingOptions.FilterBatch.set -> void -Npgsql.NpgsqlTracingOptions.FilterCommand.get -> System.Func? -Npgsql.NpgsqlTracingOptions.FilterCommand.set -> void -Npgsql.NpgsqlTracingOptions.ProvideSpanNameForBatch.get -> System.Func? -Npgsql.NpgsqlTracingOptions.ProvideSpanNameForBatch.set -> void -Npgsql.NpgsqlTracingOptions.ProvideSpanNameForCommand.get -> System.Func? -Npgsql.NpgsqlTracingOptions.ProvideSpanNameForCommand.set -> void +Npgsql.NpgsqlTracingOptionsBuilder +Npgsql.NpgsqlTracingOptionsBuilder.ConfigureBatchEnrichmentCallback(System.Action? batchEnrichmentCallback) -> Npgsql.NpgsqlTracingOptionsBuilder! +Npgsql.NpgsqlTracingOptionsBuilder.ConfigureBatchFilter(System.Func? batchFilter) -> Npgsql.NpgsqlTracingOptionsBuilder! +Npgsql.NpgsqlTracingOptionsBuilder.ConfigureBatchSpanNameProvider(System.Func? batchSpanNameProvider) -> Npgsql.NpgsqlTracingOptionsBuilder! +Npgsql.NpgsqlTracingOptionsBuilder.ConfigureCommandEnrichmentCallback(System.Action? commandEnrichmentCallback) -> Npgsql.NpgsqlTracingOptionsBuilder! +Npgsql.NpgsqlTracingOptionsBuilder.ConfigureCommandFilter(System.Func? commandFilter) -> Npgsql.NpgsqlTracingOptionsBuilder! +Npgsql.NpgsqlTracingOptionsBuilder.ConfigureCommandSpanNameProvider(System.Func? commandSpanNameProvider) -> Npgsql.NpgsqlTracingOptionsBuilder! +Npgsql.NpgsqlTracingOptionsBuilder.EnableFirstResponseEvent(bool enable = true) -> Npgsql.NpgsqlTracingOptionsBuilder! Npgsql.Replication.PgOutput.ReplicationValue.GetFieldName() -> string! 
Npgsql.Replication.PgOutput.Messages.ParallelStreamAbortMessage Npgsql.Replication.PgOutput.Messages.ParallelStreamAbortMessage.AbortLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber From e189bd1d181d50f406f2edb404c885cee6ddcf7f Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Sat, 16 Nov 2024 13:03:35 +0100 Subject: [PATCH 492/761] Composite improvements (#5923) --- .../Composites/Metadata/CompositeBuilder.cs | 55 ++++++++++--------- .../Composites/Metadata/CompositeFieldInfo.cs | 2 + .../Composites/Metadata/CompositeInfo.cs | 37 ++++--------- 3 files changed, 42 insertions(+), 52 deletions(-) diff --git a/src/Npgsql/Internal/Composites/Metadata/CompositeBuilder.cs b/src/Npgsql/Internal/Composites/Metadata/CompositeBuilder.cs index e12d44bf88..0917dfd834 100644 --- a/src/Npgsql/Internal/Composites/Metadata/CompositeBuilder.cs +++ b/src/Npgsql/Internal/Composites/Metadata/CompositeBuilder.cs @@ -1,16 +1,18 @@ using System; using System.Buffers; +using System.Collections.Generic; using Npgsql.Util; namespace Npgsql.Internal.Composites; -abstract class CompositeBuilder(StrongBox[] tempBoxes) +abstract class CompositeBuilder(StrongBox[] tempBoxes, IReadOnlyList fields) { - protected StrongBox[] _tempBoxes = tempBoxes; + protected readonly StrongBox[] _tempBoxes = tempBoxes; + protected readonly IReadOnlyList _fields = fields; protected int _currentField; + protected object? 
_boxedInstance; protected abstract void Construct(); - protected abstract void SetField(TValue value); public void AddValue(TValue value) { @@ -30,13 +32,25 @@ public void AddValue(TValue value) } _currentField++; + + void SetField(TValue value) + { + if (_boxedInstance is null) + ThrowHelper.ThrowInvalidOperationException("Not constructed yet, or no more fields were expected."); + + var currentField = _currentField; + var fields = _fields; + if (currentField > fields.Count - 1) + ThrowHelper.ThrowIndexOutOfRangeException($"Cannot set field {value} at position {currentField} - all fields have already been set"); + + ((CompositeFieldInfo)fields[currentField]).Set(_boxedInstance, value); + } } } -sealed class CompositeBuilder(CompositeInfo compositeInfo) : CompositeBuilder(compositeInfo.CreateTempBoxes()), IDisposable +sealed class CompositeBuilder(CompositeInfo compositeInfo) : CompositeBuilder(compositeInfo.CreateTempBoxes(), compositeInfo.Fields), IDisposable { T _instance = default!; - object? _boxedInstance; public T Complete() { @@ -46,17 +60,6 @@ public T Complete() return (T)(_boxedInstance ?? 
_instance!); } - public void Reset() - { - _instance = default!; - _boxedInstance = null; - _currentField = 0; - foreach (var box in _tempBoxes) - box.Clear(); - } - - public void Dispose() => Reset(); - protected override void Construct() { var tempBoxes = _tempBoxes; @@ -72,7 +75,7 @@ protected override void Construct() args[argIndex] = tempBoxes[i]; } _instance = compositeInfo.Constructor(args)!; - ArrayPool.Shared.Return(args); + ArrayPool.Shared.Return(args, clearArray: true); if (tempBoxes.Length == compositeInfo.Fields.Count) return; @@ -87,16 +90,14 @@ protected override void Construct() } } - protected override void SetField(TValue value) + public void Reset() { - if (_boxedInstance is null) - ThrowHelper.ThrowInvalidOperationException("Not constructed yet, or no more fields were expected."); - - var currentField = _currentField; - var fields = compositeInfo.Fields; - if (currentField > fields.Count - 1) - ThrowHelper.ThrowIndexOutOfRangeException($"Cannot set field {value} at position {currentField} - all fields have already been set"); - - ((CompositeFieldInfo)fields[currentField]).Set(_boxedInstance, value); + _instance = default!; + _boxedInstance = null; + _currentField = 0; + foreach (var box in _tempBoxes) + box.Clear(); } + + public void Dispose() { } } diff --git a/src/Npgsql/Internal/Composites/Metadata/CompositeFieldInfo.cs b/src/Npgsql/Internal/Composites/Metadata/CompositeFieldInfo.cs index a6cc79e4e9..080d31ea68 100644 --- a/src/Npgsql/Internal/Composites/Metadata/CompositeFieldInfo.cs +++ b/src/Npgsql/Internal/Composites/Metadata/CompositeFieldInfo.cs @@ -143,10 +143,12 @@ sealed class CompositeFieldInfo : CompositeFieldInfo _getter = getter; } + // Accessed through reflection (ReflectionCompositeInfoFactory) public CompositeFieldInfo(string name, PgTypeInfo typeInfo, PgTypeId nominalPgTypeId, Func getter, int parameterIndex) : this(name, typeInfo, nominalPgTypeId, getter) => _parameterIndex = parameterIndex; + // Accessed through reflection 
(ReflectionCompositeInfoFactory) public CompositeFieldInfo(string name, PgTypeInfo typeInfo, PgTypeId nominalPgTypeId, Func getter, Action setter) : this(name, typeInfo, nominalPgTypeId, getter) => _setter = setter; diff --git a/src/Npgsql/Internal/Composites/Metadata/CompositeInfo.cs b/src/Npgsql/Internal/Composites/Metadata/CompositeInfo.cs index 3c8f564a46..f1e291cf53 100644 --- a/src/Npgsql/Internal/Composites/Metadata/CompositeInfo.cs +++ b/src/Npgsql/Internal/Composites/Metadata/CompositeInfo.cs @@ -12,35 +12,20 @@ sealed class CompositeInfo public CompositeInfo(CompositeFieldInfo[] fields, int constructorParameters, Func constructor) { _lastConstructorFieldIndex = -1; - for (var i = fields.Length - 1; i >= 0; i--) + var constructorFields = 0; + for (var i = 0; i < fields.Length; i++) + { if (fields[i].ConstructorParameterIndex is not null) { _lastConstructorFieldIndex = i; - break; + constructorFields++; } - - var parameterSum = 0; - for (var i = constructorParameters - 1; i > 0; i--) - parameterSum += i; - - var argumentsSum = 0; - if (parameterSum > 0) - { - foreach (var field in fields) - if (field.ConstructorParameterIndex is { } index) - argumentsSum += index; } - if (parameterSum != argumentsSum) + if (constructorParameters != constructorFields) throw new InvalidOperationException($"Missing composite fields to map to the required {constructorParameters} constructor parameters."); _fields = fields; - var arguments = constructorParameters is 0 ? [] : new CompositeFieldInfo[constructorParameters]; - foreach (var field in fields) - { - if (field.ConstructorParameterIndex is { } index) - arguments[index] = field; - } Constructor = constructor; ConstructorParameters = constructorParameters; } @@ -56,12 +41,14 @@ public CompositeInfo(CompositeFieldInfo[] fields, int constructorParameters, Fun /// public StrongBox[] CreateTempBoxes() { - var valueCache = _lastConstructorFieldIndex + 1 is 0 ? 
[] : new StrongBox[_lastConstructorFieldIndex + 1]; - var fields = _fields; + if (_lastConstructorFieldIndex is -1) + return []; - for (var i = 0; i < valueCache.Length; i++) - valueCache[i] = fields[i].CreateBox(); + var boxes = new StrongBox[_lastConstructorFieldIndex + 1]; + var fields = _fields; + for (var i = 0; i < boxes.Length; i++) + boxes[i] = fields[i].CreateBox(); - return valueCache; + return boxes; } } From 36cb97a33f021d395c0a231cbfb691c87dc85882 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Sat, 16 Nov 2024 13:09:45 +0100 Subject: [PATCH 493/761] Add TypeLoadingOptions (#5925) --- src/Npgsql/NpgsqlConnectionStringBuilder.cs | 70 ++++++----------- src/Npgsql/NpgsqlDataSource.cs | 1 + src/Npgsql/NpgsqlDataSourceBuilder.cs | 9 +++ src/Npgsql/NpgsqlDataSourceConfiguration.cs | 4 +- src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 39 ++++++++-- src/Npgsql/NpgsqlTypeLoadingOptions.cs | 78 +++++++++++++++++++ src/Npgsql/PostgresDatabaseInfo.cs | 5 +- src/Npgsql/PostgresMinimalDatabaseInfo.cs | 2 +- src/Npgsql/PublicAPI.Unshipped.txt | 5 ++ .../Replication/ReplicationConnection.cs | 4 - src/Npgsql/TypeMapping/UserTypeMapper.cs | 11 ++- test/Npgsql.Tests/ConnectionTests.cs | 2 +- test/Npgsql.Tests/MultipleHostsTests.cs | 39 ++++++---- test/Npgsql.Tests/Support/PgPostmasterMock.cs | 13 ++-- test/Npgsql.Tests/Types/CompositeTests.cs | 5 +- 15 files changed, 197 insertions(+), 90 deletions(-) create mode 100644 src/Npgsql/NpgsqlTypeLoadingOptions.cs diff --git a/src/Npgsql/NpgsqlConnectionStringBuilder.cs b/src/Npgsql/NpgsqlConnectionStringBuilder.cs index 8639fc1029..f662dd4a83 100644 --- a/src/Npgsql/NpgsqlConnectionStringBuilder.cs +++ b/src/Npgsql/NpgsqlConnectionStringBuilder.cs @@ -1211,24 +1211,6 @@ public bool NoResetOnClose } bool _noResetOnClose; - /// - /// Load table composite type definitions, and not just free-standing composite types. 
- /// - [Category("Advanced")] - [Description("Load table composite type definitions, and not just free-standing composite types.")] - [DisplayName("Load Table Composites")] - [NpgsqlConnectionStringProperty] - public bool LoadTableComposites - { - get => _loadTableComposites; - set - { - _loadTableComposites = value; - SetValue(nameof(LoadTableComposites), value); - } - } - bool _loadTableComposites; - /// /// Set the replication mode of the connection /// @@ -1336,7 +1318,26 @@ public int WriteCoalescingBufferThresholdBytes #endregion - #region Properties - Compatibility + #region Properties - Obsolete + + /// + /// Load table composite type definitions, and not just free-standing composite types. + /// + [Category("Advanced")] + [Description("Load table composite type definitions, and not just free-standing composite types.")] + [DisplayName("Load Table Composites")] + [NpgsqlConnectionStringProperty] + [Obsolete("Specifying type loading options through the connection string is obsolete, use the DataSource builder instead. See the 9.0 release notes for more information.")] + public bool LoadTableComposites + { + get => _loadTableComposites; + set + { + _loadTableComposites = value; + SetValue(nameof(LoadTableComposites), value); + } + } + bool _loadTableComposites; /// /// A compatibility mode for special PostgreSQL server types. @@ -1345,9 +1346,11 @@ public int WriteCoalescingBufferThresholdBytes [Description("A compatibility mode for special PostgreSQL server types.")] [DisplayName("Server Compatibility Mode")] [NpgsqlConnectionStringProperty] + [Obsolete("Specifying type loading options through the connection string is obsolete, use the DataSource builder instead. See the 9.0 release notes for more information.")] public ServerCompatibilityMode ServerCompatibilityMode { - get => _serverCompatibilityMode; + // Physical replication connections don't allow regular queries, so we can't load types from PG + get => ReplicationMode is ReplicationMode.Physical ? 
ServerCompatibilityMode.NoTypeLoading : _serverCompatibilityMode; set { _serverCompatibilityMode = value; @@ -1356,10 +1359,6 @@ public ServerCompatibilityMode ServerCompatibilityMode } ServerCompatibilityMode _serverCompatibilityMode; - #endregion - - #region Properties - Obsolete - /// /// Whether to trust the server certificate without validating it. /// @@ -1628,29 +1627,6 @@ public NpgsqlConnectionStringPropertyAttribute(params string[] synonyms) #region Enums -/// -/// An option specified in the connection string that activates special compatibility features. -/// -public enum ServerCompatibilityMode -{ - /// - /// No special server compatibility mode is active - /// - None, - - /// - /// The server is an Amazon Redshift instance. - /// - [Obsolete("ServerCompatibilityMode.Redshift no longer does anything and can be safely removed.")] - Redshift, - - /// - /// The server is doesn't support full type loading from the PostgreSQL catalogs, support the basic set - /// of types via information hardcoded inside Npgsql. - /// - NoTypeLoading, -} - /// /// Specifies how to manage SSL. /// diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index e897b181d3..ce6db8f843 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -96,6 +96,7 @@ internal NpgsqlDataSource( (var name, LoggingConfiguration, _, + _, TransportSecurityHandler, IntegratedSecurityHandler, SslClientAuthenticationOptionsCallback, diff --git a/src/Npgsql/NpgsqlDataSourceBuilder.cs b/src/Npgsql/NpgsqlDataSourceBuilder.cs index 8e2003786f..a3b7779083 100644 --- a/src/Npgsql/NpgsqlDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlDataSourceBuilder.cs @@ -121,6 +121,15 @@ public NpgsqlDataSourceBuilder EnableParameterLogging(bool parameterLoggingEnabl return this; } + /// + /// Configures type loading options for the DataSource. 
+ /// + public NpgsqlDataSourceBuilder ConfigureTypeLoading(Action configureAction) + { + _internalBuilder.ConfigureTypeLoading(configureAction); + return this; + } + + /// /// Configures OpenTelemetry tracing options. /// diff --git a/src/Npgsql/NpgsqlDataSourceConfiguration.cs b/src/Npgsql/NpgsqlDataSourceConfiguration.cs index 981831d595..14c9b5804b 100644 --- a/src/Npgsql/NpgsqlDataSourceConfiguration.cs +++ b/src/Npgsql/NpgsqlDataSourceConfiguration.cs @@ -1,6 +1,5 @@ using System; using System.Net.Security; -using System.Security.Cryptography.X509Certificates; using System.Threading; using System.Threading.Tasks; using Npgsql.Internal; @@ -10,8 +9,9 @@ namespace Npgsql; sealed record NpgsqlDataSourceConfiguration(string? Name, NpgsqlLoggingConfiguration LoggingConfiguration, NpgsqlTracingOptions TracingOptions, + NpgsqlTypeLoadingOptions TypeLoading, TransportSecurityHandler TransportSecurityHandler, - IntegratedSecurityHandler userCertificateValidationCallback, + IntegratedSecurityHandler IntegratedSecurityHandler, Action? SslClientAuthenticationOptionsCallback, Func? PasswordProvider, Func>? PasswordProviderAsync, diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index 2930c29d63..376e3bd7c9 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -31,6 +31,7 @@ public sealed class NpgsqlSlimDataSourceBuilder : INpgsqlTypeMapper ILoggerFactory? _loggerFactory; bool _sensitiveDataLoggingEnabled; List>? _tracingOptionsBuilderCallbacks; + List>? _typeLoadingOptionsBuilderCallbacks; TransportSecurityHandler _transportSecurityHandler = new(); RemoteCertificateValidationCallback? _userCertificateValidationCallback; @@ -118,12 +119,25 @@ public NpgsqlSlimDataSourceBuilder EnableParameterLogging(bool parameterLoggingE return this; } + /// + /// Configure type loading options for the DataSource. May be called multiple times; each + /// configuration action is applied in registration order. 
+ /// + public NpgsqlSlimDataSourceBuilder ConfigureTypeLoading(Action configureAction) + { + ArgumentNullException.ThrowIfNull(configureAction); + _typeLoadingOptionsBuilderCallbacks ??= new(); + _typeLoadingOptionsBuilderCallbacks.Add(configureAction); + return this; + } + /// /// Configures OpenTelemetry tracing options. /// /// The same builder instance so that multiple calls can be chained. public NpgsqlSlimDataSourceBuilder ConfigureTracing(Action configureAction) { + ArgumentNullException.ThrowIfNull(configureAction); _tracingOptionsBuilderCallbacks ??= new(); _tracingOptionsBuilderCallbacks.Add(configureAction); return this; @@ -136,6 +150,7 @@ public NpgsqlSlimDataSourceBuilder ConfigureTracing(ActionThe same builder instance so that multiple calls can be chained. public NpgsqlSlimDataSourceBuilder ConfigureJsonOptions(JsonSerializerOptions serializerOptions) { + ArgumentNullException.ThrowIfNull(serializerOptions); JsonSerializerOptions = serializerOptions; return this; } @@ -731,8 +746,7 @@ public NpgsqlSlimDataSourceBuilder UsePhysicalConnectionInitializer( /// public NpgsqlDataSource Build() { - var config = PrepareConfiguration(); - var connectionStringBuilder = ConnectionStringBuilder.Clone(); + var (connectionStringBuilder, config) = PrepareConfiguration(); if (ConnectionStringBuilder.Host!.Contains(',')) { @@ -753,16 +767,17 @@ public NpgsqlDataSource Build() /// public NpgsqlMultiHostDataSource BuildMultiHost() { - var config = PrepareConfiguration(); + var (connectionStringBuilder, config) = PrepareConfiguration(); ValidateMultiHost(); - return new(ConnectionStringBuilder.Clone(), config); + return new(connectionStringBuilder, config); } - NpgsqlDataSourceConfiguration PrepareConfiguration() + (NpgsqlConnectionStringBuilder, NpgsqlDataSourceConfiguration) PrepareConfiguration() { ConnectionStringBuilder.PostProcessAndValidate(); + var connectionStringBuilder = ConnectionStringBuilder.Clone(); var sslClientAuthenticationOptionsCallback = 
_sslClientAuthenticationOptionsCallback; var hasCertificateCallbacks = _userCertificateValidationCallback is not null || _clientCertificatesCallback is not null; @@ -806,17 +821,27 @@ NpgsqlDataSourceConfiguration PrepareConfiguration() ConfigureDefaultFactories(this); + var typeLoadingOptionsBuilder = new NpgsqlTypeLoadingOptionsBuilder(); +#pragma warning disable CS0618 // Type or member is obsolete + typeLoadingOptionsBuilder.EnableTableCompositesLoading(connectionStringBuilder.LoadTableComposites); + typeLoadingOptionsBuilder.EnableTypeLoading(connectionStringBuilder.ServerCompatibilityMode is not ServerCompatibilityMode.NoTypeLoading); +#pragma warning restore CS0618 // Type or member is obsolete + foreach (var callback in _typeLoadingOptionsBuilderCallbacks ?? (IEnumerable>)[]) + callback.Invoke(typeLoadingOptionsBuilder); + var typeLoadingOptions = typeLoadingOptionsBuilder.Build(); + var tracingOptionsBuilder = new NpgsqlTracingOptionsBuilder(); foreach (var callback in _tracingOptionsBuilderCallbacks ?? (IEnumerable>)[]) callback.Invoke(tracingOptionsBuilder); var tracingOptions = tracingOptionsBuilder.Build(); - return new( + return (connectionStringBuilder, new( Name, _loggerFactory is null ? NpgsqlLoggingConfiguration.NullConfiguration : new NpgsqlLoggingConfiguration(_loggerFactory, _sensitiveDataLoggingEnabled), tracingOptions, + typeLoadingOptions, _transportSecurityHandler, _integratedSecurityHandler, sslClientAuthenticationOptionsCallback, @@ -832,7 +857,7 @@ _loggerFactory is null #if NET7_0_OR_GREATER ,_negotiateOptionsCallback #endif - ); + )); } void ValidateMultiHost() diff --git a/src/Npgsql/NpgsqlTypeLoadingOptions.cs b/src/Npgsql/NpgsqlTypeLoadingOptions.cs new file mode 100644 index 0000000000..87af06124a --- /dev/null +++ b/src/Npgsql/NpgsqlTypeLoadingOptions.cs @@ -0,0 +1,78 @@ +using System; + +namespace Npgsql; + +/// +/// Options for configuring Npgsql type loading. 
+/// +sealed class NpgsqlTypeLoadingOptions +{ + /// + /// Load table composite type definitions, and not just free-standing composite types. + /// + public bool LoadTableComposites { get; init; } + + /// + /// When false, if the server doesn't support full type loading from the PostgreSQL catalogs, + /// support the basic set of types via information hardcoded inside Npgsql. + /// + public required bool LoadTypes { get; init; } = true; +} + +/// +/// Options builder for configuring Npgsql type loading. +/// +public sealed class NpgsqlTypeLoadingOptionsBuilder +{ + bool _loadTableComposites; + bool _loadTypes = true; + + internal NpgsqlTypeLoadingOptionsBuilder() {} + + /// + /// Enable loading table composite type definitions, and not just free-standing composite types. + /// + public NpgsqlTypeLoadingOptionsBuilder EnableTableCompositesLoading(bool enable = true) + { + _loadTableComposites = enable; + return this; + } + + /// + /// Set a compatibility mode for special PostgreSQL server types. + /// + public NpgsqlTypeLoadingOptionsBuilder EnableTypeLoading(bool enable = true) + { + _loadTypes = enable; + return this; + } + + internal NpgsqlTypeLoadingOptions Build() => new() + { + LoadTableComposites = _loadTableComposites, + LoadTypes = _loadTypes + }; +} + +/// +/// An option specified in the connection string that activates special compatibility features. +/// +public enum ServerCompatibilityMode +{ + /// + /// No special server compatibility mode is active + /// + None, + + /// + /// The server is an Amazon Redshift instance. + /// + [Obsolete("ServerCompatibilityMode.Redshift no longer does anything and can be safely removed.")] + Redshift, + + /// + /// The server is doesn't support full type loading from the PostgreSQL catalogs, support the basic set + /// of types via information hardcoded inside Npgsql. 
+ /// + NoTypeLoading, +} diff --git a/src/Npgsql/PostgresDatabaseInfo.cs b/src/Npgsql/PostgresDatabaseInfo.cs index 1670954a9c..670a56cd5d 100644 --- a/src/Npgsql/PostgresDatabaseInfo.cs +++ b/src/Npgsql/PostgresDatabaseInfo.cs @@ -199,8 +199,9 @@ FROM pg_enum internal async Task> LoadBackendTypes(NpgsqlConnector conn, NpgsqlTimeout timeout, bool async) { var versionQuery = "SELECT version();"; - var loadTypesQuery = GenerateLoadTypesQuery(SupportsRangeTypes, SupportsMultirangeTypes, conn.Settings.LoadTableComposites); - var loadCompositeTypesQuery = GenerateLoadCompositeTypesQuery(conn.Settings.LoadTableComposites); + var loadTableComposites = conn.DataSource.Configuration.TypeLoading.LoadTableComposites; + var loadTypesQuery = GenerateLoadTypesQuery(SupportsRangeTypes, SupportsMultirangeTypes, loadTableComposites); + var loadCompositeTypesQuery = GenerateLoadCompositeTypesQuery(loadTableComposites); var loadEnumFieldsQuery = SupportsEnumTypes ? GenerateLoadEnumFieldsQuery(HasEnumSortOrder) : string.Empty; diff --git a/src/Npgsql/PostgresMinimalDatabaseInfo.cs b/src/Npgsql/PostgresMinimalDatabaseInfo.cs index 3d07fac236..ed2ef15e81 100644 --- a/src/Npgsql/PostgresMinimalDatabaseInfo.cs +++ b/src/Npgsql/PostgresMinimalDatabaseInfo.cs @@ -11,7 +11,7 @@ sealed class PostgresMinimalDatabaseInfoFactory : INpgsqlDatabaseInfoFactory { public Task Load(NpgsqlConnector conn, NpgsqlTimeout timeout, bool async) => Task.FromResult( - conn.Settings.ServerCompatibilityMode == ServerCompatibilityMode.NoTypeLoading + !conn.DataSource.Configuration.TypeLoading.LoadTypes ? 
(NpgsqlDatabaseInfo)new PostgresMinimalDatabaseInfo(conn) : null); } diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index 731bfe82fe..003afe82f8 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -5,6 +5,7 @@ Npgsql.NpgsqlConnection.SslClientAuthenticationOptionsCallback.get -> System.Act Npgsql.NpgsqlConnection.SslClientAuthenticationOptionsCallback.set -> void Npgsql.NpgsqlConnectionStringBuilder.SslNegotiation.get -> Npgsql.SslNegotiation Npgsql.NpgsqlConnectionStringBuilder.SslNegotiation.set -> void +Npgsql.NpgsqlDataSourceBuilder.ConfigureTypeLoading(System.Action! configureAction) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.MapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.MapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlDataSourceBuilder! @@ -15,6 +16,7 @@ Npgsql.NpgsqlDataSourceBuilder.UseSslClientAuthenticationOptionsCallback(System. Npgsql.NpgsqlMetricsOptions Npgsql.NpgsqlMetricsOptions.NpgsqlMetricsOptions() -> void Npgsql.NpgsqlSlimDataSourceBuilder.ConfigureTracing(System.Action! configureAction) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.ConfigureTypeLoading(System.Action! configureAction) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableGeometricTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableJsonTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableNetworkTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! 
@@ -32,6 +34,9 @@ Npgsql.NpgsqlTracingOptionsBuilder.ConfigureCommandEnrichmentCallback(System.Act Npgsql.NpgsqlTracingOptionsBuilder.ConfigureCommandFilter(System.Func? commandFilter) -> Npgsql.NpgsqlTracingOptionsBuilder! Npgsql.NpgsqlTracingOptionsBuilder.ConfigureCommandSpanNameProvider(System.Func? commandSpanNameProvider) -> Npgsql.NpgsqlTracingOptionsBuilder! Npgsql.NpgsqlTracingOptionsBuilder.EnableFirstResponseEvent(bool enable = true) -> Npgsql.NpgsqlTracingOptionsBuilder! +Npgsql.NpgsqlTypeLoadingOptionsBuilder +Npgsql.NpgsqlTypeLoadingOptionsBuilder.EnableTableCompositesLoading(bool enable = true) -> Npgsql.NpgsqlTypeLoadingOptionsBuilder! +Npgsql.NpgsqlTypeLoadingOptionsBuilder.EnableTypeLoading(bool enable = true) -> Npgsql.NpgsqlTypeLoadingOptionsBuilder! Npgsql.Replication.PgOutput.ReplicationValue.GetFieldName() -> string! Npgsql.Replication.PgOutput.Messages.ParallelStreamAbortMessage Npgsql.Replication.PgOutput.Messages.ParallelStreamAbortMessage.AbortLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber diff --git a/src/Npgsql/Replication/ReplicationConnection.cs b/src/Npgsql/Replication/ReplicationConnection.cs index 44fa4b4ac8..575efb669b 100644 --- a/src/Npgsql/Replication/ReplicationConnection.cs +++ b/src/Npgsql/Replication/ReplicationConnection.cs @@ -100,10 +100,6 @@ public string ConnectionString { ReplicationMode = ReplicationMode }; - // Physical replication connections don't allow regular queries, so we can't load types from PG - if (ReplicationMode == ReplicationMode.Physical) - cs.ServerCompatibilityMode = ServerCompatibilityMode.NoTypeLoading; - _npgsqlConnection.ConnectionString = cs.ToString(); } } diff --git a/src/Npgsql/TypeMapping/UserTypeMapper.cs b/src/Npgsql/TypeMapping/UserTypeMapper.cs index 90da77728a..bd990efcee 100644 --- a/src/Npgsql/TypeMapping/UserTypeMapper.cs +++ b/src/Npgsql/TypeMapping/UserTypeMapper.cs @@ -38,7 +38,16 @@ sealed class UserTypeMapper : PgTypeInfoResolverFactory readonly List _mappings; public IList 
Items => _mappings; - public INpgsqlNameTranslator DefaultNameTranslator { get; set; } = NpgsqlSnakeCaseNameTranslator.Instance; + INpgsqlNameTranslator _defaultNameTranslator = NpgsqlSnakeCaseNameTranslator.Instance; + public INpgsqlNameTranslator DefaultNameTranslator + { + get => _defaultNameTranslator; + set + { + ArgumentNullException.ThrowIfNull(value); + _defaultNameTranslator = value; + } + } UserTypeMapper(IEnumerable mappings) => _mappings = [..mappings]; public UserTypeMapper() => _mappings = []; diff --git a/test/Npgsql.Tests/ConnectionTests.cs b/test/Npgsql.Tests/ConnectionTests.cs index 7dd4f9e82b..d49b350803 100644 --- a/test/Npgsql.Tests/ConnectionTests.cs +++ b/test/Npgsql.Tests/ConnectionTests.cs @@ -1150,7 +1150,7 @@ public async Task Exception_during_close() [Test, Description("Some pseudo-PG database don't support pg_type loading, we have a minimal DatabaseInfo for this")] public async Task NoTypeLoading() { - await using var dataSource = CreateDataSource(csb => csb.ServerCompatibilityMode = ServerCompatibilityMode.NoTypeLoading); + await using var dataSource = CreateDataSource(builder => builder.ConfigureTypeLoading(builder => builder.EnableTypeLoading())); await using var conn = await dataSource.OpenConnectionAsync(); Assert.That(await conn.ExecuteScalarAsync("SELECT 8"), Is.EqualTo(8)); diff --git a/test/Npgsql.Tests/MultipleHostsTests.cs b/test/Npgsql.Tests/MultipleHostsTests.cs index 78cbeded75..662c08d5b9 100644 --- a/test/Npgsql.Tests/MultipleHostsTests.cs +++ b/test/Npgsql.Tests/MultipleHostsTests.cs @@ -8,7 +8,6 @@ using System.Linq; using System.Net; using System.Net.Sockets; -using System.Runtime.InteropServices; using System.Threading; using System.Threading.Tasks; using System.Transactions; @@ -20,6 +19,8 @@ namespace Npgsql.Tests; +#pragma warning disable CS0618 + public class MultipleHostsTests : TestBase { static readonly object[] MyCases = @@ -615,10 +616,11 @@ public async Task Offline_state_on_query_execution_IOException() 
public async Task Offline_state_on_query_execution_TimeoutException() { await using var postmaster = PgPostmasterMock.Start(ConnectionString); - var dataSourceBuilder = postmaster.GetDataSourceBuilder(); - dataSourceBuilder.ConnectionStringBuilder.CommandTimeout = 1; - dataSourceBuilder.ConnectionStringBuilder.CancellationTimeout = 1; - await using var dataSource = dataSourceBuilder.Build(); + await using var dataSource = postmaster.CreateDataSource(builder => + { + builder.ConnectionStringBuilder.CommandTimeout = 1; + builder.ConnectionStringBuilder.CancellationTimeout = 1; + }); await using var conn = await dataSource.OpenConnectionAsync(); await using var anotherConn = await dataSource.OpenConnectionAsync(); @@ -641,10 +643,11 @@ public async Task Offline_state_on_query_execution_TimeoutException() public async Task Unknown_state_on_query_execution_TimeoutException_with_disabled_cancellation() { await using var postmaster = PgPostmasterMock.Start(ConnectionString); - var dataSourceBuilder = postmaster.GetDataSourceBuilder(); - dataSourceBuilder.ConnectionStringBuilder.CommandTimeout = 1; - dataSourceBuilder.ConnectionStringBuilder.CancellationTimeout = -1; - await using var dataSource = dataSourceBuilder.Build(); + await using var dataSource = postmaster.CreateDataSource(builder => + { + builder.ConnectionStringBuilder.CommandTimeout = 1; + builder.ConnectionStringBuilder.CancellationTimeout = -1; + }); await using var conn = await dataSource.OpenConnectionAsync(); await using var anotherConn = await dataSource.OpenConnectionAsync(); @@ -667,10 +670,11 @@ public async Task Unknown_state_on_query_execution_TimeoutException_with_disable public async Task Unknown_state_on_query_execution_cancellation_with_disabled_cancellation_timeout() { await using var postmaster = PgPostmasterMock.Start(ConnectionString); - var dataSourceBuilder = postmaster.GetDataSourceBuilder(); - dataSourceBuilder.ConnectionStringBuilder.CommandTimeout = 30; - 
dataSourceBuilder.ConnectionStringBuilder.CancellationTimeout = -1; - await using var dataSource = dataSourceBuilder.Build(); + await using var dataSource = postmaster.CreateDataSource(builder => + { + builder.ConnectionStringBuilder.CommandTimeout = 30; + builder.ConnectionStringBuilder.CancellationTimeout = -1; + }); await using var conn = await dataSource.OpenConnectionAsync(); await using var anotherConn = await dataSource.OpenConnectionAsync(); @@ -697,10 +701,11 @@ public async Task Unknown_state_on_query_execution_cancellation_with_disabled_ca public async Task Unknown_state_on_query_execution_TimeoutException_with_cancellation_failure() { await using var postmaster = PgPostmasterMock.Start(ConnectionString); - var dataSourceBuilder = postmaster.GetDataSourceBuilder(); - dataSourceBuilder.ConnectionStringBuilder.CommandTimeout = 1; - dataSourceBuilder.ConnectionStringBuilder.CancellationTimeout = 0; - await using var dataSource = dataSourceBuilder.Build(); + await using var dataSource = postmaster.CreateDataSource(builder => + { + builder.ConnectionStringBuilder.CommandTimeout = 1; + builder.ConnectionStringBuilder.CancellationTimeout = 0; + }); await using var conn = await dataSource.OpenConnectionAsync(); diff --git a/test/Npgsql.Tests/Support/PgPostmasterMock.cs b/test/Npgsql.Tests/Support/PgPostmasterMock.cs index 2e298be3d3..3a59ccc2f9 100644 --- a/test/Npgsql.Tests/Support/PgPostmasterMock.cs +++ b/test/Npgsql.Tests/Support/PgPostmasterMock.cs @@ -80,17 +80,20 @@ internal PgPostmasterMock( Port = localEndPoint.Port; connectionStringBuilder.Host = Host; connectionStringBuilder.Port = Port; +#pragma warning disable CS0618 // Type or member is obsolete connectionStringBuilder.ServerCompatibilityMode = ServerCompatibilityMode.NoTypeLoading; +#pragma warning restore CS0618 // Type or member is obsolete ConnectionString = connectionStringBuilder.ConnectionString; _socket.Listen(5); } - public NpgsqlDataSourceBuilder GetDataSourceBuilder() - => 
new(ConnectionString); - - public NpgsqlDataSource CreateDataSource() - => NpgsqlDataSource.Create(ConnectionString); + public NpgsqlDataSource CreateDataSource(Action? configure = null) + { + var builder = new NpgsqlDataSourceBuilder(ConnectionString); + configure?.Invoke(builder); + return builder.Build(); + } void AcceptClients() { diff --git a/test/Npgsql.Tests/Types/CompositeTests.cs b/test/Npgsql.Tests/Types/CompositeTests.cs index 3928be3515..d10daf6734 100644 --- a/test/Npgsql.Tests/Types/CompositeTests.cs +++ b/test/Npgsql.Tests/Types/CompositeTests.cs @@ -396,8 +396,7 @@ public async Task Table_as_composite([Values] bool enabled) var dataSourceBuilder = CreateDataSourceBuilder(); dataSourceBuilder.MapComposite(table); - if (enabled) - dataSourceBuilder.ConnectionStringBuilder.LoadTableComposites = true; + dataSourceBuilder.ConfigureTypeLoading(b => b.EnableTableCompositesLoading(enabled)); await using var dataSource = dataSourceBuilder.Build(); await using var connection = await dataSource.OpenConnectionAsync(); @@ -430,7 +429,7 @@ public async Task Table_as_composite_with_deleted_columns() await adminConnection.ExecuteNonQueryAsync($"ALTER TABLE {table} DROP COLUMN bar;"); var dataSourceBuilder = CreateDataSourceBuilder(); - dataSourceBuilder.ConnectionStringBuilder.LoadTableComposites = true; + dataSourceBuilder.ConfigureTypeLoading(b => b.EnableTableCompositesLoading()); dataSourceBuilder.MapComposite(table); await using var dataSource = dataSourceBuilder.Build(); await using var connection = await dataSource.OpenConnectionAsync(); From e7bf5f8a638287bb77fef5cb44d40cb48c2d762b Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Sat, 16 Nov 2024 19:27:50 +0700 Subject: [PATCH 494/761] Fix merge failure --- src/Npgsql/NpgsqlTypeLoadingOptions.cs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Npgsql/NpgsqlTypeLoadingOptions.cs b/src/Npgsql/NpgsqlTypeLoadingOptions.cs index 87af06124a..bfca07357d 100644 --- 
a/src/Npgsql/NpgsqlTypeLoadingOptions.cs +++ b/src/Npgsql/NpgsqlTypeLoadingOptions.cs @@ -10,7 +10,7 @@ sealed class NpgsqlTypeLoadingOptions /// /// Load table composite type definitions, and not just free-standing composite types. /// - public bool LoadTableComposites { get; init; } + public required bool LoadTableComposites { get; init; } /// /// When false, if the server doesn't support full type loading from the PostgreSQL catalogs, @@ -39,7 +39,7 @@ public NpgsqlTypeLoadingOptionsBuilder EnableTableCompositesLoading(bool enable } /// - /// Set a compatibility mode for special PostgreSQL server types. + /// Enable loading of types, when disabled Npgsql falls back to a small, builtin, set of known types and type ids. /// public NpgsqlTypeLoadingOptionsBuilder EnableTypeLoading(bool enable = true) { From eeaef3f0447a099eb9930c4d93030122e1a27653 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Sat, 16 Nov 2024 14:01:34 +0100 Subject: [PATCH 495/761] Improve type mapping collection code (#5920) --- .../AdoTypeInfoResolverFactory.Multirange.cs | 16 +-- .../AdoTypeInfoResolverFactory.Range.cs | 8 +- .../AdoTypeInfoResolverFactory.cs | 12 +- src/Npgsql/Internal/TypeInfoMapping.cs | 104 +++++++++++++----- src/Npgsql/TypeMapping/UserTypeMapper.cs | 2 +- 5 files changed, 97 insertions(+), 45 deletions(-) diff --git a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Multirange.cs b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Multirange.cs index 873e6b9874..f76ed3a457 100644 --- a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Multirange.cs +++ b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Multirange.cs @@ -74,18 +74,18 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) else { mappings.AddResolverType[]>(DataTypeNames.TsMultirange, - static (options, mapping, dataTypeNameMatch) => mapping.CreateInfo(options, + static (options, mapping, requiresDataTypeName) => 
mapping.CreateInfo(options, DateTimeConverterResolver.CreateMultirangeResolver[], NpgsqlRange>(options, options.GetCanonicalTypeId(DataTypeNames.TsTzMultirange), options.GetCanonicalTypeId(DataTypeNames.TsMultirange), - options.EnableDateTimeInfinityConversions), dataTypeNameMatch), + options.EnableDateTimeInfinityConversions), requiresDataTypeName), isDefault: true); mappings.AddResolverType>>(DataTypeNames.TsMultirange, - static (options, mapping, dataTypeNameMatch) => mapping.CreateInfo(options, + static (options, mapping, requiresDataTypeName) => mapping.CreateInfo(options, DateTimeConverterResolver.CreateMultirangeResolver>, NpgsqlRange>(options, options.GetCanonicalTypeId(DataTypeNames.TsTzMultirange), options.GetCanonicalTypeId(DataTypeNames.TsMultirange), - options.EnableDateTimeInfinityConversions), dataTypeNameMatch)); + options.EnableDateTimeInfinityConversions), requiresDataTypeName)); } mappings.AddType[]>(DataTypeNames.TsMultirange, @@ -126,18 +126,18 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) else { mappings.AddResolverType[]>(DataTypeNames.TsTzMultirange, - static (options, mapping, dataTypeNameMatch) => mapping.CreateInfo(options, + static (options, mapping, requiresDataTypeName) => mapping.CreateInfo(options, DateTimeConverterResolver.CreateMultirangeResolver[], NpgsqlRange>(options, options.GetCanonicalTypeId(DataTypeNames.TsTzMultirange), options.GetCanonicalTypeId(DataTypeNames.TsMultirange), - options.EnableDateTimeInfinityConversions), dataTypeNameMatch), + options.EnableDateTimeInfinityConversions), requiresDataTypeName), isDefault: true); mappings.AddResolverType>>(DataTypeNames.TsTzMultirange, - static (options, mapping, dataTypeNameMatch) => mapping.CreateInfo(options, + static (options, mapping, requiresDataTypeName) => mapping.CreateInfo(options, DateTimeConverterResolver.CreateMultirangeResolver>, NpgsqlRange>(options, options.GetCanonicalTypeId(DataTypeNames.TsTzMultirange), 
options.GetCanonicalTypeId(DataTypeNames.TsMultirange), - options.EnableDateTimeInfinityConversions), dataTypeNameMatch)); + options.EnableDateTimeInfinityConversions), requiresDataTypeName)); mappings.AddType[]>(DataTypeNames.TsTzMultirange, static (options, mapping, _) => mapping.CreateInfo(options, CreateArrayMultirangeConverter( diff --git a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Range.cs b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Range.cs index 54ca555cdd..74a9028423 100644 --- a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Range.cs +++ b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Range.cs @@ -48,11 +48,11 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) else { mappings.AddResolverStructType>(DataTypeNames.TsRange, - static (options, mapping, dataTypeNameMatch) => mapping.CreateInfo(options, + static (options, mapping, requiresDataTypeName) => mapping.CreateInfo(options, DateTimeConverterResolver.CreateRangeResolver(options, options.GetCanonicalTypeId(DataTypeNames.TsTzRange), options.GetCanonicalTypeId(DataTypeNames.TsRange), - options.EnableDateTimeInfinityConversions), dataTypeNameMatch), + options.EnableDateTimeInfinityConversions), requiresDataTypeName), isDefault: true); } mappings.AddStructType>(DataTypeNames.TsRange, @@ -73,11 +73,11 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) else { mappings.AddResolverStructType>(DataTypeNames.TsTzRange, - static (options, mapping, dataTypeNameMatch) => mapping.CreateInfo(options, + static (options, mapping, requiresDataTypeName) => mapping.CreateInfo(options, DateTimeConverterResolver.CreateRangeResolver(options, options.GetCanonicalTypeId(DataTypeNames.TsTzRange), options.GetCanonicalTypeId(DataTypeNames.TsRange), - options.EnableDateTimeInfinityConversions), dataTypeNameMatch), + options.EnableDateTimeInfinityConversions), requiresDataTypeName), 
isDefault: true); mappings.AddStructType>(DataTypeNames.TsTzRange, static (options, mapping, _) => mapping.CreateInfo(options, diff --git a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs index 768e0873ef..b2a39db34b 100644 --- a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs @@ -172,7 +172,7 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) // Varbit mappings.AddType(DataTypeNames.Varbit, static (options, mapping, _) => mapping.CreateInfo(options, - new PolymorphicBitStringConverterResolver(options.GetCanonicalTypeId(DataTypeNames.Varbit)), supportsWriting: false)); + new PolymorphicBitStringConverterResolver(options.GetCanonicalTypeId(DataTypeNames.Varbit)), includeDataTypeName: true, supportsWriting: false)); mappings.AddType(DataTypeNames.Varbit, static (options, mapping, _) => mapping.CreateInfo(options, new BitArrayBitStringConverter()), isDefault: true); mappings.AddStructType(DataTypeNames.Varbit, @@ -183,7 +183,7 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) // Bit mappings.AddType(DataTypeNames.Bit, static (options, mapping, _) => mapping.CreateInfo(options, - new PolymorphicBitStringConverterResolver(options.GetCanonicalTypeId(DataTypeNames.Bit)), supportsWriting: false)); + new PolymorphicBitStringConverterResolver(options.GetCanonicalTypeId(DataTypeNames.Bit)), includeDataTypeName: true, supportsWriting: false)); mappings.AddType(DataTypeNames.Bit, static (options, mapping, _) => mapping.CreateInfo(options, new BitArrayBitStringConverter()), isDefault: true); mappings.AddStructType(DataTypeNames.Bit, @@ -201,9 +201,9 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) else { mappings.AddResolverStructType(DataTypeNames.Timestamp, - static (options, mapping, 
dataTypeNameMatch) => mapping.CreateInfo(options, + static (options, mapping, requiresDataTypeName) => mapping.CreateInfo(options, DateTimeConverterResolver.CreateResolver(options, options.GetCanonicalTypeId(DataTypeNames.TimestampTz), options.GetCanonicalTypeId(DataTypeNames.Timestamp), - options.EnableDateTimeInfinityConversions), dataTypeNameMatch), isDefault: true); + options.EnableDateTimeInfinityConversions), requiresDataTypeName), isDefault: true); } mappings.AddStructType(DataTypeNames.Timestamp, static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); @@ -220,9 +220,9 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) else { mappings.AddResolverStructType(DataTypeNames.TimestampTz, - static (options, mapping, dataTypeNameMatch) => mapping.CreateInfo(options, + static (options, mapping, requiresDataTypeName) => mapping.CreateInfo(options, DateTimeConverterResolver.CreateResolver(options, options.GetCanonicalTypeId(DataTypeNames.TimestampTz), options.GetCanonicalTypeId(DataTypeNames.Timestamp), - options.EnableDateTimeInfinityConversions), dataTypeNameMatch), isDefault: true); + options.EnableDateTimeInfinityConversions), requiresDataTypeName), isDefault: true); mappings.AddStructType(DataTypeNames.TimestampTz, static (options, mapping, _) => mapping.CreateInfo(options, new DateTimeOffsetConverter(options.EnableDateTimeInfinityConversions))); } diff --git a/src/Npgsql/Internal/TypeInfoMapping.cs b/src/Npgsql/Internal/TypeInfoMapping.cs index 1d54101379..afb5325590 100644 --- a/src/Npgsql/Internal/TypeInfoMapping.cs +++ b/src/Npgsql/Internal/TypeInfoMapping.cs @@ -16,11 +16,11 @@ namespace Npgsql.Internal; /// /// /// -/// -/// Signals whether a resolver based TypeInfo can keep its PgTypeId undecided or whether it should follow mapping.DataTypeName. 
+/// +/// Relevant for `PgResolverTypeInfo` only: whether the instance can be constructed without passing mapping.DataTypeName, an exception occurs otherwise. /// [Experimental(NpgsqlDiagnostics.ConvertersExperimental)] -public delegate PgTypeInfo TypeInfoFactory(PgSerializerOptions options, TypeInfoMapping mapping, bool resolvedDataTypeName); +public delegate PgTypeInfo TypeInfoFactory(PgSerializerOptions options, TypeInfoMapping mapping, bool requiresDataTypeName); [Experimental(NpgsqlDiagnostics.ConvertersExperimental)] public enum MatchRequirement @@ -184,13 +184,13 @@ TypeInfoMapping GetMapping(Type type, string dataTypeName) // Helper to eliminate generic display class duplication. static TypeInfoFactory CreateComposedFactory(Type mappingType, TypeInfoMapping innerMapping, Func mapper, bool copyPreferredFormat = false, bool supportsWriting = true) - => (options, mapping, dataTypeNameMatch) => + => (options, mapping, requiresDataTypeName) => { var resolvedInnerMapping = innerMapping; if (!DataTypeName.IsFullyQualified(innerMapping.DataTypeName.AsSpan())) resolvedInnerMapping = innerMapping with { DataTypeName = new DataTypeName(mapping.DataTypeName).Schema + "." + innerMapping.DataTypeName }; - var innerInfo = innerMapping.Factory(options, resolvedInnerMapping, dataTypeNameMatch); + var innerInfo = innerMapping.Factory(options, resolvedInnerMapping, requiresDataTypeName); var converter = mapper(mapping, innerInfo); var preferredFormat = copyPreferredFormat ? innerInfo.PreferredFormat : null; var writingSupported = supportsWriting && innerInfo.SupportsWriting; @@ -205,13 +205,13 @@ static TypeInfoFactory CreateComposedFactory(Type mappingType, TypeInfoMapping i // Helper to eliminate generic display class duplication. 
static TypeInfoFactory CreateComposedFactory(Type mappingType, TypeInfoMapping innerMapping, Func mapper, bool copyPreferredFormat = false, bool supportsWriting = true) - => (options, mapping, dataTypeNameMatch) => + => (options, mapping, requiresDataTypeName) => { var resolvedInnerMapping = innerMapping; if (!DataTypeName.IsFullyQualified(innerMapping.DataTypeName.AsSpan())) resolvedInnerMapping = innerMapping with { DataTypeName = new DataTypeName(mapping.DataTypeName).Schema + "." + innerMapping.DataTypeName }; - var innerInfo = (PgResolverTypeInfo)innerMapping.Factory(options, resolvedInnerMapping, dataTypeNameMatch); + var innerInfo = (PgResolverTypeInfo)innerMapping.Factory(options, resolvedInnerMapping, requiresDataTypeName); var resolver = mapper(mapping, innerInfo); var preferredFormat = copyPreferredFormat ? innerInfo.PreferredFormat : null; var writingSupported = supportsWriting && innerInfo.SupportsWriting; @@ -346,12 +346,12 @@ void AddArrayType(TypeInfoMapping elementMapping, Type type, Func + _items.Add(new TypeInfoMapping(typeof(object), arrayDataTypeName, (options, mapping, requiresDataTypeName) => { - if (!dataTypeNameMatch) + if (!requiresDataTypeName) throw new InvalidOperationException("Should not happen, please file a bug."); - return arrayMapping.Factory(options, mapping, dataTypeNameMatch); + return arrayMapping.Factory(options, mapping, requiresDataTypeName); })); } } @@ -386,12 +386,12 @@ void AddResolverArrayType(TypeInfoMapping elementMapping, Type type, Func + _items.Add(new TypeInfoMapping(typeof(object), arrayDataTypeName, (options, mapping, requiresDataTypeName) => { - if (!dataTypeNameMatch) + if (!requiresDataTypeName) throw new InvalidOperationException("Should not happen, please file a bug."); - return arrayMapping.Factory(options, mapping, dataTypeNameMatch); + return arrayMapping.Factory(options, mapping, requiresDataTypeName); })); } } @@ -485,16 +485,16 @@ void AddStructArrayType(TypeInfoMapping elementMapping, 
TypeInfoMapping nullable _items.Add(nullableArrayMapping); suppressObjectMapping = suppressObjectMapping || arrayMapping.TypeEquals(typeof(object)); if (!suppressObjectMapping && arrayMapping.MatchRequirement is MatchRequirement.DataTypeName or MatchRequirement.Single) - _items.Add(new TypeInfoMapping(typeof(object), arrayDataTypeName, (options, mapping, dataTypeNameMatch) => + _items.Add(new TypeInfoMapping(typeof(object), arrayDataTypeName, (options, mapping, requiresDataTypeName) => { return options.ArrayNullabilityMode switch { - _ when !dataTypeNameMatch => throw new InvalidOperationException("Should not happen, please file a bug."), - ArrayNullabilityMode.Never => arrayMapping.Factory(options, mapping, dataTypeNameMatch), - ArrayNullabilityMode.Always => nullableArrayMapping.Factory(options, mapping, dataTypeNameMatch), + _ when !requiresDataTypeName => throw new InvalidOperationException("Should not happen, please file a bug."), + ArrayNullabilityMode.Never => arrayMapping.Factory(options, mapping, requiresDataTypeName), + ArrayNullabilityMode.Always => nullableArrayMapping.Factory(options, mapping, requiresDataTypeName), ArrayNullabilityMode.PerInstance => CreateComposedPerInstance( - arrayMapping.Factory(options, mapping, dataTypeNameMatch), - nullableArrayMapping.Factory(options, mapping, dataTypeNameMatch), + arrayMapping.Factory(options, mapping, requiresDataTypeName), + nullableArrayMapping.Factory(options, mapping, requiresDataTypeName), mapping.DataTypeName ), _ => throw new ArgumentOutOfRangeException() @@ -603,14 +603,14 @@ void AddResolverStructArrayType(TypeInfoMapping elementMapping, TypeInfoMapping _items.Add(nullableArrayMapping); suppressObjectMapping = suppressObjectMapping || arrayMapping.TypeEquals(typeof(object)); if (!suppressObjectMapping && arrayMapping.MatchRequirement is MatchRequirement.DataTypeName or MatchRequirement.Single) - _items.Add(new TypeInfoMapping(typeof(object), arrayDataTypeName, (options, mapping, dataTypeNameMatch) 
=> options.ArrayNullabilityMode switch + _items.Add(new TypeInfoMapping(typeof(object), arrayDataTypeName, (options, mapping, requiresDataTypeName) => options.ArrayNullabilityMode switch { - _ when !dataTypeNameMatch => throw new InvalidOperationException("Should not happen, please file a bug."), - ArrayNullabilityMode.Never => arrayMapping.Factory(options, mapping, dataTypeNameMatch), - ArrayNullabilityMode.Always => nullableArrayMapping.Factory(options, mapping, dataTypeNameMatch), + _ when !requiresDataTypeName => throw new InvalidOperationException("Should not happen, please file a bug."), + ArrayNullabilityMode.Never => arrayMapping.Factory(options, mapping, requiresDataTypeName), + ArrayNullabilityMode.Always => nullableArrayMapping.Factory(options, mapping, requiresDataTypeName), ArrayNullabilityMode.PerInstance => CreateComposedPerInstance( - arrayMapping.Factory(options, mapping, dataTypeNameMatch), - nullableArrayMapping.Factory(options, mapping, dataTypeNameMatch), + arrayMapping.Factory(options, mapping, requiresDataTypeName), + nullableArrayMapping.Factory(options, mapping, requiresDataTypeName), mapping.DataTypeName ), _ => throw new ArgumentOutOfRangeException() @@ -753,6 +753,31 @@ internal static bool TryResolveFullyQualifiedName(PgSerializerOptions options, s internal static PostgresType GetPgType(this TypeInfoMapping mapping, PgSerializerOptions options) => options.DatabaseInfo.GetPostgresType(new DataTypeName(mapping.DataTypeName)); + // NOTE: This method exists since 9.0 to be able to deprecate the method below that has optional arguments in 10.0 (potentially removing it directly or in 11.0). + // It reduces how binary breaking that change will be if this method would not be there to be picked for the most common invocations. + /// + /// Creates a PgTypeInfo from a mapping, optins, and a converter. + /// + /// The mapping to create an info for. + /// The options to use. + /// The converter to create a PgTypeInfo for. 
+ /// The created info instance. + public static PgTypeInfo CreateInfo(this TypeInfoMapping mapping, PgSerializerOptions options, PgConverter converter) + => new(options, converter, new DataTypeName(mapping.DataTypeName)) + { + PreferredFormat = null, + SupportsWriting = true + }; + + /// + /// Creates a PgTypeInfo from a mapping, options, and a converter. + /// + /// The mapping to create an info for. + /// The options to use. + /// The converter to create a PgTypeInfo for. + /// Whether to prefer a specific data format for this info, when null it defaults to the most suitable format. + /// Whether the converters returned from the given converter resolver support writing. + /// The created info instance. public static PgTypeInfo CreateInfo(this TypeInfoMapping mapping, PgSerializerOptions options, PgConverter converter, DataFormat? preferredFormat = null, bool supportsWriting = true) => new(options, converter, new DataTypeName(mapping.DataTypeName)) { @@ -760,7 +785,34 @@ public static PgTypeInfo CreateInfo(this TypeInfoMapping mapping, PgSerializerOp SupportsWriting = supportsWriting }; - public static PgResolverTypeInfo CreateInfo(this TypeInfoMapping mapping, PgSerializerOptions options, PgConverterResolver resolver, bool includeDataTypeName = true, DataFormat? preferredFormat = null, bool supportsWriting = true) + // NOTE: This method exists since 9.0 to be able to deprecate the method below that has optional arguments in 10.0 (potentially removing it directly or in 11.0). + // It reduces how binary breaking that change will be if this method would not be there to be picked for the most common invocations. + /// + /// Creates a PgResolverTypeInfo from a mapping, options, and a converter resolver. + /// + /// The mapping to create an info for. + /// The options to use. + /// The resolver to create a PgResolverTypeInfo for. 
+ /// Whether to pass mapping.DataTypeName to the PgResolverTypeInfo constructor, mandatory when TypeInfoFactory(..., requiresDataTypeName: true). + /// The created info instance. + public static PgResolverTypeInfo CreateInfo(this TypeInfoMapping mapping, PgSerializerOptions options, PgConverterResolver resolver, bool includeDataTypeName) + => new(options, resolver, includeDataTypeName ? new DataTypeName(mapping.DataTypeName) : null) + { + PreferredFormat = null, + SupportsWriting = true + }; + + /// + /// Creates a PgResolverTypeInfo from a mapping, options, and a converter resolver. + /// + /// The mapping to create an info for. + /// The options to use. + /// The converter resolver to create a PgResolverTypeInfo for. + /// Whether to pass mapping.DataTypeName to the PgResolverTypeInfo constructor, mandatory when TypeInfoFactory(..., requiresDataTypeName: true). + /// Whether to prefer a specific data format for this info, when null it defaults to the most suitable format. + /// Whether the converters returned from the given converter resolver support writing. + /// The created info instance. + public static PgResolverTypeInfo CreateInfo(this TypeInfoMapping mapping, PgSerializerOptions options, PgConverterResolver resolver, bool includeDataTypeName, DataFormat? preferredFormat = null, bool supportsWriting = true) => new(options, resolver, includeDataTypeName ? 
new DataTypeName(mapping.DataTypeName) : null) { PreferredFormat = preferredFormat, diff --git a/src/Npgsql/TypeMapping/UserTypeMapper.cs b/src/Npgsql/TypeMapping/UserTypeMapper.cs index bd990efcee..3b7928bbd2 100644 --- a/src/Npgsql/TypeMapping/UserTypeMapper.cs +++ b/src/Npgsql/TypeMapping/UserTypeMapper.cs @@ -221,7 +221,7 @@ sealed class StructCompositeMapping< where T : struct { internal override void AddMapping(TypeInfoMappingCollection mappings) - => mappings.AddStructType(PgTypeName, (options, mapping, dataTypeNameMatch) => + => mappings.AddStructType(PgTypeName, (options, mapping, requiresDataTypeName) => { var pgType = mapping.GetPgType(options); if (pgType is not PostgresCompositeType compositeType) From b703159907ff112f20d082dd63178308aea0e7f5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B8rnar=20Skinnes?= <76002125+bj8sk@users.noreply.github.com> Date: Sat, 16 Nov 2024 16:35:36 +0100 Subject: [PATCH 496/761] Constrain type loading to a user-provided list of schemas (#5535) Closes #4557 multi-tenant setups can specify which schemas should be loaded and as a result resolve many unqualified name ambiguities. Remaining types are likely shared across all schemas or - if they still need disambiguation - are best fully specified in the code for clarity. 
--- src/Npgsql/NpgsqlTypeLoadingOptions.cs | 38 ++++++++++++- src/Npgsql/PostgresDatabaseInfo.cs | 45 ++++++++++++--- src/Npgsql/PublicAPI.Unshipped.txt | 1 + src/Npgsql/Util/Statics.cs | 16 ++++-- test/Npgsql.Tests/ConnectionTests.cs | 79 ++++++++++++++++++++++++++ test/Npgsql.Tests/TestUtil.cs | 6 +- 6 files changed, 166 insertions(+), 19 deletions(-) diff --git a/src/Npgsql/NpgsqlTypeLoadingOptions.cs b/src/Npgsql/NpgsqlTypeLoadingOptions.cs index bfca07357d..c031826675 100644 --- a/src/Npgsql/NpgsqlTypeLoadingOptions.cs +++ b/src/Npgsql/NpgsqlTypeLoadingOptions.cs @@ -1,4 +1,5 @@ using System; +using System.Collections.Generic; namespace Npgsql; @@ -17,6 +18,11 @@ sealed class NpgsqlTypeLoadingOptions /// support the basic set of types via information hardcoded inside Npgsql. /// public required bool LoadTypes { get; init; } = true; + + /// + /// Load type definitions from the given schemas. + /// + public required string[]? TypeLoadingSchemas { get; init; } } /// @@ -26,6 +32,7 @@ public sealed class NpgsqlTypeLoadingOptionsBuilder { bool _loadTableComposites; bool _loadTypes = true; + List? _typeLoadingSchemas; internal NpgsqlTypeLoadingOptionsBuilder() {} @@ -47,10 +54,39 @@ public NpgsqlTypeLoadingOptionsBuilder EnableTypeLoading(bool enable = true) return this; } + /// + /// Set the schemas to load types from, this can be used to reduce the work done during type loading. + /// + /// Npgsql will always load types from the following schemas: pg_catalog, information_schema, pg_toast. + /// Any user-defined types (typcategory 'U') will also be loaded regardless of their schema. + /// Schemas to load types from. + public NpgsqlTypeLoadingOptionsBuilder SetTypeLoadingSchemas(params IEnumerable? 
schemas) + { + if (schemas is null) + { + _typeLoadingSchemas = null; + return this; + } + + _typeLoadingSchemas = new(); + foreach (var schema in schemas) + { + if (schema is not { Length: > 0 }) + { + _typeLoadingSchemas = null; + throw new ArgumentException("Schema cannot be null or empty."); + } + _typeLoadingSchemas.Add(schema); + } + + return this; + } + internal NpgsqlTypeLoadingOptions Build() => new() { LoadTableComposites = _loadTableComposites, - LoadTypes = _loadTypes + LoadTypes = _loadTypes, + TypeLoadingSchemas = _typeLoadingSchemas?.ToArray() }; } diff --git a/src/Npgsql/PostgresDatabaseInfo.cs b/src/Npgsql/PostgresDatabaseInfo.cs index 670a56cd5d..6cd4f2a5fe 100644 --- a/src/Npgsql/PostgresDatabaseInfo.cs +++ b/src/Npgsql/PostgresDatabaseInfo.cs @@ -104,6 +104,8 @@ internal async Task LoadPostgresInfo(NpgsqlConnector conn, NpgsqlTimeout timeout _types = await LoadBackendTypes(conn, timeout, async).ConfigureAwait(false); } + const string BuiltinSchemaListSqlFragment = "'pg_catalog', 'information_schema', 'pg_toast'"; + /// /// Generates a raw SQL query string to select type information. /// @@ -114,7 +116,7 @@ internal async Task LoadPostgresInfo(NpgsqlConnector conn, NpgsqlTimeout timeout /// For arrays and ranges, join in the element OID and type (to filter out arrays of unhandled /// types). /// - static string GenerateLoadTypesQuery(bool withRange, bool withMultirange, bool loadTableComposites) + static string GenerateLoadTypesQuery(bool withRange, bool withMultirange, bool loadTableComposites, string? 
schemaListSqlFragment, bool hasTypeCategory) => $@" SELECT ns.nspname, t.oid, t.typname, t.typtype, t.typnotnull, t.elemtypoid FROM ( @@ -125,6 +127,7 @@ static string GenerateLoadTypesQuery(bool withRange, bool withMultirange, bool l typ.oid, typ.typnamespace, typ.typname, typ.typtype, typ.typrelid, typ.typnotnull, typ.relkind, elemtyp.oid AS elemtypoid, elemtyp.typname AS elemtypname, elemcls.relkind AS elemrelkind, CASE WHEN elemproc.proname='array_recv' THEN 'a' ELSE elemtyp.typtype END AS elemtyptype + {(hasTypeCategory ? ", typ.typcategory" : "")} FROM ( SELECT typ.oid, typnamespace, typname, typrelid, typnotnull, relkind, typelem AS elemoid, CASE WHEN proc.proname='array_recv' THEN 'a' ELSE typ.typtype END AS typtype, @@ -134,6 +137,7 @@ static string GenerateLoadTypesQuery(bool withRange, bool withMultirange, bool l {(withMultirange ? "WHEN typ.typtype='m' THEN (SELECT rngtypid FROM pg_range WHERE rngmultitypid = typ.oid)" : "")} WHEN typ.typtype='d' THEN typ.typbasetype END AS elemtypoid + {(hasTypeCategory ? ", typ.typcategory" : "")} FROM pg_type AS typ LEFT JOIN pg_class AS cls ON (cls.oid = typ.typrelid) LEFT JOIN pg_proc AS proc ON proc.oid = typ.typreceive @@ -145,14 +149,15 @@ LEFT JOIN pg_class AS elemcls ON (elemcls.oid = elemtyp.typrelid) ) AS t JOIN pg_namespace AS ns ON (ns.oid = typnamespace) WHERE + {(schemaListSqlFragment is not null ? $"(ns.nspname IN ({BuiltinSchemaListSqlFragment}{(schemaListSqlFragment.Length > 0 ? $", {schemaListSqlFragment}" : "")}){(hasTypeCategory ? " OR typcategory = 'U'" : "" )}) AND (" : "(")} typtype IN ('b', 'r', 'm', 'e', 'd') OR -- Base, range, multirange, enum, domain - (typtype = 'c' AND {(loadTableComposites ? "ns.nspname NOT IN ('pg_catalog', 'information_schema', 'pg_toast')" : "relkind='c'")}) OR -- User-defined free-standing composites (not table composites) by default + (typtype = 'c' AND {(loadTableComposites ? 
$"ns.nspname NOT IN ({BuiltinSchemaListSqlFragment})" : "relkind='c'")}) OR -- User-defined free-standing composites (not table composites) by default (typtype = 'p' AND typname IN ('record', 'void', 'unknown')) OR -- Some special supported pseudo-types (typtype = 'a' AND ( -- Array of... elemtyptype IN ('b', 'r', 'm', 'e', 'd') OR -- Array of base, range, multirange, enum, domain (elemtyptype = 'p' AND elemtypname IN ('record', 'void')) OR -- Arrays of special supported pseudo-types - (elemtyptype = 'c' AND {(loadTableComposites ? "ns.nspname NOT IN ('pg_catalog', 'information_schema', 'pg_toast')" : "elemrelkind='c'")}) -- Array of user-defined free-standing composites (not table composites) by default - )) + (elemtyptype = 'c' AND {(loadTableComposites ? $"ns.nspname NOT IN ({BuiltinSchemaListSqlFragment})" : "elemrelkind='c'")}) -- Array of user-defined free-standing composites (not table composites) by default + ))) ORDER BY CASE WHEN typtype IN ('b', 'e', 'p') THEN 0 -- First base types, enums, pseudo-types WHEN typtype = 'c' THEN 1 -- Composites after (fields loaded later in 2nd pass) @@ -163,7 +168,7 @@ WHEN typtype IN ('b', 'e', 'p') THEN 0 -- First base types, enums, pse WHEN typtype = 'd' AND elemtyptype = 'a' THEN 6 -- Domains over arrays last END;"; - static string GenerateLoadCompositeTypesQuery(bool loadTableComposites) + static string GenerateLoadCompositeTypesQuery(bool loadTableComposites, string? schemaListSqlFragment) => $@" -- Load field definitions for (free-standing) composite types SELECT typ.oid, att.attname, att.atttypid @@ -172,7 +177,8 @@ JOIN pg_namespace AS ns ON (ns.oid = typ.typnamespace) JOIN pg_class AS cls ON (cls.oid = typ.typrelid) JOIN pg_attribute AS att ON (att.attrelid = typ.typrelid) WHERE - (typ.typtype = 'c' AND {(loadTableComposites ? "ns.nspname NOT IN ('pg_catalog', 'information_schema', 'pg_toast')" : "cls.relkind='c'")}) AND + (typ.typtype = 'c' AND {(loadTableComposites ? 
$"ns.nspname NOT IN ({BuiltinSchemaListSqlFragment})" : "cls.relkind='c'")}) AND + {(schemaListSqlFragment is not null ? $"(ns.nspname IN ({BuiltinSchemaListSqlFragment}{(schemaListSqlFragment.Length > 0 ? $", {schemaListSqlFragment}" : "")})) AND " : "")} attnum > 0 AND -- Don't load system attributes NOT attisdropped ORDER BY typ.oid, att.attnum;"; @@ -199,9 +205,30 @@ FROM pg_enum internal async Task> LoadBackendTypes(NpgsqlConnector conn, NpgsqlTimeout timeout, bool async) { var versionQuery = "SELECT version();"; - var loadTableComposites = conn.DataSource.Configuration.TypeLoading.LoadTableComposites; - var loadTypesQuery = GenerateLoadTypesQuery(SupportsRangeTypes, SupportsMultirangeTypes, loadTableComposites); - var loadCompositeTypesQuery = GenerateLoadCompositeTypesQuery(loadTableComposites); + var typeLoading = conn.DataSource.Configuration.TypeLoading; + var loadTableComposites = typeLoading.LoadTableComposites; + + // Escape the schemas configured by the user, we need these as literals to be used in an IN() operator, and we cannot use parameters. + // Add an opening quote, escape any quotes in the schema, and add a closing quote. + string? schemaListSqlFragment = null; + if (typeLoading.TypeLoadingSchemas is not null) + { + var builder = new StringBuilder(); + for (var i = 0; i < typeLoading.TypeLoadingSchemas.Length; i++) + { + if (i > 0) + builder.Append(", "); + var schema = typeLoading.TypeLoadingSchemas[i]; + builder.Append('\''); + builder.Append(EscapeLiteral(schema)); + builder.Append('\''); + } + + schemaListSqlFragment = builder.ToString(); + } + + var loadTypesQuery = GenerateLoadTypesQuery(SupportsRangeTypes, SupportsMultirangeTypes, loadTableComposites, schemaListSqlFragment, HasTypeCategory); + var loadCompositeTypesQuery = GenerateLoadCompositeTypesQuery(loadTableComposites, schemaListSqlFragment); var loadEnumFieldsQuery = SupportsEnumTypes ? 
GenerateLoadEnumFieldsQuery(HasEnumSortOrder) : string.Empty; diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index 003afe82f8..c380dbc8c0 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -37,6 +37,7 @@ Npgsql.NpgsqlTracingOptionsBuilder.EnableFirstResponseEvent(bool enable = true) Npgsql.NpgsqlTypeLoadingOptionsBuilder Npgsql.NpgsqlTypeLoadingOptionsBuilder.EnableTableCompositesLoading(bool enable = true) -> Npgsql.NpgsqlTypeLoadingOptionsBuilder! Npgsql.NpgsqlTypeLoadingOptionsBuilder.EnableTypeLoading(bool enable = true) -> Npgsql.NpgsqlTypeLoadingOptionsBuilder! +Npgsql.NpgsqlTypeLoadingOptionsBuilder.SetTypeLoadingSchemas(params System.Collections.Generic.IEnumerable? schemas) -> Npgsql.NpgsqlTypeLoadingOptionsBuilder! Npgsql.Replication.PgOutput.ReplicationValue.GetFieldName() -> string! Npgsql.Replication.PgOutput.Messages.ParallelStreamAbortMessage Npgsql.Replication.PgOutput.Messages.ParallelStreamAbortMessage.AbortLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber diff --git a/src/Npgsql/Util/Statics.cs b/src/Npgsql/Util/Statics.cs index 2b1101171b..c21d10fbe5 100644 --- a/src/Npgsql/Util/Statics.cs +++ b/src/Npgsql/Util/Statics.cs @@ -4,6 +4,7 @@ using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.Runtime.CompilerServices; +using System.Text; namespace Npgsql.Util; @@ -25,6 +26,15 @@ static Statics() DisableDateTimeInfinityConversions = AppContext.TryGetSwitch("Npgsql.DisableDateTimeInfinityConversions", out enabled) && enabled; } + /// Returns the escaped SQL representation of a string literal. + /// The identifier to be escaped. + internal static string EscapeLiteral(string literal) + { + // There is no support for escape sequences in quoted values for PostgreSQL, so replacing ' is enough. 
+ // (to be able to use escaped characters an alternative syntax exists, it requires E to appear directly before the opening quote) + return literal.Replace("'", "''"); + } + internal static T Expect(IBackendMessage msg, NpgsqlConnector connector) { if (msg.GetType() != typeof(T)) @@ -88,9 +98,3 @@ static void ThrowUnknownMessageCode(BackendMessageCode code) => ThrowHelper.ThrowNpgsqlException($"Unknown message code: {code}"); } } - -static class EnumerableExtensions -{ - internal static string Join(this IEnumerable values, string separator) - => string.Join(separator, values); -} diff --git a/test/Npgsql.Tests/ConnectionTests.cs b/test/Npgsql.Tests/ConnectionTests.cs index d49b350803..151255b8bc 100644 --- a/test/Npgsql.Tests/ConnectionTests.cs +++ b/test/Npgsql.Tests/ConnectionTests.cs @@ -652,6 +652,85 @@ public void Set_connection_string_to_empty() Assert.That(() => conn.Open(), Throws.Exception.TypeOf()); } + [Test] + [TestCase("test_schema_1", "public", true)] + [TestCase("test_schema_1", "test_schema_2", true)] + [TestCase("test_schema_2", "test_schema_3", true)] + [TestCase("test_schema_1", "public", false)] + [TestCase("test_schema_1", "test_schema_2", false)] + [TestCase("test_schema_2", "test_schema_3", false)] + [TestCase("'DROP TABLE X", "'COMMIT; ", false)] + [Parallelizable(ParallelScope.None)] + public async Task Set_Schemas_And_Load_Relevant_Types(string testSchema, string otherSchema, bool enabled) + { + if (IsMultiplexing) + return; + + await using var conn1 = await OpenConnectionAsync(); + try + { + await conn1.ExecuteNonQueryAsync("DROP TYPE IF EXISTS public.test_type_1"); + await conn1.ExecuteNonQueryAsync("DROP TYPE IF EXISTS public.test_type_2"); + await conn1.ExecuteNonQueryAsync("DROP TYPE IF EXISTS public.test_type_3"); + await conn1.ExecuteNonQueryAsync("CREATE TYPE public.test_type_3 AS (id int, name text)"); + + if (testSchema != "public") + { + await conn1.ExecuteNonQueryAsync($"DROP SCHEMA IF EXISTS \"{testSchema}\" CASCADE"); + 
await conn1.ExecuteNonQueryAsync($"CREATE SCHEMA \"{testSchema}\""); + } + + if (otherSchema != "public") + { + await conn1.ExecuteNonQueryAsync($"DROP SCHEMA IF EXISTS \"{otherSchema}\" CASCADE"); + await conn1.ExecuteNonQueryAsync($"CREATE SCHEMA \"{otherSchema}\""); + } + + await conn1.ExecuteNonQueryAsync($"DROP TYPE IF EXISTS \"{testSchema}\".test_type_1"); + await conn1.ExecuteNonQueryAsync($"CREATE TYPE \"{testSchema}\".test_type_1 AS (id int)"); + await conn1.ExecuteNonQueryAsync($"DROP TYPE IF EXISTS \"{otherSchema}\".test_type_2"); + await conn1.ExecuteNonQueryAsync($"CREATE TYPE \"{otherSchema}\".test_type_2 AS (id int, name text)"); + + using var dataSource = CreateDataSource(builder => + { + builder.ConfigureTypeLoading(builder => + { + if (enabled) + builder.SetTypeLoadingSchemas(testSchema, otherSchema); + }); + }); + using var conn = await dataSource.OpenConnectionAsync(); + if (enabled) + { + Assert.True(dataSource.DatabaseInfo.CompositeTypes.Any(x => x.Name == "test_type_1")); + if (testSchema == "public" || otherSchema == "public") + { + Assert.True(dataSource.DatabaseInfo.CompositeTypes.Any(x => x.Name == "test_type_2")); + Assert.True(dataSource.DatabaseInfo.CompositeTypes.Any(x => x.Name == "test_type_3")); + } + else + { + Assert.True(dataSource.DatabaseInfo.CompositeTypes.Any(x => x.Name == "test_type_2")); + Assert.False(dataSource.DatabaseInfo.CompositeTypes.Any(x => x.Name == "test_type_3")); + } + } + else + { + Assert.True(dataSource.DatabaseInfo.CompositeTypes.Any(x => x.Name == "test_type_1")); + Assert.True(dataSource.DatabaseInfo.CompositeTypes.Any(x => x.Name == "test_type_2")); + Assert.True(dataSource.DatabaseInfo.CompositeTypes.Any(x => x.Name == "test_type_3")); + } + } + finally + { + if (testSchema != "public") + await conn1.ExecuteNonQueryAsync($"DROP SCHEMA IF EXISTS \"{testSchema}\" CASCADE"); + if (otherSchema != "public") + await conn1.ExecuteNonQueryAsync($"DROP SCHEMA IF EXISTS \"{otherSchema}\" CASCADE"); + } + + } + 
[Test, IssueLink("https://github.com/npgsql/npgsql/issues/703")] public async Task No_database_defaults_to_username() { diff --git a/test/Npgsql.Tests/TestUtil.cs b/test/Npgsql.Tests/TestUtil.cs index c751ea0018..0cf7a6a75f 100644 --- a/test/Npgsql.Tests/TestUtil.cs +++ b/test/Npgsql.Tests/TestUtil.cs @@ -100,8 +100,8 @@ public static async Task IgnoreOnRedshift(NpgsqlConnection conn, string? ignoreT } } - public static bool IsPgPrerelease(NpgsqlConnection conn) - => ((string)conn.ExecuteScalar("SELECT version()")!).Contains("beta"); + public static async Task IsPgPrerelease(NpgsqlConnection conn) + => ((string) (await conn.ExecuteScalarAsync("SELECT version()"))!).Contains("beta"); public static void EnsureExtension(NpgsqlConnection conn, string extension, string? minVersion = null) => EnsureExtension(conn, extension, minVersion, async: false).GetAwaiter().GetResult(); @@ -168,7 +168,7 @@ static async Task IgnoreIfFeatureNotSupported(NpgsqlConnection conn, string test public static async Task EnsurePostgis(NpgsqlConnection conn) { - var isPreRelease = IsPgPrerelease(conn); + var isPreRelease = await IsPgPrerelease(conn); try { await EnsureExtensionAsync(conn, "postgis"); From acf447910c7278fdd2495f470686a44119f36a92 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Sat, 16 Nov 2024 19:40:27 +0200 Subject: [PATCH 497/761] Correct PublicAPI changes done accidentally in #5928 (#5933) --- src/Npgsql/PublicAPI.Shipped.txt | 2 ++ src/Npgsql/PublicAPI.Unshipped.txt | 2 ++ 2 files changed, 4 insertions(+) diff --git a/src/Npgsql/PublicAPI.Shipped.txt b/src/Npgsql/PublicAPI.Shipped.txt index 3318b574b1..3ec604ddc0 100644 --- a/src/Npgsql/PublicAPI.Shipped.txt +++ b/src/Npgsql/PublicAPI.Shipped.txt @@ -767,6 +767,8 @@ Npgsql.NpgsqlSlimDataSourceBuilder.UsePhysicalConnectionInitializer(System.Actio Npgsql.NpgsqlSlimDataSourceBuilder.UseRootCertificate(System.Security.Cryptography.X509Certificates.X509Certificate2? rootCertificate) -> Npgsql.NpgsqlSlimDataSourceBuilder! 
Npgsql.NpgsqlSlimDataSourceBuilder.UseRootCertificateCallback(System.Func? rootCertificateCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UseUserCertificateValidationCallback(System.Net.Security.RemoteCertificateValidationCallback! userCertificateValidationCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlTracingOptions +Npgsql.NpgsqlTracingOptions.NpgsqlTracingOptions() -> void Npgsql.NpgsqlTransaction Npgsql.NpgsqlTransaction.Connection.get -> Npgsql.NpgsqlConnection? Npgsql.PostgresErrorCodes diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index c380dbc8c0..10d2965ba0 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -26,6 +26,8 @@ Npgsql.NpgsqlSlimDataSourceBuilder.MapEnum(System.Type! clrType, string? pgName Npgsql.NpgsqlSlimDataSourceBuilder.MapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UseNegotiateOptionsCallback(System.Action? negotiateOptionsCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UseSslClientAuthenticationOptionsCallback(System.Action? sslClientAuthenticationOptionsCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! +*REMOVED*Npgsql.NpgsqlTracingOptions +*REMOVED*Npgsql.NpgsqlTracingOptions.NpgsqlTracingOptions() -> void Npgsql.NpgsqlTracingOptionsBuilder Npgsql.NpgsqlTracingOptionsBuilder.ConfigureBatchEnrichmentCallback(System.Action? batchEnrichmentCallback) -> Npgsql.NpgsqlTracingOptionsBuilder! Npgsql.NpgsqlTracingOptionsBuilder.ConfigureBatchFilter(System.Func? batchFilter) -> Npgsql.NpgsqlTracingOptionsBuilder! 
From 7a54da0bc46617ea293dad301b8396e3d29d43f2 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Sat, 16 Nov 2024 22:06:35 +0300 Subject: [PATCH 498/761] Improve writing IPaddress (and friends) within composite (#5841) --- .../Networking/IPAddressConverter.cs | 2 +- .../Networking/NpgsqlCidrConverter.cs | 2 +- .../Networking/NpgsqlInetConverter.cs | 8 ++++- test/Npgsql.Tests/Types/CompositeTests.cs | 29 +++++++++++++++++++ 4 files changed, 38 insertions(+), 3 deletions(-) diff --git a/src/Npgsql/Internal/Converters/Networking/IPAddressConverter.cs b/src/Npgsql/Internal/Converters/Networking/IPAddressConverter.cs index 9050f36f16..707bcd016b 100644 --- a/src/Npgsql/Internal/Converters/Networking/IPAddressConverter.cs +++ b/src/Npgsql/Internal/Converters/Networking/IPAddressConverter.cs @@ -7,7 +7,7 @@ namespace Npgsql.Internal.Converters; sealed class IPAddressConverter : PgBufferedConverter { public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) - => CanConvertBufferedDefault(format, out bufferRequirements); + => NpgsqlInetConverter.CanConvertImpl(format, out bufferRequirements); public override Size GetSize(SizeContext context, IPAddress value, ref object? 
writeState) => NpgsqlInetConverter.GetSizeImpl(context, value, ref writeState); diff --git a/src/Npgsql/Internal/Converters/Networking/NpgsqlCidrConverter.cs b/src/Npgsql/Internal/Converters/Networking/NpgsqlCidrConverter.cs index c6d0ab8d88..c3cd43c227 100644 --- a/src/Npgsql/Internal/Converters/Networking/NpgsqlCidrConverter.cs +++ b/src/Npgsql/Internal/Converters/Networking/NpgsqlCidrConverter.cs @@ -6,7 +6,7 @@ namespace Npgsql.Internal.Converters; sealed class NpgsqlCidrConverter : PgBufferedConverter { public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) - => CanConvertBufferedDefault(format, out bufferRequirements); + => NpgsqlInetConverter.CanConvertImpl(format, out bufferRequirements); public override Size GetSize(SizeContext context, NpgsqlCidr value, ref object? writeState) => NpgsqlInetConverter.GetSizeImpl(context, value.Address, ref writeState); diff --git a/src/Npgsql/Internal/Converters/Networking/NpgsqlInetConverter.cs b/src/Npgsql/Internal/Converters/Networking/NpgsqlInetConverter.cs index 26ce7cfa96..ea0066c9de 100644 --- a/src/Npgsql/Internal/Converters/Networking/NpgsqlInetConverter.cs +++ b/src/Npgsql/Internal/Converters/Networking/NpgsqlInetConverter.cs @@ -13,7 +13,13 @@ sealed class NpgsqlInetConverter : PgBufferedConverter const byte IPv6 = 3; public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) - => CanConvertBufferedDefault(format, out bufferRequirements); + => CanConvertImpl(format, out bufferRequirements); + + internal static bool CanConvertImpl(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.Create(Size.CreateUpperBound(20)); + return format == DataFormat.Binary; + } public override Size GetSize(SizeContext context, NpgsqlInet value, ref object? 
writeState) => GetSizeImpl(context, value.Address, ref writeState); diff --git a/test/Npgsql.Tests/Types/CompositeTests.cs b/test/Npgsql.Tests/Types/CompositeTests.cs index d10daf6734..baaed149f3 100644 --- a/test/Npgsql.Tests/Types/CompositeTests.cs +++ b/test/Npgsql.Tests/Types/CompositeTests.cs @@ -1,5 +1,6 @@ using System; using System.Linq; +using System.Net; using System.Reflection; using System.Threading.Tasks; using Npgsql.PostgresTypes; @@ -338,6 +339,29 @@ await AssertType( comparer: (actual, expected) => actual.EnumValue == expected.EnumValue); } + [Test] + public async Task Composite_containing_IPAddress() + { + await using var adminConnection = await OpenConnectionAsync(); + var compositeType = await GetTempTypeName(adminConnection); + + await adminConnection.ExecuteNonQueryAsync($@" +CREATE TYPE {compositeType} AS (address inet)"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.MapComposite(compositeType); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + await AssertType( + connection, + new SomeCompositeWithIPAddress { Address = IPAddress.Loopback }, + @"(127.0.0.1)", + compositeType, + npgsqlDbType: null, + comparer: (actual, expected) => actual.Address!.Equals(expected.Address)); + } + [Test] public async Task Composite_containing_converter_resolver_type() { @@ -721,6 +745,11 @@ public enum TestEnum public TestEnum EnumValue { get; set; } } + class SomeCompositeWithIPAddress + { + public IPAddress? Address { get; set; } + } + class SomeCompositeWithConverterResolverType { public DateTime[]? 
DateTimes { get; set; } From 604c00c4ab5f4597fb34d6ed3449a73fbc2468f1 Mon Sep 17 00:00:00 2001 From: Erik O'Leary <969938+onionhammer@users.noreply.github.com> Date: Mon, 18 Nov 2024 08:35:08 -0600 Subject: [PATCH 499/761] If 'out of order properties' enabled, allow type discriminator on JSONB types (#5932) Fixes #5937 Co-authored-by: Nino Floris Co-authored-by: Shay Rojansky --- Directory.Packages.props | 2 +- .../JsonDynamicTypeInfoResolverFactory.cs | 15 +- src/Npgsql/Npgsql.csproj | 2 +- test/Npgsql.Tests/Types/JsonDynamicTests.cs | 277 +++++++++++++----- 4 files changed, 212 insertions(+), 84 deletions(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index b110da1c1e..99cc5afec3 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -5,7 +5,7 @@ - + diff --git a/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs index 2aa24ec888..bc882382c6 100644 --- a/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs @@ -94,9 +94,15 @@ void AddUserMappings(bool jsonb, Type[] clrTypes) if (!jsonType.IsValueType && jsonTypeInfo.PolymorphismOptions is not null) { foreach (var derived in jsonTypeInfo.PolymorphismOptions.DerivedTypes) + { + // For jsonb we can't properly support polymorphic serialization unless the SerializerOptions.AllowOutOfOrderMetadataProperties is `true`. + // If `jsonb` AND `AllowOutOfOrderMetadataProperties` is `false`, use `derived.DerivedType` as the base type for the converter, + // this causes STJ to stop serializing the "$type" field; essentially disabling the feature. + var baseType = jsonb && !serializerOptions.AllowOutOfOrderMetadataProperties ? 
derived.DerivedType : jsonType; dynamicMappings.AddMapping(derived.DerivedType, dataTypeName, factory: (options, mapping, _) => mapping.CreateInfo(options, - CreateSystemTextJsonConverter(mapping.Type, jsonb, options.TextEncoding, serializerOptions, jsonType))); + CreateSystemTextJsonConverter(mapping.Type, jsonb, options.TextEncoding, serializerOptions, baseType))); + } } } mappings.AddRange(dynamicMappings.ToTypeInfoMappingCollection()); @@ -116,9 +122,10 @@ void AddUserMappings(bool jsonb, Type[] clrTypes) { var jsonb = dataTypeName == DataTypeNames.Jsonb; - // For jsonb we can't properly support polymorphic serialization unless we do quite some additional work - // so we default to mapping.Type instead (exact types will never serialize their "$type" fields, essentially disabling the feature). - var baseType = jsonb ? mapping.Type : typeof(object); + // For jsonb we can't properly support polymorphic serialization unless the SerializerOptions.AllowOutOfOrderMetadataProperties is `true`. + // If `jsonb` AND `AllowOutOfOrderMetadataProperties` is `false`, use `mapping.Type` as the base type for the converter, + // this causes STJ to stop serializing the "$type" field; essentially disabling the feature. + var baseType = jsonb && !SerializerOptions.AllowOutOfOrderMetadataProperties ? 
mapping.Type : typeof(object); return mapping.CreateInfo(options, CreateSystemTextJsonConverter(mapping.Type, jsonb, options.TextEncoding, SerializerOptions, baseType)); diff --git a/src/Npgsql/Npgsql.csproj b/src/Npgsql/Npgsql.csproj index 77b65d47b2..4c5042b340 100644 --- a/src/Npgsql/Npgsql.csproj +++ b/src/Npgsql/Npgsql.csproj @@ -22,9 +22,9 @@ + - diff --git a/test/Npgsql.Tests/Types/JsonDynamicTests.cs b/test/Npgsql.Tests/Types/JsonDynamicTests.cs index 07cecc2a62..b6f3004a64 100644 --- a/test/Npgsql.Tests/Types/JsonDynamicTests.cs +++ b/test/Npgsql.Tests/Types/JsonDynamicTests.cs @@ -215,110 +215,207 @@ await AssertType( isNpgsqlDbTypeInferredFromClrType: false); } + #region Polymorphic + [Test] public async Task Poco_polymorphic_mapping() { - // We don't yet support polymorphic deserialization for jsonb as $type does not come back as the first property. - // This could be fixed by detecting PolymorphicOptions types, always buffering their values and modifying the text. - if (IsJsonb) - return; + await using var dataSource = CreateDataSource(builder => + { + var types = new[] {typeof(WeatherForecast)}; + builder + .ConfigureJsonOptions(new() { AllowOutOfOrderMetadataProperties = true }) + .EnableDynamicJson(jsonClrTypes: IsJsonb ? [] : types, jsonbClrTypes: !IsJsonb ? [] : types); + }); - var dataSourceBuilder = CreateDataSourceBuilder(); - dataSourceBuilder.EnableDynamicJson(jsonClrTypes: [typeof(WeatherForecast)]); - await using var dataSource = dataSourceBuilder.Build(); + var value = new ExtendedDerivedWeatherForecast + { + Date = new DateTime(2019, 9, 1), + Summary = "Partly cloudy", + TemperatureC = 10 + }; - await AssertType( - dataSource, - new ExtendedDerivedWeatherForecast() + // Note: we assert a specific string representation, though jsonb doesn't guarantee the property ordering; so the assert may break + // for jsonb if PostgreSQL changes its implementation. + var sql = + IsJsonb + ? 
"""{"Date": "2019-09-01T00:00:00", "$type": "extended", "Summary": "Partly cloudy", "TemperatureC": 10, "TemperatureF": 49}""" + : """{"$type":"extended","TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}"""; + + await AssertTypeWrite(dataSource, value, sql, PostgresType, NpgsqlDbType, isNpgsqlDbTypeInferredFromClrType: false); + await AssertTypeRead(dataSource, sql, PostgresType, value, isDefault: false); + } + + [Test] + public async Task Poco_polymorphic_mapping_read_parents() + { + await using var dataSource = CreateDataSource(builder => + { + var types = new[] {typeof(WeatherForecast)}; + builder + .ConfigureJsonOptions(new() { AllowOutOfOrderMetadataProperties = true }) + .EnableDynamicJson(jsonClrTypes: IsJsonb ? [] : types, jsonbClrTypes: !IsJsonb ? [] : types); + }); + + var value = new ExtendedDerivedWeatherForecast + { + Date = new DateTime(2019, 9, 1), + Summary = "Partly cloudy", + TemperatureC = 10 + }; + + // Note: we assert a specific string representation, though jsonb doesn't guarantee the property ordering; so the assert may break + // for jsonb if PostgreSQL changes its implementation. + var sql = + IsJsonb + ? 
"""{"Date": "2019-09-01T00:00:00", "$type": "extended", "Summary": "Partly cloudy", "TemperatureC": 10, "TemperatureF": 49}""" + : """{"$type":"extended","TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}"""; + + await AssertTypeWrite(dataSource, value, sql, PostgresType, NpgsqlDbType, + isNpgsqlDbTypeInferredFromClrType: false); + + await AssertTypeRead(dataSource, sql, PostgresType, value, isDefault: false); + await AssertTypeRead(dataSource, sql, PostgresType, + new DerivedWeatherForecast { Date = new DateTime(2019, 9, 1), Summary = "Partly cloudy", TemperatureC = 10 }, - """{"$type":"extended","TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}""", - PostgresType, - NpgsqlDbType, - isDefaultForReading: false, - isNpgsqlDbTypeInferredFromClrType: false); + isDefault: false); + await AssertTypeRead(dataSource, sql, PostgresType, value, isDefault: false); } [Test] - public async Task Poco_polymorphic_mapping_read_parents() + public async Task Poco_exact_polymorphic_mapping() { - // We don't yet support polymorphic deserialization for jsonb as $type does not come back as the first property. - // This could be fixed by detecting PolymorphicOptions types, always buffering their values and modifying the text. - if (IsJsonb) - return; - - var dataSourceBuilder = CreateDataSourceBuilder(); - dataSourceBuilder.EnableDynamicJson(jsonClrTypes: [typeof(WeatherForecast)]); - await using var dataSource = dataSourceBuilder.Build(); + await using var dataSource = CreateDataSource(builder => + { + var types = new[] {typeof(ExtendedDerivedWeatherForecast)}; + builder + .ConfigureJsonOptions(new() { AllowOutOfOrderMetadataProperties = true }) + .EnableDynamicJson(jsonClrTypes: IsJsonb ? [] : types, jsonbClrTypes: !IsJsonb ? 
[] : types); + }); - var value = new ExtendedDerivedWeatherForecast() + var value = new ExtendedDerivedWeatherForecast { Date = new DateTime(2019, 9, 1), Summary = "Partly cloudy", TemperatureC = 10 }; - var sql = """{"$type":"extended","TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}"""; + // Note: we assert a specific string representation, though jsonb doesn't guarantee the property ordering; so the assert may break + // for jsonb if PostgreSQL changes its implementation. + var sql = + IsJsonb + ? """{"Date": "2019-09-01T00:00:00", "Summary": "Partly cloudy", "TemperatureC": 10, "TemperatureF": 49}""" + : """{"TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}"""; - await AssertTypeWrite( - dataSource, - value, - sql, - PostgresType, - NpgsqlDbType, - isNpgsqlDbTypeInferredFromClrType: false); + await AssertTypeWrite(dataSource, value, sql, PostgresType, NpgsqlDbType, isNpgsqlDbTypeInferredFromClrType: false); + await AssertTypeRead(dataSource, sql, PostgresType, value, isDefault: false); + } - // GetFieldValue - await AssertTypeRead(dataSource, sql, PostgresType, value, - comparer: (_, actual) => actual.GetType() == typeof(ExtendedDerivedWeatherForecast), - isDefault: false); + [Test] + public async Task Poco_unspecified_polymorphic_mapping() + { + await using var dataSource = CreateDataSource(builder => + { + builder + .ConfigureJsonOptions(new() { AllowOutOfOrderMetadataProperties = true }) + .EnableDynamicJson(); + }); - await AssertTypeRead(dataSource, sql, PostgresType, value, - comparer: (_, actual) => actual.GetType() == typeof(DerivedWeatherForecast), isDefault: false); + var value = new ExtendedDerivedWeatherForecast + { + Date = new DateTime(2019, 9, 1), + Summary = "Partly cloudy", + TemperatureC = 10 + }; - await AssertTypeRead(dataSource, sql, PostgresType, value, isDefault: false); - } + // Note: we assert a specific string representation, though jsonb doesn't 
guarantee the property ordering; so the assert may break + // for jsonb if PostgreSQL changes its implementation. + var sql = + IsJsonb + ? """{"Date": "2019-09-01T00:00:00", "$type": "extended", "Summary": "Partly cloudy", "TemperatureC": 10, "TemperatureF": 49}""" + : """{"$type":"extended","TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}"""; + await AssertTypeWrite(dataSource, value, sql, PostgresType, NpgsqlDbType, isDefault: false); + + // Reading as DerivedWeatherForecast should not cause us to get an instance of ExtendedDerivedWeatherForecast (as it doesn't define JsonDerivedType) + await AssertTypeRead(dataSource, sql, PostgresType, + new DerivedWeatherForecast + { + Date = new DateTime(2019, 9, 1), + Summary = "Partly cloudy", + TemperatureC = 10 + }, + isDefault: false); + await AssertTypeRead(dataSource, sql, PostgresType, value, isDefault: false); + } [Test] - public async Task Poco_exact_polymorphic_mapping() + public async Task Poco_polymorphic_mapping_without_AllowOutOfOrderMetadataProperties() { - // We don't yet support polymorphic deserialization for jsonb as $type does not come back as the first property. - // This could be fixed by detecting PolymorphicOptions types, always buffering their values and modifying the text. - if (IsJsonb) - return; + await using var dataSource = CreateDataSource(builder => + { + var types = new[] {typeof(WeatherForecast)}; + builder + .ConfigureJsonOptions(new() { AllowOutOfOrderMetadataProperties = false }) + .EnableDynamicJson(jsonClrTypes: IsJsonb ? [] : types, jsonbClrTypes: !IsJsonb ? 
[] : types); + }); - var dataSourceBuilder = CreateDataSourceBuilder(); - dataSourceBuilder.EnableDynamicJson(jsonClrTypes: [typeof(ExtendedDerivedWeatherForecast)]); - await using var dataSource = dataSourceBuilder.Build(); + var value = new ExtendedDerivedWeatherForecast + { + Date = new DateTime(2019, 9, 1), + Summary = "Partly cloudy", + TemperatureC = 10 + }; - await AssertType( - dataSource, - new ExtendedDerivedWeatherForecast() + // Note: we assert a specific string representation, though jsonb doesn't guarantee the property ordering; so the assert may break + // for jsonb if PostgreSQL changes its implementation. + var sql = + IsJsonb + ? """{"Date": "2019-09-01T00:00:00", "Summary": "Partly cloudy", "TemperatureC": 10, "TemperatureF": 49}""" + : """{"$type":"extended","TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}"""; + + await AssertTypeWrite(dataSource, value, sql, PostgresType, NpgsqlDbType, isNpgsqlDbTypeInferredFromClrType: false); + + // As we have disabled polymorphism for jsonb when AllowOutOfOrderMetadataProperties = false we should be able to read it as equalt to a WeatherForecast instance. 
+ if (IsJsonb) + await AssertTypeRead(dataSource, sql, PostgresType, + new WeatherForecast + { + Date = new DateTime(2019, 9, 1), + Summary = "Partly cloudy", + TemperatureC = 10 + }, + isDefault: false); + + // Reading as DerivedWeatherForecast should not cause us to get an instance of ExtendedDerivedWeatherForecast (as it doesn't define JsonDerivedType) + await AssertTypeRead(dataSource, sql, PostgresType, + new DerivedWeatherForecast { Date = new DateTime(2019, 9, 1), Summary = "Partly cloudy", TemperatureC = 10 }, - """{"TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}""", - PostgresType, - NpgsqlDbType, - isDefaultForReading: false, - isNpgsqlDbTypeInferredFromClrType: false); + isDefault: false); + + // We won't get the original value back for jsonb as we can't support polymorphism without also enforcing AllowOutOfOrderMetadataProperties is true. + // If we output $type, jsonb won't have that at the start and STJ will throw due to it appearing later in the object. So it's disabled entirely. + if (!IsJsonb) + await AssertTypeRead(dataSource, sql, PostgresType, value, isDefault: false); } [Test] - public async Task Poco_unspecified_polymorphic_mapping() + public async Task Poco_unspecified_polymorphic_mapping_without_AllowOutOfOrderMetadataProperties() { - // We don't yet support polymorphic deserialization for jsonb as $type does not come back as the first property. - // This could be fixed by detecting PolymorphicOptions types, always buffering their values and modifying the text. - // In this case we don't have any statically mapped base type to check its PolymorphicOptions on. - // Detecting whether the type could be polymorphic would require us to duplicate STJ's nearest polymorphic ancestor search. 
- if (IsJsonb) - return; + await using var dataSource = CreateDataSource(builder => + { + builder + .ConfigureJsonOptions(new() { AllowOutOfOrderMetadataProperties = false }) + .EnableDynamicJson(); + }); var value = new ExtendedDerivedWeatherForecast { @@ -327,22 +424,44 @@ public async Task Poco_unspecified_polymorphic_mapping() TemperatureC = 10 }; - var sql = """{"$type":"extended","TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}"""; + // Note: we assert a specific string representation, though jsonb doesn't guarantee the property ordering; so the assert may break + // for jsonb if PostgreSQL changes its implementation. + var sql = + IsJsonb + ? """{"Date": "2019-09-01T00:00:00", "Summary": "Partly cloudy", "TemperatureC": 10, "TemperatureF": 49}""" + : """{"$type":"extended","TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}"""; - await AssertType( - value, - sql, - PostgresType, - NpgsqlDbType, - isDefault: false); + await AssertTypeWrite(dataSource, value, sql, PostgresType, NpgsqlDbType, isDefault: false); - await AssertTypeRead(DataSource, sql, PostgresType, value, - comparer: (_, actual) => actual.GetType() == typeof(DerivedWeatherForecast), isDefault: false); + // As we have disabled polymorphism for jsonb when AllowOutOfOrderMetadataProperties = false we should be able to read it as equalt to a WeatherForecast instance. 
+ if (IsJsonb) + await AssertTypeRead(dataSource, sql, PostgresType, + new WeatherForecast + { + Date = new DateTime(2019, 9, 1), + Summary = "Partly cloudy", + TemperatureC = 10 + }, + isDefault: false); + + // Reading as DerivedWeatherForecast should not cause us to get an instance of ExtendedDerivedWeatherForecast (as it doesn't define JsonDerivedType) + await AssertTypeRead(dataSource, sql, PostgresType, + new DerivedWeatherForecast + { + Date = new DateTime(2019, 9, 1), + Summary = "Partly cloudy", + TemperatureC = 10 + }, + isDefault: false); - await AssertTypeRead(DataSource, sql, PostgresType, value, - comparer: (_, actual) => actual.GetType() == typeof(ExtendedDerivedWeatherForecast), isDefault: false); + // We won't get the original value back for jsonb as we can't support polymorphism without also enforcing AllowOutOfOrderMetadataProperties is true. + // If we output $type, jsonb won't have that at the start and STJ will throw due to it appearing later in the object. So it's disabled entirely. 
+ if (!IsJsonb) + await AssertTypeRead(dataSource, sql, PostgresType, value, isDefault: false); } + // ReSharper disable UnusedAutoPropertyAccessor.Local + // ReSharper disable UnusedMember.Local [JsonDerivedType(typeof(ExtendedDerivedWeatherForecast), typeDiscriminator: "extended")] record WeatherForecast { @@ -351,14 +470,16 @@ record WeatherForecast public string Summary { get; set; } = ""; } - record DerivedWeatherForecast : WeatherForecast - { - } + record DerivedWeatherForecast : WeatherForecast; record ExtendedDerivedWeatherForecast : DerivedWeatherForecast { public int TemperatureF => 32 + (int)(TemperatureC / 0.5556); } + // ReSharper restore UnusedMember.Local + // ReSharper restore UnusedAutoPropertyAccessor.Local + + #endregion Polymorphic public JsonDynamicTests(MultiplexingMode multiplexingMode, NpgsqlDbType npgsqlDbType) : base(multiplexingMode) From 94de20fed2e7e64a1eb6f26c9fc044131a362958 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Mon, 18 Nov 2024 16:02:20 +0100 Subject: [PATCH 500/761] Bump version to 10.0.0 --- Directory.Build.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Build.props b/Directory.Build.props index 0117142918..50cb8595eb 100644 --- a/Directory.Build.props +++ b/Directory.Build.props @@ -1,6 +1,6 @@  - 9.0.0 + 10.0.0 latest true enable From 1d93236d0260c26cd1b9dec84ad3605c7efcb821 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Tue, 19 Nov 2024 17:57:26 +0700 Subject: [PATCH 501/761] STJ 9.0 alternative approach (#5941) --- Directory.Packages.props | 4 +-- .../JsonDynamicTypeInfoResolverFactory.cs | 12 +++++++-- src/Npgsql/Npgsql.csproj | 1 - test/Directory.Build.props | 2 +- test/Npgsql.Tests/Types/JsonDynamicTests.cs | 25 +++++++++++++++++++ 5 files changed, 38 insertions(+), 6 deletions(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 99cc5afec3..6f250d7c83 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -5,7 +5,7 @@ - + @@ -44,4 +44,4 
@@ - \ No newline at end of file + diff --git a/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs index bc882382c6..6384474cd7 100644 --- a/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs @@ -53,6 +53,14 @@ JsonSerializerOptions SerializerOptions readonly Type[] _jsonClrTypes = jsonClrTypes ?? []; TypeInfoMappingCollection? _mappings; +#if NET9_0_OR_GREATER + static Func AllowOutOfOrderMetadataProperties { get; } = options => options.AllowOutOfOrderMetadataProperties; +#else + static Func AllowOutOfOrderMetadataProperties { get; } = + typeof(JsonSerializerOptions).GetProperty("AllowOutOfOrderMetadataProperties") is { } prop && prop.GetGetMethod() is { } getProp + ? getProp.CreateDelegate>() + : _ => false; +#endif protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(), _jsonbClrTypes, _jsonClrTypes, SerializerOptions); public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) @@ -98,7 +106,7 @@ void AddUserMappings(bool jsonb, Type[] clrTypes) // For jsonb we can't properly support polymorphic serialization unless the SerializerOptions.AllowOutOfOrderMetadataProperties is `true`. // If `jsonb` AND `AllowOutOfOrderMetadataProperties` is `false`, use `derived.DerivedType` as the base type for the converter, // this causes STJ to stop serializing the "$type" field; essentially disabling the feature. - var baseType = jsonb && !serializerOptions.AllowOutOfOrderMetadataProperties ? derived.DerivedType : jsonType; + var baseType = jsonb && !AllowOutOfOrderMetadataProperties(serializerOptions) ? 
derived.DerivedType : jsonType; dynamicMappings.AddMapping(derived.DerivedType, dataTypeName, factory: (options, mapping, _) => mapping.CreateInfo(options, CreateSystemTextJsonConverter(mapping.Type, jsonb, options.TextEncoding, serializerOptions, baseType))); @@ -125,7 +133,7 @@ void AddUserMappings(bool jsonb, Type[] clrTypes) // For jsonb we can't properly support polymorphic serialization unless the SerializerOptions.AllowOutOfOrderMetadataProperties is `true`. // If `jsonb` AND `AllowOutOfOrderMetadataProperties` is `false`, use `mapping.Type` as the base type for the converter, // this causes STJ to stop serializing the "$type" field; essentially disabling the feature. - var baseType = jsonb && !SerializerOptions.AllowOutOfOrderMetadataProperties ? mapping.Type : typeof(object); + var baseType = jsonb && !AllowOutOfOrderMetadataProperties(SerializerOptions) ? mapping.Type : typeof(object); return mapping.CreateInfo(options, CreateSystemTextJsonConverter(mapping.Type, jsonb, options.TextEncoding, SerializerOptions, baseType)); diff --git a/src/Npgsql/Npgsql.csproj b/src/Npgsql/Npgsql.csproj index 4c5042b340..426eb06bcd 100644 --- a/src/Npgsql/Npgsql.csproj +++ b/src/Npgsql/Npgsql.csproj @@ -22,7 +22,6 @@ - diff --git a/test/Directory.Build.props b/test/Directory.Build.props index b51b1c04ba..1e2132817e 100644 --- a/test/Directory.Build.props +++ b/test/Directory.Build.props @@ -2,7 +2,7 @@ - net8.0 + net8.0;net9.0 false diff --git a/test/Npgsql.Tests/Types/JsonDynamicTests.cs b/test/Npgsql.Tests/Types/JsonDynamicTests.cs index b6f3004a64..21ff2700da 100644 --- a/test/Npgsql.Tests/Types/JsonDynamicTests.cs +++ b/test/Npgsql.Tests/Types/JsonDynamicTests.cs @@ -220,11 +220,17 @@ await AssertType( [Test] public async Task Poco_polymorphic_mapping() { +#if !NET9_0_OR_GREATER + if (IsJsonb) + return; +#endif await using var dataSource = CreateDataSource(builder => { var types = new[] {typeof(WeatherForecast)}; builder +#if NET9_0_OR_GREATER 
.ConfigureJsonOptions(new() { AllowOutOfOrderMetadataProperties = true }) +#endif .EnableDynamicJson(jsonClrTypes: IsJsonb ? [] : types, jsonbClrTypes: !IsJsonb ? [] : types); }); @@ -249,11 +255,17 @@ public async Task Poco_polymorphic_mapping() [Test] public async Task Poco_polymorphic_mapping_read_parents() { +#if !NET9_0_OR_GREATER + if (IsJsonb) + return; +#endif await using var dataSource = CreateDataSource(builder => { var types = new[] {typeof(WeatherForecast)}; builder +#if NET9_0_OR_GREATER .ConfigureJsonOptions(new() { AllowOutOfOrderMetadataProperties = true }) +#endif .EnableDynamicJson(jsonClrTypes: IsJsonb ? [] : types, jsonbClrTypes: !IsJsonb ? [] : types); }); @@ -293,7 +305,9 @@ public async Task Poco_exact_polymorphic_mapping() { var types = new[] {typeof(ExtendedDerivedWeatherForecast)}; builder +#if NET9_0_OR_GREATER .ConfigureJsonOptions(new() { AllowOutOfOrderMetadataProperties = true }) +#endif .EnableDynamicJson(jsonClrTypes: IsJsonb ? [] : types, jsonbClrTypes: !IsJsonb ? [] : types); }); @@ -318,10 +332,17 @@ public async Task Poco_exact_polymorphic_mapping() [Test] public async Task Poco_unspecified_polymorphic_mapping() { +#if !NET9_0_OR_GREATER + if (IsJsonb) + return; +#endif + await using var dataSource = CreateDataSource(builder => { builder +#if NET9_0_OR_GREATER .ConfigureJsonOptions(new() { AllowOutOfOrderMetadataProperties = true }) +#endif .EnableDynamicJson(); }); @@ -360,7 +381,9 @@ public async Task Poco_polymorphic_mapping_without_AllowOutOfOrderMetadataProper { var types = new[] {typeof(WeatherForecast)}; builder +#if NET9_0_OR_GREATER .ConfigureJsonOptions(new() { AllowOutOfOrderMetadataProperties = false }) +#endif .EnableDynamicJson(jsonClrTypes: IsJsonb ? [] : types, jsonbClrTypes: !IsJsonb ? 
[] : types); }); @@ -413,7 +436,9 @@ public async Task Poco_unspecified_polymorphic_mapping_without_AllowOutOfOrderMe await using var dataSource = CreateDataSource(builder => { builder +#if NET9_0_OR_GREATER .ConfigureJsonOptions(new() { AllowOutOfOrderMetadataProperties = false }) +#endif .EnableDynamicJson(); }); From b0dde48c52fb039b9ff8566d13e6c712fb391bcb Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Wed, 20 Nov 2024 12:42:24 +0200 Subject: [PATCH 502/761] Remove support for net6.0 (#5947) Closes #5946 --- global.json | 2 +- src/Directory.Build.props | 6 - .../Npgsql.DependencyInjection.csproj | 5 +- src/Npgsql.GeoJSON/Npgsql.GeoJSON.csproj | 3 +- src/Npgsql.Json.NET/Npgsql.Json.NET.csproj | 3 +- .../Npgsql.NetTopologySuite.csproj | 3 +- src/Npgsql.NodaTime/Npgsql.NodaTime.csproj | 3 +- .../Npgsql.OpenTelemetry.csproj | 3 +- .../Internal/InternalCharConverter.cs | 28 +--- .../Internal/Converters/MoneyConverter.cs | 63 +------- .../Networking/IPNetworkConverter.cs | 6 +- .../Converters/Primitive/DoubleConverter.cs | 28 +--- .../Converters/Primitive/GuidUuidConverter.cs | 24 +-- .../Converters/Primitive/Int2Converter.cs | 57 +------ .../Converters/Primitive/Int4Converter.cs | 56 +------ .../Converters/Primitive/Int8Converter.cs | 57 +------ .../Converters/Primitive/NumericConverters.cs | 61 +------ .../Internal/Converters/Primitive/PgMoney.cs | 4 - .../Converters/Primitive/PgNumeric.cs | 5 - .../Converters/Primitive/RealConverter.cs | 28 +--- src/Npgsql/Internal/NpgsqlConnector.Auth.cs | 17 +- .../Internal/NpgsqlConnector.OldAuth.cs | 153 ------------------ src/Npgsql/Internal/NpgsqlConnector.cs | 12 -- .../JsonDynamicTypeInfoResolverFactory.cs | 7 +- .../NetworkTypeInfoResolverFactory.cs | 2 - src/Npgsql/KerberosUsernameProvider.cs | 4 - src/Npgsql/MetricsReporter.cs | 7 +- src/Npgsql/Npgsql.csproj | 9 +- src/Npgsql/NpgsqlBatchCommand.cs | 21 +-- src/Npgsql/NpgsqlDataSource.cs | 7 +- src/Npgsql/NpgsqlDataSourceBuilder.cs | 2 - 
src/Npgsql/NpgsqlDataSourceConfiguration.cs | 7 +- src/Npgsql/NpgsqlException.cs | 2 - src/Npgsql/NpgsqlFactory.cs | 2 - src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 9 +- src/Npgsql/PoolingDataSource.cs | 4 - src/Npgsql/PostgresException.cs | 2 - src/Npgsql/Shims/DbDataSource.cs | 70 -------- src/Npgsql/Shims/ExperimentalAttribute.cs | 21 --- src/Npgsql/Shims/MemoryExtensions.cs | 20 --- src/Npgsql/Shims/StreamExtensions.cs | 38 ----- src/Npgsql/Shims/UnreachableException.cs | 39 ----- src/Shared/CodeAnalysis.cs | 83 ---------- .../DistributedTransactionTests.cs | 4 - 44 files changed, 36 insertions(+), 951 deletions(-) delete mode 100644 src/Npgsql/Internal/NpgsqlConnector.OldAuth.cs delete mode 100644 src/Npgsql/Shims/DbDataSource.cs delete mode 100644 src/Npgsql/Shims/ExperimentalAttribute.cs delete mode 100644 src/Npgsql/Shims/MemoryExtensions.cs delete mode 100644 src/Npgsql/Shims/StreamExtensions.cs delete mode 100644 src/Npgsql/Shims/UnreachableException.cs delete mode 100644 src/Shared/CodeAnalysis.cs diff --git a/global.json b/global.json index 67db748a1b..733b653c18 100644 --- a/global.json +++ b/global.json @@ -2,6 +2,6 @@ "sdk": { "version": "9.0.100", "rollForward": "latestMajor", - "allowPrerelease": "false" + "allowPrerelease": false } } diff --git a/src/Directory.Build.props b/src/Directory.Build.props index 9d07823223..b94a8a91bd 100644 --- a/src/Directory.Build.props +++ b/src/Directory.Build.props @@ -3,14 +3,8 @@ true - - true - - - - diff --git a/src/Npgsql.DependencyInjection/Npgsql.DependencyInjection.csproj b/src/Npgsql.DependencyInjection/Npgsql.DependencyInjection.csproj index 357003cf07..aa33763975 100644 --- a/src/Npgsql.DependencyInjection/Npgsql.DependencyInjection.csproj +++ b/src/Npgsql.DependencyInjection/Npgsql.DependencyInjection.csproj @@ -2,10 +2,7 @@ Shay Rojansky - - - net6.0;net8.0 - net8.0 + net8.0 npgsql;postgresql;postgres;ado;ado.net;database;sql;di;dependency injection README.md diff --git 
a/src/Npgsql.GeoJSON/Npgsql.GeoJSON.csproj b/src/Npgsql.GeoJSON/Npgsql.GeoJSON.csproj index 072feabea3..7b76bde10f 100644 --- a/src/Npgsql.GeoJSON/Npgsql.GeoJSON.csproj +++ b/src/Npgsql.GeoJSON/Npgsql.GeoJSON.csproj @@ -3,8 +3,7 @@ Yoh Deadfall;Shay Rojansky GeoJSON plugin for Npgsql, allowing mapping of PostGIS geometry types to GeoJSON types. npgsql;postgresql;postgres;postgis;geojson;spatial;ado;ado.net;database;sql - net6.0 - net8.0 + net8.0 $(NoWarn);NPG9001 diff --git a/src/Npgsql.Json.NET/Npgsql.Json.NET.csproj b/src/Npgsql.Json.NET/Npgsql.Json.NET.csproj index 67109a48da..e126980ad1 100644 --- a/src/Npgsql.Json.NET/Npgsql.Json.NET.csproj +++ b/src/Npgsql.Json.NET/Npgsql.Json.NET.csproj @@ -3,8 +3,7 @@ Shay Rojansky Json.NET plugin for Npgsql, allowing transparent serialization/deserialization of JSON objects directly to and from the database. npgsql;postgresql;json;postgres;ado;ado.net;database;sql - net6.0 - net8.0 + net8.0 enable $(NoWarn);NPG9001 diff --git a/src/Npgsql.NetTopologySuite/Npgsql.NetTopologySuite.csproj b/src/Npgsql.NetTopologySuite/Npgsql.NetTopologySuite.csproj index 214f4bd72e..ab318abc5c 100644 --- a/src/Npgsql.NetTopologySuite/Npgsql.NetTopologySuite.csproj +++ b/src/Npgsql.NetTopologySuite/Npgsql.NetTopologySuite.csproj @@ -4,8 +4,7 @@ NetTopologySuite plugin for Npgsql, allowing mapping of PostGIS geometry types to NetTopologySuite types. npgsql;postgresql;postgres;postgis;spatial;nettopologysuite;nts;ado;ado.net;database;sql README.md - net6.0 - net8.0 + net8.0 $(NoWarn);NU5104 $(NoWarn);NPG9001 diff --git a/src/Npgsql.NodaTime/Npgsql.NodaTime.csproj b/src/Npgsql.NodaTime/Npgsql.NodaTime.csproj index 3e4d826188..8bbad55db7 100644 --- a/src/Npgsql.NodaTime/Npgsql.NodaTime.csproj +++ b/src/Npgsql.NodaTime/Npgsql.NodaTime.csproj @@ -4,8 +4,7 @@ NodaTime plugin for Npgsql, allowing mapping of PostgreSQL date/time types to NodaTime types. 
npgsql;postgresql;postgres;nodatime;date;time;ado;ado;net;database;sql README.md - net6.0 - net8.0 + net8.0 $(NoWarn);NPG9001 diff --git a/src/Npgsql.OpenTelemetry/Npgsql.OpenTelemetry.csproj b/src/Npgsql.OpenTelemetry/Npgsql.OpenTelemetry.csproj index 7aff759251..bb1a60cc8a 100644 --- a/src/Npgsql.OpenTelemetry/Npgsql.OpenTelemetry.csproj +++ b/src/Npgsql.OpenTelemetry/Npgsql.OpenTelemetry.csproj @@ -2,8 +2,7 @@ Shay Rojansky - net6.0 - net8.0 + net8.0 npgsql;postgresql;postgres;ado;ado.net;database;sql;opentelemetry;tracing;diagnostics;instrumentation README.md diff --git a/src/Npgsql/Internal/Converters/Internal/InternalCharConverter.cs b/src/Npgsql/Internal/Converters/Internal/InternalCharConverter.cs index 5d00a26dcb..881d454d3a 100644 --- a/src/Npgsql/Internal/Converters/Internal/InternalCharConverter.cs +++ b/src/Npgsql/Internal/Converters/Internal/InternalCharConverter.cs @@ -4,10 +4,7 @@ // ReSharper disable once CheckNamespace namespace Npgsql.Internal.Converters; -sealed class InternalCharConverter : PgBufferedConverter -#if NET7_0_OR_GREATER - where T : INumberBase -#endif +sealed class InternalCharConverter : PgBufferedConverter where T : INumberBase { public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) { @@ -15,29 +12,6 @@ public override bool CanConvert(DataFormat format, out BufferRequirements buffer return format is DataFormat.Binary; } -#if NET7_0_OR_GREATER protected override T ReadCore(PgReader reader) => T.CreateChecked(reader.ReadByte()); protected override void WriteCore(PgWriter writer, T value) => writer.WriteByte(byte.CreateChecked(value)); -#else - protected override T ReadCore(PgReader reader) - { - var value = reader.ReadByte(); - if (typeof(byte) == typeof(T)) - return (T)(object)value; - if (typeof(char) == typeof(T)) - return (T)(object)(char)value; - - throw new NotSupportedException(); - } - - protected override void WriteCore(PgWriter writer, T value) - { - if (typeof(byte) == typeof(T)) - 
writer.WriteByte((byte)(object)value!); - else if (typeof(char) == typeof(T)) - writer.WriteByte(checked((byte)(char)(object)value!)); - else - throw new NotSupportedException(); - } -#endif } diff --git a/src/Npgsql/Internal/Converters/MoneyConverter.cs b/src/Npgsql/Internal/Converters/MoneyConverter.cs index 8443acedc3..2b6c078a84 100644 --- a/src/Npgsql/Internal/Converters/MoneyConverter.cs +++ b/src/Npgsql/Internal/Converters/MoneyConverter.cs @@ -3,72 +3,17 @@ namespace Npgsql.Internal.Converters; -sealed class MoneyConverter : PgBufferedConverter -#if NET7_0_OR_GREATER - where T : INumberBase -#endif +sealed class MoneyConverter : PgBufferedConverter where T : INumberBase { public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) { bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long)); return format is DataFormat.Binary; } + protected override T ReadCore(PgReader reader) => ConvertTo(new PgMoney(reader.ReadInt64())); protected override void WriteCore(PgWriter writer, T value) => writer.WriteInt64(ConvertFrom(value).GetValue()); - static PgMoney ConvertFrom(T value) - { -#if !NET7_0_OR_GREATER - if (typeof(short) == typeof(T)) - return new PgMoney((decimal)(short)(object)value!); - if (typeof(int) == typeof(T)) - return new PgMoney((decimal)(int)(object)value!); - if (typeof(long) == typeof(T)) - return new PgMoney((decimal)(long)(object)value!); - - if (typeof(byte) == typeof(T)) - return new PgMoney((decimal)(byte)(object)value!); - if (typeof(sbyte) == typeof(T)) - return new PgMoney((decimal)(sbyte)(object)value!); - - if (typeof(float) == typeof(T)) - return new PgMoney((decimal)(float)(object)value!); - if (typeof(double) == typeof(T)) - return new PgMoney((decimal)(double)(object)value!); - if (typeof(decimal) == typeof(T)) - return new PgMoney((decimal)(object)value!); - - throw new NotSupportedException(); -#else - return new PgMoney(decimal.CreateChecked(value)); -#endif - } - - static T 
ConvertTo(PgMoney money) - { -#if !NET7_0_OR_GREATER - if (typeof(short) == typeof(T)) - return (T)(object)(short)money.ToDecimal(); - if (typeof(int) == typeof(T)) - return (T)(object)(int)money.ToDecimal(); - if (typeof(long) == typeof(T)) - return (T)(object)(long)money.ToDecimal(); - - if (typeof(byte) == typeof(T)) - return (T)(object)(byte)money.ToDecimal(); - if (typeof(sbyte) == typeof(T)) - return (T)(object)(sbyte)money.ToDecimal(); - - if (typeof(float) == typeof(T)) - return (T)(object)(float)money.ToDecimal(); - if (typeof(double) == typeof(T)) - return (T)(object)(double)money.ToDecimal(); - if (typeof(decimal) == typeof(T)) - return (T)(object)money.ToDecimal(); - - throw new NotSupportedException(); -#else - return T.CreateChecked(money.ToDecimal()); -#endif - } + static PgMoney ConvertFrom(T value) => new(decimal.CreateChecked(value)); + static T ConvertTo(PgMoney money) => T.CreateChecked(money.ToDecimal()); } diff --git a/src/Npgsql/Internal/Converters/Networking/IPNetworkConverter.cs b/src/Npgsql/Internal/Converters/Networking/IPNetworkConverter.cs index 0371fb32a9..77714edf29 100644 --- a/src/Npgsql/Internal/Converters/Networking/IPNetworkConverter.cs +++ b/src/Npgsql/Internal/Converters/Networking/IPNetworkConverter.cs @@ -1,6 +1,4 @@ -#if NET8_0_OR_GREATER - -using System.Net; +using System.Net; // ReSharper disable once CheckNamespace namespace Npgsql.Internal.Converters; @@ -22,5 +20,3 @@ protected override IPNetwork ReadCore(PgReader reader) protected override void WriteCore(PgWriter writer, IPNetwork value) => NpgsqlInetConverter.WriteImpl(writer, (value.BaseAddress, (byte)value.PrefixLength), isCidr: true); } - -#endif diff --git a/src/Npgsql/Internal/Converters/Primitive/DoubleConverter.cs b/src/Npgsql/Internal/Converters/Primitive/DoubleConverter.cs index 74a56d06ae..8bc9caaf67 100644 --- a/src/Npgsql/Internal/Converters/Primitive/DoubleConverter.cs +++ b/src/Npgsql/Internal/Converters/Primitive/DoubleConverter.cs @@ -4,10 +4,7 @@ // 
ReSharper disable once CheckNamespace namespace Npgsql.Internal.Converters; -sealed class DoubleConverter : PgBufferedConverter -#if NET7_0_OR_GREATER - where T : INumberBase -#endif +sealed class DoubleConverter : PgBufferedConverter where T : INumberBase { public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) { @@ -15,29 +12,6 @@ public override bool CanConvert(DataFormat format, out BufferRequirements buffer return format is DataFormat.Binary; } -#if NET7_0_OR_GREATER protected override T ReadCore(PgReader reader) => T.CreateChecked(reader.ReadDouble()); protected override void WriteCore(PgWriter writer, T value) => writer.WriteDouble(double.CreateChecked(value)); -#else - protected override T ReadCore(PgReader reader) - { - var value = reader.ReadDouble(); - if (typeof(float) == typeof(T)) - return (T)(object)value; - if (typeof(double) == typeof(T)) - return (T)(object)value; - - throw new NotSupportedException(); - } - - protected override void WriteCore(PgWriter writer, T value) - { - if (typeof(float) == typeof(T)) - writer.WriteDouble((float)(object)value!); - else if (typeof(double) == typeof(T)) - writer.WriteDouble((double)(object)value!); - else - throw new NotSupportedException(); - } -#endif } diff --git a/src/Npgsql/Internal/Converters/Primitive/GuidUuidConverter.cs b/src/Npgsql/Internal/Converters/Primitive/GuidUuidConverter.cs index 596deedfce..a0d19a4fde 100644 --- a/src/Npgsql/Internal/Converters/Primitive/GuidUuidConverter.cs +++ b/src/Npgsql/Internal/Converters/Primitive/GuidUuidConverter.cs @@ -11,35 +11,15 @@ public override bool CanConvert(DataFormat format, out BufferRequirements buffer bufferRequirements = BufferRequirements.CreateFixedSize(16 * sizeof(byte)); return format is DataFormat.Binary; } + protected override Guid ReadCore(PgReader reader) - { -#if NET8_0_OR_GREATER - return new Guid(reader.ReadBytes(16).FirstSpan, bigEndian: true); -#else - return new GuidRaw - { - Data1 = 
reader.ReadInt32(), - Data2 = reader.ReadInt16(), - Data3 = reader.ReadInt16(), - Data4 = BitConverter.IsLittleEndian ? BinaryPrimitives.ReverseEndianness(reader.ReadInt64()) : reader.ReadInt64() - }.Value; -#endif - } + => new(reader.ReadBytes(16).FirstSpan, bigEndian: true); protected override void WriteCore(PgWriter writer, Guid value) { -#if NET8_0_OR_GREATER Span bytes = stackalloc byte[16]; value.TryWriteBytes(bytes, bigEndian: true, out _); writer.WriteBytes(bytes); -#else - var raw = new GuidRaw(value); - - writer.WriteInt32(raw.Data1); - writer.WriteInt16(raw.Data2); - writer.WriteInt16(raw.Data3); - writer.WriteInt64(BitConverter.IsLittleEndian ? BinaryPrimitives.ReverseEndianness(raw.Data4) : raw.Data4); -#endif } #if !NET8_0_OR_GREATER diff --git a/src/Npgsql/Internal/Converters/Primitive/Int2Converter.cs b/src/Npgsql/Internal/Converters/Primitive/Int2Converter.cs index e54658d925..741af9a75e 100644 --- a/src/Npgsql/Internal/Converters/Primitive/Int2Converter.cs +++ b/src/Npgsql/Internal/Converters/Primitive/Int2Converter.cs @@ -4,67 +4,14 @@ // ReSharper disable once CheckNamespace namespace Npgsql.Internal.Converters; -sealed class Int2Converter : PgBufferedConverter -#if NET7_0_OR_GREATER - where T : INumberBase -#endif +sealed class Int2Converter : PgBufferedConverter where T : INumberBase { public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) { bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(short)); return format is DataFormat.Binary; } -#if NET7_0_OR_GREATER + protected override T ReadCore(PgReader reader) => T.CreateChecked(reader.ReadInt16()); protected override void WriteCore(PgWriter writer, T value) => writer.WriteInt16(short.CreateChecked(value)); -#else - protected override T ReadCore(PgReader reader) - { - var value = reader.ReadInt16(); - if (typeof(short) == typeof(T)) - return (T)(object)value; - if (typeof(int) == typeof(T)) - return (T)(object)(int)value; - if (typeof(long) == 
typeof(T)) - return (T)(object)(long)value; - - if (typeof(byte) == typeof(T)) - return (T)(object)checked((byte)value); - if (typeof(sbyte) == typeof(T)) - return (T)(object)checked((sbyte)value); - - if (typeof(float) == typeof(T)) - return (T)(object)(float)value; - if (typeof(double) == typeof(T)) - return (T)(object)(double)value; - if (typeof(decimal) == typeof(T)) - return (T)(object)(decimal)value; - - throw new NotSupportedException(); - } - - protected override void WriteCore(PgWriter writer, T value) - { - if (typeof(short) == typeof(T)) - writer.WriteInt16((short)(object)value!); - else if (typeof(int) == typeof(T)) - writer.WriteInt16(checked((short)(int)(object)value!)); - else if (typeof(long) == typeof(T)) - writer.WriteInt16(checked((short)(long)(object)value!)); - - else if (typeof(byte) == typeof(T)) - writer.WriteInt16((byte)(object)value!); - else if (typeof(sbyte) == typeof(T)) - writer.WriteInt16((sbyte)(object)value!); - - else if (typeof(float) == typeof(T)) - writer.WriteInt16(checked((short)(float)(object)value!)); - else if (typeof(double) == typeof(T)) - writer.WriteInt16(checked((short)(double)(object)value!)); - else if (typeof(decimal) == typeof(T)) - writer.WriteInt16((short)(decimal)(object)value!); - else - throw new NotSupportedException(); - } -#endif } diff --git a/src/Npgsql/Internal/Converters/Primitive/Int4Converter.cs b/src/Npgsql/Internal/Converters/Primitive/Int4Converter.cs index 1831ca9b1e..4327d2f2e7 100644 --- a/src/Npgsql/Internal/Converters/Primitive/Int4Converter.cs +++ b/src/Npgsql/Internal/Converters/Primitive/Int4Converter.cs @@ -4,10 +4,7 @@ // ReSharper disable once CheckNamespace namespace Npgsql.Internal.Converters; -sealed class Int4Converter : PgBufferedConverter -#if NET7_0_OR_GREATER - where T : INumberBase -#endif +sealed class Int4Converter : PgBufferedConverter where T : INumberBase { public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) { @@ -15,57 +12,6 @@ 
public override bool CanConvert(DataFormat format, out BufferRequirements buffer return format is DataFormat.Binary; } -#if NET7_0_OR_GREATER protected override T ReadCore(PgReader reader) => T.CreateChecked(reader.ReadInt32()); protected override void WriteCore(PgWriter writer, T value) => writer.WriteInt32(int.CreateChecked(value)); -#else - protected override T ReadCore(PgReader reader) - { - var value = reader.ReadInt32(); - if (typeof(short) == typeof(T)) - return (T)(object)checked((short)value); - if (typeof(int) == typeof(T)) - return (T)(object)value; - if (typeof(long) == typeof(T)) - return (T)(object)(long)value; - - if (typeof(byte) == typeof(T)) - return (T)(object)checked((byte)value); - if (typeof(sbyte) == typeof(T)) - return (T)(object)checked((sbyte)value); - - if (typeof(float) == typeof(T)) - return (T)(object)(float)value; - if (typeof(double) == typeof(T)) - return (T)(object)(double)value; - if (typeof(decimal) == typeof(T)) - return (T)(object)(decimal)value; - - throw new NotSupportedException(); - } - - protected override void WriteCore(PgWriter writer, T value) - { - if (typeof(short) == typeof(T)) - writer.WriteInt32((short)(object)value!); - else if (typeof(int) == typeof(T)) - writer.WriteInt32((int)(object)value!); - else if (typeof(long) == typeof(T)) - writer.WriteInt32(checked((int)(long)(object)value!)); - - else if (typeof(byte) == typeof(T)) - writer.WriteInt32((byte)(object)value!); - else if (typeof(sbyte) == typeof(T)) - writer.WriteInt32((sbyte)(object)value!); - - else if (typeof(float) == typeof(T)) - writer.WriteInt32(checked((int)(float)(object)value!)); - else if (typeof(double) == typeof(T)) - writer.WriteInt32(checked((int)(double)(object)value!)); - else if (typeof(decimal) == typeof(T)) - writer.WriteInt32((int)(decimal)(object)value!); - else - throw new NotSupportedException(); - } -#endif } diff --git a/src/Npgsql/Internal/Converters/Primitive/Int8Converter.cs 
b/src/Npgsql/Internal/Converters/Primitive/Int8Converter.cs index b422816244..09a54cf265 100644 --- a/src/Npgsql/Internal/Converters/Primitive/Int8Converter.cs +++ b/src/Npgsql/Internal/Converters/Primitive/Int8Converter.cs @@ -4,10 +4,7 @@ // ReSharper disable once CheckNamespace namespace Npgsql.Internal.Converters; -sealed class Int8Converter : PgBufferedConverter -#if NET7_0_OR_GREATER - where T : INumberBase -#endif +sealed class Int8Converter : PgBufferedConverter where T : INumberBase { public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) { @@ -15,58 +12,6 @@ public override bool CanConvert(DataFormat format, out BufferRequirements buffer return format is DataFormat.Binary; } -#if NET7_0_OR_GREATER protected override T ReadCore(PgReader reader) => T.CreateChecked(reader.ReadInt64()); protected override void WriteCore(PgWriter writer, T value) => writer.WriteInt64(long.CreateChecked(value)); -#else - protected override T ReadCore(PgReader reader) - { - var value = reader.ReadInt64(); - if (typeof(long) == typeof(T)) - return (T)(object)value; - - if (typeof(short) == typeof(T)) - return (T)(object)checked((short)value); - if (typeof(int) == typeof(T)) - return (T)(object)checked((int)value); - - if (typeof(byte) == typeof(T)) - return (T)(object)checked((byte)value); - if (typeof(sbyte) == typeof(T)) - return (T)(object)checked((sbyte)value); - - if (typeof(float) == typeof(T)) - return (T)(object)(float)value; - if (typeof(double) == typeof(T)) - return (T)(object)(double)value; - if (typeof(decimal) == typeof(T)) - return (T)(object)(decimal)value; - - throw new NotSupportedException(); - } - - protected override void WriteCore(PgWriter writer, T value) - { - if (typeof(short) == typeof(T)) - writer.WriteInt64((short)(object)value!); - else if (typeof(int) == typeof(T)) - writer.WriteInt64((int)(object)value!); - else if (typeof(long) == typeof(T)) - writer.WriteInt64((long)(object)value!); - - else if (typeof(byte) 
== typeof(T)) - writer.WriteInt64((byte)(object)value!); - else if (typeof(sbyte) == typeof(T)) - writer.WriteInt64((sbyte)(object)value!); - - else if (typeof(float) == typeof(T)) - writer.WriteInt64(checked((long)(float)(object)value!)); - else if (typeof(double) == typeof(T)) - writer.WriteInt64(checked((long)(double)(object)value!)); - else if (typeof(decimal) == typeof(T)) - writer.WriteInt64((long)(decimal)(object)value!); - else - throw new NotSupportedException(); - } -#endif } diff --git a/src/Npgsql/Internal/Converters/Primitive/NumericConverters.cs b/src/Npgsql/Internal/Converters/Primitive/NumericConverters.cs index c43e90a1f7..00788f99c8 100644 --- a/src/Npgsql/Internal/Converters/Primitive/NumericConverters.cs +++ b/src/Npgsql/Internal/Converters/Primitive/NumericConverters.cs @@ -82,12 +82,7 @@ static async ValueTask AsyncCore(PgWriter writer, BigInteger value, Cancellation static BigInteger ConvertTo(in PgNumeric numeric) => numeric.ToBigInteger(); } -sealed class DecimalNumericConverter : PgBufferedConverter -#if NET7_0_OR_GREATER - where T : INumberBase -#else - where T : notnull -#endif +sealed class DecimalNumericConverter : PgBufferedConverter where T : INumberBase { const int StackAllocByteThreshold = 64 * sizeof(uint); @@ -129,60 +124,10 @@ protected override void WriteCore(PgWriter writer, T value) } static PgNumeric.Builder ConvertFrom(T value, Span destination) - { -#if !NET7_0_OR_GREATER - if (typeof(short) == typeof(T)) - return new PgNumeric.Builder((decimal)(short)(object)value!, destination); - if (typeof(int) == typeof(T)) - return new PgNumeric.Builder((decimal)(int)(object)value!, destination); - if (typeof(long) == typeof(T)) - return new PgNumeric.Builder((decimal)(long)(object)value!, destination); - - if (typeof(byte) == typeof(T)) - return new PgNumeric.Builder((decimal)(byte)(object)value!, destination); - if (typeof(sbyte) == typeof(T)) - return new PgNumeric.Builder((decimal)(sbyte)(object)value!, destination); - - if 
(typeof(float) == typeof(T)) - return new PgNumeric.Builder((decimal)(float)(object)value!, destination); - if (typeof(double) == typeof(T)) - return new PgNumeric.Builder((decimal)(double)(object)value!, destination); - if (typeof(decimal) == typeof(T)) - return new PgNumeric.Builder((decimal)(object)value!, destination); - - throw new NotSupportedException(); -#else - return new PgNumeric.Builder(decimal.CreateChecked(value), destination); -#endif - } + => new(decimal.CreateChecked(value), destination); static T ConvertTo(in PgNumeric.Builder numeric) - { -#if !NET7_0_OR_GREATER - if (typeof(short) == typeof(T)) - return (T)(object)(short)numeric.ToDecimal(); - if (typeof(int) == typeof(T)) - return (T)(object)(int)numeric.ToDecimal(); - if (typeof(long) == typeof(T)) - return (T)(object)(long)numeric.ToDecimal(); - - if (typeof(byte) == typeof(T)) - return (T)(object)(byte)numeric.ToDecimal(); - if (typeof(sbyte) == typeof(T)) - return (T)(object)(sbyte)numeric.ToDecimal(); - - if (typeof(float) == typeof(T)) - return (T)(object)(float)numeric.ToDecimal(); - if (typeof(double) == typeof(T)) - return (T)(object)(double)numeric.ToDecimal(); - if (typeof(decimal) == typeof(T)) - return (T)(object)numeric.ToDecimal(); - - throw new NotSupportedException(); -#else - return T.CreateChecked(numeric.ToDecimal()); -#endif - } + => T.CreateChecked(numeric.ToDecimal()); } static class NumericConverter diff --git a/src/Npgsql/Internal/Converters/Primitive/PgMoney.cs b/src/Npgsql/Internal/Converters/Primitive/PgMoney.cs index dc8755de1f..bddbbda648 100644 --- a/src/Npgsql/Internal/Converters/Primitive/PgMoney.cs +++ b/src/Npgsql/Internal/Converters/Primitive/PgMoney.cs @@ -51,10 +51,6 @@ static void GetDecimalBits(decimal value, Span destination, out short scal Debug.Assert(destination.Length >= DecimalBits); decimal.GetBits(value, MemoryMarshal.Cast(destination)); -#if NET7_0_OR_GREATER scale = value.Scale; -#else - scale = (byte)(destination[3] >> 16); -#endif } } diff 
--git a/src/Npgsql/Internal/Converters/Primitive/PgNumeric.cs b/src/Npgsql/Internal/Converters/Primitive/PgNumeric.cs index 299dd9b419..c90036d381 100644 --- a/src/Npgsql/Internal/Converters/Primitive/PgNumeric.cs +++ b/src/Npgsql/Internal/Converters/Primitive/PgNumeric.cs @@ -32,12 +32,7 @@ static void GetDecimalBits(decimal value, Span destination, out short scal Debug.Assert(destination.Length >= DecimalBits); decimal.GetBits(value, MemoryMarshal.Cast(destination)); - -#if NET7_0_OR_GREATER scale = value.Scale; -#else - scale = (byte)(destination[3] >> 16); -#endif } public static int GetDigitCount(decimal value) diff --git a/src/Npgsql/Internal/Converters/Primitive/RealConverter.cs b/src/Npgsql/Internal/Converters/Primitive/RealConverter.cs index b47e641aa5..89eeebb7fe 100644 --- a/src/Npgsql/Internal/Converters/Primitive/RealConverter.cs +++ b/src/Npgsql/Internal/Converters/Primitive/RealConverter.cs @@ -4,10 +4,7 @@ // ReSharper disable once CheckNamespace namespace Npgsql.Internal.Converters; -sealed class RealConverter : PgBufferedConverter -#if NET7_0_OR_GREATER - where T : INumberBase -#endif +sealed class RealConverter : PgBufferedConverter where T : INumberBase { public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) { @@ -15,29 +12,6 @@ public override bool CanConvert(DataFormat format, out BufferRequirements buffer return format is DataFormat.Binary; } -#if NET7_0_OR_GREATER protected override T ReadCore(PgReader reader) => T.CreateChecked(reader.ReadFloat()); protected override void WriteCore(PgWriter writer, T value) => writer.WriteFloat(float.CreateChecked(value)); -#else - protected override T ReadCore(PgReader reader) - { - var value = reader.ReadFloat(); - if (typeof(float) == typeof(T)) - return (T)(object)value; - if (typeof(double) == typeof(T)) - return (T)(object)(double)value; - - throw new NotSupportedException(); - } - - protected override void WriteCore(PgWriter writer, T value) - { - if 
(typeof(float) == typeof(T)) - writer.WriteFloat((float)(object)value!); - else if (typeof(double) == typeof(T)) - writer.WriteFloat((float)(double)(object)value!); - else - throw new NotSupportedException(); - } -#endif } diff --git a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs index 59564f5361..bfb4c2b7f1 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs @@ -135,13 +135,7 @@ async Task AuthenticateSASL(List mechanisms, string username, bool async var saltedPassword = Hi(passwd.Normalize(NormalizationForm.FormKC), saltBytes, firstServerMsg.Iteration); var clientKey = HMAC(saltedPassword, "Client Key"); - byte[] storedKey; -#if NET7_0_OR_GREATER - storedKey = SHA256.HashData(clientKey); -#else - using (var sha256 = SHA256.Create()) - storedKey = sha256.ComputeHash(clientKey); -#endif + var storedKey = SHA256.HashData(clientKey); var clientFirstMessageBare = $"n=*,r={clientNonce}"; var serverFirstMessage = $"r={firstServerMsg.Nonce},s={firstServerMsg.Salt},i={firstServerMsg.Iteration}"; var clientFinalMessageWithoutProof = $"c={cbind},r={firstServerMsg.Nonce}"; @@ -280,9 +274,6 @@ async Task AuthenticateMD5(string username, byte[] salt, bool async, Cancellatio throw new NpgsqlException("No password has been provided but the backend requires one (in MD5)"); byte[] result; -#if !NET7_0_OR_GREATER - using (var md5 = MD5.Create()) -#endif { // First phase var passwordBytes = NpgsqlWriteBuffer.UTF8Encoding.GetBytes(passwd); @@ -292,11 +283,7 @@ async Task AuthenticateMD5(string username, byte[] salt, bool async, Cancellatio usernameBytes.CopyTo(cryptBuf, passwordBytes.Length); var sb = new StringBuilder(); -#if NET7_0_OR_GREATER var hashResult = MD5.HashData(cryptBuf); -#else - var hashResult = md5.ComputeHash(cryptBuf); -#endif foreach (var b in hashResult) sb.Append(b.ToString("x2")); @@ -329,7 +316,6 @@ async Task AuthenticateMD5(string username, byte[] salt, bool 
async, Cancellatio await Flush(async, cancellationToken).ConfigureAwait(false); } -#if NET7_0_OR_GREATER internal async Task AuthenticateGSS(bool async) { var targetName = $"{KerberosServiceName}/{Host}"; @@ -360,7 +346,6 @@ internal async Task AuthenticateGSS(bool async) await Flush(async, UserCancellationToken).ConfigureAwait(false); } } -#endif async ValueTask GetPassword(string username, bool async, CancellationToken cancellationToken = default) { diff --git a/src/Npgsql/Internal/NpgsqlConnector.OldAuth.cs b/src/Npgsql/Internal/NpgsqlConnector.OldAuth.cs deleted file mode 100644 index 91aec10660..0000000000 --- a/src/Npgsql/Internal/NpgsqlConnector.OldAuth.cs +++ /dev/null @@ -1,153 +0,0 @@ -using System; -using System.IO; -using System.Net; -using System.Net.Security; -using System.Security.Cryptography; -using System.Text; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.BackendMessages; -using static Npgsql.Util.Statics; - -namespace Npgsql.Internal; - - -partial class NpgsqlConnector -{ -#if !NET7_0_OR_GREATER - internal async Task AuthenticateGSS(bool async) - { - var targetName = $"{KerberosServiceName}/{Host}"; - - using var negotiateStream = new NegotiateStream(new GSSPasswordMessageStream(this), true); - try - { - if (async) - await negotiateStream.AuthenticateAsClientAsync(CredentialCache.DefaultNetworkCredentials, targetName).ConfigureAwait(false); - else - negotiateStream.AuthenticateAsClient(CredentialCache.DefaultNetworkCredentials, targetName); - } - catch (AuthenticationCompleteException) - { - return; - } - catch (IOException e) when (e.InnerException is AuthenticationCompleteException) - { - return; - } - catch (IOException e) when (e.InnerException is PostgresException) - { - throw e.InnerException; - } - - throw new NpgsqlException("NegotiateStream.AuthenticateAsClient completed unexpectedly without signaling success"); - } - - /// - /// This Stream is placed between NegotiateStream and the socket's NetworkStream (or 
SSLStream). It intercepts - /// traffic and performs the following operations: - /// * Outgoing messages are framed in PostgreSQL's PasswordMessage, and incoming are stripped of it. - /// * NegotiateStream frames payloads with a 5-byte header, which PostgreSQL doesn't understand. This header is - /// stripped from outgoing messages and added to incoming ones. - /// - /// - /// See https://referencesource.microsoft.com/#System/net/System/Net/_StreamFramer.cs,16417e735f0e9530,references - /// - sealed class GSSPasswordMessageStream : Stream - { - readonly NpgsqlConnector _connector; - int _leftToWrite; - int _leftToRead, _readPos; - byte[]? _readBuf; - - internal GSSPasswordMessageStream(NpgsqlConnector connector) - => _connector = connector; - - public override Task WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken = default) - => Write(buffer, offset, count, true, cancellationToken); - - public override void Write(byte[] buffer, int offset, int count) - => Write(buffer, offset, count, false).GetAwaiter().GetResult(); - - async Task Write(byte[] buffer, int offset, int count, bool async, CancellationToken cancellationToken = default) - { - if (_leftToWrite == 0) - { - // We're writing the frame header, which contains the payload size. 
- _leftToWrite = (buffer[3] << 8) | buffer[4]; - - buffer[0] = 22; - if (buffer[1] != 1) - throw new NotSupportedException($"Received frame header major v {buffer[1]} (different from 1)"); - if (buffer[2] != 0) - throw new NotSupportedException($"Received frame header minor v {buffer[2]} (different from 0)"); - - // In case of payload data in the same buffer just after the frame header - if (count == 5) - return; - count -= 5; - offset += 5; - } - - if (count > _leftToWrite) - throw new NpgsqlException($"NegotiateStream trying to write {count} bytes but according to frame header we only have {_leftToWrite} left!"); - await _connector.WritePassword(buffer, offset, count, async, cancellationToken).ConfigureAwait(false); - await _connector.Flush(async, cancellationToken).ConfigureAwait(false); - _leftToWrite -= count; - } - - public override Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken = default) - => Read(buffer, offset, count, true, cancellationToken); - - public override int Read(byte[] buffer, int offset, int count) - => Read(buffer, offset, count, false).GetAwaiter().GetResult(); - - async Task Read(byte[] buffer, int offset, int count, bool async, CancellationToken cancellationToken = default) - { - if (_leftToRead == 0) - { - var response = ExpectAny(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); - if (response.AuthRequestType == AuthenticationRequestType.Ok) - throw new AuthenticationCompleteException(); - var gssMsg = response as AuthenticationGSSContinueMessage; - if (gssMsg == null) - throw new NpgsqlException($"Received unexpected authentication request message {response.AuthRequestType}"); - _readBuf = gssMsg.AuthenticationData; - _leftToRead = gssMsg.AuthenticationData.Length; - _readPos = 0; - buffer[0] = 22; - buffer[1] = 1; - buffer[2] = 0; - buffer[3] = (byte)((_leftToRead >> 8) & 0xFF); - buffer[4] = (byte)(_leftToRead & 0xFF); - return 5; - } - - if (count > _leftToRead) - throw 
new NpgsqlException($"NegotiateStream trying to read {count} bytes but according to frame header we only have {_leftToRead} left!"); - count = Math.Min(count, _leftToRead); - Array.Copy(_readBuf!, _readPos, buffer, offset, count); - _leftToRead -= count; - return count; - } - - public override void Flush() { } - - public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException(); - public override void SetLength(long value) => throw new NotSupportedException(); - - public override bool CanRead => true; - public override bool CanWrite => true; - public override bool CanSeek => false; - public override long Length => throw new NotSupportedException(); - - public override long Position - { - get => throw new NotSupportedException(); - set => throw new NotSupportedException(); - } - } - - sealed class AuthenticationCompleteException : Exception { } -#endif -} diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 4e214d4b48..0abcb9323a 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -60,9 +60,7 @@ public sealed partial class NpgsqlConnector ProvidePasswordCallback? ProvidePasswordCallback { get; } #pragma warning restore CS0618 -#if NET7_0_OR_GREATER Action? 
NegotiateOptionsCallback { get; } -#endif public Encoding TextEncoding { get; private set; } = default!; @@ -913,16 +911,6 @@ internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, var host = Host; -#if !NET8_0_OR_GREATER - // If the host is a valid IP address - replace it with an empty string - // We do that because .NET uses targetHost argument to send SNI to the server - // RFC explicitly prohibits sending an IP address so some servers might fail - // This was already fixed for .NET 8 - // See #5543 for discussion - if (IPAddress.TryParse(host, out _)) - host = string.Empty; -#endif - timeout.CheckAndApply(this); var sslStream = new SslStream(_stream, leaveInnerStreamOpen: false); diff --git a/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs index 6384474cd7..04e3aa3313 100644 --- a/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs @@ -42,12 +42,7 @@ class Resolver(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSe : DynamicTypeInfoResolver, IPgTypeInfoResolver { JsonSerializerOptions? _serializerOptions = serializerOptions; - JsonSerializerOptions SerializerOptions - #if NET7_0_OR_GREATER - => _serializerOptions ??= JsonSerializerOptions.Default; - #else - => _serializerOptions ??= new(); - #endif + JsonSerializerOptions SerializerOptions => _serializerOptions ??= JsonSerializerOptions.Default; readonly Type[] _jsonbClrTypes = jsonbClrTypes ?? []; readonly Type[] _jsonClrTypes = jsonClrTypes ?? 
[]; diff --git a/src/Npgsql/Internal/ResolverFactories/NetworkTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/NetworkTypeInfoResolverFactory.cs index 0a7ebaaa1b..6bf69db4a2 100644 --- a/src/Npgsql/Internal/ResolverFactories/NetworkTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/NetworkTypeInfoResolverFactory.cs @@ -51,10 +51,8 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) mappings.AddStructType(DataTypeNames.Cidr, static (options, mapping, _) => mapping.CreateInfo(options, new NpgsqlCidrConverter()), isDefault: true); -#if NET8_0_OR_GREATER mappings.AddStructType(DataTypeNames.Cidr, static (options, mapping, _) => mapping.CreateInfo(options, new IPNetworkConverter())); -#endif return mappings; } diff --git a/src/Npgsql/KerberosUsernameProvider.cs b/src/Npgsql/KerberosUsernameProvider.cs index 3afb326548..591c43fd98 100644 --- a/src/Npgsql/KerberosUsernameProvider.cs +++ b/src/Npgsql/KerberosUsernameProvider.cs @@ -61,11 +61,7 @@ sealed class KerberosUsernameProvider var line = default(string); for (var i = 0; i < 2; i++) // ReSharper disable once MethodHasAsyncOverload -#if NET7_0_OR_GREATER if ((line = async ? await process.StandardOutput.ReadLineAsync(cancellationToken).ConfigureAwait(false) : process.StandardOutput.ReadLine()) == null) -#else - if ((line = async ? 
await process.StandardOutput.ReadLineAsync().ConfigureAwait(false) : process.StandardOutput.ReadLine()) == null) -#endif { connectionLogger.LogDebug("Unexpected output from klist, aborting Kerberos username detection"); return null; diff --git a/src/Npgsql/MetricsReporter.cs b/src/Npgsql/MetricsReporter.cs index 83b804c2f6..3da49ceecc 100644 --- a/src/Npgsql/MetricsReporter.cs +++ b/src/Npgsql/MetricsReporter.cs @@ -136,12 +136,7 @@ internal void ReportCommandStop(long startTimestamp) if (CommandDuration.Enabled && startTimestamp > 0) { -#if NET7_0_OR_GREATER - var duration = Stopwatch.GetElapsedTime(startTimestamp); -#else - var duration = new TimeSpan((long)((Stopwatch.GetTimestamp() - startTimestamp) * StopWatchTickFrequency)); -#endif - CommandDuration.Record(duration.TotalSeconds, _poolNameTag); + CommandDuration.Record(Stopwatch.GetElapsedTime(startTimestamp).TotalSeconds, _poolNameTag); } } diff --git a/src/Npgsql/Npgsql.csproj b/src/Npgsql/Npgsql.csproj index 426eb06bcd..a28f47cfbe 100644 --- a/src/Npgsql/Npgsql.csproj +++ b/src/Npgsql/Npgsql.csproj @@ -5,8 +5,7 @@ Npgsql is the open source .NET data provider for PostgreSQL. npgsql;postgresql;postgres;ado;ado.net;database;sql README.md - net6.0;net8.0 - net8.0 + net8.0 $(NoWarn);CA2017 $(NoWarn);NPG9001 $(NoWarn);NPG9002 @@ -24,12 +23,6 @@ - - - - - - diff --git a/src/Npgsql/NpgsqlBatchCommand.cs b/src/Npgsql/NpgsqlBatchCommand.cs index f25e8937b4..c1cfa3ef87 100644 --- a/src/Npgsql/NpgsqlBatchCommand.cs +++ b/src/Npgsql/NpgsqlBatchCommand.cs @@ -42,28 +42,11 @@ public override string CommandText public new NpgsqlParameterCollection Parameters => _parameters ??= []; -#if NET8_0_OR_GREATER /// - public override NpgsqlParameter CreateParameter() -#else - /// - /// Creates a new instance of a object. - /// - /// An object. 
- public NpgsqlParameter CreateParameter() -#endif - => new(); + public override NpgsqlParameter CreateParameter() => new(); -#if NET8_0_OR_GREATER /// - public override bool CanCreateParameter -#else - /// - /// Returns whether the method is implemented. - /// - public bool CanCreateParameter -#endif - => true; + public override bool CanCreateParameter => true; /// diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index ce6db8f843..ed434ee3c1 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -108,11 +108,8 @@ internal NpgsqlDataSource( var resolverChain, _defaultNameTranslator, ConnectionInitializer, - ConnectionInitializerAsync -#if NET7_0_OR_GREATER - ,_ -#endif - ) + ConnectionInitializerAsync, + _) = dataSourceConfig; _connectionLogger = LoggingConfiguration.ConnectionLogger; diff --git a/src/Npgsql/NpgsqlDataSourceBuilder.cs b/src/Npgsql/NpgsqlDataSourceBuilder.cs index a3b7779083..ce6926a544 100644 --- a/src/Npgsql/NpgsqlDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlDataSourceBuilder.cs @@ -364,7 +364,6 @@ public NpgsqlDataSourceBuilder UsePasswordProvider( return this; } -#if NET7_0_OR_GREATER /// /// When using Kerberos, this is a callback that allows customizing default settings for Kerberos authentication. /// @@ -380,7 +379,6 @@ public NpgsqlDataSourceBuilder UseNegotiateOptionsCallback(Action? ConnectionInitializer, - Func? ConnectionInitializerAsync -#if NET7_0_OR_GREATER - ,Action? NegotiateOptionsCallback -#endif - ); + Func? ConnectionInitializerAsync, + Action? NegotiateOptionsCallback); diff --git a/src/Npgsql/NpgsqlException.cs b/src/Npgsql/NpgsqlException.cs index 91eb84adef..89543b0b50 100644 --- a/src/Npgsql/NpgsqlException.cs +++ b/src/Npgsql/NpgsqlException.cs @@ -58,9 +58,7 @@ public override bool IsTransient /// /// The SerializationInfo that holds the serialized object data about the exception being thrown. 
/// The StreamingContext that contains contextual information about the source or destination. -#if NET8_0_OR_GREATER [Obsolete("This API supports obsolete formatter-based serialization. It should not be called or extended by application code.")] -#endif protected internal NpgsqlException(SerializationInfo info, StreamingContext context) : base(info, context) {} #endregion diff --git a/src/Npgsql/NpgsqlFactory.cs b/src/Npgsql/NpgsqlFactory.cs index 15a1cd431e..d95e645f70 100644 --- a/src/Npgsql/NpgsqlFactory.cs +++ b/src/Npgsql/NpgsqlFactory.cs @@ -66,11 +66,9 @@ public sealed class NpgsqlFactory : DbProviderFactory, IServiceProvider /// public override DbBatchCommand CreateBatchCommand() => new NpgsqlBatchCommand(); -#if NET7_0_OR_GREATER /// public override DbDataSource CreateDataSource(string connectionString) => NpgsqlDataSource.Create(connectionString); -#endif #region IServiceProvider Members diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index 376e3bd7c9..a7d9c455c5 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -38,9 +38,7 @@ public sealed class NpgsqlSlimDataSourceBuilder : INpgsqlTypeMapper Action? _clientCertificatesCallback; Action? _sslClientAuthenticationOptionsCallback; -#if NET7_0_OR_GREATER Action? 
_negotiateOptionsCallback; -#endif IntegratedSecurityHandler _integratedSecurityHandler = new(); @@ -853,11 +851,8 @@ _loggerFactory is null _resolverChainBuilder.Build(ConfigureResolverChain), DefaultNameTranslator, _connectionInitializer, - _connectionInitializerAsync -#if NET7_0_OR_GREATER - ,_negotiateOptionsCallback -#endif - )); + _connectionInitializerAsync, + _negotiateOptionsCallback)); } void ValidateMultiHost() diff --git a/src/Npgsql/PoolingDataSource.cs b/src/Npgsql/PoolingDataSource.cs index 46861a5c5e..c339047fb4 100644 --- a/src/Npgsql/PoolingDataSource.cs +++ b/src/Npgsql/PoolingDataSource.cs @@ -268,14 +268,10 @@ bool CheckIdleConnector([NotNullWhen(true)] NpgsqlConnector? connector) try { // We've managed to increase the open counter, open a physical connections. -#if NET7_0_OR_GREATER var startTime = Stopwatch.GetTimestamp(); -#endif var connector = new NpgsqlConnector(this, conn) { ClearCounter = _clearCounter }; await connector.Open(timeout, async, cancellationToken).ConfigureAwait(false); -#if NET7_0_OR_GREATER MetricsReporter.ReportConnectionCreateTime(Stopwatch.GetElapsedTime(startTime)); -#endif var i = 0; for (; i < MaxConnections; i++) diff --git a/src/Npgsql/PostgresException.cs b/src/Npgsql/PostgresException.cs index a157f0ab87..f941901e30 100644 --- a/src/Npgsql/PostgresException.cs +++ b/src/Npgsql/PostgresException.cs @@ -110,9 +110,7 @@ static string GetMessage(string sqlState, string messageText, int position, stri internal static PostgresException Load(NpgsqlReadBuffer buf, bool includeDetail, ILogger exceptionLogger) => new(ErrorOrNoticeMessage.Load(buf, includeDetail, exceptionLogger)); -#if NET8_0_OR_GREATER [Obsolete("This API supports obsolete formatter-based serialization. 
It should not be called or extended by application code.")] -#endif internal PostgresException(SerializationInfo info, StreamingContext context) : base(info, context) { diff --git a/src/Npgsql/Shims/DbDataSource.cs b/src/Npgsql/Shims/DbDataSource.cs deleted file mode 100644 index 6951d427fb..0000000000 --- a/src/Npgsql/Shims/DbDataSource.cs +++ /dev/null @@ -1,70 +0,0 @@ -#if !NET7_0_OR_GREATER - -using System.Threading; -using System.Threading.Tasks; - -#pragma warning disable CS1591 // Missing XML comment for publicly visible type or member (compatibility shim for old TFMs) - -// ReSharper disable once CheckNamespace -namespace System.Data.Common; - -public abstract class DbDataSource : IDisposable, IAsyncDisposable -{ - public abstract string ConnectionString { get; } - - protected abstract DbConnection CreateDbConnection(); - - // No need for an actual implementation in this compat shim - it's only implementation will be NpgsqlDataSource, which overrides this. - protected virtual DbConnection OpenDbConnection() - => throw new NotSupportedException(); - - // No need for an actual implementation in this compat shim - it's only implementation will be NpgsqlDataSource, which overrides this. - protected virtual ValueTask OpenDbConnectionAsync(CancellationToken cancellationToken = default) - => throw new NotSupportedException(); - - // No need for an actual implementation in this compat shim - it's only implementation will be NpgsqlDataSource, which overrides this. - protected virtual DbCommand CreateDbCommand(string? commandText = null) - => throw new NotSupportedException(); - - // No need for an actual implementation in this compat shim - it's only implementation will be NpgsqlDataSource, which overrides this. 
- protected virtual DbBatch CreateDbBatch() - => throw new NotSupportedException(); - - public DbConnection CreateConnection() - => CreateDbConnection(); - - public DbConnection OpenConnection() - => OpenDbConnection(); - - public ValueTask OpenConnectionAsync(CancellationToken cancellationToken = default) - => OpenDbConnectionAsync(cancellationToken); - - public DbCommand CreateCommand(string? commandText = null) - => CreateDbCommand(commandText); - - public DbBatch CreateBatch() - => CreateDbBatch(); - - public void Dispose() - { - Dispose(disposing: true); - GC.SuppressFinalize(this); - } - - public async ValueTask DisposeAsync() - { - await DisposeAsyncCore().ConfigureAwait(false); - - Dispose(disposing: false); - GC.SuppressFinalize(this); - } - - protected virtual void Dispose(bool disposing) - { - } - - protected virtual ValueTask DisposeAsyncCore() - => default; -} - -#endif \ No newline at end of file diff --git a/src/Npgsql/Shims/ExperimentalAttribute.cs b/src/Npgsql/Shims/ExperimentalAttribute.cs deleted file mode 100644 index ad6dfbf58c..0000000000 --- a/src/Npgsql/Shims/ExperimentalAttribute.cs +++ /dev/null @@ -1,21 +0,0 @@ -#if !NET8_0_OR_GREATER -namespace System.Diagnostics.CodeAnalysis; - -/// Indicates that an API is experimental and it may change in the future. -[AttributeUsage(AttributeTargets.Assembly | AttributeTargets.Module | AttributeTargets.Class | AttributeTargets.Struct | AttributeTargets.Enum | AttributeTargets.Constructor | AttributeTargets.Method | AttributeTargets.Property | AttributeTargets.Field | AttributeTargets.Event | AttributeTargets.Interface | AttributeTargets.Delegate, Inherited = false)] -public sealed class ExperimentalAttribute : Attribute -{ - /// Initializes a new instance of the class, specifying the ID that the compiler will use when reporting a use of the API the attribute applies to. - /// The ID that the compiler will use when reporting a use of the API the attribute applies to. 
- public ExperimentalAttribute(string diagnosticId) => DiagnosticId = diagnosticId; - - /// Gets the ID that the compiler will use when reporting a use of the API the attribute applies to. - /// The unique diagnostic ID. - public string DiagnosticId { get; } - - /// Gets or sets the URL for corresponding documentation. - /// The API accepts a format string instead of an actual URL, creating a generic URL that includes the diagnostic ID. - /// The format string that represents a URL to corresponding documentation. - public string? UrlFormat { get; set; } -} -#endif diff --git a/src/Npgsql/Shims/MemoryExtensions.cs b/src/Npgsql/Shims/MemoryExtensions.cs deleted file mode 100644 index 0da143f3c4..0000000000 --- a/src/Npgsql/Shims/MemoryExtensions.cs +++ /dev/null @@ -1,20 +0,0 @@ -#if !NET7_0_OR_GREATER -using System; - -namespace Npgsql; - -static class MemoryExtensions -{ - public static int IndexOfAnyExcept(this ReadOnlySpan span, T value0, T value1) where T : IEquatable - { - for (var i = 0; i < span.Length; i++) - { - var v = span[i]; - if (!v.Equals(value0) && !v.Equals(value1)) - return i; - } - - return -1; - } -} -#endif diff --git a/src/Npgsql/Shims/StreamExtensions.cs b/src/Npgsql/Shims/StreamExtensions.cs deleted file mode 100644 index 60dbf9ca3b..0000000000 --- a/src/Npgsql/Shims/StreamExtensions.cs +++ /dev/null @@ -1,38 +0,0 @@ -#if !NET7_0_OR_GREATER -using System.Threading; -using System.Threading.Tasks; - -// ReSharper disable once CheckNamespace -namespace System.IO -{ - // Helpers to read/write Span/Memory to Stream before netstandard 2.1 - static class StreamExtensions - { - public static void ReadExactly(this Stream stream, Span buffer) - { - var totalRead = 0; - while (totalRead < buffer.Length) - { - var read = stream.Read(buffer.Slice(totalRead)); - if (read is 0) - throw new EndOfStreamException(); - - totalRead += read; - } - } - - public static async ValueTask ReadExactlyAsync(this Stream stream, Memory buffer, CancellationToken 
cancellationToken = default) - { - var totalRead = 0; - while (totalRead < buffer.Length) - { - var read = await stream.ReadAsync(buffer.Slice(totalRead), cancellationToken).ConfigureAwait(false); - if (read is 0) - throw new EndOfStreamException(); - - totalRead += read; - } - } - } -} -#endif diff --git a/src/Npgsql/Shims/UnreachableException.cs b/src/Npgsql/Shims/UnreachableException.cs deleted file mode 100644 index f75989df13..0000000000 --- a/src/Npgsql/Shims/UnreachableException.cs +++ /dev/null @@ -1,39 +0,0 @@ -#if !NET7_0_OR_GREATER -namespace System.Diagnostics; - -/// -/// Exception thrown when the program executes an instruction that was thought to be unreachable. -/// -sealed class UnreachableException : Exception -{ - /// - /// Initializes a new instance of the class with the default error message. - /// - public UnreachableException() - : base("The program executed an instruction that was thought to be unreachable.") - { - } - - /// - /// Initializes a new instance of the - /// class with a specified error message. - /// - /// The error message that explains the reason for the exception. - public UnreachableException(string? message) - : base(message) - { - } - - /// - /// Initializes a new instance of the - /// class with a specified error message and a reference to the inner exception that is the cause of - /// this exception. - /// - /// The error message that explains the reason for the exception. - /// The exception that is the cause of the current exception. - public UnreachableException(string? message, Exception? 
innerException) - : base(message, innerException) - { - } -} -#endif diff --git a/src/Shared/CodeAnalysis.cs b/src/Shared/CodeAnalysis.cs deleted file mode 100644 index 091b856c05..0000000000 --- a/src/Shared/CodeAnalysis.cs +++ /dev/null @@ -1,83 +0,0 @@ - -namespace System.Diagnostics.CodeAnalysis -{ -#if !NET7_0_OR_GREATER - /// - /// Indicates that the specified method requires the ability to generate new code at runtime, - /// for example through . - /// - /// - /// This allows tools to understand which methods are unsafe to call when compiling ahead of time. - /// - [AttributeUsage(AttributeTargets.Method | AttributeTargets.Constructor | AttributeTargets.Class, Inherited = false)] - sealed class RequiresDynamicCodeAttribute : Attribute - { - /// - /// Initializes a new instance of the class - /// with the specified message. - /// - /// - /// A message that contains information about the usage of dynamic code. - /// - public RequiresDynamicCodeAttribute(string message) - => Message = message; - - /// - /// Gets a message that contains information about the usage of dynamic code. - /// - public string Message { get; } - - /// - /// Gets or sets an optional URL that contains more information about the method, - /// why it requires dynamic code, and what options a consumer has to deal with it. - /// - public string? Url { get; set; } - } - - [AttributeUsage(AttributeTargets.Constructor, AllowMultiple = false, Inherited = false)] - sealed class SetsRequiredMembersAttribute : Attribute - { - } - [AttributeUsageAttribute(AttributeTargets.Method | AttributeTargets.Property | AttributeTargets.Parameter, AllowMultiple = false, Inherited = false)] - sealed class UnscopedRefAttribute : Attribute - { - /// - /// Initializes a new instance of the class. 
- /// - public UnscopedRefAttribute() { } - } -#endif -} - -namespace System.Runtime.CompilerServices -{ -#if !NET7_0_OR_GREATER - [AttributeUsage(AttributeTargets.Class | AttributeTargets.Struct | AttributeTargets.Field | AttributeTargets.Property, AllowMultiple = false, Inherited = false)] - sealed class RequiredMemberAttribute : Attribute - { } - - [AttributeUsage(AttributeTargets.All, AllowMultiple = true, Inherited = false)] - sealed class CompilerFeatureRequiredAttribute(string featureName) : Attribute - { - /// - /// The name of the compiler feature. - /// - public string FeatureName { get; } = featureName; - - /// - /// If true, the compiler can choose to allow access to the location where this attribute is applied if it does not understand . - /// - public bool IsOptional { get; set; } - - /// - /// The used for the ref structs C# feature. - /// - public const string RefStructs = nameof(RefStructs); - - /// - /// The used for the required members C# feature. - /// - public const string RequiredMembers = nameof(RequiredMembers); - } -#endif -} diff --git a/test/Npgsql.Tests/DistributedTransactionTests.cs b/test/Npgsql.Tests/DistributedTransactionTests.cs index 5260e3daef..157e5ac112 100644 --- a/test/Npgsql.Tests/DistributedTransactionTests.cs +++ b/test/Npgsql.Tests/DistributedTransactionTests.cs @@ -1,5 +1,3 @@ -#if NET7_0_OR_GREATER - using System; using System.Collections.Concurrent; using System.Collections.Generic; @@ -633,5 +631,3 @@ internal static string CreateTempTable(NpgsqlConnection conn, string columns) #endregion } - -#endif From 2f0bdd10798294fa7114c981ea773763b54356ed Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Wed, 20 Nov 2024 17:33:03 +0200 Subject: [PATCH 503/761] Some leftover cleanup for removin net6.0 (#5949) Part of #5946 --- .../Converters/Primitive/GuidUuidConverter.cs | 26 ------------------- src/Npgsql/Internal/NpgsqlConnector.Auth.cs | 16 +----------- src/Npgsql/Internal/NpgsqlConnector.cs | 3 --- 
.../NetworkTypeInfoResolverFactory.cs | 3 --- src/Npgsql/MetricsReporter.cs | 7 ----- src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 2 -- src/Npgsql/PostgresException.cs | 2 -- 7 files changed, 1 insertion(+), 58 deletions(-) diff --git a/src/Npgsql/Internal/Converters/Primitive/GuidUuidConverter.cs b/src/Npgsql/Internal/Converters/Primitive/GuidUuidConverter.cs index a0d19a4fde..18e6b0edc5 100644 --- a/src/Npgsql/Internal/Converters/Primitive/GuidUuidConverter.cs +++ b/src/Npgsql/Internal/Converters/Primitive/GuidUuidConverter.cs @@ -21,30 +21,4 @@ protected override void WriteCore(PgWriter writer, Guid value) value.TryWriteBytes(bytes, bigEndian: true, out _); writer.WriteBytes(bytes); } - -#if !NET8_0_OR_GREATER - // The following table shows .NET GUID vs Postgres UUID (RFC 4122) layouts. - // - // Note that the first fields are converted from/to native endianness (handled by the Read* - // and Write* methods), while the last field is always read/written in big-endian format. - // - // We're reverting endianness on little endian systems to get it into big endian format. 
- // - // | Bits | Bytes | Name | Endianness (GUID) | Endianness (RFC 4122) | - // | ---- | ----- | ----- | ----------------- | --------------------- | - // | 32 | 4 | Data1 | Native | Big | - // | 16 | 2 | Data2 | Native | Big | - // | 16 | 2 | Data3 | Native | Big | - // | 64 | 8 | Data4 | Big | Big | - [StructLayout(LayoutKind.Explicit)] - struct GuidRaw - { - [FieldOffset(0)] public Guid Value; - [FieldOffset(0)] public int Data1; - [FieldOffset(4)] public short Data2; - [FieldOffset(6)] public short Data3; - [FieldOffset(8)] public long Data4; - public GuidRaw(Guid value) : this() => Value = value; - } -#endif } diff --git a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs index bfb4c2b7f1..8771ce8e33 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs @@ -255,17 +255,7 @@ static byte[] Xor(byte[] buffer1, byte[] buffer2) return buffer1; } - static byte[] HMAC(byte[] key, string data) - { - var dataBytes = Encoding.UTF8.GetBytes(data); -#if NET7_0_OR_GREATER - return HMACSHA256.HashData(key, dataBytes); -#else - using var ih = IncrementalHash.CreateHMAC(HashAlgorithmName.SHA256, key); - ih.AppendData(dataBytes); - return ih.GetHashAndReset(); -#endif - } + static byte[] HMAC(byte[] key, string data) => HMACSHA256.HashData(key, Encoding.UTF8.GetBytes(data)); async Task AuthenticateMD5(string username, byte[] salt, bool async, CancellationToken cancellationToken = default) { @@ -298,11 +288,7 @@ async Task AuthenticateMD5(string username, byte[] salt, bool async, Cancellatio prehashbytes.CopyTo(cryptBuf, 0); sb = new StringBuilder("md5"); -#if NET7_0_OR_GREATER hashResult = MD5.HashData(cryptBuf); -#else - hashResult = md5.ComputeHash(cryptBuf); -#endif foreach (var b in hashResult) sb.Append(b.ToString("x2")); diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 0abcb9323a..0d6c5e23d8 100644 --- 
a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -392,10 +392,7 @@ internal NpgsqlConnector(NpgsqlDataSource dataSource, NpgsqlConnection conn) CopyLogger = LoggingConfiguration.CopyLogger; SslClientAuthenticationOptionsCallback = dataSource.SslClientAuthenticationOptionsCallback; - -#if NET7_0_OR_GREATER NegotiateOptionsCallback = dataSource.Configuration.NegotiateOptionsCallback; -#endif State = ConnectorState.Closed; TransactionStatus = TransactionStatus.Idle; diff --git a/src/Npgsql/Internal/ResolverFactories/NetworkTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/NetworkTypeInfoResolverFactory.cs index 6bf69db4a2..f9220cf8a9 100644 --- a/src/Npgsql/Internal/ResolverFactories/NetworkTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/NetworkTypeInfoResolverFactory.cs @@ -78,10 +78,7 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) // cidr mappings.AddStructArrayType(DataTypeNames.Cidr); - -#if NET8_0_OR_GREATER mappings.AddStructArrayType(DataTypeNames.Cidr); -#endif return mappings; } diff --git a/src/Npgsql/MetricsReporter.cs b/src/Npgsql/MetricsReporter.cs index 3da49ceecc..707193e553 100644 --- a/src/Npgsql/MetricsReporter.cs +++ b/src/Npgsql/MetricsReporter.cs @@ -242,11 +242,4 @@ public void Dispose() Reporters.Remove(this); } } - -#if !NET7_0_OR_GREATER - const long TicksPerMicrosecond = 10; - const long TicksPerMillisecond = TicksPerMicrosecond * 1000; - const long TicksPerSecond = TicksPerMillisecond * 1000; // 10,000,000 - static readonly double StopWatchTickFrequency = (double)TicksPerSecond / Stopwatch.Frequency; -#endif } diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index a7d9c455c5..3c7de21fb7 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -341,7 +341,6 @@ public NpgsqlSlimDataSourceBuilder UsePasswordProvider( return this; } 
-#if NET7_0_OR_GREATER /// /// When using Kerberos, this is a callback that allows customizing default settings for Kerberos authentication. /// @@ -358,7 +357,6 @@ public NpgsqlSlimDataSourceBuilder UseNegotiateOptionsCallback(Action /// The to populate with data. /// The destination (see ) for this serialization. -#if NET8_0_OR_GREATER [Obsolete("This API supports obsolete formatter-based serialization. It should not be called or extended by application code.")] -#endif public override void GetObjectData(SerializationInfo info, StreamingContext context) { base.GetObjectData(info, context); From d606795781c48f84cc6c8877bb86339a104ea503 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Wed, 20 Nov 2024 18:20:03 +0200 Subject: [PATCH 504/761] Map date/time to DateOnly/TimeOnly by default (#5948) Closes #5328 --- .../Converters/Temporal/DateConverters.cs | 40 +++++----- .../Converters/Temporal/TimeConverters.cs | 12 +-- .../AdoTypeInfoResolverFactory.Multirange.cs | 17 ++-- .../AdoTypeInfoResolverFactory.Range.cs | 11 +-- .../AdoTypeInfoResolverFactory.cs | 13 +-- test/Npgsql.Tests/CommandBuilderTests.cs | 2 +- test/Npgsql.Tests/ExceptionTests.cs | 3 + test/Npgsql.Tests/ReaderTests.cs | 2 +- test/Npgsql.Tests/Types/DateTimeTests.cs | 79 +++++++++---------- test/Npgsql.Tests/Types/MultirangeTests.cs | 4 +- 10 files changed, 92 insertions(+), 91 deletions(-) diff --git a/src/Npgsql/Internal/Converters/Temporal/DateConverters.cs b/src/Npgsql/Internal/Converters/Temporal/DateConverters.cs index 807e2528d2..41e2cb83da 100644 --- a/src/Npgsql/Internal/Converters/Temporal/DateConverters.cs +++ b/src/Npgsql/Internal/Converters/Temporal/DateConverters.cs @@ -4,9 +4,9 @@ // ReSharper disable once CheckNamespace namespace Npgsql.Internal.Converters; -sealed class DateTimeDateConverter(bool dateTimeInfinityConversions) : PgBufferedConverter +sealed class DateOnlyDateConverter(bool dateTimeInfinityConversions) : PgBufferedConverter { - static readonly DateTime BaseValue = 
new(2000, 1, 1, 0, 0, 0); + static readonly DateOnly BaseValue = new(2000, 1, 1); public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) { @@ -14,42 +14,42 @@ public override bool CanConvert(DataFormat format, out BufferRequirements buffer return format is DataFormat.Binary; } - protected override DateTime ReadCore(PgReader reader) + protected override DateOnly ReadCore(PgReader reader) => reader.ReadInt32() switch { int.MaxValue => dateTimeInfinityConversions - ? DateTime.MaxValue + ? DateOnly.MaxValue : throw new InvalidCastException(NpgsqlStrings.CannotReadInfinityValue), int.MinValue => dateTimeInfinityConversions - ? DateTime.MinValue + ? DateOnly.MinValue : throw new InvalidCastException(NpgsqlStrings.CannotReadInfinityValue), - var value => BaseValue + TimeSpan.FromDays(value) + var value => BaseValue.AddDays(value) }; - protected override void WriteCore(PgWriter writer, DateTime value) + protected override void WriteCore(PgWriter writer, DateOnly value) { if (dateTimeInfinityConversions) { - if (value == DateTime.MaxValue) + if (value == DateOnly.MaxValue) { writer.WriteInt32(int.MaxValue); return; } - if (value == DateTime.MinValue) + if (value == DateOnly.MinValue) { writer.WriteInt32(int.MinValue); return; } } - writer.WriteInt32((value.Date - BaseValue).Days); + writer.WriteInt32(value.DayNumber - BaseValue.DayNumber); } } -sealed class DateOnlyDateConverter(bool dateTimeInfinityConversions) : PgBufferedConverter +sealed class DateTimeDateConverter(bool dateTimeInfinityConversions) : PgBufferedConverter { - static readonly DateOnly BaseValue = new(2000, 1, 1); + static readonly DateTime BaseValue = new(2000, 1, 1, 0, 0, 0); public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) { @@ -57,35 +57,35 @@ public override bool CanConvert(DataFormat format, out BufferRequirements buffer return format is DataFormat.Binary; } - protected override DateOnly ReadCore(PgReader reader) + 
protected override DateTime ReadCore(PgReader reader) => reader.ReadInt32() switch { int.MaxValue => dateTimeInfinityConversions - ? DateOnly.MaxValue + ? DateTime.MaxValue : throw new InvalidCastException(NpgsqlStrings.CannotReadInfinityValue), int.MinValue => dateTimeInfinityConversions - ? DateOnly.MinValue + ? DateTime.MinValue : throw new InvalidCastException(NpgsqlStrings.CannotReadInfinityValue), - var value => BaseValue.AddDays(value) + var value => BaseValue + TimeSpan.FromDays(value) }; - protected override void WriteCore(PgWriter writer, DateOnly value) + protected override void WriteCore(PgWriter writer, DateTime value) { if (dateTimeInfinityConversions) { - if (value == DateOnly.MaxValue) + if (value == DateTime.MaxValue) { writer.WriteInt32(int.MaxValue); return; } - if (value == DateOnly.MinValue) + if (value == DateTime.MinValue) { writer.WriteInt32(int.MinValue); return; } } - writer.WriteInt32(value.DayNumber - BaseValue.DayNumber); + writer.WriteInt32((value.Date - BaseValue).Days); } } diff --git a/src/Npgsql/Internal/Converters/Temporal/TimeConverters.cs b/src/Npgsql/Internal/Converters/Temporal/TimeConverters.cs index b93a878032..09385712bf 100644 --- a/src/Npgsql/Internal/Converters/Temporal/TimeConverters.cs +++ b/src/Npgsql/Internal/Converters/Temporal/TimeConverters.cs @@ -3,26 +3,26 @@ // ReSharper disable once CheckNamespace namespace Npgsql.Internal.Converters; -sealed class TimeSpanTimeConverter : PgBufferedConverter +sealed class TimeOnlyTimeConverter : PgBufferedConverter { public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) { bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long)); return format is DataFormat.Binary; } - protected override TimeSpan ReadCore(PgReader reader) => new(reader.ReadInt64() * 10); - protected override void WriteCore(PgWriter writer, TimeSpan value) => writer.WriteInt64(value.Ticks / 10); + protected override TimeOnly ReadCore(PgReader reader) => 
new(reader.ReadInt64() * 10); + protected override void WriteCore(PgWriter writer, TimeOnly value) => writer.WriteInt64(value.Ticks / 10); } -sealed class TimeOnlyTimeConverter : PgBufferedConverter +sealed class TimeSpanTimeConverter : PgBufferedConverter { public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) { bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(long)); return format is DataFormat.Binary; } - protected override TimeOnly ReadCore(PgReader reader) => new(reader.ReadInt64() * 10); - protected override void WriteCore(PgWriter writer, TimeOnly value) => writer.WriteInt64(value.Ticks / 10); + protected override TimeSpan ReadCore(PgReader reader) => new(reader.ReadInt64() * 10); + protected override void WriteCore(PgWriter writer, TimeSpan value) => writer.WriteInt64(value.Ticks / 10); } sealed class DateTimeOffsetTimeTzConverter : PgBufferedConverter diff --git a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Multirange.cs b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Multirange.cs index f76ed3a457..3d82ab03f1 100644 --- a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Multirange.cs +++ b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Multirange.cs @@ -159,24 +159,23 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) CreateListMultirangeConverter(CreateRangeConverter(new Int8Converter(), options), options))); // datemultirange - mappings.AddType[]>(DataTypeNames.DateMultirange, - static (options, mapping, _) => - mapping.CreateInfo(options, CreateArrayMultirangeConverter( - CreateRangeConverter(new DateTimeDateConverter(options.EnableDateTimeInfinityConversions), options), options)), - isDefault: true); - mappings.AddType>>(DataTypeNames.DateMultirange, - static (options, mapping, _) => - mapping.CreateInfo(options, CreateListMultirangeConverter( - CreateRangeConverter(new 
DateTimeDateConverter(options.EnableDateTimeInfinityConversions), options), options))); mappings.AddType[]>(DataTypeNames.DateMultirange, static (options, mapping, _) => mapping.CreateInfo(options, CreateArrayMultirangeConverter( CreateRangeConverter(new DateOnlyDateConverter(options.EnableDateTimeInfinityConversions), options), options)), isDefault: true); + mappings.AddType[]>(DataTypeNames.DateMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateArrayMultirangeConverter( + CreateRangeConverter(new DateTimeDateConverter(options.EnableDateTimeInfinityConversions), options), options))); mappings.AddType>>(DataTypeNames.DateMultirange, static (options, mapping, _) => mapping.CreateInfo(options, CreateListMultirangeConverter( CreateRangeConverter(new DateOnlyDateConverter(options.EnableDateTimeInfinityConversions), options), options))); + mappings.AddType>>(DataTypeNames.DateMultirange, + static (options, mapping, _) => + mapping.CreateInfo(options, CreateListMultirangeConverter( + CreateRangeConverter(new DateTimeDateConverter(options.EnableDateTimeInfinityConversions), options), options))); return mappings; } diff --git a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Range.cs b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Range.cs index 74a9028423..17ba8c3c33 100644 --- a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Range.cs +++ b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Range.cs @@ -87,15 +87,16 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) static (options, mapping, _) => mapping.CreateInfo(options, CreateRangeConverter(new Int8Converter(), options))); // daterange + mappings.AddStructType>(DataTypeNames.DateRange, + static (options, mapping, _) => + mapping.CreateInfo(options, + CreateRangeConverter(new DateOnlyDateConverter(options.EnableDateTimeInfinityConversions), options)), + isDefault: true); 
mappings.AddStructType>(DataTypeNames.DateRange, static (options, mapping, _) => mapping.CreateInfo(options, - CreateRangeConverter(new DateTimeDateConverter(options.EnableDateTimeInfinityConversions), options)), - isDefault: true); + CreateRangeConverter(new DateTimeDateConverter(options.EnableDateTimeInfinityConversions), options))); mappings.AddStructType>(DataTypeNames.DateRange, static (options, mapping, _) => mapping.CreateInfo(options, CreateRangeConverter(new Int4Converter(), options))); - mappings.AddStructType>(DataTypeNames.DateRange, - static (options, mapping, _) => - mapping.CreateInfo(options, CreateRangeConverter(new DateOnlyDateConverter(options.EnableDateTimeInfinityConversions), options))); return mappings; } diff --git a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs index b2a39db34b..db350e2fc9 100644 --- a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs @@ -230,14 +230,15 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); // Date + mappings.AddStructType(DataTypeNames.Date, + static (options, mapping, _) => + mapping.CreateInfo(options, new DateOnlyDateConverter(options.EnableDateTimeInfinityConversions)), isDefault: true); mappings.AddStructType(DataTypeNames.Date, static (options, mapping, _) => mapping.CreateInfo(options, new DateTimeDateConverter(options.EnableDateTimeInfinityConversions)), MatchRequirement.DataTypeName); mappings.AddStructType(DataTypeNames.Date, static (options, mapping, _) => mapping.CreateInfo(options, new Int4Converter())); - mappings.AddStructType(DataTypeNames.Date, - static (options, mapping, _) => mapping.CreateInfo(options, new DateOnlyDateConverter(options.EnableDateTimeInfinityConversions))); // Interval 
mappings.AddStructType(DataTypeNames.Interval, @@ -246,12 +247,12 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) static (options, mapping, _) => mapping.CreateInfo(options, new NpgsqlIntervalConverter())); // Time + mappings.AddStructType(DataTypeNames.Time, + static (options, mapping, _) => mapping.CreateInfo(options, new TimeOnlyTimeConverter()), isDefault: true); mappings.AddStructType(DataTypeNames.Time, - static (options, mapping, _) => mapping.CreateInfo(options, new TimeSpanTimeConverter()), isDefault: true); + static (options, mapping, _) => mapping.CreateInfo(options, new TimeSpanTimeConverter())); mappings.AddStructType(DataTypeNames.Time, static (options, mapping, _) => mapping.CreateInfo(options, new Int8Converter())); - mappings.AddStructType(DataTypeNames.Time, - static (options, mapping, _) => mapping.CreateInfo(options, new TimeOnlyTimeConverter())); // TimeTz mappings.AddStructType(DataTypeNames.TimeTz, @@ -446,9 +447,9 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) mappings.AddStructArrayType(DataTypeNames.TimestampTz); // Date + mappings.AddStructArrayType(DataTypeNames.Date); mappings.AddStructArrayType(DataTypeNames.Date); mappings.AddStructArrayType(DataTypeNames.Date); - mappings.AddStructArrayType(DataTypeNames.Date); // Interval mappings.AddStructArrayType(DataTypeNames.Interval); diff --git a/test/Npgsql.Tests/CommandBuilderTests.cs b/test/Npgsql.Tests/CommandBuilderTests.cs index e917b7f6b3..a9fe980c5b 100644 --- a/test/Npgsql.Tests/CommandBuilderTests.cs +++ b/test/Npgsql.Tests/CommandBuilderTests.cs @@ -341,7 +341,7 @@ PRIMARY KEY (Cod) Assert.That(row[0], Is.EqualTo("key1")); Assert.That(row[1], Is.EqualTo("description")); - Assert.That(row[2], Is.EqualTo(new DateTime(2018, 7, 3))); + Assert.That(row[2], Is.EqualTo(new DateOnly(2018, 7, 3))); Assert.That(row[3], Is.EqualTo(new DateTime(2018, 7, 3, 7, 2, 0))); Assert.That(row[4], Is.EqualTo(123)); 
Assert.That(row[5], Is.EqualTo(123.4)); diff --git a/test/Npgsql.Tests/ExceptionTests.cs b/test/Npgsql.Tests/ExceptionTests.cs index ac87ef2b0e..21d83ff9fb 100644 --- a/test/Npgsql.Tests/ExceptionTests.cs +++ b/test/Npgsql.Tests/ExceptionTests.cs @@ -250,6 +250,8 @@ PostgresException CreateWithSqlState(string sqlState) #pragma warning disable SYSLIB0011 #pragma warning disable SYSLIB0050 + +#if !NET9_0_OR_GREATER // BinaryFormatter serialization and deserialization have been removed. See https://aka.ms/binaryformatter for more information. [Test] public void Serialization() { @@ -283,6 +285,7 @@ public void Serialization() Assert.That(expected.Line, Is.EqualTo(actual.Line)); Assert.That(expected.Routine, Is.EqualTo(actual.Routine)); } +#endif SerializationInfo CreateSerializationInfo() => new(typeof(PostgresException), new FormatterConverter()); #pragma warning restore SYSLIB0011 diff --git a/test/Npgsql.Tests/ReaderTests.cs b/test/Npgsql.Tests/ReaderTests.cs index 7ee1aa6e11..b46342153f 100644 --- a/test/Npgsql.Tests/ReaderTests.cs +++ b/test/Npgsql.Tests/ReaderTests.cs @@ -475,7 +475,7 @@ public async Task GetValues() dr.Read(); var values = new object[4]; Assert.That(dr.GetValues(values), Is.EqualTo(3)); - Assert.That(values, Is.EqualTo(new object?[] { "hello", 1, new DateTime(2014, 1, 1), null })); + Assert.That(values, Is.EqualTo(new object?[] { "hello", 1, new DateOnly(2014, 1, 1), null })); } using (var dr = await command.ExecuteReaderAsync(Behavior)) { diff --git a/test/Npgsql.Tests/Types/DateTimeTests.cs b/test/Npgsql.Tests/Types/DateTimeTests.cs index 815514031a..078693bf96 100644 --- a/test/Npgsql.Tests/Types/DateTimeTests.cs +++ b/test/Npgsql.Tests/Types/DateTimeTests.cs @@ -13,9 +13,13 @@ public class DateTimeTests : TestBase { #region Date + [Test] + public Task Date_as_DateOnly() + => AssertType(new DateOnly(2020, 10, 1), "2020-10-01", "date", NpgsqlDbType.Date, DbType.Date); + [Test] public Task Date_as_DateTime() - => AssertType(new DateTime(2020, 
10, 1), "2020-10-01", "date", NpgsqlDbType.Date, DbType.Date, isDefaultForWriting: false); + => AssertType(new DateTime(2020, 10, 1), "2020-10-01", "date", NpgsqlDbType.Date, DbType.Date, isDefault: false); [Test] public Task Date_as_DateTime_with_date_and_time_before_2000() @@ -26,37 +30,6 @@ public Task Date_as_DateTime_with_date_and_time_before_2000() public Task Date_as_int() => AssertType(7579, "2020-10-01", "date", NpgsqlDbType.Date, DbType.Date, isDefault: false); - [Test] - public Task Daterange_as_NpgsqlRange_of_DateTime() - => AssertType( - new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false), - "[2002-03-04,2002-03-06)", - "daterange", - NpgsqlDbType.DateRange, - isDefaultForWriting: false); - - [Test] - public async Task Datemultirange_as_array_of_NpgsqlRange_of_DateTime() - { - await using var conn = await OpenConnectionAsync(); - MinimumPgVersion(conn, "14.0", "Multirange types were introduced in PostgreSQL 14"); - - await AssertType( - new[] - { - new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false), - new NpgsqlRange(new(2002, 3, 8), true, new(2002, 3, 11), false) - }, - "{[2002-03-04,2002-03-06),[2002-03-08,2002-03-11)}", - "datemultirange", - NpgsqlDbType.DateMultirange, - isDefaultForWriting: false); - } - - [Test] - public Task Date_as_DateOnly() - => AssertType(new DateOnly(2020, 10, 1), "2020-10-01", "date", NpgsqlDbType.Date, DbType.Date, isDefaultForReading: false); - [Test] public Task Daterange_as_NpgsqlRange_of_DateOnly() => AssertType( @@ -64,7 +37,6 @@ public Task Daterange_as_NpgsqlRange_of_DateOnly() "[2002-03-04,2002-03-06)", "daterange", NpgsqlDbType.DateRange, - isDefaultForReading: false, skipArrayCheck: true); // NpgsqlRange[] is mapped to multirange by default, not array; test separately [Test] @@ -78,6 +50,15 @@ public Task Daterange_array_as_NpgsqlRange_of_DateOnly_array() """{"[2002-03-04,2002-03-06)","[2002-03-08,2002-03-09)"}""", "daterange[]", NpgsqlDbType.DateRange | NpgsqlDbType.Array, + 
isDefaultForWriting: false); + + [Test] + public Task Daterange_as_NpgsqlRange_of_DateTime() + => AssertType( + new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false), + "[2002-03-04,2002-03-06)", + "daterange", + NpgsqlDbType.DateRange, isDefault: false); [Test] @@ -94,8 +75,25 @@ await AssertType( }, "{[2002-03-04,2002-03-06),[2002-03-08,2002-03-11)}", "datemultirange", + NpgsqlDbType.DateMultirange); + } + + [Test] + public async Task Datemultirange_as_array_of_NpgsqlRange_of_DateTime() + { + await using var conn = await OpenConnectionAsync(); + MinimumPgVersion(conn, "14.0", "Multirange types were introduced in PostgreSQL 14"); + + await AssertType( + new[] + { + new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false), + new NpgsqlRange(new(2002, 3, 8), true, new(2002, 3, 11), false) + }, + "{[2002-03-04,2002-03-06),[2002-03-08,2002-03-11)}", + "datemultirange", NpgsqlDbType.DateMultirange, - isDefaultForReading: false); + isDefault: false); } #endregion @@ -103,24 +101,23 @@ await AssertType( #region Time [Test] - public Task Time_as_TimeSpan() + public Task Time_as_TimeOnly() => AssertType( - new TimeSpan(0, 10, 45, 34, 500), + new TimeOnly(10, 45, 34, 500), "10:45:34.5", "time without time zone", NpgsqlDbType.Time, - DbType.Time, - isDefaultForWriting: false); + DbType.Time); [Test] - public Task Time_as_TimeOnly() + public Task Time_as_TimeSpan() => AssertType( - new TimeOnly(10, 45, 34, 500), + new TimeSpan(0, 10, 45, 34, 500), "10:45:34.5", "time without time zone", NpgsqlDbType.Time, DbType.Time, - isDefaultForReading: false); + isDefault: false); #endregion diff --git a/test/Npgsql.Tests/Types/MultirangeTests.cs b/test/Npgsql.Tests/Types/MultirangeTests.cs index cf9cdcd8a9..86bebb1b67 100644 --- a/test/Npgsql.Tests/Types/MultirangeTests.cs +++ b/test/Npgsql.Tests/Types/MultirangeTests.cs @@ -47,12 +47,12 @@ public class MultirangeTests : TestBase // daterange new TestCaseData( - new NpgsqlRange[] + new NpgsqlRange[] { new(new(2020, 1, 1), 
true, false, new(2020, 1, 5), false, false), new(new(2020, 1, 10), true, false, default, false, true) }, - "{[2020-01-01,2020-01-05),[2020-01-10,)}", "datemultirange", NpgsqlDbType.DateMultirange, true, false, default(NpgsqlRange)) + "{[2020-01-01,2020-01-05),[2020-01-10,)}", "datemultirange", NpgsqlDbType.DateMultirange, true, false, default(NpgsqlRange)) .SetName("DateTime DateMultirange"), // tsmultirange From a1f1022a7103e069c48c3450e45d19a642d6995c Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Wed, 20 Nov 2024 18:28:27 +0200 Subject: [PATCH 505/761] Make the cidr<->IPNetwork mapping the default (#5950) Closes #5891 --- .../Converters/Networking/NpgsqlCidrConverter.cs | 2 ++ .../NetworkTypeInfoResolverFactory.cs | 14 +++++++++----- src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs | 1 + test/Npgsql.Tests/Types/NetworkTypeTests.cs | 14 +++++++------- 4 files changed, 19 insertions(+), 12 deletions(-) diff --git a/src/Npgsql/Internal/Converters/Networking/NpgsqlCidrConverter.cs b/src/Npgsql/Internal/Converters/Networking/NpgsqlCidrConverter.cs index c3cd43c227..451fab4959 100644 --- a/src/Npgsql/Internal/Converters/Networking/NpgsqlCidrConverter.cs +++ b/src/Npgsql/Internal/Converters/Networking/NpgsqlCidrConverter.cs @@ -3,6 +3,7 @@ // ReSharper disable once CheckNamespace namespace Npgsql.Internal.Converters; +#pragma warning disable CS0618 // NpgsqlCidr is obsolete sealed class NpgsqlCidrConverter : PgBufferedConverter { public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) @@ -20,3 +21,4 @@ protected override NpgsqlCidr ReadCore(PgReader reader) protected override void WriteCore(PgWriter writer, NpgsqlCidr value) => NpgsqlInetConverter.WriteImpl(writer, (value.Address, value.Netmask), isCidr: true); } +#pragma warning restore CS0618 diff --git a/src/Npgsql/Internal/ResolverFactories/NetworkTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/NetworkTypeInfoResolverFactory.cs index f9220cf8a9..5eb072c1c9 100644 
--- a/src/Npgsql/Internal/ResolverFactories/NetworkTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/NetworkTypeInfoResolverFactory.cs @@ -48,11 +48,13 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) static (options, mapping, _) => mapping.CreateInfo(options, new NpgsqlInetConverter())); // cidr - mappings.AddStructType(DataTypeNames.Cidr, - static (options, mapping, _) => mapping.CreateInfo(options, new NpgsqlCidrConverter()), isDefault: true); - mappings.AddStructType(DataTypeNames.Cidr, - static (options, mapping, _) => mapping.CreateInfo(options, new IPNetworkConverter())); + static (options, mapping, _) => mapping.CreateInfo(options, new IPNetworkConverter()), isDefault: true); + +#pragma warning disable CS0618 // NpgsqlCidr is obsolete + mappings.AddStructType(DataTypeNames.Cidr, + static (options, mapping, _) => mapping.CreateInfo(options, new NpgsqlCidrConverter())); +#pragma warning restore CS0618 return mappings; } @@ -77,8 +79,10 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) mappings.AddStructArrayType(DataTypeNames.Inet); // cidr - mappings.AddStructArrayType(DataTypeNames.Cidr); mappings.AddStructArrayType(DataTypeNames.Cidr); +#pragma warning disable CS0618 // NpgsqlCidr is obsolete + mappings.AddStructArrayType(DataTypeNames.Cidr); +#pragma warning restore CS0618 return mappings; } diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs b/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs index 493be99dea..7d5eedf7af 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs @@ -488,6 +488,7 @@ static void CheckAddressFamily(IPAddress address) /// /// https://www.postgresql.org/docs/current/static/datatype-net-types.html /// +[Obsolete("Use .NET IPNetwork instead of NpgsqlCidr to map to PostgreSQL cidr")] public readonly record struct NpgsqlCidr { public IPAddress Address { get; } diff --git a/test/Npgsql.Tests/Types/NetworkTypeTests.cs 
b/test/Npgsql.Tests/Types/NetworkTypeTests.cs index e24446f136..b3f0b221e6 100644 --- a/test/Npgsql.Tests/Types/NetworkTypeTests.cs +++ b/test/Npgsql.Tests/Types/NetworkTypeTests.cs @@ -53,23 +53,23 @@ public Task IPAddress_Any() => AssertTypeWrite(IPAddress.Any, "0.0.0.0/32", "inet", NpgsqlDbType.Inet, skipArrayCheck: true); [Test] - public Task Cidr() + public Task IPNetwork_as_cidr() => AssertType( - new NpgsqlCidr(IPAddress.Parse("192.168.1.0"), netmask: 24), + new IPNetwork(IPAddress.Parse("192.168.1.0"), 24), "192.168.1.0/24", "cidr", - NpgsqlDbType.Cidr, - isDefaultForWriting: false); + NpgsqlDbType.Cidr); +#pragma warning disable CS0618 // NpgsqlCidr is obsolete [Test] - public Task IPNetwork_as_cidr() + public Task NpgsqlCidr_as_Cidr() => AssertType( - new IPNetwork(IPAddress.Parse("192.168.1.0"), 24), + new NpgsqlCidr(IPAddress.Parse("192.168.1.0"), netmask: 24), "192.168.1.0/24", "cidr", NpgsqlDbType.Cidr, - isDefaultForWriting: false, isDefaultForReading: false); +#pragma warning restore CS0618 [Test] public Task Inet_v4_as_NpgsqlInet() From 2b013ed5216c49ed4257c6bee5a994885a1bd66f Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Wed, 20 Nov 2024 20:29:31 +0300 Subject: [PATCH 506/761] Fix connecting with VerifyCA and VerifyFull (#5944) Fixes #5942 --- .build/ca.crt | 31 ++++++++++ .build/server.crt | 46 +++++++++------ .build/server.key | 79 +++++++++++++++++--------- .github/workflows/build.yml | 20 ++++--- src/Npgsql/Internal/NpgsqlConnector.cs | 2 +- test/Npgsql.Tests/Npgsql.Tests.csproj | 5 ++ test/Npgsql.Tests/SecurityTests.cs | 41 +++++++++++++ 7 files changed, 169 insertions(+), 55 deletions(-) create mode 100644 .build/ca.crt diff --git a/.build/ca.crt b/.build/ca.crt new file mode 100644 index 0000000000..e5a4081a02 --- /dev/null +++ b/.build/ca.crt @@ -0,0 +1,31 @@ +-----BEGIN CERTIFICATE----- +MIIFazCCA1OgAwIBAgIUB/AJgMX+fmeXvBOUWW7WR+XKZ6AwDQYJKoZIhvcNAQEL +BQAwRTELMAkGA1UEBhMCVVMxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM 
+GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yNDExMjAwNDExMjFaFw0zNDEx +MTgwNDExMjFaMEUxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw +HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggIiMA0GCSqGSIb3DQEB +AQUAA4ICDwAwggIKAoICAQC8A5+//15VxRCxpHzl7srYx6uWQi1/7q5VFWFZab+7 +82PLr3pV/zMjSMEPBZdq46NWWNnXIFoFHd5MFnN4fNIQ1GIEsTF0kYy142qllnp3 +vLBVBu24n4dsmI8ygl8+1PuGwk45Mz+vOL+RjNIo6ra9yJzYFnZOGCqlt0kWkCau +HR/43ms0vhKq8FaDXPdVXn9Z3EZScxRKQwlfAKOUxLQ8dVkzvRuAm0PF74afRYfg +xiGIX8msFYKzGnWb7ezcag125iEqg+xSplo6QK6vaNURlKwYQ8ZRKz1Hk1oIB4t1 +iEJL2d4nzgTkh/jlVjtTXo6cw96WT9NBT0Rg6JR4PJySlhY+ZwLi6VAxQZ8GyJo4 +YTvx1K3vhXeokKjFTxUtZdx1blX5vCBXv9LCxnjAsBCTRzE425x6UP1gp721gHGW +sqopvkUgN9vk8oigyWLeGvwsBwFTFnY672iCYXhFHs2oKTIX8yo+A2xRr8tewb9C +IsqJSC6JkLs5zbVwKdgVx1H21Uwvi7XjKir9pPp/ks12r9GNMmWc265PK1kCqCHa +oHfgzYMVVFQ3CfYbeeA8/aVf770AfC/1v+VtMse8DEqyep5q0OzOXtWIQlahYiyA +FLTzCBqcHUuRZtS4gEhOk6/Pk1HP3faUC1xGgxO5c/pd7SVMfs+Z58WJbYGFcAlC ++QIDAQABo1MwUTAdBgNVHQ4EFgQUBeKaoc7AMURxdajJ+CF8YrUsdFgwHwYDVR0j +BBgwFoAUBeKaoc7AMURxdajJ+CF8YrUsdFgwDwYDVR0TAQH/BAUwAwEB/zANBgkq +hkiG9w0BAQsFAAOCAgEAGGpFZm0c36Eh5E8QiAg8+8U22Ao+YoF6nJnIlc/ri1pt +J5zXRM2DbCCR9uN5yckmCNIJ4PZO49QBflYGPAkF+Vd0RJYoA4k1Cq+eYcJBWtXl +ESJxeg1QAKAZ4XSasOIijebWlPIZxPGOy8HquKNMDQIm8a7g5zSE4UNJPVY3y9on +zJT7ZhntIwuM8IP6h6gotJfxBHJRWNe/g0zVITQ7vHnxSpobLbuKfY21GLl6clgI +WsePKWWo/mZYquqZz72KBUJ66YX4X7nJCvZs1sLgMnXh87n9hsxAdFlRgLuQ4ztp +mwQbDZ90mJFQLprI4rfyamuloIgOcn05yXfklRAI2P8L2/yf5xNAy+ii0OHRiMVv +jnYUet8Bca1orh7OQ9ol1XTBoCI1gknrdG5Y2IQvQhWLiS5AjIwwQYwjkSFXELtF +X8v9Fv758RA9CFlQDnsp9awNjdLss/TdH6+dNYQfTNGigIPM6oCk5nrcQqF/533W +z2WM0LNHAiQlEn0X38D0wCuRwIVzPG/AFyfsf50vSlH81/uzpyR5q3SJA8OKiCV1 +/OiW7Jv7pOtwqFjxR+m31TqaPM6PLrdasP/CNKSvGuJmtaHK4Wkc3YU9dbtQffzB +MUFwhi233gvE+nSEixse2KlzsrBVZIdz16bZXaAd20JQdq9Hceku2uVgfN1fycI= +-----END CERTIFICATE----- diff --git a/.build/server.crt b/.build/server.crt index d161ab2652..5a2bfc7b01 100644 --- a/.build/server.crt +++ b/.build/server.crt @@ -1,20 +1,30 @@ -----BEGIN CERTIFICATE----- 
-MIIDUjCCAjoCFAwuj6RwuZSjCGYHja8m9tbr3nFeMA0GCSqGSIb3DQEBCwUAMGgx -EzARBgNVBAoTCk15IENvbXBhbnkxCzAJBgNVBAsTAklUMRAwDgYDVQQHEwdNeSBU -b3duMQ8wDQYDVQQIEwZNb3Njb3cxCzAJBgNVBAYTAlJVMRQwEgYDVQQDEwtsb2Nh -bGhvc3RDQTAeFw0yMTA0MTAxMzA0MDBaFw0yMjA0MTAxMzA0MDBaMGMxEzARBgNV -BAoTCk15IENvbXBhbnkxCzAJBgNVBAsTAklUMRAwDgYDVQQHEwdNeSBUb3duMQ8w -DQYDVQQIEwZNb3Njb3cxCzAJBgNVBAYTAlJVMQ8wDQYDVQQDEwZzZXJ2ZXIwggEi -MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC8LoQbo2DFwC17gZwJ8xrPKHGX -UKxoo5UcyZ3/2zZ006TYkswssejKksuiICTMI89OD8n55pNTZkXPUH7oR2oIyxTY -SiWPiNzbEh0FOxH9Kh5gmajqM/4X44OaprmyQ56m4Y2LZO2nZ9hHoe+ZRoan3+pa -g8weOM/n/wYuXZtdElOxNsB8pg09K4gevHVaLaSBCEeQfHev51vClFdN3+orBi/r -hnQF3vdw7oMT1JSH75Ray51wRaypLIslAc2DcPFTCQJMmXXMTcAcxmjAVUGrfY+d -sSCdXnOZtd7yk+0X0bVGKLBkCTOP7QpmfOVu9bOhscDiK5EoAaDKqdHSMUfhAgMB -AAEwDQYJKoZIhvcNAQELBQADggEBAKCo2Y1uKbudA8JpV6yo35tc7Z6n03++BAdq -egUBKOiE4ze7xQ7lmlt572ptqXlU/8JuPWa2Qb/wGksR0HpVPTAeU3pbXz1dcCXC -A9wCtSxapjyCYbkDrDl2FQuK0OfJi0q71JZU66D58Qu0l45nWON30to9dSiw3zPw -Rdk7X86GHYIBHKsj7mjiy1v8jH1sXeWvThOmU6+rv8UY8VuJiu4MQDdYa0Y5KFh/ -OL3tVsi7zoNu2OXY1cTKuUpKMQPbO+WSdelYromYK2OAXaNqnC27GegPqvCFWJ2I -9NZuXYj3X+j0ydZSKVjDgCda8H68olBnO0zh44XirCBef7uTVLw= +MIIFJTCCAw0CFAKjNOhsMTYUuQngy2k291XuKOGGMA0GCSqGSIb3DQEBCwUAMEUx +CzAJBgNVBAYTAlVTMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRl +cm5ldCBXaWRnaXRzIFB0eSBMdGQwHhcNMjQxMTIwMDQxOTE0WhcNMjkxMTE5MDQx +OTE0WjBZMQswCQYDVQQGEwJVUzETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UE +CgwYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMRIwEAYDVQQDDAlsb2NhbGhvc3Qw +ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDlGT9vXb93yoM1YT0GAxJI +B6/2ExUrdprd049oMVZa4Km0nqwN/xjVvQRIWozmbpvps0mCkFM1ZyL1iqZFwiJG +WcQvvIffFM1qKRMOSTLNPCbM9mfvRKsCU9gjgatdhy8xUZhz7uFGMGADnZdlNMYW +GgzMVZo0EyW7Z2QJ+ZCl8wW5IT4iswZWrJsNZU/g7HaNBrXiidDihkmQ8Kt32R0U +nqJeXMHwkQLxddmcGdDmVCKsAEUu3NcvPeAlSJsNHfGDRsf9fImRqZCsgwI8dJtA +ke/luMTttQ34aADFTmTbVk4ngVhCxgBkJ6FUDFJcp3t3nFssiisNon9k5FwtJ3hl +e/QGM9IRdBvGVcOnZZuXXK2lLtakj5UWUik2xWA0hjX+DsFo7TPwKgZy4zmWCRob 
+W1e1NX52bqYFWZUKYLqbizllOd98o3yed58PhbF1/IuVEuOoiKu7rNdNgzr8vgRP +pWHQNXp3maCcZq2kWybADU2LQNUKAZLSw3nClcX8QVRAfvf8IyDZ/280EYRGu99V +qLqDPLa1+3CNAb93J1ONvVjKgJwQQWy4dYFLHTYdBzXV5SOpH8YHL/1IHs9W5k28 +BdwbeMtJnOaV8rqiA6Xd4Xem111AMAigHExxG3kpSnAq6jiOX0+2V++f7qAunuC6 +B/oJATXLCbBQILr0ARtKuQIDAQABMA0GCSqGSIb3DQEBCwUAA4ICAQCn6R2fvxfs +R7nN9g6bVNJXkJrDJ+O1suVD0tkZzxZAIAFdhKnSFocJph1bC6bSZEQkhG+0WtfU +DU7m19VDHpZWZ+8LygIVikIkvj47v1/yl7TgwkhNAKXXxl6bF/AEevMUZoxT3r8S +UBFURp8QduSQ7sbDRB9qR1EWPjAXgnedzLSGkt5E6VKuVRwsTjv7QUTV8RCbOl9b +YHtTX3dtvr3PeAB5M3B6qrbpniqJfPxUt658UKrDGFr1MuZZ8ONYpdiGH8uGXZhs +9BBjp0g0xWha9LYDYRpqzlC1hqV0J/9jz9QdS9HHPsqa8PvB/YwaDGQm/RSRMUbU +x0wip0me45WU5pLD1djEGQBlxCGgQXIJsebzipdUsayA4MgY3s2lBj2qsPOqyNoP +dFohMm2+Ypi8UAjEbeGY4XsCODLeCvPx24HyjJUORm9uuPCunSBhtgiEBTJrNwHL +F7T1+/g9gVSwCsz4MqceO7IooJ2omSpwk7xrzocccFb1HGR/tE9GxRLNHiyTfx9s +FN9SNOih5DCcOFOiw0vF1qKHk6CAJ0UCBzVWl3YO9OgnFX4FbRYHd3PduWR+fSkd +icBs2AiOKPbOU8yXR8CE6uZiDoN6A27KOE07adZEWBMwd4us7uBHGgnqqYuwPI3d +nqC8srMQ07fw8HyXn7ojPxXyCk+2d6zVgA== -----END CERTIFICATE----- diff --git a/.build/server.key b/.build/server.key index b6dd15913f..f2a7e607b2 100644 --- a/.build/server.key +++ b/.build/server.key @@ -1,27 +1,52 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEAvC6EG6NgxcAte4GcCfMazyhxl1CsaKOVHMmd/9s2dNOk2JLM -LLHoypLLoiAkzCPPTg/J+eaTU2ZFz1B+6EdqCMsU2Eolj4jc2xIdBTsR/SoeYJmo -6jP+F+ODmqa5skOepuGNi2Ttp2fYR6HvmUaGp9/qWoPMHjjP5/8GLl2bXRJTsTbA -fKYNPSuIHrx1Wi2kgQhHkHx3r+dbwpRXTd/qKwYv64Z0Bd73cO6DE9SUh++UWsud -cEWsqSyLJQHNg3DxUwkCTJl1zE3AHMZowFVBq32PnbEgnV5zmbXe8pPtF9G1Riiw -ZAkzj+0KZnzlbvWzobHA4iuRKAGgyqnR0jFH4QIDAQABAoIBADnMS7U1dAao5Q9X -GrcPnP9dm63vEFU/URA7eLTZ/prZWntOczmTFz4I4lSUbNjqcsS2IsIHqN5nvi9T -uPbc4Ft9DJT2CR1R2wvKP3GY2AibBCOFbpUojPWHYqeAZ+6xyCvXgSL8R+YwBgTS -XwYD3F35b0CH1Iy/xFOsR5i8FXj7He8lOBA76fPrH64DEBTB2zUGztu4qpfv57v5 -sfTISi2ZOqPpXc+8Fw0RPeVWQgSRUh7U3lzL8bNBod6lYcjkhF5Yqet4MdHSyWMT -aKdZ2GRHHdWjpyx6J0cD/bjjaTSDqTD8r265mPzY6bq4t6UQMq4KeDnbeiextDf4 
-ELT90YUCgYEA6insCSDJddhFZ51guPPyYE9GL8QQfnzLvFOA4qWsi0u9SAbJ9aS0 -vABaEuot0PyYPwMYq7st07z3DSKno4tisPJ2X7v2nEWxv8MjgczWpltPTPaEdmZE -WGIwG3pyh5wJk1b3VpBJB5jkjtJfGmUJaezU10bzm4QhPiEawemCjucCgYEAzbri -/6EZPbJJa9hGtkJEEVLwbQ2U/CE7mZXL+AcPlS3qMSwyz/1OArPxdTRR4S3sYRRO -fsRDBL8LED/kKUDWNni/zkzmFf/hVkmGd9zc6eif4Zr1gmtHlsHQdaMGxsomzxGL -qydBqDN+4TMmHmUmp2jR/0LIF5UMlNoCvHcxgfcCgYEAnOBNE6h1j4++n7Yd0IsO -PFufx+xwqGzvCVJgLHeV6xRo0NJLh1g7BSCvN7DP1Q0E6mImqxaRkyMr2A75hGWj -TqyBhY2ln/hJJxGSvij/PSA7NnKJN9E3xIazeBVGmXd+Ksm+lq2/X2mc5domgMZj -0iUqSrdsCSoyIy+Gf5bzMs0CgYBcquG044vLDpOj0DeJwS+H3iQN+yAwsYd3FtJZ -VlTejV//5ji9Fwwci5EnifmXxGfFErCIyT6m1KbXGvBa5KmYv6sl8d1x62BEzbmU -JBgeBHp/1JzhshD9BzAuzNAwmr4AZ5bR8UzRxuBP8AorhsRyg/STVjFq7ehM5CZ3 -Xfke4QKBgHCPo3R/oi/E2E7OIM/ELlDpvPQTMrV+rYlMFsy3JRvataIqEGnVbhOR -4dQHEM3u2bJxN79wUYYmZuymVB78wKxTn6hGWcGoM6Y8mrJjVv9D8V0Gc0sWw5pF -KZxuCgzjaN2T7i1LsXEV3gaQrKItToEpGPzSI23egFaG6g5SFqBt ------END RSA PRIVATE KEY----- +-----BEGIN PRIVATE KEY----- +MIIJQQIBADANBgkqhkiG9w0BAQEFAASCCSswggknAgEAAoICAQDlGT9vXb93yoM1 +YT0GAxJIB6/2ExUrdprd049oMVZa4Km0nqwN/xjVvQRIWozmbpvps0mCkFM1ZyL1 +iqZFwiJGWcQvvIffFM1qKRMOSTLNPCbM9mfvRKsCU9gjgatdhy8xUZhz7uFGMGAD +nZdlNMYWGgzMVZo0EyW7Z2QJ+ZCl8wW5IT4iswZWrJsNZU/g7HaNBrXiidDihkmQ +8Kt32R0UnqJeXMHwkQLxddmcGdDmVCKsAEUu3NcvPeAlSJsNHfGDRsf9fImRqZCs +gwI8dJtAke/luMTttQ34aADFTmTbVk4ngVhCxgBkJ6FUDFJcp3t3nFssiisNon9k +5FwtJ3hle/QGM9IRdBvGVcOnZZuXXK2lLtakj5UWUik2xWA0hjX+DsFo7TPwKgZy +4zmWCRobW1e1NX52bqYFWZUKYLqbizllOd98o3yed58PhbF1/IuVEuOoiKu7rNdN +gzr8vgRPpWHQNXp3maCcZq2kWybADU2LQNUKAZLSw3nClcX8QVRAfvf8IyDZ/280 +EYRGu99VqLqDPLa1+3CNAb93J1ONvVjKgJwQQWy4dYFLHTYdBzXV5SOpH8YHL/1I +Hs9W5k28BdwbeMtJnOaV8rqiA6Xd4Xem111AMAigHExxG3kpSnAq6jiOX0+2V++f +7qAunuC6B/oJATXLCbBQILr0ARtKuQIDAQABAoICAAP97y6VPnPLjgLVJxKbfssa +afz0IxG+9ZH11xrpUl6itjpNBUte8LN97jaF8DLhf9FJtZ2mWHJtODBfzw4wnldf +X/O2Y1MZbvHeXA3LHznXX9ROJ9krg/2DCsu/MIZgh5hvQLEmdK6Iw1q7LH5Pz6YA +Pea/YbPUfWGsVC0rUaBFB/C/oEnk/v0g8VIbFZIvAWrRw6oT0JWESJrGr5b9RYxm 
+Ljo0Mt0dyorjP/YAUI6u4R+VOp9g+Dvpv7909vfg/j2u5k20e/lgI1xdXqGnvrIx ++/4V/KwPeob9TIqJ/bTOGaFtF5j3dirImP8Yq6rsvSuqodkSSELeAor2XEsDumby +PqJY1MIO9DuZSdqf+Cofgzbd6mpeMAwueb+hfBw8AIMG3M9Xj1uDuU+tjsVA79Er +H9acPxLukGjYP5SY2Mo8hLFLLurpjtcDpYdOP2Wh7PBDwHR8anmPQru2rZXT80NY +j3fXNqnTTFbHuntmZ2qWJovmOuKocU5GEm/QCW/f6miqR9Hzc2vbWaIoEO54vcF6 +eS4iLEkAOfmakz3Sno2AXS1jJI6+2v1899cBINvgpATCMkmXnwFDwr9gNYujwlpF +Yl3QM8Vh9dnVt04oyum5x4sz/mTKj5e9O988iqlOkgID4HBVpy/dwYHsHE+XgDDY +yiFetJ/n0+45QHhyvSwBAoIBAQDnrPz2xCbR03KQwZN2DnZClLVFkZe3tZxR6UsY +63yDTrA0ZMJ8AtE/tX79/Iu7gPidNTCrVmOuelf5q3y3AMo6nlKMCc3tIKr6QtaC +99RtHq5p0T3/TS9tWbGjmxEzyx00R3wz5fSypX76qnQLHs6EmrLxFUNmsHIQS2nH +jWvT1+TdmfmogZ/9RaHyBjHGkDfTmlfEKc7/TleE9XsW+G0cGli3fIO0iY0hJTLd +b65X5Gm0URCqsZgIzD99enIvee13Gw8aUJUt8tJZXQHtOWBu491MLd2AVPQ/7eZa +tl/HtjdMj2E3n5NXTie3laRCX+p9mK6087nE7u3JqPqUXU4BAoIBAQD9Jv3hZeii +0pDgLYgiFVds5n2S4CEB4WOT9wn2vUIrYTSjgjAPfsgeJs6N6+WArwaIJrl4tTK4 +m0VjUG394plvyExU8hNZ7hw0E/33rwsKySnkwUFZtOgbsOgUjajRDfFYsqsDhLK0 +o3dY1M+mdYvU9OBo3EhgFy3fYBhtdGIq/4/3kSM6CARQIjddW2pdbB7pyv3qz0mH +6fpzPXWLIex+WBzRVEz7VPPD4coV3LEhmtdPju4RqFPbHS+OpECun8pyaNt14DRr +t216MiyJGNV74zTLELioVHlhlaPvsWnnIeI+2uhhCgQ8UvHn69x2wiAgLlx/e+RD +qPiINhm/xey5AoIBACCASjSsK+3/xfC8110Whkys5AlQdYJWPgnXuqtSTfN11I5l +HEudcZGIerpS9Z9mZnpXfe5rfix6CWGDR0m9GKHEmDwBHByKGrJlMgbJkcmFJl69 +9f6c62xhyuPy2yTy97Pf23LEbeGqCfhMdV8iAULlGPltTDlZw4a5ratLEbd0cC0O +btHO7YzwedmkONNsZAiRfIKOgvWaHfkPHyeHznbE03FaTHfFXEEsIMij5Ed8Sb/8 +J2Rq6bNCRB3sUZyLdF7jMuk0KNl7WTskKyMGi5rC6MbJIGvifymAzHIpZ6Jy06sv +6imNf3QeCMBeg96z6geYpdnI32TbSAykYhLyTAECggEAOowrCVcdX5LdaMt/AYr4 +BjqkbjShzaKH+i+XQVZyGEBKAUrZvKuwsrB88vvMv187Xn++Q3l8uo9Gk/qFBcPD +gsPLS5YU/aaBJVY+VWtJXXw60SoU6B9b0xOuCRreIUNdPwtLW+vzvK1Vq9jEEZZ7 ++YuM3xObNYYG2POLkrzo+1LRxArwH7q87J+NOG0tA2A/IgkNgqHgOqvVfZOIPN5i +qLHOMGeTykjSe8obh8Tbvo7mHwNKchEBG9r7Jb09LGXOV3mC0BdDaGoqyqkR/b8d +mKJqklBStLOcwwHtwUDB4m/GuIy+U7sSUbVJNz8oZNruvSKbx+wqVa+dkzsX529q +GQKCAQBVzafsrfp3yZKa62R7EMtQh6pHDIKvUzZRwxsj4QzQ1y4Rrb6ceXKxI3EQ 
+ZK6f1Lte+/ifRn8ZsxQOnjNzO9meOco/7CSNGCCcqO/XVN9ixDdF8lzjIsuRqfkT +lsYy7Zo+ZRDUj73UROBvBJtX4jP5It1B/ISKxHxyBFQiB+UtldLl1H+dmGN9LVnF +583i/vTEcLsj9+8yUU8L46sLKfOhNiSBY8D8oKD9Yht0p9SeDxB/r4Rq8Te5Xp1o +FobswNohYBj2rj9+d24uMcpI5nx33JoRkW7VyAXsq8t4b7ei5/sbwuL25NUXhIxf +mMKDxHebdrFY2ADhWLkWus0ik7JA +-----END PRIVATE KEY----- diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 20c6b93478..add3743612 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -92,9 +92,9 @@ jobs: sudo apt-get install -qq postgresql-${{ matrix.pg_major }} export PGDATA=/etc/postgresql/${{ matrix.pg_major }}/main - sudo cp $GITHUB_WORKSPACE/.build/{server.crt,server.key} $PGDATA - sudo chmod 600 $PGDATA/{server.crt,server.key} - sudo chown postgres $PGDATA/{server.crt,server.key} + sudo cp $GITHUB_WORKSPACE/.build/{server.crt,server.key,ca.crt} $PGDATA + sudo chmod 600 $PGDATA/{server.crt,server.key,ca.crt} + sudo chown postgres $PGDATA/{server.crt,server.key,ca.crt} # Create npgsql_tests user with md5 password 'npgsql_tests' sudo -u postgres psql -c "CREATE USER npgsql_tests SUPERUSER PASSWORD 'md5adf74603a5772843f53e812f03dacb02'" @@ -113,6 +113,7 @@ jobs: sudo sed -i 's/max_connections = 100/max_connections = 500/' $PGDATA/postgresql.conf sudo sed -i 's/#ssl = off/ssl = on/' $PGDATA/postgresql.conf + sudo sed -i "s|ssl_ca_file =|ssl_ca_file = '$PGDATA/ca.crt' #|" $PGDATA/postgresql.conf sudo sed -i "s|ssl_cert_file =|ssl_cert_file = '$PGDATA/server.crt' #|" $PGDATA/postgresql.conf sudo sed -i "s|ssl_key_file =|ssl_key_file = '$PGDATA/server.key' #|" $PGDATA/postgresql.conf sudo sed -i 's/#password_encryption = md5/password_encryption = scram-sha-256/' $PGDATA/postgresql.conf @@ -163,7 +164,7 @@ jobs: unzip pgsql.zip -x 'pgsql/include/**' 'pgsql/doc/**' 'pgsql/pgAdmin 4/**' 'pgsql/StackBuilder/**' # Match Npgsql CI Docker image and stash one level up - cp $GITHUB_WORKSPACE/.build/{server.crt,server.key} pgsql + cp 
$GITHUB_WORKSPACE/.build/{server.crt,server.key,ca.crt} pgsql # Find OSGEO version number OSGEO_VERSION=$(\ @@ -199,7 +200,7 @@ jobs: sed -i "s|#synchronous_standby_names =|synchronous_standby_names = 'npgsql_test_sync_standby' #|" pgsql/PGDATA/postgresql.conf sed -i "s|#synchronous_commit =|synchronous_commit = local #|" pgsql/PGDATA/postgresql.conf sed -i "s|#max_prepared_transactions = 0|max_prepared_transactions = 100|" pgsql/PGDATA/postgresql.conf - pgsql/bin/pg_ctl -D pgsql/PGDATA -l logfile -o '-c ssl=true -c ssl_cert_file=../server.crt -c ssl_key_file=../server.key' start + pgsql/bin/pg_ctl -D pgsql/PGDATA -l logfile -o '-c ssl=true -c ssl_cert_file=../server.crt -c ssl_key_file=../server.key -c ssl_ca_file=../ca.crt' start # Create npgsql_tests user with md5 password 'npgsql_tests' pgsql/bin/psql -U postgres -c "CREATE ROLE npgsql_tests SUPERUSER LOGIN PASSWORD 'md5adf74603a5772843f53e812f03dacb02'" @@ -214,7 +215,7 @@ jobs: sed -i "s|#password_encryption = md5|password_encryption = scram-sha-256|" pgsql/PGDATA/postgresql.conf fi - pgsql/bin/pg_ctl -D pgsql/PGDATA -l logfile -o '-c ssl=true -c ssl_cert_file=../server.crt -c ssl_key_file=../server.key' restart + pgsql/bin/pg_ctl -D pgsql/PGDATA -l logfile -o '-c ssl=true -c ssl_cert_file=../server.crt -c ssl_key_file=../server.key -c ssl_ca_file=../ca.crt' restart pgsql/bin/psql -U postgres -c "CREATE ROLE npgsql_tests_scram SUPERUSER LOGIN PASSWORD 'npgsql_tests_scram'" @@ -241,13 +242,14 @@ jobs: PGDATA=/opt/homebrew/var/postgresql@${{ matrix.pg_major }} sudo sed -i '' 's/#ssl = off/ssl = on/' $PGDATA/postgresql.conf - cp $GITHUB_WORKSPACE/.build/{server.crt,server.key} $PGDATA - chmod 600 $PGDATA/{server.crt,server.key} + sudo sed -i '' "s/#ssl_ca_file =/ssl_ca_file = 'ca.crt' #/" $PGDATA/postgresql.conf + cp $GITHUB_WORKSPACE/.build/{server.crt,server.key,ca.crt} $PGDATA + chmod 600 $PGDATA/{server.crt,server.key,ca.crt} postgreService=$(brew services list | grep -oe "postgresql@${{ matrix.pg_major 
}}\S*") brew services start $postgreService - export PATH="/opt/homebrew/opt/postgresql@16/bin:$PATH" + export PATH="/opt/homebrew/opt/postgresql@${{ matrix.pg_major }}/bin:$PATH" echo "Check PostgreSQL service is running" i=5 COMMAND='pg_isready' diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 0d6c5e23d8..8d131912bd 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -917,7 +917,7 @@ internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, TargetHost = host, ClientCertificates = clientCertificates, EnabledSslProtocols = SslProtocols.None, - CertificateRevocationCheckMode = checkCertificateRevocation ? X509RevocationMode.Online : X509RevocationMode.Offline, + CertificateRevocationCheckMode = checkCertificateRevocation ? X509RevocationMode.Online : X509RevocationMode.NoCheck, RemoteCertificateValidationCallback = certificateValidationCallback, ApplicationProtocols = [_alpnProtocol] }; diff --git a/test/Npgsql.Tests/Npgsql.Tests.csproj b/test/Npgsql.Tests/Npgsql.Tests.csproj index 6b7baca8ad..5d100b68b5 100644 --- a/test/Npgsql.Tests/Npgsql.Tests.csproj +++ b/test/Npgsql.Tests/Npgsql.Tests.csproj @@ -10,6 +10,11 @@ + + + PreserveNewest + + true $(NoWarn);NPG9001 diff --git a/test/Npgsql.Tests/SecurityTests.cs b/test/Npgsql.Tests/SecurityTests.cs index c1af68f515..f6451a633f 100644 --- a/test/Npgsql.Tests/SecurityTests.cs +++ b/test/Npgsql.Tests/SecurityTests.cs @@ -522,6 +522,47 @@ public void Direct_ssl_requires_correct_sslmode([Values] SslMode sslMode) } } + [Test] + [Platform(Exclude = "MacOsX", Reason = "Mac requires explicit opt-in to receive CA certificate in TLS handshake")] + public async Task Connect_with_verify_and_ca_cert([Values(SslMode.VerifyCA, SslMode.VerifyFull)] SslMode sslMode) + { + if (!IsOnBuildServer) + Assert.Ignore("Only executed in CI"); + + await using var dataSource = CreateDataSource(csb => + { + csb.SslMode = sslMode; 
+ csb.RootCertificate = "ca.crt"; + }); + + await using var _ = await dataSource.OpenConnectionAsync(); + } + + [Test] + [Platform(Exclude = "MacOsX", Reason = "Mac requires explicit opt-in to receive CA certificate in TLS handshake")] + public async Task Connect_with_verify_check_host([Values(SslMode.VerifyCA, SslMode.VerifyFull)] SslMode sslMode) + { + if (!IsOnBuildServer) + Assert.Ignore("Only executed in CI"); + + await using var dataSource = CreateDataSource(csb => + { + csb.Host = "127.0.0.1"; + csb.SslMode = sslMode; + csb.RootCertificate = "ca.crt"; + }); + + if (sslMode == SslMode.VerifyCA) + { + await using var _ = await dataSource.OpenConnectionAsync(); + } + else + { + var ex = Assert.ThrowsAsync(async () => await dataSource.OpenConnectionAsync())!; + Assert.That(ex.InnerException, Is.TypeOf()); + } + } + [Test] [NonParallelizable] // Sets environment variable public async Task Direct_ssl_via_env_requires_correct_sslmode() From 62dcf217551b263a8fa21bb1336dc3cd57114360 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Tue, 17 Dec 2024 15:06:59 +0300 Subject: [PATCH 507/761] Remove stopwatch allocations (#5977) --- src/Npgsql/Internal/NpgsqlConnector.cs | 8 ++++---- src/Npgsql/MultiplexingDataSource.cs | 11 +++++------ src/Npgsql/NpgsqlEventSource.cs | 4 ++-- 3 files changed, 11 insertions(+), 12 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 8d131912bd..20586ce685 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -486,7 +486,7 @@ internal async Task Open(NpgsqlTimeout timeout, bool async, CancellationToken ca State = ConnectorState.Connecting; LogMessages.OpeningPhysicalConnection(ConnectionLogger, Host, Port, Database, UserFacingConnectionString); - var stopwatch = Stopwatch.StartNew(); + var startOpenTimestamp = Stopwatch.GetTimestamp(); try { @@ -557,7 +557,7 @@ internal async Task Open(NpgsqlTimeout timeout, bool async, 
CancellationToken ca } LogMessages.OpenedPhysicalConnection( - ConnectionLogger, Host, Port, Database, UserFacingConnectionString, stopwatch.ElapsedMilliseconds, Id); + ConnectionLogger, Host, Port, Database, UserFacingConnectionString, (long)Stopwatch.GetElapsedTime(startOpenTimestamp).TotalMilliseconds, Id); } catch (Exception e) { @@ -2646,7 +2646,7 @@ internal async Task Wait(bool async, int timeout, CancellationToken cancel LogMessages.SendingKeepalive(ConnectionLogger, Id); - var keepaliveTime = Stopwatch.StartNew(); + var keepaliveStartTimestamp = Stopwatch.GetTimestamp(); await WriteSync(async, cancellationToken).ConfigureAwait(false); await Flush(async, cancellationToken).ConfigureAwait(false); @@ -2687,7 +2687,7 @@ internal async Task Wait(bool async, int timeout, CancellationToken cancel } if (timeout > 0) - timeout -= (keepaliveMs + (int)keepaliveTime.ElapsedMilliseconds); + timeout -= (keepaliveMs + (int)Stopwatch.GetElapsedTime(keepaliveStartTimestamp).TotalMilliseconds); } } diff --git a/src/Npgsql/MultiplexingDataSource.cs b/src/Npgsql/MultiplexingDataSource.cs index 1d228e1f4c..74c32b8c6f 100644 --- a/src/Npgsql/MultiplexingDataSource.cs +++ b/src/Npgsql/MultiplexingDataSource.cs @@ -75,7 +75,7 @@ async Task MultiplexingWriteLoop() // on to the next connector. Debug.Assert(_multiplexCommandReader != null); - var stats = new MultiplexingStats { Stopwatch = new Stopwatch() }; + var stats = new MultiplexingStats(); while (true) { @@ -358,7 +358,7 @@ static void CompleteWrite(NpgsqlConnector connector, ref MultiplexingStats stats // for over-capacity write. 
connector.FlagAsWritableForMultiplexing(); - NpgsqlEventSource.Log.MultiplexingBatchSent(stats.NumCommands, stats.Stopwatch); + NpgsqlEventSource.Log.MultiplexingBatchSent(stats.NumCommands, Stopwatch.GetElapsedTime(stats.StartTimestamp).Ticks); } // ReSharper disable once FunctionNeverReturns @@ -380,19 +380,18 @@ protected override async ValueTask DisposeAsyncBase() struct MultiplexingStats { - internal Stopwatch Stopwatch; + internal long StartTimestamp; internal int NumCommands; internal void Reset() { NumCommands = 0; - Stopwatch.Reset(); + StartTimestamp = Stopwatch.GetTimestamp(); } internal MultiplexingStats Clone() { - var clone = new MultiplexingStats { Stopwatch = Stopwatch, NumCommands = NumCommands }; - Stopwatch = new Stopwatch(); + var clone = new MultiplexingStats { StartTimestamp = StartTimestamp, NumCommands = NumCommands }; return clone; } } diff --git a/src/Npgsql/NpgsqlEventSource.cs b/src/Npgsql/NpgsqlEventSource.cs index d50979bb64..82475142d2 100644 --- a/src/Npgsql/NpgsqlEventSource.cs +++ b/src/Npgsql/NpgsqlEventSource.cs @@ -101,14 +101,14 @@ internal void DataSourceCreated(NpgsqlDataSource dataSource) } } - internal void MultiplexingBatchSent(int numCommands, Stopwatch stopwatch) + internal void MultiplexingBatchSent(int numCommands, long elapsedTicks) { // TODO: CAS loop instead of 3 separate interlocked operations? 
if (IsEnabled()) { Interlocked.Increment(ref _multiplexingBatchesSent); Interlocked.Add(ref _multiplexingCommandsSent, numCommands); - Interlocked.Add(ref _multiplexingTicksWritten, stopwatch.ElapsedTicks); + Interlocked.Add(ref _multiplexingTicksWritten, elapsedTicks); } } From 523033601b8fd6e9b8642938929e025624884349 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 28 Dec 2024 05:10:53 +0200 Subject: [PATCH 508/761] Bump actions/setup-dotnet from 4.1.0 to 4.2.0 (#5983) --- .github/workflows/build.yml | 6 +++--- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/native-aot.yml | 4 ++-- .github/workflows/rich-code-nav.yml | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index add3743612..ab854702fa 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -69,7 +69,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.1.0 + uses: actions/setup-dotnet@v4.2.0 with: dotnet-version: | ${{ env.dotnet_sdk_version }} @@ -354,7 +354,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.1.0 + uses: actions/setup-dotnet@v4.2.0 with: dotnet-version: ${{ env.dotnet_sdk_version }} @@ -388,7 +388,7 @@ jobs: uses: actions/checkout@v4 - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.1.0 + uses: actions/setup-dotnet@v4.2.0 with: dotnet-version: ${{ env.dotnet_sdk_version }} diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 013421b14d..dbb8f48a39 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -66,7 +66,7 @@ jobs: # queries: ./path/to/local/query, your-org/your-repo/queries@main - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.1.0 + uses: actions/setup-dotnet@v4.2.0 with: dotnet-version: ${{ env.dotnet_sdk_version 
}} diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index 2f1b94a0d5..b599342c0e 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -108,7 +108,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.1.0 + uses: actions/setup-dotnet@v4.2.0 with: dotnet-version: | ${{ env.dotnet_sdk_version }} @@ -145,7 +145,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.1.0 + uses: actions/setup-dotnet@v4.2.0 with: dotnet-version: | ${{ env.dotnet_sdk_version }} diff --git a/.github/workflows/rich-code-nav.yml b/.github/workflows/rich-code-nav.yml index 278b05c95a..9175897230 100644 --- a/.github/workflows/rich-code-nav.yml +++ b/.github/workflows/rich-code-nav.yml @@ -24,7 +24,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.1.0 + uses: actions/setup-dotnet@v4.2.0 with: dotnet-version: ${{ env.dotnet_sdk_version }} From 010dc51ae61f9ca71d30ced4d025e87db4b7c17b Mon Sep 17 00:00:00 2001 From: Bruce Bowyer-Smyth Date: Sun, 29 Dec 2024 00:53:33 +1000 Subject: [PATCH 509/761] Use exception convenience methods (#5982) --- src/Npgsql/Internal/NpgsqlDatabaseInfo.cs | 3 +- .../Internal/NpgsqlReadBuffer.Stream.cs | 22 ++++------ src/Npgsql/Internal/NpgsqlReadBuffer.cs | 5 +-- src/Npgsql/Internal/NpgsqlWriteBuffer.cs | 6 +-- src/Npgsql/Internal/PgWriter.cs | 11 ++--- .../NpgsqlSnakeCaseNameTranslator.cs | 3 +- src/Npgsql/NpgsqlCommand.cs | 7 +--- src/Npgsql/NpgsqlConnection.cs | 23 ++++------- src/Npgsql/NpgsqlConnectionStringBuilder.cs | 40 +++++++------------ src/Npgsql/NpgsqlDataSource.cs | 5 +-- src/Npgsql/NpgsqlLargeObjectStream.cs | 25 +++++------- src/Npgsql/NpgsqlNestedDataReader.cs | 7 ++-- src/Npgsql/NpgsqlParameter.cs | 8 ++-- src/Npgsql/NpgsqlParameterCollection.cs | 39 +++++++----------- src/Npgsql/NpgsqlRawCopyStream.cs | 11 ++--- src/Npgsql/NpgsqlSchema.cs | 3 +- 
src/Npgsql/NpgsqlTransaction.cs | 15 ++----- src/Npgsql/NpgsqlTypes/NpgsqlRange.cs | 3 +- src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs | 9 ++--- src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs | 3 +- src/Npgsql/PreparedTextReader.cs | 18 +++------ .../LogicalReplicationConnectionExtensions.cs | 6 +-- .../Replication/ReplicationConnection.cs | 6 +-- src/Npgsql/ThrowHelper.cs | 4 -- src/Npgsql/Util/SubReadStream.cs | 5 +-- src/Npgsql/VolatileResourceManager.cs | 5 +-- test/Npgsql.Tests/ConnectionTests.cs | 2 +- test/Npgsql.Tests/MultipleHostsTests.cs | 2 +- 28 files changed, 96 insertions(+), 200 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs b/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs index 7e3aebe237..aca4144a09 100644 --- a/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs +++ b/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs @@ -313,8 +313,7 @@ protected static Version ParseServerVersion(string value) /// public static void RegisterFactory(INpgsqlDatabaseInfoFactory factory) { - if (factory == null) - throw new ArgumentNullException(nameof(factory)); + ArgumentNullException.ThrowIfNull(factory); var factories = new INpgsqlDatabaseInfoFactory[Factories.Length + 1]; factories[0] = factory; diff --git a/src/Npgsql/Internal/NpgsqlReadBuffer.Stream.cs b/src/Npgsql/Internal/NpgsqlReadBuffer.Stream.cs index 95b6c712f8..66f53503ed 100644 --- a/src/Npgsql/Internal/NpgsqlReadBuffer.Stream.cs +++ b/src/Npgsql/Internal/NpgsqlReadBuffer.Stream.cs @@ -73,8 +73,7 @@ public override long Position } set { - if (value < 0) - throw new ArgumentOutOfRangeException(nameof(value), "Non - negative number required."); + ArgumentOutOfRangeException.ThrowIfNegative(value); Seek(value, SeekOrigin.Begin); } } @@ -85,8 +84,7 @@ public override long Seek(long offset, SeekOrigin origin) if (!_canSeek) throw new NotSupportedException(); - if (offset > int.MaxValue) - throw new ArgumentOutOfRangeException(nameof(offset), "Stream length must be non-negative and less than 2^31 - 1 - origin."); + 
ArgumentOutOfRangeException.ThrowIfGreaterThan(offset, int.MaxValue); const string seekBeforeBegin = "An attempt was made to move the position before the beginning of the stream."; @@ -191,10 +189,7 @@ public override void Write(byte[] buffer, int offset, int count) => throw new NotSupportedException(); void CheckDisposed() - { - if (IsDisposed) - ThrowHelper.ThrowObjectDisposedException(nameof(ColumnStream)); - } + => ObjectDisposedException.ThrowIf(IsDisposed, this); protected override void Dispose(bool disposing) { @@ -224,13 +219,10 @@ async ValueTask DisposeCore(bool async) static void ValidateArguments(byte[] buffer, int offset, int count) { - if (buffer == null) - throw new ArgumentNullException(nameof(buffer)); - if (offset < 0) - throw new ArgumentOutOfRangeException(nameof(offset)); - if (count < 0) - throw new ArgumentOutOfRangeException(nameof(count)); + ArgumentNullException.ThrowIfNull(buffer); + ArgumentOutOfRangeException.ThrowIfNegative(offset); + ArgumentOutOfRangeException.ThrowIfNegative(count); if (buffer.Length - offset < count) - throw new ArgumentException("Offset and length were out of bounds for the array or count is greater than the number of elements from index to the end of the source collection."); + ThrowHelper.ThrowArgumentException("Offset and length were out of bounds for the array or count is greater than the number of elements from index to the end of the source collection."); } } diff --git a/src/Npgsql/Internal/NpgsqlReadBuffer.cs b/src/Npgsql/Internal/NpgsqlReadBuffer.cs index a9b094efc2..4befd85146 100644 --- a/src/Npgsql/Internal/NpgsqlReadBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlReadBuffer.cs @@ -113,10 +113,7 @@ internal NpgsqlReadBuffer( Encoding relaxedTextEncoding, bool usePool = false) { - if (size < MinimumSize) - { - throw new ArgumentOutOfRangeException(nameof(size), size, "Buffer size must be at least " + MinimumSize); - } + ArgumentOutOfRangeException.ThrowIfLessThan(size, MinimumSize); Connector = connector!; // 
TODO: Clean this up Underlying = stream; diff --git a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs index 821bb7e6b1..c768020718 100644 --- a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs @@ -101,8 +101,7 @@ internal NpgsqlWriteBuffer( int size, Encoding textEncoding) { - if (size < MinimumSize) - throw new ArgumentOutOfRangeException(nameof(size), size, "Buffer size must be at least " + MinimumSize); + ArgumentOutOfRangeException.ThrowIfLessThan(size, MinimumSize); Connector = connector!; // TODO: Clean this up; only null when used from PregeneratedMessages, where we don't care. Underlying = stream; @@ -579,8 +578,7 @@ void AdvanceMessageBytesFlushed(int count) void Throw() { - if (count < 0) - throw new ArgumentOutOfRangeException(nameof(count), "Can't advance by a negative count"); + ArgumentOutOfRangeException.ThrowIfNegative(count); if (_messageLength is null) throw Connector.Break(new InvalidOperationException("No message was started")); diff --git a/src/Npgsql/Internal/PgWriter.cs b/src/Npgsql/Internal/PgWriter.cs index 8dd0a9ba9f..fecf4b7474 100644 --- a/src/Npgsql/Internal/PgWriter.cs +++ b/src/Npgsql/Internal/PgWriter.cs @@ -495,14 +495,11 @@ public override Task WriteAsync(byte[] buffer, int offset, int count, Cancellati Task Write(bool async, byte[] buffer, int offset, int count, CancellationToken cancellationToken) { - if (buffer is null) - throw new ArgumentNullException(nameof(buffer)); - if (offset < 0) - throw new ArgumentNullException(nameof(offset)); - if (count < 0) - throw new ArgumentNullException(nameof(count)); + ArgumentNullException.ThrowIfNull(buffer); + ArgumentOutOfRangeException.ThrowIfNegative(offset); + ArgumentOutOfRangeException.ThrowIfNegative(count); if (buffer.Length - offset < count) - throw new ArgumentException("Offset and length were out of bounds for the array or count is greater than the number of elements from index to the end of the source 
collection."); + ThrowHelper.ThrowArgumentException("Offset and length were out of bounds for the array or count is greater than the number of elements from index to the end of the source collection."); if (async) { diff --git a/src/Npgsql/NameTranslation/NpgsqlSnakeCaseNameTranslator.cs b/src/Npgsql/NameTranslation/NpgsqlSnakeCaseNameTranslator.cs index 760ddb1e5a..998b5f6420 100644 --- a/src/Npgsql/NameTranslation/NpgsqlSnakeCaseNameTranslator.cs +++ b/src/Npgsql/NameTranslation/NpgsqlSnakeCaseNameTranslator.cs @@ -55,8 +55,7 @@ public NpgsqlSnakeCaseNameTranslator(bool legacyMode, CultureInfo? culture = nul /// public string TranslateMemberName(string clrName) { - if (clrName == null) - throw new ArgumentNullException(nameof(clrName)); + ArgumentNullException.ThrowIfNull(clrName); return LegacyMode ? string.Concat(LegacyModeMap(clrName)).ToLower(_culture) diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index 012ce4cf56..f1ef8bb832 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -223,9 +223,7 @@ public override int CommandTimeout get => _timeout ?? (InternalConnection?.CommandTimeout ?? DefaultTimeout); set { - if (value < 0) { - throw new ArgumentOutOfRangeException(nameof(value), value, "CommandTimeout can't be less than zero."); - } + ArgumentOutOfRangeException.ThrowIfNegative(value); _timeout = value; } @@ -1955,8 +1953,7 @@ public virtual NpgsqlCommand Clone() NpgsqlConnection? 
CheckAndGetConnection() { - if (State is CommandState.Disposed) - ThrowHelper.ThrowObjectDisposedException(GetType().FullName); + ObjectDisposedException.ThrowIf(State is CommandState.Disposed, this); var conn = InternalConnection; if (conn is null) diff --git a/src/Npgsql/NpgsqlConnection.cs b/src/Npgsql/NpgsqlConnection.cs index 74290d23f9..4b989349f7 100644 --- a/src/Npgsql/NpgsqlConnection.cs +++ b/src/Npgsql/NpgsqlConnection.cs @@ -1145,8 +1145,7 @@ public Task BeginBinaryImportAsync(string copyFromCommand, async Task BeginBinaryImport(bool async, string copyFromCommand, CancellationToken cancellationToken = default) { - if (copyFromCommand == null) - throw new ArgumentNullException(nameof(copyFromCommand)); + ArgumentNullException.ThrowIfNull(copyFromCommand); if (!IsValidCopyCommand(copyFromCommand)) throw new ArgumentException("Must contain a COPY FROM STDIN command!", nameof(copyFromCommand)); @@ -1196,8 +1195,7 @@ public Task BeginBinaryExportAsync(string copyToCommand, C async Task BeginBinaryExport(bool async, string copyToCommand, CancellationToken cancellationToken = default) { - if (copyToCommand == null) - throw new ArgumentNullException(nameof(copyToCommand)); + ArgumentNullException.ThrowIfNull(copyToCommand); if (!IsValidCopyCommand(copyToCommand)) throw new ArgumentException("Must contain a COPY TO STDOUT command!", nameof(copyToCommand)); @@ -1253,8 +1251,7 @@ public Task BeginTextImportAsync(string copyFromCommand, Cancellatio async Task BeginTextImport(bool async, string copyFromCommand, CancellationToken cancellationToken = default) { - if (copyFromCommand == null) - throw new ArgumentNullException(nameof(copyFromCommand)); + ArgumentNullException.ThrowIfNull(copyFromCommand); if (!IsValidCopyCommand(copyFromCommand)) throw new ArgumentException("Must contain a COPY FROM STDIN command!", nameof(copyFromCommand)); @@ -1311,8 +1308,7 @@ public Task BeginTextExportAsync(string copyToCommand, CancellationT async Task BeginTextExport(bool async, 
string copyToCommand, CancellationToken cancellationToken = default) { - if (copyToCommand == null) - throw new ArgumentNullException(nameof(copyToCommand)); + ArgumentNullException.ThrowIfNull(copyToCommand); if (!IsValidCopyCommand(copyToCommand)) throw new ArgumentException("Must contain a COPY TO STDOUT command!", nameof(copyToCommand)); @@ -1369,8 +1365,7 @@ public Task BeginRawBinaryCopyAsync(string copyCommand, Can async Task BeginRawBinaryCopy(bool async, string copyCommand, CancellationToken cancellationToken = default) { - if (copyCommand == null) - throw new ArgumentNullException(nameof(copyCommand)); + ArgumentNullException.ThrowIfNull(copyCommand); if (!IsValidCopyCommand(copyCommand)) throw new ArgumentException("Must contain a COPY TO STDOUT OR COPY FROM STDIN command!", nameof(copyCommand)); @@ -1534,10 +1529,7 @@ void CheckClosed() } void CheckDisposed() - { - if (_disposed) - ThrowHelper.ThrowObjectDisposedException(nameof(NpgsqlConnection)); - } + => ObjectDisposedException.ThrowIf(_disposed, this); internal void CheckReady() { @@ -1825,8 +1817,7 @@ public async ValueTask CloneWithAsync(string connectionString, /// The name of the database to use in place of the current database. 
public override void ChangeDatabase(string dbName) { - if (dbName == null) - throw new ArgumentNullException(nameof(dbName)); + ArgumentNullException.ThrowIfNull(dbName); if (string.IsNullOrEmpty(dbName)) throw new ArgumentOutOfRangeException(nameof(dbName), dbName, $"Invalid database name: {dbName}"); diff --git a/src/Npgsql/NpgsqlConnectionStringBuilder.cs b/src/Npgsql/NpgsqlConnectionStringBuilder.cs index f662dd4a83..ca0629d7f7 100644 --- a/src/Npgsql/NpgsqlConnectionStringBuilder.cs +++ b/src/Npgsql/NpgsqlConnectionStringBuilder.cs @@ -244,8 +244,7 @@ public int Port get => _port; set { - if (value <= 0) - throw new ArgumentOutOfRangeException(nameof(value), value, "Invalid port: " + value); + ArgumentOutOfRangeException.ThrowIfNegativeOrZero(value); _port = value; SetValue(nameof(Port), value); @@ -720,8 +719,7 @@ public int MinPoolSize get => _minPoolSize; set { - if (value < 0) - throw new ArgumentOutOfRangeException(nameof(value), value, "MinPoolSize can't be negative"); + ArgumentOutOfRangeException.ThrowIfNegative(value); _minPoolSize = value; SetValue(nameof(MinPoolSize), value); @@ -742,8 +740,7 @@ public int MaxPoolSize get => _maxPoolSize; set { - if (value < 0) - throw new ArgumentOutOfRangeException(nameof(value), value, "MaxPoolSize can't be negative"); + ArgumentOutOfRangeException.ThrowIfNegative(value); _maxPoolSize = value; SetValue(nameof(MaxPoolSize), value); @@ -836,8 +833,8 @@ public int Timeout get => _timeout; set { - if (value < 0 || value > NpgsqlConnection.TimeoutLimit) - throw new ArgumentOutOfRangeException(nameof(value), value, "Timeout must be between 0 and " + NpgsqlConnection.TimeoutLimit); + ArgumentOutOfRangeException.ThrowIfNegative(value); + ArgumentOutOfRangeException.ThrowIfGreaterThan(value, NpgsqlConnection.TimeoutLimit); _timeout = value; SetValue(nameof(Timeout), value); @@ -861,8 +858,7 @@ public int CommandTimeout get => _commandTimeout; set { - if (value < 0) - throw new ArgumentOutOfRangeException(nameof(value), 
value, "CommandTimeout can't be negative"); + ArgumentOutOfRangeException.ThrowIfNegative(value); _commandTimeout = value; SetValue(nameof(CommandTimeout), value); @@ -885,8 +881,7 @@ public int CancellationTimeout get => _cancellationTimeout; set { - if (value < -1) - throw new ArgumentOutOfRangeException(nameof(value), value, $"{nameof(CancellationTimeout)} can't less than -1"); + ArgumentOutOfRangeException.ThrowIfLessThan(value, -1); _cancellationTimeout = value; SetValue(nameof(CancellationTimeout), value); @@ -975,8 +970,7 @@ public int HostRecheckSeconds get => _hostRecheckSeconds; set { - if (value < 0) - throw new ArgumentException($"{HostRecheckSeconds} cannot be negative", nameof(HostRecheckSeconds)); + ArgumentOutOfRangeException.ThrowIfNegative(value); _hostRecheckSeconds = value; SetValue(nameof(HostRecheckSeconds), value); } @@ -1000,8 +994,7 @@ public int KeepAlive get => _keepAlive; set { - if (value < 0) - throw new ArgumentOutOfRangeException(nameof(value), value, "KeepAlive can't be negative"); + ArgumentOutOfRangeException.ThrowIfNegative(value); _keepAlive = value; SetValue(nameof(KeepAlive), value); @@ -1041,8 +1034,7 @@ public int TcpKeepAliveTime get => _tcpKeepAliveTime; set { - if (value < 0) - throw new ArgumentOutOfRangeException(nameof(value), value, "TcpKeepAliveTime can't be negative"); + ArgumentOutOfRangeException.ThrowIfNegative(value); _tcpKeepAliveTime = value; SetValue(nameof(TcpKeepAliveTime), value); @@ -1063,8 +1055,7 @@ public int TcpKeepAliveInterval get => _tcpKeepAliveInterval; set { - if (value < 0) - throw new ArgumentOutOfRangeException(nameof(value), value, "TcpKeepAliveInterval can't be negative"); + ArgumentOutOfRangeException.ThrowIfNegative(value); _tcpKeepAliveInterval = value; SetValue(nameof(TcpKeepAliveInterval), value); @@ -1160,8 +1151,7 @@ public int MaxAutoPrepare get => _maxAutoPrepare; set { - if (value < 0) - throw new ArgumentOutOfRangeException(nameof(value), value, $"{nameof(MaxAutoPrepare)} cannot 
be negative"); + ArgumentOutOfRangeException.ThrowIfNegative(value); _maxAutoPrepare = value; SetValue(nameof(MaxAutoPrepare), value); @@ -1183,8 +1173,7 @@ public int AutoPrepareMinUsages get => _autoPrepareMinUsages; set { - if (value < 1) - throw new ArgumentOutOfRangeException(nameof(value), value, $"{nameof(AutoPrepareMinUsages)} must be 1 or greater"); + ArgumentOutOfRangeException.ThrowIfNegativeOrZero(value); _autoPrepareMinUsages = value; SetValue(nameof(AutoPrepareMinUsages), value); @@ -1408,8 +1397,7 @@ public int InternalCommandTimeout internal void PostProcessAndValidate() { - if (string.IsNullOrWhiteSpace(Host)) - throw new ArgumentException("Host can't be null"); + ArgumentException.ThrowIfNullOrWhiteSpace(Host); if (Multiplexing && !Pooling) throw new ArgumentException("Pooling must be on to use multiplexing"); if (SslNegotiation == SslNegotiation.Direct && SslMode is not SslMode.Require and not SslMode.VerifyCA and not SslMode.VerifyFull) diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index ed434ee3c1..7d78fca230 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -533,10 +533,7 @@ protected virtual async ValueTask DisposeAsyncBase() } private protected void CheckDisposed() - { - if (_isDisposed == 1) - ThrowHelper.ThrowObjectDisposedException(GetType().FullName); - } + => ObjectDisposedException.ThrowIf(_isDisposed == 1, this); #endregion diff --git a/src/Npgsql/NpgsqlLargeObjectStream.cs b/src/Npgsql/NpgsqlLargeObjectStream.cs index 2f3c8b19b0..09d90b164a 100644 --- a/src/Npgsql/NpgsqlLargeObjectStream.cs +++ b/src/Npgsql/NpgsqlLargeObjectStream.cs @@ -64,14 +64,11 @@ public override Task ReadAsync(byte[] buffer, int offset, int count, Cancel async Task Read(bool async, byte[] buffer, int offset, int count, CancellationToken cancellationToken = default) { - if (buffer == null) - throw new ArgumentNullException(nameof(buffer)); - if (offset < 0) - throw new 
ArgumentOutOfRangeException(nameof(offset)); - if (count < 0) - throw new ArgumentOutOfRangeException(nameof(count)); + ArgumentNullException.ThrowIfNull(buffer); + ArgumentOutOfRangeException.ThrowIfNegative(offset); + ArgumentOutOfRangeException.ThrowIfNegative(count); if (buffer.Length - offset < count) - throw new ArgumentException("Invalid offset or count for this buffer"); + ThrowHelper.ThrowArgumentException("Invalid offset or count for this buffer"); CheckDisposed(); @@ -115,14 +112,11 @@ public override Task WriteAsync(byte[] buffer, int offset, int count, Cancellati async Task Write(bool async, byte[] buffer, int offset, int count, CancellationToken cancellationToken = default) { - if (buffer == null) - throw new ArgumentNullException(nameof(buffer)); - if (offset < 0) - throw new ArgumentOutOfRangeException(nameof(offset)); - if (count < 0) - throw new ArgumentOutOfRangeException(nameof(count)); + ArgumentNullException.ThrowIfNull(buffer); + ArgumentOutOfRangeException.ThrowIfNegative(offset); + ArgumentOutOfRangeException.ThrowIfNegative(count); if (buffer.Length - offset < count) - throw new ArgumentException("Invalid offset or count for this buffer"); + ThrowHelper.ThrowArgumentException("Invalid offset or count for this buffer"); CheckDisposed(); @@ -262,8 +256,7 @@ async Task SetLength(bool async, long value, CancellationToken cancellationToken { cancellationToken.ThrowIfCancellationRequested(); - if (value < 0) - throw new ArgumentOutOfRangeException(nameof(value)); + ArgumentOutOfRangeException.ThrowIfNegative(value); if (!Has64BitSupport && value != (int)value) throw new ArgumentOutOfRangeException(nameof(value), "offset must fit in 32 bits for PostgreSQL versions older than 9.3"); diff --git a/src/Npgsql/NpgsqlNestedDataReader.cs b/src/Npgsql/NpgsqlNestedDataReader.cs index b505fe04f0..cda412d1a5 100644 --- a/src/Npgsql/NpgsqlNestedDataReader.cs +++ b/src/Npgsql/NpgsqlNestedDataReader.cs @@ -174,8 +174,8 @@ public override bool IsClosed /// 
public override long GetBytes(int ordinal, long dataOffset, byte[]? buffer, int bufferOffset, int length) { - if (dataOffset is < 0 or > int.MaxValue) - throw new ArgumentOutOfRangeException(nameof(dataOffset), dataOffset, $"dataOffset must be between 0 and {int.MaxValue}"); + ArgumentOutOfRangeException.ThrowIfNegative(dataOffset); + ArgumentOutOfRangeException.ThrowIfGreaterThan(dataOffset, int.MaxValue); if (buffer != null && (bufferOffset < 0 || bufferOffset >= buffer.Length + 1)) throw new IndexOutOfRangeException($"bufferOffset must be between 0 and {buffer.Length}"); if (buffer != null && (length < 0 || length > buffer.Length - bufferOffset)) @@ -303,8 +303,7 @@ public override object GetValue(int ordinal) /// public override int GetValues(object[] values) { - if (values == null) - throw new ArgumentNullException(nameof(values)); + ArgumentNullException.ThrowIfNull(values); CheckOnRow(); var count = Math.Min(FieldCount, values.Length); diff --git a/src/Npgsql/NpgsqlParameter.cs b/src/Npgsql/NpgsqlParameter.cs index 6273a9617a..b1318d9b0a 100644 --- a/src/Npgsql/NpgsqlParameter.cs +++ b/src/Npgsql/NpgsqlParameter.cs @@ -365,9 +365,9 @@ public NpgsqlDbType NpgsqlDbType set { if (value == NpgsqlDbType.Array) - throw new ArgumentOutOfRangeException(nameof(value), "Cannot set NpgsqlDbType to just Array, Binary-Or with the element type (e.g. Array of Box is NpgsqlDbType.Array | NpgsqlDbType.Box)."); + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(value), "Cannot set NpgsqlDbType to just Array, Binary-Or with the element type (e.g. Array of Box is NpgsqlDbType.Array | NpgsqlDbType.Box)."); if (value == NpgsqlDbType.Range) - throw new ArgumentOutOfRangeException(nameof(value), "Cannot set NpgsqlDbType to just Range, Binary-Or with the element type (e.g. Range of integer is NpgsqlDbType.Range | NpgsqlDbType.Integer)"); + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(value), "Cannot set NpgsqlDbType to just Range, Binary-Or with the element type (e.g. 
Range of integer is NpgsqlDbType.Range | NpgsqlDbType.Integer)"); ResetTypeInfo(); _npgsqlDbType = value; @@ -453,7 +453,7 @@ public sealed override int Size set { if (value < -1) - throw new ArgumentException($"Invalid parameter Size value '{value}'. The value must be greater than or equal to 0."); + ThrowHelper.ThrowArgumentException($"Invalid parameter Size value '{value}'. The value must be greater than or equal to 0."); ResetBindingInfo(); _size = value; @@ -599,7 +599,7 @@ void ThrowNoTypeInfo() void ThrowNotSupported(string dataTypeName) { - throw new NotSupportedException(_npgsqlDbType is not null + ThrowHelper.ThrowNotSupportedException(_npgsqlDbType is not null ? $"The NpgsqlDbType '{_npgsqlDbType}' isn't present in your database. You may need to install an extension or upgrade to a newer version." : $"The data type name '{dataTypeName}' isn't present in your database. You may need to install an extension or upgrade to a newer version."); } diff --git a/src/Npgsql/NpgsqlParameterCollection.cs b/src/Npgsql/NpgsqlParameterCollection.cs index 8031fd7efc..b2c56d7ac7 100644 --- a/src/Npgsql/NpgsqlParameterCollection.cs +++ b/src/Npgsql/NpgsqlParameterCollection.cs @@ -166,28 +166,25 @@ internal void ChangeParameterName(NpgsqlParameter parameter, string? 
value) { get { - if (parameterName is null) - throw new ArgumentNullException(nameof(parameterName)); + ArgumentNullException.ThrowIfNull(parameterName); var index = IndexOf(parameterName); if (index == -1) - throw new ArgumentException("Parameter not found"); + ThrowHelper.ThrowArgumentException("Parameter not found"); return InternalList[index]; } set { - if (parameterName is null) - throw new ArgumentNullException(nameof(parameterName)); - if (value is null) - throw new ArgumentNullException(nameof(value)); + ArgumentNullException.ThrowIfNull(parameterName); + ArgumentNullException.ThrowIfNull(value); var index = IndexOf(parameterName); if (index == -1) - throw new ArgumentException("Parameter not found"); + ThrowHelper.ThrowArgumentException("Parameter not found"); if (!string.Equals(parameterName, value.TrimmedName, StringComparison.OrdinalIgnoreCase)) - throw new ArgumentException("Parameter name must be a case-insensitive match with the property 'ParameterName' on the given NpgsqlParameter", nameof(parameterName)); + ThrowHelper.ThrowArgumentException("Parameter name must be a case-insensitive match with the property 'ParameterName' on the given NpgsqlParameter", nameof(parameterName)); var oldValue = InternalList[index]; LookupChangeName(value, oldValue.ParameterName, oldValue.TrimmedName, index); @@ -206,8 +203,7 @@ internal void ChangeParameterName(NpgsqlParameter parameter, string? value) get => InternalList[index]; set { - if (value is null) - ThrowHelper.ThrowArgumentNullException(nameof(value)); + ArgumentNullException.ThrowIfNull(value); if (value.Collection is not null) ThrowHelper.ThrowInvalidOperationException("The parameter already belongs to a collection"); @@ -231,8 +227,7 @@ internal void ChangeParameterName(NpgsqlParameter parameter, string? value) /// The parameter that was added. 
public NpgsqlParameter Add(NpgsqlParameter value) { - if (value is null) - ThrowHelper.ThrowArgumentNullException(nameof(value)); + ArgumentNullException.ThrowIfNull(value); if (value.Collection is not null) ThrowHelper.ThrowInvalidOperationException("The parameter already belongs to a collection"); @@ -430,8 +425,7 @@ void BuildLookup() /// The zero-based index of the parameter. public override void RemoveAt(int index) { - if (InternalList.Count - 1 < index) - throw new ArgumentOutOfRangeException(nameof(index)); + ArgumentOutOfRangeException.ThrowIfGreaterThanOrEqual(index, InternalList.Count); Remove(InternalList[index]); } @@ -446,8 +440,7 @@ public override void Insert(int index, object value) /// The name of the to remove from the collection. public void Remove(string parameterName) { - if (parameterName is null) - ThrowHelper.ThrowArgumentNullException(nameof(parameterName)); + ArgumentNullException.ThrowIfNull(parameterName); var index = IndexOf(parameterName); if (index < 0) @@ -481,8 +474,7 @@ public override bool Contains(object value) /// public bool TryGetValue(string parameterName, [NotNullWhen(true)] out NpgsqlParameter? parameter) { - if (parameterName is null) - throw new ArgumentNullException(nameof(parameterName)); + ArgumentNullException.ThrowIfNull(parameterName); var index = IndexOf(parameterName); @@ -561,8 +553,7 @@ IEnumerator IEnumerable.GetEnumerator() /// public override void AddRange(Array values) { - if (values is null) - throw new ArgumentNullException(nameof(values)); + ArgumentNullException.ThrowIfNull(values); foreach (var parameter in values) Add(Cast(parameter)); @@ -599,8 +590,7 @@ public int IndexOf(NpgsqlParameter item) /// Parameter to insert. 
public void Insert(int index, NpgsqlParameter item) { - if (item is null) - throw new ArgumentNullException(nameof(item)); + ArgumentNullException.ThrowIfNull(item); if (item.Collection != null) throw new Exception("The parameter already belongs to a collection"); @@ -624,8 +614,7 @@ public void Insert(int index, NpgsqlParameter item) /// True if the parameter was found and removed, otherwise false. public bool Remove(NpgsqlParameter item) { - if (item == null) - ThrowHelper.ThrowArgumentNullException(nameof(item)); + ArgumentNullException.ThrowIfNull(item); if (item.Collection != this) ThrowHelper.ThrowInvalidOperationException("The item does not belong to this collection"); diff --git a/src/Npgsql/NpgsqlRawCopyStream.cs b/src/Npgsql/NpgsqlRawCopyStream.cs index d7b818679a..3648b24075 100644 --- a/src/Npgsql/NpgsqlRawCopyStream.cs +++ b/src/Npgsql/NpgsqlRawCopyStream.cs @@ -445,14 +445,11 @@ public override long Position #region Input validation static void ValidateArguments(byte[] buffer, int offset, int count) { - if (buffer == null) - throw new ArgumentNullException(nameof(buffer)); - if (offset < 0) - throw new ArgumentNullException(nameof(offset)); - if (count < 0) - throw new ArgumentNullException(nameof(count)); + ArgumentNullException.ThrowIfNull(buffer); + ArgumentOutOfRangeException.ThrowIfNegative(offset); + ArgumentOutOfRangeException.ThrowIfNegative(count); if (buffer.Length - offset < count) - throw new ArgumentException("Offset and length were out of bounds for the array or count is greater than the number of elements from index to the end of the source collection."); + ThrowHelper.ThrowArgumentException("Offset and length were out of bounds for the array or count is greater than the number of elements from index to the end of the source collection."); } #endregion } diff --git a/src/Npgsql/NpgsqlSchema.cs b/src/Npgsql/NpgsqlSchema.cs index 7ce5f3ec1d..409fe3b91e 100644 --- a/src/Npgsql/NpgsqlSchema.cs +++ b/src/Npgsql/NpgsqlSchema.cs @@ -19,8 +19,7 
@@ static class NpgsqlSchema { public static Task GetSchema(bool async, NpgsqlConnection conn, string? collectionName, string?[]? restrictions, CancellationToken cancellationToken = default) { - if (collectionName is null) - throw new ArgumentNullException(nameof(collectionName)); + ArgumentNullException.ThrowIfNull(collectionName); if (collectionName.Length == 0) throw new ArgumentException("Collection name cannot be empty.", nameof(collectionName)); diff --git a/src/Npgsql/NpgsqlTransaction.cs b/src/Npgsql/NpgsqlTransaction.cs index 1beebd0924..f88efbefa9 100644 --- a/src/Npgsql/NpgsqlTransaction.cs +++ b/src/Npgsql/NpgsqlTransaction.cs @@ -192,10 +192,7 @@ public override Task RollbackAsync(CancellationToken cancellationToken = default /// public override void Save(string name) { - if (name == null) - throw new ArgumentNullException(nameof(name)); - if (string.IsNullOrWhiteSpace(name)) - throw new ArgumentException("name can't be empty", nameof(name)); + ArgumentException.ThrowIfNullOrWhiteSpace(name); CheckReady(); if (!_connector.DatabaseInfo.SupportsTransactions) @@ -236,10 +233,7 @@ public override Task SaveAsync(string name, CancellationToken cancellationToken async Task Rollback(bool async, string name, CancellationToken cancellationToken = default) { - if (name == null) - throw new ArgumentNullException(nameof(name)); - if (string.IsNullOrWhiteSpace(name)) - throw new ArgumentException("name can't be empty", nameof(name)); + ArgumentException.ThrowIfNullOrWhiteSpace(name); CheckReady(); if (!_connector.DatabaseInfo.SupportsTransactions) @@ -271,10 +265,7 @@ public override Task RollbackAsync(string name, CancellationToken cancellationTo async Task Release(bool async, string name, CancellationToken cancellationToken = default) { - if (name == null) - throw new ArgumentNullException(nameof(name)); - if (string.IsNullOrWhiteSpace(name)) - throw new ArgumentException("name can't be empty", nameof(name)); + ArgumentException.ThrowIfNullOrWhiteSpace(name); 
CheckReady(); if (!_connector.DatabaseInfo.SupportsTransactions) diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlRange.cs b/src/Npgsql/NpgsqlTypes/NpgsqlRange.cs index b447cb5df7..23b2578c13 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlRange.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlRange.cs @@ -378,8 +378,7 @@ public override string ToString() [RequiresUnreferencedCode("Parse implementations for certain types of T may require members that have been trimmed.")] public static NpgsqlRange Parse(string value) { - if (value is null) - throw new ArgumentNullException(nameof(value)); + ArgumentNullException.ThrowIfNull(value); value = value.Trim(); diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs b/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs index bb1629705c..96585832f3 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs @@ -79,8 +79,7 @@ public override string ToString() [Obsolete("Client-side parsing of NpgsqlTsQuery is unreliable and cannot fully duplicate the PostgreSQL logic. Use PG functions instead (e.g. 
to_tsquery)")] public static NpgsqlTsQuery Parse(string value) { - if (value == null) - throw new ArgumentNullException(nameof(value)); + ArgumentNullException.ThrowIfNull(value); var valStack = new Stack(); var opStack = new Stack(); @@ -404,8 +403,7 @@ public string Text get => _text; set { - if (string.IsNullOrEmpty(value)) - throw new ArgumentException("Text is null or empty string", nameof(value)); + ArgumentException.ThrowIfNullOrEmpty(value); _text = value; } @@ -675,8 +673,7 @@ public NpgsqlTsQueryFollowedBy( NpgsqlTsQuery right) : base(NodeKind.Phrase, left, right) { - if (distance < 0) - throw new ArgumentOutOfRangeException(nameof(distance)); + ArgumentOutOfRangeException.ThrowIfNegative(distance); Distance = distance; } diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs b/src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs index 2cf1bcb3f7..7d63a547fe 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs @@ -76,8 +76,7 @@ internal NpgsqlTsVector(List lexemes, bool noCheck = false) [Obsolete("Client-side parsing of NpgsqlTsVector is unreliable and cannot fully duplicate the PostgreSQL logic. Use PG functions instead (e.g. to_tsvector)")] public static NpgsqlTsVector Parse(string value) { - if (value == null) - throw new ArgumentNullException(nameof(value)); + ArgumentNullException.ThrowIfNull(value); var lexemes = new List(); var pos = 0; diff --git a/src/Npgsql/PreparedTextReader.cs b/src/Npgsql/PreparedTextReader.cs index 4831850684..80ee543d9b 100644 --- a/src/Npgsql/PreparedTextReader.cs +++ b/src/Npgsql/PreparedTextReader.cs @@ -57,17 +57,12 @@ public override int Read(Span buffer) public override int Read(char[] buffer, int index, int count) { - if (buffer == null) - { - throw new ArgumentNullException(nameof(buffer)); - } - if (index < 0 || count < 0) - { - throw new ArgumentOutOfRangeException(index < 0 ? 
nameof(index) : nameof(count)); - } + ArgumentNullException.ThrowIfNull(buffer); + ArgumentOutOfRangeException.ThrowIfNegative(index); + ArgumentOutOfRangeException.ThrowIfNegative(count); if (buffer.Length - index < count) { - throw new ArgumentException("Offset and length were out of bounds for the array or count is greater than the number of elements from index to the end of the source collection."); + ThrowHelper.ThrowArgumentException("Offset and length were out of bounds for the array or count is greater than the number of elements from index to the end of the source collection."); } return Read(buffer.AsSpan(index, count)); @@ -95,10 +90,7 @@ public override string ReadToEnd() public override Task ReadToEndAsync() => Task.FromResult(ReadToEnd()); void CheckDisposed() - { - if (_disposed || _stream.IsDisposed) - ThrowHelper.ThrowObjectDisposedException(nameof(PreparedTextReader)); - } + => ObjectDisposedException.ThrowIf(_disposed || _stream.IsDisposed, this); public void Restart() { diff --git a/src/Npgsql/Replication/Internal/LogicalReplicationConnectionExtensions.cs b/src/Npgsql/Replication/Internal/LogicalReplicationConnectionExtensions.cs index 6f703970de..d66a9e55d1 100644 --- a/src/Npgsql/Replication/Internal/LogicalReplicationConnectionExtensions.cs +++ b/src/Npgsql/Replication/Internal/LogicalReplicationConnectionExtensions.cs @@ -61,10 +61,8 @@ public static Task CreateLogicalReplicationSlot( CancellationToken cancellationToken = default) { connection.CheckDisposed(); - if (slotName is null) - throw new ArgumentNullException(nameof(slotName)); - if (outputPlugin is null) - throw new ArgumentNullException(nameof(outputPlugin)); + ArgumentNullException.ThrowIfNull(slotName); + ArgumentNullException.ThrowIfNull(outputPlugin); cancellationToken.ThrowIfCancellationRequested(); diff --git a/src/Npgsql/Replication/ReplicationConnection.cs b/src/Npgsql/Replication/ReplicationConnection.cs index 575efb669b..94fe30ab25 100644 --- 
a/src/Npgsql/Replication/ReplicationConnection.cs +++ b/src/Npgsql/Replication/ReplicationConnection.cs @@ -322,8 +322,7 @@ public async Task IdentifySystem(CancellationTo /// The current setting of the run-time parameter specified in as . public Task Show(string parameterName, CancellationToken cancellationToken = default) { - if (parameterName is null) - throw new ArgumentNullException(nameof(parameterName)); + ArgumentNullException.ThrowIfNull(parameterName); return ShowInternal(parameterName, cancellationToken); @@ -710,8 +709,7 @@ async void TimerSendFeedback(object? obj) /// A task representing the asynchronous drop operation. public Task DropReplicationSlot(string slotName, bool wait = false, CancellationToken cancellationToken = default) { - if (slotName is null) - throw new ArgumentNullException(nameof(slotName)); + ArgumentNullException.ThrowIfNull(slotName); CheckDisposed(); diff --git a/src/Npgsql/ThrowHelper.cs b/src/Npgsql/ThrowHelper.cs index 1c754884ab..dc79128537 100644 --- a/src/Npgsql/ThrowHelper.cs +++ b/src/Npgsql/ThrowHelper.cs @@ -88,10 +88,6 @@ internal static void ThrowArgumentException(string message) internal static void ThrowArgumentException(string message, string paramName) => throw new ArgumentException(message, paramName); - [DoesNotReturn] - internal static void ThrowArgumentNullException(string paramName) - => throw new ArgumentNullException(paramName); - [DoesNotReturn] internal static void ThrowArgumentNullException(string message, string paramName) => throw new ArgumentNullException(paramName, message); diff --git a/src/Npgsql/Util/SubReadStream.cs b/src/Npgsql/Util/SubReadStream.cs index 9f0176b631..8d9d1b1ec5 100644 --- a/src/Npgsql/Util/SubReadStream.cs +++ b/src/Npgsql/Util/SubReadStream.cs @@ -75,10 +75,7 @@ public override long Position public override bool CanWrite => false; void ThrowIfDisposed() - { - if (_isDisposed) - throw new ObjectDisposedException(GetType().ToString()); - } + => 
ObjectDisposedException.ThrowIf(_isDisposed, this); void ThrowIfCantRead() { diff --git a/src/Npgsql/VolatileResourceManager.cs b/src/Npgsql/VolatileResourceManager.cs index 2e2d698834..92a716f2e2 100644 --- a/src/Npgsql/VolatileResourceManager.cs +++ b/src/Npgsql/VolatileResourceManager.cs @@ -293,10 +293,7 @@ void Dispose() #pragma warning restore CS8625 void CheckDisposed() - { - if (_isDisposed) - throw new ObjectDisposedException(nameof(VolatileResourceManager)); - } + => ObjectDisposedException.ThrowIf(_isDisposed, this); #endregion diff --git a/test/Npgsql.Tests/ConnectionTests.cs b/test/Npgsql.Tests/ConnectionTests.cs index 151255b8bc..15a550fe50 100644 --- a/test/Npgsql.Tests/ConnectionTests.cs +++ b/test/Npgsql.Tests/ConnectionTests.cs @@ -217,7 +217,7 @@ public void Bad_database() [Test, Description("Tests that mandatory connection string parameters are indeed mandatory")] public void Mandatory_connection_string_params() - => Assert.Throws(() => + => Assert.Throws(() => new NpgsqlConnection("User ID=npgsql_tests;Password=npgsql_tests;Database=npgsql_tests")); [Test, Description("Reuses the same connection instance for a failed connection, then a successful one")] diff --git a/test/Npgsql.Tests/MultipleHostsTests.cs b/test/Npgsql.Tests/MultipleHostsTests.cs index 662c08d5b9..e09cbae401 100644 --- a/test/Npgsql.Tests/MultipleHostsTests.cs +++ b/test/Npgsql.Tests/MultipleHostsTests.cs @@ -323,7 +323,7 @@ public void HostRecheckSeconds_zero_value() [Test] public void HostRecheckSeconds_invalid_throws() - => Assert.Throws(() => + => Assert.Throws(() => new NpgsqlConnectionStringBuilder { HostRecheckSeconds = -1 From 8effce1e3fbc851a66c6ca65101a26961ed325be Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 30 Jan 2025 21:35:12 +0000 Subject: [PATCH 510/761] Bump actions/setup-dotnet from 4.2.0 to 4.3.0 (#6007) --- .github/workflows/build.yml | 6 +++--- .github/workflows/codeql-analysis.yml | 2 +- 
.github/workflows/native-aot.yml | 4 ++-- .github/workflows/rich-code-nav.yml | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index ab854702fa..8547e3b8d1 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -69,7 +69,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.2.0 + uses: actions/setup-dotnet@v4.3.0 with: dotnet-version: | ${{ env.dotnet_sdk_version }} @@ -354,7 +354,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.2.0 + uses: actions/setup-dotnet@v4.3.0 with: dotnet-version: ${{ env.dotnet_sdk_version }} @@ -388,7 +388,7 @@ jobs: uses: actions/checkout@v4 - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.2.0 + uses: actions/setup-dotnet@v4.3.0 with: dotnet-version: ${{ env.dotnet_sdk_version }} diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index dbb8f48a39..edfd1fbd0f 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -66,7 +66,7 @@ jobs: # queries: ./path/to/local/query, your-org/your-repo/queries@main - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.2.0 + uses: actions/setup-dotnet@v4.3.0 with: dotnet-version: ${{ env.dotnet_sdk_version }} diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index b599342c0e..78c713c45b 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -108,7 +108,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.2.0 + uses: actions/setup-dotnet@v4.3.0 with: dotnet-version: | ${{ env.dotnet_sdk_version }} @@ -145,7 +145,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.2.0 + uses: actions/setup-dotnet@v4.3.0 with: dotnet-version: | ${{ env.dotnet_sdk_version }} diff --git 
a/.github/workflows/rich-code-nav.yml b/.github/workflows/rich-code-nav.yml index 9175897230..e007cab721 100644 --- a/.github/workflows/rich-code-nav.yml +++ b/.github/workflows/rich-code-nav.yml @@ -24,7 +24,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.2.0 + uses: actions/setup-dotnet@v4.3.0 with: dotnet-version: ${{ env.dotnet_sdk_version }} From 3505dc3abd7d32b706c97b26fbf8502e511c70a7 Mon Sep 17 00:00:00 2001 From: dvas-hash Date: Tue, 4 Feb 2025 12:37:03 +0100 Subject: [PATCH 511/761] Add support for postgresql type names with dots (#5971) Fixes #5972 --------- Co-authored-by: Dmitry Vasliyev --- src/Npgsql/Internal/Postgres/DataTypeName.cs | 4 +++- test/Npgsql.Tests/Support/TestBase.cs | 12 ++++++++--- test/Npgsql.Tests/Types/CompositeTests.cs | 22 ++++++++++++++++++++ 3 files changed, 34 insertions(+), 4 deletions(-) diff --git a/src/Npgsql/Internal/Postgres/DataTypeName.cs b/src/Npgsql/Internal/Postgres/DataTypeName.cs index c5b223f866..616881f385 100644 --- a/src/Npgsql/Internal/Postgres/DataTypeName.cs +++ b/src/Npgsql/Internal/Postgres/DataTypeName.cs @@ -127,12 +127,14 @@ public static DataTypeName FromDisplayName(string displayName, string? schema = // There is one exception and that's array syntax, which is always resolvable in both ways, while we want the canonical name. var schemaEndIndex = displayNameSpan.IndexOf('.'); if (schemaEndIndex is not -1 && + string.IsNullOrEmpty(schema) && !displayNameSpan.Slice(schemaEndIndex).StartsWith("_".AsSpan(), StringComparison.Ordinal) && !displayNameSpan.EndsWith("[]".AsSpan(), StringComparison.Ordinal)) return new(displayName); // First we strip the schema to get the type name. 
- if (schemaEndIndex is not -1) + if (schemaEndIndex is not -1 && + string.IsNullOrEmpty(schema)) { schema = displayNameSpan.Slice(0, schemaEndIndex).ToString(); displayNameSpan = displayNameSpan.Slice(schemaEndIndex + 1); diff --git a/test/Npgsql.Tests/Support/TestBase.cs b/test/Npgsql.Tests/Support/TestBase.cs index 66cdfb6780..61c4e2accf 100644 --- a/test/Npgsql.Tests/Support/TestBase.cs +++ b/test/Npgsql.Tests/Support/TestBase.cs @@ -201,7 +201,10 @@ internal static async Task AssertTypeReadCore( if (dotIndex > -1 && dataTypeName.Substring(0, dotIndex) is "pg_catalog" or "public") dataTypeName = dataTypeName.Substring(dotIndex + 1); - Assert.That(dataTypeName, Is.EqualTo(pgTypeName), + // For composite type with dots, postgres works only with quoted name - scheme."My.type.name" + // but npgsql converts it to name without quotes + var pgTypeNameWithoutQuotes = dataTypeName.Replace("\"", string.Empty); + Assert.That(dataTypeName, Is.EqualTo(pgTypeNameWithoutQuotes), $"Got wrong result from GetDataTypeName when reading '{truncatedSqlLiteral}'"); if (isDefault) @@ -300,9 +303,12 @@ internal static async Task AssertTypeWriteCore( } // With data type name - p = new NpgsqlParameter { Value = valueFactory(), DataTypeName = pgTypeNameWithoutFacets }; + // For composite type with dots in name, Postgresql returns name with quotes - scheme."My.type.name" + // but for npgsql mapping we should use names without quotes - scheme.My.type.name + var pgTypeNameWithoutFacetsAndDots = pgTypeNameWithoutFacets.Replace("\"", string.Empty); + p = new NpgsqlParameter { Value = valueFactory(), DataTypeName = pgTypeNameWithoutFacetsAndDots }; cmd.Parameters.Add(p); - errorIdentifier[++errorIdentifierIndex] = $"DataTypeName={pgTypeNameWithoutFacets}"; + errorIdentifier[++errorIdentifierIndex] = $"DataTypeName={pgTypeNameWithoutFacetsAndDots}"; CheckInference(); // With DbType diff --git a/test/Npgsql.Tests/Types/CompositeTests.cs b/test/Npgsql.Tests/Types/CompositeTests.cs index 
baaed149f3..765508908c 100644 --- a/test/Npgsql.Tests/Types/CompositeTests.cs +++ b/test/Npgsql.Tests/Types/CompositeTests.cs @@ -202,6 +202,28 @@ await AssertType( isDefaultForWriting: true); } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5972")] + public async Task With_schema_and_dots_in_type_name() + { + await using var adminConnection = await OpenConnectionAsync(); + var schema = await CreateTempSchema(adminConnection); + var typename = "Some.Composite.with.dots"; + + await adminConnection.ExecuteNonQueryAsync($"CREATE TYPE {schema}.\"{typename}\" AS (x int, some_text text)"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.MapComposite($"{schema}.{typename}"); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + await AssertType( + connection, + new SomeComposite { SomeText = "foobar", X = 10 }, + "(10,foobar)", + $"{schema}.\"{typename}\"", + npgsqlDbType: null); + } + [Test] public async Task Struct() { From e8664e596d9a16caafbabfbe58bdd5f8b6d53acb Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Tue, 4 Feb 2025 14:41:00 +0300 Subject: [PATCH 512/761] Send close_notify TLS alert on connection shutdown (#5995) Fixes #5994 --- src/Npgsql/Internal/NpgsqlConnector.cs | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 20586ce685..12e113a7a1 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -2189,10 +2189,25 @@ void FullCleanup() /// Closes the socket and cleans up client-side resources associated with this connector. /// /// - /// This method doesn't actually perform any meaningful I/O, and therefore is sync-only. + /// This method doesn't actually perform any meaningful I/O (except sending TLS alert), and therefore is sync-only. 
/// void Cleanup() { + if (_stream is SslStream sslStream) + { + try + { + // Send close_notify TLS alert to correctly close connection on postgres's side + sslStream.ShutdownAsync().GetAwaiter().GetResult(); + // Theoretically we should do a 0 read here to receive server's close_notify alert + // But overall it doesn't look like it makes much of a difference + } + catch + { + // ignored + } + } + try { _stream?.Dispose(); From 7f1a59fa8dc1ccc34a70154f49a768e1abf826ba Mon Sep 17 00:00:00 2001 From: Bruce Bowyer-Smyth Date: Fri, 7 Feb 2025 00:39:04 +1000 Subject: [PATCH 513/761] Remove DisplayClass struct creation in PgReader (#6014) --- src/Npgsql/Internal/PgReader.cs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Npgsql/Internal/PgReader.cs b/src/Npgsql/Internal/PgReader.cs index 7fbaa695cd..5da3ea7681 100644 --- a/src/Npgsql/Internal/PgReader.cs +++ b/src/Npgsql/Internal/PgReader.cs @@ -744,10 +744,10 @@ public bool ShouldBuffer(Size bufferRequirement) => ShouldBuffer(GetBufferRequirementByteCount(bufferRequirement)); public bool ShouldBuffer(int byteCount) { - return _buffer.ReadBytesLeft < byteCount && ShouldBufferSlow(); + return _buffer.ReadBytesLeft < byteCount && ShouldBufferSlow(byteCount); [MethodImpl(MethodImplOptions.NoInlining)] - bool ShouldBufferSlow() + bool ShouldBufferSlow(int byteCount) { if (byteCount > _buffer.Size) ThrowHelper.ThrowArgumentOutOfRangeException(nameof(byteCount), From 44a7ab1cc95c9a649283e85390896b838a0b3bed Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Fri, 21 Feb 2025 12:55:16 +0300 Subject: [PATCH 514/761] Always dispose RemoteCertificate on SslStream (#6022) Fixes #5993 --- src/Npgsql/Internal/NpgsqlConnector.Auth.cs | 2 ++ src/Npgsql/Internal/NpgsqlConnector.cs | 16 +++++++++++++++- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs index 8771ce8e33..1cd2b6e697 100644 --- 
a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs @@ -196,6 +196,8 @@ internal void AuthenticateSASLSha256Plus(ref string mechanism, ref string cbindF return; } + // While SslStream.RemoteCertificate is X509Certificate2, it actually returns X509Certificate2 + // But to be on the safe side we'll just create a new instance of it using var remoteCertificate = new X509Certificate2(sslStream.RemoteCertificate); // Checking for hashing algorithms HashAlgorithm? hashAlgorithm = null; diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 12e113a7a1..8f2df119b0 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -2193,7 +2193,8 @@ void FullCleanup() /// void Cleanup() { - if (_stream is SslStream sslStream) + var sslStream = _stream as SslStream; + if (sslStream is not null) { try { @@ -2208,6 +2209,19 @@ void Cleanup() } } + // After we access SslStream.RemoteCertificate (like for SASLSha256Plus) + // SslStream will no longer dispose it for us automatically + // Which is why we have to do it ourselves before disposing the stream + // As otherwise accessing RemoteCertificate will throw an exception + try + { + sslStream?.RemoteCertificate?.Dispose(); + } + catch + { + // ignored + } + try { _stream?.Dispose(); From a46eab961295bd52178e8129888f54f9c8988cbf Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Mon, 24 Feb 2025 13:32:43 +0300 Subject: [PATCH 515/761] Remove LongRunningConnection field from NpgsqlConnector (#6024) --- src/Npgsql/Internal/NpgsqlConnector.cs | 4 ---- src/Npgsql/Internal/NpgsqlReadBuffer.cs | 2 +- src/Npgsql/Replication/ReplicationConnection.cs | 2 -- 3 files changed, 1 insertion(+), 7 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 8f2df119b0..b1a92df4b6 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs 
@@ -182,9 +182,6 @@ internal string InferredUserName /// volatile Exception? _breakReason; - // Used by replication to change our cancellation behaviour on ColumnStreams. - internal bool LongRunningConnection { get; set; } - /// /// /// Used by the pool to indicate that I/O is currently in progress on this connector, so that another write @@ -2399,7 +2396,6 @@ internal async Task Reset(bool async) [MethodImpl(MethodImplOptions.AggressiveInlining)] void ResetReadBuffer() { - LongRunningConnection = false; if (_origReadBuffer != null) { Debug.Assert(_origReadBuffer.ReadBytesLeft == 0); diff --git a/src/Npgsql/Internal/NpgsqlReadBuffer.cs b/src/Npgsql/Internal/NpgsqlReadBuffer.cs index 4befd85146..d8622fc7a1 100644 --- a/src/Npgsql/Internal/NpgsqlReadBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlReadBuffer.cs @@ -678,7 +678,7 @@ public ColumnStream CreateStream(int len, bool canSeek, bool consumeOnDispose = { if (_lastStream is not { IsDisposed: true }) _lastStream = new ColumnStream(Connector); - _lastStream.Init(len, canSeek, !Connector.LongRunningConnection, consumeOnDispose); + _lastStream.Init(len, canSeek, Connector.Settings.ReplicationMode == ReplicationMode.Off, consumeOnDispose); return _lastStream; } diff --git a/src/Npgsql/Replication/ReplicationConnection.cs b/src/Npgsql/Replication/ReplicationConnection.cs index 94fe30ab25..4a41467164 100644 --- a/src/Npgsql/Replication/ReplicationConnection.cs +++ b/src/Npgsql/Replication/ReplicationConnection.cs @@ -237,8 +237,6 @@ public async Task Open(CancellationToken cancellationToken = default) SetTimeouts(CommandTimeout, CommandTimeout); - _npgsqlConnection.Connector!.LongRunningConnection = true; - ReplicationLogger = _npgsqlConnection.Connector!.LoggingConfiguration.ReplicationLogger; } From 01155b635f36976c14b4b53fff918d75c34f6928 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Mon, 24 Feb 2025 14:05:39 +0300 Subject: [PATCH 516/761] Tighten SCRAM-SHA-256 SASL check (#6023) --- 
src/Npgsql/Internal/NpgsqlConnector.Auth.cs | 10 +++++----- src/Npgsql/Internal/NpgsqlConnector.cs | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs index 1cd2b6e697..7d53040bac 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs @@ -71,10 +71,10 @@ async Task AuthenticateSASL(List mechanisms, string username, bool async { // At the time of writing PostgreSQL only supports SCRAM-SHA-256 and SCRAM-SHA-256-PLUS var serverSupportsSha256 = mechanisms.Contains("SCRAM-SHA-256"); - var clientSupportsSha256 = serverSupportsSha256 && Settings.ChannelBinding != ChannelBinding.Require; + var allowSha256 = serverSupportsSha256 && Settings.ChannelBinding != ChannelBinding.Require; var serverSupportsSha256Plus = mechanisms.Contains("SCRAM-SHA-256-PLUS"); - var clientSupportsSha256Plus = serverSupportsSha256Plus && Settings.ChannelBinding != ChannelBinding.Disable; - if (!clientSupportsSha256 && !clientSupportsSha256Plus) + var allowSha256Plus = serverSupportsSha256Plus && Settings.ChannelBinding != ChannelBinding.Disable; + if (!allowSha256 && !allowSha256Plus) { if (serverSupportsSha256 && Settings.ChannelBinding == ChannelBinding.Require) throw new NpgsqlException($"Couldn't connect because {nameof(ChannelBinding)} is set to {nameof(ChannelBinding.Require)} " + @@ -92,10 +92,10 @@ async Task AuthenticateSASL(List mechanisms, string username, bool async var cbind = string.Empty; var successfulBind = false; - if (clientSupportsSha256Plus) + if (allowSha256Plus) DataSource.TransportSecurityHandler.AuthenticateSASLSha256Plus(this, ref mechanism, ref cbindFlag, ref cbind, ref successfulBind); - if (!successfulBind && serverSupportsSha256) + if (!successfulBind && allowSha256) { mechanism = "SCRAM-SHA-256"; // We can get here if PostgreSQL supports only SCRAM-SHA-256 or there was an error while binding to 
SCRAM-SHA-256-PLUS diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index b1a92df4b6..8208e7386c 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -1658,12 +1658,12 @@ internal void ClearTransaction(Exception? disposeReason = null) internal bool IsSecure { get; private set; } /// - /// Returns whether SCRAM-SHA256 is being user for the connection + /// Returns whether SCRAM-SHA256 is being used for the connection /// internal bool IsScram { get; private set; } /// - /// Returns whether SCRAM-SHA256-PLUS is being user for the connection + /// Returns whether SCRAM-SHA256-PLUS is being used for the connection /// internal bool IsScramPlus { get; private set; } From 3146bcda307d49ecd21d9e73c87a142dd1ee8cba Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Mon, 24 Feb 2025 17:11:06 +0300 Subject: [PATCH 517/761] Add SHA3 hash algorithms for SASL authentication (#6028) Closes #6027 --------- Co-authored-by: Shay Rojansky --- src/Npgsql/Internal/NpgsqlConnector.Auth.cs | 74 ++++++++++----------- 1 file changed, 35 insertions(+), 39 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs index 7d53040bac..0d69907e7f 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs @@ -200,51 +200,47 @@ internal void AuthenticateSASLSha256Plus(ref string mechanism, ref string cbindF // But to be on the safe side we'll just create a new instance of it using var remoteCertificate = new X509Certificate2(sslStream.RemoteCertificate); // Checking for hashing algorithms - HashAlgorithm? 
hashAlgorithm = null; var algorithmName = remoteCertificate.SignatureAlgorithm.FriendlyName; - if (algorithmName is null) - { - ConnectionLogger.LogWarning("Signature algorithm was null, falling back to SCRAM-SHA-256"); - } - else if (algorithmName.StartsWith("sha1", StringComparison.OrdinalIgnoreCase) || - algorithmName.StartsWith("md5", StringComparison.OrdinalIgnoreCase) || - algorithmName.StartsWith("sha256", StringComparison.OrdinalIgnoreCase)) - { - hashAlgorithm = SHA256.Create(); - } - else if (algorithmName.StartsWith("sha384", StringComparison.OrdinalIgnoreCase)) - { - hashAlgorithm = SHA384.Create(); - } - else if (algorithmName.StartsWith("sha512", StringComparison.OrdinalIgnoreCase)) + + HashAlgorithm? hashAlgorithm = algorithmName switch { - hashAlgorithm = SHA512.Create(); - } - else + not null when algorithmName.StartsWith("sha1", StringComparison.OrdinalIgnoreCase) => SHA256.Create(), + not null when algorithmName.StartsWith("md5", StringComparison.OrdinalIgnoreCase) => SHA256.Create(), + not null when algorithmName.StartsWith("sha256", StringComparison.OrdinalIgnoreCase) => SHA256.Create(), + not null when algorithmName.StartsWith("sha384", StringComparison.OrdinalIgnoreCase) => SHA384.Create(), + not null when algorithmName.StartsWith("sha512", StringComparison.OrdinalIgnoreCase) => SHA512.Create(), + not null when algorithmName.StartsWith("sha3-256", StringComparison.OrdinalIgnoreCase) => SHA3_256.Create(), + not null when algorithmName.StartsWith("sha3-384", StringComparison.OrdinalIgnoreCase) => SHA3_384.Create(), + not null when algorithmName.StartsWith("sha3-512", StringComparison.OrdinalIgnoreCase) => SHA3_512.Create(), + + _ => null + }; + + if (hashAlgorithm is null) { ConnectionLogger.LogWarning( - $"Support for signature algorithm {algorithmName} is not yet implemented, falling back to SCRAM-SHA-256"); + algorithmName is null + ? 
"Signature algorithm was null, falling back to SCRAM-SHA-256" + : $"Support for signature algorithm {algorithmName} is not yet implemented, falling back to SCRAM-SHA-256"); + return; } - if (hashAlgorithm != null) - { - using var _ = hashAlgorithm; - - // RFC 5929 - mechanism = "SCRAM-SHA-256-PLUS"; - // PostgreSQL only supports tls-server-end-point binding - cbindFlag = "p=tls-server-end-point"; - // SCRAM-SHA-256-PLUS depends on using ssl stream, so it's fine - var cbindFlagBytes = Encoding.UTF8.GetBytes($"{cbindFlag},,"); - - var certificateHash = hashAlgorithm.ComputeHash(remoteCertificate.GetRawCertData()); - var cbindBytes = new byte[cbindFlagBytes.Length + certificateHash.Length]; - cbindFlagBytes.CopyTo(cbindBytes, 0); - certificateHash.CopyTo(cbindBytes, cbindFlagBytes.Length); - cbind = Convert.ToBase64String(cbindBytes); - successfulBind = true; - IsScramPlus = true; - } + using var _ = hashAlgorithm; + + // RFC 5929 + mechanism = "SCRAM-SHA-256-PLUS"; + // PostgreSQL only supports tls-server-end-point binding + cbindFlag = "p=tls-server-end-point"; + // SCRAM-SHA-256-PLUS depends on using ssl stream, so it's fine + var cbindFlagBytes = Encoding.UTF8.GetBytes($"{cbindFlag},,"); + + var certificateHash = hashAlgorithm.ComputeHash(remoteCertificate.GetRawCertData()); + var cbindBytes = new byte[cbindFlagBytes.Length + certificateHash.Length]; + cbindFlagBytes.CopyTo(cbindBytes, 0); + certificateHash.CopyTo(cbindBytes, cbindFlagBytes.Length); + cbind = Convert.ToBase64String(cbindBytes); + successfulBind = true; + IsScramPlus = true; } static byte[] Hi(string str, byte[] salt, int count) From 061a5f2059b7fb5132b0cc8bf332a2255dccef7c Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Tue, 4 Mar 2025 09:46:11 +0100 Subject: [PATCH 518/761] Remove dotnet SDK version from CI (use global.json) (#6037) --- .github/workflows/build.yml | 8 -------- .github/workflows/codeql-analysis.yml | 3 --- .github/workflows/native-aot.yml | 7 ------- 
.github/workflows/rich-code-nav.yml | 3 --- global.json | 2 +- 5 files changed, 1 insertion(+), 22 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 8547e3b8d1..5162a2bb45 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -15,7 +15,6 @@ concurrency: cancel-in-progress: true env: - dotnet_sdk_version: '9.0.100' postgis_version: 3 DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true # Windows comes with PG pre-installed, and defines the PGPASSWORD environment variable. Remove it as it interferes @@ -70,9 +69,6 @@ jobs: - name: Setup .NET Core SDK uses: actions/setup-dotnet@v4.3.0 - with: - dotnet-version: | - ${{ env.dotnet_sdk_version }} - name: Build run: dotnet build -c ${{ matrix.config }} @@ -355,8 +351,6 @@ jobs: - name: Setup .NET Core SDK uses: actions/setup-dotnet@v4.3.0 - with: - dotnet-version: ${{ env.dotnet_sdk_version }} - name: Pack run: dotnet pack Npgsql.sln --configuration Release --property:PackageOutputPath="$PWD/nupkgs" --version-suffix "ci.$(date -u +%Y%m%dT%H%M%S)+sha.${GITHUB_SHA:0:9}" -p:ContinuousIntegrationBuild=true @@ -389,8 +383,6 @@ jobs: - name: Setup .NET Core SDK uses: actions/setup-dotnet@v4.3.0 - with: - dotnet-version: ${{ env.dotnet_sdk_version }} - name: Pack run: dotnet pack Npgsql.sln --configuration Release --property:PackageOutputPath="$PWD/nupkgs" -p:ContinuousIntegrationBuild=true diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index edfd1fbd0f..9fa5eeb8e1 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -32,7 +32,6 @@ concurrency: cancel-in-progress: true env: - dotnet_sdk_version: '9.0.100' DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true jobs: @@ -67,8 +66,6 @@ jobs: - name: Setup .NET Core SDK uses: actions/setup-dotnet@v4.3.0 - with: - dotnet-version: ${{ env.dotnet_sdk_version }} - name: Build run: dotnet build -c Release diff --git a/.github/workflows/native-aot.yml 
b/.github/workflows/native-aot.yml index 78c713c45b..25514352ce 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -15,7 +15,6 @@ concurrency: cancel-in-progress: true env: - dotnet_sdk_version: '9.0.100' DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true AOT_Compat: | param([string]$targetFramework) @@ -109,9 +108,6 @@ jobs: - name: Setup .NET Core SDK uses: actions/setup-dotnet@v4.3.0 - with: - dotnet-version: | - ${{ env.dotnet_sdk_version }} - name: Write script run: echo "$AOT_Compat" > test-aot-compatibility.ps1 @@ -146,9 +142,6 @@ jobs: - name: Setup .NET Core SDK uses: actions/setup-dotnet@v4.3.0 - with: - dotnet-version: | - ${{ env.dotnet_sdk_version }} - name: Start PostgreSQL run: | diff --git a/.github/workflows/rich-code-nav.yml b/.github/workflows/rich-code-nav.yml index e007cab721..b25a971133 100644 --- a/.github/workflows/rich-code-nav.yml +++ b/.github/workflows/rich-code-nav.yml @@ -4,7 +4,6 @@ on: workflow_dispatch: env: - dotnet_sdk_version: '9.0.100' DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true jobs: @@ -25,8 +24,6 @@ jobs: - name: Setup .NET Core SDK uses: actions/setup-dotnet@v4.3.0 - with: - dotnet-version: ${{ env.dotnet_sdk_version }} - name: Build run: dotnet build Npgsql.sln --configuration Debug diff --git a/global.json b/global.json index 733b653c18..9f1e930171 100644 --- a/global.json +++ b/global.json @@ -1,6 +1,6 @@ { "sdk": { - "version": "9.0.100", + "version": "9.0.200", "rollForward": "latestMajor", "allowPrerelease": false } From a4a7f609c07e5dd9a046c8bd9d20c208ad11a12c Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Tue, 4 Mar 2025 15:12:41 +0300 Subject: [PATCH 519/761] Add support for specifying allowed auth methods (#6036) Closes #6035 --- src/Npgsql/Internal/NpgsqlConnector.Auth.cs | 21 ++++ src/Npgsql/NpgsqlConnectionStringBuilder.cs | 100 +++++++++++++++ src/Npgsql/PostgresEnvironment.cs | 2 + src/Npgsql/PublicAPI.Unshipped.txt | 2 + test/Npgsql.Tests/ConnectionTests.cs | 133 ++++++++++++++++++++ 
test/Npgsql.Tests/SecurityTests.cs | 12 +- 6 files changed, 264 insertions(+), 6 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs index 0d69907e7f..8dc5231fd2 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs @@ -18,6 +18,12 @@ partial class NpgsqlConnector { async Task Authenticate(string username, NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken) { + var requiredAuthModes = Settings.RequireAuthModes; + if (requiredAuthModes == default) + requiredAuthModes = NpgsqlConnectionStringBuilder.ParseAuthMode(PostgresEnvironment.RequireAuth); + + var authenticated = false; + while (true) { timeout.CheckAndApply(this); @@ -25,23 +31,30 @@ async Task Authenticate(string username, NpgsqlTimeout timeout, bool async, Canc switch (msg.AuthRequestType) { case AuthenticationRequestType.Ok: + // If we didn't complete authentication, check whether it's allowed + if (!authenticated) + ThrowIfNotAllowed(requiredAuthModes, RequireAuthMode.None); return; case AuthenticationRequestType.CleartextPassword: + ThrowIfNotAllowed(requiredAuthModes, RequireAuthMode.Password); await AuthenticateCleartext(username, async, cancellationToken).ConfigureAwait(false); break; case AuthenticationRequestType.MD5Password: + ThrowIfNotAllowed(requiredAuthModes, RequireAuthMode.MD5); await AuthenticateMD5(username, ((AuthenticationMD5PasswordMessage)msg).Salt, async, cancellationToken).ConfigureAwait(false); break; case AuthenticationRequestType.SASL: + ThrowIfNotAllowed(requiredAuthModes, RequireAuthMode.ScramSHA256); await AuthenticateSASL(((AuthenticationSASLMessage)msg).Mechanisms, username, async, cancellationToken).ConfigureAwait(false); break; case AuthenticationRequestType.GSS: case AuthenticationRequestType.SSPI: + ThrowIfNotAllowed(requiredAuthModes, msg.AuthRequestType == AuthenticationRequestType.GSS ? 
RequireAuthMode.GSS : RequireAuthMode.SSPI); await DataSource.IntegratedSecurityHandler.NegotiateAuthentication(async, this).ConfigureAwait(false); return; @@ -51,6 +64,14 @@ await AuthenticateSASL(((AuthenticationSASLMessage)msg).Mechanisms, username, as default: throw new NotSupportedException($"Authentication method not supported (Received: {msg.AuthRequestType})"); } + + authenticated = true; + } + + static void ThrowIfNotAllowed(RequireAuthMode requiredAuthModes, RequireAuthMode requestedAuthMode) + { + if (!requiredAuthModes.HasFlag(requestedAuthMode)) + throw new NpgsqlException($"\"{requestedAuthMode}\" authentication method is not allowed. Allowed methods: {requiredAuthModes}"); } } diff --git a/src/Npgsql/NpgsqlConnectionStringBuilder.cs b/src/Npgsql/NpgsqlConnectionStringBuilder.cs index ca0629d7f7..f0b258d356 100644 --- a/src/Npgsql/NpgsqlConnectionStringBuilder.cs +++ b/src/Npgsql/NpgsqlConnectionStringBuilder.cs @@ -683,6 +683,70 @@ public ChannelBinding ChannelBinding } ChannelBinding _channelBinding; + /// + /// Controls the available authentication methods. + /// + [Category("Security")] + [Description("Controls the available authentication methods.")] + [DisplayName("Require Auth")] + [NpgsqlConnectionStringProperty] + public string? RequireAuth + { + get => _requireAuth; + set + { + RequireAuthModes = ParseAuthMode(value); + _requireAuth = value; + SetValue(nameof(RequireAuth), value); + } + } + string? _requireAuth; + + internal RequireAuthMode RequireAuthModes { get; private set; } + + internal static RequireAuthMode ParseAuthMode(string? 
value) + { + var modes = value?.Split(',', StringSplitOptions.TrimEntries | StringSplitOptions.RemoveEmptyEntries); + if (modes is not { Length: > 0 }) + return RequireAuthMode.All; + + var isNegative = false; + RequireAuthMode parsedModes = default; + for (var i = 0; i < modes.Length; i++) + { + var mode = modes[i]; + var modeToParse = mode.AsSpan(); + if (mode.StartsWith('!')) + { + if (i > 0 && !isNegative) + throw new ArgumentException("Mixing both positive and negative authentication methods is not supported"); + + modeToParse = modeToParse.Slice(1); + isNegative = true; + } + else + { + if (i > 0 && isNegative) + throw new ArgumentException("Mixing both positive and negative authentication methods is not supported"); + } + + // Explicitly disallow 'All' as libpq doesn't have it + if (!Enum.TryParse(modeToParse, out var parsedMode) || parsedMode == RequireAuthMode.All) + throw new ArgumentException($"Unable to parse authentication method \"{modeToParse}\""); + + parsedModes |= parsedMode; + } + + var allowedModes = isNegative + ? (RequireAuthMode)(RequireAuthMode.All - parsedModes) + : parsedModes; + + if (allowedModes == default) + throw new ArgumentException($"No authentication method is allowed. Check \"{nameof(RequireAuth)}\" in connection string."); + + return allowedModes; + } + #endregion #region Properties - Pooling @@ -1735,4 +1799,40 @@ enum ReplicationMode Logical } +/// +/// Specifies which authentication methods are supported. +/// +[Flags] +enum RequireAuthMode +{ + /// + /// Plaintext password. + /// + Password = 1, + /// + /// MD5 hashed password. + /// + MD5 = 2, + /// + /// Kerberos. + /// + GSS = 4, + /// + /// Windows SSPI. + /// + SSPI = 8, + /// + /// SASL. + /// + ScramSHA256 = 16, + /// + /// No authentication exchange. + /// + None = 32, + /// + /// All authentication methods. For internal use. 
+ /// + All = Password | MD5 | GSS | SSPI | ScramSHA256 | None +} + #endregion diff --git a/src/Npgsql/PostgresEnvironment.cs b/src/Npgsql/PostgresEnvironment.cs index bacdd9bfde..389df7d085 100644 --- a/src/Npgsql/PostgresEnvironment.cs +++ b/src/Npgsql/PostgresEnvironment.cs @@ -50,6 +50,8 @@ internal static string? SslCertRootDefault internal static string? SslNegotiation => Environment.GetEnvironmentVariable("PGSSLNEGOTIATION"); + internal static string? RequireAuth => Environment.GetEnvironmentVariable("PGREQUIREAUTH"); + static string? GetHomeDir() => Environment.GetEnvironmentVariable(RuntimeInformation.IsOSPlatform(OSPlatform.Windows) ? "APPDATA" : "HOME"); diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index 10d2965ba0..2311f1eb30 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -3,6 +3,8 @@ abstract Npgsql.NpgsqlDataSource.Clear() -> void Npgsql.NpgsqlConnection.CloneWithAsync(string! connectionString, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.ValueTask Npgsql.NpgsqlConnection.SslClientAuthenticationOptionsCallback.get -> System.Action? Npgsql.NpgsqlConnection.SslClientAuthenticationOptionsCallback.set -> void +Npgsql.NpgsqlConnectionStringBuilder.RequireAuth.get -> string? +Npgsql.NpgsqlConnectionStringBuilder.RequireAuth.set -> void Npgsql.NpgsqlConnectionStringBuilder.SslNegotiation.get -> Npgsql.SslNegotiation Npgsql.NpgsqlConnectionStringBuilder.SslNegotiation.set -> void Npgsql.NpgsqlDataSourceBuilder.ConfigureTypeLoading(System.Action! configureAction) -> Npgsql.NpgsqlDataSourceBuilder! 
diff --git a/test/Npgsql.Tests/ConnectionTests.cs b/test/Npgsql.Tests/ConnectionTests.cs index 15a550fe50..90dbd4ecf1 100644 --- a/test/Npgsql.Tests/ConnectionTests.cs +++ b/test/Npgsql.Tests/ConnectionTests.cs @@ -1679,6 +1679,139 @@ public async Task PhysicalConnectionInitializer_disposes_connection() #endregion Physical connection initialization + #region Require auth + + [Test] + public async Task Connect_with_any_auth() + { + await using var dataSource = CreateDataSource(csb => + { + csb.RequireAuth = $"{RequireAuthMode.Password},{RequireAuthMode.MD5},{RequireAuthMode.GSS},{RequireAuthMode.SSPI},{RequireAuthMode.ScramSHA256},{RequireAuthMode.None}"; + }); + await using var conn = await dataSource.OpenConnectionAsync(); + } + + [Test] + [NonParallelizable] // Sets environment variable + public async Task Connect_with_any_auth_env() + { + using var _ = SetEnvironmentVariable("PGREQUIREAUTH", $"{RequireAuthMode.Password},{RequireAuthMode.MD5},{RequireAuthMode.GSS},{RequireAuthMode.SSPI},{RequireAuthMode.ScramSHA256},{RequireAuthMode.None}"); + await using var dataSource = CreateDataSource(); + await using var conn = await dataSource.OpenConnectionAsync(); + } + + [Test] + public async Task Connect_with_any_except_none_auth() + { + await using var dataSource = CreateDataSource(csb => + { + csb.RequireAuth = $"!{RequireAuthMode.None}"; + }); + await using var conn = await dataSource.OpenConnectionAsync(); + } + + [Test] + [NonParallelizable] // Sets environment variable + public async Task Connect_with_any_except_none_auth_env() + { + using var _ = SetEnvironmentVariable("PGREQUIREAUTH", $"!{RequireAuthMode.None}"); + await using var dataSource = CreateDataSource(); + await using var conn = await dataSource.OpenConnectionAsync(); + } + + [Test] + public async Task Fail_connect_with_none_auth() + { + await using var dataSource = CreateDataSource(csb => + { + csb.RequireAuth = $"{RequireAuthMode.None}"; + }); + var ex = Assert.ThrowsAsync(async () => await 
dataSource.OpenConnectionAsync())!; + Assert.That(ex.Message, Does.Contain("authentication method is not allowed")); + } + + [Test] + [NonParallelizable] // Sets environment variable + public async Task Fail_connect_with_none_auth_env() + { + using var _ = SetEnvironmentVariable("PGREQUIREAUTH", $"{RequireAuthMode.None}"); + await using var dataSource = CreateDataSource(); + var ex = Assert.ThrowsAsync(async () => await dataSource.OpenConnectionAsync())!; + Assert.That(ex.Message, Does.Contain("authentication method is not allowed")); + } + + [Test] + public async Task Connect_with_md5_auth() + { + await using var dataSource = CreateDataSource(csb => + { + csb.RequireAuth = $"{RequireAuthMode.MD5}"; + }); + try + { + await using var conn = await dataSource.OpenConnectionAsync(); + } + catch (Exception e) when (!IsOnBuildServer) + { + Console.WriteLine(e); + Assert.Ignore("MD5 authentication doesn't seem to be set up"); + } + } + + [Test] + [NonParallelizable] // Sets environment variable + public async Task Connect_with_md5_auth_env() + { + using var _ = SetEnvironmentVariable("PGREQUIREAUTH", $"{RequireAuthMode.MD5}"); + await using var dataSource = CreateDataSource(); + try + { + await using var conn = await dataSource.OpenConnectionAsync(); + } + catch (Exception e) when (!IsOnBuildServer) + { + Console.WriteLine(e); + Assert.Ignore("MD5 authentication doesn't seem to be set up"); + } + } + + [Test] + public void Mixed_auth_methods_not_supported([Values( + $"{nameof(RequireAuthMode.ScramSHA256)},!{nameof(RequireAuthMode.None)}", + $"!{nameof(RequireAuthMode.ScramSHA256)},{nameof(RequireAuthMode.None)}")] + string authMethods) + { + var csb = new NpgsqlConnectionStringBuilder(); + Assert.Throws(() => csb.RequireAuth = authMethods); + } + + [Test] + public void Remove_all_auth_methods_throws() + { + var csb = new NpgsqlConnectionStringBuilder(); + Assert.Throws(() => + csb.RequireAuth = 
$"!{RequireAuthMode.Password},!{RequireAuthMode.MD5},!{RequireAuthMode.GSS},!{RequireAuthMode.SSPI},!{RequireAuthMode.ScramSHA256},!{RequireAuthMode.None}"); + } + + [Test] + public void Unknown_auth_method_throws() + { + var csb = new NpgsqlConnectionStringBuilder(); + Assert.Throws(() => csb.RequireAuth = "SuperSecure"); + } + + [Test] + public void Auth_methods_are_trimmed() + { + var csb = new NpgsqlConnectionStringBuilder + { + RequireAuth = $"{RequireAuthMode.Password} , {RequireAuthMode.MD5}" + }; + Assert.That(csb.RequireAuthModes, Is.EqualTo(RequireAuthMode.Password | RequireAuthMode.MD5)); + } + + #endregion Require auth + [Test] [NonParallelizable] // Modifies global database info factories [IssueLink("https://github.com/npgsql/npgsql/issues/4425")] diff --git a/test/Npgsql.Tests/SecurityTests.cs b/test/Npgsql.Tests/SecurityTests.cs index f6451a633f..13b7ca7495 100644 --- a/test/Npgsql.Tests/SecurityTests.cs +++ b/test/Npgsql.Tests/SecurityTests.cs @@ -13,27 +13,27 @@ namespace Npgsql.Tests; public class SecurityTests : TestBase { [Test, Description("Establishes an SSL connection, assuming a self-signed server certificate")] - public void Basic_ssl() + public async Task Basic_ssl() { - using var dataSource = CreateDataSource(csb => + await using var dataSource = CreateDataSource(csb => { csb.SslMode = SslMode.Require; }); - using var conn = dataSource.OpenConnection(); + await using var conn = await dataSource.OpenConnectionAsync(); Assert.That(conn.IsSecure, Is.True); } [Test, Description("Default user must run with md5 password encryption")] - public void Default_user_uses_md5_password() + public async Task Default_user_uses_md5_password() { if (!IsOnBuildServer) Assert.Ignore("Only executed in CI"); - using var dataSource = CreateDataSource(csb => + await using var dataSource = CreateDataSource(csb => { csb.SslMode = SslMode.Require; }); - using var conn = dataSource.OpenConnection(); + await using var conn = await dataSource.OpenConnectionAsync(); 
Assert.That(conn.IsScram, Is.False); Assert.That(conn.IsScramPlus, Is.False); } From 81e9c5808a279e82a8471c8757e08adbe9dcd14f Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Mon, 17 Mar 2025 10:34:47 +0100 Subject: [PATCH 520/761] Migrate to SLNX (#6053) --- .github/workflows/build.yml | 4 +- .github/workflows/rich-code-nav.yml | 2 +- Npgsql.sln | 204 ------------------ Npgsql.slnx | 35 +++ ...sln.DotSettings => Npgsql.slnx.DotSettings | 0 5 files changed, 38 insertions(+), 207 deletions(-) delete mode 100644 Npgsql.sln create mode 100644 Npgsql.slnx rename Npgsql.sln.DotSettings => Npgsql.slnx.DotSettings (100%) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 5162a2bb45..76e67b4513 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -353,7 +353,7 @@ jobs: uses: actions/setup-dotnet@v4.3.0 - name: Pack - run: dotnet pack Npgsql.sln --configuration Release --property:PackageOutputPath="$PWD/nupkgs" --version-suffix "ci.$(date -u +%Y%m%dT%H%M%S)+sha.${GITHUB_SHA:0:9}" -p:ContinuousIntegrationBuild=true + run: dotnet pack --configuration Release --property:PackageOutputPath="$PWD/nupkgs" --version-suffix "ci.$(date -u +%Y%m%dT%H%M%S)+sha.${GITHUB_SHA:0:9}" -p:ContinuousIntegrationBuild=true - name: Upload artifacts (nupkg) uses: actions/upload-artifact@v4 @@ -385,7 +385,7 @@ jobs: uses: actions/setup-dotnet@v4.3.0 - name: Pack - run: dotnet pack Npgsql.sln --configuration Release --property:PackageOutputPath="$PWD/nupkgs" -p:ContinuousIntegrationBuild=true + run: dotnet pack --configuration Release --property:PackageOutputPath="$PWD/nupkgs" -p:ContinuousIntegrationBuild=true - name: Upload artifacts uses: actions/upload-artifact@v4 diff --git a/.github/workflows/rich-code-nav.yml b/.github/workflows/rich-code-nav.yml index b25a971133..0266f288ff 100644 --- a/.github/workflows/rich-code-nav.yml +++ b/.github/workflows/rich-code-nav.yml @@ -26,7 +26,7 @@ jobs: uses: actions/setup-dotnet@v4.3.0 - name: Build - 
run: dotnet build Npgsql.sln --configuration Debug + run: dotnet build --configuration Debug shell: bash - name: Rich Navigation Indexing diff --git a/Npgsql.sln b/Npgsql.sln deleted file mode 100644 index 80ef02c3a8..0000000000 --- a/Npgsql.sln +++ /dev/null @@ -1,204 +0,0 @@ - -Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio Version 16 -VisualStudioVersion = 16.0.28822.285 -MinimumVisualStudioVersion = 10.0.40219.1 -Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "src", "src", "{8537E50E-CF7F-49CB-B4EF-3E2A1B11F050}" -EndProject -Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "test", "test", "{ED612DB1-AB32-4603-95E7-891BACA71C39}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Npgsql", "src\Npgsql\Npgsql.csproj", "{9D13B739-62B1-4190-B386-7A9547304EB3}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Npgsql.Tests", "test\Npgsql.Tests\Npgsql.Tests.csproj", "{E9C258D7-0D8E-4E6A-9857-5C6438591755}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Npgsql.Benchmarks", "test\Npgsql.Benchmarks\Npgsql.Benchmarks.csproj", "{8B4AE9B6-CDAC-44DD-A5CD-28A470D363B8}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Npgsql.Json.NET", "src\Npgsql.Json.NET\Npgsql.Json.NET.csproj", "{9CBE603F-6746-411D-A5FD-CB2C948CD7D0}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Npgsql.NodaTime", "src\Npgsql.NodaTime\Npgsql.NodaTime.csproj", "{D8DF12D6-FA70-4653-BD8F-C188944836DE}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Npgsql.PluginTests", "test\Npgsql.PluginTests\Npgsql.PluginTests.csproj", "{9BD7FC3D-6956-42A8-A586-2558C499EBA2}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Npgsql.NetTopologySuite", "src\Npgsql.NetTopologySuite\Npgsql.NetTopologySuite.csproj", "{6CB12050-DC9B-4155-BADD-BFDD54CDD70F}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Npgsql.GeoJSON", 
"src\Npgsql.GeoJSON\Npgsql.GeoJSON.csproj", "{F7C53EBD-0075-474F-A083-419257D04080}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Npgsql.Specification.Tests", "test\Npgsql.Specification.Tests\Npgsql.Specification.Tests.csproj", "{A77E5FAF-D775-4AB4-8846-8965C2104E60}" -EndProject -Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{004A2E0F-D34A-44D4-8DF0-D2BC63B57073}" - ProjectSection(SolutionItems) = preProject - .editorconfig = .editorconfig - Directory.Build.props = Directory.Build.props - Directory.Packages.props = Directory.Packages.props - README.md = README.md - global.json = global.json - NuGet.config = NuGet.config - EndProjectSection -EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Npgsql.SourceGenerators", "src\Npgsql.SourceGenerators\Npgsql.SourceGenerators.csproj", "{63026A19-60B8-4906-81CB-216F30E8094B}" -EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Npgsql.OpenTelemetry", "src\Npgsql.OpenTelemetry\Npgsql.OpenTelemetry.csproj", "{DA29F063-1828-47D8-B051-800AF7C9A0BE}" -EndProject -Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Github", "Github", "{BA7B6F53-D24D-45AC-927A-266857EA8D1E}" - ProjectSection(SolutionItems) = preProject - .github\workflows\build.yml = .github\workflows\build.yml - .github\dependabot.yml = .github\dependabot.yml - .github\workflows\codeql-analysis.yml = .github\workflows\codeql-analysis.yml - .github\workflows\rich-code-nav.yml = .github\workflows\rich-code-nav.yml - .github\workflows\native-aot.yml = .github\workflows\native-aot.yml - EndProjectSection -EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Npgsql.DependencyInjection", "src\Npgsql.DependencyInjection\Npgsql.DependencyInjection.csproj", "{B58E12EB-E43D-4D77-894E-5157D2269836}" -EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Npgsql.DependencyInjection.Tests", 
"test\Npgsql.DependencyInjection.Tests\Npgsql.DependencyInjection.Tests.csproj", "{EB2530FC-69F7-4DCB-A8B3-3671A157ED32}" -EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Npgsql.NativeAotTests", "test\Npgsql.NativeAotTests\Npgsql.NativeAotTests.csproj", "{20F2E9D6-A69E-4BAE-9236-574B0AA59139}" -EndProject -Global - GlobalSection(SolutionConfigurationPlatforms) = preSolution - Debug|Any CPU = Debug|Any CPU - Debug|x86 = Debug|x86 - Release|Any CPU = Release|Any CPU - Release|x86 = Release|x86 - EndGlobalSection - GlobalSection(ProjectConfigurationPlatforms) = postSolution - {9D13B739-62B1-4190-B386-7A9547304EB3}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {9D13B739-62B1-4190-B386-7A9547304EB3}.Debug|Any CPU.Build.0 = Debug|Any CPU - {9D13B739-62B1-4190-B386-7A9547304EB3}.Debug|x86.ActiveCfg = Debug|Any CPU - {9D13B739-62B1-4190-B386-7A9547304EB3}.Debug|x86.Build.0 = Debug|Any CPU - {9D13B739-62B1-4190-B386-7A9547304EB3}.Release|Any CPU.ActiveCfg = Release|Any CPU - {9D13B739-62B1-4190-B386-7A9547304EB3}.Release|Any CPU.Build.0 = Release|Any CPU - {9D13B739-62B1-4190-B386-7A9547304EB3}.Release|x86.ActiveCfg = Release|Any CPU - {9D13B739-62B1-4190-B386-7A9547304EB3}.Release|x86.Build.0 = Release|Any CPU - {E9C258D7-0D8E-4E6A-9857-5C6438591755}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {E9C258D7-0D8E-4E6A-9857-5C6438591755}.Debug|Any CPU.Build.0 = Debug|Any CPU - {E9C258D7-0D8E-4E6A-9857-5C6438591755}.Debug|x86.ActiveCfg = Debug|Any CPU - {E9C258D7-0D8E-4E6A-9857-5C6438591755}.Debug|x86.Build.0 = Debug|Any CPU - {E9C258D7-0D8E-4E6A-9857-5C6438591755}.Release|Any CPU.ActiveCfg = Release|Any CPU - {E9C258D7-0D8E-4E6A-9857-5C6438591755}.Release|Any CPU.Build.0 = Release|Any CPU - {E9C258D7-0D8E-4E6A-9857-5C6438591755}.Release|x86.ActiveCfg = Release|Any CPU - {E9C258D7-0D8E-4E6A-9857-5C6438591755}.Release|x86.Build.0 = Release|Any CPU - {8B4AE9B6-CDAC-44DD-A5CD-28A470D363B8}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - 
{8B4AE9B6-CDAC-44DD-A5CD-28A470D363B8}.Debug|Any CPU.Build.0 = Debug|Any CPU - {8B4AE9B6-CDAC-44DD-A5CD-28A470D363B8}.Debug|x86.ActiveCfg = Debug|Any CPU - {8B4AE9B6-CDAC-44DD-A5CD-28A470D363B8}.Debug|x86.Build.0 = Debug|Any CPU - {8B4AE9B6-CDAC-44DD-A5CD-28A470D363B8}.Release|Any CPU.ActiveCfg = Release|Any CPU - {8B4AE9B6-CDAC-44DD-A5CD-28A470D363B8}.Release|Any CPU.Build.0 = Release|Any CPU - {8B4AE9B6-CDAC-44DD-A5CD-28A470D363B8}.Release|x86.ActiveCfg = Release|Any CPU - {8B4AE9B6-CDAC-44DD-A5CD-28A470D363B8}.Release|x86.Build.0 = Release|Any CPU - {9CBE603F-6746-411D-A5FD-CB2C948CD7D0}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {9CBE603F-6746-411D-A5FD-CB2C948CD7D0}.Debug|Any CPU.Build.0 = Debug|Any CPU - {9CBE603F-6746-411D-A5FD-CB2C948CD7D0}.Debug|x86.ActiveCfg = Debug|Any CPU - {9CBE603F-6746-411D-A5FD-CB2C948CD7D0}.Debug|x86.Build.0 = Debug|Any CPU - {9CBE603F-6746-411D-A5FD-CB2C948CD7D0}.Release|Any CPU.ActiveCfg = Release|Any CPU - {9CBE603F-6746-411D-A5FD-CB2C948CD7D0}.Release|Any CPU.Build.0 = Release|Any CPU - {9CBE603F-6746-411D-A5FD-CB2C948CD7D0}.Release|x86.ActiveCfg = Release|Any CPU - {9CBE603F-6746-411D-A5FD-CB2C948CD7D0}.Release|x86.Build.0 = Release|Any CPU - {D8DF12D6-FA70-4653-BD8F-C188944836DE}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {D8DF12D6-FA70-4653-BD8F-C188944836DE}.Debug|Any CPU.Build.0 = Debug|Any CPU - {D8DF12D6-FA70-4653-BD8F-C188944836DE}.Debug|x86.ActiveCfg = Debug|Any CPU - {D8DF12D6-FA70-4653-BD8F-C188944836DE}.Debug|x86.Build.0 = Debug|Any CPU - {D8DF12D6-FA70-4653-BD8F-C188944836DE}.Release|Any CPU.ActiveCfg = Release|Any CPU - {D8DF12D6-FA70-4653-BD8F-C188944836DE}.Release|Any CPU.Build.0 = Release|Any CPU - {D8DF12D6-FA70-4653-BD8F-C188944836DE}.Release|x86.ActiveCfg = Release|Any CPU - {D8DF12D6-FA70-4653-BD8F-C188944836DE}.Release|x86.Build.0 = Release|Any CPU - {9BD7FC3D-6956-42A8-A586-2558C499EBA2}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {9BD7FC3D-6956-42A8-A586-2558C499EBA2}.Debug|Any CPU.Build.0 = Debug|Any CPU - 
{9BD7FC3D-6956-42A8-A586-2558C499EBA2}.Debug|x86.ActiveCfg = Debug|Any CPU - {9BD7FC3D-6956-42A8-A586-2558C499EBA2}.Debug|x86.Build.0 = Debug|Any CPU - {9BD7FC3D-6956-42A8-A586-2558C499EBA2}.Release|Any CPU.ActiveCfg = Release|Any CPU - {9BD7FC3D-6956-42A8-A586-2558C499EBA2}.Release|Any CPU.Build.0 = Release|Any CPU - {9BD7FC3D-6956-42A8-A586-2558C499EBA2}.Release|x86.ActiveCfg = Release|Any CPU - {9BD7FC3D-6956-42A8-A586-2558C499EBA2}.Release|x86.Build.0 = Release|Any CPU - {6CB12050-DC9B-4155-BADD-BFDD54CDD70F}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {6CB12050-DC9B-4155-BADD-BFDD54CDD70F}.Debug|Any CPU.Build.0 = Debug|Any CPU - {6CB12050-DC9B-4155-BADD-BFDD54CDD70F}.Debug|x86.ActiveCfg = Debug|Any CPU - {6CB12050-DC9B-4155-BADD-BFDD54CDD70F}.Debug|x86.Build.0 = Debug|Any CPU - {6CB12050-DC9B-4155-BADD-BFDD54CDD70F}.Release|Any CPU.ActiveCfg = Release|Any CPU - {6CB12050-DC9B-4155-BADD-BFDD54CDD70F}.Release|Any CPU.Build.0 = Release|Any CPU - {6CB12050-DC9B-4155-BADD-BFDD54CDD70F}.Release|x86.ActiveCfg = Release|Any CPU - {6CB12050-DC9B-4155-BADD-BFDD54CDD70F}.Release|x86.Build.0 = Release|Any CPU - {F7C53EBD-0075-474F-A083-419257D04080}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {F7C53EBD-0075-474F-A083-419257D04080}.Debug|Any CPU.Build.0 = Debug|Any CPU - {F7C53EBD-0075-474F-A083-419257D04080}.Debug|x86.ActiveCfg = Debug|Any CPU - {F7C53EBD-0075-474F-A083-419257D04080}.Debug|x86.Build.0 = Debug|Any CPU - {F7C53EBD-0075-474F-A083-419257D04080}.Release|Any CPU.ActiveCfg = Release|Any CPU - {F7C53EBD-0075-474F-A083-419257D04080}.Release|Any CPU.Build.0 = Release|Any CPU - {F7C53EBD-0075-474F-A083-419257D04080}.Release|x86.ActiveCfg = Release|Any CPU - {F7C53EBD-0075-474F-A083-419257D04080}.Release|x86.Build.0 = Release|Any CPU - {A77E5FAF-D775-4AB4-8846-8965C2104E60}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {A77E5FAF-D775-4AB4-8846-8965C2104E60}.Debug|Any CPU.Build.0 = Debug|Any CPU - {A77E5FAF-D775-4AB4-8846-8965C2104E60}.Debug|x86.ActiveCfg = Debug|Any CPU - 
{A77E5FAF-D775-4AB4-8846-8965C2104E60}.Debug|x86.Build.0 = Debug|Any CPU - {A77E5FAF-D775-4AB4-8846-8965C2104E60}.Release|Any CPU.ActiveCfg = Release|Any CPU - {A77E5FAF-D775-4AB4-8846-8965C2104E60}.Release|Any CPU.Build.0 = Release|Any CPU - {A77E5FAF-D775-4AB4-8846-8965C2104E60}.Release|x86.ActiveCfg = Release|Any CPU - {A77E5FAF-D775-4AB4-8846-8965C2104E60}.Release|x86.Build.0 = Release|Any CPU - {63026A19-60B8-4906-81CB-216F30E8094B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {63026A19-60B8-4906-81CB-216F30E8094B}.Debug|Any CPU.Build.0 = Debug|Any CPU - {63026A19-60B8-4906-81CB-216F30E8094B}.Debug|x86.ActiveCfg = Debug|Any CPU - {63026A19-60B8-4906-81CB-216F30E8094B}.Debug|x86.Build.0 = Debug|Any CPU - {63026A19-60B8-4906-81CB-216F30E8094B}.Release|Any CPU.ActiveCfg = Release|Any CPU - {63026A19-60B8-4906-81CB-216F30E8094B}.Release|Any CPU.Build.0 = Release|Any CPU - {63026A19-60B8-4906-81CB-216F30E8094B}.Release|x86.ActiveCfg = Release|Any CPU - {63026A19-60B8-4906-81CB-216F30E8094B}.Release|x86.Build.0 = Release|Any CPU - {DA29F063-1828-47D8-B051-800AF7C9A0BE}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {DA29F063-1828-47D8-B051-800AF7C9A0BE}.Debug|Any CPU.Build.0 = Debug|Any CPU - {DA29F063-1828-47D8-B051-800AF7C9A0BE}.Debug|x86.ActiveCfg = Debug|Any CPU - {DA29F063-1828-47D8-B051-800AF7C9A0BE}.Debug|x86.Build.0 = Debug|Any CPU - {DA29F063-1828-47D8-B051-800AF7C9A0BE}.Release|Any CPU.ActiveCfg = Release|Any CPU - {DA29F063-1828-47D8-B051-800AF7C9A0BE}.Release|Any CPU.Build.0 = Release|Any CPU - {DA29F063-1828-47D8-B051-800AF7C9A0BE}.Release|x86.ActiveCfg = Release|Any CPU - {DA29F063-1828-47D8-B051-800AF7C9A0BE}.Release|x86.Build.0 = Release|Any CPU - {B58E12EB-E43D-4D77-894E-5157D2269836}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {B58E12EB-E43D-4D77-894E-5157D2269836}.Debug|Any CPU.Build.0 = Debug|Any CPU - {B58E12EB-E43D-4D77-894E-5157D2269836}.Debug|x86.ActiveCfg = Debug|Any CPU - {B58E12EB-E43D-4D77-894E-5157D2269836}.Debug|x86.Build.0 = Debug|Any CPU - 
{B58E12EB-E43D-4D77-894E-5157D2269836}.Release|Any CPU.ActiveCfg = Release|Any CPU - {B58E12EB-E43D-4D77-894E-5157D2269836}.Release|Any CPU.Build.0 = Release|Any CPU - {B58E12EB-E43D-4D77-894E-5157D2269836}.Release|x86.ActiveCfg = Release|Any CPU - {B58E12EB-E43D-4D77-894E-5157D2269836}.Release|x86.Build.0 = Release|Any CPU - {EB2530FC-69F7-4DCB-A8B3-3671A157ED32}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {EB2530FC-69F7-4DCB-A8B3-3671A157ED32}.Debug|Any CPU.Build.0 = Debug|Any CPU - {EB2530FC-69F7-4DCB-A8B3-3671A157ED32}.Debug|x86.ActiveCfg = Debug|Any CPU - {EB2530FC-69F7-4DCB-A8B3-3671A157ED32}.Debug|x86.Build.0 = Debug|Any CPU - {EB2530FC-69F7-4DCB-A8B3-3671A157ED32}.Release|Any CPU.ActiveCfg = Release|Any CPU - {EB2530FC-69F7-4DCB-A8B3-3671A157ED32}.Release|Any CPU.Build.0 = Release|Any CPU - {EB2530FC-69F7-4DCB-A8B3-3671A157ED32}.Release|x86.ActiveCfg = Release|Any CPU - {EB2530FC-69F7-4DCB-A8B3-3671A157ED32}.Release|x86.Build.0 = Release|Any CPU - {20F2E9D6-A69E-4BAE-9236-574B0AA59139}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {20F2E9D6-A69E-4BAE-9236-574B0AA59139}.Debug|Any CPU.Build.0 = Debug|Any CPU - {20F2E9D6-A69E-4BAE-9236-574B0AA59139}.Debug|x86.ActiveCfg = Debug|Any CPU - {20F2E9D6-A69E-4BAE-9236-574B0AA59139}.Debug|x86.Build.0 = Debug|Any CPU - {20F2E9D6-A69E-4BAE-9236-574B0AA59139}.Release|Any CPU.ActiveCfg = Release|Any CPU - {20F2E9D6-A69E-4BAE-9236-574B0AA59139}.Release|Any CPU.Build.0 = Release|Any CPU - {20F2E9D6-A69E-4BAE-9236-574B0AA59139}.Release|x86.ActiveCfg = Release|Any CPU - {20F2E9D6-A69E-4BAE-9236-574B0AA59139}.Release|x86.Build.0 = Release|Any CPU - EndGlobalSection - GlobalSection(SolutionProperties) = preSolution - HideSolutionNode = FALSE - EndGlobalSection - GlobalSection(NestedProjects) = preSolution - {9D13B739-62B1-4190-B386-7A9547304EB3} = {8537E50E-CF7F-49CB-B4EF-3E2A1B11F050} - {E9C258D7-0D8E-4E6A-9857-5C6438591755} = {ED612DB1-AB32-4603-95E7-891BACA71C39} - {8B4AE9B6-CDAC-44DD-A5CD-28A470D363B8} = 
{ED612DB1-AB32-4603-95E7-891BACA71C39} - {9CBE603F-6746-411D-A5FD-CB2C948CD7D0} = {8537E50E-CF7F-49CB-B4EF-3E2A1B11F050} - {D8DF12D6-FA70-4653-BD8F-C188944836DE} = {8537E50E-CF7F-49CB-B4EF-3E2A1B11F050} - {9BD7FC3D-6956-42A8-A586-2558C499EBA2} = {ED612DB1-AB32-4603-95E7-891BACA71C39} - {6CB12050-DC9B-4155-BADD-BFDD54CDD70F} = {8537E50E-CF7F-49CB-B4EF-3E2A1B11F050} - {F7C53EBD-0075-474F-A083-419257D04080} = {8537E50E-CF7F-49CB-B4EF-3E2A1B11F050} - {A77E5FAF-D775-4AB4-8846-8965C2104E60} = {ED612DB1-AB32-4603-95E7-891BACA71C39} - {63026A19-60B8-4906-81CB-216F30E8094B} = {8537E50E-CF7F-49CB-B4EF-3E2A1B11F050} - {DA29F063-1828-47D8-B051-800AF7C9A0BE} = {8537E50E-CF7F-49CB-B4EF-3E2A1B11F050} - {BA7B6F53-D24D-45AC-927A-266857EA8D1E} = {004A2E0F-D34A-44D4-8DF0-D2BC63B57073} - {B58E12EB-E43D-4D77-894E-5157D2269836} = {8537E50E-CF7F-49CB-B4EF-3E2A1B11F050} - {EB2530FC-69F7-4DCB-A8B3-3671A157ED32} = {ED612DB1-AB32-4603-95E7-891BACA71C39} - {20F2E9D6-A69E-4BAE-9236-574B0AA59139} = {ED612DB1-AB32-4603-95E7-891BACA71C39} - EndGlobalSection - GlobalSection(ExtensibilityGlobals) = postSolution - SolutionGuid = {C90AEECD-DB4C-4BE6-B506-16A449852FB8} - EndGlobalSection - GlobalSection(MonoDevelopProperties) = preSolution - StartupItem = Npgsql.csproj - EndGlobalSection -EndGlobal diff --git a/Npgsql.slnx b/Npgsql.slnx new file mode 100644 index 0000000000..5404551fcd --- /dev/null +++ b/Npgsql.slnx @@ -0,0 +1,35 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/Npgsql.sln.DotSettings b/Npgsql.slnx.DotSettings similarity index 100% rename from Npgsql.sln.DotSettings rename to Npgsql.slnx.DotSettings From eabc6ab1fc3ecd1b3b2dfd05da04f5ff3bdeb668 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Mon, 17 Mar 2025 13:21:45 +0100 Subject: [PATCH 521/761] Switch to Ubuntu 24.04 in CI (#6054) --- .github/workflows/build.yml | 17 ++++++++--------- .github/workflows/native-aot.yml | 4 ++-- .github/workflows/trigger-doc-build.yml | 2 +- 3 files changed, 11 
insertions(+), 12 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 76e67b4513..3d331bc5a8 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -28,12 +28,12 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-22.04] + os: [ubuntu-24.04] pg_major: [17, 16, 15, 14, 13] config: [Release] test_tfm: [net8.0] include: - - os: ubuntu-22.04 + - os: ubuntu-24.04 pg_major: 17 config: Debug test_tfm: net8.0 @@ -45,7 +45,7 @@ jobs: pg_major: 17 config: Release test_tfm: net8.0 -# - os: ubuntu-22.04 +# - os: ubuntu-24.04 # pg_major: 17 # config: Release # test_tfm: net8.0 @@ -80,10 +80,9 @@ jobs: # First uninstall any PostgreSQL installed on the image dpkg-query -W --showformat='${Package}\n' 'postgresql-*' | xargs sudo dpkg -P postgresql - # Import the repository signing key - wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - - - sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt/ jammy-pgdg main ${{ matrix.pg_major }}" >> /etc/apt/sources.list.d/pgdg.list' + # Automated repository configuration + sudo apt install -y postgresql-common + sudo /usr/share/postgresql-common/pgdg/apt.postgresql.org.sh -y sudo apt-get update -qq sudo apt-get install -qq postgresql-${{ matrix.pg_major }} export PGDATA=/etc/postgresql/${{ matrix.pg_major }}/main @@ -333,7 +332,7 @@ jobs: publish-ci: needs: build - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 if: github.event_name == 'push' && github.repository == 'npgsql/npgsql' environment: myget @@ -373,7 +372,7 @@ jobs: release: needs: build - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 if: github.event_name == 'push' && startsWith(github.repository, 'npgsql/') && needs.build.outputs.is_release == 'true' environment: nuget.org diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index 25514352ce..0f18872275 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ 
-87,7 +87,7 @@ jobs: strategy: fail-fast: false matrix: - os: [ ubuntu-22.04 ] + os: [ ubuntu-24.04 ] pg_major: [ 15 ] tfm: [ net8.0 ] @@ -121,7 +121,7 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-22.04] + os: [ubuntu-24.04] pg_major: [15] tfm: [ net8.0 ] diff --git a/.github/workflows/trigger-doc-build.yml b/.github/workflows/trigger-doc-build.yml index dfbe89601e..30c6b5fa62 100644 --- a/.github/workflows/trigger-doc-build.yml +++ b/.github/workflows/trigger-doc-build.yml @@ -10,7 +10,7 @@ on: jobs: build: - runs-on: ubuntu-22.04 + runs-on: ubuntu-latest steps: - name: Trigger documentation build run: | From 562d3954042ae005fef0f9bac918786a51fe3cc9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 Mar 2025 00:43:41 +0100 Subject: [PATCH 522/761] Bump actions/setup-dotnet from 4.3.0 to 4.3.1 (#6059) --- .github/workflows/build.yml | 6 +++--- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/native-aot.yml | 4 ++-- .github/workflows/rich-code-nav.yml | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 3d331bc5a8..192b5525d4 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -68,7 +68,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.3.0 + uses: actions/setup-dotnet@v4.3.1 - name: Build run: dotnet build -c ${{ matrix.config }} @@ -349,7 +349,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.3.0 + uses: actions/setup-dotnet@v4.3.1 - name: Pack run: dotnet pack --configuration Release --property:PackageOutputPath="$PWD/nupkgs" --version-suffix "ci.$(date -u +%Y%m%dT%H%M%S)+sha.${GITHUB_SHA:0:9}" -p:ContinuousIntegrationBuild=true @@ -381,7 +381,7 @@ jobs: uses: actions/checkout@v4 - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.3.0 + uses: actions/setup-dotnet@v4.3.1 - 
name: Pack run: dotnet pack --configuration Release --property:PackageOutputPath="$PWD/nupkgs" -p:ContinuousIntegrationBuild=true diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 9fa5eeb8e1..2f34b67e27 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -65,7 +65,7 @@ jobs: # queries: ./path/to/local/query, your-org/your-repo/queries@main - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.3.0 + uses: actions/setup-dotnet@v4.3.1 - name: Build run: dotnet build -c Release diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index 0f18872275..cd6498fd24 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -107,7 +107,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.3.0 + uses: actions/setup-dotnet@v4.3.1 - name: Write script run: echo "$AOT_Compat" > test-aot-compatibility.ps1 @@ -141,7 +141,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.3.0 + uses: actions/setup-dotnet@v4.3.1 - name: Start PostgreSQL run: | diff --git a/.github/workflows/rich-code-nav.yml b/.github/workflows/rich-code-nav.yml index 0266f288ff..d0649277ca 100644 --- a/.github/workflows/rich-code-nav.yml +++ b/.github/workflows/rich-code-nav.yml @@ -23,7 +23,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.3.0 + uses: actions/setup-dotnet@v4.3.1 - name: Build run: dotnet build --configuration Debug From 2bc67c3e8cd1049a1af4f4dc64ad9fd121c21451 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Wed, 19 Mar 2025 17:04:39 +0100 Subject: [PATCH 523/761] Update copyright to 2025 --- Directory.Build.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Build.props b/Directory.Build.props index 50cb8595eb..de99b4fd8c 100644 --- a/Directory.Build.props +++ b/Directory.Build.props @@ 
-10,7 +10,7 @@ true true - Copyright 2024 © The Npgsql Development Team + Copyright 2025 © The Npgsql Development Team Npgsql PostgreSQL https://github.com/npgsql/npgsql From 22a0aa9bd0bf69bb3f915a0c1858680dc9087897 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Thu, 20 Mar 2025 12:55:20 +0300 Subject: [PATCH 524/761] Add basic testing for tracing (#6051) Closes #4285 --- src/Npgsql/Internal/NpgsqlConnector.cs | 1 - src/Npgsql/NpgsqlActivitySource.cs | 7 +- src/Npgsql/NpgsqlCommand.cs | 10 +- test/Npgsql.Tests/TracingTests.cs | 201 +++++++++++++++++++++++++ 4 files changed, 211 insertions(+), 8 deletions(-) create mode 100644 test/Npgsql.Tests/TracingTests.cs diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 8208e7386c..dffe542ff0 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -1177,7 +1177,6 @@ async Task MultiplexingReadLoop() // We have a resultset for the command - hand back control to the command (which will // return it to the user) - command.TraceReceivedFirstResponse(DataSource.Configuration.TracingOptions); ReaderCompleted.Reset(); command.ExecutionCompletion.SetResult(this); diff --git a/src/Npgsql/NpgsqlActivitySource.cs b/src/Npgsql/NpgsqlActivitySource.cs index 667728a89a..ce762cc642 100644 --- a/src/Npgsql/NpgsqlActivitySource.cs +++ b/src/Npgsql/NpgsqlActivitySource.cs @@ -107,12 +107,13 @@ internal static void ReceivedFirstResponse(Activity activity, NpgsqlTracingOptio internal static void CommandStop(Activity activity) { - activity.SetTag("otel.status_code", "OK"); + activity.SetStatus(ActivityStatusCode.Ok); activity.Dispose(); } internal static void SetException(Activity activity, Exception ex, bool escaped = true) { + // TODO: We can instead use Activity.AddException whenever we start using .NET 9 var tags = new ActivityTagsCollection { { "exception.type", ex.GetType().FullName }, @@ -122,8 +123,8 @@ internal static void 
SetException(Activity activity, Exception ex, bool escaped }; var activityEvent = new ActivityEvent("exception", tags: tags); activity.AddEvent(activityEvent); - activity.SetTag("otel.status_code", "ERROR"); - activity.SetTag("otel.status_description", ex is PostgresException pgEx ? pgEx.SqlState : ex.Message); + var statusDescription = ex is PostgresException pgEx ? pgEx.SqlState : ex.Message; + activity.SetStatus(ActivityStatusCode.Error, statusDescription); activity.Dispose(); } } diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index f1ef8bb832..86748a1b16 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -1597,6 +1597,8 @@ internal virtual async ValueTask ExecuteReader(bool async, Com connector.CurrentReader = reader; await reader.NextResultAsync(cancellationToken).ConfigureAwait(false); + TraceReceivedFirstResponse(connector.DataSource.Configuration.TracingOptions); + return reader; } } @@ -1718,12 +1720,12 @@ internal void TraceCommandStart(NpgsqlConnectionStringBuilder settings, NpgsqlTr ? tracingOptions.BatchFilter?.Invoke(WrappingBatch) ?? true : tracingOptions.CommandFilter?.Invoke(this) ?? true; - var spanName = WrappingBatch is not null - ? tracingOptions.BatchSpanNameProvider?.Invoke(WrappingBatch) - : tracingOptions.CommandSpanNameProvider?.Invoke(this); - if (enableTracing) { + var spanName = WrappingBatch is not null + ? tracingOptions.BatchSpanNameProvider?.Invoke(WrappingBatch) + : tracingOptions.CommandSpanNameProvider?.Invoke(this); + CurrentActivity = NpgsqlActivitySource.CommandStart( settings, WrappingBatch is not null ? 
GetBatchFullCommandText() : CommandText, diff --git a/test/Npgsql.Tests/TracingTests.cs b/test/Npgsql.Tests/TracingTests.cs new file mode 100644 index 0000000000..e3ff4a7c34 --- /dev/null +++ b/test/Npgsql.Tests/TracingTests.cs @@ -0,0 +1,201 @@ +using System.Collections.Generic; +using System.Diagnostics; +using System.Linq; +using System.Threading.Tasks; +using NUnit.Framework; + +namespace Npgsql.Tests; + +[NonParallelizable] +public class TracingTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) +{ + [Test] + public async Task Basic([Values] bool async, [Values] bool batch) + { + if (IsMultiplexing && !async) + return; + + var activities = new List(); + + using var activityListener = new ActivityListener(); + activityListener.ShouldListenTo = source => source.Name == "Npgsql"; + activityListener.Sample = (ref ActivityCreationOptions _) => ActivitySamplingResult.AllDataAndRecorded; + activityListener.ActivityStopped = activity => activities.Add(activity); + ActivitySource.AddActivityListener(activityListener); + + await using var dataSource = CreateDataSource(); + await using var conn = await dataSource.OpenConnectionAsync(); + await ExecuteScalar(conn, async, batch, "SELECT 42"); + + Assert.That(activities.Count, Is.EqualTo(1)); + var activity = activities[0]; + Assert.That(activity.DisplayName, Is.EqualTo(conn.Settings.Database)); + Assert.That(activity.OperationName, Is.EqualTo(conn.Settings.Database)); + Assert.That(activity.Status, Is.EqualTo(ActivityStatusCode.Ok)); + + Assert.That(activity.Events.Count(), Is.EqualTo(1)); + var firstResponseEvent = activity.Events.First(); + Assert.That(firstResponseEvent.Name, Is.EqualTo("received-first-response")); + + var expectedTagCount = conn.Settings.Port == 5432 ? 
9 : 10; + Assert.That(activity.TagObjects.Count(), Is.EqualTo(expectedTagCount)); + + var queryTag = activity.TagObjects.First(x => x.Key == "db.statement"); + Assert.That(queryTag.Value, Is.EqualTo("SELECT 42")); + + var systemTag = activity.TagObjects.First(x => x.Key == "db.system"); + Assert.That(systemTag.Value, Is.EqualTo("postgresql")); + + var userTag = activity.TagObjects.First(x => x.Key == "db.user"); + Assert.That(userTag.Value, Is.EqualTo(conn.Settings.Username)); + + var dbNameTag = activity.TagObjects.First(x => x.Key == "db.name"); + Assert.That(dbNameTag.Value, Is.EqualTo(conn.Settings.Database)); + + var connStringTag = activity.TagObjects.First(x => x.Key == "db.connection_string"); + Assert.That(connStringTag.Value, Is.EqualTo(conn.ConnectionString)); + + if (!IsMultiplexing) + { + var connIDTag = activity.TagObjects.First(x => x.Key == "db.connection_id"); + Assert.That(connIDTag.Value, Is.EqualTo(conn.ProcessID)); + } + } + + [Test] + public async Task Error([Values] bool async, [Values] bool batch) + { + if (IsMultiplexing && !async) + return; + + var activities = new List(); + + using var activityListener = new ActivityListener(); + activityListener.ShouldListenTo = source => source.Name == "Npgsql"; + activityListener.Sample = (ref ActivityCreationOptions _) => ActivitySamplingResult.AllDataAndRecorded; + activityListener.ActivityStopped = activity => activities.Add(activity); + ActivitySource.AddActivityListener(activityListener); + + await using var dataSource = CreateDataSource(); + await using var conn = await dataSource.OpenConnectionAsync(); + Assert.ThrowsAsync(async () => await ExecuteScalar(conn, async, batch, "SELECT * FROM non_existing_table")); + + Assert.That(activities.Count, Is.EqualTo(1)); + var activity = activities[0]; + Assert.That(activity.DisplayName, Is.EqualTo(conn.Settings.Database)); + Assert.That(activity.OperationName, Is.EqualTo(conn.Settings.Database)); + Assert.That(activity.Status, 
Is.EqualTo(ActivityStatusCode.Error)); + Assert.That(activity.StatusDescription, Is.EqualTo(PostgresErrorCodes.UndefinedTable)); + + Assert.That(activity.Events.Count(), Is.EqualTo(1)); + var exceptionEvent = activity.Events.First(); + Assert.That(exceptionEvent.Name, Is.EqualTo("exception")); + + Assert.That(exceptionEvent.Tags.Count(), Is.EqualTo(4)); + + var exceptionTypeTag = exceptionEvent.Tags.First(x => x.Key == "exception.type"); + Assert.That(exceptionTypeTag.Value, Is.EqualTo("Npgsql.PostgresException")); + + var exceptionMessageTag = exceptionEvent.Tags.First(x => x.Key == "exception.message"); + StringAssert.Contains("relation \"non_existing_table\" does not exist", (string)exceptionMessageTag.Value!); + + var exceptionStacktraceTag = exceptionEvent.Tags.First(x => x.Key == "exception.stacktrace"); + StringAssert.Contains("relation \"non_existing_table\" does not exist", (string)exceptionStacktraceTag.Value!); + + var exceptionEscapedTag = exceptionEvent.Tags.First(x => x.Key == "exception.escaped"); + Assert.That(exceptionEscapedTag.Value, Is.True); + + var expectedTagCount = conn.Settings.Port == 5432 ? 
9 : 10; + Assert.That(activity.TagObjects.Count(), Is.EqualTo(expectedTagCount)); + + var queryTag = activity.TagObjects.First(x => x.Key == "db.statement"); + Assert.That(queryTag.Value, Is.EqualTo("SELECT * FROM non_existing_table")); + + var systemTag = activity.TagObjects.First(x => x.Key == "db.system"); + Assert.That(systemTag.Value, Is.EqualTo("postgresql")); + + var userTag = activity.TagObjects.First(x => x.Key == "db.user"); + Assert.That(userTag.Value, Is.EqualTo(conn.Settings.Username)); + + var dbNameTag = activity.TagObjects.First(x => x.Key == "db.name"); + Assert.That(dbNameTag.Value, Is.EqualTo(conn.Settings.Database)); + + var connStringTag = activity.TagObjects.First(x => x.Key == "db.connection_string"); + Assert.That(connStringTag.Value, Is.EqualTo(conn.ConnectionString)); + + if (!IsMultiplexing) + { + var connIDTag = activity.TagObjects.First(x => x.Key == "db.connection_id"); + Assert.That(connIDTag.Value, Is.EqualTo(conn.ProcessID)); + } + } + + [Test] + public async Task Configure_tracing([Values] bool async, [Values] bool batch) + { + if (IsMultiplexing && !async) + return; + + var activities = new List(); + + using var activityListener = new ActivityListener(); + activityListener.ShouldListenTo = source => source.Name == "Npgsql"; + activityListener.Sample = (ref ActivityCreationOptions _) => ActivitySamplingResult.AllDataAndRecorded; + activityListener.ActivityStopped = activity => activities.Add(activity); + ActivitySource.AddActivityListener(activityListener); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.ConfigureTracing(options => + { + options + .EnableFirstResponseEvent(enable: false) + .ConfigureCommandFilter(cmd => cmd.CommandText.Contains('2')) + .ConfigureBatchFilter(batch => batch.BatchCommands[0].CommandText.Contains('2')) + .ConfigureCommandSpanNameProvider(_ => "unknown_query") + .ConfigureBatchSpanNameProvider(_ => "unknown_query") + .ConfigureCommandEnrichmentCallback((activity, _) => 
activity.AddTag("custom_tag", "custom_value")) + .ConfigureBatchEnrichmentCallback((activity, _) => activity.AddTag("custom_tag", "custom_value")); + }); + await using var dataSource = dataSourceBuilder.Build(); + await using var conn = await dataSource.OpenConnectionAsync(); + + await ExecuteScalar(conn, async, batch, "SELECT 1"); + + Assert.That(activities.Count, Is.EqualTo(0)); + + await ExecuteScalar(conn, async, batch, "SELECT 2"); + + Assert.That(activities.Count, Is.EqualTo(1)); + var activity = activities[0]; + Assert.That(activity.DisplayName, Is.EqualTo("unknown_query")); + Assert.That(activity.OperationName, Is.EqualTo("unknown_query")); + + Assert.That(activity.Events.Count(), Is.EqualTo(0)); + + var customTag = activity.TagObjects.First(x => x.Key == "custom_tag"); + Assert.That(customTag.Value, Is.EqualTo("custom_value")); + } + + async Task ExecuteScalar(NpgsqlConnection connection, bool async, bool isBatch, string query) + { + if (!isBatch) + { + if (async) + return await connection.ExecuteScalarAsync(query); + else + return connection.ExecuteScalar(query); + } + else + { + await using var batch = connection.CreateBatch(); + var batchCommand = batch.CreateBatchCommand(); + batchCommand.CommandText = query; + batch.BatchCommands.Add(batchCommand); + + if (async) + return await batch.ExecuteScalarAsync(); + else + return batch.ExecuteScalar(); + } + } +} From e8ce19fe2aea5df32f508ba2b1cc15a1307d1a22 Mon Sep 17 00:00:00 2001 From: Marc Gravell Date: Sat, 22 Mar 2025 15:03:15 +0000 Subject: [PATCH 525/761] NpgsqlParameterCollection.Clone() should set correct collection instance (#6066) fix #6065 --- src/Npgsql/NpgsqlParameterCollection.cs | 2 +- test/Npgsql.Tests/NpgsqlParameterCollectionTests.cs | 13 +++++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/src/Npgsql/NpgsqlParameterCollection.cs b/src/Npgsql/NpgsqlParameterCollection.cs index b2c56d7ac7..2e6b0f5012 100644 --- a/src/Npgsql/NpgsqlParameterCollection.cs +++ 
b/src/Npgsql/NpgsqlParameterCollection.cs @@ -653,7 +653,7 @@ internal void CloneTo(NpgsqlParameterCollection other) foreach (var param in InternalList) { var newParam = param.Clone(); - newParam.Collection = this; + newParam.Collection = other; other.InternalList.Add(newParam); } diff --git a/test/Npgsql.Tests/NpgsqlParameterCollectionTests.cs b/test/Npgsql.Tests/NpgsqlParameterCollectionTests.cs index 6c09b7b708..e2c7ba364c 100644 --- a/test/Npgsql.Tests/NpgsqlParameterCollectionTests.cs +++ b/test/Npgsql.Tests/NpgsqlParameterCollectionTests.cs @@ -4,6 +4,7 @@ using System.Data; using System.Data.Common; using System.Diagnostics.CodeAnalysis; +using System.Linq; namespace Npgsql.Tests; @@ -320,6 +321,18 @@ public void Clean_name() Assert.AreEqual(NpgsqlParameter.PositionalName, param.ParameterName); } + [Test] + public void Clone_sets_correct_collection() + { + var cmd = new NpgsqlCommand(); + cmd.Parameters.Add(new NpgsqlParameter { TypedValue = 42 }); + Assert.AreSame(cmd.Parameters, cmd.Parameters.Single().Collection); + + cmd = cmd.Clone(); + Assert.AreSame(cmd.Parameters, cmd.Parameters.Single().Collection); + } + + public NpgsqlParameterCollectionTests(CompatMode compatMode) { _compatMode = compatMode; From aaf92983c9305f10e5892ff815c73b49491d7f0e Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Sat, 22 Mar 2025 16:07:09 +0100 Subject: [PATCH 526/761] Fix brew on mac CI (#6071) --- .github/workflows/build.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 192b5525d4..2bdd30de10 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -232,6 +232,7 @@ jobs: - name: Start PostgreSQL ${{ matrix.pg_major }} (MacOS) if: startsWith(matrix.os, 'macos') run: | + brew update brew install postgresql@${{ matrix.pg_major }} PGDATA=/opt/homebrew/var/postgresql@${{ matrix.pg_major }} From ef219b73f12b040010edb8362522db661a5f5969 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Wed, 
26 Mar 2025 14:17:51 +0300 Subject: [PATCH 527/761] Fix adding to hash lookup while renaming an unnamed parameter (#6073) Fixes #6067 --- src/Npgsql/NpgsqlParameterCollection.cs | 2 +- .../NpgsqlParameterCollectionTests.cs | 28 +++++++++++++++++++ 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/src/Npgsql/NpgsqlParameterCollection.cs b/src/Npgsql/NpgsqlParameterCollection.cs index 2e6b0f5012..106f681f0b 100644 --- a/src/Npgsql/NpgsqlParameterCollection.cs +++ b/src/Npgsql/NpgsqlParameterCollection.cs @@ -143,7 +143,7 @@ internal void ChangeParameterName(NpgsqlParameter parameter, string? value) var oldTrimmedName = parameter.TrimmedName; parameter.ChangeParameterName(value); - if (_caseInsensitiveLookup is null || _caseInsensitiveLookup.Count == 0) + if (_caseInsensitiveLookup is null) return; var index = IndexOf(parameter); diff --git a/test/Npgsql.Tests/NpgsqlParameterCollectionTests.cs b/test/Npgsql.Tests/NpgsqlParameterCollectionTests.cs index e2c7ba364c..f6a188817b 100644 --- a/test/Npgsql.Tests/NpgsqlParameterCollectionTests.cs +++ b/test/Npgsql.Tests/NpgsqlParameterCollectionTests.cs @@ -71,6 +71,34 @@ public void Hash_lookup_parameter_rename_bug() Assert.That(command.Parameters.IndexOf("a_new_name"), Is.GreaterThanOrEqualTo(0)); } + [Test] + [IssueLink("https://github.com/npgsql/npgsql/issues/6067")] + public void Hash_lookup_unnamed_parameter_rename_bug() + { + if (_compatMode == CompatMode.TwoPass) + return; + + using var command = new NpgsqlCommand(); + + for (var i = 0; i < 3; i++) + { + // Put plenty of parameters in the collection to turn on hash lookup functionality. + for (var j = 0; j < LookupThreshold; j++) + { + // Create and add an unnamed parameter before renaming it + var parameter = command.CreateParameter(); + command.Parameters.Add(parameter); + parameter.ParameterName = $"{j}"; + } + + // Make sure hash lookup is generated. 
+ Assert.AreEqual(command.Parameters["3"].ParameterName, "3"); + + // Remove all parameters to clear hash lookup + command.Parameters.Clear(); + } + } + [Test] public void Remove_duplicate_parameter([Values(LookupThreshold, LookupThreshold - 2)] int count) { From cf9d2433bc653f363705296b5804a9658bb49083 Mon Sep 17 00:00:00 2001 From: kurnakovv <59327306+kurnakovv@users.noreply.github.com> Date: Sun, 30 Mar 2025 17:08:53 +0900 Subject: [PATCH 528/761] Update LICENSE date (2024 -> 2025) (#6082) --- LICENSE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LICENSE b/LICENSE index a74ee166ce..c551cb7b0c 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2002-2024, Npgsql +Copyright (c) 2002-2025, Npgsql Permission to use, copy, modify, and distribute this software and its documentation for any purpose, without fee, and without a written agreement From 251d73b01e05d5a125588a9e60ac4d7bec012cd9 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Wed, 9 Apr 2025 15:09:29 +0300 Subject: [PATCH 529/761] Add tracing for physical connection open (#6091) Closes #4136 --- src/Npgsql/Internal/NpgsqlConnector.cs | 31 ++++- src/Npgsql/MultiplexingDataSource.cs | 27 +++- src/Npgsql/NpgsqlActivitySource.cs | 18 ++- src/Npgsql/NpgsqlCommand.cs | 2 +- src/Npgsql/NpgsqlTracingOptionsBuilder.cs | 15 ++- src/Npgsql/PublicAPI.Unshipped.txt | 1 + test/Npgsql.Tests/TracingTests.cs | 152 +++++++++++++++++++++- 7 files changed, 228 insertions(+), 18 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index dffe542ff0..a926fb1ca2 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -485,9 +485,18 @@ internal async Task Open(NpgsqlTimeout timeout, bool async, CancellationToken ca LogMessages.OpeningPhysicalConnection(ConnectionLogger, Host, Port, Database, UserFacingConnectionString); var startOpenTimestamp = Stopwatch.GetTimestamp(); + Activity? 
activity = null; + try { - await OpenCore(this, Settings.SslMode, timeout, async, cancellationToken).ConfigureAwait(false); + var username = await GetUsernameAsync(async, cancellationToken).ConfigureAwait(false); + + activity = NpgsqlActivitySource.ConnectionOpen(this); + + await OpenCore(this, username, Settings.SslMode, timeout, async, cancellationToken).ConfigureAwait(false); + + if (activity is not null) + NpgsqlActivitySource.Enrich(activity, this); await DataSource.Bootstrap(this, timeout, forceReload: false, async, cancellationToken).ConfigureAwait(false); @@ -510,6 +519,8 @@ internal async Task Open(NpgsqlTimeout timeout, bool async, CancellationToken ca // It is intentionally not awaited and will run as long as the connector is alive. // The CommandsInFlightWriter channel is completed in Cleanup, which should cause this task // to complete. + // Make sure we do not flow AsyncLocals like Activity.Current + using var __ = ExecutionContext.SuppressFlow(); _ = Task.Run(MultiplexingReadLoop, CancellationToken.None) .ContinueWith(t => { @@ -540,7 +551,7 @@ internal async Task Open(NpgsqlTimeout timeout, bool async, CancellationToken ca { if (async) await DataSource.ConnectionInitializerAsync(tempConnection).ConfigureAwait(false); - else if (!async) + else DataSource.ConnectionInitializer(tempConnection); } finally @@ -553,17 +564,24 @@ internal async Task Open(NpgsqlTimeout timeout, bool async, CancellationToken ca } } + if (activity is not null) + NpgsqlActivitySource.CommandStop(activity); + LogMessages.OpenedPhysicalConnection( - ConnectionLogger, Host, Port, Database, UserFacingConnectionString, (long)Stopwatch.GetElapsedTime(startOpenTimestamp).TotalMilliseconds, Id); + ConnectionLogger, Host, Port, Database, UserFacingConnectionString, + (long)Stopwatch.GetElapsedTime(startOpenTimestamp).TotalMilliseconds, Id); } catch (Exception e) { + if (activity is not null) + NpgsqlActivitySource.SetException(activity, e); Break(e); throw; } static async Task 
OpenCore( NpgsqlConnector conn, + string username, SslMode sslMode, NpgsqlTimeout timeout, bool async, @@ -571,8 +589,6 @@ static async Task OpenCore( { await conn.RawOpen(sslMode, timeout, async, cancellationToken).ConfigureAwait(false); - var username = await conn.GetUsernameAsync(async, cancellationToken).ConfigureAwait(false); - timeout.CheckAndApply(conn); conn.WriteStartupMessage(username); await conn.Flush(async, cancellationToken).ConfigureAwait(false); @@ -595,6 +611,7 @@ static async Task OpenCore( // If Allow was specified and we failed (without SSL), retry with SSL await OpenCore( conn, + username, sslMode == SslMode.Prefer ? SslMode.Disable : SslMode.Require, timeout, async, @@ -754,6 +771,8 @@ async Task RawOpen(SslMode sslMode, NpgsqlTimeout timeout, bool async, Cancellat else Connect(timeout); + ConnectionLogger.LogTrace("Socket connected to {Host}:{Port}", Host, Port); + _baseStream = new NetworkStream(_socket, true); _stream = _baseStream; @@ -810,8 +829,6 @@ async Task RawOpen(SslMode sslMode, NpgsqlTimeout timeout, bool async, Cancellat if (ReadBuffer.ReadBytesLeft > 0) throw new NpgsqlException("Additional unencrypted data received after SSL negotiation - this should never happen, and may be an indication of a man-in-the-middle attack."); } - - ConnectionLogger.LogTrace("Socket connected to {Host}:{Port}", Host, Port); } catch { diff --git a/src/Npgsql/MultiplexingDataSource.cs b/src/Npgsql/MultiplexingDataSource.cs index 74c32b8c6f..60ba882923 100644 --- a/src/Npgsql/MultiplexingDataSource.cs +++ b/src/Npgsql/MultiplexingDataSource.cs @@ -56,6 +56,8 @@ internal MultiplexingDataSource( _connectionLogger = dataSourceConfig.LoggingConfiguration.ConnectionLogger; _commandLogger = dataSourceConfig.LoggingConfiguration.CommandLogger; + // Make sure we do not flow AsyncLocals like Activity.Current + using var _ = ExecutionContext.SuppressFlow(); _multiplexWriteLoop = Task.Run(MultiplexingWriteLoop, CancellationToken.None) .ContinueWith(t => { @@ 
-106,15 +108,28 @@ async Task MultiplexingWriteLoop() break; } - connector = await OpenNewConnector( - command.InternalConnection!, - new NpgsqlTimeout(TimeSpan.FromSeconds(Settings.Timeout)), - async: true, - CancellationToken.None).ConfigureAwait(false); + // At no point should we ever have an activity here + Debug.Assert(Activity.Current is null); + // Set current activity as the one from the command + // So child activities from physical open are bound to it + Activity.Current = command.CurrentActivity; + + try + { + connector = await OpenNewConnector( + command.InternalConnection!, + new NpgsqlTimeout(TimeSpan.FromSeconds(Settings.Timeout)), + async: true, + CancellationToken.None).ConfigureAwait(false); + } + finally + { + Activity.Current = null; + } if (connector != null) { - // Managed to created a new connector + // Managed to create a new connector connector.Connection = null; // See increment under over-capacity mode below diff --git a/src/Npgsql/NpgsqlActivitySource.cs b/src/Npgsql/NpgsqlActivitySource.cs index ce762cc642..4493bb272a 100644 --- a/src/Npgsql/NpgsqlActivitySource.cs +++ b/src/Npgsql/NpgsqlActivitySource.cs @@ -9,7 +9,7 @@ namespace Npgsql; static class NpgsqlActivitySource { - static readonly ActivitySource Source = new("Npgsql", "0.1.0"); + static readonly ActivitySource Source = new("Npgsql", "0.2.0"); internal static bool IsEnabled => Source.HasListeners(); @@ -61,6 +61,22 @@ static class NpgsqlActivitySource return activity; } + internal static Activity? ConnectionOpen(NpgsqlConnector connector) + { + if (!connector.DataSource.Configuration.TracingOptions.EnablePhysicalOpenTracing) + return null; + + var dbName = connector.Settings.Database ?? 
connector.InferredUserName; + var activity = Source.StartActivity(dbName, ActivityKind.Client); + if (activity is not { IsAllDataRequested: true }) + return activity; + + activity.SetTag("db.system", "postgresql"); + activity.SetTag("db.connection_string", connector.UserFacingConnectionString); + + return activity; + } + internal static void Enrich(Activity activity, NpgsqlConnector connector) { if (!activity.IsAllDataRequested) diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index 86748a1b16..71a646e262 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -51,7 +51,7 @@ public class NpgsqlCommand : DbCommand, ICloneable, IComponent internal List InternalBatchCommands { get; } - Activity? CurrentActivity; + internal Activity? CurrentActivity { get; private set; } /// /// Returns details about each statement that this command has executed. diff --git a/src/Npgsql/NpgsqlTracingOptionsBuilder.cs b/src/Npgsql/NpgsqlTracingOptionsBuilder.cs index 1da344553f..f81f4e7139 100644 --- a/src/Npgsql/NpgsqlTracingOptionsBuilder.cs +++ b/src/Npgsql/NpgsqlTracingOptionsBuilder.cs @@ -15,6 +15,7 @@ public sealed class NpgsqlTracingOptionsBuilder Func? _commandSpanNameProvider; Func? _batchSpanNameProvider; bool _enableFirstResponseEvent = true; + bool _enablePhysicalOpenTracing = true; internal NpgsqlTracingOptionsBuilder() { @@ -88,6 +89,16 @@ public NpgsqlTracingOptionsBuilder EnableFirstResponseEvent(bool enable = true) return this; } + /// + /// Gets or sets a value indicating whether to trace physical connection open. + /// Default is true to preserve existing behavior. 
+ /// + public NpgsqlTracingOptionsBuilder EnablePhysicalOpenTracing(bool enable = true) + { + _enablePhysicalOpenTracing = enable; + return this; + } + internal NpgsqlTracingOptions Build() => new() { CommandFilter = _commandFilter, @@ -96,7 +107,8 @@ public NpgsqlTracingOptionsBuilder EnableFirstResponseEvent(bool enable = true) BatchEnrichmentCallback = _batchEnrichmentCallback, CommandSpanNameProvider = _commandSpanNameProvider, BatchSpanNameProvider = _batchSpanNameProvider, - EnableFirstResponseEvent = _enableFirstResponseEvent + EnableFirstResponseEvent = _enableFirstResponseEvent, + EnablePhysicalOpenTracing = _enablePhysicalOpenTracing }; } @@ -109,4 +121,5 @@ sealed class NpgsqlTracingOptions internal Func? CommandSpanNameProvider { get; init; } internal Func? BatchSpanNameProvider { get; init; } internal bool EnableFirstResponseEvent { get; init; } + internal bool EnablePhysicalOpenTracing { get; init; } } diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index 2311f1eb30..47198a4165 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -38,6 +38,7 @@ Npgsql.NpgsqlTracingOptionsBuilder.ConfigureCommandEnrichmentCallback(System.Act Npgsql.NpgsqlTracingOptionsBuilder.ConfigureCommandFilter(System.Func? commandFilter) -> Npgsql.NpgsqlTracingOptionsBuilder! Npgsql.NpgsqlTracingOptionsBuilder.ConfigureCommandSpanNameProvider(System.Func? commandSpanNameProvider) -> Npgsql.NpgsqlTracingOptionsBuilder! Npgsql.NpgsqlTracingOptionsBuilder.EnableFirstResponseEvent(bool enable = true) -> Npgsql.NpgsqlTracingOptionsBuilder! +Npgsql.NpgsqlTracingOptionsBuilder.EnablePhysicalOpenTracing(bool enable = true) -> Npgsql.NpgsqlTracingOptionsBuilder! Npgsql.NpgsqlTypeLoadingOptionsBuilder Npgsql.NpgsqlTypeLoadingOptionsBuilder.EnableTableCompositesLoading(bool enable = true) -> Npgsql.NpgsqlTypeLoadingOptionsBuilder! 
Npgsql.NpgsqlTypeLoadingOptionsBuilder.EnableTypeLoading(bool enable = true) -> Npgsql.NpgsqlTypeLoadingOptionsBuilder! diff --git a/test/Npgsql.Tests/TracingTests.cs b/test/Npgsql.Tests/TracingTests.cs index e3ff4a7c34..5cf0fca200 100644 --- a/test/Npgsql.Tests/TracingTests.cs +++ b/test/Npgsql.Tests/TracingTests.cs @@ -1,6 +1,7 @@ using System.Collections.Generic; using System.Diagnostics; using System.Linq; +using System.Net.Sockets; using System.Threading.Tasks; using NUnit.Framework; @@ -10,7 +11,80 @@ namespace Npgsql.Tests; public class TracingTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) { [Test] - public async Task Basic([Values] bool async, [Values] bool batch) + public async Task Basic_open([Values] bool async) + { + if (IsMultiplexing && !async) + return; + + var activities = new List(); + + using var activityListener = new ActivityListener(); + activityListener.ShouldListenTo = source => source.Name == "Npgsql"; + activityListener.Sample = (ref ActivityCreationOptions _) => ActivitySamplingResult.AllDataAndRecorded; + activityListener.ActivityStopped = activity => activities.Add(activity); + ActivitySource.AddActivityListener(activityListener); + + await using var dataSource = CreateDataSource(); + await using var conn = async + ? 
await dataSource.OpenConnectionAsync() + : dataSource.OpenConnection(); + + Assert.That(activities.Count, Is.EqualTo(1)); + ValidateActivity(activities[0], conn, IsMultiplexing); + + if (!IsMultiplexing) + return; + + activities.Clear(); + + // For multiplexing, we clear the pool to force next query to open another physical connection + dataSource.Clear(); + + await conn.ExecuteScalarAsync("SELECT 1"); + + Assert.That(activities.Count, Is.EqualTo(2)); + ValidateActivity(activities[0], conn, IsMultiplexing); + + // For multiplexing, query's activity can be considered as a parent for physical open's activity + Assert.That(activities[0].Parent, Is.SameAs(activities[1])); + + static void ValidateActivity(Activity activity, NpgsqlConnection conn, bool isMultiplexing) + { + Assert.That(activity.DisplayName, Is.EqualTo(conn.Settings.Database)); + Assert.That(activity.OperationName, Is.EqualTo(conn.Settings.Database)); + Assert.That(activity.Status, Is.EqualTo(ActivityStatusCode.Ok)); + + Assert.That(activity.Events.Count(), Is.EqualTo(0)); + + var expectedTagCount = conn.Settings.Port == 5432 ? 
8 : 9; + Assert.That(activity.TagObjects.Count(), Is.EqualTo(expectedTagCount)); + + Assert.IsFalse(activity.TagObjects.Any(x => x.Key == "db.statement")); + + var systemTag = activity.TagObjects.First(x => x.Key == "db.system"); + Assert.That(systemTag.Value, Is.EqualTo("postgresql")); + + var userTag = activity.TagObjects.First(x => x.Key == "db.user"); + Assert.That(userTag.Value, Is.EqualTo(conn.Settings.Username)); + + var dbNameTag = activity.TagObjects.First(x => x.Key == "db.name"); + Assert.That(dbNameTag.Value, Is.EqualTo(conn.Settings.Database)); + + var connStringTag = activity.TagObjects.First(x => x.Key == "db.connection_string"); + Assert.That(connStringTag.Value, Is.EqualTo(conn.ConnectionString)); + + if (!isMultiplexing) + { + var connIDTag = activity.TagObjects.First(x => x.Key == "db.connection_id"); + Assert.That(connIDTag.Value, Is.EqualTo(conn.ProcessID)); + } + else + Assert.IsTrue(activity.TagObjects.Any(x => x.Key == "db.connection_id")); + } + } + + [Test] + public async Task Basic_query([Values] bool async, [Values] bool batch) { if (IsMultiplexing && !async) return; @@ -25,6 +99,11 @@ public async Task Basic([Values] bool async, [Values] bool batch) await using var dataSource = CreateDataSource(); await using var conn = await dataSource.OpenConnectionAsync(); + + // We're not interested in physical open's activity + Assert.That(activities.Count, Is.EqualTo(1)); + activities.Clear(); + await ExecuteScalar(conn, async, batch, "SELECT 42"); Assert.That(activities.Count, Is.EqualTo(1)); @@ -60,10 +139,68 @@ public async Task Basic([Values] bool async, [Values] bool batch) var connIDTag = activity.TagObjects.First(x => x.Key == "db.connection_id"); Assert.That(connIDTag.Value, Is.EqualTo(conn.ProcessID)); } + else + Assert.IsTrue(activity.TagObjects.Any(x => x.Key == "db.connection_id")); } [Test] - public async Task Error([Values] bool async, [Values] bool batch) + public async Task Error_open([Values] bool async) + { + if (IsMultiplexing 
&& !async) + return; + + var activities = new List(); + + using var activityListener = new ActivityListener(); + activityListener.ShouldListenTo = source => source.Name == "Npgsql"; + activityListener.Sample = (ref ActivityCreationOptions _) => ActivitySamplingResult.AllDataAndRecorded; + activityListener.ActivityStopped = activity => activities.Add(activity); + ActivitySource.AddActivityListener(activityListener); + + await using var dataSource = CreateDataSource(x => x.Host = "not-existing-host"); + var ex = Assert.ThrowsAsync(async () => + { + await using var conn = async + ? await dataSource.OpenConnectionAsync() + : dataSource.OpenConnection(); + })!; + + Assert.That(activities.Count, Is.EqualTo(1)); + var activity = activities[0]; + Assert.That(activity.DisplayName, Is.EqualTo(dataSource.Settings.Database)); + Assert.That(activity.OperationName, Is.EqualTo(dataSource.Settings.Database)); + Assert.That(activity.Status, Is.EqualTo(ActivityStatusCode.Error)); + Assert.That(activity.StatusDescription, Is.EqualTo(ex.Message)); + + Assert.That(activity.Events.Count(), Is.EqualTo(1)); + var exceptionEvent = activity.Events.First(); + Assert.That(exceptionEvent.Name, Is.EqualTo("exception")); + + Assert.That(exceptionEvent.Tags.Count(), Is.EqualTo(4)); + + var exceptionTypeTag = exceptionEvent.Tags.First(x => x.Key == "exception.type"); + Assert.That(exceptionTypeTag.Value, Is.EqualTo(ex.GetType().FullName)); + + var exceptionMessageTag = exceptionEvent.Tags.First(x => x.Key == "exception.message"); + StringAssert.Contains(ex.Message, (string)exceptionMessageTag.Value!); + + var exceptionStacktraceTag = exceptionEvent.Tags.First(x => x.Key == "exception.stacktrace"); + StringAssert.Contains(ex.Message, (string)exceptionStacktraceTag.Value!); + + var exceptionEscapedTag = exceptionEvent.Tags.First(x => x.Key == "exception.escaped"); + Assert.That(exceptionEscapedTag.Value, Is.True); + + Assert.That(activity.TagObjects.Count(), Is.EqualTo(2)); + + var systemTag = 
activity.TagObjects.First(x => x.Key == "db.system"); + Assert.That(systemTag.Value, Is.EqualTo("postgresql")); + + var connStringTag = activity.TagObjects.First(x => x.Key == "db.connection_string"); + Assert.That(connStringTag.Value, Is.EqualTo(dataSource.ConnectionString)); + } + + [Test] + public async Task Error_query([Values] bool async, [Values] bool batch) { if (IsMultiplexing && !async) return; @@ -78,6 +215,11 @@ public async Task Error([Values] bool async, [Values] bool batch) await using var dataSource = CreateDataSource(); await using var conn = await dataSource.OpenConnectionAsync(); + + // We're not interested in physical open's activity + Assert.That(activities.Count, Is.EqualTo(1)); + activities.Clear(); + Assert.ThrowsAsync(async () => await ExecuteScalar(conn, async, batch, "SELECT * FROM non_existing_table")); Assert.That(activities.Count, Is.EqualTo(1)); @@ -128,6 +270,8 @@ public async Task Error([Values] bool async, [Values] bool batch) var connIDTag = activity.TagObjects.First(x => x.Key == "db.connection_id"); Assert.That(connIDTag.Value, Is.EqualTo(conn.ProcessID)); } + else + Assert.IsTrue(activity.TagObjects.Any(x => x.Key == "db.connection_id")); } [Test] @@ -148,6 +292,7 @@ public async Task Configure_tracing([Values] bool async, [Values] bool batch) dataSourceBuilder.ConfigureTracing(options => { options + .EnablePhysicalOpenTracing(enable: false) .EnableFirstResponseEvent(enable: false) .ConfigureCommandFilter(cmd => cmd.CommandText.Contains('2')) .ConfigureBatchFilter(batch => batch.BatchCommands[0].CommandText.Contains('2')) @@ -159,6 +304,9 @@ public async Task Configure_tracing([Values] bool async, [Values] bool batch) await using var dataSource = dataSourceBuilder.Build(); await using var conn = await dataSource.OpenConnectionAsync(); + // We disabled physical open tracing + Assert.That(activities.Count, Is.EqualTo(0)); + await ExecuteScalar(conn, async, batch, "SELECT 1"); Assert.That(activities.Count, Is.EqualTo(0)); From 
42e02af2c98033d72dc8fc3a12404e2b46c1cc0b Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Fri, 11 Apr 2025 17:57:07 +0300 Subject: [PATCH 530/761] Start testing on .NET 9 (#5945) Start testing on .NET 9 --- .github/workflows/build.yml | 10 +++++++--- .github/workflows/native-aot.yml | 8 ++++---- test/MStatDumper/MStatDumper.csproj | 2 +- .../Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj | 1 + test/Npgsql.Tests/ExceptionTests.cs | 2 ++ 5 files changed, 15 insertions(+), 8 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 2bdd30de10..6c62460816 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -31,17 +31,21 @@ jobs: os: [ubuntu-24.04] pg_major: [17, 16, 15, 14, 13] config: [Release] - test_tfm: [net8.0] + test_tfm: [net9.0] include: - os: ubuntu-24.04 pg_major: 17 config: Debug - test_tfm: net8.0 + test_tfm: net9.0 - os: macos-15 pg_major: 16 config: Release - test_tfm: net8.0 + test_tfm: net9.0 - os: windows-2022 + pg_major: 17 + config: Release + test_tfm: net9.0 + - os: ubuntu-24.04 pg_major: 17 config: Release test_tfm: net8.0 diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index cd6498fd24..b3b2346a35 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -89,7 +89,7 @@ jobs: matrix: os: [ ubuntu-24.04 ] pg_major: [ 15 ] - tfm: [ net8.0 ] + tfm: [ net9.0 ] steps: - name: Checkout @@ -123,7 +123,7 @@ jobs: matrix: os: [ubuntu-24.04] pg_major: [15] - tfm: [ net8.0 ] + tfm: [ net9.0 ] steps: - name: Checkout @@ -163,11 +163,11 @@ jobs: - name: Write binary size to summary run: | - size="$(ls -l test/Npgsql.NativeAotTests/bin/Release/net8.0/linux-x64/native/Npgsql.NativeAotTests | cut -d ' ' -f 5)" + size="$(ls -l test/Npgsql.NativeAotTests/bin/Release/net9.0/linux-x64/native/Npgsql.NativeAotTests | cut -d ' ' -f 5)" echo "Binary size is $size bytes ($((size / (1024 * 1024))) mb)" >> $GITHUB_STEP_SUMMARY - name: Dump mstat - 
run: dotnet run --project test/MStatDumper/MStatDumper.csproj -c release -f ${{ matrix.tfm }} -- "test/Npgsql.NativeAotTests/obj/Release/net8.0/linux-x64/native/Npgsql.NativeAotTests.mstat" md >> $GITHUB_STEP_SUMMARY + run: dotnet run --project test/MStatDumper/MStatDumper.csproj -c release -f ${{ matrix.tfm }} -- "test/Npgsql.NativeAotTests/obj/Release/net9.0/linux-x64/native/Npgsql.NativeAotTests.mstat" md >> $GITHUB_STEP_SUMMARY - name: Upload mstat uses: actions/upload-artifact@v4 diff --git a/test/MStatDumper/MStatDumper.csproj b/test/MStatDumper/MStatDumper.csproj index 3cab4d57fd..456bd1f3b9 100644 --- a/test/MStatDumper/MStatDumper.csproj +++ b/test/MStatDumper/MStatDumper.csproj @@ -3,7 +3,7 @@ Exe - net8.0 + net9.0 enable disable diff --git a/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj b/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj index 31831faded..0757fb0dd6 100644 --- a/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj +++ b/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj @@ -9,6 +9,7 @@ true false true + false diff --git a/test/Npgsql.Tests/ExceptionTests.cs b/test/Npgsql.Tests/ExceptionTests.cs index 21d83ff9fb..b58617ef52 100644 --- a/test/Npgsql.Tests/ExceptionTests.cs +++ b/test/Npgsql.Tests/ExceptionTests.cs @@ -210,6 +210,7 @@ public void NpgsqlException_IsTransient() Assert.False(new NpgsqlException("", new Exception("Inner Exception")).IsTransient); } +#if !NET9_0_OR_GREATER #pragma warning disable SYSLIB0051 #pragma warning disable 618 [Test] @@ -309,4 +310,5 @@ public void Base_exception_property_serialization() Assert.That(ex.StackTrace, Is.EqualTo(info.GetValue("StackTraceString", typeof(string)))); } #pragma warning restore SYSLIB0051 +#endif } From 7ad12a45c0f76e540ab1ab9a32bebfb915c87c05 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Sun, 27 Apr 2025 12:22:42 +0200 Subject: [PATCH 531/761] Turn on (#6097) --- src/Directory.Build.props | 1 + src/Npgsql/Npgsql.csproj | 4 ---- 2 files changed, 1 
insertion(+), 4 deletions(-) diff --git a/src/Directory.Build.props b/src/Directory.Build.props index b94a8a91bd..fdf88bb904 100644 --- a/src/Directory.Build.props +++ b/src/Directory.Build.props @@ -2,6 +2,7 @@ + true true diff --git a/src/Npgsql/Npgsql.csproj b/src/Npgsql/Npgsql.csproj index a28f47cfbe..e173d25794 100644 --- a/src/Npgsql/Npgsql.csproj +++ b/src/Npgsql/Npgsql.csproj @@ -19,10 +19,6 @@ - - - - From 3f89c1df4c5756a014a87e2655fd38bde7eace85 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Mon, 28 Apr 2025 17:49:24 +0200 Subject: [PATCH 532/761] Reenable public API analyzer (#6101) --- src/Npgsql/Npgsql.csproj | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Npgsql/Npgsql.csproj b/src/Npgsql/Npgsql.csproj index e173d25794..d4376c7774 100644 --- a/src/Npgsql/Npgsql.csproj +++ b/src/Npgsql/Npgsql.csproj @@ -13,6 +13,7 @@ + From c243c4f0b7c458208f5224f3cc5320fcf0dbc6e0 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Mon, 28 Apr 2025 18:49:53 +0300 Subject: [PATCH 533/761] Update Npgsql to .NET 9 (#6099) --- src/Npgsql/Internal/NpgsqlConnector.cs | 16 ++++++++++++++++ src/Npgsql/Npgsql.csproj | 2 +- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index a926fb1ca2..7c231af096 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -881,11 +881,20 @@ internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, // Windows crypto API has a bug with pem certs // See #3650 using var previousCert = cert; +#if NET9_0_OR_GREATER + cert = X509CertificateLoader.LoadPkcs12(cert.Export(X509ContentType.Pkcs12), null); +#else cert = new X509Certificate2(cert.Export(X509ContentType.Pkcs12)); +#endif } } +#if NET9_0_OR_GREATER + // If it's null, it's probably PFX + cert ??= X509CertificateLoader.LoadPkcs12FromFile(certPath, password); +#else cert ??= new X509Certificate2(certPath, password); +#endif 
clientCertificates.Add(cert); _certificate = cert; @@ -1727,7 +1736,14 @@ static RemoteCertificateValidationCallback SslRootValidation(bool verifyFull, st certs.ImportFromPemFile(certRootPath); if (certs.Count == 0) + { +#if NET9_0_OR_GREATER + // This is not a PEM certificate, probably PFX + certs.Add(X509CertificateLoader.LoadPkcs12FromFile(certRootPath, null)); +#else certs.Add(new X509Certificate2(certRootPath)); +#endif + } } chain.ChainPolicy.CustomTrustStore.AddRange(certs); diff --git a/src/Npgsql/Npgsql.csproj b/src/Npgsql/Npgsql.csproj index d4376c7774..096b6c762e 100644 --- a/src/Npgsql/Npgsql.csproj +++ b/src/Npgsql/Npgsql.csproj @@ -5,7 +5,7 @@ Npgsql is the open source .NET data provider for PostgreSQL. npgsql;postgresql;postgres;ado;ado.net;database;sql README.md - net8.0 + net8.0;net9.0 $(NoWarn);CA2017 $(NoWarn);NPG9001 $(NoWarn);NPG9002 From 73202604c3d66d4b55c81d3d532bf0e78cc5c2ee Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Wed, 30 Apr 2025 19:52:50 +0300 Subject: [PATCH 534/761] Ignore system CA store if root certificate is provided (#6102) Closes #6100 --- src/Npgsql/Internal/NpgsqlConnector.cs | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 7c231af096..6a3b71d576 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -1710,13 +1710,8 @@ static RemoteCertificateValidationCallback SslRootValidation(bool verifyFull, st if (certificate is null || chain is null) return false; - // No errors here - no reason to check further - if (sslPolicyErrors == SslPolicyErrors.None) - return true; - - // That's VerifyCA check and the only error is name mismatch - no reason to check further - if (!verifyFull && sslPolicyErrors == SslPolicyErrors.RemoteCertificateNameMismatch) - return true; + // Even if there was no error while validating, we have to check one more time with the provided certificate 
+ // As this is the exact same behavior as libpq // That's VerifyFull check and we have name mismatch - no reason to check further if (verifyFull && sslPolicyErrors.HasFlag(SslPolicyErrors.RemoteCertificateNameMismatch)) From 4da52a03f441f23a4f5597ac106b7833ab4fbe90 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Wed, 7 May 2025 13:49:01 +0300 Subject: [PATCH 535/761] Fix reading columns asynchronously via JsonNet plugin (#6109) Fixes #6108 --- src/Npgsql.Json.NET/Internal/JsonNetJsonConverter.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Npgsql.Json.NET/Internal/JsonNetJsonConverter.cs b/src/Npgsql.Json.NET/Internal/JsonNetJsonConverter.cs index 5d75568f98..10126d25f9 100644 --- a/src/Npgsql.Json.NET/Internal/JsonNetJsonConverter.cs +++ b/src/Npgsql.Json.NET/Internal/JsonNetJsonConverter.cs @@ -51,7 +51,7 @@ static class JsonNetJsonConverter using var stream = reader.GetStream(); var mem = new MemoryStream(); if (async) - await stream.CopyToAsync(mem, Math.Min((int)mem.Length, 81920), cancellationToken).ConfigureAwait(false); + await stream.CopyToAsync(mem, Math.Min((int)stream.Length, 81920), cancellationToken).ConfigureAwait(false); else stream.CopyTo(mem); mem.Position = 0; From 892774fda1c92b53064d775b59e49a8b204d2304 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Sun, 1 Jun 2025 12:49:53 +0200 Subject: [PATCH 536/761] Fixes #6107 missed should buffer in biginteger numeric converter (#6117) --- .../Internal/Converters/Primitive/NumericConverters.cs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/Npgsql/Internal/Converters/Primitive/NumericConverters.cs b/src/Npgsql/Internal/Converters/Primitive/NumericConverters.cs index 00788f99c8..c14a00b608 100644 --- a/src/Npgsql/Internal/Converters/Primitive/NumericConverters.cs +++ b/src/Npgsql/Internal/Converters/Primitive/NumericConverters.cs @@ -13,6 +13,9 @@ sealed class BigIntegerNumericConverter : PgStreamingConverter public override BigInteger 
Read(PgReader reader) { + if (reader.ShouldBuffer(sizeof(short))) + reader.Buffer(sizeof(short)); + var digitCount = reader.ReadInt16(); short[]? digitsFromPool = null; var digits = (digitCount <= StackAllocByteThreshold / sizeof(short) @@ -37,7 +40,9 @@ public override ValueTask ReadAsync(PgReader reader, CancellationTok static async ValueTask AsyncCore(PgReader reader, CancellationToken cancellationToken) { - await reader.BufferAsync(PgNumeric.GetByteCount(0), cancellationToken).ConfigureAwait(false); + if (reader.ShouldBuffer(sizeof(short))) + await reader.BufferAsync(sizeof(short), cancellationToken).ConfigureAwait(false); + var digitCount = reader.ReadInt16(); var digits = new ArraySegment(ArrayPool.Shared.Rent(digitCount), 0, digitCount); var value = ConvertTo(await NumericConverter.ReadAsync(reader, digits, cancellationToken).ConfigureAwait(false)); @@ -132,7 +137,7 @@ static T ConvertTo(in PgNumeric.Builder numeric) static class NumericConverter { - public static int DecimalBasedMaxByteCount = PgNumeric.GetByteCount(PgNumeric.Builder.MaxDecimalNumericDigits); + public static readonly int DecimalBasedMaxByteCount = PgNumeric.GetByteCount(PgNumeric.Builder.MaxDecimalNumericDigits); public static PgNumeric.Builder Read(PgReader reader, Span digits) { From fe7f7755ed78b5292d38c890b110b76ca94e111f Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Tue, 3 Jun 2025 11:21:58 +0300 Subject: [PATCH 537/761] Fix logging parameters with batches (#6079) Fixes #6078 --- src/Npgsql/LogMessages.cs | 4 ++-- src/Npgsql/NpgsqlCommand.cs | 5 ++-- src/Npgsql/Util/LoggingEnumerable.cs | 36 ++++++++++++++++++++++++++++ test/Npgsql.Tests/LoggingTests.cs | 22 ++++++++--------- 4 files changed, 51 insertions(+), 16 deletions(-) create mode 100644 src/Npgsql/Util/LoggingEnumerable.cs diff --git a/src/Npgsql/LogMessages.cs b/src/Npgsql/LogMessages.cs index 8d5f471c27..349b91b4b5 100644 --- a/src/Npgsql/LogMessages.cs +++ b/src/Npgsql/LogMessages.cs @@ -180,7 +180,7 @@ static partial 
class LogMessages Level = LogLevel.Debug, Message = "Executing batch: {BatchCommands}", SkipEnabledCheck = true)] - internal static partial void ExecutingBatchWithParameters(ILogger logger, (string CommandText, object[] Parameters)[] BatchCommands, int ConnectorId); + internal static partial void ExecutingBatchWithParameters(ILogger logger, (string CommandText, IEnumerable Parameters)[] BatchCommands, int ConnectorId); [LoggerMessage( EventId = NpgsqlEventId.CommandExecutionCompleted, @@ -209,7 +209,7 @@ static partial class LogMessages Message = "Batch execution completed (duration={DurationMs}ms): {BatchCommands}", SkipEnabledCheck = true)] internal static partial void BatchExecutionCompletedWithParameters( - ILogger logger, (string CommandText, object[] Parameters)[] BatchCommands, long DurationMs, int ConnectorId); + ILogger logger, (string CommandText, IEnumerable Parameters)[] BatchCommands, long DurationMs, int ConnectorId); [LoggerMessage( EventId = NpgsqlEventId.CancellingCommand, diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index 71a646e262..e9d86bb222 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -1823,6 +1823,7 @@ internal void LogExecutingCompleted(NpgsqlConnector connector, bool executing) { var logParameters = connector.LoggingConfiguration.IsParameterLoggingEnabled || connector.Settings.LogParameters; var logger = connector.LoggingConfiguration.CommandLogger; + Debug.Assert(executing ? 
logger.IsEnabled(LogLevel.Debug) : logger.IsEnabled(LogLevel.Information)); if (InternalBatchCommands.Count == 1) { @@ -1860,9 +1861,9 @@ internal void LogExecutingCompleted(NpgsqlConnector connector, bool executing) { if (logParameters) { - var commands = new (string, object[])[InternalBatchCommands.Count]; + var commands = new (string, IEnumerable)[InternalBatchCommands.Count]; for (var i = 0; i < InternalBatchCommands.Count; i++) - commands[i] = (InternalBatchCommands[i].FinalCommandText!, GetParametersForLogging(InternalBatchCommands[i])); + commands[i] = (InternalBatchCommands[i].FinalCommandText!, new LoggingEnumerable(GetParametersForLogging(InternalBatchCommands[i]))); if (executing) LogMessages.ExecutingBatchWithParameters(logger, commands, connector.Id); diff --git a/src/Npgsql/Util/LoggingEnumerable.cs b/src/Npgsql/Util/LoggingEnumerable.cs new file mode 100644 index 0000000000..eabc7ebdd5 --- /dev/null +++ b/src/Npgsql/Util/LoggingEnumerable.cs @@ -0,0 +1,36 @@ +using System.Collections; +using System.Collections.Generic; +using System.Text; + +namespace Npgsql.Util; + +// For logging batches we have to use a wrapper for parameters, otherwise they're logged as object[]. See https://github.com/npgsql/npgsql/issues/6078. 
+sealed class LoggingEnumerable(IEnumerable wrappedEnumerable) : IEnumerable +{ + public IEnumerator GetEnumerator() => wrappedEnumerable.GetEnumerator(); + + IEnumerator IEnumerable.GetEnumerator() => ((IEnumerable)wrappedEnumerable).GetEnumerator(); + + public override string ToString() + { + var sb = new StringBuilder(); + + sb.Append('['); + + var appended = false; + + foreach (var o in wrappedEnumerable) + { + if (appended) + sb.Append(", "); + else + appended = true; + + sb.Append(o); + } + + sb.Append(']'); + + return sb.ToString(); + } +} diff --git a/test/Npgsql.Tests/LoggingTests.cs b/test/Npgsql.Tests/LoggingTests.cs index b9a566b6a8..76f13ab03c 100644 --- a/test/Npgsql.Tests/LoggingTests.cs +++ b/test/Npgsql.Tests/LoggingTests.cs @@ -143,8 +143,8 @@ public async Task Command_ExecuteScalar_multiple_statement_without_parameters() } var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); - Assert.That(executingCommandEvent.Message, Does.Contain("Batch execution completed").And.Contains("[(SELECT 1, System.Object[]), (SELECT 2, System.Object[])]")); - var batchCommands = (IList<(string CommandText, object[] Parameters)>)AssertLoggingStateContains(executingCommandEvent, "BatchCommands"); + Assert.That(executingCommandEvent.Message, Does.Contain("Batch execution completed").And.Contains("[(SELECT 1, []), (SELECT 2, [])]")); + var batchCommands = (IList<(string CommandText, IEnumerable Parameters)>)AssertLoggingStateContains(executingCommandEvent, "BatchCommands"); Assert.That(batchCommands.Count, Is.EqualTo(2)); Assert.That(batchCommands[0].CommandText, Is.EqualTo("SELECT 1")); Assert.That(batchCommands[0].Parameters, Is.Empty); @@ -171,13 +171,13 @@ public async Task Command_ExecuteScalar_multiple_statement_with_parameters() } var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); - Assert.That(executingCommandEvent.Message, Does.Contain("Batch 
execution completed").And.Contains("[(SELECT $1, System.Object[]), (SELECT $1, System.Object[])]")); - var batchCommands = (IList<(string CommandText, object[] Parameters)>)AssertLoggingStateContains(executingCommandEvent, "BatchCommands"); + Assert.That(executingCommandEvent.Message, Does.Contain("Batch execution completed").And.Contains("[(SELECT $1, [8]), (SELECT $1, [9])]")); + var batchCommands = (IList<(string CommandText, IEnumerable Parameters)>)AssertLoggingStateContains(executingCommandEvent, "BatchCommands"); Assert.That(batchCommands.Count, Is.EqualTo(2)); Assert.That(batchCommands[0].CommandText, Is.EqualTo("SELECT $1")); - Assert.That(batchCommands[0].Parameters[0], Is.EqualTo(8)); + Assert.That(batchCommands[0].Parameters.First(), Is.EqualTo(8)); Assert.That(batchCommands[1].CommandText, Is.EqualTo("SELECT $1")); - Assert.That(batchCommands[1].Parameters[0], Is.EqualTo(9)); + Assert.That(batchCommands[1].Parameters.First(), Is.EqualTo(9)); AssertLoggingStateDoesNotContain(executingCommandEvent, "Parameters"); if (!IsMultiplexing) @@ -256,21 +256,19 @@ public async Task Batch_ExecuteScalar_multiple_statements_with_parameters() var executingCommandEvent = listLoggerProvider.Log.Single(l => l.Id == NpgsqlEventId.CommandExecutionCompleted); - // Note: the message formatter of Microsoft.Extensions.Logging doesn't seem to handle arrays inside tuples, so we get the - // following ugliness (https://github.com/dotnet/runtime/issues/63165). Serilog handles this fine. 
- Assert.That(executingCommandEvent.Message, Does.Contain("Batch execution completed").And.Contains("[(SELECT $1, System.Object[]), (SELECT $1, 9, System.Object[])]")); + Assert.That(executingCommandEvent.Message, Does.Contain("Batch execution completed").And.Contains("[(SELECT $1, [8]), (SELECT $1, 9, [9])]")); AssertLoggingStateDoesNotContain(executingCommandEvent, "CommandText"); AssertLoggingStateDoesNotContain(executingCommandEvent, "Parameters"); if (!IsMultiplexing) AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); - var batchCommands = (IList<(string CommandText, object[] Parameters)>)AssertLoggingStateContains(executingCommandEvent, "BatchCommands"); + var batchCommands = (IList<(string CommandText, IEnumerable Parameters)>)AssertLoggingStateContains(executingCommandEvent, "BatchCommands"); Assert.That(batchCommands.Count, Is.EqualTo(2)); Assert.That(batchCommands[0].CommandText, Is.EqualTo("SELECT $1")); - Assert.That(batchCommands[0].Parameters[0], Is.EqualTo(8)); + Assert.That(batchCommands[0].Parameters.First(), Is.EqualTo(8)); Assert.That(batchCommands[1].CommandText, Is.EqualTo("SELECT $1, 9")); - Assert.That(batchCommands[1].Parameters[0], Is.EqualTo(9)); + Assert.That(batchCommands[1].Parameters.First(), Is.EqualTo(9)); } [Test] From d9c1b0826df58f58bcf41a921bc2b2109904e8a2 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Mon, 16 Jun 2025 17:18:26 +0300 Subject: [PATCH 538/761] Implement GSSAPI session encryption (#6131) Closes #2957 --- .../Internal/IntegratedSecurityHandler.cs | 12 +- src/Npgsql/Internal/NpgsqlConnector.Auth.cs | 22 +- .../NpgsqlConnector.FrontendMessages.cs | 13 + src/Npgsql/Internal/NpgsqlConnector.cs | 251 +++++++++++++++++- src/Npgsql/NpgsqlConnection.cs | 7 +- src/Npgsql/NpgsqlConnectionStringBuilder.cs | 38 +++ src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 2 +- src/Npgsql/PostgresEnvironment.cs | 2 + src/Npgsql/PublicAPI.Unshipped.txt | 6 + src/Npgsql/Util/GSSStream.cs | 177 ++++++++++++ 
test/Npgsql.Tests/SecurityTests.cs | 14 +- test/Npgsql.Tests/Support/PgPostmasterMock.cs | 12 + 12 files changed, 522 insertions(+), 34 deletions(-) create mode 100644 src/Npgsql/Util/GSSStream.cs diff --git a/src/Npgsql/Internal/IntegratedSecurityHandler.cs b/src/Npgsql/Internal/IntegratedSecurityHandler.cs index 2b2f2f1bb9..5edb826497 100644 --- a/src/Npgsql/Internal/IntegratedSecurityHandler.cs +++ b/src/Npgsql/Internal/IntegratedSecurityHandler.cs @@ -16,7 +16,10 @@ class IntegratedSecurityHandler return new(); } - public virtual ValueTask NegotiateAuthentication(bool async, NpgsqlConnector connector) + public virtual ValueTask NegotiateAuthentication(bool async, NpgsqlConnector connector, CancellationToken cancellationToken) + => throw new NotSupportedException(string.Format(NpgsqlStrings.IntegratedSecurityDisabled, nameof(NpgsqlSlimDataSourceBuilder.EnableIntegratedSecurity))); + + public virtual ValueTask GSSEncrypt(bool async, bool isRequired, NpgsqlConnector connector, CancellationToken cancellationToken) => throw new NotSupportedException(string.Format(NpgsqlStrings.IntegratedSecurityDisabled, nameof(NpgsqlSlimDataSourceBuilder.EnableIntegratedSecurity))); } @@ -27,6 +30,9 @@ sealed class RealIntegratedSecurityHandler : IntegratedSecurityHandler public override ValueTask GetUsername(bool async, bool includeRealm, ILogger connectionLogger, CancellationToken cancellationToken) => KerberosUsernameProvider.GetUsername(async, includeRealm, connectionLogger, cancellationToken); - public override ValueTask NegotiateAuthentication(bool async, NpgsqlConnector connector) - => new(connector.AuthenticateGSS(async)); + public override ValueTask NegotiateAuthentication(bool async, NpgsqlConnector connector, CancellationToken cancellationToken) + => connector.AuthenticateGSS(async, cancellationToken); + + public override ValueTask GSSEncrypt(bool async, bool isRequired, NpgsqlConnector connector, CancellationToken cancellationToken) + => connector.GSSEncrypt(async, 
isRequired, cancellationToken); } diff --git a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs index 8dc5231fd2..4d5fccbad5 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs @@ -33,7 +33,13 @@ async Task Authenticate(string username, NpgsqlTimeout timeout, bool async, Canc case AuthenticationRequestType.Ok: // If we didn't complete authentication, check whether it's allowed if (!authenticated) + { + // User requested GSS authentication, but server said that no auth is required + // If and only if our connection is gss encrypted, we consider us already authenticated + if (requiredAuthModes.HasFlag(RequireAuthMode.GSS) && IsGssEncrypted) + return; ThrowIfNotAllowed(requiredAuthModes, RequireAuthMode.None); + } return; case AuthenticationRequestType.CleartextPassword: @@ -55,7 +61,7 @@ await AuthenticateSASL(((AuthenticationSASLMessage)msg).Mechanisms, username, as case AuthenticationRequestType.GSS: case AuthenticationRequestType.SSPI: ThrowIfNotAllowed(requiredAuthModes, msg.AuthRequestType == AuthenticationRequestType.GSS ? RequireAuthMode.GSS : RequireAuthMode.SSPI); - await DataSource.IntegratedSecurityHandler.NegotiateAuthentication(async, this).ConfigureAwait(false); + await DataSource.IntegratedSecurityHandler.NegotiateAuthentication(async, this, cancellationToken).ConfigureAwait(false); return; case AuthenticationRequestType.GSSContinue: @@ -207,7 +213,7 @@ internal void AuthenticateSASLSha256Plus(ref string mechanism, ref string cbindF // try authenticate without channel binding even though both // the client and server supported it. The SCRAM exchange // checks for that, to prevent downgrade attacks. 
- if (!IsSecure) + if (!IsSslEncrypted) throw new NpgsqlException("Server offered SCRAM-SHA-256-PLUS authentication over a non-SSL connection"); var sslStream = (SslStream)_stream; @@ -321,7 +327,7 @@ async Task AuthenticateMD5(string username, byte[] salt, bool async, Cancellatio await Flush(async, cancellationToken).ConfigureAwait(false); } - internal async Task AuthenticateGSS(bool async) + internal async ValueTask AuthenticateGSS(bool async, CancellationToken cancellationToken) { var targetName = $"{KerberosServiceName}/{Host}"; @@ -331,8 +337,8 @@ internal async Task AuthenticateGSS(bool async) using var authContext = new NegotiateAuthentication(clientOptions); var data = authContext.GetOutgoingBlob(ReadOnlySpan.Empty, out var statusCode)!; Debug.Assert(statusCode == NegotiateAuthenticationStatusCode.ContinueNeeded); - await WritePassword(data, 0, data.Length, async, UserCancellationToken).ConfigureAwait(false); - await Flush(async, UserCancellationToken).ConfigureAwait(false); + await WritePassword(data, 0, data.Length, async, cancellationToken).ConfigureAwait(false); + await Flush(async, cancellationToken).ConfigureAwait(false); while (true) { var response = ExpectAny(await ReadMessage(async).ConfigureAwait(false), this); @@ -340,15 +346,15 @@ internal async Task AuthenticateGSS(bool async) break; if (response is not AuthenticationGSSContinueMessage gssMsg) throw new NpgsqlException($"Received unexpected authentication request message {response.AuthRequestType}"); - data = authContext.GetOutgoingBlob(gssMsg.AuthenticationData.AsSpan(), out statusCode)!; + data = authContext.GetOutgoingBlob(gssMsg.AuthenticationData.AsSpan(), out statusCode); if (statusCode is not NegotiateAuthenticationStatusCode.Completed and not NegotiateAuthenticationStatusCode.ContinueNeeded) throw new NpgsqlException($"Error while authenticating GSS/SSPI: {statusCode}"); // We might get NegotiateAuthenticationStatusCode.Completed but the data will not be null // This can happen if it's 
the first cycle, in which case we have to send that data to complete handshake (#4888) if (data is null) continue; - await WritePassword(data, 0, data.Length, async, UserCancellationToken).ConfigureAwait(false); - await Flush(async, UserCancellationToken).ConfigureAwait(false); + await WritePassword(data, 0, data.Length, async, cancellationToken).ConfigureAwait(false); + await Flush(async, cancellationToken).ConfigureAwait(false); } } diff --git a/src/Npgsql/Internal/NpgsqlConnector.FrontendMessages.cs b/src/Npgsql/Internal/NpgsqlConnector.FrontendMessages.cs index 9e0fd45dd3..b801b11b84 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.FrontendMessages.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.FrontendMessages.cs @@ -396,6 +396,19 @@ internal void WriteSslRequest() WriteBuffer.WriteInt32(80877103); } + internal void WriteGSSEncryptRequest() + { + const int len = sizeof(int) + // Length + sizeof(int); // GSSEnc request code + + WriteBuffer.StartMessage(len); + if (WriteBuffer.WriteSpaceLeft < len) + Flush(false).GetAwaiter().GetResult(); + + WriteBuffer.WriteInt32(len); + WriteBuffer.WriteInt32(80877104); + } + internal void WriteStartup(Dictionary parameters) { const int protocolVersion3 = 3 << 16; // 196608 diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 6a3b71d576..c8916acd0a 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -1,5 +1,6 @@ using System; using System.Buffers; +using System.Buffers.Binary; using System.Collections.Generic; using System.Data; using System.Diagnostics; @@ -19,10 +20,11 @@ using System.Threading.Tasks; using Npgsql.BackendMessages; using Npgsql.Util; -using static Npgsql.Util.Statics; using Microsoft.Extensions.Logging; using Npgsql.Properties; +using static Npgsql.Util.Statics; + namespace Npgsql.Internal; /// @@ -493,7 +495,9 @@ internal async Task Open(NpgsqlTimeout timeout, bool async, CancellationToken ca activity = 
NpgsqlActivitySource.ConnectionOpen(this); - await OpenCore(this, username, Settings.SslMode, timeout, async, cancellationToken).ConfigureAwait(false); + var gssEncMode = GetGssEncMode(Settings); + + await OpenCore(this, username, Settings.SslMode, gssEncMode, timeout, async, cancellationToken).ConfigureAwait(false); if (activity is not null) NpgsqlActivitySource.Enrich(activity, this); @@ -583,12 +587,12 @@ static async Task OpenCore( NpgsqlConnector conn, string username, SslMode sslMode, + GssEncryptionMode gssEncMode, NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken) { - await conn.RawOpen(sslMode, timeout, async, cancellationToken).ConfigureAwait(false); - + await conn.RawOpen(sslMode, gssEncMode, timeout, async, cancellationToken).ConfigureAwait(false); timeout.CheckAndApply(conn); conn.WriteStartupMessage(username); await conn.Flush(async, cancellationToken).ConfigureAwait(false); @@ -598,10 +602,21 @@ static async Task OpenCore( { await conn.Authenticate(username, timeout, async, cancellationToken).ConfigureAwait(false); } - catch (PostgresException e) - when (e.SqlState == PostgresErrorCodes.InvalidAuthorizationSpecification && - (sslMode == SslMode.Prefer && conn.IsSecure || sslMode == SslMode.Allow && !conn.IsSecure)) + catch (Exception e) when + // Any error after trying with GSS encryption + (gssEncMode == GssEncryptionMode.Prefer || + // Auth error with/without SSL + (e is PostgresException { SqlState: PostgresErrorCodes.InvalidAuthorizationSpecification } && + (sslMode == SslMode.Prefer && conn.IsSslEncrypted || sslMode == SslMode.Allow && !conn.IsSslEncrypted))) { + if (gssEncMode == GssEncryptionMode.Prefer) + { + conn.ConnectionLogger.LogTrace(e, "Error while opening physical connection with GSS encryption, retrying without it"); + gssEncMode = GssEncryptionMode.Disable; + } + else + sslMode = sslMode == SslMode.Prefer ? 
SslMode.Disable : SslMode.Require; + cancellationRegistration.Dispose(); Debug.Assert(!conn.IsBroken); @@ -612,7 +627,8 @@ static async Task OpenCore( await OpenCore( conn, username, - sslMode == SslMode.Prefer ? SslMode.Disable : SslMode.Require, + sslMode, + gssEncMode, timeout, async, cancellationToken).ConfigureAwait(false); @@ -638,6 +654,131 @@ await OpenCore( } } + internal async ValueTask GSSEncrypt(bool async, bool isRequired, CancellationToken cancellationToken) + { + ConnectionLogger.LogTrace("Negotiating GSS encryption"); + + var targetName = $"{KerberosServiceName}/{Host}"; + var clientOptions = new NegotiateAuthenticationClientOptions { TargetName = targetName }; + + NegotiateOptionsCallback?.Invoke(clientOptions); + + var authentication = new NegotiateAuthentication(clientOptions); + + try + { + var data = authentication.GetOutgoingBlob(ReadOnlySpan.Empty, out var statusCode)!; + if (statusCode != NegotiateAuthenticationStatusCode.ContinueNeeded) + { + // Unable to retrieve credentials + // If it's required, throw an appropriate exception + if (isRequired) + throw new NpgsqlException($"Unable to negotiate GSS encryption: {statusCode}"); + + return GssEncryptionResult.GetCredentialFailure; + } + + WriteGSSEncryptRequest(); + await Flush(async, cancellationToken).ConfigureAwait(false); + + await ReadBuffer.Ensure(1, async).ConfigureAwait(false); + var response = (char)ReadBuffer.ReadByte(); + + // TODO: Server can respond with an error here + // but according to documentation we shouldn't display this error to the user/application + // since the server has not been authenticated (CVE-2024-10977) + // See https://www.postgresql.org/docs/current/protocol-flow.html#PROTOCOL-FLOW-GSSAPI + switch (response) + { + default: + throw new NpgsqlException($"Received unknown response {response} for GSSEncRequest (expecting G or N)"); + case 'N': + return GssEncryptionResult.NegotiateFailure; + case 'G': + break; + } + + if (ReadBuffer.ReadBytesLeft > 0) + throw 
new NpgsqlException( + "Additional unencrypted data received after GSS encryption negotiation - this should never happen, and may be an indication of a man-in-the-middle attack."); + + var lengthBuffer = new byte[4]; + + await WriteGssEncryptMessage(async, data, lengthBuffer).ConfigureAwait(false); + + while (true) + { + if (async) + await _stream.ReadExactlyAsync(lengthBuffer, cancellationToken).ConfigureAwait(false); + else + _stream.ReadExactly(lengthBuffer); + + var messageLength = BitConverter.IsLittleEndian + ? BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref lengthBuffer[0])) + : Unsafe.ReadUnaligned(ref lengthBuffer[0]); + + var buffer = ArrayPool.Shared.Rent(messageLength); + if (async) + await _stream.ReadExactlyAsync(buffer.AsMemory(0, messageLength), cancellationToken).ConfigureAwait(false); + else + _stream.ReadExactly(buffer.AsSpan(0, messageLength)); + + data = authentication.GetOutgoingBlob(buffer.AsSpan(0, messageLength), out statusCode); + ArrayPool.Shared.Return(buffer, clearArray: true); + if (statusCode is not NegotiateAuthenticationStatusCode.Completed and not NegotiateAuthenticationStatusCode.ContinueNeeded) + throw new NpgsqlException($"Error while negotiating GSS encryption: {statusCode}"); + + // TODO: the code below is the copy from GSS/SSPI auth + // It's unknown whether it holds true here or not + + // We might get NegotiateAuthenticationStatusCode.Completed but the data will not be null + // This can happen if it's the first cycle, in which case we have to send that data to complete handshake (#4888) + if (data is null) + { + Debug.Assert(statusCode == NegotiateAuthenticationStatusCode.Completed); + break; + } + + await WriteGssEncryptMessage(async, data, lengthBuffer).ConfigureAwait(false); + } + + _stream = new GSSStream(_stream, authentication); + ReadBuffer.Underlying = _stream; + WriteBuffer.Underlying = _stream; + IsGssEncrypted = true; + authentication = null; + + ConnectionLogger.LogTrace("GSS encryption 
successful"); + return GssEncryptionResult.Success; + + async ValueTask WriteGssEncryptMessage(bool async, byte[] data, byte[] lengthBuffer) + { + BinaryPrimitives.WriteInt32BigEndian(lengthBuffer, data.Length); + + if (async) + { + await _stream.WriteAsync(lengthBuffer, cancellationToken).ConfigureAwait(false); + await _stream.WriteAsync(data, cancellationToken).ConfigureAwait(false); + await _stream.FlushAsync(cancellationToken).ConfigureAwait(false); + } + else + { + _stream.Write(lengthBuffer); + _stream.Write(data); + _stream.Flush(); + } + } + } + catch (Exception e) + { + throw new NpgsqlException("Exception while performing GSS encryption", e); + } + finally + { + authentication?.Dispose(); + } + } + internal async ValueTask QueryDatabaseState( NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken = default) { @@ -762,7 +903,7 @@ async ValueTask GetUsernameAsyncInternal() } } - async Task RawOpen(SslMode sslMode, NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken) + async Task RawOpen(SslMode sslMode, GssEncryptionMode gssEncryptionMode, NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken) { try { @@ -792,13 +933,31 @@ async Task RawOpen(SslMode sslMode, NpgsqlTimeout timeout, bool async, Cancellat timeout.CheckAndApply(this); - IsSecure = false; + IsSslEncrypted = false; + IsGssEncrypted = false; + + var gssEncryptResult = await TryNegotiateGssEncryption(gssEncryptionMode, async, cancellationToken).ConfigureAwait(false); + if (gssEncryptResult == GssEncryptionResult.Success) + return; + + timeout.CheckAndApply(this); if (GetSslNegotiation(Settings) == SslNegotiation.Direct) { // We already check that in NpgsqlConnectionStringBuilder.PostProcessAndValidate, but since we also allow environment variables... 
if (Settings.SslMode is not SslMode.Require and not SslMode.VerifyCA and not SslMode.VerifyFull) throw new ArgumentException("SSL Mode has to be Require or higher to be used with direct SSL Negotiation"); + if (gssEncryptResult == GssEncryptionResult.NegotiateFailure) + { + // We can be here only if it's fallback from preferred (but failed) gss encryption + // In this case, direct encryption isn't going to work anymore, so we throw a bogus exception to retry again without gss + // Alternatively, we can instead just go with the usual route of writing SslRequest, ignoring direct ssl + // But this is how libpq works + Debug.Assert(gssEncryptionMode == GssEncryptionMode.Prefer); + // The exception message doesn't matter since we're going to retry again + throw new NpgsqlException(); + } + await DataSource.TransportSecurityHandler.NegotiateEncryption(async, this, sslMode, timeout, cancellationToken).ConfigureAwait(false); if (ReadBuffer.ReadBytesLeft > 0) throw new NpgsqlException("Additional unencrypted data received after SSL negotiation - this should never happen, and may be an indication of a man-in-the-middle attack."); @@ -845,6 +1004,25 @@ async Task RawOpen(SslMode sslMode, NpgsqlTimeout timeout, bool async, Cancellat } } + async ValueTask TryNegotiateGssEncryption(GssEncryptionMode gssEncryptionMode, bool async, CancellationToken cancellationToken) + { + // GetCredentialFailure is essentially a nop (since we didn't send anything over the wire) + // So we can proceed further as if gss encryption wasn't even attempted + if (gssEncryptionMode == GssEncryptionMode.Disable) return GssEncryptionResult.GetCredentialFailure; + + if (ConnectedEndPoint!.AddressFamily == AddressFamily.Unix) + { + if (gssEncryptionMode == GssEncryptionMode.Prefer) + return GssEncryptionResult.GetCredentialFailure; + + Debug.Assert(gssEncryptionMode == GssEncryptionMode.Require); + throw new NpgsqlException("GSS encryption isn't supported over unix socket"); + } + + return await 
DataSource.IntegratedSecurityHandler.GSSEncrypt(async, gssEncryptionMode == GssEncryptionMode.Require, this, cancellationToken) + .ConfigureAwait(false); + } + static SslNegotiation GetSslNegotiation(NpgsqlConnectionStringBuilder settings) { if (settings.UserProvidedSslNegotiation is { } userProvidedSslNegotiation) @@ -859,8 +1037,24 @@ static SslNegotiation GetSslNegotiation(NpgsqlConnectionStringBuilder settings) return SslNegotiation.Postgres; } + static GssEncryptionMode GetGssEncMode(NpgsqlConnectionStringBuilder settings) + { + if (settings.UserProvidedGssEncMode is { } userProvidedGssEncMode) + return userProvidedGssEncMode; + + if (PostgresEnvironment.GssEncryptionMode is { } gssEncModeEnv) + { + if (Enum.TryParse(gssEncModeEnv, ignoreCase: true, out var gssEncMode)) + return gssEncMode; + } + + return GssEncryptionMode.Disable; + } + internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken) { + ConnectionLogger.LogTrace("Negotiating SSL encryption"); + var clientCertificates = new X509Certificate2Collection(); var certPath = Settings.SslCertificate ?? PostgresEnvironment.SslCert ?? PostgresEnvironment.SslCertDefault; @@ -981,7 +1175,7 @@ internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, ReadBuffer.Underlying = _stream; WriteBuffer.Underlying = _stream; - IsSecure = true; + IsSslEncrypted = true; ConnectionLogger.LogTrace("SSL negotiation successful"); } catch @@ -1680,7 +1874,12 @@ internal void ClearTransaction(Exception? 
disposeReason = null) /// /// Returns whether SSL is being used for the connection /// - internal bool IsSecure { get; private set; } + internal bool IsSslEncrypted { get; private set; } + + /// + /// Returns whether GSS is being used for the connection + /// + internal bool IsGssEncrypted { get; private set; } /// /// Returns whether SCRAM-SHA256 is being used for the connection @@ -1898,11 +2097,28 @@ internal bool PerformPostgresCancellation() void DoCancelRequest(int backendProcessId, int backendSecretKey) { Debug.Assert(State == ConnectorState.Closed); + var gssEncMode = GetGssEncMode(Settings); try { - RawOpen(Settings.SslMode, new NpgsqlTimeout(TimeSpan.FromSeconds(ConnectionTimeout)), false, CancellationToken.None) - .GetAwaiter().GetResult(); + try + { + RawOpen(Settings.SslMode, gssEncMode, new NpgsqlTimeout(TimeSpan.FromSeconds(ConnectionTimeout)), false, + CancellationToken.None) + .GetAwaiter().GetResult(); + } + catch (Exception e) when (gssEncMode == GssEncryptionMode.Prefer) + { + ConnectionLogger.LogTrace(e, "Error while opening physical connection with GSS encryption, retrying without it"); + Cleanup(); + + // If we hit an error with gss encryption + // Retry again without it + RawOpen(Settings.SslMode, GssEncryptionMode.Disable, new NpgsqlTimeout(TimeSpan.FromSeconds(ConnectionTimeout)), false, + CancellationToken.None) + .GetAwaiter().GetResult(); + } + WriteCancelRequest(backendProcessId, backendSecretKey); Flush(); @@ -2975,4 +3191,11 @@ enum DataRowLoadingMode Skip } +enum GssEncryptionResult +{ + GetCredentialFailure, + NegotiateFailure, + Success +} + #endregion diff --git a/src/Npgsql/NpgsqlConnection.cs b/src/Npgsql/NpgsqlConnection.cs index 4b989349f7..c63f5bc4b6 100644 --- a/src/Npgsql/NpgsqlConnection.cs +++ b/src/Npgsql/NpgsqlConnection.cs @@ -999,7 +999,12 @@ internal void OnNotification(NpgsqlNotificationEventArgs e) /// /// Returns whether SSL is being used for the connection. 
/// - internal bool IsSecure => CheckOpenAndRunInTemporaryScope(c => c.IsSecure); + internal bool IsSslEncrypted => CheckOpenAndRunInTemporaryScope(c => c.IsSslEncrypted); + + /// + /// Returns whether GSS encryption is being used for the connection. + /// + internal bool IsGssEncrypted => CheckOpenAndRunInTemporaryScope(c => c.IsGssEncrypted); /// /// Returns whether SCRAM-SHA256 is being user for the connection diff --git a/src/Npgsql/NpgsqlConnectionStringBuilder.cs b/src/Npgsql/NpgsqlConnectionStringBuilder.cs index f0b258d356..35a6ed04e0 100644 --- a/src/Npgsql/NpgsqlConnectionStringBuilder.cs +++ b/src/Npgsql/NpgsqlConnectionStringBuilder.cs @@ -479,6 +479,25 @@ public SslNegotiation SslNegotiation internal SslNegotiation? UserProvidedSslNegotiation { get; private set; } + /// + /// Controls whether GSS encryption is required, disabled or preferred, depending on server support. + /// + [Category("Security")] + [Description("Controls whether GSS encryption is required, disabled or preferred, depending on server support.")] + [DisplayName("GSS Encryption Mode")] + [NpgsqlConnectionStringProperty] + public GssEncryptionMode GssEncryptionMode + { + get => UserProvidedGssEncMode ?? GssEncryptionMode.Disable; + set + { + UserProvidedGssEncMode = value; + SetValue(nameof(GssEncryptionMode), value); + } + } + + internal GssEncryptionMode? UserProvidedGssEncMode { get; private set; } + /// /// Location of a client certificate to be sent to the server. /// @@ -1725,6 +1744,25 @@ public enum SslNegotiation Direct } +/// +/// Specifies how to manage GSS encryption. +/// +public enum GssEncryptionMode +{ + /// + /// GSS encryption is disabled. If the server requires GSS encryption, the connection will fail. + /// + Disable, + /// + /// Prefer GSS encrypted connections if the server allows them, but allow connections without GSS encryption. + /// + Prefer, + /// + /// Fail the connection if the server doesn't support GSS encryption. 
+ /// + Require +} + /// /// Specifies how to manage channel binding. /// diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index 3c7de21fb7..3fa0a5a9fd 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -610,7 +610,7 @@ public NpgsqlSlimDataSourceBuilder EnableTransportSecurity() } /// - /// Enables the possibility to use GSS/SSPI authentication for connections to PostgreSQL. This does not guarantee that it will + /// Enables the possibility to use GSS/SSPI authentication and encryption for connections to PostgreSQL. This does not guarantee that it will /// actually be used; see for more details. /// /// The same builder instance so that multiple calls can be chained. diff --git a/src/Npgsql/PostgresEnvironment.cs b/src/Npgsql/PostgresEnvironment.cs index 389df7d085..558f6cfe9f 100644 --- a/src/Npgsql/PostgresEnvironment.cs +++ b/src/Npgsql/PostgresEnvironment.cs @@ -50,6 +50,8 @@ internal static string? SslCertRootDefault internal static string? SslNegotiation => Environment.GetEnvironmentVariable("PGSSLNEGOTIATION"); + internal static string? GssEncryptionMode => Environment.GetEnvironmentVariable("PGGSSENCMODE"); + internal static string? RequireAuth => Environment.GetEnvironmentVariable("PGREQUIREAUTH"); static string? GetHomeDir() diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index 47198a4165..aad4e3e227 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -1,8 +1,14 @@ #nullable enable abstract Npgsql.NpgsqlDataSource.Clear() -> void +Npgsql.GssEncryptionMode +Npgsql.GssEncryptionMode.Disable = 0 -> Npgsql.GssEncryptionMode +Npgsql.GssEncryptionMode.Prefer = 1 -> Npgsql.GssEncryptionMode +Npgsql.GssEncryptionMode.Require = 2 -> Npgsql.GssEncryptionMode Npgsql.NpgsqlConnection.CloneWithAsync(string! 
connectionString, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.ValueTask Npgsql.NpgsqlConnection.SslClientAuthenticationOptionsCallback.get -> System.Action? Npgsql.NpgsqlConnection.SslClientAuthenticationOptionsCallback.set -> void +Npgsql.NpgsqlConnectionStringBuilder.GssEncryptionMode.get -> Npgsql.GssEncryptionMode +Npgsql.NpgsqlConnectionStringBuilder.GssEncryptionMode.set -> void Npgsql.NpgsqlConnectionStringBuilder.RequireAuth.get -> string? Npgsql.NpgsqlConnectionStringBuilder.RequireAuth.set -> void Npgsql.NpgsqlConnectionStringBuilder.SslNegotiation.get -> Npgsql.SslNegotiation diff --git a/src/Npgsql/Util/GSSStream.cs b/src/Npgsql/Util/GSSStream.cs new file mode 100644 index 0000000000..c6c47bd4ca --- /dev/null +++ b/src/Npgsql/Util/GSSStream.cs @@ -0,0 +1,177 @@ +using System; +using System.Buffers; +using System.Buffers.Binary; +using System.IO; +using System.Net.Security; +using System.Runtime.CompilerServices; +using System.Threading; +using System.Threading.Tasks; + +namespace Npgsql.Util; + +// For more detailed explanation of communication protocol +// See https://www.postgresql.org/docs/current/protocol-flow.html#PROTOCOL-FLOW-GSSAPI +sealed class GSSStream : Stream +{ + // At most, postgres supports GSS messages up to 16kb + // We use the recommended value of 8kb for the write buffer + // Which will result in messages of slightly larger than 8kb + const int MaxWriteMessageSizeLimit = 8 * 1024; + const int MaxReadMessageSizeLimit = 16 * 1024; + + readonly Stream _stream; + readonly NegotiateAuthentication _authentication; + + readonly ArrayBufferWriter _writeBuffer; + readonly byte[] _writeLengthBuffer; + + readonly byte[] _readBuffer; + int _readPosition; + int _leftToRead; + + internal GSSStream(Stream stream, NegotiateAuthentication authentication) + { + _stream = stream; + _authentication = authentication; + // While we guarantee that unencrypted messages are at 
most 8kb + // Encrypting them will result in messages slightly larger than the original size + // Which is why the initial capacity has an additional 2kb of free space + _writeBuffer = new ArrayBufferWriter(MaxWriteMessageSizeLimit + 2048); + _writeLengthBuffer = new byte[4]; + _readBuffer = new byte[MaxReadMessageSizeLimit]; + } + + public override void Write(ReadOnlySpan buffer) + { + var start = 0; + while (start != buffer.Length) + { + var lengthToWrite = Math.Min(buffer.Length - start, MaxWriteMessageSizeLimit); + var result = _authentication.Wrap( + buffer.Slice(start, lengthToWrite), + _writeBuffer, + _authentication.IsEncrypted, + out _); + if (result != NegotiateAuthenticationStatusCode.Completed) + throw new NpgsqlException($"Error while encrypting buffer: {result}"); + + var written = _writeBuffer.WrittenMemory; + Unsafe.WriteUnaligned(ref _writeLengthBuffer[0], BitConverter.IsLittleEndian ? BinaryPrimitives.ReverseEndianness(written.Length) : written.Length); + + _stream.Write(_writeLengthBuffer); + _stream.Write(buffer.Slice(start, lengthToWrite)); + + _writeBuffer.ResetWrittenCount(); + start += lengthToWrite; + } + } + + public override void Write(byte[] buffer, int offset, int count) + => Write(buffer.AsSpan(offset, count)); + + public override async ValueTask WriteAsync(ReadOnlyMemory buffer, CancellationToken cancellationToken = default) + { + var start = 0; + while (start != buffer.Length) + { + var lengthToWrite = Math.Min(buffer.Length - start, MaxWriteMessageSizeLimit); + var result = _authentication.Wrap( + buffer.Slice(start, lengthToWrite).Span, + _writeBuffer, + _authentication.IsEncrypted, + out _); + if (result != NegotiateAuthenticationStatusCode.Completed) + throw new NpgsqlException($"Error while encrypting buffer: {result}"); + + var written = _writeBuffer.WrittenMemory; + Unsafe.WriteUnaligned(ref _writeLengthBuffer[0], BitConverter.IsLittleEndian ? 
BinaryPrimitives.ReverseEndianness(written.Length) : written.Length); + + await _stream.WriteAsync(_writeLengthBuffer, cancellationToken).ConfigureAwait(false); + await _stream.WriteAsync(_writeBuffer.WrittenMemory, cancellationToken).ConfigureAwait(false); + + _writeBuffer.ResetWrittenCount(); + start += lengthToWrite; + } + } + + public override async Task WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + => await WriteAsync(buffer.AsMemory(offset, count), cancellationToken).ConfigureAwait(false); + + public override void Flush() => _stream.Flush(); + + public override Task FlushAsync(CancellationToken cancellationToken) => _stream.FlushAsync(cancellationToken); + + public override int Read(Span buffer) + { + if (_leftToRead == 0) + { + _stream.ReadExactly(_readBuffer.AsSpan(0, 4)); + var messageLength = BitConverter.IsLittleEndian + ? BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref _readBuffer[0])) + : Unsafe.ReadUnaligned(ref _readBuffer[0]); + var messageBuffer = _readBuffer.AsSpan(0, messageLength); + _stream.ReadExactly(messageBuffer); + var result = _authentication.UnwrapInPlace(messageBuffer, out _readPosition, out _leftToRead, out _); + if (result != NegotiateAuthenticationStatusCode.Completed) + throw new NpgsqlException($"Error while decrypting buffer: {result}"); + } + + var maxRead = Math.Min(_leftToRead, buffer.Length); + _readBuffer.AsSpan(_readPosition, maxRead).CopyTo(buffer); + _readPosition += maxRead; + _leftToRead -= maxRead; + return maxRead; + } + + public override int Read(byte[] buffer, int offset, int count) + => Read(buffer.AsSpan(offset, count)); + + public override async ValueTask ReadAsync(Memory buffer, CancellationToken cancellationToken = default) + { + if (_leftToRead == 0) + { + await _stream.ReadExactlyAsync(_readBuffer.AsMemory(0, 4), cancellationToken).ConfigureAwait(false); + var messageLength = BitConverter.IsLittleEndian + ? 
BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref _readBuffer[0])) + : Unsafe.ReadUnaligned(ref _readBuffer[0]); + var messageBuffer = _readBuffer.AsMemory(0, messageLength); + await _stream.ReadExactlyAsync(messageBuffer, cancellationToken).ConfigureAwait(false); + var result = _authentication.UnwrapInPlace(messageBuffer.Span, out _readPosition, out _leftToRead, out _); + if (result != NegotiateAuthenticationStatusCode.Completed) + throw new NpgsqlException($"Error while decrypting buffer: {result}"); + } + + var maxRead = Math.Min(_leftToRead, buffer.Length); + _readBuffer.AsMemory(_readPosition, maxRead).CopyTo(buffer); + _readPosition += maxRead; + _leftToRead -= maxRead; + return maxRead; + } + + public override async Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + => await ReadAsync(buffer.AsMemory(offset, count), cancellationToken).ConfigureAwait(false); + + public override void Close() => _stream.Close(); + + protected override void Dispose(bool disposing) + { + _authentication.Dispose(); + _stream.Dispose(); + } + + public override ValueTask DisposeAsync() => _stream.DisposeAsync(); + + public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException(); + public override void SetLength(long value) => throw new NotSupportedException(); + + public override bool CanRead => true; + public override bool CanWrite => true; + public override bool CanSeek => false; + public override long Length => throw new NotSupportedException(); + + public override long Position + { + get => throw new NotSupportedException(); + set => throw new NotSupportedException(); + } +} diff --git a/test/Npgsql.Tests/SecurityTests.cs b/test/Npgsql.Tests/SecurityTests.cs index 13b7ca7495..7f47ea8111 100644 --- a/test/Npgsql.Tests/SecurityTests.cs +++ b/test/Npgsql.Tests/SecurityTests.cs @@ -20,7 +20,7 @@ public async Task Basic_ssl() csb.SslMode = SslMode.Require; }); await using var conn = await 
dataSource.OpenConnectionAsync(); - Assert.That(conn.IsSecure, Is.True); + Assert.That(conn.IsSslEncrypted, Is.True); } [Test, Description("Default user must run with md5 password encryption")] @@ -72,7 +72,7 @@ public void IsSecure_without_ssl() { using var dataSource = CreateDataSource(csb => csb.SslMode = SslMode.Disable); using var conn = dataSource.OpenConnection(); - Assert.That(conn.IsSecure, Is.False); + Assert.That(conn.IsSslEncrypted, Is.False); } [Test, Explicit("Needs to be set up (and run with with Kerberos credentials on Linux)")] @@ -248,7 +248,7 @@ public async Task Connect_with_only_ssl_allowed_user([Values] bool multiplexing, csb.KeepAlive = keepAlive ? 10 : 0; }); await using var conn = await dataSource.OpenConnectionAsync(); - Assert.IsTrue(conn.IsSecure); + Assert.IsTrue(conn.IsSslEncrypted); } catch (Exception e) when (!IsOnBuildServer) { @@ -277,7 +277,7 @@ public async Task Connect_with_only_non_ssl_allowed_user([Values] bool multiplex csb.KeepAlive = keepAlive ? 
10 : 0; }); await using var conn = await dataSource.OpenConnectionAsync(); - Assert.IsFalse(conn.IsSecure); + Assert.IsFalse(conn.IsSslEncrypted); } catch (NpgsqlException ex) when (RuntimeInformation.IsOSPlatform(OSPlatform.Windows) && ex.InnerException is IOException) { @@ -400,7 +400,7 @@ public async Task Bug4305_Secure([Values] bool async) try { conn = await dataSource.OpenConnectionAsync(); - Assert.IsTrue(conn.IsSecure); + Assert.IsTrue(conn.IsSslEncrypted); } catch (Exception e) when (!IsOnBuildServer) { @@ -451,7 +451,7 @@ public async Task Bug4305_not_Secure([Values] bool async) try { conn = await dataSource.OpenConnectionAsync(); - Assert.IsFalse(conn.IsSecure); + Assert.IsFalse(conn.IsSslEncrypted); } catch (Exception e) when (!IsOnBuildServer) { @@ -494,7 +494,7 @@ public async Task Direct_ssl_negotiation() csb.SslNegotiation = SslNegotiation.Direct; }); await using var conn = await dataSource.OpenConnectionAsync(); - Assert.IsTrue(conn.IsSecure); + Assert.IsTrue(conn.IsSslEncrypted); } [Test] diff --git a/test/Npgsql.Tests/Support/PgPostmasterMock.cs b/test/Npgsql.Tests/Support/PgPostmasterMock.cs index 3a59ccc2f9..178de2d01d 100644 --- a/test/Npgsql.Tests/Support/PgPostmasterMock.cs +++ b/test/Npgsql.Tests/Support/PgPostmasterMock.cs @@ -16,6 +16,7 @@ class PgPostmasterMock : IAsyncDisposable const int WriteBufferSize = 8192; const int CancelRequestCode = 1234 << 16 | 5678; const int SslRequest = 80877103; + const int GssRequest = 80877104; static readonly Encoding Encoding = NpgsqlWriteBuffer.UTF8Encoding; static readonly Encoding RelaxedEncoding = NpgsqlWriteBuffer.RelaxedUTF8Encoding; @@ -148,6 +149,17 @@ async Task Accept(bool completeCancellationImmediat await readBuffer.EnsureAsync(len - 4); var request = readBuffer.ReadInt32(); + if (request == GssRequest) + { + writeBuffer.WriteByte((byte)'N'); + await writeBuffer.Flush(async: true); + + await readBuffer.EnsureAsync(4); + len = readBuffer.ReadInt32(); + await readBuffer.EnsureAsync(len - 4); 
+ request = readBuffer.ReadInt32(); + } + if (request == SslRequest) { writeBuffer.WriteByte((byte)'N'); From 317c4e357a0c9198ed378476bf57236e1604efeb Mon Sep 17 00:00:00 2001 From: Michael Todorovic Date: Tue, 17 Jun 2025 12:52:36 +0200 Subject: [PATCH 539/761] Add support for PGAPPNAME to set application name (#6139) Signed-off-by: Michael Todorovic --- src/Npgsql/Internal/NpgsqlConnector.cs | 5 ++-- src/Npgsql/PostgresEnvironment.cs | 2 ++ test/Npgsql.Tests/ConnectionTests.cs | 41 ++++++++++++++++++++++++++ 3 files changed, 46 insertions(+), 2 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index c8916acd0a..7f720f5882 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -833,8 +833,9 @@ void WriteStartupMessage(string username) if (Settings.Database is not null) startupParams["database"] = Settings.Database; - if (Settings.ApplicationName?.Length > 0) - startupParams["application_name"] = Settings.ApplicationName; + var applicationName = Settings.ApplicationName ?? PostgresEnvironment.AppName; + if (applicationName?.Length > 0) + startupParams["application_name"] = applicationName; if (Settings.SearchPath?.Length > 0) startupParams["search_path"] = Settings.SearchPath; diff --git a/src/Npgsql/PostgresEnvironment.cs b/src/Npgsql/PostgresEnvironment.cs index 558f6cfe9f..3ba874ae4c 100644 --- a/src/Npgsql/PostgresEnvironment.cs +++ b/src/Npgsql/PostgresEnvironment.cs @@ -54,6 +54,8 @@ internal static string? SslCertRootDefault internal static string? RequireAuth => Environment.GetEnvironmentVariable("PGREQUIREAUTH"); + internal static string? AppName => Environment.GetEnvironmentVariable("PGAPPNAME"); + static string? GetHomeDir() => Environment.GetEnvironmentVariable(RuntimeInformation.IsOSPlatform(OSPlatform.Windows) ? 
"APPDATA" : "HOME"); diff --git a/test/Npgsql.Tests/ConnectionTests.cs b/test/Npgsql.Tests/ConnectionTests.cs index 90dbd4ecf1..ac97daf101 100644 --- a/test/Npgsql.Tests/ConnectionTests.cs +++ b/test/Npgsql.Tests/ConnectionTests.cs @@ -391,6 +391,47 @@ public async Task Timezone_connection_param() #endregion Timezone + #region Application Name + + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/6133")] + [NonParallelizable] // Sets environment variable + public async Task Application_name_env_var() + { + const string testAppName = "MyTestApp"; + + // Note that the pool is unaware of the environment variable, so if a connection is + // returned from the pool it may contain the wrong application name + using var _ = SetEnvironmentVariable("PGAPPNAME", testAppName); + await using var dataSource = CreateDataSource(); + await using var conn = await dataSource.OpenConnectionAsync(); + Assert.That(conn.PostgresParameters["application_name"], Is.EqualTo(testAppName)); + } + + [Test] + public async Task Application_name_connection_param() + { + const string testAppName = "MyTestApp2"; + + await using var dataSource = CreateDataSource(csb => csb.ApplicationName = testAppName); + await using var conn = await dataSource.OpenConnectionAsync(); + Assert.That(conn.PostgresParameters["application_name"], Is.EqualTo(testAppName)); + } + + [Test] + [NonParallelizable] // Sets environment variable + public async Task Application_name_connection_param_overrides_env_var() + { + const string envAppName = "EnvApp"; + const string connAppName = "ConnApp"; + + using var _ = SetEnvironmentVariable("PGAPPNAME", envAppName); + await using var dataSource = CreateDataSource(csb => csb.ApplicationName = connAppName); + await using var conn = await dataSource.OpenConnectionAsync(); + Assert.That(conn.PostgresParameters["application_name"], Is.EqualTo(connAppName)); + } + + #endregion Application Name + #region ConnectionString - Host [TestCase("127.0.0.1", ExpectedResult = new [] { 
"127.0.0.1:5432" })] From e3921f213b251c80bc35f2622db9c7dc99626c68 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Fri, 20 Jun 2025 13:19:59 +0300 Subject: [PATCH 540/761] Fix returning null from KerberosUsernameProvider.GetUsername with concurrent calls (#6137) Fixes #6136 --- src/Npgsql/KerberosUsernameProvider.cs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Npgsql/KerberosUsernameProvider.cs b/src/Npgsql/KerberosUsernameProvider.cs index 591c43fd98..6963e139f0 100644 --- a/src/Npgsql/KerberosUsernameProvider.cs +++ b/src/Npgsql/KerberosUsernameProvider.cs @@ -11,9 +11,9 @@ namespace Npgsql; /// Launches MIT Kerberos klist and parses out the default principal from it. /// Caches the result. /// -sealed class KerberosUsernameProvider +static class KerberosUsernameProvider { - static bool _performedDetection; + static volatile bool _performedDetection; static string? _principalWithRealm; static string? _principalWithoutRealm; From 1b55ebc74d15ef9edff5ab661062de5cd4625a2c Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Mon, 23 Jun 2025 13:03:52 +0200 Subject: [PATCH 541/761] Add NpgsqlTsVector.Empty (#6145) Closes #6134 --- src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs | 5 +++++ src/Npgsql/PublicAPI.Unshipped.txt | 1 + test/Npgsql.Tests/TypesTests.cs | 7 +++++++ 3 files changed, 13 insertions(+) diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs b/src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs index 7d63a547fe..4dd1e28b08 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs @@ -11,6 +11,11 @@ namespace NpgsqlTypes; /// public sealed class NpgsqlTsVector : IEnumerable, IEquatable { + /// + /// Represents an empty tsvector. 
+ /// + public static readonly NpgsqlTsVector Empty = new NpgsqlTsVector([], noCheck: true); + readonly List _lexemes; internal NpgsqlTsVector(List lexemes, bool noCheck = false) diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index aad4e3e227..a1d261ead0 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -84,3 +84,4 @@ Npgsql.NpgsqlConnection.ReloadTypesAsync(System.Threading.CancellationToken canc *REMOVED*Npgsql.NpgsqlSlimDataSourceBuilder.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! *REMOVED*Npgsql.NpgsqlSlimDataSourceBuilder.MapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! *REMOVED*Npgsql.NpgsqlSlimDataSourceBuilder.MapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +static readonly NpgsqlTypes.NpgsqlTsVector.Empty -> NpgsqlTypes.NpgsqlTsVector! 
diff --git a/test/Npgsql.Tests/TypesTests.cs b/test/Npgsql.Tests/TypesTests.cs index 113c08b954..1f6b0e8c55 100644 --- a/test/Npgsql.Tests/TypesTests.cs +++ b/test/Npgsql.Tests/TypesTests.cs @@ -86,6 +86,13 @@ public void TsQuery() } #pragma warning restore CS0618 // {NpgsqlTsVector,NpgsqlTsQuery}.Parse are obsolete + [Test] + public void TsVector_empty() + { + Assert.IsEmpty(NpgsqlTsVector.Empty); + Assert.AreEqual(string.Empty, NpgsqlTsVector.Empty.ToString()); + } + [Test] public void TsQueryEquatibility() { From 3dae12114ddd5e59b618972a8fee43b39c2ef7f1 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Wed, 2 Jul 2025 15:16:44 +0300 Subject: [PATCH 542/761] Add assert to NpgsqlCommand.Transaction if it's completed (#6151) Closes #6149 --- src/Npgsql/NpgsqlCommand.cs | 8 +++++++- test/Npgsql.Tests/CommandTests.cs | 15 +++++++++++++++ 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index e9d86bb222..d5ad24fdb2 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -1635,7 +1635,13 @@ internal virtual async ValueTask ExecuteReader(bool async, Com protected override DbTransaction? 
DbTransaction { get => _transaction; - set => _transaction = (NpgsqlTransaction?)value; + set + { + var tx = (NpgsqlTransaction?)value; + if (tx is { IsCompleted: true }) + throw new InvalidOperationException("Transaction is already completed"); + _transaction = tx; + } } /// diff --git a/test/Npgsql.Tests/CommandTests.cs b/test/Npgsql.Tests/CommandTests.cs index f159d7d97c..8cc36df10a 100644 --- a/test/Npgsql.Tests/CommandTests.cs +++ b/test/Npgsql.Tests/CommandTests.cs @@ -1639,4 +1639,19 @@ await server Assert.That(connection.PostgresParameters, Contains.Key("SomeKey").WithValue("SomeValue")); } + + [Test] + public async Task Completed_transaction_throws([Values] bool commit) + { + await using var conn = await OpenConnectionAsync(); + await using var tx = await conn.BeginTransactionAsync(); + await using var cmd = conn.CreateCommand(); + + if (commit) + await tx.CommitAsync(); + else + await tx.RollbackAsync(); + + Assert.Throws(() => cmd.Transaction = tx); + } } From 016ae357277cf2cf65905b9a1bac76a4673753e4 Mon Sep 17 00:00:00 2001 From: 0MG-DEN <31481586+0MG-DEN@users.noreply.github.com> Date: Fri, 4 Jul 2025 15:10:29 +0300 Subject: [PATCH 543/761] Compare normalized type names (#6011) Fixes #6010 --- src/Npgsql/Internal/TypeInfoMapping.cs | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/src/Npgsql/Internal/TypeInfoMapping.cs b/src/Npgsql/Internal/TypeInfoMapping.cs index afb5325590..c8439de6ac 100644 --- a/src/Npgsql/Internal/TypeInfoMapping.cs +++ b/src/Npgsql/Internal/TypeInfoMapping.cs @@ -71,7 +71,8 @@ public readonly struct TypeInfoMapping(Type type, string dataTypeName, TypeInfoF public Func? TypeMatchPredicate { get; init; } public bool TypeEquals(Type type) => TypeMatchPredicate?.Invoke(type) ?? 
Type == type; - public bool DataTypeNameEquals(string dataTypeName) + + private bool DataTypeNameEqualsCore(string dataTypeName) { var span = DataTypeName.AsSpan(); return Postgres.DataTypeName.IsFullyQualified(span) @@ -79,6 +80,18 @@ public bool DataTypeNameEquals(string dataTypeName) : span.Equals(Postgres.DataTypeName.ValidatedName(dataTypeName).UnqualifiedNameSpan, StringComparison.Ordinal); } + internal bool DataTypeNameEquals(DataTypeName dataTypeName) + { + var value = dataTypeName.Value; + return DataTypeNameEqualsCore(value); + } + + public bool DataTypeNameEquals(string dataTypeName) + { + var normalized = Postgres.DataTypeName.NormalizeName(dataTypeName); + return DataTypeNameEqualsCore(normalized); + } + string DebuggerDisplay { get @@ -125,7 +138,7 @@ public TypeInfoMappingCollection(IEnumerable items) { var looseTypeMatch = mapping.TypeMatchPredicate is { } pred ? pred(type) : type is null || mapping.Type == type; var typeMatch = type is not null && looseTypeMatch; - var dataTypeMatch = dataTypeName is not null && mapping.DataTypeNameEquals(dataTypeName.Value.Value); + var dataTypeMatch = dataTypeName is not null && mapping.DataTypeNameEquals(dataTypeName.Value); var matchRequirement = mapping.MatchRequirement; if (dataTypeMatch && typeMatch From be916c2391daab20616f406e1d3540f43053e380 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Mon, 7 Jul 2025 12:50:25 +0200 Subject: [PATCH 544/761] Do CI testing for PG18 (beta) (#6155) --- .github/workflows/build.yml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 6c62460816..f10c34ea60 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -49,11 +49,11 @@ jobs: pg_major: 17 config: Release test_tfm: net8.0 -# - os: ubuntu-24.04 -# pg_major: 17 -# config: Release -# test_tfm: net8.0 -# pg_prerelease: 'PG Prerelease' + - os: ubuntu-24.04 + pg_major: 18 + config: Release + test_tfm: 
net8.0 + pg_prerelease: 'PG Prerelease' outputs: is_release: ${{ steps.analyze_tag.outputs.is_release }} @@ -86,7 +86,7 @@ jobs: # Automated repository configuration sudo apt install -y postgresql-common - sudo /usr/share/postgresql-common/pgdg/apt.postgresql.org.sh -y + sudo /usr/share/postgresql-common/pgdg/apt.postgresql.org.sh -v ${{ matrix.pg_major }} -y sudo apt-get update -qq sudo apt-get install -qq postgresql-${{ matrix.pg_major }} export PGDATA=/etc/postgresql/${{ matrix.pg_major }}/main @@ -102,9 +102,9 @@ jobs: sudo -u postgres psql -c "CREATE USER npgsql_tests_nossl SUPERUSER PASSWORD 'npgsql_tests_nossl'" # To disable PostGIS for prereleases (because it usually isn't available until late), surround with the following: - if [ -z "${{ matrix.pg_prerelease }}" ]; then + #if [ -z "${{ matrix.pg_prerelease }}" ]; then sudo apt-get install -qq postgresql-${{ matrix.pg_major }}-postgis-${{ env.postgis_version }} - fi + #fi if [ ${{ matrix.pg_major }} -ge 14 ]; then sudo sed -i "s|unix_socket_directories = '/var/run/postgresql'|unix_socket_directories = '/var/run/postgresql, @/npgsql_unix'|" $PGDATA/postgresql.conf From c378bdbc141dedd00f4d67b2eaa246c5e8d92666 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Mon, 4 Aug 2025 17:10:00 +0300 Subject: [PATCH 545/761] Fix infinite consume on error with connection break (#6161) Fixes #6160 --- src/Npgsql/NpgsqlDataReader.cs | 8 +++++++- test/Npgsql.Tests/ReaderTests.cs | 35 ++++++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+), 1 deletion(-) diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 4add2e970d..86963afd4a 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -985,7 +985,13 @@ async Task Consume(bool async, Exception? firstException = null) // Skip over the other result sets. 
Note that this does tally records affected from CommandComplete messages, and properly sets // state for auto-prepared statements - while (true) + // + // The only exception is when the connector is broken (which can happen in the middle of consuming) + // As then there is no point in going forward + // + // While we can also check our local state (State == Closed) + // It's probably better to rely on connector since it's private and its state can't be changed + while (!Connector.IsBroken) { try { diff --git a/test/Npgsql.Tests/ReaderTests.cs b/test/Npgsql.Tests/ReaderTests.cs index b46342153f..839ec5b610 100644 --- a/test/Npgsql.Tests/ReaderTests.cs +++ b/test/Npgsql.Tests/ReaderTests.cs @@ -2371,6 +2371,41 @@ await pgMock Assert.That(conn.Connector!.State, Is.EqualTo(ConnectorState.Ready)); } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/6160")] + [Description("Consuming result set shouldn't go infinite in case connection is broken")] + public async Task Bug6160() + { + var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + { + // Set to -1 to trigger immediate connection break on timeout + CancellationTimeout = -1, + CommandTimeout = 1 + }; + await using var postmasterMock = PgPostmasterMock.Start(csb.ConnectionString); + await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); + await using var conn = await dataSource.OpenConnectionAsync(); + + var pgMock = await postmasterMock.WaitForServerConnection(); + await pgMock + .WriteParseComplete() + .WriteBindComplete() + .WriteRowDescription(new FieldDescription(Int4Oid)) + .WriteDataRow(new byte[4]) + .FlushAsync(); + + await using var cmd = new NpgsqlCommand("SELECT 1", conn); + await using (var reader = await cmd.ExecuteReaderAsync(Behavior | CommandBehavior.SingleRow)) + { + await reader.ReadAsync(); + // The second read will try to consume the whole resultset due to CommandBehavior.SingleRow + // Which will fail with timeout (and immediate connection break) since 
we didn't send anything else beside the first row + var ex = Assert.ThrowsAsync(async () => await reader.ReadAsync())!; + Assert.That(ex.InnerException, Is.TypeOf()); + + Assert.That(conn.State, Is.EqualTo(ConnectionState.Closed)); + } + } + #endregion #region Initialization / setup / teardown From 98ed04be107a11d62babbc1bbb91cab3c3b78e14 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Aug 2025 06:31:39 +0000 Subject: [PATCH 546/761] Bump actions/checkout from 4 to 5 (#6174) --- .github/workflows/build.yml | 8 ++++---- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/native-aot.yml | 6 +++--- .github/workflows/rich-code-nav.yml | 2 +- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index f10c34ea60..b38261e467 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -61,7 +61,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v5 - name: NuGet Cache uses: actions/cache@v4 @@ -140,7 +140,7 @@ jobs: sudo -u postgres psql -c "CREATE USER npgsql_tests_scram SUPERUSER PASSWORD 'npgsql_tests_scram'" # Uncomment the following to SSH into the agent running the build (https://github.com/mxschmitt/action-tmate) - #- uses: actions/checkout@v4 + #- uses: actions/checkout@v5 #- name: Setup tmate session # uses: mxschmitt/action-tmate@v3 @@ -343,7 +343,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v5 - name: NuGet Cache uses: actions/cache@v4 @@ -383,7 +383,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v5 - name: Setup .NET Core SDK uses: actions/setup-dotnet@v4.3.1 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 2f34b67e27..4afb98c43a 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -52,7 
+52,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v5 # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index b3b2346a35..6f590ed720 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -93,7 +93,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v5 # - name: Setup nuget config # run: echo "$nuget_config" > NuGet.config @@ -127,7 +127,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v5 # - name: Setup nuget config # run: echo "$nuget_config" > NuGet.config @@ -154,7 +154,7 @@ jobs: shell: bash # Uncomment the following to SSH into the agent running the build (https://github.com/mxschmitt/action-tmate) - #- uses: actions/checkout@v4 + #- uses: actions/checkout@v5 #- name: Setup tmate session # uses: mxschmitt/action-tmate@v3 diff --git a/.github/workflows/rich-code-nav.yml b/.github/workflows/rich-code-nav.yml index d0649277ca..f1987462a4 100644 --- a/.github/workflows/rich-code-nav.yml +++ b/.github/workflows/rich-code-nav.yml @@ -12,7 +12,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v5 - name: NuGet Cache uses: actions/cache@v4 From 6f8971ce5a811463c537376e8c70ffd9750cd396 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Wed, 20 Aug 2025 12:45:24 +0300 Subject: [PATCH 547/761] Fix concurrent NpgsqlDataSource.Dispose and Bootstrap (#6116) Fixes #6115 --- src/Npgsql/NpgsqlDataSource.cs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index 7d78fca230..d8ff956141 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -497,8 +497,10 @@ protected virtual void DisposeBase() } _periodicPasswordProviderTimer?.Dispose(); - 
_setupMappingsSemaphore.Dispose(); MetricsReporter.Dispose(); + // We do not dispose _setupMappingsSemaphore explicitly, leaving it to finalizer + // Due to possible concurrent access, which might lead to deadlock + // See issue #6115 Clear(); } @@ -525,8 +527,10 @@ protected virtual async ValueTask DisposeAsyncBase() if (_periodicPasswordProviderTimer is not null) await _periodicPasswordProviderTimer.DisposeAsync().ConfigureAwait(false); - _setupMappingsSemaphore.Dispose(); MetricsReporter.Dispose(); + // We do not dispose _setupMappingsSemaphore explicitly, leaving it to finalizer + // Due to possible concurrent access, which might lead to deadlock + // See issue #6115 // TODO: async Clear, #4499 Clear(); From 19f466e3e12106b9e7a81e67d07c4df56467a861 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Wed, 20 Aug 2025 12:55:02 +0300 Subject: [PATCH 548/761] Set socket options before connecting to postgres (#6090) Closes #6013 --- src/Npgsql/Internal/NpgsqlConnector.cs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 7f720f5882..44b9504c71 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -1217,6 +1217,9 @@ void Connect(NpgsqlTimeout timeout) try { + // Some options are not applied after the socket is open, see #6013 + SetSocketOptions(socket); + try { socket.Connect(endpoint); @@ -1235,7 +1238,6 @@ void Connect(NpgsqlTimeout timeout) if (write.Count is 0) throw new TimeoutException("Timeout during connection attempt"); socket.Blocking = true; - SetSocketOptions(socket); _socket = socket; ConnectedEndPoint = endpoint; return; @@ -1289,8 +1291,11 @@ Task GetHostAddressesAsync(CancellationToken ct) => var socket = new Socket(endpoint.AddressFamily, SocketType.Stream, protocolType); try { - await OpenSocketConnectionAsync(socket, endpoint, endpointTimeout, cancellationToken).ConfigureAwait(false); + // Some 
options are not applied after the socket is open, see #6013 SetSocketOptions(socket); + + await OpenSocketConnectionAsync(socket, endpoint, endpointTimeout, cancellationToken).ConfigureAwait(false); + _socket = socket; ConnectedEndPoint = endpoint; return; From 604036c68f820b56c6e4936c0f00ecad984fdc53 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 4 Sep 2025 09:16:28 +0200 Subject: [PATCH 549/761] Bump actions/setup-dotnet from 4.3.1 to 5.0.0 (#6182) --- .github/workflows/build.yml | 6 +++--- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/native-aot.yml | 4 ++-- .github/workflows/rich-code-nav.yml | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index b38261e467..6adbfffbaa 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -72,7 +72,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.3.1 + uses: actions/setup-dotnet@v5.0.0 - name: Build run: dotnet build -c ${{ matrix.config }} @@ -354,7 +354,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.3.1 + uses: actions/setup-dotnet@v5.0.0 - name: Pack run: dotnet pack --configuration Release --property:PackageOutputPath="$PWD/nupkgs" --version-suffix "ci.$(date -u +%Y%m%dT%H%M%S)+sha.${GITHUB_SHA:0:9}" -p:ContinuousIntegrationBuild=true @@ -386,7 +386,7 @@ jobs: uses: actions/checkout@v5 - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.3.1 + uses: actions/setup-dotnet@v5.0.0 - name: Pack run: dotnet pack --configuration Release --property:PackageOutputPath="$PWD/nupkgs" -p:ContinuousIntegrationBuild=true diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 4afb98c43a..5a465ad42e 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -65,7 +65,7 @@ jobs: # 
queries: ./path/to/local/query, your-org/your-repo/queries@main - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.3.1 + uses: actions/setup-dotnet@v5.0.0 - name: Build run: dotnet build -c Release diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index 6f590ed720..ecc57d51a8 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -107,7 +107,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.3.1 + uses: actions/setup-dotnet@v5.0.0 - name: Write script run: echo "$AOT_Compat" > test-aot-compatibility.ps1 @@ -141,7 +141,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.3.1 + uses: actions/setup-dotnet@v5.0.0 - name: Start PostgreSQL run: | diff --git a/.github/workflows/rich-code-nav.yml b/.github/workflows/rich-code-nav.yml index f1987462a4..363eaaeb5d 100644 --- a/.github/workflows/rich-code-nav.yml +++ b/.github/workflows/rich-code-nav.yml @@ -23,7 +23,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v4.3.1 + uses: actions/setup-dotnet@v5.0.0 - name: Build run: dotnet build --configuration Debug From 472259b05cd22edbb1dbdb040edd92f5cf778147 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Sep 2025 20:16:14 +0200 Subject: [PATCH 550/761] Bump BenchmarkDotNet from 0.13.12 to 0.15.2 (#6191) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 6f250d7c83..7e6ccaf1b8 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -37,7 +37,7 @@ - + From 9af3db4edd4a61733dd477bca00171a06b647ba8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Sep 2025 20:18:49 +0200 Subject: [PATCH 551/761] Bump GitHubActionsTestLogger from 2.3.3 to 
2.4.1 (#6192) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 7e6ccaf1b8..1d094ba34b 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -32,7 +32,7 @@ - + From 2108dfb8814634ca6dabe32325970ac9809d41c4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Sep 2025 23:07:38 +0200 Subject: [PATCH 552/761] Bump Microsoft.Data.SqlClient from 5.2.2 to 6.1.1 (#6196) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 1d094ba34b..6115c0a079 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -38,7 +38,7 @@ - + From ffc3fba1b4f611390b2b23c4ca78d86de89bd272 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emmanuel=20Andr=C3=A9?= <2341261+manandre@users.noreply.github.com> Date: Mon, 8 Sep 2025 23:11:43 +0200 Subject: [PATCH 553/761] Move to PublicApiAnalyzers v4 (#6185) --- Directory.Packages.props | 2 +- src/Npgsql/PublicAPI.Shipped.txt | 16 ++++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 6115c0a079..0206250cab 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -18,7 +18,7 @@ - + diff --git a/src/Npgsql/PublicAPI.Shipped.txt b/src/Npgsql/PublicAPI.Shipped.txt index 3ec604ddc0..246a515cc2 100644 --- a/src/Npgsql/PublicAPI.Shipped.txt +++ b/src/Npgsql/PublicAPI.Shipped.txt @@ -1228,6 +1228,8 @@ NpgsqlTypes.NpgsqlBox.Width.get -> double NpgsqlTypes.NpgsqlCidr NpgsqlTypes.NpgsqlCidr.Address.get -> System.Net.IPAddress! NpgsqlTypes.NpgsqlCidr.Deconstruct(out System.Net.IPAddress! 
address, out byte netmask) -> void +NpgsqlTypes.NpgsqlCidr.Equals(NpgsqlTypes.NpgsqlCidr other) -> bool +NpgsqlTypes.NpgsqlInet.Equals(NpgsqlTypes.NpgsqlInet other) -> bool NpgsqlTypes.NpgsqlCidr.Netmask.get -> byte NpgsqlTypes.NpgsqlCidr.NpgsqlCidr() -> void NpgsqlTypes.NpgsqlCidr.NpgsqlCidr(string! addr) -> void @@ -1793,10 +1795,12 @@ override Npgsql.Schema.NpgsqlDbColumn.this[string! propertyName].get -> object? override NpgsqlTypes.NpgsqlBox.Equals(object? obj) -> bool override NpgsqlTypes.NpgsqlBox.GetHashCode() -> int override NpgsqlTypes.NpgsqlBox.ToString() -> string! +override NpgsqlTypes.NpgsqlCidr.GetHashCode() -> int override NpgsqlTypes.NpgsqlCidr.ToString() -> string! override NpgsqlTypes.NpgsqlCircle.Equals(object? obj) -> bool override NpgsqlTypes.NpgsqlCircle.GetHashCode() -> int override NpgsqlTypes.NpgsqlCircle.ToString() -> string! +override NpgsqlTypes.NpgsqlInet.GetHashCode() -> int override NpgsqlTypes.NpgsqlInet.ToString() -> string! override NpgsqlTypes.NpgsqlInterval.Equals(object? obj) -> bool override NpgsqlTypes.NpgsqlInterval.GetHashCode() -> int @@ -1886,10 +1890,14 @@ static NpgsqlTypes.NpgsqlBox.operator !=(NpgsqlTypes.NpgsqlBox x, NpgsqlTypes.Np static NpgsqlTypes.NpgsqlBox.operator ==(NpgsqlTypes.NpgsqlBox x, NpgsqlTypes.NpgsqlBox y) -> bool static NpgsqlTypes.NpgsqlCidr.explicit operator System.Net.IPAddress!(NpgsqlTypes.NpgsqlCidr cidr) -> System.Net.IPAddress! 
static NpgsqlTypes.NpgsqlCidr.implicit operator NpgsqlTypes.NpgsqlInet(NpgsqlTypes.NpgsqlCidr cidr) -> NpgsqlTypes.NpgsqlInet +static NpgsqlTypes.NpgsqlCidr.operator !=(NpgsqlTypes.NpgsqlCidr left, NpgsqlTypes.NpgsqlCidr right) -> bool +static NpgsqlTypes.NpgsqlCidr.operator ==(NpgsqlTypes.NpgsqlCidr left, NpgsqlTypes.NpgsqlCidr right) -> bool static NpgsqlTypes.NpgsqlCircle.operator !=(NpgsqlTypes.NpgsqlCircle x, NpgsqlTypes.NpgsqlCircle y) -> bool static NpgsqlTypes.NpgsqlCircle.operator ==(NpgsqlTypes.NpgsqlCircle x, NpgsqlTypes.NpgsqlCircle y) -> bool static NpgsqlTypes.NpgsqlInet.explicit operator System.Net.IPAddress!(NpgsqlTypes.NpgsqlInet inet) -> System.Net.IPAddress! static NpgsqlTypes.NpgsqlInet.implicit operator NpgsqlTypes.NpgsqlInet(System.Net.IPAddress! ip) -> NpgsqlTypes.NpgsqlInet +static NpgsqlTypes.NpgsqlInet.operator !=(NpgsqlTypes.NpgsqlInet left, NpgsqlTypes.NpgsqlInet right) -> bool +static NpgsqlTypes.NpgsqlInet.operator ==(NpgsqlTypes.NpgsqlInet left, NpgsqlTypes.NpgsqlInet right) -> bool static NpgsqlTypes.NpgsqlLine.operator !=(NpgsqlTypes.NpgsqlLine x, NpgsqlTypes.NpgsqlLine y) -> bool static NpgsqlTypes.NpgsqlLine.operator ==(NpgsqlTypes.NpgsqlLine x, NpgsqlTypes.NpgsqlLine y) -> bool static NpgsqlTypes.NpgsqlLogSequenceNumber.explicit operator NpgsqlTypes.NpgsqlLogSequenceNumber(ulong value) -> NpgsqlTypes.NpgsqlLogSequenceNumber @@ -1934,5 +1942,13 @@ static NpgsqlTypes.NpgsqlTsVector.Parse(string! value) -> NpgsqlTypes.NpgsqlTsVe static readonly Npgsql.NpgsqlFactory.Instance -> Npgsql.NpgsqlFactory! static readonly NpgsqlTypes.NpgsqlLogSequenceNumber.Invalid -> NpgsqlTypes.NpgsqlLogSequenceNumber static readonly NpgsqlTypes.NpgsqlRange.Empty -> NpgsqlTypes.NpgsqlRange +virtual Npgsql.NoticeEventHandler.Invoke(object! sender, Npgsql.NpgsqlNoticeEventArgs! e) -> void +virtual Npgsql.NotificationEventHandler.Invoke(object! sender, Npgsql.NpgsqlNotificationEventArgs! 
e) -> void virtual Npgsql.NpgsqlCommand.Clone() -> Npgsql.NpgsqlCommand! +virtual Npgsql.NpgsqlRowUpdatedEventHandler.Invoke(object! sender, Npgsql.NpgsqlRowUpdatedEventArgs! e) -> void +virtual Npgsql.NpgsqlRowUpdatingEventHandler.Invoke(object! sender, Npgsql.NpgsqlRowUpdatingEventArgs! e) -> void +virtual Npgsql.ProvideClientCertificatesCallback.Invoke(System.Security.Cryptography.X509Certificates.X509CertificateCollection! certificates) -> void +virtual Npgsql.ProvidePasswordCallback.Invoke(string! host, int port, string! database, string! username) -> string! virtual Npgsql.Replication.PgOutput.ReplicationTuple.GetAsyncEnumerator(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Collections.Generic.IAsyncEnumerator! +~override NpgsqlTypes.NpgsqlCidr.Equals(object obj) -> bool +~override NpgsqlTypes.NpgsqlInet.Equals(object obj) -> bool From 53b6028d0e782d97ae56dd1db37d0d3333e873f7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emmanuel=20Andr=C3=A9?= <2341261+manandre@users.noreply.github.com> Date: Mon, 8 Sep 2025 23:31:44 +0200 Subject: [PATCH 554/761] Move to NUnit v4 (#6183) --- .github/workflows/build.yml | 2 +- Directory.Packages.props | 5 +- .../Npgsql.DependencyInjection.Tests.csproj | 4 + .../Npgsql.NativeAotTests.csproj | 6 + test/Npgsql.PluginTests/GeoJSONTests.cs | 4 +- .../Npgsql.PluginTests.csproj | 4 + test/Npgsql.Tests/AuthenticationTests.cs | 14 +- test/Npgsql.Tests/BatchTests.cs | 8 +- test/Npgsql.Tests/BugTests.cs | 6 +- test/Npgsql.Tests/CommandBuilderTests.cs | 2 +- test/Npgsql.Tests/CommandParameterTests.cs | 22 +- test/Npgsql.Tests/CommandTests.cs | 42 +- test/Npgsql.Tests/ConnectionTests.cs | 34 +- test/Npgsql.Tests/CopyTests.cs | 6 +- test/Npgsql.Tests/DataAdapterTests.cs | 118 +++--- test/Npgsql.Tests/DataSourceTests.cs | 10 +- test/Npgsql.Tests/DataTypeNameTests.cs | 2 +- .../DistributedTransactionTests.cs | 48 +-- test/Npgsql.Tests/ExceptionTests.cs | 16 +- 
test/Npgsql.Tests/FunctionTests.cs | 22 +- test/Npgsql.Tests/LargeObjectTests.cs | 10 +- test/Npgsql.Tests/MultipleHostsTests.cs | 30 +- test/Npgsql.Tests/NestedDataReaderTests.cs | 8 +- test/Npgsql.Tests/NotificationTests.cs | 12 +- test/Npgsql.Tests/Npgsql.Tests.csproj | 4 + .../NpgsqlParameterCollectionTests.cs | 46 +-- test/Npgsql.Tests/NpgsqlParameterTests.cs | 362 +++++++++--------- test/Npgsql.Tests/PgPassEntryTests.cs | 20 +- test/Npgsql.Tests/PoolTests.cs | 4 +- test/Npgsql.Tests/PrepareTests.cs | 8 +- test/Npgsql.Tests/Properties/AssemblyInfo.cs | 2 +- test/Npgsql.Tests/ReaderNewSchemaTests.cs | 6 +- test/Npgsql.Tests/ReaderOldSchemaTests.cs | 16 +- test/Npgsql.Tests/ReaderTests.cs | 52 +-- .../Replication/CommonReplicationTests.cs | 2 +- .../Replication/PgOutputReplicationTests.cs | 18 +- .../Replication/PhysicalReplicationTests.cs | 4 +- .../TestDecodingReplicationTests.cs | 2 +- test/Npgsql.Tests/SchemaTests.cs | 8 +- test/Npgsql.Tests/SecurityTests.cs | 18 +- .../SnakeCaseNameTranslatorTests.cs | 8 +- test/Npgsql.Tests/StoredProcedureTests.cs | 20 +- .../TaskTimeoutAndCancellationTest.cs | 6 +- test/Npgsql.Tests/TestUtil.cs | 2 +- test/Npgsql.Tests/TracingTests.cs | 16 +- test/Npgsql.Tests/Types/ArrayTests.cs | 6 +- test/Npgsql.Tests/Types/ByteaTests.cs | 6 +- .../Types/CompositeHandlerTests.Read.cs | 18 +- .../Types/CompositeHandlerTests.Write.cs | 16 +- test/Npgsql.Tests/Types/CompositeTests.cs | 4 +- test/Npgsql.Tests/Types/DateTimeTests.cs | 16 +- test/Npgsql.Tests/Types/EnumTests.cs | 8 +- .../Npgsql.Tests/Types/FullTextSearchTests.cs | 16 +- test/Npgsql.Tests/Types/InternalTypeTests.cs | 22 +- test/Npgsql.Tests/Types/JsonDynamicTests.cs | 10 +- test/Npgsql.Tests/Types/JsonPathTests.cs | 2 +- test/Npgsql.Tests/Types/MultirangeTests.cs | 6 +- test/Npgsql.Tests/Types/RangeTests.cs | 76 ++-- test/Npgsql.Tests/Types/RecordTests.cs | 12 +- test/Npgsql.Tests/Types/TextTests.cs | 8 +- test/Npgsql.Tests/TypesTests.cs | 56 +-- 61 files changed, 683 
insertions(+), 658 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 6adbfffbaa..f37d7e432f 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -310,7 +310,7 @@ jobs: # TODO: Once test/Npgsql.Specification.Tests work, switch to just testing on the solution - name: Test run: | - dotnet test -c ${{ matrix.config }} -f ${{ matrix.test_tfm }} test/Npgsql.Tests --logger "GitHubActions;report-warnings=false" + dotnet test -c ${{ matrix.config }} -f ${{ matrix.test_tfm }} test/Npgsql.Tests --logger "GitHubActions;report-warnings=false" --blame-hang-timeout 30s dotnet test -c ${{ matrix.config }} -f ${{ matrix.test_tfm }} test/Npgsql.DependencyInjection.Tests --logger "GitHubActions;report-warnings=false" shell: bash diff --git a/Directory.Packages.props b/Directory.Packages.props index 0206250cab..d4671ecb06 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -24,12 +24,13 @@ - + + - + diff --git a/test/Npgsql.DependencyInjection.Tests/Npgsql.DependencyInjection.Tests.csproj b/test/Npgsql.DependencyInjection.Tests/Npgsql.DependencyInjection.Tests.csproj index 5f0006d79c..2f1f442547 100644 --- a/test/Npgsql.DependencyInjection.Tests/Npgsql.DependencyInjection.Tests.csproj +++ b/test/Npgsql.DependencyInjection.Tests/Npgsql.DependencyInjection.Tests.csproj @@ -4,6 +4,10 @@ + + all + runtime; build; native; contentfiles; analyzers; buildtransitive + diff --git a/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj b/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj index 0757fb0dd6..7f9ce607ca 100644 --- a/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj +++ b/test/Npgsql.NativeAotTests/Npgsql.NativeAotTests.csproj @@ -18,4 +18,10 @@ + + + all + runtime; build; native; contentfiles; analyzers; buildtransitive + + diff --git a/test/Npgsql.PluginTests/GeoJSONTests.cs b/test/Npgsql.PluginTests/GeoJSONTests.cs index 0a421eee01..287c1277bc 100644 --- 
a/test/Npgsql.PluginTests/GeoJSONTests.cs +++ b/test/Npgsql.PluginTests/GeoJSONTests.cs @@ -304,7 +304,7 @@ public async Task Import_geometry(TestData data) await using var cmd = conn.CreateCommand(); cmd.CommandText = $"SELECT field FROM {table}"; await using var reader = await cmd.ExecuteReaderAsync(); - Assert.IsTrue(await reader.ReadAsync()); + Assert.That(await reader.ReadAsync()); var actual = reader.GetValue(0); Assert.That(actual, Is.EqualTo(data.Geometry)); } @@ -341,7 +341,7 @@ public async Task Import_big_geometry() await using var cmd = conn.CreateCommand(); cmd.CommandText = $"SELECT field FROM {table}"; await using var reader = await cmd.ExecuteReaderAsync(); - Assert.IsTrue(await reader.ReadAsync()); + Assert.That(await reader.ReadAsync()); var actual = reader.GetValue(0); Assert.That(actual, Is.EqualTo(geometry)); } diff --git a/test/Npgsql.PluginTests/Npgsql.PluginTests.csproj b/test/Npgsql.PluginTests/Npgsql.PluginTests.csproj index 30dfb8ea16..499373bc63 100644 --- a/test/Npgsql.PluginTests/Npgsql.PluginTests.csproj +++ b/test/Npgsql.PluginTests/Npgsql.PluginTests.csproj @@ -5,6 +5,10 @@ + + all + runtime; build; native; contentfiles; analyzers; buildtransitive + diff --git a/test/Npgsql.Tests/AuthenticationTests.cs b/test/Npgsql.Tests/AuthenticationTests.cs index 90bdb79f00..157b1ee287 100644 --- a/test/Npgsql.Tests/AuthenticationTests.cs +++ b/test/Npgsql.Tests/AuthenticationTests.cs @@ -66,7 +66,7 @@ public async Task Password_provider([Values]bool async) using var dataSource = dataSourceBuilder.Build(); using var conn = async ? await dataSource.OpenConnectionAsync() : dataSource.OpenConnection(); - Assert.True(async ? asyncProviderCalled : syncProviderCalled, "Password_provider not used"); + Assert.That(async ? 
asyncProviderCalled : syncProviderCalled, "Password_provider not used"); } [Test] @@ -418,7 +418,7 @@ public async Task ProvidePasswordCallback_is_used() using (var conn = new NpgsqlConnection(builder.ConnectionString) { ProvidePasswordCallback = ProvidePasswordCallback }) { conn.Open(); - Assert.True(getPasswordDelegateWasCalled, "ProvidePasswordCallback delegate not used"); + Assert.That(getPasswordDelegateWasCalled, "ProvidePasswordCallback delegate not used"); // Do this again, since with multiplexing the very first connection attempt is done via // the non-multiplexing path, to surface any exceptions. @@ -427,7 +427,7 @@ public async Task ProvidePasswordCallback_is_used() getPasswordDelegateWasCalled = false; conn.Open(); Assert.That(await conn.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1)); - Assert.True(getPasswordDelegateWasCalled, "ProvidePasswordCallback delegate not used"); + Assert.That(getPasswordDelegateWasCalled, "ProvidePasswordCallback delegate not used"); } string ProvidePasswordCallback(string host, int port, string database, string username) @@ -500,10 +500,10 @@ public void ProvidePasswordCallback_gets_correct_arguments() using (var conn = new NpgsqlConnection(builder.ConnectionString) { ProvidePasswordCallback = ProvidePasswordCallback }) { conn.Open(); - Assert.AreEqual(builder.Host, receivedHost); - Assert.AreEqual(builder.Port, receivedPort); - Assert.AreEqual(builder.Database, receivedDatabase); - Assert.AreEqual(builder.Username, receivedUsername); + Assert.That(receivedHost, Is.EqualTo(builder.Host)); + Assert.That(receivedPort, Is.EqualTo(builder.Port)); + Assert.That(receivedDatabase, Is.EqualTo(builder.Database)); + Assert.That(receivedUsername, Is.EqualTo(builder.Username)); } string ProvidePasswordCallback(string host, int port, string database, string username) diff --git a/test/Npgsql.Tests/BatchTests.cs b/test/Npgsql.Tests/BatchTests.cs index 4450635edb..960e6028f9 100644 --- a/test/Npgsql.Tests/BatchTests.cs +++ 
b/test/Npgsql.Tests/BatchTests.cs @@ -294,10 +294,10 @@ public async Task StatementOID() } [Test] - public void CanCreateParameter() => Assert.True(new NpgsqlBatchCommand().CanCreateParameter); + public void CanCreateParameter() => Assert.That(new NpgsqlBatchCommand().CanCreateParameter); [Test] - public void CreateParameter() => Assert.NotNull(new NpgsqlBatchCommand().CreateParameter()); + public void CreateParameter() => Assert.That(new NpgsqlBatchCommand().CreateParameter(), Is.Not.Null); #endregion NpgsqlBatchCommand @@ -702,7 +702,7 @@ await conn.ExecuteNonQueryAsync($@" // resources are referenced by the exception above, which is very likely to escape the using statement of the command. batch.Dispose(); var cmd2 = conn.CreateBatch(); - Assert.AreNotSame(cmd2, batch); + Assert.That(batch, Is.Not.SameAs(cmd2)); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/967")] @@ -731,7 +731,7 @@ await conn.ExecuteNonQueryAsync($@" // resources are referenced by the exception above, which is very likely to escape the using statement of the command. 
batch.Dispose(); var cmd2 = conn.CreateBatch(); - Assert.AreNotSame(cmd2, batch); + Assert.That(batch, Is.Not.SameAs(cmd2)); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4202")] diff --git a/test/Npgsql.Tests/BugTests.cs b/test/Npgsql.Tests/BugTests.cs index 2e3dfa97fc..8d46522c0f 100644 --- a/test/Npgsql.Tests/BugTests.cs +++ b/test/Npgsql.Tests/BugTests.cs @@ -1261,12 +1261,12 @@ public async Task Bug3649() using (var exporter = await conn.BeginBinaryExportAsync($"COPY {table} (value) TO STDIN (FORMAT binary)")) { await exporter.StartRowAsync(); - Assert.IsTrue(exporter.IsNull); + Assert.That(exporter.IsNull); await exporter.SkipAsync(); await exporter.StartRowAsync(); - Assert.AreEqual(1, await exporter.ReadAsync()); + Assert.That(await exporter.ReadAsync(), Is.EqualTo(1)); await exporter.StartRowAsync(); - Assert.AreEqual(2, await exporter.ReadAsync()); + Assert.That(await exporter.ReadAsync(), Is.EqualTo(2)); } } diff --git a/test/Npgsql.Tests/CommandBuilderTests.cs b/test/Npgsql.Tests/CommandBuilderTests.cs index a9fe980c5b..b47422e830 100644 --- a/test/Npgsql.Tests/CommandBuilderTests.cs +++ b/test/Npgsql.Tests/CommandBuilderTests.cs @@ -364,7 +364,7 @@ public async Task Get_update_command_with_column_aliases() using var cbCommandBuilder = new NpgsqlCommandBuilder(daDataAdapter); daDataAdapter.UpdateCommand = cbCommandBuilder.GetUpdateCommand(); - Assert.True(daDataAdapter.UpdateCommand.CommandText.Contains("SET \"cod\" = @p1, \"descr\" = @p2, \"data\" = @p3 WHERE ((\"cod\" = @p4) AND ((@p5 = 1 AND \"descr\" IS NULL) OR (\"descr\" = @p6)) AND ((@p7 = 1 AND \"data\" IS NULL) OR (\"data\" = @p8)))")); + Assert.That(daDataAdapter.UpdateCommand.CommandText.Contains("SET \"cod\" = @p1, \"descr\" = @p2, \"data\" = @p3 WHERE ((\"cod\" = @p4) AND ((@p5 = 1 AND \"descr\" IS NULL) OR (\"descr\" = @p6)) AND ((@p7 = 1 AND \"data\" IS NULL) OR (\"data\" = @p8)))")); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/2846")] diff --git 
a/test/Npgsql.Tests/CommandParameterTests.cs b/test/Npgsql.Tests/CommandParameterTests.cs index 6fc042bff9..3e758f2413 100644 --- a/test/Npgsql.Tests/CommandParameterTests.cs +++ b/test/Npgsql.Tests/CommandParameterTests.cs @@ -22,8 +22,8 @@ public async Task Input_and_output_parameters(CommandBehavior behavior) cmd.Parameters.Add(c); using (await cmd.ExecuteReaderAsync(behavior)) { - Assert.AreEqual(5, b.Value); - Assert.AreEqual(3, c.Value); + Assert.That(b.Value, Is.EqualTo(5)); + Assert.That(c.Value, Is.EqualTo(3)); } } @@ -128,20 +128,20 @@ public void Parameters_get_name() command.Parameters.Add(new NpgsqlParameter("Parameter4", DbType.DateTime)); var idbPrmtr = command.Parameters["Parameter1"]; - Assert.IsNotNull(idbPrmtr); + Assert.That(idbPrmtr, Is.Not.Null); command.Parameters[0].Value = 1; // Get by indexers. - Assert.AreEqual(":Parameter1", command.Parameters["Parameter1"].ParameterName); - Assert.AreEqual(":Parameter2", command.Parameters["Parameter2"].ParameterName); - Assert.AreEqual(":Parameter3", command.Parameters["Parameter3"].ParameterName); - Assert.AreEqual("Parameter4", command.Parameters["Parameter4"].ParameterName); //Should this work? + Assert.That(command.Parameters["Parameter1"].ParameterName, Is.EqualTo(":Parameter1")); + Assert.That(command.Parameters["Parameter2"].ParameterName, Is.EqualTo(":Parameter2")); + Assert.That(command.Parameters["Parameter3"].ParameterName, Is.EqualTo(":Parameter3")); + Assert.That(command.Parameters["Parameter4"].ParameterName, Is.EqualTo("Parameter4")); //Should this work? 
- Assert.AreEqual(":Parameter1", command.Parameters[0].ParameterName); - Assert.AreEqual(":Parameter2", command.Parameters[1].ParameterName); - Assert.AreEqual(":Parameter3", command.Parameters[2].ParameterName); - Assert.AreEqual("Parameter4", command.Parameters[3].ParameterName); + Assert.That(command.Parameters[0].ParameterName, Is.EqualTo(":Parameter1")); + Assert.That(command.Parameters[1].ParameterName, Is.EqualTo(":Parameter2")); + Assert.That(command.Parameters[2].ParameterName, Is.EqualTo(":Parameter3")); + Assert.That(command.Parameters[3].ParameterName, Is.EqualTo("Parameter4")); } [Test] diff --git a/test/Npgsql.Tests/CommandTests.cs b/test/Npgsql.Tests/CommandTests.cs index 8cc36df10a..a5fb272851 100644 --- a/test/Npgsql.Tests/CommandTests.cs +++ b/test/Npgsql.Tests/CommandTests.cs @@ -522,7 +522,7 @@ public async Task Cursor_statement() while (dr.Read()) i++; - Assert.AreEqual(3, i); + Assert.That(i, Is.EqualTo(3)); dr.Close(); i = 0; @@ -530,7 +530,7 @@ public async Task Cursor_statement() var dr2 = command.ExecuteReader(); while (dr2.Read()) i++; - Assert.AreEqual(1, i); + Assert.That(i, Is.EqualTo(1)); dr2.Close(); command.CommandText = "close te;"; @@ -546,7 +546,7 @@ public async Task Cursor_move_RecordsAffected() command.ExecuteNonQuery(); command.CommandText = "MOVE FORWARD ALL IN curs"; var count = command.ExecuteNonQuery(); - Assert.AreEqual(3, count); + Assert.That(count, Is.EqualTo(3)); } #endregion @@ -707,7 +707,7 @@ public async Task Parameter_and_operator_unclear() command.Parameters.AddWithValue(":arr", new int[] {5, 4, 3, 2, 1}); await using var rdr = await command.ExecuteReaderAsync(); rdr.Read(); - Assert.AreEqual(rdr.GetInt32(0), 4); + Assert.That(rdr.GetInt32(0), Is.EqualTo(4)); } [Test] @@ -760,15 +760,15 @@ public async Task Statement_mapped_output_parameters(CommandBehavior behavior) await using var reader = await command.ExecuteReaderAsync(behavior); - Assert.AreEqual(4, command.Parameters["param1"].Value); - 
Assert.AreEqual(5, command.Parameters["param2"].Value); + Assert.That(command.Parameters["param1"].Value, Is.EqualTo(4)); + Assert.That(command.Parameters["param2"].Value, Is.EqualTo(5)); reader.Read(); - Assert.AreEqual(3, reader.GetInt32(0)); - Assert.AreEqual(4, reader.GetInt32(1)); - Assert.AreEqual(5, reader.GetInt32(2)); - Assert.AreEqual(6, reader.GetInt32(3)); + Assert.That(reader.GetInt32(0), Is.EqualTo(3)); + Assert.That(reader.GetInt32(1), Is.EqualTo(4)); + Assert.That(reader.GetInt32(2), Is.EqualTo(5)); + Assert.That(reader.GetInt32(3), Is.EqualTo(6)); } [Test] @@ -799,8 +799,8 @@ public async Task Bug1006158_output_parameters() _ = await command.ExecuteScalarAsync(); - Assert.AreEqual(3, command.Parameters[0].Value); - Assert.AreEqual(true, command.Parameters[1].Value); + Assert.That(command.Parameters[0].Value, Is.EqualTo(3)); + Assert.That(command.Parameters[1].Value, Is.EqualTo(true)); } [Test] @@ -813,16 +813,16 @@ public async Task Bug1010788_UpdateRowSource() var table = await CreateTempTable(conn, "id SERIAL PRIMARY KEY, name TEXT"); var command = new NpgsqlCommand($"SELECT * FROM {table}", conn); - Assert.AreEqual(UpdateRowSource.Both, command.UpdatedRowSource); + Assert.That(command.UpdatedRowSource, Is.EqualTo(UpdateRowSource.Both)); var cmdBuilder = new NpgsqlCommandBuilder(); var da = new NpgsqlDataAdapter(command); cmdBuilder.DataAdapter = da; - Assert.IsNotNull(da.SelectCommand); - Assert.IsNotNull(cmdBuilder.DataAdapter); + Assert.That(da.SelectCommand, Is.Not.Null); + Assert.That(cmdBuilder.DataAdapter, Is.Not.Null); var updateCommand = cmdBuilder.GetUpdateCommand(); - Assert.AreEqual(UpdateRowSource.None, updateCommand.UpdatedRowSource); + Assert.That(updateCommand.UpdatedRowSource, Is.EqualTo(UpdateRowSource.None)); } [Test] @@ -1146,10 +1146,10 @@ public async Task ExecuteReader_Throws_PostgresException([Values] bool async) ? await cmd.ExecuteReaderAsync() : cmd.ExecuteReader(); - Assert.IsTrue(async ? 
await reader.ReadAsync() : reader.Read()); + Assert.That(async ? await reader.ReadAsync() : reader.Read()); var value = reader.GetInt32(0); Assert.That(value, Is.EqualTo(1)); - Assert.IsFalse(async ? await reader.ReadAsync() : reader.Read()); + Assert.That(async ? await reader.ReadAsync() : reader.Read(), Is.False); var ex = async ? Assert.ThrowsAsync(async () => await reader.NextResultAsync()) : Assert.Throws(() => reader.NextResult()); @@ -1503,8 +1503,8 @@ public async Task Not_cancel_prepended_query([Values] bool failPrependedQuery) var cancellationRequestTask = postmasterMock.WaitForCancellationRequest().AsTask(); // Give 1 second to make sure we didn't send cancellation request await Task.Delay(1000); - Assert.IsFalse(cancelTask.IsCompleted); - Assert.IsFalse(cancellationRequestTask.IsCompleted); + Assert.That(cancelTask.IsCompleted, Is.False); + Assert.That(cancellationRequestTask.IsCompleted, Is.False); if (failPrependedQuery) { @@ -1622,7 +1622,7 @@ await server await connection.CloseAsync(); await connection.OpenAsync(); - Assert.AreSame(connector, connection.Connector); + Assert.That(connection.Connector, Is.SameAs(connector)); // We'll get new value after the next query reads ParameterStatus from the buffer Assert.That(connection.PostgresParameters, Does.Not.ContainKey("SomeKey").WithValue("SomeValue")); diff --git a/test/Npgsql.Tests/ConnectionTests.cs b/test/Npgsql.Tests/ConnectionTests.cs index ac97daf101..cda220a110 100644 --- a/test/Npgsql.Tests/ConnectionTests.cs +++ b/test/Npgsql.Tests/ConnectionTests.cs @@ -111,7 +111,7 @@ public async Task Broken_lifecycle([Values] bool openFromClose) Assert.That(conn.State, Is.EqualTo(ConnectionState.Closed)); Assert.That(eventClosed, Is.True); Assert.That(conn.Connector is null); - Assert.AreEqual(0, conn.NpgsqlDataSource.Statistics.Total); + Assert.That(conn.NpgsqlDataSource.Statistics.Total, Is.EqualTo(0)); if (openFromClose) { @@ -123,8 +123,8 @@ public async Task Broken_lifecycle([Values] bool 
openFromClose) } Assert.DoesNotThrowAsync(conn.OpenAsync); - Assert.AreEqual(1, await conn.ExecuteScalarAsync("SELECT 1")); - Assert.AreEqual(1, conn.NpgsqlDataSource.Statistics.Total); + Assert.That(await conn.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1)); + Assert.That(conn.NpgsqlDataSource.Statistics.Total, Is.EqualTo(1)); Assert.DoesNotThrowAsync(conn.CloseAsync); } @@ -743,23 +743,23 @@ public async Task Set_Schemas_And_Load_Relevant_Types(string testSchema, string using var conn = await dataSource.OpenConnectionAsync(); if (enabled) { - Assert.True(dataSource.DatabaseInfo.CompositeTypes.Any(x => x.Name == "test_type_1")); + Assert.That(dataSource.DatabaseInfo.CompositeTypes.Any(x => x.Name == "test_type_1")); if (testSchema == "public" || otherSchema == "public") { - Assert.True(dataSource.DatabaseInfo.CompositeTypes.Any(x => x.Name == "test_type_2")); - Assert.True(dataSource.DatabaseInfo.CompositeTypes.Any(x => x.Name == "test_type_3")); + Assert.That(dataSource.DatabaseInfo.CompositeTypes.Any(x => x.Name == "test_type_2")); + Assert.That(dataSource.DatabaseInfo.CompositeTypes.Any(x => x.Name == "test_type_3")); } else { - Assert.True(dataSource.DatabaseInfo.CompositeTypes.Any(x => x.Name == "test_type_2")); - Assert.False(dataSource.DatabaseInfo.CompositeTypes.Any(x => x.Name == "test_type_3")); + Assert.That(dataSource.DatabaseInfo.CompositeTypes.Any(x => x.Name == "test_type_2")); + Assert.That(dataSource.DatabaseInfo.CompositeTypes.Any(x => x.Name == "test_type_3"), Is.False); } } else { - Assert.True(dataSource.DatabaseInfo.CompositeTypes.Any(x => x.Name == "test_type_1")); - Assert.True(dataSource.DatabaseInfo.CompositeTypes.Any(x => x.Name == "test_type_2")); - Assert.True(dataSource.DatabaseInfo.CompositeTypes.Any(x => x.Name == "test_type_3")); + Assert.That(dataSource.DatabaseInfo.CompositeTypes.Any(x => x.Name == "test_type_1")); + Assert.That(dataSource.DatabaseInfo.CompositeTypes.Any(x => x.Name == "test_type_2")); + 
Assert.That(dataSource.DatabaseInfo.CompositeTypes.Any(x => x.Name == "test_type_3")); } } finally @@ -945,7 +945,7 @@ public void Bug1011001() var cs1 = csb1.ToString(); var csb2 = new NpgsqlConnectionStringBuilder(cs1); var cs2 = csb2.ToString(); - Assert.IsTrue(cs1 == cs2); + Assert.That(cs1 == cs2); } [Test, IssueLink("https://github.com/npgsql/npgsql/pull/164")] @@ -953,7 +953,7 @@ public void Connection_State_is_Closed_when_disposed() { var c = new NpgsqlConnection(); c.Dispose(); - Assert.AreEqual(ConnectionState.Closed, c.State); + Assert.That(c.State, Is.EqualTo(ConnectionState.Closed)); } [Test] @@ -1131,9 +1131,9 @@ public async Task CloneWith_and_data_source_with_auth_callbacks([Values] bool as var sslClientAuthenticationOptions = new SslClientAuthenticationOptions(); clonedConnection.SslClientAuthenticationOptionsCallback!(sslClientAuthenticationOptions); - Assert.True(clientCertificatesCallbackCalled); + Assert.That(clientCertificatesCallbackCalled); sslClientAuthenticationOptions.RemoteCertificateValidationCallback!(null!, null, null, SslPolicyErrors.None); - Assert.True(userCertificateValidationCallbackCalled); + Assert.That(userCertificateValidationCallbackCalled); bool UserCertificateValidationCallback(object sender, X509Certificate? certificate, X509Chain? 
chain, SslPolicyErrors errors) => userCertificateValidationCallbackCalled = true; @@ -1359,7 +1359,7 @@ await adminConn.ExecuteNonQueryAsync( await using var cmd = conn.CreateCommand(); cmd.CommandText = "SELECT * FROM foo"; await using var reader = await cmd.ExecuteReaderAsync(); - Assert.IsTrue(await reader.ReadAsync()); + Assert.That(await reader.ReadAsync()); using (var textReader = await reader.GetTextReaderAsync(0)) Assert.That(textReader.ReadToEnd(), Is.EqualTo(value)); @@ -1555,7 +1555,7 @@ public async Task Sync_open_blocked_same_thread() foreach (var sameThreadTask in sameThreadTasks) { - Assert.IsTrue(await sameThreadTask, "Synchronous open completed on different thread"); + Assert.That(await sameThreadTask, "Synchronous open completed on different thread"); } } diff --git a/test/Npgsql.Tests/CopyTests.cs b/test/Npgsql.Tests/CopyTests.cs index 73a2591195..4cddb400eb 100644 --- a/test/Npgsql.Tests/CopyTests.cs +++ b/test/Npgsql.Tests/CopyTests.cs @@ -362,9 +362,9 @@ public async Task Import_numeric() await using var cmd = conn.CreateCommand(); cmd.CommandText = $"SELECT field FROM {table}"; await using var reader = await cmd.ExecuteReaderAsync(); - Assert.IsTrue(await reader.ReadAsync()); + Assert.That(await reader.ReadAsync()); Assert.That(reader.GetValue(0), Is.EqualTo(1234m)); - Assert.IsTrue(await reader.ReadAsync()); + Assert.That(await reader.ReadAsync()); Assert.That(reader.GetValue(0), Is.EqualTo(5678m)); } @@ -753,7 +753,7 @@ public async Task Export_long_string() { var str = reader.Read(); Assert.That(str.Length, Is.EqualTo(len)); - Assert.True(str.AsSpan().IndexOfAnyExcept('x') is -1); + Assert.That(str.AsSpan().IndexOfAnyExcept('x') is -1); } } Assert.That(row, Is.EqualTo(100)); diff --git a/test/Npgsql.Tests/DataAdapterTests.cs b/test/Npgsql.Tests/DataAdapterTests.cs index 016e01b6b3..3c91521ae1 100644 --- a/test/Npgsql.Tests/DataAdapterTests.cs +++ b/test/Npgsql.Tests/DataAdapterTests.cs @@ -92,8 +92,8 @@ public async Task 
Insert_with_DataSet() var dr2 = new NpgsqlCommand($"SELECT field_int2, field_numeric, field_timestamp FROM {table}", conn).ExecuteReader(); dr2.Read(); - Assert.AreEqual(4, dr2[0]); - Assert.AreEqual(7.3000000M, dr2[1]); + Assert.That(dr2[0], Is.EqualTo(4)); + Assert.That(dr2[1], Is.EqualTo(7.3000000M)); dr2.Close(); } @@ -137,7 +137,7 @@ public async Task DataAdapter_update_return_value() var ds2 = ds.GetChanges()!; var daupdate = da.Update(ds2); - Assert.AreEqual(2, daupdate); + Assert.That(daupdate, Is.EqualTo(2)); } [Test] @@ -166,7 +166,7 @@ public async Task DataAdapter_update_return_value2() //## update should fail, and make a DBConcurrencyException var count = da.Update(ds); //## count is 1, even if the isn't updated in the database - Assert.AreEqual(0, count); + Assert.That(count, Is.EqualTo(0)); } [Test] @@ -180,12 +180,12 @@ public async Task Fill_with_empty_resultset() da.Fill(ds); - Assert.AreEqual(1, ds.Tables.Count); - Assert.AreEqual(4, ds.Tables[0].Columns.Count); - Assert.AreEqual("field_serial", ds.Tables[0].Columns[0].ColumnName); - Assert.AreEqual("field_int2", ds.Tables[0].Columns[1].ColumnName); - Assert.AreEqual("field_timestamp", ds.Tables[0].Columns[2].ColumnName); - Assert.AreEqual("field_numeric", ds.Tables[0].Columns[3].ColumnName); + Assert.That(ds.Tables.Count, Is.EqualTo(1)); + Assert.That(ds.Tables[0].Columns.Count, Is.EqualTo(4)); + Assert.That(ds.Tables[0].Columns[0].ColumnName, Is.EqualTo("field_serial")); + Assert.That(ds.Tables[0].Columns[1].ColumnName, Is.EqualTo("field_int2")); + Assert.That(ds.Tables[0].Columns[2].ColumnName, Is.EqualTo("field_timestamp")); + Assert.That(ds.Tables[0].Columns[3].ColumnName, Is.EqualTo("field_numeric")); } [Test] @@ -206,33 +206,33 @@ public async Task Fill_add_with_key() var field_timestamp = ds.Tables[0].Columns[2]; var field_numeric = ds.Tables[0].Columns[3]; - Assert.IsFalse(field_serial.AllowDBNull); - Assert.IsTrue(field_serial.AutoIncrement); - Assert.AreEqual("field_serial", 
field_serial.ColumnName); - Assert.AreEqual(typeof(int), field_serial.DataType); - Assert.AreEqual(0, field_serial.Ordinal); - Assert.IsTrue(field_serial.Unique); - - Assert.IsTrue(field_int2.AllowDBNull); - Assert.IsFalse(field_int2.AutoIncrement); - Assert.AreEqual("field_int2", field_int2.ColumnName); - Assert.AreEqual(typeof(short), field_int2.DataType); - Assert.AreEqual(1, field_int2.Ordinal); - Assert.IsFalse(field_int2.Unique); - - Assert.IsTrue(field_timestamp.AllowDBNull); - Assert.IsFalse(field_timestamp.AutoIncrement); - Assert.AreEqual("field_timestamp", field_timestamp.ColumnName); - Assert.AreEqual(typeof(DateTime), field_timestamp.DataType); - Assert.AreEqual(2, field_timestamp.Ordinal); - Assert.IsFalse(field_timestamp.Unique); - - Assert.IsTrue(field_numeric.AllowDBNull); - Assert.IsFalse(field_numeric.AutoIncrement); - Assert.AreEqual("field_numeric", field_numeric.ColumnName); - Assert.AreEqual(typeof(decimal), field_numeric.DataType); - Assert.AreEqual(3, field_numeric.Ordinal); - Assert.IsFalse(field_numeric.Unique); + Assert.That(field_serial.AllowDBNull, Is.False); + Assert.That(field_serial.AutoIncrement); + Assert.That(field_serial.ColumnName, Is.EqualTo("field_serial")); + Assert.That(field_serial.DataType, Is.EqualTo(typeof(int))); + Assert.That(field_serial.Ordinal, Is.EqualTo(0)); + Assert.That(field_serial.Unique); + + Assert.That(field_int2.AllowDBNull); + Assert.That(field_int2.AutoIncrement, Is.False); + Assert.That(field_int2.ColumnName, Is.EqualTo("field_int2")); + Assert.That(field_int2.DataType, Is.EqualTo(typeof(short))); + Assert.That(field_int2.Ordinal, Is.EqualTo(1)); + Assert.That(field_int2.Unique, Is.False); + + Assert.That(field_timestamp.AllowDBNull); + Assert.That(field_timestamp.AutoIncrement, Is.False); + Assert.That(field_timestamp.ColumnName, Is.EqualTo("field_timestamp")); + Assert.That(field_timestamp.DataType, Is.EqualTo(typeof(DateTime))); + Assert.That(field_timestamp.Ordinal, Is.EqualTo(2)); + 
Assert.That(field_timestamp.Unique, Is.False); + + Assert.That(field_numeric.AllowDBNull); + Assert.That(field_numeric.AutoIncrement, Is.False); + Assert.That(field_numeric.ColumnName, Is.EqualTo("field_numeric")); + Assert.That(field_numeric.DataType, Is.EqualTo(typeof(decimal))); + Assert.That(field_numeric.Ordinal, Is.EqualTo(3)); + Assert.That(field_numeric.Unique, Is.False); } [Test] @@ -252,21 +252,21 @@ public async Task Fill_add_columns() var field_timestamp = ds.Tables[0].Columns[2]; var field_numeric = ds.Tables[0].Columns[3]; - Assert.AreEqual("field_serial", field_serial.ColumnName); - Assert.AreEqual(typeof(int), field_serial.DataType); - Assert.AreEqual(0, field_serial.Ordinal); + Assert.That(field_serial.ColumnName, Is.EqualTo("field_serial")); + Assert.That(field_serial.DataType, Is.EqualTo(typeof(int))); + Assert.That(field_serial.Ordinal, Is.EqualTo(0)); - Assert.AreEqual("field_int2", field_int2.ColumnName); - Assert.AreEqual(typeof(short), field_int2.DataType); - Assert.AreEqual(1, field_int2.Ordinal); + Assert.That(field_int2.ColumnName, Is.EqualTo("field_int2")); + Assert.That(field_int2.DataType, Is.EqualTo(typeof(short))); + Assert.That(field_int2.Ordinal, Is.EqualTo(1)); - Assert.AreEqual("field_timestamp", field_timestamp.ColumnName); - Assert.AreEqual(typeof(DateTime), field_timestamp.DataType); - Assert.AreEqual(2, field_timestamp.Ordinal); + Assert.That(field_timestamp.ColumnName, Is.EqualTo("field_timestamp")); + Assert.That(field_timestamp.DataType, Is.EqualTo(typeof(DateTime))); + Assert.That(field_timestamp.Ordinal, Is.EqualTo(2)); - Assert.AreEqual("field_numeric", field_numeric.ColumnName); - Assert.AreEqual(typeof(decimal), field_numeric.DataType); - Assert.AreEqual(3, field_numeric.Ordinal); + Assert.That(field_numeric.ColumnName, Is.EqualTo("field_numeric")); + Assert.That(field_numeric.DataType, Is.EqualTo(typeof(decimal))); + Assert.That(field_numeric.Ordinal, Is.EqualTo(3)); } [Test] @@ -302,7 +302,7 @@ public async Task 
Update_letting_null_field_falue() da.Fill(ds); var dt = ds.Tables[0]; - Assert.IsNotNull(dt); + Assert.That(dt, Is.Not.Null); var dr = ds.Tables[0].Rows[^1]; dr["field_int2"] = 4; @@ -314,7 +314,7 @@ public async Task Update_letting_null_field_falue() using var dr2 = new NpgsqlCommand($"SELECT field_int2 FROM {table}", conn).ExecuteReader(); dr2.Read(); - Assert.AreEqual(4, dr2["field_int2"]); + Assert.That(dr2["field_int2"], Is.EqualTo(4)); } [Test] @@ -343,12 +343,12 @@ public async Task DoUpdateWithDataSet() var ds = new DataSet(); var da = new NpgsqlDataAdapter($"select * from {table}", conn); var cb = new NpgsqlCommandBuilder(da); - Assert.IsNotNull(cb); + Assert.That(cb, Is.Not.Null); da.Fill(ds); var dt = ds.Tables[0]; - Assert.IsNotNull(dt); + Assert.That(dt, Is.Not.Null); var dr = ds.Tables[0].Rows[^1]; @@ -361,7 +361,7 @@ public async Task DoUpdateWithDataSet() using var dr2 = new NpgsqlCommand($"select * from {table}", conn).ExecuteReader(); dr2.Read(); - Assert.AreEqual(4, dr2["field_int2"]); + Assert.That(dr2["field_int2"], Is.EqualTo(4)); } [Test] @@ -374,7 +374,7 @@ public async Task Insert_with_CommandBuilder_case_sensitive() var ds = new DataSet(); var da = new NpgsqlDataAdapter($"select * from {table}", conn); var builder = new NpgsqlCommandBuilder(da); - Assert.IsNotNull(builder); + Assert.That(builder, Is.Not.Null); da.Fill(ds); @@ -390,7 +390,7 @@ public async Task Insert_with_CommandBuilder_case_sensitive() using var dr2 = new NpgsqlCommand($"select * from {table}", conn).ExecuteReader(); dr2.Read(); - Assert.AreEqual(4, dr2[1]); + Assert.That(dr2[1], Is.EqualTo(4)); } [Test] @@ -449,7 +449,7 @@ public async Task DataAdapter_command_access() var da = new NpgsqlDataAdapter(); da.SelectCommand = command; System.Data.Common.DbDataAdapter common = da; - Assert.IsNotNull(common.SelectCommand); + Assert.That(common.SelectCommand, Is.Not.Null); } [Test, Description("Makes sure that the INSERT/UPDATE/DELETE commands are auto-populated on 
NpgsqlDataAdapter")] @@ -532,8 +532,8 @@ public async Task Load_DataTable() dt.Load(dr); dr.Close(); - Assert.AreEqual(5, dt.Columns[0].MaxLength); - Assert.AreEqual(5, dt.Columns[1].MaxLength); + Assert.That(dt.Columns[0].MaxLength, Is.EqualTo(5)); + Assert.That(dt.Columns[1].MaxLength, Is.EqualTo(5)); } public Task SetupTempTable(NpgsqlConnection conn) diff --git a/test/Npgsql.Tests/DataSourceTests.cs b/test/Npgsql.Tests/DataSourceTests.cs index 7e33d00991..f7aa537dd9 100644 --- a/test/Npgsql.Tests/DataSourceTests.cs +++ b/test/Npgsql.Tests/DataSourceTests.cs @@ -76,7 +76,7 @@ public async Task ExecuteReader_on_connectionless_command([Values] bool async) await using (var reader = async ? await command.ExecuteReaderAsync() : command.ExecuteReader()) { - Assert.True(reader.Read()); + Assert.That(reader.Read()); Assert.That(reader.GetInt32(0), Is.EqualTo(1)); } @@ -125,10 +125,10 @@ public async Task ExecuteReader_on_connectionless_batch([Values] bool async) using (var reader = async ? await batch.ExecuteReaderAsync() : batch.ExecuteReader()) { - Assert.True(reader.Read()); + Assert.That(reader.Read()); Assert.That(reader.GetInt32(0), Is.EqualTo(1)); - Assert.True(reader.NextResult()); - Assert.True(reader.Read()); + Assert.That(reader.NextResult()); + Assert.That(reader.Read()); Assert.That(reader.GetInt32(0), Is.EqualTo(2)); } @@ -318,7 +318,7 @@ public async Task Multiplexing_connectionless_command_open_connection() command.CommandText = "SELECT 1"; await using var reader = await command.ExecuteReaderAsync(); - Assert.True(reader.Read()); + Assert.That(reader.Read()); Assert.That(reader.GetInt32(0), Is.EqualTo(1)); } diff --git a/test/Npgsql.Tests/DataTypeNameTests.cs b/test/Npgsql.Tests/DataTypeNameTests.cs index fd366d8258..7ca6c669ce 100644 --- a/test/Npgsql.Tests/DataTypeNameTests.cs +++ b/test/Npgsql.Tests/DataTypeNameTests.cs @@ -12,7 +12,7 @@ public void MaxLengthDataTypeName() var name = new string('a', DataTypeName.NAMEDATALEN); var 
fullyQualifiedDataTypeName= $"public.{name}"; Assert.DoesNotThrow(() => new DataTypeName(fullyQualifiedDataTypeName)); - Assert.AreEqual(new DataTypeName(fullyQualifiedDataTypeName).Value, fullyQualifiedDataTypeName); + Assert.That(fullyQualifiedDataTypeName, Is.EqualTo(new DataTypeName(fullyQualifiedDataTypeName).Value)); } [Test] diff --git a/test/Npgsql.Tests/DistributedTransactionTests.cs b/test/Npgsql.Tests/DistributedTransactionTests.cs index 157e5ac112..aab4447ff2 100644 --- a/test/Npgsql.Tests/DistributedTransactionTests.cs +++ b/test/Npgsql.Tests/DistributedTransactionTests.cs @@ -179,12 +179,12 @@ public void Transaction_race([Values(false, true)] bool distributed) } catch (Exception ex) { - Assert.Fail( - @"Failed at iteration {0}. -Events: -{1} -Exception {2}", - i, FormatEventQueue(eventQueue), ex); + Assert.Fail($""" + Failed at iteration {i}. + Events: + {FormatEventQueue(eventQueue)} + Exception {ex} + """); } } } @@ -233,12 +233,12 @@ public void Connection_reuse_race_after_transaction([Values(false, true)] bool d } catch (Exception ex) { - Assert.Fail( - @"Failed at iteration {0}. -Events: -{1} -Exception {2}", - i, FormatEventQueue(eventQueue), ex); + Assert.Fail($""" + Failed at iteration {i}. + Events: + {FormatEventQueue(eventQueue)} + Exception {ex} + """); } } } @@ -287,12 +287,12 @@ public void Connection_reuse_race_after_rollback([Values(false, true)] bool dist } catch (Exception ex) { - Assert.Fail( - @"Failed at iteration {0}. -Events: -{1} -Exception {2}", - i, FormatEventQueue(eventQueue), ex); + Assert.Fail($""" + Failed at iteration {i}. + Events: + {FormatEventQueue(eventQueue)} + Exception {ex} + """); } } } @@ -365,12 +365,12 @@ public void Connection_reuse_race_chaining_transaction([Values(false, true)] boo } catch (Exception ex) { - Assert.Fail( - @"Failed at iteration {0}. -Events: -{1} -Exception {2}", - i, FormatEventQueue(eventQueue), ex); + Assert.Fail($""" + Failed at iteration {i}. 
+ Events: + {FormatEventQueue(eventQueue)} + Exception {ex} + """); } } } diff --git a/test/Npgsql.Tests/ExceptionTests.cs b/test/Npgsql.Tests/ExceptionTests.cs index b58617ef52..ca667f8fd1 100644 --- a/test/Npgsql.Tests/ExceptionTests.cs +++ b/test/Npgsql.Tests/ExceptionTests.cs @@ -203,11 +203,11 @@ public void NpgsqlException_with_async() [Test] public void NpgsqlException_IsTransient() { - Assert.True(new NpgsqlException("", new IOException()).IsTransient); - Assert.True(new NpgsqlException("", new SocketException()).IsTransient); - Assert.True(new NpgsqlException("", new TimeoutException()).IsTransient); - Assert.False(new NpgsqlException().IsTransient); - Assert.False(new NpgsqlException("", new Exception("Inner Exception")).IsTransient); + Assert.That(new NpgsqlException("", new IOException()).IsTransient); + Assert.That(new NpgsqlException("", new SocketException()).IsTransient); + Assert.That(new NpgsqlException("", new TimeoutException()).IsTransient); + Assert.That(new NpgsqlException().IsTransient, Is.False); + Assert.That(new NpgsqlException("", new Exception("Inner Exception")).IsTransient, Is.False); } #if !NET9_0_OR_GREATER @@ -216,8 +216,8 @@ public void NpgsqlException_IsTransient() [Test] public void PostgresException_IsTransient() { - Assert.True(CreateWithSqlState("53300").IsTransient); - Assert.False(CreateWithSqlState("0").IsTransient); + Assert.That(CreateWithSqlState("53300").IsTransient); + Assert.That(CreateWithSqlState("0").IsTransient, Is.False); PostgresException CreateWithSqlState(string sqlState) { @@ -303,7 +303,7 @@ public void Base_exception_property_serialization() // Check virtual base properties, which can be incorrectly deserialized if overridden, because the base // Exception.GetObjectData() method writes the fields, not the properties (e.g. "_message" instead of "Message"). 
- Assert.That(ex.Data, Is.EquivalentTo((IDictionary?)info.GetValue("Data", typeof(IDictionary)))); + Assert.That(ex.Data, Is.EquivalentTo((IDictionary)info.GetValue("Data", typeof(IDictionary))!)); Assert.That(ex.HelpLink, Is.EqualTo(info.GetValue("HelpURL", typeof(string)))); Assert.That(ex.Message, Is.EqualTo(info.GetValue("Message", typeof(string)))); Assert.That(ex.Source, Is.EqualTo(info.GetValue("Source", typeof(string)))); diff --git a/test/Npgsql.Tests/FunctionTests.cs b/test/Npgsql.Tests/FunctionTests.cs index 9323dd2349..4c3b1e10aa 100644 --- a/test/Npgsql.Tests/FunctionTests.cs +++ b/test/Npgsql.Tests/FunctionTests.cs @@ -107,12 +107,12 @@ public async Task Named_parameters() command.Parameters.AddWithValue("sec", 4); var dt = (DateTime)(await command.ExecuteScalarAsync())!; - Assert.AreEqual(new DateTime(2015, 8, 1, 2, 3, 4), dt); + Assert.That(dt, Is.EqualTo(new DateTime(2015, 8, 1, 2, 3, 4))); command.Parameters[0].Value = 2014; command.Parameters[0].ParameterName = ""; // 2014 will be sent as a positional parameter dt = (DateTime)(await command.ExecuteScalarAsync())!; - Assert.AreEqual(new DateTime(2014, 8, 1, 2, 3, 4), dt); + Assert.That(dt, Is.EqualTo(new DateTime(2014, 8, 1, 2, 3, 4))); } [Test] @@ -174,7 +174,7 @@ public async Task CommandBehavior_SchemaOnly_support_function_call() var i = 0; while (dr.Read()) i++; - Assert.AreEqual(0, i); + Assert.That(i, Is.EqualTo(0)); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5820")] @@ -290,8 +290,8 @@ await conn.ExecuteNonQueryAsync( { await using var command = new NpgsqlCommand(@"""FunctionCaseSensitive""", conn) { CommandType = CommandType.StoredProcedure }; NpgsqlCommandBuilder.DeriveParameters(command); - Assert.AreEqual(NpgsqlDbType.Integer, command.Parameters[0].NpgsqlDbType); - Assert.AreEqual(NpgsqlDbType.Text, command.Parameters[1].NpgsqlDbType); + Assert.That(command.Parameters[0].NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Integer)); + Assert.That(command.Parameters[1].NpgsqlDbType, 
Is.EqualTo(NpgsqlDbType.Text)); } finally { @@ -310,8 +310,8 @@ public async Task DeriveParameters_quote_characters_in_function_name() { await using var command = new NpgsqlCommand(function, conn) { CommandType = CommandType.StoredProcedure }; NpgsqlCommandBuilder.DeriveParameters(command); - Assert.AreEqual(NpgsqlDbType.Integer, command.Parameters[0].NpgsqlDbType); - Assert.AreEqual(NpgsqlDbType.Text, command.Parameters[1].NpgsqlDbType); + Assert.That(command.Parameters[0].NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Integer)); + Assert.That(command.Parameters[1].NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Text)); } finally { @@ -330,8 +330,8 @@ await conn.ExecuteNonQueryAsync( { await using var command = new NpgsqlCommand(@"""My.Dotted.Function""", conn) { CommandType = CommandType.StoredProcedure }; NpgsqlCommandBuilder.DeriveParameters(command); - Assert.AreEqual(NpgsqlDbType.Integer, command.Parameters[0].NpgsqlDbType); - Assert.AreEqual(NpgsqlDbType.Text, command.Parameters[1].NpgsqlDbType); + Assert.That(command.Parameters[0].NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Integer)); + Assert.That(command.Parameters[1].NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Text)); } finally { @@ -349,8 +349,8 @@ await conn.ExecuteNonQueryAsync( $"CREATE FUNCTION {function}(x int, y int, out sum int, out product int) AS 'SELECT $1 + $2, $1 * $2' LANGUAGE sql"); await using var command = new NpgsqlCommand(function, conn) { CommandType = CommandType.StoredProcedure }; NpgsqlCommandBuilder.DeriveParameters(command); - Assert.AreEqual("x", command.Parameters[0].ParameterName); - Assert.AreEqual("y", command.Parameters[1].ParameterName); + Assert.That(command.Parameters[0].ParameterName, Is.EqualTo("x")); + Assert.That(command.Parameters[1].ParameterName, Is.EqualTo("y")); } [Test] diff --git a/test/Npgsql.Tests/LargeObjectTests.cs b/test/Npgsql.Tests/LargeObjectTests.cs index 3d11dfd7b1..a252385be4 100644 --- a/test/Npgsql.Tests/LargeObjectTests.cs +++ b/test/Npgsql.Tests/LargeObjectTests.cs @@ -24,23 
+24,23 @@ public void Test() stream.ReadExactly(buf2, 0, buf2.Length); Assert.That(buf.SequenceEqual(buf2)); - Assert.AreEqual(5, stream.Position); + Assert.That(stream.Position, Is.EqualTo(5)); - Assert.AreEqual(5, stream.Length); + Assert.That(stream.Length, Is.EqualTo(5)); stream.Seek(-1, System.IO.SeekOrigin.Current); - Assert.AreEqual((int)'o', stream.ReadByte()); + Assert.That(stream.ReadByte(), Is.EqualTo((int)'o')); manager.MaxTransferBlockSize = 3; stream.Write(buf, 0, buf.Length); stream.Seek(-5, System.IO.SeekOrigin.End); var buf3 = new byte[100]; - Assert.AreEqual(5, stream.Read(buf3, 0, 100)); + Assert.That(stream.Read(buf3, 0, 100), Is.EqualTo(5)); Assert.That(buf.SequenceEqual(buf3.Take(5))); stream.SetLength(43); - Assert.AreEqual(43, stream.Length); + Assert.That(stream.Length, Is.EqualTo(43)); } manager.Unlink(oid); diff --git a/test/Npgsql.Tests/MultipleHostsTests.cs b/test/Npgsql.Tests/MultipleHostsTests.cs index e09cbae401..f4026cc7f6 100644 --- a/test/Npgsql.Tests/MultipleHostsTests.cs +++ b/test/Npgsql.Tests/MultipleHostsTests.cs @@ -359,21 +359,21 @@ public async Task Connect_with_load_balancing() secondConnector = secondConnection.Connector!; } - Assert.AreNotSame(firstConnector, secondConnector); + Assert.That(secondConnector, Is.Not.SameAs(firstConnector)); await using (var firstBalancedConnection = await dataSource.OpenConnectionAsync()) { - Assert.AreSame(firstConnector, firstBalancedConnection.Connector); + Assert.That(firstBalancedConnection.Connector, Is.SameAs(firstConnector)); } await using (var secondBalancedConnection = await dataSource.OpenConnectionAsync()) { - Assert.AreSame(secondConnector, secondBalancedConnection.Connector); + Assert.That(secondBalancedConnection.Connector, Is.SameAs(secondConnector)); } await using (var thirdBalancedConnection = await dataSource.OpenConnectionAsync()) { - Assert.AreSame(firstConnector, thirdBalancedConnection.Connector); + Assert.That(thirdBalancedConnection.Connector, 
Is.SameAs(firstConnector)); } } @@ -403,7 +403,7 @@ public async Task Connect_without_load_balancing() } await using (var secondConnection = await dataSource.OpenConnectionAsync()) { - Assert.AreSame(firstConnector, secondConnection.Connector); + Assert.That(secondConnection.Connector, Is.SameAs(firstConnector)); } await using (var firstConnection = await dataSource.OpenConnectionAsync()) await using (var secondConnection = await dataSource.OpenConnectionAsync()) @@ -411,16 +411,16 @@ public async Task Connect_without_load_balancing() secondConnector = secondConnection.Connector!; } - Assert.AreNotSame(firstConnector, secondConnector); + Assert.That(secondConnector, Is.Not.SameAs(firstConnector)); await using (var firstUnbalancedConnection = await dataSource.OpenConnectionAsync()) { - Assert.AreSame(firstConnector, firstUnbalancedConnection.Connector); + Assert.That(firstUnbalancedConnection.Connector, Is.SameAs(firstConnector)); } await using (var secondUnbalancedConnection = await dataSource.OpenConnectionAsync()) { - Assert.AreSame(firstConnector, secondUnbalancedConnection.Connector); + Assert.That(secondUnbalancedConnection.Connector, Is.SameAs(firstConnector)); } } @@ -481,7 +481,7 @@ public async Task Connect_state_changing_hosts([Values] bool alwaysCheckHostStat } await using var thirdConnection = await dataSource.OpenConnectionAsync(TargetSessionAttributes.PreferPrimary); - Assert.AreSame(alwaysCheckHostState ? secondConnector : firstConnector, thirdConnection.Connector); + Assert.That(thirdConnection.Connector, Is.SameAs(alwaysCheckHostState ? 
secondConnector : firstConnector)); await firstServerTask; await secondServerTask; @@ -494,22 +494,22 @@ public void Database_state_cache_basic() var timeStamp = DateTime.UtcNow; dataSource.UpdateDatabaseState(DatabaseState.PrimaryReadWrite, timeStamp, TimeSpan.Zero); - Assert.AreEqual(DatabaseState.PrimaryReadWrite, dataSource.GetDatabaseState()); + Assert.That(dataSource.GetDatabaseState(), Is.EqualTo(DatabaseState.PrimaryReadWrite)); // Update with the same timestamp - shouldn't change anything dataSource.UpdateDatabaseState(DatabaseState.Standby, timeStamp, TimeSpan.Zero); - Assert.AreEqual(DatabaseState.PrimaryReadWrite, dataSource.GetDatabaseState()); + Assert.That(dataSource.GetDatabaseState(), Is.EqualTo(DatabaseState.PrimaryReadWrite)); // Update with a new timestamp timeStamp = timeStamp.AddSeconds(1); dataSource.UpdateDatabaseState(DatabaseState.PrimaryReadOnly, timeStamp, TimeSpan.Zero); - Assert.AreEqual(DatabaseState.PrimaryReadOnly, dataSource.GetDatabaseState()); + Assert.That(dataSource.GetDatabaseState(), Is.EqualTo(DatabaseState.PrimaryReadOnly)); // Expired state returns as Unknown (depending on ignoreExpiration) timeStamp = timeStamp.AddSeconds(1); dataSource.UpdateDatabaseState(DatabaseState.PrimaryReadWrite, timeStamp, TimeSpan.FromSeconds(-1)); - Assert.AreEqual(DatabaseState.Unknown, dataSource.GetDatabaseState(ignoreExpiration: false)); - Assert.AreEqual(DatabaseState.PrimaryReadWrite, dataSource.GetDatabaseState(ignoreExpiration: true)); + Assert.That(dataSource.GetDatabaseState(ignoreExpiration: false), Is.EqualTo(DatabaseState.Unknown)); + Assert.That(dataSource.GetDatabaseState(ignoreExpiration: true), Is.EqualTo(DatabaseState.PrimaryReadWrite)); } [Test] @@ -925,7 +925,7 @@ public void IntegrationTest([Values] bool loadBalancing, [Values] bool alwaysChe Assert.DoesNotThrowAsync(() => clientsTask); Assert.ThrowsAsync(() => onlyStandbyClient); Assert.ThrowsAsync(() => readOnlyClient); - Assert.AreEqual(125, queriesDone); + 
Assert.That(queriesDone, Is.EqualTo(125)); Task Client(NpgsqlMultiHostDataSource multiHostDataSource, TargetSessionAttributes targetSessionAttributes) { diff --git a/test/Npgsql.Tests/NestedDataReaderTests.cs b/test/Npgsql.Tests/NestedDataReaderTests.cs index 72553a6b5e..7e157c3426 100644 --- a/test/Npgsql.Tests/NestedDataReaderTests.cs +++ b/test/Npgsql.Tests/NestedDataReaderTests.cs @@ -199,15 +199,15 @@ public void GetBytes() Assert.That(nestedReader.GetBytes(0, 0, null, 0, 4), Is.EqualTo(3)); Assert.That(nestedReader.GetBytes(0, 0, buf, 0, 3), Is.EqualTo(3)); Assert.That(nestedReader.GetBytes(0, 0, buf, 0, 4), Is.EqualTo(3)); - CollectionAssert.AreEqual(new byte[] { 1, 2, 3, 0 }, buf); + Assert.That(buf, Is.EqualTo(new byte[] { 1, 2, 3, 0 }).AsCollection); buf = new byte[2]; Assert.That(nestedReader.GetBytes(0, 0, buf, 0, 2), Is.EqualTo(2)); - CollectionAssert.AreEqual(new byte[] { 1, 2 }, buf); + Assert.That(buf, Is.EqualTo(new byte[] { 1, 2 }).AsCollection); buf = new byte[2]; Assert.That(nestedReader.GetBytes(0, 1, buf, 1, 1), Is.EqualTo(1)); - CollectionAssert.AreEqual(new byte[] { 0, 2 }, buf); + Assert.That(buf, Is.EqualTo(new byte[] { 0, 2 }).AsCollection); Assert.That(nestedReader.GetBytes(0, 2, buf, 1, 1), Is.EqualTo(1)); - CollectionAssert.AreEqual(new byte[] { 0, 3 }, buf); + Assert.That(buf, Is.EqualTo(new byte[] { 0, 3 }).AsCollection); Assert.Throws(() => nestedReader.GetBytes(1, 0, buf, 0, 1)); Assert.Throws(() => nestedReader.GetBytes(0, 4, buf, 0, 1)); } diff --git a/test/Npgsql.Tests/NotificationTests.cs b/test/Npgsql.Tests/NotificationTests.cs index 9df9aba44d..5f6c11efcd 100644 --- a/test/Npgsql.Tests/NotificationTests.cs +++ b/test/Npgsql.Tests/NotificationTests.cs @@ -19,7 +19,7 @@ public void Notification() conn.ExecuteNonQuery($"LISTEN {notify}"); conn.Notification += (o, e) => receivedNotification = true; conn.ExecuteNonQuery($"NOTIFY {notify}"); - Assert.IsTrue(receivedNotification); + Assert.That(receivedNotification); } [Test, 
Description("Generates a notification that arrives after reader data that is already being read")] @@ -53,12 +53,12 @@ public async Task Notification_after_data() // Allow some time for the notification to get delivered await Task.Delay(2000); - Assert.IsTrue(reader.Read()); - Assert.AreEqual(1, reader.GetValue(0)); + Assert.That(reader.Read()); + Assert.That(reader.GetValue(0), Is.EqualTo(1)); } Assert.That(conn.ExecuteScalar("SELECT 1"), Is.EqualTo(1)); - Assert.IsTrue(receivedNotification); + Assert.That(receivedNotification); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1024")] @@ -73,7 +73,7 @@ public void Wait() notifyingConn.ExecuteNonQuery($"NOTIFY {notify}"); conn.Notification += (o, e) => receivedNotification = true; Assert.That(conn.Wait(0), Is.EqualTo(true)); - Assert.IsTrue(receivedNotification); + Assert.That(receivedNotification); Assert.That(conn.ExecuteScalar("SELECT 1"), Is.EqualTo(1)); } @@ -106,7 +106,7 @@ public async Task WaitAsync() await notifyingConn.ExecuteNonQueryAsync($"NOTIFY {notify}"); conn.Notification += (o, e) => receivedNotification = true; await conn.WaitAsync(0); - Assert.IsTrue(receivedNotification); + Assert.That(receivedNotification); Assert.That(await conn.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1)); } diff --git a/test/Npgsql.Tests/Npgsql.Tests.csproj b/test/Npgsql.Tests/Npgsql.Tests.csproj index 5d100b68b5..1c300f8215 100644 --- a/test/Npgsql.Tests/Npgsql.Tests.csproj +++ b/test/Npgsql.Tests/Npgsql.Tests.csproj @@ -5,6 +5,10 @@ + + all + runtime; build; native; contentfiles; analyzers; buildtransitive + diff --git a/test/Npgsql.Tests/NpgsqlParameterCollectionTests.cs b/test/Npgsql.Tests/NpgsqlParameterCollectionTests.cs index f6a188817b..901e34ece9 100644 --- a/test/Npgsql.Tests/NpgsqlParameterCollectionTests.cs +++ b/test/Npgsql.Tests/NpgsqlParameterCollectionTests.cs @@ -37,13 +37,13 @@ public void Clear() var c1 = new NpgsqlCommand(); var c2 = new NpgsqlCommand(); c1.Parameters.Add(p); - 
Assert.AreEqual(1, c1.Parameters.Count); - Assert.AreEqual(0, c2.Parameters.Count); + Assert.That(c1.Parameters.Count, Is.EqualTo(1)); + Assert.That(c2.Parameters.Count, Is.EqualTo(0)); c1.Parameters.Clear(); - Assert.AreEqual(0, c1.Parameters.Count); + Assert.That(c1.Parameters.Count, Is.EqualTo(0)); c2.Parameters.Add(p); - Assert.AreEqual(0, c1.Parameters.Count); - Assert.AreEqual(1, c2.Parameters.Count); + Assert.That(c1.Parameters.Count, Is.EqualTo(0)); + Assert.That(c2.Parameters.Count, Is.EqualTo(1)); } [Test] @@ -60,7 +60,7 @@ public void Hash_lookup_parameter_rename_bug() } // Make sure hash lookup is generated. - Assert.AreEqual(command.Parameters["p03"].ParameterName, "p03"); + Assert.That(command.Parameters["p03"].ParameterName, Is.EqualTo("p03")); // Rename the target parameter. command.Parameters["p03"].ParameterName = "a_new_name"; @@ -92,7 +92,7 @@ public void Hash_lookup_unnamed_parameter_rename_bug() } // Make sure hash lookup is generated. - Assert.AreEqual(command.Parameters["3"].ParameterName, "3"); + Assert.That(command.Parameters["3"].ParameterName, Is.EqualTo("3")); // Remove all parameters to clear hash lookup command.Parameters.Clear(); @@ -114,7 +114,7 @@ public void Remove_duplicate_parameter([Values(LookupThreshold, LookupThreshold } // Make sure lookup is generated. - Assert.AreEqual(command.Parameters["p02"].ParameterName, "p02"); + Assert.That(command.Parameters["p02"].ParameterName, Is.EqualTo("p02")); // Add uppercased version causing a list to be created. command.Parameters.AddWithValue("P02", NpgsqlDbType.Text, "String parameter value 2"); @@ -123,10 +123,10 @@ public void Remove_duplicate_parameter([Values(LookupThreshold, LookupThreshold command.Parameters.Remove(command.Parameters["p02"]); // Test whether we can still find the last added parameter, and if its index is correctly shifted in the lookup. 
- Assert.IsTrue(command.Parameters.IndexOf("p02") == count - 1); - Assert.IsTrue(command.Parameters.IndexOf("P02") == count - 1); + Assert.That(command.Parameters.IndexOf("p02") == count - 1); + Assert.That(command.Parameters.IndexOf("P02") == count - 1); // And finally test whether other parameters were also correctly shifted. - Assert.IsTrue(command.Parameters.IndexOf("p03") == 1); + Assert.That(command.Parameters.IndexOf("p03") == 1); } [Test] @@ -144,8 +144,8 @@ public void Remove_parameter([Values(LookupThreshold, LookupThreshold - 2)] int command.Parameters.Remove(command.Parameters["p02"]); // Make sure we cannot find it, also not case insensitively. - Assert.IsTrue(command.Parameters.IndexOf("p02") == -1); - Assert.IsTrue(command.Parameters.IndexOf("P02") == -1); + Assert.That(command.Parameters.IndexOf("p02") == -1); + Assert.That(command.Parameters.IndexOf("P02") == -1); } [Test] @@ -184,7 +184,7 @@ public void Correct_index_returned_for_duplicate_ParameterName([Values(LookupThr } // Make sure lookup is generated. - Assert.AreEqual(command.Parameters["parameter02"].ParameterName, "parameter02"); + Assert.That(command.Parameters["parameter02"].ParameterName, Is.EqualTo("parameter02")); // Add uppercased version. command.Parameters.AddWithValue("Parameter02", NpgsqlDbType.Text, "String parameter value 2"); @@ -193,14 +193,14 @@ public void Correct_index_returned_for_duplicate_ParameterName([Values(LookupThr command.Parameters.Insert(0, new NpgsqlParameter("ParameteR02", NpgsqlDbType.Text) { Value = "String parameter value 2" }); // Try to find the exact index. 
- Assert.IsTrue(command.Parameters.IndexOf("parameter02") == 2); - Assert.IsTrue(command.Parameters.IndexOf("Parameter02") == command.Parameters.Count - 1); - Assert.IsTrue(command.Parameters.IndexOf("ParameteR02") == 0); + Assert.That(command.Parameters.IndexOf("parameter02") == 2); + Assert.That(command.Parameters.IndexOf("Parameter02") == command.Parameters.Count - 1); + Assert.That(command.Parameters.IndexOf("ParameteR02") == 0); // This name does not exist so we expect the first case insensitive match to be returned. - Assert.IsTrue(command.Parameters.IndexOf("ParaMeteR02") == 0); + Assert.That(command.Parameters.IndexOf("ParaMeteR02") == 0); // And finally test whether other parameters were also correctly shifted. - Assert.IsTrue(command.Parameters.IndexOf("parameter03") == 3); + Assert.That(command.Parameters.IndexOf("parameter03") == 3); } [Test] @@ -345,8 +345,8 @@ public void Clean_name() param.ParameterName = null; // These should not throw exceptions - Assert.AreEqual(0, command.Parameters.IndexOf(param.ParameterName)); - Assert.AreEqual(NpgsqlParameter.PositionalName, param.ParameterName); + Assert.That(command.Parameters.IndexOf(param.ParameterName), Is.EqualTo(0)); + Assert.That(param.ParameterName, Is.EqualTo(NpgsqlParameter.PositionalName)); } [Test] @@ -354,10 +354,10 @@ public void Clone_sets_correct_collection() { var cmd = new NpgsqlCommand(); cmd.Parameters.Add(new NpgsqlParameter { TypedValue = 42 }); - Assert.AreSame(cmd.Parameters, cmd.Parameters.Single().Collection); + Assert.That(cmd.Parameters.Single().Collection, Is.SameAs(cmd.Parameters)); cmd = cmd.Clone(); - Assert.AreSame(cmd.Parameters, cmd.Parameters.Single().Collection); + Assert.That(cmd.Parameters.Single().Collection, Is.SameAs(cmd.Parameters)); } diff --git a/test/Npgsql.Tests/NpgsqlParameterTests.cs b/test/Npgsql.Tests/NpgsqlParameterTests.cs index 9a4610aadd..4965491c82 100644 --- a/test/Npgsql.Tests/NpgsqlParameterTests.cs +++ b/test/Npgsql.Tests/NpgsqlParameterTests.cs @@ 
-147,17 +147,17 @@ public void Setting_value_does_not_change_DbType() public void Constructor1() { var p = new NpgsqlParameter(); - Assert.AreEqual(DbType.Object, p.DbType, "DbType"); - Assert.AreEqual(ParameterDirection.Input, p.Direction, "Direction"); - Assert.IsFalse(p.IsNullable, "IsNullable"); - Assert.AreEqual(string.Empty, p.ParameterName, "ParameterName"); - Assert.AreEqual(0, p.Precision, "Precision"); - Assert.AreEqual(0, p.Scale, "Scale"); - Assert.AreEqual(0, p.Size, "Size"); - Assert.AreEqual(string.Empty, p.SourceColumn, "SourceColumn"); - Assert.AreEqual(DataRowVersion.Current, p.SourceVersion, "SourceVersion"); - Assert.AreEqual(NpgsqlDbType.Unknown, p.NpgsqlDbType, "NpgsqlDbType"); - Assert.IsNull(p.Value, "Value"); + Assert.That(p.DbType, Is.EqualTo(DbType.Object), "DbType"); + Assert.That(p.Direction, Is.EqualTo(ParameterDirection.Input), "Direction"); + Assert.That(p.IsNullable, Is.False, "IsNullable"); + Assert.That(p.ParameterName, Is.Empty, "ParameterName"); + Assert.That(p.Precision, Is.EqualTo(0), "Precision"); + Assert.That(p.Scale, Is.EqualTo(0), "Scale"); + Assert.That(p.Size, Is.EqualTo(0), "Size"); + Assert.That(p.SourceColumn, Is.Empty, "SourceColumn"); + Assert.That(p.SourceVersion, Is.EqualTo(DataRowVersion.Current), "SourceVersion"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Unknown), "NpgsqlDbType"); + Assert.That(p.Value, Is.Null, "Value"); } [Test] @@ -166,51 +166,51 @@ public void Constructor2_Value_DateTime() var value = new DateTime(2004, 8, 24); var p = new NpgsqlParameter("address", value); - Assert.AreEqual(DbType.DateTime2, p.DbType, "B:DbType"); - Assert.AreEqual(ParameterDirection.Input, p.Direction, "B:Direction"); - Assert.IsFalse(p.IsNullable, "B:IsNullable"); - Assert.AreEqual("address", p.ParameterName, "B:ParameterName"); - Assert.AreEqual(0, p.Precision, "B:Precision"); - Assert.AreEqual(0, p.Scale, "B:Scale"); + Assert.That(p.DbType, Is.EqualTo(DbType.DateTime2), "B:DbType"); + 
Assert.That(p.Direction, Is.EqualTo(ParameterDirection.Input), "B:Direction"); + Assert.That(p.IsNullable, Is.False, "B:IsNullable"); + Assert.That(p.ParameterName, Is.EqualTo("address"), "B:ParameterName"); + Assert.That(p.Precision, Is.EqualTo(0), "B:Precision"); + Assert.That(p.Scale, Is.EqualTo(0), "B:Scale"); //Assert.AreEqual (0, p.Size, "B:Size"); - Assert.AreEqual(string.Empty, p.SourceColumn, "B:SourceColumn"); - Assert.AreEqual(DataRowVersion.Current, p.SourceVersion, "B:SourceVersion"); - Assert.AreEqual(NpgsqlDbType.Timestamp, p.NpgsqlDbType, "B:NpgsqlDbType"); - Assert.AreEqual(value, p.Value, "B:Value"); + Assert.That(p.SourceColumn, Is.Empty, "B:SourceColumn"); + Assert.That(p.SourceVersion, Is.EqualTo(DataRowVersion.Current), "B:SourceVersion"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Timestamp), "B:NpgsqlDbType"); + Assert.That(p.Value, Is.EqualTo(value), "B:Value"); } [Test] public void Constructor2_Value_DBNull() { var p = new NpgsqlParameter("address", DBNull.Value); - Assert.AreEqual(DbType.Object, p.DbType, "B:DbType"); - Assert.AreEqual(ParameterDirection.Input, p.Direction, "B:Direction"); - Assert.IsFalse(p.IsNullable, "B:IsNullable"); - Assert.AreEqual("address", p.ParameterName, "B:ParameterName"); - Assert.AreEqual(0, p.Precision, "B:Precision"); - Assert.AreEqual(0, p.Scale, "B:Scale"); - Assert.AreEqual(0, p.Size, "B:Size"); - Assert.AreEqual(string.Empty, p.SourceColumn, "B:SourceColumn"); - Assert.AreEqual(DataRowVersion.Current, p.SourceVersion, "B:SourceVersion"); - Assert.AreEqual(NpgsqlDbType.Unknown, p.NpgsqlDbType, "B:NpgsqlDbType"); - Assert.AreEqual(DBNull.Value, p.Value, "B:Value"); + Assert.That(p.DbType, Is.EqualTo(DbType.Object), "B:DbType"); + Assert.That(p.Direction, Is.EqualTo(ParameterDirection.Input), "B:Direction"); + Assert.That(p.IsNullable, Is.False, "B:IsNullable"); + Assert.That(p.ParameterName, Is.EqualTo("address"), "B:ParameterName"); + Assert.That(p.Precision, Is.EqualTo(0), "B:Precision"); + 
Assert.That(p.Scale, Is.EqualTo(0), "B:Scale"); + Assert.That(p.Size, Is.EqualTo(0), "B:Size"); + Assert.That(p.SourceColumn, Is.Empty, "B:SourceColumn"); + Assert.That(p.SourceVersion, Is.EqualTo(DataRowVersion.Current), "B:SourceVersion"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Unknown), "B:NpgsqlDbType"); + Assert.That(p.Value, Is.EqualTo(DBNull.Value), "B:Value"); } [Test] public void Constructor2_Value_null() { var p = new NpgsqlParameter("address", null); - Assert.AreEqual(DbType.Object, p.DbType, "A:DbType"); - Assert.AreEqual(ParameterDirection.Input, p.Direction, "A:Direction"); - Assert.IsFalse(p.IsNullable, "A:IsNullable"); - Assert.AreEqual("address", p.ParameterName, "A:ParameterName"); - Assert.AreEqual(0, p.Precision, "A:Precision"); - Assert.AreEqual(0, p.Scale, "A:Scale"); - Assert.AreEqual(0, p.Size, "A:Size"); - Assert.AreEqual(string.Empty, p.SourceColumn, "A:SourceColumn"); - Assert.AreEqual(DataRowVersion.Current, p.SourceVersion, "A:SourceVersion"); - Assert.AreEqual(NpgsqlDbType.Unknown, p.NpgsqlDbType, "A:NpgsqlDbType"); - Assert.IsNull(p.Value, "A:Value"); + Assert.That(p.DbType, Is.EqualTo(DbType.Object), "A:DbType"); + Assert.That(p.Direction, Is.EqualTo(ParameterDirection.Input), "A:Direction"); + Assert.That(p.IsNullable, Is.False, "A:IsNullable"); + Assert.That(p.ParameterName, Is.EqualTo("address"), "A:ParameterName"); + Assert.That(p.Precision, Is.EqualTo(0), "A:Precision"); + Assert.That(p.Scale, Is.EqualTo(0), "A:Scale"); + Assert.That(p.Size, Is.EqualTo(0), "A:Size"); + Assert.That(p.SourceColumn, Is.Empty, "A:SourceColumn"); + Assert.That(p.SourceVersion, Is.EqualTo(DataRowVersion.Current), "A:SourceVersion"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Unknown), "A:NpgsqlDbType"); + Assert.That(p.Value, Is.Null, "A:Value"); } [Test] @@ -220,20 +220,20 @@ public void Constructor7() var p1 = new NpgsqlParameter("p1Name", NpgsqlDbType.Varchar, 20, "srcCol", ParameterDirection.InputOutput, false, 0, 0, 
DataRowVersion.Original, "foo"); - Assert.AreEqual(DbType.String, p1.DbType, "DbType"); - Assert.AreEqual(ParameterDirection.InputOutput, p1.Direction, "Direction"); - Assert.AreEqual(false, p1.IsNullable, "IsNullable"); + Assert.That(p1.DbType, Is.EqualTo(DbType.String), "DbType"); + Assert.That(p1.Direction, Is.EqualTo(ParameterDirection.InputOutput), "Direction"); + Assert.That(p1.IsNullable, Is.EqualTo(false), "IsNullable"); //Assert.AreEqual (999, p1.LocaleId, "#"); - Assert.AreEqual("p1Name", p1.ParameterName, "ParameterName"); - Assert.AreEqual(0, p1.Precision, "Precision"); - Assert.AreEqual(0, p1.Scale, "Scale"); - Assert.AreEqual(20, p1.Size, "Size"); - Assert.AreEqual("srcCol", p1.SourceColumn, "SourceColumn"); - Assert.AreEqual(false, p1.SourceColumnNullMapping, "SourceColumnNullMapping"); - Assert.AreEqual(DataRowVersion.Original, p1.SourceVersion, "SourceVersion"); - Assert.AreEqual(NpgsqlDbType.Varchar, p1.NpgsqlDbType, "NpgsqlDbType"); + Assert.That(p1.ParameterName, Is.EqualTo("p1Name"), "ParameterName"); + Assert.That(p1.Precision, Is.EqualTo(0), "Precision"); + Assert.That(p1.Scale, Is.EqualTo(0), "Scale"); + Assert.That(p1.Size, Is.EqualTo(20), "Size"); + Assert.That(p1.SourceColumn, Is.EqualTo("srcCol"), "SourceColumn"); + Assert.That(p1.SourceColumnNullMapping, Is.EqualTo(false), "SourceColumnNullMapping"); + Assert.That(p1.SourceVersion, Is.EqualTo(DataRowVersion.Original), "SourceVersion"); + Assert.That(p1.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Varchar), "NpgsqlDbType"); //Assert.AreEqual (3210, p1.NpgsqlValue, "#"); - Assert.AreEqual("foo", p1.Value, "Value"); + Assert.That(p1.Value, Is.EqualTo("foo"), "Value"); //Assert.AreEqual ("database", p1.XmlSchemaCollectionDatabase, "XmlSchemaCollectionDatabase"); //Assert.AreEqual ("name", p1.XmlSchemaCollectionName, "XmlSchemaCollectionName"); //Assert.AreEqual ("schema", p1.XmlSchemaCollectionOwningSchema, "XmlSchemaCollectionOwningSchema"); @@ -263,22 +263,22 @@ public void Clone() }; var actual 
= expected.Clone(); - Assert.AreEqual(expected.Value, actual.Value); - Assert.AreEqual(expected.ParameterName, actual.ParameterName); + Assert.That(actual.Value, Is.EqualTo(expected.Value)); + Assert.That(actual.ParameterName, Is.EqualTo(expected.ParameterName)); - Assert.AreEqual(expected.DbType, actual.DbType); - Assert.AreEqual(expected.NpgsqlDbType, actual.NpgsqlDbType); - Assert.AreEqual(expected.DataTypeName, actual.DataTypeName); + Assert.That(actual.DbType, Is.EqualTo(expected.DbType)); + Assert.That(actual.NpgsqlDbType, Is.EqualTo(expected.NpgsqlDbType)); + Assert.That(actual.DataTypeName, Is.EqualTo(expected.DataTypeName)); - Assert.AreEqual(expected.Direction, actual.Direction); - Assert.AreEqual(expected.IsNullable, actual.IsNullable); - Assert.AreEqual(expected.Precision, actual.Precision); - Assert.AreEqual(expected.Scale, actual.Scale); - Assert.AreEqual(expected.Size, actual.Size); + Assert.That(actual.Direction, Is.EqualTo(expected.Direction)); + Assert.That(actual.IsNullable, Is.EqualTo(expected.IsNullable)); + Assert.That(actual.Precision, Is.EqualTo(expected.Precision)); + Assert.That(actual.Scale, Is.EqualTo(expected.Scale)); + Assert.That(actual.Size, Is.EqualTo(expected.Size)); - Assert.AreEqual(expected.SourceVersion, actual.SourceVersion); - Assert.AreEqual(expected.SourceColumn, actual.SourceColumn); - Assert.AreEqual(expected.SourceColumnNullMapping, actual.SourceColumnNullMapping); + Assert.That(actual.SourceVersion, Is.EqualTo(expected.SourceVersion)); + Assert.That(actual.SourceColumn, Is.EqualTo(expected.SourceColumn)); + Assert.That(actual.SourceColumnNullMapping, Is.EqualTo(expected.SourceColumnNullMapping)); } [Test] @@ -305,23 +305,23 @@ public void Clone_generic() }; var actual = (NpgsqlParameter)expected.Clone(); - Assert.AreEqual(expected.Value, actual.Value); - Assert.AreEqual(expected.TypedValue, actual.TypedValue); - Assert.AreEqual(expected.ParameterName, actual.ParameterName); + Assert.That(actual.Value, 
Is.EqualTo(expected.Value)); + Assert.That(actual.TypedValue, Is.EqualTo(expected.TypedValue)); + Assert.That(actual.ParameterName, Is.EqualTo(expected.ParameterName)); - Assert.AreEqual(expected.DbType, actual.DbType); - Assert.AreEqual(expected.NpgsqlDbType, actual.NpgsqlDbType); - Assert.AreEqual(expected.DataTypeName, actual.DataTypeName); + Assert.That(actual.DbType, Is.EqualTo(expected.DbType)); + Assert.That(actual.NpgsqlDbType, Is.EqualTo(expected.NpgsqlDbType)); + Assert.That(actual.DataTypeName, Is.EqualTo(expected.DataTypeName)); - Assert.AreEqual(expected.Direction, actual.Direction); - Assert.AreEqual(expected.IsNullable, actual.IsNullable); - Assert.AreEqual(expected.Precision, actual.Precision); - Assert.AreEqual(expected.Scale, actual.Scale); - Assert.AreEqual(expected.Size, actual.Size); + Assert.That(actual.Direction, Is.EqualTo(expected.Direction)); + Assert.That(actual.IsNullable, Is.EqualTo(expected.IsNullable)); + Assert.That(actual.Precision, Is.EqualTo(expected.Precision)); + Assert.That(actual.Scale, Is.EqualTo(expected.Scale)); + Assert.That(actual.Size, Is.EqualTo(expected.Size)); - Assert.AreEqual(expected.SourceVersion, actual.SourceVersion); - Assert.AreEqual(expected.SourceColumn, actual.SourceColumn); - Assert.AreEqual(expected.SourceColumnNullMapping, actual.SourceColumnNullMapping); + Assert.That(actual.SourceVersion, Is.EqualTo(expected.SourceVersion)); + Assert.That(actual.SourceColumn, Is.EqualTo(expected.SourceColumn)); + Assert.That(actual.SourceColumnNullMapping, Is.EqualTo(expected.SourceColumnNullMapping)); } #endregion @@ -356,10 +356,10 @@ public void InferType_invalid_throws() catch (ArgumentException ex) { // The parameter data type of ... 
is invalid - Assert.AreEqual(typeof(ArgumentException), ex.GetType(), "#A2"); - Assert.IsNull(ex.InnerException, "#A3"); - Assert.IsNotNull(ex.Message, "#A4"); - Assert.IsNull(ex.ParamName, "#A5"); + Assert.That(ex.GetType(), Is.EqualTo(typeof(ArgumentException)), "#A2"); + Assert.That(ex.InnerException, Is.Null, "#A3"); + Assert.That(ex.Message, Is.Not.Null, "#A4"); + Assert.That(ex.ParamName, Is.Null, "#A5"); } } } @@ -368,14 +368,14 @@ public void InferType_invalid_throws() public void Parameter_null() { var param = new NpgsqlParameter("param", NpgsqlDbType.Numeric); - Assert.AreEqual(0, param.Scale, "#A1"); + Assert.That(param.Scale, Is.EqualTo(0), "#A1"); param.Value = DBNull.Value; - Assert.AreEqual(0, param.Scale, "#A2"); + Assert.That(param.Scale, Is.EqualTo(0), "#A2"); param = new NpgsqlParameter("param", NpgsqlDbType.Integer); - Assert.AreEqual(0, param.Scale, "#B1"); + Assert.That(param.Scale, Is.EqualTo(0), "#B1"); param.Value = DBNull.Value; - Assert.AreEqual(0, param.Scale, "#B2"); + Assert.That(param.Scale, Is.EqualTo(0), "#B2"); } [Test] @@ -388,53 +388,53 @@ public void Parameter_type() // assigned. The Type should be inferred everytime Value is assigned // If value is null or DBNull, then the current Type should be reset to Text. 
p = new NpgsqlParameter(); - Assert.AreEqual(DbType.String, p.DbType, "#A1"); - Assert.AreEqual(NpgsqlDbType.Text, p.NpgsqlDbType, "#A2"); + Assert.That(p.DbType, Is.EqualTo(DbType.String), "#A1"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Text), "#A2"); p.Value = DBNull.Value; - Assert.AreEqual(DbType.String, p.DbType, "#B1"); - Assert.AreEqual(NpgsqlDbType.Text, p.NpgsqlDbType, "#B2"); + Assert.That(p.DbType, Is.EqualTo(DbType.String), "#B1"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Text), "#B2"); p.Value = 1; - Assert.AreEqual(DbType.Int32, p.DbType, "#C1"); - Assert.AreEqual(NpgsqlDbType.Integer, p.NpgsqlDbType, "#C2"); + Assert.That(p.DbType, Is.EqualTo(DbType.Int32), "#C1"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Integer), "#C2"); p.Value = DBNull.Value; - Assert.AreEqual(DbType.String, p.DbType, "#D1"); - Assert.AreEqual(NpgsqlDbType.Text, p.NpgsqlDbType, "#D2"); + Assert.That(p.DbType, Is.EqualTo(DbType.String), "#D1"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Text), "#D2"); p.Value = new byte[] { 0x0a }; - Assert.AreEqual(DbType.Binary, p.DbType, "#E1"); - Assert.AreEqual(NpgsqlDbType.Bytea, p.NpgsqlDbType, "#E2"); + Assert.That(p.DbType, Is.EqualTo(DbType.Binary), "#E1"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Bytea), "#E2"); p.Value = null; - Assert.AreEqual(DbType.String, p.DbType, "#F1"); - Assert.AreEqual(NpgsqlDbType.Text, p.NpgsqlDbType, "#F2"); + Assert.That(p.DbType, Is.EqualTo(DbType.String), "#F1"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Text), "#F2"); p.Value = DateTime.Now; - Assert.AreEqual(DbType.DateTime, p.DbType, "#G1"); - Assert.AreEqual(NpgsqlDbType.Timestamp, p.NpgsqlDbType, "#G2"); + Assert.That(p.DbType, Is.EqualTo(DbType.DateTime), "#G1"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Timestamp), "#G2"); p.Value = null; - Assert.AreEqual(DbType.String, p.DbType, "#H1"); - Assert.AreEqual(NpgsqlDbType.Text, p.NpgsqlDbType, "#H2"); + 
Assert.That(p.DbType, Is.EqualTo(DbType.String), "#H1"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Text), "#H2"); // If DbType is set, then the NpgsqlDbType should not be // inferred from the value assigned. p = new NpgsqlParameter(); p.DbType = DbType.DateTime; - Assert.AreEqual(NpgsqlDbType.Timestamp, p.NpgsqlDbType, "#I1"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Timestamp), "#I1"); p.Value = 1; - Assert.AreEqual(NpgsqlDbType.Timestamp, p.NpgsqlDbType, "#I2"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Timestamp), "#I2"); p.Value = null; - Assert.AreEqual(NpgsqlDbType.Timestamp, p.NpgsqlDbType, "#I3"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Timestamp), "#I3"); p.Value = DBNull.Value; - Assert.AreEqual(NpgsqlDbType.Timestamp, p.NpgsqlDbType, "#I4"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Timestamp), "#I4"); // If NpgsqlDbType is set, then the DbType should not be // inferred from the value assigned. p = new NpgsqlParameter(); p.NpgsqlDbType = NpgsqlDbType.Bytea; - Assert.AreEqual(NpgsqlDbType.Bytea, p.NpgsqlDbType, "#J1"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Bytea), "#J1"); p.Value = 1; - Assert.AreEqual(NpgsqlDbType.Bytea, p.NpgsqlDbType, "#J2"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Bytea), "#J2"); p.Value = null; - Assert.AreEqual(NpgsqlDbType.Bytea, p.NpgsqlDbType, "#J3"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Bytea), "#J3"); p.Value = DBNull.Value; - Assert.AreEqual(NpgsqlDbType.Bytea, p.NpgsqlDbType, "#J4"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Bytea), "#J4"); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5428")] @@ -452,24 +452,24 @@ public void ParameterName() { var p = new NpgsqlParameter(); p.ParameterName = "name"; - Assert.AreEqual("name", p.ParameterName, "#A:ParameterName"); - Assert.AreEqual(string.Empty, p.SourceColumn, "#A:SourceColumn"); + Assert.That(p.ParameterName, Is.EqualTo("name"), 
"#A:ParameterName"); + Assert.That(p.SourceColumn, Is.Empty, "#A:SourceColumn"); p.ParameterName = null; - Assert.AreEqual(string.Empty, p.ParameterName, "#B:ParameterName"); - Assert.AreEqual(string.Empty, p.SourceColumn, "#B:SourceColumn"); + Assert.That(p.ParameterName, Is.Empty, "#B:ParameterName"); + Assert.That(p.SourceColumn, Is.Empty, "#B:SourceColumn"); p.ParameterName = " "; - Assert.AreEqual(" ", p.ParameterName, "#C:ParameterName"); - Assert.AreEqual(string.Empty, p.SourceColumn, "#C:SourceColumn"); + Assert.That(p.ParameterName, Is.EqualTo(" "), "#C:ParameterName"); + Assert.That(p.SourceColumn, Is.Empty, "#C:SourceColumn"); p.ParameterName = " name "; - Assert.AreEqual(" name ", p.ParameterName, "#D:ParameterName"); - Assert.AreEqual(string.Empty, p.SourceColumn, "#D:SourceColumn"); + Assert.That(p.ParameterName, Is.EqualTo(" name "), "#D:ParameterName"); + Assert.That(p.SourceColumn, Is.Empty, "#D:SourceColumn"); p.ParameterName = string.Empty; - Assert.AreEqual(string.Empty, p.ParameterName, "#E:ParameterName"); - Assert.AreEqual(string.Empty, p.SourceColumn, "#E:SourceColumn"); + Assert.That(p.ParameterName, Is.Empty, "#E:ParameterName"); + Assert.That(p.SourceColumn, Is.Empty, "#E:SourceColumn"); } [Test] @@ -480,59 +480,59 @@ public void ResetDbType() //Parameter with an assigned value but no DbType specified p = new NpgsqlParameter("foo", 42); p.ResetDbType(); - Assert.AreEqual(DbType.Int32, p.DbType, "#A:DbType"); - Assert.AreEqual(NpgsqlDbType.Integer, p.NpgsqlDbType, "#A:NpgsqlDbType"); - Assert.AreEqual(42, p.Value, "#A:Value"); + Assert.That(p.DbType, Is.EqualTo(DbType.Int32), "#A:DbType"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Integer), "#A:NpgsqlDbType"); + Assert.That(p.Value, Is.EqualTo(42), "#A:Value"); p.DbType = DbType.DateTime; //assigning a DbType - Assert.AreEqual(DbType.DateTime, p.DbType, "#B:DbType1"); - Assert.AreEqual(NpgsqlDbType.TimestampTz, p.NpgsqlDbType, "#B:SqlDbType1"); + Assert.That(p.DbType, 
Is.EqualTo(DbType.DateTime), "#B:DbType1"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.TimestampTz), "#B:SqlDbType1"); p.ResetDbType(); - Assert.AreEqual(DbType.Int32, p.DbType, "#B:DbType2"); - Assert.AreEqual(NpgsqlDbType.Integer, p.NpgsqlDbType, "#B:SqlDbtype2"); + Assert.That(p.DbType, Is.EqualTo(DbType.Int32), "#B:DbType2"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Integer), "#B:SqlDbtype2"); //Parameter with an assigned NpgsqlDbType but no specified value p = new NpgsqlParameter("foo", NpgsqlDbType.Integer); p.ResetDbType(); - Assert.AreEqual(DbType.Object, p.DbType, "#C:DbType"); - Assert.AreEqual(NpgsqlDbType.Unknown, p.NpgsqlDbType, "#C:NpgsqlDbType"); + Assert.That(p.DbType, Is.EqualTo(DbType.Object), "#C:DbType"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Unknown), "#C:NpgsqlDbType"); p.NpgsqlDbType = NpgsqlDbType.TimestampTz; //assigning a NpgsqlDbType - Assert.AreEqual(DbType.DateTime, p.DbType, "#D:DbType1"); - Assert.AreEqual(NpgsqlDbType.TimestampTz, p.NpgsqlDbType, "#D:SqlDbType1"); + Assert.That(p.DbType, Is.EqualTo(DbType.DateTime), "#D:DbType1"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.TimestampTz), "#D:SqlDbType1"); p.ResetDbType(); - Assert.AreEqual(DbType.Object, p.DbType, "#D:DbType2"); - Assert.AreEqual(NpgsqlDbType.Unknown, p.NpgsqlDbType, "#D:SqlDbType2"); + Assert.That(p.DbType, Is.EqualTo(DbType.Object), "#D:DbType2"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Unknown), "#D:SqlDbType2"); p = new NpgsqlParameter(); p.Value = DateTime.MaxValue; - Assert.AreEqual(DbType.DateTime2, p.DbType, "#E:DbType1"); - Assert.AreEqual(NpgsqlDbType.Timestamp, p.NpgsqlDbType, "#E:SqlDbType1"); + Assert.That(p.DbType, Is.EqualTo(DbType.DateTime2), "#E:DbType1"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Timestamp), "#E:SqlDbType1"); p.Value = null; p.ResetDbType(); - Assert.AreEqual(DbType.Object, p.DbType, "#E:DbType2"); - Assert.AreEqual(NpgsqlDbType.Unknown, p.NpgsqlDbType, 
"#E:SqlDbType2"); + Assert.That(p.DbType, Is.EqualTo(DbType.Object), "#E:DbType2"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Unknown), "#E:SqlDbType2"); p = new NpgsqlParameter("foo", NpgsqlDbType.Varchar); p.Value = DateTime.MaxValue; p.ResetDbType(); - Assert.AreEqual(DbType.DateTime2, p.DbType, "#F:DbType"); - Assert.AreEqual(NpgsqlDbType.Timestamp, p.NpgsqlDbType, "#F:NpgsqlDbType"); - Assert.AreEqual(DateTime.MaxValue, p.Value, "#F:Value"); + Assert.That(p.DbType, Is.EqualTo(DbType.DateTime2), "#F:DbType"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Timestamp), "#F:NpgsqlDbType"); + Assert.That(p.Value, Is.EqualTo(DateTime.MaxValue), "#F:Value"); p = new NpgsqlParameter("foo", NpgsqlDbType.Varchar); p.Value = DBNull.Value; p.ResetDbType(); - Assert.AreEqual(DbType.Object, p.DbType, "#G:DbType"); - Assert.AreEqual(NpgsqlDbType.Unknown, p.NpgsqlDbType, "#G:NpgsqlDbType"); - Assert.AreEqual(DBNull.Value, p.Value, "#G:Value"); + Assert.That(p.DbType, Is.EqualTo(DbType.Object), "#G:DbType"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Unknown), "#G:NpgsqlDbType"); + Assert.That(p.Value, Is.EqualTo(DBNull.Value), "#G:Value"); p = new NpgsqlParameter("foo", NpgsqlDbType.Varchar); p.Value = null; p.ResetDbType(); - Assert.AreEqual(DbType.Object, p.DbType, "#G:DbType"); - Assert.AreEqual(NpgsqlDbType.Unknown, p.NpgsqlDbType, "#G:NpgsqlDbType"); - Assert.IsNull(p.Value, "#G:Value"); + Assert.That(p.DbType, Is.EqualTo(DbType.Object), "#G:DbType"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Unknown), "#G:NpgsqlDbType"); + Assert.That(p.Value, Is.Null, "#G:Value"); } [Test] @@ -545,24 +545,24 @@ public void SourceColumn() { var p = new NpgsqlParameter(); p.SourceColumn = "name"; - Assert.AreEqual(string.Empty, p.ParameterName, "#A:ParameterName"); - Assert.AreEqual("name", p.SourceColumn, "#A:SourceColumn"); + Assert.That(p.ParameterName, Is.Empty, "#A:ParameterName"); + Assert.That(p.SourceColumn, Is.EqualTo("name"), 
"#A:SourceColumn"); p.SourceColumn = null; - Assert.AreEqual(string.Empty, p.ParameterName, "#B:ParameterName"); - Assert.AreEqual(string.Empty, p.SourceColumn, "#B:SourceColumn"); + Assert.That(p.ParameterName, Is.Empty, "#B:ParameterName"); + Assert.That(p.SourceColumn, Is.Empty, "#B:SourceColumn"); p.SourceColumn = " "; - Assert.AreEqual(string.Empty, p.ParameterName, "#C:ParameterName"); - Assert.AreEqual(" ", p.SourceColumn, "#C:SourceColumn"); + Assert.That(p.ParameterName, Is.Empty, "#C:ParameterName"); + Assert.That(p.SourceColumn, Is.EqualTo(" "), "#C:SourceColumn"); p.SourceColumn = " name "; - Assert.AreEqual(string.Empty, p.ParameterName, "#D:ParameterName"); - Assert.AreEqual(" name ", p.SourceColumn, "#D:SourceColumn"); + Assert.That(p.ParameterName, Is.Empty, "#D:ParameterName"); + Assert.That(p.SourceColumn, Is.EqualTo(" name "), "#D:SourceColumn"); p.SourceColumn = string.Empty; - Assert.AreEqual(string.Empty, p.ParameterName, "#E:ParameterName"); - Assert.AreEqual(string.Empty, p.SourceColumn, "#E:SourceColumn"); + Assert.That(p.ParameterName, Is.Empty, "#E:ParameterName"); + Assert.That(p.SourceColumn, Is.Empty, "#E:SourceColumn"); } [Test] @@ -570,8 +570,8 @@ public void Bug1011100_NpgsqlDbType() { var p = new NpgsqlParameter(); p.Value = DBNull.Value; - Assert.AreEqual(DbType.Object, p.DbType, "#A:DbType"); - Assert.AreEqual(NpgsqlDbType.Unknown, p.NpgsqlDbType, "#A:NpgsqlDbType"); + Assert.That(p.DbType, Is.EqualTo(DbType.Object), "#A:DbType"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Unknown), "#A:NpgsqlDbType"); // Now change parameter value. 
// Note that as we didn't explicitly specified a dbtype, the dbtype property should change when @@ -579,8 +579,8 @@ public void Bug1011100_NpgsqlDbType() p.Value = 8; - Assert.AreEqual(DbType.Int32, p.DbType, "#A:DbType"); - Assert.AreEqual(NpgsqlDbType.Integer, p.NpgsqlDbType, "#A:NpgsqlDbType"); + Assert.That(p.DbType, Is.EqualTo(DbType.Int32), "#A:DbType"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Integer), "#A:NpgsqlDbType"); //Assert.AreEqual(3510, p.Value, "#A:Value"); //p.NpgsqlDbType = NpgsqlDbType.Varchar; @@ -608,19 +608,19 @@ public void NpgsqlParameter_Clone() var newParam = param.Clone(); - Assert.AreEqual(param.Value, newParam.Value); - Assert.AreEqual(param.Precision, newParam.Precision); - Assert.AreEqual(param.Scale, newParam.Scale); - Assert.AreEqual(param.Size, newParam.Size); - Assert.AreEqual(param.Direction, newParam.Direction); - Assert.AreEqual(param.IsNullable, newParam.IsNullable); - Assert.AreEqual(param.ParameterName, newParam.ParameterName); - Assert.AreEqual(param.TrimmedName, newParam.TrimmedName); - Assert.AreEqual(param.SourceColumn, newParam.SourceColumn); - Assert.AreEqual(param.SourceVersion, newParam.SourceVersion); - Assert.AreEqual(param.NpgsqlValue, newParam.NpgsqlValue); - Assert.AreEqual(param.SourceColumnNullMapping, newParam.SourceColumnNullMapping); - Assert.AreEqual(param.NpgsqlValue, newParam.NpgsqlValue); + Assert.That(newParam.Value, Is.EqualTo(param.Value)); + Assert.That(newParam.Precision, Is.EqualTo(param.Precision)); + Assert.That(newParam.Scale, Is.EqualTo(param.Scale)); + Assert.That(newParam.Size, Is.EqualTo(param.Size)); + Assert.That(newParam.Direction, Is.EqualTo(param.Direction)); + Assert.That(newParam.IsNullable, Is.EqualTo(param.IsNullable)); + Assert.That(newParam.ParameterName, Is.EqualTo(param.ParameterName)); + Assert.That(newParam.TrimmedName, Is.EqualTo(param.TrimmedName)); + Assert.That(newParam.SourceColumn, Is.EqualTo(param.SourceColumn)); + Assert.That(newParam.SourceVersion, 
Is.EqualTo(param.SourceVersion)); + Assert.That(newParam.NpgsqlValue, Is.EqualTo(param.NpgsqlValue)); + Assert.That(newParam.SourceColumnNullMapping, Is.EqualTo(param.SourceColumnNullMapping)); + Assert.That(newParam.NpgsqlValue, Is.EqualTo(param.NpgsqlValue)); } @@ -632,7 +632,7 @@ public void Precision_via_interface() paramIface.Precision = 42; - Assert.AreEqual((byte)42, paramIface.Precision); + Assert.That(paramIface.Precision, Is.EqualTo((byte)42)); } [Test] @@ -643,7 +643,7 @@ public void Precision_via_base_class() paramBase.Precision = 42; - Assert.AreEqual((byte)42, paramBase.Precision); + Assert.That(paramBase.Precision, Is.EqualTo((byte)42)); } [Test] @@ -654,7 +654,7 @@ public void Scale_via_interface() paramIface.Scale = 42; - Assert.AreEqual((byte)42, paramIface.Scale); + Assert.That(paramIface.Scale, Is.EqualTo((byte)42)); } [Test] @@ -665,7 +665,7 @@ public void Scale_via_base_class() paramBase.Scale = 42; - Assert.AreEqual((byte)42, paramBase.Scale); + Assert.That(paramBase.Scale, Is.EqualTo((byte)42)); } [Test] diff --git a/test/Npgsql.Tests/PgPassEntryTests.cs b/test/Npgsql.Tests/PgPassEntryTests.cs index 9db518aabc..db78e893ad 100644 --- a/test/Npgsql.Tests/PgPassEntryTests.cs +++ b/test/Npgsql.Tests/PgPassEntryTests.cs @@ -13,11 +13,11 @@ public void Parses_well_formed_entry() var entry = PgPassFile.Entry.Parse(input); Assert.That(entry, Is.Not.Null); - Assert.That("test", Is.EqualTo(entry.Host)); - Assert.That(1234, Is.EqualTo(entry.Port)); - Assert.That("test2", Is.EqualTo(entry.Database)); - Assert.That("test3", Is.EqualTo(entry.Username)); - Assert.That("test4", Is.EqualTo(entry.Password)); + Assert.That(entry.Host, Is.EqualTo("test")); + Assert.That(entry.Port, Is.EqualTo(1234)); + Assert.That(entry.Database, Is.EqualTo("test2")); + Assert.That(entry.Username, Is.EqualTo("test3")); + Assert.That(entry.Password, Is.EqualTo("test4")); } [Test] @@ -36,11 +36,11 @@ public void Escaped_characters() var entry = PgPassFile.Entry.Parse(input); 
Assert.That(entry, Is.Not.Null); - Assert.That("t:est", Is.EqualTo(entry.Host)); - Assert.That(1234, Is.EqualTo(entry.Port)); - Assert.That("test2", Is.EqualTo(entry.Database)); - Assert.That("test3", Is.EqualTo(entry.Username)); - Assert.That("test\\4", Is.EqualTo(entry.Password)); + Assert.That(entry.Host, Is.EqualTo("t:est")); + Assert.That(entry.Port, Is.EqualTo(1234)); + Assert.That(entry.Database, Is.EqualTo("test2")); + Assert.That(entry.Username, Is.EqualTo("test3")); + Assert.That(entry.Password, Is.EqualTo("test\\4")); } [Test] diff --git a/test/Npgsql.Tests/PoolTests.cs b/test/Npgsql.Tests/PoolTests.cs index d9024dd0dd..af0fc27096 100644 --- a/test/Npgsql.Tests/PoolTests.cs +++ b/test/Npgsql.Tests/PoolTests.cs @@ -314,7 +314,7 @@ public void ClearPool(int iterations) } // Now have one connection in the pool - Assert.True(PoolManager.Pools.TryGetValue(connString, out var pool)); + Assert.That(PoolManager.Pools.TryGetValue(connString, out var pool)); AssertPoolState(pool, open: 1, idle: 1); NpgsqlConnection.ClearPool(conn); @@ -346,7 +346,7 @@ public void ClearPool_with_busy() NpgsqlConnection.ClearPool(conn); // conn is still busy but should get closed when returned to the pool - Assert.True(PoolManager.Pools.TryGetValue(connString, out pool)); + Assert.That(PoolManager.Pools.TryGetValue(connString, out pool)); AssertPoolState(pool, open: 1, idle: 0); } diff --git a/test/Npgsql.Tests/PrepareTests.cs b/test/Npgsql.Tests/PrepareTests.cs index 8a1c763e9a..fe01ef6fbc 100644 --- a/test/Npgsql.Tests/PrepareTests.cs +++ b/test/Npgsql.Tests/PrepareTests.cs @@ -755,20 +755,20 @@ public async Task Explicitly_prepared_statement_invalidation([Values] bool prepa // Since we've changed the table schema, the next execution of the prepared statement will error with 0A000 var exception = Assert.ThrowsAsync(() => command.ExecuteNonQueryAsync())!; Assert.That(exception.SqlState, Is.EqualTo(PostgresErrorCodes.FeatureNotSupported)); // cached plan must not change result type 
- Assert.IsFalse(command.IsPrepared); + Assert.That(command.IsPrepared, Is.False); if (unprepareAfterError) { // Just check that calling unprepare after error doesn't break anything await command.UnprepareAsync(); - Assert.IsFalse(command.IsPrepared); + Assert.That(command.IsPrepared, Is.False); } if (prepareAfterError) { // If we explicitly prepare after error, we should replace the previous prepared statement with a new one await command.PrepareAsync(); - Assert.IsTrue(command.IsPrepared); + Assert.That(command.IsPrepared); } // However, Npgsql should invalidate the prepared statement in this case, so the next execution should work @@ -777,7 +777,7 @@ public async Task Explicitly_prepared_statement_invalidation([Values] bool prepa if (!prepareAfterError) { // The command is unprepared, though. It's the user's responsibility to re-prepare if they wish. - Assert.False(command.IsPrepared); + Assert.That(command.IsPrepared, Is.False); } } diff --git a/test/Npgsql.Tests/Properties/AssemblyInfo.cs b/test/Npgsql.Tests/Properties/AssemblyInfo.cs index f7cdcd188d..89a1bb2e0d 100644 --- a/test/Npgsql.Tests/Properties/AssemblyInfo.cs +++ b/test/Npgsql.Tests/Properties/AssemblyInfo.cs @@ -1,7 +1,7 @@ using System.Runtime.CompilerServices; using NUnit.Framework; -[assembly: Parallelizable(ParallelScope.Children), Timeout(30000)] +[assembly: Parallelizable(ParallelScope.Children)] [assembly: InternalsVisibleTo("Npgsql.PluginTests, PublicKey=" + "0024000004800000940000000602000000240000525341310004000001000100" + diff --git a/test/Npgsql.Tests/ReaderNewSchemaTests.cs b/test/Npgsql.Tests/ReaderNewSchemaTests.cs index 79b4b38ddb..2391cf54f0 100644 --- a/test/Npgsql.Tests/ReaderNewSchemaTests.cs +++ b/test/Npgsql.Tests/ReaderNewSchemaTests.cs @@ -757,9 +757,9 @@ public async Task GetColumnSchema_via_interface() var iface = (IDbColumnSchemaGenerator)reader; var schema = iface.GetColumnSchema(); - Assert.NotNull(schema); - Assert.AreEqual(1, schema.Count); - 
Assert.NotNull(schema[0]); + Assert.That(schema, Is.Not.Null); + Assert.That(schema.Count, Is.EqualTo(1)); + Assert.That(schema[0], Is.Not.Null); } #region Not supported diff --git a/test/Npgsql.Tests/ReaderOldSchemaTests.cs b/test/Npgsql.Tests/ReaderOldSchemaTests.cs index 43ac627f46..d6adcdf88f 100644 --- a/test/Npgsql.Tests/ReaderOldSchemaTests.cs +++ b/test/Npgsql.Tests/ReaderOldSchemaTests.cs @@ -124,12 +124,12 @@ await conn.ExecuteNonQueryAsync($@" var metadata = await GetSchemaTable(dr); var idRow = metadata!.Rows.OfType().FirstOrDefault(x => (string)x["ColumnName"] == "id"); - Assert.IsNotNull(idRow, "Unable to find metadata for id column"); + Assert.That(idRow, Is.Not.Null, "Unable to find metadata for id column"); var int2Row = metadata.Rows.OfType().FirstOrDefault(x => (string)x["ColumnName"] == "int2"); - Assert.IsNotNull(int2Row, "Unable to find metadata for int2 column"); + Assert.That(int2Row, Is.Not.Null, "Unable to find metadata for int2 column"); - Assert.IsFalse((bool)idRow!["IsReadonly"]); - Assert.IsTrue((bool)int2Row!["IsReadonly"]); + Assert.That((bool)idRow!["IsReadonly"], Is.False); + Assert.That((bool)int2Row!["IsReadonly"]); } // ReSharper disable once InconsistentNaming @@ -144,12 +144,12 @@ public async Task AllowDBNull() using var metadata = await GetSchemaTable(reader); var nullableRow = metadata!.Rows.OfType().FirstOrDefault(x => (string)x["ColumnName"] == "nullable"); - Assert.IsNotNull(nullableRow, "Unable to find metadata for nullable column"); + Assert.That(nullableRow, Is.Not.Null, "Unable to find metadata for nullable column"); var nonNullableRow = metadata.Rows.OfType().FirstOrDefault(x => (string)x["ColumnName"] == "non_nullable"); - Assert.IsNotNull(nonNullableRow, "Unable to find metadata for non_nullable column"); + Assert.That(nonNullableRow, Is.Not.Null, "Unable to find metadata for non_nullable column"); - Assert.IsTrue((bool)nullableRow!["AllowDBNull"]); - Assert.IsFalse((bool)nonNullableRow!["AllowDBNull"]); + 
Assert.That((bool)nullableRow!["AllowDBNull"]); + Assert.That((bool)nonNullableRow!["AllowDBNull"], Is.False); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1027")] diff --git a/test/Npgsql.Tests/ReaderTests.cs b/test/Npgsql.Tests/ReaderTests.cs index 839ec5b610..432a9aa327 100644 --- a/test/Npgsql.Tests/ReaderTests.cs +++ b/test/Npgsql.Tests/ReaderTests.cs @@ -237,7 +237,7 @@ public async Task Get_string_with_parameter() using var dr = await command.ExecuteReaderAsync(Behavior); dr.Read(); var result = dr.GetString(0); - Assert.AreEqual(text, result); + Assert.That(result, Is.EqualTo(text)); } [Test] @@ -263,7 +263,7 @@ await conn.ExecuteNonQueryAsync($@" using var dr = await command.ExecuteReaderAsync(Behavior); dr.Read(); var result = dr.GetString(0); - Assert.AreEqual(test, result); + Assert.That(result, Is.EqualTo(test)); } [Test] @@ -496,7 +496,7 @@ public async Task ExecuteReader_getting_empty_resultset_with_output_parameter() param.Direction = ParameterDirection.Output; command.Parameters.Add(param); using var dr = await command.ExecuteReaderAsync(Behavior); - Assert.IsFalse(dr.NextResult()); + Assert.That(dr.NextResult(), Is.False); } [Test] @@ -644,7 +644,7 @@ await conn.ExecuteNonQueryAsync($@" // resources are referenced by the exception above, which is very likely to escape the using statement of the command. cmd.Dispose(); var cmd2 = conn.CreateCommand(); - Assert.AreNotSame(cmd2, cmd); + Assert.That(cmd, Is.Not.SameAs(cmd2)); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/967")] @@ -672,7 +672,7 @@ await conn.ExecuteNonQueryAsync($@" // resources are referenced by the exception above, which is very likely to escape the using statement of the command. 
cmd.Dispose(); var cmd2 = conn.CreateCommand(); - Assert.AreNotSame(cmd2, cmd); + Assert.That(cmd, Is.Not.SameAs(cmd2)); } #region SchemaOnly @@ -694,8 +694,8 @@ public async Task SchemaOnly_next_result_beyond_end() using var cmd = new NpgsqlCommand($"SELECT * FROM {table}", conn); using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SchemaOnly); - Assert.False(reader.NextResult()); - Assert.False(reader.NextResult()); + Assert.That(reader.NextResult(), Is.False); + Assert.That(reader.NextResult(), Is.False); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4124")] @@ -879,7 +879,7 @@ public async Task HasRows_without_resultset() var table = await CreateTempTable(conn, "name TEXT"); using var command = new NpgsqlCommand($"DELETE FROM {table} WHERE name = 'unknown'", conn); using var reader = await command.ExecuteReaderAsync(Behavior); - Assert.IsFalse(reader.HasRows); + Assert.That(reader.HasRows, Is.False); } [Test] @@ -888,9 +888,9 @@ public async Task Interval_as_TimeSpan() using var conn = await OpenConnectionAsync(); using var command = new NpgsqlCommand("SELECT CAST('1 hour' AS interval) AS dauer", conn); using var dr = await command.ExecuteReaderAsync(Behavior); - Assert.IsTrue(dr.HasRows); - Assert.IsTrue(dr.Read()); - Assert.IsTrue(dr.HasRows); + Assert.That(dr.HasRows); + Assert.That(dr.Read()); + Assert.That(dr.HasRows); var ts = dr.GetTimeSpan(0); } @@ -946,7 +946,7 @@ public async Task SequentialBufferedSeekReread() //_ = rdr[5]; // uncomment lines for successful execution _ = rdr.IsDBNull(6); _ = rdr[6]; - Assert.True(rdr.IsDBNull(6)); + Assert.That(rdr.IsDBNull(6)); } } @@ -979,7 +979,7 @@ await pgMock .WriteCommandComplete() .WriteReadyForQuery() .FlushAsync(); - Assert.AreEqual(expected, await task); + Assert.That(await task, Is.EqualTo(expected)); } } @@ -1292,8 +1292,8 @@ public async Task Bug3772() reader.GetInt32(0); - Assert.Zero(reader.Connector.ReadBuffer.ReadBytesLeft); - 
Assert.NotZero(reader.Connector.ReadBuffer.ReadPosition); + Assert.That(reader.Connector.ReadBuffer.ReadBytesLeft, Is.Zero); + Assert.That(reader.Connector.ReadBuffer.ReadPosition, Is.Not.Zero); writeBuffer.WriteInt32(byteValue.Length); writeBuffer.WriteBytes(byteValue); @@ -1351,7 +1351,7 @@ public async Task Read_string_as_char() cmd.CommandText = "SELECT 'abcdefgh', 'ijklmnop'"; await using var reader = await cmd.ExecuteReaderAsync(Behavior); - Assert.IsTrue(await reader.ReadAsync()); + Assert.That(await reader.ReadAsync()); Assert.That(reader.GetChar(0), Is.EqualTo('a')); if (Behavior == CommandBehavior.SequentialAccess) Assert.Throws(() => reader.GetChar(0)); @@ -1590,7 +1590,7 @@ public async Task GetStream_seek() var buffer = new byte[4]; await using var stream = reader.GetStream(0); - Assert.IsTrue(stream.CanSeek); + Assert.That(stream.CanSeek); var seekPosition = stream.Seek(-1, SeekOrigin.End); Assert.That(seekPosition, Is.EqualTo(stream.Length - 1)); @@ -1743,7 +1743,7 @@ public async Task TextReader_zero_length_column() cmd.CommandText = "SELECT ''"; await using var reader = await cmd.ExecuteReaderAsync(Behavior); - Assert.IsTrue(await reader.ReadAsync()); + Assert.That(await reader.ReadAsync()); using var textReader = reader.GetTextReader(0); Assert.That(textReader.Peek(), Is.EqualTo(-1)); @@ -1942,7 +1942,7 @@ await pgMock await using (var reader = await cmd.ExecuteReaderAsync(Behavior)) { // Successfully read the first row - Assert.True(await reader.ReadAsync()); + Assert.That(await reader.ReadAsync()); Assert.That(reader.GetInt32(0), Is.EqualTo(1)); // Attempt to read the second row - simulate blocking and cancellation @@ -1991,7 +1991,7 @@ await pgMock await using (var reader = await cmd.ExecuteReaderAsync(Behavior)) { // Successfully read the first row - Assert.True(await reader.ReadAsync()); + Assert.That(await reader.ReadAsync()); Assert.That(reader.GetInt32(0), Is.EqualTo(1)); // Attempt to read the second row - simulate blocking and 
cancellation @@ -2043,7 +2043,7 @@ await pgMock await using (var reader = await cmd.ExecuteReaderAsync(Behavior)) { // Successfully read the first resultset - Assert.True(await reader.ReadAsync()); + Assert.That(await reader.ReadAsync()); Assert.That(reader.GetInt32(0), Is.EqualTo(1)); // Attempt to advance to the second resultset - simulate blocking and cancellation @@ -2094,7 +2094,7 @@ await pgMock await using var reader = await cmd.ExecuteReaderAsync(Behavior); // Successfully read the first row - Assert.True(await reader.ReadAsync()); + Assert.That(await reader.ReadAsync()); Assert.That(reader.GetInt32(0), Is.EqualTo(1)); // Attempt to read the second row - simulate blocking and cancellation @@ -2139,7 +2139,7 @@ await pgMock await using var reader = await cmd.ExecuteReaderAsync(Behavior); // Successfully read the first resultset - Assert.True(await reader.ReadAsync()); + Assert.That(await reader.ReadAsync()); Assert.That(reader.GetInt32(0), Is.EqualTo(1)); // Attempt to read the second row - simulate blocking and cancellation @@ -2247,11 +2247,11 @@ public async Task Cancel_multiplexing_disabled() await using var cmd = new NpgsqlCommand("SELECT generate_series(1, 100); SELECT generate_series(1, 100)", conn); await using var reader = await cmd.ExecuteReaderAsync(Behavior); var cancelledToken = new CancellationToken(canceled: true); - Assert.IsTrue(await reader.ReadAsync()); + Assert.That(await reader.ReadAsync()); while (await reader.ReadAsync(cancelledToken)) { } - Assert.IsTrue(await reader.NextResultAsync(cancelledToken)); + Assert.That(await reader.NextResultAsync(cancelledToken)); while (await reader.ReadAsync(cancelledToken)) { } - Assert.IsFalse(conn.Connector!.UserCancellationRequested); + Assert.That(conn.Connector!.UserCancellationRequested, Is.False); } #endregion Cancellation diff --git a/test/Npgsql.Tests/Replication/CommonReplicationTests.cs b/test/Npgsql.Tests/Replication/CommonReplicationTests.cs index 36a11b434a..c57280e184 100644 --- 
a/test/Npgsql.Tests/Replication/CommonReplicationTests.cs +++ b/test/Npgsql.Tests/Replication/CommonReplicationTests.cs @@ -432,7 +432,7 @@ async Task GetCommitLsn(string valueString) // NpgsqlLogicalReplicationConnection // Begin Transaction, Insert, Commit Transaction for (var i = 0; i < 3; i++) - Assert.True(await messages.MoveNextAsync()); + Assert.That(await messages.MoveNextAsync()); return messages.Current.Lsn; } diff --git a/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs b/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs index 0186d4f0d8..e3cffd5766 100644 --- a/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs +++ b/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs @@ -1358,7 +1358,7 @@ await c.ExecuteNonQueryAsync($""" async Task AssertTransactionStart(IAsyncEnumerator messages) { - Assert.True(await messages.MoveNextAsync()); + Assert.That(await messages.MoveNextAsync()); switch (messages.Current) { @@ -1379,13 +1379,13 @@ await c.ExecuteNonQueryAsync($""" async Task AssertTransactionCommit(IAsyncEnumerator messages) { - Assert.True(await messages.MoveNextAsync()); + Assert.That(await messages.MoveNextAsync()); switch (messages.Current) { case StreamStopMessage: Assert.That(IsStreaming); - Assert.True(await messages.MoveNextAsync()); + Assert.That(await messages.MoveNextAsync()); Assert.That(messages.Current, Is.TypeOf()); return; case CommitMessage: @@ -1398,10 +1398,10 @@ async Task AssertTransactionCommit(IAsyncEnumerator async Task AssertPrepare(IAsyncEnumerator enumerator) { - Assert.True(await enumerator.MoveNextAsync()); + Assert.That(await enumerator.MoveNextAsync()); if (IsStreaming && enumerator.Current is StreamStopMessage) { - Assert.True(await enumerator.MoveNextAsync()); + Assert.That(await enumerator.MoveNextAsync()); Assert.That(enumerator.Current, Is.TypeOf()); return (PrepareMessageBase)enumerator.Current!; } @@ -1413,16 +1413,16 @@ async Task AssertPrepare(IAsyncEnumerator 
NextMessage(IAsyncEnumerator enumerator, bool expectRelationMessage = false) where TExpected : PgOutputReplicationMessage { - Assert.True(await enumerator.MoveNextAsync()); + Assert.That(await enumerator.MoveNextAsync()); if (IsStreaming && enumerator.Current is StreamStopMessage) { - Assert.True(await enumerator.MoveNextAsync()); + Assert.That(await enumerator.MoveNextAsync()); Assert.That(enumerator.Current, Is.TypeOf()); - Assert.True(await enumerator.MoveNextAsync()); + Assert.That(await enumerator.MoveNextAsync()); if (expectRelationMessage) { Assert.That(enumerator.Current, Is.TypeOf()); - Assert.True(await enumerator.MoveNextAsync()); + Assert.That(await enumerator.MoveNextAsync()); } } diff --git a/test/Npgsql.Tests/Replication/PhysicalReplicationTests.cs b/test/Npgsql.Tests/Replication/PhysicalReplicationTests.cs index 59698b87ac..62c19451e9 100644 --- a/test/Npgsql.Tests/Replication/PhysicalReplicationTests.cs +++ b/test/Npgsql.Tests/Replication/PhysicalReplicationTests.cs @@ -90,7 +90,7 @@ public Task Replication_with_slot() // other transactions possibly from system processes can // interfere here, inserting additional messages, but more // likely we'll get everything in one big chunk. - Assert.True(await messages.MoveNextAsync()); + Assert.That(await messages.MoveNextAsync()); var message = messages.Current; Assert.That(message.WalStart, Is.EqualTo(info.XLogPos)); Assert.That(message.WalEnd, Is.GreaterThan(message.WalStart)); @@ -128,7 +128,7 @@ public async Task Replication_without_slot() // other transactions possibly from system processes can // interfere here, inserting additional messages, but more // likely we'll get everything in one big chunk. 
- Assert.True(await messages.MoveNextAsync()); + Assert.That(await messages.MoveNextAsync()); var message = messages.Current; Assert.That(message.WalStart, Is.EqualTo(info.XLogPos)); Assert.That(message.WalEnd, Is.GreaterThan(message.WalStart)); diff --git a/test/Npgsql.Tests/Replication/TestDecodingReplicationTests.cs b/test/Npgsql.Tests/Replication/TestDecodingReplicationTests.cs index 5d7c633f6c..3ecedfdfdd 100644 --- a/test/Npgsql.Tests/Replication/TestDecodingReplicationTests.cs +++ b/test/Npgsql.Tests/Replication/TestDecodingReplicationTests.cs @@ -327,7 +327,7 @@ await c.ExecuteNonQueryAsync(@$" static async ValueTask NextMessage(IAsyncEnumerator enumerator) { - Assert.True(await enumerator.MoveNextAsync()); + Assert.That(await enumerator.MoveNextAsync()); return enumerator.Current!; } diff --git a/test/Npgsql.Tests/SchemaTests.cs b/test/Npgsql.Tests/SchemaTests.cs index 49f31eff19..ebe36269b0 100644 --- a/test/Npgsql.Tests/SchemaTests.cs +++ b/test/Npgsql.Tests/SchemaTests.cs @@ -257,7 +257,7 @@ public async Task ForeignKeys() { await using var conn = await OpenConnectionAsync(); var dt = await GetSchema(conn, "ForeignKeys"); - Assert.IsNotNull(dt); + Assert.That(dt, Is.Not.Null); } [Test] @@ -276,7 +276,7 @@ public async Task ParameterMarkerFormat() command.CommandText = $"SELECT * FROM {table} WHERE int=" + string.Format(parameterMarkerFormat, parameterName); command.Parameters.Add(new NpgsqlParameter(parameterName, 4)); await using var reader = await command.ExecuteReaderAsync(); - Assert.IsTrue(reader.Read()); + Assert.That(reader.Read()); } [Test] @@ -425,11 +425,11 @@ public async Task Unique_constraint() // Columns are not necessarily in the correct order var firstColumn = columns.FirstOrDefault(x => (string)x["column_name"] == "f1")!; - Assert.NotNull(firstColumn); + Assert.That(firstColumn, Is.Not.Null); Assert.That(firstColumn["ordinal_number"], Is.EqualTo(1)); var secondColumn = columns.FirstOrDefault(x => (string)x["column_name"] == "f2")!; - 
Assert.NotNull(secondColumn); + Assert.That(secondColumn, Is.Not.Null); Assert.That(secondColumn["ordinal_number"], Is.EqualTo(2)); } diff --git a/test/Npgsql.Tests/SecurityTests.cs b/test/Npgsql.Tests/SecurityTests.cs index 7f47ea8111..0ef9fd7b68 100644 --- a/test/Npgsql.Tests/SecurityTests.cs +++ b/test/Npgsql.Tests/SecurityTests.cs @@ -248,7 +248,7 @@ public async Task Connect_with_only_ssl_allowed_user([Values] bool multiplexing, csb.KeepAlive = keepAlive ? 10 : 0; }); await using var conn = await dataSource.OpenConnectionAsync(); - Assert.IsTrue(conn.IsSslEncrypted); + Assert.That(conn.IsSslEncrypted); } catch (Exception e) when (!IsOnBuildServer) { @@ -277,7 +277,7 @@ public async Task Connect_with_only_non_ssl_allowed_user([Values] bool multiplex csb.KeepAlive = keepAlive ? 10 : 0; }); await using var conn = await dataSource.OpenConnectionAsync(); - Assert.IsFalse(conn.IsSslEncrypted); + Assert.That(conn.IsSslEncrypted, Is.False); } catch (NpgsqlException ex) when (RuntimeInformation.IsOSPlatform(OSPlatform.Windows) && ex.InnerException is IOException) { @@ -319,7 +319,7 @@ public async Task DataSource_SslClientAuthenticationOptionsCallback_is_invoked([ Assert.That(ex.InnerException, Is.TypeOf()); } - Assert.IsTrue(callbackWasInvoked); + Assert.That(callbackWasInvoked); } [Test] @@ -348,7 +348,7 @@ public async Task Connection_SslClientAuthenticationOptionsCallback_is_invoked([ Assert.That(ex.InnerException, Is.TypeOf()); } - Assert.IsTrue(callbackWasInvoked); + Assert.That(callbackWasInvoked); } [Test] @@ -400,7 +400,7 @@ public async Task Bug4305_Secure([Values] bool async) try { conn = await dataSource.OpenConnectionAsync(); - Assert.IsTrue(conn.IsSslEncrypted); + Assert.That(conn.IsSslEncrypted); } catch (Exception e) when (!IsOnBuildServer) { @@ -424,7 +424,7 @@ public async Task Bug4305_Secure([Values] bool async) await conn.CloseAsync(); await conn.OpenAsync(); - Assert.AreSame(originalConnector, conn.Connector); + Assert.That(conn.Connector, 
Is.SameAs(originalConnector)); } cmd.CommandText = "SELECT 1"; @@ -451,7 +451,7 @@ public async Task Bug4305_not_Secure([Values] bool async) try { conn = await dataSource.OpenConnectionAsync(); - Assert.IsFalse(conn.IsSslEncrypted); + Assert.That(conn.IsSslEncrypted, Is.False); } catch (Exception e) when (!IsOnBuildServer) { @@ -473,7 +473,7 @@ public async Task Bug4305_not_Secure([Values] bool async) await conn.CloseAsync(); await conn.OpenAsync(); - Assert.AreSame(originalConnector, conn.Connector); + Assert.That(conn.Connector, Is.SameAs(originalConnector)); cmd.CommandText = "SELECT 1"; if (async) @@ -494,7 +494,7 @@ public async Task Direct_ssl_negotiation() csb.SslNegotiation = SslNegotiation.Direct; }); await using var conn = await dataSource.OpenConnectionAsync(); - Assert.IsTrue(conn.IsSslEncrypted); + Assert.That(conn.IsSslEncrypted); } [Test] diff --git a/test/Npgsql.Tests/SnakeCaseNameTranslatorTests.cs b/test/Npgsql.Tests/SnakeCaseNameTranslatorTests.cs index 52de32bccf..9a64ccdaa2 100644 --- a/test/Npgsql.Tests/SnakeCaseNameTranslatorTests.cs +++ b/test/Npgsql.Tests/SnakeCaseNameTranslatorTests.cs @@ -66,9 +66,9 @@ public void TurkeyTest() const string clrName = "IPhone"; const string expected = "i_phone"; - Assert.AreEqual(expected, translator.TranslateMemberName(clrName)); - Assert.AreEqual(expected, translator.TranslateTypeName(clrName)); - Assert.AreEqual(expected, legacyTranslator.TranslateMemberName(clrName)); - Assert.AreEqual(expected, legacyTranslator.TranslateTypeName(clrName)); + Assert.That(translator.TranslateMemberName(clrName), Is.EqualTo(expected)); + Assert.That(translator.TranslateTypeName(clrName), Is.EqualTo(expected)); + Assert.That(legacyTranslator.TranslateMemberName(clrName), Is.EqualTo(expected)); + Assert.That(legacyTranslator.TranslateTypeName(clrName), Is.EqualTo(expected)); } } diff --git a/test/Npgsql.Tests/StoredProcedureTests.cs b/test/Npgsql.Tests/StoredProcedureTests.cs index 84acb51b36..ae13fa015c 100644 --- 
a/test/Npgsql.Tests/StoredProcedureTests.cs +++ b/test/Npgsql.Tests/StoredProcedureTests.cs @@ -207,8 +207,8 @@ LANGUAGE plpgsql }; await batch.ExecuteNonQueryAsync(); - Assert.AreEqual(1, batch.BatchCommands[0].Parameters[1].Value); - Assert.AreEqual(1, batch.BatchCommands[1].Parameters[1].Value); + Assert.That(batch.BatchCommands[0].Parameters[1].Value, Is.EqualTo(1)); + Assert.That(batch.BatchCommands[1].Parameters[1].Value, Is.EqualTo(1)); } #region DeriveParameters @@ -295,8 +295,8 @@ public async Task DeriveParameters_procedure_with_case_sensitive_name() { await using var command = new NpgsqlCommand(@"""ProcedureCaseSensitive""", conn) { CommandType = CommandType.StoredProcedure }; NpgsqlCommandBuilder.DeriveParameters(command); - Assert.AreEqual(NpgsqlDbType.Integer, command.Parameters[0].NpgsqlDbType); - Assert.AreEqual(NpgsqlDbType.Text, command.Parameters[1].NpgsqlDbType); + Assert.That(command.Parameters[0].NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Integer)); + Assert.That(command.Parameters[1].NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Text)); } finally { @@ -315,8 +315,8 @@ public async Task DeriveParameters_quote_characters_in_function_name() { await using var command = new NpgsqlCommand(sproc, conn) { CommandType = CommandType.StoredProcedure }; NpgsqlCommandBuilder.DeriveParameters(command); - Assert.AreEqual(NpgsqlDbType.Integer, command.Parameters[0].NpgsqlDbType); - Assert.AreEqual(NpgsqlDbType.Text, command.Parameters[1].NpgsqlDbType); + Assert.That(command.Parameters[0].NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Integer)); + Assert.That(command.Parameters[1].NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Text)); } finally { @@ -335,8 +335,8 @@ await conn.ExecuteNonQueryAsync( { await using var command = new NpgsqlCommand(@"""My.Dotted.Procedure""", conn) { CommandType = CommandType.StoredProcedure }; NpgsqlCommandBuilder.DeriveParameters(command); - Assert.AreEqual(NpgsqlDbType.Integer, command.Parameters[0].NpgsqlDbType); - Assert.AreEqual(NpgsqlDbType.Text, 
command.Parameters[1].NpgsqlDbType); + Assert.That(command.Parameters[0].NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Integer)); + Assert.That(command.Parameters[1].NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Text)); } finally { @@ -355,8 +355,8 @@ await conn.ExecuteNonQueryAsync( $"CREATE PROCEDURE {sproc}(x int, y int, out sum int, out product int) AS 'SELECT $1 + $2, $1 * $2' LANGUAGE sql"); await using var command = new NpgsqlCommand(sproc, conn) { CommandType = CommandType.StoredProcedure }; NpgsqlCommandBuilder.DeriveParameters(command); - Assert.AreEqual("x", command.Parameters[0].ParameterName); - Assert.AreEqual("y", command.Parameters[1].ParameterName); + Assert.That(command.Parameters[0].ParameterName, Is.EqualTo("x")); + Assert.That(command.Parameters[1].ParameterName, Is.EqualTo("y")); } [Test] diff --git a/test/Npgsql.Tests/TaskTimeoutAndCancellationTest.cs b/test/Npgsql.Tests/TaskTimeoutAndCancellationTest.cs index e3759d35e9..f90bd1dd92 100644 --- a/test/Npgsql.Tests/TaskTimeoutAndCancellationTest.cs +++ b/test/Npgsql.Tests/TaskTimeoutAndCancellationTest.cs @@ -21,7 +21,7 @@ async Task GetResultTaskAsync(int timeout, CancellationToken ct) [Test] public async Task SuccessfulResultTaskAsync() => - Assert.AreEqual(TestResultValue, await TaskTimeoutAndCancellation.ExecuteAsync(ct => GetResultTaskAsync(10, ct), NpgsqlTimeout.Infinite, CancellationToken.None)); + Assert.That(await TaskTimeoutAndCancellation.ExecuteAsync(ct => GetResultTaskAsync(10, ct), NpgsqlTimeout.Infinite, CancellationToken.None), Is.EqualTo(TestResultValue)); [Test] public async Task SuccessfulVoidTaskAsync() => @@ -112,7 +112,7 @@ void OnUnobservedTaskException(object? source, UnobservedTaskExceptionEventArgs await test(() => unobservedTaskException); // Verify the unobserved Task exception event has not been received. 
- Assert.IsNull(unobservedTaskException, unobservedTaskException?.Message); + Assert.That(unobservedTaskException, Is.Null, unobservedTaskException?.Message); } finally { @@ -157,6 +157,6 @@ await TaskTimeoutAndCancellation.ExecuteAsync( { // Expected due to preemptive cancellation. } - Assert.False(nonCancellableTask.IsCompleted); + Assert.That(nonCancellableTask.IsCompleted, Is.False); } } diff --git a/test/Npgsql.Tests/TestUtil.cs b/test/Npgsql.Tests/TestUtil.cs index 0cf7a6a75f..fc0b5404d8 100644 --- a/test/Npgsql.Tests/TestUtil.cs +++ b/test/Npgsql.Tests/TestUtil.cs @@ -428,7 +428,7 @@ internal static void AssertLoggingStateContains( (LogLevel Level, EventId Id, string Message, object? State, Exception? Exception) log, string key, T value) - => Assert.That(log.State, Contains.Item(new KeyValuePair(key, value))); + => Assert.That(log.State as IEnumerable>, Contains.Item(new KeyValuePair(key, value))); internal static void AssertLoggingStateDoesNotContain( (LogLevel Level, EventId Id, string Message, object? State, Exception? Exception) log, diff --git a/test/Npgsql.Tests/TracingTests.cs b/test/Npgsql.Tests/TracingTests.cs index 5cf0fca200..74e43f63a8 100644 --- a/test/Npgsql.Tests/TracingTests.cs +++ b/test/Npgsql.Tests/TracingTests.cs @@ -59,7 +59,7 @@ static void ValidateActivity(Activity activity, NpgsqlConnection conn, bool isMu var expectedTagCount = conn.Settings.Port == 5432 ? 
8 : 9; Assert.That(activity.TagObjects.Count(), Is.EqualTo(expectedTagCount)); - Assert.IsFalse(activity.TagObjects.Any(x => x.Key == "db.statement")); + Assert.That(activity.TagObjects.Any(x => x.Key == "db.statement"), Is.False); var systemTag = activity.TagObjects.First(x => x.Key == "db.system"); Assert.That(systemTag.Value, Is.EqualTo("postgresql")); @@ -79,7 +79,7 @@ static void ValidateActivity(Activity activity, NpgsqlConnection conn, bool isMu Assert.That(connIDTag.Value, Is.EqualTo(conn.ProcessID)); } else - Assert.IsTrue(activity.TagObjects.Any(x => x.Key == "db.connection_id")); + Assert.That(activity.TagObjects.Any(x => x.Key == "db.connection_id")); } } @@ -140,7 +140,7 @@ public async Task Basic_query([Values] bool async, [Values] bool batch) Assert.That(connIDTag.Value, Is.EqualTo(conn.ProcessID)); } else - Assert.IsTrue(activity.TagObjects.Any(x => x.Key == "db.connection_id")); + Assert.That(activity.TagObjects.Any(x => x.Key == "db.connection_id")); } [Test] @@ -182,10 +182,10 @@ public async Task Error_open([Values] bool async) Assert.That(exceptionTypeTag.Value, Is.EqualTo(ex.GetType().FullName)); var exceptionMessageTag = exceptionEvent.Tags.First(x => x.Key == "exception.message"); - StringAssert.Contains(ex.Message, (string)exceptionMessageTag.Value!); + Assert.That((string)exceptionMessageTag.Value!, Does.Contain(ex.Message)); var exceptionStacktraceTag = exceptionEvent.Tags.First(x => x.Key == "exception.stacktrace"); - StringAssert.Contains(ex.Message, (string)exceptionStacktraceTag.Value!); + Assert.That((string)exceptionStacktraceTag.Value!, Does.Contain(ex.Message)); var exceptionEscapedTag = exceptionEvent.Tags.First(x => x.Key == "exception.escaped"); Assert.That(exceptionEscapedTag.Value, Is.True); @@ -239,10 +239,10 @@ public async Task Error_query([Values] bool async, [Values] bool batch) Assert.That(exceptionTypeTag.Value, Is.EqualTo("Npgsql.PostgresException")); var exceptionMessageTag = exceptionEvent.Tags.First(x => x.Key == 
"exception.message"); - StringAssert.Contains("relation \"non_existing_table\" does not exist", (string)exceptionMessageTag.Value!); + Assert.That((string)exceptionMessageTag.Value!, Does.Contain("relation \"non_existing_table\" does not exist")); var exceptionStacktraceTag = exceptionEvent.Tags.First(x => x.Key == "exception.stacktrace"); - StringAssert.Contains("relation \"non_existing_table\" does not exist", (string)exceptionStacktraceTag.Value!); + Assert.That((string)exceptionStacktraceTag.Value!, Does.Contain("relation \"non_existing_table\" does not exist")); var exceptionEscapedTag = exceptionEvent.Tags.First(x => x.Key == "exception.escaped"); Assert.That(exceptionEscapedTag.Value, Is.True); @@ -271,7 +271,7 @@ public async Task Error_query([Values] bool async, [Values] bool batch) Assert.That(connIDTag.Value, Is.EqualTo(conn.ProcessID)); } else - Assert.IsTrue(activity.TagObjects.Any(x => x.Key == "db.connection_id")); + Assert.That(activity.TagObjects.Any(x => x.Key == "db.connection_id")); } [Test] diff --git a/test/Npgsql.Tests/Types/ArrayTests.cs b/test/Npgsql.Tests/Types/ArrayTests.cs index 3202bd0ba9..07f10330f3 100644 --- a/test/Npgsql.Tests/Types/ArrayTests.cs +++ b/test/Npgsql.Tests/Types/ArrayTests.cs @@ -177,7 +177,7 @@ public async Task Generic_IList() var reader = await cmd.ExecuteReaderAsync(); reader.Read(); - Assert.AreEqual(expected, reader.GetFieldValue(0)); + Assert.That(reader.GetFieldValue(0), Is.EqualTo(expected)); } [Test, Description("Verifies that an InvalidOperationException is thrown when the returned array has a different number of dimensions from what was requested.")] @@ -389,9 +389,9 @@ public async Task Read_two_empty_arrays() await using var cmd = new NpgsqlCommand("SELECT '{}'::INT[], '{}'::INT[]", conn); await using var reader = await cmd.ExecuteReaderAsync(); await reader.ReadAsync(); - Assert.AreSame(reader.GetFieldValue(0), reader.GetFieldValue(1)); + Assert.That(reader.GetFieldValue(1), 
Is.SameAs(reader.GetFieldValue(0))); // Unlike T[], List is mutable so we should not return the same instance - Assert.AreNotSame(reader.GetFieldValue>(0), reader.GetFieldValue>(1)); + Assert.That(reader.GetFieldValue>(1), Is.Not.SameAs(reader.GetFieldValue>(0))); } [Test] diff --git a/test/Npgsql.Tests/Types/ByteaTests.cs b/test/Npgsql.Tests/Types/ByteaTests.cs index 60cc2830f3..9e8df154b0 100644 --- a/test/Npgsql.Tests/Types/ByteaTests.cs +++ b/test/Npgsql.Tests/Types/ByteaTests.cs @@ -279,9 +279,9 @@ public async Task Array_of_bytea() var inVal = new[] { bytes, bytes }; cmd.Parameters.AddWithValue("p1", NpgsqlDbType.Bytea | NpgsqlDbType.Array, inVal); var retVal = (byte[][]?)await cmd.ExecuteScalarAsync(); - Assert.AreEqual(inVal.Length, retVal!.Length); - Assert.AreEqual(inVal[0], retVal[0]); - Assert.AreEqual(inVal[1], retVal[1]); + Assert.That(retVal!.Length, Is.EqualTo(inVal.Length)); + Assert.That(retVal[0], Is.EqualTo(inVal[0])); + Assert.That(retVal[1], Is.EqualTo(inVal[1])); } sealed class NonSeekableStream(byte[] data) : MemoryStream(data) diff --git a/test/Npgsql.Tests/Types/CompositeHandlerTests.Read.cs b/test/Npgsql.Tests/Types/CompositeHandlerTests.Read.cs index 2188569a49..732dbf83e1 100644 --- a/test/Npgsql.Tests/Types/CompositeHandlerTests.Read.cs +++ b/test/Npgsql.Tests/Types/CompositeHandlerTests.Read.cs @@ -32,27 +32,27 @@ async Task Read(T composite, Action, T> assert, string? 
schema = null [Test] public Task Read_class_with_property() => - Read((execute, expected) => Assert.AreEqual(expected.Value, execute().Value)); + Read((execute, expected) => Assert.That(execute().Value, Is.EqualTo(expected.Value))); [Test] public Task Read_class_with_field() => - Read((execute, expected) => Assert.AreEqual(expected.Value, execute().Value)); + Read((execute, expected) => Assert.That(execute().Value, Is.EqualTo(expected.Value))); [Test] public Task Read_struct_with_property() => - Read((execute, expected) => Assert.AreEqual(expected.Value, execute().Value)); + Read((execute, expected) => Assert.That(execute().Value, Is.EqualTo(expected.Value))); [Test] public Task Read_struct_with_field() => - Read((execute, expected) => Assert.AreEqual(expected.Value, execute().Value)); + Read((execute, expected) => Assert.That(execute().Value, Is.EqualTo(expected.Value))); [Test] public Task Read_type_with_two_properties() => Read((execute, expected) => { var actual = execute(); - Assert.AreEqual(expected.IntValue, actual.IntValue); - Assert.AreEqual(expected.StringValue, actual.StringValue); + Assert.That(actual.IntValue, Is.EqualTo(expected.IntValue)); + Assert.That(actual.StringValue, Is.EqualTo(expected.StringValue)); }); [Test] @@ -60,8 +60,8 @@ public Task Read_type_with_two_properties_inverted() => Read((execute, expected) => { var actual = execute(); - Assert.AreEqual(expected.IntValue, actual.IntValue); - Assert.AreEqual(expected.StringValue, actual.StringValue); + Assert.That(actual.IntValue, Is.EqualTo(expected.IntValue)); + Assert.That(actual.StringValue, Is.EqualTo(expected.StringValue)); }); [Test] @@ -98,7 +98,7 @@ public Task Read_type_with_more_properties_than_attributes() => Read(new TypeWithMorePropertiesThanAttributes(), (execute, expected) => { var actual = execute(); - Assert.That(actual.IntValue, Is.Not.Null); + Assert.That((int?)actual.IntValue, Is.Not.Null); Assert.That(actual.StringValue, Is.Null); }); diff --git 
a/test/Npgsql.Tests/Types/CompositeHandlerTests.Write.cs b/test/Npgsql.Tests/Types/CompositeHandlerTests.Write.cs index 160b037a97..800270f7c3 100644 --- a/test/Npgsql.Tests/Types/CompositeHandlerTests.Write.cs +++ b/test/Npgsql.Tests/Types/CompositeHandlerTests.Write.cs @@ -45,34 +45,34 @@ async Task Write(T composite, Action? assert = null, str [Test] public Task Write_class_with_property() - => Write((reader, expected) => Assert.AreEqual(expected.Value, reader.GetString(0))); + => Write((reader, expected) => Assert.That(reader.GetString(0), Is.EqualTo(expected.Value))); [Test] public Task Write_class_with_field() - => Write((reader, expected) => Assert.AreEqual(expected.Value, reader.GetString(0))); + => Write((reader, expected) => Assert.That(reader.GetString(0), Is.EqualTo(expected.Value))); [Test] public Task Write_struct_with_property() - => Write((reader, expected) => Assert.AreEqual(expected.Value, reader.GetString(0))); + => Write((reader, expected) => Assert.That(reader.GetString(0), Is.EqualTo(expected.Value))); [Test] public Task Write_struct_with_field() - => Write((reader, expected) => Assert.AreEqual(expected.Value, reader.GetString(0))); + => Write((reader, expected) => Assert.That(reader.GetString(0), Is.EqualTo(expected.Value))); [Test] public Task Write_type_with_two_properties() => Write((reader, expected) => { - Assert.AreEqual(expected.IntValue, reader.GetInt32(0)); - Assert.AreEqual(expected.StringValue, reader.GetString(1)); + Assert.That(reader.GetInt32(0), Is.EqualTo(expected.IntValue)); + Assert.That(reader.GetString(1), Is.EqualTo(expected.StringValue)); }); [Test] public Task Write_type_with_two_properties_inverted() => Write((reader, expected) => { - Assert.AreEqual(expected.IntValue, reader.GetInt32(1)); - Assert.AreEqual(expected.StringValue, reader.GetString(0)); + Assert.That(reader.GetInt32(1), Is.EqualTo(expected.IntValue)); + Assert.That(reader.GetString(0), Is.EqualTo(expected.StringValue)); }); [Test] diff --git 
a/test/Npgsql.Tests/Types/CompositeTests.cs b/test/Npgsql.Tests/Types/CompositeTests.cs index 765508908c..1c1b254862 100644 --- a/test/Npgsql.Tests/Types/CompositeTests.cs +++ b/test/Npgsql.Tests/Types/CompositeTests.cs @@ -453,8 +453,8 @@ public async Task Table_as_composite([Values] bool enabled) Assert.ThrowsAsync(DoAssertion); // Start a transaction specifically for multiplexing (to bind a connector to the connection) await using var tx = await connection.BeginTransactionAsync(); - Assert.Null(connection.Connector!.DatabaseInfo.CompositeTypes.SingleOrDefault(c => c.Name.Contains(table))); - Assert.Null(connection.Connector!.DatabaseInfo.ArrayTypes.SingleOrDefault(c => c.Name.Contains(table))); + Assert.That(connection.Connector!.DatabaseInfo.CompositeTypes.SingleOrDefault(c => c.Name.Contains(table)), Is.Null); + Assert.That(connection.Connector!.DatabaseInfo.ArrayTypes.SingleOrDefault(c => c.Name.Contains(table)), Is.Null); } diff --git a/test/Npgsql.Tests/Types/DateTimeTests.cs b/test/Npgsql.Tests/Types/DateTimeTests.cs index 078693bf96..fe7bb1bd27 100644 --- a/test/Npgsql.Tests/Types/DateTimeTests.cs +++ b/test/Npgsql.Tests/Types/DateTimeTests.cs @@ -442,8 +442,8 @@ public void NpgsqlParameterDbType_is_value_dependent_datetime_or_datetime2() { var localtimestamp = new NpgsqlParameter { Value = DateTime.Now }; var unspecifiedtimestamp = new NpgsqlParameter { Value = new DateTime() }; - Assert.AreEqual(DbType.DateTime2, localtimestamp.DbType); - Assert.AreEqual(DbType.DateTime2, unspecifiedtimestamp.DbType); + Assert.That(localtimestamp.DbType, Is.EqualTo(DbType.DateTime2)); + Assert.That(unspecifiedtimestamp.DbType, Is.EqualTo(DbType.DateTime2)); // We don't support any DateTimeOffset other than offset 0 which maps to timestamptz, // we might add an exception for offset == DateTimeOffset.Now.Offset (local offset) mapping to timestamp at some point. 
@@ -452,8 +452,8 @@ public void NpgsqlParameterDbType_is_value_dependent_datetime_or_datetime2() var timestamptz = new NpgsqlParameter { Value = DateTime.UtcNow }; var dtotimestamptz = new NpgsqlParameter { Value = DateTimeOffset.UtcNow }; - Assert.AreEqual(DbType.DateTime, timestamptz.DbType); - Assert.AreEqual(DbType.DateTime, dtotimestamptz.DbType); + Assert.That(timestamptz.DbType, Is.EqualTo(DbType.DateTime)); + Assert.That(dtotimestamptz.DbType, Is.EqualTo(DbType.DateTime)); } [Test] @@ -461,13 +461,13 @@ public void NpgsqlParameterNpgsqlDbType_is_value_dependent_timestamp_or_timestam { var localtimestamp = new NpgsqlParameter { Value = DateTime.Now }; var unspecifiedtimestamp = new NpgsqlParameter { Value = new DateTime() }; - Assert.AreEqual(NpgsqlDbType.Timestamp, localtimestamp.NpgsqlDbType); - Assert.AreEqual(NpgsqlDbType.Timestamp, unspecifiedtimestamp.NpgsqlDbType); + Assert.That(localtimestamp.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Timestamp)); + Assert.That(unspecifiedtimestamp.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Timestamp)); var timestamptz = new NpgsqlParameter { Value = DateTime.UtcNow }; var dtotimestamptz = new NpgsqlParameter { Value = DateTimeOffset.UtcNow }; - Assert.AreEqual(NpgsqlDbType.TimestampTz, timestamptz.NpgsqlDbType); - Assert.AreEqual(NpgsqlDbType.TimestampTz, dtotimestamptz.NpgsqlDbType); + Assert.That(timestamptz.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.TimestampTz)); + Assert.That(dtotimestamptz.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.TimestampTz)); } [Test] diff --git a/test/Npgsql.Tests/Types/EnumTests.cs b/test/Npgsql.Tests/Types/EnumTests.cs index b6e56c632b..7161e6408c 100644 --- a/test/Npgsql.Tests/Types/EnumTests.cs +++ b/test/Npgsql.Tests/Types/EnumTests.cs @@ -42,7 +42,7 @@ public async Task Data_source_unmap() var isUnmapSuccessful = dataSourceBuilder.UnmapEnum(type); await using var dataSource = dataSourceBuilder.Build(); - Assert.IsTrue(isUnmapSuccessful); + Assert.That(isUnmapSuccessful); Assert.ThrowsAsync(() => 
AssertType(dataSource, Mood.Happy, "happy", type, npgsqlDbType: null)); } @@ -72,7 +72,7 @@ public async Task Data_source_unmap_non_generic() var isUnmapSuccessful = dataSourceBuilder.UnmapEnum(typeof(Mood), type); await using var dataSource = dataSourceBuilder.Build(); - Assert.IsTrue(isUnmapSuccessful); + Assert.That(isUnmapSuccessful); Assert.ThrowsAsync(() => AssertType(dataSource, Mood.Happy, "happy", type, npgsqlDbType: null)); } @@ -170,11 +170,11 @@ public async Task Unmapped_enum_as_clr_enum_supported_only_with_EnableUnmappedTy nameof(NpgsqlDataSourceBuilder)); var exception = await AssertTypeUnsupportedWrite(Mood.Happy, enumType); - Assert.IsInstanceOf(exception.InnerException); + Assert.That(exception.InnerException, Is.InstanceOf()); Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); exception = await AssertTypeUnsupportedRead("happy", enumType); - Assert.IsInstanceOf(exception.InnerException); + Assert.That(exception.InnerException, Is.InstanceOf()); Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); } diff --git a/test/Npgsql.Tests/Types/FullTextSearchTests.cs b/test/Npgsql.Tests/Types/FullTextSearchTests.cs index 9c5104051a..bccefdef3f 100644 --- a/test/Npgsql.Tests/Types/FullTextSearchTests.cs +++ b/test/Npgsql.Tests/Types/FullTextSearchTests.cs @@ -67,20 +67,20 @@ public async Task Full_text_search_not_supported_by_default_on_NpgsqlSlimSourceB await using var dataSource = dataSourceBuilder.Build(); var exception = await AssertTypeUnsupportedRead("a", "tsquery", dataSource); - Assert.IsInstanceOf(exception.InnerException); - Assert.AreEqual(errorMessage, exception.InnerException!.Message); + Assert.That(exception.InnerException, Is.InstanceOf()); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); exception = await AssertTypeUnsupportedWrite(new NpgsqlTsQueryLexeme("a"), pgTypeName: null, dataSource); - Assert.IsInstanceOf(exception.InnerException); - Assert.AreEqual(errorMessage, 
exception.InnerException!.Message); + Assert.That(exception.InnerException, Is.InstanceOf()); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); exception = await AssertTypeUnsupportedRead("1", "tsvector", dataSource); - Assert.IsInstanceOf(exception.InnerException); - Assert.AreEqual(errorMessage, exception.InnerException!.Message); + Assert.That(exception.InnerException, Is.InstanceOf()); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); exception = await AssertTypeUnsupportedWrite(NpgsqlTsVector.Parse("'1'"), pgTypeName: null, dataSource); - Assert.IsInstanceOf(exception.InnerException); - Assert.AreEqual(errorMessage, exception.InnerException!.Message); + Assert.That(exception.InnerException, Is.InstanceOf()); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); } [Test] diff --git a/test/Npgsql.Tests/Types/InternalTypeTests.cs b/test/Npgsql.Tests/Types/InternalTypeTests.cs index 9c68a66695..cd1c2190ed 100644 --- a/test/Npgsql.Tests/Types/InternalTypeTests.cs +++ b/test/Npgsql.Tests/Types/InternalTypeTests.cs @@ -52,10 +52,10 @@ public async Task Tid() cmd.Parameters.AddWithValue("p", NpgsqlDbType.Tid, expected); using var reader = await cmd.ExecuteReaderAsync(); reader.Read(); - Assert.AreEqual(1234, reader.GetFieldValue(0).BlockNumber); - Assert.AreEqual(40000, reader.GetFieldValue(0).OffsetNumber); - Assert.AreEqual(expected.BlockNumber, reader.GetFieldValue(1).BlockNumber); - Assert.AreEqual(expected.OffsetNumber, reader.GetFieldValue(1).OffsetNumber); + Assert.That(reader.GetFieldValue(0).BlockNumber, Is.EqualTo(1234)); + Assert.That(reader.GetFieldValue(0).OffsetNumber, Is.EqualTo(40000)); + Assert.That(reader.GetFieldValue(1).BlockNumber, Is.EqualTo(expected.BlockNumber)); + Assert.That(reader.GetFieldValue(1).OffsetNumber, Is.EqualTo(expected.OffsetNumber)); } #region NpgsqlLogSequenceNumber / PgLsn @@ -78,7 +78,7 @@ public bool 
NpgsqlLogSequenceNumber_equals(NpgsqlLogSequenceNumber lsn, object? public async Task NpgsqlLogSequenceNumber() { var expected1 = new NpgsqlLogSequenceNumber(42949672971ul); - Assert.AreEqual(expected1, NpgsqlTypes.NpgsqlLogSequenceNumber.Parse("A/B")); + Assert.That(NpgsqlTypes.NpgsqlLogSequenceNumber.Parse("A/B"), Is.EqualTo(expected1)); await using var conn = await OpenConnectionAsync(); using var cmd = conn.CreateCommand(); cmd.CommandText = "SELECT 'A/B'::pg_lsn, @p::pg_lsn"; @@ -87,12 +87,12 @@ public async Task NpgsqlLogSequenceNumber() reader.Read(); var result1 = reader.GetFieldValue(0); var result2 = reader.GetFieldValue(1); - Assert.AreEqual(expected1, result1); - Assert.AreEqual(42949672971ul, (ulong)result1); - Assert.AreEqual("A/B", result1.ToString()); - Assert.AreEqual(expected1, result2); - Assert.AreEqual(42949672971ul, (ulong)result2); - Assert.AreEqual("A/B", result2.ToString()); + Assert.That(result1, Is.EqualTo(expected1)); + Assert.That((ulong)result1, Is.EqualTo(42949672971ul)); + Assert.That(result1.ToString(), Is.EqualTo("A/B")); + Assert.That(result2, Is.EqualTo(expected1)); + Assert.That((ulong)result2, Is.EqualTo(42949672971ul)); + Assert.That(result2.ToString(), Is.EqualTo("A/B")); } #endregion NpgsqlLogSequenceNumber / PgLsn diff --git a/test/Npgsql.Tests/Types/JsonDynamicTests.cs b/test/Npgsql.Tests/Types/JsonDynamicTests.cs index 21ff2700da..59a3d24662 100644 --- a/test/Npgsql.Tests/Types/JsonDynamicTests.cs +++ b/test/Npgsql.Tests/Types/JsonDynamicTests.cs @@ -132,7 +132,7 @@ public async Task As_poco_supported_only_with_EnableDynamicJson() PostgresType, base.DataSource); - Assert.IsInstanceOf(exception.InnerException); + Assert.That(exception.InnerException, Is.InstanceOf()); Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); exception = await AssertTypeUnsupportedRead( @@ -142,7 +142,7 @@ public async Task As_poco_supported_only_with_EnableDynamicJson() PostgresType, base.DataSource); - 
Assert.IsInstanceOf(exception.InnerException); + Assert.That(exception.InnerException, Is.InstanceOf()); Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); } @@ -520,6 +520,12 @@ public JsonDynamicTests(MultiplexingMode multiplexingMode, NpgsqlDbType npgsqlDb protected override NpgsqlDataSource DataSource { get; } + [OneTimeTearDown] + protected void CleanUpDataSource() + { + DataSource.Dispose(); + } + bool IsJsonb => NpgsqlDbType == NpgsqlDbType.Jsonb; string PostgresType => IsJsonb ? "jsonb" : "json"; readonly NpgsqlDbType NpgsqlDbType; diff --git a/test/Npgsql.Tests/Types/JsonPathTests.cs b/test/Npgsql.Tests/Types/JsonPathTests.cs index 022c8eaaf9..ebd4a468c1 100644 --- a/test/Npgsql.Tests/Types/JsonPathTests.cs +++ b/test/Npgsql.Tests/Types/JsonPathTests.cs @@ -51,6 +51,6 @@ public async Task Write(string query, string expected) using var cmd = new NpgsqlCommand($"SELECT 'Passed' WHERE @p::text = {query}::text", conn) { Parameters = { new NpgsqlParameter("p", NpgsqlDbType.JsonPath) { Value = expected } } }; using var rdr = await cmd.ExecuteReaderAsync(); - Assert.True(rdr.Read()); + Assert.That(rdr.Read()); } } diff --git a/test/Npgsql.Tests/Types/MultirangeTests.cs b/test/Npgsql.Tests/Types/MultirangeTests.cs index 86bebb1b67..adb56ed2b9 100644 --- a/test/Npgsql.Tests/Types/MultirangeTests.cs +++ b/test/Npgsql.Tests/Types/MultirangeTests.cs @@ -150,18 +150,18 @@ public async Task Unmapped_multirange_supported_only_with_EnableUnmappedTypes() new("moo", "zoo"), }, multirangeTypeName); - Assert.IsInstanceOf(exception.InnerException); + Assert.That(exception.InnerException, Is.InstanceOf()); Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); exception = await AssertTypeUnsupportedRead("""{["bar","foo"],["moo","zoo"]}""", multirangeTypeName); - Assert.IsInstanceOf(exception.InnerException); + Assert.That(exception.InnerException, Is.InstanceOf()); Assert.That(exception.InnerException!.Message, 
Is.EqualTo(errorMessage)); exception = await AssertTypeUnsupportedRead>( """{["bar","foo"],["moo","zoo"]}""", multirangeTypeName); - Assert.IsInstanceOf(exception.InnerException); + Assert.That(exception.InnerException, Is.InstanceOf()); Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); } diff --git a/test/Npgsql.Tests/Types/RangeTests.cs b/test/Npgsql.Tests/Types/RangeTests.cs index f0f9fa6637..0b90824d95 100644 --- a/test/Npgsql.Tests/Types/RangeTests.cs +++ b/test/Npgsql.Tests/Types/RangeTests.cs @@ -73,23 +73,23 @@ public void Equality_finite() //different bounds var r2 = new NpgsqlRange(1, true, false, 2, false, false); - Assert.IsFalse(r1 == r2); + Assert.That(r1 == r2, Is.False); //lower bound is not inclusive var r3 = new NpgsqlRange(0, false, false, 1, false, false); - Assert.IsFalse(r1 == r3); + Assert.That(r1 == r3, Is.False); //upper bound is inclusive var r4 = new NpgsqlRange(0, true, false, 1, true, false); - Assert.IsFalse(r1 == r4); + Assert.That(r1 == r4, Is.False); var r5 = new NpgsqlRange(0, true, false, 1, false, false); - Assert.IsTrue(r1 == r5); + Assert.That(r1 == r5); //check some other combinations while we are here - Assert.IsFalse(r2 == r3); - Assert.IsFalse(r2 == r4); - Assert.IsFalse(r3 == r4); + Assert.That(r2 == r3, Is.False); + Assert.That(r2 == r4, Is.False); + Assert.That(r3 == r4, Is.False); } [Test] @@ -99,20 +99,20 @@ public void Equality_infinite() //different upper bound (lower bound shouldn't matter since it is infinite) var r2 = new NpgsqlRange(1, false, true, 2, false, false); - Assert.IsFalse(r1 == r2); + Assert.That(r1 == r2, Is.False); //upper bound is inclusive var r3 = new NpgsqlRange(0, false, true, 1, true, false); - Assert.IsFalse(r1 == r3); + Assert.That(r1 == r3, Is.False); //value of lower bound shouldn't matter since it is infinite var r4 = new NpgsqlRange(10, false, true, 1, false, false); - Assert.IsTrue(r1 == r4); + Assert.That(r1 == r4); //check some other combinations while we are 
here - Assert.IsFalse(r2 == r3); - Assert.IsFalse(r2 == r4); - Assert.IsFalse(r3 == r4); + Assert.That(r2 == r3, Is.False); + Assert.That(r2 == r4, Is.False); + Assert.That(r3 == r4, Is.False); } [Test] @@ -122,12 +122,12 @@ public void GetHashCode_value_types() NpgsqlRange b = NpgsqlRange.Empty; NpgsqlRange c = NpgsqlRange.Parse("(,)"); - Assert.IsFalse(a.Equals(b)); - Assert.IsFalse(a.Equals(c)); - Assert.IsFalse(b.Equals(c)); - Assert.AreNotEqual(a.GetHashCode(), b.GetHashCode()); - Assert.AreNotEqual(a.GetHashCode(), c.GetHashCode()); - Assert.AreNotEqual(b.GetHashCode(), c.GetHashCode()); + Assert.That(a.Equals(b), Is.False); + Assert.That(a.Equals(c), Is.False); + Assert.That(b.Equals(c), Is.False); + Assert.That(b.GetHashCode(), Is.Not.EqualTo(a.GetHashCode())); + Assert.That(c.GetHashCode(), Is.Not.EqualTo(a.GetHashCode())); + Assert.That(c.GetHashCode(), Is.Not.EqualTo(b.GetHashCode())); } [Test] @@ -137,12 +137,12 @@ public void GetHashCode_reference_types() NpgsqlRange b = NpgsqlRange.Empty; NpgsqlRange c = NpgsqlRange.Parse("(,)"); - Assert.IsFalse(a.Equals(b)); - Assert.IsFalse(a.Equals(c)); - Assert.IsFalse(b.Equals(c)); - Assert.AreNotEqual(a.GetHashCode(), b.GetHashCode()); - Assert.AreNotEqual(a.GetHashCode(), c.GetHashCode()); - Assert.AreNotEqual(b.GetHashCode(), c.GetHashCode()); + Assert.That(a.Equals(b), Is.False); + Assert.That(a.Equals(c), Is.False); + Assert.That(b.Equals(c), Is.False); + Assert.That(b.GetHashCode(), Is.Not.EqualTo(a.GetHashCode())); + Assert.That(c.GetHashCode(), Is.Not.EqualTo(a.GetHashCode())); + Assert.That(c.GetHashCode(), Is.Not.EqualTo(b.GetHashCode())); } [Test] @@ -208,15 +208,15 @@ public async Task Unmapped_range_supported_only_with_EnableUnmappedTypes() nameof(NpgsqlDataSourceBuilder)); var exception = await AssertTypeUnsupportedWrite(new NpgsqlRange("bar", "foo"), rangeType); - Assert.IsInstanceOf(exception.InnerException); + Assert.That(exception.InnerException, Is.InstanceOf()); 
Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); exception = await AssertTypeUnsupportedRead("""["bar","foo"]""", rangeType); - Assert.IsInstanceOf(exception.InnerException); + Assert.That(exception.InnerException, Is.InstanceOf()); Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); exception = await AssertTypeUnsupportedRead>("""["bar","foo"]""", rangeType); - Assert.IsInstanceOf(exception.InnerException); + Assert.That(exception.InnerException, Is.InstanceOf()); Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); } @@ -288,7 +288,7 @@ public void Roundtrip_DateTime_ranges_through_ToString_and_Parse(NpgsqlRange.Parse(wellKnownText); - Assert.AreEqual(input, result); + Assert.That(result, Is.EqualTo(input)); } [Theory] @@ -298,7 +298,7 @@ public void Roundtrip_DateTime_ranges_through_ToString_and_Parse(NpgsqlRange.Parse(value); - Assert.AreEqual(NpgsqlRange.Empty, result); + Assert.That(result, Is.EqualTo(NpgsqlRange.Empty)); } [Theory] @@ -310,7 +310,7 @@ public void Parse_empty(string value) public void Roundtrip_int_ranges_through_ToString_and_Parse(string input) { var result = NpgsqlRange.Parse(input); - Assert.AreEqual(input.Replace(" ", null), result.ToString()); + Assert.That(result.ToString(), Is.EqualTo(input.Replace(" ", null))); } [Theory] @@ -330,7 +330,7 @@ public void Roundtrip_int_ranges_through_ToString_and_Parse(string input) public void Int_range_Parse_ToString_returns_normalized_representations(string input, string normalized) { var result = NpgsqlRange.Parse(input); - Assert.AreEqual(normalized, result.ToString()); + Assert.That(result.ToString(), Is.EqualTo(normalized)); } [Theory] @@ -350,7 +350,7 @@ public void Int_range_Parse_ToString_returns_normalized_representations(string i public void Nullable_int_range_Parse_ToString_returns_normalized_representations(string input, string normalized) { var result = NpgsqlRange.Parse(input); - Assert.AreEqual(normalized, 
result.ToString()); + Assert.That(result.ToString(), Is.EqualTo(normalized)); } [Theory] @@ -361,7 +361,7 @@ public void Nullable_int_range_Parse_ToString_returns_normalized_representations public void String_range_Parse_ToString_returns_normalized_representations(string input, string normalized) { var result = NpgsqlRange.Parse(input); - Assert.AreEqual(normalized, result.ToString()); + Assert.That(result.ToString(), Is.EqualTo(normalized)); } [Theory] @@ -369,7 +369,7 @@ public void String_range_Parse_ToString_returns_normalized_representations(strin public void Roundtrip_string_ranges_through_ToString_and_Parse2(string input) { var result = NpgsqlRange.Parse(input); - Assert.AreEqual(input, result.ToString()); + Assert.That(result.ToString(), Is.EqualTo(input)); } [Theory] @@ -388,12 +388,12 @@ public void TypeConverter() var converter = TypeDescriptor.GetConverter(typeof(NpgsqlRange)); // Act - Assert.IsInstanceOf.RangeTypeConverter>(converter); - Assert.IsTrue(converter.CanConvertFrom(typeof(string))); + Assert.That(converter, Is.InstanceOf.RangeTypeConverter>()); + Assert.That(converter.CanConvertFrom(typeof(string))); var result = converter.ConvertFromString("empty"); // Assert - Assert.AreEqual(NpgsqlRange.Empty, result); + Assert.That(result, Is.Empty); } #endregion diff --git a/test/Npgsql.Tests/Types/RecordTests.cs b/test/Npgsql.Tests/Types/RecordTests.cs index 0823323041..86c3fd1875 100644 --- a/test/Npgsql.Tests/Types/RecordTests.cs +++ b/test/Npgsql.Tests/Types/RecordTests.cs @@ -103,8 +103,8 @@ public async Task As_ValueTuple_supported_only_with_EnableRecordsAsTuples() nameof(NpgsqlSlimDataSourceBuilder.EnableRecords)); var exception = Assert.Throws(() => reader.GetFieldValue<(int, string)>(0))!; - Assert.IsInstanceOf(exception.InnerException); - Assert.AreEqual(errorMessage, exception.InnerException!.Message); + Assert.That(exception.InnerException, Is.InstanceOf()); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); } 
[Test] @@ -127,12 +127,12 @@ public async Task Records_not_supported_by_default_on_NpgsqlSlimSourceBuilder() nameof(NpgsqlSlimDataSourceBuilder.EnableRecords)); var exception = Assert.Throws(() => reader.GetValue(0))!; - Assert.IsInstanceOf(exception.InnerException); - Assert.AreEqual(errorMessage, exception.InnerException!.Message); + Assert.That(exception.InnerException, Is.InstanceOf()); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); exception = Assert.Throws(() => reader.GetFieldValue(0))!; - Assert.IsInstanceOf(exception.InnerException); - Assert.AreEqual(errorMessage, exception.InnerException!.Message); + Assert.That(exception.InnerException, Is.InstanceOf()); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); } [Test] diff --git a/test/Npgsql.Tests/Types/TextTests.cs b/test/Npgsql.Tests/Types/TextTests.cs index f7e770c088..13dd94861b 100644 --- a/test/Npgsql.Tests/Types/TextTests.cs +++ b/test/Npgsql.Tests/Types/TextTests.cs @@ -138,15 +138,15 @@ public async Task Internal_char() var expected = new char[] { 'a', (char)(256 - 3), 'b', (char)66, (char)230 }; for (var i = 0; i < expected.Length; i++) { - Assert.AreEqual(expected[i], reader.GetChar(i)); + Assert.That(reader.GetChar(i), Is.EqualTo(expected[i])); } var arr = (char[])reader.GetValue(5); var arr2 = (char[])reader.GetValue(6); - Assert.AreEqual(testArr.Length, arr.Length); + Assert.That(arr.Length, Is.EqualTo(testArr.Length)); for (var i = 0; i < arr.Length; i++) { - Assert.AreEqual(testArr[i], arr[i]); - Assert.AreEqual(testArr2[i], arr2[i]); + Assert.That(arr[i], Is.EqualTo(testArr[i])); + Assert.That(arr2[i], Is.EqualTo(testArr2[i])); } } } diff --git a/test/Npgsql.Tests/TypesTests.cs b/test/Npgsql.Tests/TypesTests.cs index 1f6b0e8c55..4110a0856f 100644 --- a/test/Npgsql.Tests/TypesTests.cs +++ b/test/Npgsql.Tests/TypesTests.cs @@ -17,22 +17,22 @@ public void TsVector() NpgsqlTsVector vec; vec = NpgsqlTsVector.Parse("a"); - 
Assert.AreEqual("'a'", vec.ToString()); + Assert.That(vec.ToString(), Is.EqualTo("'a'")); vec = NpgsqlTsVector.Parse("a "); - Assert.AreEqual("'a'", vec.ToString()); + Assert.That(vec.ToString(), Is.EqualTo("'a'")); vec = NpgsqlTsVector.Parse("a:1A"); - Assert.AreEqual("'a':1A", vec.ToString()); + Assert.That(vec.ToString(), Is.EqualTo("'a':1A")); vec = NpgsqlTsVector.Parse(@"\abc\def:1a "); - Assert.AreEqual("'abcdef':1A", vec.ToString()); + Assert.That(vec.ToString(), Is.EqualTo("'abcdef':1A")); vec = NpgsqlTsVector.Parse(@"abc:3A 'abc' abc:4B 'hello''yo' 'meh\'\\':5"); - Assert.AreEqual(@"'abc':3A,4B 'hello''yo' 'meh''\\':5", vec.ToString()); + Assert.That(vec.ToString(), Is.EqualTo(@"'abc':3A,4B 'hello''yo' 'meh''\\':5")); vec = NpgsqlTsVector.Parse(" a:12345C a:24D a:25B b c d 1 2 a:25A,26B,27,28"); - Assert.AreEqual("'1' '2' 'a':24,25A,26B,27,28,12345C 'b' 'c' 'd'", vec.ToString()); + Assert.That(vec.ToString(), Is.EqualTo("'1' '2' 'a':24,25A,26B,27,28,12345C 'b' 'c' 'd'")); } [Test] @@ -47,27 +47,27 @@ public void TsQuery() var str = query.ToString(); query = NpgsqlTsQuery.Parse("a & b | c"); - Assert.AreEqual("'a' & 'b' | 'c'", query.ToString()); + Assert.That(query.ToString(), Is.EqualTo("'a' & 'b' | 'c'")); query = NpgsqlTsQuery.Parse("'a''':*ab&d:d&!c"); - Assert.AreEqual("'a''':*AB & 'd':D & !'c'", query.ToString()); + Assert.That(query.ToString(), Is.EqualTo("'a''':*AB & 'd':D & !'c'")); query = NpgsqlTsQuery.Parse("(a & !(c | d)) & (!!a&b) | c | d | e"); - Assert.AreEqual("( ( 'a' & !( 'c' | 'd' ) & !( !'a' ) & 'b' | 'c' ) | 'd' ) | 'e'", query.ToString()); - Assert.AreEqual(query.ToString(), NpgsqlTsQuery.Parse(query.ToString()).ToString()); + Assert.That(query.ToString(), Is.EqualTo("( ( 'a' & !( 'c' | 'd' ) & !( !'a' ) & 'b' | 'c' ) | 'd' ) | 'e'")); + Assert.That(NpgsqlTsQuery.Parse(query.ToString()).ToString(), Is.EqualTo(query.ToString())); query = NpgsqlTsQuery.Parse("(((a:*)))"); - Assert.AreEqual("'a':*", query.ToString()); + 
Assert.That(query.ToString(), Is.EqualTo("'a':*")); query = NpgsqlTsQuery.Parse(@"'a\\b''cde'"); - Assert.AreEqual(@"a\b'cde", ((NpgsqlTsQueryLexeme)query).Text); - Assert.AreEqual(@"'a\\b''cde'", query.ToString()); + Assert.That(((NpgsqlTsQueryLexeme)query).Text, Is.EqualTo(@"a\b'cde")); + Assert.That(query.ToString(), Is.EqualTo(@"'a\\b''cde'")); query = NpgsqlTsQuery.Parse(@"a <-> b"); - Assert.AreEqual("'a' <-> 'b'", query.ToString()); + Assert.That(query.ToString(), Is.EqualTo("'a' <-> 'b'")); query = NpgsqlTsQuery.Parse("((a & b) <5> c) <-> !d <0> e"); - Assert.AreEqual("( ( 'a' & 'b' <5> 'c' ) <-> !'d' ) <0> 'e'", query.ToString()); + Assert.That(query.ToString(), Is.EqualTo("( ( 'a' & 'b' <5> 'c' ) <-> !'d' ) <0> 'e'")); Assert.Throws(typeof(FormatException), () => NpgsqlTsQuery.Parse("a b c & &")); Assert.Throws(typeof(FormatException), () => NpgsqlTsQuery.Parse("&")); @@ -89,8 +89,8 @@ public void TsQuery() [Test] public void TsVector_empty() { - Assert.IsEmpty(NpgsqlTsVector.Empty); - Assert.AreEqual(string.Empty, NpgsqlTsVector.Empty.ToString()); + Assert.That(NpgsqlTsVector.Empty, Is.Empty); + Assert.That(NpgsqlTsVector.Empty.ToString(), Is.Empty); } [Test] @@ -167,18 +167,18 @@ public void TsQueryEquatibility() void AreEqual(NpgsqlTsQuery left, NpgsqlTsQuery right) { - Assert.True(left == right); - Assert.False(left != right); - Assert.AreEqual(left, right); - Assert.AreEqual(left.GetHashCode(), right.GetHashCode()); + Assert.That(left == right); + Assert.That(left != right, Is.False); + Assert.That(right, Is.EqualTo(left)); + Assert.That(right.GetHashCode(), Is.EqualTo(left.GetHashCode())); } void AreNotEqual(NpgsqlTsQuery left, NpgsqlTsQuery right) { - Assert.False(left == right); - Assert.True(left != right); - Assert.AreNotEqual(left, right); - Assert.AreNotEqual(left.GetHashCode(), right.GetHashCode()); + Assert.That(left == right, Is.False); + Assert.That(left != right); + Assert.That(right, Is.Not.EqualTo(left)); + 
Assert.That(right.GetHashCode(), Is.Not.EqualTo(left.GetHashCode())); } } @@ -188,7 +188,7 @@ public void TsQueryOperatorPrecedence() { var query = NpgsqlTsQuery.Parse("!a <-> b & c | d & e"); var expectedGrouping = NpgsqlTsQuery.Parse("((!(a) <-> b) & c) | (d & e)"); - Assert.AreEqual(expectedGrouping.ToString(), query.ToString()); + Assert.That(query.ToString(), Is.EqualTo(expectedGrouping.ToString())); } #pragma warning restore CS0618 // {NpgsqlTsVector,NpgsqlTsQuery}.Parse are obsolete @@ -204,14 +204,14 @@ public void NpgsqlPolygon_empty() public void NpgsqlPath_default() { NpgsqlPath defaultPath = default; - Assert.IsFalse(defaultPath.Equals([new(1, 2)])); + Assert.That(defaultPath.Equals([new(1, 2)]), Is.False); } [Test] public void NpgsqlPolygon_default() { NpgsqlPolygon defaultPolygon = default; - Assert.IsFalse(defaultPolygon.Equals([new(1, 2)])); + Assert.That(defaultPolygon.Equals([new(1, 2)]), Is.False); } [Test] From c62a9be02d10c2cade268eb74283b583c7dfa313 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Wed, 10 Sep 2025 12:13:11 +0200 Subject: [PATCH 555/761] Bump extension versions to 10.0.0-rc.1 (#6209) --- Directory.Packages.props | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index d4671ecb06..bc9d30b8bb 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -1,7 +1,7 @@ - - + + @@ -26,8 +26,8 @@ - - + + @@ -35,7 +35,7 @@ - + From ba77cf8dbf41bfe21db2ea80aec1e03d22a179c1 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Wed, 10 Sep 2025 13:17:33 +0300 Subject: [PATCH 556/761] Fix logical replication tests with PostgreSQL 18 (#6171) PostgreSQL 18 doesn't send messages for aborted transactions --- .../Replication/PgOutputReplicationTests.cs | 129 +++++++++++------- 1 file changed, 76 insertions(+), 53 deletions(-) diff --git a/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs b/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs 
index e3cffd5766..3bbcd1b6ac 100644 --- a/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs +++ b/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs @@ -10,6 +10,7 @@ using Npgsql.Replication; using Npgsql.Replication.PgOutput; using Npgsql.Replication.PgOutput.Messages; +using Npgsql.Util; using TruncateOptions = Npgsql.Replication.PgOutput.Messages.TruncateMessage.TruncateOptions; using ReplicaIdentitySetting = Npgsql.Replication.PgOutput.Messages.RelationMessage.ReplicaIdentitySetting; using static Npgsql.Tests.TestUtil; @@ -728,77 +729,99 @@ public Task LogicalDecodingMessage(bool writeMessages, bool readMessages) } } - if (IsStreaming) + // PostgreSQL 18 skips logical decoding of already-aborted transactions + if (c.PostgreSqlVersion.IsGreaterOrEqual(18)) { - // Begin Transaction 2 - transactionXid = await AssertTransactionStart(messages); - - // Relation - await NextMessage(messages); - - // Inserts - for (var insertCount = 0; insertCount < 10; insertCount++) - await NextMessage(messages); - - // LogicalDecodingMessage 2 (transactional) + // LogicalDecodingMessage 2 (non-transactional) if (writeMessages) { var msg = await NextMessage(messages); - Assert.That(msg.TransactionXid, IsStreaming ? 
Is.EqualTo(transactionXid) : Is.Null); - Assert.That(msg.Flags, Is.EqualTo(1)); + Assert.That(msg.TransactionXid, Is.Null); + Assert.That(msg.Flags, Is.EqualTo(0)); Assert.That(msg.Prefix, Is.EqualTo(prefix)); - Assert.That(msg.Data.Length, Is.EqualTo(transactionalMessage.Length)); + Assert.That(msg.Data.Length, Is.EqualTo(nonTransactionalMessage.Length)); if (readMessages) { var buffer = new MemoryStream(); await msg.Data.CopyToAsync(buffer, CancellationToken.None); - Assert.That(rc.Encoding.GetString(buffer.ToArray()), Is.EqualTo(transactionalMessage)); + Assert.That(rc.Encoding.GetString(buffer.ToArray()), Is.EqualTo(nonTransactionalMessage)); } } - - // Further inserts - // We don't try to predict how many insert messages we get here - // since the streaming transaction will most likely abort before - // we reach the expected number - while (await messages.MoveNextAsync() && messages.Current is InsertMessage - || messages.Current is StreamStopMessage - && await messages.MoveNextAsync() - && messages.Current is StreamStartMessage - && await messages.MoveNextAsync() - && messages.Current is InsertMessage) + } + else + { + if (IsStreaming) { - // Ignore + // Begin Transaction 2 + transactionXid = await AssertTransactionStart(messages); + + // Relation + await NextMessage(messages); + + // Inserts + for (var insertCount = 0; insertCount < 10; insertCount++) + await NextMessage(messages); + + // LogicalDecodingMessage 2 (transactional) + if (writeMessages) + { + var msg = await NextMessage(messages); + Assert.That(msg.TransactionXid, IsStreaming ? 
Is.EqualTo(transactionXid) : Is.Null); + Assert.That(msg.Flags, Is.EqualTo(1)); + Assert.That(msg.Prefix, Is.EqualTo(prefix)); + Assert.That(msg.Data.Length, Is.EqualTo(transactionalMessage.Length)); + if (readMessages) + { + var buffer = new MemoryStream(); + await msg.Data.CopyToAsync(buffer, CancellationToken.None); + Assert.That(rc.Encoding.GetString(buffer.ToArray()), Is.EqualTo(transactionalMessage)); + } + } + + // Further inserts + // We don't try to predict how many insert messages we get here + // since the streaming transaction will most likely abort before + // we reach the expected number + while (await messages.MoveNextAsync() && messages.Current is InsertMessage + || messages.Current is StreamStopMessage + && await messages.MoveNextAsync() + && messages.Current is StreamStartMessage + && await messages.MoveNextAsync() + && messages.Current is InsertMessage) + { + // Ignore + } } - } - else if (writeMessages) - await messages.MoveNextAsync(); + else if (writeMessages) + await messages.MoveNextAsync(); - // LogicalDecodingMessage 3 (non-transactional) - if (writeMessages) - { - var msg = (LogicalDecodingMessage)messages.Current; - Assert.That(msg.TransactionXid, Is.Null); - Assert.That(msg.Flags, Is.EqualTo(0)); - Assert.That(msg.Prefix, Is.EqualTo(prefix)); - Assert.That(msg.Data.Length, Is.EqualTo(nonTransactionalMessage.Length)); - if (readMessages) + // LogicalDecodingMessage 3 (non-transactional) + if (writeMessages) { - var buffer = new MemoryStream(); - await msg.Data.CopyToAsync(buffer, CancellationToken.None); - Assert.That(rc.Encoding.GetString(buffer.ToArray()), Is.EqualTo(nonTransactionalMessage)); + var msg = (LogicalDecodingMessage)messages.Current; + Assert.That(msg.TransactionXid, Is.Null); + Assert.That(msg.Flags, Is.EqualTo(0)); + Assert.That(msg.Prefix, Is.EqualTo(prefix)); + Assert.That(msg.Data.Length, Is.EqualTo(nonTransactionalMessage.Length)); + if (readMessages) + { + var buffer = new MemoryStream(); + await 
msg.Data.CopyToAsync(buffer, CancellationToken.None); + Assert.That(rc.Encoding.GetString(buffer.ToArray()), Is.EqualTo(nonTransactionalMessage)); + } + + if (IsStreaming) + await messages.MoveNextAsync(); } + // Rollback Transaction 2 if (IsStreaming) - await messages.MoveNextAsync(); - } - - // Rollback Transaction 2 - if (IsStreaming) - { - Assert.That(messages.Current, - _streamingMode == PgOutputStreamingMode.On - ? Is.TypeOf() - : Is.TypeOf()); + { + Assert.That(messages.Current, + _streamingMode == PgOutputStreamingMode.On + ? Is.TypeOf() + : Is.TypeOf()); + } } streamingCts.Cancel(); From 8f9711249bd6fcafbea350aad4a1cdca448b3ab9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emmanuel=20Andr=C3=A9?= <2341261+manandre@users.noreply.github.com> Date: Wed, 10 Sep 2025 12:36:38 +0200 Subject: [PATCH 557/761] Update various dependencies (#6188) --- Directory.Packages.props | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index bc9d30b8bb..acb8a524c7 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -2,17 +2,12 @@ - - - - - - + - + - + @@ -21,7 +16,7 @@ - + @@ -29,10 +24,10 @@ - + - - + + @@ -40,9 +35,9 @@ - + - + From 7bbf43a67958dc735ac532cf2c9a62d15a927b3c Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Wed, 10 Sep 2025 14:32:51 +0300 Subject: [PATCH 558/761] Fix possible deadlock while asynchronously reading values from reader (#6202) Fixes #6190 --- .../Internal/Converters/AsyncHelpers.cs | 28 +++++++++++++++---- 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/src/Npgsql/Internal/Converters/AsyncHelpers.cs b/src/Npgsql/Internal/Converters/AsyncHelpers.cs index ddd03a24be..bf85a06a9f 100644 --- a/src/Npgsql/Internal/Converters/AsyncHelpers.cs +++ b/src/Npgsql/Internal/Converters/AsyncHelpers.cs @@ -37,9 +37,17 @@ public abstract class CompletionSource public sealed class CompletionSource : CompletionSource { - 
AsyncValueTaskMethodBuilder _amb = AsyncValueTaskMethodBuilder.Create(); + AsyncValueTaskMethodBuilder _amb; - public ValueTask Task => _amb.Task; + public ValueTask Task { get; } + + public CompletionSource() + { + _amb = AsyncValueTaskMethodBuilder.Create(); + // AsyncValueTaskMethodBuilder's Task and SetResult aren't thread safe in regard to each other + // Which is why we access it prematurely + Task = _amb.Task; + } public void SetResult(T value) => _amb.SetResult(value); @@ -50,9 +58,17 @@ public override void SetException(Exception exception) public sealed class PoolingCompletionSource : CompletionSource { - PoolingAsyncValueTaskMethodBuilder _amb = PoolingAsyncValueTaskMethodBuilder.Create(); + PoolingAsyncValueTaskMethodBuilder _amb; - public ValueTask Task => _amb.Task; + public ValueTask Task { get; } + + public PoolingCompletionSource() + { + _amb = PoolingAsyncValueTaskMethodBuilder.Create(); + // PoolingAsyncValueTaskMethodBuilder's Task and SetResult aren't thread safe in regard to each other + // Which is why we access it prematurely + Task = _amb.Task; + } public void SetResult(T value) => _amb.SetResult(value); @@ -90,7 +106,7 @@ public CompletionSourceContinuation(object handle, delegate*(); OnCompletedWithSource(task.AsTask(), source, new(instance, &UnboxAndComplete)); return source.Task; @@ -111,7 +127,7 @@ public static unsafe ValueTask ReadAsObjectAsyncAsT(this PgConverter in if (task.IsCompletedSuccessfully) return new((T)task.Result); - // Otherwise we do one additional allocation, this allow us to share state machine codegen for all Ts. + // Otherwise we do one additional allocation, this allows us to share state machine codegen for all Ts. 
var source = new PoolingCompletionSource(); OnCompletedWithSource(task.AsTask(), source, new(instance, &UnboxAndComplete)); return source.Task; From 398d65a4e2e4c2c52eb79be1fcc6fb2110cc5cca Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Wed, 10 Sep 2025 16:59:17 +0200 Subject: [PATCH 559/761] Revert "Move to PublicApiAnalyzers v4 (#6185)" This reverts commit ffc3fba1b4f611390b2b23c4ca78d86de89bd272. --- Directory.Packages.props | 2 +- src/Npgsql/PublicAPI.Shipped.txt | 16 ---------------- 2 files changed, 1 insertion(+), 17 deletions(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index acb8a524c7..ea2e4ebb3e 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -13,7 +13,7 @@ - + diff --git a/src/Npgsql/PublicAPI.Shipped.txt b/src/Npgsql/PublicAPI.Shipped.txt index 246a515cc2..3ec604ddc0 100644 --- a/src/Npgsql/PublicAPI.Shipped.txt +++ b/src/Npgsql/PublicAPI.Shipped.txt @@ -1228,8 +1228,6 @@ NpgsqlTypes.NpgsqlBox.Width.get -> double NpgsqlTypes.NpgsqlCidr NpgsqlTypes.NpgsqlCidr.Address.get -> System.Net.IPAddress! NpgsqlTypes.NpgsqlCidr.Deconstruct(out System.Net.IPAddress! address, out byte netmask) -> void -NpgsqlTypes.NpgsqlCidr.Equals(NpgsqlTypes.NpgsqlCidr other) -> bool -NpgsqlTypes.NpgsqlInet.Equals(NpgsqlTypes.NpgsqlInet other) -> bool NpgsqlTypes.NpgsqlCidr.Netmask.get -> byte NpgsqlTypes.NpgsqlCidr.NpgsqlCidr() -> void NpgsqlTypes.NpgsqlCidr.NpgsqlCidr(string! addr) -> void @@ -1795,12 +1793,10 @@ override Npgsql.Schema.NpgsqlDbColumn.this[string! propertyName].get -> object? override NpgsqlTypes.NpgsqlBox.Equals(object? obj) -> bool override NpgsqlTypes.NpgsqlBox.GetHashCode() -> int override NpgsqlTypes.NpgsqlBox.ToString() -> string! -override NpgsqlTypes.NpgsqlCidr.GetHashCode() -> int override NpgsqlTypes.NpgsqlCidr.ToString() -> string! override NpgsqlTypes.NpgsqlCircle.Equals(object? obj) -> bool override NpgsqlTypes.NpgsqlCircle.GetHashCode() -> int override NpgsqlTypes.NpgsqlCircle.ToString() -> string! 
-override NpgsqlTypes.NpgsqlInet.GetHashCode() -> int override NpgsqlTypes.NpgsqlInet.ToString() -> string! override NpgsqlTypes.NpgsqlInterval.Equals(object? obj) -> bool override NpgsqlTypes.NpgsqlInterval.GetHashCode() -> int @@ -1890,14 +1886,10 @@ static NpgsqlTypes.NpgsqlBox.operator !=(NpgsqlTypes.NpgsqlBox x, NpgsqlTypes.Np static NpgsqlTypes.NpgsqlBox.operator ==(NpgsqlTypes.NpgsqlBox x, NpgsqlTypes.NpgsqlBox y) -> bool static NpgsqlTypes.NpgsqlCidr.explicit operator System.Net.IPAddress!(NpgsqlTypes.NpgsqlCidr cidr) -> System.Net.IPAddress! static NpgsqlTypes.NpgsqlCidr.implicit operator NpgsqlTypes.NpgsqlInet(NpgsqlTypes.NpgsqlCidr cidr) -> NpgsqlTypes.NpgsqlInet -static NpgsqlTypes.NpgsqlCidr.operator !=(NpgsqlTypes.NpgsqlCidr left, NpgsqlTypes.NpgsqlCidr right) -> bool -static NpgsqlTypes.NpgsqlCidr.operator ==(NpgsqlTypes.NpgsqlCidr left, NpgsqlTypes.NpgsqlCidr right) -> bool static NpgsqlTypes.NpgsqlCircle.operator !=(NpgsqlTypes.NpgsqlCircle x, NpgsqlTypes.NpgsqlCircle y) -> bool static NpgsqlTypes.NpgsqlCircle.operator ==(NpgsqlTypes.NpgsqlCircle x, NpgsqlTypes.NpgsqlCircle y) -> bool static NpgsqlTypes.NpgsqlInet.explicit operator System.Net.IPAddress!(NpgsqlTypes.NpgsqlInet inet) -> System.Net.IPAddress! static NpgsqlTypes.NpgsqlInet.implicit operator NpgsqlTypes.NpgsqlInet(System.Net.IPAddress! 
ip) -> NpgsqlTypes.NpgsqlInet -static NpgsqlTypes.NpgsqlInet.operator !=(NpgsqlTypes.NpgsqlInet left, NpgsqlTypes.NpgsqlInet right) -> bool -static NpgsqlTypes.NpgsqlInet.operator ==(NpgsqlTypes.NpgsqlInet left, NpgsqlTypes.NpgsqlInet right) -> bool static NpgsqlTypes.NpgsqlLine.operator !=(NpgsqlTypes.NpgsqlLine x, NpgsqlTypes.NpgsqlLine y) -> bool static NpgsqlTypes.NpgsqlLine.operator ==(NpgsqlTypes.NpgsqlLine x, NpgsqlTypes.NpgsqlLine y) -> bool static NpgsqlTypes.NpgsqlLogSequenceNumber.explicit operator NpgsqlTypes.NpgsqlLogSequenceNumber(ulong value) -> NpgsqlTypes.NpgsqlLogSequenceNumber @@ -1942,13 +1934,5 @@ static NpgsqlTypes.NpgsqlTsVector.Parse(string! value) -> NpgsqlTypes.NpgsqlTsVe static readonly Npgsql.NpgsqlFactory.Instance -> Npgsql.NpgsqlFactory! static readonly NpgsqlTypes.NpgsqlLogSequenceNumber.Invalid -> NpgsqlTypes.NpgsqlLogSequenceNumber static readonly NpgsqlTypes.NpgsqlRange.Empty -> NpgsqlTypes.NpgsqlRange -virtual Npgsql.NoticeEventHandler.Invoke(object! sender, Npgsql.NpgsqlNoticeEventArgs! e) -> void -virtual Npgsql.NotificationEventHandler.Invoke(object! sender, Npgsql.NpgsqlNotificationEventArgs! e) -> void virtual Npgsql.NpgsqlCommand.Clone() -> Npgsql.NpgsqlCommand! -virtual Npgsql.NpgsqlRowUpdatedEventHandler.Invoke(object! sender, Npgsql.NpgsqlRowUpdatedEventArgs! e) -> void -virtual Npgsql.NpgsqlRowUpdatingEventHandler.Invoke(object! sender, Npgsql.NpgsqlRowUpdatingEventArgs! e) -> void -virtual Npgsql.ProvideClientCertificatesCallback.Invoke(System.Security.Cryptography.X509Certificates.X509CertificateCollection! certificates) -> void -virtual Npgsql.ProvidePasswordCallback.Invoke(string! host, int port, string! database, string! username) -> string! virtual Npgsql.Replication.PgOutput.ReplicationTuple.GetAsyncEnumerator(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Collections.Generic.IAsyncEnumerator! 
-~override NpgsqlTypes.NpgsqlCidr.Equals(object obj) -> bool -~override NpgsqlTypes.NpgsqlInet.Equals(object obj) -> bool From d3314594f48c3722f185842b7ea6f32a3da6503f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 18 Sep 2025 08:32:31 +0200 Subject: [PATCH 560/761] Bump BenchmarkDotNet from 0.15.2 to 0.15.3 (#6215) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index ea2e4ebb3e..49c6e2c5f6 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -33,7 +33,7 @@ - + From d03b78d12c4be25bafa9e456a549b4dd2435d1c6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 25 Sep 2025 17:46:44 +0200 Subject: [PATCH 561/761] Bump BenchmarkDotNet from 0.15.3 to 0.15.4 (#6221) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 49c6e2c5f6..9f5ae3fd0b 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -33,7 +33,7 @@ - + From ee978428cfa1bcfb85f6569c1601a8ab66fff789 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Wed, 1 Oct 2025 17:51:51 +0300 Subject: [PATCH 562/761] Suppress ExecutionContext while creating timers (#6106) Fixes #6105 --- src/Npgsql/Internal/NpgsqlConnector.cs | 5 ++++- src/Npgsql/NpgsqlDataSource.cs | 5 +++-- src/Npgsql/PoolingDataSource.cs | 3 ++- src/Npgsql/Replication/ReplicationConnection.cs | 7 +++++-- 4 files changed, 14 insertions(+), 6 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 44b9504c71..8018abe56d 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -400,7 +400,10 @@ internal NpgsqlConnector(NpgsqlDataSource dataSource, NpgsqlConnection conn) _isKeepAliveEnabled = 
Settings.KeepAlive > 0; if (_isKeepAliveEnabled) - _keepAliveTimer = new Timer(PerformKeepAlive, null, Timeout.Infinite, Timeout.Infinite); + { + using (ExecutionContext.SuppressFlow()) // Don't capture the current ExecutionContext and its AsyncLocals onto the timer causing them to live forever + _keepAliveTimer = new Timer(PerformKeepAlive, null, Timeout.Infinite, Timeout.Infinite); + } DataReader = new NpgsqlDataReader(this); diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index d8ff956141..cc177602db 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -124,8 +124,9 @@ internal NpgsqlDataSource( _timerPasswordProviderCancellationTokenSource = new(); - // Create the timer, but don't start it; the manual run below will will schedule the first refresh. - _periodicPasswordProviderTimer = new Timer(state => _ = RefreshPassword(), null, Timeout.InfiniteTimeSpan, Timeout.InfiniteTimeSpan); + // Create the timer, but don't start it; the manual run below will schedule the first refresh. + using (ExecutionContext.SuppressFlow()) // Don't capture the current ExecutionContext and its AsyncLocals onto the timer causing them to live forever + _periodicPasswordProviderTimer = new Timer(state => _ = RefreshPassword(), null, Timeout.InfiniteTimeSpan, Timeout.InfiniteTimeSpan); // Trigger the first refresh attempt right now, outside the timer; this allows us to capture the Task so it can be observed // in GetPasswordAsync. 
_passwordRefreshTask = Task.Run(RefreshPassword); diff --git a/src/Npgsql/PoolingDataSource.cs b/src/Npgsql/PoolingDataSource.cs index c339047fb4..a6c63494a8 100644 --- a/src/Npgsql/PoolingDataSource.cs +++ b/src/Npgsql/PoolingDataSource.cs @@ -97,7 +97,8 @@ internal PoolingDataSource( if (connectionIdleLifetime < pruningSamplingInterval) throw new ArgumentException($"Connection can't have {nameof(settings.ConnectionIdleLifetime)} {connectionIdleLifetime} under {nameof(settings.ConnectionPruningInterval)} {pruningSamplingInterval}"); - _pruningTimer = new Timer(PruningTimerCallback, this, Timeout.Infinite, Timeout.Infinite); + using (ExecutionContext.SuppressFlow()) // Don't capture the current ExecutionContext and its AsyncLocals onto the timer causing them to live forever + _pruningTimer = new Timer(PruningTimerCallback, this, Timeout.Infinite, Timeout.Infinite); _pruningSampleSize = DivideRoundingUp(settings.ConnectionIdleLifetime, settings.ConnectionPruningInterval); _pruningMedianIndex = DivideRoundingUp(_pruningSampleSize, 2) - 1; // - 1 to go from length to index _pruningSamplingInterval = pruningSamplingInterval; diff --git a/src/Npgsql/Replication/ReplicationConnection.cs b/src/Npgsql/Replication/ReplicationConnection.cs index 4a41467164..8583c31ce0 100644 --- a/src/Npgsql/Replication/ReplicationConnection.cs +++ b/src/Npgsql/Replication/ReplicationConnection.cs @@ -452,8 +452,11 @@ internal async IAsyncEnumerator StartReplicationInternal( SetTimeouts(_walReceiverTimeout, CommandTimeout); - _sendFeedbackTimer = new Timer(TimerSendFeedback, state: null, WalReceiverStatusInterval, Timeout.InfiniteTimeSpan); - _requestFeedbackTimer = new Timer(TimerRequestFeedback, state: null, _requestFeedbackInterval, Timeout.InfiniteTimeSpan); + using (ExecutionContext.SuppressFlow()) // Don't capture the current ExecutionContext and its AsyncLocals onto the timer causing them to live forever + { + _sendFeedbackTimer = new Timer(TimerSendFeedback, state: null, 
WalReceiverStatusInterval, Timeout.InfiniteTimeSpan); + _requestFeedbackTimer = new Timer(TimerRequestFeedback, state: null, _requestFeedbackInterval, Timeout.InfiniteTimeSpan); + } while (true) { From 444c77aea97e5273e4d938b7023f4e25a5f120b8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emmanuel=20Andr=C3=A9?= <2341261+manandre@users.noreply.github.com> Date: Thu, 2 Oct 2025 11:26:35 +0200 Subject: [PATCH 563/761] Rewrite NpgsqlConnectionStringBuilderSourceGenerator as incremental (#6186) And bump Microsoft.CodeAnalysis.Analyzers and Microsoft.CodeAnalysis.CSharp --- Directory.Packages.props | 4 +- ...lConnectionStringBuilderSourceGenerator.cs | 161 +++++++++--------- 2 files changed, 83 insertions(+), 82 deletions(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 9f5ae3fd0b..9e51fdc983 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -14,8 +14,8 @@ - - + + diff --git a/src/Npgsql.SourceGenerators/NpgsqlConnectionStringBuilderSourceGenerator.cs b/src/Npgsql.SourceGenerators/NpgsqlConnectionStringBuilderSourceGenerator.cs index 665789e74e..c7c7228321 100644 --- a/src/Npgsql.SourceGenerators/NpgsqlConnectionStringBuilderSourceGenerator.cs +++ b/src/Npgsql.SourceGenerators/NpgsqlConnectionStringBuilderSourceGenerator.cs @@ -9,7 +9,7 @@ namespace Npgsql.SourceGenerators; [Generator] -public class NpgsqlConnectionStringBuilderSourceGenerator : ISourceGenerator +public class NpgsqlConnectionStringBuilderSourceGenerator : IIncrementalGenerator { static readonly DiagnosticDescriptor InternalError = new DiagnosticDescriptor( id: "PGXXXX", @@ -19,106 +19,107 @@ public class NpgsqlConnectionStringBuilderSourceGenerator : ISourceGenerator DiagnosticSeverity.Error, isEnabledByDefault: true); - public void Initialize(GeneratorInitializationContext context) {} - - public void Execute(GeneratorExecutionContext context) + public void Initialize(IncrementalGeneratorInitializationContext context) { - if 
(context.Compilation.Assembly.GetTypeByMetadataName("Npgsql.NpgsqlConnectionStringBuilder") is not { } type) - return; - - if (context.Compilation.Assembly.GetTypeByMetadataName("Npgsql.NpgsqlConnectionStringPropertyAttribute") is not - { } connectionStringPropertyAttribute) - { - context.ReportDiagnostic(Diagnostic.Create( - InternalError, - location: null, - "Could not find Npgsql.NpgsqlConnectionStringPropertyAttribute")); - return; - } - - var obsoleteAttribute = context.Compilation.GetTypeByMetadataName("System.ObsoleteAttribute"); - var displayNameAttribute = context.Compilation.GetTypeByMetadataName("System.ComponentModel.DisplayNameAttribute"); - var defaultValueAttribute = context.Compilation.GetTypeByMetadataName("System.ComponentModel.DefaultValueAttribute"); - - if (obsoleteAttribute is null || displayNameAttribute is null || defaultValueAttribute is null) + var compilationProvider = context.CompilationProvider; + context.RegisterSourceOutput(compilationProvider, (spc, compilation) => { - context.ReportDiagnostic(Diagnostic.Create( - InternalError, - location: null, - "Could not find ObsoleteAttribute, DisplayNameAttribute or DefaultValueAttribute")); - return; - } - - var properties = new List(); - var propertiesByKeyword = new Dictionary(); - foreach (var member in type.GetMembers()) - { - if (member is not IPropertySymbol property || - property.GetAttributes().FirstOrDefault(a => connectionStringPropertyAttribute.Equals(a.AttributeClass, SymbolEqualityComparer.Default)) is not { } propertyAttribute || - property.GetAttributes() - .FirstOrDefault(a => displayNameAttribute.Equals(a.AttributeClass, SymbolEqualityComparer.Default)) - ?.ConstructorArguments[0].Value is not string displayName) + var type = compilation.Assembly.GetTypeByMetadataName("Npgsql.NpgsqlConnectionStringBuilder"); + if (type is null) + return; + + var connectionStringPropertyAttribute = compilation.Assembly.GetTypeByMetadataName("Npgsql.NpgsqlConnectionStringPropertyAttribute"); + 
if (connectionStringPropertyAttribute is null) { - continue; + spc.ReportDiagnostic(Diagnostic.Create( + InternalError, + location: null, + "Could not find Npgsql.NpgsqlConnectionStringPropertyAttribute")); + return; } - var explicitDefaultValue = property.GetAttributes() - .FirstOrDefault(a => defaultValueAttribute.Equals(a.AttributeClass, SymbolEqualityComparer.Default)) - ?.ConstructorArguments[0].Value; - - if (explicitDefaultValue is string s) - explicitDefaultValue = '"' + s.Replace("\"", "\"\"") + '"'; + var obsoleteAttribute = compilation.GetTypeByMetadataName("System.ObsoleteAttribute"); + var displayNameAttribute = compilation.GetTypeByMetadataName("System.ComponentModel.DisplayNameAttribute"); + var defaultValueAttribute = compilation.GetTypeByMetadataName("System.ComponentModel.DefaultValueAttribute"); - if (explicitDefaultValue is not null && property.Type.TypeKind == TypeKind.Enum) + if (obsoleteAttribute is null || displayNameAttribute is null || defaultValueAttribute is null) { - explicitDefaultValue = $"({property.Type.Name}){explicitDefaultValue}"; - // var foo = property.Type.Name; - // explicitDefaultValue += $"/* {foo} */"; + spc.ReportDiagnostic(Diagnostic.Create( + InternalError, + location: null, + "Could not find ObsoleteAttribute, DisplayNameAttribute or DefaultValueAttribute")); + return; } - var propertyDetails = new PropertyDetails + var properties = new List(); + var propertiesByKeyword = new Dictionary(); + foreach (var member in type.GetMembers()) { - Name = property.Name, - CanonicalName = displayName, - TypeName = property.Type.Name, - IsEnum = property.Type.TypeKind == TypeKind.Enum, - IsObsolete = property.GetAttributes().Any(a => obsoleteAttribute.Equals(a.AttributeClass, SymbolEqualityComparer.Default)), - DefaultValue = explicitDefaultValue - }; + if (member is not IPropertySymbol property || + property.GetAttributes().FirstOrDefault(a => connectionStringPropertyAttribute.Equals(a.AttributeClass, 
SymbolEqualityComparer.Default)) is not { } propertyAttribute || + property.GetAttributes() + .FirstOrDefault(a => displayNameAttribute.Equals(a.AttributeClass, SymbolEqualityComparer.Default)) + ?.ConstructorArguments[0].Value is not string displayName) + { + continue; + } - properties.Add(propertyDetails); + var explicitDefaultValue = property.GetAttributes() + .FirstOrDefault(a => defaultValueAttribute.Equals(a.AttributeClass, SymbolEqualityComparer.Default)) + ?.ConstructorArguments[0].Value; - propertiesByKeyword[displayName.ToUpperInvariant()] = propertyDetails; - if (property.Name != displayName) - { - var propertyName = property.Name.ToUpperInvariant(); - if (!propertiesByKeyword.ContainsKey(propertyName)) - propertyDetails.Alternatives.Add(propertyName); - } + if (explicitDefaultValue is string s) + explicitDefaultValue = '"' + s.Replace("\"", "\"\"") + '"'; - if (propertyAttribute.ConstructorArguments.Length == 1) - { - foreach (var synonymArg in propertyAttribute.ConstructorArguments[0].Values) + if (explicitDefaultValue is not null && property.Type.TypeKind == TypeKind.Enum) { - if (synonymArg.Value is string synonym) + explicitDefaultValue = $"({property.Type.Name}){explicitDefaultValue}"; + } + + var propertyDetails = new PropertyDetails + { + Name = property.Name, + CanonicalName = displayName, + TypeName = property.Type.Name, + IsEnum = property.Type.TypeKind == TypeKind.Enum, + IsObsolete = property.GetAttributes().Any(a => obsoleteAttribute.Equals(a.AttributeClass, SymbolEqualityComparer.Default)), + DefaultValue = explicitDefaultValue + }; + + properties.Add(propertyDetails); + + propertiesByKeyword[displayName.ToUpperInvariant()] = propertyDetails; + if (property.Name != displayName) + { + var propertyName = property.Name.ToUpperInvariant(); + if (!propertiesByKeyword.ContainsKey(propertyName)) + propertyDetails.Alternatives.Add(propertyName); + } + + if (propertyAttribute.ConstructorArguments.Length == 1) + { + foreach (var synonymArg in 
propertyAttribute.ConstructorArguments[0].Values) { - var synonymName = synonym.ToUpperInvariant(); - if (!propertiesByKeyword.ContainsKey(synonymName)) - propertyDetails.Alternatives.Add(synonymName); + if (synonymArg.Value is string synonym) + { + var synonymName = synonym.ToUpperInvariant(); + if (!propertiesByKeyword.ContainsKey(synonymName)) + propertyDetails.Alternatives.Add(synonymName); + } } } } - } - var template = Template.Parse(EmbeddedResource.GetContent("NpgsqlConnectionStringBuilder.snbtxt"), "NpgsqlConnectionStringBuilder.snbtxt"); + var template = Template.Parse(EmbeddedResource.GetContent("NpgsqlConnectionStringBuilder.snbtxt"), "NpgsqlConnectionStringBuilder.snbtxt"); - var output = template.Render(new - { - Properties = properties, - PropertiesByKeyword = propertiesByKeyword - }); + var output = template.Render(new + { + Properties = properties, + PropertiesByKeyword = propertiesByKeyword + }); - context.AddSource(type.Name + ".Generated.cs", SourceText.From(output, Encoding.UTF8)); + spc.AddSource(type.Name + ".Generated.cs", SourceText.From(output, Encoding.UTF8)); + }); } sealed class PropertyDetails From d58efec0c236b7e9763a8ce09b1308521f2850a5 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Thu, 2 Oct 2025 14:15:05 +0300 Subject: [PATCH 564/761] Add COPY operations dispose on initialization failure (#6220) Fixes #6219 --- src/Npgsql/NpgsqlBinaryExporter.cs | 29 ++++++++---- src/Npgsql/NpgsqlBinaryImporter.cs | 6 ++- src/Npgsql/NpgsqlConnection.cs | 75 ++++++++++++++++++++++++------ src/Npgsql/NpgsqlRawCopyStream.cs | 35 ++++++++++---- 4 files changed, 112 insertions(+), 33 deletions(-) diff --git a/src/Npgsql/NpgsqlBinaryExporter.cs b/src/Npgsql/NpgsqlBinaryExporter.cs index f221056119..9473c95959 100644 --- a/src/Npgsql/NpgsqlBinaryExporter.cs +++ b/src/Npgsql/NpgsqlBinaryExporter.cs @@ -24,7 +24,7 @@ public sealed class NpgsqlBinaryExporter : ICancelable NpgsqlConnector _connector; NpgsqlReadBuffer _buf; - bool _isConsumed, 
_isDisposed; + ExporterState _state = ExporterState.Uninitialized; long _endOfMessagePos; short _column; @@ -91,6 +91,7 @@ internal async Task Init(string copyToCommand, bool async, CancellationToken can throw _connector.UnexpectedMessageReceived(msg.Code); } + _state = ExporterState.Ready; NumColumns = copyOutResponse.NumColumns; _columnInfoCache = new PgConverterInfo[NumColumns]; _rowsExported = 0; @@ -141,7 +142,7 @@ async Task ReadHeader(bool async) async ValueTask StartRow(bool async, CancellationToken cancellationToken = default) { ThrowIfDisposed(); - if (_isConsumed) + if (_state == ExporterState.Consumed) return -1; using var registration = _connector.StartNestedCancellableOperation(cancellationToken); @@ -176,7 +177,7 @@ async ValueTask StartRow(bool async, CancellationToken cancellationToken = Expect(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); Expect(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); _column = BeforeRow; - _isConsumed = true; + _state = ExporterState.Consumed; return -1; } @@ -437,7 +438,7 @@ void ThrowIfNotOnRow() void ThrowIfDisposed() { - if (_isDisposed) + if (_state == ExporterState.Disposed) ThrowHelper.ThrowObjectDisposedException(nameof(NpgsqlBinaryExporter), "The COPY operation has already ended."); } @@ -472,10 +473,10 @@ public Task CancelAsync() async ValueTask DisposeAsync(bool async) { - if (_isDisposed) + if (_state == ExporterState.Disposed) return; - if (_isConsumed) + if (_state is ExporterState.Consumed or ExporterState.Uninitialized) { LogMessages.BinaryCopyOperationCompleted(_copyLogger, _rowsExported, _connector.Id); } @@ -512,7 +513,7 @@ async ValueTask DisposeAsync(bool async) void Cleanup() { - Debug.Assert(!_isDisposed); + Debug.Assert(_state != ExporterState.Disposed); var connector = _connector; if (!ReferenceEquals(connector, null)) @@ -523,9 +524,21 @@ void Cleanup() } _buf = null!; - _isDisposed = true; + _state = ExporterState.Disposed; } } #endregion + + 
#region Enums + + enum ExporterState + { + Uninitialized, + Ready, + Consumed, + Disposed + } + + #endregion Enums } diff --git a/src/Npgsql/NpgsqlBinaryImporter.cs b/src/Npgsql/NpgsqlBinaryImporter.cs index 633d1bac15..52c9438fde 100644 --- a/src/Npgsql/NpgsqlBinaryImporter.cs +++ b/src/Npgsql/NpgsqlBinaryImporter.cs @@ -25,7 +25,7 @@ public sealed class NpgsqlBinaryImporter : ICancelable NpgsqlConnector _connector; NpgsqlWriteBuffer _buf; - ImporterState _state; + ImporterState _state = ImporterState.Uninitialized; /// /// The number of columns in the current (not-yet-written) row. @@ -99,6 +99,7 @@ internal async Task Init(string copyFromCommand, bool async, CancellationToken c throw _connector.UnexpectedMessageReceived(msg.Code); } + _state = ImporterState.Ready; _params = new NpgsqlParameter[copyInResponse.NumColumns]; _rowsImported = 0; _buf.StartCopyMode(); @@ -512,6 +513,7 @@ async ValueTask CloseAsync(bool async, CancellationToken cancellationToken = def case ImporterState.Ready: await Cancel(async, cancellationToken).ConfigureAwait(false); break; + case ImporterState.Uninitialized: case ImporterState.Cancelled: case ImporterState.Committed: break; @@ -553,6 +555,7 @@ void CheckReady() static void Throw(ImporterState state) => throw (state switch { + ImporterState.Uninitialized => throw new InvalidOperationException("The COPY operation has not been initialized."), ImporterState.Disposed => new ObjectDisposedException(typeof(NpgsqlBinaryImporter).FullName, "The COPY operation has already ended."), ImporterState.Cancelled => new InvalidOperationException("The COPY operation has already been cancelled."), @@ -567,6 +570,7 @@ static void Throw(ImporterState state) enum ImporterState { + Uninitialized, Ready, Committed, Cancelled, diff --git a/src/Npgsql/NpgsqlConnection.cs b/src/Npgsql/NpgsqlConnection.cs index c63f5bc4b6..6bda4f71d5 100644 --- a/src/Npgsql/NpgsqlConnection.cs +++ b/src/Npgsql/NpgsqlConnection.cs @@ -1160,17 +1160,26 @@ async Task 
BeginBinaryImport(bool async, string copyFromCo LogMessages.StartingBinaryImport(connector.LoggingConfiguration.CopyLogger, connector.Id); // no point in passing a cancellationToken here, as we register the cancellation in the Init method connector.StartUserAction(ConnectorState.Copy, attemptPgCancellation: false); + var importer = new NpgsqlBinaryImporter(connector); try { - var importer = new NpgsqlBinaryImporter(connector); await importer.Init(copyFromCommand, async, cancellationToken).ConfigureAwait(false); connector.CurrentCopyOperation = importer; return importer; } catch { - connector.EndUserAction(); - EndBindingScope(ConnectorBindingScope.Copy); + try + { + if (async) + await importer.DisposeAsync().ConfigureAwait(false); + else + importer.Dispose(); + } + catch + { + // ignored + } throw; } } @@ -1210,17 +1219,26 @@ async Task BeginBinaryExport(bool async, string copyToComm LogMessages.StartingBinaryExport(connector.LoggingConfiguration.CopyLogger, connector.Id); // no point in passing a cancellationToken here, as we register the cancellation in the Init method connector.StartUserAction(ConnectorState.Copy, attemptPgCancellation: false); + var exporter = new NpgsqlBinaryExporter(connector); try { - var exporter = new NpgsqlBinaryExporter(connector); await exporter.Init(copyToCommand, async, cancellationToken).ConfigureAwait(false); connector.CurrentCopyOperation = exporter; return exporter; } catch { - connector.EndUserAction(); - EndBindingScope(ConnectorBindingScope.Copy); + try + { + if (async) + await exporter.DisposeAsync().ConfigureAwait(false); + else + exporter.Dispose(); + } + catch + { + // ignored + } throw; } } @@ -1266,9 +1284,9 @@ async Task BeginTextImport(bool async, string copyFromCommand, Cance LogMessages.StartingTextImport(connector.LoggingConfiguration.CopyLogger, connector.Id); // no point in passing a cancellationToken here, as we register the cancellation in the Init method connector.StartUserAction(ConnectorState.Copy, 
attemptPgCancellation: false); + var copyStream = new NpgsqlRawCopyStream(connector); try { - var copyStream = new NpgsqlRawCopyStream(connector); await copyStream.Init(copyFromCommand, async, cancellationToken).ConfigureAwait(false); var writer = new NpgsqlCopyTextWriter(connector, copyStream); connector.CurrentCopyOperation = writer; @@ -1276,8 +1294,17 @@ async Task BeginTextImport(bool async, string copyFromCommand, Cance } catch { - connector.EndUserAction(); - EndBindingScope(ConnectorBindingScope.Copy); + try + { + if (async) + await copyStream.DisposeAsync().ConfigureAwait(false); + else + copyStream.Dispose(); + } + catch + { + // ignored + } throw; } } @@ -1323,9 +1350,9 @@ async Task BeginTextExport(bool async, string copyToCommand, Cancell LogMessages.StartingTextExport(connector.LoggingConfiguration.CopyLogger, connector.Id); // no point in passing a cancellationToken here, as we register the cancellation in the Init method connector.StartUserAction(ConnectorState.Copy, attemptPgCancellation: false); + var copyStream = new NpgsqlRawCopyStream(connector); try { - var copyStream = new NpgsqlRawCopyStream(connector); await copyStream.Init(copyToCommand, async, cancellationToken).ConfigureAwait(false); var reader = new NpgsqlCopyTextReader(connector, copyStream); connector.CurrentCopyOperation = reader; @@ -1333,8 +1360,17 @@ async Task BeginTextExport(bool async, string copyToCommand, Cancell } catch { - connector.EndUserAction(); - EndBindingScope(ConnectorBindingScope.Copy); + try + { + if (async) + await copyStream.DisposeAsync().ConfigureAwait(false); + else + copyStream.Dispose(); + } + catch + { + // ignored + } throw; } } @@ -1380,9 +1416,9 @@ async Task BeginRawBinaryCopy(bool async, string copyComman LogMessages.StartingRawCopy(connector.LoggingConfiguration.CopyLogger, connector.Id); // no point in passing a cancellationToken here, as we register the cancellation in the Init method connector.StartUserAction(ConnectorState.Copy, 
attemptPgCancellation: false); + var stream = new NpgsqlRawCopyStream(connector); try { - var stream = new NpgsqlRawCopyStream(connector); await stream.Init(copyCommand, async, cancellationToken).ConfigureAwait(false); if (!stream.IsBinary) { @@ -1395,8 +1431,17 @@ async Task BeginRawBinaryCopy(bool async, string copyComman } catch { - connector.EndUserAction(); - EndBindingScope(ConnectorBindingScope.Copy); + try + { + if (async) + await stream.DisposeAsync().ConfigureAwait(false); + else + stream.Dispose(); + } + catch + { + // ignored + } throw; } } diff --git a/src/Npgsql/NpgsqlRawCopyStream.cs b/src/Npgsql/NpgsqlRawCopyStream.cs index 3648b24075..664c39a1b8 100644 --- a/src/Npgsql/NpgsqlRawCopyStream.cs +++ b/src/Npgsql/NpgsqlRawCopyStream.cs @@ -28,7 +28,7 @@ public sealed class NpgsqlRawCopyStream : Stream, ICancelable NpgsqlWriteBuffer _writeBuf; int _leftToReadInDataMsg; - bool _isDisposed, _isConsumed; + CopyStreamState _state = CopyStreamState.Uninitialized; bool _canRead; bool _canWrite; @@ -84,12 +84,14 @@ internal async Task Init(string copyCommand, bool async, CancellationToken cance switch (msg.Code) { case BackendMessageCode.CopyInResponse: + _state = CopyStreamState.Ready; var copyInResponse = (CopyInResponseMessage) msg; IsBinary = copyInResponse.IsBinary; _canWrite = true; _writeBuf.StartCopyMode(); break; case BackendMessageCode.CopyOutResponse: + _state = CopyStreamState.Ready; var copyOutResponse = (CopyOutResponseMessage) msg; IsBinary = copyOutResponse.IsBinary; _canRead = true; @@ -245,7 +247,7 @@ async ValueTask ReadAsyncInternal() async ValueTask ReadCore(int count, bool async, CancellationToken cancellationToken = default) { - if (_isConsumed) + if (_state == CopyStreamState.Consumed) return 0; using var registration = _connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); @@ -261,7 +263,7 @@ async ValueTask ReadCore(int count, bool async, CancellationToken cancellat } catch { - if (!_isDisposed) + 
if (_state != CopyStreamState.Disposed) Cleanup(); throw; } @@ -274,7 +276,7 @@ async ValueTask ReadCore(int count, bool async, CancellationToken cancellat case BackendMessageCode.CopyDone: Expect(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); Expect(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); - _isConsumed = true; + _state = CopyStreamState.Consumed; return 0; default: throw _connector.UnexpectedMessageReceived(msg.Code); @@ -331,6 +333,9 @@ async Task Cancel(bool async) } catch (PostgresException e) { + // TODO: NpgsqlBinaryImporter doesn't cleanup on cancellation + // And instead relies on users disposing the object + // We probably should do the same here Cleanup(); if (e.SqlState != PostgresErrorCodes.QueryCanceled) @@ -355,7 +360,7 @@ public override ValueTask DisposeAsync() async ValueTask DisposeAsync(bool disposing, bool async) { - if (_isDisposed || !disposing) + if (_state == CopyStreamState.Disposed || !disposing) return; try @@ -373,7 +378,7 @@ async ValueTask DisposeAsync(bool disposing, bool async) } else { - if (!_isConsumed) + if (_state != CopyStreamState.Consumed && _state != CopyStreamState.Uninitialized) { try { @@ -403,7 +408,7 @@ async ValueTask DisposeAsync(bool disposing, bool async) #pragma warning disable CS8625 void Cleanup() { - Debug.Assert(!_isDisposed); + Debug.Assert(_state != CopyStreamState.Disposed); LogMessages.CopyOperationCompleted(_copyLogger, _connector.Id); _connector.EndUserAction(); _connector.CurrentCopyOperation = null; @@ -411,13 +416,13 @@ void Cleanup() _connector = null; _readBuf = null; _writeBuf = null; - _isDisposed = true; + _state = CopyStreamState.Disposed; } #pragma warning restore CS8625 void CheckDisposed() { - if (_isDisposed) { + if (_state == CopyStreamState.Disposed) { throw new ObjectDisposedException(nameof(NpgsqlRawCopyStream), "The COPY operation has already ended."); } } @@ -452,6 +457,18 @@ static void ValidateArguments(byte[] buffer, int 
offset, int count) ThrowHelper.ThrowArgumentException("Offset and length were out of bounds for the array or count is greater than the number of elements from index to the end of the source collection."); } #endregion + + #region Enums + + enum CopyStreamState + { + Uninitialized, + Ready, + Consumed, + Disposed + } + + #endregion Enums } /// From c9f5866979208c4c999215fcb3cf147145b995cd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 3 Oct 2025 09:04:39 +0200 Subject: [PATCH 565/761] Bump OpenTelemetry.Api from 1.12.0 to 1.13.0 (#6229) --- updated-dependencies: - dependency-nme: OpenTelemetry.Api dependency-version: 1.13.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 9e51fdc983..52977fb7e1 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -2,7 +2,7 @@ - + From 266e121738b846b8776873904b79832fce1a2651 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 3 Oct 2025 09:05:13 +0200 Subject: [PATCH 566/761] Bump Newtonsoft.Json from 13.0.3 to 13.0.4 (#6228) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 52977fb7e1..4968435b70 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -9,7 +9,7 @@ - + From c4d4ac60e292e318227d421ae08d979f0f4f4930 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 3 Oct 2025 09:05:39 +0200 Subject: [PATCH 567/761] Bump Microsoft.NET.Test.Sdk from 17.14.1 to 18.0.0 (#6227) --- Directory.Packages.props | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 4968435b70..adfbc4adcf 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -24,7 +24,7 @@ - + From 4631da46702817020fe353972ffdcb07cbed1caf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 3 Oct 2025 09:18:38 +0200 Subject: [PATCH 568/761] Bump Microsoft.CodeAnalysis.CSharp from 4.13.0 to 4.14.0 (#6226) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index adfbc4adcf..9a6a2e0e7d 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -14,7 +14,7 @@ - + From b9bb342c0494c1b00d2a6d59b7c6525bc3c4415f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 4 Oct 2025 01:49:50 +0200 Subject: [PATCH 569/761] Bump Scriban.Signed from 6.2.1 to 6.4.0 (#6230) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 9a6a2e0e7d..aee0fe3c5f 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -16,7 +16,7 @@ - + From 96a86f142ac6f5d6ad95f935ea315eaa291763d7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 4 Oct 2025 01:50:12 +0200 Subject: [PATCH 570/761] Bump xunit.runner.visualstudio from 3.1.4 to 3.1.5 (#6231) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index aee0fe3c5f..2f6c36102a 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -27,7 +27,7 @@ - + From 6e308c393a10198ca0d55a25bb181ec788087750 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Sun, 5 Oct 2025 20:57:33 +0300 Subject: [PATCH 571/761] Allow specifying 
TargetSessionAttributes in connection string with NpgsqlDataSourceBuilder (#6046) --- src/Npgsql/MultiHostDataSourceWrapper.cs | 16 +-- src/Npgsql/NpgsqlConnectionStringBuilder.cs | 2 +- src/Npgsql/NpgsqlMultiHostDataSource.cs | 2 +- src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 2 - .../Properties/NpgsqlStrings.Designer.cs | 9 -- src/Npgsql/Properties/NpgsqlStrings.resx | 3 - test/Npgsql.Tests/MultipleHostsTests.cs | 110 ++++++++++++++++-- 7 files changed, 109 insertions(+), 35 deletions(-) diff --git a/src/Npgsql/MultiHostDataSourceWrapper.cs b/src/Npgsql/MultiHostDataSourceWrapper.cs index 3217ec95cf..b6b7d3e5f5 100644 --- a/src/Npgsql/MultiHostDataSourceWrapper.cs +++ b/src/Npgsql/MultiHostDataSourceWrapper.cs @@ -10,9 +10,11 @@ namespace Npgsql; sealed class MultiHostDataSourceWrapper(NpgsqlMultiHostDataSource wrappedSource, TargetSessionAttributes targetSessionAttributes) : NpgsqlDataSource(CloneSettingsForTargetSessionAttributes(wrappedSource.Settings, targetSessionAttributes), wrappedSource.Configuration) { + internal NpgsqlMultiHostDataSource WrappedSource { get; } = wrappedSource; + internal override bool OwnsConnectors => false; - public override void Clear() => wrappedSource.Clear(); + public override void Clear() => WrappedSource.Clear(); static NpgsqlConnectionStringBuilder CloneSettingsForTargetSessionAttributes( NpgsqlConnectionStringBuilder settings, @@ -23,22 +25,22 @@ static NpgsqlConnectionStringBuilder CloneSettingsForTargetSessionAttributes( return clonedSettings; } - internal override (int Total, int Idle, int Busy) Statistics => wrappedSource.Statistics; + internal override (int Total, int Idle, int Busy) Statistics => WrappedSource.Statistics; internal override ValueTask Get(NpgsqlConnection conn, NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken) - => wrappedSource.Get(conn, timeout, async, cancellationToken); + => WrappedSource.Get(conn, timeout, async, cancellationToken); internal override bool 
TryGetIdleConnector([NotNullWhen(true)] out NpgsqlConnector? connector) => throw new NpgsqlException("Npgsql bug: trying to get an idle connector from " + nameof(MultiHostDataSourceWrapper)); internal override ValueTask OpenNewConnector(NpgsqlConnection conn, NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken) => throw new NpgsqlException("Npgsql bug: trying to open a new connector from " + nameof(MultiHostDataSourceWrapper)); internal override void Return(NpgsqlConnector connector) - => wrappedSource.Return(connector); + => WrappedSource.Return(connector); internal override void AddPendingEnlistedConnector(NpgsqlConnector connector, Transaction transaction) - => wrappedSource.AddPendingEnlistedConnector(connector, transaction); + => WrappedSource.AddPendingEnlistedConnector(connector, transaction); internal override bool TryRemovePendingEnlistedConnector(NpgsqlConnector connector, Transaction transaction) - => wrappedSource.TryRemovePendingEnlistedConnector(connector, transaction); + => WrappedSource.TryRemovePendingEnlistedConnector(connector, transaction); internal override bool TryRentEnlistedPending(Transaction transaction, NpgsqlConnection connection, [NotNullWhen(true)] out NpgsqlConnector? connector) - => wrappedSource.TryRentEnlistedPending(transaction, connection, out connector); + => WrappedSource.TryRentEnlistedPending(transaction, connection, out connector); } diff --git a/src/Npgsql/NpgsqlConnectionStringBuilder.cs b/src/Npgsql/NpgsqlConnectionStringBuilder.cs index 35a6ed04e0..7200e45130 100644 --- a/src/Npgsql/NpgsqlConnectionStringBuilder.cs +++ b/src/Npgsql/NpgsqlConnectionStringBuilder.cs @@ -1001,7 +1001,7 @@ public string? TargetSessionAttributes set { - TargetSessionAttributesParsed = value is null ? null : ParseTargetSessionAttributes(value); + TargetSessionAttributesParsed = value is null ? 
null : ParseTargetSessionAttributes(value.ToLowerInvariant()); SetValue(nameof(TargetSessionAttributes), value); } } diff --git a/src/Npgsql/NpgsqlMultiHostDataSource.cs b/src/Npgsql/NpgsqlMultiHostDataSource.cs index 7236e7bb8b..4997a6093a 100644 --- a/src/Npgsql/NpgsqlMultiHostDataSource.cs +++ b/src/Npgsql/NpgsqlMultiHostDataSource.cs @@ -456,6 +456,6 @@ bool TryGetValidConnector(List list, TargetSessionAttributes pr static TargetSessionAttributes GetTargetSessionAttributes(NpgsqlConnection connection) => connection.Settings.TargetSessionAttributesParsed ?? (PostgresEnvironment.TargetSessionAttributes is { } s - ? NpgsqlConnectionStringBuilder.ParseTargetSessionAttributes(s) + ? NpgsqlConnectionStringBuilder.ParseTargetSessionAttributes(s.ToLowerInvariant()) : TargetSessionAttributes.Any); } diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index 3fa0a5a9fd..4a3d6fdad7 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -855,8 +855,6 @@ _loggerFactory is null void ValidateMultiHost() { - if (ConnectionStringBuilder.TargetSessionAttributes is not null) - throw new InvalidOperationException(NpgsqlStrings.CannotSpecifyTargetSessionAttributes); if (ConnectionStringBuilder.Multiplexing) throw new NotSupportedException("Multiplexing is not supported with multiple hosts"); if (ConnectionStringBuilder.ReplicationMode != ReplicationMode.Off) diff --git a/src/Npgsql/Properties/NpgsqlStrings.Designer.cs b/src/Npgsql/Properties/NpgsqlStrings.Designer.cs index 7f6fca99cd..7f71914ca2 100644 --- a/src/Npgsql/Properties/NpgsqlStrings.Designer.cs +++ b/src/Npgsql/Properties/NpgsqlStrings.Designer.cs @@ -113,15 +113,6 @@ internal static string CannotSetMultiplePasswordProviderKinds { } } - /// - /// Looks up a localized string similar to When creating a multi-host data source, TargetSessionAttributes cannot be specified. 
Create without TargetSessionAttributes, and then obtain DataSource wrappers from it. Consult the docs for more information.. - /// - internal static string CannotSpecifyTargetSessionAttributes { - get { - return ResourceManager.GetString("CannotSpecifyTargetSessionAttributes", resourceCulture); - } - } - /// /// Looks up a localized string similar to RootCertificate cannot be used in conjunction with SslClientAuthenticationOptionsCallback overwriting RemoteCertificateValidationCallback; when registering a validation callback, perform whatever validation you require in that callback.. /// diff --git a/src/Npgsql/Properties/NpgsqlStrings.resx b/src/Npgsql/Properties/NpgsqlStrings.resx index f523cf6eb2..af951d1a07 100644 --- a/src/Npgsql/Properties/NpgsqlStrings.resx +++ b/src/Npgsql/Properties/NpgsqlStrings.resx @@ -54,9 +54,6 @@ '{0}' must be positive. - - When creating a multi-host data source, TargetSessionAttributes cannot be specified. Create without TargetSessionAttributes, and then obtain DataSource wrappers from it. Consult the docs for more information. - Cannot read interval values with non-zero months as TimeSpan, since that type doesn't support months. Consider using NodaTime Period which better corresponds to PostgreSQL interval, or read the value as NpgsqlInterval, or transform the interval to not contain months or years in PostgreSQL before reading it. 
diff --git a/test/Npgsql.Tests/MultipleHostsTests.cs b/test/Npgsql.Tests/MultipleHostsTests.cs index f4026cc7f6..66c1a99cf7 100644 --- a/test/Npgsql.Tests/MultipleHostsTests.cs +++ b/test/Npgsql.Tests/MultipleHostsTests.cs @@ -11,7 +11,6 @@ using System.Threading; using System.Threading.Tasks; using System.Transactions; -using Npgsql.Properties; using static Npgsql.Tests.Support.MockState; using static Npgsql.Tests.TestUtil; using IsolationLevel = System.Transactions.IsolationLevel; @@ -92,6 +91,55 @@ public async Task Connect_to_correct_host_unpooled(TargetSessionAttributes targe _ = await postmasters[i].WaitForServerConnection(); } + [Test] + [TestCaseSource(nameof(MyCases))] + public async Task Connect_to_correct_host_legacy(TargetSessionAttributes targetSessionAttributes, MockState[] servers, int expectedServer) + { + var postmasters = servers.Select(s => PgPostmasterMock.Start(state: s)).ToArray(); + await using var __ = new DisposableWrapper(postmasters); + + var connectionStringBuilder = new NpgsqlConnectionStringBuilder + { + Host = MultipleHosts(postmasters), + ServerCompatibilityMode = ServerCompatibilityMode.NoTypeLoading, + TargetSessionAttributes = TargetSessionAttributesAsString(targetSessionAttributes) + }; + + using var pool = CreateTempPool(connectionStringBuilder, out var connectionString); + await using var conn = new NpgsqlConnection(connectionString); + await conn.OpenAsync(); + + Assert.That(conn.Port, Is.EqualTo(postmasters[expectedServer].Port)); + + for (var i = 0; i <= expectedServer; i++) + _ = await postmasters[i].WaitForServerConnection(); + } + + [Test] + [TestCaseSource(nameof(MyCases))] + public async Task Connect_to_correct_host_connection_string(TargetSessionAttributes targetSessionAttributes, MockState[] servers, int expectedServer) + { + var postmasters = servers.Select(s => PgPostmasterMock.Start(state: s)).ToArray(); + await using var __ = new DisposableWrapper(postmasters); + + var connectionStringBuilder = new 
NpgsqlConnectionStringBuilder + { + Host = MultipleHosts(postmasters), + ServerCompatibilityMode = ServerCompatibilityMode.NoTypeLoading, + TargetSessionAttributes = TargetSessionAttributesAsString(targetSessionAttributes) + }; + + await using var dataSource = new NpgsqlDataSourceBuilder(connectionStringBuilder.ConnectionString) + .Build(); + Assert.That(dataSource, Is.TypeOf()); + await using var conn = await dataSource.OpenConnectionAsync(); + + Assert.That(conn.Port, Is.EqualTo(postmasters[expectedServer].Port)); + + for (var i = 0; i <= expectedServer; i++) + _ = await postmasters[i].WaitForServerConnection(); + } + [Test] [TestCaseSource(nameof(MyCases))] public async Task Connect_to_correct_host_with_available_idle( @@ -132,6 +180,40 @@ public async Task Connect_to_correct_host_with_available_idle( _ = await postmasters[i].WaitForServerConnection(); } + [Test] + public async Task Legacy_connection_shares_datasource() + { + await using var primaryPostmaster = PgPostmasterMock.Start(state: Primary); + await using var standbyPostmaster = PgPostmasterMock.Start(state: Standby); + + var builder1 = new NpgsqlConnectionStringBuilder + { + Host = MultipleHosts(primaryPostmaster, standbyPostmaster), + ServerCompatibilityMode = ServerCompatibilityMode.NoTypeLoading, + TargetSessionAttributes = "Prefer-Primary" + }; + + // Use the exact same pool for both connections as CreateTempPool adds a unique `ApplicationName` to connection string + using var pool = CreateTempPool(builder1, out var connectionString1); + var connectionString2 = new NpgsqlConnectionStringBuilder(connectionString1) + { + TargetSessionAttributes = "Prefer-Standby" + }.ConnectionString; + + await using var conn1 = new NpgsqlConnection(connectionString1); + await conn1.OpenAsync(); + Assert.That(conn1.Port, Is.EqualTo(primaryPostmaster.Port)); + + await using var conn2 = new NpgsqlConnection(connectionString2); + await conn2.OpenAsync(); + Assert.That(conn2.Port, Is.EqualTo(standbyPostmaster.Port)); + + 
Assert.That(conn1.NpgsqlDataSource, Is.Not.SameAs(conn2.NpgsqlDataSource)); + Assert.That(conn1.NpgsqlDataSource, Is.TypeOf()); + Assert.That(conn2.NpgsqlDataSource, Is.TypeOf()); + Assert.That(((MultiHostDataSourceWrapper)conn1.NpgsqlDataSource).WrappedSource, Is.SameAs(((MultiHostDataSourceWrapper)conn2.NpgsqlDataSource).WrappedSource)); + } + [Test] [TestCase(TargetSessionAttributes.Standby, new[] { Primary, Primary })] [TestCase(TargetSessionAttributes.Primary, new[] { Standby, Standby })] @@ -254,7 +336,7 @@ public async Task TargetSessionAttributes_with_single_host(string targetSessionA if (targetSessionAttributes == "any") { - await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); + await using var postmasterMock = PgPostmasterMock.Start(connectionString); using var pool = CreateTempPool(postmasterMock.ConnectionString, out connectionString); await using var conn = new NpgsqlConnection(connectionString); await conn.OpenAsync(); @@ -1023,15 +1105,6 @@ public async Task DataSource_without_wrappers() Assert.That(standbyConnection.Port, Is.EqualTo(standbyPostmasterMock.Port)); } - [Test] - public void DataSource_with_TargetSessionAttributes_is_not_supported() - { - var builder = new NpgsqlDataSourceBuilder("Host=foo,bar;Target Session Attributes=primary"); - - Assert.That(() => builder.BuildMultiHost(), Throws.Exception.TypeOf() - .With.Message.EqualTo(NpgsqlStrings.CannotSpecifyTargetSessionAttributes)); - } - [Test] public async Task BuildMultiHost_with_single_host_is_supported() { @@ -1171,7 +1244,20 @@ public async Task LoadBalancing_is_fair_if_first_host_is_down([Values]TargetSess static string MultipleHosts(params PgPostmasterMock[] postmasters) => string.Join(",", postmasters.Select(p => $"{p.Host}:{p.Port}")); - class DisposableWrapper(IEnumerable disposables) : IAsyncDisposable + static string? 
TargetSessionAttributesAsString(TargetSessionAttributes targetSessionAttributes) + => targetSessionAttributes switch + { + TargetSessionAttributes.Any => "Any", + TargetSessionAttributes.Primary => "Primary", + TargetSessionAttributes.Standby => "Standby", + TargetSessionAttributes.PreferPrimary => "Prefer-Primary", + TargetSessionAttributes.PreferStandby => "Prefer-Standby", + TargetSessionAttributes.ReadOnly => "Read-Only", + TargetSessionAttributes.ReadWrite => "Read-Write", + _ => null + }; + + sealed class DisposableWrapper(IEnumerable disposables) : IAsyncDisposable { public async ValueTask DisposeAsync() { From 5d073dad647994754ad672c134474c0fd4869662 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Sun, 5 Oct 2025 21:45:35 +0300 Subject: [PATCH 572/761] Add support for multiple client certificates (#6162) Fixes #6152 --- src/Npgsql/Internal/NpgsqlConnector.cs | 78 +++++++++++++++++++------- 1 file changed, 57 insertions(+), 21 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 8018abe56d..236b5375aa 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -283,7 +283,7 @@ internal bool PostgresCancellationPerformed #pragma warning disable CA1859 // We're casting to IDisposable to not explicitly reference X509Certificate2 for NativeAOT // TODO: probably pointless now, needs to be rechecked - IDisposable? _certificate; + List? _certificates; #pragma warning restore CA1859 internal NpgsqlLoggingConfiguration LoggingConfiguration { get; } @@ -1066,36 +1066,61 @@ internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, { var password = Settings.SslPassword; - X509Certificate2? cert = null; if (!string.Equals(Path.GetExtension(certPath), ".pfx", StringComparison.OrdinalIgnoreCase)) { // It's PEM time var keyPath = Settings.SslKey ?? PostgresEnvironment.SslKey ?? 
PostgresEnvironment.SslKeyDefault; - cert = string.IsNullOrEmpty(password) + + // With PEM certificates we might have multiple certificates in a single file + // Where the first one is a leaf (and it has to have a private key) + // And others are intermediate between it and CA cert + // To support this, we first load the leaf certificate with private key + // And then we load everything else including the leaf, but without private key + // And afterwards we just get rid of the duplicate + var firstClientCert = string.IsNullOrEmpty(password) ? X509Certificate2.CreateFromPemFile(certPath, keyPath) : X509Certificate2.CreateFromEncryptedPemFile(certPath, password, keyPath); + clientCertificates.Add(firstClientCert); + + clientCertificates.ImportFromPemFile(certPath); + clientCertificates[1].Dispose(); + clientCertificates.RemoveAt(1); + if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows)) { - // Windows crypto API has a bug with pem certs - // See #3650 - using var previousCert = cert; + for (var i = 0; i < clientCertificates.Count; i++) + { + var cert = clientCertificates[i]; + + // Windows crypto API has a bug with pem certs + // See #3650 + using var previousCert = cert; #if NET9_0_OR_GREATER - cert = X509CertificateLoader.LoadPkcs12(cert.Export(X509ContentType.Pkcs12), null); + cert = X509CertificateLoader.LoadPkcs12(cert.Export(X509ContentType.Pkcs12), null); #else - cert = new X509Certificate2(cert.Export(X509ContentType.Pkcs12)); + cert = new X509Certificate2(cert.Export(X509ContentType.Pkcs12)); #endif + clientCertificates[i] = cert; + } } } + // If it's empty, it's probably PFX + if (clientCertificates.Count == 0) + { #if NET9_0_OR_GREATER - // If it's null, it's probably PFX - cert ??= X509CertificateLoader.LoadPkcs12FromFile(certPath, password); + var certs = X509CertificateLoader.LoadPkcs12CollectionFromFile(certPath, password); + clientCertificates.AddRange(certs); #else - cert ??= new X509Certificate2(certPath, password); + var cert = new 
X509Certificate2(certPath, password); + clientCertificates.Add(cert); #endif - clientCertificates.Add(cert); + } - _certificate = cert; + var certificates = new List(); + foreach (var certificate in clientCertificates) + certificates.Add(certificate); + _certificates = certificates; } try @@ -1127,6 +1152,20 @@ internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, certificateValidationCallback = SslVerifyFullValidation; } + SslStreamCertificateContext? clientCertificateContext = null; + if (clientCertificates.Count > 0) + { + // SslClientAuthenticationOptions.ClientCertificates only sends trusted certificates or if they have private key + // Which makes us unable to send intermediate certificates + // Work around this by specifying the first certificate as target + // And others as additional + // See https://github.com/dotnet/runtime/issues/26323 + var clientCertificate = clientCertificates[0]; + clientCertificates.RemoveAt(0); + + clientCertificateContext = SslStreamCertificateContext.Create(clientCertificate, clientCertificates); + } + var host = Host; timeout.CheckAndApply(this); @@ -1136,7 +1175,7 @@ internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, var sslStreamOptions = new SslClientAuthenticationOptions { TargetHost = host, - ClientCertificates = clientCertificates, + ClientCertificateContext = clientCertificateContext, EnabledSslProtocols = SslProtocols.None, CertificateRevocationCheckMode = checkCertificateRevocation ? 
X509RevocationMode.Online : X509RevocationMode.NoCheck, RemoteCertificateValidationCallback = certificateValidationCallback, @@ -1184,8 +1223,8 @@ internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, } catch { - _certificate?.Dispose(); - _certificate = null; + _certificates?.ForEach(x => x.Dispose()); + _certificates = null; throw; } @@ -2525,11 +2564,8 @@ void Cleanup() PostgresParameters.Clear(); _currentCommand = null; - if (_certificate is not null) - { - _certificate.Dispose(); - _certificate = null; - } + _certificates?.ForEach(x => x.Dispose()); + _certificates = null; } void GenerateResetMessage() From 5ede53c5ba8cfc221b37fb2d0dcf00ad830dac80 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Sun, 5 Oct 2025 21:47:19 +0300 Subject: [PATCH 573/761] Fix getting wrong schema with CommandBehavior.SchemaOnly and autoprepare (#6040) Fixes #6038 --- src/Npgsql/NpgsqlCommand.cs | 19 +++++++-- src/Npgsql/NpgsqlDataReader.cs | 16 +++----- test/Npgsql.Tests/AutoPrepareTests.cs | 57 +++++++++++++++++++++++++++ 3 files changed, 78 insertions(+), 14 deletions(-) diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index d5ad24fdb2..8f6816b657 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -1118,11 +1118,24 @@ async Task WriteExecuteSchemaOnly(NpgsqlConnector connector, bool async, bool fl await new TaskSchedulerAwaitable(ConstrainedConcurrencyScheduler); var batchCommand = InternalBatchCommands[i]; + var pStatement = batchCommand.PreparedStatement; + + pStatement?.RefreshLastUsed(); + + Debug.Assert(batchCommand.FinalCommandText is not null); + + if (pStatement != null && !batchCommand.IsPreparing) + { + // Prepared, we already have the RowDescription + Debug.Assert(pStatement.IsPrepared); + continue; + } - if (batchCommand.PreparedStatement?.State == PreparedState.Prepared) - continue; // Prepared, we already have the RowDescription + // We may have a prepared statement that replaces an 
existing statement - close the latter first. + if (pStatement?.StatementBeingReplaced != null) + await connector.WriteClose(StatementOrPortal.Statement, pStatement.StatementBeingReplaced.Name!, async, cancellationToken).ConfigureAwait(false); - await connector.WriteParse(batchCommand.FinalCommandText!, batchCommand.StatementName, + await connector.WriteParse(batchCommand.FinalCommandText, batchCommand.StatementName, batchCommand.CurrentParametersReadOnly, async, cancellationToken).ConfigureAwait(false); await connector.WriteDescribe(StatementOrPortal.Statement, batchCommand.StatementName, async, cancellationToken).ConfigureAwait(false); diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 86963afd4a..10c383f14d 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -713,7 +713,11 @@ async Task NextResultSchemaOnly(bool async, bool isConsuming = false, Canc break; case BackendMessageCode.RowDescription: // We have a resultset - RowDescription = _statements[StatementIndex].Description = (RowDescriptionMessage)msg; + // RowDescription messages are cached on the connector, but if we're auto-preparing, we need to + // clone our own copy which will last beyond the lifetime of this invocation. + RowDescription = _statements[StatementIndex].Description = preparedStatement == null + ? 
(RowDescriptionMessage)msg + : ((RowDescriptionMessage)msg).Clone(); Command.FixupRowDescription(RowDescription, StatementIndex == 0); break; default: @@ -734,17 +738,7 @@ async Task NextResultSchemaOnly(bool async, bool isConsuming = false, Canc // Found a resultset if (RowDescription is not null) - { - if (ColumnInfoCache?.Length >= ColumnCount) - Array.Clear(ColumnInfoCache, 0, ColumnCount); - else - { - if (ColumnInfoCache is { } cache) - ArrayPool.Shared.Return(cache, clearArray: true); - ColumnInfoCache = ArrayPool.Shared.Rent(ColumnCount); - } return true; - } } State = ReaderState.Consumed; diff --git a/test/Npgsql.Tests/AutoPrepareTests.cs b/test/Npgsql.Tests/AutoPrepareTests.cs index 00d9455147..b35fe7c5d3 100644 --- a/test/Npgsql.Tests/AutoPrepareTests.cs +++ b/test/Npgsql.Tests/AutoPrepareTests.cs @@ -538,6 +538,63 @@ public async Task SchemaOnly() await cmd.ExecuteScalarAsync(); } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/6038")] + public async Task Auto_prepared_schema_only_correct_schema() + { + await using var dataSource = CreateDataSource(csb => + { + csb.MaxAutoPrepare = 1; + csb.AutoPrepareMinUsages = 5; + }); + await using var connection = await dataSource.OpenConnectionAsync(); + var table1 = await CreateTempTable(connection, "foo int"); + var table2 = await CreateTempTable(connection, "bar int"); + + await using var cmd = connection.CreateCommand(); + cmd.CommandText = $"SELECT * FROM {table1}"; + for (var i = 0; i < 5; i++) + { + // Make sure we prepare the first query + await using (await cmd.ExecuteReaderAsync(CommandBehavior.SchemaOnly)) { } + } + + cmd.CommandText = $"SELECT * FROM {table2}"; + // The second query will load RowDescription, which is a singleton on NpgsqlConnector + // This shouldn't affect the first query, because we create a copy of RowDescription on prepare + await using (await cmd.ExecuteReaderAsync(CommandBehavior.SchemaOnly)) { } + + cmd.CommandText = $"SELECT * FROM {table1}"; + // If we indeed made 
a copy of RowDescription on prepare, we should get the column for the first query and not for the second + await using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SchemaOnly | CommandBehavior.KeyInfo); + var columns = await reader.GetColumnSchemaAsync(); + Assert.That(columns.Count, Is.EqualTo(1)); + Assert.That(columns[0].ColumnName, Is.EqualTo("foo")); + } + + [Test] + public async Task Auto_prepared_schema_only_replace() + { + await using var dataSource = CreateDataSource(csb => + { + csb.MaxAutoPrepare = 1; + csb.AutoPrepareMinUsages = 5; + }); + await using var connection = await dataSource.OpenConnectionAsync(); + + await using var cmd = connection.CreateCommand(); + cmd.CommandText = "SELECT 1"; + for (var i = 0; i < 5; i++) + { + await using (await cmd.ExecuteReaderAsync(CommandBehavior.SchemaOnly)) { } + } + + cmd.CommandText = "SELECT 2"; + for (var i = 0; i < 5; i++) + { + await using (await cmd.ExecuteReaderAsync(CommandBehavior.SchemaOnly)) { } + } + } + [Test] public async Task Auto_prepared_statement_invalidation() { From 5fd06df3a13fb67e001c62ec9211b08050ec3b01 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Sun, 5 Oct 2025 22:12:17 +0300 Subject: [PATCH 574/761] Remove timeout translation from NpgsqlReadBuffer (#6126) Fixes #6122 --- src/Npgsql/Internal/NpgsqlConnector.cs | 20 +++++++++++++++----- src/Npgsql/Internal/NpgsqlReadBuffer.cs | 17 ++++------------- 2 files changed, 19 insertions(+), 18 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 236b5375aa..4ef0bd44dc 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -276,7 +276,7 @@ internal bool PostgresCancellationPerformed internal bool UserCancellationRequested => _userCancellationRequested; internal CancellationToken UserCancellationToken { get; set; } internal bool AttemptPostgresCancellation { get; private set; } - static readonly TimeSpan 
_cancelImmediatelyTimeout = TimeSpan.FromMilliseconds(-1); + static readonly TimeSpan _cancelImmediatelyTimeout = TimeSpan.Zero; static readonly SslApplicationProtocol _alpnProtocol = new("postgresql"); @@ -2082,6 +2082,8 @@ void PerformUserCancellationUnsynchronized() var cancellationTimeout = Settings.CancellationTimeout; if (PerformPostgresCancellation() && cancellationTimeout >= 0) { + // TODO: according to docs, we treat 0 timeout as infinite, yet we do not change the actual value + // We should revisit this here and in NpgsqlReadBuffer if (cancellationTimeout > 0) { ReadBuffer.Timeout = TimeSpan.FromMilliseconds(cancellationTimeout); @@ -2799,8 +2801,9 @@ UserAction DoStartUserAction(ConnectorState newState, NpgsqlCommand? command, // We reset the ReadBuffer.Timeout for every user action, so it wouldn't leak from the previous query or action // For example, we might have successfully cancelled the previous query (so the connection is not broken) - // But the next time, we call the Prepare, which doesn't set it's own timeout - ReadBuffer.Timeout = TimeSpan.FromSeconds(command?.CommandTimeout ?? Settings.CommandTimeout); + // But the next time, we call the Prepare, which doesn't set its own timeout + var timeoutSeconds = command?.CommandTimeout ?? Settings.CommandTimeout; + ReadBuffer.Timeout = timeoutSeconds > 0 ? TimeSpan.FromSeconds(timeoutSeconds) : Timeout.InfiniteTimeSpan; return new UserAction(this); } @@ -2935,12 +2938,15 @@ internal async Task Wait(bool async, int timeout, CancellationToken cancel await Flush(async, cancellationToken).ConfigureAwait(false); var keepaliveMs = Settings.KeepAlive * 1000; + var isTimeoutInfinite = timeout <= 0; while (true) { cancellationToken.ThrowIfCancellationRequested(); - var timeoutForKeepalive = _isKeepAliveEnabled && (timeout <= 0 || keepaliveMs < timeout); - ReadBuffer.Timeout = TimeSpan.FromMilliseconds(timeoutForKeepalive ? 
keepaliveMs : timeout); + var timeoutForKeepalive = _isKeepAliveEnabled && (isTimeoutInfinite || keepaliveMs < timeout); + ReadBuffer.Timeout = timeoutForKeepalive + ? TimeSpan.FromMilliseconds(keepaliveMs) + : isTimeoutInfinite ? Timeout.InfiniteTimeSpan : TimeSpan.FromMilliseconds(timeout); try { var msg = await ReadMessageWithNotifications(async).ConfigureAwait(false); @@ -3000,7 +3006,11 @@ internal async Task Wait(bool async, int timeout, CancellationToken cancel } if (timeout > 0) + { timeout -= (keepaliveMs + (int)Stopwatch.GetElapsedTime(keepaliveStartTimestamp).TotalMilliseconds); + // Make sure we don't accidentally set -1 as a timeout (because it's infinite) + timeout = Math.Max(timeout, 0); + } } } diff --git a/src/Npgsql/Internal/NpgsqlReadBuffer.cs b/src/Npgsql/Internal/NpgsqlReadBuffer.cs index d8622fc7a1..0f91bad9d4 100644 --- a/src/Npgsql/Internal/NpgsqlReadBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlReadBuffer.cs @@ -36,25 +36,16 @@ sealed partial class NpgsqlReadBuffer : IDisposable internal ResettableCancellationTokenSource Cts { get; } readonly MetricsReporter? _metricsReporter; - TimeSpan _preTranslatedTimeout = TimeSpan.Zero; - /// /// Timeout for sync and async reads /// internal TimeSpan Timeout { - get => _preTranslatedTimeout; + get => Cts.Timeout; set { - if (_preTranslatedTimeout != value) + if (Cts.Timeout != value) { - _preTranslatedTimeout = value; - - if (value == TimeSpan.Zero) - value = InfiniteTimeSpan; - else if (value < TimeSpan.Zero) - value = TimeSpan.Zero; - Debug.Assert(_underlyingSocket != null); _underlyingSocket.ReceiveTimeout = (int)value.TotalMilliseconds; @@ -189,7 +180,7 @@ int ReadWithTimeout(Span buffer) async ValueTask ReadWithTimeoutAsync(Memory buffer, CancellationToken cancellationToken) { - var finalCt = Timeout != TimeSpan.Zero + var finalCt = Timeout != InfiniteTimeSpan ? 
Cts.Start(cancellationToken) : Cts.Reset(); @@ -289,7 +280,7 @@ static async ValueTask EnsureLong( buffer.ReadPosition = 0; } - var finalCt = async && buffer.Timeout != TimeSpan.Zero + var finalCt = async && buffer.Timeout != InfiniteTimeSpan ? buffer.Cts.Start() : buffer.Cts.Reset(); From f5ac3a85e81f67721570c12712d70797d5fbf6ba Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Sun, 5 Oct 2025 22:31:10 +0300 Subject: [PATCH 575/761] Add a connection string parameter to control NpgsqlException.BatchCommand (#6098) Closes #6042 --- src/Npgsql/NpgsqlConnectionStringBuilder.cs | 18 ++++++++++++++++++ src/Npgsql/NpgsqlDataReader.cs | 7 +++++-- src/Npgsql/NpgsqlException.cs | 1 + src/Npgsql/PublicAPI.Unshipped.txt | 2 ++ test/Npgsql.Tests/BatchTests.cs | 9 +++++++-- test/Npgsql.Tests/ReaderTests.cs | 20 ++++++++++++++------ 6 files changed, 47 insertions(+), 10 deletions(-) diff --git a/src/Npgsql/NpgsqlConnectionStringBuilder.cs b/src/Npgsql/NpgsqlConnectionStringBuilder.cs index 7200e45130..5b2d13a1d0 100644 --- a/src/Npgsql/NpgsqlConnectionStringBuilder.cs +++ b/src/Npgsql/NpgsqlConnectionStringBuilder.cs @@ -683,6 +683,24 @@ public bool IncludeErrorDetail } bool _includeErrorDetail; + /// + /// When enabled, failed statements are included on . + /// + [Category("Security")] + [Description("When enabled, failed batched commands are included on NpgsqlException.BatchCommand.")] + [DisplayName("Include Failed Batched Command")] + [NpgsqlConnectionStringProperty] + public bool IncludeFailedBatchedCommand + { + get => _includeFailedBatchedCommand; + set + { + _includeFailedBatchedCommand = value; + SetValue(nameof(IncludeFailedBatchedCommand), value); + } + } + bool _includeFailedBatchedCommand; + /// /// Controls whether channel binding is required, disabled or preferred, depending on server support. 
/// diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 10c383f14d..32d66090a9 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -516,7 +516,8 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo var statement = _statements[StatementIndex]; // Reference the triggering statement from the exception - postgresException.BatchCommand = statement; + if (Connector.Settings.IncludeFailedBatchedCommand) + postgresException.BatchCommand = statement; // Prevent the command or batch from being recycled (by the connection) when it's disposed. This is important since // the exception is very likely to escape the using statement of the command, and by that time some other user may @@ -754,7 +755,9 @@ async Task NextResultSchemaOnly(bool async, bool isConsuming = false, Canc // Reference the triggering statement from the exception if (e is PostgresException postgresException && StatementIndex >= 0 && StatementIndex < _statements.Count) { - postgresException.BatchCommand = _statements[StatementIndex]; + // Reference the triggering statement from the exception + if (Connector.Settings.IncludeFailedBatchedCommand) + postgresException.BatchCommand = _statements[StatementIndex]; // Prevent the command or batch from being recycled (by the connection) when it's disposed. This is important since // the exception is very likely to escape the using statement of the command, and by that time some other user may diff --git a/src/Npgsql/NpgsqlException.cs b/src/Npgsql/NpgsqlException.cs index 89543b0b50..9e2dfe9ee0 100644 --- a/src/Npgsql/NpgsqlException.cs +++ b/src/Npgsql/NpgsqlException.cs @@ -46,6 +46,7 @@ public override bool IsTransient => InnerException is IOException or SocketException or TimeoutException or NpgsqlException { IsTransient: true }; /// + /// This property is null unless in connection string is set to true. public new NpgsqlBatchCommand? 
BatchCommand { get; set; } /// diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index a1d261ead0..43905953db 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -9,6 +9,8 @@ Npgsql.NpgsqlConnection.SslClientAuthenticationOptionsCallback.get -> System.Act Npgsql.NpgsqlConnection.SslClientAuthenticationOptionsCallback.set -> void Npgsql.NpgsqlConnectionStringBuilder.GssEncryptionMode.get -> Npgsql.GssEncryptionMode Npgsql.NpgsqlConnectionStringBuilder.GssEncryptionMode.set -> void +Npgsql.NpgsqlConnectionStringBuilder.IncludeFailedBatchedCommand.get -> bool +Npgsql.NpgsqlConnectionStringBuilder.IncludeFailedBatchedCommand.set -> void Npgsql.NpgsqlConnectionStringBuilder.RequireAuth.get -> string? Npgsql.NpgsqlConnectionStringBuilder.RequireAuth.set -> void Npgsql.NpgsqlConnectionStringBuilder.SslNegotiation.get -> Npgsql.SslNegotiation diff --git a/test/Npgsql.Tests/BatchTests.cs b/test/Npgsql.Tests/BatchTests.cs index 960e6028f9..837ace48fd 100644 --- a/test/Npgsql.Tests/BatchTests.cs +++ b/test/Npgsql.Tests/BatchTests.cs @@ -12,7 +12,7 @@ namespace Npgsql.Tests; [TestFixture(MultiplexingMode.Multiplexing, CommandBehavior.Default)] [TestFixture(MultiplexingMode.NonMultiplexing, CommandBehavior.SequentialAccess)] [TestFixture(MultiplexingMode.Multiplexing, CommandBehavior.SequentialAccess)] -public class BatchTests : MultiplexingTestBase +public class BatchTests : MultiplexingTestBase, IDisposable { #region Parameters @@ -477,7 +477,7 @@ public async Task Batch_with_multiple_errors([Values] bool withErrorBarriers) public async Task Batch_close_dispose_reader_with_multiple_errors([Values] bool withErrorBarriers, [Values] bool dispose) { // Create a temp pool since we dispose the reader (and check the state afterwards) and it can be reused by another connection - await using var dataSource = CreateDataSource(); + await using var dataSource = CreateDataSource(x => x.IncludeFailedBatchedCommand = 
true); await using var conn = await dataSource.OpenConnectionAsync(); var table = await CreateTempTable(conn, "id INT"); @@ -804,11 +804,16 @@ public async Task Batch_dispose_reuse() readonly CommandBehavior Behavior; // ReSharper restore InconsistentNaming + NpgsqlDataSource? _dataSource; + protected override NpgsqlDataSource DataSource => _dataSource ??= CreateDataSource(csb => csb.IncludeFailedBatchedCommand = true); + public BatchTests(MultiplexingMode multiplexingMode, CommandBehavior behavior) : base(multiplexingMode) { Behavior = behavior; IsSequential = (Behavior & CommandBehavior.SequentialAccess) != 0; } + public void Dispose() => DataSource.Dispose(); + #endregion } diff --git a/test/Npgsql.Tests/ReaderTests.cs b/test/Npgsql.Tests/ReaderTests.cs index 432a9aa327..2d1e7040cd 100644 --- a/test/Npgsql.Tests/ReaderTests.cs +++ b/test/Npgsql.Tests/ReaderTests.cs @@ -623,9 +623,10 @@ await conn.ExecuteNonQueryAsync($@" } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/967")] - public async Task NpgsqlException_references_BatchCommand_with_single_command() + public async Task NpgsqlException_references_BatchCommand_with_single_command([Values] bool includeFailedBatchedCommand) { - await using var conn = await OpenConnectionAsync(); + await using var dataSource = CreateDataSource(x => x.IncludeFailedBatchedCommand = includeFailedBatchedCommand); + await using var conn = await dataSource.OpenConnectionAsync(); var function = await GetTempFunctionName(conn); await conn.ExecuteNonQueryAsync($@" @@ -638,7 +639,10 @@ await conn.ExecuteNonQueryAsync($@" cmd.CommandText = $"SELECT {function}()"; var exception = Assert.ThrowsAsync(() => cmd.ExecuteReaderAsync(Behavior))!; - Assert.That(exception.BatchCommand, Is.SameAs(cmd.InternalBatchCommands[0])); + if (includeFailedBatchedCommand) + Assert.That(exception.BatchCommand, Is.SameAs(cmd.InternalBatchCommands[0])); + else + Assert.That(exception.BatchCommand, Is.Null); // Make sure the command isn't recycled by 
the connection when it's disposed - this is important since internal command // resources are referenced by the exception above, which is very likely to escape the using statement of the command. @@ -648,9 +652,10 @@ await conn.ExecuteNonQueryAsync($@" } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/967")] - public async Task NpgsqlException_references_BatchCommand_with_multiple_commands() + public async Task NpgsqlException_references_BatchCommand_with_multiple_commands([Values] bool includeFailedBatchedCommand) { - await using var conn = await OpenConnectionAsync(); + await using var dataSource = CreateDataSource(x => x.IncludeFailedBatchedCommand = includeFailedBatchedCommand); + await using var conn = await dataSource.OpenConnectionAsync(); var function = await GetTempFunctionName(conn); await conn.ExecuteNonQueryAsync($@" @@ -665,7 +670,10 @@ await conn.ExecuteNonQueryAsync($@" await using (var reader = await cmd.ExecuteReaderAsync(Behavior)) { var exception = Assert.ThrowsAsync(() => reader.NextResultAsync())!; - Assert.That(exception.BatchCommand, Is.SameAs(cmd.InternalBatchCommands[1])); + if (includeFailedBatchedCommand) + Assert.That(exception.BatchCommand, Is.SameAs(cmd.InternalBatchCommands[1])); + else + Assert.That(exception.BatchCommand, Is.Null); } // Make sure the command isn't recycled by the connection when it's disposed - this is important since internal command From 0fc7f66c67271a01ab41cefe35af8b2a92c414e7 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Sun, 5 Oct 2025 21:53:15 +0200 Subject: [PATCH 576/761] Add implicit cast from .NET IPNetwork to NpgsqlInet (#6232) Helps EFCore.PG especially, continues #5821. 
--- .../Converters/Networking/IPNetworkConverter.cs | 13 +++++++++++-- src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs | 7 +++++++ src/Npgsql/PublicAPI.Unshipped.txt | 1 + 3 files changed, 19 insertions(+), 2 deletions(-) diff --git a/src/Npgsql/Internal/Converters/Networking/IPNetworkConverter.cs b/src/Npgsql/Internal/Converters/Networking/IPNetworkConverter.cs index 77714edf29..6fc7b5401e 100644 --- a/src/Npgsql/Internal/Converters/Networking/IPNetworkConverter.cs +++ b/src/Npgsql/Internal/Converters/Networking/IPNetworkConverter.cs @@ -1,4 +1,5 @@ -using System.Net; +using System; +using System.Net; // ReSharper disable once CheckNamespace namespace Npgsql.Internal.Converters; @@ -18,5 +19,13 @@ protected override IPNetwork ReadCore(PgReader reader) } protected override void WriteCore(PgWriter writer, IPNetwork value) - => NpgsqlInetConverter.WriteImpl(writer, (value.BaseAddress, (byte)value.PrefixLength), isCidr: true); + => NpgsqlInetConverter.WriteImpl( + writer, + ( + value.BaseAddress, + value.PrefixLength <= byte.MaxValue + ? (byte)value.PrefixLength + : throw new ArgumentOutOfRangeException(nameof(value), "IPNetwork.PrefixLength is too large to fit in a byte") + ), + isCidr: true); } diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs b/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs index 7d5eedf7af..4736ca00ec 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs @@ -469,6 +469,13 @@ public static explicit operator IPAddress(NpgsqlInet inet) public static implicit operator NpgsqlInet(IPAddress ip) => new(ip); + public static implicit operator NpgsqlInet(IPNetwork cidr) + => new( + cidr.BaseAddress, + cidr.PrefixLength <= byte.MaxValue + ? 
(byte)cidr.PrefixLength + : throw new ArgumentOutOfRangeException(nameof(cidr), "IPNetwork.PrefixLength is too large to fit in a byte")); + public void Deconstruct(out IPAddress address, out byte netmask) { address = Address; diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index 43905953db..8de71da9e5 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -86,4 +86,5 @@ Npgsql.NpgsqlConnection.ReloadTypesAsync(System.Threading.CancellationToken canc *REMOVED*Npgsql.NpgsqlSlimDataSourceBuilder.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! *REMOVED*Npgsql.NpgsqlSlimDataSourceBuilder.MapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! *REMOVED*Npgsql.NpgsqlSlimDataSourceBuilder.MapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +static NpgsqlTypes.NpgsqlInet.implicit operator NpgsqlTypes.NpgsqlInet(System.Net.IPNetwork cidr) -> NpgsqlTypes.NpgsqlInet static readonly NpgsqlTypes.NpgsqlTsVector.Empty -> NpgsqlTypes.NpgsqlTsVector! 
From 92a455a3a1cc7bd71a009f7514ecb28fa54f2d23 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Sun, 5 Oct 2025 22:03:24 +0200 Subject: [PATCH 577/761] Set version to 10.0.0-rc.1 --- Directory.Build.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Build.props b/Directory.Build.props index de99b4fd8c..a3a4b11e2a 100644 --- a/Directory.Build.props +++ b/Directory.Build.props @@ -1,6 +1,6 @@  - 10.0.0 + 10.0.0-rc.1 latest true enable From 0af34c82c5f1c4192902ec73c5f1cdb026475276 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Sun, 5 Oct 2025 22:06:19 +0200 Subject: [PATCH 578/761] Bump version to 10.0.0-rc.2 --- Directory.Build.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Build.props b/Directory.Build.props index a3a4b11e2a..56ff636631 100644 --- a/Directory.Build.props +++ b/Directory.Build.props @@ -1,6 +1,6 @@  - 10.0.0-rc.1 + 10.0.0-rc.2 latest true enable From 7042543eabaf185f281c7b79cf9907c9c804f58e Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Sun, 5 Oct 2025 22:18:27 +0200 Subject: [PATCH 579/761] Use dotnet SDK 10.0.100-rc.1 --- global.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/global.json b/global.json index 9f1e930171..838198f254 100644 --- a/global.json +++ b/global.json @@ -1,6 +1,6 @@ { "sdk": { - "version": "9.0.200", + "version": "10.0.100-rc.1.25451.107", "rollForward": "latestMajor", "allowPrerelease": false } From 91cc5c18273594366833aad9d22734aee4305867 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Oct 2025 21:18:48 +0000 Subject: [PATCH 580/761] Bump NUnit3TestAdapter from 5.1.0 to 5.2.0 (#6236) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 2f6c36102a..2f2363af80 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -25,7 +25,7 @@ - + From 
ac774b32c0d72c9227fc460401a849523e1cc14b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 8 Oct 2025 23:29:38 +0200 Subject: [PATCH 581/761] Bump Microsoft.Data.SqlClient from 6.1.1 to 6.1.2 (#6242) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 2f2363af80..7eec3856fc 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -34,7 +34,7 @@ - + From 4cc0884cca95055b5d8a7a7e8d9253628098affb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 8 Oct 2025 23:48:12 +0200 Subject: [PATCH 582/761] Bump github/codeql-action from 3 to 4 (#6239) --- .github/workflows/codeql-analysis.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 5a465ad42e..666e98fc85 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -56,7 +56,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@v3 + uses: github/codeql-action/init@v4 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. 
@@ -87,4 +87,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v3 + uses: github/codeql-action/analyze@v4 From 154604112d6fb12175082f2d79a75800737a2442 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 10 Oct 2025 11:10:08 +0200 Subject: [PATCH 583/761] Bump OpenTelemetry.Api from 1.13.0 to 1.13.1 (#6245) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 7eec3856fc..17a7ffc37c 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -2,7 +2,7 @@ - + From 6d2f348d9f9121815769e2ba4c1091087bae74a2 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Wed, 15 Oct 2025 16:30:19 +0300 Subject: [PATCH 584/761] Fix a few issues with type mappings for schema generator (#6241) Fixes #6240 --- src/Npgsql/Schema/DbColumnSchemaGenerator.cs | 7 ++- test/Npgsql.Tests/CommandBuilderTests.cs | 56 ++++++++++++++++++++ test/Npgsql.Tests/ReaderNewSchemaTests.cs | 18 ++++++- test/Npgsql.Tests/ReaderTests.cs | 2 +- 4 files changed, 79 insertions(+), 4 deletions(-) diff --git a/src/Npgsql/Schema/DbColumnSchemaGenerator.cs b/src/Npgsql/Schema/DbColumnSchemaGenerator.cs index e42c6f2505..458dc725fc 100644 --- a/src/Npgsql/Schema/DbColumnSchemaGenerator.cs +++ b/src/Npgsql/Schema/DbColumnSchemaGenerator.cs @@ -259,8 +259,11 @@ void ColumnPostConfig(NpgsqlDbColumn column, int typeModifier) { var serializerOptions = _connection.Connector!.SerializerOptions; - column.NpgsqlDbType = column.PostgresType.DataTypeName.ToNpgsqlDbType(); - if (serializerOptions.GetDefaultTypeInfo(serializerOptions.ToCanonicalTypeId(column.PostgresType)) is { } typeInfo) + // Call GetRepresentationalType to also handle domain types + // Because NpgsqlCommandBuilder relies on NpgsqlDbType for correct type mapping + // And otherwise we'll get NpgsqlDbType.Unknown + column.NpgsqlDbType = 
column.PostgresType.GetRepresentationalType().DataTypeName.ToNpgsqlDbType(); + if (serializerOptions.GetTypeInfo(typeof(object), serializerOptions.ToCanonicalTypeId(column.PostgresType)) is { } typeInfo) { column.DataType = typeInfo.Type; column.IsLong = column.PostgresType.DataTypeName == DataTypeNames.Bytea; diff --git a/test/Npgsql.Tests/CommandBuilderTests.cs b/test/Npgsql.Tests/CommandBuilderTests.cs index b47422e830..f9643adfd5 100644 --- a/test/Npgsql.Tests/CommandBuilderTests.cs +++ b/test/Npgsql.Tests/CommandBuilderTests.cs @@ -387,4 +387,60 @@ public async Task Get_update_command_with_array_column_type() daDataAdapter.Update(dtTable); } + + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/6240")] + public async Task Get_update_command_with_domain_column_type() + { + await using var adminConnection = await OpenConnectionAsync(); + var domainTypeName = await GetTempTypeName(adminConnection); + + await adminConnection.ExecuteNonQueryAsync($"CREATE DOMAIN {domainTypeName} AS smallint"); + + var tableName = await CreateTempTable(adminConnection, $"id serial PRIMARY KEY, domtest {domainTypeName}"); + + await using var dataSource = CreateDataSource(); + await using var conn = await dataSource.OpenConnectionAsync(); + + using var adapter = new NpgsqlDataAdapter($"select * from {tableName}", conn); + + var builder = new NpgsqlCommandBuilder(adapter) + { + ConflictOption = ConflictOption.CompareAllSearchableValues, + SetAllValues = true + }; + + adapter.InsertCommand = builder.GetInsertCommand(); + adapter.UpdateCommand = builder.GetUpdateCommand(); + adapter.DeleteCommand = builder.GetDeleteCommand(); + + using var dataTable = new DataTable(); + + adapter.Fill(dataTable); + + const short sval = 5; + + var newRow = dataTable.NewRow(); + newRow[1] = sval; + dataTable.Rows.Add(newRow); + + adapter.Update(dataTable); + } + + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/6240")] + public async Task Fill_datatable_with_array_column_type() + { + 
await using var connection = await OpenConnectionAsync(); + + var tableName = await CreateTempTable(connection, "id serial PRIMARY KEY, textarr text[] COLLATE pg_catalog.\"default\""); + + using var adapter = new NpgsqlDataAdapter($"select * from {tableName}", connection); + + using var dataTable = new DataTable(); + + adapter.FillSchema(dataTable, SchemaType.Source); + + adapter.MissingSchemaAction = MissingSchemaAction.Ignore; + + adapter.Fill(dataTable); + } } diff --git a/test/Npgsql.Tests/ReaderNewSchemaTests.cs b/test/Npgsql.Tests/ReaderNewSchemaTests.cs index 2391cf54f0..f892670d96 100644 --- a/test/Npgsql.Tests/ReaderNewSchemaTests.cs +++ b/test/Npgsql.Tests/ReaderNewSchemaTests.cs @@ -1,4 +1,5 @@ -using System.Collections.ObjectModel; +using System; +using System.Collections.ObjectModel; using System.Data; using System.Data.Common; using System.Linq; @@ -450,6 +451,19 @@ public async Task DataType_with_composite() Assert.That(columns[1].UdtAssemblyQualifiedName, Is.EqualTo(typeof(SomeComposite).AssemblyQualifiedName)); } + [Test] + public async Task DataType_with_array() + { + using var conn = await OpenConnectionAsync(); + var table = await CreateTempTable(conn, "foo INTEGER[]"); + + using var cmd = new NpgsqlCommand($"SELECT foo, ARRAY[1::INTEGER, 2::INTEGER] FROM {table}", conn); + using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SchemaOnly); + var columns = await GetColumnSchema(reader); + Assert.That(columns[0].DataType, Is.SameAs(typeof(Array))); + Assert.That(columns[1].DataType, Is.SameAs(typeof(Array))); + } + [Test] public async Task UdtAssemblyQualifiedName() { @@ -673,6 +687,8 @@ public async Task Domain_type() var pgType = domainSchema.PostgresType; Assert.That(pgType, Is.InstanceOf()); Assert.That(((PostgresDomainType)pgType).BaseType.Name, Is.EqualTo("character varying")); + // For domains we should return the underlying type + Assert.That(domainSchema.NpgsqlDbType, Is.EqualTo(NpgsqlTypes.NpgsqlDbType.Varchar)); } [Test] diff 
--git a/test/Npgsql.Tests/ReaderTests.cs b/test/Npgsql.Tests/ReaderTests.cs index 2d1e7040cd..aa39a38467 100644 --- a/test/Npgsql.Tests/ReaderTests.cs +++ b/test/Npgsql.Tests/ReaderTests.cs @@ -304,7 +304,7 @@ public async Task GetFieldType_SchemaOnly() { await using var conn = await OpenConnectionAsync(); await using var cmd = new NpgsqlCommand(@"SELECT 1::INT4 AS some_column", conn); - await using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SchemaOnly); + await using var reader = await cmd.ExecuteReaderAsync(Behavior | CommandBehavior.SchemaOnly); reader.Read(); Assert.That(reader.GetFieldType(0), Is.SameAs(typeof(int))); } From 92be7c724a642e2c695134bfe006a18ab11a4ce4 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Fri, 17 Oct 2025 10:21:59 +0200 Subject: [PATCH 585/761] Bump dependencies to 10.0.0-rc.2 (#6260) --- Directory.Packages.props | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 17a7ffc37c..1434e03287 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -1,7 +1,7 @@ - - + + @@ -21,8 +21,8 @@ - - + + @@ -30,7 +30,7 @@ - + From 960050f5567c6ad05a34ab6447e47ee76c8f177f Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Fri, 24 Oct 2025 14:26:07 +0300 Subject: [PATCH 586/761] Fix infinite loop when a connector is closed while concurrently consuming result set (#6265) Fixes #6264 --- src/Npgsql/Internal/NpgsqlConnector.cs | 2 +- src/Npgsql/NpgsqlDataReader.cs | 7 +++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 4ef0bd44dc..e4c37f061d 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -467,7 +467,7 @@ internal ConnectorState State /// /// Returns whether the connector is open, regardless of any task it is currently performing /// - bool IsConnected => State is not (ConnectorState.Closed 
or ConnectorState.Connecting or ConnectorState.Broken); + internal bool IsConnected => State is not (ConnectorState.Closed or ConnectorState.Connecting or ConnectorState.Broken); internal bool IsReady => State == ConnectorState.Ready; internal bool IsClosed => State == ConnectorState.Closed; diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 32d66090a9..585536da18 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -984,11 +984,14 @@ async Task Consume(bool async, Exception? firstException = null) // state for auto-prepared statements // // The only exception is when the connector is broken (which can happen in the middle of consuming) - // As then there is no point in going forward + // As then there is no point in going forward. + // An exception to the exception above is when connector is concurrently closed while + // the reader is still going over the result set. + // While this is undefined behavior and user error, we should try to at least do our best to not loop indefinitely. 
// // While we can also check our local state (State == Closed) // It's probably better to rely on connector since it's private and its state can't be changed - while (!Connector.IsBroken) + while (Connector.IsConnected) { try { From c746805c9d43b5941ca16ec99ee9939561d6074e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 25 Oct 2025 17:10:27 +0200 Subject: [PATCH 587/761] Bump actions/upload-artifact from 4 to 5 (#6266) --- .github/workflows/build.yml | 4 ++-- .github/workflows/native-aot.yml | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index f37d7e432f..ff7a6581f7 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -360,7 +360,7 @@ jobs: run: dotnet pack --configuration Release --property:PackageOutputPath="$PWD/nupkgs" --version-suffix "ci.$(date -u +%Y%m%dT%H%M%S)+sha.${GITHUB_SHA:0:9}" -p:ContinuousIntegrationBuild=true - name: Upload artifacts (nupkg) - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v5 with: name: Npgsql.CI path: nupkgs @@ -392,7 +392,7 @@ jobs: run: dotnet pack --configuration Release --property:PackageOutputPath="$PWD/nupkgs" -p:ContinuousIntegrationBuild=true - name: Upload artifacts - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v5 with: name: Npgsql.Release path: nupkgs diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index ecc57d51a8..ef6d7b96b5 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -170,21 +170,21 @@ jobs: run: dotnet run --project test/MStatDumper/MStatDumper.csproj -c release -f ${{ matrix.tfm }} -- "test/Npgsql.NativeAotTests/obj/Release/net9.0/linux-x64/native/Npgsql.NativeAotTests.mstat" md >> $GITHUB_STEP_SUMMARY - name: Upload mstat - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v5 with: name: npgsql.mstat path: 
"test/Npgsql.NativeAotTests/obj/Release/${{ matrix.tfm }}/linux-x64/native/Npgsql.NativeAotTests.mstat" retention-days: 3 - name: Upload codedgen dgml - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v5 with: name: npgsql.codegen.dgml.xml path: "test/Npgsql.NativeAotTests/obj/Release/${{ matrix.tfm }}/linux-x64/native/Npgsql.NativeAotTests.codegen.dgml.xml" retention-days: 3 - name: Upload scan dgml - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v5 with: name: npgsql.scan.dgml.xml path: "test/Npgsql.NativeAotTests/obj/Release/${{ matrix.tfm }}/linux-x64/native/Npgsql.NativeAotTests.scan.dgml.xml" From 1d1ea6f7e4e8cccb8d65b60ed20592a08611c9b3 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Mon, 27 Oct 2025 14:01:44 +0100 Subject: [PATCH 588/761] Fix streaming threshold value (#6269) Closes #5978 --- src/Npgsql/Internal/Converters/JsonConverter.cs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Npgsql/Internal/Converters/JsonConverter.cs b/src/Npgsql/Internal/Converters/JsonConverter.cs index 77157875b3..074575e4e1 100644 --- a/src/Npgsql/Internal/Converters/JsonConverter.cs +++ b/src/Npgsql/Internal/Converters/JsonConverter.cs @@ -107,8 +107,8 @@ public override ValueTask WriteAsync(PgWriter writer, T? value, CancellationToke static class JsonConverter { public const byte JsonbProtocolVersion = 1; - // We pick a value that is the largest multiple of 4096 that is still smaller than the large object heap threshold (85K). - const int StreamingThreshold = 81920; + // Largest value that is a power of 2 and a multiple of 4096 while staying under the large object heap threshold (85K). + const int StreamingThreshold = 65536; public static bool TryReadStream(bool jsonb, Encoding encoding, PgReader reader, out int byteCount, [NotNullWhen(true)]out Stream? 
stream) { From 62fd0ad621dab74efdfde07a0b698b7d5a9f86d0 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Mon, 27 Oct 2025 21:59:28 +0100 Subject: [PATCH 589/761] Respect configured schemas in enum field loading (#6268) Closes #6246 --- src/Npgsql/PostgresDatabaseInfo.cs | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/src/Npgsql/PostgresDatabaseInfo.cs b/src/Npgsql/PostgresDatabaseInfo.cs index 6cd4f2a5fe..1c1b518a3f 100644 --- a/src/Npgsql/PostgresDatabaseInfo.cs +++ b/src/Npgsql/PostgresDatabaseInfo.cs @@ -149,8 +149,8 @@ LEFT JOIN pg_class AS elemcls ON (elemcls.oid = elemtyp.typrelid) ) AS t JOIN pg_namespace AS ns ON (ns.oid = typnamespace) WHERE - {(schemaListSqlFragment is not null ? $"(ns.nspname IN ({BuiltinSchemaListSqlFragment}{(schemaListSqlFragment.Length > 0 ? $", {schemaListSqlFragment}" : "")}){(hasTypeCategory ? " OR typcategory = 'U'" : "" )}) AND (" : "(")} - typtype IN ('b', 'r', 'm', 'e', 'd') OR -- Base, range, multirange, enum, domain + {(schemaListSqlFragment is not null ? $"(ns.nspname IN ({schemaListSqlFragment}){(hasTypeCategory ? " OR typcategory = 'U'" : "" )}) AND " : "")} + (typtype IN ('b', 'r', 'm', 'e', 'd') OR -- Base, range, multirange, enum, domain (typtype = 'c' AND {(loadTableComposites ? $"ns.nspname NOT IN ({BuiltinSchemaListSqlFragment})" : "relkind='c'")}) OR -- User-defined free-standing composites (not table composites) by default (typtype = 'p' AND typname IN ('record', 'void', 'unknown')) OR -- Some special supported pseudo-types (typtype = 'a' AND ( -- Array of... @@ -178,17 +178,19 @@ JOIN pg_class AS cls ON (cls.oid = typ.typrelid) JOIN pg_attribute AS att ON (att.attrelid = typ.typrelid) WHERE (typ.typtype = 'c' AND {(loadTableComposites ? $"ns.nspname NOT IN ({BuiltinSchemaListSqlFragment})" : "cls.relkind='c'")}) AND - {(schemaListSqlFragment is not null ? $"(ns.nspname IN ({BuiltinSchemaListSqlFragment}{(schemaListSqlFragment.Length > 0 ? 
$", {schemaListSqlFragment}" : "")})) AND " : "")} + {(schemaListSqlFragment is not null ? $"(ns.nspname IN ({schemaListSqlFragment})) AND " : "")} attnum > 0 AND -- Don't load system attributes NOT attisdropped ORDER BY typ.oid, att.attnum;"; - static string GenerateLoadEnumFieldsQuery(bool withEnumSortOrder) + static string GenerateLoadEnumFieldsQuery(bool withEnumSortOrder, string? schemaListSqlFragment) => $@" -- Load enum fields -SELECT pg_type.oid, enumlabel +SELECT typ.oid, enumlabel FROM pg_enum -JOIN pg_type ON pg_type.oid=enumtypid +JOIN pg_type AS typ ON typ.oid = enumtypid +JOIN pg_namespace AS ns ON ns.oid = typ.typnamespace +{(schemaListSqlFragment is not null ? $"WHERE (ns.nspname IN ({schemaListSqlFragment}))" : "")} ORDER BY oid{(withEnumSortOrder ? ", enumsortorder" : "")};"; /// @@ -213,11 +215,10 @@ internal async Task> LoadBackendTypes(NpgsqlConnector conn, N string? schemaListSqlFragment = null; if (typeLoading.TypeLoadingSchemas is not null) { - var builder = new StringBuilder(); + var builder = new StringBuilder(BuiltinSchemaListSqlFragment); for (var i = 0; i < typeLoading.TypeLoadingSchemas.Length; i++) { - if (i > 0) - builder.Append(", "); + builder.Append(", "); var schema = typeLoading.TypeLoadingSchemas[i]; builder.Append('\''); builder.Append(EscapeLiteral(schema)); @@ -230,7 +231,7 @@ internal async Task> LoadBackendTypes(NpgsqlConnector conn, N var loadTypesQuery = GenerateLoadTypesQuery(SupportsRangeTypes, SupportsMultirangeTypes, loadTableComposites, schemaListSqlFragment, HasTypeCategory); var loadCompositeTypesQuery = GenerateLoadCompositeTypesQuery(loadTableComposites, schemaListSqlFragment); var loadEnumFieldsQuery = SupportsEnumTypes - ? GenerateLoadEnumFieldsQuery(HasEnumSortOrder) + ? 
GenerateLoadEnumFieldsQuery(HasEnumSortOrder, schemaListSqlFragment) : string.Empty; timeout.CheckAndApply(conn); From 9f6a662eaeaf92560ac516aa8947b5b58109bb91 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Oct 2025 22:43:09 +0100 Subject: [PATCH 590/761] Bump Scriban.Signed from 6.4.0 to 6.5.0 (#6273) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 1434e03287..2643731072 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -16,7 +16,7 @@ - + From 153b91fc656ffef2f62bece778f6d9a88dbbbac3 Mon Sep 17 00:00:00 2001 From: Artem Serostanov Date: Wed, 29 Oct 2025 17:53:42 +0300 Subject: [PATCH 591/761] Incorrect multi-threading synchronization in NpgsqlDataSource.UpdateDatabaseState() (#6114) Co-authored-by: Serostanov Artem Sergeevich --- src/Npgsql/NpgsqlDataSource.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index cc177602db..080906ca26 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -424,7 +424,7 @@ internal DatabaseState UpdateDatabaseState( var databaseStateInfo = _databaseStateInfo; if (!ignoreTimeStamp && timeStamp <= databaseStateInfo.TimeStamp) - return _databaseStateInfo.State; + return databaseStateInfo.State; _databaseStateInfo = new(newState, new NpgsqlTimeout(stateExpiration), timeStamp); From dd3b003a0d90cc948d2cba3ed9f088891a01e188 Mon Sep 17 00:00:00 2001 From: Bruce Bowyer-Smyth Date: Thu, 30 Oct 2025 01:37:22 +1000 Subject: [PATCH 592/761] Additional GetBytes/GetStream tests (#5934) --- test/Npgsql.Tests/ReaderTests.cs | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/test/Npgsql.Tests/ReaderTests.cs b/test/Npgsql.Tests/ReaderTests.cs index aa39a38467..0d27bc5d03 100644 --- a/test/Npgsql.Tests/ReaderTests.cs +++ 
b/test/Npgsql.Tests/ReaderTests.cs @@ -817,6 +817,7 @@ public async Task Null() Assert.That(reader.GetFieldValue(i), Is.EqualTo(DBNull.Value)); Assert.That(reader.GetProviderSpecificValue(i), Is.EqualTo(DBNull.Value)); Assert.That(() => reader.GetString(i), Throws.Exception.TypeOf()); + Assert.That(() => reader.GetStream(i), Throws.Exception.TypeOf()); } } @@ -1436,6 +1437,25 @@ public async Task GetStream_second_time_throws([Values(true, false)] bool isAsyn Throws.Exception.TypeOf()); } + [Test] + public async Task GetBytes_before_getstream([Values(true, false)] bool isAsync) + { + var expected = new byte[] { 1, 2, 3, 4, 5, 6, 7, 8 }; + var streamGetter = BuildStreamGetter(isAsync); + + using var conn = await OpenConnectionAsync(); + using var cmd = new NpgsqlCommand($"SELECT {EncodeByteaHex(expected)}::bytea", conn); + using var reader = await cmd.ExecuteReaderAsync(Behavior); + + await reader.ReadAsync(); + + // GetBytes with null buffer won't consume column in any way + Assert.That(reader.GetBytes(0, 0, null, 0, 0), Is.EqualTo(expected.Length), "Bad column length"); + + using var stream = await streamGetter(reader, 0); + Assert.That(stream.Length, Is.EqualTo(expected.Length)); + } + public static IEnumerable GetStreamCases() { var binary = MemoryMarshal From 2ef750c1a4ed5dd5d7e19f9cb7c840ebb23a5fb8 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Wed, 29 Oct 2025 17:23:19 +0100 Subject: [PATCH 593/761] Fix inclusive infinity upper bound interval conversion (#6270) Closes #6080 --- .../Internal/IntervalConverter.cs | 9 +++++++-- ...aTimeTypeInfoResolverFactory.Multirange.cs | 4 ++-- .../NodaTimeTypeInfoResolverFactory.Range.cs | 2 +- .../NodaTimeInfinityTests.cs | 20 +++++++++++++++++++ 4 files changed, 30 insertions(+), 5 deletions(-) diff --git a/src/Npgsql.NodaTime/Internal/IntervalConverter.cs b/src/Npgsql.NodaTime/Internal/IntervalConverter.cs index 7da4aa401c..f062079a4a 100644 --- a/src/Npgsql.NodaTime/Internal/IntervalConverter.cs +++ 
b/src/Npgsql.NodaTime/Internal/IntervalConverter.cs @@ -6,7 +6,7 @@ namespace Npgsql.NodaTime.Internal; -public class IntervalConverter(PgConverter> rangeConverter) : PgStreamingConverter +sealed class IntervalConverter(PgConverter> rangeConverter, bool dateTimeInfinityConversions) : PgStreamingConverter { public override Interval Read(PgReader reader) => Read(async: false, reader, CancellationToken.None).GetAwaiter().GetResult(); @@ -27,7 +27,12 @@ async ValueTask Read(bool async, PgReader reader, CancellationToken ca : range.LowerBoundIsInclusive ? range.LowerBound : range.LowerBound + Duration.Epsilon; - Instant? end = range.UpperBoundInfinite + // For ranges with element types with infinity values (datetime, date etc.) an + // inclusive lower/upper bound causes their -/+ infinity (respectively) to fall within the range. + // If those values are returned for such a range postgres will not mark the affected bound as infinite accordingly. + // This is documented in https://www.postgresql.org/docs/current/rangetypes.html#RANGETYPES-INFINITE + // As NodaTime uses an exclusive upper bound we must consider this case as being another form of infinity (null). + Instant? end = range.UpperBoundInfinite || (dateTimeInfinityConversions && range.UpperBoundIsInclusive && range.UpperBound == Instant.MaxValue) ? null : range.UpperBoundIsInclusive ? 
range.UpperBound + Duration.Epsilon diff --git a/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolverFactory.Multirange.cs b/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolverFactory.Multirange.cs index 42c6360dad..fdd8d4c78f 100644 --- a/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolverFactory.Multirange.cs +++ b/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolverFactory.Multirange.cs @@ -31,12 +31,12 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) mappings.AddType(TimestampTzMultirangeDataTypeName, static (options, mapping, _) => mapping.CreateInfo(options, CreateArrayMultirangeConverter(new IntervalConverter( - CreateRangeConverter(new InstantConverter(options.EnableDateTimeInfinityConversions), options)), options)), + CreateRangeConverter(new InstantConverter(options.EnableDateTimeInfinityConversions), options), options.EnableDateTimeInfinityConversions), options)), isDefault: true); mappings.AddType>(TimestampTzMultirangeDataTypeName, static (options, mapping, _) => mapping.CreateInfo(options, CreateListMultirangeConverter(new IntervalConverter( - CreateRangeConverter(new InstantConverter(options.EnableDateTimeInfinityConversions), options)), options))); + CreateRangeConverter(new InstantConverter(options.EnableDateTimeInfinityConversions), options), options.EnableDateTimeInfinityConversions), options))); mappings.AddType[]>(TimestampTzMultirangeDataTypeName, static (options, mapping, _) => mapping.CreateInfo(options, diff --git a/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolverFactory.Range.cs b/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolverFactory.Range.cs index f62669333c..8958f88846 100644 --- a/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolverFactory.Range.cs +++ b/src/Npgsql.NodaTime/Internal/NodaTimeTypeInfoResolverFactory.Range.cs @@ -31,7 +31,7 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) static (options, mapping, _) => mapping.CreateInfo(options, new 
IntervalConverter( - CreateRangeConverter(new InstantConverter(options.EnableDateTimeInfinityConversions), options))), + CreateRangeConverter(new InstantConverter(options.EnableDateTimeInfinityConversions), options), options.EnableDateTimeInfinityConversions)), isDefault: true); mappings.AddStructType>(TimestampTzRangeDataTypeName, static (options, mapping, _) => mapping.CreateInfo(options, diff --git a/test/Npgsql.PluginTests/NodaTimeInfinityTests.cs b/test/Npgsql.PluginTests/NodaTimeInfinityTests.cs index 75559169f0..e8f8036ada 100644 --- a/test/Npgsql.PluginTests/NodaTimeInfinityTests.cs +++ b/test/Npgsql.PluginTests/NodaTimeInfinityTests.cs @@ -361,6 +361,26 @@ public async Task Interval_convert_infinity() } } + [Test] + public async Task Inclusive_End_Range_Infinity_read() + { + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand( + "SELECT tstzrange('-infinity', 'infinity','[]') as val", conn); + + await using var reader = await cmd.ExecuteReaderAsync(); + await reader.ReadAsync(); + + if (Statics.DisableDateTimeInfinityConversions) + { + Assert.That(() => reader[0], Throws.Exception.TypeOf()); + } + else + { + Assert.That(reader[0], Is.EqualTo(new Interval(Instant.MinValue, null))); + } + } + protected override NpgsqlDataSource DataSource { get; } public NodaTimeInfinityTests(bool disableDateTimeInfinityConversions) From 19d62f642d6d062d08111aa5c79643bc5a6be4d2 Mon Sep 17 00:00:00 2001 From: Pedro Henrique Windisch Date: Wed, 29 Oct 2025 13:44:52 -0300 Subject: [PATCH 594/761] Wrap GetHostAddresses/Async calls to catch SocketException (#5664) Closes #5606 --- src/Npgsql/Internal/NpgsqlConnector.cs | 52 +++++++++++++++++++------- test/Npgsql.Tests/ConnectionTests.cs | 33 ++++++++++++++++ test/Npgsql.Tests/TracingTests.cs | 2 +- 3 files changed, 73 insertions(+), 14 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index e4c37f061d..2ceed5cfb9 100644 --- 
a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -1232,10 +1232,24 @@ internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, void Connect(NpgsqlTimeout timeout) { - // Note that there aren't any timeout-able or cancellable DNS methods - var endpoints = NpgsqlConnectionStringBuilder.IsUnixSocket(Host, Port, out var socketPath) - ? new EndPoint[] { new UnixDomainSocketEndPoint(socketPath) } - : IPAddressesToEndpoints(Dns.GetHostAddresses(Host), Port); + EndPoint[]? endpoints; + if (NpgsqlConnectionStringBuilder.IsUnixSocket(Host, Port, out var socketPath)) + { + endpoints = [new UnixDomainSocketEndPoint(socketPath!)]; + } + else + { + // Note that there aren't any timeout-able or cancellable DNS methods + try + { + endpoints = IPAddressesToEndpoints(Dns.GetHostAddresses(Host), Port); + } + catch (SocketException ex) + { + throw new NpgsqlException(ex.Message, ex); + } + } + timeout.Check(); // Give each endpoint an equal share of the remaining time @@ -1302,16 +1316,28 @@ void Connect(NpgsqlTimeout timeout) async Task ConnectAsync(NpgsqlTimeout timeout, CancellationToken cancellationToken) { - Task GetHostAddressesAsync(CancellationToken ct) => - Dns.GetHostAddressesAsync(Host, ct); - // Whether the framework and/or the OS platform support Dns.GetHostAddressesAsync cancellation API or they do not, // we always fake-cancel the operation with the help of TaskTimeoutAndCancellation.ExecuteAsync. It stops waiting // and raises the exception, while the actual task may be left running. - var endpoints = NpgsqlConnectionStringBuilder.IsUnixSocket(Host, Port, out var socketPath) - ? 
new EndPoint[] { new UnixDomainSocketEndPoint(socketPath) } - : IPAddressesToEndpoints(await TaskTimeoutAndCancellation.ExecuteAsync(GetHostAddressesAsync, timeout, cancellationToken).ConfigureAwait(false), - Port); + EndPoint[] endpoints; + if (NpgsqlConnectionStringBuilder.IsUnixSocket(Host, Port, out var socketPath)) + { + endpoints = [new UnixDomainSocketEndPoint(socketPath)]; + } + else + { + IPAddress[] ipAddresses; + try + { + ipAddresses = await Dns.GetHostAddressesAsync(Host, cancellationToken) + .WaitAsync(timeout.CheckAndGetTimeLeft(), cancellationToken).ConfigureAwait(false); + } + catch (SocketException ex) + { + throw new NpgsqlException(ex.Message, ex); + } + endpoints = IPAddressesToEndpoints(ipAddresses, Port); + } // Give each endpoint an equal share of the remaining time var perEndpointTimeout = default(TimeSpan); @@ -1376,9 +1402,9 @@ Task ConnectAsync(CancellationToken ct) => } } - IPEndPoint[] IPAddressesToEndpoints(IPAddress[] ipAddresses, int port) + EndPoint[] IPAddressesToEndpoints(IPAddress[] ipAddresses, int port) { - var result = new IPEndPoint[ipAddresses.Length]; + var result = new EndPoint[ipAddresses.Length]; for (var i = 0; i < ipAddresses.Length; i++) result[i] = new IPEndPoint(ipAddresses[i], port); return result; diff --git a/test/Npgsql.Tests/ConnectionTests.cs b/test/Npgsql.Tests/ConnectionTests.cs index cda220a110..7ef94350fd 100644 --- a/test/Npgsql.Tests/ConnectionTests.cs +++ b/test/Npgsql.Tests/ConnectionTests.cs @@ -6,6 +6,7 @@ using System.Linq; using System.Net; using System.Net.Security; +using System.Net.Sockets; using System.Runtime.InteropServices; using System.Security.Cryptography.X509Certificates; using System.Text; @@ -313,6 +314,38 @@ public void Connect_timeout_cancel() Assert.That(conn.State, Is.EqualTo(ConnectionState.Closed)); } + [Test] + public void Bad_hostname() + { + using var dataSource = CreateDataSource(csb => csb.Host = "hostname.that.does.not.exist"); + using var conn = 
dataSource.CreateConnection(); + + Assert.That( + () => conn.Open(), + Throws.Exception + .TypeOf() + .With + .Property(nameof(NpgsqlException.InnerException)) + .TypeOf() + ); + } + + [Test] + public void Bad_hostname_async() + { + using var dataSource = CreateDataSource(csb => csb.Host = "hostname.that.does.not.exist"); + using var conn = dataSource.CreateConnection(); + + Assert.That( + async () => await conn.OpenAsync(), + Throws.Exception + .TypeOf() + .With + .Property(nameof(NpgsqlException.InnerException)) + .TypeOf() + ); + } + #endregion #region Client Encoding diff --git a/test/Npgsql.Tests/TracingTests.cs b/test/Npgsql.Tests/TracingTests.cs index 74e43f63a8..faec49c238 100644 --- a/test/Npgsql.Tests/TracingTests.cs +++ b/test/Npgsql.Tests/TracingTests.cs @@ -158,7 +158,7 @@ public async Task Error_open([Values] bool async) ActivitySource.AddActivityListener(activityListener); await using var dataSource = CreateDataSource(x => x.Host = "not-existing-host"); - var ex = Assert.ThrowsAsync(async () => + var ex = Assert.ThrowsAsync(async () => { await using var conn = async ? 
await dataSource.OpenConnectionAsync() From a06475b9378a23bc830c381380f22f9bb72198ed Mon Sep 17 00:00:00 2001 From: Dipankar Das Date: Wed, 29 Oct 2025 22:49:43 +0530 Subject: [PATCH 595/761] Add Deconstruct() for Npgsql types (#5695) Closes #5672 --- src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs | 49 +++++++++++++++++++++++++++ src/Npgsql/PublicAPI.Unshipped.txt | 9 +++++ 2 files changed, 58 insertions(+) diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs b/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs index 4736ca00ec..4f63a9defb 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlTypes.cs @@ -38,6 +38,8 @@ public override int GetHashCode() public override string ToString() => string.Format(CultureInfo.InvariantCulture, "({0},{1})", X, Y); + + public void Deconstruct(out double x, out double y) => (x, y) = (X, Y); } /// @@ -66,6 +68,8 @@ public override bool Equals(object? obj) public static bool operator ==(NpgsqlLine x, NpgsqlLine y) => x.Equals(y); public static bool operator !=(NpgsqlLine x, NpgsqlLine y) => !(x == y); + + public void Deconstruct(out double a, out double b, out double c) => (a, b, c) = (A, B, C); } /// @@ -103,6 +107,8 @@ public override bool Equals(object? 
obj) public static bool operator ==(NpgsqlLSeg x, NpgsqlLSeg y) => x.Equals(y); public static bool operator !=(NpgsqlLSeg x, NpgsqlLSeg y) => !(x == y); + + public void Deconstruct(out NpgsqlPoint start, out NpgsqlPoint end) => (start, end) = (Start, End); } /// @@ -178,6 +184,30 @@ void NormalizeBox() if (_upperRight.Y < _lowerLeft.Y) (_upperRight.Y, _lowerLeft.Y) = (_lowerLeft.Y, _upperRight.Y); } + + public void Deconstruct(out NpgsqlPoint lowerLeft, out NpgsqlPoint upperRight) + { + lowerLeft = LowerLeft; + upperRight = UpperRight; + } + + public void Deconstruct(out double left, out double right, out double bottom, out double top) + { + left = Left; + right = Right; + bottom = Bottom; + top = Top; + } + + public void Deconstruct(out double left, out double right, out double bottom, out double top, out double width, out double height) + { + left = Left; + right = Right; + bottom = Bottom; + top = Top; + width = Width; + height = Height; + } } /// @@ -413,6 +443,19 @@ public override string ToString() public override int GetHashCode() => HashCode.Combine(X, Y, Radius); + + public void Deconstruct(out double x, out double y, out double radius) + { + x = X; + y = Y; + radius = Radius; + } + + public void Deconstruct(out NpgsqlPoint center, out double radius) + { + center = Center; + radius = Radius; + } } /// @@ -562,6 +605,12 @@ public override bool Equals(object? 
o) public static bool operator ==(NpgsqlTid left, NpgsqlTid right) => left.Equals(right); public static bool operator !=(NpgsqlTid left, NpgsqlTid right) => !(left == right); public override string ToString() => $"({BlockNumber},{OffsetNumber})"; + + public void Deconstruct(out uint blockNumber, out ushort offsetNumber) + { + blockNumber = BlockNumber; + offsetNumber = OffsetNumber; + } } #pragma warning restore 1591 diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index 8de71da9e5..e7c8061376 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -88,3 +88,12 @@ Npgsql.NpgsqlConnection.ReloadTypesAsync(System.Threading.CancellationToken canc *REMOVED*Npgsql.NpgsqlSlimDataSourceBuilder.MapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! static NpgsqlTypes.NpgsqlInet.implicit operator NpgsqlTypes.NpgsqlInet(System.Net.IPNetwork cidr) -> NpgsqlTypes.NpgsqlInet static readonly NpgsqlTypes.NpgsqlTsVector.Empty -> NpgsqlTypes.NpgsqlTsVector! 
+NpgsqlTypes.NpgsqlBox.Deconstruct(out double left, out double right, out double bottom, out double top) -> void +NpgsqlTypes.NpgsqlBox.Deconstruct(out double left, out double right, out double bottom, out double top, out double width, out double height) -> void +NpgsqlTypes.NpgsqlBox.Deconstruct(out NpgsqlTypes.NpgsqlPoint lowerLeft, out NpgsqlTypes.NpgsqlPoint upperRight) -> void +NpgsqlTypes.NpgsqlCircle.Deconstruct(out double x, out double y, out double radius) -> void +NpgsqlTypes.NpgsqlCircle.Deconstruct(out NpgsqlTypes.NpgsqlPoint center, out double radius) -> void +NpgsqlTypes.NpgsqlLine.Deconstruct(out double a, out double b, out double c) -> void +NpgsqlTypes.NpgsqlLSeg.Deconstruct(out NpgsqlTypes.NpgsqlPoint start, out NpgsqlTypes.NpgsqlPoint end) -> void +NpgsqlTypes.NpgsqlPoint.Deconstruct(out double x, out double y) -> void +NpgsqlTypes.NpgsqlTid.Deconstruct(out uint blockNumber, out ushort offsetNumber) -> void From 05e042760920afe1f09bab6a89a638b658a85c2e Mon Sep 17 00:00:00 2001 From: Sergiusz <38229504+KeterSCP@users.noreply.github.com> Date: Wed, 29 Oct 2025 18:22:34 +0100 Subject: [PATCH 596/761] Explicitly set histogram bucket bounds (#6167) (#6168) Closes #6167 --- Directory.Packages.props | 3 +++ src/Npgsql/MetricsReporter.cs | 16 +++++++++++----- src/Npgsql/Npgsql.csproj | 1 + 3 files changed, 15 insertions(+), 5 deletions(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 2643731072..836b907a56 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -3,6 +3,9 @@ + + + diff --git a/src/Npgsql/MetricsReporter.cs b/src/Npgsql/MetricsReporter.cs index 707193e553..a25a3173cb 100644 --- a/src/Npgsql/MetricsReporter.cs +++ b/src/Npgsql/MetricsReporter.cs @@ -1,7 +1,6 @@ -using System; - namespace Npgsql; +using System; using System.Collections.Generic; using System.Diagnostics; using System.Diagnostics.Metrics; @@ -9,7 +8,7 @@ namespace Npgsql; using System.Threading; // .NET docs on metric instrumentation: 
https://learn.microsoft.com/en-us/dotnet/core/diagnostics/metrics-instrumentation -// OpenTelemetry semantic conventions for database metric: https://opentelemetry.io/docs/specs/otel/metrics/semantic_conventions/database-metrics +// OpenTelemetry semantic conventions for database metric: https://opentelemetry.io/docs/specs/semconv/database/database-metrics sealed class MetricsReporter : IDisposable { const string Version = "0.1.0"; @@ -33,6 +32,11 @@ sealed class MetricsReporter : IDisposable static readonly List Reporters = []; + static readonly InstrumentAdvice ShortHistogramAdvice = new() + { + HistogramBucketBoundaries = [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5, 10] + }; + CommandCounters _commandCounters; [StructLayout(LayoutKind.Explicit)] @@ -60,7 +64,8 @@ static MetricsReporter() CommandDuration = Meter.CreateHistogram( "db.client.commands.duration", unit: "s", - description: "The duration of database commands, in seconds."); + description: "The duration of database commands, in seconds.", + advice: ShortHistogramAdvice); BytesWritten = Meter.CreateCounter( "db.client.commands.bytes_written", @@ -85,7 +90,8 @@ static MetricsReporter() ConnectionCreateTime = Meter.CreateHistogram( "db.client.connections.create_time", unit: "s", - description: "The time it took to create a new connection."); + description: "The time it took to create a new connection.", + advice: ShortHistogramAdvice); // Observable metrics; these are for values we already track internally (and efficiently) inside the connection pool implementation. 
Meter.CreateObservableUpDownCounter( diff --git a/src/Npgsql/Npgsql.csproj b/src/Npgsql/Npgsql.csproj index 096b6c762e..80c42ba561 100644 --- a/src/Npgsql/Npgsql.csproj +++ b/src/Npgsql/Npgsql.csproj @@ -14,6 +14,7 @@ + From 4f7cf71ff1b8d8057eac2e438d6d5401fabeeb22 Mon Sep 17 00:00:00 2001 From: Bruce Bowyer-Smyth Date: Thu, 30 Oct 2025 06:06:04 +1000 Subject: [PATCH 597/761] Reduce temporary string creation during types load (#5986) --- src/Npgsql/Internal/NpgsqlDatabaseInfo.cs | 6 +- src/Npgsql/Internal/Postgres/DataTypeName.cs | 139 +++++++++--------- src/Npgsql/PostgresTypes/PostgresType.cs | 2 +- .../PostgresTypes/PostgresUnknownType.cs | 6 +- test/Npgsql.Tests/DataTypeNameTests.cs | 39 +++++ 5 files changed, 116 insertions(+), 76 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs b/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs index aca4144a09..5c700ac7e3 100644 --- a/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs +++ b/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs @@ -241,9 +241,9 @@ internal void ProcessTypes() ByFullName[type.DataTypeName.Value] = type; // If more than one type exists with the same partial name, we place a null value. // This allows us to detect this case later and force the user to use full names only. - ByName[type.InternalName] = ByName.ContainsKey(type.InternalName) - ? 
null - : type; + var typeInternalName = type.InternalName; + if (!ByName.TryAdd(typeInternalName, type)) + ByName[typeInternalName] = null; switch (type) { diff --git a/src/Npgsql/Internal/Postgres/DataTypeName.cs b/src/Npgsql/Internal/Postgres/DataTypeName.cs index 616881f385..e1b8225911 100644 --- a/src/Npgsql/Internal/Postgres/DataTypeName.cs +++ b/src/Npgsql/Internal/Postgres/DataTypeName.cs @@ -27,7 +27,7 @@ namespace Npgsql.Internal.Postgres; if (!validated) { var schemaEndIndex = fullyQualifiedDataTypeName.IndexOf('.'); - if (schemaEndIndex == -1) + if (schemaEndIndex is -1 or 0) throw new ArgumentException("Given value does not contain a schema.", nameof(fullyQualifiedDataTypeName)); // Friendly array syntax is the only fully qualified name quirk that's allowed by postgres (see FromDisplayName). @@ -86,108 +86,108 @@ public DataTypeName ToArrayName() if (unqualifiedNameSpan.StartsWith("_".AsSpan(), StringComparison.Ordinal)) return this; - var unqualifiedName = unqualifiedNameSpan.ToString(); - if (unqualifiedName.Length + "_".Length > NAMEDATALEN) - unqualifiedName = unqualifiedName.Substring(0, NAMEDATALEN - "_".Length); + if (unqualifiedNameSpan.Length + "_".Length > NAMEDATALEN) + unqualifiedNameSpan = unqualifiedNameSpan.Slice(0, NAMEDATALEN - "_".Length); - return new(Schema + "._" + unqualifiedName); + return new(string.Concat(Schema, "._", unqualifiedNameSpan)); } // Static transform as defined by https://www.postgresql.org/docs/current/sql-createtype.html#SQL-CREATETYPE-RANGE // Manual testing on PG confirmed it's only the first occurence of 'range' that gets replaced. 
public DataTypeName ToDefaultMultirangeName() { - var unqualifiedNameSpan = UnqualifiedNameSpan; - if (UnqualifiedNameSpan.IndexOf("multirange".AsSpan(), StringComparison.Ordinal) != -1) + var nameSpan = UnqualifiedNameSpan; + if (nameSpan.IndexOf("multirange".AsSpan(), StringComparison.Ordinal) is not -1) return this; - var unqualifiedName = unqualifiedNameSpan.ToString(); - var rangeIndex = unqualifiedName.IndexOf("range", StringComparison.Ordinal); - if (rangeIndex != -1) + if (nameSpan.IndexOf("range", StringComparison.Ordinal) is var rangeIndex and not -1) { - var str = unqualifiedName.Substring(0, rangeIndex) + "multirange" + unqualifiedName.Substring(rangeIndex + "range".Length); - - return new($"{Schema}." + (unqualifiedName.Length + "multi".Length > NAMEDATALEN - ? str.Substring(0, NAMEDATALEN - "multi".Length) - : str)); + nameSpan = string.Concat(nameSpan.Slice(0, rangeIndex), "multirange", nameSpan.Slice(rangeIndex + "range".Length)); + return new(string.Concat(SchemaSpan, ".", + nameSpan.Length > NAMEDATALEN ? nameSpan.Slice(0, NAMEDATALEN) : nameSpan)); } - return new($"{Schema}." + (unqualifiedName.Length + "multi".Length > NAMEDATALEN - ? unqualifiedName.Substring(0, NAMEDATALEN - "_multirange".Length) + "_multirange" - : unqualifiedName + "_multirange")); + if (nameSpan.Length + "_multirange".Length > NAMEDATALEN) + nameSpan = nameSpan.Slice(0, NAMEDATALEN - "_multirange".Length); + + return new(string.Concat(SchemaSpan, ".", nameSpan, "_multirange")); } // Create a DataTypeName from a broader range of valid names. // including SQL aliases like 'timestamp without time zone', trailing facet info etc. public static DataTypeName FromDisplayName(string displayName, string? schema = null) + => FromDisplayName(displayName, schema, assumeUnqualified: false); // user strings may come fully qualified. + + // This method is used during type loading, it allows us to accept friendly names in constructors, without having to preconcatenate the schema. 
+ internal static DataTypeName FromDisplayName(string displayName, string? schema, bool assumeUnqualified) { var displayNameSpan = displayName.AsSpan().Trim(); - // If we have a schema we're done, Postgres doesn't do display name conversions on fully qualified names. - // There is one exception and that's array syntax, which is always resolvable in both ways, while we want the canonical name. var schemaEndIndex = displayNameSpan.IndexOf('.'); - if (schemaEndIndex is not -1 && - string.IsNullOrEmpty(schema) && - !displayNameSpan.Slice(schemaEndIndex).StartsWith("_".AsSpan(), StringComparison.Ordinal) && - !displayNameSpan.EndsWith("[]".AsSpan(), StringComparison.Ordinal)) - return new(displayName); - - // First we strip the schema to get the type name. - if (schemaEndIndex is not -1 && - string.IsNullOrEmpty(schema)) + ReadOnlySpan schemaSpan; + if (schemaEndIndex is not -1 && !assumeUnqualified) { - schema = displayNameSpan.Slice(0, schemaEndIndex).ToString(); + if (schema is not null) + throw new ArgumentException("Schema provided for a fully qualified name."); + + schemaSpan = displayNameSpan.Slice(0, schemaEndIndex); displayNameSpan = displayNameSpan.Slice(schemaEndIndex + 1); } + else + { + schemaSpan = schema is null ? "pg_catalog" : schema.AsSpan(); + } // Then we strip either of the two valid array representations to get the base type name (with or without facets). var isArray = false; - if (displayNameSpan.StartsWith("_".AsSpan())) + if (displayNameSpan.StartsWith("_", StringComparison.Ordinal)) { isArray = true; displayNameSpan = displayNameSpan.Slice(1); } - else if (displayNameSpan.EndsWith("[]".AsSpan())) + else if (displayNameSpan.EndsWith("[]", StringComparison.Ordinal)) { isArray = true; displayNameSpan = displayNameSpan.Slice(0, displayNameSpan.Length - 2); } - string mapped; - if (schemaEndIndex is -1) + if (schemaEndIndex is not -1) { - // Finally we strip the facet info. 
- var parenIndex = displayNameSpan.IndexOf('('); - if (parenIndex > -1) - displayNameSpan = displayNameSpan.Slice(0, parenIndex); - - // Map any aliases to the internal type name. - mapped = displayNameSpan.ToString() switch - { - "boolean" => "bool", - "character" => "bpchar", - "decimal" => "numeric", - "real" => "float4", - "double precision" => "float8", - "smallint" => "int2", - "integer" => "int4", - "bigint" => "int8", - "time without time zone" => "time", - "timestamp without time zone" => "timestamp", - "time with time zone" => "timetz", - "timestamp with time zone" => "timestamptz", - "bit varying" => "varbit", - "character varying" => "varchar", - var value => value - }; + // If we have a schema we're done, Postgres doesn't do display name conversions on fully qualified names. + // There is one exception and that's array syntax, which is always resolvable in both ways, while we want the canonical name. + return !isArray + ? new(displayName.Length == schemaEndIndex + displayNameSpan.Length + ? displayName + : string.Concat(schemaSpan, ".", displayNameSpan)) + : new(string.Concat(schemaSpan, ".", "_", displayNameSpan)); } - else + + // Finally we strip the facet info. + var parenIndex = displayNameSpan.IndexOf('('); + if (parenIndex > -1) + displayNameSpan = displayNameSpan.Slice(0, parenIndex); + + // Map any aliases to the internal type name. + var mapped = displayNameSpan switch { - // If we had a schema originally we stop here, see comment at schemaEndIndex. 
- mapped = displayNameSpan.ToString(); - } + "boolean" => "bool", + "character" => "bpchar", + "decimal" => "numeric", + "real" => "float4", + "double precision" => "float8", + "smallint" => "int2", + "integer" => "int4", + "bigint" => "int8", + "time without time zone" => "time", + "timestamp without time zone" => "timestamp", + "time with time zone" => "timetz", + "timestamp with time zone" => "timestamptz", + "bit varying" => "varbit", + "character varying" => "varchar", + var value => value + }; - return new((schema ?? "pg_catalog") + "." + (isArray ? "_" : "") + mapped); + return new(string.Concat(schemaSpan, ".", isArray ? "_" : "", mapped)); } // The type names stored in a DataTypeName are usually the actual typname from the pg_type column. @@ -197,8 +197,8 @@ public static DataTypeName FromDisplayName(string displayName, string? schema = // Alternatively some of the source lives at https://github.com/postgres/postgres/blob/c8e1ba736b2b9e8c98d37a5b77c4ed31baf94147/src/backend/utils/adt/format_type.c#L186 static string ToDisplayName(ReadOnlySpan unqualifiedName) { - var isArray = unqualifiedName.IndexOf('_') == 0; - var baseTypeName = isArray ? unqualifiedName.Slice(1).ToString() : unqualifiedName.ToString(); + var isArray = unqualifiedName.IndexOf('_') is 0; + var baseTypeName = isArray ? unqualifiedName.Slice(1) : unqualifiedName; var mappedBaseType = baseTypeName switch { @@ -216,13 +216,12 @@ static string ToDisplayName(ReadOnlySpan unqualifiedName) "timestamptz" => "timestamp with time zone", "varbit" => "bit varying", "varchar" => "character varying", - _ => baseTypeName + _ => null }; - if (isArray) - return mappedBaseType + "[]"; - - return mappedBaseType; + return isArray + ? string.Concat(mappedBaseType ?? baseTypeName, "[]") + : mappedBaseType ?? 
baseTypeName.ToString(); } internal static bool IsFullyQualified(ReadOnlySpan dataTypeName) => dataTypeName.Contains(".".AsSpan(), StringComparison.Ordinal); diff --git a/src/Npgsql/PostgresTypes/PostgresType.cs b/src/Npgsql/PostgresTypes/PostgresType.cs index 1182588c8c..842d1f3eea 100644 --- a/src/Npgsql/PostgresTypes/PostgresType.cs +++ b/src/Npgsql/PostgresTypes/PostgresType.cs @@ -24,7 +24,7 @@ public abstract class PostgresType /// The data type's OID. private protected PostgresType(string ns, string name, uint oid) { - DataTypeName = DataTypeName.FromDisplayName(name, ns); + DataTypeName = DataTypeName.FromDisplayName(name, ns, assumeUnqualified: true); OID = oid; FullName = Namespace + "." + Name; } diff --git a/src/Npgsql/PostgresTypes/PostgresUnknownType.cs b/src/Npgsql/PostgresTypes/PostgresUnknownType.cs index 2955295000..d7cfc983e9 100644 --- a/src/Npgsql/PostgresTypes/PostgresUnknownType.cs +++ b/src/Npgsql/PostgresTypes/PostgresUnknownType.cs @@ -1,4 +1,6 @@ -namespace Npgsql.PostgresTypes; +using Npgsql.Internal.Postgres; + +namespace Npgsql.PostgresTypes; /// /// Represents a PostgreSQL data type that isn't known to Npgsql and cannot be handled. @@ -10,5 +12,5 @@ public sealed class UnknownBackendType : PostgresType /// /// Constructs a the unknown backend type. 
/// - UnknownBackendType() : base("", "", 0) { } + UnknownBackendType() : base(DataTypeName.Unspecified,0) { } } diff --git a/test/Npgsql.Tests/DataTypeNameTests.cs b/test/Npgsql.Tests/DataTypeNameTests.cs index 7ca6c669ce..5c64baa607 100644 --- a/test/Npgsql.Tests/DataTypeNameTests.cs +++ b/test/Npgsql.Tests/DataTypeNameTests.cs @@ -23,4 +23,43 @@ public void TooLongDataTypeName() var exception = Assert.Throws(() => new DataTypeName(fullyQualifiedDataTypeName)); Assert.That(exception!.Message, Does.EndWith($": public.{new string('a', DataTypeName.NAMEDATALEN)}")); } + + [TestCase("public.name", ExpectedResult = "public._name")] + [TestCase("public._name", ExpectedResult = "public._name")] + [TestCase("public.zzzaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa123", ExpectedResult = "public._zzzaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa12")] + public string ToArrayName(string name) + => new DataTypeName(name).ToArrayName(); + + [TestCase("public.multirange", ExpectedResult = "public.multirange")] + [TestCase("public.abcmultirange123", ExpectedResult = "public.abcmultirange123")] + [TestCase("public.multiRANGE", ExpectedResult = "public.multiRANGE_multirange")] + public string ToDefaultMultirangeNameHasMultiRange(string name) + => new DataTypeName(name).ToDefaultMultirangeName(); + + [TestCase("public.range", ExpectedResult = "public.multirange")] + [TestCase("public.abcrange123", ExpectedResult = "public.abcmultirange123")] + [TestCase("public.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaarange", ExpectedResult = "public.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaamultirange")] // Replace goes to max length + [TestCase("public.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaarange1", ExpectedResult = "public.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaamultir")] // Replace goes over max length + [TestCase("public.RANGE", ExpectedResult = "public.RANGE_multirange")] + public string 
ToDefaultMultirangeNameHasRange(string name) + => new DataTypeName(name).ToDefaultMultirangeName(); + + [TestCase("public.name", null, ExpectedResult = "public.name")] + [TestCase("public._name", null, ExpectedResult = "public._name")] + [TestCase("public.name[]", null, ExpectedResult = "public._name")] + [TestCase("public.integer", null, ExpectedResult = "public.integer")] + [TestCase("name", null, ExpectedResult = "pg_catalog.name")] + [TestCase("_name", null, ExpectedResult = "pg_catalog._name")] + [TestCase("name[]", null, ExpectedResult = "pg_catalog._name")] + [TestCase("character varying", null, ExpectedResult = "pg_catalog.varchar")] + [TestCase("decimal(facet_name)", null, ExpectedResult = "pg_catalog.numeric")] + [TestCase("name", "public", ExpectedResult = "public.name")] + [TestCase("name ", "public", ExpectedResult = "public.name")] + [TestCase("_name", "public", ExpectedResult = "public._name")] + [TestCase("name[]", "public", ExpectedResult = "public._name")] + [TestCase("timestamp with time zone", "public", ExpectedResult = "public.timestamptz")] + [TestCase("boolean(facet_name)", "public", ExpectedResult = "public.bool")] + [TestCase(" public.name ", null, ExpectedResult = "public.name")] + public string FromDisplayName(string name, string? 
schema) + => DataTypeName.FromDisplayName(name, schema).Value; } From cc310e6191abf81764e34682eae6550edc40d89c Mon Sep 17 00:00:00 2001 From: Sergiusz <38229504+KeterSCP@users.noreply.github.com> Date: Thu, 30 Oct 2025 10:04:14 +0100 Subject: [PATCH 598/761] Use actual version of the Npgsql for ActivitySource (#6277) --- src/Npgsql/NpgsqlActivitySource.cs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/Npgsql/NpgsqlActivitySource.cs b/src/Npgsql/NpgsqlActivitySource.cs index 4493bb272a..e40ae5a9bd 100644 --- a/src/Npgsql/NpgsqlActivitySource.cs +++ b/src/Npgsql/NpgsqlActivitySource.cs @@ -4,12 +4,13 @@ using System.Diagnostics; using System.Net; using System.Net.Sockets; +using System.Reflection; namespace Npgsql; static class NpgsqlActivitySource { - static readonly ActivitySource Source = new("Npgsql", "0.2.0"); + static readonly ActivitySource Source = new("Npgsql", GetLibraryVersion()); internal static bool IsEnabled => Source.HasListeners(); @@ -143,4 +144,9 @@ internal static void SetException(Activity activity, Exception ex, bool escaped activity.SetStatus(ActivityStatusCode.Error, statusDescription); activity.Dispose(); } + + static string GetLibraryVersion() + => typeof(NpgsqlDataSource).Assembly + .GetCustomAttribute()? + .InformationalVersion ?? 
"UNKNOWN"; } From b38cc40fab9d123b8551355bf528267ec2c5f7e0 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Thu, 30 Oct 2025 10:08:02 +0100 Subject: [PATCH 599/761] Remove manual CodeQL workflow As it now runs integrated with github --- .github/workflows/codeql-analysis.yml | 90 --------------------------- 1 file changed, 90 deletions(-) delete mode 100644 .github/workflows/codeql-analysis.yml diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml deleted file mode 100644 index 666e98fc85..0000000000 --- a/.github/workflows/codeql-analysis.yml +++ /dev/null @@ -1,90 +0,0 @@ -# For most projects, this workflow file will not need changing; you simply need -# to commit it to your repository. -# -# You may wish to alter this file to override the set of languages analyzed, -# or to provide custom queries or build logic. -# -# ******** NOTE ******** -# We have attempted to detect the languages in your repository. Please check -# the `language` matrix defined below to confirm you have the correct set of -# supported CodeQL languages. 
-# -name: "CodeQL" - -on: - push: - branches: - - main - - 'hotfix/**' - - 'release/**' - pull_request: - # The branches below must be a subset of the branches above - branches: - - main - - 'hotfix/**' - - 'release/**' - schedule: - - cron: '21 0 * * 4' - -# Cancel previous PR branch commits (head_ref is only defined on PRs) -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - -env: - DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true - -jobs: - analyze: - name: Analyze - runs-on: ubuntu-latest - permissions: - actions: read - contents: read - security-events: write - - strategy: - fail-fast: false - matrix: - language: [ 'csharp' ] - # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] - # Learn more about CodeQL language support at https://git.io/codeql-language-support - - steps: - - name: Checkout repository - uses: actions/checkout@v5 - - # Initializes the CodeQL tools for scanning. - - name: Initialize CodeQL - uses: github/codeql-action/init@v4 - with: - languages: ${{ matrix.language }} - # If you wish to specify custom queries, you can do so here or in a config file. - # By default, queries listed here will override any specified in a config file. - # Prefix the list here with "+" to use these queries and those in the config file. - # queries: ./path/to/local/query, your-org/your-repo/queries@main - - - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v5.0.0 - - - name: Build - run: dotnet build -c Release - - # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). - # If this step fails, then you should remove it and run the build manually (see below) - #- name: Autobuild - # uses: github/codeql-action/autobuild@v2 - - # ℹ️ Command-line programs to run using the OS shell. 
- # 📚 https://git.io/JvXDl - - # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines - # and modify them (or add more) to build your code if your project - # uses a compiled language - - #- run: | - # make bootstrap - # make release - - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v4 From 492a56e1ecff115a083c00b621cf365c6bde6113 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Thu, 30 Oct 2025 13:18:20 +0100 Subject: [PATCH 600/761] Stop testing PostGIS on Windows in CI (#6275) Because of installation reliability issues --- .github/workflows/build.yml | 28 ++++++---------------------- test/Npgsql.Tests/TestUtil.cs | 13 ++++--------- 2 files changed, 10 insertions(+), 31 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index ff7a6581f7..e474e94763 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -59,6 +59,12 @@ jobs: is_release: ${{ steps.analyze_tag.outputs.is_release }} is_prerelease: ${{ steps.analyze_tag.outputs.is_prerelease }} + # Installing PostGIS on Windows is complicated/unreliable, so we don't test on it. + # The NPGSQL_TEST_POSTGIS environment variable ensures that if PostGIS isn't installed, + # the PostGIS tests fail and therefore fail the build. 
+ env: + NPGSQL_TEST_POSTGIS: ${{ !startsWith(matrix.os, 'windows') }} + steps: - name: Checkout uses: actions/checkout@v5 @@ -165,28 +171,6 @@ jobs: # Match Npgsql CI Docker image and stash one level up cp $GITHUB_WORKSPACE/.build/{server.crt,server.key,ca.crt} pgsql - # Find OSGEO version number - OSGEO_VERSION=$(\ - curl -Ls https://download.osgeo.org/postgis/windows/pg${{ matrix.pg_major }} | - sed -n 's/.*>postgis-bundle-pg${{ matrix.pg_major }}-\(${{ env.postgis_version }}.[0-9]*.[0-9]*\)x64.zip<.*/\1/p' | - tail -n 1) - if [ -z "$OSGEO_VERSION" ]; then - OSGEO_VERSION=$(\ - curl -Ls https://download.osgeo.org/postgis/windows/pg${{ matrix.pg_major }}/archive | - sed -n 's/.*>postgis-bundle-pg${{ matrix.pg_major }}-\(${{ env.postgis_version }}.[0-9]*.[0-9]*\)x64.zip<.*/\1/p' | - tail -n 1) - POSTGIS_PATH="archive/" - else - POSTGIS_PATH="" - fi - - # Install PostGIS - echo "Installing PostGIS (version: ${OSGEO_VERSION})" - POSTGIS_FILE="postgis-bundle-pg${{ matrix.pg_major }}-${OSGEO_VERSION}x64" - curl -o postgis.zip -L https://download.osgeo.org/postgis/windows/pg${{ matrix.pg_major }}/${POSTGIS_PATH}${POSTGIS_FILE}.zip - unzip postgis.zip -d postgis - cp -a postgis/$POSTGIS_FILE/. pgsql/ - # Start PostgreSQL pgsql/bin/initdb -D pgsql/PGDATA -E UTF8 -U postgres SOCKET_DIR=$(echo "$LOCALAPPDATA\Temp" | sed 's|\\|/|g') diff --git a/test/Npgsql.Tests/TestUtil.cs b/test/Npgsql.Tests/TestUtil.cs index fc0b5404d8..85141c1cfa 100644 --- a/test/Npgsql.Tests/TestUtil.cs +++ b/test/Npgsql.Tests/TestUtil.cs @@ -100,9 +100,6 @@ public static async Task IgnoreOnRedshift(NpgsqlConnection conn, string? ignoreT } } - public static async Task IsPgPrerelease(NpgsqlConnection conn) - => ((string) (await conn.ExecuteScalarAsync("SELECT version()"))!).Contains("beta"); - public static void EnsureExtension(NpgsqlConnection conn, string extension, string? 
minVersion = null) => EnsureExtension(conn, extension, minVersion, async: false).GetAwaiter().GetResult(); @@ -168,21 +165,19 @@ static async Task IgnoreIfFeatureNotSupported(NpgsqlConnection conn, string test public static async Task EnsurePostgis(NpgsqlConnection conn) { - var isPreRelease = await IsPgPrerelease(conn); try { await EnsureExtensionAsync(conn, "postgis"); } - catch (PostgresException e) when (e.SqlState == PostgresErrorCodes.UndefinedFile) + catch (PostgresException) { - // PostGIS packages aren't available for PostgreSQL prereleases - if (isPreRelease) + if (Environment.GetEnvironmentVariable("NPGSQL_TEST_POSTGIS")?.ToLower(CultureInfo.InvariantCulture) is "1" or "true") { - Assert.Ignore($"PostGIS could not be installed, but PostgreSQL is prerelease ({conn.ServerVersion}), ignoring test suite."); + throw; } else { - throw; + Assert.Ignore($"PostGIS isn't installed, skipping tests"); } } } From 332ce0b2ffd7af66d9bd02ad4fdc20ba1dba9f00 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Thu, 30 Oct 2025 17:04:00 +0300 Subject: [PATCH 601/761] Upgrade to postgres 18 for CI (#6223) Co-authored-by: Shay Rojansky --- .github/workflows/build.yml | 24 ++++++++++++------------ global.json | 2 +- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index e474e94763..1eff7639e1 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -29,31 +29,31 @@ jobs: fail-fast: false matrix: os: [ubuntu-24.04] - pg_major: [17, 16, 15, 14, 13] + pg_major: [18, 17, 16, 15, 14] config: [Release] - test_tfm: [net9.0] + test_tfm: [net10.0] include: - os: ubuntu-24.04 - pg_major: 17 + pg_major: 18 config: Debug - test_tfm: net9.0 + test_tfm: net10.0 - os: macos-15 pg_major: 16 config: Release - test_tfm: net9.0 + test_tfm: net10.0 - os: windows-2022 - pg_major: 17 - config: Release - test_tfm: net9.0 - - os: ubuntu-24.04 - pg_major: 17 + pg_major: 18 config: Release - test_tfm: net8.0 + 
test_tfm: net10.0 - os: ubuntu-24.04 pg_major: 18 config: Release test_tfm: net8.0 - pg_prerelease: 'PG Prerelease' +# - os: ubuntu-24.04 +# pg_major: 19 +# config: Release +# test_tfm: net10.0 +# pg_prerelease: 'PG Prerelease' outputs: is_release: ${{ steps.analyze_tag.outputs.is_release }} diff --git a/global.json b/global.json index 838198f254..a08ab85427 100644 --- a/global.json +++ b/global.json @@ -1,6 +1,6 @@ { "sdk": { - "version": "10.0.100-rc.1.25451.107", + "version": "10.0.100-rc.2.25502.107", "rollForward": "latestMajor", "allowPrerelease": false } From 7d0e3e140462cfc2a3647451f4035c54f4f2146d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 31 Oct 2025 09:34:31 +0100 Subject: [PATCH 602/761] Bump BenchmarkDotNet from 0.15.4 to 0.15.5 (#6279) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 836b907a56..25efb12069 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -36,7 +36,7 @@ - + From aa3e200f7fa41443d72a1d41f6d2bfbf11a88f09 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 31 Oct 2025 08:37:09 +0000 Subject: [PATCH 603/761] Bump NUnit.Analyzers from 4.10.0 to 4.11.0 (#6280) --- Directory.Packages.props | 2 +- test/Npgsql.Tests/Npgsql.Tests.csproj | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 25efb12069..5355861f54 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -23,7 +23,7 @@ - + diff --git a/test/Npgsql.Tests/Npgsql.Tests.csproj b/test/Npgsql.Tests/Npgsql.Tests.csproj index 1c300f8215..3aeb70bc28 100644 --- a/test/Npgsql.Tests/Npgsql.Tests.csproj +++ b/test/Npgsql.Tests/Npgsql.Tests.csproj @@ -21,6 +21,7 @@ true + $(NoWarn);NUnit1001 $(NoWarn);NPG9001 $(NoWarn);NPG9002 From 
ebf251dd127883fddc85ea0abdc1b1ab6cea328b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 31 Oct 2025 23:12:43 +0100 Subject: [PATCH 604/761] Bump NUnit.Analyzers from 4.11.0 to 4.11.1 (#6281) --- updated-dependencies: - dependency-name: NUnit.Analyzers dependency-version: 4.11.1 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 5355861f54..6a99e09cfc 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -23,7 +23,7 @@ - + From 24b9e401b066a4e5d064d13eabe8bda5b7555722 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Sun, 2 Nov 2025 14:12:49 +0100 Subject: [PATCH 605/761] Move json dom mappings to the NativeAOT compatible resolver (#6271) Closes #6052 --- .../JsonDynamicTypeInfoResolverFactory.cs | 23 ------- .../JsonTypeInfoResolverFactory.cs | 19 ++++++ test/Npgsql.Tests/Types/JsonDynamicTests.cs | 59 ------------------ test/Npgsql.Tests/Types/JsonTests.cs | 60 ++++++++++++++++++- 4 files changed, 77 insertions(+), 84 deletions(-) diff --git a/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs index 04e3aa3313..696aac8efb 100644 --- a/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs @@ -2,7 +2,6 @@ using System.Diagnostics.CodeAnalysis; using System.Text; using System.Text.Json; -using System.Text.Json.Nodes; using System.Text.Json.Serialization.Metadata; using Npgsql.Internal.Converters; using Npgsql.Internal.Postgres; @@ -66,20 +65,6 @@ static TypeInfoMappingCollection 
AddMappings(TypeInfoMappingCollection mappings, // We do GetTypeInfo calls directly so we need a resolver. serializerOptions.TypeInfoResolver ??= new DefaultJsonTypeInfoResolver(); - // These live in the RUC/RDC part as JsonValues can contain any .NET type. - foreach (var dataTypeName in new[] { DataTypeNames.Jsonb, DataTypeNames.Json }) - { - var jsonb = dataTypeName == DataTypeNames.Jsonb; - mappings.AddType(dataTypeName, (options, mapping, _) => - mapping.CreateInfo(options, new JsonConverter(jsonb, options.TextEncoding, serializerOptions))); - mappings.AddType(dataTypeName, (options, mapping, _) => - mapping.CreateInfo(options, new JsonConverter(jsonb, options.TextEncoding, serializerOptions))); - mappings.AddType(dataTypeName, (options, mapping, _) => - mapping.CreateInfo(options, new JsonConverter(jsonb, options.TextEncoding, serializerOptions))); - mappings.AddType(dataTypeName, (options, mapping, _) => - mapping.CreateInfo(options, new JsonConverter(jsonb, options.TextEncoding, serializerOptions))); - } - AddUserMappings(jsonb: true, jsonbClrTypes); AddUserMappings(jsonb: false, jsonClrTypes); @@ -164,14 +149,6 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings, if (baseMappings.Items.Count == 0) return mappings; - foreach (var dataTypeName in new[] { DataTypeNames.Jsonb, DataTypeNames.Json }) - { - mappings.AddArrayType(dataTypeName); - mappings.AddArrayType(dataTypeName); - mappings.AddArrayType(dataTypeName); - mappings.AddArrayType(dataTypeName); - } - var dynamicMappings = CreateCollection(baseMappings); foreach (var mapping in baseMappings.Items) dynamicMappings.AddArrayMapping(mapping.Type, mapping.DataTypeName); diff --git a/src/Npgsql/Internal/ResolverFactories/JsonTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/JsonTypeInfoResolverFactory.cs index 250e000022..f778bea186 100644 --- a/src/Npgsql/Internal/ResolverFactories/JsonTypeInfoResolverFactory.cs +++ 
b/src/Npgsql/Internal/ResolverFactories/JsonTypeInfoResolverFactory.cs @@ -1,5 +1,6 @@ using System; using System.Text.Json; +using System.Text.Json.Nodes; using System.Text.Json.Serialization.Metadata; using Npgsql.Internal.Converters; using Npgsql.Internal.Postgres; @@ -47,6 +48,15 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings, mappings.AddStructType(dataTypeName, (options, mapping, _) => mapping.CreateInfo(options, new JsonConverter(jsonb, options.TextEncoding, serializerOptions))); + + mappings.AddType(dataTypeName, (options, mapping, _) => + mapping.CreateInfo(options, new JsonConverter(jsonb, options.TextEncoding, serializerOptions))); + mappings.AddType(dataTypeName, (options, mapping, _) => + mapping.CreateInfo(options, new JsonConverter(jsonb, options.TextEncoding, serializerOptions))); + mappings.AddType(dataTypeName, (options, mapping, _) => + mapping.CreateInfo(options, new JsonConverter(jsonb, options.TextEncoding, serializerOptions))); + mappings.AddType(dataTypeName, (options, mapping, _) => + mapping.CreateInfo(options, new JsonConverter(jsonb, options.TextEncoding, serializerOptions))); } return mappings; @@ -63,6 +73,12 @@ sealed class BasicJsonTypeInfoResolver : IJsonTypeInfoResolver return JsonMetadataServices.CreateValueInfo(options, JsonMetadataServices.JsonDocumentConverter); if (type == typeof(JsonElement)) return JsonMetadataServices.CreateValueInfo(options, JsonMetadataServices.JsonElementConverter); + if (type == typeof(JsonObject)) + return JsonMetadataServices.CreateValueInfo(options, JsonMetadataServices.JsonObjectConverter); + if (type == typeof(JsonArray)) + return JsonMetadataServices.CreateValueInfo(options, JsonMetadataServices.JsonArrayConverter); + if (type == typeof(JsonValue)) + return JsonMetadataServices.CreateValueInfo(options, JsonMetadataServices.JsonValueConverter); return null; } } @@ -82,6 +98,9 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) { 
mappings.AddArrayType(dataTypeName); mappings.AddStructArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); + mappings.AddArrayType(dataTypeName); } return mappings; diff --git a/test/Npgsql.Tests/Types/JsonDynamicTests.cs b/test/Npgsql.Tests/Types/JsonDynamicTests.cs index 59a3d24662..af282f82d0 100644 --- a/test/Npgsql.Tests/Types/JsonDynamicTests.cs +++ b/test/Npgsql.Tests/Types/JsonDynamicTests.cs @@ -1,6 +1,5 @@ using System; using System.Text.Json; -using System.Text.Json.Nodes; using System.Text.Json.Serialization; using System.Threading.Tasks; using Npgsql.Properties; @@ -15,64 +14,6 @@ namespace Npgsql.Tests.Types; [TestFixture(MultiplexingMode.Multiplexing, NpgsqlDbType.Jsonb)] public class JsonDynamicTests : MultiplexingTestBase { - [Test] - public Task Roundtrip_JsonObject() - => AssertType( - new JsonObject { ["Bar"] = 8 }, - IsJsonb ? """{"Bar": 8}""" : """{"Bar":8}""", - PostgresType, - NpgsqlDbType, - // By default we map JsonObject to jsonb - isDefaultForWriting: IsJsonb, - isDefaultForReading: false, - isNpgsqlDbTypeInferredFromClrType: false, - comparer: (x, y) => x.ToString() == y.ToString()); - - [Test] - public Task Roundtrip_JsonArray() - => AssertType( - new JsonArray { 1, 2, 3 }, - IsJsonb ? 
"[1, 2, 3]" : "[1,2,3]", - PostgresType, - NpgsqlDbType, - // By default we map JsonArray to jsonb - isDefaultForWriting: IsJsonb, - isDefaultForReading: false, - isNpgsqlDbTypeInferredFromClrType: false, - comparer: (x, y) => x.ToString() == y.ToString()); - - [Test] - [IssueLink("https://github.com/npgsql/npgsql/issues/4537")] - public async Task Write_jsonobject_array_without_npgsqldbtype() - { - // By default we map JsonObject to jsonb - if (!IsJsonb) - return; - - await using var conn = await OpenConnectionAsync(); - var tableName = await TestUtil.CreateTempTable(conn, "key SERIAL PRIMARY KEY, ingredients json[]"); - - await using var cmd = new NpgsqlCommand { Connection = conn }; - - var jsonObject1 = new JsonObject - { - { "name", "value1" }, - { "amount", 1 }, - { "unit", "ml" } - }; - - var jsonObject2 = new JsonObject - { - { "name", "value2" }, - { "amount", 2 }, - { "unit", "g" } - }; - - cmd.CommandText = $"INSERT INTO {tableName} (ingredients) VALUES (@p)"; - cmd.Parameters.Add(new("p", new[] { jsonObject1, jsonObject2 })); - await cmd.ExecuteNonQueryAsync(); - } - [Test] public async Task As_poco() => await AssertType( diff --git a/test/Npgsql.Tests/Types/JsonTests.cs b/test/Npgsql.Tests/Types/JsonTests.cs index e7a9b4576e..84b95389bb 100644 --- a/test/Npgsql.Tests/Types/JsonTests.cs +++ b/test/Npgsql.Tests/Types/JsonTests.cs @@ -1,10 +1,8 @@ using System; -using System.Data; using System.IO; using System.Text; using System.Text.Json; using System.Text.Json.Nodes; -using System.Text.Json.Serialization; using System.Threading.Tasks; using NpgsqlTypes; using NUnit.Framework; @@ -168,6 +166,64 @@ public async Task Can_read_two_json_documents() Assert.That(car.RootElement.GetProperty("key").GetString(), Is.EqualTo("foo")); } + [Test] + public Task Roundtrip_JsonObject() + => AssertType( + new JsonObject { ["Bar"] = 8 }, + IsJsonb ? 
"""{"Bar": 8}""" : """{"Bar":8}""", + PostgresType, + NpgsqlDbType, + // By default we map JsonObject to jsonb + isDefaultForWriting: IsJsonb, + isDefaultForReading: false, + isNpgsqlDbTypeInferredFromClrType: false, + comparer: (x, y) => x.ToString() == y.ToString()); + + [Test] + public Task Roundtrip_JsonArray() + => AssertType( + new JsonArray { 1, 2, 3 }, + IsJsonb ? "[1, 2, 3]" : "[1,2,3]", + PostgresType, + NpgsqlDbType, + // By default we map JsonArray to jsonb + isDefaultForWriting: IsJsonb, + isDefaultForReading: false, + isNpgsqlDbTypeInferredFromClrType: false, + comparer: (x, y) => x.ToString() == y.ToString()); + + [Test] + [IssueLink("https://github.com/npgsql/npgsql/issues/4537")] + public async Task Write_jsonobject_array_without_npgsqldbtype() + { + // By default we map JsonObject to jsonb + if (!IsJsonb) + return; + + await using var conn = await OpenConnectionAsync(); + var tableName = await TestUtil.CreateTempTable(conn, "key SERIAL PRIMARY KEY, ingredients json[]"); + + await using var cmd = new NpgsqlCommand { Connection = conn }; + + var jsonObject1 = new JsonObject + { + { "name", "value1" }, + { "amount", 1 }, + { "unit", "ml" } + }; + + var jsonObject2 = new JsonObject + { + { "name", "value2" }, + { "amount", 2 }, + { "unit", "g" } + }; + + cmd.CommandText = $"INSERT INTO {tableName} (ingredients) VALUES (@p)"; + cmd.Parameters.Add(new("p", new[] { jsonObject1, jsonObject2 })); + await cmd.ExecuteNonQueryAsync(); + } + public JsonTests(MultiplexingMode multiplexingMode, NpgsqlDbType npgsqlDbType) : base(multiplexingMode) { From 5719dc735dac27b1428f32e659f1f6bc0ebfa42d Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Mon, 3 Nov 2025 14:15:26 +0100 Subject: [PATCH 606/761] Undo NoWarn --- test/Npgsql.Tests/Npgsql.Tests.csproj | 1 - 1 file changed, 1 deletion(-) diff --git a/test/Npgsql.Tests/Npgsql.Tests.csproj b/test/Npgsql.Tests/Npgsql.Tests.csproj index 3aeb70bc28..1c300f8215 100644 --- a/test/Npgsql.Tests/Npgsql.Tests.csproj +++ 
b/test/Npgsql.Tests/Npgsql.Tests.csproj @@ -21,7 +21,6 @@ true - $(NoWarn);NUnit1001 $(NoWarn);NPG9001 $(NoWarn);NPG9002 From c3e2fc2026a6272fd1ad711b1d31160c7131f21b Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Mon, 3 Nov 2025 14:24:45 +0100 Subject: [PATCH 607/761] Add SupportsReading plumbing (#5472) --- src/Npgsql/Internal/AdoSerializerHelpers.cs | 4 +++ src/Npgsql/Internal/PgTypeInfo.cs | 7 +++++ .../AdoTypeInfoResolverFactory.cs | 16 +++++----- .../NetworkTypeInfoResolverFactory.cs | 9 ++---- src/Npgsql/Internal/TypeInfoMapping.cs | 31 ++++++++++--------- src/Npgsql/NpgsqlParameter.cs | 3 -- test/Npgsql.Tests/Types/ByteaTests.cs | 14 +++++++++ 7 files changed, 52 insertions(+), 32 deletions(-) diff --git a/src/Npgsql/Internal/AdoSerializerHelpers.cs b/src/Npgsql/Internal/AdoSerializerHelpers.cs index 177114f78d..21010b3f99 100644 --- a/src/Npgsql/Internal/AdoSerializerHelpers.cs +++ b/src/Npgsql/Internal/AdoSerializerHelpers.cs @@ -15,6 +15,8 @@ public static PgTypeInfo GetTypeInfoForReading(Type type, PgTypeId pgTypeId, PgS try { typeInfo = options.GetTypeInfoInternal(type, pgTypeId); + if (typeInfo is { SupportsReading: false }) + typeInfo = null; } catch (Exception ex) { @@ -41,6 +43,8 @@ public static PgTypeInfo GetTypeInfoForWriting(Type? type, PgTypeId? pgTypeId, P try { typeInfo = options.GetTypeInfoInternal(type, pgTypeId); + if (typeInfo is { SupportsWriting: false }) + typeInfo = null; } catch (Exception ex) { diff --git a/src/Npgsql/Internal/PgTypeInfo.cs b/src/Npgsql/Internal/PgTypeInfo.cs index 836cd941b8..93b90b3a70 100644 --- a/src/Npgsql/Internal/PgTypeInfo.cs +++ b/src/Npgsql/Internal/PgTypeInfo.cs @@ -21,6 +21,7 @@ public class PgTypeInfo Options = options; IsBoxing = unboxedType is not null; Type = unboxedType ?? 
type; + SupportsReading = GetDefaultSupportsReading(type, unboxedType); SupportsWriting = true; } @@ -54,6 +55,7 @@ private protected PgTypeInfo(PgSerializerOptions options, Type type, PgConverter public Type Type { get; } public PgSerializerOptions Options { get; } + public bool SupportsReading { get; init; } public bool SupportsWriting { get; init; } public DataFormat? PreferredFormat { get; init; } @@ -240,6 +242,11 @@ DataFormat ResolveFormat(PgConverter converter, out BufferRequirements bufferReq return default; } } + + // We assume a boxing type info does not support reading as the converter won't be able to produce the derived type statically. + // Cases like Array converters unboxing to int[], int[,] etc. are the exception and the reason why SupportsReading is a settable property. + internal static bool GetDefaultSupportsReading(Type type, Type? unboxedType) + => unboxedType is null || unboxedType == type; } public sealed class PgResolverTypeInfo( diff --git a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs index db350e2fc9..8db547315f 100644 --- a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs @@ -91,10 +91,10 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) mapping => mapping with { MatchRequirement = MatchRequirement.DataTypeName, TypeMatchPredicate = type => typeof(Stream).IsAssignableFrom(type) }); //Special mappings, these have no corresponding array mapping. 
mappings.AddType(DataTypeNames.Text, - static (options, mapping, _) => mapping.CreateInfo(options, new TextReaderTextConverter(options.TextEncoding), supportsWriting: false, preferredFormat: DataFormat.Text), + static (options, mapping, _) => mapping.CreateInfo(options, new TextReaderTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text, supportsWriting: false), MatchRequirement.DataTypeName); mappings.AddStructType(DataTypeNames.Text, - static (options, mapping, _) => mapping.CreateInfo(options, new GetCharsTextConverter(options.TextEncoding), supportsWriting: false, preferredFormat: DataFormat.Text), + static (options, mapping, _) => mapping.CreateInfo(options, new GetCharsTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text, supportsWriting: false), MatchRequirement.DataTypeName); // Alternative text types @@ -118,10 +118,10 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) mapping => mapping with { MatchRequirement = MatchRequirement.DataTypeName, TypeMatchPredicate = type => typeof(Stream).IsAssignableFrom(type) }); //Special mappings, these have no corresponding array mapping. 
mappings.AddType(dataTypeName, - static (options, mapping, _) => mapping.CreateInfo(options, new TextReaderTextConverter(options.TextEncoding), supportsWriting: false, preferredFormat: DataFormat.Text), + static (options, mapping, _) => mapping.CreateInfo(options, new TextReaderTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text, supportsWriting: false), MatchRequirement.DataTypeName); mappings.AddStructType(dataTypeName, - static (options, mapping, _) => mapping.CreateInfo(options, new GetCharsTextConverter(options.TextEncoding), supportsWriting: false, preferredFormat: DataFormat.Text), + static (options, mapping, _) => mapping.CreateInfo(options, new GetCharsTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text, supportsWriting: false), MatchRequirement.DataTypeName); } @@ -142,10 +142,10 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) mapping => mapping with { MatchRequirement = MatchRequirement.DataTypeName, TypeMatchPredicate = type => typeof(Stream).IsAssignableFrom(type) }); //Special mappings, these have no corresponding array mapping. 
mappings.AddType(DataTypeNames.Jsonb, - static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new TextReaderTextConverter(options.TextEncoding)), supportsWriting: false, preferredFormat: DataFormat.Text), + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new TextReaderTextConverter(options.TextEncoding)), preferredFormat: DataFormat.Text, supportsWriting: false), MatchRequirement.DataTypeName); mappings.AddStructType(DataTypeNames.Jsonb, - static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new GetCharsTextConverter(options.TextEncoding)), supportsWriting: false, preferredFormat: DataFormat.Text), + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new GetCharsTextConverter(options.TextEncoding)), preferredFormat: DataFormat.Text, supportsWriting: false), MatchRequirement.DataTypeName); // Jsonpath @@ -154,10 +154,10 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonpathVersion, new StringTextConverter(options.TextEncoding))), isDefault: true); //Special mappings, these have no corresponding array mapping. 
mappings.AddType(DataTypeNames.Jsonpath, - static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonpathVersion, new TextReaderTextConverter(options.TextEncoding)), supportsWriting: false, preferredFormat: DataFormat.Text), + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonpathVersion, new TextReaderTextConverter(options.TextEncoding)), preferredFormat: DataFormat.Text, supportsWriting: false), MatchRequirement.DataTypeName); mappings.AddStructType(DataTypeNames.Jsonpath, - static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonpathVersion, new GetCharsTextConverter(options.TextEncoding)), supportsWriting: false, preferredFormat: DataFormat.Text), + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonpathVersion, new GetCharsTextConverter(options.TextEncoding)), preferredFormat: DataFormat.Text, supportsWriting: false), MatchRequirement.DataTypeName); // Bytea diff --git a/src/Npgsql/Internal/ResolverFactories/NetworkTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/NetworkTypeInfoResolverFactory.cs index 5eb072c1c9..6a2af4453f 100644 --- a/src/Npgsql/Internal/ResolverFactories/NetworkTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/NetworkTypeInfoResolverFactory.cs @@ -31,14 +31,9 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) // inet // There are certain IPAddress values like Loopback or Any that return a *private* derived type (see https://github.com/dotnet/runtime/issues/27870). - // However we still need to be able to resolve some typed converter for those values. - // We do so by returning a boxing info when we deal with a derived type, as a result we don't need an exact typed converter. - // For arrays users can't actually reference the private type so we'll only see some version of ArrayType. 
- // For reads we'll only see the public type so we never surface an InvalidCastException trying to cast IPAddress to ReadOnlyIPAddress. - // Finally we add a custom predicate to be able to match any type which values are assignable to IPAddress. mappings.AddType(DataTypeNames.Inet, - static (options, mapping, _) => new PgTypeInfo(options, new IPAddressConverter(), - new DataTypeName(mapping.DataTypeName), unboxedType: mapping.Type == typeof(IPAddress) ? null : mapping.Type), + static (options, mapping, _) => new PgTypeInfo(options, new IPAddressConverter(), new DataTypeName(mapping.DataTypeName), + unboxedType: mapping.Type != typeof(IPAddress) ? mapping.Type : null), mapping => mapping with { MatchRequirement = MatchRequirement.Single, diff --git a/src/Npgsql/Internal/TypeInfoMapping.cs b/src/Npgsql/Internal/TypeInfoMapping.cs index c8439de6ac..64b14dff73 100644 --- a/src/Npgsql/Internal/TypeInfoMapping.cs +++ b/src/Npgsql/Internal/TypeInfoMapping.cs @@ -72,7 +72,7 @@ public readonly struct TypeInfoMapping(Type type, string dataTypeName, TypeInfoF public bool TypeEquals(Type type) => TypeMatchPredicate?.Invoke(type) ?? Type == type; - private bool DataTypeNameEqualsCore(string dataTypeName) + bool DataTypeNameEqualsCore(string dataTypeName) { var span = DataTypeName.AsSpan(); return Postgres.DataTypeName.IsFullyQualified(span) @@ -196,7 +196,7 @@ TypeInfoMapping GetMapping(Type type, string dataTypeName) => TryGetMapping(type, dataTypeName, out var info) ? info : throw new InvalidOperationException($"Could not find mapping for {type} <-> {dataTypeName}"); // Helper to eliminate generic display class duplication. - static TypeInfoFactory CreateComposedFactory(Type mappingType, TypeInfoMapping innerMapping, Func mapper, bool copyPreferredFormat = false, bool supportsWriting = true) + static TypeInfoFactory CreateComposedFactory(Type mappingType, TypeInfoMapping innerMapping, Func mapper, bool copyPreferredFormat = false, bool? supportsReading = null, bool? 
supportsWriting = null) => (options, mapping, requiresDataTypeName) => { var resolvedInnerMapping = innerMapping; @@ -206,18 +206,20 @@ static TypeInfoFactory CreateComposedFactory(Type mappingType, TypeInfoMapping i var innerInfo = innerMapping.Factory(options, resolvedInnerMapping, requiresDataTypeName); var converter = mapper(mapping, innerInfo); var preferredFormat = copyPreferredFormat ? innerInfo.PreferredFormat : null; - var writingSupported = supportsWriting && innerInfo.SupportsWriting; var unboxedType = ComputeUnboxedType(defaultType: mappingType, converter.TypeToConvert, mapping.Type); + var readingSupported = innerInfo.SupportsReading && (supportsReading ?? PgTypeInfo.GetDefaultSupportsReading(converter.TypeToConvert, unboxedType)); + var writingSupported = innerInfo.SupportsWriting && (supportsWriting ?? true); return new PgTypeInfo(options, converter, options.GetCanonicalTypeId(new DataTypeName(mapping.DataTypeName)), unboxedType) { PreferredFormat = preferredFormat, + SupportsReading = readingSupported, SupportsWriting = writingSupported }; }; // Helper to eliminate generic display class duplication. - static TypeInfoFactory CreateComposedFactory(Type mappingType, TypeInfoMapping innerMapping, Func mapper, bool copyPreferredFormat = false, bool supportsWriting = true) + static TypeInfoFactory CreateComposedFactory(Type mappingType, TypeInfoMapping innerMapping, Func mapper, bool copyPreferredFormat = false, bool? supportsReading = null, bool? supportsWriting = null) => (options, mapping, requiresDataTypeName) => { var resolvedInnerMapping = innerMapping; @@ -227,8 +229,9 @@ static TypeInfoFactory CreateComposedFactory(Type mappingType, TypeInfoMapping i var innerInfo = (PgResolverTypeInfo)innerMapping.Factory(options, resolvedInnerMapping, requiresDataTypeName); var resolver = mapper(mapping, innerInfo); var preferredFormat = copyPreferredFormat ? 
innerInfo.PreferredFormat : null; - var writingSupported = supportsWriting && innerInfo.SupportsWriting; var unboxedType = ComputeUnboxedType(defaultType: mappingType, resolver.TypeToConvert, mapping.Type); + var readingSupported = innerInfo.SupportsReading && (supportsReading ?? PgTypeInfo.GetDefaultSupportsReading(resolver.TypeToConvert, unboxedType)); + var writingSupported = innerInfo.SupportsWriting && (supportsWriting ?? true); // We include the data type name if the inner info did so as well. // This way we can rely on its logic around resolvedDataTypeName, including when it ignores that flag. PgTypeId? pgTypeId = innerInfo.PgTypeId is not null @@ -237,6 +240,7 @@ static TypeInfoFactory CreateComposedFactory(Type mappingType, TypeInfoMapping i return new PgResolverTypeInfo(options, resolver, pgTypeId, unboxedType) { PreferredFormat = preferredFormat, + SupportsReading = readingSupported, SupportsWriting = writingSupported }; }; @@ -351,7 +355,7 @@ public void AddArrayType(TypeInfoMapping elementMapping, bool suppress void AddArrayType(TypeInfoMapping elementMapping, Type type, Func converter, Func? typeMatchPredicate = null, bool suppressObjectMapping = false) { - var arrayMapping = new TypeInfoMapping(type, arrayDataTypeName, CreateComposedFactory(type, elementMapping, converter)) + var arrayMapping = new TypeInfoMapping(type, arrayDataTypeName, CreateComposedFactory(type, elementMapping, converter, supportsReading: true)) { MatchRequirement = elementMapping.MatchRequirement, TypeMatchPredicate = typeMatchPredicate @@ -391,7 +395,7 @@ public void AddResolverArrayType(TypeInfoMapping elementMapping, bool void AddResolverArrayType(TypeInfoMapping elementMapping, Type type, Func converter, Func? 
typeMatchPredicate = null, bool suppressObjectMapping = false) { - var arrayMapping = new TypeInfoMapping(type, arrayDataTypeName, CreateComposedFactory(type, elementMapping, converter)) + var arrayMapping = new TypeInfoMapping(type, arrayDataTypeName, CreateComposedFactory(type, elementMapping, converter, supportsReading: true)) { MatchRequirement = elementMapping.MatchRequirement, TypeMatchPredicate = typeMatchPredicate @@ -483,12 +487,12 @@ void AddStructArrayType(TypeInfoMapping elementMapping, TypeInfoMapping nullable Func? typeMatchPredicate, Func? nullableTypeMatchPredicate, bool suppressObjectMapping) { var arrayDataTypeName = GetArrayDataTypeName(elementMapping.DataTypeName); - var arrayMapping = new TypeInfoMapping(type, arrayDataTypeName, CreateComposedFactory(type, elementMapping, converter)) + var arrayMapping = new TypeInfoMapping(type, arrayDataTypeName, CreateComposedFactory(type, elementMapping, converter, supportsReading: true)) { MatchRequirement = elementMapping.MatchRequirement, TypeMatchPredicate = typeMatchPredicate }; - var nullableArrayMapping = new TypeInfoMapping(nullableType, arrayDataTypeName, CreateComposedFactory(nullableType, nullableElementMapping, nullableConverter)) + var nullableArrayMapping = new TypeInfoMapping(nullableType, arrayDataTypeName, CreateComposedFactory(nullableType, nullableElementMapping, nullableConverter, supportsReading: true)) { MatchRequirement = arrayMapping.MatchRequirement, TypeMatchPredicate = nullableTypeMatchPredicate @@ -601,12 +605,12 @@ void AddResolverStructArrayType(TypeInfoMapping elementMapping, TypeInfoMapping { var arrayDataTypeName = GetArrayDataTypeName(elementMapping.DataTypeName); - var arrayMapping = new TypeInfoMapping(type, arrayDataTypeName, CreateComposedFactory(type, elementMapping, converter)) + var arrayMapping = new TypeInfoMapping(type, arrayDataTypeName, CreateComposedFactory(type, elementMapping, converter, supportsReading: true)) { MatchRequirement = 
elementMapping.MatchRequirement, TypeMatchPredicate = typeMatchPredicate }; - var nullableArrayMapping = new TypeInfoMapping(nullableType, arrayDataTypeName, CreateComposedFactory(nullableType, nullableElementMapping, nullableConverter)) + var nullableArrayMapping = new TypeInfoMapping(nullableType, arrayDataTypeName, CreateComposedFactory(nullableType, nullableElementMapping, nullableConverter, supportsReading: true)) { MatchRequirement = elementMapping.MatchRequirement, TypeMatchPredicate = nullableTypeMatchPredicate @@ -654,7 +658,7 @@ void AddPolymorphicResolverArrayType(TypeInfoMapping elementMapping, Type type, { var arrayDataTypeName = GetArrayDataTypeName(elementMapping.DataTypeName); var mapping = new TypeInfoMapping(type, arrayDataTypeName, - CreateComposedFactory(typeof(Array), elementMapping, converter, supportsWriting: false)) + CreateComposedFactory(typeof(Array), elementMapping, converter, supportsReading: true, supportsWriting: false)) { MatchRequirement = elementMapping.MatchRequirement, TypeMatchPredicate = typeMatchPredicate @@ -811,8 +815,7 @@ public static PgTypeInfo CreateInfo(this TypeInfoMapping mapping, PgSerializerOp public static PgResolverTypeInfo CreateInfo(this TypeInfoMapping mapping, PgSerializerOptions options, PgConverterResolver resolver, bool includeDataTypeName) => new(options, resolver, includeDataTypeName ? new DataTypeName(mapping.DataTypeName) : null) { - PreferredFormat = null, - SupportsWriting = true + PreferredFormat = null }; /// diff --git a/src/Npgsql/NpgsqlParameter.cs b/src/Npgsql/NpgsqlParameter.cs index b1318d9b0a..ba2d840925 100644 --- a/src/Npgsql/NpgsqlParameter.cs +++ b/src/Npgsql/NpgsqlParameter.cs @@ -618,9 +618,6 @@ internal void Bind(out DataFormat format, out Size size, DataFormat? 
requiredFor if (TypeInfo is null) ThrowHelper.ThrowInvalidOperationException($"Missing type info, {nameof(ResolveTypeInfo)} needs to be called before {nameof(Bind)}."); - if (!TypeInfo.SupportsWriting) - ThrowHelper.ThrowNotSupportedException($"Cannot write values for parameters of type '{TypeInfo.Type}' and postgres type '{TypeInfo.Options.DatabaseInfo.GetDataTypeName(PgTypeId).DisplayName}'."); - // We might call this twice, once during validation and once during WriteBind, only compute things once. if (WriteSize is null) { diff --git a/test/Npgsql.Tests/Types/ByteaTests.cs b/test/Npgsql.Tests/Types/ByteaTests.cs index 9e8df154b0..216ae64cc4 100644 --- a/test/Npgsql.Tests/Types/ByteaTests.cs +++ b/test/Npgsql.Tests/Types/ByteaTests.cs @@ -2,6 +2,7 @@ using System.Collections.Generic; using System.Data; using System.IO; +using System.Net.Sockets; using System.Threading.Tasks; using NpgsqlTypes; using NUnit.Framework; @@ -284,6 +285,19 @@ public async Task Array_of_bytea() Assert.That(retVal[1], Is.EqualTo(inVal[1])); } + [Test] + public async Task InvalidCastException_unknown_stream_read() + { + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT :p1", conn); + cmd.Parameters.AddWithValue("p1", NpgsqlDbType.Bytea, new byte[] { 1 }); + await using var reader = await cmd.ExecuteReaderAsync(); + while (await reader.ReadAsync()) + { + Assert.Throws(() => reader.GetFieldValue(0)); + } + } + sealed class NonSeekableStream(byte[] data) : MemoryStream(data) { public override bool CanSeek => false; From e77dee01733aad5e891a29ebadcde780a21aea56 Mon Sep 17 00:00:00 2001 From: Bruno Hoffmeister Date: Mon, 3 Nov 2025 10:46:43 -0300 Subject: [PATCH 608/761] Populate CommandText when NpgsqlBatchCommand is created (#6234) --- src/Npgsql/SqlQueryParser.cs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Npgsql/SqlQueryParser.cs b/src/Npgsql/SqlQueryParser.cs index 88728a34f7..c037a51342 100644 --- 
a/src/Npgsql/SqlQueryParser.cs +++ b/src/Npgsql/SqlQueryParser.cs @@ -506,6 +506,7 @@ void MoveToNextBatchCommand() else { batchCommand = new NpgsqlBatchCommand { _parameters = parameters }; + batchCommand.CommandText = sql; batchCommands.Add(batchCommand); } } From 530f0fb31a26a610fd0fdadee019dc5e97de84eb Mon Sep 17 00:00:00 2001 From: Bruce Bowyer-Smyth Date: Tue, 4 Nov 2025 00:40:33 +1000 Subject: [PATCH 609/761] Remove unsafe from WriteStringChunked (#5988) --- .../Internal/Converters/ArrayConverter.cs | 2 +- src/Npgsql/Internal/NpgsqlWriteBuffer.cs | 106 ++++-------------- src/Npgsql/Internal/PgWriter.cs | 3 +- src/Npgsql/PregeneratedMessages.cs | 2 +- test/Npgsql.Tests/WriteBufferTests.cs | 64 +++-------- 5 files changed, 41 insertions(+), 136 deletions(-) diff --git a/src/Npgsql/Internal/Converters/ArrayConverter.cs b/src/Npgsql/Internal/Converters/ArrayConverter.cs index 262f748651..2d6d443329 100644 --- a/src/Npgsql/Internal/Converters/ArrayConverter.cs +++ b/src/Npgsql/Internal/Converters/ArrayConverter.cs @@ -161,7 +161,7 @@ sealed class WriteState : MultiWriteState public required int[]? Lengths { get; init; } } - unsafe object ReadDimsAndCreateCollection(PgReader reader, int dimensions, out int lastDimLength) + object ReadDimsAndCreateCollection(PgReader reader, int dimensions, out int lastDimLength) { Debug.Assert(!reader.ShouldBuffer((sizeof(int) + sizeof(int)) * dimensions)); diff --git a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs index c768020718..0e176c2986 100644 --- a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs @@ -84,6 +84,8 @@ internal PgWriter GetWriter(NpgsqlDatabaseInfo typeCatalog, FlushMode flushMode bool _disposed; readonly PgWriter _pgWriter; + Span Span => Buffer.AsSpan(WritePosition, WriteSpaceLeft); + /// /// The minimum buffer size possible. 
/// @@ -329,46 +331,49 @@ static void ThrowNotSpaceLeft() => ThrowHelper.ThrowInvalidOperationException("There is not enough space left in the buffer."); public Task WriteString(string s, int byteLen, bool async, CancellationToken cancellationToken = default) - => WriteString(s, s.Length, byteLen, async, cancellationToken); - - public Task WriteString(string s, int charLen, int byteLen, bool async, CancellationToken cancellationToken = default) { if (byteLen <= WriteSpaceLeft) { - WriteString(s, charLen); + WriteString(s); return Task.CompletedTask; } - return WriteStringLong(this, async, s, charLen, byteLen, cancellationToken); + return WriteStringLong(this, async, s, byteLen, cancellationToken); - static async Task WriteStringLong(NpgsqlWriteBuffer buffer, bool async, string s, int charLen, int byteLen, CancellationToken cancellationToken) + static async Task WriteStringLong(NpgsqlWriteBuffer buffer, bool async, string s, int byteLen, CancellationToken cancellationToken) { Debug.Assert(byteLen > buffer.WriteSpaceLeft); if (byteLen <= buffer.Size) { // String can fit entirely in an empty buffer. 
Flush and retry rather than - // going into the partial writing flow below (which requires ToCharArray()) + // going into the partial writing flow below await buffer.Flush(async, cancellationToken).ConfigureAwait(false); - buffer.WriteString(s, charLen); + buffer.WriteString(s); } else { - var charPos = 0; - while (true) + var encoder = buffer._textEncoder; + encoder.Reset(); + var data = s.AsMemory(); + var minBufferSize = buffer.TextEncoding.GetMaxByteCount(1); + + bool completed; + do { - buffer.WriteStringChunked(s, charPos, charLen - charPos, true, out var charsUsed, out var completed); - if (completed) - break; - await buffer.Flush(async, cancellationToken).ConfigureAwait(false); - charPos += charsUsed; - } + if (buffer.WriteSpaceLeft < minBufferSize) + await buffer.Flush(async, cancellationToken).ConfigureAwait(false); + encoder.Convert(data.Span, buffer.Span, flush: data.Length * minBufferSize <= buffer.Span.Length, + out var charsUsed, out var bytesUsed, out completed); + data = data.Slice(charsUsed); + buffer.WritePosition += bytesUsed; + } while (!completed); } } } - public void WriteString(string s, int len = 0) + public void WriteString(string s) { Debug.Assert(TextEncoding.GetByteCount(s) <= WriteSpaceLeft); - WritePosition += TextEncoding.GetBytes(s, 0, len == 0 ? s.Length : len, Buffer, WritePosition); + WritePosition += TextEncoding.GetBytes(s, 0, s.Length, Buffer, WritePosition); } public void WriteBytes(ReadOnlySpan buf) @@ -421,30 +426,6 @@ static async Task WriteBytesLong(NpgsqlWriteBuffer buffer, bool async, ReadOnlyM } } - public async Task WriteStreamRaw(Stream stream, int count, bool async, CancellationToken cancellationToken = default) - { - while (count > 0) - { - if (WriteSpaceLeft == 0) - await Flush(async, cancellationToken).ConfigureAwait(false); - try - { - var read = async - ? 
await stream.ReadAsync(Buffer, WritePosition, Math.Min(WriteSpaceLeft, count), cancellationToken).ConfigureAwait(false) - : stream.Read(Buffer, WritePosition, Math.Min(WriteSpaceLeft, count)); - if (read == 0) - throw new EndOfStreamException(); - WritePosition += read; - count -= read; - } - catch (Exception e) - { - throw Connector.Break(new NpgsqlException("Exception while writing to stream", e)); - } - } - Debug.Assert(count == 0); - } - public void WriteNullTerminatedString(string s) { AssertASCIIOnly(s); @@ -463,47 +444,6 @@ public void WriteNullTerminatedString(byte[] s) #endregion - #region Write Complex - - internal void WriteStringChunked(char[] chars, int charIndex, int charCount, - bool flush, out int charsUsed, out bool completed) - { - if (WriteSpaceLeft < _textEncoder.GetByteCount(chars, charIndex, char.IsHighSurrogate(chars[charIndex]) ? 2 : 1, flush: false)) - { - charsUsed = 0; - completed = false; - return; - } - - _textEncoder.Convert(chars, charIndex, charCount, Buffer, WritePosition, WriteSpaceLeft, - flush, out charsUsed, out var bytesUsed, out completed); - WritePosition += bytesUsed; - } - - internal unsafe void WriteStringChunked(string s, int charIndex, int charCount, - bool flush, out int charsUsed, out bool completed) - { - int bytesUsed; - - fixed (char* sPtr = s) - fixed (byte* bufPtr = Buffer) - { - if (WriteSpaceLeft < _textEncoder.GetByteCount(sPtr + charIndex, char.IsHighSurrogate(*(sPtr + charIndex)) ? 
2 : 1, flush: false)) - { - charsUsed = 0; - completed = false; - return; - } - - _textEncoder.Convert(sPtr + charIndex, charCount, bufPtr + WritePosition, WriteSpaceLeft, - flush, out charsUsed, out bytesUsed, out completed); - } - - WritePosition += bytesUsed; - } - - #endregion - #region Copy internal void StartCopyMode() diff --git a/src/Npgsql/Internal/PgWriter.cs b/src/Npgsql/Internal/PgWriter.cs index fecf4b7474..8e22a54d55 100644 --- a/src/Npgsql/Internal/PgWriter.cs +++ b/src/Npgsql/Internal/PgWriter.cs @@ -298,7 +298,8 @@ void Core(ReadOnlySpan data, Encoding encoding) if (ShouldFlush(minBufferSize)) Flush(); Ensure(minBufferSize); - encoder.Convert(data, Span, flush: data.Length <= Span.Length, out var charsUsed, out var bytesUsed, out completed); + encoder.Convert(data, Span, flush: data.Length * minBufferSize <= Span.Length, + out var charsUsed, out var bytesUsed, out completed); data = data.Slice(charsUsed); Advance(bytesUsed); } while (!completed); diff --git a/src/Npgsql/PregeneratedMessages.cs b/src/Npgsql/PregeneratedMessages.cs index 54c736b64c..4e6434e9c1 100644 --- a/src/Npgsql/PregeneratedMessages.cs +++ b/src/Npgsql/PregeneratedMessages.cs @@ -26,7 +26,7 @@ internal static byte[] Generate(NpgsqlWriteBuffer buf, string query) { NpgsqlWriteBuffer.AssertASCIIOnly(query); - var queryByteLen = Encoding.ASCII.GetByteCount(query); + var queryByteLen = buf.TextEncoding.GetByteCount(query); buf.WriteByte(FrontendMessageCode.Query); buf.WriteInt32(4 + // Message length (including self excluding code) diff --git a/test/Npgsql.Tests/WriteBufferTests.cs b/test/Npgsql.Tests/WriteBufferTests.cs index ff8d9413ce..53bf753dd6 100644 --- a/test/Npgsql.Tests/WriteBufferTests.cs +++ b/test/Npgsql.Tests/WriteBufferTests.cs @@ -33,20 +33,17 @@ public void GetWriter_Full_Buffer() } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1275")] - public void Write_zero_characters() + public void Chunked_string_with_full_buffer() { // Fill up the buffer entirely 
WriteBuffer.WriteBytes(new byte[WriteBuffer.Size], 0, WriteBuffer.Size); Assert.That(WriteBuffer.WriteSpaceLeft, Is.Zero); - int charsUsed; - bool completed; - WriteBuffer.WriteStringChunked("hello", 0, 5, true, out charsUsed, out completed); - Assert.That(charsUsed, Is.Zero); - Assert.That(completed, Is.False); - WriteBuffer.WriteStringChunked("hello".ToCharArray(), 0, 5, true, out charsUsed, out completed); - Assert.That(charsUsed, Is.Zero); - Assert.That(completed, Is.False); + var data = new string('a', WriteBuffer.Size) + "hello"; + var byteLength = WriteBuffer.TextEncoding.GetByteCount(data); + WriteBuffer.WriteString(data, byteLength, false); + Assert.That(WriteBuffer.WritePosition, Is.EqualTo(5)); + Assert.That(WriteBuffer.Buffer.AsSpan(0, 5).ToArray(), Is.EqualTo(new byte[] { (byte)'h', (byte)'e', (byte)'l', (byte)'l', (byte)'o' })); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/2849")] @@ -55,26 +52,11 @@ public void Chunked_string_encoding_fits() WriteBuffer.WriteBytes(new byte[WriteBuffer.Size - 1], 0, WriteBuffer.Size - 1); Assert.That(WriteBuffer.WriteSpaceLeft, Is.EqualTo(1)); - var charsUsed = 1; - var completed = true; // This unicode character is three bytes when encoded in UTF8 - Assert.That(() => WriteBuffer.WriteStringChunked("\uD55C", 0, 1, true, out charsUsed, out completed), Throws.Nothing); - Assert.That(charsUsed, Is.EqualTo(0)); - Assert.That(completed, Is.False); - } - - [Test, IssueLink("https://github.com/npgsql/npgsql/issues/2849")] - public void Chunked_byte_array_encoding_fits() - { - WriteBuffer.WriteBytes(new byte[WriteBuffer.Size - 1], 0, WriteBuffer.Size - 1); - Assert.That(WriteBuffer.WriteSpaceLeft, Is.EqualTo(1)); - - var charsUsed = 1; - var completed = true; - // This unicode character is three bytes when encoded in UTF8 - Assert.That(() => WriteBuffer.WriteStringChunked("\uD55C".ToCharArray(), 0, 1, true, out charsUsed, out completed), Throws.Nothing); - Assert.That(charsUsed, Is.EqualTo(0)); - 
Assert.That(completed, Is.False); + var data = "\uD55C" + new string('a', WriteBuffer.Size); + var byteLength = WriteBuffer.TextEncoding.GetByteCount(data); + WriteBuffer.WriteString(data, byteLength, false); + Assert.That(WriteBuffer.WritePosition, Is.EqualTo(3)); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/3733")] @@ -83,28 +65,10 @@ public void Chunked_string_encoding_fits_with_surrogates() WriteBuffer.WriteBytes(new byte[WriteBuffer.Size - 1]); Assert.That(WriteBuffer.WriteSpaceLeft, Is.EqualTo(1)); - var charsUsed = 1; - var completed = true; - var cyclone = "🌀"; - - Assert.That(() => WriteBuffer.WriteStringChunked(cyclone, 0, cyclone.Length, true, out charsUsed, out completed), Throws.Nothing); - Assert.That(charsUsed, Is.EqualTo(0)); - Assert.That(completed, Is.False); - } - - [Test, IssueLink("https://github.com/npgsql/npgsql/issues/3733")] - public void Chunked_char_array_encoding_fits_with_surrogates() - { - WriteBuffer.WriteBytes(new byte[WriteBuffer.Size - 1]); - Assert.That(WriteBuffer.WriteSpaceLeft, Is.EqualTo(1)); - - var charsUsed = 1; - var completed = true; - var cyclone = "🌀"; - - Assert.That(() => WriteBuffer.WriteStringChunked(cyclone.ToCharArray(), 0, cyclone.Length, true, out charsUsed, out completed), Throws.Nothing); - Assert.That(charsUsed, Is.EqualTo(0)); - Assert.That(completed, Is.False); + var cyclone = "🌀" + new string('a', WriteBuffer.Size); + var byteLength = WriteBuffer.TextEncoding.GetByteCount(cyclone); + WriteBuffer.WriteString(cyclone, byteLength, false); + Assert.That(WriteBuffer.WritePosition, Is.EqualTo(4)); } [SetUp] From 0ec29b34afedfcbc07870ead61d1f71706c0d5e8 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Mon, 3 Nov 2025 15:50:08 +0100 Subject: [PATCH 610/761] As the inputs are all expected to be well-formed and complete flush can be true --- src/Npgsql/Internal/NpgsqlWriteBuffer.cs | 3 +-- src/Npgsql/Internal/PgWriter.cs | 5 ++--- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git 
a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs index 0e176c2986..ea0b4b265a 100644 --- a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs @@ -361,8 +361,7 @@ static async Task WriteStringLong(NpgsqlWriteBuffer buffer, bool async, string s { if (buffer.WriteSpaceLeft < minBufferSize) await buffer.Flush(async, cancellationToken).ConfigureAwait(false); - encoder.Convert(data.Span, buffer.Span, flush: data.Length * minBufferSize <= buffer.Span.Length, - out var charsUsed, out var bytesUsed, out completed); + encoder.Convert(data.Span, buffer.Span, flush: true, out var charsUsed, out var bytesUsed, out completed); data = data.Slice(charsUsed); buffer.WritePosition += bytesUsed; } while (!completed); diff --git a/src/Npgsql/Internal/PgWriter.cs b/src/Npgsql/Internal/PgWriter.cs index 8e22a54d55..2d08a38e53 100644 --- a/src/Npgsql/Internal/PgWriter.cs +++ b/src/Npgsql/Internal/PgWriter.cs @@ -298,8 +298,7 @@ void Core(ReadOnlySpan data, Encoding encoding) if (ShouldFlush(minBufferSize)) Flush(); Ensure(minBufferSize); - encoder.Convert(data, Span, flush: data.Length * minBufferSize <= Span.Length, - out var charsUsed, out var bytesUsed, out completed); + encoder.Convert(data, Span, flush: true, out var charsUsed, out var bytesUsed, out completed); data = data.Slice(charsUsed); Advance(bytesUsed); } while (!completed); @@ -335,7 +334,7 @@ async ValueTask Core(ReadOnlyMemory data, Encoding encoding, CancellationT if (ShouldFlush(minBufferSize)) await FlushAsync(cancellationToken).ConfigureAwait(false); Ensure(minBufferSize); - encoder.Convert(data.Span, Span, flush: data.Length <= Span.Length, out var charsUsed, out var bytesUsed, out completed); + encoder.Convert(data.Span, Span, flush: true, out var charsUsed, out var bytesUsed, out completed); data = data.Slice(charsUsed); Advance(bytesUsed); } while (!completed); From 9163444a38b293e72f1ba591a6b6e4ab7ba07cc6 Mon Sep 17 00:00:00 2001 From: Kirk 
Brauer Date: Mon, 3 Nov 2025 10:03:31 -0500 Subject: [PATCH 611/761] Cube support (#3867) Closes #698 --- .../Converters/Geometric/CubeConverter.cs | 87 ++++++ .../CubeTypeInfoResolverFactory.cs | 56 ++++ .../UnsupportedTypeInfoResolver.cs | 1 + src/Npgsql/NpgsqlDataSourceBuilder.cs | 4 +- src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 10 + src/Npgsql/NpgsqlTypes/NpgsqlCube.cs | 251 ++++++++++++++++ src/Npgsql/NpgsqlTypes/NpgsqlDbType.cs | 8 + .../Properties/NpgsqlStrings.Designer.cs | 11 +- src/Npgsql/Properties/NpgsqlStrings.resx | 3 + src/Npgsql/PublicAPI.Unshipped.txt | 21 ++ test/Npgsql.Tests/Types/CubeTests.cs | 277 ++++++++++++++++++ 11 files changed, 727 insertions(+), 2 deletions(-) create mode 100644 src/Npgsql/Internal/Converters/Geometric/CubeConverter.cs create mode 100644 src/Npgsql/Internal/ResolverFactories/CubeTypeInfoResolverFactory.cs create mode 100644 src/Npgsql/NpgsqlTypes/NpgsqlCube.cs create mode 100644 test/Npgsql.Tests/Types/CubeTests.cs diff --git a/src/Npgsql/Internal/Converters/Geometric/CubeConverter.cs b/src/Npgsql/Internal/Converters/Geometric/CubeConverter.cs new file mode 100644 index 0000000000..05b539cf12 --- /dev/null +++ b/src/Npgsql/Internal/Converters/Geometric/CubeConverter.cs @@ -0,0 +1,87 @@ +using System.Threading; +using System.Threading.Tasks; +using NpgsqlTypes; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +sealed class CubeConverter : PgStreamingConverter +{ + const uint PointBit = 0x80000000; + const int DimMask = 0x7fffffff; + + public override NpgsqlCube Read(PgReader reader) + => Read(async: false, reader, CancellationToken.None).GetAwaiter().GetResult(); + + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => Read(async: true, reader, cancellationToken); + + async ValueTask Read(bool async, PgReader reader, CancellationToken cancellationToken) + { + if (reader.ShouldBuffer(sizeof(int))) + await reader.Buffer(async, 
sizeof(int), cancellationToken).ConfigureAwait(false); + + var header = reader.ReadInt32(); + var dim = header & DimMask; + var point = (header & PointBit) != 0; + + var lowerLeft = new double[dim]; + for (var i = 0; i < dim; i++) + { + if (reader.ShouldBuffer(sizeof(double))) + await reader.Buffer(async, sizeof(double), cancellationToken).ConfigureAwait(false); + lowerLeft[i] = reader.ReadDouble(); + } + + if (point) + return new NpgsqlCube(lowerLeft); + + var upperRight = new double[dim]; + for (var i = 0; i < dim; i++) + { + if (reader.ShouldBuffer(sizeof(double))) + await reader.Buffer(async, sizeof(double), cancellationToken).ConfigureAwait(false); + upperRight[i] = reader.ReadDouble(); + } + + return new NpgsqlCube(lowerLeft, upperRight); + } + + public override Size GetSize(SizeContext context, NpgsqlCube value, ref object? writeState) + => sizeof(int) + sizeof(double) * (value.IsPoint ? value.Dimensions : value.Dimensions * 2); + + public override void Write(PgWriter writer, NpgsqlCube value) + => Write(async: false, writer, value, CancellationToken.None).GetAwaiter().GetResult(); + + public override ValueTask WriteAsync(PgWriter writer, NpgsqlCube value, CancellationToken cancellationToken = default) + => Write(async: true, writer, value, cancellationToken); + + async ValueTask Write(bool async, PgWriter writer, NpgsqlCube value, CancellationToken cancellationToken) + { + if (writer.ShouldFlush(sizeof(int))) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + var header = value.Dimensions; + if (value.IsPoint) + header |= 1 << 31; + + writer.WriteInt32(header); + + for (var i = 0; i < value.Dimensions; i++) + { + if (writer.ShouldFlush(sizeof(double))) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + writer.WriteDouble(value.LowerLeft[i]); + } + + if (value.IsPoint) + return; + + for (var i = 0; i < value.Dimensions; i++) + { + if (writer.ShouldFlush(sizeof(double))) + await writer.Flush(async, 
cancellationToken).ConfigureAwait(false); + writer.WriteDouble(value.UpperRight[i]); + } + } +} diff --git a/src/Npgsql/Internal/ResolverFactories/CubeTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/CubeTypeInfoResolverFactory.cs new file mode 100644 index 0000000000..90b872f458 --- /dev/null +++ b/src/Npgsql/Internal/ResolverFactories/CubeTypeInfoResolverFactory.cs @@ -0,0 +1,56 @@ +using System; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; +using Npgsql.Properties; +using NpgsqlTypes; + +namespace Npgsql.Internal.ResolverFactories; + +sealed class CubeTypeInfoResolverFactory : PgTypeInfoResolverFactory +{ + const string CubeTypeName = "cube"; + + public override IPgTypeInfoResolver CreateResolver() => new Resolver(); + public override IPgTypeInfoResolver CreateArrayResolver() => new ArrayResolver(); + + public static void ThrowIfUnsupported(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + { + if (dataTypeName is { UnqualifiedNameSpan: "cube" or "_cube" } || type == typeof(NpgsqlCube)) + throw new NotSupportedException( + string.Format(NpgsqlStrings.CubeNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableCube), + typeof(TBuilder).Name)); + } + + class Resolver : IPgTypeInfoResolver + { + TypeInfoMappingCollection? _mappings; + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new()); + + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + mappings.AddStructType(CubeTypeName, + static (options, mapping, _) => mapping.CreateInfo(options, new CubeConverter()), isDefault: true); + + return mappings; + } + } + + sealed class ArrayResolver : Resolver, IPgTypeInfoResolver + { + TypeInfoMappingCollection? 
_mappings; + new TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(base.Mappings)); + + public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + => Mappings.Find(type, dataTypeName, options); + + static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) + { + mappings.AddStructArrayType(CubeTypeName); + + return mappings; + } + } +} diff --git a/src/Npgsql/Internal/ResolverFactories/UnsupportedTypeInfoResolver.cs b/src/Npgsql/Internal/ResolverFactories/UnsupportedTypeInfoResolver.cs index 2d47f86807..efcc4633ba 100644 --- a/src/Npgsql/Internal/ResolverFactories/UnsupportedTypeInfoResolver.cs +++ b/src/Npgsql/Internal/ResolverFactories/UnsupportedTypeInfoResolver.cs @@ -16,6 +16,7 @@ sealed class UnsupportedTypeInfoResolver : IPgTypeInfoResolver RecordTypeInfoResolverFactory.ThrowIfUnsupported(type, dataTypeName, options); FullTextSearchTypeInfoResolverFactory.ThrowIfUnsupported(type, dataTypeName, options); LTreeTypeInfoResolverFactory.ThrowIfUnsupported(type, dataTypeName, options); + CubeTypeInfoResolverFactory.ThrowIfUnsupported(type, dataTypeName, options); JsonDynamicTypeInfoResolverFactory.Support.ThrowIfUnsupported(type, dataTypeName); diff --git a/src/Npgsql/NpgsqlDataSourceBuilder.cs b/src/Npgsql/NpgsqlDataSourceBuilder.cs index ce6926a544..68dd517ba0 100644 --- a/src/Npgsql/NpgsqlDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlDataSourceBuilder.cs @@ -59,7 +59,8 @@ internal static void ResetGlobalMappings(bool overwrite) new FullTextSearchTypeInfoResolverFactory(), new NetworkTypeInfoResolverFactory(), new GeometricTypeInfoResolverFactory(), - new LTreeTypeInfoResolverFactory() + new LTreeTypeInfoResolverFactory(), + new CubeTypeInfoResolverFactory() ], static () => { var builder = new PgTypeInfoResolverChainBuilder(); @@ -88,6 +89,7 @@ public NpgsqlDataSourceBuilder(string? 
connectionString = null) instance.AppendResolverFactory(new NetworkTypeInfoResolverFactory()); instance.AppendResolverFactory(new GeometricTypeInfoResolverFactory()); instance.AppendResolverFactory(new LTreeTypeInfoResolverFactory()); + instance.AppendResolverFactory(new CubeTypeInfoResolverFactory()); }; _internalBuilder.ConfigureResolverChain = static chain => chain.Add(UnsupportedTypeInfoResolver); _internalBuilder.EnableTransportSecurity(); diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index 4a3d6fdad7..bc15fca563 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -588,6 +588,16 @@ public NpgsqlSlimDataSourceBuilder EnableLTree() return this; } + /// + /// Sets up mappings for the PostgreSQL cube extension type. + /// + /// The same builder instance so that multiple calls can be chained. + public NpgsqlSlimDataSourceBuilder EnableCube() + { + AddTypeInfoResolverFactory(new CubeTypeInfoResolverFactory()); + return this; + } + /// /// Sets up mappings for extra conversions from PostgreSQL to .NET types. /// diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlCube.cs b/src/Npgsql/NpgsqlTypes/NpgsqlCube.cs new file mode 100644 index 0000000000..15a89f56ee --- /dev/null +++ b/src/Npgsql/NpgsqlTypes/NpgsqlCube.cs @@ -0,0 +1,251 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; + +// ReSharper disable once CheckNamespace +namespace NpgsqlTypes +{ + /// + /// Represents a PostgreSQl cube data type. + /// + /// + /// See https://www.postgresql.org/docs/current/cube.html + /// + public readonly struct NpgsqlCube : IEquatable + { + // Store the coordinates as a value tuple array + readonly double[] _lowerLeft; + readonly double[] _upperRight; + + /// + /// The lower left coordinates of the cube. + /// + public IReadOnlyList LowerLeft => _lowerLeft; + + /// + /// The upper right coordinates of the cube. 
+ /// + public IReadOnlyList UpperRight => _upperRight; + + /// + /// The number of dimensions of the cube. + /// + public int Dimensions => _lowerLeft.Length; + + /// + /// True if the cube is a point, that is, the two defining corners are the same. + /// + public bool IsPoint { get; } + + /// + /// Makes a cube with upper right and lower left coordinates as defined by the two arrays, which must be of the same length. + /// + /// This is an internal constructor to optimize the number of allocations. + /// The lower left values. + /// The upper right values. + /// + /// Thrown if the number of dimensions in the upper left and lower right values do not match. + /// + internal NpgsqlCube(double[] lowerLeft, double[] upperRight) + { + if (lowerLeft.Length != upperRight.Length) + throw new ArgumentException($"Not a valid cube: Different point dimensions in {lowerLeft} and {upperRight}."); + + IsPoint = lowerLeft.SequenceEqual(upperRight); + _lowerLeft = lowerLeft; + _upperRight = upperRight; + } + + /// + /// Makes a one dimensional cube with both coordinates the same. + /// + /// The point coordinate. + public NpgsqlCube(double coord) + { + IsPoint = true; + _lowerLeft = [coord]; + _upperRight = _lowerLeft; + } + + /// + /// Makes a one dimensional cube. + /// + /// The lower left value. + /// The upper right value. + public NpgsqlCube(double lowerLeft, double upperRight) + { + IsPoint = lowerLeft.CompareTo(upperRight) == 0; + _lowerLeft = [lowerLeft]; + _upperRight = IsPoint ? _lowerLeft : [upperRight]; + } + + /// + /// Makes a zero-volume cube using the coordinates defined by the array. + /// + /// The coordinates. + public NpgsqlCube(IEnumerable coords) + { + // Always create a defensive copy to prevent external mutation + _lowerLeft = coords.ToArray(); + IsPoint = true; + _upperRight = _lowerLeft; + } + + /// + /// Makes a cube with upper right and lower left coordinates as defined by the two arrays, which must be of the same length. 
+ /// + /// The lower left values. + /// The upper right values. + /// + /// Thrown if the number of dimensions in the upper left and lower right values do not match + /// or if the cube exceeds the maximum dimensions (100). + /// + public NpgsqlCube(IEnumerable lowerLeft, IEnumerable upperRight) : + this(lowerLeft.ToArray(), upperRight.ToArray()) + { } + + /// + /// Makes a new cube by adding a dimension on to an existing cube, with the same values for both endpoints of the new coordinate. + /// This is useful for building cubes piece by piece from calculated values. + /// + /// The existing cube. + /// The coordinate to add. + public NpgsqlCube(NpgsqlCube cube, double coord) + { + IsPoint = cube.IsPoint; + if (IsPoint) + { + _lowerLeft = cube._lowerLeft.Append(coord).ToArray(); + _upperRight = _lowerLeft; + } + else + { + _lowerLeft = cube._lowerLeft.Append(coord).ToArray(); + _upperRight = cube._upperRight.Append(coord).ToArray(); + } + } + + /// + /// Makes a new cube by adding a dimension on to an existing cube. + /// This is useful for building cubes piece by piece from calculated values. + /// + /// The existing cube. + /// The lower left value. + /// The upper right value. + public NpgsqlCube(NpgsqlCube cube, double lowerLeft, double upperRight) + { + IsPoint = cube.IsPoint && lowerLeft.CompareTo(upperRight) == 0; + if (IsPoint) + { + _lowerLeft = cube._lowerLeft.Append(lowerLeft).ToArray(); + _upperRight = _lowerLeft; + } + else + { + _lowerLeft = cube._lowerLeft.Append(lowerLeft).ToArray(); + _upperRight = cube._upperRight.Append(upperRight).ToArray(); + } + } + + /// + /// Makes a new cube from an existing cube, using a list of dimension indexes from an array. + /// Can be used to extract the endpoints of a single dimension, or to drop dimensions, or to reorder them as desired. + /// + /// The list of dimension indexes. + /// A new cube. 
+ /// + /// + /// var cube = new NpgsqlCube(new[] { 1, 3, 5 }, new[] { 6, 7, 8 }); // '(1,3,5),(6,7,8)' + /// cube.ToSubset(1); // '(3),(7)' + /// cube.ToSubset(2, 1, 0, 0); // '(5,3,1,1),(8,7,6,6)' + /// + /// + public NpgsqlCube ToSubset(params int[] indexes) + { + var lowerLeft = new double[indexes.Length]; + var upperRight = new double[indexes.Length]; + + for (var i = 0; i < indexes.Length; i++) + { + lowerLeft[i] = _lowerLeft[indexes[i]]; + upperRight[i] = _upperRight[indexes[i]]; + } + + return new NpgsqlCube(lowerLeft, upperRight); + } + + /// + public bool Equals(NpgsqlCube other) => Dimensions == other.Dimensions + && _lowerLeft.SequenceEqual(other._lowerLeft) + && _upperRight.SequenceEqual(other._upperRight); + + /// + public override bool Equals(object? obj) => obj is NpgsqlCube other && Equals(other); + + /// + public static bool operator ==(NpgsqlCube x, NpgsqlCube y) => x.Equals(y); + + /// + public static bool operator !=(NpgsqlCube x, NpgsqlCube y) => !(x == y); + + /// + public override int GetHashCode() + { + var hashCode = new HashCode(); + for (var i = 0; i < Dimensions; i++) + { + hashCode.Add(_lowerLeft[i]); + hashCode.Add(_upperRight[i]); + } + return hashCode.ToHashCode(); + } + + /// + /// Writes the cube in PostgreSQL's text format. + /// + void Write(StringBuilder stringBuilder) + { + var leftBuilder = new StringBuilder(); + var rightBuilder = new StringBuilder(); + + leftBuilder.Append('('); + rightBuilder.Append('('); + + for (var i = 0; i < Dimensions; i++) + { + leftBuilder.Append(_lowerLeft[i]); + rightBuilder.Append(_upperRight[i]); + + if (i >= Dimensions - 1) continue; + + leftBuilder.Append(", "); + rightBuilder.Append(", "); + } + + leftBuilder.Append(')'); + rightBuilder.Append(')'); + + if (IsPoint) + { + stringBuilder.Append(leftBuilder); + } + else + { + stringBuilder.Append(leftBuilder); + stringBuilder.Append(','); + stringBuilder.Append(rightBuilder); + } + } + + /// + /// Writes the cube in PostgreSQL's text format. 
+ /// + public override string ToString() + { + var sb = new StringBuilder(); + Write(sb); + return sb.ToString(); + } + } +} diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlDbType.cs b/src/Npgsql/NpgsqlTypes/NpgsqlDbType.cs index 687ebf16b7..abb24c74d0 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlDbType.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlDbType.cs @@ -123,6 +123,12 @@ public enum NpgsqlDbType /// See https://www.postgresql.org/docs/current/static/datatype-geometric.html Polygon = 16, + /// + /// Corresponds to the PostgreSQL "cube" type, a geometric type representing multi-dimensional cubes. + /// + /// See https://www.postgresql.org/docs/current/cube.html + Cube = 63, // Extension type + #endregion #region Character Types @@ -740,6 +746,7 @@ public static DbType ToDbType(this NpgsqlDbType npgsqlDbType) // Plugin types NpgsqlDbType.Citext => "citext", + NpgsqlDbType.Cube => "cube", NpgsqlDbType.LQuery => "lquery", NpgsqlDbType.LTree => "ltree", NpgsqlDbType.LTxtQuery => "ltxtquery", @@ -964,6 +971,7 @@ _ when npgsqlDbType.HasFlag(NpgsqlDbType.Multirange) // Plugin types "citext" => NpgsqlDbType.Citext, + "cube" => NpgsqlDbType.Cube, "lquery" => NpgsqlDbType.LQuery, "ltree" => NpgsqlDbType.LTree, "ltxtquery" => NpgsqlDbType.LTxtQuery, diff --git a/src/Npgsql/Properties/NpgsqlStrings.Designer.cs b/src/Npgsql/Properties/NpgsqlStrings.Designer.cs index 7f71914ca2..d0b7839d6c 100644 --- a/src/Npgsql/Properties/NpgsqlStrings.Designer.cs +++ b/src/Npgsql/Properties/NpgsqlStrings.Designer.cs @@ -139,7 +139,16 @@ internal static string CannotUseValidationRootCertificateCallbackWithCustomValid return ResourceManager.GetString("CannotUseValidationRootCertificateCallbackWithCustomValidationCallback", resourceCulture); } } - + + /// + /// Looks up a localized string similar to Cube isn't enabled; please call {0} on {1} to enable Cube.. 
+ /// + internal static string CubeNotEnabled { + get { + return ResourceManager.GetString("CubeNotEnabled", resourceCulture); + } + } + /// /// Looks up a localized string similar to Type '{0}' required dynamic JSON serialization, which requires an explicit opt-in; call '{1}' on '{2}' or NpgsqlConnection.GlobalTypeMapper (see https://www.npgsql.org/doc/types/json.html and the 8.0 release notes for more details). Alternatively, if you meant to use Newtonsoft JSON.NET instead of System.Text.Json, call UseJsonNet() instead. ///. diff --git a/src/Npgsql/Properties/NpgsqlStrings.resx b/src/Npgsql/Properties/NpgsqlStrings.resx index af951d1a07..c39af4abc4 100644 --- a/src/Npgsql/Properties/NpgsqlStrings.resx +++ b/src/Npgsql/Properties/NpgsqlStrings.resx @@ -79,6 +79,9 @@ Ltree isn't enabled; please call {0} on {1} to enable LTree. + + Cube isn't enabled; please call {0} on {1} to enable Cube. + Ranges aren't enabled; please call {0} on {1} to enable ranges. diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index e7c8061376..7f28aa9e2e 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -27,6 +27,7 @@ Npgsql.NpgsqlMetricsOptions Npgsql.NpgsqlMetricsOptions.NpgsqlMetricsOptions() -> void Npgsql.NpgsqlSlimDataSourceBuilder.ConfigureTracing(System.Action! configureAction) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.ConfigureTypeLoading(System.Action! configureAction) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableCube() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableGeometricTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableJsonTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableNetworkTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! 
@@ -93,6 +94,26 @@ NpgsqlTypes.NpgsqlBox.Deconstruct(out double left, out double right, out double NpgsqlTypes.NpgsqlBox.Deconstruct(out NpgsqlTypes.NpgsqlPoint lowerLeft, out NpgsqlTypes.NpgsqlPoint upperRight) -> void NpgsqlTypes.NpgsqlCircle.Deconstruct(out double x, out double y, out double radius) -> void NpgsqlTypes.NpgsqlCircle.Deconstruct(out NpgsqlTypes.NpgsqlPoint center, out double radius) -> void +NpgsqlTypes.NpgsqlCube +NpgsqlTypes.NpgsqlCube.NpgsqlCube() -> void +NpgsqlTypes.NpgsqlCube.Dimensions.get -> int +NpgsqlTypes.NpgsqlCube.Equals(NpgsqlTypes.NpgsqlCube other) -> bool +NpgsqlTypes.NpgsqlCube.LowerLeft.get -> System.Collections.Generic.IReadOnlyList! +NpgsqlTypes.NpgsqlCube.NpgsqlCube(double coord) -> void +NpgsqlTypes.NpgsqlCube.NpgsqlCube(double lowerLeft, double upperRight) -> void +NpgsqlTypes.NpgsqlCube.NpgsqlCube(NpgsqlTypes.NpgsqlCube cube, double coord) -> void +NpgsqlTypes.NpgsqlCube.NpgsqlCube(NpgsqlTypes.NpgsqlCube cube, double lowerLeft, double upperRight) -> void +NpgsqlTypes.NpgsqlCube.NpgsqlCube(System.Collections.Generic.IEnumerable! coords) -> void +NpgsqlTypes.NpgsqlCube.NpgsqlCube(System.Collections.Generic.IEnumerable! lowerLeft, System.Collections.Generic.IEnumerable! upperRight) -> void +NpgsqlTypes.NpgsqlCube.IsPoint.get -> bool +NpgsqlTypes.NpgsqlCube.ToSubset(params int[]! indexes) -> NpgsqlTypes.NpgsqlCube +NpgsqlTypes.NpgsqlCube.UpperRight.get -> System.Collections.Generic.IReadOnlyList! +NpgsqlTypes.NpgsqlDbType.Cube = 63 -> NpgsqlTypes.NpgsqlDbType +override NpgsqlTypes.NpgsqlCube.Equals(object? obj) -> bool +override NpgsqlTypes.NpgsqlCube.GetHashCode() -> int +override NpgsqlTypes.NpgsqlCube.ToString() -> string! 
+static NpgsqlTypes.NpgsqlCube.operator !=(NpgsqlTypes.NpgsqlCube x, NpgsqlTypes.NpgsqlCube y) -> bool +static NpgsqlTypes.NpgsqlCube.operator ==(NpgsqlTypes.NpgsqlCube x, NpgsqlTypes.NpgsqlCube y) -> bool NpgsqlTypes.NpgsqlLine.Deconstruct(out double a, out double b, out double c) -> void NpgsqlTypes.NpgsqlLSeg.Deconstruct(out NpgsqlTypes.NpgsqlPoint start, out NpgsqlTypes.NpgsqlPoint end) -> void NpgsqlTypes.NpgsqlPoint.Deconstruct(out double x, out double y) -> void diff --git a/test/Npgsql.Tests/Types/CubeTests.cs b/test/Npgsql.Tests/Types/CubeTests.cs new file mode 100644 index 0000000000..8b766c0366 --- /dev/null +++ b/test/Npgsql.Tests/Types/CubeTests.cs @@ -0,0 +1,277 @@ +using System; +using System.Threading.Tasks; +using Npgsql.Properties; +using NpgsqlTypes; +using NUnit.Framework; + +namespace Npgsql.Tests.Types; + +public class CubeTests : MultiplexingTestBase +{ + static readonly TestCaseData[] CubeValues = + { + new TestCaseData(new NpgsqlCube(new[] { 1.0, 2.0, 3.0 }, new[] { 4.0, 5.0, 6.0 }), "(1, 2, 3),(4, 5, 6)") + .SetName("Cube_MultiDimensional"), + new TestCaseData(new NpgsqlCube(new[] { 1.0, 2.0, 3.0 }), "(1, 2, 3)") + .SetName("Cube_MultiDimensionalPoint"), + new TestCaseData(new NpgsqlCube(1.0), "(1)") + .SetName("Cube_SingleDimensionalPoint"), + new TestCaseData(new NpgsqlCube(1.0, 2.0), "(1),(2)") + .SetName("Cube_SingleDimensional") + }; + + [Test, TestCaseSource(nameof(CubeValues))] + public Task Cube(NpgsqlCube cube, string sqlLiteral) + => AssertType(cube, sqlLiteral, "cube", NpgsqlDbType.Cube, isDefault: true, isNpgsqlDbTypeInferredFromClrType: false); + + [Test] + public void Cube_Constructor_SingleValue() + { + var cube = new NpgsqlCube(1.0); + Assert.That(cube.IsPoint, Is.True); + Assert.That(cube.Dimensions, Is.EqualTo(1)); + Assert.That(cube.LowerLeft, Is.EquivalentTo(new [] { 1.0 })); + Assert.That(cube.UpperRight, Is.EquivalentTo(new [] { 1.0 })); + } + + [Test] + public void Cube_Constructor_SingleCoord_Point() + { + var cube 
= new NpgsqlCube(1.0, 1.0); + Assert.That(cube.IsPoint, Is.True); + Assert.That(cube.Dimensions, Is.EqualTo(1)); + Assert.That(cube.LowerLeft, Is.EquivalentTo(new [] { 1.0 })); + Assert.That(cube.UpperRight, Is.EquivalentTo(new [] { 1.0 })); + } + + [Test] + public void Cube_Constructor_SingleCoord_NotPoint() + { + var cube = new NpgsqlCube(1.0, 2.0); + Assert.That(cube.IsPoint, Is.False); + Assert.That(cube.Dimensions, Is.EqualTo(1)); + Assert.That(cube.LowerLeft, Is.EquivalentTo(new [] { 1.0 })); + Assert.That(cube.UpperRight, Is.EquivalentTo(new [] { 2.0 })); + } + + [Test] + public void Cube_Constructor_LowerLeft_UpperRight_NotPoint() + { + var cube = new NpgsqlCube(new[] { 1.0, 2.0 }, new[] { 3.0, 4.0 }); + Assert.That(cube.IsPoint, Is.False); + Assert.That(cube.Dimensions, Is.EqualTo(2)); + Assert.That(cube.LowerLeft, Is.EquivalentTo(new [] { 1.0, 2.0 })); + Assert.That(cube.UpperRight, Is.EquivalentTo(new [] { 3.0, 4.0 })); + } + + [Test] + public void Cube_Constructor_LowerLeft_UpperRight_Point() + { + var cube = new NpgsqlCube(new[] { 1.0, 2.0 }, new[] { 1.0, 2.0 }); + Assert.That(cube.IsPoint, Is.True); + Assert.That(cube.Dimensions, Is.EqualTo(2)); + Assert.That(cube.LowerLeft, Is.EquivalentTo(new [] { 1.0, 2.0 })); + Assert.That(cube.UpperRight, Is.EquivalentTo(new [] { 1.0, 2.0 })); + } + + [Test] + public void Cube_Constructor_AddDimension_Single_Point() + { + var existingCube = new NpgsqlCube(new[] { 1.0, 2.0, 3.0 }); + var cube = new NpgsqlCube(existingCube, 4.0); + Assert.That(cube.IsPoint, Is.True); + Assert.That(cube.Dimensions, Is.EqualTo(4)); + Assert.That(cube.LowerLeft, Is.EquivalentTo(new [] { 1.0, 2.0, 3.0, 4.0 })); + Assert.That(cube.UpperRight, Is.EquivalentTo(new [] { 1.0, 2.0, 3.0, 4.0 })); + } + + [Test] + public void Cube_Constructor_AddDimension_Single_NotPoint() + { + var existingCube = new NpgsqlCube(new [] { 1.0, 2.0 }, new [] { 3.0, 4.0 }); + var cube = new NpgsqlCube(existingCube, 3.0); + Assert.That(cube.IsPoint, Is.False); + 
Assert.That(cube.Dimensions, Is.EqualTo(3)); + Assert.That(cube.LowerLeft, Is.EquivalentTo(new [] { 1.0, 2.0, 3.0 })); + Assert.That(cube.UpperRight, Is.EquivalentTo(new [] { 3.0, 4.0, 3.0 })); + } + + [Test] + public void Cube_Constructor_AddDimension_LowerLeft_UpperRight_Point() + { + var existingCube = new NpgsqlCube(new[] { 1.0, 2.0, 3.0 }); + var cube = new NpgsqlCube(existingCube, 4.0, 4.0); + Assert.That(cube.IsPoint, Is.True); + Assert.That(cube.Dimensions, Is.EqualTo(4)); + Assert.That(cube.LowerLeft, Is.EquivalentTo(new [] { 1.0, 2.0, 3.0, 4.0 })); + Assert.That(cube.UpperRight, Is.EquivalentTo(new [] { 1.0, 2.0, 3.0, 4.0 })); + } + + [Test] + public void Cube_Constructor_AddDimension_LowerLeft_UpperRight_NotPoint() + { + var existingCube = new NpgsqlCube(new [] { 1.0, 2.0 }, new [] { 3.0, 4.0 }); + var cube = new NpgsqlCube(existingCube, 4.0, 5.0); + Assert.That(cube.IsPoint, Is.False); + Assert.That(cube.Dimensions, Is.EqualTo(3)); + Assert.That(cube.LowerLeft, Is.EquivalentTo(new [] { 1.0, 2.0, 4.0 })); + Assert.That(cube.UpperRight, Is.EquivalentTo(new [] { 3.0, 4.0, 5.0 })); + } + + [Test] + public void Cube_Subset() + { + var cube = new NpgsqlCube(new [] { 1.0, 2.0, 3.0 }, new [] { 4.0, 5.0, 6.0 }); + Assert.That(cube.ToSubset(0, 2, 1, 1), Is.EqualTo(new NpgsqlCube(new [] { 1.0, 3.0, 2.0, 2.0 }, new [] { 4.0, 6.0, 5.0, 5.0 }))); + } + + [Test] + public void Cube_ToString_NotPoint() + { + var cube = new NpgsqlCube(new[] { 1.0, 2.0, 3.0 }, new[] { 4.0, 5.0, 6.0 }); + Assert.That(cube.ToString(), Is.EqualTo("(1, 2, 3),(4, 5, 6)")); + } + + [Test] + public void Cube_ToString_Point() + { + var cube = new NpgsqlCube(new[] { 1.0, 2.0, 3.0 }); + Assert.That(cube.ToString(), Is.EqualTo("(1, 2, 3)")); + } + + [Test] + public async Task Cube_Array() + { + var data = new[] + { + new NpgsqlCube(new[] { 1.0, 2.0 }, new[] { 3.0, 4.0 }), + new NpgsqlCube(new[] { 5.0, 6.0 }), + new NpgsqlCube(1.0, 2.0) + }; + + await AssertType( + data, + @"{""(1, 2),(3, 4)"",""(5, 
6)"",""(1),(2)""}", + "cube[]", + NpgsqlDbType.Cube | NpgsqlDbType.Array, + isDefault: true, + isNpgsqlDbTypeInferredFromClrType: false); + } + + [Test] + public void Cube_DimensionMismatch_ThrowsArgumentException() + { + var ex = Assert.Throws(() => new NpgsqlCube(new[] { 1.0, 2.0 }, new[] { 3.0 })); + Assert.That(ex!.Message, Does.Contain("Different point dimensions")); + } + + [Test] + public Task Cube_NegativeValues() + => AssertType( + new NpgsqlCube(new[] { -1.0, -2.0, -3.0 }, new[] { -4.0, -5.0, -6.0 }), + "(-1, -2, -3),(-4, -5, -6)", + "cube", + NpgsqlDbType.Cube, + isDefault: true, + isNpgsqlDbTypeInferredFromClrType: false); + + [Test] + public void Cube_Equality_HashCode() + { + var cube1 = new NpgsqlCube(new[] { 1.0, 2.0 }, new[] { 3.0, 4.0 }); + var cube2 = new NpgsqlCube(new[] { 1.0, 2.0 }, new[] { 3.0, 4.0 }); + var cube3 = new NpgsqlCube(new[] { 1.0, 2.0 }, new[] { 3.0, 5.0 }); + + // Test equality + Assert.That(cube1, Is.EqualTo(cube2)); + Assert.That(cube1 == cube2, Is.True); + Assert.That(cube1 != cube3, Is.True); + Assert.That(cube1.Equals(cube2), Is.True); + Assert.That(cube1.Equals(cube3), Is.False); + + // Test hash code consistency + Assert.That(cube1.GetHashCode(), Is.EqualTo(cube2.GetHashCode())); + Assert.That(cube1.GetHashCode(), Is.Not.EqualTo(cube3.GetHashCode())); + } + + [Test] + public Task Cube_ZeroValues() + => AssertType( + new NpgsqlCube(0.0, 0.0), + "(0)", + "cube", + NpgsqlDbType.Cube, + isDefault: true, + isNpgsqlDbTypeInferredFromClrType: false); + + [Test] + public Task Cube_MaxDimensions() + { + var lowerLeft = new double[100]; + var upperRight = new double[100]; + for (var i = 0; i < 100; i++) + { + lowerLeft[i] = i; + upperRight[i] = i + 100; + } + + var expectedLower = string.Join(", ", lowerLeft); + var expectedUpper = string.Join(", ", upperRight); + var expected = $"({expectedLower}),({expectedUpper})"; + + return AssertType( + new NpgsqlCube(lowerLeft, upperRight), + expected, + "cube", + NpgsqlDbType.Cube, + 
isDefault: true, + isNpgsqlDbTypeInferredFromClrType: false); + } + + [Test] + public async Task Cube_not_supported_by_default_on_NpgsqlSlimSourceBuilder() + { + var errorMessage = string.Format( + NpgsqlStrings.CubeNotEnabled, nameof(NpgsqlSlimDataSourceBuilder.EnableCube), nameof(NpgsqlSlimDataSourceBuilder)); + + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + await using var dataSource = dataSourceBuilder.Build(); + + var exception = + await AssertTypeUnsupportedRead("(1),(2)", "cube", dataSource); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); + exception = await AssertTypeUnsupportedWrite(new NpgsqlCube(1.0, 2.0), "cube", dataSource); + Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); + } + + [Test] + public async Task NpgsqlSlimSourceBuilder_EnableCube() + { + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + dataSourceBuilder.EnableCube(); + await using var dataSource = dataSourceBuilder.Build(); + + await AssertType(dataSource, new NpgsqlCube(1.0, 2.0), "(1),(2)", "cube", NpgsqlDbType.Cube, isDefaultForWriting: false, skipArrayCheck: true); + } + + [Test] + public async Task NpgsqlSlimSourceBuilder_EnableArrays() + { + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + dataSourceBuilder.EnableCube(); + dataSourceBuilder.EnableArrays(); + await using var dataSource = dataSourceBuilder.Build(); + + await AssertType(dataSource, new NpgsqlCube(1.0, 2.0), "(1),(2)", "cube", NpgsqlDbType.Cube, isDefaultForWriting: false); + } + + [OneTimeSetUp] + public async Task SetUp() + { + await using var conn = await OpenConnectionAsync(); + TestUtil.MinimumPgVersion(conn, "13.0"); + await TestUtil.EnsureExtensionAsync(conn, "cube"); + } + + public CubeTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) { } +} From fcf2d7fe6370243759637cdcd14e241b1c2e5ad7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 3 Nov 2025 22:17:26 +0100 Subject: [PATCH 612/761] Bump NUnit.Analyzers from 4.11.1 to 4.11.2 (#6288) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 6a99e09cfc..e70fd53bb5 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -23,7 +23,7 @@ - + From ed512e5297894f9cd9751b540ac6de9f1f493388 Mon Sep 17 00:00:00 2001 From: Trivalik <3148279+trivalik@users.noreply.github.com> Date: Wed, 5 Nov 2025 15:08:54 +0100 Subject: [PATCH 613/761] handles SSL ConnectionReset on Windows; fixes #6274 (#6287) Fixes #6274 --- src/Npgsql/Internal/NpgsqlConnector.cs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 2ceed5cfb9..6749773781 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -609,8 +609,7 @@ static async Task OpenCore( // Any error after trying with GSS encryption (gssEncMode == GssEncryptionMode.Prefer || // Auth error with/without SSL - (e is PostgresException { SqlState: PostgresErrorCodes.InvalidAuthorizationSpecification } && - (sslMode == SslMode.Prefer && conn.IsSslEncrypted || sslMode == SslMode.Allow && !conn.IsSslEncrypted))) + (sslMode == SslMode.Prefer && conn.IsSslEncrypted || sslMode == SslMode.Allow && !conn.IsSslEncrypted)) { if (gssEncMode == GssEncryptionMode.Prefer) { From 8fd4968565b8f3c0bdce914e0cd6ca81ab938090 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Wed, 5 Nov 2025 17:26:57 +0100 Subject: [PATCH 614/761] Remove task cancellation helpers (#6291) --- src/Npgsql/Internal/NpgsqlConnector.cs | 40 +++-- src/Npgsql/TaskTimeoutAndCancellation.cs | 66 ------- .../TaskTimeoutAndCancellationTest.cs | 162 ------------------ 3 files changed, 24 insertions(+), 244 deletions(-) delete mode 100644 
src/Npgsql/TaskTimeoutAndCancellation.cs delete mode 100644 test/Npgsql.Tests/TaskTimeoutAndCancellationTest.cs diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 6749773781..098277dd47 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -1315,9 +1315,6 @@ void Connect(NpgsqlTimeout timeout) async Task ConnectAsync(NpgsqlTimeout timeout, CancellationToken cancellationToken) { - // Whether the framework and/or the OS platform support Dns.GetHostAddressesAsync cancellation API or they do not, - // we always fake-cancel the operation with the help of TaskTimeoutAndCancellation.ExecuteAsync. It stops waiting - // and raises the exception, while the actual task may be left running. EndPoint[] endpoints; if (NpgsqlConnectionStringBuilder.IsUnixSocket(Host, Port, out var socketPath)) { @@ -1328,8 +1325,18 @@ async Task ConnectAsync(NpgsqlTimeout timeout, CancellationToken cancellationTok IPAddress[] ipAddresses; try { - ipAddresses = await Dns.GetHostAddressesAsync(Host, cancellationToken) - .WaitAsync(timeout.CheckAndGetTimeLeft(), cancellationToken).ConfigureAwait(false); + using var combinedCts = timeout.IsSet ? CancellationTokenSource.CreateLinkedTokenSource(cancellationToken) : null; + combinedCts?.CancelAfter(timeout.CheckAndGetTimeLeft()); + var combinedToken = combinedCts?.Token ?? 
cancellationToken; + try + { + ipAddresses = await Dns.GetHostAddressesAsync(Host, combinedToken).ConfigureAwait(false); + } + catch (OperationCanceledException oce) when ( + oce.CancellationToken == combinedToken && !cancellationToken.IsCancellationRequested) + { + throw new TimeoutException(); + } } catch (SocketException ex) { @@ -1361,7 +1368,18 @@ async Task ConnectAsync(NpgsqlTimeout timeout, CancellationToken cancellationTok // Some options are not applied after the socket is open, see #6013 SetSocketOptions(socket); - await OpenSocketConnectionAsync(socket, endpoint, endpointTimeout, cancellationToken).ConfigureAwait(false); + using var combinedCts = endpointTimeout.IsSet ? CancellationTokenSource.CreateLinkedTokenSource(cancellationToken) : null; + combinedCts?.CancelAfter(endpointTimeout.CheckAndGetTimeLeft()); + var combinedToken = combinedCts?.Token ?? cancellationToken; + try + { + await socket.ConnectAsync(endpoint, combinedToken).ConfigureAwait(false); + } + catch (OperationCanceledException oce) when ( + oce.CancellationToken == combinedToken && !cancellationToken.IsCancellationRequested) + { + throw new TimeoutException(); + } _socket = socket; ConnectedEndPoint = endpoint; @@ -1389,16 +1407,6 @@ async Task ConnectAsync(NpgsqlTimeout timeout, CancellationToken cancellationTok throw new NpgsqlException($"Failed to connect to {endpoint}", e); } } - - static Task OpenSocketConnectionAsync(Socket socket, EndPoint endpoint, NpgsqlTimeout perIpTimeout, CancellationToken cancellationToken) - { - // Whether the OS platform supports Socket.ConnectAsync cancellation API or not, - // we always fake-cancel the operation with the help of TaskTimeoutAndCancellation.ExecuteAsync. It stops waiting - // and raises the exception, while the actual task may be left running. 
- Task ConnectAsync(CancellationToken ct) => - socket.ConnectAsync(endpoint, ct).AsTask(); - return TaskTimeoutAndCancellation.ExecuteAsync(ConnectAsync, perIpTimeout, cancellationToken); - } } EndPoint[] IPAddressesToEndpoints(IPAddress[] ipAddresses, int port) diff --git a/src/Npgsql/TaskTimeoutAndCancellation.cs b/src/Npgsql/TaskTimeoutAndCancellation.cs deleted file mode 100644 index ceed87ba94..0000000000 --- a/src/Npgsql/TaskTimeoutAndCancellation.cs +++ /dev/null @@ -1,66 +0,0 @@ -using System; -using System.Threading; -using System.Threading.Tasks; -using Npgsql.Util; - -namespace Npgsql; - -/// -/// Utility class to execute a potentially non-cancellable while allowing to timeout and/or cancel awaiting for it and at the same time prevent event if the original fails later. -/// -static class TaskTimeoutAndCancellation -{ - /// - /// Executes a potentially non-cancellable while allowing to timeout and/or cancel awaiting for it. - /// If the given task does not complete within , a is thrown. - /// The executed may be left in an incomplete state after the that this method returns completes dues to timeout and/or cancellation request. - /// The method guarantees that the abandoned, incomplete is not going to produce event if it fails later. - /// - /// Gets the for execution with a combined that attempts to cancel the in an event of the timeout or external cancellation request. - /// The timeout after which the should be faulted with a if it hasn't otherwise completed. - /// The to monitor for a cancellation request. - /// The result . - /// The representing the asynchronous wait. - internal static async Task ExecuteAsync(Func> getTaskFunc, NpgsqlTimeout timeout, CancellationToken cancellationToken) - { - Task? 
task = default; - await ExecuteAsync(ct => (Task)(task = getTaskFunc(ct)), timeout, cancellationToken).ConfigureAwait(false); - return await task!.ConfigureAwait(false); - } - - /// - /// Executes a potentially non-cancellable while allowing to timeout and/or cancel awaiting for it. - /// If the given task does not complete within , a is thrown. - /// The executed may be left in an incomplete state after the that this method returns completes dues to timeout and/or cancellation request. - /// The method guarantees that the abandoned, incomplete is not going to produce event if it fails later. - /// - /// Gets the for execution with a combined that attempts to cancel the in an event of the timeout or external cancellation request. - /// The timeout after which the should be faulted with a if it hasn't otherwise completed. - /// The to monitor for a cancellation request. - /// The representing the asynchronous wait. - internal static async Task ExecuteAsync(Func getTaskFunc, NpgsqlTimeout timeout, CancellationToken cancellationToken) - { - using var combinedCts = timeout.IsSet ? CancellationTokenSource.CreateLinkedTokenSource(cancellationToken) : null; - var task = getTaskFunc(combinedCts?.Token ?? cancellationToken); - try - { - try - { - await task.WaitAsync(timeout.CheckAndGetTimeLeft(), cancellationToken).ConfigureAwait(false); - } - catch (TimeoutException) when (!task!.IsCompleted) - { - // Attempt to stop the Task in progress. - combinedCts?.Cancel(); - throw; - } - } - catch - { - // Prevent unobserved Task notifications by observing the failed Task exception. - // To test: comment the next line out and re-run TaskExtensionsTest.DelayedFaultedTaskCancellation. 
- _ = task.ContinueWith(t => _ = t.Exception, CancellationToken.None, TaskContinuationOptions.OnlyOnFaulted, TaskScheduler.Current); - throw; - } - } -} diff --git a/test/Npgsql.Tests/TaskTimeoutAndCancellationTest.cs b/test/Npgsql.Tests/TaskTimeoutAndCancellationTest.cs deleted file mode 100644 index f90bd1dd92..0000000000 --- a/test/Npgsql.Tests/TaskTimeoutAndCancellationTest.cs +++ /dev/null @@ -1,162 +0,0 @@ -using System; -using System.Threading; -using System.Threading.Tasks; -using NUnit.Framework; -using Npgsql.Util; - -namespace Npgsql.Tests; - -[NonParallelizable] // To make sure unobserved tasks from other tests do not leak -public class TaskTimeoutAndCancellationTest : TestBase -{ - const int TestResultValue = 777; - - async Task GetResultTaskAsync(int timeout, CancellationToken ct) - { - await Task.Delay(timeout, ct); - return TestResultValue; - } - - Task GetVoidTaskAsync(int timeout, CancellationToken ct) => Task.Delay(timeout, ct); - - [Test] - public async Task SuccessfulResultTaskAsync() => - Assert.That(await TaskTimeoutAndCancellation.ExecuteAsync(ct => GetResultTaskAsync(10, ct), NpgsqlTimeout.Infinite, CancellationToken.None), Is.EqualTo(TestResultValue)); - - [Test] - public async Task SuccessfulVoidTaskAsync() => - await TaskTimeoutAndCancellation.ExecuteAsync(ct => GetVoidTaskAsync(10, ct), NpgsqlTimeout.Infinite, CancellationToken.None); - - [Test] - public void InfinitelyLongTaskTimeout() => - Assert.ThrowsAsync(async () => - await TaskTimeoutAndCancellation.ExecuteAsync(ct => GetVoidTaskAsync(Timeout.Infinite, ct), new NpgsqlTimeout(TimeSpan.FromMilliseconds(10)), CancellationToken.None)); - - [Test] - public void InfinitelyLongTaskCancellation() - { - using var cts = new CancellationTokenSource(10); - Assert.ThrowsAsync(async () => - await TaskTimeoutAndCancellation.ExecuteAsync(ct => GetVoidTaskAsync(Timeout.Infinite, ct), NpgsqlTimeout.Infinite, cts.Token)); - } - - /// - /// The test creates a delayed execution Task that is being 
fake-cancelled and fails subsequently and triggers 'TaskScheduler.UnobservedTaskException event'. - /// - /// - /// The test is based on timing and depends on availability of thread pool threads. Therefore it could become unstable if the environment is under pressure. - /// - [Theory, IssueLink("https://github.com/npgsql/npgsql/issues/4149")] - [TestCase("CancelAndTimeout")] - [TestCase("CancelOnly")] - [TestCase("TimeoutOnly")] - [TestCase("CancelAndTimeout")] - [TestCase("CancelOnly")] - [TestCase("TimeoutOnly")] - public Task DelayedFaultedTaskCancellation(string testCase) => RunDelayedFaultedTaskTestAsync(async getUnobservedTaskException => - { - var cancel = true; - var timeout = true; - switch (testCase) - { - case "TimeoutOnly": - cancel = false; - break; - case "CancelOnly": - timeout = false; - break; - } - - var notifyDelayCompleted = new SemaphoreSlim(0, 1); - - // Invoke the method that creates a delayed execution Task that fails subsequently. - await CreateTaskAndPreemptWithCancellationAsync(500, cancel, timeout, notifyDelayCompleted); - - // Wait enough time for the non-cancelable task to notify us that an exception is thrown. - await notifyDelayCompleted.WaitAsync(); - - // And then wait some more. - var repeatCount = 2; - while (getUnobservedTaskException() is null && repeatCount-- > 0) - { - await Task.Delay(100); - - // Run the garbage collector to collect unobserved Tasks. - GC.Collect(); - GC.WaitForPendingFinalizers(); - } - }); - - static async Task RunDelayedFaultedTaskTestAsync(Func, Task> test) - { - // Run the garbage collector to collect unobserved Tasks from other tests. - GC.Collect(); - GC.WaitForPendingFinalizers(); - GC.Collect(); - - Exception? unobservedTaskException = null; - - // Subscribe to UnobservedTaskException event to store the Exception, if any. - void OnUnobservedTaskException(object? 
source, UnobservedTaskExceptionEventArgs args) - { - if (!args.Observed) - { - args.SetObserved(); - } - unobservedTaskException = args.Exception; - } - TaskScheduler.UnobservedTaskException += OnUnobservedTaskException; - - try - { - await test(() => unobservedTaskException); - - // Verify the unobserved Task exception event has not been received. - Assert.That(unobservedTaskException, Is.Null, unobservedTaskException?.Message); - } - finally - { - TaskScheduler.UnobservedTaskException -= OnUnobservedTaskException; - } - } - - /// - /// Create a delayed execution, non-Cancellable Task that fails subsequently after the Task goes out of scope. - /// - static async Task CreateTaskAndPreemptWithCancellationAsync(int delayMs, bool cancel, bool timeout, SemaphoreSlim notifyDelayCompleted) - { - var nonCancellableTask = Task.Delay(delayMs, CancellationToken.None) - .ContinueWith( - async _ => - { - try - { - await Task.FromException(new Exception("Unobserved Task Test Exception")); - } - finally - { - notifyDelayCompleted.Release(); - } - }) - .Unwrap(); - - var timeoutMs = delayMs / 5; - using var cts = cancel ? new CancellationTokenSource(timeoutMs) : null; - try - { - await TaskTimeoutAndCancellation.ExecuteAsync( - _ => nonCancellableTask, - timeout ? new NpgsqlTimeout(TimeSpan.FromMilliseconds(timeoutMs)) : NpgsqlTimeout.Infinite, - cts?.Token ?? CancellationToken.None); - } - catch (TimeoutException) - { - // Expected due to preemptive time out. - } - catch (OperationCanceledException) when (cts?.IsCancellationRequested == true) - { - // Expected due to preemptive cancellation. 
- } - Assert.That(nonCancellableTask.IsCompleted, Is.False); - } -} From 88cfc120f79a38ed7ba9ada60481f4c0819d7cd4 Mon Sep 17 00:00:00 2001 From: Ruslan Date: Wed, 5 Nov 2025 20:04:30 +0300 Subject: [PATCH 615/761] =?UTF-8?q?NpgsqlMultiHostDataSource:=20rethrow=20?= =?UTF-8?q?OperationCanceledException=20on=20canc=E2=80=A6=20(#6283)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes #6282 --- src/Npgsql/NpgsqlMultiHostDataSource.cs | 6 ++++++ test/Npgsql.Tests/MultipleHostsTests.cs | 11 +++++++++++ 2 files changed, 17 insertions(+) diff --git a/src/Npgsql/NpgsqlMultiHostDataSource.cs b/src/Npgsql/NpgsqlMultiHostDataSource.cs index 4997a6093a..5d21ab8954 100644 --- a/src/Npgsql/NpgsqlMultiHostDataSource.cs +++ b/src/Npgsql/NpgsqlMultiHostDataSource.cs @@ -219,6 +219,12 @@ static bool IsOnline(DatabaseState state, TargetSessionAttributes preferredType) } } } + catch (OperationCanceledException oce) when (cancellationToken.IsCancellationRequested && oce.CancellationToken == cancellationToken) + { + if (connector is not null) + pool.Return(connector); + throw; + } catch (Exception ex) { exceptions.Add(ex); diff --git a/test/Npgsql.Tests/MultipleHostsTests.cs b/test/Npgsql.Tests/MultipleHostsTests.cs index 66c1a99cf7..9025586c55 100644 --- a/test/Npgsql.Tests/MultipleHostsTests.cs +++ b/test/Npgsql.Tests/MultipleHostsTests.cs @@ -1133,6 +1133,17 @@ public async Task Build_with_multiple_hosts_is_supported() await using var connection = await dataSource.OpenConnectionAsync(); } + [Test] + public async Task OpenConnection_when_canceled_throws_TaskCanceledException() + { + var builder = new NpgsqlDataSourceBuilder(ConnectionString); + await using var dataSource = builder.BuildMultiHost(); + Assert.ThrowsAsync(async () => + { + await using var connection = await dataSource.OpenConnectionAsync(new CancellationToken(true)); + }); + } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4181")] [Explicit("Fails until #4181 
is fixed.")] public async Task LoadBalancing_is_fair_if_first_host_is_down([Values]TargetSessionAttributes targetSessionAttributes) From a68839b9c8c4aaec343e05ca02f89175f3cf99a9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 5 Nov 2025 22:51:37 +0100 Subject: [PATCH 616/761] Bump BenchmarkDotNet from 0.15.5 to 0.15.6 (#6293) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index e70fd53bb5..758397b7cd 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -36,7 +36,7 @@ - + From c3cf9f94b09cfe6252e1e3122ff4764247f120e2 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Thu, 6 Nov 2025 11:09:06 +0300 Subject: [PATCH 617/761] Allow specifying multiple root certificates in NpgsqlDataSourceBuilder (#6057) Closes #6056 --- src/Npgsql/Internal/NpgsqlConnector.cs | 16 ++++---- .../Internal/TransportSecurityHandler.cs | 4 +- src/Npgsql/NpgsqlDataSourceBuilder.cs | 30 +++++++++++++- src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 36 ++++++++++++++-- src/Npgsql/PublicAPI.Unshipped.txt | 4 ++ test/Npgsql.Tests/SecurityTests.cs | 41 +++++++++++++++++++ 6 files changed, 117 insertions(+), 14 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 098277dd47..40dcf6e941 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -1127,7 +1127,7 @@ internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, var checkCertificateRevocation = Settings.CheckCertificateRevocation; RemoteCertificateValidationCallback? certificateValidationCallback; - X509Certificate2? caCert; + X509Certificate2Collection? caCerts; string? 
certRootPath = null; if (sslMode is SslMode.Prefer or SslMode.Require) @@ -1135,11 +1135,11 @@ internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, certificateValidationCallback = SslTrustServerValidation; checkCertificateRevocation = false; } - else if ((caCert = DataSource.TransportSecurityHandler.RootCertificateCallback?.Invoke()) is not null || + else if (((caCerts = DataSource.TransportSecurityHandler.RootCertificatesCallback?.Invoke()) is not null && caCerts.Count > 0) || (certRootPath = Settings.RootCertificate ?? PostgresEnvironment.SslCertRoot ?? PostgresEnvironment.SslCertRootDefault) is not null) { - certificateValidationCallback = SslRootValidation(sslMode == SslMode.VerifyFull, certRootPath, caCert); + certificateValidationCallback = SslRootValidation(sslMode == SslMode.VerifyFull, certRootPath, caCerts); } else if (sslMode == SslMode.VerifyCA) { @@ -1195,7 +1195,7 @@ internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, if (Settings.RootCertificate is not null) throw new ArgumentException(NpgsqlStrings.CannotUseSslRootCertificateWithCustomValidationCallback); - if (DataSource.TransportSecurityHandler.RootCertificateCallback is not null) + if (DataSource.TransportSecurityHandler.RootCertificatesCallback is not null) throw new ArgumentException(NpgsqlStrings.CannotUseValidationRootCertificateCallbackWithCustomValidationCallback); } } @@ -1984,7 +1984,7 @@ internal void ClearTransaction(Exception? disposeReason = null) (sender, certificate, chain, sslPolicyErrors) => true; - static RemoteCertificateValidationCallback SslRootValidation(bool verifyFull, string? certRootPath, X509Certificate2? caCertificate) + static RemoteCertificateValidationCallback SslRootValidation(bool verifyFull, string? certRootPath, X509Certificate2Collection? 
caCertificates) => (_, certificate, chain, sslPolicyErrors) => { if (certificate is null || chain is null) @@ -2001,12 +2001,12 @@ static RemoteCertificateValidationCallback SslRootValidation(bool verifyFull, st if (certRootPath is null) { - Debug.Assert(caCertificate is not null); - certs.Add(caCertificate); + Debug.Assert(caCertificates is { Count: > 0 }); + certs.AddRange(caCertificates); } else { - Debug.Assert(caCertificate is null); + Debug.Assert(caCertificates is null or { Count: > 0 }); if (Path.GetExtension(certRootPath).ToUpperInvariant() != ".PFX") certs.ImportFromPemFile(certRootPath); diff --git a/src/Npgsql/Internal/TransportSecurityHandler.cs b/src/Npgsql/Internal/TransportSecurityHandler.cs index 9945e80534..fbe8cad72e 100644 --- a/src/Npgsql/Internal/TransportSecurityHandler.cs +++ b/src/Npgsql/Internal/TransportSecurityHandler.cs @@ -11,7 +11,7 @@ class TransportSecurityHandler { public virtual bool SupportEncryption => false; - public virtual Func? RootCertificateCallback + public virtual Func? RootCertificatesCallback { get => throw new NotSupportedException(string.Format(NpgsqlStrings.TransportSecurityDisabled, nameof(NpgsqlSlimDataSourceBuilder.EnableTransportSecurity))); set => throw new NotSupportedException(string.Format(NpgsqlStrings.TransportSecurityDisabled, nameof(NpgsqlSlimDataSourceBuilder.EnableTransportSecurity))); @@ -29,7 +29,7 @@ sealed class RealTransportSecurityHandler : TransportSecurityHandler { public override bool SupportEncryption => true; - public override Func? RootCertificateCallback { get; set; } + public override Func? 
RootCertificatesCallback { get; set; } public override Task NegotiateEncryption(bool async, NpgsqlConnector connector, SslMode sslMode, NpgsqlTimeout timeout, CancellationToken cancellationToken) => connector.NegotiateEncryption(sslMode, timeout, async, cancellationToken); diff --git a/src/Npgsql/NpgsqlDataSourceBuilder.cs b/src/Npgsql/NpgsqlDataSourceBuilder.cs index 68dd517ba0..b08f175533 100644 --- a/src/Npgsql/NpgsqlDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlDataSourceBuilder.cs @@ -41,7 +41,7 @@ public INpgsqlNameTranslator DefaultNameTranslator } /// - /// A connection string builder that can be used to configured the connection string on the builder. + /// A connection string builder that can be used to configure the connection string on the builder. /// public NpgsqlConnectionStringBuilder ConnectionStringBuilder => _internalBuilder.ConnectionStringBuilder; @@ -297,6 +297,17 @@ public NpgsqlDataSourceBuilder UseRootCertificate(X509Certificate2? rootCertific return this; } + /// + /// Sets the that will be used validate SSL certificate, received from the server. + /// + /// The CA certificates. + /// The same builder instance so that multiple calls can be chained. + public NpgsqlDataSourceBuilder UseRootCertificates(X509Certificate2Collection? rootCertificates) + { + _internalBuilder.UseRootCertificates(rootCertificates); + return this; + } + /// /// Specifies a callback that will be used to validate SSL certificate, received from the server. /// @@ -313,6 +324,23 @@ public NpgsqlDataSourceBuilder UseRootCertificateCallback(Func return this; } + /// + /// Specifies a callback that will be used to validate SSL certificate, received from the server. + /// + /// The callback to get CA certificates. + /// The same builder instance so that multiple calls can be chained. + /// + /// This overload, which accepts a callback, is suitable for scenarios where the certificate rotates + /// and might change during the lifetime of the application. 
+ /// When that's not the case, use the overload which directly accepts the certificate. + /// + /// The same builder instance so that multiple calls can be chained. + public NpgsqlDataSourceBuilder UseRootCertificatesCallback(Func? rootCertificateCallback) + { + _internalBuilder.UseRootCertificatesCallback(rootCertificateCallback); + return this; + } + /// /// Configures a periodic password provider, which is automatically called by the data source at some regular interval. This is the /// recommended way to fetch a rotating access token. diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index bc15fca563..8bfe449c4a 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -60,7 +60,7 @@ public sealed class NpgsqlSlimDataSourceBuilder : INpgsqlTypeMapper internal Action ConfigureDefaultFactories { get; set; } /// - /// A connection string builder that can be used to configured the connection string on the builder. + /// A connection string builder that can be used to configure the connection string on the builder. /// public NpgsqlConnectionStringBuilder ConnectionStringBuilder { get; } @@ -252,9 +252,19 @@ public NpgsqlSlimDataSourceBuilder UseClientCertificatesCallback(ActionThe same builder instance so that multiple calls can be chained. public NpgsqlSlimDataSourceBuilder UseRootCertificate(X509Certificate2? rootCertificate) => rootCertificate is null - ? UseRootCertificateCallback(null) + ? UseRootCertificatesCallback((Func?)null) : UseRootCertificateCallback(() => rootCertificate); + /// + /// Sets the that will be used validate SSL certificate, received from the server. + /// + /// The CA certificates. + /// The same builder instance so that multiple calls can be chained. + public NpgsqlSlimDataSourceBuilder UseRootCertificates(X509Certificate2Collection? rootCertificates) + => rootCertificates is null + ? 
UseRootCertificatesCallback((Func?)null) + : UseRootCertificatesCallback(() => rootCertificates); + /// /// Specifies a callback that will be used to validate SSL certificate, received from the server. /// @@ -268,7 +278,27 @@ public NpgsqlSlimDataSourceBuilder UseRootCertificate(X509Certificate2? rootCert /// The same builder instance so that multiple calls can be chained. public NpgsqlSlimDataSourceBuilder UseRootCertificateCallback(Func? rootCertificateCallback) { - _transportSecurityHandler.RootCertificateCallback = rootCertificateCallback; + _transportSecurityHandler.RootCertificatesCallback = () => rootCertificateCallback is not null + ? new X509Certificate2Collection(rootCertificateCallback()) + : null; + + return this; + } + + /// + /// Specifies a callback that will be used to validate SSL certificate, received from the server. + /// + /// The callback to get CA certificates. + /// The same builder instance so that multiple calls can be chained. + /// + /// This overload, which accepts a callback, is suitable for scenarios where the certificate rotates + /// and might change during the lifetime of the application. + /// When that's not the case, use the overload which directly accepts the certificate. + /// + /// The same builder instance so that multiple calls can be chained. + public NpgsqlSlimDataSourceBuilder UseRootCertificatesCallback(Func? rootCertificateCallback) + { + _transportSecurityHandler.RootCertificatesCallback = rootCertificateCallback; return this; } diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index 7f28aa9e2e..bb06704140 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -22,6 +22,8 @@ Npgsql.NpgsqlDataSourceBuilder.MapEnum(System.Type! clrType, string? pgName = nu Npgsql.NpgsqlDataSourceBuilder.MapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlDataSourceBuilder! 
Npgsql.NpgsqlDataSourceBuilder.ConfigureTracing(System.Action! configureAction) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.UseNegotiateOptionsCallback(System.Action? negotiateOptionsCallback) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.UseRootCertificates(System.Security.Cryptography.X509Certificates.X509Certificate2Collection? rootCertificates) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.UseRootCertificatesCallback(System.Func? rootCertificateCallback) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.UseSslClientAuthenticationOptionsCallback(System.Action? sslClientAuthenticationOptionsCallback) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlMetricsOptions Npgsql.NpgsqlMetricsOptions.NpgsqlMetricsOptions() -> void @@ -36,6 +38,8 @@ Npgsql.NpgsqlSlimDataSourceBuilder.MapComposite(string? pgName = null, Npgsql Npgsql.NpgsqlSlimDataSourceBuilder.MapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.MapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UseNegotiateOptionsCallback(System.Action? negotiateOptionsCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.UseRootCertificates(System.Security.Cryptography.X509Certificates.X509Certificate2Collection? rootCertificates) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.UseRootCertificatesCallback(System.Func? rootCertificateCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UseSslClientAuthenticationOptionsCallback(System.Action? sslClientAuthenticationOptionsCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! 
*REMOVED*Npgsql.NpgsqlTracingOptions *REMOVED*Npgsql.NpgsqlTracingOptions.NpgsqlTracingOptions() -> void diff --git a/test/Npgsql.Tests/SecurityTests.cs b/test/Npgsql.Tests/SecurityTests.cs index 0ef9fd7b68..c9448b19ee 100644 --- a/test/Npgsql.Tests/SecurityTests.cs +++ b/test/Npgsql.Tests/SecurityTests.cs @@ -2,6 +2,8 @@ using System.IO; using System.Runtime.InteropServices; using System.Security.Authentication; +using System.Security.Cryptography; +using System.Security.Cryptography.X509Certificates; using System.Threading; using System.Threading.Tasks; using Npgsql.Properties; @@ -563,6 +565,45 @@ public async Task Connect_with_verify_check_host([Values(SslMode.VerifyCA, SslMo } } + [Test] + [Platform(Exclude = "MacOsX", Reason = "Mac requires explicit opt-in to receive CA certificate in TLS handshake")] + public async Task Connect_with_verify_and_multiple_ca_cert([Values(SslMode.VerifyCA, SslMode.VerifyFull)] SslMode sslMode, [Values] bool realCaFirst) + { + if (!IsOnBuildServer) + Assert.Ignore("Only executed in CI"); + + var certificates = new X509Certificate2Collection(); + +#if NET9_0_OR_GREATER + using var realCaCert = X509CertificateLoader.LoadCertificateFromFile("ca.crt"); +#else + using var realCaCert = new X509Certificate2("ca.crt"); +#endif + + using var ecdsa = ECDsa.Create(); + var req = new CertificateRequest("cn=localhost", ecdsa, HashAlgorithmName.SHA256); + using var unrelatedCaCert = req.CreateSelfSigned(DateTimeOffset.UtcNow.AddDays(-1), DateTimeOffset.UtcNow.AddDays(1)); + + if (realCaFirst) + { + certificates.Add(realCaCert); + certificates.Add(unrelatedCaCert); + } + else + { + certificates.Add(unrelatedCaCert); + certificates.Add(realCaCert); + } + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.ConnectionStringBuilder.SslMode = sslMode; + dataSourceBuilder.UseRootCertificates(certificates); + + await using var dataSource = dataSourceBuilder.Build(); + + await using var _ = await dataSource.OpenConnectionAsync(); + 
} + [Test] [NonParallelizable] // Sets environment variable public async Task Direct_ssl_via_env_requires_correct_sslmode() From ce11aaa601ca7ba4d1974ee59d4ef791f3cb71e0 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Thu, 6 Nov 2025 13:39:57 +0300 Subject: [PATCH 618/761] Fix returning properties from NpgsqlConnectionStringBuilder.GetProperties (#6290) Fixes #6289 --- src/Npgsql/NpgsqlConnectionStringBuilder.cs | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/src/Npgsql/NpgsqlConnectionStringBuilder.cs b/src/Npgsql/NpgsqlConnectionStringBuilder.cs index 5b2d13a1d0..ca0d734c5f 100644 --- a/src/Npgsql/NpgsqlConnectionStringBuilder.cs +++ b/src/Npgsql/NpgsqlConnectionStringBuilder.cs @@ -1673,9 +1673,22 @@ protected override void GetProperties(Hashtable propertyDescriptors) foreach (var value in propertyDescriptors.Values) { var d = (PropertyDescriptor)value; + var isConnectionStringProperty = false; + var isObsolete = false; foreach (var attribute in d.Attributes) - if (attribute is NpgsqlConnectionStringPropertyAttribute or ObsoleteAttribute) - toRemove.Add(d); + { + if (attribute is NpgsqlConnectionStringPropertyAttribute) + { + isConnectionStringProperty = true; + } + else if (attribute is ObsoleteAttribute) + { + isObsolete = true; + } + } + + if (!isConnectionStringProperty || isObsolete) + toRemove.Add(d); } foreach (var o in toRemove) From 33d8cdcd44b99ff1832e653f297a522ebf4170ab Mon Sep 17 00:00:00 2001 From: Kirk Brauer Date: Fri, 7 Nov 2025 14:14:35 -0500 Subject: [PATCH 619/761] Fix the NpgsqlCube to use the full G17 floating-point format (#6295) * Fix the Write() method to use the full G17 floating-point format * Move to one-line namespace --- src/Npgsql/NpgsqlTypes/NpgsqlCube.cs | 416 +++++++++++++-------------- 1 file changed, 208 insertions(+), 208 deletions(-) diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlCube.cs b/src/Npgsql/NpgsqlTypes/NpgsqlCube.cs index 15a89f56ee..24a1ea38b4 100644 --- 
a/src/Npgsql/NpgsqlTypes/NpgsqlCube.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlCube.cs @@ -1,251 +1,251 @@ using System; using System.Collections.Generic; +using System.Globalization; using System.Linq; using System.Text; // ReSharper disable once CheckNamespace -namespace NpgsqlTypes +namespace NpgsqlTypes; + +/// +/// Represents a PostgreSQL cube data type. +/// +/// +/// See https://www.postgresql.org/docs/current/cube.html +/// +public readonly struct NpgsqlCube : IEquatable { + // Store the coordinates as a value tuple array + readonly double[] _lowerLeft; + readonly double[] _upperRight; + /// - /// Represents a PostgreSQl cube data type. + /// The lower left coordinates of the cube. /// - /// - /// See https://www.postgresql.org/docs/current/cube.html - /// - public readonly struct NpgsqlCube : IEquatable + public IReadOnlyList LowerLeft => _lowerLeft; + + /// + /// The upper right coordinates of the cube. + /// + public IReadOnlyList UpperRight => _upperRight; + + /// + /// The number of dimensions of the cube. + /// + public int Dimensions => _lowerLeft.Length; + + /// + /// True if the cube is a point, that is, the two defining corners are the same. + /// + public bool IsPoint { get; } + + /// + /// Makes a cube with upper right and lower left coordinates as defined by the two arrays, which must be of the same length. + /// + /// This is an internal constructor to optimize the number of allocations. + /// The lower left values. + /// The upper right values. + /// + /// Thrown if the number of dimensions in the upper left and lower right values do not match. + /// + internal NpgsqlCube(double[] lowerLeft, double[] upperRight) { - // Store the coordinates as a value tuple array - readonly double[] _lowerLeft; - readonly double[] _upperRight; - - /// - /// The lower left coordinates of the cube. - /// - public IReadOnlyList LowerLeft => _lowerLeft; - - /// - /// The upper right coordinates of the cube. 
- /// - public IReadOnlyList UpperRight => _upperRight; - - /// - /// The number of dimensions of the cube. - /// - public int Dimensions => _lowerLeft.Length; - - /// - /// True if the cube is a point, that is, the two defining corners are the same. - /// - public bool IsPoint { get; } - - /// - /// Makes a cube with upper right and lower left coordinates as defined by the two arrays, which must be of the same length. - /// - /// This is an internal constructor to optimize the number of allocations. - /// The lower left values. - /// The upper right values. - /// - /// Thrown if the number of dimensions in the upper left and lower right values do not match. - /// - internal NpgsqlCube(double[] lowerLeft, double[] upperRight) - { - if (lowerLeft.Length != upperRight.Length) - throw new ArgumentException($"Not a valid cube: Different point dimensions in {lowerLeft} and {upperRight}."); + if (lowerLeft.Length != upperRight.Length) + throw new ArgumentException($"Not a valid cube: Different point dimensions in {lowerLeft} and {upperRight}."); - IsPoint = lowerLeft.SequenceEqual(upperRight); - _lowerLeft = lowerLeft; - _upperRight = upperRight; - } + IsPoint = lowerLeft.SequenceEqual(upperRight); + _lowerLeft = lowerLeft; + _upperRight = upperRight; + } + + /// + /// Makes a one dimensional cube with both coordinates the same. + /// + /// The point coordinate. + public NpgsqlCube(double coord) + { + IsPoint = true; + _lowerLeft = [coord]; + _upperRight = _lowerLeft; + } + + /// + /// Makes a one dimensional cube. + /// + /// The lower left value. + /// The upper right value. + public NpgsqlCube(double lowerLeft, double upperRight) + { + IsPoint = lowerLeft.CompareTo(upperRight) == 0; + _lowerLeft = [lowerLeft]; + _upperRight = IsPoint ? _lowerLeft : [upperRight]; + } + + /// + /// Makes a zero-volume cube using the coordinates defined by the array. + /// + /// The coordinates. 
+ public NpgsqlCube(IEnumerable coords) + { + // Always create a defensive copy to prevent external mutation + _lowerLeft = coords.ToArray(); + IsPoint = true; + _upperRight = _lowerLeft; + } + + /// + /// Makes a cube with upper right and lower left coordinates as defined by the two arrays, which must be of the same length. + /// + /// The lower left values. + /// The upper right values. + /// + /// Thrown if the number of dimensions in the upper left and lower right values do not match + /// or if the cube exceeds the maximum dimensions (100). + /// + public NpgsqlCube(IEnumerable lowerLeft, IEnumerable upperRight) : + this(lowerLeft.ToArray(), upperRight.ToArray()) + { } - /// - /// Makes a one dimensional cube with both coordinates the same. - /// - /// The point coordinate. - public NpgsqlCube(double coord) + /// + /// Makes a new cube by adding a dimension on to an existing cube, with the same values for both endpoints of the new coordinate. + /// This is useful for building cubes piece by piece from calculated values. + /// + /// The existing cube. + /// The coordinate to add. + public NpgsqlCube(NpgsqlCube cube, double coord) + { + IsPoint = cube.IsPoint; + if (IsPoint) { - IsPoint = true; - _lowerLeft = [coord]; + _lowerLeft = cube._lowerLeft.Append(coord).ToArray(); _upperRight = _lowerLeft; } - - /// - /// Makes a one dimensional cube. - /// - /// The lower left value. - /// The upper right value. - public NpgsqlCube(double lowerLeft, double upperRight) + else { - IsPoint = lowerLeft.CompareTo(upperRight) == 0; - _lowerLeft = [lowerLeft]; - _upperRight = IsPoint ? _lowerLeft : [upperRight]; + _lowerLeft = cube._lowerLeft.Append(coord).ToArray(); + _upperRight = cube._upperRight.Append(coord).ToArray(); } + } - /// - /// Makes a zero-volume cube using the coordinates defined by the array. - /// - /// The coordinates. - public NpgsqlCube(IEnumerable coords) + /// + /// Makes a new cube by adding a dimension on to an existing cube. 
+ /// This is useful for building cubes piece by piece from calculated values. + /// + /// The existing cube. + /// The lower left value. + /// The upper right value. + public NpgsqlCube(NpgsqlCube cube, double lowerLeft, double upperRight) + { + IsPoint = cube.IsPoint && lowerLeft.CompareTo(upperRight) == 0; + if (IsPoint) { - // Always create a defensive copy to prevent external mutation - _lowerLeft = coords.ToArray(); - IsPoint = true; + _lowerLeft = cube._lowerLeft.Append(lowerLeft).ToArray(); _upperRight = _lowerLeft; } - - /// - /// Makes a cube with upper right and lower left coordinates as defined by the two arrays, which must be of the same length. - /// - /// The lower left values. - /// The upper right values. - /// - /// Thrown if the number of dimensions in the upper left and lower right values do not match - /// or if the cube exceeds the maximum dimensions (100). - /// - public NpgsqlCube(IEnumerable lowerLeft, IEnumerable upperRight) : - this(lowerLeft.ToArray(), upperRight.ToArray()) - { } - - /// - /// Makes a new cube by adding a dimension on to an existing cube, with the same values for both endpoints of the new coordinate. - /// This is useful for building cubes piece by piece from calculated values. - /// - /// The existing cube. - /// The coordinate to add. - public NpgsqlCube(NpgsqlCube cube, double coord) + else { - IsPoint = cube.IsPoint; - if (IsPoint) - { - _lowerLeft = cube._lowerLeft.Append(coord).ToArray(); - _upperRight = _lowerLeft; - } - else - { - _lowerLeft = cube._lowerLeft.Append(coord).ToArray(); - _upperRight = cube._upperRight.Append(coord).ToArray(); - } + _lowerLeft = cube._lowerLeft.Append(lowerLeft).ToArray(); + _upperRight = cube._upperRight.Append(upperRight).ToArray(); } + } + + /// + /// Makes a new cube from an existing cube, using a list of dimension indexes from an array. + /// Can be used to extract the endpoints of a single dimension, or to drop dimensions, or to reorder them as desired. 
+ /// + /// The list of dimension indexes. + /// A new cube. + /// + /// + /// var cube = new NpgsqlCube(new[] { 1, 3, 5 }, new[] { 6, 7, 8 }); // '(1,3,5),(6,7,8)' + /// cube.ToSubset(1); // '(3),(7)' + /// cube.ToSubset(2, 1, 0, 0); // '(5,3,1,1),(8,7,6,6)' + /// + /// + public NpgsqlCube ToSubset(params int[] indexes) + { + var lowerLeft = new double[indexes.Length]; + var upperRight = new double[indexes.Length]; - /// - /// Makes a new cube by adding a dimension on to an existing cube. - /// This is useful for building cubes piece by piece from calculated values. - /// - /// The existing cube. - /// The lower left value. - /// The upper right value. - public NpgsqlCube(NpgsqlCube cube, double lowerLeft, double upperRight) + for (var i = 0; i < indexes.Length; i++) { - IsPoint = cube.IsPoint && lowerLeft.CompareTo(upperRight) == 0; - if (IsPoint) - { - _lowerLeft = cube._lowerLeft.Append(lowerLeft).ToArray(); - _upperRight = _lowerLeft; - } - else - { - _lowerLeft = cube._lowerLeft.Append(lowerLeft).ToArray(); - _upperRight = cube._upperRight.Append(upperRight).ToArray(); - } + lowerLeft[i] = _lowerLeft[indexes[i]]; + upperRight[i] = _upperRight[indexes[i]]; } - /// - /// Makes a new cube from an existing cube, using a list of dimension indexes from an array. - /// Can be used to extract the endpoints of a single dimension, or to drop dimensions, or to reorder them as desired. - /// - /// The list of dimension indexes. - /// A new cube. 
- /// - /// - /// var cube = new NpgsqlCube(new[] { 1, 3, 5 }, new[] { 6, 7, 8 }); // '(1,3,5),(6,7,8)' - /// cube.ToSubset(1); // '(3),(7)' - /// cube.ToSubset(2, 1, 0, 0); // '(5,3,1,1),(8,7,6,6)' - /// - /// - public NpgsqlCube ToSubset(params int[] indexes) - { - var lowerLeft = new double[indexes.Length]; - var upperRight = new double[indexes.Length]; + return new NpgsqlCube(lowerLeft, upperRight); + } - for (var i = 0; i < indexes.Length; i++) - { - lowerLeft[i] = _lowerLeft[indexes[i]]; - upperRight[i] = _upperRight[indexes[i]]; - } + /// + public bool Equals(NpgsqlCube other) => Dimensions == other.Dimensions + && _lowerLeft.SequenceEqual(other._lowerLeft) + && _upperRight.SequenceEqual(other._upperRight); - return new NpgsqlCube(lowerLeft, upperRight); - } + /// + public override bool Equals(object? obj) => obj is NpgsqlCube other && Equals(other); - /// - public bool Equals(NpgsqlCube other) => Dimensions == other.Dimensions - && _lowerLeft.SequenceEqual(other._lowerLeft) - && _upperRight.SequenceEqual(other._upperRight); + /// + public static bool operator ==(NpgsqlCube x, NpgsqlCube y) => x.Equals(y); - /// - public override bool Equals(object? obj) => obj is NpgsqlCube other && Equals(other); + /// + public static bool operator !=(NpgsqlCube x, NpgsqlCube y) => !(x == y); - /// - public static bool operator ==(NpgsqlCube x, NpgsqlCube y) => x.Equals(y); + /// + public override int GetHashCode() + { + var hashCode = new HashCode(); + for (var i = 0; i < Dimensions; i++) + { + hashCode.Add(_lowerLeft[i]); + hashCode.Add(_upperRight[i]); + } + return hashCode.ToHashCode(); + } - /// - public static bool operator !=(NpgsqlCube x, NpgsqlCube y) => !(x == y); + /// + /// Writes the cube in PostgreSQL's text format. 
+ /// + void Write(StringBuilder stringBuilder) + { + var leftBuilder = new StringBuilder(); + var rightBuilder = new StringBuilder(); - /// - public override int GetHashCode() + leftBuilder.Append('('); + rightBuilder.Append('('); + + for (var i = 0; i < Dimensions; i++) { - var hashCode = new HashCode(); - for (var i = 0; i < Dimensions; i++) - { - hashCode.Add(_lowerLeft[i]); - hashCode.Add(_upperRight[i]); - } - return hashCode.ToHashCode(); + leftBuilder.Append(_lowerLeft[i].ToString("G17", CultureInfo.InvariantCulture)); + rightBuilder.Append(_upperRight[i].ToString("G17", CultureInfo.InvariantCulture)); + + if (i >= Dimensions - 1) continue; + + leftBuilder.Append(", "); + rightBuilder.Append(", "); } - /// - /// Writes the cube in PostgreSQL's text format. - /// - void Write(StringBuilder stringBuilder) + leftBuilder.Append(')'); + rightBuilder.Append(')'); + + if (IsPoint) { - var leftBuilder = new StringBuilder(); - var rightBuilder = new StringBuilder(); - - leftBuilder.Append('('); - rightBuilder.Append('('); - - for (var i = 0; i < Dimensions; i++) - { - leftBuilder.Append(_lowerLeft[i]); - rightBuilder.Append(_upperRight[i]); - - if (i >= Dimensions - 1) continue; - - leftBuilder.Append(", "); - rightBuilder.Append(", "); - } - - leftBuilder.Append(')'); - rightBuilder.Append(')'); - - if (IsPoint) - { - stringBuilder.Append(leftBuilder); - } - else - { - stringBuilder.Append(leftBuilder); - stringBuilder.Append(','); - stringBuilder.Append(rightBuilder); - } + stringBuilder.Append(leftBuilder); } - - /// - /// Writes the cube in PostgreSQL's text format. - /// - public override string ToString() + else { - var sb = new StringBuilder(); - Write(sb); - return sb.ToString(); + stringBuilder.Append(leftBuilder); + stringBuilder.Append(','); + stringBuilder.Append(rightBuilder); } } + + /// + /// Writes the cube in PostgreSQL's text format. 
+ /// + public override string ToString() + { + var sb = new StringBuilder(); + Write(sb); + return sb.ToString(); + } } From f9ac0b7358ad2cfb0d4c8b1a3a20032b517fed65 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Sat, 8 Nov 2025 17:13:26 +0100 Subject: [PATCH 620/761] Remove intermediate string allocations from NpgsqlCube.Write --- src/Npgsql/NpgsqlTypes/NpgsqlCube.cs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlCube.cs b/src/Npgsql/NpgsqlTypes/NpgsqlCube.cs index 24a1ea38b4..b84953c483 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlCube.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlCube.cs @@ -215,8 +215,8 @@ void Write(StringBuilder stringBuilder) for (var i = 0; i < Dimensions; i++) { - leftBuilder.Append(_lowerLeft[i].ToString("G17", CultureInfo.InvariantCulture)); - rightBuilder.Append(_upperRight[i].ToString("G17", CultureInfo.InvariantCulture)); + leftBuilder.Append(CultureInfo.InvariantCulture, $"{_lowerLeft[i]:G17}"); + rightBuilder.Append(CultureInfo.InvariantCulture, $"{_upperRight[i]:G17}"); if (i >= Dimensions - 1) continue; From b3f4f3efeb9d1b7385a759138b4259c4cfa9a252 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Sat, 8 Nov 2025 18:10:23 +0100 Subject: [PATCH 621/761] Include .NET 10.0 for the main project and all tests (#6300) --- .github/workflows/native-aot.yml | 12 ++++++------ Directory.Packages.props | 1 - src/Npgsql/Npgsql.csproj | 2 +- test/Directory.Build.props | 2 +- test/MStatDumper/MStatDumper.csproj | 2 -- test/Npgsql.Tests/Npgsql.Tests.csproj | 1 - 6 files changed, 8 insertions(+), 12 deletions(-) diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index ef6d7b96b5..3cb3ab007a 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -88,8 +88,8 @@ jobs: fail-fast: false matrix: os: [ ubuntu-24.04 ] - pg_major: [ 15 ] - tfm: [ net9.0 ] + pg_major: [ 18 ] + tfm: [ net10.0 ] steps: - name: Checkout @@ -122,8 +122,8 @@ jobs: fail-fast: 
false matrix: os: [ubuntu-24.04] - pg_major: [15] - tfm: [ net9.0 ] + pg_major: [ 18 ] + tfm: [ net10.0 ] steps: - name: Checkout @@ -163,11 +163,11 @@ jobs: - name: Write binary size to summary run: | - size="$(ls -l test/Npgsql.NativeAotTests/bin/Release/net9.0/linux-x64/native/Npgsql.NativeAotTests | cut -d ' ' -f 5)" + size="$(ls -l test/Npgsql.NativeAotTests/bin/Release/${{ matrix.tfm }}/linux-x64/native/Npgsql.NativeAotTests | cut -d ' ' -f 5)" echo "Binary size is $size bytes ($((size / (1024 * 1024))) mb)" >> $GITHUB_STEP_SUMMARY - name: Dump mstat - run: dotnet run --project test/MStatDumper/MStatDumper.csproj -c release -f ${{ matrix.tfm }} -- "test/Npgsql.NativeAotTests/obj/Release/net9.0/linux-x64/native/Npgsql.NativeAotTests.mstat" md >> $GITHUB_STEP_SUMMARY + run: dotnet run --project test/MStatDumper/MStatDumper.csproj -c release -f ${{ matrix.tfm }} -- "test/Npgsql.NativeAotTests/obj/Release/${{ matrix.tfm }}/linux-x64/native/Npgsql.NativeAotTests.mstat" md >> $GITHUB_STEP_SUMMARY - name: Upload mstat uses: actions/upload-artifact@v5 diff --git a/Directory.Packages.props b/Directory.Packages.props index 758397b7cd..6864076aef 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -26,7 +26,6 @@ - diff --git a/src/Npgsql/Npgsql.csproj b/src/Npgsql/Npgsql.csproj index 80c42ba561..5273736ceb 100644 --- a/src/Npgsql/Npgsql.csproj +++ b/src/Npgsql/Npgsql.csproj @@ -5,7 +5,7 @@ Npgsql is the open source .NET data provider for PostgreSQL. 
npgsql;postgresql;postgres;ado;ado.net;database;sql README.md - net8.0;net9.0 + net8.0;net9.0;net10.0 $(NoWarn);CA2017 $(NoWarn);NPG9001 $(NoWarn);NPG9002 diff --git a/test/Directory.Build.props b/test/Directory.Build.props index 1e2132817e..0c3bd8dba0 100644 --- a/test/Directory.Build.props +++ b/test/Directory.Build.props @@ -2,7 +2,7 @@ - net8.0;net9.0 + net8.0;net10.0 false diff --git a/test/MStatDumper/MStatDumper.csproj b/test/MStatDumper/MStatDumper.csproj index 456bd1f3b9..6405431678 100644 --- a/test/MStatDumper/MStatDumper.csproj +++ b/test/MStatDumper/MStatDumper.csproj @@ -2,8 +2,6 @@ Exe - - net9.0 enable disable diff --git a/test/Npgsql.Tests/Npgsql.Tests.csproj b/test/Npgsql.Tests/Npgsql.Tests.csproj index 1c300f8215..d2da055a6e 100644 --- a/test/Npgsql.Tests/Npgsql.Tests.csproj +++ b/test/Npgsql.Tests/Npgsql.Tests.csproj @@ -3,7 +3,6 @@ - all From 6714f65997fc50410cf82f3e82a201bf58b0014b Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Sun, 9 Nov 2025 14:07:10 +0100 Subject: [PATCH 622/761] Prioritize _dataTypeName over _npgsqlDbType (#6299) --- src/Npgsql/NpgsqlParameter.cs | 8 ++++---- test/Npgsql.Tests/NpgsqlParameterTests.cs | 20 ++++++++++++++++++++ 2 files changed, 24 insertions(+), 4 deletions(-) diff --git a/src/Npgsql/NpgsqlParameter.cs b/src/Npgsql/NpgsqlParameter.cs index ba2d840925..befef4cef2 100644 --- a/src/Npgsql/NpgsqlParameter.cs +++ b/src/Npgsql/NpgsqlParameter.cs @@ -533,10 +533,10 @@ internal void ResolveTypeInfo(PgSerializerOptions options) if (!previouslyResolved) { var dataTypeName = - _npgsqlDbType is { } npgsqlDbType - ? npgsqlDbType.ToDataTypeName() ?? npgsqlDbType.ToUnqualifiedDataTypeNameOrThrow() - : _dataTypeName is not null - ? Internal.Postgres.DataTypeName.NormalizeName(_dataTypeName) + _dataTypeName is not null + ? Internal.Postgres.DataTypeName.NormalizeName(_dataTypeName) + : _npgsqlDbType is { } npgsqlDbType + ? npgsqlDbType.ToDataTypeName() ?? npgsqlDbType.ToUnqualifiedDataTypeNameOrThrow() : null; PgTypeId? 
pgTypeId = null; diff --git a/test/Npgsql.Tests/NpgsqlParameterTests.cs b/test/Npgsql.Tests/NpgsqlParameterTests.cs index 4965491c82..cab1047015 100644 --- a/test/Npgsql.Tests/NpgsqlParameterTests.cs +++ b/test/Npgsql.Tests/NpgsqlParameterTests.cs @@ -750,6 +750,26 @@ public void Changing_value_type_reresolves([Values]bool generic) Assert.That(thirdTypeInfo, Is.Not.SameAs(typeInfo)); } + [Test] + public void DataTypeName_prioritized_over_NpgsqlDbType([Values]bool generic) + { + var param = generic ? new NpgsqlParameter + { + NpgsqlDbType = NpgsqlDbType.Integer, + DataTypeName = "text", + Value = "value" + } : new NpgsqlParameter + { + NpgsqlDbType = NpgsqlDbType.Integer, + DataTypeName = "text", + Value = "value" + }; + param.ResolveTypeInfo(DataSource.SerializerOptions); + param.GetResolutionInfo(out var typeInfo, out _, out _); + Assert.That(typeInfo, Is.Not.Null); + Assert.That(typeInfo.PgTypeId, Is.EqualTo(DataSource.SerializerOptions.TextPgTypeId)); + } + #if NeedsPorting [Test] [Category ("NotWorking")] From 82a849fd163df617ad791c140e3ab0de6be3ea77 Mon Sep 17 00:00:00 2001 From: lfpraca <134505289+lfpraca@users.noreply.github.com> Date: Sun, 9 Nov 2025 14:56:31 -0300 Subject: [PATCH 623/761] Do not parse batch commands with no parameters (#6298) --- src/Npgsql/NpgsqlCommand.cs | 5 +++++ test/Npgsql.Tests/BatchTests.cs | 20 +++++++++++++++++++- 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index 8f6816b657..41f2198a27 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -911,6 +911,11 @@ internal void ProcessRawQuery(SqlQueryParser? 
parser, bool standardConformingStr break; case PlaceholderType.NoParameters: + if (batchCommand is not null) + { + batchCommand.FinalCommandText = batchCommand.CommandText; + break; + } // Unless the EnableSqlRewriting AppContext switch is explicitly disabled, queries with no parameters are parsed just // like queries with named parameters, since they may contain a semicolon (legacy batching). if (EnableSqlRewriting) diff --git a/test/Npgsql.Tests/BatchTests.cs b/test/Npgsql.Tests/BatchTests.cs index 837ace48fd..d1df99faca 100644 --- a/test/Npgsql.Tests/BatchTests.cs +++ b/test/Npgsql.Tests/BatchTests.cs @@ -669,7 +669,7 @@ public async Task Empty_batch() } [Test] - public async Task Semicolon_is_not_allowed() + public async Task Semicolon_is_not_allowed_with_no_parameters() { await using var conn = await OpenConnectionAsync(); await using var batch = new NpgsqlBatch(conn) @@ -677,6 +677,24 @@ public async Task Semicolon_is_not_allowed() BatchCommands = { new("SELECT 1; SELECT 2") } }; + Assert.That(() => batch.ExecuteReaderAsync(Behavior), Throws.Exception.TypeOf()); + } + + [Test] + public async Task Semicolon_is_not_allowed_with_named_parameters() + { + await using var conn = await OpenConnectionAsync(); + await using var batch = new NpgsqlBatch(conn) + { + BatchCommands = + { + new("SELECT @p1; SELECT 2") + { + Parameters = { new("p1", 1) } + } + } + }; + Assert.That(() => batch.ExecuteReaderAsync(Behavior), Throws.Exception.TypeOf()); } From ea5a2e189a71fee7340684d9ba51ba8a38d8a35f Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Mon, 10 Nov 2025 14:01:42 +0100 Subject: [PATCH 624/761] Improve output parameter handling (#5645) Closes #2252 --- src/Npgsql/NpgsqlBatchCommand.cs | 109 ++++++++++++++++++++++++++++++ src/Npgsql/NpgsqlDataReader.cs | 79 ++++++++-------------- src/Npgsql/NpgsqlParameter.cs | 10 +++ src/Npgsql/NpgsqlParameter`.cs | 3 + test/Npgsql.Tests/CommandTests.cs | 46 ++++++++++++- 5 files changed, 194 insertions(+), 53 deletions(-) diff --git 
a/src/Npgsql/NpgsqlBatchCommand.cs b/src/Npgsql/NpgsqlBatchCommand.cs index c1cfa3ef87..17cec381b2 100644 --- a/src/Npgsql/NpgsqlBatchCommand.cs +++ b/src/Npgsql/NpgsqlBatchCommand.cs @@ -1,10 +1,12 @@ using System; +using System.Buffers; using System.Collections.Generic; using System.Data; using System.Data.Common; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.Runtime.CompilerServices; +using Microsoft.Extensions.Logging; using Npgsql.BackendMessages; using Npgsql.Internal; @@ -41,6 +43,7 @@ public override string CommandText /// public new NpgsqlParameterCollection Parameters => _parameters ??= []; + internal bool HasOutputParameters => _parameters?.HasOutputParameters == true; /// public override NpgsqlParameter CreateParameter() => new(); @@ -273,6 +276,112 @@ internal void ApplyCommandComplete(CommandCompleteMessage msg) internal void ResetPreparation() => ConnectorPreparedOn = null; + internal void PopulateOutputParameters(NpgsqlDataReader reader, ILogger logger) + { + Debug.Assert(_parameters is not null); + var parameters = _parameters; + var fieldCount = reader.FieldCount; + switch (parameters.PlaceholderType) + { + case PlaceholderType.Mixed: + case PlaceholderType.Named: + { + // In the case of named and mixed parameters we first try to populate all parameters with a named column match. + // For backwards compat we allow populating named parameters as long as they haven't been filled yet. + // So for every column that we couldn't match by name we fill the first output direction parameter that wasn't filled previously. + // This means a row like {"a" => 1, "some_field" => 2} will populate the following output db params {"a" => 1, "b" => 2}. + // And a row like {"some_field" => 1, "a" => 2} will populate them as follows {"a" => 2, "b" => 1}. 
+ + var parameterIndices = new ArraySegment(ArrayPool.Shared.Rent(fieldCount), 0, fieldCount); + var secondPassOrdinal = -1; + for (var ordinal = 0; ordinal < fieldCount; ordinal++) + { + var name = reader.GetName(ordinal); + var i = parameters.IndexOf(name); + if (i is not -1 && parameters[i] is { IsOutputDirection: true } parameter) + { + SetValue(reader, logger, parameter, ordinal, i); + parameterIndices[ordinal] = i; + } + else + { + parameterIndices[ordinal] = -1; + if (secondPassOrdinal is -1) + secondPassOrdinal = ordinal; + } + } + + if (secondPassOrdinal is -1) + { + ArrayPool.Shared.Return(parameterIndices.Array!); + break; + } + + // This set will also contain -1, but that's not a valid index so we can ignore it is included. + var matchedParameters = new HashSet(parameterIndices); + var parameterList = parameters.InternalList; + for (var i = 0; i < parameterList.Count; i++) + { + // Find an output parameter that wasn't matched by name. + if (parameterList[i] is not { IsOutputDirection: true } parameter || matchedParameters.Contains(i)) + continue; + + SetValue(reader, logger, parameter, secondPassOrdinal, i); + + // And find the next unhandled ordinal. 
+ secondPassOrdinal = NextSecondPassOrdinal(parameterIndices, secondPassOrdinal); + if (secondPassOrdinal is -1) + break; + } + + ArrayPool.Shared.Return(parameterIndices.Array!); + break; + + static int NextSecondPassOrdinal(ArraySegment indices, int offset) + { + for (var i = offset + 1; i < indices.Count; i++) + { + if (indices[i] is -1) + return i; + } + + return -1; + } + } + case PlaceholderType.Positional: + { + var parameterList = parameters.InternalList; + var ordinal = 0; + for (var i = 0; i < parameterList.Count; i++) + { + if (parameterList[i] is not { IsOutputDirection: true } parameter) + continue; + + SetValue(reader, logger, parameter, ordinal, i); + + ordinal++; + if (ordinal == fieldCount) + break; + } + break; + } + } + + static void SetValue(NpgsqlDataReader reader, ILogger logger, NpgsqlParameter p, int ordinal, int index) + { + try + { + p.SetOutputValue(reader, ordinal); + } + catch (Exception ex) + { + logger.LogDebug(ex, "Failed to set value on output parameter instance '{ParameterNameOrIndex}' for output parameter {OutputName}", + p.ParameterName is NpgsqlParameter.PositionalName ? index : p.ParameterName, reader.GetName(ordinal)); + throw; + } + } + } + /// /// Returns the . 
/// diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 585536da18..2421e7e5ca 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -459,18 +459,44 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo continue; } - if ((Command.WrappingBatch is not null || StatementIndex is 0) && Command.InternalBatchCommands[StatementIndex]._parameters?.HasOutputParameters == true) + if ((Command.WrappingBatch is not null || StatementIndex is 0) && Command.InternalBatchCommands[StatementIndex] is { HasOutputParameters: true } command) { // If output parameters are present and this is the first row of the resultset, // we must always read it in non-sequential mode because it will be traversed twice (once // here for the parameters, then as a regular row). - msg = await Connector.ReadMessage(async).ConfigureAwait(false); + msg = await Connector.ReadMessage(async, dataRowLoadingMode: DataRowLoadingMode.NonSequential).ConfigureAwait(false); ProcessMessage(msg); if (msg.Code == BackendMessageCode.DataRow) { + Debug.Assert(RowDescription != null); + Debug.Assert(State == ReaderState.BeforeResult); + try { - PopulateOutputParameters(Command.InternalBatchCommands[StatementIndex]._parameters!); + // Temporarily set our state to InResult and non-sequential to allow us to read the values, and in any order. + var isSequential = _isSequential; + var currentPosition = Buffer.ReadPosition; + State = ReaderState.InResult; + _isSequential = false; + try + { + command.PopulateOutputParameters(this, _commandLogger); + + // On success we want to revert any row and column state for the user to be able to read the same row again. 
+ if (async) + await PgReader.CommitAsync().ConfigureAwait(false); + else + PgReader.Commit(); + + State = ReaderState.BeforeResult; // Set the state back + Buffer.ReadPosition = currentPosition; // Restore position + _column = -1; + } + finally + { + // To be on the safe side we always revert this CommandBehavior state change, including on failure. + _isSequential = isSequential; + } } catch (Exception e) { @@ -612,53 +638,6 @@ async ValueTask ConsumeResultSet(bool async) } } - - void PopulateOutputParameters(NpgsqlParameterCollection parameters) - { - // The first row in a stored procedure command that has output parameters needs to be traversed twice - - // once for populating the output parameters and once for the actual result set traversal. So in this - // case we can't be sequential. - Debug.Assert(RowDescription != null); - Debug.Assert(State == ReaderState.BeforeResult); - - var currentPosition = Buffer.ReadPosition; - - // Temporarily set our state to InResult to allow us to read the values - State = ReaderState.InResult; - - var pending = new Queue(); - var taken = new List(); - for (var i = 0; i < ColumnCount; i++) - { - if (parameters.TryGetValue(GetName(i), out var p) && p.IsOutputDirection) - { - p.Value = GetValue(i); - taken.Add(p); - } - else - pending.Enqueue(GetValue(i)); - } - - // Not sure where this odd behavior comes from: all output parameters which did not get matched by - // name now get populated with column values which weren't matched. Keeping this for backwards compat, - // opened #2252 for investigation. 
- foreach (var p in (IEnumerable)parameters) - { - if (!p.IsOutputDirection || taken.Contains(p)) - continue; - - if (pending.Count == 0) - break; - p.Value = pending.Dequeue(); - } - - PgReader.Commit(); - State = ReaderState.BeforeResult; // Set the state back - Buffer.ReadPosition = currentPosition; // Restore position - - _column = -1; - } - /// /// Note that in SchemaOnly mode there are no resultsets, and we read nothing from the backend (all /// RowDescriptions have already been processed and are available) diff --git a/src/Npgsql/NpgsqlParameter.cs b/src/Npgsql/NpgsqlParameter.cs index befef4cef2..ca7ec17cbb 100644 --- a/src/Npgsql/NpgsqlParameter.cs +++ b/src/Npgsql/NpgsqlParameter.cs @@ -497,6 +497,16 @@ public sealed override string SourceColumn Type? GetValueType(Type staticValueType) => staticValueType != typeof(object) ? staticValueType : Value?.GetType(); + internal void SetOutputValue(NpgsqlDataReader reader, int ordinal) + { + if (GetType() == typeof(NpgsqlParameter)) + Value = reader.GetValue(ordinal); + else + SetOutputValueCore(reader, ordinal); + } + + private protected virtual void SetOutputValueCore(NpgsqlDataReader reader, int ordinal) {} + internal bool ShouldResetObjectTypeInfo(object? 
value) { var currentType = TypeInfo?.Type; diff --git a/src/Npgsql/NpgsqlParameter`.cs b/src/Npgsql/NpgsqlParameter`.cs index e50618a510..d353cdce45 100644 --- a/src/Npgsql/NpgsqlParameter`.cs +++ b/src/Npgsql/NpgsqlParameter`.cs @@ -81,6 +81,9 @@ public NpgsqlParameter(string parameterName, DbType dbType) #endregion Constructors + private protected override void SetOutputValueCore(NpgsqlDataReader reader, int ordinal) + => TypedValue = reader.GetFieldValue(ordinal); + private protected override PgConverterResolution ResolveConverter(PgTypeInfo typeInfo) { if (typeof(T) == typeof(object) || TypeInfo!.IsBoxing) diff --git a/test/Npgsql.Tests/CommandTests.cs b/test/Npgsql.Tests/CommandTests.cs index a5fb272851..584a3cc433 100644 --- a/test/Npgsql.Tests/CommandTests.cs +++ b/test/Npgsql.Tests/CommandTests.cs @@ -741,14 +741,14 @@ public async Task Cached_command_clears_parameters_placeholder_type() public async Task Statement_mapped_output_parameters(CommandBehavior behavior) { await using var conn = await OpenConnectionAsync(); - var command = new NpgsqlCommand("select 3, 4 as param1, 5 as param2, 6;", conn); + var command = new NpgsqlCommand("select 3 as unknown, 4 as param1, 5 as param2, 6;", conn); - var p = new NpgsqlParameter("param2", NpgsqlDbType.Integer); + var p = new NpgsqlParameter("param1", NpgsqlDbType.Integer); p.Direction = ParameterDirection.Output; p.Value = -1; command.Parameters.Add(p); - p = new NpgsqlParameter("param1", NpgsqlDbType.Integer); + p = new NpgsqlParameter("param2", NpgsqlDbType.Integer); p.Direction = ParameterDirection.Output; p.Value = -1; command.Parameters.Add(p); @@ -760,6 +760,7 @@ public async Task Statement_mapped_output_parameters(CommandBehavior behavior) await using var reader = await command.ExecuteReaderAsync(behavior); + Assert.That(command.Parameters["p"].Value, Is.EqualTo(3)); Assert.That(command.Parameters["param1"].Value, Is.EqualTo(4)); Assert.That(command.Parameters["param2"].Value, Is.EqualTo(5)); @@ -771,6 
+772,45 @@ public async Task Statement_mapped_output_parameters(CommandBehavior behavior) Assert.That(reader.GetInt32(3), Is.EqualTo(6)); } + + [Test] + [TestCase(CommandBehavior.Default)] + [TestCase(CommandBehavior.SequentialAccess)] + public async Task Statement_mapped_generic_output_parameters(CommandBehavior behavior) + { + await using var conn = await OpenConnectionAsync(); + var command = new NpgsqlCommand("select '' as unknown, 4 as param1, 5 as param2, 6;", conn); + + var p = new NpgsqlParameter("param1", NpgsqlDbType.Integer); + p.Direction = ParameterDirection.Output; + p.Value = -1; + command.Parameters.Add(p); + + p = new NpgsqlParameter("param2", NpgsqlDbType.Integer); + p.Direction = ParameterDirection.Output; + p.Value = -1; + command.Parameters.Add(p); + + // char[] is one alternative mapping for text. + var textP = new NpgsqlParameter("p", NpgsqlDbType.Text); + textP.Direction = ParameterDirection.Output; + textP.Value = "text".ToCharArray(); + command.Parameters.Add(textP); + + await using var reader = await command.ExecuteReaderAsync(behavior); + + Assert.That(command.Parameters["p"].Value, Is.EquivalentTo(Array.Empty())); + Assert.That(command.Parameters["param1"].Value, Is.EqualTo(4)); + Assert.That(command.Parameters["param2"].Value, Is.EqualTo(5)); + + reader.Read(); + + Assert.That(reader.GetFieldValue(0), Is.EquivalentTo(Array.Empty())); + Assert.That(reader.GetInt32(1), Is.EqualTo(4)); + Assert.That(reader.GetInt32(2), Is.EqualTo(5)); + Assert.That(reader.GetInt32(3), Is.EqualTo(6)); + } + [Test] public async Task Bug1006158_output_parameters() { From 53030fb432b246c37d9ed0c85d6102f509f5ea75 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Mon, 10 Nov 2025 23:55:10 +0100 Subject: [PATCH 625/761] Check pgTypeId for default resolution call in ObjectConverter (#6304) --- src/Npgsql/Internal/Converters/ObjectConverter.cs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/Npgsql/Internal/Converters/ObjectConverter.cs 
b/src/Npgsql/Internal/Converters/ObjectConverter.cs index 394f7a8a4c..4889c60fad 100644 --- a/src/Npgsql/Internal/Converters/ObjectConverter.cs +++ b/src/Npgsql/Internal/Converters/ObjectConverter.cs @@ -1,4 +1,5 @@ using System; +using System.Diagnostics; using System.Threading; using System.Threading.Tasks; using Npgsql.Internal.Postgres; @@ -37,6 +38,7 @@ public override Size GetSize(SizeContext context, object value, ref object? writ // We can call GetDefaultResolution here as validation has already happened in IsDbNullValue. // And we know it was called due to the writeState being filled. + Debug.Assert(typeInfo.PgTypeId is not null); var converter = typeInfo is PgResolverTypeInfo resolverTypeInfo ? resolverTypeInfo.GetDefaultResolution(null).Converter : typeInfo.GetResolution().Converter; @@ -79,6 +81,7 @@ async ValueTask Write(bool async, PgWriter writer, object value, CancellationTok // We can call GetDefaultResolution here as validation has already happened in IsDbNullValue. // And we know it was called due to the writeState being filled. + Debug.Assert(typeInfo.PgTypeId is not null); var converter = typeInfo is PgResolverTypeInfo resolverTypeInfo ? 
resolverTypeInfo.GetDefaultResolution(null).Converter : typeInfo.GetResolution().Converter; From dcbb5c29964ba07bd7268fe2ec9d73aa64066317 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Tue, 11 Nov 2025 21:13:36 +0100 Subject: [PATCH 626/761] Move all reloadable state into one reference (#6303) --- src/Npgsql/Internal/NpgsqlConnector.cs | 12 +++--- src/Npgsql/NpgsqlCommand.cs | 18 +++++---- src/Npgsql/NpgsqlDataSource.cs | 47 ++++++++++++++--------- src/Npgsql/NpgsqlParameterCollection.cs | 5 +-- src/Npgsql/PoolingDataSource.cs | 8 +--- test/Npgsql.Benchmarks/ResolveHandler.cs | 2 +- test/Npgsql.Tests/ConnectionTests.cs | 23 +++++------ test/Npgsql.Tests/NpgsqlParameterTests.cs | 16 ++++---- test/Npgsql.Tests/PostgresTypeTests.cs | 2 +- 9 files changed, 70 insertions(+), 63 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 40dcf6e941..bdd82c761f 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -117,12 +117,13 @@ internal string InferredUserName /// internal int Id => BackendProcessId; - internal PgSerializerOptions SerializerOptions { get; set; } = default!; + internal NpgsqlDataSource.ReloadableState ReloadableState = null!; /// /// Information about PostgreSQL and PostgreSQL-like databases (e.g. type definitions, capabilities...). /// - public NpgsqlDatabaseInfo DatabaseInfo { get; internal set; } = default!; + public NpgsqlDatabaseInfo DatabaseInfo => ReloadableState.DatabaseInfo; + internal PgSerializerOptions SerializerOptions => ReloadableState.SerializerOptions; /// /// The current transaction status for this connector. 
@@ -507,10 +508,9 @@ internal async Task Open(NpgsqlTimeout timeout, bool async, CancellationToken ca await DataSource.Bootstrap(this, timeout, forceReload: false, async, cancellationToken).ConfigureAwait(false); - Debug.Assert(DataSource.SerializerOptions is not null); - Debug.Assert(DataSource.DatabaseInfo is not null); - SerializerOptions = DataSource.SerializerOptions; - DatabaseInfo = DataSource.DatabaseInfo; + // The connector directly references the current reloadable state reference, to protect it against changes by a concurrent + // ReloadTypes. We update them here before returning the connector from the pool. + ReloadableState = DataSource.CurrentReloadableState; if (Settings.Pooling && Settings is { Multiplexing: false, NoResetOnClose: false } && DatabaseInfo.SupportsDiscard) { diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index 41f2198a27..5f2cb8832f 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -671,7 +671,7 @@ Task Prepare(bool async, CancellationToken cancellationToken = default) { foreach (var batchCommand in InternalBatchCommands) { - batchCommand._parameters?.ProcessParameters(connector.SerializerOptions, validateValues: false, batchCommand.CommandType); + batchCommand._parameters?.ProcessParameters(connector.ReloadableState, validateValues: false, batchCommand.CommandType); ProcessRawQuery(connector.SqlQueryParser, connector.UseConformingStrings, batchCommand); needToPrepare = batchCommand.ExplicitPrepare(connector) || needToPrepare; @@ -689,7 +689,7 @@ IEnumerable CommandTexts() } else { - _parameters?.ProcessParameters(connector.SerializerOptions, validateValues: false, CommandType); + _parameters?.ProcessParameters(connector.ReloadableState, validateValues: false, CommandType); ProcessRawQuery(connector.SqlQueryParser, connector.UseConformingStrings, batchCommand: null); foreach (var batchCommand in InternalBatchCommands) @@ -1410,6 +1410,7 @@ internal virtual async ValueTask 
ExecuteReader(bool async, Com if (connector is not null) { var logger = connector.CommandLogger; + var reloadableState = connector.ReloadableState; cancellationToken.ThrowIfCancellationRequested(); // We cannot pass a token here, as we'll cancel a non-send query @@ -1440,7 +1441,7 @@ internal virtual async ValueTask ExecuteReader(bool async, Com goto case false; } - batchCommand._parameters?.ProcessParameters(connector.SerializerOptions, validateParameterValues, batchCommand.CommandType); + batchCommand._parameters?.ProcessParameters(reloadableState, validateParameterValues, batchCommand.CommandType); } } else @@ -1453,7 +1454,7 @@ internal virtual async ValueTask ExecuteReader(bool async, Com ResetPreparation(); goto case false; } - _parameters?.ProcessParameters(connector.SerializerOptions, validateParameterValues, CommandType); + _parameters?.ProcessParameters(reloadableState, validateParameterValues, CommandType); } NpgsqlEventSource.Log.CommandStartPrepared(); @@ -1469,7 +1470,7 @@ internal virtual async ValueTask ExecuteReader(bool async, Com { var batchCommand = InternalBatchCommands[i]; - batchCommand._parameters?.ProcessParameters(connector.SerializerOptions, validateParameterValues, batchCommand.CommandType); + batchCommand._parameters?.ProcessParameters(reloadableState, validateParameterValues, batchCommand.CommandType); ProcessRawQuery(connector.SqlQueryParser, connector.UseConformingStrings, batchCommand); if (connector.Settings.MaxAutoPrepare > 0 && batchCommand.TryAutoPrepare(connector)) @@ -1481,7 +1482,7 @@ internal virtual async ValueTask ExecuteReader(bool async, Com } else { - _parameters?.ProcessParameters(connector.SerializerOptions, validateParameterValues, CommandType); + _parameters?.ProcessParameters(reloadableState, validateParameterValues, CommandType); ProcessRawQuery(connector.SqlQueryParser, connector.UseConformingStrings, batchCommand: null); if (connector.Settings.MaxAutoPrepare > 0) @@ -1565,6 +1566,7 @@ internal virtual async 
ValueTask ExecuteReader(bool async, Com // The connection isn't bound to a connector - it's multiplexing time. var dataSource = (MultiplexingDataSource)conn.NpgsqlDataSource; + var reloadableState = dataSource.CurrentReloadableState; if (!async) { @@ -1577,13 +1579,13 @@ internal virtual async ValueTask ExecuteReader(bool async, Com { foreach (var batchCommand in InternalBatchCommands) { - batchCommand._parameters?.ProcessParameters(dataSource.SerializerOptions, validateValues: true, batchCommand.CommandType); + batchCommand._parameters?.ProcessParameters(reloadableState, validateValues: true, batchCommand.CommandType); ProcessRawQuery(null, standardConformingStrings: true, batchCommand); } } else { - _parameters?.ProcessParameters(dataSource.SerializerOptions, validateValues: true, CommandType); + _parameters?.ProcessParameters(reloadableState, validateValues: true, CommandType); ProcessRawQuery(null, standardConformingStrings: true, batchCommand: null); } diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index 080906ca26..51042bb654 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -31,12 +31,19 @@ public abstract class NpgsqlDataSource : DbDataSource internal NpgsqlLoggingConfiguration LoggingConfiguration { get; } readonly PgTypeInfoResolverChain _resolverChain; - internal PgSerializerOptions SerializerOptions { get; private set; } = null!; // Initialized at bootstrapping - /// - /// Information about PostgreSQL and PostgreSQL-like databases (e.g. type definitions, capabilities...). - /// - internal NpgsqlDatabaseInfo DatabaseInfo { get; private set; } = null!; // Initialized at bootstrapping + internal ReloadableState CurrentReloadableState = null!; // Initialized during bootstrapping. 
+ + // Initialized at bootstrapping + internal sealed class ReloadableState(NpgsqlDatabaseInfo databaseInfo, PgSerializerOptions serializerOptions) + { + /// + /// Information about PostgreSQL and PostgreSQL-like databases (e.g. type definitions, capabilities...). + /// + public NpgsqlDatabaseInfo DatabaseInfo { get; } = databaseInfo; + + public PgSerializerOptions SerializerOptions { get; } = serializerOptions; + } internal TransportSecurityHandler TransportSecurityHandler { get; } @@ -105,7 +112,7 @@ internal NpgsqlDataSource( _periodicPasswordProvider, _periodicPasswordSuccessRefreshInterval, _periodicPasswordFailureRefreshInterval, - var resolverChain, + _resolverChain, _defaultNameTranslator, ConnectionInitializer, ConnectionInitializerAsync, @@ -115,7 +122,6 @@ internal NpgsqlDataSource( Debug.Assert(_passwordProvider is null || _passwordProviderAsync is not null); - _resolverChain = resolverChain; _password = settings.Password; if (_periodicPasswordSuccessRefreshInterval != default) @@ -272,27 +278,30 @@ internal async Task Bootstrap( // The type loading below will need to send queries to the database, and that depends on a type mapper being set up (even if its // empty). So we set up a minimal version here, and then later inject the actual DatabaseInfo. 
- connector.SerializerOptions = - new(PostgresMinimalDatabaseInfo.DefaultTypeCatalog) + connector.ReloadableState = new( + databaseInfo: PostgresMinimalDatabaseInfo.DefaultTypeCatalog, + serializerOptions: new(PostgresMinimalDatabaseInfo.DefaultTypeCatalog) { TextEncoding = connector.TextEncoding, TypeInfoResolver = AdoTypeInfoResolverFactory.Instance.CreateResolver(), - }; + }); NpgsqlDatabaseInfo databaseInfo; using (connector.StartUserAction(ConnectorState.Executing, cancellationToken)) databaseInfo = await NpgsqlDatabaseInfo.Load(connector, timeout, async).ConfigureAwait(false); - connector.DatabaseInfo = DatabaseInfo = databaseInfo; - connector.SerializerOptions = SerializerOptions = - new(databaseInfo, _resolverChain, CreateTimeZoneProvider(connector.Timezone)) - { - ArrayNullabilityMode = Settings.ArrayNullabilityMode, - EnableDateTimeInfinityConversions = !Statics.DisableDateTimeInfinityConversions, - TextEncoding = connector.TextEncoding, - DefaultNameTranslator = _defaultNameTranslator - }; + var serializerOptions = new PgSerializerOptions(databaseInfo, _resolverChain, CreateTimeZoneProvider(connector.Timezone)) + { + ArrayNullabilityMode = Settings.ArrayNullabilityMode, + EnableDateTimeInfinityConversions = !Statics.DisableDateTimeInfinityConversions, + TextEncoding = connector.TextEncoding, + DefaultNameTranslator = _defaultNameTranslator + }; + + connector.ReloadableState = CurrentReloadableState = new ReloadableState( + databaseInfo: databaseInfo, + serializerOptions: serializerOptions); IsBootstrapped = true; } diff --git a/src/Npgsql/NpgsqlParameterCollection.cs b/src/Npgsql/NpgsqlParameterCollection.cs index 106f681f0b..17e7fb7969 100644 --- a/src/Npgsql/NpgsqlParameterCollection.cs +++ b/src/Npgsql/NpgsqlParameterCollection.cs @@ -5,7 +5,6 @@ using System.Data.Common; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; -using Npgsql.Internal; using NpgsqlTypes; namespace Npgsql; @@ -668,7 +667,7 @@ internal void 
CloneTo(NpgsqlParameterCollection other) } } - internal void ProcessParameters(PgSerializerOptions options, bool validateValues, CommandType commandType) + internal void ProcessParameters(NpgsqlDataSource.ReloadableState reloadableState, bool validateValues, CommandType commandType) { HasOutputParameters = false; PlaceholderType = PlaceholderType.NoParameters; @@ -725,7 +724,7 @@ internal void ProcessParameters(PgSerializerOptions options, bool validateValues break; } - p.ResolveTypeInfo(options); + p.ResolveTypeInfo(reloadableState.SerializerOptions); if (validateValues) { diff --git a/src/Npgsql/PoolingDataSource.cs b/src/Npgsql/PoolingDataSource.cs index a6c63494a8..b2e96d0d4c 100644 --- a/src/Npgsql/PoolingDataSource.cs +++ b/src/Npgsql/PoolingDataSource.cs @@ -5,7 +5,6 @@ using System.Threading; using System.Threading.Channels; using System.Threading.Tasks; -using System.Transactions; using Microsoft.Extensions.Logging; using Npgsql.Internal; using Npgsql.Util; @@ -239,12 +238,9 @@ bool CheckIdleConnector([NotNullWhen(true)] NpgsqlConnector? connector) return false; } - // The connector directly references the data source type mapper into the connector, to protect it against changes by a concurrent + // The connector directly references the current reloadable state reference, to protect it against changes by a concurrent // ReloadTypes. We update them here before returning the connector from the pool. 
- Debug.Assert(SerializerOptions is not null); - Debug.Assert(DatabaseInfo is not null); - connector.SerializerOptions = SerializerOptions; - connector.DatabaseInfo = DatabaseInfo; + connector.ReloadableState = CurrentReloadableState; Debug.Assert(connector.State == ConnectorState.Ready, $"Got idle connector but {nameof(connector.State)} is {connector.State}"); diff --git a/test/Npgsql.Benchmarks/ResolveHandler.cs b/test/Npgsql.Benchmarks/ResolveHandler.cs index e082b81c4e..ead3a547ed 100644 --- a/test/Npgsql.Benchmarks/ResolveHandler.cs +++ b/test/Npgsql.Benchmarks/ResolveHandler.cs @@ -22,7 +22,7 @@ public void Setup() if (NumPlugins > 1) dataSourceBuilder.UseNetTopologySuite(); _dataSource = dataSourceBuilder.Build(); - _serializerOptions = _dataSource.SerializerOptions; + _serializerOptions = _dataSource.CurrentReloadableState.SerializerOptions; } [GlobalCleanup] diff --git a/test/Npgsql.Tests/ConnectionTests.cs b/test/Npgsql.Tests/ConnectionTests.cs index 7ef94350fd..e64b3ba982 100644 --- a/test/Npgsql.Tests/ConnectionTests.cs +++ b/test/Npgsql.Tests/ConnectionTests.cs @@ -431,7 +431,7 @@ public async Task Timezone_connection_param() public async Task Application_name_env_var() { const string testAppName = "MyTestApp"; - + // Note that the pool is unaware of the environment variable, so if a connection is // returned from the pool it may contain the wrong application name using var _ = SetEnvironmentVariable("PGAPPNAME", testAppName); @@ -444,7 +444,7 @@ public async Task Application_name_env_var() public async Task Application_name_connection_param() { const string testAppName = "MyTestApp2"; - + await using var dataSource = CreateDataSource(csb => csb.ApplicationName = testAppName); await using var conn = await dataSource.OpenConnectionAsync(); Assert.That(conn.PostgresParameters["application_name"], Is.EqualTo(testAppName)); @@ -456,7 +456,7 @@ public async Task Application_name_connection_param_overrides_env_var() { const string envAppName = "EnvApp"; 
const string connAppName = "ConnApp"; - + using var _ = SetEnvironmentVariable("PGAPPNAME", envAppName); await using var dataSource = CreateDataSource(csb => csb.ApplicationName = connAppName); await using var conn = await dataSource.OpenConnectionAsync(); @@ -774,25 +774,26 @@ public async Task Set_Schemas_And_Load_Relevant_Types(string testSchema, string }); }); using var conn = await dataSource.OpenConnectionAsync(); + var databaseInfo = dataSource.CurrentReloadableState.DatabaseInfo; if (enabled) { - Assert.That(dataSource.DatabaseInfo.CompositeTypes.Any(x => x.Name == "test_type_1")); + Assert.That(databaseInfo.CompositeTypes.Any(x => x.Name == "test_type_1")); if (testSchema == "public" || otherSchema == "public") { - Assert.That(dataSource.DatabaseInfo.CompositeTypes.Any(x => x.Name == "test_type_2")); - Assert.That(dataSource.DatabaseInfo.CompositeTypes.Any(x => x.Name == "test_type_3")); + Assert.That(databaseInfo.CompositeTypes.Any(x => x.Name == "test_type_2")); + Assert.That(databaseInfo.CompositeTypes.Any(x => x.Name == "test_type_3")); } else { - Assert.That(dataSource.DatabaseInfo.CompositeTypes.Any(x => x.Name == "test_type_2")); - Assert.That(dataSource.DatabaseInfo.CompositeTypes.Any(x => x.Name == "test_type_3"), Is.False); + Assert.That(databaseInfo.CompositeTypes.Any(x => x.Name == "test_type_2")); + Assert.That(databaseInfo.CompositeTypes.Any(x => x.Name == "test_type_3"), Is.False); } } else { - Assert.That(dataSource.DatabaseInfo.CompositeTypes.Any(x => x.Name == "test_type_1")); - Assert.That(dataSource.DatabaseInfo.CompositeTypes.Any(x => x.Name == "test_type_2")); - Assert.That(dataSource.DatabaseInfo.CompositeTypes.Any(x => x.Name == "test_type_3")); + Assert.That(databaseInfo.CompositeTypes.Any(x => x.Name == "test_type_1")); + Assert.That(databaseInfo.CompositeTypes.Any(x => x.Name == "test_type_2")); + Assert.That(databaseInfo.CompositeTypes.Any(x => x.Name == "test_type_3")); } } finally diff --git 
a/test/Npgsql.Tests/NpgsqlParameterTests.cs b/test/Npgsql.Tests/NpgsqlParameterTests.cs index cab1047015..e02031ea9f 100644 --- a/test/Npgsql.Tests/NpgsqlParameterTests.cs +++ b/test/Npgsql.Tests/NpgsqlParameterTests.cs @@ -698,7 +698,7 @@ public void Null_value_with_nullable_type() public void DBNull_reuses_type_info([Values]bool generic) { var param = generic ? new NpgsqlParameter { Value = "value" } : new NpgsqlParameter { Value = "value" }; - param.ResolveTypeInfo(DataSource.SerializerOptions); + param.ResolveTypeInfo(DataSource.CurrentReloadableState.SerializerOptions); param.GetResolutionInfo(out var typeInfo, out _, out _); Assert.That(typeInfo, Is.Not.Null); @@ -708,7 +708,7 @@ public void DBNull_reuses_type_info([Values]bool generic) Assert.That(secondTypeInfo, Is.SameAs(typeInfo)); // Make sure we don't resolve a different type info either. - param.ResolveTypeInfo(DataSource.SerializerOptions); + param.ResolveTypeInfo(DataSource.CurrentReloadableState.SerializerOptions); param.GetResolutionInfo(out var thirdTypeInfo, out _, out _); Assert.That(thirdTypeInfo, Is.SameAs(secondTypeInfo)); } @@ -717,7 +717,7 @@ public void DBNull_reuses_type_info([Values]bool generic) public void DBNull_followed_by_non_null_reresolves([Values]bool generic) { var param = generic ? new NpgsqlParameter { Value = DBNull.Value } : new NpgsqlParameter { Value = DBNull.Value }; - param.ResolveTypeInfo(DataSource.SerializerOptions); + param.ResolveTypeInfo(DataSource.CurrentReloadableState.SerializerOptions); param.GetResolutionInfo(out var typeInfo, out _, out var pgTypeId); Assert.That(typeInfo, Is.Not.Null); Assert.That(pgTypeId.IsUnspecified, Is.True); @@ -727,7 +727,7 @@ public void DBNull_followed_by_non_null_reresolves([Values]bool generic) Assert.That(secondTypeInfo, Is.Null); // Make sure we don't resolve the same type info either. 
- param.ResolveTypeInfo(DataSource.SerializerOptions); + param.ResolveTypeInfo(DataSource.CurrentReloadableState.SerializerOptions); param.GetResolutionInfo(out var thirdTypeInfo, out _, out _); Assert.That(thirdTypeInfo, Is.Not.SameAs(typeInfo)); } @@ -736,7 +736,7 @@ public void DBNull_followed_by_non_null_reresolves([Values]bool generic) public void Changing_value_type_reresolves([Values]bool generic) { var param = generic ? new NpgsqlParameter { Value = "value" } : new NpgsqlParameter { Value = "value" }; - param.ResolveTypeInfo(DataSource.SerializerOptions); + param.ResolveTypeInfo(DataSource.CurrentReloadableState.SerializerOptions); param.GetResolutionInfo(out var typeInfo, out _, out _); Assert.That(typeInfo, Is.Not.Null); @@ -745,7 +745,7 @@ public void Changing_value_type_reresolves([Values]bool generic) Assert.That(secondTypeInfo, Is.Null); // Make sure we don't resolve a different type info either. - param.ResolveTypeInfo(DataSource.SerializerOptions); + param.ResolveTypeInfo(DataSource.CurrentReloadableState.SerializerOptions); param.GetResolutionInfo(out var thirdTypeInfo, out _, out _); Assert.That(thirdTypeInfo, Is.Not.SameAs(typeInfo)); } @@ -764,10 +764,10 @@ public void DataTypeName_prioritized_over_NpgsqlDbType([Values]bool generic) DataTypeName = "text", Value = "value" }; - param.ResolveTypeInfo(DataSource.SerializerOptions); + param.ResolveTypeInfo(DataSource.CurrentReloadableState.SerializerOptions); param.GetResolutionInfo(out var typeInfo, out _, out _); Assert.That(typeInfo, Is.Not.Null); - Assert.That(typeInfo.PgTypeId, Is.EqualTo(DataSource.SerializerOptions.TextPgTypeId)); + Assert.That(typeInfo.PgTypeId, Is.EqualTo(DataSource.CurrentReloadableState.SerializerOptions.TextPgTypeId)); } #if NeedsPorting diff --git a/test/Npgsql.Tests/PostgresTypeTests.cs b/test/Npgsql.Tests/PostgresTypeTests.cs index 056830cf32..7c3945fb0a 100644 --- a/test/Npgsql.Tests/PostgresTypeTests.cs +++ b/test/Npgsql.Tests/PostgresTypeTests.cs @@ -69,6 +69,6 @@ 
public async Task Multirange() async Task GetDatabaseInfo() { await using var conn = await OpenConnectionAsync(); - return conn.NpgsqlDataSource.DatabaseInfo; + return conn.NpgsqlDataSource.CurrentReloadableState.DatabaseInfo; } } From 0ec21969809aa9ae9da313921e4716f619a06c87 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jostein=20Kj=C3=B8nigsen?= Date: Sun, 16 Nov 2025 10:50:35 +0100 Subject: [PATCH 627/761] Upgrade from .NET10-RC2 to .NET10-RTM. (#6311) --- Directory.Build.props | 2 +- Directory.Packages.props | 12 ++++++------ global.json | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Directory.Build.props b/Directory.Build.props index 56ff636631..a7f8f89bff 100644 --- a/Directory.Build.props +++ b/Directory.Build.props @@ -1,6 +1,6 @@  - 10.0.0-rc.2 + 10.0.0-rtm latest true enable diff --git a/Directory.Packages.props b/Directory.Packages.props index 6864076aef..5a72db7721 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -1,11 +1,11 @@ - - + + - + @@ -24,15 +24,15 @@ - - + + - + diff --git a/global.json b/global.json index a08ab85427..6a288505a1 100644 --- a/global.json +++ b/global.json @@ -1,6 +1,6 @@ { "sdk": { - "version": "10.0.100-rc.2.25502.107", + "version": "10.0.100", "rollForward": "latestMajor", "allowPrerelease": false } From 99169989714bf16880958e06c506c68e0401e92a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 Nov 2025 21:11:15 +0000 Subject: [PATCH 628/761] Bump Microsoft.Data.SqlClient from 6.1.2 to 6.1.3 (#6319) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 5a72db7721..fb2dc723fb 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -36,7 +36,7 @@ - + From 712e0f353fb5503138289faac90860933eff96a1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 Nov 
2025 22:19:36 +0100 Subject: [PATCH 629/761] Bump Microsoft.NET.Test.Sdk from 18.0.0 to 18.0.1 (#6320) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index fb2dc723fb..46e8962ffa 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -26,7 +26,7 @@ - + From bddee32c2634b32f9d3e7c8c4cf1220782310bd0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 Nov 2025 21:21:18 +0000 Subject: [PATCH 630/761] Bump OpenTelemetry.Api from 1.13.1 to 1.14.0 (#6321) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 46e8962ffa..2ef94f40a2 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -2,7 +2,7 @@ - + From 63152950a75f0f75adc3c74ce195aa3b91bd830e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 Nov 2025 21:23:03 +0000 Subject: [PATCH 631/761] Bump Scriban.Signed from 6.5.0 to 6.5.1 (#6322) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 2ef94f40a2..f8a9e16cd0 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -19,7 +19,7 @@ - + From fe0f5eedb2cb72c06028b23e4319db7cc5ab2c79 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Tue, 18 Nov 2025 18:13:16 +0100 Subject: [PATCH 632/761] Add experimental to AddTypeInfoResolverFactory (#6325) --- src/Npgsql/NpgsqlDataSourceBuilder.cs | 1 + src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 1 + src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs | 1 + 3 files changed, 3 insertions(+) diff --git a/src/Npgsql/NpgsqlDataSourceBuilder.cs b/src/Npgsql/NpgsqlDataSourceBuilder.cs index b08f175533..b18ce75848 100644 --- a/src/Npgsql/NpgsqlDataSourceBuilder.cs +++ 
b/src/Npgsql/NpgsqlDataSourceBuilder.cs @@ -415,6 +415,7 @@ public NpgsqlDataSourceBuilder UseNegotiateOptionsCallback(Action + [Experimental(NpgsqlDiagnostics.ConvertersExperimental)] public void AddTypeInfoResolverFactory(PgTypeInfoResolverFactory factory) => _internalBuilder.AddTypeInfoResolverFactory(factory); diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index 8bfe449c4a..9f91ef66b2 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -532,6 +532,7 @@ public bool UnmapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMember => _userTypeMapper.UnmapComposite(clrType, pgName, nameTranslator); /// + [Experimental(NpgsqlDiagnostics.ConvertersExperimental)] public void AddTypeInfoResolverFactory(PgTypeInfoResolverFactory factory) => _resolverChainBuilder.PrependResolverFactory(factory); /// diff --git a/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs b/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs index b5f2de7594..cbb6ac8ccc 100644 --- a/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs +++ b/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs @@ -196,6 +196,7 @@ bool UnmapComposite( /// Typically used by plugins. /// /// The type resolver factory to be added. 
+ [Experimental(NpgsqlDiagnostics.ConvertersExperimental)] void AddTypeInfoResolverFactory(PgTypeInfoResolverFactory factory); /// From b16f7229270846d68044f406aaa2c405951c1d7a Mon Sep 17 00:00:00 2001 From: Paulo Mattos Date: Tue, 18 Nov 2025 16:08:43 -0300 Subject: [PATCH 633/761] Add IDbTypeResolver to allow plugins to control how DbTypes are mapped (#6267) --- src/Npgsql/Internal/ChainDbTypeResolver.cs | 33 ++++ src/Npgsql/Internal/DbTypeResolverFactory.cs | 9 + src/Npgsql/Internal/IDbTypeResolver.cs | 28 +++ src/Npgsql/Internal/NpgsqlConnector.cs | 1 + .../PgTypeInfoResolverChainBuilder.cs | 1 + src/Npgsql/Internal/Postgres/DataTypeName.cs | 24 ++- src/Npgsql/Internal/Postgres/DataTypeNames.cs | 22 +++ src/Npgsql/Npgsql.csproj | 1 + src/Npgsql/NpgsqlBinaryImporter.cs | 2 +- src/Npgsql/NpgsqlCommandBuilder.cs | 6 +- src/Npgsql/NpgsqlDataSource.cs | 17 +- src/Npgsql/NpgsqlDataSourceBuilder.cs | 4 + src/Npgsql/NpgsqlDataSourceConfiguration.cs | 2 + src/Npgsql/NpgsqlDiagnostics.cs | 1 + src/Npgsql/NpgsqlParameter.cs | 181 ++++++++++++------ src/Npgsql/NpgsqlParameterCollection.cs | 2 +- src/Npgsql/NpgsqlParameter`.cs | 1 + src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 9 +- src/Npgsql/NpgsqlTypes/NpgsqlDbType.cs | 12 +- src/Npgsql/PublicAPI.Unshipped.txt | 1 + src/Npgsql/TypeMapping/GlobalTypeMapper.cs | 5 +- src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs | 8 + test/Npgsql.Tests/CommandParameterTests.cs | 2 +- test/Npgsql.Tests/DataTypeNameTests.cs | 3 + test/Npgsql.Tests/Npgsql.Tests.csproj | 1 + test/Npgsql.Tests/NpgsqlParameterTests.cs | 16 +- test/Npgsql.Tests/Support/TestBase.cs | 62 +++--- test/Npgsql.Tests/TypeMapperTests.cs | 168 ++++++++++++++++ .../Types/DateTimeInfinityTests.cs | 6 +- test/Npgsql.Tests/Types/MiscTypeTests.cs | 9 - 30 files changed, 513 insertions(+), 124 deletions(-) create mode 100644 src/Npgsql/Internal/ChainDbTypeResolver.cs create mode 100644 src/Npgsql/Internal/DbTypeResolverFactory.cs create mode 100644 
src/Npgsql/Internal/IDbTypeResolver.cs diff --git a/src/Npgsql/Internal/ChainDbTypeResolver.cs b/src/Npgsql/Internal/ChainDbTypeResolver.cs new file mode 100644 index 0000000000..16f3c229ee --- /dev/null +++ b/src/Npgsql/Internal/ChainDbTypeResolver.cs @@ -0,0 +1,33 @@ +using System; +using System.Collections.Generic; +using System.Data; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Internal; + +sealed class ChainDbTypeResolver(IEnumerable resolvers) : IDbTypeResolver +{ + readonly IDbTypeResolver[] _resolvers = new List(resolvers).ToArray(); + + public string? GetDataTypeName(DbType dbType, Type? type) + { + foreach (var resolver in _resolvers) + { + if (resolver.GetDataTypeName(dbType, type) is { } dataTypeName) + return dataTypeName; + } + + return null; + } + + public DbType? GetDbType(DataTypeName dataTypeName) + { + foreach (var resolver in _resolvers) + { + if (resolver.GetDbType(dataTypeName) is { } dbType) + return dbType; + } + + return null; + } +} diff --git a/src/Npgsql/Internal/DbTypeResolverFactory.cs b/src/Npgsql/Internal/DbTypeResolverFactory.cs new file mode 100644 index 0000000000..55b3b71235 --- /dev/null +++ b/src/Npgsql/Internal/DbTypeResolverFactory.cs @@ -0,0 +1,9 @@ +using System.Diagnostics.CodeAnalysis; + +namespace Npgsql.Internal; + +[Experimental(NpgsqlDiagnostics.DbTypeResolverExperimental)] +public abstract class DbTypeResolverFactory +{ + public abstract IDbTypeResolver CreateDbTypeResolver(NpgsqlDatabaseInfo databaseInfo); +} diff --git a/src/Npgsql/Internal/IDbTypeResolver.cs b/src/Npgsql/Internal/IDbTypeResolver.cs new file mode 100644 index 0000000000..c4586a2bee --- /dev/null +++ b/src/Npgsql/Internal/IDbTypeResolver.cs @@ -0,0 +1,28 @@ +using System; +using System.Data; +using System.Diagnostics.CodeAnalysis; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Internal; + +/// +/// An Npgsql resolver for DbType. Used by Npgsql to resolve a DbType to DataTypeName and back. 
+/// +[Experimental(NpgsqlDiagnostics.DbTypeResolverExperimental)] +public interface IDbTypeResolver +{ + /// + /// Attempts to resolve a DbType to a data type name. + /// + /// The DbType name to resolve. + /// The type of the value to resolve a data type name for. + /// The data type name if it could be mapped, the name can be non-normalized and without schema. + string? GetDataTypeName(DbType dbType, Type? type); + + /// + /// Attempts to resolve a data type name to a DbType. + /// + /// The data type name to map, in a normalized form but possibly without schema. + /// The DbType if it could be mapped, null otherwise. + DbType? GetDbType(DataTypeName dataTypeName); +} diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index bdd82c761f..bf6f97c1d9 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -124,6 +124,7 @@ internal string InferredUserName /// public NpgsqlDatabaseInfo DatabaseInfo => ReloadableState.DatabaseInfo; internal PgSerializerOptions SerializerOptions => ReloadableState.SerializerOptions; + internal IDbTypeResolver? DbTypeResolver => ReloadableState.DbTypeResolver; /// /// The current transaction status for this connector. diff --git a/src/Npgsql/Internal/PgTypeInfoResolverChainBuilder.cs b/src/Npgsql/Internal/PgTypeInfoResolverChainBuilder.cs index f83fa384f4..86c96231a0 100644 --- a/src/Npgsql/Internal/PgTypeInfoResolverChainBuilder.cs +++ b/src/Npgsql/Internal/PgTypeInfoResolverChainBuilder.cs @@ -115,6 +115,7 @@ public PgTypeInfoResolverChain Build(Action>? 
configur _addRangeResolvers?.Invoke(instance, resolvers); _addMultirangeResolvers?.Invoke(instance, resolvers); _addArrayResolvers?.Invoke(instance, resolvers); + configure?.Invoke(resolvers); return new( resolvers, diff --git a/src/Npgsql/Internal/Postgres/DataTypeName.cs b/src/Npgsql/Internal/Postgres/DataTypeName.cs index e1b8225911..9c9f43e41a 100644 --- a/src/Npgsql/Internal/Postgres/DataTypeName.cs +++ b/src/Npgsql/Internal/Postgres/DataTypeName.cs @@ -5,12 +5,14 @@ namespace Npgsql.Internal.Postgres; /// -/// Represents the fully-qualified name of a PostgreSQL type. +/// Represents the normalized name of a PostgreSQL data type. /// [Experimental(NpgsqlDiagnostics.ConvertersExperimental)] [DebuggerDisplay("{DisplayName,nq}")] public readonly struct DataTypeName : IEquatable { + const char InvalidIdentifier = '-'; + /// /// The maximum length of names in an unmodified PostgreSQL installation. /// @@ -50,9 +52,9 @@ public DataTypeName(string fullyQualifiedDataTypeName) internal static DataTypeName ValidatedName(string fullyQualifiedDataTypeName) => new(fullyQualifiedDataTypeName, validated: true); - // Includes schema unless it's pg_catalog or the name is unspecified. + // Includes schema unless it's pg_catalog or the schema is an invalid character used to represent an unspecified schema. public string DisplayName => - Value.StartsWith("pg_catalog", StringComparison.Ordinal) || Value == Unspecified + Value.StartsWith("pg_catalog", StringComparison.Ordinal) || IsUnqualified ? UnqualifiedDisplayName : Schema + "." + UnqualifiedDisplayName; @@ -71,12 +73,19 @@ static string ThrowDefaultException() => // This contains two invalid sql identifiers (schema and name are both separate identifiers, and would both have to be quoted to be valid). // Given this is an invalid name it's fine for us to represent a fully qualified 'unspecified' name with it. 
- public static DataTypeName Unspecified => new("-.-", validated: true); + static string UnspecifiedName => $"{InvalidIdentifier}.{InvalidIdentifier}"; + public static DataTypeName Unspecified => ValidatedName(UnspecifiedName); + + public static string GetUnqualifiedName(string dataTypeName) + => dataTypeName.IndexOf('.') is not -1 and var index + ? dataTypeName.Substring(index + 1) : dataTypeName; + + public bool IsUnqualified => Value.StartsWith(InvalidIdentifier) && Value != UnspecifiedName; public bool IsArray => UnqualifiedNameSpan.StartsWith("_".AsSpan(), StringComparison.Ordinal); internal static DataTypeName CreateFullyQualifiedName(string dataTypeName) - => dataTypeName.IndexOf('.') != -1 ? new(dataTypeName) : new("pg_catalog." + dataTypeName); + => dataTypeName.IndexOf('.') != -1 ? new(dataTypeName) : new("-." + dataTypeName); // Static transform as defined by https://www.postgresql.org/docs/current/sql-createtype.html#SQL-CREATETYPE-ARRAY // We don't have to deal with [] as we're always starting from a normalized fully qualified name. @@ -135,7 +144,7 @@ internal static DataTypeName FromDisplayName(string displayName, string? schema, } else { - schemaSpan = schema is null ? "pg_catalog" : schema.AsSpan(); + schemaSpan = schema is null ? $"{InvalidIdentifier}" : schema.AsSpan(); } // Then we strip either of the two valid array representations to get the base type name (with or without facets). @@ -187,6 +196,9 @@ internal static DataTypeName FromDisplayName(string displayName, string? schema, var value => value }; + if (schema is null && DataTypeNames.IsWellKnownUnqualifiedName(mapped)) + schemaSpan = "pg_catalog".AsSpan(); + return new(string.Concat(schemaSpan, ".", isArray ? 
"_" : "", mapped)); } diff --git a/src/Npgsql/Internal/Postgres/DataTypeNames.cs b/src/Npgsql/Internal/Postgres/DataTypeNames.cs index 275bcb9937..6c4ca73b2f 100644 --- a/src/Npgsql/Internal/Postgres/DataTypeNames.cs +++ b/src/Npgsql/Internal/Postgres/DataTypeNames.cs @@ -1,3 +1,4 @@ +using System; using static Npgsql.Internal.Postgres.DataTypeName; namespace Npgsql.Internal.Postgres; @@ -7,6 +8,27 @@ namespace Npgsql.Internal.Postgres; /// static class DataTypeNames { + // Generated from the following query: + // SELECT '"' || string_agg(typname, '" or "') || '"' FROM ( + // SELECT typname FROM pg_catalog.pg_type WHERE typtype = 'b' AND typcategory <> 'A' + // AND typnamespace = (SELECT oid FROM pg_namespace WHERE nspname = 'pg_catalog') ORDER BY typname); + public static bool IsWellKnownUnqualifiedName(ReadOnlySpan name) => name switch + { + "aclitem" or "bit" or "bool" or "box" or "bpchar" or "bytea" or "char" or "cid" or + "cidr" or "circle" or "date" or "float4" or "float8" or "gtsvector" or "inet" or + "int2" or "int4" or "int8" or "interval" or "json" or "jsonb" or "jsonpath" or + "line" or "lseg" or "macaddr" or "macaddr8" or "money" or "name" or "numeric" or + "oid" or "path" or "pg_brin_bloom_summary" or "pg_brin_minmax_multi_summary" or + "pg_dependencies" or "pg_lsn" or "pg_mcv_list" or "pg_ndistinct" or "pg_node_tree" or + "pg_snapshot" or "point" or "polygon" or "refcursor" or "regclass" or "regcollation" or + "regconfig" or "regdictionary" or "regnamespace" or "regoper" or "regoperator" or + "regproc" or "regprocedure" or "regrole" or "regtype" or "text" or "tid" or "time" or + "timestamp" or "timestamptz" or "timetz" or "tsquery" or "tsvector" or "txid_snapshot" or + "uuid" or "varbit" or "varchar" or "xid" or "xid8" or "xml" + => true, + _ => false + }; + // Note: The names are fully qualified in source so the strings are constants and instances will be interned after the first call. 
// Uses an internal constructor bypassing the public DataTypeName constructor validation, as we don't want to store all these names on // fields either. diff --git a/src/Npgsql/Npgsql.csproj b/src/Npgsql/Npgsql.csproj index 5273736ceb..0eab75cd66 100644 --- a/src/Npgsql/Npgsql.csproj +++ b/src/Npgsql/Npgsql.csproj @@ -9,6 +9,7 @@ $(NoWarn);CA2017 $(NoWarn);NPG9001 $(NoWarn);NPG9002 + $(NoWarn);NPG9003 diff --git a/src/Npgsql/NpgsqlBinaryImporter.cs b/src/Npgsql/NpgsqlBinaryImporter.cs index 52c9438fde..8c240468d4 100644 --- a/src/Npgsql/NpgsqlBinaryImporter.cs +++ b/src/Npgsql/NpgsqlBinaryImporter.cs @@ -282,7 +282,7 @@ async Task Core(bool async, T value, NpgsqlDbType? npgsqlDbType, string? dataTyp // These actions can reset or change the type info, we'll check afterwards whether we're still consistent with the original values. param.TypedValue = value; - param.ResolveTypeInfo(_connector.SerializerOptions); + param.ResolveTypeInfo(_connector.SerializerOptions, _connector.DbTypeResolver); if (previousTypeInfo is not null && previousConverter is not null && param.PgTypeId != previousTypeId) { diff --git a/src/Npgsql/NpgsqlCommandBuilder.cs b/src/Npgsql/NpgsqlCommandBuilder.cs index 9665b8356c..d9a698c2ef 100644 --- a/src/Npgsql/NpgsqlCommandBuilder.cs +++ b/src/Npgsql/NpgsqlCommandBuilder.cs @@ -212,7 +212,11 @@ private static void SetParameterValuesFromRow(NpgsqlCommand command, DataRow row protected override void ApplyParameterInfo(DbParameter p, DataRow row, System.Data.StatementType statementType, bool whereClause) { var param = (NpgsqlParameter)p; - param.NpgsqlDbType = (NpgsqlDbType)row[SchemaTableColumn.ProviderType]; + // DbCommandBuilder is going to set DbType.Int32 onto an existing parameter, reset other db type fields. 
+ if (param.SourceColumnNullMapping) + param.ResetDbType(); + else + param.NpgsqlDbType = (NpgsqlDbType)row[SchemaTableColumn.ProviderType]; } /// diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index 51042bb654..000d3f1ae3 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -31,11 +31,12 @@ public abstract class NpgsqlDataSource : DbDataSource internal NpgsqlLoggingConfiguration LoggingConfiguration { get; } readonly PgTypeInfoResolverChain _resolverChain; + readonly IEnumerable _dbTypeResolverFactories; internal ReloadableState CurrentReloadableState = null!; // Initialized during bootstrapping. // Initialized at bootstrapping - internal sealed class ReloadableState(NpgsqlDatabaseInfo databaseInfo, PgSerializerOptions serializerOptions) + internal sealed class ReloadableState(NpgsqlDatabaseInfo databaseInfo, PgSerializerOptions serializerOptions, IDbTypeResolver? dbTypeResolver) { /// /// Information about PostgreSQL and PostgreSQL-like databases (e.g. type definitions, capabilities...). @@ -43,8 +44,11 @@ internal sealed class ReloadableState(NpgsqlDatabaseInfo databaseInfo, PgSeriali public NpgsqlDatabaseInfo DatabaseInfo { get; } = databaseInfo; public PgSerializerOptions SerializerOptions { get; } = serializerOptions; + + public IDbTypeResolver? DbTypeResolver { get; } = dbTypeResolver; } + internal TransportSecurityHandler TransportSecurityHandler { get; } internal Action? 
SslClientAuthenticationOptionsCallback { get; } @@ -113,6 +117,7 @@ internal NpgsqlDataSource( _periodicPasswordSuccessRefreshInterval, _periodicPasswordFailureRefreshInterval, _resolverChain, + _dbTypeResolverFactories, _defaultNameTranslator, ConnectionInitializer, ConnectionInitializerAsync, @@ -284,7 +289,8 @@ internal async Task Bootstrap( { TextEncoding = connector.TextEncoding, TypeInfoResolver = AdoTypeInfoResolverFactory.Instance.CreateResolver(), - }); + }, + dbTypeResolver: null); NpgsqlDatabaseInfo databaseInfo; @@ -299,9 +305,14 @@ internal async Task Bootstrap( DefaultNameTranslator = _defaultNameTranslator }; + var resolvers = new List(); + foreach (var dbTypeResolverFactory in _dbTypeResolverFactories) + resolvers.Add(dbTypeResolverFactory.CreateDbTypeResolver(databaseInfo)); + connector.ReloadableState = CurrentReloadableState = new ReloadableState( databaseInfo: databaseInfo, - serializerOptions: serializerOptions); + serializerOptions: serializerOptions, + dbTypeResolver: new ChainDbTypeResolver(resolvers)); IsBootstrapped = true; } diff --git a/src/Npgsql/NpgsqlDataSourceBuilder.cs b/src/Npgsql/NpgsqlDataSourceBuilder.cs index b18ce75848..bc4b65d7f6 100644 --- a/src/Npgsql/NpgsqlDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlDataSourceBuilder.cs @@ -414,6 +414,10 @@ public NpgsqlDataSourceBuilder UseNegotiateOptionsCallback(Action + void INpgsqlTypeMapper.AddDbTypeResolverFactory(DbTypeResolverFactory factory) + => ((INpgsqlTypeMapper)_internalBuilder).AddDbTypeResolverFactory(factory); + /// [Experimental(NpgsqlDiagnostics.ConvertersExperimental)] public void AddTypeInfoResolverFactory(PgTypeInfoResolverFactory factory) diff --git a/src/Npgsql/NpgsqlDataSourceConfiguration.cs b/src/Npgsql/NpgsqlDataSourceConfiguration.cs index 8ba336cd5b..f3cdd4b513 100644 --- a/src/Npgsql/NpgsqlDataSourceConfiguration.cs +++ b/src/Npgsql/NpgsqlDataSourceConfiguration.cs @@ -1,4 +1,5 @@ using System; +using System.Collections.Generic; using System.Net.Security; 
using System.Threading; using System.Threading.Tasks; @@ -19,6 +20,7 @@ sealed record NpgsqlDataSourceConfiguration(string? Name, TimeSpan PeriodicPasswordSuccessRefreshInterval, TimeSpan PeriodicPasswordFailureRefreshInterval, PgTypeInfoResolverChain ResolverChain, + IEnumerable DbTypeResolverFactories, INpgsqlNameTranslator DefaultNameTranslator, Action? ConnectionInitializer, Func? ConnectionInitializerAsync, diff --git a/src/Npgsql/NpgsqlDiagnostics.cs b/src/Npgsql/NpgsqlDiagnostics.cs index 2037fec667..0d9ff5f846 100644 --- a/src/Npgsql/NpgsqlDiagnostics.cs +++ b/src/Npgsql/NpgsqlDiagnostics.cs @@ -4,4 +4,5 @@ static class NpgsqlDiagnostics { public const string ConvertersExperimental = "NPG9001"; public const string DatabaseInfoExperimental = "NPG9002"; + public const string DbTypeResolverExperimental = "NPG9003"; } diff --git a/src/Npgsql/NpgsqlParameter.cs b/src/Npgsql/NpgsqlParameter.cs index ca7ec17cbb..8930724c92 100644 --- a/src/Npgsql/NpgsqlParameter.cs +++ b/src/Npgsql/NpgsqlParameter.cs @@ -30,6 +30,7 @@ public class NpgsqlParameter : DbParameter, IDbDataParameter, ICloneable internal NpgsqlDbType? _npgsqlDbType; internal string? _dataTypeName; + internal DbType? _dbType; private protected string _name = string.Empty; object? _value; @@ -40,6 +41,7 @@ public class NpgsqlParameter : DbParameter, IDbDataParameter, ICloneable internal string TrimmedName { get; private protected set; } = PositionalName; internal const string PositionalName = ""; + IDbTypeResolver? _dbTypeResolver; private protected PgTypeInfo? TypeInfo { get; private set; } internal PgTypeId PgTypeId { get; private set; } @@ -315,26 +317,32 @@ public sealed override DbType DbType { get { - if (_npgsqlDbType is { } npgsqlDbType) - return npgsqlDbType.ToDbType(); + if (_dbType is { } dbType) + return dbType; if (_dataTypeName is not null) - return Internal.Postgres.DataTypeName.FromDisplayName(_dataTypeName).ToNpgsqlDbType()?.ToDbType() ?? 
DbType.Object; + { + var dataTypeName = Internal.Postgres.DataTypeName.FromDisplayName(_dataTypeName); + if (TryResolveDbType(dataTypeName, out var resolvedDbType)) + return resolvedDbType; + + return dataTypeName.ToNpgsqlDbType()?.ToDbType() ?? DbType.Object; + } + + if (_npgsqlDbType is { } npgsqlDbType) + return npgsqlDbType.ToDbType(); // Infer from value but don't cache - if (Value is not null) - // We pass ValueType here for the generic derived type, where we should respect T and not the runtime type. - return GlobalTypeMapper.Instance.FindDataTypeName(GetValueType(StaticValueType)!, Value)?.ToNpgsqlDbType()?.ToDbType() ?? DbType.Object; + // We pass ValueType here for the generic derived type, where we should respect T and not the runtime type. + if (GetValueType(StaticValueType) is { } valueType) + return GlobalTypeMapper.Instance.FindDataTypeName(valueType, Value)?.ToNpgsqlDbType()?.ToDbType() ?? DbType.Object; return DbType.Object; } set { ResetTypeInfo(); - _npgsqlDbType = value == DbType.Object - ? null - : value.ToNpgsqlDbType() - ?? throw new NotSupportedException($"The parameter type DbType.{value} isn't supported by PostgreSQL or Npgsql"); + _dbType = value; } } @@ -355,10 +363,19 @@ public NpgsqlDbType NpgsqlDbType if (_dataTypeName is not null) return Internal.Postgres.DataTypeName.FromDisplayName(_dataTypeName).ToNpgsqlDbType() ?? NpgsqlDbType.Unknown; + var valueType = GetValueType(StaticValueType); + if (_dbType is { } dbType) + { + if (TryResolveDbTypeDataTypeName(dbType, valueType, out var dataTypeName)) + return NpgsqlDbTypeExtensions.ToNpgsqlDbType(dataTypeName) ?? NpgsqlDbType.Unknown; + + return dbType.ToNpgsqlDbType() ?? NpgsqlDbType.Unknown; + } + // Infer from value but don't cache - if (Value is not null) - // We pass ValueType here for the generic derived type (NpgsqlParameter) where we should respect T and not the runtime type. 
- return GlobalTypeMapper.Instance.FindDataTypeName(GetValueType(StaticValueType)!, Value)?.ToNpgsqlDbType() ?? NpgsqlDbType.Unknown; + // We pass ValueType here for the generic derived type, where we should respect T and not the runtime type. + if (valueType is not null) + return GlobalTypeMapper.Instance.FindDataTypeName(valueType, Value)?.ToNpgsqlDbType() ?? NpgsqlDbType.Unknown; return NpgsqlDbType.Unknown; } @@ -392,10 +409,21 @@ public string? DataTypeName "pg_catalog." + unqualifiedName).UnqualifiedDisplayName; } + var valueType = GetValueType(StaticValueType); + if (_dbType is { } dbType) + { + if (TryResolveDbTypeDataTypeName(dbType, valueType, out var dataTypeName)) + return dataTypeName; + + var unqualifiedName = dbType.ToNpgsqlDbType()?.ToUnqualifiedDataTypeName(); + return unqualifiedName is null ? null : Internal.Postgres.DataTypeName.ValidatedName( + "pg_catalog." + unqualifiedName).UnqualifiedDisplayName; + } + // Infer from value but don't cache - if (Value is not null) - // We pass ValueType here for the generic derived type, where we should respect T and not the runtime type. - return GlobalTypeMapper.Instance.FindDataTypeName(GetValueType(StaticValueType)!, Value)?.DisplayName; + // We pass ValueType here for the generic derived type, where we should respect T and not the runtime type. + if (valueType is not null) + return GlobalTypeMapper.Instance.FindDataTypeName(valueType, Value)?.DisplayName; return null; } @@ -497,6 +525,30 @@ public sealed override string SourceColumn Type? GetValueType(Type staticValueType) => staticValueType != typeof(object) ? staticValueType : Value?.GetType(); + bool TryResolveDbType(DataTypeName dataTypeName, out DbType dbType) + { + if (_dbTypeResolver?.GetDbType(dataTypeName) is { } result) + { + dbType = result; + return true; + } + + dbType = default; + return false; + } + + bool TryResolveDbTypeDataTypeName(DbType dbType, Type? type, [NotNullWhen(true)]out string? 
normalizedDataTypeName) + { + if (_dbTypeResolver?.GetDataTypeName(dbType, type) is { } result) + { + normalizedDataTypeName = Internal.Postgres.DataTypeName.NormalizeName(result); + return true; + } + + normalizedDataTypeName = null; + return false; + } + internal void SetOutputValue(NpgsqlDataReader reader, int ordinal) { if (GetType() == typeof(NpgsqlParameter)) @@ -536,18 +588,44 @@ internal void SetResolutionInfo(PgTypeInfo typeInfo, PgConverter converter, PgTy } /// Attempt to resolve a type info based on available (postgres) type information on the parameter. - internal void ResolveTypeInfo(PgSerializerOptions options) + internal void ResolveTypeInfo(PgSerializerOptions options, IDbTypeResolver? dbTypeResolver) { var typeInfo = TypeInfo; var previouslyResolved = ReferenceEquals(typeInfo?.Options, options); if (!previouslyResolved) { - var dataTypeName = - _dataTypeName is not null - ? Internal.Postgres.DataTypeName.NormalizeName(_dataTypeName) - : _npgsqlDbType is { } npgsqlDbType - ? npgsqlDbType.ToDataTypeName() ?? npgsqlDbType.ToUnqualifiedDataTypeNameOrThrow() - : null; + var staticValueType = StaticValueType; + var valueType = GetValueType(staticValueType); + + string? dataTypeName = null; + if (_dataTypeName is not null) + { + dataTypeName = Internal.Postgres.DataTypeName.NormalizeName(_dataTypeName); + } + else if (_npgsqlDbType is { } npgsqlDbType) + { + dataTypeName = npgsqlDbType.ToDataTypeName() ?? npgsqlDbType.ToUnqualifiedDataTypeNameOrThrow(); + } + else if (_dbType is { } dbType) + { + if (dbTypeResolver is not null) + { + _dbTypeResolver = dbTypeResolver; + if (dbTypeResolver.GetDataTypeName(dbType, valueType) is { } result) + { + dataTypeName = Internal.Postgres.DataTypeName.NormalizeName(result); + } + } + + // Fall back to builtin mappings if there was no resolver, or it didn't produce a result. 
+ if (dataTypeName is null) + { + dataTypeName = dbType.ToNpgsqlDbType()?.ToDataTypeName(); + // If DbType.Object was specified we will only throw (see ThrowNoTypeInfo) if valueType is also null. + if (dataTypeName is null && dbType is not DbType.Object) + ThrowDbTypeNotSupported(); + } + } PgTypeId? pgTypeId = null; if (dataTypeName is not null) @@ -561,35 +639,24 @@ _dataTypeName is not null pgTypeId = options.ToCanonicalTypeId(pgType.GetRepresentationalType()); } - var unspecifiedDBNull = false; - var valueType = StaticValueType; - if (valueType == typeof(object)) + if (pgTypeId is null && valueType is null) { - valueType = Value?.GetType(); - if (valueType is null && pgTypeId is null) - { - ThrowNoTypeInfo(); - return; - } - - // We treat object typed DBNull values as default info. - // Unless we don't have a pgTypeId either, at which point we'll use an 'unspecified' PgTypeInfo to help us write a NULL. - if (valueType == typeof(DBNull)) - { - if (pgTypeId is null) - { - unspecifiedDBNull = true; - typeInfo = options.UnspecifiedDBNullTypeInfo; - } - else - valueType = null; - } + ThrowNoTypeInfo(); + return; } - if (!unspecifiedDBNull) - typeInfo = AdoSerializerHelpers.GetTypeInfoForWriting(valueType, pgTypeId, options, _npgsqlDbType); - - TypeInfo = typeInfo; + // We treat object typed DBNull values as default info (we don't supply a type). + // Unless we don't have a pgTypeId either, at which point we'll use an 'unspecified' PgTypeInfo to help us write a NULL. + if (valueType == typeof(DBNull) && staticValueType == typeof(object)) + { + TypeInfo = typeInfo = pgTypeId is null + ? 
options.UnspecifiedDBNullTypeInfo + : AdoSerializerHelpers.GetTypeInfoForWriting(type: null, pgTypeId, options, _npgsqlDbType); + } + else + { + TypeInfo = typeInfo = AdoSerializerHelpers.GetTypeInfoForWriting(valueType, pgTypeId, options, _npgsqlDbType); + } } // This step isn't part of BindValue because we need to know the PgTypeId beforehand for things like SchemaOnly with null values. @@ -605,14 +672,16 @@ _dataTypeName is not null void ThrowNoTypeInfo() => ThrowHelper.ThrowInvalidOperationException( - $"Parameter '{(!string.IsNullOrEmpty(ParameterName) ? ParameterName : $"${Collection?.IndexOf(this) + 1}")}' must have either its NpgsqlDbType or its DataTypeName or its Value set."); + $"Parameter '{(!string.IsNullOrEmpty(ParameterName) ? ParameterName : $"${Collection?.IndexOf(this) + 1}")}' must have either its DbType, NpgsqlDbType, DataTypeName or its Value set."); + + void ThrowDbTypeNotSupported() + => ThrowHelper.ThrowNotSupportedException( + $"The DbType '{_dbType}' isn't supported by Npgsql. There might be an Npgsql plugin with support for this DbType."); void ThrowNotSupported(string dataTypeName) - { - ThrowHelper.ThrowNotSupportedException(_npgsqlDbType is not null - ? $"The NpgsqlDbType '{_npgsqlDbType}' isn't present in your database. You may need to install an extension or upgrade to a newer version." - : $"The data type name '{dataTypeName}' isn't present in your database. You may need to install an extension or upgrade to a newer version."); - } + => ThrowHelper.ThrowNotSupportedException( + $"The data type name '{dataTypeName}'{(_npgsqlDbType is not null ? $", provided as NpgsqlDbType '{_npgsqlDbType}'," : null)} could not be found in the types that were loaded by Npgsql. " + + $"Your database details or Npgsql type loading configuration may be incorrect. 
Alternatively your PostgreSQL installation might need to be upgraded, or an extension adding the missing data type might not have been installed."); } // Pull from Value so we also support object typed generic params. @@ -755,6 +824,7 @@ private protected virtual ValueTask WriteValue(bool async, PgWriter writer, Canc /// public override void ResetDbType() { + _dbType = null; _npgsqlDbType = null; _dataTypeName = null; ResetTypeInfo(); @@ -815,6 +885,7 @@ private protected virtual NpgsqlParameter CloneCore() => _precision = _precision, _scale = _scale, _size = _size, + _dbType = _dbType, _npgsqlDbType = _npgsqlDbType, _dataTypeName = _dataTypeName, Direction = Direction, diff --git a/src/Npgsql/NpgsqlParameterCollection.cs b/src/Npgsql/NpgsqlParameterCollection.cs index 17e7fb7969..85b418b157 100644 --- a/src/Npgsql/NpgsqlParameterCollection.cs +++ b/src/Npgsql/NpgsqlParameterCollection.cs @@ -724,7 +724,7 @@ internal void ProcessParameters(NpgsqlDataSource.ReloadableState reloadableState break; } - p.ResolveTypeInfo(reloadableState.SerializerOptions); + p.ResolveTypeInfo(reloadableState.SerializerOptions, reloadableState.DbTypeResolver); if (validateValues) { diff --git a/src/Npgsql/NpgsqlParameter`.cs b/src/Npgsql/NpgsqlParameter`.cs index d353cdce45..2f1e1b24bc 100644 --- a/src/Npgsql/NpgsqlParameter`.cs +++ b/src/Npgsql/NpgsqlParameter`.cs @@ -138,6 +138,7 @@ private protected override NpgsqlParameter CloneCore() => _precision = _precision, _scale = _scale, _size = _size, + _dbType = _dbType, _npgsqlDbType = _npgsqlDbType, _dataTypeName = _dataTypeName, Direction = Direction, diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index 9f91ef66b2..e8d8ea5061 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -48,6 +48,7 @@ public sealed class NpgsqlSlimDataSourceBuilder : INpgsqlTypeMapper Func>? 
_periodicPasswordProvider; TimeSpan _periodicPasswordSuccessRefreshInterval, _periodicPasswordFailureRefreshInterval; + List? _dbTypeResolverFactories; PgTypeInfoResolverChainBuilder _resolverChainBuilder = new(); // mutable struct, don't make readonly. readonly UserTypeMapper _userTypeMapper; @@ -531,9 +532,14 @@ public bool UnmapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMember Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) => _userTypeMapper.UnmapComposite(clrType, pgName, nameTranslator); + /// + void INpgsqlTypeMapper.AddDbTypeResolverFactory(DbTypeResolverFactory factory) + => (_dbTypeResolverFactories ??= new()).Add(factory); + /// [Experimental(NpgsqlDiagnostics.ConvertersExperimental)] - public void AddTypeInfoResolverFactory(PgTypeInfoResolverFactory factory) => _resolverChainBuilder.PrependResolverFactory(factory); + public void AddTypeInfoResolverFactory(PgTypeInfoResolverFactory factory) + => _resolverChainBuilder.PrependResolverFactory(factory); /// void INpgsqlTypeMapper.Reset() => _resolverChainBuilder.Clear(); @@ -888,6 +894,7 @@ _loggerFactory is null _periodicPasswordSuccessRefreshInterval, _periodicPasswordFailureRefreshInterval, _resolverChainBuilder.Build(ConfigureResolverChain), + _dbTypeResolverFactories ?? [], DefaultNameTranslator, _connectionInitializer, _connectionInitializerAsync, diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlDbType.cs b/src/Npgsql/NpgsqlTypes/NpgsqlDbType.cs index abb24c74d0..f9b952e479 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlDbType.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlDbType.cs @@ -876,11 +876,11 @@ _ when npgsqlDbType.HasFlag(NpgsqlDbType.Multirange) internal static NpgsqlDbType? ToNpgsqlDbType(this DataTypeName dataTypeName) => ToNpgsqlDbType(dataTypeName.UnqualifiedName); /// Should not be used with display names, first normalize it instead. - internal static NpgsqlDbType? ToNpgsqlDbType(string dataTypeName) + internal static NpgsqlDbType? 
ToNpgsqlDbType(string normalizedDataTypeName) { - var unqualifiedName = dataTypeName; - if (dataTypeName.IndexOf(".", StringComparison.Ordinal) is not -1 and var index) - unqualifiedName = dataTypeName.Substring(0, index); + var unqualifiedName = normalizedDataTypeName.AsSpan(); + if (unqualifiedName.IndexOf('.') is not -1 and var index) + unqualifiedName = unqualifiedName.Slice(index + 1); return unqualifiedName switch { @@ -979,12 +979,12 @@ _ when npgsqlDbType.HasFlag(NpgsqlDbType.Multirange) "geometry" => NpgsqlDbType.Geometry, "geography" => NpgsqlDbType.Geography, - _ when unqualifiedName.Contains("unknown") + _ when unqualifiedName.IndexOf("unknown") != -1 => !unqualifiedName.StartsWith("_", StringComparison.Ordinal) ? NpgsqlDbType.Unknown : null, _ when unqualifiedName.StartsWith("_", StringComparison.Ordinal) - => ToNpgsqlDbType(unqualifiedName.Substring(1)) is { } elementNpgsqlDbType + => ToNpgsqlDbType(unqualifiedName.Slice(1).ToString()) is { } elementNpgsqlDbType ? elementNpgsqlDbType | NpgsqlDbType.Array : null, // e.g. custom ranges, plugin types etc. diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index bb06704140..52f47e43d7 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -4,6 +4,7 @@ Npgsql.GssEncryptionMode Npgsql.GssEncryptionMode.Disable = 0 -> Npgsql.GssEncryptionMode Npgsql.GssEncryptionMode.Prefer = 1 -> Npgsql.GssEncryptionMode Npgsql.GssEncryptionMode.Require = 2 -> Npgsql.GssEncryptionMode +Npgsql.TypeMapping.INpgsqlTypeMapper.AddDbTypeResolverFactory(Npgsql.Internal.DbTypeResolverFactory! factory) -> void Npgsql.NpgsqlConnection.CloneWithAsync(string! connectionString, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.ValueTask Npgsql.NpgsqlConnection.SslClientAuthenticationOptionsCallback.get -> System.Action? 
Npgsql.NpgsqlConnection.SslClientAuthenticationOptionsCallback.set -> void diff --git a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs index 2f9b5b3479..ef3981d22f 100644 --- a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs +++ b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs @@ -95,7 +95,7 @@ PgSerializerOptions TypeMappingOptions } } - internal DataTypeName? FindDataTypeName(Type type, object value) + internal DataTypeName? FindDataTypeName(Type type, object? value) { DataTypeName? dataTypeName; try @@ -148,6 +148,9 @@ public void AddTypeInfoResolverFactory(PgTypeInfoResolverFactory factory) } } + public void AddDbTypeResolverFactory(DbTypeResolverFactory factory) + => throw new NotSupportedException("The global type mapper does not support DbTypeResolverFactories. Call this method on a data source builder instead."); + void ReplaceTypeInfoResolverFactory(PgTypeInfoResolverFactory factory) { _lock.EnterWriteLock(); diff --git a/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs b/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs index cbb6ac8ccc..3fc5d0cbf1 100644 --- a/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs +++ b/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs @@ -199,6 +199,14 @@ bool UnmapComposite( [Experimental(NpgsqlDiagnostics.ConvertersExperimental)] void AddTypeInfoResolverFactory(PgTypeInfoResolverFactory factory); + /// + /// Adds a DbType resolver factory which can change how DbType cases are mapped to PostgreSQL data types. + /// Typically used by plugins. + /// + /// The resolver factory to be added. + [Experimental(NpgsqlDiagnostics.DbTypeResolverExperimental)] + public void AddDbTypeResolverFactory(DbTypeResolverFactory factory); + /// /// Configures the JSON serializer options used when reading and writing all System.Text.Json data. 
/// diff --git a/test/Npgsql.Tests/CommandParameterTests.cs b/test/Npgsql.Tests/CommandParameterTests.cs index 3e758f2413..7f5a5dc5c4 100644 --- a/test/Npgsql.Tests/CommandParameterTests.cs +++ b/test/Npgsql.Tests/CommandParameterTests.cs @@ -189,7 +189,7 @@ public async Task Parameter_must_be_set(bool genericParam) Assert.That(async () => await cmd.ExecuteReaderAsync(), Throws.Exception .TypeOf() - .With.Message.EqualTo("Parameter 'p1' must have either its NpgsqlDbType or its DataTypeName or its Value set.")); + .With.Message.EqualTo("Parameter 'p1' must have either its DbType, NpgsqlDbType, DataTypeName or its Value set.")); } [Test] diff --git a/test/Npgsql.Tests/DataTypeNameTests.cs b/test/Npgsql.Tests/DataTypeNameTests.cs index 5c64baa607..067eb217c4 100644 --- a/test/Npgsql.Tests/DataTypeNameTests.cs +++ b/test/Npgsql.Tests/DataTypeNameTests.cs @@ -51,6 +51,9 @@ public string ToDefaultMultirangeNameHasRange(string name) [TestCase("name", null, ExpectedResult = "pg_catalog.name")] [TestCase("_name", null, ExpectedResult = "pg_catalog._name")] [TestCase("name[]", null, ExpectedResult = "pg_catalog._name")] + [TestCase("mytype", null, ExpectedResult = "-.mytype")] + [TestCase("_mytype", null, ExpectedResult = "-._mytype")] + [TestCase("mytype[]", null, ExpectedResult = "-._mytype")] [TestCase("character varying", null, ExpectedResult = "pg_catalog.varchar")] [TestCase("decimal(facet_name)", null, ExpectedResult = "pg_catalog.numeric")] [TestCase("name", "public", ExpectedResult = "public.name")] diff --git a/test/Npgsql.Tests/Npgsql.Tests.csproj b/test/Npgsql.Tests/Npgsql.Tests.csproj index d2da055a6e..2944755e6c 100644 --- a/test/Npgsql.Tests/Npgsql.Tests.csproj +++ b/test/Npgsql.Tests/Npgsql.Tests.csproj @@ -22,5 +22,6 @@ true $(NoWarn);NPG9001 $(NoWarn);NPG9002 + $(NoWarn);NPG9003 diff --git a/test/Npgsql.Tests/NpgsqlParameterTests.cs b/test/Npgsql.Tests/NpgsqlParameterTests.cs index e02031ea9f..e1f0ef9c48 100644 --- 
a/test/Npgsql.Tests/NpgsqlParameterTests.cs +++ b/test/Npgsql.Tests/NpgsqlParameterTests.cs @@ -133,7 +133,7 @@ public void Setting_NpgsqlDbType_sets_DbType() [Test] public void Setting_value_does_not_change_DbType() { - var p = new NpgsqlParameter { DbType = DbType.String, NpgsqlDbType = NpgsqlDbType.Bytea }; + var p = new NpgsqlParameter { DbType = DbType.Binary, NpgsqlDbType = NpgsqlDbType.Bytea }; p.Value = 8; Assert.That(p.DbType, Is.EqualTo(DbType.Binary)); Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Bytea)); @@ -698,7 +698,7 @@ public void Null_value_with_nullable_type() public void DBNull_reuses_type_info([Values]bool generic) { var param = generic ? new NpgsqlParameter { Value = "value" } : new NpgsqlParameter { Value = "value" }; - param.ResolveTypeInfo(DataSource.CurrentReloadableState.SerializerOptions); + param.ResolveTypeInfo(DataSource.CurrentReloadableState.SerializerOptions, null); param.GetResolutionInfo(out var typeInfo, out _, out _); Assert.That(typeInfo, Is.Not.Null); @@ -708,7 +708,7 @@ public void DBNull_reuses_type_info([Values]bool generic) Assert.That(secondTypeInfo, Is.SameAs(typeInfo)); // Make sure we don't resolve a different type info either. - param.ResolveTypeInfo(DataSource.CurrentReloadableState.SerializerOptions); + param.ResolveTypeInfo(DataSource.CurrentReloadableState.SerializerOptions, null); param.GetResolutionInfo(out var thirdTypeInfo, out _, out _); Assert.That(thirdTypeInfo, Is.SameAs(secondTypeInfo)); } @@ -717,7 +717,7 @@ public void DBNull_reuses_type_info([Values]bool generic) public void DBNull_followed_by_non_null_reresolves([Values]bool generic) { var param = generic ? 
new NpgsqlParameter { Value = DBNull.Value } : new NpgsqlParameter { Value = DBNull.Value }; - param.ResolveTypeInfo(DataSource.CurrentReloadableState.SerializerOptions); + param.ResolveTypeInfo(DataSource.CurrentReloadableState.SerializerOptions, null); param.GetResolutionInfo(out var typeInfo, out _, out var pgTypeId); Assert.That(typeInfo, Is.Not.Null); Assert.That(pgTypeId.IsUnspecified, Is.True); @@ -727,7 +727,7 @@ public void DBNull_followed_by_non_null_reresolves([Values]bool generic) Assert.That(secondTypeInfo, Is.Null); // Make sure we don't resolve the same type info either. - param.ResolveTypeInfo(DataSource.CurrentReloadableState.SerializerOptions); + param.ResolveTypeInfo(DataSource.CurrentReloadableState.SerializerOptions, null); param.GetResolutionInfo(out var thirdTypeInfo, out _, out _); Assert.That(thirdTypeInfo, Is.Not.SameAs(typeInfo)); } @@ -736,7 +736,7 @@ public void DBNull_followed_by_non_null_reresolves([Values]bool generic) public void Changing_value_type_reresolves([Values]bool generic) { var param = generic ? new NpgsqlParameter { Value = "value" } : new NpgsqlParameter { Value = "value" }; - param.ResolveTypeInfo(DataSource.CurrentReloadableState.SerializerOptions); + param.ResolveTypeInfo(DataSource.CurrentReloadableState.SerializerOptions, null); param.GetResolutionInfo(out var typeInfo, out _, out _); Assert.That(typeInfo, Is.Not.Null); @@ -745,7 +745,7 @@ public void Changing_value_type_reresolves([Values]bool generic) Assert.That(secondTypeInfo, Is.Null); // Make sure we don't resolve a different type info either. 
- param.ResolveTypeInfo(DataSource.CurrentReloadableState.SerializerOptions); + param.ResolveTypeInfo(DataSource.CurrentReloadableState.SerializerOptions, null); param.GetResolutionInfo(out var thirdTypeInfo, out _, out _); Assert.That(thirdTypeInfo, Is.Not.SameAs(typeInfo)); } @@ -764,7 +764,7 @@ public void DataTypeName_prioritized_over_NpgsqlDbType([Values]bool generic) DataTypeName = "text", Value = "value" }; - param.ResolveTypeInfo(DataSource.CurrentReloadableState.SerializerOptions); + param.ResolveTypeInfo(DataSource.CurrentReloadableState.SerializerOptions, null); param.GetResolutionInfo(out var typeInfo, out _, out _); Assert.That(typeInfo, Is.Not.Null); Assert.That(typeInfo.PgTypeId, Is.EqualTo(DataSource.CurrentReloadableState.SerializerOptions.TextPgTypeId)); diff --git a/test/Npgsql.Tests/Support/TestBase.cs b/test/Npgsql.Tests/Support/TestBase.cs index 61c4e2accf..198e10a326 100644 --- a/test/Npgsql.Tests/Support/TestBase.cs +++ b/test/Npgsql.Tests/Support/TestBase.cs @@ -148,7 +148,8 @@ public async Task AssertTypeWrite( bool skipArrayCheck = false) { await using var connection = await OpenConnectionAsync(); - await AssertTypeWrite(connection, valueFactory, expectedSqlLiteral, pgTypeName, npgsqlDbType, dbType, inferredDbType, isDefault, isNpgsqlDbTypeInferredFromClrType, skipArrayCheck); + await AssertTypeWrite(connection, valueFactory, expectedSqlLiteral, pgTypeName, npgsqlDbType, dbType, inferredDbType, isDefault, + isNpgsqlDbTypeInferredFromClrType, skipArrayCheck); } internal static async Task AssertTypeRead( @@ -254,7 +255,7 @@ await AssertTypeWriteCore( } } - internal static async Task AssertTypeWriteCore( + static async Task AssertTypeWriteCore( NpgsqlConnection connection, Func valueFactory, string expectedSqlLiteral, @@ -263,12 +264,10 @@ internal static async Task AssertTypeWriteCore( DbType? dbType = null, DbType? 
inferredDbType = null, bool isDefault = true, - bool isNpgsqlDbTypeInferredFromClrType = true) + bool isDataTypeInferredFromClrType = true) { if (npgsqlDbType is null) - isNpgsqlDbTypeInferredFromClrType = false; - - inferredDbType ??= isNpgsqlDbTypeInferredFromClrType ? dbType ?? DbType.Object : DbType.Object; + isDataTypeInferredFromClrType = false; // TODO: Interferes with both multiplexing and connection-specific mapping (used e.g. in NodaTime) // Reset the type mapper to make sure we're resolving this type with a clean slate (for isolation, just in case) @@ -281,6 +280,10 @@ internal static async Task AssertTypeWriteCore( ? pgTypeName[..parenIndex] + pgTypeName[(pgTypeName.IndexOf(')') + 1)..] : pgTypeName; + // For composite type with dots in name, Postgresql returns name with quotes - scheme."My.type.name" + // but for npgsql mapping we should use names without quotes - scheme.My.type.name + var pgTypeNameWithoutFacetsAndQuotes = pgTypeNameWithoutFacets.Replace("\"", string.Empty); + // We test the following scenarios (between 2 and 5 in total): // 1. With NpgsqlDbType explicitly set // 2. 
With DataTypeName explicitly set @@ -293,6 +296,13 @@ internal static async Task AssertTypeWriteCore( await using var cmd = new NpgsqlCommand { Connection = connection }; NpgsqlParameter p; + + // With data type name + p = new NpgsqlParameter { Value = valueFactory(), DataTypeName = pgTypeNameWithoutFacetsAndQuotes }; + cmd.Parameters.Add(p); + errorIdentifier[++errorIdentifierIndex] = $"DataTypeName={pgTypeNameWithoutFacetsAndQuotes}"; + CheckInference(); + // With NpgsqlDbType if (npgsqlDbType is not null) { @@ -302,22 +312,13 @@ internal static async Task AssertTypeWriteCore( CheckInference(); } - // With data type name - // For composite type with dots in name, Postgresql returns name with quotes - scheme."My.type.name" - // but for npgsql mapping we should use names without quotes - scheme.My.type.name - var pgTypeNameWithoutFacetsAndDots = pgTypeNameWithoutFacets.Replace("\"", string.Empty); - p = new NpgsqlParameter { Value = valueFactory(), DataTypeName = pgTypeNameWithoutFacetsAndDots }; - cmd.Parameters.Add(p); - errorIdentifier[++errorIdentifierIndex] = $"DataTypeName={pgTypeNameWithoutFacetsAndDots}"; - CheckInference(); - // With DbType if (dbType is not null) { p = new NpgsqlParameter { Value = valueFactory(), DbType = dbType.Value }; cmd.Parameters.Add(p); errorIdentifier[++errorIdentifierIndex] = $"DbType={dbType}"; - CheckInference(); + CheckInference(dbTypeApplied: true); } if (isDefault) @@ -326,13 +327,13 @@ internal static async Task AssertTypeWriteCore( p = new NpgsqlParameter { Value = valueFactory() }; cmd.Parameters.Add(p); errorIdentifier[++errorIdentifierIndex] = $"Value only (type {p.Value!.GetType().Name}, non-generic)"; - CheckInference(valueOnlyInference: true); + CheckInference(valueSolelyApplied: true); // With (generic) value only p = new NpgsqlParameter { TypedValue = valueFactory() }; cmd.Parameters.Add(p); errorIdentifier[++errorIdentifierIndex] = $"Value only (type {p.Value!.GetType().Name}, generic)"; - 
CheckInference(valueOnlyInference: true); + CheckInference(valueSolelyApplied: true); } Debug.Assert(cmd.Parameters.Count == errorIdentifierIndex + 1); @@ -349,20 +350,25 @@ internal static async Task AssertTypeWriteCore( Assert.That(reader[i+1], Is.EqualTo(expectedSqlLiteral), $"Got wrong SQL literal when writing with {errorIdentifier[i / 2]}"); } - void CheckInference(bool valueOnlyInference = false) + void CheckInference(bool dbTypeApplied = false, bool valueSolelyApplied = false) { - if (isNpgsqlDbTypeInferredFromClrType && npgsqlDbType is not null) - { - Assert.That(p.NpgsqlDbType, Is.EqualTo(npgsqlDbType), + if (!valueSolelyApplied || isDataTypeInferredFromClrType) + Assert.That(p.DataTypeName, Is.EqualTo(pgTypeNameWithoutFacetsAndQuotes), + () => $"Got wrong inferred DataTypeName when inferring with {errorIdentifier[errorIdentifierIndex]}"); + + if (!valueSolelyApplied || isDataTypeInferredFromClrType) + Assert.That(p.NpgsqlDbType, Is.EqualTo(npgsqlDbType ?? NpgsqlDbType.Unknown), () => $"Got wrong inferred NpgsqlDbType when inferring with {errorIdentifier[errorIdentifierIndex]}"); - } - Assert.That(p.DbType, Is.EqualTo(valueOnlyInference ? inferredDbType : isNpgsqlDbTypeInferredFromClrType ? inferredDbType : dbType ?? DbType.Object), + DbType expectedDbType; + if (dbTypeApplied) + expectedDbType = dbType.GetValueOrDefault(); + else if (!valueSolelyApplied || isDataTypeInferredFromClrType) + expectedDbType = inferredDbType ?? dbType ?? 
DbType.Object; + else + expectedDbType = DbType.Object; + Assert.That(p.DbType, Is.EqualTo(expectedDbType), () => $"Got wrong inferred DbType when inferring with {errorIdentifier[errorIdentifierIndex]}"); - - if (isNpgsqlDbTypeInferredFromClrType) - Assert.That(p.DataTypeName, Is.EqualTo(pgTypeNameWithoutFacets), - () => $"Got wrong inferred DataTypeName when inferring with {errorIdentifier[errorIdentifierIndex]}"); } } diff --git a/test/Npgsql.Tests/TypeMapperTests.cs b/test/Npgsql.Tests/TypeMapperTests.cs index d0d1e36587..2819c80810 100644 --- a/test/Npgsql.Tests/TypeMapperTests.cs +++ b/test/Npgsql.Tests/TypeMapperTests.cs @@ -1,9 +1,12 @@ using Npgsql.Internal; using NUnit.Framework; using System; +using System.Data; using System.Threading.Tasks; using Npgsql.Internal.Converters; using Npgsql.Internal.Postgres; +using Npgsql.TypeMapping; +using NpgsqlTypes; using static Npgsql.Tests.TestUtil; namespace Npgsql.Tests; @@ -58,6 +61,96 @@ public async Task String_to_citext() Assert.That(command.ExecuteScalar(), Is.True); } + [Test] + [NonParallelizable] // Depends on citext which could be dropped concurrently + public async Task String_to_citext_with_db_type_string() + { + await using var adminConnection = await OpenConnectionAsync(); + await EnsureExtensionAsync(adminConnection, "citext"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + ((INpgsqlTypeMapper)dataSourceBuilder).AddDbTypeResolverFactory(new ForceStringToCitextResolverFactory()); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + await using var command = new NpgsqlCommand("SELECT @p = 'hello'::citext", connection); + var parameter = new NpgsqlParameter("p", DbType.String) + { + Value = "HeLLo" + }; + command.Parameters.Add(parameter); + + Assert.That(command.ExecuteScalar(), Is.True); + Assert.That(parameter.DbType, Is.EqualTo(DbType.String)); + Assert.That(parameter.NpgsqlDbType, 
Is.EqualTo(NpgsqlDbType.Citext)); + Assert.That(parameter.DataTypeName, Is.EqualTo("citext")); + } + + [Test] + public async Task Guid_to_custom_type() + { + await using var adminConnection = await OpenConnectionAsync(); + var type = await GetTempTypeName(adminConnection); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.AddTypeInfoResolverFactory(new GuidTextConverterFactory(type)); + ((INpgsqlTypeMapper)dataSourceBuilder).AddDbTypeResolverFactory(new GuidTextDbTypeResolverFactory(type)); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + await connection.ExecuteNonQueryAsync($"CREATE TYPE {type}"); + await connection.ExecuteNonQueryAsync($""" + -- Input: cstring -> Custom type + CREATE FUNCTION {type}_in(cstring) + RETURNS {type} + AS 'textin' + LANGUAGE internal IMMUTABLE STRICT; + + -- Output: Custom type -> cstring + CREATE FUNCTION {type}_out({type}) + RETURNS cstring + AS 'textout' + LANGUAGE internal IMMUTABLE STRICT; + + -- 3️⃣ Create wrappers for binary I/O + CREATE FUNCTION {type}_recv(internal) + RETURNS {type} + AS 'textrecv' + LANGUAGE internal IMMUTABLE STRICT; + + CREATE FUNCTION {type}_send({type}) + RETURNS bytea + AS 'textsend' + LANGUAGE internal IMMUTABLE STRICT; + """); + + await connection.ExecuteNonQueryAsync($""" + CREATE TYPE {type} ( + internallength = variable, + input = {type}_in, + output = {type}_out, + receive = {type}_recv, + send = {type}_send, + alignment = int4 + ); + CREATE CAST ({type} AS text) WITH INOUT AS IMPLICIT; + """); + await connection.ReloadTypesAsync(); + + var guid = Guid.NewGuid(); + await using var command = new NpgsqlCommand($"SELECT @p::text = '{guid}'", connection); + var parameter = new NpgsqlParameter("p", DbType.Guid) + { + Value = guid + }; + command.Parameters.Add(parameter); + + Assert.That(command.ExecuteScalar(), Is.True); + Assert.That(parameter.DbType, Is.EqualTo(DbType.Guid)); + 
Assert.That(parameter.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Unknown)); + Assert.That(parameter.DataTypeName, Is.EqualTo(type)); + } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4582")] [NonParallelizable] // Drops extension public async Task Type_in_non_default_schema() @@ -109,6 +202,81 @@ sealed class Resolver : IPgTypeInfoResolver } + class ForceStringToCitextResolverFactory : DbTypeResolverFactory + { + public override IDbTypeResolver CreateDbTypeResolver(NpgsqlDatabaseInfo databaseInfo) => new DbTypeResolver(); + + sealed class DbTypeResolver : IDbTypeResolver + { + public string? GetDataTypeName(DbType dbType, Type? type) + { + if (dbType == DbType.String) + return "citext"; + + return null; + } + + public DbType? GetDbType(DataTypeName dataTypeName) + { + if (dataTypeName.UnqualifiedName == "citext") + return DbType.String; + + return null; + } + } + } + + class GuidTextConverterFactory(string typeName) : PgTypeInfoResolverFactory + { + public override IPgTypeInfoResolver? CreateArrayResolver() => null; + public override IPgTypeInfoResolver CreateResolver() => new GuidTextTypeInfoResolver(typeName); + + sealed class GuidTextTypeInfoResolver(string typeName) : IPgTypeInfoResolver + { + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + { + if (type == typeof(Guid) || dataTypeName?.UnqualifiedName == typeName) + if (options.DatabaseInfo.TryGetPostgresTypeByName(typeName, out var pgType)) + return new(options, new GuidTextConverter(options.TextEncoding), options.ToCanonicalTypeId(pgType)); + + return null; + } + } + + sealed class GuidTextConverter(System.Text.Encoding encoding) : StringBasedTextConverter(encoding) + { + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.None; + return format is DataFormat.Text; + } + protected override Guid ConvertFrom(string value) => Guid.Parse(value); + protected override ReadOnlyMemory ConvertTo(Guid value) => value.ToString().AsMemory(); + } + } + + class GuidTextDbTypeResolverFactory(string typeName) : DbTypeResolverFactory + { + public override IDbTypeResolver CreateDbTypeResolver(NpgsqlDatabaseInfo databaseInfo) => new DbTypeResolver(typeName); + + sealed class DbTypeResolver(string typeName) : IDbTypeResolver + { + public string? GetDataTypeName(DbType dbType, Type? type) + { + if (dbType == DbType.Guid) + return typeName; + return null; + } + + public DbType? 
GetDbType(DataTypeName dataTypeName) + { + if (dataTypeName == typeName) + return DbType.Guid; + return null; + } + } + } + enum Mood { Sad, Ok, Happy } #endregion Support diff --git a/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs b/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs index 7a4876e47c..147f7f1be9 100644 --- a/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs +++ b/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs @@ -7,9 +7,9 @@ namespace Npgsql.Tests.Types; -[TestFixture(true)] -#if DEBUG [TestFixture(false)] +#if DEBUG +[TestFixture(true)] [NonParallelizable] #endif public sealed class DateTimeInfinityTests : TestBase, IDisposable @@ -70,7 +70,7 @@ public Task TimestampTz_DateTime(DateTime dateTime, string sqlLiteral, string in => AssertType(new(dateTime.Ticks, DateTimeKind.Utc), DisableDateTimeInfinityConversions ? sqlLiteral : infinityConvertedSqlLiteral, "timestamp with time zone", NpgsqlDbType.TimestampTz, DbType.DateTime, DbType.DateTime, comparer: MaxValuePrecisionLenientComparer, - isDefault: true, isNpgsqlDbTypeInferredFromClrType: false); + isDefault: true); [Test, TestCaseSource(nameof(TimestampTzDateTimeOffsetValues))] public Task TimestampTz_DateTimeOffset(DateTimeOffset dateTime, string sqlLiteral, string infinityConvertedSqlLiteral) diff --git a/test/Npgsql.Tests/Types/MiscTypeTests.cs b/test/Npgsql.Tests/Types/MiscTypeTests.cs index 7f1fe7c0ba..143e3d0f07 100644 --- a/test/Npgsql.Tests/Types/MiscTypeTests.cs +++ b/test/Npgsql.Tests/Types/MiscTypeTests.cs @@ -198,13 +198,4 @@ public async Task Void() await using var conn = await OpenConnectionAsync(); Assert.That(await conn.ExecuteScalarAsync("SELECT pg_sleep(0)"), Is.SameAs(null)); } - - [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1364")] - public async Task Unsupported_DbType() - { - await using var conn = await OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT @p", conn); - Assert.That(() => cmd.Parameters.Add(new NpgsqlParameter("p", 
DbType.UInt32) { Value = 8u }), - Throws.Exception.TypeOf()); - } } From 70ad2946a27f607a06ff4d030955e72004e337ef Mon Sep 17 00:00:00 2001 From: Etienne Lafarge Date: Tue, 18 Nov 2025 20:44:27 +0100 Subject: [PATCH 634/761] Fix idle/busy conn. pool metrics when using NpgsqlDataSource (#5497) Fixes #4798 --- src/Npgsql/NpgsqlConnection.cs | 15 +--- src/Npgsql/NpgsqlDataSource.cs | 8 ++ src/Npgsql/NpgsqlEventSource.cs | 130 ++++++++++++++++++++++++-------- 3 files changed, 108 insertions(+), 45 deletions(-) diff --git a/src/Npgsql/NpgsqlConnection.cs b/src/Npgsql/NpgsqlConnection.cs index 6bda4f71d5..be827cc51b 100644 --- a/src/Npgsql/NpgsqlConnection.cs +++ b/src/Npgsql/NpgsqlConnection.cs @@ -220,20 +220,7 @@ void SetupDataSource() _cloningInstantiator = s => new NpgsqlConnection(s); _dataSource = PoolManager.Pools.GetOrAdd(canonical, newDataSource); - if (_dataSource == newDataSource) - { - Debug.Assert(_dataSource is not MultiHostDataSourceWrapper); - // If the pool we created was the one that ended up being stored we need to increment the appropriate counter. - // Avoids a race condition where multiple threads will create a pool but only one will be stored. 
- if (_dataSource is NpgsqlMultiHostDataSource multiHostConnectorPool) - foreach (var hostPool in multiHostConnectorPool.Pools) - NpgsqlEventSource.Log.DataSourceCreated(hostPool); - else - { - NpgsqlEventSource.Log.DataSourceCreated(newDataSource); - } - } - else + if (_dataSource != newDataSource) newDataSource.Dispose(); // If this is a multi-host data source and the user specified a TargetSessionAttributes, create a wrapper in front of the diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index 000d3f1ae3..78d0ca95c8 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -92,6 +92,7 @@ private protected readonly Dictionary> _pendi readonly SemaphoreSlim _setupMappingsSemaphore = new(1); readonly INpgsqlNameTranslator _defaultNameTranslator; + IDisposable? _eventSourceEvents; internal NpgsqlDataSource( NpgsqlConnectionStringBuilder settings, @@ -314,6 +315,10 @@ internal async Task Bootstrap( serializerOptions: serializerOptions, dbTypeResolver: new ChainDbTypeResolver(resolvers)); + if (!NpgsqlEventSource.Log.TryTrackDataSource(Name, this, out _eventSourceEvents)) + _connectionLogger.LogDebug("NpgsqlEventSource could not start tracking a DataSource, " + + "this can happen if more than one data source uses the same connection string."); + IsBootstrapped = true; } finally @@ -519,6 +524,8 @@ protected virtual void DisposeBase() _periodicPasswordProviderTimer?.Dispose(); MetricsReporter.Dispose(); + _eventSourceEvents?.Dispose(); + // We do not dispose _setupMappingsSemaphore explicitly, leaving it to finalizer // Due to possible concurrent access, which might lead to deadlock // See issue #6115 @@ -549,6 +556,7 @@ protected virtual async ValueTask DisposeAsyncBase() await _periodicPasswordProviderTimer.DisposeAsync().ConfigureAwait(false); MetricsReporter.Dispose(); + _eventSourceEvents?.Dispose(); // We do not dispose _setupMappingsSemaphore explicitly, leaving it to finalizer // Due to possible concurrent 
access, which might lead to deadlock // See issue #6115 diff --git a/src/Npgsql/NpgsqlEventSource.cs b/src/Npgsql/NpgsqlEventSource.cs index 82475142d2..00cfc1ed31 100644 --- a/src/Npgsql/NpgsqlEventSource.cs +++ b/src/Npgsql/NpgsqlEventSource.cs @@ -1,14 +1,19 @@ using System; -using System.Collections.Generic; +using System.Collections.Concurrent; using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; using System.Threading; using System.Diagnostics.Tracing; +using System.Runtime.CompilerServices; namespace Npgsql; sealed class NpgsqlEventSource : EventSource { public static readonly NpgsqlEventSource Log = new(); + // A static to keep the CWT values from making themselves uncollectable if they would have a reference through the + // NpgsqlEventSource instance to the CWT table, which they would if this was an instance field. + static readonly NpgsqlEventSourceDataSources DataSourceEvents = new(Log); const string EventSourceName = "Npgsql"; @@ -25,8 +30,6 @@ sealed class NpgsqlEventSource : EventSource PollingCounter? _preparedCommandsRatioCounter; PollingCounter? _poolsCounter; - readonly object _dataSourcesLock = new(); - readonly Dictionary _dataSources = new(); PollingCounter? _multiplexingAverageCommandsPerBatchCounter; PollingCounter? 
_multiplexingAverageWriteTimePerBatchCounter; @@ -64,7 +67,7 @@ internal void BytesRead(long bytesRead) Interlocked.Add(ref _bytesRead, bytesRead); } - public void CommandStart(string sql) + internal void CommandStart(string sql) { if (IsEnabled()) { @@ -74,7 +77,7 @@ public void CommandStart(string sql) NpgsqlSqlEventSource.Log.CommandStart(sql); } - public void CommandStop() + internal void CommandStop() { if (IsEnabled()) Interlocked.Decrement(ref _currentCommands); @@ -93,13 +96,8 @@ internal void CommandFailed() Interlocked.Increment(ref _failedCommands); } - internal void DataSourceCreated(NpgsqlDataSource dataSource) - { - lock (_dataSourcesLock) - { - _dataSources.Add(dataSource, null); - } - } + internal bool TryTrackDataSource(string name, NpgsqlDataSource dataSource, [NotNullWhen(true)]out IDisposable? untrack) + => DataSourceEvents.TryTrack(name, dataSource, out untrack); internal void MultiplexingBatchSent(int numCommands, long elapsedTicks) { @@ -112,13 +110,7 @@ internal void MultiplexingBatchSent(int numCommands, long elapsedTicks) } } - double GetDataSourceCount() - { - lock (_dataSourcesLock) - { - return _dataSources.Count; - } - } + double GetDataSourceCount() => DataSourceEvents.GetDataSourceCount(); double GetMultiplexingAverageCommandsPerBatch() { @@ -142,7 +134,7 @@ double GetMultiplexingAverageWriteTimePerBatch() protected override void OnEventCommand(EventCommandEventArgs command) { - if (command.Command == EventCommand.Enable) + if (command.Command is EventCommand.Enable) { // Comment taken from RuntimeEventSource in CoreCLR // NOTE: These counters will NOT be disposed on disable command because we may be introducing @@ -207,18 +199,94 @@ protected override void OnEventCommand(EventCommandEventArgs command) DisplayName = "Average write time per multiplexing batch", DisplayUnits = "us" }; - lock (_dataSourcesLock) + + DataSourceEvents.EnableAll(); + } + } +} + +// This is a separate class to avoid accidentally making the CWT instance 
reachable through the value. +// The EventSource is stored in the counters, part of the value, so the EventSource *must not* reference this instance on an instance field. +// This goes for any state captured by the value, which is why the other state has its own object for the value to reference. +// See https://github.com/dotnet/runtime/issues/12255. +sealed class NpgsqlEventSourceDataSources(EventSource eventSource) +{ + readonly ConditionalWeakTable> _dataSources = new(); + readonly StrongBox<(int DataSourceCount, ConcurrentDictionary DataSourceNames)> _nonCwtState = new((0, new())); + + internal double GetDataSourceCount() => _nonCwtState.Value.DataSourceCount; + + internal bool TryTrack(string name, NpgsqlDataSource dataSource, [NotNullWhen(true)]out IDisposable? untrack) + { + untrack = null; + if (!_nonCwtState.Value.DataSourceNames.TryAdd(name, default)) + return false; + + var lazy = new Lazy( + () => new DataSourceEvents(name: name, dataSource, eventSource, _nonCwtState), + LazyThreadSafetyMode.ExecutionAndPublication); + var tracked = _dataSources.TryAdd(dataSource, lazy); + + if (tracked) + { + Interlocked.Increment(ref _nonCwtState.Value.DataSourceCount); + // We must initialize directly when the event source is already enabled. 
+ if (eventSource.IsEnabled()) + untrack = lazy.Value; + else + untrack = new DataSourceEventsDisposable(lazy); + } + + return tracked; + } + + internal void EnableAll() + { + foreach (var dataSourceKv in _dataSources) + { + _ = dataSourceKv.Value.Value; + } + } + + sealed class DataSourceEventsDisposable(Lazy events) : IDisposable + { + public void Dispose() => events.Value.Dispose(); + } + + sealed class DataSourceEvents : IDisposable + { + readonly string _name; + readonly StrongBox<(int Count, ConcurrentDictionary Names)> _state; + readonly PollingCounter _idleConnections; + readonly PollingCounter _busyConnections; + + int _disposed; + + public DataSourceEvents(string name, NpgsqlDataSource dataSource, EventSource eventSource, StrongBox<(int, ConcurrentDictionary)> state) + { + _name = name; + _state = state; + _idleConnections = new($"idle-connections-{name}", eventSource, () => dataSource.Statistics.Idle) + { + DisplayName = $"Idle Connections [{name}]" + }; + _busyConnections = new($"busy-connections-{name}", eventSource, () => dataSource.Statistics.Busy) { - foreach (var dataSource in _dataSources.Keys) - { - if (!_dataSources[dataSource].HasValue) - { - _dataSources[dataSource] = ( - new PollingCounter($"Idle Connections ({dataSource.Settings.ToStringWithoutPassword()}])", this, () => dataSource.Statistics.Idle), - new PollingCounter($"Busy Connections ({dataSource.Settings.ToStringWithoutPassword()}])", this, () => dataSource.Statistics.Busy)); - } - } - } + DisplayName = $"Busy Connections [{name}]" + }; + } + + public void Dispose() + { + if (Interlocked.Exchange(ref _disposed, 1) is 1) + return; + + _idleConnections.Dispose(); + _busyConnections.Dispose(); + + Interlocked.Decrement(ref _state.Value.Count); + var success = _state.Value.Names.TryRemove(_name, out _); + Debug.Assert(success); } } } From 302af43fb196d3cfc2d379857d65b484e19c6309 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Wed, 19 Nov 2025 15:16:29 +0300 Subject: [PATCH 635/761] Add 
Timeout property to text COPY operations (#6294) Closes #5758 --- src/Npgsql/Internal/NpgsqlConnector.cs | 4 ++-- src/Npgsql/NpgsqlCommand.cs | 10 --------- src/Npgsql/NpgsqlConnection.cs | 12 +++++------ src/Npgsql/NpgsqlRawCopyStream.cs | 28 ++++++++++++++++++++++++++ src/Npgsql/PublicAPI.Unshipped.txt | 12 +++++++++++ 5 files changed, 48 insertions(+), 18 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index bf6f97c1d9..c7508ac2e6 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -2833,11 +2833,11 @@ UserAction DoStartUserAction(ConnectorState newState, NpgsqlCommand? command, StartCancellableOperation(cancellationToken, attemptPgCancellation); - // We reset the ReadBuffer.Timeout for every user action, so it wouldn't leak from the previous query or action + // We reset the ReadBuffer.Timeout and WriteBuffer.Timeout for every user action, so it wouldn't leak from the previous query or action // For example, we might have successfully cancelled the previous query (so the connection is not broken) // But the next time, we call the Prepare, which doesn't set its own timeout var timeoutSeconds = command?.CommandTimeout ?? Settings.CommandTimeout; - ReadBuffer.Timeout = timeoutSeconds > 0 ? TimeSpan.FromSeconds(timeoutSeconds) : Timeout.InfiniteTimeSpan; + ReadBuffer.Timeout = WriteBuffer.Timeout = timeoutSeconds > 0 ? 
TimeSpan.FromSeconds(timeoutSeconds) : Timeout.InfiniteTimeSpan; return new UserAction(this); } diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index 5f2cb8832f..a83b939a53 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -1041,9 +1041,6 @@ static void ValidateParameterCount(NpgsqlBatchCommand batchCommand) #region Message Creation / Population - void BeginSend(NpgsqlConnector connector) - => connector.WriteBuffer.Timeout = TimeSpan.FromSeconds(CommandTimeout); - internal Task Write(NpgsqlConnector connector, bool async, bool flush, CancellationToken cancellationToken = default) { return (_behavior & CommandBehavior.SchemaOnly) == 0 @@ -1158,8 +1155,6 @@ await connector.WriteParse(batchCommand.FinalCommandText, batchCommand.Statement async Task SendDeriveParameters(NpgsqlConnector connector, bool async, CancellationToken cancellationToken = default) { - BeginSend(connector); - var syncCaller = !async; for (var i = 0; i < InternalBatchCommands.Count; i++) { @@ -1178,8 +1173,6 @@ async Task SendDeriveParameters(NpgsqlConnector connector, bool async, Cancellat async Task SendPrepare(NpgsqlConnector connector, bool async, CancellationToken cancellationToken = default) { - BeginSend(connector); - var syncCaller = !async; for (var i = 0; i < InternalBatchCommands.Count; i++) { @@ -1227,8 +1220,6 @@ bool ShouldSchedule(ref bool async, int indexOfStatementInBatch) async Task SendClose(NpgsqlConnector connector, bool async, CancellationToken cancellationToken = default) { - BeginSend(connector); - foreach (var batchCommand in InternalBatchCommands) { if (!batchCommand.IsPrepared) @@ -1531,7 +1522,6 @@ internal virtual async ValueTask ExecuteReader(bool async, Com // Instead, all sends for non-first statements are performed asynchronously (even if the user requested sync), // in a special synchronization context to prevents a dependency on the thread pool (which would also trigger // deadlocks). 
- BeginSend(connector); sendTask = Write(connector, async, flush: true, CancellationToken.None); // The following is a hack. It raises an exception if one was thrown in the first phases diff --git a/src/Npgsql/NpgsqlConnection.cs b/src/Npgsql/NpgsqlConnection.cs index be827cc51b..ca8b6ba9fe 100644 --- a/src/Npgsql/NpgsqlConnection.cs +++ b/src/Npgsql/NpgsqlConnection.cs @@ -1241,7 +1241,7 @@ async Task BeginBinaryExport(bool async, string copyToComm /// /// See https://www.postgresql.org/docs/current/static/sql-copy.html. /// - public TextWriter BeginTextImport(string copyFromCommand) + public NpgsqlCopyTextWriter BeginTextImport(string copyFromCommand) => BeginTextImport(async: false, copyFromCommand, CancellationToken.None).GetAwaiter().GetResult(); /// @@ -1256,10 +1256,10 @@ public TextWriter BeginTextImport(string copyFromCommand) /// /// See https://www.postgresql.org/docs/current/static/sql-copy.html. /// - public Task BeginTextImportAsync(string copyFromCommand, CancellationToken cancellationToken = default) + public Task BeginTextImportAsync(string copyFromCommand, CancellationToken cancellationToken = default) => BeginTextImport(async: true, copyFromCommand, cancellationToken); - async Task BeginTextImport(bool async, string copyFromCommand, CancellationToken cancellationToken = default) + async Task BeginTextImport(bool async, string copyFromCommand, CancellationToken cancellationToken = default) { ArgumentNullException.ThrowIfNull(copyFromCommand); if (!IsValidCopyCommand(copyFromCommand)) @@ -1307,7 +1307,7 @@ async Task BeginTextImport(bool async, string copyFromCommand, Cance /// /// See https://www.postgresql.org/docs/current/static/sql-copy.html. 
/// - public TextReader BeginTextExport(string copyToCommand) + public NpgsqlCopyTextReader BeginTextExport(string copyToCommand) => BeginTextExport(async: false, copyToCommand, CancellationToken.None).GetAwaiter().GetResult(); /// @@ -1322,10 +1322,10 @@ public TextReader BeginTextExport(string copyToCommand) /// /// See https://www.postgresql.org/docs/current/static/sql-copy.html. /// - public Task BeginTextExportAsync(string copyToCommand, CancellationToken cancellationToken = default) + public Task BeginTextExportAsync(string copyToCommand, CancellationToken cancellationToken = default) => BeginTextExport(async: true, copyToCommand, cancellationToken); - async Task BeginTextExport(bool async, string copyToCommand, CancellationToken cancellationToken = default) + async Task BeginTextExport(bool async, string copyToCommand, CancellationToken cancellationToken = default) { ArgumentNullException.ThrowIfNull(copyToCommand); if (!IsValidCopyCommand(copyToCommand)) diff --git a/src/Npgsql/NpgsqlRawCopyStream.cs b/src/Npgsql/NpgsqlRawCopyStream.cs index 664c39a1b8..d0cfad82a7 100644 --- a/src/Npgsql/NpgsqlRawCopyStream.cs +++ b/src/Npgsql/NpgsqlRawCopyStream.cs @@ -485,6 +485,20 @@ internal NpgsqlCopyTextWriter(NpgsqlConnector connector, NpgsqlRawCopyStream und throw connector.Break(new Exception("Can't use a binary copy stream for text writing")); } + /// + /// Gets or sets a value, in milliseconds, that determines how long the text writer will attempt to write before timing out. + /// + public int Timeout + { + get => ((NpgsqlRawCopyStream)BaseStream).WriteTimeout; + set + { + var stream = (NpgsqlRawCopyStream)BaseStream; + stream.ReadTimeout = value; + stream.WriteTimeout = value; + } + } + /// /// Cancels and terminates an ongoing import. Any data already written will be discarded. 
/// @@ -511,6 +525,20 @@ internal NpgsqlCopyTextReader(NpgsqlConnector connector, NpgsqlRawCopyStream und throw connector.Break(new Exception("Can't use a binary copy stream for text reading")); } + /// + /// Gets or sets a value, in milliseconds, that determines how long the text reader will attempt to read before timing out. + /// + public int Timeout + { + get => ((NpgsqlRawCopyStream)BaseStream).ReadTimeout; + set + { + var stream = (NpgsqlRawCopyStream)BaseStream; + stream.ReadTimeout = value; + stream.WriteTimeout = value; + } + } + /// /// Cancels and terminates an ongoing export. /// diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index 52f47e43d7..b86cac3908 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -5,6 +5,10 @@ Npgsql.GssEncryptionMode.Disable = 0 -> Npgsql.GssEncryptionMode Npgsql.GssEncryptionMode.Prefer = 1 -> Npgsql.GssEncryptionMode Npgsql.GssEncryptionMode.Require = 2 -> Npgsql.GssEncryptionMode Npgsql.TypeMapping.INpgsqlTypeMapper.AddDbTypeResolverFactory(Npgsql.Internal.DbTypeResolverFactory! factory) -> void +Npgsql.NpgsqlConnection.BeginTextExport(string! copyToCommand) -> Npgsql.NpgsqlCopyTextReader! +Npgsql.NpgsqlConnection.BeginTextExportAsync(string! copyToCommand, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +Npgsql.NpgsqlConnection.BeginTextImport(string! copyFromCommand) -> Npgsql.NpgsqlCopyTextWriter! +Npgsql.NpgsqlConnection.BeginTextImportAsync(string! copyFromCommand, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! Npgsql.NpgsqlConnection.CloneWithAsync(string! 
connectionString, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.ValueTask Npgsql.NpgsqlConnection.SslClientAuthenticationOptionsCallback.get -> System.Action? Npgsql.NpgsqlConnection.SslClientAuthenticationOptionsCallback.set -> void @@ -16,6 +20,10 @@ Npgsql.NpgsqlConnectionStringBuilder.RequireAuth.get -> string? Npgsql.NpgsqlConnectionStringBuilder.RequireAuth.set -> void Npgsql.NpgsqlConnectionStringBuilder.SslNegotiation.get -> Npgsql.SslNegotiation Npgsql.NpgsqlConnectionStringBuilder.SslNegotiation.set -> void +Npgsql.NpgsqlCopyTextReader.Timeout.get -> int +Npgsql.NpgsqlCopyTextReader.Timeout.set -> void +Npgsql.NpgsqlCopyTextWriter.Timeout.get -> int +Npgsql.NpgsqlCopyTextWriter.Timeout.set -> void Npgsql.NpgsqlDataSourceBuilder.ConfigureTypeLoading(System.Action! configureAction) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.MapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlDataSourceBuilder! @@ -123,3 +131,7 @@ NpgsqlTypes.NpgsqlLine.Deconstruct(out double a, out double b, out double c) -> NpgsqlTypes.NpgsqlLSeg.Deconstruct(out NpgsqlTypes.NpgsqlPoint start, out NpgsqlTypes.NpgsqlPoint end) -> void NpgsqlTypes.NpgsqlPoint.Deconstruct(out double x, out double y) -> void NpgsqlTypes.NpgsqlTid.Deconstruct(out uint blockNumber, out ushort offsetNumber) -> void +*REMOVED*Npgsql.NpgsqlConnection.BeginTextExport(string! copyToCommand) -> System.IO.TextReader! +*REMOVED*Npgsql.NpgsqlConnection.BeginTextExportAsync(string! copyToCommand, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +*REMOVED*Npgsql.NpgsqlConnection.BeginTextImport(string! 
copyFromCommand) -> System.IO.TextWriter! +*REMOVED*Npgsql.NpgsqlConnection.BeginTextImportAsync(string! copyFromCommand, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! From a8b95649beabcd9e6eb3a19894c6a37ceb97c60a Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Wed, 19 Nov 2025 15:54:07 +0300 Subject: [PATCH 636/761] Fix synchronous GSS session encryption and enable it by default (#6324) Followup to #2957 --- src/Npgsql/Internal/NpgsqlConnector.Auth.cs | 6 +- src/Npgsql/Internal/NpgsqlConnector.cs | 168 ++++++++++++-------- src/Npgsql/NpgsqlConnectionStringBuilder.cs | 2 +- src/Npgsql/Util/GSSStream.cs | 2 +- 4 files changed, 109 insertions(+), 69 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs index 4d5fccbad5..f837f08026 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs @@ -336,7 +336,11 @@ internal async ValueTask AuthenticateGSS(bool async, CancellationToken cancellat using var authContext = new NegotiateAuthentication(clientOptions); var data = authContext.GetOutgoingBlob(ReadOnlySpan.Empty, out var statusCode)!; - Debug.Assert(statusCode == NegotiateAuthenticationStatusCode.ContinueNeeded); + if (statusCode != NegotiateAuthenticationStatusCode.ContinueNeeded) + { + // Unable to retrieve credentials or some other issue + throw new NpgsqlException($"Unable to authenticate with GSS: received {statusCode} instead of the expected ContinueNeeded"); + } await WritePassword(data, 0, data.Length, async, cancellationToken).ConfigureAwait(false); await Flush(async, cancellationToken).ConfigureAwait(false); while (true) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index c7508ac2e6..aa2a996322 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -596,17 +596,27 @@ 
static async Task OpenCore( bool async, CancellationToken cancellationToken) { - await conn.RawOpen(sslMode, gssEncMode, timeout, async, cancellationToken).ConfigureAwait(false); - timeout.CheckAndApply(conn); - conn.WriteStartupMessage(username); - await conn.Flush(async, cancellationToken).ConfigureAwait(false); + // If we fail to connect to the socket, there is no reason to retry even if SslMode/GssEncryption allows it + await conn.RawOpen(timeout, async, cancellationToken).ConfigureAwait(false); - using var cancellationRegistration = conn.StartCancellableOperation(cancellationToken, attemptPgCancellation: false); try { + await conn.SetupEncryption(sslMode, gssEncMode, timeout, async, cancellationToken).ConfigureAwait(false); + timeout.CheckAndApply(conn); + conn.WriteStartupMessage(username); + await conn.Flush(async, cancellationToken).ConfigureAwait(false); + + using var cancellationRegistration = conn.StartCancellableOperation(cancellationToken, attemptPgCancellation: false); await conn.Authenticate(username, timeout, async, cancellationToken).ConfigureAwait(false); } + // We handle any exception here because on Windows while receiving a response from Postgres + // We might hit connection reset, in which case the actual error will be lost + // And we only read some IO error + // In addition, this behavior mimics libpq, where it retries as long as GssEncryptionMode and SslMode allows it catch (Exception e) when + // We might also get here OperationCancelledException/TimeoutException + // But it's fine to fall down and retry because we'll immediately exit with the exact same exception + // // Any error after trying with GSS encryption (gssEncMode == GssEncryptionMode.Prefer || // Auth error with/without SSL @@ -620,9 +630,6 @@ static async Task OpenCore( else sslMode = sslMode == SslMode.Prefer ? 
SslMode.Disable : SslMode.Require; - cancellationRegistration.Dispose(); - Debug.Assert(!conn.IsBroken); - conn.Cleanup(); // If Prefer was specified and we failed (with SSL), retry without SSL. @@ -696,6 +703,8 @@ internal async ValueTask GSSEncrypt(bool async, bool isRequ default: throw new NpgsqlException($"Received unknown response {response} for GSSEncRequest (expecting G or N)"); case 'N': + if (isRequired) + throw new NpgsqlException("GGS encryption requested. No GSS encryption enabled connection from this host is configured."); return GssEncryptionResult.NegotiateFailure; case 'G': break; @@ -907,7 +916,7 @@ async ValueTask GetUsernameAsyncInternal() } } - async Task RawOpen(SslMode sslMode, GssEncryptionMode gssEncryptionMode, NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken) + async Task RawOpen(NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken) { try { @@ -939,59 +948,6 @@ async Task RawOpen(SslMode sslMode, GssEncryptionMode gssEncryptionMode, NpgsqlT IsSslEncrypted = false; IsGssEncrypted = false; - - var gssEncryptResult = await TryNegotiateGssEncryption(gssEncryptionMode, async, cancellationToken).ConfigureAwait(false); - if (gssEncryptResult == GssEncryptionResult.Success) - return; - - timeout.CheckAndApply(this); - - if (GetSslNegotiation(Settings) == SslNegotiation.Direct) - { - // We already check that in NpgsqlConnectionStringBuilder.PostProcessAndValidate, but since we also allow environment variables... 
- if (Settings.SslMode is not SslMode.Require and not SslMode.VerifyCA and not SslMode.VerifyFull) - throw new ArgumentException("SSL Mode has to be Require or higher to be used with direct SSL Negotiation"); - if (gssEncryptResult == GssEncryptionResult.NegotiateFailure) - { - // We can be here only if it's fallback from preferred (but failed) gss encryption - // In this case, direct encryption isn't going to work anymore, so we throw a bogus exception to retry again without gss - // Alternatively, we can instead just go with the usual route of writing SslRequest, ignoring direct ssl - // But this is how libpq works - Debug.Assert(gssEncryptionMode == GssEncryptionMode.Prefer); - // The exception message doesn't matter since we're going to retry again - throw new NpgsqlException(); - } - - await DataSource.TransportSecurityHandler.NegotiateEncryption(async, this, sslMode, timeout, cancellationToken).ConfigureAwait(false); - if (ReadBuffer.ReadBytesLeft > 0) - throw new NpgsqlException("Additional unencrypted data received after SSL negotiation - this should never happen, and may be an indication of a man-in-the-middle attack."); - } - else if ((sslMode is SslMode.Prefer && DataSource.TransportSecurityHandler.SupportEncryption) || - sslMode is SslMode.Require or SslMode.VerifyCA or SslMode.VerifyFull) - { - WriteSslRequest(); - await Flush(async, cancellationToken).ConfigureAwait(false); - - await ReadBuffer.Ensure(1, async).ConfigureAwait(false); - var response = (char)ReadBuffer.ReadByte(); - timeout.CheckAndApply(this); - - switch (response) - { - default: - throw new NpgsqlException($"Received unknown response {response} for SSLRequest (expecting S or N)"); - case 'N': - if (sslMode != SslMode.Prefer) - throw new NpgsqlException("SSL connection requested. 
No SSL enabled connection from this host is configured."); - break; - case 'S': - await DataSource.TransportSecurityHandler.NegotiateEncryption(async, this, sslMode, timeout, cancellationToken).ConfigureAwait(false); - break; - } - - if (ReadBuffer.ReadBytesLeft > 0) - throw new NpgsqlException("Additional unencrypted data received after SSL negotiation - this should never happen, and may be an indication of a man-in-the-middle attack."); - } } catch { @@ -1008,12 +964,80 @@ async Task RawOpen(SslMode sslMode, GssEncryptionMode gssEncryptionMode, NpgsqlT } } + async Task SetupEncryption(SslMode sslMode, GssEncryptionMode gssEncryptionMode, NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken) + { + var gssEncryptResult = await TryNegotiateGssEncryption(gssEncryptionMode, async, cancellationToken).ConfigureAwait(false); + if (gssEncryptResult == GssEncryptionResult.Success) + return; + + // TryNegotiateGssEncryption should already throw a much more meaningful exception + // if GSS encryption is required but for some reason we can't negotiate it. + // But since we have to return a specific result instead of generic true/false + // To make absolutely sure we didn't miss anything, recheck again + if (gssEncryptionMode == GssEncryptionMode.Require) + throw new NpgsqlException($"Unable to negotiate GSS encryption: {gssEncryptResult}"); + + timeout.CheckAndApply(this); + + if (GetSslNegotiation(Settings) == SslNegotiation.Direct) + { + // We already check that in NpgsqlConnectionStringBuilder.PostProcessAndValidate, but since we also allow environment variables... 
+ if (Settings.SslMode is not SslMode.Require and not SslMode.VerifyCA and not SslMode.VerifyFull) + throw new ArgumentException("SSL Mode has to be Require or higher to be used with direct SSL Negotiation"); + if (gssEncryptResult == GssEncryptionResult.NegotiateFailure) + { + // We can be here only if it's fallback from preferred (but failed) gss encryption + // In this case, direct encryption isn't going to work anymore, so we throw a bogus exception to retry again without gss + // Alternatively, we can instead just go with the usual route of writing SslRequest, ignoring direct ssl + // But this is how libpq works + Debug.Assert(gssEncryptionMode == GssEncryptionMode.Prefer); + // The exception message doesn't matter since we're going to retry again + throw new NpgsqlException(); + } + + await DataSource.TransportSecurityHandler.NegotiateEncryption(async, this, sslMode, timeout, cancellationToken).ConfigureAwait(false); + if (ReadBuffer.ReadBytesLeft > 0) + throw new NpgsqlException("Additional unencrypted data received after SSL negotiation - this should never happen, and may be an indication of a man-in-the-middle attack."); + } + else if ((sslMode is SslMode.Prefer && DataSource.TransportSecurityHandler.SupportEncryption) || + sslMode is SslMode.Require or SslMode.VerifyCA or SslMode.VerifyFull) + { + WriteSslRequest(); + await Flush(async, cancellationToken).ConfigureAwait(false); + + await ReadBuffer.Ensure(1, async).ConfigureAwait(false); + var response = (char)ReadBuffer.ReadByte(); + timeout.CheckAndApply(this); + + switch (response) + { + default: + throw new NpgsqlException($"Received unknown response {response} for SSLRequest (expecting S or N)"); + case 'N': + if (sslMode != SslMode.Prefer) + throw new NpgsqlException("SSL connection requested. 
No SSL enabled connection from this host is configured."); + break; + case 'S': + await DataSource.TransportSecurityHandler.NegotiateEncryption(async, this, sslMode, timeout, cancellationToken).ConfigureAwait(false); + break; + } + + if (ReadBuffer.ReadBytesLeft > 0) + throw new NpgsqlException("Additional unencrypted data received after SSL negotiation - this should never happen, and may be an indication of a man-in-the-middle attack."); + } + } + async ValueTask TryNegotiateGssEncryption(GssEncryptionMode gssEncryptionMode, bool async, CancellationToken cancellationToken) { // GetCredentialFailure is essentially a nop (since we didn't send anything over the wire) // So we can proceed further as if gss encryption wasn't even attempted if (gssEncryptionMode == GssEncryptionMode.Disable) return GssEncryptionResult.GetCredentialFailure; + // Same thing as above, though in this case user doesn't require GSS encryption but didn't enable encryption + // Most of the time they're using the default value, in which case also exit without throwing an error + if (gssEncryptionMode == GssEncryptionMode.Prefer && !DataSource.TransportSecurityHandler.SupportEncryption) + return GssEncryptionResult.GetCredentialFailure; + if (ConnectedEndPoint!.AddressFamily == AddressFamily.Unix) { if (gssEncryptionMode == GssEncryptionMode.Prefer) @@ -1038,7 +1062,9 @@ static SslNegotiation GetSslNegotiation(NpgsqlConnectionStringBuilder settings) return sslNegotiation; } - return SslNegotiation.Postgres; + // If user hasn't provided the value via connection string or environment variable + // Retrieve the default value from property + return settings.SslNegotiation; } static GssEncryptionMode GetGssEncMode(NpgsqlConnectionStringBuilder settings) @@ -1052,7 +1078,9 @@ static GssEncryptionMode GetGssEncMode(NpgsqlConnectionStringBuilder settings) return gssEncMode; } - return GssEncryptionMode.Disable; + // If user hasn't provided the value via connection string or environment variable + // 
Retrieve the default value from property + return settings.GssEncryptionMode; } internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken) @@ -2187,9 +2215,13 @@ void DoCancelRequest(int backendProcessId, int backendSecretKey) { try { - RawOpen(Settings.SslMode, gssEncMode, new NpgsqlTimeout(TimeSpan.FromSeconds(ConnectionTimeout)), false, + var timeout = new NpgsqlTimeout(TimeSpan.FromSeconds(ConnectionTimeout)); + RawOpen(timeout, false, CancellationToken.None) .GetAwaiter().GetResult(); + SetupEncryption(Settings.SslMode, gssEncMode, timeout, false, + CancellationToken.None). + GetAwaiter().GetResult(); } catch (Exception e) when (gssEncMode == GssEncryptionMode.Prefer) { @@ -2198,9 +2230,13 @@ void DoCancelRequest(int backendProcessId, int backendSecretKey) // If we hit an error with gss encryption // Retry again without it - RawOpen(Settings.SslMode, GssEncryptionMode.Disable, new NpgsqlTimeout(TimeSpan.FromSeconds(ConnectionTimeout)), false, + var timeout = new NpgsqlTimeout(TimeSpan.FromSeconds(ConnectionTimeout)); + RawOpen(timeout, false, CancellationToken.None) .GetAwaiter().GetResult(); + SetupEncryption(Settings.SslMode, GssEncryptionMode.Disable, timeout, false, + CancellationToken.None). + GetAwaiter().GetResult(); } WriteCancelRequest(backendProcessId, backendSecretKey); diff --git a/src/Npgsql/NpgsqlConnectionStringBuilder.cs b/src/Npgsql/NpgsqlConnectionStringBuilder.cs index ca0d734c5f..1e1e87107d 100644 --- a/src/Npgsql/NpgsqlConnectionStringBuilder.cs +++ b/src/Npgsql/NpgsqlConnectionStringBuilder.cs @@ -488,7 +488,7 @@ public SslNegotiation SslNegotiation [NpgsqlConnectionStringProperty] public GssEncryptionMode GssEncryptionMode { - get => UserProvidedGssEncMode ?? GssEncryptionMode.Disable; + get => UserProvidedGssEncMode ?? 
GssEncryptionMode.Prefer; set { UserProvidedGssEncMode = value; diff --git a/src/Npgsql/Util/GSSStream.cs b/src/Npgsql/Util/GSSStream.cs index c6c47bd4ca..4f98a1d1fa 100644 --- a/src/Npgsql/Util/GSSStream.cs +++ b/src/Npgsql/Util/GSSStream.cs @@ -59,7 +59,7 @@ public override void Write(ReadOnlySpan buffer) Unsafe.WriteUnaligned(ref _writeLengthBuffer[0], BitConverter.IsLittleEndian ? BinaryPrimitives.ReverseEndianness(written.Length) : written.Length); _stream.Write(_writeLengthBuffer); - _stream.Write(buffer.Slice(start, lengthToWrite)); + _stream.Write(_writeBuffer.WrittenMemory.Span); _writeBuffer.ResetWrittenCount(); start += lengthToWrite; From fc0a675c67130a77418d5d52e9d266c0afb3597c Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Wed, 19 Nov 2025 17:56:46 +0100 Subject: [PATCH 637/761] Move TryTrackDataSource to constructor and enable metrics more accurately (#6329) --- src/Npgsql/MetricsReporter.cs | 24 +++++++-------- src/Npgsql/MultiHostDataSourceWrapper.cs | 2 +- src/Npgsql/NpgsqlDataSource.cs | 38 +++++++++++++++++------- src/Npgsql/NpgsqlMultiHostDataSource.cs | 5 ++-- src/Npgsql/PoolingDataSource.cs | 2 +- src/Npgsql/UnpooledDataSource.cs | 2 +- 6 files changed, 42 insertions(+), 31 deletions(-) diff --git a/src/Npgsql/MetricsReporter.cs b/src/Npgsql/MetricsReporter.cs index a25a3173cb..71b43923fe 100644 --- a/src/Npgsql/MetricsReporter.cs +++ b/src/Npgsql/MetricsReporter.cs @@ -178,20 +178,16 @@ static IEnumerable> GetConnectionUsage() { var reporter = Reporters[i]; - if (reporter._dataSource is PoolingDataSource poolingDataSource) - { - var stats = poolingDataSource.Statistics; - - measurements.Add(new Measurement( - stats.Idle, - reporter._poolNameTag, - new KeyValuePair("state", "idle"))); - - measurements.Add(new Measurement( - stats.Busy, - reporter._poolNameTag, - new KeyValuePair("state", "used"))); - } + var connectionStats = reporter._dataSource.Statistics; + measurements.Add(new Measurement( + connectionStats.Idle, + 
reporter._poolNameTag, + new KeyValuePair("state", "idle"))); + + measurements.Add(new Measurement( + connectionStats.Busy, + reporter._poolNameTag, + new KeyValuePair("state", "used"))); } return measurements; diff --git a/src/Npgsql/MultiHostDataSourceWrapper.cs b/src/Npgsql/MultiHostDataSourceWrapper.cs index b6b7d3e5f5..432875ae67 100644 --- a/src/Npgsql/MultiHostDataSourceWrapper.cs +++ b/src/Npgsql/MultiHostDataSourceWrapper.cs @@ -8,7 +8,7 @@ namespace Npgsql; sealed class MultiHostDataSourceWrapper(NpgsqlMultiHostDataSource wrappedSource, TargetSessionAttributes targetSessionAttributes) - : NpgsqlDataSource(CloneSettingsForTargetSessionAttributes(wrappedSource.Settings, targetSessionAttributes), wrappedSource.Configuration) + : NpgsqlDataSource(CloneSettingsForTargetSessionAttributes(wrappedSource.Settings, targetSessionAttributes), wrappedSource.Configuration, reportMetrics: false) { internal NpgsqlMultiHostDataSource WrappedSource { get; } = wrappedSource; diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index 78d0ca95c8..280b32c128 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -92,11 +92,11 @@ private protected readonly Dictionary> _pendi readonly SemaphoreSlim _setupMappingsSemaphore = new(1); readonly INpgsqlNameTranslator _defaultNameTranslator; - IDisposable? _eventSourceEvents; + readonly IDisposable? _eventSourceEvents; internal NpgsqlDataSource( NpgsqlConnectionStringBuilder settings, - NpgsqlDataSourceConfiguration dataSourceConfig) + NpgsqlDataSourceConfiguration dataSourceConfig, bool reportMetrics) { Settings = settings; ConnectionString = settings.PersistSecurityInfo @@ -145,7 +145,21 @@ internal NpgsqlDataSource( } Name = name ?? ConnectionString; - MetricsReporter = new MetricsReporter(this); + + // TODO this needs a rework, but for now we just avoid tracking multi-host data sources directly. 
+ if (reportMetrics) + { + MetricsReporter = new MetricsReporter(this); + if (!NpgsqlEventSource.Log.TryTrackDataSource(Name, this, out _eventSourceEvents)) + _connectionLogger.LogDebug("NpgsqlEventSource could not start tracking a DataSource, " + + "this can happen if more than one data source uses the same connection string."); + } + else + { + // This is not accessed anywhere currently for multi-host data sources. + // Connectors which handle the metrics always access their nonpooling/pooling data source instead. + MetricsReporter = null!; + } } /// @@ -315,10 +329,6 @@ internal async Task Bootstrap( serializerOptions: serializerOptions, dbTypeResolver: new ChainDbTypeResolver(resolvers)); - if (!NpgsqlEventSource.Log.TryTrackDataSource(Name, this, out _eventSourceEvents)) - _connectionLogger.LogDebug("NpgsqlEventSource could not start tracking a DataSource, " + - "this can happen if more than one data source uses the same connection string."); - IsBootstrapped = true; } finally @@ -523,8 +533,11 @@ protected virtual void DisposeBase() } _periodicPasswordProviderTimer?.Dispose(); - MetricsReporter.Dispose(); - _eventSourceEvents?.Dispose(); + if (MetricsReporter is not null) + { + MetricsReporter.Dispose(); + _eventSourceEvents?.Dispose(); + } // We do not dispose _setupMappingsSemaphore explicitly, leaving it to finalizer // Due to possible concurrent access, which might lead to deadlock @@ -555,8 +568,11 @@ protected virtual async ValueTask DisposeAsyncBase() if (_periodicPasswordProviderTimer is not null) await _periodicPasswordProviderTimer.DisposeAsync().ConfigureAwait(false); - MetricsReporter.Dispose(); - _eventSourceEvents?.Dispose(); + if (MetricsReporter is not null) + { + MetricsReporter.Dispose(); + _eventSourceEvents?.Dispose(); + } // We do not dispose _setupMappingsSemaphore explicitly, leaving it to finalizer // Due to possible concurrent access, which might lead to deadlock // See issue #6115 diff --git a/src/Npgsql/NpgsqlMultiHostDataSource.cs 
b/src/Npgsql/NpgsqlMultiHostDataSource.cs index 5d21ab8954..4ccc0809b5 100644 --- a/src/Npgsql/NpgsqlMultiHostDataSource.cs +++ b/src/Npgsql/NpgsqlMultiHostDataSource.cs @@ -4,7 +4,6 @@ using System.Collections.Generic; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; -using System.Linq; using System.Threading; using System.Threading.Tasks; using System.Transactions; @@ -31,7 +30,7 @@ public sealed class NpgsqlMultiHostDataSource : NpgsqlDataSource volatile int _roundRobinIndex = -1; internal NpgsqlMultiHostDataSource(NpgsqlConnectionStringBuilder settings, NpgsqlDataSourceConfiguration dataSourceConfig) - : base(settings, dataSourceConfig) + : base(settings, dataSourceConfig, reportMetrics: false) { var hosts = settings.Host!.Split(','); _pools = new NpgsqlDataSource[hosts.Length]; @@ -53,7 +52,7 @@ internal NpgsqlMultiHostDataSource(NpgsqlConnectionStringBuilder settings, Npgsq : new UnpooledDataSource(poolSettings, dataSourceConfig); } - var targetSessionAttributeValues = Enum.GetValues().ToArray(); + var targetSessionAttributeValues = Enum.GetValues(); var highestValue = 0; foreach (var value in targetSessionAttributeValues) if ((int)value > highestValue) diff --git a/src/Npgsql/PoolingDataSource.cs b/src/Npgsql/PoolingDataSource.cs index b2e96d0d4c..18ddc1e63f 100644 --- a/src/Npgsql/PoolingDataSource.cs +++ b/src/Npgsql/PoolingDataSource.cs @@ -74,7 +74,7 @@ internal sealed override (int Total, int Idle, int Busy) Statistics internal PoolingDataSource( NpgsqlConnectionStringBuilder settings, NpgsqlDataSourceConfiguration dataSourceConfig) - : base(settings, dataSourceConfig) + : base(settings, dataSourceConfig, reportMetrics: true) { if (settings.MaxPoolSize < settings.MinPoolSize) throw new ArgumentException($"Connection can't have 'Max Pool Size' {settings.MaxPoolSize} under 'Min Pool Size' {settings.MinPoolSize}"); diff --git a/src/Npgsql/UnpooledDataSource.cs b/src/Npgsql/UnpooledDataSource.cs index e801f537eb..55ce5d65af 100644 --- 
a/src/Npgsql/UnpooledDataSource.cs +++ b/src/Npgsql/UnpooledDataSource.cs @@ -7,7 +7,7 @@ namespace Npgsql; sealed class UnpooledDataSource(NpgsqlConnectionStringBuilder settings, NpgsqlDataSourceConfiguration dataSourceConfig) - : NpgsqlDataSource(settings, dataSourceConfig) + : NpgsqlDataSource(settings, dataSourceConfig, reportMetrics: true) { volatile int _numConnectors; From 90f699bb8b0024ec2f0bf648a7106b0104717a43 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Ros?= Date: Wed, 19 Nov 2025 10:05:44 -0800 Subject: [PATCH 638/761] Define TFM-specific dependencies (#6326) Co-authored-by: Shay Rojansky Co-authored-by: Nino Floris --- Directory.Packages.props | 44 +++++++++++++++---- .../Npgsql.DependencyInjection.csproj | 2 +- src/Npgsql/Npgsql.csproj | 2 + test/Npgsql.Tests/Types/JsonTests.cs | 5 +++ 4 files changed, 44 insertions(+), 9 deletions(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index f8a9e16cd0..d15be7ff34 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -1,11 +1,39 @@ + + 10.0.0 + 10.0.0 + + + 10.0.0 + 10.0.0 + + + + 9.0.0 + 9.0.0 + + + 9.0.11 + 9.0.11 + + + + 8.0.0 + 8.0.0 + + + 9.0.11 + + + 8.0.1 + 8.0.1 + + - - + + - - - + @@ -24,15 +52,15 @@ - - + + + - diff --git a/src/Npgsql.DependencyInjection/Npgsql.DependencyInjection.csproj b/src/Npgsql.DependencyInjection/Npgsql.DependencyInjection.csproj index aa33763975..bf502446e1 100644 --- a/src/Npgsql.DependencyInjection/Npgsql.DependencyInjection.csproj +++ b/src/Npgsql.DependencyInjection/Npgsql.DependencyInjection.csproj @@ -2,7 +2,7 @@ Shay Rojansky - net8.0 + net8.0 npgsql;postgresql;postgres;ado;ado.net;database;sql;di;dependency injection README.md diff --git a/src/Npgsql/Npgsql.csproj b/src/Npgsql/Npgsql.csproj index 0eab75cd66..01aaa5013d 100644 --- a/src/Npgsql/Npgsql.csproj +++ b/src/Npgsql/Npgsql.csproj @@ -15,6 +15,8 @@ + + diff --git a/test/Npgsql.Tests/Types/JsonTests.cs b/test/Npgsql.Tests/Types/JsonTests.cs index 
84b95389bb..9cfb07de8c 100644 --- a/test/Npgsql.Tests/Types/JsonTests.cs +++ b/test/Npgsql.Tests/Types/JsonTests.cs @@ -182,7 +182,12 @@ public Task Roundtrip_JsonObject() [Test] public Task Roundtrip_JsonArray() => AssertType( +#if NET8_0 + // Necessary until we drop STJ 8.0, see https://github.com/dotnet/runtime/pull/103733 + new JsonArray { (JsonValue)1, (JsonValue)2, (JsonValue)3 }, +#else new JsonArray { 1, 2, 3 }, +#endif IsJsonb ? "[1, 2, 3]" : "[1,2,3]", PostgresType, NpgsqlDbType, From a27566ff3e75ab1f75feb6d24cb69cdbd3340ab4 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Wed, 19 Nov 2025 21:21:25 +0100 Subject: [PATCH 639/761] Align OTel metrics to latest spec (#6328) Closes #6313 --- Directory.Packages.props | 2 + src/Npgsql/MetricsReporter.cs | 85 ++++++++------ test/Npgsql.Tests/MetricTests.cs | 161 ++++++++++++++++++++++++++ test/Npgsql.Tests/Npgsql.Tests.csproj | 2 + test/Npgsql.Tests/TracingTests.cs | 1 - 5 files changed, 214 insertions(+), 37 deletions(-) create mode 100644 test/Npgsql.Tests/MetricTests.cs diff --git a/Directory.Packages.props b/Directory.Packages.props index d15be7ff34..2ca1f14b38 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -61,6 +61,8 @@ + + diff --git a/src/Npgsql/MetricsReporter.cs b/src/Npgsql/MetricsReporter.cs index 71b43923fe..431c0ea734 100644 --- a/src/Npgsql/MetricsReporter.cs +++ b/src/Npgsql/MetricsReporter.cs @@ -28,7 +28,9 @@ sealed class MetricsReporter : IDisposable static readonly ObservableGauge PreparedRatio; readonly NpgsqlDataSource _dataSource; + readonly KeyValuePair _poolNameTag; + readonly TagList _durationMetricTags; static readonly List Reporters = []; @@ -51,66 +53,68 @@ static MetricsReporter() { Meter = new("Npgsql", Version); + // db.client.operation.duration is stable in the OpenTelemetry spec + CommandDuration = Meter.CreateHistogram( + "db.client.operation.duration", + unit: "s", + description: "Duration of database client operations.", + advice: 
ShortHistogramAdvice); + + // From here, metrics have "development" status (not stable) + Meter.CreateObservableUpDownCounter( + "db.client.connection.count", + GetConnectionCount, + unit: "{connection}", + description: "The number of connections that are currently in state described by the state attribute."); + + // It's a bit ridiculous to manage "max connections" as an observable counter, given that it never changes for a given pool. + // However, we can't simply report it once at startup, since clients who connect later wouldn't have it. And since reporting it + // repeatedly isn't possible because we need to provide incremental figures, we just manage it as an observable counter. + Meter.CreateObservableUpDownCounter( + "db.client.connection.max", + GetConnectionMax, + unit: "{connection}", + description: "The maximum number of open connections allowed."); + + // From here, metrics are entirely Npgsql-specific and not covered by the OpenTelemetry spec. CommandsExecuting = Meter.CreateUpDownCounter( - "db.client.commands.executing", + "db.client.operation.npgsql.executing", unit: "{command}", description: "The number of currently executing database commands."); CommandsFailed = Meter.CreateCounter( - "db.client.commands.failed", + "db.client.operation.failed", unit: "{command}", description: "The number of database commands which have failed."); - CommandDuration = Meter.CreateHistogram( - "db.client.commands.duration", - unit: "s", - description: "The duration of database commands, in seconds.", - advice: ShortHistogramAdvice); - BytesWritten = Meter.CreateCounter( - "db.client.commands.bytes_written", + "db.client.operation.npgsql.bytes_written", unit: "By", description: "The number of bytes written."); BytesRead = Meter.CreateCounter( - "db.client.commands.bytes_read", + "db.client.operation.npgsql.bytes_read", unit: "By", description: "The number of bytes read."); PendingConnectionRequests = Meter.CreateUpDownCounter( - 
"db.client.connections.pending_requests", + "db.client.connection.npgsql.pending_requests", unit: "{request}", description: "The number of pending requests for an open connection, cumulative for the entire pool."); ConnectionTimeouts = Meter.CreateCounter( - "db.client.connections.timeouts", + "db.client.connection.npgsql.timeouts", unit: "{timeout}", description: "The number of connection timeouts that have occurred trying to obtain a connection from the pool."); ConnectionCreateTime = Meter.CreateHistogram( - "db.client.connections.create_time", + "db.client.connection.npgsql.create_time", unit: "s", description: "The time it took to create a new connection.", advice: ShortHistogramAdvice); - // Observable metrics; these are for values we already track internally (and efficiently) inside the connection pool implementation. - Meter.CreateObservableUpDownCounter( - "db.client.connections.usage", - GetConnectionUsage, - unit: "{connection}", - description: "The number of connections that are currently in state described by the state attribute."); - - // It's a bit ridiculous to manage "max connections" as an observable counter, given that it never changes for a given pool. - // However, we can't simply report it once at startup, since clients who connect later wouldn't have it. And since reporting it - // repeatedly isn't possible because we need to provide incremental figures, we just manage it as an observable counter. 
- Meter.CreateObservableUpDownCounter( - "db.client.connections.max", - GetMaxConnections, - unit: "{connection}", - description: "The maximum number of open connections allowed."); - PreparedRatio = Meter.CreateObservableGauge( - "db.client.commands.prepared_ratio", + "db.client.operation.npgsql.prepared_ratio", GetPreparedCommandsRatio, description: "The ratio of prepared command executions."); } @@ -118,7 +122,16 @@ static MetricsReporter() public MetricsReporter(NpgsqlDataSource dataSource) { _dataSource = dataSource; - _poolNameTag = new KeyValuePair("pool.name", dataSource.Name); + _poolNameTag = new KeyValuePair("db.client.connection.pool.name", dataSource.Name); + + _durationMetricTags = new TagList + { + // TODO: Vary this for PG-like databases (e.g. CockroachDB)? + { "db.system.name", "postgresql" }, + { "db.client.connection.pool.name", _dataSource.Name }, + { "server.address", _dataSource.Settings.Host }, + { "server.port", _dataSource.Settings.Port } + }; lock (Reporters) { @@ -142,7 +155,7 @@ internal void ReportCommandStop(long startTimestamp) if (CommandDuration.Enabled && startTimestamp > 0) { - CommandDuration.Record(Stopwatch.GetElapsedTime(startTimestamp).TotalSeconds, _poolNameTag); + CommandDuration.Record(Stopwatch.GetElapsedTime(startTimestamp).TotalSeconds, _durationMetricTags); } } @@ -168,7 +181,7 @@ internal void ReportPendingConnectionRequestStop() internal void ReportConnectionCreateTime(TimeSpan duration) => ConnectionCreateTime.Record(duration.TotalSeconds, _poolNameTag); - static IEnumerable> GetConnectionUsage() + static IEnumerable> GetConnectionCount() { lock (Reporters) { @@ -182,19 +195,19 @@ static IEnumerable> GetConnectionUsage() measurements.Add(new Measurement( connectionStats.Idle, reporter._poolNameTag, - new KeyValuePair("state", "idle"))); + new KeyValuePair("db.client.connection.state", "idle"))); measurements.Add(new Measurement( connectionStats.Busy, reporter._poolNameTag, - new KeyValuePair("state", "used"))); + 
new KeyValuePair("db.client.connection.state", "used"))); } return measurements; } } - static IEnumerable> GetMaxConnections() + static IEnumerable> GetConnectionMax() { lock (Reporters) { diff --git a/test/Npgsql.Tests/MetricTests.cs b/test/Npgsql.Tests/MetricTests.cs new file mode 100644 index 0000000000..235f8b4e27 --- /dev/null +++ b/test/Npgsql.Tests/MetricTests.cs @@ -0,0 +1,161 @@ +using System.Collections.Generic; +using System.Diagnostics; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using NUnit.Framework; +using OpenTelemetry; +using OpenTelemetry.Metrics; + +namespace Npgsql.Tests; + +public class MetricTests : TestBase +{ + [Test] + public async Task OperationDuration() + { + var exportedItems = new List(); + using var meterProvider = Sdk.CreateMeterProviderBuilder() + .AddMeter("Npgsql") + .AddInMemoryExporter(exportedItems) + .Build(); + + await using var dataSource = CreateDataSource(); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = conn.CreateCommand(); + cmd.CommandText = "SELECT 1"; + await using (var reader = await cmd.ExecuteReaderAsync()) + while (await reader.ReadAsync()); + + meterProvider.ForceFlush(); + + var metric = exportedItems.SingleOrDefault(m => m.Name == "db.client.operation.duration"); + Assert.That(metric, Is.Not.Null, "Metric 'db.client.operation.duration' not found."); + + var point = GetFilteredPoints(metric.GetMetricPoints(), dataSource.Name).Single(); + + Assert.That(point.GetHistogramSum(), Is.GreaterThan(0)); + Assert.That(point.GetHistogramCount(), Is.EqualTo(1)); + + var tags = ToDictionary(point.Tags); + + using (Assert.EnterMultipleScope()) + { + // TODO: Vary this for PG-like databases (e.g. CockroachDB)? 
+ Assert.That(tags["db.system.name"], Is.EqualTo("postgresql")); + + Assert.That(tags["server.address"], Is.EqualTo(dataSource.Settings.Host)); + Assert.That(tags["server.port"], Is.EqualTo(dataSource.Settings.Port)); + Assert.That(tags["db.client.connection.pool.name"], Is.EqualTo(dataSource.Name)); + } + } + + [Test] + public async Task ConnectionCount() + { + var exportedItems = new List(); + using var meterProvider = Sdk.CreateMeterProviderBuilder() + .AddMeter("Npgsql") + .AddInMemoryExporter(exportedItems) + .Build(); + + await using var dataSource = CreateDataSource(); + + using (var _ = await dataSource.OpenConnectionAsync()) + { + meterProvider.ForceFlush(); + + var metric = exportedItems.Single(m => m.Name == "db.client.connection.count"); + var points = GetFilteredPoints(metric.GetMetricPoints(), dataSource.Name); + + var usedPoint = GetPoint(points, "used"); + Assert.That(usedPoint.GetSumLong(), Is.EqualTo(1), "Expected used connections to be 1"); + + var idlePoint = GetPoint(points, "idle"); + Assert.That(idlePoint.GetSumLong(), Is.Zero, "Expected idle connections to be 0"); + + exportedItems.Clear(); + } + + meterProvider.ForceFlush(); + + { + var metric = exportedItems.Single(m => m.Name == "db.client.connection.count"); + var points = GetFilteredPoints(metric.GetMetricPoints(), dataSource.Name); + + var usedPoint = GetPoint(points, "used"); + Assert.That(usedPoint.GetSumLong(), Is.Zero, "Expected used connections to be 0"); + + var idlePoint = GetPoint(points, "idle"); + Assert.That(idlePoint.GetSumLong(), Is.EqualTo(1), "Expected idle connections to be 1"); + } + + static MetricPoint GetPoint(IEnumerable points, string state) + { + foreach (var point in points) + { + foreach (var tag in point.Tags) + { + if (tag.Key == "db.client.connection.state" && (string?)tag.Value == state) + return point; + } + } + + Assert.Fail($"Point with state '{state}' not found"); + throw new UnreachableException(); + } + } + + [Test] + public async Task ConnectionMax() 
+ { + var exportedItems = new List(); + using var meterProvider = Sdk.CreateMeterProviderBuilder() + .AddMeter("Npgsql") + .AddInMemoryExporter(exportedItems) + .Build(); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.ConnectionStringBuilder.MaxPoolSize = 134; + await using var dataSource = dataSourceBuilder.Build(); + + meterProvider.ForceFlush(); + + var metric = exportedItems.Single(m => m.Name == "db.client.connection.max"); + var point = GetFilteredPoints(metric.GetMetricPoints(), dataSource.Name).First(p => p.GetSumLong() == 134); + var tags = ToDictionary(point.Tags); + Assert.That(tags["db.client.connection.pool.name"], Is.EqualTo(dataSource.Name)); + } + + static Dictionary ToDictionary(ReadOnlyTagCollection tags) + { + var dict = new Dictionary(); + foreach (var tag in tags) + dict[tag.Key] = tag.Value; + return dict; + } + + protected override NpgsqlDataSourceBuilder CreateDataSourceBuilder() + { + var dataSourceBuilder = base.CreateDataSourceBuilder(); + dataSourceBuilder.Name = "MetricsDataSource" + Interlocked.Increment(ref _dataSourceCounter); + return dataSourceBuilder; + } + + protected override NpgsqlDataSource CreateDataSource() + => CreateDataSourceBuilder().Build(); + + int _dataSourceCounter; + + static IEnumerable GetFilteredPoints(MetricPointsAccessor points, string dataSourceName) + { + foreach (var point in points) + { + foreach (var tag in point.Tags) + { + if (tag.Key == "db.client.connection.pool.name" && (string?)tag.Value == dataSourceName) + yield return point; + } + } + } +} diff --git a/test/Npgsql.Tests/Npgsql.Tests.csproj b/test/Npgsql.Tests/Npgsql.Tests.csproj index 2944755e6c..3714b9edaa 100644 --- a/test/Npgsql.Tests/Npgsql.Tests.csproj +++ b/test/Npgsql.Tests/Npgsql.Tests.csproj @@ -9,6 +9,8 @@ runtime; build; native; contentfiles; analyzers; buildtransitive + + diff --git a/test/Npgsql.Tests/TracingTests.cs b/test/Npgsql.Tests/TracingTests.cs index faec49c238..fe7464f0ce 100644 --- 
a/test/Npgsql.Tests/TracingTests.cs +++ b/test/Npgsql.Tests/TracingTests.cs @@ -1,7 +1,6 @@ using System.Collections.Generic; using System.Diagnostics; using System.Linq; -using System.Net.Sockets; using System.Threading.Tasks; using NUnit.Framework; From c15f00a0f86fc5dba2ff2a4fffa807ee0287bf92 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 20 Nov 2025 08:12:14 +0100 Subject: [PATCH 640/761] Bump Microsoft.CodeAnalysis.CSharp from 4.14.0 to 5.0.0 (#6330) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 2ca1f14b38..52becfa8c3 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -45,7 +45,7 @@ - + From 572671e949d6a69a75f8d1a9cd79c0ddc898d608 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Thu, 20 Nov 2025 13:42:55 +0300 Subject: [PATCH 641/761] Fix failing Timeout_during_authentication test (#6332) --- test/Npgsql.Tests/AuthenticationTests.cs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/test/Npgsql.Tests/AuthenticationTests.cs b/test/Npgsql.Tests/AuthenticationTests.cs index 157b1ee287..1503d7f373 100644 --- a/test/Npgsql.Tests/AuthenticationTests.cs +++ b/test/Npgsql.Tests/AuthenticationTests.cs @@ -368,9 +368,8 @@ public async Task Timeout_during_authentication() // request. 
This should trigger a timeout await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); await using var connection = dataSource.CreateConnection(); - Assert.That(async () => await connection.OpenAsync(), - Throws.Exception.TypeOf() - .With.InnerException.TypeOf()); + var ex = Assert.ThrowsAsync(async () => await connection.OpenAsync()); + Assert.That(ex.InnerException, Is.TypeOf()); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1180")] From 41c5f40a8ecc1e3e0da680c2f700ccb953590607 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Thu, 20 Nov 2025 16:59:11 +0300 Subject: [PATCH 642/761] Complete fix for failing Timeout_during_authentication test (#6334) --- src/Npgsql/Internal/NpgsqlConnector.cs | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index aa2a996322..774d19c9f7 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -1278,8 +1278,6 @@ void Connect(NpgsqlTimeout timeout) } } - timeout.Check(); - // Give each endpoint an equal share of the remaining time var perEndpointTimeout = -1; // Default to infinity if (timeout.IsSet) @@ -1400,15 +1398,7 @@ async Task ConnectAsync(NpgsqlTimeout timeout, CancellationToken cancellationTok using var combinedCts = endpointTimeout.IsSet ? CancellationTokenSource.CreateLinkedTokenSource(cancellationToken) : null; combinedCts?.CancelAfter(endpointTimeout.CheckAndGetTimeLeft()); var combinedToken = combinedCts?.Token ?? 
cancellationToken; - try - { - await socket.ConnectAsync(endpoint, combinedToken).ConfigureAwait(false); - } - catch (OperationCanceledException oce) when ( - oce.CancellationToken == combinedToken && !cancellationToken.IsCancellationRequested) - { - throw new TimeoutException(); - } + await socket.ConnectAsync(endpoint, combinedToken).ConfigureAwait(false); _socket = socket; ConnectedEndPoint = endpoint; @@ -1429,6 +1419,8 @@ async Task ConnectAsync(NpgsqlTimeout timeout, CancellationToken cancellationTok if (e is OperationCanceledException) e = new TimeoutException("Timeout during connection attempt"); + else if (e is NpgsqlException) + e = e.InnerException!; // We throw NpgsqlException for timeouts, wrapping TimeoutException ConnectionLogger.LogTrace(e, "Failed to connect to {Endpoint}", endpoint); From b8b7f34fb99db8f96cfc8606fb3fe8df5c130ead Mon Sep 17 00:00:00 2001 From: kevbot18 <4250416+kevbot18@users.noreply.github.com> Date: Thu, 20 Nov 2025 09:32:02 -0500 Subject: [PATCH 643/761] Throw ObjectDisposedException when assigining to NpgsqlCommand (#6048) --- src/Npgsql/NpgsqlCommand.cs | 23 ++++++++++++++++++----- test/Npgsql.Tests/CommandTests.cs | 14 ++++++++++++++ 2 files changed, 32 insertions(+), 5 deletions(-) diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index a83b939a53..f1767d6e76 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -183,8 +183,18 @@ public override string CommandText { Debug.Assert(WrappingBatch is null); - if (State != CommandState.Idle) - ThrowHelper.ThrowInvalidOperationException("An open data reader exists for this command."); + switch (State) + { + case CommandState.Idle: + break; + case CommandState.Disposed: + ThrowHelper.ThrowObjectDisposedException(typeof(NpgsqlCommand).FullName); + break; + case CommandState.InProgress: + default: + ThrowHelper.ThrowInvalidOperationException("An open data reader exists for this command."); + break; + } _commandText = value ?? 
string.Empty; @@ -252,9 +262,12 @@ protected override DbConnection? DbConnection if (InternalConnection == value) return; - InternalConnection = State == CommandState.Idle - ? (NpgsqlConnection?)value - : throw new InvalidOperationException("An open data reader exists for this command."); + InternalConnection = State switch + { + CommandState.Idle => (NpgsqlConnection?)value, + CommandState.Disposed => throw new ObjectDisposedException(typeof(NpgsqlCommand).FullName), + _ => throw new InvalidOperationException("An open data reader exists for this command."), + }; Transaction = null; } diff --git a/test/Npgsql.Tests/CommandTests.cs b/test/Npgsql.Tests/CommandTests.cs index 584a3cc433..70488be12e 100644 --- a/test/Npgsql.Tests/CommandTests.cs +++ b/test/Npgsql.Tests/CommandTests.cs @@ -1694,4 +1694,18 @@ public async Task Completed_transaction_throws([Values] bool commit) Assert.Throws(() => cmd.Transaction = tx); } + + [Test, Description("Writing to properties of a disposed command raises ObjectDisposedException.")] + public async Task Disposed_command_throws_on_assignment() + { + await using var conn = await OpenConnectionAsync(); + var command = new NpgsqlCommand("SELECT 1"); + command.Dispose(); + + Assert.Throws(() => command.Connection = conn); + Assert.Throws(() => command.CommandText = "SELECT 2"); + + Assert.That(command.Connection, Is.Null); + Assert.That(command.CommandText, Is.EqualTo("SELECT 1")); + } } From ed05ab2ad3217970e972d07b5af2eb92bb8d093b Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Thu, 20 Nov 2025 20:52:52 +0100 Subject: [PATCH 644/761] Override GetColumnSchemaAsync (#6337) Fixes #6017 --- src/Npgsql/NpgsqlDataReader.cs | 14 +++++++------- src/Npgsql/PublicAPI.Unshipped.txt | 2 ++ src/Npgsql/Schema/DbColumnSchemaGenerator.cs | 11 ++++++----- test/Npgsql.Tests/ReaderNewSchemaTests.cs | 6 ++++-- 4 files changed, 19 insertions(+), 14 deletions(-) diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 
2421e7e5ca..94499c14ae 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -1771,7 +1771,7 @@ public override IEnumerator GetEnumerator() /// /// public ReadOnlyCollection GetColumnSchema() - => GetColumnSchema(async: false).GetAwaiter().GetResult(); + => GetColumnSchema(async: false).GetAwaiter().GetResult(); ReadOnlyCollection IDbColumnSchemaGenerator.GetColumnSchema() { @@ -1788,14 +1788,14 @@ ReadOnlyCollection IDbColumnSchemaGenerator.GetColumnSchema() /// Asynchronously returns schema information for the columns in the current resultset. /// /// - public new Task> GetColumnSchemaAsync(CancellationToken cancellationToken = default) - => GetColumnSchema(async: true, cancellationToken); + public override Task> GetColumnSchemaAsync(CancellationToken cancellationToken = default) + => GetColumnSchema(async: true, cancellationToken); - Task> GetColumnSchema(bool async, CancellationToken cancellationToken = default) + Task> GetColumnSchema(bool async, CancellationToken cancellationToken = default) where T : DbColumn => RowDescription == null || ColumnCount == 0 - ? Task.FromResult(new List().AsReadOnly()) + ? 
Task.FromResult(new List().AsReadOnly()) : new DbColumnSchemaGenerator(_connection!, RowDescription, _behavior.HasFlag(CommandBehavior.KeyInfo)) - .GetColumnSchema(async, cancellationToken); + .GetColumnSchema(async, cancellationToken); #endregion @@ -1853,7 +1853,7 @@ Task> GetColumnSchema(bool async, Cancellatio table.Columns.Add("ProviderSpecificDataType", typeof(Type)); table.Columns.Add("DataTypeName", typeof(string)); - foreach (var column in await GetColumnSchema(async, cancellationToken).ConfigureAwait(false)) + foreach (var column in await GetColumnSchema(async, cancellationToken).ConfigureAwait(false)) { var row = table.NewRow(); diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index b86cac3908..9cea3e5609 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -87,6 +87,7 @@ Npgsql.Replication.PgOutput.PgOutputStreamingMode.Parallel = 2 -> Npgsql.Replica Npgsql.SslNegotiation Npgsql.SslNegotiation.Direct = 1 -> Npgsql.SslNegotiation Npgsql.SslNegotiation.Postgres = 0 -> Npgsql.SslNegotiation +override Npgsql.NpgsqlDataReader.GetColumnSchemaAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task!>! override Npgsql.NpgsqlMultiHostDataSource.Clear() -> void Npgsql.NpgsqlDataSource.ReloadTypes() -> void Npgsql.NpgsqlDataSource.ReloadTypesAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! @@ -135,3 +136,4 @@ NpgsqlTypes.NpgsqlTid.Deconstruct(out uint blockNumber, out ushort offsetNumber) *REMOVED*Npgsql.NpgsqlConnection.BeginTextExportAsync(string! copyToCommand, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! *REMOVED*Npgsql.NpgsqlConnection.BeginTextImport(string! copyFromCommand) -> System.IO.TextWriter! 
*REMOVED*Npgsql.NpgsqlConnection.BeginTextImportAsync(string! copyFromCommand, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +*REMOVED*Npgsql.NpgsqlDataReader.GetColumnSchemaAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task!>! diff --git a/src/Npgsql/Schema/DbColumnSchemaGenerator.cs b/src/Npgsql/Schema/DbColumnSchemaGenerator.cs index 458dc725fc..ed7afd822b 100644 --- a/src/Npgsql/Schema/DbColumnSchemaGenerator.cs +++ b/src/Npgsql/Schema/DbColumnSchemaGenerator.cs @@ -2,6 +2,7 @@ using System.Collections.Generic; using System.Collections.ObjectModel; using System.Data; +using System.Data.Common; using System.Threading; using System.Threading.Tasks; using System.Transactions; @@ -93,13 +94,13 @@ nspname NOT IN ('pg_catalog', 'information_schema') AND #endregion Column queries - internal async Task> GetColumnSchema(bool async, CancellationToken cancellationToken = default) + internal async Task> GetColumnSchema(bool async, CancellationToken cancellationToken = default) where T : DbColumn { // This is mainly for Amazon Redshift var oldQueryMode = _connection.PostgreSqlVersion < new Version(8, 2); var numFields = _rowDescription.Count; - var result = new List(numFields); + var result = new List(numFields); for (var i = 0; i < numFields; i++) result.Add(null); var populatedColumns = 0; @@ -153,7 +154,7 @@ internal async Task> GetColumnSchema(bool asy // The column's ordinal is with respect to the resultset, not its table column.ColumnOrdinal = ordinal; - result[ordinal] = column; + result[ordinal] = (T?)(object)column; } } } @@ -172,14 +173,14 @@ internal async Task> GetColumnSchema(bool asy // Fill in whatever info we have from the RowDescription itself for (var i = 0; i < numFields; i++) { - var column = result[i]; + var column = (NpgsqlDbColumn?)(object?)result[i]; var field = _rowDescription[i]; 
if (column is null) { column = SetUpNonColumnField(field); column.ColumnOrdinal = i; - result[i] = column; + result[i] = (T?)(object)column; populatedColumns++; } diff --git a/test/Npgsql.Tests/ReaderNewSchemaTests.cs b/test/Npgsql.Tests/ReaderNewSchemaTests.cs index f892670d96..2b28501aeb 100644 --- a/test/Npgsql.Tests/ReaderNewSchemaTests.cs +++ b/test/Npgsql.Tests/ReaderNewSchemaTests.cs @@ -1,8 +1,10 @@ using System; +using System.Collections.Generic; using System.Collections.ObjectModel; using System.Data; using System.Data.Common; using System.Linq; +using System.Threading; using System.Threading.Tasks; using Npgsql.PostgresTypes; using NUnit.Framework; @@ -809,6 +811,6 @@ class SomeComposite public int Foo { get; set; } } - async Task> GetColumnSchema(NpgsqlDataReader reader) - => IsAsync ? await reader.GetColumnSchemaAsync() : reader.GetColumnSchema(); + async Task> GetColumnSchema(NpgsqlDataReader reader) + => IsAsync ? (await reader.GetColumnSchemaAsync(CancellationToken.None)).Cast().ToArray() : reader.GetColumnSchema(); } From 937a9bd223f5956a9772284496c31127670bcc0e Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Thu, 20 Nov 2025 22:15:06 +0100 Subject: [PATCH 645/761] Align with the stable OTel tracing specs (#6338) Closes #6064 --- src/Npgsql/Internal/NpgsqlConnector.cs | 5 +- src/Npgsql/NpgsqlActivitySource.cs | 128 +++++++------- src/Npgsql/NpgsqlCommand.cs | 2 +- test/Npgsql.Tests/TracingTests.cs | 227 +++++++++++-------------- 4 files changed, 168 insertions(+), 194 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 774d19c9f7..a90da65959 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -498,7 +498,7 @@ internal async Task Open(NpgsqlTimeout timeout, bool async, CancellationToken ca { var username = await GetUsernameAsync(async, cancellationToken).ConfigureAwait(false); - activity = NpgsqlActivitySource.ConnectionOpen(this); + 
activity = NpgsqlActivitySource.PhysicalConnectionOpen(this); var gssEncMode = GetGssEncMode(Settings); @@ -572,8 +572,7 @@ internal async Task Open(NpgsqlTimeout timeout, bool async, CancellationToken ca } } - if (activity is not null) - NpgsqlActivitySource.CommandStop(activity); + activity?.Dispose(); LogMessages.OpenedPhysicalConnection( ConnectionLogger, Host, Port, Database, UserFacingConnectionString, diff --git a/src/Npgsql/NpgsqlActivitySource.cs b/src/Npgsql/NpgsqlActivitySource.cs index e40ae5a9bd..8c41cbfca1 100644 --- a/src/Npgsql/NpgsqlActivitySource.cs +++ b/src/Npgsql/NpgsqlActivitySource.cs @@ -8,6 +8,8 @@ namespace Npgsql; +// Semantic conventions for database client spans: https://opentelemetry.io/docs/specs/semconv/database/database-spans/ +// Semantic conventions for PostgreSQL client operations: https://opentelemetry.io/docs/specs/semconv/database/postgresql/ static class NpgsqlActivitySource { static readonly ActivitySource Source = new("Npgsql", GetLibraryVersion()); @@ -16,64 +18,69 @@ static class NpgsqlActivitySource internal static Activity? CommandStart(NpgsqlConnectionStringBuilder settings, string commandText, CommandType commandType, string? spanName) { - var dbName = settings.Database ?? "UNKNOWN"; - string? dbOperation = null; - string? dbSqlTable = null; - string activityName; + string? operationName = null; + switch (commandType) { case CommandType.StoredProcedure: - dbOperation = NpgsqlCommand.EnableStoredProcedureCompatMode ? "SELECT" : "CALL"; - // In this case our activity name follows the concept of the CommandType.TableDirect case - // (" .") but replaces db.sql.table with the procedure name - // which seems to match the spec's intent without being explicitly specified that way (it suggests - // using the procedure name but doesn't mention using db.operation or db.name in that case). 
- activityName = $"{dbOperation} {dbName}.{commandText}"; + // We follow the {db.operation.name} {target} pattern of the spec, with the operation being SELECT/CALL and + // the target being the stored procedure name. + operationName = NpgsqlCommand.EnableStoredProcedureCompatMode ? "SELECT" : "CALL"; + spanName ??= $"{operationName} {commandText}"; break; case CommandType.TableDirect: - dbOperation = "SELECT"; - // The OpenTelemetry spec actually asks to include the database name into db.sql.table - // but then again mixes the concept of database and schema. - // As I interpret it, it actually wants db.sql.table to include the schema name and not the - // database name if the concept of schemas exists in the database system. - // This also makes sense in the context of the activity name which otherwise would include the - // database name twice. - dbSqlTable = commandText; - activityName = $"{dbOperation} {dbName}.{dbSqlTable}"; + // We follow the {db.operation.name} {target} pattern of the spec, with the operation being SELECT and + // the target being the table (collection) name. + operationName = "SELECT"; + spanName ??= $"{operationName} {commandText}"; break; case CommandType.Text: - activityName = dbName; + // We don't have db.query.summary, db.operation.name or target (without parsing SQL), + // so we fall back to db.system.name as per the specs. + spanName ??= "postgresql"; break; default: throw new ArgumentOutOfRangeException(nameof(commandType), commandType, null); } - var activity = Source.StartActivity(spanName ?? 
activityName, ActivityKind.Client); + var activity = Source.StartActivity(spanName, ActivityKind.Client); if (activity is not { IsAllDataRequested: true }) return activity; - activity.SetTag("db.statement", commandText); + activity.SetTag("db.query.text", commandText); - if (dbOperation != null) - activity.SetTag("db.operation", dbOperation); - if (dbSqlTable != null) - activity.SetTag("db.sql.table", dbSqlTable); + switch (commandType) + { + case CommandType.StoredProcedure: + Debug.Assert(operationName is not null); + activity.SetTag("db.operation.name", operationName); + activity.SetTag("db.stored_procedure.name", commandText); + break; + case CommandType.TableDirect: + Debug.Assert(operationName is not null); + activity.SetTag("db.operation.name", operationName); + activity.SetTag("db.collection.name", commandText); + break; + } return activity; } - internal static Activity? ConnectionOpen(NpgsqlConnector connector) + internal static Activity? PhysicalConnectionOpen(NpgsqlConnector connector) { if (!connector.DataSource.Configuration.TracingOptions.EnablePhysicalOpenTracing) return null; + // Note that physical connection open is not part of the OpenTelemetry spec. + // We emit it if enabled, following the general name/tags guidelines. var dbName = connector.Settings.Database ?? connector.InferredUserName; - var activity = Source.StartActivity(dbName, ActivityKind.Client); + var activity = Source.StartActivity("CONNECT " + dbName, ActivityKind.Client); if (activity is not { IsAllDataRequested: true }) return activity; - activity.SetTag("db.system", "postgresql"); - activity.SetTag("db.connection_string", connector.UserFacingConnectionString); + // We set these basic tags on the activity so that they're populated even when the physical open fails. 
+ activity.SetTag("db.system.name", "postgresql"); + activity.SetTag("db.npgsql.data_source", connector.DataSource.Name); return activity; } @@ -83,34 +90,33 @@ internal static void Enrich(Activity activity, NpgsqlConnector connector) if (!activity.IsAllDataRequested) return; - activity.SetTag("db.system", "postgresql"); - activity.SetTag("db.connection_string", connector.UserFacingConnectionString); - activity.SetTag("db.user", connector.InferredUserName); - // We trace the actual (maybe inferred) database name we're connected to, even if it - // wasn't specified in the connection string - activity.SetTag("db.name", connector.Settings.Database ?? connector.InferredUserName); - activity.SetTag("db.connection_id", connector.Id); + activity.SetTag("db.system.name", "postgresql"); + + // TODO: For now, we only set the database name, without adding the first schema in the search_path + // as per the PG tracing specs (https://opentelemetry.io/docs/specs/semconv/database/postgresql/). + // See #6336 + activity.SetTag("db.namespace", connector.Settings.Database ?? 
connector.InferredUserName); var endPoint = connector.ConnectedEndPoint; Debug.Assert(endPoint is not null); + activity.SetTag("server.address", connector.Host); switch (endPoint) { case IPEndPoint ipEndPoint: - activity.SetTag("net.transport", "ip_tcp"); - activity.SetTag("net.peer.ip", ipEndPoint.Address.ToString()); if (ipEndPoint.Port != 5432) - activity.SetTag("net.peer.port", ipEndPoint.Port); - activity.SetTag("net.peer.name", connector.Host); + activity.SetTag("server.port", ipEndPoint.Port); break; case UnixDomainSocketEndPoint: - activity.SetTag("net.transport", "unix"); - activity.SetTag("net.peer.name", connector.Host); break; default: - throw new ArgumentOutOfRangeException("Invalid endpoint type: " + endPoint.GetType()); + throw new UnreachableException("Invalid endpoint type: " + endPoint.GetType()); } + + // Npgsql-specific tags + activity.SetTag("db.npgsql.data_source", connector.DataSource.Name); + activity.SetTag("db.npgsql.connection_id", connector.Id); } internal static void ReceivedFirstResponse(Activity activity, NpgsqlTracingOptions tracingOptions) @@ -122,25 +128,27 @@ internal static void ReceivedFirstResponse(Activity activity, NpgsqlTracingOptio activity.AddEvent(activityEvent); } - internal static void CommandStop(Activity activity) + internal static void SetException(Activity activity, Exception exception, bool escaped = true) { - activity.SetStatus(ActivityStatusCode.Ok); - activity.Dispose(); - } + activity.AddException(exception); - internal static void SetException(Activity activity, Exception ex, bool escaped = true) - { - // TODO: We can instead use Activity.AddException whenever we start using .NET 9 - var tags = new ActivityTagsCollection + if (exception is PostgresException { SqlState: var sqlState }) { - { "exception.type", ex.GetType().FullName }, - { "exception.message", ex.Message }, - { "exception.stacktrace", ex.ToString() }, - { "exception.escaped", escaped } - }; - var activityEvent = new ActivityEvent("exception", 
tags: tags); - activity.AddEvent(activityEvent); - var statusDescription = ex is PostgresException pgEx ? pgEx.SqlState : ex.Message; + activity.SetTag("db.response.status_code", sqlState); + + // error.type SHOULD match the db.response.status_code returned by the database or the client library, or the canonical name of exception that occurred. + // Since we don't have a table to map the error code to a textual description, the SQL state is the best we can do. + activity.SetTag("error.type", sqlState); + } + else + { + if (exception is NpgsqlException { InnerException: Exception innerException }) + exception = innerException; + + activity.SetTag("error.type", exception.GetType().FullName); + } + + var statusDescription = exception is PostgresException pgEx ? pgEx.SqlState : exception.Message; activity.SetStatus(ActivityStatusCode.Error, statusDescription); activity.Dispose(); } diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index f1767d6e76..cfd48189ec 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -1787,7 +1787,7 @@ internal void TraceCommandStop() { if (CurrentActivity is not null) { - NpgsqlActivitySource.CommandStop(CurrentActivity); + CurrentActivity.Dispose(); CurrentActivity = null; } } diff --git a/test/Npgsql.Tests/TracingTests.cs b/test/Npgsql.Tests/TracingTests.cs index fe7464f0ce..da7f038d4a 100644 --- a/test/Npgsql.Tests/TracingTests.cs +++ b/test/Npgsql.Tests/TracingTests.cs @@ -17,10 +17,12 @@ public async Task Basic_open([Values] bool async) var activities = new List(); - using var activityListener = new ActivityListener(); - activityListener.ShouldListenTo = source => source.Name == "Npgsql"; - activityListener.Sample = (ref ActivityCreationOptions _) => ActivitySamplingResult.AllDataAndRecorded; - activityListener.ActivityStopped = activity => activities.Add(activity); + using var activityListener = new ActivityListener + { + ShouldListenTo = source => source.Name == "Npgsql", + Sample = (ref 
_) => ActivitySamplingResult.AllDataAndRecorded, + ActivityStopped = activity => activities.Add(activity) + }; ActivitySource.AddActivityListener(activityListener); await using var dataSource = CreateDataSource(); @@ -28,7 +30,7 @@ public async Task Basic_open([Values] bool async) ? await dataSource.OpenConnectionAsync() : dataSource.OpenConnection(); - Assert.That(activities.Count, Is.EqualTo(1)); + Assert.That(activities, Has.Count.EqualTo(1)); ValidateActivity(activities[0], conn, IsMultiplexing); if (!IsMultiplexing) @@ -41,7 +43,7 @@ public async Task Basic_open([Values] bool async) await conn.ExecuteScalarAsync("SELECT 1"); - Assert.That(activities.Count, Is.EqualTo(2)); + Assert.That(activities, Has.Count.EqualTo(2)); ValidateActivity(activities[0], conn, IsMultiplexing); // For multiplexing, query's activity can be considered as a parent for physical open's activity @@ -49,36 +51,26 @@ public async Task Basic_open([Values] bool async) static void ValidateActivity(Activity activity, NpgsqlConnection conn, bool isMultiplexing) { - Assert.That(activity.DisplayName, Is.EqualTo(conn.Settings.Database)); - Assert.That(activity.OperationName, Is.EqualTo(conn.Settings.Database)); - Assert.That(activity.Status, Is.EqualTo(ActivityStatusCode.Ok)); + Assert.That(activity.DisplayName, Is.EqualTo("CONNECT " + conn.Settings.Database)); + Assert.That(activity.OperationName, Is.EqualTo("CONNECT " + conn.Settings.Database)); + Assert.That(activity.Status, Is.EqualTo(ActivityStatusCode.Unset)); Assert.That(activity.Events.Count(), Is.EqualTo(0)); - var expectedTagCount = conn.Settings.Port == 5432 ? 8 : 9; - Assert.That(activity.TagObjects.Count(), Is.EqualTo(expectedTagCount)); - - Assert.That(activity.TagObjects.Any(x => x.Key == "db.statement"), Is.False); + var tags = activity.TagObjects.ToDictionary(t => t.Key, t => t.Value); + Assert.That(tags, Has.Count.EqualTo(conn.Settings.Port == 5432 ? 
5 : 6)); - var systemTag = activity.TagObjects.First(x => x.Key == "db.system"); - Assert.That(systemTag.Value, Is.EqualTo("postgresql")); + Assert.That(tags["db.system.name"], Is.EqualTo("postgresql")); + Assert.That(tags["db.namespace"], Is.EqualTo(conn.Settings.Database)); - var userTag = activity.TagObjects.First(x => x.Key == "db.user"); - Assert.That(userTag.Value, Is.EqualTo(conn.Settings.Username)); + Assert.That(tags, Does.Not.ContainKey("db.query.text")); - var dbNameTag = activity.TagObjects.First(x => x.Key == "db.name"); - Assert.That(dbNameTag.Value, Is.EqualTo(conn.Settings.Database)); + Assert.That(tags["db.npgsql.data_source"], Is.EqualTo(conn.ConnectionString)); - var connStringTag = activity.TagObjects.First(x => x.Key == "db.connection_string"); - Assert.That(connStringTag.Value, Is.EqualTo(conn.ConnectionString)); - - if (!isMultiplexing) - { - var connIDTag = activity.TagObjects.First(x => x.Key == "db.connection_id"); - Assert.That(connIDTag.Value, Is.EqualTo(conn.ProcessID)); - } + if (isMultiplexing) + Assert.That(tags, Does.ContainKey("db.npgsql.connection_id")); else - Assert.That(activity.TagObjects.Any(x => x.Key == "db.connection_id")); + Assert.That(tags["db.npgsql.connection_id"], Is.EqualTo(conn.ProcessID)); } } @@ -90,56 +82,48 @@ public async Task Basic_query([Values] bool async, [Values] bool batch) var activities = new List(); - using var activityListener = new ActivityListener(); - activityListener.ShouldListenTo = source => source.Name == "Npgsql"; - activityListener.Sample = (ref ActivityCreationOptions _) => ActivitySamplingResult.AllDataAndRecorded; - activityListener.ActivityStopped = activity => activities.Add(activity); + using var activityListener = new ActivityListener + { + ShouldListenTo = source => source.Name == "Npgsql", + Sample = (ref _) => ActivitySamplingResult.AllDataAndRecorded, + ActivityStopped = activity => activities.Add(activity) + }; ActivitySource.AddActivityListener(activityListener); - await using 
var dataSource = CreateDataSource(); + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.Name = "TestTracingDataSource"; + await using var dataSource = dataSourceBuilder.Build(); await using var conn = await dataSource.OpenConnectionAsync(); // We're not interested in physical open's activity - Assert.That(activities.Count, Is.EqualTo(1)); + Assert.That(activities, Has.Count.EqualTo(1)); activities.Clear(); await ExecuteScalar(conn, async, batch, "SELECT 42"); - Assert.That(activities.Count, Is.EqualTo(1)); + Assert.That(activities, Has.Count.EqualTo(1)); var activity = activities[0]; - Assert.That(activity.DisplayName, Is.EqualTo(conn.Settings.Database)); - Assert.That(activity.OperationName, Is.EqualTo(conn.Settings.Database)); - Assert.That(activity.Status, Is.EqualTo(ActivityStatusCode.Ok)); + Assert.That(activity.DisplayName, Is.EqualTo("postgresql")); + Assert.That(activity.OperationName, Is.EqualTo("postgresql")); + Assert.That(activity.Status, Is.EqualTo(ActivityStatusCode.Unset)); Assert.That(activity.Events.Count(), Is.EqualTo(1)); var firstResponseEvent = activity.Events.First(); Assert.That(firstResponseEvent.Name, Is.EqualTo("received-first-response")); - var expectedTagCount = conn.Settings.Port == 5432 ? 9 : 10; - Assert.That(activity.TagObjects.Count(), Is.EqualTo(expectedTagCount)); - - var queryTag = activity.TagObjects.First(x => x.Key == "db.statement"); - Assert.That(queryTag.Value, Is.EqualTo("SELECT 42")); - - var systemTag = activity.TagObjects.First(x => x.Key == "db.system"); - Assert.That(systemTag.Value, Is.EqualTo("postgresql")); - - var userTag = activity.TagObjects.First(x => x.Key == "db.user"); - Assert.That(userTag.Value, Is.EqualTo(conn.Settings.Username)); + var tags = activity.TagObjects.ToDictionary(t => t.Key, t => t.Value); + Assert.That(tags, Has.Count.EqualTo(conn.Settings.Port == 5432 ? 
6 : 7)); - var dbNameTag = activity.TagObjects.First(x => x.Key == "db.name"); - Assert.That(dbNameTag.Value, Is.EqualTo(conn.Settings.Database)); + Assert.That(tags["db.query.text"], Is.EqualTo("SELECT 42")); + Assert.That(tags["db.system.name"], Is.EqualTo("postgresql")); + Assert.That(tags["db.namespace"], Is.EqualTo(conn.Settings.Database)); - var connStringTag = activity.TagObjects.First(x => x.Key == "db.connection_string"); - Assert.That(connStringTag.Value, Is.EqualTo(conn.ConnectionString)); + Assert.That(tags["db.npgsql.data_source"], Is.EqualTo("TestTracingDataSource")); - if (!IsMultiplexing) - { - var connIDTag = activity.TagObjects.First(x => x.Key == "db.connection_id"); - Assert.That(connIDTag.Value, Is.EqualTo(conn.ProcessID)); - } + if (IsMultiplexing) + Assert.That(tags, Does.ContainKey("db.npgsql.connection_id")); else - Assert.That(activity.TagObjects.Any(x => x.Key == "db.connection_id")); + Assert.That(tags["db.npgsql.connection_id"], Is.EqualTo(conn.ProcessID)); } [Test] @@ -150,10 +134,12 @@ public async Task Error_open([Values] bool async) var activities = new List(); - using var activityListener = new ActivityListener(); - activityListener.ShouldListenTo = source => source.Name == "Npgsql"; - activityListener.Sample = (ref ActivityCreationOptions _) => ActivitySamplingResult.AllDataAndRecorded; - activityListener.ActivityStopped = activity => activities.Add(activity); + using var activityListener = new ActivityListener + { + ShouldListenTo = source => source.Name == "Npgsql", + Sample = (ref _) => ActivitySamplingResult.AllDataAndRecorded, + ActivityStopped = activity => activities.Add(activity) + }; ActivitySource.AddActivityListener(activityListener); await using var dataSource = CreateDataSource(x => x.Host = "not-existing-host"); @@ -164,10 +150,10 @@ public async Task Error_open([Values] bool async) : dataSource.OpenConnection(); })!; - Assert.That(activities.Count, Is.EqualTo(1)); + Assert.That(activities, Has.Count.EqualTo(1)); var 
activity = activities[0]; - Assert.That(activity.DisplayName, Is.EqualTo(dataSource.Settings.Database)); - Assert.That(activity.OperationName, Is.EqualTo(dataSource.Settings.Database)); + Assert.That(activity.DisplayName, Is.EqualTo("CONNECT " + dataSource.Settings.Database)); + Assert.That(activity.OperationName, Is.EqualTo("CONNECT " + dataSource.Settings.Database)); Assert.That(activity.Status, Is.EqualTo(ActivityStatusCode.Error)); Assert.That(activity.StatusDescription, Is.EqualTo(ex.Message)); @@ -175,27 +161,20 @@ public async Task Error_open([Values] bool async) var exceptionEvent = activity.Events.First(); Assert.That(exceptionEvent.Name, Is.EqualTo("exception")); - Assert.That(exceptionEvent.Tags.Count(), Is.EqualTo(4)); - - var exceptionTypeTag = exceptionEvent.Tags.First(x => x.Key == "exception.type"); - Assert.That(exceptionTypeTag.Value, Is.EqualTo(ex.GetType().FullName)); - - var exceptionMessageTag = exceptionEvent.Tags.First(x => x.Key == "exception.message"); - Assert.That((string)exceptionMessageTag.Value!, Does.Contain(ex.Message)); + var exceptionTags = exceptionEvent.Tags.ToDictionary(t => t.Key, t => t.Value); + Assert.That(exceptionTags, Has.Count.EqualTo(3)); - var exceptionStacktraceTag = exceptionEvent.Tags.First(x => x.Key == "exception.stacktrace"); - Assert.That((string)exceptionStacktraceTag.Value!, Does.Contain(ex.Message)); + Assert.That(exceptionTags["exception.type"], Is.EqualTo(ex.GetType().FullName)); + Assert.That(exceptionTags["exception.message"], Does.Contain(ex.Message)); + Assert.That(exceptionTags["exception.stacktrace"], Does.Contain(ex.Message)); - var exceptionEscapedTag = exceptionEvent.Tags.First(x => x.Key == "exception.escaped"); - Assert.That(exceptionEscapedTag.Value, Is.True); + var activityTags = activity.TagObjects.ToDictionary(t => t.Key, t => t.Value); + Assert.That(activityTags, Has.Count.EqualTo(3)); - Assert.That(activity.TagObjects.Count(), Is.EqualTo(2)); + Assert.That(activityTags["db.system.name"], 
Is.EqualTo("postgresql")); + Assert.That(activityTags["db.npgsql.data_source"], Is.EqualTo(dataSource.ConnectionString)); - var systemTag = activity.TagObjects.First(x => x.Key == "db.system"); - Assert.That(systemTag.Value, Is.EqualTo("postgresql")); - - var connStringTag = activity.TagObjects.First(x => x.Key == "db.connection_string"); - Assert.That(connStringTag.Value, Is.EqualTo(dataSource.ConnectionString)); + Assert.That(activityTags["error.type"], Is.EqualTo("System.Net.Sockets.SocketException")); } [Test] @@ -206,10 +185,12 @@ public async Task Error_query([Values] bool async, [Values] bool batch) var activities = new List(); - using var activityListener = new ActivityListener(); - activityListener.ShouldListenTo = source => source.Name == "Npgsql"; - activityListener.Sample = (ref ActivityCreationOptions _) => ActivitySamplingResult.AllDataAndRecorded; - activityListener.ActivityStopped = activity => activities.Add(activity); + using var activityListener = new ActivityListener + { + ShouldListenTo = source => source.Name == "Npgsql", + Sample = (ref _) => ActivitySamplingResult.AllDataAndRecorded, + ActivityStopped = activity => activities.Add(activity) + }; ActivitySource.AddActivityListener(activityListener); await using var dataSource = CreateDataSource(); @@ -221,10 +202,10 @@ public async Task Error_query([Values] bool async, [Values] bool batch) Assert.ThrowsAsync(async () => await ExecuteScalar(conn, async, batch, "SELECT * FROM non_existing_table")); - Assert.That(activities.Count, Is.EqualTo(1)); + Assert.That(activities, Has.Count.EqualTo(1)); var activity = activities[0]; - Assert.That(activity.DisplayName, Is.EqualTo(conn.Settings.Database)); - Assert.That(activity.OperationName, Is.EqualTo(conn.Settings.Database)); + Assert.That(activity.DisplayName, Is.EqualTo("postgresql")); + Assert.That(activity.OperationName, Is.EqualTo("postgresql")); Assert.That(activity.Status, Is.EqualTo(ActivityStatusCode.Error)); 
Assert.That(activity.StatusDescription, Is.EqualTo(PostgresErrorCodes.UndefinedTable)); @@ -232,45 +213,29 @@ public async Task Error_query([Values] bool async, [Values] bool batch) var exceptionEvent = activity.Events.First(); Assert.That(exceptionEvent.Name, Is.EqualTo("exception")); - Assert.That(exceptionEvent.Tags.Count(), Is.EqualTo(4)); - - var exceptionTypeTag = exceptionEvent.Tags.First(x => x.Key == "exception.type"); - Assert.That(exceptionTypeTag.Value, Is.EqualTo("Npgsql.PostgresException")); - - var exceptionMessageTag = exceptionEvent.Tags.First(x => x.Key == "exception.message"); - Assert.That((string)exceptionMessageTag.Value!, Does.Contain("relation \"non_existing_table\" does not exist")); - - var exceptionStacktraceTag = exceptionEvent.Tags.First(x => x.Key == "exception.stacktrace"); - Assert.That((string)exceptionStacktraceTag.Value!, Does.Contain("relation \"non_existing_table\" does not exist")); + var exceptionTags = exceptionEvent.Tags.ToDictionary(t => t.Key, t => t.Value); + Assert.That(exceptionTags, Has.Count.EqualTo(3)); - var exceptionEscapedTag = exceptionEvent.Tags.First(x => x.Key == "exception.escaped"); - Assert.That(exceptionEscapedTag.Value, Is.True); + Assert.That(exceptionTags["exception.type"], Is.EqualTo("Npgsql.PostgresException")); + Assert.That(exceptionTags["exception.message"], Does.Contain("relation \"non_existing_table\" does not exist")); + Assert.That(exceptionTags["exception.stacktrace"], Does.Contain("relation \"non_existing_table\" does not exist")); - var expectedTagCount = conn.Settings.Port == 5432 ? 9 : 10; - Assert.That(activity.TagObjects.Count(), Is.EqualTo(expectedTagCount)); + var activityTags = activity.TagObjects.ToDictionary(t => t.Key, t => t.Value); + Assert.That(activityTags, Has.Count.EqualTo(conn.Settings.Port == 5432 ? 
8 : 9)); - var queryTag = activity.TagObjects.First(x => x.Key == "db.statement"); - Assert.That(queryTag.Value, Is.EqualTo("SELECT * FROM non_existing_table")); + Assert.That(activityTags["db.query.text"], Is.EqualTo("SELECT * FROM non_existing_table")); + Assert.That(activityTags["db.system.name"], Is.EqualTo("postgresql")); + Assert.That(activityTags["db.namespace"], Is.EqualTo(conn.Settings.Database)); - var systemTag = activity.TagObjects.First(x => x.Key == "db.system"); - Assert.That(systemTag.Value, Is.EqualTo("postgresql")); + Assert.That(activityTags["db.response.status_code"], Is.EqualTo(PostgresErrorCodes.UndefinedTable)); + Assert.That(activityTags["error.type"], Is.EqualTo(PostgresErrorCodes.UndefinedTable)); - var userTag = activity.TagObjects.First(x => x.Key == "db.user"); - Assert.That(userTag.Value, Is.EqualTo(conn.Settings.Username)); + Assert.That(activityTags["db.npgsql.data_source"], Is.EqualTo(conn.ConnectionString)); - var dbNameTag = activity.TagObjects.First(x => x.Key == "db.name"); - Assert.That(dbNameTag.Value, Is.EqualTo(conn.Settings.Database)); - - var connStringTag = activity.TagObjects.First(x => x.Key == "db.connection_string"); - Assert.That(connStringTag.Value, Is.EqualTo(conn.ConnectionString)); - - if (!IsMultiplexing) - { - var connIDTag = activity.TagObjects.First(x => x.Key == "db.connection_id"); - Assert.That(connIDTag.Value, Is.EqualTo(conn.ProcessID)); - } + if (IsMultiplexing) + Assert.That(activityTags, Does.ContainKey("db.npgsql.connection_id")); else - Assert.That(activity.TagObjects.Any(x => x.Key == "db.connection_id")); + Assert.That(activityTags["db.npgsql.connection_id"], Is.EqualTo(conn.ProcessID)); } [Test] @@ -281,10 +246,12 @@ public async Task Configure_tracing([Values] bool async, [Values] bool batch) var activities = new List(); - using var activityListener = new ActivityListener(); - activityListener.ShouldListenTo = source => source.Name == "Npgsql"; - activityListener.Sample = (ref 
ActivityCreationOptions _) => ActivitySamplingResult.AllDataAndRecorded; - activityListener.ActivityStopped = activity => activities.Add(activity); + using var activityListener = new ActivityListener + { + ShouldListenTo = source => source.Name == "Npgsql", + Sample = (ref _) => ActivitySamplingResult.AllDataAndRecorded, + ActivityStopped = activity => activities.Add(activity) + }; ActivitySource.AddActivityListener(activityListener); var dataSourceBuilder = CreateDataSourceBuilder(); @@ -308,19 +275,19 @@ public async Task Configure_tracing([Values] bool async, [Values] bool batch) await ExecuteScalar(conn, async, batch, "SELECT 1"); - Assert.That(activities.Count, Is.EqualTo(0)); + Assert.That(activities, Is.Empty); await ExecuteScalar(conn, async, batch, "SELECT 2"); - Assert.That(activities.Count, Is.EqualTo(1)); + Assert.That(activities, Has.Count.EqualTo(1)); var activity = activities[0]; Assert.That(activity.DisplayName, Is.EqualTo("unknown_query")); Assert.That(activity.OperationName, Is.EqualTo("unknown_query")); Assert.That(activity.Events.Count(), Is.EqualTo(0)); - var customTag = activity.TagObjects.First(x => x.Key == "custom_tag"); - Assert.That(customTag.Value, Is.EqualTo("custom_value")); + var tags = activity.TagObjects.ToDictionary(t => t.Key, t => t.Value); + Assert.That(tags["custom_tag"], Is.EqualTo("custom_value")); } async Task ExecuteScalar(NpgsqlConnection connection, bool async, bool isBatch, string query) From 7b6ac490c735748677dea3c8f760bedc5b973ba4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 20 Nov 2025 22:16:13 +0100 Subject: [PATCH 646/761] Bump actions/checkout from 5 to 6 (#6344) --- .github/workflows/build.yml | 8 ++++---- .github/workflows/native-aot.yml | 6 +++--- .github/workflows/rich-code-nav.yml | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 1eff7639e1..14e7ad6875 100644 --- 
a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -67,7 +67,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v5 + uses: actions/checkout@v6 - name: NuGet Cache uses: actions/cache@v4 @@ -146,7 +146,7 @@ jobs: sudo -u postgres psql -c "CREATE USER npgsql_tests_scram SUPERUSER PASSWORD 'npgsql_tests_scram'" # Uncomment the following to SSH into the agent running the build (https://github.com/mxschmitt/action-tmate) - #- uses: actions/checkout@v5 + #- uses: actions/checkout@v6 #- name: Setup tmate session # uses: mxschmitt/action-tmate@v3 @@ -327,7 +327,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v5 + uses: actions/checkout@v6 - name: NuGet Cache uses: actions/cache@v4 @@ -367,7 +367,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v5 + uses: actions/checkout@v6 - name: Setup .NET Core SDK uses: actions/setup-dotnet@v5.0.0 diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index 3cb3ab007a..7e92384ea2 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -93,7 +93,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v5 + uses: actions/checkout@v6 # - name: Setup nuget config # run: echo "$nuget_config" > NuGet.config @@ -127,7 +127,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v5 + uses: actions/checkout@v6 # - name: Setup nuget config # run: echo "$nuget_config" > NuGet.config @@ -154,7 +154,7 @@ jobs: shell: bash # Uncomment the following to SSH into the agent running the build (https://github.com/mxschmitt/action-tmate) - #- uses: actions/checkout@v5 + #- uses: actions/checkout@v6 #- name: Setup tmate session # uses: mxschmitt/action-tmate@v3 diff --git a/.github/workflows/rich-code-nav.yml b/.github/workflows/rich-code-nav.yml index 363eaaeb5d..e6842c11ca 100644 --- a/.github/workflows/rich-code-nav.yml +++ b/.github/workflows/rich-code-nav.yml @@ -12,7 +12,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v5 + uses: 
actions/checkout@v6 - name: NuGet Cache uses: actions/cache@v4 From 7f7296d1d7056efa66a97d397bafe048dd2da2df Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Thu, 20 Nov 2025 23:23:05 +0100 Subject: [PATCH 647/761] Make Include Realm the default (#6341) Closes #4628 --- src/Npgsql/NpgsqlConnectionStringBuilder.cs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Npgsql/NpgsqlConnectionStringBuilder.cs b/src/Npgsql/NpgsqlConnectionStringBuilder.cs index 1e1e87107d..9dd2696e51 100644 --- a/src/Npgsql/NpgsqlConnectionStringBuilder.cs +++ b/src/Npgsql/NpgsqlConnectionStringBuilder.cs @@ -614,6 +614,7 @@ public string KerberosServiceName [Category("Security")] [Description("The Kerberos realm to be used for authentication.")] [DisplayName("Include Realm")] + [DefaultValue(true)] [NpgsqlConnectionStringProperty] public bool IncludeRealm { From 6d297236532b8d2b219f8727f238b18f2d810c51 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Fri, 21 Nov 2025 00:33:58 +0100 Subject: [PATCH 648/761] Re-enable skipped macOS tests (#6340) --- test/Npgsql.Tests/ConnectionTests.cs | 5 +---- test/Npgsql.Tests/MultipleHostsTests.cs | 1 - .../Replication/CommonLogicalReplicationTests.cs | 1 - test/Npgsql.Tests/Replication/CommonReplicationTests.cs | 1 - test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs | 1 - .../Npgsql.Tests/Replication/TestDecodingReplicationTests.cs | 1 - 6 files changed, 1 insertion(+), 9 deletions(-) diff --git a/test/Npgsql.Tests/ConnectionTests.cs b/test/Npgsql.Tests/ConnectionTests.cs index e64b3ba982..2eed87dab3 100644 --- a/test/Npgsql.Tests/ConnectionTests.cs +++ b/test/Npgsql.Tests/ConnectionTests.cs @@ -130,7 +130,6 @@ public async Task Broken_lifecycle([Values] bool openFromClose) } [Test] - [Platform(Exclude = "MacOsX", Reason = "Flaky on MacOS")] public async Task Break_while_open() { if (IsMultiplexing) @@ -818,7 +817,6 @@ public async Task No_database_defaults_to_username() } [Test, Description("Breaks a connector while it's in the pool, with a 
keepalive and without")] - [Platform(Exclude = "MacOsX", Reason = "Fails only on mac, needs to be investigated")] [TestCase(false, TestName = nameof(Break_connector_in_pool) + "_without_keep_alive")] [TestCase(true, TestName = nameof(Break_connector_in_pool) + "_with_keep_alive")] public async Task Break_connector_in_pool(bool keepAlive) @@ -1358,8 +1356,7 @@ await conn.ExecuteNonQueryAsync(@" } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/392")] - [NonParallelizable] - [Platform(Exclude = "MacOsX", Reason = "Flaky in CI on Mac")] + [NonParallelizable] // Drops and creates same database across modes public async Task Non_UTF8_Encoding() { Encoding.RegisterProvider(CodePagesEncodingProvider.Instance); diff --git a/test/Npgsql.Tests/MultipleHostsTests.cs b/test/Npgsql.Tests/MultipleHostsTests.cs index 9025586c55..a98e0d60c2 100644 --- a/test/Npgsql.Tests/MultipleHostsTests.cs +++ b/test/Npgsql.Tests/MultipleHostsTests.cs @@ -296,7 +296,6 @@ public async Task All_hosts_are_unavailable( } [Test] - [Platform(Exclude = "MacOsX", Reason = "Flaky in CI on Mac")] public async Task First_host_is_down() { using var socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp); diff --git a/test/Npgsql.Tests/Replication/CommonLogicalReplicationTests.cs b/test/Npgsql.Tests/Replication/CommonLogicalReplicationTests.cs index a8a363a583..cb434edd8a 100644 --- a/test/Npgsql.Tests/Replication/CommonLogicalReplicationTests.cs +++ b/test/Npgsql.Tests/Replication/CommonLogicalReplicationTests.cs @@ -16,7 +16,6 @@ namespace Npgsql.Tests.Replication; /// for the individual logical replication tests, they are in fact not, because /// the methods they test are extension points for plugin developers. 
/// -[Platform(Exclude = "MacOsX", Reason = "Replication tests are flaky in CI on Mac")] [NonParallelizable] public class CommonLogicalReplicationTests : SafeReplicationTestBase { diff --git a/test/Npgsql.Tests/Replication/CommonReplicationTests.cs b/test/Npgsql.Tests/Replication/CommonReplicationTests.cs index c57280e184..2be1f3faff 100644 --- a/test/Npgsql.Tests/Replication/CommonReplicationTests.cs +++ b/test/Npgsql.Tests/Replication/CommonReplicationTests.cs @@ -14,7 +14,6 @@ namespace Npgsql.Tests.Replication; [TestFixture(typeof(LogicalReplicationConnection))] [TestFixture(typeof(PhysicalReplicationConnection))] -[Platform(Exclude = "MacOsX", Reason = "Replication tests are flaky in CI on Mac")] [NonParallelizable] public class CommonReplicationTests : SafeReplicationTestBase where TConnection : ReplicationConnection, new() diff --git a/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs b/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs index 3bbcd1b6ac..a9a90842d3 100644 --- a/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs +++ b/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs @@ -645,7 +645,6 @@ await c.ExecuteNonQueryAsync(@$" await NextMessage(messages); }, nameof(Dispose_while_replicating)); - [Platform(Exclude = "MacOsX", Reason = "Test is flaky in CI on Mac, see https://github.com/npgsql/npgsql/issues/5294")] [TestCase(true, true)] [TestCase(true, false)] [TestCase(false, false)] diff --git a/test/Npgsql.Tests/Replication/TestDecodingReplicationTests.cs b/test/Npgsql.Tests/Replication/TestDecodingReplicationTests.cs index 3ecedfdfdd..406be7b809 100644 --- a/test/Npgsql.Tests/Replication/TestDecodingReplicationTests.cs +++ b/test/Npgsql.Tests/Replication/TestDecodingReplicationTests.cs @@ -12,7 +12,6 @@ namespace Npgsql.Tests.Replication; /// implementation of logical replication was still somewhat incomplete. /// Please don't change them without confirming that they still work on those old versions. 
/// -[Platform(Exclude = "MacOsX", Reason = "Replication tests are flaky in CI on Mac")] [NonParallelizable] // These tests aren't designed to be parallelizable public class TestDecodingReplicationTests : SafeReplicationTestBase { From eaa88b87bc36e3ddc4a7de1c524e3e53efcc1c99 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Fri, 21 Nov 2025 00:34:47 +0100 Subject: [PATCH 649/761] Re-enable many NonParallelizable and Ignore tests (#6339) --- test/Npgsql.Tests/ConnectionTests.cs | 8 ++- test/Npgsql.Tests/CopyTests.cs | 3 +- test/Npgsql.Tests/DataAdapterTests.cs | 20 +++--- test/Npgsql.Tests/NpgsqlParameterTests.cs | 72 +++++----------------- test/Npgsql.Tests/TypeMapperTests.cs | 4 +- test/Npgsql.Tests/Types/MultirangeTests.cs | 1 - test/Npgsql.Tests/Types/RangeTests.cs | 1 - test/Npgsql.Tests/Types/TextTests.cs | 1 - 8 files changed, 30 insertions(+), 80 deletions(-) diff --git a/test/Npgsql.Tests/ConnectionTests.cs b/test/Npgsql.Tests/ConnectionTests.cs index 2eed87dab3..106daae81f 100644 --- a/test/Npgsql.Tests/ConnectionTests.cs +++ b/test/Npgsql.Tests/ConnectionTests.cs @@ -188,9 +188,11 @@ public async Task Connection_refused_async(bool pooled) #endif [Test] - [Ignore("Fails in a non-determinstic manner and only on the build server... investigate...")] public void Invalid_Username() { + if (IsMultiplexing) + Assert.Ignore(); + var connString = new NpgsqlConnectionStringBuilder(ConnectionString) { Username = "unknown", Pooling = false @@ -1257,9 +1259,11 @@ public async Task Many_open_close_with_transaction() [Test] [IssueLink("https://github.com/npgsql/npgsql/issues/927")] [IssueLink("https://github.com/npgsql/npgsql/issues/736")] - [Ignore("Fails when running the entire test suite but not on its own...")] public async Task Rollback_on_close() { + if (IsMultiplexing) + Assert.Ignore(); + // Npgsql 3.0.0 to 3.0.4 prepended a rollback for the next time the connector is used, as an optimization. // This caused some issues (#927) and was removed. 
diff --git a/test/Npgsql.Tests/CopyTests.cs b/test/Npgsql.Tests/CopyTests.cs index 4cddb400eb..eb43420917 100644 --- a/test/Npgsql.Tests/CopyTests.cs +++ b/test/Npgsql.Tests/CopyTests.cs @@ -677,7 +677,6 @@ public async Task Wrong_format_binary_export() } [Test, NonParallelizable, IssueLink("https://github.com/npgsql/npgsql/issues/661")] - [Ignore("Unreliable")] public async Task Unexpected_exception_binary_import() { if (IsMultiplexing) @@ -701,7 +700,7 @@ public async Task Unexpected_exception_binary_import() writer.StartRow(); writer.Write(data); writer.Dispose(); - }, Throws.Exception.TypeOf()); + }, Throws.Exception.InstanceOf()); Assert.That(conn.FullState, Is.EqualTo(ConnectionState.Broken)); } diff --git a/test/Npgsql.Tests/DataAdapterTests.cs b/test/Npgsql.Tests/DataAdapterTests.cs index 3c91521ae1..91de36e734 100644 --- a/test/Npgsql.Tests/DataAdapterTests.cs +++ b/test/Npgsql.Tests/DataAdapterTests.cs @@ -141,7 +141,6 @@ public async Task DataAdapter_update_return_value() } [Test] - [Ignore("")] public async Task DataAdapter_update_return_value2() { using var conn = await OpenConnectionAsync(); @@ -158,15 +157,15 @@ public async Task DataAdapter_update_return_value2() da.Update(ds); //## change id from 1 to 2 - cmd.CommandText = $"update {table} set field_float4 = 0.8"; + cmd.CommandText = $"update {table} set field_numeric = 0.8"; cmd.ExecuteNonQuery(); //## change value to newvalue ds.Tables[0].Rows[0][1] = 0.7; //## update should fail, and make a DBConcurrencyException var count = da.Update(ds); - //## count is 1, even if the isn't updated in the database - Assert.That(count, Is.EqualTo(0)); + //## count is 1, even if the row isn't updated in the database + Assert.That(count, Is.EqualTo(1)); } [Test] @@ -189,7 +188,6 @@ public async Task Fill_with_empty_resultset() } [Test] - [Ignore("")] public async Task Fill_add_with_key() { using var conn = await OpenConnectionAsync(); @@ -211,7 +209,7 @@ public async Task Fill_add_with_key() 
Assert.That(field_serial.ColumnName, Is.EqualTo("field_serial")); Assert.That(field_serial.DataType, Is.EqualTo(typeof(int))); Assert.That(field_serial.Ordinal, Is.EqualTo(0)); - Assert.That(field_serial.Unique); + Assert.That(field_serial.Unique, Is.False); Assert.That(field_int2.AllowDBNull); Assert.That(field_int2.AutoIncrement, Is.False); @@ -329,7 +327,6 @@ public async Task Fill_with_duplicate_column_name() } [Test] - [Ignore("")] public Task Update_with_DataSet() => DoUpdateWithDataSet(); public async Task DoUpdateWithDataSet() @@ -365,7 +362,6 @@ public async Task DoUpdateWithDataSet() } [Test] - [Ignore("")] public async Task Insert_with_CommandBuilder_case_sensitive() { using var conn = await OpenConnectionAsync(); @@ -380,7 +376,7 @@ public async Task Insert_with_CommandBuilder_case_sensitive() var dt = ds.Tables[0]; var dr = dt.NewRow(); - dr["Field_Case_Sensitive"] = 4; + dr["Field_int4"] = 4; dt.Rows.Add(dr); var ds2 = ds.GetChanges()!; @@ -390,7 +386,7 @@ public async Task Insert_with_CommandBuilder_case_sensitive() using var dr2 = new NpgsqlCommand($"select * from {table}", conn).ExecuteReader(); dr2.Read(); - Assert.That(dr2[1], Is.EqualTo(4)); + Assert.That(dr2["field_int4"], Is.EqualTo(4)); } [Test] @@ -454,7 +450,6 @@ public async Task DataAdapter_command_access() [Test, Description("Makes sure that the INSERT/UPDATE/DELETE commands are auto-populated on NpgsqlDataAdapter")] [IssueLink("https://github.com/npgsql/npgsql/issues/179")] - [Ignore("Somehow related to us using a temporary table???")] public async Task Auto_populate_adapter_commands() { using var conn = await OpenConnectionAsync(); @@ -494,7 +489,6 @@ public void Command_builder_quoting() [Test, Description("Makes sure a correct SQL string is built with GetUpdateCommand(true) using correct parameter names and placeholders")] [IssueLink("https://github.com/npgsql/npgsql/issues/397")] - [Ignore("Somehow related to us using a temporary table???")] public async Task Get_UpdateCommand() { 
using var conn = await OpenConnectionAsync(); @@ -538,7 +532,7 @@ public async Task Load_DataTable() public Task SetupTempTable(NpgsqlConnection conn) => CreateTempTable(conn, @" -field_pk SERIAL PRIMARY KEY, +field_pk INTEGER PRIMARY KEY GENERATED ALWAYS AS IDENTITY, field_serial SERIAL, field_int2 SMALLINT, field_int4 INTEGER, diff --git a/test/Npgsql.Tests/NpgsqlParameterTests.cs b/test/Npgsql.Tests/NpgsqlParameterTests.cs index e1f0ef9c48..6070cc7266 100644 --- a/test/Npgsql.Tests/NpgsqlParameterTests.cs +++ b/test/Npgsql.Tests/NpgsqlParameterTests.cs @@ -4,7 +4,6 @@ using System.Data; using System.Data.Common; using System.Threading.Tasks; -using Npgsql.Internal.Postgres; namespace Npgsql.Tests; @@ -326,44 +325,6 @@ public void Clone_generic() #endregion - [Test] - [Ignore("")] - public void InferType_invalid_throws() - { - var notsupported = new object[] - { - ushort.MaxValue, - uint.MaxValue, - ulong.MaxValue, - sbyte.MaxValue, - new NpgsqlParameter() - }; - - var param = new NpgsqlParameter(); - - for (var i = 0; i < notsupported.Length; i++) - { - try - { - param.Value = notsupported[i]; - Assert.Fail("#A1:" + i); - } - catch (FormatException) - { - // appears to be bug in .NET 1.1 while - // constructing exception message - } - catch (ArgumentException ex) - { - // The parameter data type of ... is invalid - Assert.That(ex.GetType(), Is.EqualTo(typeof(ArgumentException)), "#A2"); - Assert.That(ex.InnerException, Is.Null, "#A3"); - Assert.That(ex.Message, Is.Not.Null, "#A4"); - Assert.That(ex.ParamName, Is.Null, "#A5"); - } - } - } - [Test] // bug #320196 public void Parameter_null() { @@ -379,50 +340,49 @@ public void Parameter_null() } [Test] - [Ignore("")] public void Parameter_type() { NpgsqlParameter p; // If Type is not set, then type is inferred from the value // assigned. The Type should be inferred everytime Value is assigned - // If value is null or DBNull, then the current Type should be reset to Text. 
- p = new NpgsqlParameter(); + // If value is null or DBNull, then the current Type should be reset to Unknown (DbType.Object and NpgsqlDbType.Unknown). + p = new NpgsqlParameter { Value = "" }; Assert.That(p.DbType, Is.EqualTo(DbType.String), "#A1"); Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Text), "#A2"); p.Value = DBNull.Value; - Assert.That(p.DbType, Is.EqualTo(DbType.String), "#B1"); - Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Text), "#B2"); + Assert.That(p.DbType, Is.EqualTo(DbType.Object), "#B1"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Unknown), "#B2"); p.Value = 1; Assert.That(p.DbType, Is.EqualTo(DbType.Int32), "#C1"); Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Integer), "#C2"); p.Value = DBNull.Value; - Assert.That(p.DbType, Is.EqualTo(DbType.String), "#D1"); - Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Text), "#D2"); + Assert.That(p.DbType, Is.EqualTo(DbType.Object), "#D1"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Unknown), "#D2"); p.Value = new byte[] { 0x0a }; Assert.That(p.DbType, Is.EqualTo(DbType.Binary), "#E1"); Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Bytea), "#E2"); p.Value = null; - Assert.That(p.DbType, Is.EqualTo(DbType.String), "#F1"); - Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Text), "#F2"); + Assert.That(p.DbType, Is.EqualTo(DbType.Object), "#F1"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Unknown), "#F2"); p.Value = DateTime.Now; - Assert.That(p.DbType, Is.EqualTo(DbType.DateTime), "#G1"); + Assert.That(p.DbType, Is.EqualTo(DbType.DateTime2), "#G1"); Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Timestamp), "#G2"); p.Value = null; - Assert.That(p.DbType, Is.EqualTo(DbType.String), "#H1"); - Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Text), "#H2"); + Assert.That(p.DbType, Is.EqualTo(DbType.Object), "#H1"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Unknown), "#H2"); // If DbType is set, then the NpgsqlDbType should 
not be // inferred from the value assigned. p = new NpgsqlParameter(); p.DbType = DbType.DateTime; - Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Timestamp), "#I1"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.TimestampTz), "#I1"); p.Value = 1; - Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Timestamp), "#I2"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.TimestampTz), "#I2"); p.Value = null; - Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Timestamp), "#I3"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.TimestampTz), "#I3"); p.Value = DBNull.Value; - Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.Timestamp), "#I4"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(NpgsqlDbType.TimestampTz), "#I4"); // If NpgsqlDbType is set, then the DbType should not be // inferred from the value assigned. @@ -447,7 +407,6 @@ public async Task Match_param_index_case_insensitively() } [Test] - [Ignore("")] public void ParameterName() { var p = new NpgsqlParameter(); @@ -540,7 +499,6 @@ public void ParameterName_retains_prefix() => Assert.That(new NpgsqlParameter("@p", DbType.String).ParameterName, Is.EqualTo("@p")); [Test] - [Ignore("")] public void SourceColumn() { var p = new NpgsqlParameter(); diff --git a/test/Npgsql.Tests/TypeMapperTests.cs b/test/Npgsql.Tests/TypeMapperTests.cs index 2819c80810..469a57be01 100644 --- a/test/Npgsql.Tests/TypeMapperTests.cs +++ b/test/Npgsql.Tests/TypeMapperTests.cs @@ -45,7 +45,6 @@ public async Task ReloadTypes_across_connections_in_data_source() } [Test] - [NonParallelizable] // Depends on citext which could be dropped concurrently public async Task String_to_citext() { await using var adminConnection = await OpenConnectionAsync(); @@ -62,7 +61,6 @@ public async Task String_to_citext() } [Test] - [NonParallelizable] // Depends on citext which could be dropped concurrently public async Task String_to_citext_with_db_type_string() { await using var adminConnection = await OpenConnectionAsync(); @@ 
-152,7 +150,7 @@ await connection.ExecuteNonQueryAsync($""" } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4582")] - [NonParallelizable] // Drops extension + [NonParallelizable] // Drops global citext extension. public async Task Type_in_non_default_schema() { await using var conn = await OpenConnectionAsync(); diff --git a/test/Npgsql.Tests/Types/MultirangeTests.cs b/test/Npgsql.Tests/Types/MultirangeTests.cs index adb56ed2b9..1de001f93b 100644 --- a/test/Npgsql.Tests/Types/MultirangeTests.cs +++ b/test/Npgsql.Tests/Types/MultirangeTests.cs @@ -100,7 +100,6 @@ public Task Multirange_as_list( sqlLiteral, pgTypeName, npgsqlDbType, isDefaultForReading: false, isDefaultForWriting: isDefaultForWriting); [Test] - [NonParallelizable] public async Task Unmapped_multirange_with_mapped_subtype() { await using var dataSource = CreateDataSource(b => b.EnableUnmappedTypes().ConnectionStringBuilder.MaxPoolSize = 1); diff --git a/test/Npgsql.Tests/Types/RangeTests.cs b/test/Npgsql.Tests/Types/RangeTests.cs index 0b90824d95..7582d4ad40 100644 --- a/test/Npgsql.Tests/Types/RangeTests.cs +++ b/test/Npgsql.Tests/Types/RangeTests.cs @@ -165,7 +165,6 @@ public async Task TimestampTz_range_with_DateTimeOffset() } [Test] - [NonParallelizable] public async Task Unmapped_range_with_mapped_subtype() { await using var dataSource = CreateDataSource(b => b.EnableUnmappedTypes().ConnectionStringBuilder.MaxPoolSize = 1); diff --git a/test/Npgsql.Tests/Types/TextTests.cs b/test/Npgsql.Tests/Types/TextTests.cs index 13dd94861b..6d4adae741 100644 --- a/test/Npgsql.Tests/Types/TextTests.cs +++ b/test/Npgsql.Tests/Types/TextTests.cs @@ -44,7 +44,6 @@ public Task Char_as_char() => AssertType('f', "f", "character", NpgsqlDbType.Char, inferredDbType: DbType.String, isDefault: false); [Test] - [NonParallelizable] public async Task Citext_as_string() { await using var conn = await OpenConnectionAsync(); From 4f4fe1990b7922e9288ee6cc75a9c53c4d1f3ffa Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Emmanuel=20Andr=C3=A9?= <2341261+manandre@users.noreply.github.com> Date: Sat, 22 Nov 2025 09:45:29 +0100 Subject: [PATCH 650/761] Instrument NpgsqlBinaryImporter with OpenTelemetry (#5921) Co-authored-by: Shay Rojansky --- src/Npgsql/Internal/NpgsqlConnector.cs | 21 + src/Npgsql/NpgsqlActivitySource.cs | 25 + src/Npgsql/NpgsqlBinaryExporter.cs | 159 +++-- src/Npgsql/NpgsqlBinaryImporter.cs | 111 ++- src/Npgsql/NpgsqlConnection.cs | 6 +- src/Npgsql/NpgsqlRawCopyStream.cs | 167 +++-- src/Npgsql/NpgsqlTracingOptionsBuilder.cs | 41 +- src/Npgsql/PublicAPI.Unshipped.txt | 3 + test/Npgsql.Tests/TracingTests.cs | 785 ++++++++++++++++++---- 9 files changed, 1041 insertions(+), 277 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index a90da65959..617ddcd03e 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -3204,6 +3204,27 @@ void ReadParameterStatus(ReadOnlySpan incomingName, ReadOnlySpan inc return null; } + internal Activity? TraceCopyStart(string copyCommand, string operation) + { + Activity? activity = null; + if (NpgsqlActivitySource.IsEnabled) + { + var tracingOptions = DataSource.Configuration.TracingOptions; + + if (tracingOptions.CopyOperationFilter?.Invoke(copyCommand) ?? 
true) + { + var spanName = tracingOptions.CopyOperationSpanNameProvider?.Invoke(copyCommand); + activity = NpgsqlActivitySource.CopyStart(copyCommand, this, spanName, operation); + + if (activity != null) + { + tracingOptions.CopyOperationEnrichmentCallback?.Invoke(activity, copyCommand); + } + } + } + return activity; + } + #endregion Misc } diff --git a/src/Npgsql/NpgsqlActivitySource.cs b/src/Npgsql/NpgsqlActivitySource.cs index 8c41cbfca1..91da0f0548 100644 --- a/src/Npgsql/NpgsqlActivitySource.cs +++ b/src/Npgsql/NpgsqlActivitySource.cs @@ -153,6 +153,31 @@ internal static void SetException(Activity activity, Exception exception, bool e activity.Dispose(); } + internal static Activity? CopyStart(string command, NpgsqlConnector connector, string? spanName, string operation) + { + var activity = Source.StartActivity(spanName ?? operation, ActivityKind.Client); + if (activity is not { IsAllDataRequested: true }) + return activity; + activity.SetTag("db.query.text", command); + activity.SetTag("db.operation.name", operation); + Enrich(activity, connector); + return activity; + } + + internal static void SetOperation(Activity activity, string operation) + { + if (!activity.IsAllDataRequested) + return; + activity.SetTag("db.operation.name", operation); + } + + internal static void CopyStop(Activity activity, ulong? rows = null) + { + if (rows.HasValue) + activity.SetTag("db.npgsql.rows", rows.Value); + activity.Dispose(); + } + static string GetLibraryVersion() => typeof(NpgsqlDataSource).Assembly .GetCustomAttribute()? diff --git a/src/Npgsql/NpgsqlBinaryExporter.cs b/src/Npgsql/NpgsqlBinaryExporter.cs index 9473c95959..4828f0ecb1 100644 --- a/src/Npgsql/NpgsqlBinaryExporter.cs +++ b/src/Npgsql/NpgsqlBinaryExporter.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Diagnostics; using System.Threading; using System.Threading.Tasks; @@ -49,6 +49,8 @@ public TimeSpan Timeout set => _buf.Timeout = value; } + Activity? 
_activity; + #endregion #region Construction / Initialization @@ -64,39 +66,50 @@ internal NpgsqlBinaryExporter(NpgsqlConnector connector) internal async Task Init(string copyToCommand, bool async, CancellationToken cancellationToken = default) { - await _connector.WriteQuery(copyToCommand, async, cancellationToken).ConfigureAwait(false); - await _connector.Flush(async, cancellationToken).ConfigureAwait(false); - - using var registration = _connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); + Debug.Assert(_activity is null); + _activity = _connector.TraceCopyStart(copyToCommand, "COPY TO"); - CopyOutResponseMessage copyOutResponse; - var msg = await _connector.ReadMessage(async).ConfigureAwait(false); - switch (msg.Code) + try { - case BackendMessageCode.CopyOutResponse: - copyOutResponse = (CopyOutResponseMessage)msg; - if (!copyOutResponse.IsBinary) + await _connector.WriteQuery(copyToCommand, async, cancellationToken).ConfigureAwait(false); + await _connector.Flush(async, cancellationToken).ConfigureAwait(false); + + using var registration = _connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); + + CopyOutResponseMessage copyOutResponse; + var msg = await _connector.ReadMessage(async).ConfigureAwait(false); + switch (msg.Code) { - throw _connector.Break( - new ArgumentException("copyToCommand triggered a text transfer, only binary is allowed", - nameof(copyToCommand))); + case BackendMessageCode.CopyOutResponse: + copyOutResponse = (CopyOutResponseMessage)msg; + if (!copyOutResponse.IsBinary) + { + throw _connector.Break( + new ArgumentException("copyToCommand triggered a text transfer, only binary is allowed", + nameof(copyToCommand))); + } + break; + case BackendMessageCode.CommandComplete: + throw new InvalidOperationException( + "This API only supports import/export from the client, i.e. COPY commands containing TO/FROM STDIN. 
" + + "To import/export with files on your PostgreSQL machine, simply execute the command with ExecuteNonQuery. " + + "Note that your data has been successfully imported/exported."); + default: + throw _connector.UnexpectedMessageReceived(msg.Code); } - break; - case BackendMessageCode.CommandComplete: - throw new InvalidOperationException( - "This API only supports import/export from the client, i.e. COPY commands containing TO/FROM STDIN. " + - "To import/export with files on your PostgreSQL machine, simply execute the command with ExecuteNonQuery. " + - "Note that your data has been successfully imported/exported."); - default: - throw _connector.UnexpectedMessageReceived(msg.Code); - } - _state = ExporterState.Ready; - NumColumns = copyOutResponse.NumColumns; - _columnInfoCache = new PgConverterInfo[NumColumns]; - _rowsExported = 0; - _endOfMessagePos = _buf.CumulativeReadPosition; - await ReadHeader(async).ConfigureAwait(false); + _state = ExporterState.Ready; + NumColumns = copyOutResponse.NumColumns; + _columnInfoCache = new PgConverterInfo[NumColumns]; + _rowsExported = 0; + _endOfMessagePos = _buf.CumulativeReadPosition; + await ReadHeader(async).ConfigureAwait(false); + } + catch (Exception e) + { + TraceSetException(e); + throw; + } } async Task ReadHeader(bool async) @@ -476,40 +489,50 @@ async ValueTask DisposeAsync(bool async) if (_state == ExporterState.Disposed) return; - if (_state is ExporterState.Consumed or ExporterState.Uninitialized) - { - LogMessages.BinaryCopyOperationCompleted(_copyLogger, _rowsExported, _connector.Id); - } - else if (!_connector.IsBroken) + try { - try - { - using var registration = _connector.StartNestedCancellableOperation(attemptPgCancellation: false); - // Be sure to commit the reader. 
- if (async) - await PgReader.CommitAsync().ConfigureAwait(false); - else - PgReader.Commit(); - // Finish the current CopyData message - await _buf.Skip(async, checked((int)(_endOfMessagePos - _buf.CumulativeReadPosition))).ConfigureAwait(false); - // Read to the end - _connector.SkipUntil(BackendMessageCode.CopyDone); - // We intentionally do not pass a CancellationToken since we don't want to cancel cleanup - Expect(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); - Expect(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); - } - catch (OperationCanceledException e) when (e.InnerException is PostgresException { SqlState: PostgresErrorCodes.QueryCanceled }) + if (_state is ExporterState.Consumed or ExporterState.Uninitialized) { - LogMessages.CopyOperationCancelled(_copyLogger, _connector.Id); + LogMessages.BinaryCopyOperationCompleted(_copyLogger, _rowsExported, _connector.Id); + TraceExportStop(); } - catch (Exception e) + else if (!_connector.IsBroken) { - LogMessages.ExceptionWhenDisposingCopyOperation(_copyLogger, _connector.Id, e); + try + { + using var registration = _connector.StartNestedCancellableOperation(attemptPgCancellation: false); + // Be sure to commit the reader. 
+ if (async) + await PgReader.CommitAsync().ConfigureAwait(false); + else + PgReader.Commit(); + // Finish the current CopyData message + await _buf.Skip(async, checked((int)(_endOfMessagePos - _buf.CumulativeReadPosition))).ConfigureAwait(false); + // Read to the end + _connector.SkipUntil(BackendMessageCode.CopyDone); + // We intentionally do not pass a CancellationToken since we don't want to cancel cleanup + Expect(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); + Expect(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); + + TraceExportStop(); + } + catch (OperationCanceledException e) when (e.InnerException is PostgresException { SqlState: PostgresErrorCodes.QueryCanceled }) + { + LogMessages.CopyOperationCancelled(_copyLogger, _connector.Id); + TraceExportStop(); + } + catch (Exception e) + { + LogMessages.ExceptionWhenDisposingCopyOperation(_copyLogger, _connector.Id, e); + TraceSetException(e); + } } } - - _connector.EndUserAction(); - Cleanup(); + finally + { + _connector.EndUserAction(); + Cleanup(); + } void Cleanup() { @@ -530,6 +553,28 @@ void Cleanup() #endregion + #region Tracing + + void TraceExportStop() + { + if (_activity is not null) + { + NpgsqlActivitySource.CopyStop(_activity, _rowsExported); + _activity = null; + } + } + + void TraceSetException(Exception exception) + { + if (_activity is not null) + { + NpgsqlActivitySource.SetException(_activity, exception); + _activity = null; + } + } + + #endregion Tracing + #region Enums enum ExporterState diff --git a/src/Npgsql/NpgsqlBinaryImporter.cs b/src/Npgsql/NpgsqlBinaryImporter.cs index 8c240468d4..6cd592dd06 100644 --- a/src/Npgsql/NpgsqlBinaryImporter.cs +++ b/src/Npgsql/NpgsqlBinaryImporter.cs @@ -1,4 +1,5 @@ using System; +using System.Diagnostics; using System.Runtime.CompilerServices; using System.Threading; using System.Threading.Tasks; @@ -45,6 +46,8 @@ public sealed class NpgsqlBinaryImporter : ICancelable readonly ILogger _copyLogger; 
PgWriter _pgWriter = null!; // Setup in Init + Activity? _activity; + /// /// Current timeout /// @@ -72,40 +75,51 @@ internal NpgsqlBinaryImporter(NpgsqlConnector connector) internal async Task Init(string copyFromCommand, bool async, CancellationToken cancellationToken = default) { - await _connector.WriteQuery(copyFromCommand, async, cancellationToken).ConfigureAwait(false); - await _connector.Flush(async, cancellationToken).ConfigureAwait(false); + Debug.Assert(_activity is null); + _activity = _connector.TraceCopyStart(copyFromCommand, "COPY FROM"); - using var registration = _connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); - - CopyInResponseMessage copyInResponse; - var msg = await _connector.ReadMessage(async).ConfigureAwait(false); - switch (msg.Code) + try { - case BackendMessageCode.CopyInResponse: - copyInResponse = (CopyInResponseMessage)msg; - if (!copyInResponse.IsBinary) + await _connector.WriteQuery(copyFromCommand, async, cancellationToken).ConfigureAwait(false); + await _connector.Flush(async, cancellationToken).ConfigureAwait(false); + + using var registration = _connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); + + CopyInResponseMessage copyInResponse; + var msg = await _connector.ReadMessage(async).ConfigureAwait(false); + switch (msg.Code) { - throw _connector.Break( - new ArgumentException("copyFromCommand triggered a text transfer, only binary is allowed", - nameof(copyFromCommand))); + case BackendMessageCode.CopyInResponse: + copyInResponse = (CopyInResponseMessage)msg; + if (!copyInResponse.IsBinary) + { + throw _connector.Break( + new ArgumentException("copyFromCommand triggered a text transfer, only binary is allowed", + nameof(copyFromCommand))); + } + break; + case BackendMessageCode.CommandComplete: + throw new InvalidOperationException( + "This API only supports import/export from the client, i.e. COPY commands containing TO/FROM STDIN. 
" + + "To import/export with files on your PostgreSQL machine, simply execute the command with ExecuteNonQuery. " + + "Note that your data has been successfully imported/exported."); + default: + throw _connector.UnexpectedMessageReceived(msg.Code); } - break; - case BackendMessageCode.CommandComplete: - throw new InvalidOperationException( - "This API only supports import/export from the client, i.e. COPY commands containing TO/FROM STDIN. " + - "To import/export with files on your PostgreSQL machine, simply execute the command with ExecuteNonQuery. " + - "Note that your data has been successfully imported/exported."); - default: - throw _connector.UnexpectedMessageReceived(msg.Code); - } - _state = ImporterState.Ready; - _params = new NpgsqlParameter[copyInResponse.NumColumns]; - _rowsImported = 0; - _buf.StartCopyMode(); - WriteHeader(); - // Only init after header. - _pgWriter = _buf.GetWriter(_connector.DatabaseInfo); + _state = ImporterState.Ready; + _params = new NpgsqlParameter[copyInResponse.NumColumns]; + _rowsImported = 0; + _buf.StartCopyMode(); + WriteHeader(); + // Only init after header. + _pgWriter = _buf.GetWriter(_connector.DatabaseInfo); + } + catch (Exception e) + { + TraceSetException(e); + throw; + } } void WriteHeader() @@ -308,6 +322,7 @@ await param.Write(async, _pgWriter.WithFlushMode(async ? 
FlushMode.NonBlocking : } catch (Exception ex) { + TraceSetException(ex); _connector.Break(ex); throw; } @@ -426,8 +441,9 @@ async ValueTask Complete(bool async, CancellationToken cancellationToken _state = ImporterState.Committed; return cmdComplete.Rows; } - catch + catch (Exception e) { + TraceSetException(e); Cleanup(); throw; } @@ -521,6 +537,7 @@ async ValueTask CloseAsync(bool async, CancellationToken cancellationToken = def throw new Exception("Invalid state: " + _state); } + TraceImportStop(); Cleanup(); } @@ -581,4 +598,38 @@ enum ImporterState void ThrowColumnMismatch() => throw new InvalidOperationException($"The binary import operation was started with {NumColumns} column(s), but {_column + 1} value(s) were provided."); + + #region Tracing + + void TraceImportStop() + { + if (_activity is not null) + { + switch (_state) + { + case ImporterState.Committed: + NpgsqlActivitySource.CopyStop(_activity, _rowsImported); + break; + case ImporterState.Cancelled: + NpgsqlActivitySource.CopyStop(_activity, rows: 0); + break; + default: + Debug.Fail("Invalid state: " + _state); + break; + } + + _activity = null; + } + } + + void TraceSetException(Exception exception) + { + if (_activity is not null) + { + NpgsqlActivitySource.SetException(_activity, exception); + _activity = null; + } + } + + #endregion Tracing } diff --git a/src/Npgsql/NpgsqlConnection.cs b/src/Npgsql/NpgsqlConnection.cs index ca8b6ba9fe..57299a3eec 100644 --- a/src/Npgsql/NpgsqlConnection.cs +++ b/src/Npgsql/NpgsqlConnection.cs @@ -1274,7 +1274,7 @@ async Task BeginTextImport(bool async, string copyFromComm var copyStream = new NpgsqlRawCopyStream(connector); try { - await copyStream.Init(copyFromCommand, async, cancellationToken).ConfigureAwait(false); + await copyStream.Init(copyFromCommand, async, forExport: false, cancellationToken).ConfigureAwait(false); var writer = new NpgsqlCopyTextWriter(connector, copyStream); connector.CurrentCopyOperation = writer; return writer; @@ -1340,7 +1340,7 
@@ async Task BeginTextExport(bool async, string copyToComman var copyStream = new NpgsqlRawCopyStream(connector); try { - await copyStream.Init(copyToCommand, async, cancellationToken).ConfigureAwait(false); + await copyStream.Init(copyToCommand, async, forExport: true, cancellationToken).ConfigureAwait(false); var reader = new NpgsqlCopyTextReader(connector, copyStream); connector.CurrentCopyOperation = reader; return reader; @@ -1406,7 +1406,7 @@ async Task BeginRawBinaryCopy(bool async, string copyComman var stream = new NpgsqlRawCopyStream(connector); try { - await stream.Init(copyCommand, async, cancellationToken).ConfigureAwait(false); + await stream.Init(copyCommand, async, forExport: null, cancellationToken).ConfigureAwait(false); if (!stream.IsBinary) { // TODO: Stop the COPY operation gracefully, no breaking diff --git a/src/Npgsql/NpgsqlRawCopyStream.cs b/src/Npgsql/NpgsqlRawCopyStream.cs index d0cfad82a7..45cfdf825e 100644 --- a/src/Npgsql/NpgsqlRawCopyStream.cs +++ b/src/Npgsql/NpgsqlRawCopyStream.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Diagnostics; using System.IO; using System.Threading; @@ -60,6 +60,7 @@ public override int ReadTimeout ]; readonly ILogger _copyLogger; + Activity? _activity; #endregion @@ -73,36 +74,54 @@ internal NpgsqlRawCopyStream(NpgsqlConnector connector) _copyLogger = connector.LoggingConfiguration.CopyLogger; } - internal async Task Init(string copyCommand, bool async, CancellationToken cancellationToken = default) + internal async Task Init(string copyCommand, bool async, bool? 
forExport, CancellationToken cancellationToken = default) { - await _connector.WriteQuery(copyCommand, async, cancellationToken).ConfigureAwait(false); - await _connector.Flush(async, cancellationToken).ConfigureAwait(false); + Debug.Assert(_activity is null); + _activity = _connector.TraceCopyStart(copyCommand, forExport switch + { + true => "COPY TO", + false => "COPY FROM", + null => "COPY", + }); - using var registration = _connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); + try + { + await _connector.WriteQuery(copyCommand, async, cancellationToken).ConfigureAwait(false); + await _connector.Flush(async, cancellationToken).ConfigureAwait(false); - var msg = await _connector.ReadMessage(async).ConfigureAwait(false); - switch (msg.Code) + using var registration = _connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); + + var msg = await _connector.ReadMessage(async).ConfigureAwait(false); + switch (msg.Code) + { + case BackendMessageCode.CopyInResponse: + _state = CopyStreamState.Ready; + var copyInResponse = (CopyInResponseMessage)msg; + IsBinary = copyInResponse.IsBinary; + _canWrite = true; + _writeBuf.StartCopyMode(); + TraceSetImport(); + break; + case BackendMessageCode.CopyOutResponse: + _state = CopyStreamState.Ready; + var copyOutResponse = (CopyOutResponseMessage)msg; + IsBinary = copyOutResponse.IsBinary; + _canRead = true; + TraceSetExport(); + break; + case BackendMessageCode.CommandComplete: + throw new InvalidOperationException( + "This API only supports import/export from the client, i.e. COPY commands containing TO/FROM STDIN. " + + "To import/export with files on your PostgreSQL machine, simply execute the command with ExecuteNonQuery. 
" + + "Note that your data has been successfully imported/exported."); + default: + throw _connector.UnexpectedMessageReceived(msg.Code); + } + } + catch (Exception e) { - case BackendMessageCode.CopyInResponse: - _state = CopyStreamState.Ready; - var copyInResponse = (CopyInResponseMessage) msg; - IsBinary = copyInResponse.IsBinary; - _canWrite = true; - _writeBuf.StartCopyMode(); - break; - case BackendMessageCode.CopyOutResponse: - _state = CopyStreamState.Ready; - var copyOutResponse = (CopyOutResponseMessage) msg; - IsBinary = copyOutResponse.IsBinary; - _canRead = true; - break; - case BackendMessageCode.CommandComplete: - throw new InvalidOperationException( - "This API only supports import/export from the client, i.e. COPY commands containing TO/FROM STDIN. " + - "To import/export with files on your PostgreSQL machine, simply execute the command with ExecuteNonQuery. " + - "Note that your data has been successfully imported/exported."); - default: - throw _connector.UnexpectedMessageReceived(msg.Code); + TraceSetException(e); + throw; } } @@ -261,10 +280,13 @@ async ValueTask ReadCore(int count, bool async, CancellationToken cancellat // read the next message msg = await _connector.ReadMessage(async).ConfigureAwait(false); } - catch + catch (Exception e) { if (_state != CopyStreamState.Disposed) + { + TraceSetException(e); Cleanup(); + } throw; } @@ -339,7 +361,12 @@ async Task Cancel(bool async) Cleanup(); if (e.SqlState != PostgresErrorCodes.QueryCanceled) + { + TraceSetException(e); throw; + } + + TraceStop(); } } else @@ -357,7 +384,6 @@ async Task Cancel(bool async) public override ValueTask DisposeAsync() => DisposeAsync(disposing: true, async: true); - async ValueTask DisposeAsync(bool disposing, bool async) { if (_state == CopyStreamState.Disposed || !disposing) @@ -369,18 +395,27 @@ async ValueTask DisposeAsync(bool disposing, bool async) if (CanWrite) { - await FlushAsync(async).ConfigureAwait(false); - _writeBuf.EndCopyMode(); - await 
_connector.WriteCopyDone(async).ConfigureAwait(false); - await _connector.Flush(async).ConfigureAwait(false); - Expect(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); - Expect(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); + try + { + await FlushAsync(async).ConfigureAwait(false); + _writeBuf.EndCopyMode(); + await _connector.WriteCopyDone(async).ConfigureAwait(false); + await _connector.Flush(async).ConfigureAwait(false); + Expect(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); + Expect(await _connector.ReadMessage(async).ConfigureAwait(false), _connector); + TraceStop(); + } + catch (Exception e) + { + TraceSetException(e); + throw; + } } else { - if (_state != CopyStreamState.Consumed && _state != CopyStreamState.Uninitialized) + try { - try + if (_state != CopyStreamState.Consumed && _state != CopyStreamState.Uninitialized) { if (_leftToReadInDataMsg > 0) { @@ -388,14 +423,18 @@ async ValueTask DisposeAsync(bool disposing, bool async) } _connector.SkipUntil(BackendMessageCode.ReadyForQuery); } - catch (OperationCanceledException e) when (e.InnerException is PostgresException { SqlState: PostgresErrorCodes.QueryCanceled }) - { - LogMessages.CopyOperationCancelled(_copyLogger, _connector.Id); - } - catch (Exception e) - { - LogMessages.ExceptionWhenDisposingCopyOperation(_copyLogger, _connector.Id, e); - } + + TraceStop(); + } + catch (OperationCanceledException e) when (e.InnerException is PostgresException { SqlState: PostgresErrorCodes.QueryCanceled }) + { + LogMessages.CopyOperationCancelled(_copyLogger, _connector.Id); + TraceStop(); + } + catch (Exception e) + { + LogMessages.ExceptionWhenDisposingCopyOperation(_copyLogger, _connector.Id, e); + TraceSetException(e); } } } @@ -458,6 +497,44 @@ static void ValidateArguments(byte[] buffer, int offset, int count) } #endregion + #region Tracing + + private void TraceSetImport() + { + if (_activity is not null) + { + 
NpgsqlActivitySource.SetOperation(_activity, "COPY FROM"); + } + } + + private void TraceSetExport() + { + if (_activity is not null) + { + NpgsqlActivitySource.SetOperation(_activity, "COPY TO"); + } + } + + private void TraceStop() + { + if (_activity is not null) + { + NpgsqlActivitySource.CopyStop(_activity); + _activity = null; + } + } + + private void TraceSetException(Exception e) + { + if (_activity is not null) + { + NpgsqlActivitySource.SetException(_activity, e); + _activity = null; + } + } + + #endregion + #region Enums enum CopyStreamState diff --git a/src/Npgsql/NpgsqlTracingOptionsBuilder.cs b/src/Npgsql/NpgsqlTracingOptionsBuilder.cs index f81f4e7139..d38cf1d32d 100644 --- a/src/Npgsql/NpgsqlTracingOptionsBuilder.cs +++ b/src/Npgsql/NpgsqlTracingOptionsBuilder.cs @@ -17,6 +17,10 @@ public sealed class NpgsqlTracingOptionsBuilder bool _enableFirstResponseEvent = true; bool _enablePhysicalOpenTracing = true; + Func? _copyOperationFilter; + Action? _copyOperationEnrichmentCallback; + Func? _copyOperationSpanNameProvider; + internal NpgsqlTracingOptionsBuilder() { } @@ -99,6 +103,35 @@ public NpgsqlTracingOptionsBuilder EnablePhysicalOpenTracing(bool enable = true) return this; } + /// + /// Configures a filter function that determines whether to emit tracing information for a copy operation. + /// By default, tracing information is emitted for all copy operations. + /// + public NpgsqlTracingOptionsBuilder ConfigureCopyOperationFilter(Func? copyOperationFilter) + { + _copyOperationFilter = copyOperationFilter; + return this; + } + + /// + /// Configures a callback that can enrich the emitted for a given copy operation. + /// + public NpgsqlTracingOptionsBuilder ConfigureCopyOperationEnrichmentCallback(Action? copyOperationEnrichmentCallback) + { + _copyOperationEnrichmentCallback = copyOperationEnrichmentCallback; + return this; + } + + /// + /// Configures a callback that provides the tracing span's name for a copy operation. 
If null, the default standard + /// span name is used, which is the database name. + /// + public NpgsqlTracingOptionsBuilder ConfigureCopyOperationSpanNameProvider(Func? copyOperationSpanNameProvider) + { + _copyOperationSpanNameProvider = copyOperationSpanNameProvider; + return this; + } + internal NpgsqlTracingOptions Build() => new() { CommandFilter = _commandFilter, @@ -108,7 +141,10 @@ public NpgsqlTracingOptionsBuilder EnablePhysicalOpenTracing(bool enable = true) CommandSpanNameProvider = _commandSpanNameProvider, BatchSpanNameProvider = _batchSpanNameProvider, EnableFirstResponseEvent = _enableFirstResponseEvent, - EnablePhysicalOpenTracing = _enablePhysicalOpenTracing + EnablePhysicalOpenTracing = _enablePhysicalOpenTracing, + CopyOperationFilter = _copyOperationFilter, + CopyOperationEnrichmentCallback = _copyOperationEnrichmentCallback, + CopyOperationSpanNameProvider = _copyOperationSpanNameProvider }; } @@ -122,4 +158,7 @@ sealed class NpgsqlTracingOptions internal Func? BatchSpanNameProvider { get; init; } internal bool EnableFirstResponseEvent { get; init; } internal bool EnablePhysicalOpenTracing { get; init; } + internal Func? CopyOperationFilter { get; init; } + internal Action? CopyOperationEnrichmentCallback { get; init; } + internal Func? CopyOperationSpanNameProvider { get; init; } } diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index 9cea3e5609..6694c16f4f 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -59,6 +59,9 @@ Npgsql.NpgsqlTracingOptionsBuilder.ConfigureBatchSpanNameProvider(System.Func? commandEnrichmentCallback) -> Npgsql.NpgsqlTracingOptionsBuilder! Npgsql.NpgsqlTracingOptionsBuilder.ConfigureCommandFilter(System.Func? commandFilter) -> Npgsql.NpgsqlTracingOptionsBuilder! Npgsql.NpgsqlTracingOptionsBuilder.ConfigureCommandSpanNameProvider(System.Func? commandSpanNameProvider) -> Npgsql.NpgsqlTracingOptionsBuilder! 
+Npgsql.NpgsqlTracingOptionsBuilder.ConfigureCopyOperationEnrichmentCallback(System.Action? copyOperationEnrichmentCallback) -> Npgsql.NpgsqlTracingOptionsBuilder! +Npgsql.NpgsqlTracingOptionsBuilder.ConfigureCopyOperationFilter(System.Func? copyOperationFilter) -> Npgsql.NpgsqlTracingOptionsBuilder! +Npgsql.NpgsqlTracingOptionsBuilder.ConfigureCopyOperationSpanNameProvider(System.Func? copyOperationSpanNameProvider) -> Npgsql.NpgsqlTracingOptionsBuilder! Npgsql.NpgsqlTracingOptionsBuilder.EnableFirstResponseEvent(bool enable = true) -> Npgsql.NpgsqlTracingOptionsBuilder! Npgsql.NpgsqlTracingOptionsBuilder.EnablePhysicalOpenTracing(bool enable = true) -> Npgsql.NpgsqlTracingOptionsBuilder! Npgsql.NpgsqlTypeLoadingOptionsBuilder diff --git a/test/Npgsql.Tests/TracingTests.cs b/test/Npgsql.Tests/TracingTests.cs index da7f038d4a..3ed31d6a30 100644 --- a/test/Npgsql.Tests/TracingTests.cs +++ b/test/Npgsql.Tests/TracingTests.cs @@ -1,37 +1,34 @@ -using System.Collections.Generic; +using System; +using System.Collections.Generic; using System.Diagnostics; using System.Linq; using System.Threading.Tasks; +using NpgsqlTypes; using NUnit.Framework; +using static Npgsql.Tests.TestUtil; namespace Npgsql.Tests; [NonParallelizable] -public class TracingTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) +[TestFixture(MultiplexingMode.NonMultiplexing, true)] +[TestFixture(MultiplexingMode.NonMultiplexing, false)] +[TestFixture(MultiplexingMode.Multiplexing, true)] +// Sync I/O not supported with multiplexing +public class TracingTests(MultiplexingMode multiplexingMode, bool async) : MultiplexingTestBase(multiplexingMode) { + #region Physical open + [Test] - public async Task Basic_open([Values] bool async) + public async Task PhysicalOpen() { - if (IsMultiplexing && !async) - return; - - var activities = new List(); - - using var activityListener = new ActivityListener - { - ShouldListenTo = source => source.Name == "Npgsql", - Sample = (ref _) => 
ActivitySamplingResult.AllDataAndRecorded, - ActivityStopped = activity => activities.Add(activity) - }; - ActivitySource.AddActivityListener(activityListener); - + using var activityListener = StartListener(out var activities); await using var dataSource = CreateDataSource(); - await using var conn = async - ? await dataSource.OpenConnectionAsync() - : dataSource.OpenConnection(); + await using var connection = async + ? await dataSource.OpenConnectionAsync() + : dataSource.OpenConnection(); Assert.That(activities, Has.Count.EqualTo(1)); - ValidateActivity(activities[0], conn, IsMultiplexing); + ValidateActivity(activities[0], connection, IsMultiplexing); if (!IsMultiplexing) return; @@ -41,10 +38,10 @@ public async Task Basic_open([Values] bool async) // For multiplexing, we clear the pool to force next query to open another physical connection dataSource.Clear(); - await conn.ExecuteScalarAsync("SELECT 1"); + await connection.ExecuteScalarAsync("SELECT 1"); Assert.That(activities, Has.Count.EqualTo(2)); - ValidateActivity(activities[0], conn, IsMultiplexing); + ValidateActivity(activities[0], connection, IsMultiplexing); // For multiplexing, query's activity can be considered as a parent for physical open's activity Assert.That(activities[0].Parent, Is.SameAs(activities[1])); @@ -75,87 +72,101 @@ static void ValidateActivity(Activity activity, NpgsqlConnection conn, bool isMu } [Test] - public async Task Basic_query([Values] bool async, [Values] bool batch) + public async Task PhysicalOpen_error() { - if (IsMultiplexing && !async) - return; + using var activityListener = StartListener(out var activities); + await using var dataSource = CreateDataSource(x => x.Host = "not-existing-host"); + var exception = Assert.ThrowsAsync(async () => + { + await using var connection = async + ? 
await dataSource.OpenConnectionAsync() + : dataSource.OpenConnection(); + })!; - var activities = new List(); + var activity = GetSingleActivity(activities, "CONNECT " + dataSource.Settings.Database, "CONNECT " + dataSource.Settings.Database, ActivityStatusCode.Error, exception.Message); - using var activityListener = new ActivityListener - { - ShouldListenTo = source => source.Name == "Npgsql", - Sample = (ref _) => ActivitySamplingResult.AllDataAndRecorded, - ActivityStopped = activity => activities.Add(activity) - }; - ActivitySource.AddActivityListener(activityListener); + Assert.That(activity.Events.Count(), Is.EqualTo(1)); + var exceptionEvent = activity.Events.First(); + Assert.That(exceptionEvent.Name, Is.EqualTo("exception")); + + var exceptionTags = exceptionEvent.Tags.ToDictionary(t => t.Key, t => t.Value); + Assert.That(exceptionTags, Has.Count.EqualTo(3)); + + Assert.That(exceptionTags["exception.type"], Is.EqualTo(exception.GetType().FullName)); + Assert.That(exceptionTags["exception.message"], Does.Contain(exception.Message)); + Assert.That(exceptionTags["exception.stacktrace"], Does.Contain(exception.Message)); + + var activityTags = activity.TagObjects.ToDictionary(t => t.Key, t => t.Value); + Assert.That(activityTags, Has.Count.EqualTo(3)); + + Assert.That(activityTags["db.system.name"], Is.EqualTo("postgresql")); + Assert.That(activityTags["db.npgsql.data_source"], Is.EqualTo(dataSource.ConnectionString)); + + Assert.That(activityTags["error.type"], Is.EqualTo("System.Net.Sockets.SocketException")); + } + [Test] + public async Task PhysicalOpen_disable() + { + using var activityListener = StartListener(out var activities); + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.ConfigureTracing(options => options.EnablePhysicalOpenTracing(enable: false)); + await using var dataSource = dataSourceBuilder.Build(); + + await using var connection = async ? 
await dataSource.OpenConnectionAsync() : dataSource.OpenConnection(); + + Assert.That(activities, Is.Empty); + } + + #endregion Physical open + + #region Command execution + + [Test] + public async Task CommandExecute([Values] bool batch) + { var dataSourceBuilder = CreateDataSourceBuilder(); dataSourceBuilder.Name = "TestTracingDataSource"; + dataSourceBuilder.ConfigureTracing(o => o.EnablePhysicalOpenTracing(false)); await using var dataSource = dataSourceBuilder.Build(); - await using var conn = await dataSource.OpenConnectionAsync(); + await using var connection = await dataSource.OpenConnectionAsync(); - // We're not interested in physical open's activity - Assert.That(activities, Has.Count.EqualTo(1)); - activities.Clear(); + using var activityListener = StartListener(out var activities); - await ExecuteScalar(conn, async, batch, "SELECT 42"); + await ExecuteScalar(connection, async, batch, "SELECT 42"); - Assert.That(activities, Has.Count.EqualTo(1)); - var activity = activities[0]; - Assert.That(activity.DisplayName, Is.EqualTo("postgresql")); - Assert.That(activity.OperationName, Is.EqualTo("postgresql")); - Assert.That(activity.Status, Is.EqualTo(ActivityStatusCode.Unset)); + var activity = GetSingleActivity(activities, "postgresql", "postgresql"); Assert.That(activity.Events.Count(), Is.EqualTo(1)); var firstResponseEvent = activity.Events.First(); Assert.That(firstResponseEvent.Name, Is.EqualTo("received-first-response")); var tags = activity.TagObjects.ToDictionary(t => t.Key, t => t.Value); - Assert.That(tags, Has.Count.EqualTo(conn.Settings.Port == 5432 ? 6 : 7)); + Assert.That(tags, Has.Count.EqualTo(connection.Settings.Port == 5432 ? 
6 : 7)); Assert.That(tags["db.query.text"], Is.EqualTo("SELECT 42")); Assert.That(tags["db.system.name"], Is.EqualTo("postgresql")); - Assert.That(tags["db.namespace"], Is.EqualTo(conn.Settings.Database)); + Assert.That(tags["db.namespace"], Is.EqualTo(connection.Settings.Database)); Assert.That(tags["db.npgsql.data_source"], Is.EqualTo("TestTracingDataSource")); if (IsMultiplexing) Assert.That(tags, Does.ContainKey("db.npgsql.connection_id")); else - Assert.That(tags["db.npgsql.connection_id"], Is.EqualTo(conn.ProcessID)); + Assert.That(tags["db.npgsql.connection_id"], Is.EqualTo(connection.ProcessID)); } [Test] - public async Task Error_open([Values] bool async) + public async Task CommandExecute_error([Values] bool batch) { - if (IsMultiplexing && !async) - return; + await using var dataSource = CreateDataSource(ds => ds.ConfigureTracing(o => o.EnablePhysicalOpenTracing(false))); + await using var connection = await dataSource.OpenConnectionAsync(); - var activities = new List(); - - using var activityListener = new ActivityListener - { - ShouldListenTo = source => source.Name == "Npgsql", - Sample = (ref _) => ActivitySamplingResult.AllDataAndRecorded, - ActivityStopped = activity => activities.Add(activity) - }; - ActivitySource.AddActivityListener(activityListener); + using var activityListener = StartListener(out var activities); - await using var dataSource = CreateDataSource(x => x.Host = "not-existing-host"); - var ex = Assert.ThrowsAsync(async () => - { - await using var conn = async - ? 
await dataSource.OpenConnectionAsync() - : dataSource.OpenConnection(); - })!; + Assert.ThrowsAsync(async () => await ExecuteScalar(connection, async, batch, "SELECT * FROM non_existing_table")); - Assert.That(activities, Has.Count.EqualTo(1)); - var activity = activities[0]; - Assert.That(activity.DisplayName, Is.EqualTo("CONNECT " + dataSource.Settings.Database)); - Assert.That(activity.OperationName, Is.EqualTo("CONNECT " + dataSource.Settings.Database)); - Assert.That(activity.Status, Is.EqualTo(ActivityStatusCode.Error)); - Assert.That(activity.StatusDescription, Is.EqualTo(ex.Message)); + var activity = GetSingleActivity(activities, "postgresql", "postgresql", ActivityStatusCode.Error, PostgresErrorCodes.UndefinedTable); Assert.That(activity.Events.Count(), Is.EqualTo(1)); var exceptionEvent = activity.Events.First(); @@ -164,50 +175,283 @@ public async Task Error_open([Values] bool async) var exceptionTags = exceptionEvent.Tags.ToDictionary(t => t.Key, t => t.Value); Assert.That(exceptionTags, Has.Count.EqualTo(3)); - Assert.That(exceptionTags["exception.type"], Is.EqualTo(ex.GetType().FullName)); - Assert.That(exceptionTags["exception.message"], Does.Contain(ex.Message)); - Assert.That(exceptionTags["exception.stacktrace"], Does.Contain(ex.Message)); + Assert.That(exceptionTags["exception.type"], Is.EqualTo("Npgsql.PostgresException")); + Assert.That(exceptionTags["exception.message"], Does.Contain("relation \"non_existing_table\" does not exist")); + Assert.That(exceptionTags["exception.stacktrace"], Does.Contain("relation \"non_existing_table\" does not exist")); var activityTags = activity.TagObjects.ToDictionary(t => t.Key, t => t.Value); - Assert.That(activityTags, Has.Count.EqualTo(3)); + Assert.That(activityTags, Has.Count.EqualTo(connection.Settings.Port == 5432 ? 
8 : 9)); + Assert.That(activityTags["db.query.text"], Is.EqualTo("SELECT * FROM non_existing_table")); Assert.That(activityTags["db.system.name"], Is.EqualTo("postgresql")); - Assert.That(activityTags["db.npgsql.data_source"], Is.EqualTo(dataSource.ConnectionString)); + Assert.That(activityTags["db.namespace"], Is.EqualTo(connection.Settings.Database)); - Assert.That(activityTags["error.type"], Is.EqualTo("System.Net.Sockets.SocketException")); + Assert.That(activityTags["db.response.status_code"], Is.EqualTo(PostgresErrorCodes.UndefinedTable)); + Assert.That(activityTags["error.type"], Is.EqualTo(PostgresErrorCodes.UndefinedTable)); + + Assert.That(activityTags["db.npgsql.data_source"], Is.EqualTo(connection.ConnectionString)); + + if (IsMultiplexing) + Assert.That(activityTags, Does.ContainKey("db.npgsql.connection_id")); + else + Assert.That(activityTags["db.npgsql.connection_id"], Is.EqualTo(connection.ProcessID)); } [Test] - public async Task Error_query([Values] bool async, [Values] bool batch) + public async Task CommandExecute_ConfigureTracing([Values] bool batch) { - if (IsMultiplexing && !async) - return; + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.ConfigureTracing(options => + { + options + .EnablePhysicalOpenTracing(false) + .EnableFirstResponseEvent(enable: false) + .ConfigureCommandFilter(cmd => cmd.CommandText.Contains('2')) + .ConfigureBatchFilter(batch => batch.BatchCommands[0].CommandText.Contains('2')) + .ConfigureCommandSpanNameProvider(_ => "unknown_query") + .ConfigureBatchSpanNameProvider(_ => "unknown_query") + .ConfigureCommandEnrichmentCallback((activity, _) => activity.AddTag("custom_tag", "custom_value")) + .ConfigureBatchEnrichmentCallback((activity, _) => activity.AddTag("custom_tag", "custom_value")); + }); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + using var activityListener = StartListener(out var activities); + + 
await ExecuteScalar(connection, async, batch, "SELECT 1"); + + Assert.That(activities, Is.Empty); + + await ExecuteScalar(connection, async, batch, "SELECT 2"); + + var activity = GetSingleActivity(activities, "unknown_query", "unknown_query"); + + Assert.That(activity.Events.Count(), Is.EqualTo(0)); + + var tags = activity.TagObjects.ToDictionary(t => t.Key, t => t.Value); + Assert.That(tags["custom_tag"], Is.EqualTo("custom_value")); + } + + #endregion Command execution + + #region Binary import + + [Test] + public async Task BinaryImport() + { + await using var dataSource = CreateDataSource(ds => ds.ConfigureTracing(o => o.EnablePhysicalOpenTracing(false))); + await using var connection = await dataSource.OpenConnectionAsync(); + + var table = await CreateTempTable(connection, "field_text TEXT, field_int2 SMALLINT"); + + using var activityListener = StartListener(out var activities); - var activities = new List(); + var copyFromCommand = $"COPY {table} (field_text, field_int2) FROM STDIN BINARY"; - using var activityListener = new ActivityListener + if (async) { - ShouldListenTo = source => source.Name == "Npgsql", - Sample = (ref _) => ActivitySamplingResult.AllDataAndRecorded, - ActivityStopped = activity => activities.Add(activity) - }; - ActivitySource.AddActivityListener(activityListener); + await using var writer = await connection.BeginBinaryImportAsync(copyFromCommand); - await using var dataSource = CreateDataSource(); + await writer.StartRowAsync(); + await writer.WriteAsync("Hello"); + await writer.WriteAsync((short)8, NpgsqlDbType.Smallint); + + await writer.CompleteAsync(); + } + else + { + using var writer = connection.BeginBinaryImport(copyFromCommand); + + writer.StartRow(); + writer.Write("Hello"); + writer.Write((short)8, NpgsqlDbType.Smallint); + + writer.Complete(); + } + + var activity = GetSingleActivity(activities, "COPY FROM"); + + var tags = activity.TagObjects.ToDictionary(t => t.Key, t => t.Value); + Assert.That(tags, 
Has.Count.EqualTo(connection.Settings.Port == 5432 ? 8 : 9)); + + Assert.That(tags["db.query.text"], Is.EqualTo(copyFromCommand)); + Assert.That(tags["db.operation.name"], Is.EqualTo("COPY FROM")); + Assert.That(tags["db.system.name"], Is.EqualTo("postgresql")); + Assert.That(tags["db.namespace"], Is.EqualTo(connection.Settings.Database)); + + Assert.That(tags["db.npgsql.data_source"], Is.EqualTo(connection.ConnectionString)); + Assert.That(tags["db.npgsql.rows"], Is.EqualTo(1)); + + if (IsMultiplexing) + Assert.That(tags, Does.ContainKey("db.npgsql.connection_id")); + else + Assert.That(tags["db.npgsql.connection_id"], Is.EqualTo(connection.ProcessID)); + } + + [Test] + public async Task BinaryImport_cancel() + { + await using var dataSource = CreateDataSource(ds => ds.ConfigureTracing(o => o.EnablePhysicalOpenTracing(false))); + await using var connection = await dataSource.OpenConnectionAsync(); + + var table = await CreateTempTable(connection, "field_text TEXT, field_int2 SMALLINT"); + + using var activityListener = StartListener(out var activities); + + var copyFromCommand = $"COPY {table} (field_text, field_int2) FROM STDIN BINARY"; + + if (async) + { + await using var writer = await connection.BeginBinaryImportAsync(copyFromCommand); + await writer.StartRowAsync(); + await writer.WriteAsync("Hello"); + await writer.WriteAsync((short)8, NpgsqlDbType.Smallint); + // No Complete() call - disposing cancels + } + else + { + using var writer = connection.BeginBinaryImport(copyFromCommand); + writer.StartRow(); + writer.Write("Hello"); + writer.Write((short)8, NpgsqlDbType.Smallint); + // No Complete() call - disposing cancels + } + + _ = GetSingleActivity(activities, "COPY FROM"); + } + + [Test] + public async Task BinaryImport_error() + { + await using var dataSource = CreateDataSource(ds => ds.ConfigureTracing(o => o.EnablePhysicalOpenTracing(false))); + await using var connection = await dataSource.OpenConnectionAsync(); + + using var activityListener = 
StartListener(out var activities); + + var copyFromCommand = $"COPY non_existing_table (field_text, field_int2) FROM STDIN BINARY"; + + var ex = Assert.ThrowsAsync(async () => + { + await using var writer = async + ? await connection.BeginBinaryImportAsync(copyFromCommand) + : connection.BeginBinaryImport(copyFromCommand); + }); + + var activity = GetSingleActivity(activities, "COPY FROM", "COPY FROM", ActivityStatusCode.Error, PostgresErrorCodes.UndefinedTable); + + Assert.That(activity.Events.Count(), Is.EqualTo(1)); + var exceptionEvent = activity.Events.First(); + Assert.That(exceptionEvent.Name, Is.EqualTo("exception")); + + var exceptionTags = exceptionEvent.Tags.ToDictionary(t => t.Key, t => t.Value); + Assert.That(exceptionTags, Has.Count.EqualTo(3)); + + Assert.That(exceptionTags["exception.type"], Is.EqualTo("Npgsql.PostgresException")); + Assert.That(exceptionTags["exception.message"], Does.Contain("relation \"non_existing_table\" does not exist")); + Assert.That(exceptionTags["exception.stacktrace"], Does.Contain("relation \"non_existing_table\" does not exist")); + } + + #endregion Binary import + + #region Binary export + + [Test] + public async Task BinaryExport() + { + await using var dataSource = CreateDataSource(ds => ds.ConfigureTracing(o => o.EnablePhysicalOpenTracing(false))); + await using var connection = await dataSource.OpenConnectionAsync(); + + var table = await CreateTempTable(connection, "field_text TEXT, field_int2 SMALLINT"); + await connection.ExecuteNonQueryAsync($"INSERT INTO {table} (field_text, field_int2) VALUES ('Hello', 8)"); + + using var activityListener = StartListener(out var activities); + + var copyToCommand = $"COPY {table} (field_text, field_int2) TO STDOUT BINARY"; + + if (async) + { + await using var reader = await connection.BeginBinaryExportAsync(copyToCommand); + while (await reader.StartRowAsync() != -1) + { + _ = await reader.ReadAsync(); + _ = await reader.ReadAsync(); + } + } + else + { + using var reader = 
connection.BeginBinaryExport(copyToCommand); + while (reader.StartRow() != -1) + { + _ = reader.Read(); + _ = reader.Read(); + } + } + + var activity = GetSingleActivity(activities, "COPY TO"); + + var tags = activity.TagObjects.ToDictionary(t => t.Key, t => t.Value); + Assert.That(tags, Has.Count.EqualTo(connection.Settings.Port == 5432 ? 8 : 9)); + + Assert.That(tags["db.query.text"], Is.EqualTo(copyToCommand)); + Assert.That(tags["db.operation.name"], Is.EqualTo("COPY TO")); + Assert.That(tags["db.system.name"], Is.EqualTo("postgresql")); + Assert.That(tags["db.namespace"], Is.EqualTo(connection.Settings.Database)); + + Assert.That(tags["db.npgsql.data_source"], Is.EqualTo(connection.ConnectionString)); + Assert.That(tags["db.npgsql.rows"], Is.EqualTo(1)); + + if (IsMultiplexing) + Assert.That(tags, Does.ContainKey("db.npgsql.connection_id")); + else + Assert.That(tags["db.npgsql.connection_id"], Is.EqualTo(connection.ProcessID)); + } + + [Test] + public async Task BinaryExport_cancel() + { + await using var dataSource = CreateDataSource(ds => ds.ConfigureTracing(o => o.EnablePhysicalOpenTracing(false))); await using var conn = await dataSource.OpenConnectionAsync(); - // We're not interested in physical open's activity - Assert.That(activities.Count, Is.EqualTo(1)); - activities.Clear(); + using var activityListener = StartListener(out var activities); - Assert.ThrowsAsync(async () => await ExecuteScalar(conn, async, batch, "SELECT * FROM non_existing_table")); + // This must be large enough to cause Postgres to queue up CopyData messages. 
+ const string copyToCommand = "COPY (select md5(random()::text) as id from generate_series(1, 100000)) TO STDOUT BINARY"; - Assert.That(activities, Has.Count.EqualTo(1)); - var activity = activities[0]; - Assert.That(activity.DisplayName, Is.EqualTo("postgresql")); - Assert.That(activity.OperationName, Is.EqualTo("postgresql")); - Assert.That(activity.Status, Is.EqualTo(ActivityStatusCode.Error)); - Assert.That(activity.StatusDescription, Is.EqualTo(PostgresErrorCodes.UndefinedTable)); + if (async) + { + await using var exporter = await conn.BeginBinaryExportAsync(copyToCommand); + await exporter.StartRowAsync(); + await exporter.ReadAsync(); + await exporter.CancelAsync(); + } + else + { + using var exporter = await conn.BeginBinaryExportAsync(copyToCommand); + exporter.StartRow(); + exporter.Read(); + exporter.Cancel(); + } + + _ = GetSingleActivity(activities, "COPY TO"); + } + + [Test] + public async Task BinaryExport_error() + { + await using var dataSource = CreateDataSource(ds => ds.ConfigureTracing(o => o.EnablePhysicalOpenTracing(false))); + await using var connection = await dataSource.OpenConnectionAsync(); + + using var activityListener = StartListener(out var activities); + + var copyToCommand = $"COPY non_existing_table (field_text, field_int2) TO STDOUT BINARY"; + var ex = Assert.ThrowsAsync(async () => + { + await using var reader = async + ? 
await connection.BeginBinaryExportAsync(copyToCommand) + : connection.BeginBinaryExport(copyToCommand); + }); + + var activity = GetSingleActivity(activities, "COPY TO", "COPY TO", ActivityStatusCode.Error, PostgresErrorCodes.UndefinedTable); Assert.That(activity.Events.Count(), Is.EqualTo(1)); var exceptionEvent = activity.Events.First(); @@ -219,78 +463,337 @@ public async Task Error_query([Values] bool async, [Values] bool batch) Assert.That(exceptionTags["exception.type"], Is.EqualTo("Npgsql.PostgresException")); Assert.That(exceptionTags["exception.message"], Does.Contain("relation \"non_existing_table\" does not exist")); Assert.That(exceptionTags["exception.stacktrace"], Does.Contain("relation \"non_existing_table\" does not exist")); + } - var activityTags = activity.TagObjects.ToDictionary(t => t.Key, t => t.Value); - Assert.That(activityTags, Has.Count.EqualTo(conn.Settings.Port == 5432 ? 8 : 9)); + #endregion Binary export - Assert.That(activityTags["db.query.text"], Is.EqualTo("SELECT * FROM non_existing_table")); - Assert.That(activityTags["db.system.name"], Is.EqualTo("postgresql")); - Assert.That(activityTags["db.namespace"], Is.EqualTo(conn.Settings.Database)); + #region Raw binary - Assert.That(activityTags["db.response.status_code"], Is.EqualTo(PostgresErrorCodes.UndefinedTable)); - Assert.That(activityTags["error.type"], Is.EqualTo(PostgresErrorCodes.UndefinedTable)); + [Test] + public async Task RawBinaryExport() + { + await using var dataSource = CreateDataSource(ds => ds.ConfigureTracing(o => o.EnablePhysicalOpenTracing(false))); + await using var connection = await dataSource.OpenConnectionAsync(); - Assert.That(activityTags["db.npgsql.data_source"], Is.EqualTo(conn.ConnectionString)); + var table = await CreateTempTable(connection, "field_text TEXT, field_int2 SMALLINT"); + await connection.ExecuteNonQueryAsync($"INSERT INTO {table} (field_text, field_int2) VALUES ('Hello', 8)"); + + using var activityListener = StartListener(out var 
activities); + + // Raw binary export + var copyToCommand = $"COPY {table} (field_text, field_int2) TO STDIN BINARY"; + var buffer = new byte[1024]; + if (async) + { + await using var stream = await connection.BeginRawBinaryCopyAsync(copyToCommand); + while (await stream.ReadAsync(buffer, 0, buffer.Length) > 0) { } + } + else + { + using var stream = connection.BeginRawBinaryCopy(copyToCommand); + while (stream.Read(buffer, 0, buffer.Length) > 0) { } + } + + var activity = GetSingleActivity(activities, "COPY"); + + var tags = activity.TagObjects.ToDictionary(t => t.Key, t => t.Value); + + Assert.That(tags, Has.Count.EqualTo(connection.Settings.Port == 5432 ? 7 : 8)); + + Assert.That(tags["db.query.text"], Is.EqualTo(copyToCommand)); + Assert.That(tags["db.operation.name"], Is.EqualTo("COPY TO")); + Assert.That(tags["db.system.name"], Is.EqualTo("postgresql")); + Assert.That(tags["db.namespace"], Is.EqualTo(connection.Settings.Database)); + + Assert.That(tags["db.npgsql.data_source"], Is.EqualTo(connection.ConnectionString)); if (IsMultiplexing) - Assert.That(activityTags, Does.ContainKey("db.npgsql.connection_id")); + Assert.That(tags, Does.ContainKey("db.npgsql.connection_id")); else - Assert.That(activityTags["db.npgsql.connection_id"], Is.EqualTo(conn.ProcessID)); + Assert.That(tags["db.npgsql.connection_id"], Is.EqualTo(connection.ProcessID)); + + Assert.That(tags, Does.Not.ContainKey("db.npgsql.rows")); } [Test] - public async Task Configure_tracing([Values] bool async, [Values] bool batch) + public async Task RawBinaryExport_cancel() { - if (IsMultiplexing && !async) - return; + await using var dataSource = CreateDataSource(ds => ds.ConfigureTracing(o => o.EnablePhysicalOpenTracing(false))); + await using var connection = await dataSource.OpenConnectionAsync(); - var activities = new List(); + var table = await CreateTempTable(connection, "field_text TEXT, field_int2 SMALLINT"); + await connection.ExecuteNonQueryAsync($"INSERT INTO {table} (field_text, 
field_int2) VALUES ('Hello', 8)"); - using var activityListener = new ActivityListener + using var activityListener = StartListener(out var activities); + + var copyToCommand = $"COPY {table} (field_text, field_int2) TO STDIN BINARY"; + var buffer = new byte[1024]; + if (async) { - ShouldListenTo = source => source.Name == "Npgsql", - Sample = (ref _) => ActivitySamplingResult.AllDataAndRecorded, - ActivityStopped = activity => activities.Add(activity) - }; - ActivitySource.AddActivityListener(activityListener); + await using var stream = await connection.BeginRawBinaryCopyAsync(copyToCommand); + var _ = await stream.ReadAsync(buffer, 0, buffer.Length); + await stream.CancelAsync(); + } + else + { + using var stream = connection.BeginRawBinaryCopy(copyToCommand); + var _ = stream.Read(buffer, 0, buffer.Length); + stream.Cancel(); + } - var dataSourceBuilder = CreateDataSourceBuilder(); - dataSourceBuilder.ConfigureTracing(options => + _ = GetSingleActivity(activities, "COPY"); + } + + [Test] + public async Task RawBinaryImport_cancel() + { + await using var dataSource = CreateDataSource(ds => ds.ConfigureTracing(o => o.EnablePhysicalOpenTracing(false))); + await using var connection = await dataSource.OpenConnectionAsync(); + + var table = await CreateTempTable(connection, "field_text TEXT, field_int2 SMALLINT"); + + using var activityListener = StartListener(out var activities); + + var copyToCommand = $"COPY {table} (field_text, field_int2) FROM STDIN BINARY"; + byte[] garbage = [1, 2, 3, 4]; + if (async) { - options - .EnablePhysicalOpenTracing(enable: false) - .EnableFirstResponseEvent(enable: false) - .ConfigureCommandFilter(cmd => cmd.CommandText.Contains('2')) - .ConfigureBatchFilter(batch => batch.BatchCommands[0].CommandText.Contains('2')) - .ConfigureCommandSpanNameProvider(_ => "unknown_query") - .ConfigureBatchSpanNameProvider(_ => "unknown_query") - .ConfigureCommandEnrichmentCallback((activity, _) => activity.AddTag("custom_tag", "custom_value")) - 
.ConfigureBatchEnrichmentCallback((activity, _) => activity.AddTag("custom_tag", "custom_value")); + await using var stream = await connection.BeginRawBinaryCopyAsync(copyToCommand); + await stream.WriteAsync(garbage); + await stream.FlushAsync(); + await stream.CancelAsync(); + } + else + { + using var stream = connection.BeginRawBinaryCopy(copyToCommand); + stream.Write(garbage); + stream.Flush(); + stream.Cancel(); + } + + _ = GetSingleActivity(activities, "COPY"); + } + + [Test] + public async Task RawBinaryImport_error() + { + await using var dataSource = CreateDataSource(ds => ds.ConfigureTracing(o => o.EnablePhysicalOpenTracing(false))); + await using var connection = await dataSource.OpenConnectionAsync(); + + using var activityListener = StartListener(out var activities); + + var copyFromCommand = $"COPY non_existing_table (field_text, field_int2) FROM STDIN BINARY"; + var ex = Assert.ThrowsAsync(async () => + { + await using var stream = async + ? await connection.BeginRawBinaryCopyAsync(copyFromCommand) + : connection.BeginRawBinaryCopy(copyFromCommand); }); - await using var dataSource = dataSourceBuilder.Build(); + + var activity = GetSingleActivity(activities, "COPY", "COPY", ActivityStatusCode.Error, PostgresErrorCodes.UndefinedTable); + + Assert.That(activity.Events.Count(), Is.EqualTo(1)); + var exceptionEvent = activity.Events.First(); + Assert.That(exceptionEvent.Name, Is.EqualTo("exception")); + + var exceptionTags = exceptionEvent.Tags.ToDictionary(t => t.Key, t => t.Value); + Assert.That(exceptionTags, Has.Count.EqualTo(3)); + + Assert.That(exceptionTags["exception.type"], Is.EqualTo("Npgsql.PostgresException")); + Assert.That(exceptionTags["exception.message"], Does.Contain("relation \"non_existing_table\" does not exist")); + Assert.That(exceptionTags["exception.stacktrace"], Does.Contain("relation \"non_existing_table\" does not exist")); + } + + #endregion Raw binary + + #region Text COPY + + [Test] + public async Task TextImport() + { + 
await using var dataSource = CreateDataSource(ds => ds.ConfigureTracing(o => o.EnablePhysicalOpenTracing(false))); + await using var connection = await dataSource.OpenConnectionAsync(); + + var table = await CreateTempTable(connection, "field_text TEXT, field_int2 SMALLINT"); + + using var activityListener = StartListener(out var activities); + + var copyFromCommand = $"COPY {table} (field_text, field_int2) FROM STDIN"; + + if (async) + { + await using var writer = await connection.BeginTextImportAsync(copyFromCommand); + await writer.WriteAsync("Hello\t8\n"); + } + else + { + using var writer = connection.BeginTextImport(copyFromCommand); + writer.Write("Hello\t8\n"); + } + + var activity = GetSingleActivity(activities, "COPY FROM"); + + var tags = activity.TagObjects.ToDictionary(t => t.Key, t => t.Value); + + Assert.That(tags, Has.Count.EqualTo(connection.Settings.Port == 5432 ? 7 : 8)); + + Assert.That(tags["db.query.text"], Is.EqualTo(copyFromCommand)); + Assert.That(tags["db.operation.name"], Is.EqualTo("COPY FROM")); + Assert.That(tags["db.system.name"], Is.EqualTo("postgresql")); + Assert.That(tags["db.namespace"], Is.EqualTo(connection.Settings.Database)); + + Assert.That(tags["db.npgsql.data_source"], Is.EqualTo(connection.ConnectionString)); + + if (IsMultiplexing) + Assert.That(tags, Does.ContainKey("db.npgsql.connection_id")); + else + Assert.That(tags["db.npgsql.connection_id"], Is.EqualTo(connection.ProcessID)); + + Assert.That(tags, Does.Not.ContainKey("db.npgsql.rows")); + } + + [Test] + public async Task TextExport() + { + await using var dataSource = CreateDataSource(ds => ds.ConfigureTracing(o => o.EnablePhysicalOpenTracing(false))); + await using var connection = await dataSource.OpenConnectionAsync(); + + var table = await CreateTempTable(connection, "field_text TEXT, field_int2 SMALLINT"); + + var insertCmd = $"INSERT INTO {table} (field_text, field_int2) VALUES ('Hello', 8)"; + await connection.ExecuteNonQueryAsync(insertCmd); + + using var 
activityListener = StartListener(out var activities); + + var copyFromCommand = $"COPY {table} (field_text, field_int2) TO STDIN"; + + var chars = new char[30]; + if (async) + { + await using var reader = await connection.BeginTextExportAsync(copyFromCommand); + _ = await reader.ReadAsync(chars); + } + else + { + using var reader = connection.BeginTextExport(copyFromCommand); + _ = reader.Read(chars); + } + + var activity = GetSingleActivity(activities, "COPY TO"); + + var tags = activity.TagObjects.ToDictionary(t => t.Key, t => t.Value); + + Assert.That(tags, Has.Count.EqualTo(connection.Settings.Port == 5432 ? 7 : 8)); + + Assert.That(tags["db.query.text"], Is.EqualTo(copyFromCommand)); + Assert.That(tags["db.operation.name"], Is.EqualTo("COPY TO")); + Assert.That(tags["db.system.name"], Is.EqualTo("postgresql")); + Assert.That(tags["db.namespace"], Is.EqualTo(connection.Settings.Database)); + + Assert.That(tags["db.npgsql.data_source"], Is.EqualTo(connection.ConnectionString)); + + if (IsMultiplexing) + Assert.That(tags, Does.ContainKey("db.npgsql.connection_id")); + else + Assert.That(tags["db.npgsql.connection_id"], Is.EqualTo(connection.ProcessID)); + + Assert.That(tags, Does.Not.ContainKey("db.npgsql.rows")); + } + + // Text COPY is implemented over NpgsqlRawCopyStream internally, without any additional tracing-related logic. + // So we do only basic direct coverage and depend on the general raw tests for the rest. + + #endregion Text COPY + + // All ConfigureTracing() aspects of COPY are implemented in a single code path for all COPY paths, so we test just one. 
+ + [Test] + public async Task Copy_ConfigureTracing() + { + await using var dataSource = CreateDataSource(builder => builder.ConfigureTracing(options => + options + .EnablePhysicalOpenTracing(false) + .ConfigureCopyOperationFilter(command => command.Contains("filter_in")) + .ConfigureCopyOperationSpanNameProvider(_ => "custom_binary_import") + .ConfigureCopyOperationEnrichmentCallback((activity, _) => activity.AddTag("custom_tag", "custom_value")))); + await using var conn = await dataSource.OpenConnectionAsync(); - // We disabled physical open tracing - Assert.That(activities.Count, Is.EqualTo(0)); + var table = await CreateTempTable(conn, "field_text TEXT, field_int_filter_in SMALLINT"); + var copyCommand = $"COPY {table} (field_text, field_int_filter_in) FROM STDIN BINARY"; - await ExecuteScalar(conn, async, batch, "SELECT 1"); + var filteredOutTable = await CreateTempTable(conn, "field_text TEXT, field_int_filter_out SMALLINT"); + var filteredOutCopyCommand = $"COPY {filteredOutTable} (field_text, field_int_filter_out) FROM STDIN BINARY"; - Assert.That(activities, Is.Empty); + using var activityListener = StartListener(out var activities); - await ExecuteScalar(conn, async, batch, "SELECT 2"); - Assert.That(activities, Has.Count.EqualTo(1)); - var activity = activities[0]; - Assert.That(activity.DisplayName, Is.EqualTo("unknown_query")); - Assert.That(activity.OperationName, Is.EqualTo("unknown_query")); + if (async) + { + await using (var writer = await conn.BeginBinaryImportAsync(copyCommand)) + { + await writer.CompleteAsync(); + } + + await using (var writer = await conn.BeginBinaryImportAsync(filteredOutCopyCommand)) + { + await writer.CompleteAsync(); + } + } + else + { + using (var writer = conn.BeginBinaryImport(copyCommand)) + { + writer.Complete(); + } + + using (var writer = conn.BeginBinaryImport(filteredOutCopyCommand)) + { + writer.Complete(); + } + } - Assert.That(activity.Events.Count(), Is.EqualTo(0)); + // There should be just one activity 
since one of the two COPY commands is filtered out + var activity = GetSingleActivity(activities, "custom_binary_import", "custom_binary_import"); var tags = activity.TagObjects.ToDictionary(t => t.Key, t => t.Value); Assert.That(tags["custom_tag"], Is.EqualTo("custom_value")); } - async Task ExecuteScalar(NpgsqlConnection connection, bool async, bool isBatch, string query) + static ActivityListener StartListener(out List activities) + { + var a = new List(); + + var activityListener = new ActivityListener + { + ShouldListenTo = source => source.Name == "Npgsql", + Sample = (ref _) => ActivitySamplingResult.AllDataAndRecorded, + ActivityStopped = activity => a.Add(activity) + }; + ActivitySource.AddActivityListener(activityListener); + + activities = a; + return activityListener; + } + + static Activity GetSingleActivity( + List activities, + string? expectedDisplayName, + string? expectedOperationName = null, + ActivityStatusCode? expectedStatusCode = null, + string? expectedStatusDescription = null) + { + Assert.That(activities, Has.Count.EqualTo(1)); + var activity = activities[0]; + Assert.That(activity.DisplayName, Is.EqualTo(expectedDisplayName)); + Assert.That(activity.OperationName, Is.EqualTo(expectedOperationName ?? expectedDisplayName)); + Assert.That(activity.Status, Is.EqualTo(expectedStatusCode ?? 
ActivityStatusCode.Unset)); + Assert.That(activity.StatusDescription, Is.EqualTo(expectedStatusDescription)); + + return activity; + } + + static async Task ExecuteScalar(NpgsqlConnection connection, bool async, bool isBatch, string query) { if (!isBatch) { From 4cf1d33347b7de18835942b0519ce2b6e8c2249d Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Sat, 22 Nov 2025 15:26:55 +0100 Subject: [PATCH 651/761] Add db.npgsql.prepared to tracing (#6346) Closes #4636 --- src/Npgsql/MultiplexingDataSource.cs | 11 +++- src/Npgsql/NpgsqlActivitySource.cs | 5 +- src/Npgsql/NpgsqlCommand.cs | 13 +++-- test/Npgsql.Tests/TracingTests.cs | 87 +++++++++++++++++++++++++--- 4 files changed, 101 insertions(+), 15 deletions(-) diff --git a/src/Npgsql/MultiplexingDataSource.cs b/src/Npgsql/MultiplexingDataSource.cs index 60ba882923..8c529cf7e7 100644 --- a/src/Npgsql/MultiplexingDataSource.cs +++ b/src/Npgsql/MultiplexingDataSource.cs @@ -236,11 +236,20 @@ bool WriteCommand(NpgsqlConnector connector, NpgsqlCommand command, ref Multiple // to buffer in memory), and the actual flush will occur at the level above. For cases where the // command overflows the buffer, async I/O is done, and we schedule continuations separately - // but the main thread continues to handle other commands on other connectors. + + var fullyPrepared = _autoPrepare; + if (_autoPrepare) { // TODO: Need to log based on numPrepared like in non-multiplexing mode... 
for (var i = 0; i < command.InternalBatchCommands.Count; i++) - command.InternalBatchCommands[i].TryAutoPrepare(connector); + if (!command.InternalBatchCommands[i].TryAutoPrepare(connector)) + fullyPrepared = false; + } + + if (command.CurrentActivity is not null && fullyPrepared) + { + command.CurrentActivity.SetTag("db.npgsql.prepared", true); } var written = connector.CommandsInFlightWriter!.TryWrite(command); diff --git a/src/Npgsql/NpgsqlActivitySource.cs b/src/Npgsql/NpgsqlActivitySource.cs index 91da0f0548..be4e257c48 100644 --- a/src/Npgsql/NpgsqlActivitySource.cs +++ b/src/Npgsql/NpgsqlActivitySource.cs @@ -16,7 +16,7 @@ static class NpgsqlActivitySource internal static bool IsEnabled => Source.HasListeners(); - internal static Activity? CommandStart(NpgsqlConnectionStringBuilder settings, string commandText, CommandType commandType, string? spanName) + internal static Activity? CommandStart(string commandText, CommandType commandType, bool? prepared, string? spanName) { string? 
operationName = null; @@ -49,6 +49,9 @@ static class NpgsqlActivitySource activity.SetTag("db.query.text", commandText); + if (prepared is true) + activity.SetTag("db.npgsql.prepared", true); + switch (commandType) { case CommandType.StoredProcedure: diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index cfd48189ec..ffbf86029f 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -1429,6 +1429,8 @@ internal virtual async ValueTask ExecuteReader(bool async, Com try { + var fullyPrepared = false; + switch (IsExplicitlyPrepared) { case true: @@ -1463,6 +1465,7 @@ internal virtual async ValueTask ExecuteReader(bool async, Com NpgsqlEventSource.Log.CommandStartPrepared(); connector.DataSource.MetricsReporter.CommandStartPrepared(); + fullyPrepared = true; break; case false: @@ -1502,6 +1505,7 @@ internal virtual async ValueTask ExecuteReader(bool async, Com { NpgsqlEventSource.Log.CommandStartPrepared(); connector.DataSource.MetricsReporter.CommandStartPrepared(); + fullyPrepared = true; } } @@ -1524,7 +1528,7 @@ internal virtual async ValueTask ExecuteReader(bool async, Com NpgsqlEventSource.Log.CommandStart(CommandText); startTimestamp = connector.DataSource.MetricsReporter.ReportCommandStart(); - TraceCommandStart(connector.Settings, connector.DataSource.Configuration.TracingOptions); + TraceCommandStart(connector.DataSource.Configuration.TracingOptions, fullyPrepared); TraceCommandEnrich(connector); // We do not wait for the entire send to complete before proceeding to reading - @@ -1594,7 +1598,8 @@ internal virtual async ValueTask ExecuteReader(bool async, Com State = CommandState.InProgress; - TraceCommandStart(conn.Settings, conn.NpgsqlDataSource.Configuration.TracingOptions); + // In multiplexing, we don't yet know whether the command will execute as prepared or not; that will be determined later. 
+ TraceCommandStart(conn.NpgsqlDataSource.Configuration.TracingOptions, prepared: null); // TODO: Experiment: do we want to wait on *writing* here, or on *reading*? // Previous behavior was to wait on reading, which throw the exception from ExecuteReader (and not from @@ -1739,7 +1744,7 @@ internal void Reset() #region Tracing - internal void TraceCommandStart(NpgsqlConnectionStringBuilder settings, NpgsqlTracingOptions tracingOptions) + internal void TraceCommandStart(NpgsqlTracingOptions tracingOptions, bool? prepared) { Debug.Assert(CurrentActivity is null); @@ -1756,9 +1761,9 @@ internal void TraceCommandStart(NpgsqlConnectionStringBuilder settings, NpgsqlTr : tracingOptions.CommandSpanNameProvider?.Invoke(this); CurrentActivity = NpgsqlActivitySource.CommandStart( - settings, WrappingBatch is not null ? GetBatchFullCommandText() : CommandText, CommandType, + prepared, spanName); } } diff --git a/test/Npgsql.Tests/TracingTests.cs b/test/Npgsql.Tests/TracingTests.cs index 3ed31d6a30..9fe94b7216 100644 --- a/test/Npgsql.Tests/TracingTests.cs +++ b/test/Npgsql.Tests/TracingTests.cs @@ -197,6 +197,56 @@ public async Task CommandExecute_error([Values] bool batch) Assert.That(activityTags["db.npgsql.connection_id"], Is.EqualTo(connection.ProcessID)); } + [Test] + public async Task CommandExecute_explicit_prepare([Values] bool batch) + { + if (IsMultiplexing) + { + Assert.Ignore("Explicit prepare is not supported with multiplexing"); + return; + } + + await using var dataSource = CreateDataSource(o => o.ConfigureTracing(o => o.EnablePhysicalOpenTracing(false))); + await using var connection = await dataSource.OpenConnectionAsync(); + + using var activityListener = StartListener(out var activities); + + await ExecuteScalar(connection, async, batch, "SELECT 42", prepare: false); + var activity = GetSingleActivity(activities, "postgresql", "postgresql"); + var tags = activity.TagObjects.ToDictionary(t => t.Key, t => t.Value); + Assert.That(tags, 
Does.Not.ContainKey("db.npgsql.prepared")); + + activities.Clear(); + await ExecuteScalar(connection, async, batch, "SELECT 42", prepare: true); + activity = GetSingleActivity(activities, "postgresql", "postgresql"); + tags = activity.TagObjects.ToDictionary(t => t.Key, t => t.Value); + Assert.That(tags["db.npgsql.prepared"], Is.True); + } + + [Test] + public async Task CommandExecute_auto_prepare([Values] bool batch) + { + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.ConnectionStringBuilder.MaxAutoPrepare = 10; + dataSourceBuilder.ConnectionStringBuilder.AutoPrepareMinUsages = 2; + dataSourceBuilder.ConfigureTracing(o => o.EnablePhysicalOpenTracing(false)); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + using var activityListener = StartListener(out var activities); + + await ExecuteScalar(connection, async, batch, "SELECT 42"); + var activity = GetSingleActivity(activities, "postgresql", "postgresql"); + var tags = activity.TagObjects.ToDictionary(t => t.Key, t => t.Value); + Assert.That(tags, Does.Not.ContainKey("db.npgsql.prepared")); + + activities.Clear(); + await ExecuteScalar(connection, async, batch, "SELECT 42"); + activity = GetSingleActivity(activities, "postgresql", "postgresql"); + tags = activity.TagObjects.ToDictionary(t => t.Key, t => t.Value); + Assert.That(tags["db.npgsql.prepared"], Is.True); + } + [Test] public async Task CommandExecute_ConfigureTracing([Values] bool batch) { @@ -793,26 +843,45 @@ static Activity GetSingleActivity( return activity; } - static async Task ExecuteScalar(NpgsqlConnection connection, bool async, bool isBatch, string query) + static async Task ExecuteScalar(NpgsqlConnection connection, bool async, bool isBatch, string query, bool prepare = false) { - if (!isBatch) - { - if (async) - return await connection.ExecuteScalarAsync(query); - else - return connection.ExecuteScalar(query); - } - else + if (isBatch) 
{ await using var batch = connection.CreateBatch(); var batchCommand = batch.CreateBatchCommand(); batchCommand.CommandText = query; batch.BatchCommands.Add(batchCommand); + if (prepare) + { + if (async) + await batch.PrepareAsync(); + else + batch.Prepare(); + } + if (async) return await batch.ExecuteScalarAsync(); else return batch.ExecuteScalar(); } + else + { + await using var command = connection.CreateCommand(); + command.CommandText = query; + + if (prepare) + { + if (async) + await command.PrepareAsync(); + else + command.Prepare(); + } + + if (async) + return await command.ExecuteScalarAsync(); + else + return command.ExecuteScalar(); + } } } From a77ee4a48af52bf9dea9b9b8dd6aff70abc58157 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emmanuel=20Andr=C3=A9?= <2341261+manandre@users.noreply.github.com> Date: Sat, 22 Nov 2025 15:47:07 +0100 Subject: [PATCH 652/761] Update devcontainer for .NET 10 (#6347) * Update devcontainer for .NET 10 * Use devcontainer features for dotnet * Remove useless setting --- .devcontainer/devcontainer.json | 17 ++++++++++------- .devcontainer/docker-compose.yml | 2 +- .devcontainer/dotnet/Dockerfile | 5 ----- .vscode/extensions.json | 6 +----- .vscode/settings.json | 3 +-- 5 files changed, 13 insertions(+), 20 deletions(-) delete mode 100644 .devcontainer/dotnet/Dockerfile diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 1cdf7d0550..1efa473614 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -5,6 +5,13 @@ "workspaceFolder": "/workspace", + "features": { + "ghcr.io/devcontainers/features/dotnet:2": { + "version": "10.0", + "dotnetRuntimeVersions": "8.0,9.0" + } + }, + "customizations": { "vscode": { "settings": { @@ -21,18 +28,14 @@ "extensions": [ "ms-dotnettools.csharp", - "formulahendry.dotnet-test-explorer", + "ms-dotnettools.csdevkit", "ms-azuretools.vscode-docker", "mutantdino.resourcemonitor" ] } }, - - "forwardPorts": [5432, 5050], - "remoteEnv": { - 
"DeveloperBuild": "True" - }, + "forwardPorts": [5432, 5050], - "postCreateCommand": "dotnet restore Npgsql.sln" + "postCreateCommand": "dotnet restore Npgsql.slnx" } \ No newline at end of file diff --git a/.devcontainer/docker-compose.yml b/.devcontainer/docker-compose.yml index 99d9177380..3926f919de 100644 --- a/.devcontainer/docker-compose.yml +++ b/.devcontainer/docker-compose.yml @@ -2,7 +2,7 @@ version: '3' services: npgsql-dev: - build: ./dotnet + image: mcr.microsoft.com/devcontainers/base:ubuntu volumes: - ..:/workspace:cached tty: true diff --git a/.devcontainer/dotnet/Dockerfile b/.devcontainer/dotnet/Dockerfile deleted file mode 100644 index 66aaa421c9..0000000000 --- a/.devcontainer/dotnet/Dockerfile +++ /dev/null @@ -1,5 +0,0 @@ -# Source for tags: https://mcr.microsoft.com/v2/dotnet/sdk/tags/list -FROM mcr.microsoft.com/dotnet/sdk:9.0 - -# "install" the .NET 8 runtime -COPY --from=mcr.microsoft.com/dotnet/sdk:8.0 /usr/share/dotnet/shared /usr/share/dotnet/shared \ No newline at end of file diff --git a/.vscode/extensions.json b/.vscode/extensions.json index 6bf5ff40c5..a505eb8cfc 100644 --- a/.vscode/extensions.json +++ b/.vscode/extensions.json @@ -2,10 +2,6 @@ // List of extensions which should be recommended for users of this workspace. "recommendations": [ "ms-dotnettools.csharp", - "formulahendry.dotnet-test-explorer", - ], - // List of extensions recommended by VS Code that should not be recommended for users of this workspace. 
- "unwantedRecommendations": [ - + "ms-dotnettools.csdevkit" ] } \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json index 4e57c5f1c2..22993a3100 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,4 +1,3 @@ { - "dotnet-test-explorer.testProjectPath": "**/*.Tests.csproj", - "dotnet.defaultSolution": "Npgsql.sln" + "dotnet.defaultSolution": "Npgsql.slnx" } \ No newline at end of file From 563ebf15e65e53cd00e0b05af726673685e4c590 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Sat, 22 Nov 2025 15:43:16 +0100 Subject: [PATCH 653/761] Bump version to 10.0.0 (GA) --- Directory.Build.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Build.props b/Directory.Build.props index a7f8f89bff..de99b4fd8c 100644 --- a/Directory.Build.props +++ b/Directory.Build.props @@ -1,6 +1,6 @@  - 10.0.0-rtm + 10.0.0 latest true enable From a18021849f244716d3b68eefd705677f131f9ace Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Sat, 22 Nov 2025 17:35:35 +0100 Subject: [PATCH 654/761] Fix tracing autoprepare test --- test/Npgsql.Tests/TracingTests.cs | 1 + 1 file changed, 1 insertion(+) diff --git a/test/Npgsql.Tests/TracingTests.cs b/test/Npgsql.Tests/TracingTests.cs index 9fe94b7216..1033dc7a55 100644 --- a/test/Npgsql.Tests/TracingTests.cs +++ b/test/Npgsql.Tests/TracingTests.cs @@ -227,6 +227,7 @@ public async Task CommandExecute_explicit_prepare([Values] bool batch) public async Task CommandExecute_auto_prepare([Values] bool batch) { var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.ConnectionStringBuilder.MaxPoolSize = 1; dataSourceBuilder.ConnectionStringBuilder.MaxAutoPrepare = 10; dataSourceBuilder.ConnectionStringBuilder.AutoPrepareMinUsages = 2; dataSourceBuilder.ConfigureTracing(o => o.EnablePhysicalOpenTracing(false)); From 98f28f354ea28ec6c0ea7f07b9e3a9810d08c1ac Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Sat, 22 Nov 2025 17:49:05 +0100 Subject: [PATCH 655/761] 
bump to 11.0.0-preview.1 --- Directory.Build.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Build.props b/Directory.Build.props index de99b4fd8c..081cb72aa6 100644 --- a/Directory.Build.props +++ b/Directory.Build.props @@ -1,6 +1,6 @@  - 10.0.0 + 11.0.0-preview.1 latest true enable From 43d118dbe2f30121324cab6f056f92452f7d0e32 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Nov 2025 23:03:38 +0100 Subject: [PATCH 656/761] Bump GitHubActionsTestLogger from 2.4.1 to 3.0.1 (#6353) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 52becfa8c3..62e9ae9851 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -59,7 +59,7 @@ - + From 56ad76e3d57494058739c0d7f8611360cdc8c346 Mon Sep 17 00:00:00 2001 From: nima nikoonazar Date: Tue, 25 Nov 2025 23:10:56 +0330 Subject: [PATCH 657/761] Docs: clarify NpgsqlParameterCollection.Add(object) requires NpgsqlParameter (#6342) --- src/Npgsql/NpgsqlParameterCollection.cs | 46 ++++++++++++++++++++++--- 1 file changed, 41 insertions(+), 5 deletions(-) diff --git a/src/Npgsql/NpgsqlParameterCollection.cs b/src/Npgsql/NpgsqlParameterCollection.cs index 85b418b157..51a40e6648 100644 --- a/src/Npgsql/NpgsqlParameterCollection.cs +++ b/src/Npgsql/NpgsqlParameterCollection.cs @@ -429,12 +429,20 @@ public override void RemoveAt(int index) Remove(InternalList[index]); } - /// + /// + /// Inserts a parameter into the at the specified index. + /// + /// The zero-based index at which to insert the parameter. + /// The parameter to insert. + /// + /// Although this method accepts , only instances of are supported. + /// Passing any other type will result in an . + /// public override void Insert(int index, object value) => Insert(index, Cast(value)); /// - /// Removes the specified from the collection. 
+ /// Removes the with the specified name from the collection. /// /// The name of the to remove from the collection. public void Remove(string parameterName) @@ -452,6 +460,10 @@ public void Remove(string parameterName) /// Removes the specified from the collection. /// /// The to remove from the collection. + /// + /// Although this method accepts , only instances of are supported. + /// Passing any other type will result in an . + /// public override void Remove(object value) => Remove(Cast(value)); @@ -500,11 +512,29 @@ public override void Clear() LookupClear(); } - /// + /// + /// Returns the index of the specified parameter in the . + /// + /// The parameter to find. + /// The index of the parameter if found; otherwise, -1. + /// + /// Although this method accepts , only instances of are supported. + /// Passing any other type will result in an . + /// public override int IndexOf(object value) => IndexOf(Cast(value)); - /// + /// + /// Adds a parameter to the . + /// + /// The parameter to add. + /// The zero-based index at which the parameter was added. + /// + /// Although this method accepts , only instances of are supported. + /// Passing any other type will result in an . + /// To add a parameter by value, use , , + /// or one of the typed overloads. + /// public override int Add(object value) { Add(Cast(value)); @@ -549,7 +579,13 @@ IEnumerator IEnumerable.GetEnumerator() #endregion - /// + /// + /// Adds the elements of the specified array to the end of the . + /// + /// + /// An array of s to add. Each item must be an instance of . + /// Passing any other type will result in an . 
+ /// public override void AddRange(Array values) { ArgumentNullException.ThrowIfNull(values); From fc4816c7843f8f101279a6e5368fa426072de727 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 25 Nov 2025 21:05:46 +0000 Subject: [PATCH 658/761] Bump actions/setup-dotnet from 5.0.0 to 5.0.1 (#6355) --- .github/workflows/build.yml | 6 +++--- .github/workflows/native-aot.yml | 4 ++-- .github/workflows/rich-code-nav.yml | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 14e7ad6875..0a18e3bb9d 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -78,7 +78,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v5.0.0 + uses: actions/setup-dotnet@v5.0.1 - name: Build run: dotnet build -c ${{ matrix.config }} @@ -338,7 +338,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v5.0.0 + uses: actions/setup-dotnet@v5.0.1 - name: Pack run: dotnet pack --configuration Release --property:PackageOutputPath="$PWD/nupkgs" --version-suffix "ci.$(date -u +%Y%m%dT%H%M%S)+sha.${GITHUB_SHA:0:9}" -p:ContinuousIntegrationBuild=true @@ -370,7 +370,7 @@ jobs: uses: actions/checkout@v6 - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v5.0.0 + uses: actions/setup-dotnet@v5.0.1 - name: Pack run: dotnet pack --configuration Release --property:PackageOutputPath="$PWD/nupkgs" -p:ContinuousIntegrationBuild=true diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index 7e92384ea2..5d41da2014 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -107,7 +107,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v5.0.0 + uses: actions/setup-dotnet@v5.0.1 - name: Write script run: echo "$AOT_Compat" > test-aot-compatibility.ps1 @@ -141,7 +141,7 @@ jobs: ${{ 
runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v5.0.0 + uses: actions/setup-dotnet@v5.0.1 - name: Start PostgreSQL run: | diff --git a/.github/workflows/rich-code-nav.yml b/.github/workflows/rich-code-nav.yml index e6842c11ca..ce358eefde 100644 --- a/.github/workflows/rich-code-nav.yml +++ b/.github/workflows/rich-code-nav.yml @@ -23,7 +23,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v5.0.0 + uses: actions/setup-dotnet@v5.0.1 - name: Build run: dotnet build --configuration Debug From 46e38e837e6db5470e074c3f23bfe8cfc3da9f45 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 25 Nov 2025 23:14:43 +0100 Subject: [PATCH 659/761] Bump Scriban.Signed from 6.5.1 to 6.5.2 (#6356) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 62e9ae9851..63ed08886e 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -47,7 +47,7 @@ - + From c67ae13314bf275aa122d918a211410ed4517a8b Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Wed, 26 Nov 2025 12:46:05 +0300 Subject: [PATCH 660/761] Fix flaky test PostgresType (#6354) --- src/Npgsql/NpgsqlConnection.cs | 9 ++++++++- test/Npgsql.Tests/Types/CompositeTests.cs | 5 ++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/src/Npgsql/NpgsqlConnection.cs b/src/Npgsql/NpgsqlConnection.cs index 57299a3eec..93b0b27f7a 100644 --- a/src/Npgsql/NpgsqlConnection.cs +++ b/src/Npgsql/NpgsqlConnection.cs @@ -1663,6 +1663,12 @@ internal EndScopeDisposable StartTemporaryBindingScope(out NpgsqlConnector conne return new EndScopeDisposable(this); } + internal async ValueTask<(EndScopeDisposable, NpgsqlConnector)> StartTemporaryBindingScopeAsync(CancellationToken cancellationToken) + { + var connector = await StartBindingScope(ConnectorBindingScope.Temporary, NpgsqlTimeout.Infinite, 
async: true, cancellationToken).ConfigureAwait(false); + return (new EndScopeDisposable(this), connector); + } + internal T CheckOpenAndRunInTemporaryScope(Func func) { CheckOpen(); @@ -1929,7 +1935,8 @@ public async Task ReloadTypesAsync(CancellationToken cancellationToken = default { CheckReady(); - using var scope = StartTemporaryBindingScope(out var connector); + var (scope, connector) = await StartTemporaryBindingScopeAsync(cancellationToken).ConfigureAwait(false); + using var _ = scope; await _dataSource!.Bootstrap( connector, diff --git a/test/Npgsql.Tests/Types/CompositeTests.cs b/test/Npgsql.Tests/Types/CompositeTests.cs index 1c1b254862..6a62e418e3 100644 --- a/test/Npgsql.Tests/Types/CompositeTests.cs +++ b/test/Npgsql.Tests/Types/CompositeTests.cs @@ -547,7 +547,10 @@ await AssertType( [Test] public async Task PostgresType() { - await using var connection = await OpenConnectionAsync(); + // With multiplexing we can't guarantee that after ReloadTypesAsync we'll execute the query on a connection which has the new types + // Set max pool size to 1 enforce this + await using var dataSource = CreateDataSource(connectionStringBuilderAction: csb => csb.MaxPoolSize = 1); + await using var connection = await dataSource.OpenConnectionAsync(); var type1 = await GetTempTypeName(connection); var type2 = await GetTempTypeName(connection); From 34f9a48140495807d9f3e9ef4e263b6eead81bb4 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Thu, 27 Nov 2025 08:06:06 +0100 Subject: [PATCH 661/761] Drop net8.0 and net9.0 TFMs (#6358) Closes #6357 --- .github/workflows/build.yml | 12 ++- Directory.Packages.props | 22 ---- Npgsql.slnx | 2 + src/Directory.Build.props | 2 +- .../Npgsql.DependencyInjection.csproj | 2 +- src/Npgsql.GeoJSON/Npgsql.GeoJSON.csproj | 2 +- src/Npgsql.Json.NET/Npgsql.Json.NET.csproj | 3 +- .../Npgsql.NetTopologySuite.csproj | 2 +- src/Npgsql.NodaTime/Npgsql.NodaTime.csproj | 2 +- .../Npgsql.OpenTelemetry.csproj | 2 +- .../Npgsql.SourceGenerators.csproj | 
1 + .../Internal/DynamicTypeInfoResolver.cs | 10 -- src/Npgsql/Internal/NpgsqlConnector.cs | 13 --- .../JsonDynamicTypeInfoResolverFactory.cs | 12 +-- src/Npgsql/Npgsql.csproj | 5 +- src/Npgsql/NpgsqlConnection.cs | 2 +- src/Npgsql/NpgsqlDataSourceBuilder.cs | 2 +- src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 2 +- test/Directory.Build.props | 2 +- test/Npgsql.Tests/ExceptionTests.cs | 102 ------------------ test/Npgsql.Tests/SecurityTests.cs | 4 - test/Npgsql.Tests/Types/JsonDynamicTests.cs | 25 ----- test/Npgsql.Tests/Types/JsonTests.cs | 5 - 23 files changed, 26 insertions(+), 210 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 0a18e3bb9d..5ac7398beb 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -45,10 +45,14 @@ jobs: pg_major: 18 config: Release test_tfm: net10.0 - - os: ubuntu-24.04 - pg_major: 18 - config: Release - test_tfm: net8.0 + + # Minimal support TFM build +# - os: ubuntu-24.04 +# pg_major: 18 +# config: Release +# test_tfm: net8.0 + + # PG prerelease build # - os: ubuntu-24.04 # pg_major: 19 # config: Release diff --git a/Directory.Packages.props b/Directory.Packages.props index 63ed08886e..7a2208b8cc 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -8,32 +8,10 @@ 10.0.0 - - 9.0.0 - 9.0.0 - - - 9.0.11 - 9.0.11 - - - - 8.0.0 - 8.0.0 - - - 9.0.11 - - - 8.0.1 - 8.0.1 - - - diff --git a/Npgsql.slnx b/Npgsql.slnx index 5404551fcd..516cbc2784 100644 --- a/Npgsql.slnx +++ b/Npgsql.slnx @@ -15,6 +15,7 @@ + @@ -25,6 +26,7 @@ + diff --git a/src/Directory.Build.props b/src/Directory.Build.props index fdf88bb904..6e8c5bb19f 100644 --- a/src/Directory.Build.props +++ b/src/Directory.Build.props @@ -2,7 +2,7 @@ - true + true true diff --git a/src/Npgsql.DependencyInjection/Npgsql.DependencyInjection.csproj b/src/Npgsql.DependencyInjection/Npgsql.DependencyInjection.csproj index bf502446e1..3c10503037 100644 --- 
a/src/Npgsql.DependencyInjection/Npgsql.DependencyInjection.csproj +++ b/src/Npgsql.DependencyInjection/Npgsql.DependencyInjection.csproj @@ -2,7 +2,7 @@ Shay Rojansky - net8.0 + net10.0 npgsql;postgresql;postgres;ado;ado.net;database;sql;di;dependency injection README.md diff --git a/src/Npgsql.GeoJSON/Npgsql.GeoJSON.csproj b/src/Npgsql.GeoJSON/Npgsql.GeoJSON.csproj index 7b76bde10f..824c5b79e6 100644 --- a/src/Npgsql.GeoJSON/Npgsql.GeoJSON.csproj +++ b/src/Npgsql.GeoJSON/Npgsql.GeoJSON.csproj @@ -3,7 +3,7 @@ Yoh Deadfall;Shay Rojansky GeoJSON plugin for Npgsql, allowing mapping of PostGIS geometry types to GeoJSON types. npgsql;postgresql;postgres;postgis;geojson;spatial;ado;ado.net;database;sql - net8.0 + net10.0 $(NoWarn);NPG9001 diff --git a/src/Npgsql.Json.NET/Npgsql.Json.NET.csproj b/src/Npgsql.Json.NET/Npgsql.Json.NET.csproj index e126980ad1..df6b2ccef6 100644 --- a/src/Npgsql.Json.NET/Npgsql.Json.NET.csproj +++ b/src/Npgsql.Json.NET/Npgsql.Json.NET.csproj @@ -3,8 +3,9 @@ Shay Rojansky Json.NET plugin for Npgsql, allowing transparent serialization/deserialization of JSON objects directly to and from the database. npgsql;postgresql;json;postgres;ado;ado.net;database;sql - net8.0 + net10.0 enable + false $(NoWarn);NPG9001 diff --git a/src/Npgsql.NetTopologySuite/Npgsql.NetTopologySuite.csproj b/src/Npgsql.NetTopologySuite/Npgsql.NetTopologySuite.csproj index ab318abc5c..91a4c268a0 100644 --- a/src/Npgsql.NetTopologySuite/Npgsql.NetTopologySuite.csproj +++ b/src/Npgsql.NetTopologySuite/Npgsql.NetTopologySuite.csproj @@ -4,7 +4,7 @@ NetTopologySuite plugin for Npgsql, allowing mapping of PostGIS geometry types to NetTopologySuite types. 
npgsql;postgresql;postgres;postgis;spatial;nettopologysuite;nts;ado;ado.net;database;sql README.md - net8.0 + net10.0 $(NoWarn);NU5104 $(NoWarn);NPG9001 diff --git a/src/Npgsql.NodaTime/Npgsql.NodaTime.csproj b/src/Npgsql.NodaTime/Npgsql.NodaTime.csproj index 8bbad55db7..1fd5d4b767 100644 --- a/src/Npgsql.NodaTime/Npgsql.NodaTime.csproj +++ b/src/Npgsql.NodaTime/Npgsql.NodaTime.csproj @@ -4,7 +4,7 @@ NodaTime plugin for Npgsql, allowing mapping of PostgreSQL date/time types to NodaTime types. npgsql;postgresql;postgres;nodatime;date;time;ado;ado;net;database;sql README.md - net8.0 + net10.0 $(NoWarn);NPG9001 diff --git a/src/Npgsql.OpenTelemetry/Npgsql.OpenTelemetry.csproj b/src/Npgsql.OpenTelemetry/Npgsql.OpenTelemetry.csproj index bb1a60cc8a..18592f8a5f 100644 --- a/src/Npgsql.OpenTelemetry/Npgsql.OpenTelemetry.csproj +++ b/src/Npgsql.OpenTelemetry/Npgsql.OpenTelemetry.csproj @@ -2,7 +2,7 @@ Shay Rojansky - net8.0 + net10.0 npgsql;postgresql;postgres;ado;ado.net;database;sql;opentelemetry;tracing;diagnostics;instrumentation README.md diff --git a/src/Npgsql.SourceGenerators/Npgsql.SourceGenerators.csproj b/src/Npgsql.SourceGenerators/Npgsql.SourceGenerators.csproj index bc0f37e9bb..4f5c1eb42d 100644 --- a/src/Npgsql.SourceGenerators/Npgsql.SourceGenerators.csproj +++ b/src/Npgsql.SourceGenerators/Npgsql.SourceGenerators.csproj @@ -2,6 +2,7 @@ netstandard2.0 + false 1591 true diff --git a/src/Npgsql/Internal/DynamicTypeInfoResolver.cs b/src/Npgsql/Internal/DynamicTypeInfoResolver.cs index 91af319207..dfdb5a79e7 100644 --- a/src/Npgsql/Internal/DynamicTypeInfoResolver.cs +++ b/src/Npgsql/Internal/DynamicTypeInfoResolver.cs @@ -6,12 +6,7 @@ namespace Npgsql.Internal; [Experimental(NpgsqlDiagnostics.ConvertersExperimental)] -#if NET9_0_OR_GREATER [RequiresDynamicCode("A dynamic type info resolver may need to construct a generic converter for a statically unknown type.")] -#else -[RequiresUnreferencedCode("A dynamic type info resolver may need to construct a generic 
converter for a statically unknown type.")] -[RequiresDynamicCode("A dynamic type info resolver may need to construct a generic converter for a statically unknown type.")] -#endif public abstract class DynamicTypeInfoResolver : IPgTypeInfoResolver { public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) @@ -47,12 +42,7 @@ protected static bool IsArrayDataTypeName(DataTypeName dataTypeName, PgSerialize protected abstract DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options); -#if NET9_0_OR_GREATER - [RequiresDynamicCode("A dynamic type info resolver may need to construct a generic converter for a statically unknown type.")] -#else - [RequiresUnreferencedCode("A dynamic type info resolver may need to construct a generic converter for a statically unknown type.")] [RequiresDynamicCode("A dynamic type info resolver may need to construct a generic converter for a statically unknown type.")] -#endif protected class DynamicMappingCollection { TypeInfoMappingCollection? 
_mappings; diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 617ddcd03e..2b0b77bb33 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -1122,11 +1122,7 @@ internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, // Windows crypto API has a bug with pem certs // See #3650 using var previousCert = cert; -#if NET9_0_OR_GREATER cert = X509CertificateLoader.LoadPkcs12(cert.Export(X509ContentType.Pkcs12), null); -#else - cert = new X509Certificate2(cert.Export(X509ContentType.Pkcs12)); -#endif clientCertificates[i] = cert; } } @@ -1135,13 +1131,8 @@ internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, // If it's empty, it's probably PFX if (clientCertificates.Count == 0) { -#if NET9_0_OR_GREATER var certs = X509CertificateLoader.LoadPkcs12CollectionFromFile(certPath, password); clientCertificates.AddRange(certs); -#else - var cert = new X509Certificate2(certPath, password); - clientCertificates.Add(cert); -#endif } var certificates = new List(); @@ -2032,12 +2023,8 @@ static RemoteCertificateValidationCallback SslRootValidation(bool verifyFull, st if (certs.Count == 0) { -#if NET9_0_OR_GREATER // This is not a PEM certificate, probably PFX certs.Add(X509CertificateLoader.LoadPkcs12FromFile(certRootPath, null)); -#else - certs.Add(new X509Certificate2(certRootPath)); -#endif } } diff --git a/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs index 696aac8efb..aca5484b77 100644 --- a/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs @@ -47,14 +47,6 @@ class Resolver(Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null, JsonSe readonly Type[] _jsonClrTypes = jsonClrTypes ?? []; TypeInfoMappingCollection? 
_mappings; -#if NET9_0_OR_GREATER - static Func AllowOutOfOrderMetadataProperties { get; } = options => options.AllowOutOfOrderMetadataProperties; -#else - static Func AllowOutOfOrderMetadataProperties { get; } = - typeof(JsonSerializerOptions).GetProperty("AllowOutOfOrderMetadataProperties") is { } prop && prop.GetGetMethod() is { } getProp - ? getProp.CreateDelegate>() - : _ => false; -#endif protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(), _jsonbClrTypes, _jsonClrTypes, SerializerOptions); public new PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) @@ -86,7 +78,7 @@ void AddUserMappings(bool jsonb, Type[] clrTypes) // For jsonb we can't properly support polymorphic serialization unless the SerializerOptions.AllowOutOfOrderMetadataProperties is `true`. // If `jsonb` AND `AllowOutOfOrderMetadataProperties` is `false`, use `derived.DerivedType` as the base type for the converter, // this causes STJ to stop serializing the "$type" field; essentially disabling the feature. - var baseType = jsonb && !AllowOutOfOrderMetadataProperties(serializerOptions) ? derived.DerivedType : jsonType; + var baseType = jsonb && !serializerOptions.AllowOutOfOrderMetadataProperties ? derived.DerivedType : jsonType; dynamicMappings.AddMapping(derived.DerivedType, dataTypeName, factory: (options, mapping, _) => mapping.CreateInfo(options, CreateSystemTextJsonConverter(mapping.Type, jsonb, options.TextEncoding, serializerOptions, baseType))); @@ -113,7 +105,7 @@ void AddUserMappings(bool jsonb, Type[] clrTypes) // For jsonb we can't properly support polymorphic serialization unless the SerializerOptions.AllowOutOfOrderMetadataProperties is `true`. // If `jsonb` AND `AllowOutOfOrderMetadataProperties` is `false`, use `mapping.Type` as the base type for the converter, // this causes STJ to stop serializing the "$type" field; essentially disabling the feature. 
- var baseType = jsonb && !AllowOutOfOrderMetadataProperties(SerializerOptions) ? mapping.Type : typeof(object); + var baseType = jsonb && !SerializerOptions.AllowOutOfOrderMetadataProperties ? mapping.Type : typeof(object); return mapping.CreateInfo(options, CreateSystemTextJsonConverter(mapping.Type, jsonb, options.TextEncoding, SerializerOptions, baseType)); diff --git a/src/Npgsql/Npgsql.csproj b/src/Npgsql/Npgsql.csproj index 01aaa5013d..d06e91fcc8 100644 --- a/src/Npgsql/Npgsql.csproj +++ b/src/Npgsql/Npgsql.csproj @@ -5,7 +5,7 @@ Npgsql is the open source .NET data provider for PostgreSQL. npgsql;postgresql;postgres;ado;ado.net;database;sql README.md - net8.0;net9.0;net10.0 + net10.0 $(NoWarn);CA2017 $(NoWarn);NPG9001 $(NoWarn);NPG9002 @@ -15,9 +15,6 @@ - - - diff --git a/src/Npgsql/NpgsqlConnection.cs b/src/Npgsql/NpgsqlConnection.cs index 93b0b27f7a..0621c91193 100644 --- a/src/Npgsql/NpgsqlConnection.cs +++ b/src/Npgsql/NpgsqlConnection.cs @@ -1033,7 +1033,7 @@ internal void OnNotification(NpgsqlNotificationEventArgs e) /// /// /// - /// See . + /// See . /// /// public Action? SslClientAuthenticationOptionsCallback { get; set; } diff --git a/src/Npgsql/NpgsqlDataSourceBuilder.cs b/src/Npgsql/NpgsqlDataSourceBuilder.cs index bc4b65d7f6..156885d04e 100644 --- a/src/Npgsql/NpgsqlDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlDataSourceBuilder.cs @@ -252,7 +252,7 @@ public NpgsqlDataSourceBuilder UseClientCertificates(X509CertificateCollection? /// The callback to customize SslStream's authentication options. /// /// - /// See . + /// See . /// /// /// The same builder instance so that multiple calls can be chained. 
diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index e8d8ea5061..358ab30b95 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -210,7 +210,7 @@ public NpgsqlSlimDataSourceBuilder UseClientCertificates(X509CertificateCollecti /// The callback to customize SslStream's authentication options. /// /// - /// See . + /// See . /// /// /// The same builder instance so that multiple calls can be chained. diff --git a/test/Directory.Build.props b/test/Directory.Build.props index 0c3bd8dba0..6af6edc496 100644 --- a/test/Directory.Build.props +++ b/test/Directory.Build.props @@ -2,7 +2,7 @@ - net8.0;net10.0 + net10.0 false diff --git a/test/Npgsql.Tests/ExceptionTests.cs b/test/Npgsql.Tests/ExceptionTests.cs index ca667f8fd1..ec7e7f18db 100644 --- a/test/Npgsql.Tests/ExceptionTests.cs +++ b/test/Npgsql.Tests/ExceptionTests.cs @@ -209,106 +209,4 @@ public void NpgsqlException_IsTransient() Assert.That(new NpgsqlException().IsTransient, Is.False); Assert.That(new NpgsqlException("", new Exception("Inner Exception")).IsTransient, Is.False); } - -#if !NET9_0_OR_GREATER -#pragma warning disable SYSLIB0051 -#pragma warning disable 618 - [Test] - public void PostgresException_IsTransient() - { - Assert.That(CreateWithSqlState("53300").IsTransient); - Assert.That(CreateWithSqlState("0").IsTransient, Is.False); - - PostgresException CreateWithSqlState(string sqlState) - { - var info = CreateSerializationInfo(); - new Exception().GetObjectData(info, default); - - info.AddValue(nameof(PostgresException.Severity), null); - info.AddValue(nameof(PostgresException.InvariantSeverity), null); - info.AddValue(nameof(PostgresException.SqlState), sqlState); - info.AddValue(nameof(PostgresException.MessageText), null); - info.AddValue(nameof(PostgresException.Detail), null); - info.AddValue(nameof(PostgresException.Hint), null); - info.AddValue(nameof(PostgresException.Position), 0); - 
info.AddValue(nameof(PostgresException.InternalPosition), 0); - info.AddValue(nameof(PostgresException.InternalQuery), null); - info.AddValue(nameof(PostgresException.Where), null); - info.AddValue(nameof(PostgresException.SchemaName), null); - info.AddValue(nameof(PostgresException.TableName), null); - info.AddValue(nameof(PostgresException.ColumnName), null); - info.AddValue(nameof(PostgresException.DataTypeName), null); - info.AddValue(nameof(PostgresException.ConstraintName), null); - info.AddValue(nameof(PostgresException.File), null); - info.AddValue(nameof(PostgresException.Line), null); - info.AddValue(nameof(PostgresException.Routine), null); - - return new PostgresException(info, default); - } - } -#pragma warning restore SYSLIB0051 -#pragma warning restore 618 - -#pragma warning disable SYSLIB0011 -#pragma warning disable SYSLIB0050 - -#if !NET9_0_OR_GREATER // BinaryFormatter serialization and deserialization have been removed. See https://aka.ms/binaryformatter for more information. 
- [Test] - public void Serialization() - { - var actual = new PostgresException("message text", "high", "high2", "53300", "detail", "hint", 18, 42, "internal query", - "where", "schema", "table", "column", "data type", "constraint", "file", "line", "routine"); - - var formatter = new BinaryFormatter(); - var stream = new MemoryStream(); - - formatter.Serialize(stream, actual); - stream.Seek(0, SeekOrigin.Begin); - - var expected = (PostgresException)formatter.Deserialize(stream); - - Assert.That(expected.Severity, Is.EqualTo(actual.Severity)); - Assert.That(expected.InvariantSeverity, Is.EqualTo(actual.InvariantSeverity)); - Assert.That(expected.SqlState, Is.EqualTo(actual.SqlState)); - Assert.That(expected.MessageText, Is.EqualTo(actual.MessageText)); - Assert.That(expected.Detail, Is.EqualTo(actual.Detail)); - Assert.That(expected.Hint, Is.EqualTo(actual.Hint)); - Assert.That(expected.Position, Is.EqualTo(actual.Position)); - Assert.That(expected.InternalPosition, Is.EqualTo(actual.InternalPosition)); - Assert.That(expected.InternalQuery, Is.EqualTo(actual.InternalQuery)); - Assert.That(expected.Where, Is.EqualTo(actual.Where)); - Assert.That(expected.SchemaName, Is.EqualTo(actual.SchemaName)); - Assert.That(expected.TableName, Is.EqualTo(actual.TableName)); - Assert.That(expected.ColumnName, Is.EqualTo(actual.ColumnName)); - Assert.That(expected.DataTypeName, Is.EqualTo(actual.DataTypeName)); - Assert.That(expected.ConstraintName, Is.EqualTo(actual.ConstraintName)); - Assert.That(expected.File, Is.EqualTo(actual.File)); - Assert.That(expected.Line, Is.EqualTo(actual.Line)); - Assert.That(expected.Routine, Is.EqualTo(actual.Routine)); - } -#endif - - SerializationInfo CreateSerializationInfo() => new(typeof(PostgresException), new FormatterConverter()); -#pragma warning restore SYSLIB0011 - -#pragma warning disable SYSLIB0051 - [Test] - [IssueLink("https://github.com/npgsql/npgsql/issues/3204")] - public void Base_exception_property_serialization() - { - var ex = 
new PostgresException("the message", "low", "low2", "XX123"); - - var info = CreateSerializationInfo(); - ex.GetObjectData(info, default); - - // Check virtual base properties, which can be incorrectly deserialized if overridden, because the base - // Exception.GetObjectData() method writes the fields, not the properties (e.g. "_message" instead of "Message"). - Assert.That(ex.Data, Is.EquivalentTo((IDictionary)info.GetValue("Data", typeof(IDictionary))!)); - Assert.That(ex.HelpLink, Is.EqualTo(info.GetValue("HelpURL", typeof(string)))); - Assert.That(ex.Message, Is.EqualTo(info.GetValue("Message", typeof(string)))); - Assert.That(ex.Source, Is.EqualTo(info.GetValue("Source", typeof(string)))); - Assert.That(ex.StackTrace, Is.EqualTo(info.GetValue("StackTraceString", typeof(string)))); - } -#pragma warning restore SYSLIB0051 -#endif } diff --git a/test/Npgsql.Tests/SecurityTests.cs b/test/Npgsql.Tests/SecurityTests.cs index c9448b19ee..c7a595c8b8 100644 --- a/test/Npgsql.Tests/SecurityTests.cs +++ b/test/Npgsql.Tests/SecurityTests.cs @@ -574,11 +574,7 @@ public async Task Connect_with_verify_and_multiple_ca_cert([Values(SslMode.Verif var certificates = new X509Certificate2Collection(); -#if NET9_0_OR_GREATER using var realCaCert = X509CertificateLoader.LoadCertificateFromFile("ca.crt"); -#else - using var realCaCert = new X509Certificate2("ca.crt"); -#endif using var ecdsa = ECDsa.Create(); var req = new CertificateRequest("cn=localhost", ecdsa, HashAlgorithmName.SHA256); diff --git a/test/Npgsql.Tests/Types/JsonDynamicTests.cs b/test/Npgsql.Tests/Types/JsonDynamicTests.cs index af282f82d0..036374a2d2 100644 --- a/test/Npgsql.Tests/Types/JsonDynamicTests.cs +++ b/test/Npgsql.Tests/Types/JsonDynamicTests.cs @@ -161,17 +161,11 @@ await AssertType( [Test] public async Task Poco_polymorphic_mapping() { -#if !NET9_0_OR_GREATER - if (IsJsonb) - return; -#endif await using var dataSource = CreateDataSource(builder => { var types = new[] {typeof(WeatherForecast)}; builder 
-#if NET9_0_OR_GREATER .ConfigureJsonOptions(new() { AllowOutOfOrderMetadataProperties = true }) -#endif .EnableDynamicJson(jsonClrTypes: IsJsonb ? [] : types, jsonbClrTypes: !IsJsonb ? [] : types); }); @@ -196,17 +190,11 @@ public async Task Poco_polymorphic_mapping() [Test] public async Task Poco_polymorphic_mapping_read_parents() { -#if !NET9_0_OR_GREATER - if (IsJsonb) - return; -#endif await using var dataSource = CreateDataSource(builder => { var types = new[] {typeof(WeatherForecast)}; builder -#if NET9_0_OR_GREATER .ConfigureJsonOptions(new() { AllowOutOfOrderMetadataProperties = true }) -#endif .EnableDynamicJson(jsonClrTypes: IsJsonb ? [] : types, jsonbClrTypes: !IsJsonb ? [] : types); }); @@ -246,9 +234,7 @@ public async Task Poco_exact_polymorphic_mapping() { var types = new[] {typeof(ExtendedDerivedWeatherForecast)}; builder -#if NET9_0_OR_GREATER .ConfigureJsonOptions(new() { AllowOutOfOrderMetadataProperties = true }) -#endif .EnableDynamicJson(jsonClrTypes: IsJsonb ? [] : types, jsonbClrTypes: !IsJsonb ? [] : types); }); @@ -273,17 +259,10 @@ public async Task Poco_exact_polymorphic_mapping() [Test] public async Task Poco_unspecified_polymorphic_mapping() { -#if !NET9_0_OR_GREATER - if (IsJsonb) - return; -#endif - await using var dataSource = CreateDataSource(builder => { builder -#if NET9_0_OR_GREATER .ConfigureJsonOptions(new() { AllowOutOfOrderMetadataProperties = true }) -#endif .EnableDynamicJson(); }); @@ -322,9 +301,7 @@ public async Task Poco_polymorphic_mapping_without_AllowOutOfOrderMetadataProper { var types = new[] {typeof(WeatherForecast)}; builder -#if NET9_0_OR_GREATER .ConfigureJsonOptions(new() { AllowOutOfOrderMetadataProperties = false }) -#endif .EnableDynamicJson(jsonClrTypes: IsJsonb ? [] : types, jsonbClrTypes: !IsJsonb ? 
[] : types); }); @@ -377,9 +354,7 @@ public async Task Poco_unspecified_polymorphic_mapping_without_AllowOutOfOrderMe await using var dataSource = CreateDataSource(builder => { builder -#if NET9_0_OR_GREATER .ConfigureJsonOptions(new() { AllowOutOfOrderMetadataProperties = false }) -#endif .EnableDynamicJson(); }); diff --git a/test/Npgsql.Tests/Types/JsonTests.cs b/test/Npgsql.Tests/Types/JsonTests.cs index 9cfb07de8c..84b95389bb 100644 --- a/test/Npgsql.Tests/Types/JsonTests.cs +++ b/test/Npgsql.Tests/Types/JsonTests.cs @@ -182,12 +182,7 @@ public Task Roundtrip_JsonObject() [Test] public Task Roundtrip_JsonArray() => AssertType( -#if NET8_0 - // Necessary until we drop STJ 8.0, see https://github.com/dotnet/runtime/pull/103733 - new JsonArray { (JsonValue)1, (JsonValue)2, (JsonValue)3 }, -#else new JsonArray { 1, 2, 3 }, -#endif IsJsonb ? "[1, 2, 3]" : "[1,2,3]", PostgresType, NpgsqlDbType, From 0eda0cc277171f96997023cd7592bec099de034a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Dec 2025 23:14:21 +0000 Subject: [PATCH 662/761] Bump BenchmarkDotNet from 0.15.6 to 0.15.8 (#6361) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 7a2208b8cc..bba3fd780e 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -43,7 +43,7 @@ - + From f9dd9190f18fc0e3fbfe91d10b1ee50dae400e80 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 9 Dec 2025 07:51:40 +0100 Subject: [PATCH 663/761] Bump NUnit3TestAdapter from 5.2.0 to 6.0.0 (#6365) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index bba3fd780e..9ab0c24bc6 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -34,7 +34,7 @@ - + From 
19863c1562a48d2c7a2285091954988e60017b8a Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Tue, 9 Dec 2025 12:58:51 +0300 Subject: [PATCH 664/761] Throw OperationCancelledException with correct CancellationToken from Dns.GetHostAddressesAsync (#6364) Fixes #6362 --- src/Npgsql/Internal/NpgsqlConnector.cs | 4 ++-- test/Npgsql.Tests/MultipleHostsTests.cs | 7 +++++-- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 2b0b77bb33..6bca520d43 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -1349,9 +1349,9 @@ async Task ConnectAsync(NpgsqlTimeout timeout, CancellationToken cancellationTok { ipAddresses = await Dns.GetHostAddressesAsync(Host, combinedToken).ConfigureAwait(false); } - catch (OperationCanceledException oce) when ( - oce.CancellationToken == combinedToken && !cancellationToken.IsCancellationRequested) + catch (OperationCanceledException) { + cancellationToken.ThrowIfCancellationRequested(); throw new TimeoutException(); } } diff --git a/test/Npgsql.Tests/MultipleHostsTests.cs b/test/Npgsql.Tests/MultipleHostsTests.cs index a98e0d60c2..b357417bb1 100644 --- a/test/Npgsql.Tests/MultipleHostsTests.cs +++ b/test/Npgsql.Tests/MultipleHostsTests.cs @@ -1137,10 +1137,13 @@ public async Task OpenConnection_when_canceled_throws_TaskCanceledException() { var builder = new NpgsqlDataSourceBuilder(ConnectionString); await using var dataSource = builder.BuildMultiHost(); - Assert.ThrowsAsync(async () => + using var cts = new CancellationTokenSource(); + cts.Cancel(); + var ex = Assert.ThrowsAsync(async () => { - await using var connection = await dataSource.OpenConnectionAsync(new CancellationToken(true)); + await using var connection = await dataSource.OpenConnectionAsync(cts.Token); }); + Assert.That(ex.CancellationToken, Is.EqualTo(cts.Token)); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4181")] From 
910a1d6082c11ad7d9e12b322e7179b307f89b5b Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Wed, 10 Dec 2025 09:02:36 +0100 Subject: [PATCH 665/761] Bump dotnet logging and DI dependencies (#6371) --- Directory.Packages.props | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 9ab0c24bc6..99ba52ea8b 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -1,11 +1,11 @@ - 10.0.0 - 10.0.0 + 10.0.1 + 10.0.1 - 10.0.0 - 10.0.0 + 10.0.1 + 10.0.1 From b2bdaee65213a0e6a6521c68f91d6440eff4dddb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 12 Dec 2025 08:43:00 +0100 Subject: [PATCH 666/761] Bump NodaTime from 3.2.2 to 3.2.3 (#6375) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 99ba52ea8b..b6d41479e6 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -16,7 +16,7 @@ - + From e9f4dbfa17810ceeb92208746c0f7a215053b4a4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 12 Dec 2025 22:22:33 +0100 Subject: [PATCH 667/761] Bump actions/upload-artifact from 5 to 6 (#6377) --- .github/workflows/build.yml | 4 ++-- .github/workflows/native-aot.yml | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 5ac7398beb..614db3d264 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -348,7 +348,7 @@ jobs: run: dotnet pack --configuration Release --property:PackageOutputPath="$PWD/nupkgs" --version-suffix "ci.$(date -u +%Y%m%dT%H%M%S)+sha.${GITHUB_SHA:0:9}" -p:ContinuousIntegrationBuild=true - name: Upload artifacts (nupkg) - uses: actions/upload-artifact@v5 + uses: actions/upload-artifact@v6 with: name: Npgsql.CI path: nupkgs @@ -380,7 +380,7 @@ 
jobs: run: dotnet pack --configuration Release --property:PackageOutputPath="$PWD/nupkgs" -p:ContinuousIntegrationBuild=true - name: Upload artifacts - uses: actions/upload-artifact@v5 + uses: actions/upload-artifact@v6 with: name: Npgsql.Release path: nupkgs diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index 5d41da2014..86a9bae242 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -170,21 +170,21 @@ jobs: run: dotnet run --project test/MStatDumper/MStatDumper.csproj -c release -f ${{ matrix.tfm }} -- "test/Npgsql.NativeAotTests/obj/Release/${{ matrix.tfm }}/linux-x64/native/Npgsql.NativeAotTests.mstat" md >> $GITHUB_STEP_SUMMARY - name: Upload mstat - uses: actions/upload-artifact@v5 + uses: actions/upload-artifact@v6 with: name: npgsql.mstat path: "test/Npgsql.NativeAotTests/obj/Release/${{ matrix.tfm }}/linux-x64/native/Npgsql.NativeAotTests.mstat" retention-days: 3 - name: Upload codedgen dgml - uses: actions/upload-artifact@v5 + uses: actions/upload-artifact@v6 with: name: npgsql.codegen.dgml.xml path: "test/Npgsql.NativeAotTests/obj/Release/${{ matrix.tfm }}/linux-x64/native/Npgsql.NativeAotTests.codegen.dgml.xml" retention-days: 3 - name: Upload scan dgml - uses: actions/upload-artifact@v5 + uses: actions/upload-artifact@v6 with: name: npgsql.scan.dgml.xml path: "test/Npgsql.NativeAotTests/obj/Release/${{ matrix.tfm }}/linux-x64/native/Npgsql.NativeAotTests.scan.dgml.xml" From 3cbf66ac8e2caf43fab9d49a66cfd7ba5b2b03bd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 12 Dec 2025 22:22:53 +0100 Subject: [PATCH 668/761] Bump actions/cache from 4 to 5 (#6378) --- .github/workflows/build.yml | 4 ++-- .github/workflows/native-aot.yml | 4 ++-- .github/workflows/rich-code-nav.yml | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 614db3d264..41b2f87b54 
100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -74,7 +74,7 @@ jobs: uses: actions/checkout@v6 - name: NuGet Cache - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: ~/.nuget/packages key: ${{ runner.os }}-nuget-${{ hashFiles('**/Directory.Build.targets') }} @@ -334,7 +334,7 @@ jobs: uses: actions/checkout@v6 - name: NuGet Cache - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: ~/.nuget/packages key: ${{ runner.os }}-nuget-${{ hashFiles('**/Directory.Build.targets') }} diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index 86a9bae242..fc90b3b66c 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -99,7 +99,7 @@ jobs: # run: echo "$nuget_config" > NuGet.config - name: NuGet Cache - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: ~/.nuget/packages key: ${{ runner.os }}-nuget-${{ hashFiles('**/Directory.Build.targets') }} @@ -133,7 +133,7 @@ jobs: # run: echo "$nuget_config" > NuGet.config - name: NuGet Cache - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: ~/.nuget/packages key: ${{ runner.os }}-nuget-${{ hashFiles('**/Directory.Build.targets') }} diff --git a/.github/workflows/rich-code-nav.yml b/.github/workflows/rich-code-nav.yml index ce358eefde..c0ea339d66 100644 --- a/.github/workflows/rich-code-nav.yml +++ b/.github/workflows/rich-code-nav.yml @@ -15,7 +15,7 @@ jobs: uses: actions/checkout@v6 - name: NuGet Cache - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: ~/.nuget/packages key: ${{ runner.os }}-nuget-${{ hashFiles('**/Directory.Build.targets') }} From 785b0b8f03c98231892caf9f1c9f50bc5a60bb3f Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Thu, 18 Dec 2025 17:01:51 +0300 Subject: [PATCH 669/761] For COPY operations treat 0 as infinite timeout in addition to -1 (#6352) Fixes #6351 --- src/Npgsql/NpgsqlBinaryExporter.cs | 3 ++- src/Npgsql/NpgsqlBinaryImporter.cs | 6 ++++-- 
src/Npgsql/NpgsqlRawCopyStream.cs | 5 +++-- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/src/Npgsql/NpgsqlBinaryExporter.cs b/src/Npgsql/NpgsqlBinaryExporter.cs index 4828f0ecb1..65c616f496 100644 --- a/src/Npgsql/NpgsqlBinaryExporter.cs +++ b/src/Npgsql/NpgsqlBinaryExporter.cs @@ -7,6 +7,7 @@ using Npgsql.Internal; using Npgsql.Internal.Postgres; using NpgsqlTypes; +using InfiniteTimeout = System.Threading.Timeout; using static Npgsql.Util.Statics; namespace Npgsql; @@ -46,7 +47,7 @@ public sealed class NpgsqlBinaryExporter : ICancelable /// public TimeSpan Timeout { - set => _buf.Timeout = value; + set => _buf.Timeout = value > TimeSpan.Zero ? value : InfiniteTimeout.InfiniteTimeSpan; } Activity? _activity; diff --git a/src/Npgsql/NpgsqlBinaryImporter.cs b/src/Npgsql/NpgsqlBinaryImporter.cs index 6cd592dd06..60a1f09daf 100644 --- a/src/Npgsql/NpgsqlBinaryImporter.cs +++ b/src/Npgsql/NpgsqlBinaryImporter.cs @@ -8,6 +8,7 @@ using Npgsql.Internal; using Npgsql.Internal.Postgres; using NpgsqlTypes; +using InfiniteTimeout = System.Threading.Timeout; using static Npgsql.Util.Statics; namespace Npgsql; @@ -55,8 +56,9 @@ public TimeSpan Timeout { set { - _buf.Timeout = value; - _connector.ReadBuffer.Timeout = value; + var timeout = value > TimeSpan.Zero ? 
value : InfiniteTimeout.InfiniteTimeSpan; + _buf.Timeout = timeout; + _connector.ReadBuffer.Timeout = timeout; } } diff --git a/src/Npgsql/NpgsqlRawCopyStream.cs b/src/Npgsql/NpgsqlRawCopyStream.cs index 45cfdf825e..bbc641ff66 100644 --- a/src/Npgsql/NpgsqlRawCopyStream.cs +++ b/src/Npgsql/NpgsqlRawCopyStream.cs @@ -6,6 +6,7 @@ using Microsoft.Extensions.Logging; using Npgsql.BackendMessages; using Npgsql.Internal; +using InfiniteTimeout = System.Threading.Timeout; using static Npgsql.Util.Statics; #pragma warning disable 1591 @@ -42,12 +43,12 @@ public sealed class NpgsqlRawCopyStream : Stream, ICancelable public override int WriteTimeout { get => (int) _writeBuf.Timeout.TotalMilliseconds; - set => _writeBuf.Timeout = TimeSpan.FromMilliseconds(value); + set => _writeBuf.Timeout = value > 0 ? TimeSpan.FromMilliseconds(value) : InfiniteTimeout.InfiniteTimeSpan; } public override int ReadTimeout { get => (int) _readBuf.Timeout.TotalMilliseconds; - set => _readBuf.Timeout = TimeSpan.FromMilliseconds(value); + set => _readBuf.Timeout = value > 0 ? 
TimeSpan.FromMilliseconds(value) : InfiniteTimeout.InfiniteTimeSpan; } /// From 4e92d17036ba850c67f067154f7d564e5be95830 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Dec 2025 23:39:11 +0100 Subject: [PATCH 670/761] Bump NUnit3TestAdapter from 6.0.0 to 6.0.1 (#6387) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index b6d41479e6..72f3cdab82 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -34,7 +34,7 @@ - + From 381b0fada639e6fbc8fcd2df5f9c320c09745afc Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Tue, 23 Dec 2025 12:20:44 +0300 Subject: [PATCH 671/761] Fix reading numerics as BigInteger (#6385) Fixes #6383 --- .../Converters/Primitive/NumericConverters.cs | 2 +- test/Npgsql.Tests/Types/NumericTests.cs | 14 ++++++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/src/Npgsql/Internal/Converters/Primitive/NumericConverters.cs b/src/Npgsql/Internal/Converters/Primitive/NumericConverters.cs index c14a00b608..714d3c49d4 100644 --- a/src/Npgsql/Internal/Converters/Primitive/NumericConverters.cs +++ b/src/Npgsql/Internal/Converters/Primitive/NumericConverters.cs @@ -166,7 +166,7 @@ public static async ValueTask ReadAsync(PgReader reader, ArraySegment var sign = reader.ReadInt16(); var scale = reader.ReadInt16(); var array = digits.Array!; - for (var i = digits.Offset; i < array.Length; i++) + for (var i = digits.Offset; i < digits.Offset + digits.Count; i++) { if (reader.ShouldBuffer(sizeof(short))) await reader.BufferAsync(sizeof(short), cancellationToken).ConfigureAwait(false); diff --git a/test/Npgsql.Tests/Types/NumericTests.cs b/test/Npgsql.Tests/Types/NumericTests.cs index 20eed3fa04..439d651559 100644 --- a/test/Npgsql.Tests/Types/NumericTests.cs +++ b/test/Npgsql.Tests/Types/NumericTests.cs @@ -212,4 +212,18 @@ public async Task 
NumericZero_WithScale() Assert.That(value.Scale, Is.EqualTo(2)); } + + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/6383")] + public async Task Read_Many_Numerics_As_BigInteger([Values(CommandBehavior.Default, CommandBehavior.SequentialAccess)] CommandBehavior behavior) + { + await using var conn = await OpenConnectionAsync(); + await using var cmd = conn.CreateCommand(); + cmd.CommandText = "SELECT 1234567890::numeric FROM generate_series(1, 8000)"; + + await using var reader = await cmd.ExecuteReaderAsync(behavior); + while (await reader.ReadAsync()) + { + Assert.DoesNotThrowAsync(async () => await reader.GetFieldValueAsync(0)); + } + } } From 7751d2ea9716cd311cb3b040add7c234e6d47a50 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Thu, 25 Dec 2025 11:16:58 +0100 Subject: [PATCH 672/761] Add empty permissions block in Github Actions configs (#3694) (#6392) To restrict all permissions by default (CodeQL warning) --- .github/workflows/build.yml | 3 +++ .github/workflows/native-aot.yml | 3 +++ .github/workflows/rich-code-nav.yml | 36 ------------------------- .github/workflows/trigger-doc-build.yml | 3 +++ Npgsql.slnx | 3 +-- 5 files changed, 10 insertions(+), 38 deletions(-) delete mode 100644 .github/workflows/rich-code-nav.yml diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 41b2f87b54..6967eb7e38 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -9,6 +9,9 @@ on: - '*' pull_request: +permissions: + contents: read + # Cancel previous PR branch commits (head_ref is only defined on PRs) concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index fc90b3b66c..909b19966e 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -9,6 +9,9 @@ on: - '*' pull_request: +permissions: + contents: read + # Cancel previous PR branch commits (head_ref is only defined on 
PRs) concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} diff --git a/.github/workflows/rich-code-nav.yml b/.github/workflows/rich-code-nav.yml deleted file mode 100644 index c0ea339d66..0000000000 --- a/.github/workflows/rich-code-nav.yml +++ /dev/null @@ -1,36 +0,0 @@ -name: Rich Code Navigation - -on: - workflow_dispatch: - -env: - DOTNET_SKIP_FIRST_TIME_EXPERIENCE: true - -jobs: - build: - runs-on: windows-latest - - steps: - - name: Checkout - uses: actions/checkout@v6 - - - name: NuGet Cache - uses: actions/cache@v5 - with: - path: ~/.nuget/packages - key: ${{ runner.os }}-nuget-${{ hashFiles('**/Directory.Build.targets') }} - restore-keys: | - ${{ runner.os }}-nuget- - - - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v5.0.1 - - - name: Build - run: dotnet build --configuration Debug - shell: bash - - - name: Rich Navigation Indexing - uses: microsoft/RichCodeNavIndexer@v0 - with: - languages: csharp - repo-token: ${{ github.token }} diff --git a/.github/workflows/trigger-doc-build.yml b/.github/workflows/trigger-doc-build.yml index 30c6b5fa62..e8783c9e16 100644 --- a/.github/workflows/trigger-doc-build.yml +++ b/.github/workflows/trigger-doc-build.yml @@ -8,6 +8,9 @@ on: branches: - docs +permissions: + contents: read + jobs: build: runs-on: ubuntu-latest diff --git a/Npgsql.slnx b/Npgsql.slnx index 516cbc2784..e69a6728c8 100644 --- a/Npgsql.slnx +++ b/Npgsql.slnx @@ -10,9 +10,8 @@ - + - From 8cf2fa6b3aa49852b1230c4dcac835f0a6b0b35b Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Sat, 27 Dec 2025 11:49:30 +0300 Subject: [PATCH 673/761] Fix test timeout_during_authentication (#6388) --- .github/workflows/build.yml | 11 ++++++++++- src/Npgsql/Internal/NpgsqlConnector.cs | 5 +++-- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 6967eb7e38..4dd0bb9b16 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -301,10 
+301,19 @@ jobs: # TODO: Once test/Npgsql.Specification.Tests work, switch to just testing on the solution - name: Test run: | - dotnet test -c ${{ matrix.config }} -f ${{ matrix.test_tfm }} test/Npgsql.Tests --logger "GitHubActions;report-warnings=false" --blame-hang-timeout 30s + dotnet test -c ${{ matrix.config }} -f ${{ matrix.test_tfm }} test/Npgsql.Tests --logger "GitHubActions;report-warnings=false" --blame-crash --blame-hang-timeout 30s dotnet test -c ${{ matrix.config }} -f ${{ matrix.test_tfm }} test/Npgsql.DependencyInjection.Tests --logger "GitHubActions;report-warnings=false" shell: bash + - name: Upload Test Hang Dumps + uses: actions/upload-artifact@v6 + if: failure() + with: + name: test-hang-dumps + path: | + **/*.dmp + **/Sequence*.xml + - name: Test Plugins if: "!startsWith(matrix.os, 'macos')" run: | diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 6bca520d43..4c18372699 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -1339,7 +1339,7 @@ async Task ConnectAsync(NpgsqlTimeout timeout, CancellationToken cancellationTok } else { - IPAddress[] ipAddresses; + IPAddress[] ipAddresses = []; try { using var combinedCts = timeout.IsSet ? 
CancellationTokenSource.CreateLinkedTokenSource(cancellationToken) : null; @@ -1352,7 +1352,8 @@ async Task ConnectAsync(NpgsqlTimeout timeout, CancellationToken cancellationTok catch (OperationCanceledException) { cancellationToken.ThrowIfCancellationRequested(); - throw new TimeoutException(); + Debug.Assert(timeout.HasExpired); + ThrowHelper.ThrowNpgsqlExceptionWithInnerTimeoutException("The operation has timed out"); } } catch (SocketException ex) From e3d54f8c8a22870fdc53cc0769de54ce6735a740 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Sun, 28 Dec 2025 01:51:54 +0300 Subject: [PATCH 674/761] Fix retrieving sequence file for hanging tests (#6397) --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 4dd0bb9b16..2324059911 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -312,7 +312,7 @@ jobs: name: test-hang-dumps path: | **/*.dmp - **/Sequence*.xml + **/*_Sequence.xml - name: Test Plugins if: "!startsWith(matrix.os, 'macos')" From 3c535681c828565fbd7bb1a9c7c1b3ae68b26b7a Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Sun, 28 Dec 2025 13:56:33 +0300 Subject: [PATCH 675/761] Fix reading BigInteger with composites (#6390) Fixes #6389 --- .../Converters/Primitive/NumericConverters.cs | 2 +- test/Npgsql.Tests/BugTests.cs | 27 +++++++++++++++++++ 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/src/Npgsql/Internal/Converters/Primitive/NumericConverters.cs b/src/Npgsql/Internal/Converters/Primitive/NumericConverters.cs index 714d3c49d4..79a82a1bfa 100644 --- a/src/Npgsql/Internal/Converters/Primitive/NumericConverters.cs +++ b/src/Npgsql/Internal/Converters/Primitive/NumericConverters.cs @@ -34,7 +34,7 @@ public override ValueTask ReadAsync(PgReader reader, CancellationTok { // If we don't need a read and can read buffered we delegate to our sync read method which won't do IO in such a case. 
if (!reader.ShouldBuffer(reader.CurrentRemaining)) - Read(reader); + return new(Read(reader)); return AsyncCore(reader, cancellationToken); diff --git a/test/Npgsql.Tests/BugTests.cs b/test/Npgsql.Tests/BugTests.cs index 8d46522c0f..b3cd644afd 100644 --- a/test/Npgsql.Tests/BugTests.cs +++ b/test/Npgsql.Tests/BugTests.cs @@ -4,6 +4,7 @@ using NUnit.Framework; using System; using System.Data; +using System.Numerics; using System.Text; using System.Threading; using System.Threading.Tasks; @@ -1391,4 +1392,30 @@ public async Task Bug4123() Assert.DoesNotThrowAsync(stream.FlushAsync); Assert.DoesNotThrow(stream.Flush); } + + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/6389")] + public async Task Composite_with_BigInteger([Values(CommandBehavior.Default, CommandBehavior.SequentialAccess)] CommandBehavior behavior) + { + await using var adminConnection = await OpenConnectionAsync(); + var type = await GetTempTypeName(adminConnection); + await adminConnection.ExecuteNonQueryAsync($"CREATE TYPE {type} as (value numeric)"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.MapComposite(type); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + await using var cmd = connection.CreateCommand(); + cmd.CommandText = $"SELECT ROW(1234567890::numeric)::{type} FROM generate_series(1, 8000)"; + await using var reader = await cmd.ExecuteReaderAsync(behavior); + while (await reader.ReadAsync()) + { + Assert.DoesNotThrowAsync(async () => await reader.GetFieldValueAsync(0)); + } + } + + class Composite_with_BigInteger_Composite + { + public BigInteger Value { get; set; } + } } From 847d69f641a70e5abdb9c67fbcbb51f6d34fe832 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Mon, 29 Dec 2025 14:55:37 +0100 Subject: [PATCH 676/761] Do not include password in data source name with Persist Security Info=true (#6395) Closes #6394 --- src/Npgsql/NpgsqlDataSource.cs | 26 
++++++++++++++++---------- test/Npgsql.Tests/MetricTests.cs | 25 +++++++++++++++++++++++++ test/Npgsql.Tests/TracingTests.cs | 21 +++++++++++++++++++++ 3 files changed, 62 insertions(+), 10 deletions(-) diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index 280b32c128..dabbc978a5 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -94,15 +94,8 @@ private protected readonly Dictionary> _pendi readonly INpgsqlNameTranslator _defaultNameTranslator; readonly IDisposable? _eventSourceEvents; - internal NpgsqlDataSource( - NpgsqlConnectionStringBuilder settings, - NpgsqlDataSourceConfiguration dataSourceConfig, bool reportMetrics) + internal NpgsqlDataSource(NpgsqlConnectionStringBuilder settings, NpgsqlDataSourceConfiguration dataSourceConfig, bool reportMetrics) { - Settings = settings; - ConnectionString = settings.PersistSecurityInfo - ? settings.ToString() - : settings.ToStringWithoutPassword(); - Configuration = dataSourceConfig; (var name, @@ -128,6 +121,21 @@ internal NpgsqlDataSource( Debug.Assert(_passwordProvider is null || _passwordProviderAsync is not null); + Settings = settings; + + if (settings.PersistSecurityInfo) + { + ConnectionString = settings.ToString(); + + // The data source name is reported in tracing/metrics, so avoid leaking the password through there. + Name = name ?? settings.ToStringWithoutPassword(); + } + else + { + ConnectionString = settings.ToStringWithoutPassword(); + Name = name ?? ConnectionString; + } + _password = settings.Password; if (_periodicPasswordSuccessRefreshInterval != default) @@ -144,8 +152,6 @@ internal NpgsqlDataSource( _passwordRefreshTask = Task.Run(RefreshPassword); } - Name = name ?? ConnectionString; - // TODO this needs a rework, but for now we just avoid tracking multi-host data sources directly. 
if (reportMetrics) { diff --git a/test/Npgsql.Tests/MetricTests.cs b/test/Npgsql.Tests/MetricTests.cs index 235f8b4e27..9a8b2757e3 100644 --- a/test/Npgsql.Tests/MetricTests.cs +++ b/test/Npgsql.Tests/MetricTests.cs @@ -127,6 +127,31 @@ public async Task ConnectionMax() Assert.That(tags["db.client.connection.pool.name"], Is.EqualTo(dataSource.Name)); } + [Test] + public async Task Password_does_not_leak_via_datasource_name([Values] bool persistSecurityInfo) + { + var exportedItems = new List(); + using var meterProvider = Sdk.CreateMeterProviderBuilder() + .AddMeter("Npgsql") + .AddInMemoryExporter(exportedItems) + .Build(); + + var dataSourceBuilder = base.CreateDataSourceBuilder(); + dataSourceBuilder.ConnectionStringBuilder.ApplicationName = "MetricsDataSource" + Interlocked.Increment(ref _dataSourceCounter); + dataSourceBuilder.ConnectionStringBuilder.PersistSecurityInfo = persistSecurityInfo; + // Do not set the data source name - this makes it default to the connection string, but without + // the password (even when Persist Security Info is true) + await using var dataSource = dataSourceBuilder.Build(); + + meterProvider.ForceFlush(); + + var metric = exportedItems.Single(m => m.Name == "db.client.connection.max"); + var point = GetFilteredPoints(metric.GetMetricPoints(), dataSource.Name).First(); + var tags = ToDictionary(point.Tags); + var connectionString = new NpgsqlConnectionStringBuilder((string)tags["db.client.connection.pool.name"]!); + Assert.That(connectionString.Password, Is.Null); + } + static Dictionary ToDictionary(ReadOnlyTagCollection tags) { var dict = new Dictionary(); diff --git a/test/Npgsql.Tests/TracingTests.cs b/test/Npgsql.Tests/TracingTests.cs index 1033dc7a55..f845861366 100644 --- a/test/Npgsql.Tests/TracingTests.cs +++ b/test/Npgsql.Tests/TracingTests.cs @@ -811,6 +811,27 @@ public async Task Copy_ConfigureTracing() Assert.That(tags["custom_tag"], Is.EqualTo("custom_value")); } + [Test] + public async Task 
Password_does_not_leak_via_datasource_name([Values] bool persistSecurityInfo) + { + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.ConnectionStringBuilder.PersistSecurityInfo = persistSecurityInfo; + // Do not set the data source name - this makes it default to the connection string, but without + // the password (even when Persist Security Info is true) + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + using var activityListener = StartListener(out var activities); + + await ExecuteScalar(connection, async, isBatch: false, query: "SELECT 42"); + + var activity = GetSingleActivity(activities, "postgresql", "postgresql"); + + var tags = activity.TagObjects.ToDictionary(t => t.Key, t => t.Value); + var connectionString = new NpgsqlConnectionStringBuilder((string)tags["db.npgsql.data_source"]!); + Assert.That(connectionString.Password, Is.Null); + } + static ActivityListener StartListener(out List activities) { var a = new List(); From 811c9f4d17d4efba9aac2ec7f4eb2a6d30f71145 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Fri, 2 Jan 2026 16:15:07 +0100 Subject: [PATCH 677/761] [Tests] Remove npgsqldbtype passing and rename related flag (#6305) Remove npgsqldbtype passing and rename related flag --- test/Npgsql.PluginTests/JsonNetTests.cs | 61 ++++----- .../Npgsql.PluginTests/LegacyNodaTimeTests.cs | 15 +-- .../NetTopologySuiteTests.cs | 3 +- .../NodaTimeInfinityTests.cs | 7 +- test/Npgsql.PluginTests/NodaTimeTests.cs | 127 ++++++------------ test/Npgsql.Tests/CommandParameterTests.cs | 8 +- test/Npgsql.Tests/GlobalTypeMapperTests.cs | 16 +-- test/Npgsql.Tests/Support/TestBase.cs | 62 ++++----- test/Npgsql.Tests/Types/ArrayTests.cs | 23 ++-- test/Npgsql.Tests/Types/BitStringTests.cs | 10 +- test/Npgsql.Tests/Types/ByteaTests.cs | 20 +-- test/Npgsql.Tests/Types/CompositeTests.cs | 54 ++++---- test/Npgsql.Tests/Types/CubeTests.cs | 18 +-- 
.../Types/DateTimeInfinityTests.cs | 11 +- test/Npgsql.Tests/Types/DateTimeTests.cs | 53 +++----- test/Npgsql.Tests/Types/DomainTests.cs | 2 +- test/Npgsql.Tests/Types/EnumTests.cs | 34 ++--- .../Npgsql.Tests/Types/FullTextSearchTests.cs | 9 +- test/Npgsql.Tests/Types/GeometricTypeTests.cs | 32 ++--- test/Npgsql.Tests/Types/HstoreTests.cs | 12 +- test/Npgsql.Tests/Types/JsonDynamicTests.cs | 40 +++--- test/Npgsql.Tests/Types/JsonPathTests.cs | 2 +- test/Npgsql.Tests/Types/JsonTests.cs | 52 +++---- test/Npgsql.Tests/Types/LTreeTests.cs | 10 +- .../Npgsql.Tests/Types/LegacyDateTimeTests.cs | 7 +- test/Npgsql.Tests/Types/MiscTypeTests.cs | 21 ++- test/Npgsql.Tests/Types/MoneyTests.cs | 4 +- test/Npgsql.Tests/Types/MultirangeTests.cs | 22 +-- test/Npgsql.Tests/Types/NetworkTypeTests.cs | 21 ++- test/Npgsql.Tests/Types/NumericTests.cs | 18 +-- test/Npgsql.Tests/Types/NumericTypeTests.cs | 65 +++++---- test/Npgsql.Tests/Types/RangeTests.cs | 37 +++-- test/Npgsql.Tests/Types/TextTests.cs | 26 ++-- 33 files changed, 376 insertions(+), 526 deletions(-) diff --git a/test/Npgsql.PluginTests/JsonNetTests.cs b/test/Npgsql.PluginTests/JsonNetTests.cs index e251bf7249..c02bc3aa91 100644 --- a/test/Npgsql.PluginTests/JsonNetTests.cs +++ b/test/Npgsql.PluginTests/JsonNetTests.cs @@ -15,9 +15,9 @@ namespace Npgsql.PluginTests; /// /// Tests for the Npgsql.Json.NET mapping plugin /// -[TestFixture(NpgsqlDbType.Jsonb)] -[TestFixture(NpgsqlDbType.Json)] -public class JsonNetTests(NpgsqlDbType npgsqlDbType) : TestBase +[TestFixture("jsonb")] +[TestFixture("json")] +public class JsonNetTests(string dataTypeName) : TestBase { [Test] public Task Roundtrip_object() @@ -25,10 +25,9 @@ public Task Roundtrip_object() JsonDataSource, new Foo { Bar = 8 }, IsJsonb ? 
@"{""Bar"": 8}" : @"{""Bar"":8}", - _pgTypeName, - npgsqlDbType, + dataTypeName, isDefault: false, - isNpgsqlDbTypeInferredFromClrType: false); + isDataTypeInferredFromValue: false); [Test, IssueLink("https://github.com/npgsql/npgsql/issues/3085")] public Task Roundtrip_string() @@ -36,10 +35,9 @@ public Task Roundtrip_string() JsonDataSource, @"{""p"": 1}", @"{""p"": 1}", - _pgTypeName, - npgsqlDbType, + dataTypeName, isDefaultForWriting: false, - isNpgsqlDbTypeInferredFromClrType: false); + isDataTypeInferredFromValue: false); [Test, IssueLink("https://github.com/npgsql/npgsql/issues/3085")] public Task Roundtrip_char_array() @@ -47,10 +45,9 @@ public Task Roundtrip_char_array() JsonDataSource, @"{""p"": 1}".ToCharArray(), @"{""p"": 1}", - _pgTypeName, - npgsqlDbType, + dataTypeName, isDefault: false, - isNpgsqlDbTypeInferredFromClrType: false); + isDataTypeInferredFromValue: false); [Test, IssueLink("https://github.com/npgsql/npgsql/issues/3085")] public Task Roundtrip_byte_array() @@ -58,10 +55,9 @@ public Task Roundtrip_byte_array() JsonDataSource, Encoding.ASCII.GetBytes(@"{""p"": 1}"), @"{""p"": 1}", - _pgTypeName, - npgsqlDbType, + dataTypeName, isDefault: false, - isNpgsqlDbTypeInferredFromClrType: false); + isDataTypeInferredFromValue: false); [Test] public Task Roundtrip_JObject() @@ -69,12 +65,11 @@ public Task Roundtrip_JObject() JsonDataSource, new JObject { ["Bar"] = 8 }, IsJsonb ? @"{""Bar"": 8}" : @"{""Bar"":8}", - _pgTypeName, - npgsqlDbType, + dataTypeName, // By default we map JObject to jsonb isDefaultForWriting: IsJsonb, isDefaultForReading: false, - isNpgsqlDbTypeInferredFromClrType: false); + isDataTypeInferredFromValue: false); [Test] public Task Roundtrip_JArray() @@ -82,18 +77,17 @@ public Task Roundtrip_JArray() JsonDataSource, new JArray(new[] { 1, 2, 3 }), IsJsonb ? 
"[1, 2, 3]" : "[1,2,3]", - _pgTypeName, - npgsqlDbType, + dataTypeName, // By default we map JArray to jsonb isDefaultForWriting: IsJsonb, isDefaultForReading: false, - isNpgsqlDbTypeInferredFromClrType: false); + isDataTypeInferredFromValue: false); [Test] public async Task Deserialize_failure() { await using var conn = await JsonDataSource.OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand($@"SELECT '[1, 2, 3]'::{_pgTypeName}", conn); + await using var cmd = new NpgsqlCommand($@"SELECT '[1, 2, 3]'::{dataTypeName}", conn); await using var reader = await cmd.ExecuteReaderAsync(); await reader.ReadAsync(); // Attempt to deserialize JSON array into object @@ -117,10 +111,9 @@ await AssertType( dataSource, new Foo { Bar = 8 }, IsJsonb ? @"{""Bar"": 8}" : @"{""Bar"":8}", - _pgTypeName, - npgsqlDbType, + dataTypeName, isDefaultForReading: false, - isNpgsqlDbTypeInferredFromClrType: false); + isDataTypeInferredFromValue: false); } [Test] @@ -137,10 +130,9 @@ await AssertType( dataSource, new[] { 1, 2, 3 }, IsJsonb ? "[1, 2, 3]" : "[1,2,3]", - _pgTypeName, - npgsqlDbType, + dataTypeName, isDefaultForReading: false, - isNpgsqlDbTypeInferredFromClrType: false); + isDataTypeInferredFromValue: false); } class DateWrapper @@ -166,10 +158,9 @@ await AssertType( dataSource, new DateWrapper { Date = new DateTime(2018, 04, 20) }, IsJsonb ? 
"{\"Date\": \"The 20th of April, 2018\"}" : "{\"Date\":\"The 20th of April, 2018\"}", - _pgTypeName, - npgsqlDbType, + dataTypeName, isDefault: false, - isNpgsqlDbTypeInferredFromClrType: false); + isDataTypeInferredFromValue: false); } [Test] @@ -183,8 +174,8 @@ public async Task Bug3464() await using var conn = await dataSource.OpenConnectionAsync(); await using var cmd = new NpgsqlCommand(@"SELECT @p1, @p2", conn); - cmd.Parameters.AddWithValue("p1", expected).NpgsqlDbType = npgsqlDbType; - cmd.Parameters.AddWithValue("p2", expected).NpgsqlDbType = npgsqlDbType; + cmd.Parameters.AddWithValue("p1", expected).DataTypeName = dataTypeName; + cmd.Parameters.AddWithValue("p2", expected).DataTypeName = dataTypeName; await using var reader = cmd.ExecuteReader(); } @@ -261,8 +252,6 @@ class Foo public override int GetHashCode() => Bar.GetHashCode(); } - readonly string _pgTypeName = npgsqlDbType.ToString().ToLower(); - [OneTimeSetUp] public void SetUp() { @@ -275,7 +264,7 @@ public void SetUp() public async Task Teardown() => await JsonDataSource.DisposeAsync(); - bool IsJsonb => npgsqlDbType == NpgsqlDbType.Jsonb; + bool IsJsonb => dataTypeName == "jsonb"; NpgsqlDataSource JsonDataSource = default!; } diff --git a/test/Npgsql.PluginTests/LegacyNodaTimeTests.cs b/test/Npgsql.PluginTests/LegacyNodaTimeTests.cs index c6e5f25a9d..9df00dcdb1 100644 --- a/test/Npgsql.PluginTests/LegacyNodaTimeTests.cs +++ b/test/Npgsql.PluginTests/LegacyNodaTimeTests.cs @@ -2,10 +2,9 @@ using System.Data; using System.Threading.Tasks; using NodaTime; +using Npgsql.NodaTime.Internal; using Npgsql.Tests; -using NpgsqlTypes; using NUnit.Framework; -using Npgsql.NodaTime.Internal; namespace Npgsql.PluginTests; @@ -20,9 +19,8 @@ public async Task Timestamp_as_ZonedDateTime() new LocalDateTime(1998, 4, 12, 13, 26, 38, 789).InZoneLeniently(DateTimeZoneProviders.Tzdb[TimeZone]), "1998-04-12 13:26:38.789+02", "timestamp with time zone", - NpgsqlDbType.TimestampTz, DbType.DateTimeOffset, - 
isNpgsqlDbTypeInferredFromClrType: false, isDefault: false); + isDataTypeInferredFromValue: false, isDefault: false); [Test] public Task Timestamp_as_Instant() @@ -30,9 +28,8 @@ public Task Timestamp_as_Instant() new LocalDateTime(1998, 4, 12, 13, 26, 38, 789).InUtc().ToInstant(), "1998-04-12 13:26:38.789", "timestamp without time zone", - NpgsqlDbType.Timestamp, DbType.DateTime, - isNpgsqlDbTypeInferredFromClrType: false); + isDataTypeInferredFromValue: false); [Test] public Task Timestamp_as_LocalDateTime() @@ -40,10 +37,9 @@ public Task Timestamp_as_LocalDateTime() new LocalDateTime(1998, 4, 12, 13, 26, 38, 789), "1998-04-12 13:26:38.789", "timestamp without time zone", - NpgsqlDbType.Timestamp, DbType.DateTime, isDefaultForReading: false, - isNpgsqlDbTypeInferredFromClrType: false); + isDataTypeInferredFromValue: false); [Test] public Task Timestamptz_as_Instant() @@ -51,10 +47,9 @@ public Task Timestamptz_as_Instant() new LocalDateTime(1998, 4, 12, 13, 26, 38, 789).InUtc().ToInstant(), "1998-04-12 15:26:38.789+02", "timestamp with time zone", - NpgsqlDbType.TimestampTz, DbType.DateTimeOffset, isDefaultForWriting: false, - isNpgsqlDbTypeInferredFromClrType: false); + isDataTypeInferredFromValue: false); [Test] public async Task Timestamptz_ZonedDateTime_infinite_values_are_not_supported() diff --git a/test/Npgsql.PluginTests/NetTopologySuiteTests.cs b/test/Npgsql.PluginTests/NetTopologySuiteTests.cs index 4cece1952c..34f701e4a4 100644 --- a/test/Npgsql.PluginTests/NetTopologySuiteTests.cs +++ b/test/Npgsql.PluginTests/NetTopologySuiteTests.cs @@ -160,8 +160,7 @@ await AssertType( new Geometry[] { point }, '{' + GetSqlLiteral(point) + '}', "geometry[]", - NpgsqlDbType.Geometry | NpgsqlDbType.Array, - isNpgsqlDbTypeInferredFromClrType: false); + isDataTypeInferredFromValue: false); } [Test] diff --git a/test/Npgsql.PluginTests/NodaTimeInfinityTests.cs b/test/Npgsql.PluginTests/NodaTimeInfinityTests.cs index e8f8036ada..9fe78a989e 100644 --- 
a/test/Npgsql.PluginTests/NodaTimeInfinityTests.cs +++ b/test/Npgsql.PluginTests/NodaTimeInfinityTests.cs @@ -26,14 +26,12 @@ await AssertType( new DateInterval(LocalDate.MinIsoValue, LocalDate.MaxIsoValue), "[-infinity,infinity]", "daterange", - NpgsqlDbType.DateRange, - isNpgsqlDbTypeInferredFromClrType: false, skipArrayCheck: true); // NpgsqlRange[] is mapped to multirange by default, not array; test separately + isDataTypeInferredFromValue: false, skipArrayCheck: true); // NpgsqlRange[] is mapped to multirange by default, not array; test separately await AssertType( new [] {new DateInterval(LocalDate.MinIsoValue, LocalDate.MaxIsoValue)}, """{"[-infinity,infinity]"}""", "daterange[]", - NpgsqlDbType.DateRange | NpgsqlDbType.Array, isDefault: false, skipArrayCheck: true); await using var conn = await OpenConnectionAsync(); @@ -43,8 +41,7 @@ await AssertType( await AssertType( new [] {new DateInterval(LocalDate.MinIsoValue, LocalDate.MaxIsoValue)}, """{[-infinity,infinity]}""", - "datemultirange", - NpgsqlDbType.DateMultirange, isNpgsqlDbTypeInferredFromClrType: false, skipArrayCheck: true); + "datemultirange", isDataTypeInferredFromValue: false, skipArrayCheck: true); } [Test] diff --git a/test/Npgsql.PluginTests/NodaTimeTests.cs b/test/Npgsql.PluginTests/NodaTimeTests.cs index adddf9cf25..c348660331 100644 --- a/test/Npgsql.PluginTests/NodaTimeTests.cs +++ b/test/Npgsql.PluginTests/NodaTimeTests.cs @@ -29,8 +29,8 @@ public class NodaTimeTests : MultiplexingTestBase, IDisposable [Test, TestCaseSource(nameof(TimestampValues))] public Task Timestamp_as_LocalDateTime(LocalDateTime localDateTime, string sqlLiteral) - => AssertType(localDateTime, sqlLiteral, "timestamp without time zone", NpgsqlDbType.Timestamp, DbType.DateTime2, - isNpgsqlDbTypeInferredFromClrType: false); + => AssertType(localDateTime, sqlLiteral, "timestamp without time zone", DbType.DateTime2, + isDataTypeInferredFromValue: false); [Test] public Task Timestamp_as_unspecified_DateTime() @@ -38,7 
+38,6 @@ public Task Timestamp_as_unspecified_DateTime() new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Unspecified), "1998-04-12 13:26:38", "timestamp without time zone", - NpgsqlDbType.Timestamp, DbType.DateTime2, isDefaultForReading: false); @@ -48,7 +47,6 @@ public Task Timestamp_as_long() -54297202000000, "1998-04-12 13:26:38", "timestamp without time zone", - NpgsqlDbType.Timestamp, DbType.DateTime2, isDefault: false); @@ -93,8 +91,7 @@ await AssertType( new(1998, 4, 12, 15, 26, 38)), """["1998-04-12 13:26:38","1998-04-12 15:26:38"]""", "tsrange", - NpgsqlDbType.TimestampRange, - isNpgsqlDbTypeInferredFromClrType: false, skipArrayCheck: true); // NpgsqlRange[] is mapped to multirange by default, not array; test separately + isDataTypeInferredFromValue: false, skipArrayCheck: true); // NpgsqlRange[] is mapped to multirange by default, not array; test separately await AssertType( new [] { new NpgsqlRange( @@ -102,7 +99,6 @@ await AssertType( new(1998, 4, 12, 15, 26, 38)), }, """{"[\"1998-04-12 13:26:38\",\"1998-04-12 15:26:38\"]"}""", "tsrange[]", - NpgsqlDbType.TimestampRange | NpgsqlDbType.Array, isDefault: false, skipArrayCheck: true); await using var conn = await OpenConnectionAsync(); @@ -114,8 +110,7 @@ await AssertType( new(1998, 4, 12, 13, 26, 38), new(1998, 4, 12, 15, 26, 38)), }, """{["1998-04-12 13:26:38","1998-04-12 15:26:38"]}""", - "tsmultirange", - NpgsqlDbType.TimestampMultirange, isNpgsqlDbTypeInferredFromClrType: false, skipArrayCheck: true); + "tsmultirange", isDataTypeInferredFromValue: false, skipArrayCheck: true); } [Test] @@ -136,8 +131,7 @@ await AssertType( }, """{["1998-04-12 13:26:38","1998-04-12 15:26:38"],["1998-04-13 13:26:38","1998-04-13 15:26:38"]}""", "tsmultirange", - NpgsqlDbType.TimestampMultirange, - isNpgsqlDbTypeInferredFromClrType: false); + isDataTypeInferredFromValue: false); } #endregion Timestamp without time zone @@ -158,8 +152,8 @@ await AssertType( [Test, TestCaseSource(nameof(TimestamptzValues))] public Task 
Timestamptz_as_Instant(Instant instant, string sqlLiteral) - => AssertType(instant, sqlLiteral, "timestamp with time zone", NpgsqlDbType.TimestampTz, DbType.DateTime, - isNpgsqlDbTypeInferredFromClrType: false); + => AssertType(instant, sqlLiteral, "timestamp with time zone", DbType.DateTime, + isDataTypeInferredFromValue: false); [Test] public Task Timestamptz_as_ZonedDateTime() @@ -167,9 +161,8 @@ public Task Timestamptz_as_ZonedDateTime() new LocalDateTime(1998, 4, 12, 13, 26, 38).InUtc(), "1998-04-12 15:26:38+02", "timestamp with time zone", - NpgsqlDbType.TimestampTz, DbType.DateTime, - isNpgsqlDbTypeInferredFromClrType: false, + isDataTypeInferredFromValue: false, isDefaultForReading: false); [Test] @@ -178,9 +171,8 @@ public Task Timestamptz_as_OffsetDateTime() new LocalDateTime(1998, 4, 12, 13, 26, 38).WithOffset(Offset.Zero), "1998-04-12 15:26:38+02", "timestamp with time zone", - NpgsqlDbType.TimestampTz, DbType.DateTime, - isNpgsqlDbTypeInferredFromClrType: false, + isDataTypeInferredFromValue: false, isDefaultForReading: false); [Test] @@ -189,7 +181,6 @@ public Task Timestamptz_as_utc_DateTime() new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), "1998-04-12 15:26:38+02", "timestamp with time zone", - NpgsqlDbType.TimestampTz, DbType.DateTime, isDefaultForReading: false); @@ -199,7 +190,6 @@ public Task Timestamptz_as_DateTimeOffset() new DateTimeOffset(1998, 4, 12, 13, 26, 38, TimeSpan.Zero), "1998-04-12 15:26:38+02", "timestamp with time zone", - NpgsqlDbType.TimestampTz, DbType.DateTime, isDefaultForReading: false); @@ -209,7 +199,6 @@ public Task Timestamptz_as_long() -54297202000000, "1998-04-12 15:26:38+02", "timestamp with time zone", - NpgsqlDbType.TimestampTz, DbType.DateTime, isDefault: false); @@ -243,8 +232,7 @@ await AssertType( new LocalDateTime(1998, 4, 12, 15, 26, 38).InUtc().ToInstant()), """["1998-04-12 15:26:38+02","1998-04-12 17:26:38+02")""", "tstzrange", - NpgsqlDbType.TimestampTzRange, - isNpgsqlDbTypeInferredFromClrType: 
false, skipArrayCheck: true); // NpgsqlRange[] is mapped to multirange by default, not array; test separately + isDataTypeInferredFromValue: false, skipArrayCheck: true); // NpgsqlRange[] is mapped to multirange by default, not array; test separately await AssertType( new [] { new Interval( @@ -252,7 +240,6 @@ await AssertType( new LocalDateTime(1998, 4, 12, 15, 26, 38).InUtc().ToInstant()), }, """{"[\"1998-04-12 15:26:38+02\",\"1998-04-12 17:26:38+02\")"}""", "tstzrange[]", - NpgsqlDbType.TimestampTzRange | NpgsqlDbType.Array, isDefault: false, skipArrayCheck: true); await using var conn = await OpenConnectionAsync(); @@ -264,8 +251,7 @@ await AssertType( new LocalDateTime(1998, 4, 12, 13, 26, 38).InUtc().ToInstant(), new LocalDateTime(1998, 4, 12, 15, 26, 38).InUtc().ToInstant()), }, """{["1998-04-12 15:26:38+02","1998-04-12 17:26:38+02")}""", - "tstzmultirange", - NpgsqlDbType.TimestampTzMultirange, isNpgsqlDbTypeInferredFromClrType: false, skipArrayCheck: true); + "tstzmultirange", isDataTypeInferredFromValue: false, skipArrayCheck: true); } [Test] @@ -273,18 +259,14 @@ public Task Tstzrange_with_no_end_as_Interval() => AssertType( new Interval(new LocalDateTime(1998, 4, 12, 13, 26, 38).InUtc().ToInstant(), null), """["1998-04-12 15:26:38+02",)""", - "tstzrange", - NpgsqlDbType.TimestampTzRange, - isNpgsqlDbTypeInferredFromClrType: false, skipArrayCheck: true); + "tstzrange", isDataTypeInferredFromValue: false, skipArrayCheck: true); [Test] public Task Tstzrange_with_no_start_as_Interval() => AssertType( new Interval(null, new LocalDateTime(1998, 4, 12, 13, 26, 38).InUtc().ToInstant()), """(,"1998-04-12 15:26:38+02")""", - "tstzrange", - NpgsqlDbType.TimestampTzRange, - isNpgsqlDbTypeInferredFromClrType: false, skipArrayCheck: true); + "tstzrange", isDataTypeInferredFromValue: false, skipArrayCheck: true); [Test] public Task Tstzrange_with_no_start_or_end_as_Interval() @@ -292,8 +274,7 @@ public Task Tstzrange_with_no_start_or_end_as_Interval() new 
Interval(null, null), """(,)""", "tstzrange", - NpgsqlDbType.TimestampTzRange, - isNpgsqlDbTypeInferredFromClrType: false, skipArrayCheck: true); + isDataTypeInferredFromValue: false, skipArrayCheck: true); [Test] public Task Tstzrange_as_NpgsqlRange_of_Instant() @@ -303,8 +284,7 @@ public Task Tstzrange_as_NpgsqlRange_of_Instant() new LocalDateTime(1998, 4, 12, 15, 26, 38).InUtc().ToInstant()), """["1998-04-12 15:26:38+02","1998-04-12 17:26:38+02"]""", "tstzrange", - NpgsqlDbType.TimestampTzRange, - isNpgsqlDbTypeInferredFromClrType: false, + isDataTypeInferredFromValue: false, isDefaultForReading: false, skipArrayCheck: true); [Test] @@ -315,8 +295,7 @@ public Task Tstzrange_as_NpgsqlRange_of_ZonedDateTime() new LocalDateTime(1998, 4, 12, 15, 26, 38).InUtc()), """["1998-04-12 15:26:38+02","1998-04-12 17:26:38+02"]""", "tstzrange", - NpgsqlDbType.TimestampTzRange, - isNpgsqlDbTypeInferredFromClrType: false, + isDataTypeInferredFromValue: false, isDefaultForReading: false, skipArrayCheck: true); [Test] @@ -327,8 +306,7 @@ public Task Tstzrange_as_NpgsqlRange_of_OffsetDateTime() new LocalDateTime(1998, 4, 12, 15, 26, 38).WithOffset(Offset.Zero)), """["1998-04-12 15:26:38+02","1998-04-12 17:26:38+02"]""", "tstzrange", - NpgsqlDbType.TimestampTzRange, - isNpgsqlDbTypeInferredFromClrType: false, + isDataTypeInferredFromValue: false, isDefaultForReading: false, skipArrayCheck: true); [Test] @@ -349,8 +327,7 @@ await AssertType( }, """{["1998-04-12 15:26:38+02","1998-04-12 17:26:38+02"),["1998-04-13 15:26:38+02","1998-04-13 17:26:38+02")}""", "tstzmultirange", - NpgsqlDbType.TimestampTzMultirange, - isNpgsqlDbTypeInferredFromClrType: false); + isDataTypeInferredFromValue: false); } [Test] @@ -371,8 +348,7 @@ await AssertType( }, """{["1998-04-12 15:26:38+02","1998-04-12 17:26:38+02"],["1998-04-13 15:26:38+02","1998-04-13 17:26:38+02"]}""", "tstzmultirange", - NpgsqlDbType.TimestampTzMultirange, - isNpgsqlDbTypeInferredFromClrType: false, + isDataTypeInferredFromValue: 
false, isDefaultForReading: false); } @@ -394,8 +370,7 @@ await AssertType( }, """{["1998-04-12 15:26:38+02","1998-04-12 17:26:38+02"],["1998-04-13 15:26:38+02","1998-04-13 17:26:38+02"]}""", "tstzmultirange", - NpgsqlDbType.TimestampTzMultirange, - isNpgsqlDbTypeInferredFromClrType: false, + isDataTypeInferredFromValue: false, isDefaultForReading: false); } @@ -417,8 +392,7 @@ await AssertType( }, """{["1998-04-12 15:26:38+02","1998-04-12 17:26:38+02"],["1998-04-13 15:26:38+02","1998-04-13 17:26:38+02"]}""", "tstzmultirange", - NpgsqlDbType.TimestampTzMultirange, - isNpgsqlDbTypeInferredFromClrType: false, + isDataTypeInferredFromValue: false, isDefaultForReading: false); } @@ -448,8 +422,7 @@ await AssertType( }, """{"[\"1998-04-12 15:26:38+02\",\"1998-04-12 17:26:38+02\")","[\"1998-04-13 15:26:38+02\",\"1998-04-13 17:26:38+02\")","[\"1998-04-13 15:26:38+02\",)","(,\"1998-04-13 15:26:38+02\")","(,)"}""", "tstzrange[]", - NpgsqlDbType.TimestampTzRange | NpgsqlDbType.Array, - isNpgsqlDbTypeInferredFromClrType: false, + isDataTypeInferredFromValue: false, isDefaultForWriting: false); } @@ -470,8 +443,7 @@ await AssertType( }, """{"[\"1998-04-12 15:26:38+02\",\"1998-04-12 17:26:38+02\"]","[\"1998-04-13 15:26:38+02\",\"1998-04-13 17:26:38+02\"]"}""", "tstzrange[]", - NpgsqlDbType.TimestampTzRange | NpgsqlDbType.Array, - isNpgsqlDbTypeInferredFromClrType: false, + isDataTypeInferredFromValue: false, isDefault: false); } @@ -481,16 +453,16 @@ await AssertType( [Test] public Task Date_as_LocalDate() - => AssertType(new LocalDate(2020, 10, 1), "2020-10-01", "date", NpgsqlDbType.Date, DbType.Date, - isNpgsqlDbTypeInferredFromClrType: false); + => AssertType(new LocalDate(2020, 10, 1), "2020-10-01", "date", DbType.Date, + isDataTypeInferredFromValue: false); [Test] public Task Date_as_DateTime() - => AssertType(new DateTime(2020, 10, 1), "2020-10-01", "date", NpgsqlDbType.Date, DbType.Date, isDefault: false); + => AssertType(new DateTime(2020, 10, 1), "2020-10-01", "date", 
DbType.Date, isDefault: false); [Test] public Task Date_as_int() - => AssertType(7579, "2020-10-01", "date", NpgsqlDbType.Date, DbType.Date, isDefault: false); + => AssertType(7579, "2020-10-01", "date", DbType.Date, isDefault: false); [Test] public async Task Daterange_as_DateInterval() @@ -499,14 +471,12 @@ await AssertType( new DateInterval(new(2002, 3, 4), new(2002, 3, 6)), "[2002-03-04,2002-03-07)", "daterange", - NpgsqlDbType.DateRange, - isNpgsqlDbTypeInferredFromClrType: false, skipArrayCheck: true); // DateInterval[] is mapped to multirange by default, not array; test separately + isDataTypeInferredFromValue: false, skipArrayCheck: true); // DateInterval[] is mapped to multirange by default, not array; test separately await AssertType( new [] {new DateInterval(new(2002, 3, 4), new(2002, 3, 6))}, """{"[2002-03-04,2002-03-07)"}""", "daterange[]", - NpgsqlDbType.DateRange | NpgsqlDbType.Array, isDefault: false, skipArrayCheck: true); await using var conn = await OpenConnectionAsync(); @@ -516,8 +486,7 @@ await AssertType( await AssertType( new [] {new DateInterval(new(2002, 3, 4), new(2002, 3, 6))}, """{[2002-03-04,2002-03-07)}""", - "datemultirange", - NpgsqlDbType.DateMultirange, isNpgsqlDbTypeInferredFromClrType: false, skipArrayCheck: true); + "datemultirange", isDataTypeInferredFromValue: false, skipArrayCheck: true); } [Test] @@ -527,15 +496,13 @@ await AssertType( new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false), "[2002-03-04,2002-03-06)", "daterange", - NpgsqlDbType.DateRange, - isNpgsqlDbTypeInferredFromClrType: false, + isDataTypeInferredFromValue: false, isDefaultForReading: false, skipArrayCheck: true); // NpgsqlRange[] is mapped to multirange by default, not array; test separately await AssertType( new [] { new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false) }, """{"[2002-03-04,2002-03-06)"}""", "daterange[]", - NpgsqlDbType.DateRange | NpgsqlDbType.Array, isDefault: false, skipArrayCheck: true); await using var conn = 
await OpenConnectionAsync(); @@ -545,8 +512,7 @@ await AssertType( await AssertType( new [] { new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false) }, """{[2002-03-04,2002-03-06)}""", - "datemultirange", - NpgsqlDbType.DateMultirange, isDefault: false, skipArrayCheck: true); + "datemultirange", isDefault: false, skipArrayCheck: true); } [Test] @@ -563,8 +529,7 @@ await AssertType( }, "{[2002-03-04,2002-03-06),[2002-03-08,2002-03-11)}", "datemultirange", - NpgsqlDbType.DateMultirange, - isNpgsqlDbTypeInferredFromClrType: false); + isDataTypeInferredFromValue: false); } [Test] @@ -581,14 +546,13 @@ await AssertType( }, "{[2002-03-04,2002-03-06),[2002-03-08,2002-03-11)}", "datemultirange", - NpgsqlDbType.DateMultirange, isDefaultForReading: false, - isNpgsqlDbTypeInferredFromClrType: false); + isDataTypeInferredFromValue: false); } [Test] public Task Date_as_DateOnly() - => AssertType(new DateOnly(2020, 10, 1), "2020-10-01", "date", NpgsqlDbType.Date, DbType.Date, isDefaultForReading: false); + => AssertType(new DateOnly(2020, 10, 1), "2020-10-01", "date", DbType.Date, isDefaultForReading: false); [Test] public async Task Daterange_as_NpgsqlRange_of_DateOnly() @@ -597,14 +561,12 @@ await AssertType( new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false), "[2002-03-04,2002-03-06)", "daterange", - NpgsqlDbType.DateRange, isDefaultForReading: false, skipArrayCheck: true); await AssertType( new [] { new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false) }, """{"[2002-03-04,2002-03-06)"}""", "daterange[]", - NpgsqlDbType.DateRange | NpgsqlDbType.Array, isDefault: false, skipArrayCheck: true); await using var conn = await OpenConnectionAsync(); @@ -614,8 +576,7 @@ await AssertType( await AssertType( new [] { new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false) }, """{[2002-03-04,2002-03-06)}""", - "datemultirange", - NpgsqlDbType.DateMultirange, isDefault: false, skipArrayCheck: true); + "datemultirange", isDefault: false, skipArrayCheck: 
true); } [Test] @@ -631,7 +592,6 @@ await AssertType( }, """{"[2002-03-04,2002-03-06)","[2002-03-08,2002-03-11)"}""", "daterange[]", - NpgsqlDbType.DateRange | NpgsqlDbType.Array, isDefaultForWriting: false); } @@ -648,7 +608,6 @@ await AssertType( }, """{"[2002-03-04,2002-03-06)","[2002-03-08,2002-03-11)"}""", "daterange[]", - NpgsqlDbType.DateRange | NpgsqlDbType.Array, isDefault: false); } @@ -658,8 +617,8 @@ await AssertType( [Test] public Task Time_as_LocalTime() - => AssertType(new LocalTime(10, 45, 34, 500), "10:45:34.5", "time without time zone", NpgsqlDbType.Time, DbType.Time, - isNpgsqlDbTypeInferredFromClrType: false); + => AssertType(new LocalTime(10, 45, 34, 500), "10:45:34.5", "time without time zone", DbType.Time, + isDataTypeInferredFromValue: false); [Test] public Task Time_as_TimeSpan() @@ -667,7 +626,6 @@ public Task Time_as_TimeSpan() new TimeSpan(0, 10, 45, 34, 500), "10:45:34.5", "time without time zone", - NpgsqlDbType.Time, DbType.Time, isDefault: false); @@ -677,7 +635,6 @@ public Task Time_as_TimeOnly() new TimeOnly(10, 45, 34, 500), "10:45:34.5", "time without time zone", - NpgsqlDbType.Time, DbType.Time, isDefaultForReading: false); @@ -691,8 +648,7 @@ public Task TimeTz_as_OffsetTime() new OffsetTime(new LocalTime(1, 2, 3, 4).PlusNanoseconds(5000), Offset.FromHoursAndMinutes(3, 30) + Offset.FromSeconds(5)), "01:02:03.004005+03:30:05", "time with time zone", - NpgsqlDbType.TimeTz, - isNpgsqlDbTypeInferredFromClrType: false); + isDataTypeInferredFromValue: false); [Test] public async Task TimeTz_as_DateTimeOffset() @@ -706,7 +662,6 @@ await AssertTypeWrite( new DateTimeOffset(1, 1, 1, 13, 3, 45, 510, TimeSpan.FromHours(2)), "13:03:45.51+02", "time with time zone", - NpgsqlDbType.TimeTz, isDefault: false); } @@ -731,8 +686,7 @@ public Task Interval_as_Period() }.Build().Normalize(), "1 year 2 mons 25 days 05:06:07.008009", "interval", - NpgsqlDbType.Interval, - isNpgsqlDbTypeInferredFromClrType: false); + isDataTypeInferredFromValue: 
false); [Test] public Task Interval_as_Duration() @@ -741,9 +695,8 @@ public Task Interval_as_Duration() Duration.FromNanoseconds(1000), "5 days 00:04:03.002001", "interval", - NpgsqlDbType.Interval, isDefaultForReading: false, - isNpgsqlDbTypeInferredFromClrType: false); + isDataTypeInferredFromValue: false); [Test] public async Task Interval_as_Duration_with_months_fails() diff --git a/test/Npgsql.Tests/CommandParameterTests.cs b/test/Npgsql.Tests/CommandParameterTests.cs index 7f5a5dc5c4..783484a8fa 100644 --- a/test/Npgsql.Tests/CommandParameterTests.cs +++ b/test/Npgsql.Tests/CommandParameterTests.cs @@ -195,10 +195,10 @@ public async Task Parameter_must_be_set(bool genericParam) [Test] public async Task Object_generic_param_does_runtime_lookup() { - await AssertTypeWrite(1, "1", "integer", NpgsqlDbType.Integer, DbType.Int32, DbType.Int32, isDefault: false, - isNpgsqlDbTypeInferredFromClrType: true, skipArrayCheck: true); - await AssertTypeWrite(new[] {1, 1}, "{1,1}", "integer[]", NpgsqlDbType.Integer | NpgsqlDbType.Array, isDefault: false, - isNpgsqlDbTypeInferredFromClrType: true, skipArrayCheck: true); + await AssertTypeWrite(1, "1", "integer", DbType.Int32, DbType.Int32, isDefault: false, + isDataTypeInferredFromValue: true, skipArrayCheck: true); + await AssertTypeWrite(new[] {1, 1}, "{1,1}", "integer[]", isDefault: false, + isDataTypeInferredFromValue: true, skipArrayCheck: true); } [Test] diff --git a/test/Npgsql.Tests/GlobalTypeMapperTests.cs b/test/Npgsql.Tests/GlobalTypeMapperTests.cs index a5c75e41bf..2385ba0710 100644 --- a/test/Npgsql.Tests/GlobalTypeMapperTests.cs +++ b/test/Npgsql.Tests/GlobalTypeMapperTests.cs @@ -26,17 +26,17 @@ public async Task MapEnum() await connection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS ENUM ('sad', 'ok', 'happy')"); await connection.ReloadTypesAsync(); - await AssertType(connection, Mood.Happy, "happy", type, npgsqlDbType: null); + await AssertType(connection, Mood.Happy, "happy", type, 
isDataTypeInferredFromValue: false); } NpgsqlConnection.GlobalTypeMapper.UnmapEnum(type); // Global mapping changes have no effect on already-built data sources - await AssertType(dataSource1, Mood.Happy, "happy", type, npgsqlDbType: null); + await AssertType(dataSource1, Mood.Happy, "happy", type, isDataTypeInferredFromValue: false); // But they do affect new data sources await using var dataSource2 = CreateDataSource(); - await AssertType(dataSource2, "happy", "happy", type, npgsqlDbType: null, isDefault: false); + await AssertType(dataSource2, "happy", "happy", type, isDataTypeInferredFromValue: false, isDefault: false); } [Test] @@ -55,17 +55,17 @@ public async Task MapEnum_NonGeneric() await connection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS ENUM ('sad', 'ok', 'happy')"); await connection.ReloadTypesAsync(); - await AssertType(connection, Mood.Happy, "happy", type, npgsqlDbType: null); + await AssertType(connection, Mood.Happy, "happy", type, isDataTypeInferredFromValue: false); } NpgsqlConnection.GlobalTypeMapper.UnmapEnum(typeof(Mood), type); // Global mapping changes have no effect on already-built data sources - await AssertType(dataSource1, Mood.Happy, "happy", type, npgsqlDbType: null); + await AssertType(dataSource1, Mood.Happy, "happy", type, isDataTypeInferredFromValue: false); // But they do affect new data sources await using var dataSource2 = CreateDataSource(); - Assert.ThrowsAsync(() => AssertType(dataSource2, Mood.Happy, "happy", type, npgsqlDbType: null)); + Assert.ThrowsAsync(() => AssertType(dataSource2, Mood.Happy, "happy", type, isDataTypeInferredFromValue: false)); } finally { @@ -92,11 +92,11 @@ public async Task Reset() NpgsqlConnection.GlobalTypeMapper.Reset(); // Global mapping changes have no effect on already-built data sources - await AssertType(dataSource1, Mood.Happy, "happy", type, npgsqlDbType: null); + await AssertType(dataSource1, Mood.Happy, "happy", type, isDataTypeInferredFromValue: false); // But they do affect new 
data sources await using var dataSource2 = CreateDataSource(); - await AssertType(dataSource2, "happy", "happy", type, npgsqlDbType: null, isDefault: false); + await AssertType(dataSource2, "happy", "happy", type, isDataTypeInferredFromValue: false, isDefault: false); } [Test] diff --git a/test/Npgsql.Tests/Support/TestBase.cs b/test/Npgsql.Tests/Support/TestBase.cs index 198e10a326..11ebb5003e 100644 --- a/test/Npgsql.Tests/Support/TestBase.cs +++ b/test/Npgsql.Tests/Support/TestBase.cs @@ -7,6 +7,7 @@ using System.Threading; using System.Threading.Tasks; using Microsoft.Extensions.Logging; +using Npgsql.Internal.Postgres; using Npgsql.Tests.Support; using NpgsqlTypes; using NUnit.Framework; @@ -33,20 +34,19 @@ public async Task AssertType( T value, string sqlLiteral, string pgTypeName, - NpgsqlDbType? npgsqlDbType, DbType? dbType = null, DbType? inferredDbType = null, bool isDefaultForReading = true, bool isDefaultForWriting = true, bool? isDefault = null, - bool isNpgsqlDbTypeInferredFromClrType = true, + bool isDataTypeInferredFromValue = true, Func? comparer = null, bool skipArrayCheck = false) { await using var connection = await OpenConnectionAsync(); return await AssertType( - connection, value, sqlLiteral, pgTypeName, npgsqlDbType, dbType, inferredDbType, isDefaultForReading, isDefaultForWriting, - isDefault, isNpgsqlDbTypeInferredFromClrType, comparer, skipArrayCheck); + connection, value, sqlLiteral, pgTypeName, dbType, inferredDbType, isDefaultForReading, isDefaultForWriting, + isDefault, isDataTypeInferredFromValue, comparer, skipArrayCheck); } public async Task AssertType( @@ -54,20 +54,19 @@ public async Task AssertType( T value, string sqlLiteral, string pgTypeName, - NpgsqlDbType? npgsqlDbType, DbType? dbType = null, DbType? inferredDbType = null, bool isDefaultForReading = true, bool isDefaultForWriting = true, bool? isDefault = null, - bool isNpgsqlDbTypeInferredFromClrType = true, + bool isDataTypeInferredFromValue = true, Func? 
comparer = null, bool skipArrayCheck = false) { await using var connection = await dataSource.OpenConnectionAsync(); - return await AssertType(connection, value, sqlLiteral, pgTypeName, npgsqlDbType, dbType, inferredDbType, isDefaultForReading, - isDefaultForWriting, isDefault, isNpgsqlDbTypeInferredFromClrType, comparer, skipArrayCheck); + return await AssertType(connection, value, sqlLiteral, pgTypeName, dbType, inferredDbType, isDefaultForReading, + isDefaultForWriting, isDefault, isDataTypeInferredFromValue, comparer, skipArrayCheck); } public async Task AssertType( @@ -75,20 +74,19 @@ public async Task AssertType( T value, string sqlLiteral, string pgTypeName, - NpgsqlDbType? npgsqlDbType, DbType? dbType = null, DbType? inferredDbType = null, bool isDefaultForReading = true, bool isDefaultForWriting = true, bool? isDefault = null, - bool isNpgsqlDbTypeInferredFromClrType = true, + bool isDataTypeInferredFromValue = true, Func? comparer = null, bool skipArrayCheck = false) { if (isDefault is not null) isDefaultForReading = isDefaultForWriting = isDefault.Value; - await AssertTypeWrite(connection, () => value, sqlLiteral, pgTypeName, npgsqlDbType, dbType, inferredDbType, isDefaultForWriting, isNpgsqlDbTypeInferredFromClrType, skipArrayCheck); + await AssertTypeWrite(connection, () => value, sqlLiteral, pgTypeName, dbType, inferredDbType, isDefaultForWriting, isDataTypeInferredFromValue, skipArrayCheck); return await AssertTypeRead(connection, sqlLiteral, pgTypeName, value, isDefaultForReading, comparer, fieldType: null, skipArrayCheck); } @@ -110,46 +108,42 @@ public async Task AssertTypeWrite( T value, string expectedSqlLiteral, string pgTypeName, - NpgsqlDbType npgsqlDbType, DbType? dbType = null, DbType? 
inferredDbType = null, bool isDefault = true, - bool isNpgsqlDbTypeInferredFromClrType = true, + bool isDataTypeInferredFromValue = true, bool skipArrayCheck = false) { await using var connection = await dataSource.OpenConnectionAsync(); - await AssertTypeWrite(connection, () => value, expectedSqlLiteral, pgTypeName, npgsqlDbType, dbType, inferredDbType, isDefault, - isNpgsqlDbTypeInferredFromClrType, skipArrayCheck); + await AssertTypeWrite(connection, () => value, expectedSqlLiteral, pgTypeName, dbType, inferredDbType, isDefault, + isDataTypeInferredFromValue, skipArrayCheck); } public Task AssertTypeWrite( T value, string expectedSqlLiteral, string pgTypeName, - NpgsqlDbType npgsqlDbType, DbType? dbType = null, DbType? inferredDbType = null, bool isDefault = true, - bool isNpgsqlDbTypeInferredFromClrType = true, + bool isDataTypeInferredFromValue = true, bool skipArrayCheck = false) - => AssertTypeWrite(() => value, expectedSqlLiteral, pgTypeName, npgsqlDbType, dbType, inferredDbType, isDefault, - isNpgsqlDbTypeInferredFromClrType, skipArrayCheck); + => AssertTypeWrite(() => value, expectedSqlLiteral, pgTypeName, dbType, inferredDbType, isDefault, + isDataTypeInferredFromValue, skipArrayCheck); public async Task AssertTypeWrite( Func valueFactory, string expectedSqlLiteral, string pgTypeName, - NpgsqlDbType npgsqlDbType, DbType? dbType = null, DbType? 
inferredDbType = null, bool isDefault = true, - bool isNpgsqlDbTypeInferredFromClrType = true, + bool isDataTypeInferredFromValue = true, bool skipArrayCheck = false) { await using var connection = await OpenConnectionAsync(); - await AssertTypeWrite(connection, valueFactory, expectedSqlLiteral, pgTypeName, npgsqlDbType, dbType, inferredDbType, isDefault, - isNpgsqlDbTypeInferredFromClrType, skipArrayCheck); + await AssertTypeWrite(connection, valueFactory, expectedSqlLiteral, pgTypeName, dbType, inferredDbType, isDefault, isDataTypeInferredFromValue, skipArrayCheck); } internal static async Task AssertTypeRead( @@ -228,16 +222,15 @@ internal static async Task AssertTypeWrite( Func valueFactory, string expectedSqlLiteral, string pgTypeName, - NpgsqlDbType? npgsqlDbType, DbType? dbType = null, DbType? inferredDbType = null, bool isDefault = true, - bool isNpgsqlDbTypeInferredFromClrType = true, + bool isDataTypeInferredFromValue = true, bool skipArrayCheck = false) { await AssertTypeWriteCore( - connection, valueFactory, expectedSqlLiteral, pgTypeName, npgsqlDbType, dbType, inferredDbType, isDefault, - isNpgsqlDbTypeInferredFromClrType); + connection, valueFactory, expectedSqlLiteral, pgTypeName, dbType, inferredDbType, isDefault, + isDataTypeInferredFromValue); // Check the corresponding array type as well if (!skipArrayCheck && !pgTypeName.EndsWith("[]", StringComparison.Ordinal)) @@ -247,11 +240,10 @@ await AssertTypeWriteCore( () => new[] { valueFactory(), valueFactory() }, ArrayLiteral(expectedSqlLiteral), pgTypeName + "[]", - npgsqlDbType | NpgsqlDbType.Array, dbType: null, inferredDbType: null, isDefault, - isNpgsqlDbTypeInferredFromClrType); + isDataTypeInferredFromValue); } } @@ -260,14 +252,12 @@ static async Task AssertTypeWriteCore( Func valueFactory, string expectedSqlLiteral, string pgTypeName, - NpgsqlDbType? npgsqlDbType, DbType? dbType = null, DbType? 
inferredDbType = null, bool isDefault = true, - bool isDataTypeInferredFromClrType = true) + bool isDataTypeInferredFromValue = true) { - if (npgsqlDbType is null) - isDataTypeInferredFromClrType = false; + var npgsqlDbType = DataTypeName.FromDisplayName(pgTypeName).ToNpgsqlDbType(); // TODO: Interferes with both multiplexing and connection-specific mapping (used e.g. in NodaTime) // Reset the type mapper to make sure we're resolving this type with a clean slate (for isolation, just in case) @@ -352,18 +342,18 @@ static async Task AssertTypeWriteCore( void CheckInference(bool dbTypeApplied = false, bool valueSolelyApplied = false) { - if (!valueSolelyApplied || isDataTypeInferredFromClrType) + if (!valueSolelyApplied || isDataTypeInferredFromValue) Assert.That(p.DataTypeName, Is.EqualTo(pgTypeNameWithoutFacetsAndQuotes), () => $"Got wrong inferred DataTypeName when inferring with {errorIdentifier[errorIdentifierIndex]}"); - if (!valueSolelyApplied || isDataTypeInferredFromClrType) + if (!valueSolelyApplied || isDataTypeInferredFromValue) Assert.That(p.NpgsqlDbType, Is.EqualTo(npgsqlDbType ?? NpgsqlDbType.Unknown), () => $"Got wrong inferred NpgsqlDbType when inferring with {errorIdentifier[errorIdentifierIndex]}"); DbType expectedDbType; if (dbTypeApplied) expectedDbType = dbType.GetValueOrDefault(); - else if (!valueSolelyApplied || isDataTypeInferredFromClrType) + else if (!valueSolelyApplied || isDataTypeInferredFromValue) expectedDbType = inferredDbType ?? dbType ?? 
DbType.Object; else expectedDbType = DbType.Object; diff --git a/test/Npgsql.Tests/Types/ArrayTests.cs b/test/Npgsql.Tests/Types/ArrayTests.cs index 07f10330f3..7b4e45e1f4 100644 --- a/test/Npgsql.Tests/Types/ArrayTests.cs +++ b/test/Npgsql.Tests/Types/ArrayTests.cs @@ -3,7 +3,6 @@ using System.Collections.Generic; using System.Collections.Immutable; using System.Data; -using System.Linq; using System.Text; using System.Threading.Tasks; using Npgsql.Internal.Converters; @@ -25,19 +24,19 @@ public class ArrayTests(MultiplexingMode multiplexingMode) : MultiplexingTestBas { static readonly TestCaseData[] ArrayTestCases = [ - new TestCaseData(new[] { 1, 2, 3 }, "{1,2,3}", "integer[]", NpgsqlDbType.Integer | NpgsqlDbType.Array) + new TestCaseData(new[] { 1, 2, 3 }, "{1,2,3}", "integer[]") .SetName("Integer_array"), - new TestCaseData(Array.Empty(), "{}", "integer[]", NpgsqlDbType.Integer | NpgsqlDbType.Array) + new TestCaseData(Array.Empty(), "{}", "integer[]") .SetName("Empty_array"), - new TestCaseData(new[,] { { 1, 2, 3 }, { 7, 8, 9 } }, "{{1,2,3},{7,8,9}}", "integer[]", NpgsqlDbType.Integer | NpgsqlDbType.Array) + new TestCaseData(new[,] { { 1, 2, 3 }, { 7, 8, 9 } }, "{{1,2,3},{7,8,9}}", "integer[]") .SetName("Two_dimensional_array"), - new TestCaseData(new[] { [1, 2], new byte[] { 3, 4 } }, """{"\\x0102","\\x0304"}""", "bytea[]", NpgsqlDbType.Bytea | NpgsqlDbType.Array) + new TestCaseData(new[] { [1, 2], new byte[] { 3, 4 } }, """{"\\x0102","\\x0304"}""", "bytea[]") .SetName("Bytea_array") ]; [Test, TestCaseSource(nameof(ArrayTestCases))] - public Task Arrays(T array, string sqlLiteral, string pgTypeName, NpgsqlDbType? 
npgsqlDbType) - => AssertType(array, sqlLiteral, pgTypeName, npgsqlDbType); + public Task Arrays(T array, string sqlLiteral, string pgTypeName) + => AssertType(array, sqlLiteral, pgTypeName); [Test] public async Task NullableInts() @@ -49,7 +48,7 @@ public async Task NullableInts() var dataSourceBuilder = new NpgsqlDataSourceBuilder(connectionStringBuilder.ToString()); await using var dataSource = dataSourceBuilder.Build(); - await AssertType(dataSource, new int?[] { 1, 2, null, 3 }, "{1,2,NULL,3}", "integer[]", NpgsqlDbType.Integer | NpgsqlDbType.Array); + await AssertType(dataSource, new int?[] { 1, 2, null, 3 }, "{1,2,NULL,3}", "integer[]"); } [Test, Description("Checks that PG arrays containing nulls can't be read as CLR arrays of non-nullable value types (the default).")] @@ -149,17 +148,17 @@ public async Task Value_type_array_nullabilities(ArrayNullabilityMode mode) // Note that PG normalizes empty multidimensional arrays to single-dimensional, e.g. ARRAY[[], []]::integer[] returns {}. 
[Test] public async Task Write_empty_multidimensional_array() - => await AssertTypeWrite(new int[0, 0], "{}", "integer[]", NpgsqlDbType.Integer | NpgsqlDbType.Array); + => await AssertTypeWrite(new int[0, 0], "{}", "integer[]"); [Test] public async Task Generic_List() => await AssertType( - new List { 1, 2, 3 }, "{1,2,3}", "integer[]", NpgsqlDbType.Integer | NpgsqlDbType.Array, isDefaultForReading: false); + new List { 1, 2, 3 }, "{1,2,3}", "integer[]", isDefaultForReading: false); [Test] public async Task Write_IList_implementation() => await AssertTypeWrite( - ImmutableArray.Create(1, 2, 3), "{1,2,3}", "integer[]", NpgsqlDbType.Integer | NpgsqlDbType.Array); + ImmutableArray.Create(1, 2, 3), "{1,2,3}", "integer[]"); [Test] public void Read_IList_implementation_throws() @@ -411,6 +410,6 @@ public async Task NpgsqlSlimSourceBuilder_EnableArrays() dataSourceBuilder.EnableArrays(); await using var dataSource = dataSourceBuilder.Build(); - await AssertType(dataSource, new[] { 1, 2, 3 }, "{1,2,3}", "integer[]", NpgsqlDbType.Integer | NpgsqlDbType.Array); + await AssertType(dataSource, new[] { 1, 2, 3 }, "{1,2,3}", "integer[]"); } } diff --git a/test/Npgsql.Tests/Types/BitStringTests.cs b/test/Npgsql.Tests/Types/BitStringTests.cs index fb8377c395..c3dddaf01b 100644 --- a/test/Npgsql.Tests/Types/BitStringTests.cs +++ b/test/Npgsql.Tests/Types/BitStringTests.cs @@ -27,10 +27,10 @@ public async Task BitArray(string sqlLiteral) for (var i = 0; i < sqlLiteral.Length; i++) bitArray[i] = sqlLiteral[i] == '1'; - await AssertType(bitArray, sqlLiteral, "bit varying", NpgsqlDbType.Varbit); + await AssertType(bitArray, sqlLiteral, "bit varying"); if (len > 0) - await AssertType(bitArray, sqlLiteral, $"bit({len})", NpgsqlDbType.Bit, isDefaultForWriting: false); + await AssertType(bitArray, sqlLiteral, $"bit({len})", isDefaultForWriting: false); } [Test] @@ -47,7 +47,7 @@ public async Task BitArray_long() [Test] public Task BitVector32() => AssertType( - new BitVector32(4), 
"00000000000000000000000000000100", "bit varying", NpgsqlDbType.Varbit, isDefaultForReading: false); + new BitVector32(4), "00000000000000000000000000000100", "bit varying", isDefaultForReading: false); [Test] public Task BitVector32_too_long() @@ -55,7 +55,7 @@ public Task BitVector32_too_long() [Test] public Task Bool() - => AssertType(true, "1", "bit(1)", NpgsqlDbType.Bit, isDefault: false); + => AssertType(true, "1", "bit(1)", isDefault: false); [Test] public async Task Bitstring_with_multiple_bits_as_bool_throws() @@ -118,7 +118,7 @@ public async Task Array_of_single_bits_and_null() [Test] public Task As_string() - => AssertType("010101", "010101", "bit varying", NpgsqlDbType.Varbit, isDefault: false); + => AssertType("010101", "010101", "bit varying", isDefault: false); [Test] public Task Write_as_string_validation() diff --git a/test/Npgsql.Tests/Types/ByteaTests.cs b/test/Npgsql.Tests/Types/ByteaTests.cs index 216ae64cc4..9829cdb289 100644 --- a/test/Npgsql.Tests/Types/ByteaTests.cs +++ b/test/Npgsql.Tests/Types/ByteaTests.cs @@ -21,7 +21,7 @@ public class ByteaTests(MultiplexingMode multiplexingMode) : MultiplexingTestBas [TestCase(new byte[] { 1, 2, 3, 4, 5 }, "\\x0102030405", TestName = "Bytea")] [TestCase(new byte[] { }, "\\x", TestName = "Bytea_empty")] public Task Bytea(byte[] byteArray, string sqlLiteral) - => AssertType(byteArray, sqlLiteral, "bytea", NpgsqlDbType.Bytea, DbType.Binary); + => AssertType(byteArray, sqlLiteral, "bytea", DbType.Binary); [Test] public async Task Bytea_long() @@ -38,24 +38,24 @@ public async Task Bytea_long() [Test] public Task AsMemory() => AssertType( - new Memory([1, 2, 3]), "\\x010203", "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false, + new Memory([1, 2, 3]), "\\x010203", "bytea", DbType.Binary, isDefault: false, comparer: (left, right) => left.Span.SequenceEqual(right.Span)); [Test] public Task AsReadOnlyMemory() => AssertType( - new ReadOnlyMemory([1, 2, 3]), "\\x010203", "bytea", NpgsqlDbType.Bytea, 
DbType.Binary, isDefault: false, + new ReadOnlyMemory([1, 2, 3]), "\\x010203", "bytea", DbType.Binary, isDefault: false, comparer: (left, right) => left.Span.SequenceEqual(right.Span)); [Test] public Task AsArraySegment() => AssertType( - new ArraySegment([1, 2, 3]), "\\x010203", "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false); + new ArraySegment([1, 2, 3]), "\\x010203", "bytea", DbType.Binary, isDefault: false); [Test] public Task Write_as_MemoryStream() => AssertTypeWrite( - () => new MemoryStream([1, 2, 3]), "\\x010203", "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false); + () => new MemoryStream([1, 2, 3]), "\\x010203", "bytea", DbType.Binary, isDefault: false); [Test] public Task Write_as_MemoryStream_truncated() @@ -68,7 +68,7 @@ public Task Write_as_MemoryStream_truncated() }; return AssertTypeWrite( - msFactory, "\\x020304", "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false); + msFactory, "\\x020304", "bytea", DbType.Binary, isDefault: false); } [Test] @@ -86,7 +86,7 @@ public Task Write_as_MemoryStream_exposableArray() }; return AssertTypeWrite( - msFactory, "\\x020304", "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false); + msFactory, "\\x020304", "bytea", DbType.Binary, isDefault: false); } [Test] @@ -98,7 +98,7 @@ public async Task Write_as_MemoryStream_long() var expectedSql = "\\x" + ToHex(bytes); await AssertTypeWrite( - () => new MemoryStream(bytes), expectedSql, "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false); + () => new MemoryStream(bytes), expectedSql, "bytea", DbType.Binary, isDefault: false); } [Test] @@ -111,7 +111,7 @@ public async Task Write_as_FileStream() await File.WriteAllBytesAsync(filePath, [1, 2, 3]); await AssertTypeWrite( - () => FileStreamFactory(filePath, fsList), "\\x010203", "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false); + () => FileStreamFactory(filePath, fsList), "\\x010203", "bytea", DbType.Binary, isDefault: false); } finally { @@ -147,7 +147,7 @@ 
public async Task Write_as_FileStream_long() var expectedSql = "\\x" + ToHex(bytes); await AssertTypeWrite( - () => FileStreamFactory(filePath, fsList), expectedSql, "bytea", NpgsqlDbType.Bytea, DbType.Binary, isDefault: false); + () => FileStreamFactory(filePath, fsList), expectedSql, "bytea", DbType.Binary, isDefault: false); } finally { diff --git a/test/Npgsql.Tests/Types/CompositeTests.cs b/test/Npgsql.Tests/Types/CompositeTests.cs index 6a62e418e3..a5f783b9ab 100644 --- a/test/Npgsql.Tests/Types/CompositeTests.cs +++ b/test/Npgsql.Tests/Types/CompositeTests.cs @@ -30,7 +30,7 @@ await AssertType( new SomeComposite { SomeText = "foo", X = 8 }, "(8,foo)", type, - npgsqlDbType: null); + isDataTypeInferredFromValue: false); } [Test] @@ -52,7 +52,7 @@ await AssertType( new SomeComposite { SomeText = "foo", X = 8 }, "(8,foo)", type, - npgsqlDbType: null); + isDataTypeInferredFromValue: false); } [Test] @@ -73,7 +73,7 @@ await AssertType( new SomeComposite { SomeText = "foo", X = 8 }, "(8,foo)", type, - npgsqlDbType: null); + isDataTypeInferredFromValue: false); } class CustomTranslator : INpgsqlNameTranslator @@ -106,7 +106,7 @@ await AssertType( new SomeComposite { SomeText = "foo", X = 8 }, "(8,foo)", type, - npgsqlDbType: null); + isDataTypeInferredFromValue: false); } finally { @@ -139,7 +139,7 @@ await AssertType( new SomeCompositeContainer { A = 8, Containee = new() { SomeText = "foo", X = 9 } }, @"(8,""(9,foo)"")", containerType, - npgsqlDbType: null); + isDataTypeInferredFromValue: false); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1168")] @@ -160,7 +160,7 @@ await AssertType( new SomeComposite { SomeText = "foo", X = 8 }, "(8,foo)", $"{schema}.some_composite", - npgsqlDbType: null); + isDataTypeInferredFromValue: false); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4365")] @@ -190,7 +190,7 @@ await AssertType( new SomeCompositeContainer { A = 8, Containee = new() { SomeText = "foo", X = 9 } }, @"(8,""(9,foo)"")", 
$"{secondSchemaName}.container", - npgsqlDbType: null, + isDataTypeInferredFromValue: false, isDefaultForWriting: false); await AssertType( @@ -198,7 +198,7 @@ await AssertType( new SomeCompositeContainer { A = 8, Containee = new() { SomeText = "foo", X = 9 } }, @"(8,""(9,foo)"")", $"{firstSchemaName}.container", - npgsqlDbType: null, + isDataTypeInferredFromValue: false, isDefaultForWriting: true); } @@ -221,7 +221,7 @@ await AssertType( new SomeComposite { SomeText = "foobar", X = 10 }, "(10,foobar)", $"{schema}.\"{typename}\"", - npgsqlDbType: null); + isDataTypeInferredFromValue: false); } [Test] @@ -242,7 +242,7 @@ await AssertType( new SomeCompositeStruct { SomeText = "foo", X = 8 }, "(8,foo)", type, - npgsqlDbType: null); + isDataTypeInferredFromValue: false); } [Test] @@ -263,7 +263,7 @@ await AssertType( new SomeComposite[] { new() { SomeText = "foo", X = 8 }, new() { SomeText = "bar", X = 9 }}, @"{""(8,foo)"",""(9,bar)""}", type + "[]", - npgsqlDbType: null); + isDataTypeInferredFromValue: false); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/859")] @@ -285,7 +285,7 @@ await AssertType( new NameTranslationComposite { Simple = 2, TwoWords = 3, SomeClrName = 4 }, "(2,3,4)", type, - npgsqlDbType: null); + isDataTypeInferredFromValue: false); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/856")] @@ -309,7 +309,7 @@ await AssertType( new Address { PostalCode = "12345", Street = "Main St." 
}, @"(""Main St."",12345)", compositeType, - npgsqlDbType: null); + isDataTypeInferredFromValue: false); } [Test] @@ -331,7 +331,7 @@ await AssertType( new SomeCompositeWithArray { Ints = [1, 2, 3, 4] }, @"(""{1,2,3,4}"")", compositeType, - npgsqlDbType: null, + isDataTypeInferredFromValue: false, comparer: (actual, expected) => actual.Ints!.SequenceEqual(expected.Ints!)); } @@ -357,7 +357,7 @@ await AssertType( new SomeCompositeWithEnum { EnumValue = SomeCompositeWithEnum.TestEnum.Value2 }, @"(value2)", compositeType, - npgsqlDbType: null, + isDataTypeInferredFromValue: false, comparer: (actual, expected) => actual.EnumValue == expected.EnumValue); } @@ -380,7 +380,7 @@ await AssertType( new SomeCompositeWithIPAddress { Address = IPAddress.Loopback }, @"(127.0.0.1)", compositeType, - npgsqlDbType: null, + isDataTypeInferredFromValue: false, comparer: (actual, expected) => actual.Address!.Equals(expected.Address)); } @@ -406,7 +406,7 @@ await AssertType( }, """("{""1970-01-01 00:00:00"",""1970-01-02 00:00:00""}")""", compositeType, - npgsqlDbType: null, + isDataTypeInferredFromValue: false, comparer: (actual, expected) => actual.DateTimes!.SequenceEqual(expected.DateTimes!)); } @@ -430,7 +430,7 @@ await adminConnection.ExecuteNonQueryAsync($@" new SomeCompositeWithConverterResolverType { DateTimes = [DateTime.UnixEpoch] }, // UTC DateTime """("{""1970-01-01 01:00:00"",""1970-01-02 01:00:00""}")""", compositeType, - npgsqlDbType: null, + isDataTypeInferredFromValue: false, comparer: (actual, expected) => actual.DateTimes!.SequenceEqual(expected.DateTimes!))); } @@ -464,7 +464,7 @@ Task DoAssertion() new SomeComposite { SomeText = "foo", X = 8 }, "(8,foo)", table, - npgsqlDbType: null); + isDataTypeInferredFromValue: false); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1267")] @@ -485,7 +485,7 @@ await AssertType( new SomeComposite { SomeText = "foo", X = 8 }, "(8,foo)", table, - npgsqlDbType: null); + isDataTypeInferredFromValue: false); } [Test, 
IssueLink("https://github.com/npgsql/npgsql/issues/1125")] @@ -506,14 +506,14 @@ await AssertType( new ClassWithNullableProperty { Foo = 8 }, "(8)", type, - npgsqlDbType: null); + isDataTypeInferredFromValue: false); await AssertType( connection, new ClassWithNullableProperty { Foo = null }, "()", type, - npgsqlDbType: null); + isDataTypeInferredFromValue: false); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1125")] @@ -534,14 +534,14 @@ await AssertType( new StructWithNullableProperty { Foo = 8 }, "(8)", type, - npgsqlDbType: null); + isDataTypeInferredFromValue: false); await AssertType( connection, new StructWithNullableProperty { Foo = null }, "()", type, - npgsqlDbType: null); + isDataTypeInferredFromValue: false); } [Test] @@ -599,7 +599,7 @@ public async Task DuplicateConstructorParameters() new DuplicateOneLongOneBool(true, 1), "(1,t)", type, - npgsqlDbType: null)); + isDataTypeInferredFromValue: false)); Assert.That(ex!.InnerException, Is.TypeOf()); } @@ -642,7 +642,7 @@ await AssertType( new OneLongOneBool(1) { BooleanValue = true }, "(1,t)", type, - npgsqlDbType: null); + isDataTypeInferredFromValue: false); } [Test] @@ -677,7 +677,7 @@ await AssertType( new NpgsqlRange(composite1, composite2), "[\"(8,foo)\",\"(42,bar)\"]", rangeType, - npgsqlDbType: null, + isDataTypeInferredFromValue: false, isDefaultForWriting: false); } diff --git a/test/Npgsql.Tests/Types/CubeTests.cs b/test/Npgsql.Tests/Types/CubeTests.cs index 8b766c0366..f571707377 100644 --- a/test/Npgsql.Tests/Types/CubeTests.cs +++ b/test/Npgsql.Tests/Types/CubeTests.cs @@ -22,7 +22,7 @@ public class CubeTests : MultiplexingTestBase [Test, TestCaseSource(nameof(CubeValues))] public Task Cube(NpgsqlCube cube, string sqlLiteral) - => AssertType(cube, sqlLiteral, "cube", NpgsqlDbType.Cube, isDefault: true, isNpgsqlDbTypeInferredFromClrType: false); + => AssertType(cube, sqlLiteral, "cube", isDefault: true, isDataTypeInferredFromValue: false); [Test] public void 
Cube_Constructor_SingleValue() @@ -153,9 +153,8 @@ await AssertType( data, @"{""(1, 2),(3, 4)"",""(5, 6)"",""(1),(2)""}", "cube[]", - NpgsqlDbType.Cube | NpgsqlDbType.Array, isDefault: true, - isNpgsqlDbTypeInferredFromClrType: false); + isDataTypeInferredFromValue: false); } [Test] @@ -171,9 +170,8 @@ public Task Cube_NegativeValues() new NpgsqlCube(new[] { -1.0, -2.0, -3.0 }, new[] { -4.0, -5.0, -6.0 }), "(-1, -2, -3),(-4, -5, -6)", "cube", - NpgsqlDbType.Cube, isDefault: true, - isNpgsqlDbTypeInferredFromClrType: false); + isDataTypeInferredFromValue: false); [Test] public void Cube_Equality_HashCode() @@ -200,9 +198,8 @@ public Task Cube_ZeroValues() new NpgsqlCube(0.0, 0.0), "(0)", "cube", - NpgsqlDbType.Cube, isDefault: true, - isNpgsqlDbTypeInferredFromClrType: false); + isDataTypeInferredFromValue: false); [Test] public Task Cube_MaxDimensions() @@ -223,9 +220,8 @@ public Task Cube_MaxDimensions() new NpgsqlCube(lowerLeft, upperRight), expected, "cube", - NpgsqlDbType.Cube, isDefault: true, - isNpgsqlDbTypeInferredFromClrType: false); + isDataTypeInferredFromValue: false); } [Test] @@ -251,7 +247,7 @@ public async Task NpgsqlSlimSourceBuilder_EnableCube() dataSourceBuilder.EnableCube(); await using var dataSource = dataSourceBuilder.Build(); - await AssertType(dataSource, new NpgsqlCube(1.0, 2.0), "(1),(2)", "cube", NpgsqlDbType.Cube, isDefaultForWriting: false, skipArrayCheck: true); + await AssertType(dataSource, new NpgsqlCube(1.0, 2.0), "(1),(2)", "cube", isDefaultForWriting: false, skipArrayCheck: true); } [Test] @@ -262,7 +258,7 @@ public async Task NpgsqlSlimSourceBuilder_EnableArrays() dataSourceBuilder.EnableArrays(); await using var dataSource = dataSourceBuilder.Build(); - await AssertType(dataSource, new NpgsqlCube(1.0, 2.0), "(1),(2)", "cube", NpgsqlDbType.Cube, isDefaultForWriting: false); + await AssertType(dataSource, new NpgsqlCube(1.0, 2.0), "(1),(2)", "cube", isDefaultForWriting: false); } [OneTimeSetUp] diff --git 
a/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs b/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs index 147f7f1be9..594d32f31a 100644 --- a/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs +++ b/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs @@ -1,7 +1,6 @@ using System; using System.Data; using System.Threading.Tasks; -using NpgsqlTypes; using NUnit.Framework; using static Npgsql.Util.Statics; @@ -61,28 +60,28 @@ public sealed class DateTimeInfinityTests : TestBase, IDisposable [Test, TestCaseSource(nameof(TimestampDateTimeValues))] public Task Timestamp_DateTime(DateTime dateTime, string sqlLiteral, string infinityConvertedSqlLiteral) => AssertType(dateTime, DisableDateTimeInfinityConversions ? sqlLiteral : infinityConvertedSqlLiteral, - "timestamp without time zone", NpgsqlDbType.Timestamp, DbType.DateTime2, + "timestamp without time zone", DbType.DateTime2, comparer: MaxValuePrecisionLenientComparer, isDefault: true); [Test, TestCaseSource(nameof(TimestampTzDateTimeValues))] public Task TimestampTz_DateTime(DateTime dateTime, string sqlLiteral, string infinityConvertedSqlLiteral) => AssertType(new(dateTime.Ticks, DateTimeKind.Utc), DisableDateTimeInfinityConversions ? sqlLiteral : infinityConvertedSqlLiteral, - "timestamp with time zone", NpgsqlDbType.TimestampTz, DbType.DateTime, DbType.DateTime, + "timestamp with time zone", DbType.DateTime, DbType.DateTime, comparer: MaxValuePrecisionLenientComparer, isDefault: true); [Test, TestCaseSource(nameof(TimestampTzDateTimeOffsetValues))] public Task TimestampTz_DateTimeOffset(DateTimeOffset dateTime, string sqlLiteral, string infinityConvertedSqlLiteral) => AssertType(dateTime, DisableDateTimeInfinityConversions ? 
sqlLiteral : infinityConvertedSqlLiteral, - "timestamp with time zone", NpgsqlDbType.TimestampTz, DbType.DateTime, DbType.DateTime, + "timestamp with time zone", DbType.DateTime, DbType.DateTime, comparer: (expected, actual) => MaxValuePrecisionLenientComparer(expected.DateTime, actual.DateTime), isDefault: false); [Test, TestCaseSource(nameof(DateDateTimeValues))] public Task Date_DateTime(DateTime dateTime, string sqlLiteral, string infinityConvertedSqlLiteral) => AssertType(DisableDateTimeInfinityConversions ? dateTime.Date : dateTime, DisableDateTimeInfinityConversions ? sqlLiteral : infinityConvertedSqlLiteral, - "date", NpgsqlDbType.Date, DbType.Date, + "date", DbType.Date, isDefault: false); static readonly TestCaseData[] DateOnlyDateTimeValues = @@ -98,7 +97,7 @@ public Task Date_DateTime(DateTime dateTime, string sqlLiteral, string infinityC [Test, TestCaseSource(nameof(DateOnlyDateTimeValues))] public Task Date_DateOnly(DateOnly dateTime, string sqlLiteral, string infinityConvertedSqlLiteral) => AssertType(dateTime, - DisableDateTimeInfinityConversions ? sqlLiteral : infinityConvertedSqlLiteral, "date", NpgsqlDbType.Date, DbType.Date, + DisableDateTimeInfinityConversions ? sqlLiteral : infinityConvertedSqlLiteral, "date", DbType.Date, isDefault: false); NpgsqlDataSource? 
_dataSource; diff --git a/test/Npgsql.Tests/Types/DateTimeTests.cs b/test/Npgsql.Tests/Types/DateTimeTests.cs index fe7bb1bd27..99736fb104 100644 --- a/test/Npgsql.Tests/Types/DateTimeTests.cs +++ b/test/Npgsql.Tests/Types/DateTimeTests.cs @@ -15,20 +15,20 @@ public class DateTimeTests : TestBase [Test] public Task Date_as_DateOnly() - => AssertType(new DateOnly(2020, 10, 1), "2020-10-01", "date", NpgsqlDbType.Date, DbType.Date); + => AssertType(new DateOnly(2020, 10, 1), "2020-10-01", "date", DbType.Date); [Test] public Task Date_as_DateTime() - => AssertType(new DateTime(2020, 10, 1), "2020-10-01", "date", NpgsqlDbType.Date, DbType.Date, isDefault: false); + => AssertType(new DateTime(2020, 10, 1), "2020-10-01", "date", DbType.Date, isDefault: false); [Test] public Task Date_as_DateTime_with_date_and_time_before_2000() - => AssertTypeWrite(new DateTime(1980, 10, 1, 11, 0, 0), "1980-10-01", "date", NpgsqlDbType.Date, DbType.Date, isDefault: false); + => AssertTypeWrite(new DateTime(1980, 10, 1, 11, 0, 0), "1980-10-01", "date", DbType.Date, isDefault: false); // Internal PostgreSQL representation (days since 2020-01-01), for out-of-range values. 
[Test] public Task Date_as_int() - => AssertType(7579, "2020-10-01", "date", NpgsqlDbType.Date, DbType.Date, isDefault: false); + => AssertType(7579, "2020-10-01", "date", DbType.Date, isDefault: false); [Test] public Task Daterange_as_NpgsqlRange_of_DateOnly() @@ -36,7 +36,6 @@ public Task Daterange_as_NpgsqlRange_of_DateOnly() new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false), "[2002-03-04,2002-03-06)", "daterange", - NpgsqlDbType.DateRange, skipArrayCheck: true); // NpgsqlRange[] is mapped to multirange by default, not array; test separately [Test] @@ -49,7 +48,6 @@ public Task Daterange_array_as_NpgsqlRange_of_DateOnly_array() }, """{"[2002-03-04,2002-03-06)","[2002-03-08,2002-03-09)"}""", "daterange[]", - NpgsqlDbType.DateRange | NpgsqlDbType.Array, isDefaultForWriting: false); [Test] @@ -58,7 +56,6 @@ public Task Daterange_as_NpgsqlRange_of_DateTime() new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false), "[2002-03-04,2002-03-06)", "daterange", - NpgsqlDbType.DateRange, isDefault: false); [Test] @@ -74,8 +71,7 @@ await AssertType( new NpgsqlRange(new(2002, 3, 8), true, new(2002, 3, 11), false) }, "{[2002-03-04,2002-03-06),[2002-03-08,2002-03-11)}", - "datemultirange", - NpgsqlDbType.DateMultirange); + "datemultirange"); } [Test] @@ -92,7 +88,6 @@ await AssertType( }, "{[2002-03-04,2002-03-06),[2002-03-08,2002-03-11)}", "datemultirange", - NpgsqlDbType.DateMultirange, isDefault: false); } @@ -106,7 +101,6 @@ public Task Time_as_TimeOnly() new TimeOnly(10, 45, 34, 500), "10:45:34.5", "time without time zone", - NpgsqlDbType.Time, DbType.Time); [Test] @@ -115,7 +109,6 @@ public Task Time_as_TimeSpan() new TimeSpan(0, 10, 45, 34, 500), "10:45:34.5", "time without time zone", - NpgsqlDbType.Time, DbType.Time, isDefault: false); @@ -137,7 +130,7 @@ public Task Time_as_TimeSpan() [Test, TestCaseSource(nameof(TimeTzValues))] public Task TimeTz_as_DateTimeOffset(DateTimeOffset time, string sqlLiteral) - => AssertType(time, sqlLiteral, "time with 
time zone", NpgsqlDbType.TimeTz, isDefault: false); + => AssertType(time, sqlLiteral, "time with time zone", isDefault: false); #endregion @@ -156,12 +149,12 @@ public Task TimeTz_as_DateTimeOffset(DateTimeOffset time, string sqlLiteral) [Test, TestCaseSource(nameof(TimestampValues))] public async Task Timestamp_as_DateTime(DateTime dateTime, string sqlLiteral) { - await AssertType(dateTime, sqlLiteral, "timestamp without time zone", NpgsqlDbType.Timestamp, DbType.DateTime2, + await AssertType(dateTime, sqlLiteral, "timestamp without time zone", DbType.DateTime2, // Explicitly check kind as well. comparer: (actual, expected) => actual.Kind == expected.Kind && actual.Equals(expected)); await AssertType( - new List { dateTime, dateTime }, $$"""{"{{sqlLiteral}}","{{sqlLiteral}}"}""", "timestamp without time zone[]", NpgsqlDbType.Timestamp | NpgsqlDbType.Array, + new List { dateTime, dateTime }, $$"""{"{{sqlLiteral}}","{{sqlLiteral}}"}""", "timestamp without time zone[]", isDefaultForReading: false); } @@ -175,7 +168,6 @@ public Task Timestamp_as_long() -54297202000000, "1998-04-12 13:26:38", "timestamp without time zone", - NpgsqlDbType.Timestamp, DbType.DateTime2, isDefault: false); @@ -194,7 +186,6 @@ public Task Tsrange_as_NpgsqlRange_of_DateTime() new(1998, 4, 12, 15, 26, 38, DateTimeKind.Local)), @"[""1998-04-12 13:26:38"",""1998-04-12 15:26:38""]", "tsrange", - NpgsqlDbType.TimestampRange, skipArrayCheck: true); // NpgsqlRange[] is mapped to multirange by default, not array; test separately [Test] @@ -211,7 +202,6 @@ public Task Tsrange_array_as_NpgsqlRange_of_DateTime_array() }, """{"[\"1998-04-12 13:26:38\",\"1998-04-12 15:26:38\"]","[\"1998-04-13 13:26:38\",\"1998-04-13 15:26:38\"]"}""", "tsrange[]", - NpgsqlDbType.TimestampRange | NpgsqlDbType.Array, isDefault: false); [Test] @@ -231,8 +221,7 @@ await AssertType( new(1998, 4, 13, 15, 26, 38, DateTimeKind.Local)), }, @"{[""1998-04-12 13:26:38"",""1998-04-12 15:26:38""],[""1998-04-13 13:26:38"",""1998-04-13 
15:26:38""]}", - "tsmultirange", - NpgsqlDbType.TimestampMultirange); + "tsmultirange"); } #endregion @@ -254,12 +243,12 @@ await AssertType( [Test, TestCaseSource(nameof(TimestampTzWriteValues))] public async Task Timestamptz_as_DateTime(DateTime dateTime, string sqlLiteral) { - await AssertType(dateTime, sqlLiteral, "timestamp with time zone", NpgsqlDbType.TimestampTz, DbType.DateTime, + await AssertType(dateTime, sqlLiteral, "timestamp with time zone", DbType.DateTime, // Explicitly check kind as well. comparer: (actual, expected) => actual.Kind == expected.Kind && actual.Equals(expected)); await AssertType( - new List { dateTime, dateTime }, $$"""{"{{sqlLiteral}}","{{sqlLiteral}}"}""", "timestamp with time zone[]", NpgsqlDbType.TimestampTz | NpgsqlDbType.Array, + new List { dateTime, dateTime }, $$"""{"{{sqlLiteral}}","{{sqlLiteral}}"}""", "timestamp with time zone[]", isDefaultForReading: false); } @@ -267,9 +256,9 @@ await AssertType( [Test] public async Task Timestamptz_infinity_as_DateTime() { - await AssertType(DateTime.MinValue, "-infinity", "timestamp with time zone", NpgsqlDbType.TimestampTz, DbType.DateTime, + await AssertType(DateTime.MinValue, "-infinity", "timestamp with time zone", DbType.DateTime, isDefault: false); - await AssertType(DateTime.MaxValue, "infinity", "timestamp with time zone", NpgsqlDbType.TimestampTz, DbType.DateTime, + await AssertType(DateTime.MaxValue, "infinity", "timestamp with time zone", DbType.DateTime, isDefault: false); } @@ -287,7 +276,6 @@ public async Task Timestamptz_as_DateTimeOffset_utc() new DateTimeOffset(1998, 4, 12, 13, 26, 38, TimeSpan.Zero), "1998-04-12 15:26:38+02", "timestamp with time zone", - NpgsqlDbType.TimestampTz, DbType.DateTime, isDefaultForReading: false); @@ -300,7 +288,6 @@ public Task Timestamptz_as_DateTimeOffset_utc_with_DbType_DateTimeOffset() new DateTimeOffset(1998, 4, 12, 13, 26, 38, TimeSpan.Zero), "1998-04-12 15:26:38+02", "timestamp with time zone", - NpgsqlDbType.TimestampTz, 
DbType.DateTimeOffset, inferredDbType: DbType.DateTime, isDefault: false); @@ -315,7 +302,6 @@ public Task Timestamptz_as_long() -54297202000000, "1998-04-12 15:26:38+02", "timestamp with time zone", - NpgsqlDbType.TimestampTz, DbType.DateTime, isDefault: false); @@ -330,7 +316,6 @@ public async Task Timestamptz_array_as_DateTimeOffset_array() }, """{"1998-04-12 15:26:38+02","1999-04-12 15:26:38+02"}""", "timestamp with time zone[]", - NpgsqlDbType.TimestampTz | NpgsqlDbType.Array, isDefaultForReading: false); Assert.That(dateTimeOffsets[0].Offset, Is.EqualTo(TimeSpan.Zero)); @@ -345,7 +330,6 @@ public Task Tstzrange_as_NpgsqlRange_of_DateTime() new DateTime(1998, 4, 12, 15, 26, 38, DateTimeKind.Utc)), @"[""1998-04-12 15:26:38+02"",""1998-04-12 17:26:38+02""]", "tstzrange", - NpgsqlDbType.TimestampTzRange, skipArrayCheck: true); // NpgsqlRange[] is mapped to multirange by default, not array; test separately [Test] @@ -362,7 +346,6 @@ public Task Tstzrange_array_as_NpgsqlRange_of_DateTime_array() }, """{"[\"1998-04-12 15:26:38+02\",\"1998-04-12 17:26:38+02\"]","[\"1998-04-13 15:26:38+02\",\"1998-04-13 17:26:38+02\"]"}""", "tstzrange[]", - NpgsqlDbType.TimestampTzRange | NpgsqlDbType.Array, isDefault: false); [Test] @@ -382,8 +365,7 @@ await AssertType( new DateTime(1998, 4, 13, 15, 26, 38, DateTimeKind.Utc)), }, @"{[""1998-04-12 15:26:38+02"",""1998-04-12 17:26:38+02""],[""1998-04-13 15:26:38+02"",""1998-04-13 17:26:38+02""]}", - "tstzmultirange", - NpgsqlDbType.TimestampTzMultirange); + "tstzmultirange"); } [Test] @@ -480,7 +462,6 @@ public async Task Array_of_nullable_timestamptz() }, @"{""1998-04-12 15:26:38+02"",NULL}", "timestamp with time zone[]", - NpgsqlDbType.TimestampTz | NpgsqlDbType.Array, isDefault: false); #endregion @@ -501,22 +482,20 @@ public async Task Array_of_nullable_timestamptz() [Test, TestCaseSource(nameof(IntervalValues))] public Task Interval_as_TimeSpan(TimeSpan timeSpan, string sqlLiteral) - => AssertType(timeSpan, sqlLiteral, "interval", 
NpgsqlDbType.Interval); + => AssertType(timeSpan, sqlLiteral, "interval"); [Test] public Task Interval_write_as_TimeSpan_truncates_ticks() => AssertTypeWrite( new TimeSpan(new TimeSpan(2, 3, 4).Ticks + 1), "02:03:04", - "interval", - NpgsqlDbType.Interval); + "interval"); [Test] public Task Interval_as_NpgsqlInterval() => AssertType( new NpgsqlInterval(2, 15, 7384005000), "2 mons 15 days 02:03:04.005", "interval", - NpgsqlDbType.Interval, isDefaultForReading: false); [Test] diff --git a/test/Npgsql.Tests/Types/DomainTests.cs b/test/Npgsql.Tests/Types/DomainTests.cs index 117204d307..dbfd147c39 100644 --- a/test/Npgsql.Tests/Types/DomainTests.cs +++ b/test/Npgsql.Tests/Types/DomainTests.cs @@ -95,7 +95,7 @@ await AssertType( new NpgsqlRange(1, 2), "[1,2]", rangeType, - npgsqlDbType: null, + isDataTypeInferredFromValue: false, isDefaultForWriting: false); } } diff --git a/test/Npgsql.Tests/Types/EnumTests.cs b/test/Npgsql.Tests/Types/EnumTests.cs index 7161e6408c..5f01534bcf 100644 --- a/test/Npgsql.Tests/Types/EnumTests.cs +++ b/test/Npgsql.Tests/Types/EnumTests.cs @@ -26,7 +26,7 @@ public async Task Data_source_mapping() dataSourceBuilder.MapEnum(type); await using var dataSource = dataSourceBuilder.Build(); - await AssertType(dataSource, Mood.Happy, "happy", type, npgsqlDbType: null); + await AssertType(dataSource, Mood.Happy, "happy", type, isDataTypeInferredFromValue: false); } [Test] @@ -43,7 +43,7 @@ public async Task Data_source_unmap() await using var dataSource = dataSourceBuilder.Build(); Assert.That(isUnmapSuccessful); - Assert.ThrowsAsync(() => AssertType(dataSource, Mood.Happy, "happy", type, npgsqlDbType: null)); + Assert.ThrowsAsync(() => AssertType(dataSource, Mood.Happy, "happy", type, isDataTypeInferredFromValue: false)); } [Test] @@ -56,7 +56,7 @@ public async Task Data_source_mapping_non_generic() var dataSourceBuilder = CreateDataSourceBuilder(); dataSourceBuilder.MapEnum(typeof(Mood), type); await using var dataSource = 
dataSourceBuilder.Build(); - await AssertType(dataSource, Mood.Happy, "happy", type, npgsqlDbType: null); + await AssertType(dataSource, Mood.Happy, "happy", type, isDataTypeInferredFromValue: false); } [Test] @@ -73,7 +73,7 @@ public async Task Data_source_unmap_non_generic() await using var dataSource = dataSourceBuilder.Build(); Assert.That(isUnmapSuccessful); - Assert.ThrowsAsync(() => AssertType(dataSource, Mood.Happy, "happy", type, npgsqlDbType: null)); + Assert.ThrowsAsync(() => AssertType(dataSource, Mood.Happy, "happy", type, isDataTypeInferredFromValue: false)); } [Test] @@ -91,7 +91,7 @@ await adminConnection.ExecuteNonQueryAsync($@" dataSourceBuilder.MapEnum(type2); await using var dataSource = dataSourceBuilder.Build(); - await AssertType(dataSource, new[] { Mood.Ok, Mood.Sad }, "{ok,sad}", type1 + "[]", npgsqlDbType: null); + await AssertType(dataSource, new[] { Mood.Ok, Mood.Sad }, "{ok,sad}", type1 + "[]", isDataTypeInferredFromValue: false); } [Test] @@ -105,7 +105,7 @@ public async Task Array() dataSourceBuilder.MapEnum(type); await using var dataSource = dataSourceBuilder.Build(); - await AssertType(dataSource, new[] { Mood.Ok, Mood.Happy }, "{ok,happy}", type + "[]", npgsqlDbType: null); + await AssertType(dataSource, new[] { Mood.Ok, Mood.Happy }, "{ok,happy}", type + "[]", isDataTypeInferredFromValue: false); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/859")] @@ -119,9 +119,9 @@ public async Task Name_translation_default_snake_case() dataSourceBuilder.MapEnum(enumName1); await using var dataSource = dataSourceBuilder.Build(); - await AssertType(dataSource, NameTranslationEnum.Simple, "simple", enumName1, npgsqlDbType: null); - await AssertType(dataSource, NameTranslationEnum.TwoWords, "two_words", enumName1, npgsqlDbType: null); - await AssertType(dataSource, NameTranslationEnum.SomeClrName, "some_database_name", enumName1, npgsqlDbType: null); + await AssertType(dataSource, NameTranslationEnum.Simple, "simple", enumName1, 
isDataTypeInferredFromValue: false); + await AssertType(dataSource, NameTranslationEnum.TwoWords, "two_words", enumName1, isDataTypeInferredFromValue: false); + await AssertType(dataSource, NameTranslationEnum.SomeClrName, "some_database_name", enumName1, isDataTypeInferredFromValue: false); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/859")] @@ -135,9 +135,9 @@ public async Task Name_translation_null() dataSourceBuilder.MapEnum(type, nameTranslator: new NpgsqlNullNameTranslator()); await using var dataSource = dataSourceBuilder.Build(); - await AssertType(dataSource, NameTranslationEnum.Simple, "Simple", type, npgsqlDbType: null); - await AssertType(dataSource, NameTranslationEnum.TwoWords, "TwoWords", type, npgsqlDbType: null); - await AssertType(dataSource, NameTranslationEnum.SomeClrName, "some_database_name", type, npgsqlDbType: null); + await AssertType(dataSource, NameTranslationEnum.Simple, "Simple", type, isDataTypeInferredFromValue: false); + await AssertType(dataSource, NameTranslationEnum.TwoWords, "TwoWords", type, isDataTypeInferredFromValue: false); + await AssertType(dataSource, NameTranslationEnum.SomeClrName, "some_database_name", type, isDataTypeInferredFromValue: false); } [Test] @@ -152,8 +152,8 @@ await connection.ExecuteNonQueryAsync(@$" CREATE TYPE {type2} AS ENUM ('value1', 'value2');"); await connection.ReloadTypesAsync(); - await AssertType(connection, Mood.Happy, "happy", type1, npgsqlDbType: null, isDefault: false); - await AssertType(connection, AnotherEnum.Value2, "value2", type2, npgsqlDbType: null, isDefault: false); + await AssertType(connection, Mood.Happy, "happy", type1, isDataTypeInferredFromValue: false, isDefault: false); + await AssertType(connection, AnotherEnum.Value2, "value2", type2, isDataTypeInferredFromValue: false, isDefault: false); } [Test] @@ -186,7 +186,7 @@ public async Task Unmapped_enum_as_string() await connection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS ENUM ('sad', 'ok', 'happy')"); await 
connection.ReloadTypesAsync(); - await AssertType(connection, "happy", "happy", type, npgsqlDbType: null, isDefaultForWriting: false); + await AssertType(connection, "happy", "happy", type, isDataTypeInferredFromValue: false, isDefaultForWriting: false); } enum NameTranslationEnum @@ -212,8 +212,8 @@ await adminConnection.ExecuteNonQueryAsync($@" dataSourceBuilder.MapEnum($"{schema2}.my_enum"); await using var dataSource = dataSourceBuilder.Build(); - await AssertType(dataSource, Enum1.One, "one", $"{schema1}.my_enum", npgsqlDbType: null); - await AssertType(dataSource, Enum2.Alpha, "alpha", $"{schema2}.my_enum", npgsqlDbType: null); + await AssertType(dataSource, Enum1.One, "one", $"{schema1}.my_enum", isDataTypeInferredFromValue: false); + await AssertType(dataSource, Enum2.Alpha, "alpha", $"{schema2}.my_enum", isDataTypeInferredFromValue: false); } enum Enum1 { One } diff --git a/test/Npgsql.Tests/Types/FullTextSearchTests.cs b/test/Npgsql.Tests/Types/FullTextSearchTests.cs index bccefdef3f..de04814346 100644 --- a/test/Npgsql.Tests/Types/FullTextSearchTests.cs +++ b/test/Npgsql.Tests/Types/FullTextSearchTests.cs @@ -16,8 +16,7 @@ public Task TsVector() => AssertType( NpgsqlTsVector.Parse("'1' '2' 'a':24,25A,26B,27,28,12345C 'b' 'c' 'd'"), "'1' '2' 'a':24,25A,26B,27,28,12345C 'b' 'c' 'd'", - "tsvector", - NpgsqlDbType.TsVector); + "tsvector"); public static IEnumerable TsQueryTestCases() => new[] { @@ -53,7 +52,7 @@ public static IEnumerable TsQueryTestCases() => new[] [Test] [TestCaseSource(nameof(TsQueryTestCases))] public Task TsQuery(string sqlLiteral, NpgsqlTsQuery query) - => AssertType(query, sqlLiteral, "tsquery", NpgsqlDbType.TsQuery); + => AssertType(query, sqlLiteral, "tsquery"); [Test] public async Task Full_text_search_not_supported_by_default_on_NpgsqlSlimSourceBuilder() @@ -90,7 +89,7 @@ public async Task NpgsqlSlimSourceBuilder_EnableFullTextSearch() dataSourceBuilder.EnableFullTextSearch(); await using var dataSource = dataSourceBuilder.Build(); 
- await AssertType(new NpgsqlTsQueryLexeme("a"), "'a'", "tsquery", NpgsqlDbType.TsQuery); - await AssertType(NpgsqlTsVector.Parse("'1'"), "'1'", "tsvector", NpgsqlDbType.TsVector); + await AssertType(new NpgsqlTsQueryLexeme("a"), "'a'", "tsquery"); + await AssertType(NpgsqlTsVector.Parse("'1'"), "'1'", "tsvector"); } } diff --git a/test/Npgsql.Tests/Types/GeometricTypeTests.cs b/test/Npgsql.Tests/Types/GeometricTypeTests.cs index 8c858b4f64..016aee1b03 100644 --- a/test/Npgsql.Tests/Types/GeometricTypeTests.cs +++ b/test/Npgsql.Tests/Types/GeometricTypeTests.cs @@ -14,15 +14,15 @@ class GeometricTypeTests(MultiplexingMode multiplexingMode) : MultiplexingTestBa { [Test] public Task Point() - => AssertType(new NpgsqlPoint(1.2, 3.4), "(1.2,3.4)", "point", NpgsqlDbType.Point); + => AssertType(new NpgsqlPoint(1.2, 3.4), "(1.2,3.4)", "point"); [Test] public Task Line() - => AssertType(new NpgsqlLine(1, 2, 3), "{1,2,3}", "line", NpgsqlDbType.Line); + => AssertType(new NpgsqlLine(1, 2, 3), "{1,2,3}", "line"); [Test] public Task LineSegment() - => AssertType(new NpgsqlLSeg(1, 2, 3, 4), "[(1,2),(3,4)]", "lseg", NpgsqlDbType.LSeg); + => AssertType(new NpgsqlLSeg(1, 2, 3, 4), "[(1,2),(3,4)]", "lseg"); [Test] public async Task Box() @@ -31,21 +31,18 @@ await AssertType( new NpgsqlBox(top: 3, right: 4, bottom: 1, left: 2), "(4,3),(2,1)", "box", - NpgsqlDbType.Box, skipArrayCheck: true); // Uses semicolon instead of comma as separator await AssertType( new NpgsqlBox(top: -10, right: 0, bottom: -20, left: -10), "(0,-10),(-10,-20)", "box", - NpgsqlDbType.Box, skipArrayCheck: true); // Uses semicolon instead of comma as separator await AssertType( new NpgsqlBox(top: 1, right: 2, bottom: 3, left: 4), "(4,3),(2,1)", "box", - NpgsqlDbType.Box, skipArrayCheck: true); // Uses semicolon instead of comma as separator var swapped = new NpgsqlBox(top: -20, right: -10, bottom: -10, left: 0); @@ -54,21 +51,18 @@ await AssertType( swapped, "(0,-10),(-10,-20)", "box", - NpgsqlDbType.Box, 
skipArrayCheck: true); // Uses semicolon instead of comma as separator await AssertType( swapped with { UpperRight = new NpgsqlPoint(-20,-10) }, "(-10,-10),(-20,-20)", "box", - NpgsqlDbType.Box, skipArrayCheck: true); // Uses semicolon instead of comma as separator await AssertType( swapped with { LowerLeft = new NpgsqlPoint(10, 10) }, "(10,10),(0,-10)", "box", - NpgsqlDbType.Box, skipArrayCheck: true); // Uses semicolon instead of comma as separator } @@ -85,9 +79,7 @@ public async Task Box_array() await AssertType( data, "{(4,3),(2,1);(6,5),(4,3);(0,-10),(-10,-20)}", - "box[]", - NpgsqlDbType.Box | NpgsqlDbType.Array - ); + "box[]"); var swappedData = new[] { @@ -99,9 +91,7 @@ await AssertType( await AssertType( swappedData, "{(4,3),(2,1);(6,5),(4,3);(0,-10),(-10,-20)}", - "box[]", - NpgsqlDbType.Box | NpgsqlDbType.Array - ); + "box[]"); } [Test] @@ -109,30 +99,26 @@ public Task Path_closed() => AssertType( new NpgsqlPath([new NpgsqlPoint(1, 2), new NpgsqlPoint(3, 4)], false), "((1,2),(3,4))", - "path", - NpgsqlDbType.Path); + "path"); [Test] public Task Path_open() => AssertType( new NpgsqlPath([new NpgsqlPoint(1, 2), new NpgsqlPoint(3, 4)], true), "[(1,2),(3,4)]", - "path", - NpgsqlDbType.Path); + "path"); [Test] public Task Polygon() => AssertType( new NpgsqlPolygon(new NpgsqlPoint(1, 2), new NpgsqlPoint(3, 4)), "((1,2),(3,4))", - "polygon", - NpgsqlDbType.Polygon); + "polygon"); [Test] public Task Circle() => AssertType( new NpgsqlCircle(1, 2, 0.5), "<(1,2),0.5>", - "circle", - NpgsqlDbType.Circle); + "circle"); } diff --git a/test/Npgsql.Tests/Types/HstoreTests.cs b/test/Npgsql.Tests/Types/HstoreTests.cs index 366b38bb44..20706dfd39 100644 --- a/test/Npgsql.Tests/Types/HstoreTests.cs +++ b/test/Npgsql.Tests/Types/HstoreTests.cs @@ -1,7 +1,6 @@ using System.Collections.Generic; using System.Collections.Immutable; using System.Threading.Tasks; -using NpgsqlTypes; using NUnit.Framework; namespace Npgsql.Tests.Types; @@ -18,12 +17,11 @@ public Task Hstore() 
{"cd", "hello"} }, @"""a""=>""3"", ""b""=>NULL, ""cd""=>""hello""", - "hstore", - NpgsqlDbType.Hstore, isNpgsqlDbTypeInferredFromClrType: false); + "hstore", isDataTypeInferredFromValue: false); [Test] public Task Hstore_empty() - => AssertType(new Dictionary(), @"", "hstore", NpgsqlDbType.Hstore, isNpgsqlDbTypeInferredFromClrType: false); + => AssertType(new Dictionary(), @"", "hstore", isDataTypeInferredFromValue: false); [Test] public Task Hstore_as_ImmutableDictionary() @@ -38,8 +36,7 @@ public Task Hstore_as_ImmutableDictionary() immutableDictionary, @"""a""=>""3"", ""b""=>NULL, ""cd""=>""hello""", "hstore", - NpgsqlDbType.Hstore, - isDefaultForReading: false, isNpgsqlDbTypeInferredFromClrType: false); + isDefaultForReading: false, isDataTypeInferredFromValue: false); } [Test] @@ -53,8 +50,7 @@ public Task Hstore_as_IDictionary() }, @"""a""=>""3"", ""b""=>NULL, ""cd""=>""hello""", "hstore", - NpgsqlDbType.Hstore, - isDefaultForReading: false, isNpgsqlDbTypeInferredFromClrType: false); + isDefaultForReading: false, isDataTypeInferredFromValue: false); [OneTimeSetUp] public async Task SetUp() diff --git a/test/Npgsql.Tests/Types/JsonDynamicTests.cs b/test/Npgsql.Tests/Types/JsonDynamicTests.cs index 036374a2d2..1a7337ea18 100644 --- a/test/Npgsql.Tests/Types/JsonDynamicTests.cs +++ b/test/Npgsql.Tests/Types/JsonDynamicTests.cs @@ -3,15 +3,14 @@ using System.Text.Json.Serialization; using System.Threading.Tasks; using Npgsql.Properties; -using NpgsqlTypes; using NUnit.Framework; namespace Npgsql.Tests.Types; -[TestFixture(MultiplexingMode.NonMultiplexing, NpgsqlDbType.Json)] -[TestFixture(MultiplexingMode.NonMultiplexing, NpgsqlDbType.Jsonb)] -[TestFixture(MultiplexingMode.Multiplexing, NpgsqlDbType.Json)] -[TestFixture(MultiplexingMode.Multiplexing, NpgsqlDbType.Jsonb)] +[TestFixture(MultiplexingMode.NonMultiplexing, "json")] +[TestFixture(MultiplexingMode.NonMultiplexing, "jsonb")] +[TestFixture(MultiplexingMode.Multiplexing, "json")] 
+[TestFixture(MultiplexingMode.Multiplexing, "jsonb")] public class JsonDynamicTests : MultiplexingTestBase { [Test] @@ -27,7 +26,6 @@ public async Task As_poco() ? """{"Date": "2019-09-01T00:00:00", "Summary": "Partly cloudy", "TemperatureC": 10}""" : """{"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}""", PostgresType, - NpgsqlDbType, isDefault: false); [Test] @@ -48,7 +46,6 @@ await AssertType( ? $$"""{"Date": "2019-09-01T00:00:00", "Summary": "{{bigString}}", "TemperatureC": 10}""" : $$"""{"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"{{bigString}}"}""", PostgresType, - NpgsqlDbType, isDefault: false); } @@ -125,7 +122,6 @@ await AssertTypeWrite( ? """{"date": "2019-09-01T00:00:00", "summary": "Partly cloudy", "temperatureC": 10}""" : """{"date":"2019-09-01T00:00:00","temperatureC":10,"summary":"Partly cloudy"}""", PostgresType, - NpgsqlDbType, isDefault: false); } @@ -151,9 +147,8 @@ await AssertType( ? """{"Date": "2019-09-01T00:00:00", "Summary": "Partly cloudy", "TemperatureC": 10}""" : """{"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}""", PostgresType, - NpgsqlDbType, isDefaultForReading: false, - isNpgsqlDbTypeInferredFromClrType: false); + isDataTypeInferredFromValue: false); } #region Polymorphic @@ -183,7 +178,7 @@ public async Task Poco_polymorphic_mapping() ? """{"Date": "2019-09-01T00:00:00", "$type": "extended", "Summary": "Partly cloudy", "TemperatureC": 10, "TemperatureF": 49}""" : """{"$type":"extended","TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}"""; - await AssertTypeWrite(dataSource, value, sql, PostgresType, NpgsqlDbType, isNpgsqlDbTypeInferredFromClrType: false); + await AssertTypeWrite(dataSource, value, sql, PostgresType, isDataTypeInferredFromValue: false); await AssertTypeRead(dataSource, sql, PostgresType, value, isDefault: false); } @@ -212,8 +207,8 @@ public async Task Poco_polymorphic_mapping_read_parents() ? 
"""{"Date": "2019-09-01T00:00:00", "$type": "extended", "Summary": "Partly cloudy", "TemperatureC": 10, "TemperatureF": 49}""" : """{"$type":"extended","TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}"""; - await AssertTypeWrite(dataSource, value, sql, PostgresType, NpgsqlDbType, - isNpgsqlDbTypeInferredFromClrType: false); + await AssertTypeWrite(dataSource, value, sql, PostgresType, + isDataTypeInferredFromValue: false); await AssertTypeRead(dataSource, sql, PostgresType, value, isDefault: false); await AssertTypeRead(dataSource, sql, PostgresType, @@ -252,7 +247,7 @@ public async Task Poco_exact_polymorphic_mapping() ? """{"Date": "2019-09-01T00:00:00", "Summary": "Partly cloudy", "TemperatureC": 10, "TemperatureF": 49}""" : """{"TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}"""; - await AssertTypeWrite(dataSource, value, sql, PostgresType, NpgsqlDbType, isNpgsqlDbTypeInferredFromClrType: false); + await AssertTypeWrite(dataSource, value, sql, PostgresType, isDataTypeInferredFromValue: false); await AssertTypeRead(dataSource, sql, PostgresType, value, isDefault: false); } @@ -280,7 +275,7 @@ public async Task Poco_unspecified_polymorphic_mapping() ? """{"Date": "2019-09-01T00:00:00", "$type": "extended", "Summary": "Partly cloudy", "TemperatureC": 10, "TemperatureF": 49}""" : """{"$type":"extended","TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}"""; - await AssertTypeWrite(dataSource, value, sql, PostgresType, NpgsqlDbType, isDefault: false); + await AssertTypeWrite(dataSource, value, sql, PostgresType, isDefault: false); // Reading as DerivedWeatherForecast should not cause us to get an instance of ExtendedDerivedWeatherForecast (as it doesn't define JsonDerivedType) await AssertTypeRead(dataSource, sql, PostgresType, @@ -319,7 +314,7 @@ public async Task Poco_polymorphic_mapping_without_AllowOutOfOrderMetadataProper ? 
"""{"Date": "2019-09-01T00:00:00", "Summary": "Partly cloudy", "TemperatureC": 10, "TemperatureF": 49}""" : """{"$type":"extended","TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}"""; - await AssertTypeWrite(dataSource, value, sql, PostgresType, NpgsqlDbType, isNpgsqlDbTypeInferredFromClrType: false); + await AssertTypeWrite(dataSource, value, sql, PostgresType, isDataTypeInferredFromValue: false); // As we have disabled polymorphism for jsonb when AllowOutOfOrderMetadataProperties = false we should be able to read it as equalt to a WeatherForecast instance. if (IsJsonb) @@ -372,7 +367,7 @@ public async Task Poco_unspecified_polymorphic_mapping_without_AllowOutOfOrderMe ? """{"Date": "2019-09-01T00:00:00", "Summary": "Partly cloudy", "TemperatureC": 10, "TemperatureF": 49}""" : """{"$type":"extended","TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}"""; - await AssertTypeWrite(dataSource, value, sql, PostgresType, NpgsqlDbType, isDefault: false); + await AssertTypeWrite(dataSource, value, sql, PostgresType, isDefault: false); // As we have disabled polymorphism for jsonb when AllowOutOfOrderMetadataProperties = false we should be able to read it as equalt to a WeatherForecast instance. 
if (IsJsonb) @@ -422,16 +417,16 @@ record ExtendedDerivedWeatherForecast : DerivedWeatherForecast #endregion Polymorphic - public JsonDynamicTests(MultiplexingMode multiplexingMode, NpgsqlDbType npgsqlDbType) + public JsonDynamicTests(MultiplexingMode multiplexingMode, string dataTypeName) : base(multiplexingMode) { DataSource = CreateDataSource(b => b.EnableDynamicJson()); - if (npgsqlDbType == NpgsqlDbType.Jsonb) + if (dataTypeName == "jsonb") using (var conn = OpenConnection()) TestUtil.MinimumPgVersion(conn, "9.4.0", "JSONB data type not yet introduced"); - NpgsqlDbType = npgsqlDbType; + PostgresType = dataTypeName; } protected override NpgsqlDataSource DataSource { get; } @@ -442,7 +437,6 @@ protected void CleanUpDataSource() DataSource.Dispose(); } - bool IsJsonb => NpgsqlDbType == NpgsqlDbType.Jsonb; - string PostgresType => IsJsonb ? "jsonb" : "json"; - readonly NpgsqlDbType NpgsqlDbType; + bool IsJsonb => PostgresType == "jsonb"; + string PostgresType { get; } } diff --git a/test/Npgsql.Tests/Types/JsonPathTests.cs b/test/Npgsql.Tests/Types/JsonPathTests.cs index ebd4a468c1..2a1d3b8b52 100644 --- a/test/Npgsql.Tests/Types/JsonPathTests.cs +++ b/test/Npgsql.Tests/Types/JsonPathTests.cs @@ -22,7 +22,7 @@ public async Task JsonPath(string jsonPath) using var conn = await OpenConnectionAsync(); MinimumPgVersion(conn, "12.0", "The jsonpath type was introduced in PostgreSQL 12"); await AssertType( - jsonPath, jsonPath, "jsonpath", NpgsqlDbType.JsonPath, isDefaultForWriting: false, isNpgsqlDbTypeInferredFromClrType: false, + jsonPath, jsonPath, "jsonpath", isDefaultForWriting: false, isDataTypeInferredFromValue: false, inferredDbType: DbType.Object); } diff --git a/test/Npgsql.Tests/Types/JsonTests.cs b/test/Npgsql.Tests/Types/JsonTests.cs index 84b95389bb..511544d430 100644 --- a/test/Npgsql.Tests/Types/JsonTests.cs +++ b/test/Npgsql.Tests/Types/JsonTests.cs @@ -4,20 +4,19 @@ using System.Text.Json; using System.Text.Json.Nodes; using System.Threading.Tasks; 
-using NpgsqlTypes; using NUnit.Framework; namespace Npgsql.Tests.Types; -[TestFixture(MultiplexingMode.NonMultiplexing, NpgsqlDbType.Json)] -[TestFixture(MultiplexingMode.NonMultiplexing, NpgsqlDbType.Jsonb)] -[TestFixture(MultiplexingMode.Multiplexing, NpgsqlDbType.Json)] -[TestFixture(MultiplexingMode.Multiplexing, NpgsqlDbType.Jsonb)] +[TestFixture(MultiplexingMode.NonMultiplexing, "json")] +[TestFixture(MultiplexingMode.NonMultiplexing, "jsonb")] +[TestFixture(MultiplexingMode.Multiplexing, "json")] +[TestFixture(MultiplexingMode.Multiplexing, "jsonb")] public class JsonTests : MultiplexingTestBase { [Test] public async Task As_string() - => await AssertType("""{"K": "V"}""", """{"K": "V"}""", PostgresType, NpgsqlDbType, isDefaultForWriting: false); + => await AssertType("""{"K": "V"}""", """{"K": "V"}""", PostgresType, isDefaultForWriting: false); [Test] public async Task As_string_long() @@ -30,7 +29,7 @@ public async Task As_string_long() .Append(@"""}") .ToString(); - await AssertType(value, value, PostgresType, NpgsqlDbType, isDefaultForWriting: false); + await AssertType(value, value, PostgresType, isDefaultForWriting: false); } [Test] @@ -46,25 +45,25 @@ public async Task As_string_with_GetTextReader() [Test] public async Task As_char_array() - => await AssertType("""{"K": "V"}""".ToCharArray(), """{"K": "V"}""", PostgresType, NpgsqlDbType, isDefault: false); + => await AssertType("""{"K": "V"}""".ToCharArray(), """{"K": "V"}""", PostgresType, isDefault: false); [Test] public async Task As_bytes() - => await AssertType("""{"K": "V"}"""u8.ToArray(), """{"K": "V"}""", PostgresType, NpgsqlDbType, isDefault: false); + => await AssertType("""{"K": "V"}"""u8.ToArray(), """{"K": "V"}""", PostgresType, isDefault: false); [Test] public async Task Write_as_ReadOnlyMemory_of_byte() - => await AssertTypeWrite(new ReadOnlyMemory("""{"K": "V"}"""u8.ToArray()), """{"K": "V"}""", PostgresType, NpgsqlDbType, + => await AssertTypeWrite(new ReadOnlyMemory("""{"K": 
"V"}"""u8.ToArray()), """{"K": "V"}""", PostgresType, isDefault: false); [Test] public async Task Write_as_ArraySegment_of_char() - => await AssertTypeWrite(new ArraySegment("""{"K": "V"}""".ToCharArray()), """{"K": "V"}""", PostgresType, NpgsqlDbType, + => await AssertTypeWrite(new ArraySegment("""{"K": "V"}""".ToCharArray()), """{"K": "V"}""", PostgresType, isDefault: false); [Test] public Task As_MemoryStream() - => AssertTypeWrite(() => new MemoryStream("""{"K": "V"}"""u8.ToArray()), """{"K": "V"}""", PostgresType, NpgsqlDbType, isDefault: false); + => AssertTypeWrite(() => new MemoryStream("""{"K": "V"}"""u8.ToArray()), """{"K": "V"}""", PostgresType, isDefault: false); [Test] public async Task As_JsonDocument() @@ -72,7 +71,6 @@ public async Task As_JsonDocument() JsonDocument.Parse("""{"K": "V"}"""), IsJsonb ? """{"K": "V"}""" : """{"K":"V"}""", PostgresType, - NpgsqlDbType, isDefault: false, comparer: (x, y) => x.RootElement.GetProperty("K").GetString() == y.RootElement.GetProperty("K").GetString()); @@ -82,7 +80,6 @@ public async Task As_JsonDocument_with_null_root() JsonDocument.Parse("null"), "null", PostgresType, - NpgsqlDbType, isDefault: false, comparer: (x, y) => x.RootElement.ValueKind == y.RootElement.ValueKind, skipArrayCheck: true); @@ -93,7 +90,6 @@ public async Task As_JsonElement_with_null_root() JsonDocument.Parse("null").RootElement, "null", PostgresType, - NpgsqlDbType, isDefault: false, comparer: (x, y) => x.ValueKind == y.ValueKind, skipArrayCheck: true); @@ -116,9 +112,8 @@ public Task Roundtrip_string() @"{""p"": 1}", @"{""p"": 1}", PostgresType, - NpgsqlDbType, isDefault: false, - isNpgsqlDbTypeInferredFromClrType: false); + isDataTypeInferredFromValue: false); [Test] public Task Roundtrip_char_array() @@ -126,9 +121,8 @@ public Task Roundtrip_char_array() @"{""p"": 1}".ToCharArray(), @"{""p"": 1}", PostgresType, - NpgsqlDbType, isDefault: false, - isNpgsqlDbTypeInferredFromClrType: false); + isDataTypeInferredFromValue: false); [Test] 
public Task Roundtrip_byte_array() @@ -136,9 +130,8 @@ public Task Roundtrip_byte_array() Encoding.ASCII.GetBytes(@"{""p"": 1}"), @"{""p"": 1}", PostgresType, - NpgsqlDbType, isDefault: false, - isNpgsqlDbTypeInferredFromClrType: false); + isDataTypeInferredFromValue: false); [Test] [IssueLink("https://github.com/npgsql/npgsql/issues/2811")] @@ -172,11 +165,10 @@ public Task Roundtrip_JsonObject() new JsonObject { ["Bar"] = 8 }, IsJsonb ? """{"Bar": 8}""" : """{"Bar":8}""", PostgresType, - NpgsqlDbType, // By default we map JsonObject to jsonb isDefaultForWriting: IsJsonb, isDefaultForReading: false, - isNpgsqlDbTypeInferredFromClrType: false, + isDataTypeInferredFromValue: false, comparer: (x, y) => x.ToString() == y.ToString()); [Test] @@ -185,11 +177,10 @@ public Task Roundtrip_JsonArray() new JsonArray { 1, 2, 3 }, IsJsonb ? "[1, 2, 3]" : "[1,2,3]", PostgresType, - NpgsqlDbType, // By default we map JsonArray to jsonb isDefaultForWriting: IsJsonb, isDefaultForReading: false, - isNpgsqlDbTypeInferredFromClrType: false, + isDataTypeInferredFromValue: false, comparer: (x, y) => x.ToString() == y.ToString()); [Test] @@ -224,17 +215,16 @@ public async Task Write_jsonobject_array_without_npgsqldbtype() await cmd.ExecuteNonQueryAsync(); } - public JsonTests(MultiplexingMode multiplexingMode, NpgsqlDbType npgsqlDbType) + public JsonTests(MultiplexingMode multiplexingMode, string dataTypeName) : base(multiplexingMode) { - if (npgsqlDbType == NpgsqlDbType.Jsonb) + if (dataTypeName == "jsonb") using (var conn = OpenConnection()) TestUtil.MinimumPgVersion(conn, "9.4.0", "JSONB data type not yet introduced"); - NpgsqlDbType = npgsqlDbType; + PostgresType = dataTypeName; } - bool IsJsonb => NpgsqlDbType == NpgsqlDbType.Jsonb; - string PostgresType => IsJsonb ? 
"jsonb" : "json"; - readonly NpgsqlDbType NpgsqlDbType; + bool IsJsonb => PostgresType == "jsonb"; + string PostgresType { get; } } diff --git a/test/Npgsql.Tests/Types/LTreeTests.cs b/test/Npgsql.Tests/Types/LTreeTests.cs index c90cc88a20..035debff5c 100644 --- a/test/Npgsql.Tests/Types/LTreeTests.cs +++ b/test/Npgsql.Tests/Types/LTreeTests.cs @@ -9,15 +9,15 @@ public class LTreeTests(MultiplexingMode multiplexingMode) : MultiplexingTestBas { [Test] public Task LQuery() - => AssertType("Top.Science.*", "Top.Science.*", "lquery", NpgsqlDbType.LQuery, isDefaultForWriting: false); + => AssertType("Top.Science.*", "Top.Science.*", "lquery", isDefaultForWriting: false); [Test] public Task LTree() - => AssertType("Top.Science.Astronomy", "Top.Science.Astronomy", "ltree", NpgsqlDbType.LTree, isDefaultForWriting: false); + => AssertType("Top.Science.Astronomy", "Top.Science.Astronomy", "ltree", isDefaultForWriting: false); [Test] public Task LTxtQuery() - => AssertType("Science & Astronomy", "Science & Astronomy", "ltxtquery", NpgsqlDbType.LTxtQuery, isDefaultForWriting: false); + => AssertType("Science & Astronomy", "Science & Astronomy", "ltxtquery", isDefaultForWriting: false); [Test] public async Task LTree_not_supported_by_default_on_NpgsqlSlimSourceBuilder() @@ -42,7 +42,7 @@ public async Task NpgsqlSlimSourceBuilder_EnableLTree() dataSourceBuilder.EnableLTree(); await using var dataSource = dataSourceBuilder.Build(); - await AssertType(dataSource, "Top.Science.Astronomy", "Top.Science.Astronomy", "ltree", NpgsqlDbType.LTree, isDefaultForWriting: false, skipArrayCheck: true); + await AssertType(dataSource, "Top.Science.Astronomy", "Top.Science.Astronomy", "ltree", isDefaultForWriting: false, skipArrayCheck: true); } [Test] @@ -53,7 +53,7 @@ public async Task NpgsqlSlimSourceBuilder_EnableArrays() dataSourceBuilder.EnableArrays(); await using var dataSource = dataSourceBuilder.Build(); - await AssertType(dataSource, "Top.Science.Astronomy", "Top.Science.Astronomy", 
"ltree", NpgsqlDbType.LTree, isDefaultForWriting: false); + await AssertType(dataSource, "Top.Science.Astronomy", "Top.Science.Astronomy", "ltree", isDefaultForWriting: false); } [OneTimeSetUp] diff --git a/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs b/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs index c500324986..ef6e2f0936 100644 --- a/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs +++ b/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs @@ -18,7 +18,6 @@ public Task Timestamp_with_all_DateTime_kinds([Values] DateTimeKind kind) new DateTime(1998, 4, 12, 13, 26, 38, 789, kind), "1998-04-12 13:26:38.789", "timestamp without time zone", - NpgsqlDbType.Timestamp, DbType.DateTime); [Test] @@ -32,7 +31,7 @@ public async Task Timestamp_read_as_Unspecified_DateTime() [Test] public async Task Timestamptz_negative_infinity() { - var dto = await AssertType(DateTimeOffset.MinValue, "-infinity", "timestamp with time zone", NpgsqlDbType.TimestampTz, + var dto = await AssertType(DateTimeOffset.MinValue, "-infinity", "timestamp with time zone", DbType.DateTimeOffset, isDefaultForReading: false); Assert.That(dto.Offset, Is.EqualTo(TimeSpan.Zero)); } @@ -41,7 +40,7 @@ public async Task Timestamptz_negative_infinity() public async Task Timestamptz_infinity() { var dto = await AssertType( - DateTimeOffset.MaxValue, "infinity", "timestamp with time zone", NpgsqlDbType.TimestampTz, DbType.DateTimeOffset, + DateTimeOffset.MaxValue, "infinity", "timestamp with time zone", DbType.DateTimeOffset, isDefaultForReading: false); Assert.That(dto.Offset, Is.EqualTo(TimeSpan.Zero)); } @@ -54,7 +53,6 @@ public Task Timestamptz_write_utc_DateTime_does_not_convert(DateTimeKind kind) new DateTime(1998, 4, 12, 13, 26, 38, 789, kind), "1998-04-12 15:26:38.789+02", "timestamp with time zone", - NpgsqlDbType.TimestampTz, DbType.DateTimeOffset, isDefault: false); @@ -69,7 +67,6 @@ public Task Timestamptz_local_DateTime_converts() dateTime, "1998-04-12 15:26:38.789+02", "timestamp with time zone", - 
NpgsqlDbType.TimestampTz, DbType.DateTimeOffset, isDefaultForWriting: false); } diff --git a/test/Npgsql.Tests/Types/MiscTypeTests.cs b/test/Npgsql.Tests/Types/MiscTypeTests.cs index 143e3d0f07..1fc0c68b36 100644 --- a/test/Npgsql.Tests/Types/MiscTypeTests.cs +++ b/test/Npgsql.Tests/Types/MiscTypeTests.cs @@ -14,11 +14,11 @@ class MiscTypeTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(mu [Test] public async Task Boolean() { - await AssertType(true, "true", "boolean", NpgsqlDbType.Boolean, DbType.Boolean, skipArrayCheck: true); - await AssertType(false, "false", "boolean", NpgsqlDbType.Boolean, DbType.Boolean, skipArrayCheck: true); + await AssertType(true, "true", "boolean", DbType.Boolean, skipArrayCheck: true); + await AssertType(false, "false", "boolean", DbType.Boolean, skipArrayCheck: true); // The literal representations for bools inside array are different ({t,f} instead of true/false, so we check separately. - await AssertType(new[] { true, false }, "{t,f}", "boolean[]", NpgsqlDbType.Boolean | NpgsqlDbType.Array); + await AssertType(new[] { true, false }, "{t,f}", "boolean[]"); } [Test] @@ -26,7 +26,7 @@ public Task Uuid() => AssertType( new Guid("a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11"), "a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11", - "uuid", NpgsqlDbType.Uuid, DbType.Guid); + "uuid", DbType.Guid); [Test, Description("Makes sure that the PostgreSQL 'unknown' type (OID 705) is read properly")] public async Task Read_unknown() @@ -172,25 +172,24 @@ public async Task Send_unknown() [Test] public async Task ObjectArray() { - await AssertTypeWrite(new object?[] { (short)4, null, (long)5, 6 }, "{4,NULL,5,6}", "integer[]", NpgsqlDbType.Integer | NpgsqlDbType.Array, isDefault: false); - await AssertTypeWrite(new object?[] { "text", null, DBNull.Value, "chars".ToCharArray(), 'c' }, "{text,NULL,NULL,chars,c}", "text[]", NpgsqlDbType.Text | NpgsqlDbType.Array, isDefault: false); + await AssertTypeWrite(new object?[] { (short)4, null, (long)5, 6 }, 
"{4,NULL,5,6}", "integer[]", isDefault: false); + await AssertTypeWrite(new object?[] { "text", null, DBNull.Value, "chars".ToCharArray(), 'c' }, "{text,NULL,NULL,chars,c}", "text[]", isDefault: false); await using var dataSource = CreateDataSource(b => b.ConnectionStringBuilder.Timezone = "Europe/Berlin"); - await AssertTypeWrite(dataSource, new object?[] { DateTime.UnixEpoch, null, DBNull.Value, DateTime.UnixEpoch.AddDays(1) }, "{\"1970-01-01 01:00:00+01\",NULL,NULL,\"1970-01-02 01:00:00+01\"}", "timestamp with time zone[]", NpgsqlDbType.TimestampTz | NpgsqlDbType.Array, isDefault: false); + await AssertTypeWrite(dataSource, new object?[] { DateTime.UnixEpoch, null, DBNull.Value, DateTime.UnixEpoch.AddDays(1) }, "{\"1970-01-01 01:00:00+01\",NULL,NULL,\"1970-01-02 01:00:00+01\"}", "timestamp with time zone[]", isDefault: false); Assert.ThrowsAsync(() => AssertTypeWrite(dataSource, new object?[] { DateTime.Now, null, DBNull.Value, DateTime.UnixEpoch.AddDays(1) - }, "{\"1970-01-01 01:00:00+01\",NULL,NULL,\"1970-01-02 01:00:00+01\"}", "timestamp with time zone[]", - NpgsqlDbType.TimestampTz | NpgsqlDbType.Array, isDefault: false)); + }, "{\"1970-01-01 01:00:00+01\",NULL,NULL,\"1970-01-02 01:00:00+01\"}", "timestamp with time zone[]", isDefault: false)); } [Test] public Task Int2Vector() - => AssertType(new short[] { 4, 5, 6 }, "4 5 6", "int2vector", NpgsqlDbType.Int2Vector, isDefault: false); + => AssertType(new short[] { 4, 5, 6 }, "4 5 6", "int2vector", isDefault: false); [Test] public Task Oidvector() - => AssertType(new uint[] { 4, 5, 6 }, "4 5 6", "oidvector", NpgsqlDbType.Oidvector, isDefault: false); + => AssertType(new uint[] { 4, 5, 6 }, "4 5 6", "oidvector", isDefault: false); [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1138")] public async Task Void() diff --git a/test/Npgsql.Tests/Types/MoneyTests.cs b/test/Npgsql.Tests/Types/MoneyTests.cs index 5f4a31b65b..3351f0abaf 100644 --- a/test/Npgsql.Tests/Types/MoneyTests.cs +++ 
b/test/Npgsql.Tests/Types/MoneyTests.cs @@ -27,7 +27,7 @@ public async Task Money(string sqlLiteral, decimal money) { using var conn = await OpenConnectionAsync(); await conn.ExecuteNonQueryAsync("SET lc_monetary='C'"); - await AssertType(conn, money, sqlLiteral, "money", NpgsqlDbType.Money, DbType.Currency, isDefault: false); + await AssertType(conn, money, sqlLiteral, "money", DbType.Currency, isDefault: false); } [Test] @@ -59,4 +59,4 @@ public async Task Write_with_large_scale(string query, decimal parameter, decima Assert.That(decimal.GetBits(rdr.GetFieldValue(0)), Is.EqualTo(decimal.GetBits(expected))); Assert.That(rdr.GetFieldValue(1)); } -} \ No newline at end of file +} diff --git a/test/Npgsql.Tests/Types/MultirangeTests.cs b/test/Npgsql.Tests/Types/MultirangeTests.cs index 1de001f93b..7553e07471 100644 --- a/test/Npgsql.Tests/Types/MultirangeTests.cs +++ b/test/Npgsql.Tests/Types/MultirangeTests.cs @@ -21,7 +21,7 @@ public class MultirangeTests : TestBase new(3, true, false, 7, false, false), new(9, true, false, 0, false, true) }, - "{[3,7),[9,)}", "int4multirange", NpgsqlDbType.IntegerMultirange, true, true, default(NpgsqlRange)) + "{[3,7),[9,)}", "int4multirange", true, true, default(NpgsqlRange)) .SetName("Int"), // int8multirange @@ -31,7 +31,7 @@ public class MultirangeTests : TestBase new(3, true, false, 7, false, false), new(9, true, false, 0, false, true) }, - "{[3,7),[9,)}", "int8multirange", NpgsqlDbType.BigIntMultirange, true, true, default(NpgsqlRange)) + "{[3,7),[9,)}", "int8multirange", true, true, default(NpgsqlRange)) .SetName("Long"), // nummultirange @@ -42,7 +42,7 @@ public class MultirangeTests : TestBase new(3, true, false, 7, true, false), new(9, false, false, 0, false, true) }, - "{[3,7],(9,)}", "nummultirange", NpgsqlDbType.NumericMultirange, true, true, default(NpgsqlRange)) + "{[3,7],(9,)}", "nummultirange", true, true, default(NpgsqlRange)) .SetName("Decimal"), // daterange @@ -52,7 +52,7 @@ public class MultirangeTests : 
TestBase new(new(2020, 1, 1), true, false, new(2020, 1, 5), false, false), new(new(2020, 1, 10), true, false, default, false, true) }, - "{[2020-01-01,2020-01-05),[2020-01-10,)}", "datemultirange", NpgsqlDbType.DateMultirange, true, false, default(NpgsqlRange)) + "{[2020-01-01,2020-01-05),[2020-01-10,)}", "datemultirange", true, false, default(NpgsqlRange)) .SetName("DateTime DateMultirange"), // tsmultirange @@ -62,7 +62,7 @@ public class MultirangeTests : TestBase new(new(2020, 1, 1), true, false, new(2020, 1, 5), false, false), new(new(2020, 1, 10), true, false, default, false, true) }, - """{["2020-01-01 00:00:00","2020-01-05 00:00:00"),["2020-01-10 00:00:00",)}""", "tsmultirange", NpgsqlDbType.TimestampMultirange, true, true, default(NpgsqlRange)) + """{["2020-01-01 00:00:00","2020-01-05 00:00:00"),["2020-01-10 00:00:00",)}""", "tsmultirange", true, true, default(NpgsqlRange)) .SetName("DateTime TimestampMultirange"), // tstzmultirange @@ -72,7 +72,7 @@ public class MultirangeTests : TestBase new(new(2020, 1, 1, 0, 0, 0, kind: DateTimeKind.Utc), true, false, new(2020, 1, 5, 0, 0, 0, kind: DateTimeKind.Utc), false, false), new(new(2020, 1, 10, 0, 0, 0, kind: DateTimeKind.Utc), true, false, default, false, true) }, - """{["2020-01-01 01:00:00+01","2020-01-05 01:00:00+01"),["2020-01-10 01:00:00+01",)}""", "tstzmultirange", NpgsqlDbType.TimestampTzMultirange, true, true, default(NpgsqlRange)) + """{["2020-01-01 01:00:00+01","2020-01-05 01:00:00+01"),["2020-01-10 01:00:00+01",)}""", "tstzmultirange", true, true, default(NpgsqlRange)) .SetName("DateTime TimestampTzMultirange"), new TestCaseData( @@ -81,23 +81,23 @@ public class MultirangeTests : TestBase new(new(2020, 1, 1), true, false, new(2020, 1, 5), false, false), new(new(2020, 1, 10), true, false, default, false, true) }, - "{[2020-01-01,2020-01-05),[2020-01-10,)}", "datemultirange", NpgsqlDbType.DateMultirange, false, false, default(NpgsqlRange)) + "{[2020-01-01,2020-01-05),[2020-01-10,)}", "datemultirange", 
false, false, default(NpgsqlRange)) .SetName("DateOnly") ]; [Test, TestCaseSource(nameof(MultirangeTestCases))] public Task Multirange_as_array( - T multirangeAsArray, string sqlLiteral, string pgTypeName, NpgsqlDbType? npgsqlDbType, bool isDefaultForReading, bool isDefaultForWriting, TRange _) - => AssertType(multirangeAsArray, sqlLiteral, pgTypeName, npgsqlDbType, isDefaultForReading: isDefaultForReading, + T multirangeAsArray, string sqlLiteral, string pgTypeName, bool isDefaultForReading, bool isDefaultForWriting, TRange _) + => AssertType(multirangeAsArray, sqlLiteral, pgTypeName, isDefaultForReading: isDefaultForReading, isDefaultForWriting: isDefaultForWriting); [Test, TestCaseSource(nameof(MultirangeTestCases))] public Task Multirange_as_list( - T multirangeAsArray, string sqlLiteral, string pgTypeName, NpgsqlDbType? npgsqlDbType, bool isDefaultForReading, bool isDefaultForWriting, TRange _) + T multirangeAsArray, string sqlLiteral, string pgTypeName, bool isDefaultForReading, bool isDefaultForWriting, TRange _) where T : IList => AssertType( new List(multirangeAsArray), - sqlLiteral, pgTypeName, npgsqlDbType, isDefaultForReading: false, isDefaultForWriting: isDefaultForWriting); + sqlLiteral, pgTypeName, isDefaultForReading: false, isDefaultForWriting: isDefaultForWriting); [Test] public async Task Unmapped_multirange_with_mapped_subtype() diff --git a/test/Npgsql.Tests/Types/NetworkTypeTests.cs b/test/Npgsql.Tests/Types/NetworkTypeTests.cs index b3f0b221e6..edd8f1dee6 100644 --- a/test/Npgsql.Tests/Types/NetworkTypeTests.cs +++ b/test/Npgsql.Tests/Types/NetworkTypeTests.cs @@ -17,7 +17,7 @@ class NetworkTypeTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase { [Test] public Task Inet_v4_as_IPAddress() - => AssertType(IPAddress.Parse("192.168.1.1"), "192.168.1.1/32", "inet", NpgsqlDbType.Inet, skipArrayCheck: true); + => AssertType(IPAddress.Parse("192.168.1.1"), "192.168.1.1/32", "inet", skipArrayCheck: true); [Test] public Task 
Inet_v4_array_as_IPAddress_array() @@ -27,7 +27,7 @@ public Task Inet_v4_array_as_IPAddress_array() IPAddress.Parse("192.168.1.1"), IPAddress.Parse("192.168.1.2") }, - "{192.168.1.1,192.168.1.2}", "inet[]", NpgsqlDbType.Inet | NpgsqlDbType.Array); + "{192.168.1.1,192.168.1.2}", "inet[]"); [Test] public Task Inet_v6_as_IPAddress() @@ -35,7 +35,6 @@ public Task Inet_v6_as_IPAddress() IPAddress.Parse("2001:1db8:85a3:1142:1000:8a2e:1370:7334"), "2001:1db8:85a3:1142:1000:8a2e:1370:7334/128", "inet", - NpgsqlDbType.Inet, skipArrayCheck: true); [Test] @@ -46,19 +45,18 @@ public Task Inet_v6_array_as_IPAddress_array() IPAddress.Parse("2001:1db8:85a3:1142:1000:8a2e:1370:7334"), IPAddress.Parse("2001:1db8:85a3:1142:1000:8a2e:1370:7335") }, - "{2001:1db8:85a3:1142:1000:8a2e:1370:7334,2001:1db8:85a3:1142:1000:8a2e:1370:7335}", "inet[]", NpgsqlDbType.Inet | NpgsqlDbType.Array); + "{2001:1db8:85a3:1142:1000:8a2e:1370:7334,2001:1db8:85a3:1142:1000:8a2e:1370:7335}", "inet[]"); [Test, IssueLink("https://github.com/dotnet/corefx/issues/33373")] public Task IPAddress_Any() - => AssertTypeWrite(IPAddress.Any, "0.0.0.0/32", "inet", NpgsqlDbType.Inet, skipArrayCheck: true); + => AssertTypeWrite(IPAddress.Any, "0.0.0.0/32", "inet", skipArrayCheck: true); [Test] public Task IPNetwork_as_cidr() => AssertType( new IPNetwork(IPAddress.Parse("192.168.1.0"), 24), "192.168.1.0/24", - "cidr", - NpgsqlDbType.Cidr); + "cidr"); #pragma warning disable CS0618 // NpgsqlCidr is obsolete [Test] @@ -67,7 +65,6 @@ public Task NpgsqlCidr_as_Cidr() new NpgsqlCidr(IPAddress.Parse("192.168.1.0"), netmask: 24), "192.168.1.0/24", "cidr", - NpgsqlDbType.Cidr, isDefaultForReading: false); #pragma warning restore CS0618 @@ -77,7 +74,6 @@ public Task Inet_v4_as_NpgsqlInet() new NpgsqlInet(IPAddress.Parse("192.168.1.1"), 24), "192.168.1.1/24", "inet", - NpgsqlDbType.Inet, isDefaultForReading: false); [Test] @@ -86,12 +82,11 @@ public Task Inet_v6_as_NpgsqlInet() new 
NpgsqlInet(IPAddress.Parse("2001:1db8:85a3:1142:1000:8a2e:1370:7334"), 24), "2001:1db8:85a3:1142:1000:8a2e:1370:7334/24", "inet", - NpgsqlDbType.Inet, isDefaultForReading: false); [Test] public Task Macaddr() - => AssertType(PhysicalAddress.Parse("08-00-2B-01-02-03"), "08:00:2b:01:02:03", "macaddr", NpgsqlDbType.MacAddr); + => AssertType(PhysicalAddress.Parse("08-00-2B-01-02-03"), "08:00:2b:01:02:03", "macaddr"); [Test] public async Task Macaddr8() @@ -100,7 +95,7 @@ public async Task Macaddr8() if (conn.PostgreSqlVersion < new Version(10, 0)) Assert.Ignore("macaddr8 only supported on PostgreSQL 10 and above"); - await AssertType(PhysicalAddress.Parse("08-00-2B-01-02-03-04-05"), "08:00:2b:01:02:03:04:05", "macaddr8", NpgsqlDbType.MacAddr8, + await AssertType(PhysicalAddress.Parse("08-00-2B-01-02-03-04-05"), "08:00:2b:01:02:03:04:05", "macaddr8", isDefaultForWriting: false); } @@ -111,7 +106,7 @@ public async Task Macaddr8_write_with_6_bytes() if (conn.PostgreSqlVersion < new Version(10, 0)) Assert.Ignore("macaddr8 only supported on PostgreSQL 10 and above"); - await AssertTypeWrite(PhysicalAddress.Parse("08-00-2B-01-02-03"), "08:00:2b:ff:fe:01:02:03", "macaddr8", NpgsqlDbType.MacAddr8, + await AssertTypeWrite(PhysicalAddress.Parse("08-00-2B-01-02-03"), "08:00:2b:ff:fe:01:02:03", "macaddr8", isDefault: false); } diff --git a/test/Npgsql.Tests/Types/NumericTests.cs b/test/Npgsql.Tests/Types/NumericTests.cs index 439d651559..5b6c366b96 100644 --- a/test/Npgsql.Tests/Types/NumericTests.cs +++ b/test/Npgsql.Tests/Types/NumericTests.cs @@ -114,15 +114,15 @@ public async Task Write(string query, decimal expected) [Test] public async Task Numeric() { - await AssertType(5.5m, "5.5", "numeric", NpgsqlDbType.Numeric, DbType.Decimal); - await AssertTypeWrite(5.5m, "5.5", "numeric", NpgsqlDbType.Numeric, DbType.VarNumeric, inferredDbType: DbType.Decimal); - - await AssertType((short)8, "8", "numeric", NpgsqlDbType.Numeric, DbType.Decimal, isDefault: false); - await 
AssertType(8, "8", "numeric", NpgsqlDbType.Numeric, DbType.Decimal, isDefault: false); - await AssertType((byte)8, "8", "numeric", NpgsqlDbType.Numeric, DbType.Decimal, isDefault: false); - await AssertType(8F, "8", "numeric", NpgsqlDbType.Numeric, DbType.Decimal, isDefault: false); - await AssertType(8D, "8", "numeric", NpgsqlDbType.Numeric, DbType.Decimal, isDefault: false); - await AssertType(8M, "8", "numeric", NpgsqlDbType.Numeric, DbType.Decimal, isDefault: false); + await AssertType(5.5m, "5.5", "numeric", DbType.Decimal); + await AssertTypeWrite(5.5m, "5.5", "numeric", DbType.VarNumeric, inferredDbType: DbType.Decimal); + + await AssertType((short)8, "8", "numeric", DbType.Decimal, isDefault: false); + await AssertType(8, "8", "numeric", DbType.Decimal, isDefault: false); + await AssertType((byte)8, "8", "numeric", DbType.Decimal, isDefault: false); + await AssertType(8F, "8", "numeric", DbType.Decimal, isDefault: false); + await AssertType(8D, "8", "numeric", DbType.Decimal, isDefault: false); + await AssertType(8M, "8", "numeric", DbType.Decimal, isDefault: false); } [Test, Description("Tests that when Numeric value does not fit in a System.Decimal and reader is in ReaderState.InResult, the value was read wholly and it is safe to continue reading")] diff --git a/test/Npgsql.Tests/Types/NumericTypeTests.cs b/test/Npgsql.Tests/Types/NumericTypeTests.cs index 5fda011158..406e1b7f44 100644 --- a/test/Npgsql.Tests/Types/NumericTypeTests.cs +++ b/test/Npgsql.Tests/Types/NumericTypeTests.cs @@ -2,7 +2,6 @@ using System.Data; using System.Globalization; using System.Threading.Tasks; -using NpgsqlTypes; using NUnit.Framework; using static Npgsql.Tests.TestUtil; @@ -19,59 +18,59 @@ public class NumericTypeTests(MultiplexingMode multiplexingMode) : MultiplexingT [Test] public async Task Int16() { - await AssertType((short)8, "8", "smallint", NpgsqlDbType.Smallint, DbType.Int16); + await AssertType((short)8, "8", "smallint", DbType.Int16); // Clr byte/sbyte maps to 
'int2' as there is no byte type in PostgreSQL, byte[] maps to bytea however. - await AssertType((byte)8, "8", "smallint", NpgsqlDbType.Smallint, DbType.Int16, isDefaultForReading: false, skipArrayCheck: true); - await AssertType((sbyte)8, "8", "smallint", NpgsqlDbType.Smallint, DbType.Int16, isDefaultForReading: false); + await AssertType((byte)8, "8", "smallint", DbType.Int16, isDefaultForReading: false, skipArrayCheck: true); + await AssertType((sbyte)8, "8", "smallint", DbType.Int16, isDefaultForReading: false); - await AssertType(8, "8", "smallint", NpgsqlDbType.Smallint, DbType.Int16, isDefault: false); - await AssertType(8L, "8", "smallint", NpgsqlDbType.Smallint, DbType.Int16, isDefault: false); - await AssertType(8F, "8", "smallint", NpgsqlDbType.Smallint, DbType.Int16, isDefault: false); - await AssertType(8D, "8", "smallint", NpgsqlDbType.Smallint, DbType.Int16, isDefault: false); - await AssertType(8M, "8", "smallint", NpgsqlDbType.Smallint, DbType.Int16, isDefault: false); + await AssertType(8, "8", "smallint", DbType.Int16, isDefault: false); + await AssertType(8L, "8", "smallint", DbType.Int16, isDefault: false); + await AssertType(8F, "8", "smallint", DbType.Int16, isDefault: false); + await AssertType(8D, "8", "smallint", DbType.Int16, isDefault: false); + await AssertType(8M, "8", "smallint", DbType.Int16, isDefault: false); } [Test] public async Task Int32() { - await AssertType(8, "8", "integer", NpgsqlDbType.Integer, DbType.Int32); + await AssertType(8, "8", "integer", DbType.Int32); - await AssertType((short)8, "8", "integer", NpgsqlDbType.Integer, DbType.Int32, isDefault: false); - await AssertType(8L, "8", "integer", NpgsqlDbType.Integer, DbType.Int32, isDefault: false); - await AssertType((byte)8, "8", "integer", NpgsqlDbType.Integer, DbType.Int32, isDefault: false); - await AssertType(8F, "8", "integer", NpgsqlDbType.Integer, DbType.Int32, isDefault: false); - await AssertType(8D, "8", "integer", NpgsqlDbType.Integer, DbType.Int32, 
isDefault: false); - await AssertType(8M, "8", "integer", NpgsqlDbType.Integer, DbType.Int32, isDefault: false); + await AssertType((short)8, "8", "integer", DbType.Int32, isDefault: false); + await AssertType(8L, "8", "integer", DbType.Int32, isDefault: false); + await AssertType((byte)8, "8", "integer", DbType.Int32, isDefault: false); + await AssertType(8F, "8", "integer", DbType.Int32, isDefault: false); + await AssertType(8D, "8", "integer", DbType.Int32, isDefault: false); + await AssertType(8M, "8", "integer", DbType.Int32, isDefault: false); } [Test, Description("Tests some types which are aliased to UInt32")] - [TestCase("oid", NpgsqlDbType.Oid, TestName="OID")] - [TestCase("xid", NpgsqlDbType.Xid, TestName="XID")] - [TestCase("cid", NpgsqlDbType.Cid, TestName="CID")] - public Task UInt32(string pgTypeName, NpgsqlDbType npgsqlDbType) - => AssertType(8u, "8", pgTypeName, npgsqlDbType, isDefaultForWriting: false); + [TestCase("oid", TestName="OID")] + [TestCase("xid", TestName="XID")] + [TestCase("cid", TestName="CID")] + public Task UInt32(string pgTypeName) + => AssertType(8u, "8", pgTypeName, isDefaultForWriting: false); [Test] - [TestCase("xid8", NpgsqlDbType.Xid8, TestName="XID8")] - public async Task UInt64(string pgTypeName, NpgsqlDbType npgsqlDbType) + [TestCase("xid8", TestName="XID8")] + public async Task UInt64(string pgTypeName) { await using var conn = await OpenConnectionAsync(); MinimumPgVersion(conn, "13.0", "The xid8 type was introduced in PostgreSQL 13"); - await AssertType(8ul, "8", pgTypeName, npgsqlDbType, isDefaultForWriting: false); + await AssertType(8ul, "8", pgTypeName, isDefaultForWriting: false); } [Test] public async Task Int64() { - await AssertType(8L, "8", "bigint", NpgsqlDbType.Bigint, DbType.Int64); + await AssertType(8L, "8", "bigint", DbType.Int64); - await AssertType((short)8, "8", "bigint", NpgsqlDbType.Bigint, DbType.Int64, isDefault: false); - await AssertType(8, "8", "bigint", NpgsqlDbType.Bigint, DbType.Int64, 
isDefault: false); - await AssertType((byte)8, "8", "bigint", NpgsqlDbType.Bigint, DbType.Int64, isDefault: false); - await AssertType(8F, "8", "bigint", NpgsqlDbType.Bigint, DbType.Int64, isDefault: false); - await AssertType(8D, "8", "bigint", NpgsqlDbType.Bigint, DbType.Int64, isDefault: false); - await AssertType(8M, "8", "bigint", NpgsqlDbType.Bigint, DbType.Int64, isDefault: false); + await AssertType((short)8, "8", "bigint", DbType.Int64, isDefault: false); + await AssertType(8, "8", "bigint", DbType.Int64, isDefault: false); + await AssertType((byte)8, "8", "bigint", DbType.Int64, isDefault: false); + await AssertType(8F, "8", "bigint", DbType.Int64, isDefault: false); + await AssertType(8D, "8", "bigint", DbType.Int64, isDefault: false); + await AssertType(8M, "8", "bigint", DbType.Int64, isDefault: false); } [Test] @@ -84,7 +83,7 @@ public async Task Double(double value, string sqlLiteral) await using var conn = await OpenConnectionAsync(); MinimumPgVersion(conn, "12.0"); - await AssertType(value, sqlLiteral, "double precision", NpgsqlDbType.Double, DbType.Double); + await AssertType(value, sqlLiteral, "double precision", DbType.Double); } [Test] @@ -93,7 +92,7 @@ public async Task Double(double value, string sqlLiteral) [TestCase(float.PositiveInfinity, "Infinity", TestName = "Float_PositiveInfinity")] [TestCase(float.NegativeInfinity, "-Infinity", TestName = "Float_NegativeInfinity")] public Task Float(float value, string sqlLiteral) - => AssertType(value, sqlLiteral, "real", NpgsqlDbType.Real, DbType.Single); + => AssertType(value, sqlLiteral, "real", DbType.Single); [Test] [TestCase(short.MaxValue + 1, "smallint")] diff --git a/test/Npgsql.Tests/Types/RangeTests.cs b/test/Npgsql.Tests/Types/RangeTests.cs index 7582d4ad40..6997cd9bb4 100644 --- a/test/Npgsql.Tests/Types/RangeTests.cs +++ b/test/Npgsql.Tests/Types/RangeTests.cs @@ -16,46 +16,46 @@ class RangeTests : MultiplexingTestBase { static readonly TestCaseData[] RangeTestCases = [ - new 
TestCaseData(new NpgsqlRange(1, true, 10, false), "[1,10)", "int4range", NpgsqlDbType.IntegerRange) + new TestCaseData(new NpgsqlRange(1, true, 10, false), "[1,10)", "int4range") .SetName("IntegerRange"), - new TestCaseData(new NpgsqlRange(1, true, 10, false), "[1,10)", "int8range", NpgsqlDbType.BigIntRange) + new TestCaseData(new NpgsqlRange(1, true, 10, false), "[1,10)", "int8range") .SetName("BigIntRange"), - new TestCaseData(new NpgsqlRange(1, true, 10, false), "[1,10)", "numrange", NpgsqlDbType.NumericRange) + new TestCaseData(new NpgsqlRange(1, true, 10, false), "[1,10)", "numrange") .SetName("NumericRange"), new TestCaseData(new NpgsqlRange( new DateTime(2020, 1, 1, 12, 0, 0), true, new DateTime(2020, 1, 3, 13, 0, 0), false), - """["2020-01-01 12:00:00","2020-01-03 13:00:00")""", "tsrange", NpgsqlDbType.TimestampRange) + """["2020-01-01 12:00:00","2020-01-03 13:00:00")""", "tsrange") .SetName("TimestampRange"), // Note that the below text representations are local (according to TimeZone, which is set to Europe/Berlin in this test class), // because that's how PG does timestamptz *text* representation. 
new TestCaseData(new NpgsqlRange( new DateTime(2020, 1, 1, 12, 0, 0, DateTimeKind.Utc), true, new DateTime(2020, 1, 3, 13, 0, 0, DateTimeKind.Utc), false), - """["2020-01-01 13:00:00+01","2020-01-03 14:00:00+01")""", "tstzrange", NpgsqlDbType.TimestampTzRange) + """["2020-01-01 13:00:00+01","2020-01-03 14:00:00+01")""", "tstzrange") .SetName("TimestampTzRange"), // Note that numrange is a non-discrete range, and therefore doesn't undergo normalization to inclusive/exclusive in PG - new TestCaseData(NpgsqlRange.Empty, "empty", "numrange", NpgsqlDbType.NumericRange) + new TestCaseData(NpgsqlRange.Empty, "empty", "numrange") .SetName("EmptyRange"), - new TestCaseData(new NpgsqlRange(1, true, 10, true), "[1,10]", "numrange", NpgsqlDbType.NumericRange) + new TestCaseData(new NpgsqlRange(1, true, 10, true), "[1,10]", "numrange") .SetName("Inclusive"), - new TestCaseData(new NpgsqlRange(1, false, 10, false), "(1,10)", "numrange", NpgsqlDbType.NumericRange) + new TestCaseData(new NpgsqlRange(1, false, 10, false), "(1,10)", "numrange") .SetName("Exclusive"), - new TestCaseData(new NpgsqlRange(1, true, 10, false), "[1,10)", "numrange", NpgsqlDbType.NumericRange) + new TestCaseData(new NpgsqlRange(1, true, 10, false), "[1,10)", "numrange") .SetName("InclusiveExclusive"), - new TestCaseData(new NpgsqlRange(1, false, 10, true), "(1,10]", "numrange", NpgsqlDbType.NumericRange) + new TestCaseData(new NpgsqlRange(1, false, 10, true), "(1,10]", "numrange") .SetName("ExclusiveInclusive"), - new TestCaseData(new NpgsqlRange(1, false, true, 10, false, false), "(,10)", "numrange", NpgsqlDbType.NumericRange) + new TestCaseData(new NpgsqlRange(1, false, true, 10, false, false), "(,10)", "numrange") .SetName("InfiniteLowerBound"), - new TestCaseData(new NpgsqlRange(1, true, false, 10, false, true), "[1,)", "numrange", NpgsqlDbType.NumericRange) + new TestCaseData(new NpgsqlRange(1, true, false, 10, false, true), "[1,)", "numrange") .SetName("InfiniteUpperBound") ]; // See more test cases 
in DateTimeTests [Test, TestCaseSource(nameof(RangeTestCases))] - public Task Range(T range, string sqlLiteral, string pgTypeName, NpgsqlDbType? npgsqlDbType) - => AssertType(range, sqlLiteral, pgTypeName, npgsqlDbType, + public Task Range(T range, string sqlLiteral, string pgTypeName) + => AssertType(range, sqlLiteral, pgTypeName, // NpgsqlRange[] is mapped to multirange by default, not array, so the built-in AssertType testing for arrays fails // (see below) skipArrayCheck: true); @@ -63,8 +63,8 @@ public Task Range(T range, string sqlLiteral, string pgTypeName, NpgsqlDbType // This re-executes the same scenario as above, but with isDefaultForWriting: false and without skipArrayCheck: true. // This tests coverage of range arrays (as opposed to multiranges). [Test, TestCaseSource(nameof(RangeTestCases))] - public Task Range_array(T range, string sqlLiteral, string pgTypeName, NpgsqlDbType? npgsqlDbType) - => AssertType(range, sqlLiteral, pgTypeName, npgsqlDbType, isDefaultForWriting: false); + public Task Range_array(T range, string sqlLiteral, string pgTypeName) + => AssertType(range, sqlLiteral, pgTypeName, isDefaultForWriting: false); [Test] public void Equality_finite() @@ -244,9 +244,8 @@ await AssertType( }, """{"[3,4)","[5,6)"}""", "int4range[]", - NpgsqlDbType.IntegerRange | NpgsqlDbType.Array, isDefaultForWriting: !supportsMultirange, - isNpgsqlDbTypeInferredFromClrType: false); + isDataTypeInferredFromValue: false); } [Test] @@ -273,7 +272,7 @@ public async Task NpgsqlSlimSourceBuilder_EnableRanges() await AssertType( dataSource, - new NpgsqlRange(1, true, 10, false), "[1,10)", "int4range", NpgsqlDbType.IntegerRange, skipArrayCheck: true); + new NpgsqlRange(1, true, 10, false), "[1,10)", "int4range", skipArrayCheck: true); } protected override NpgsqlConnection OpenConnection() diff --git a/test/Npgsql.Tests/Types/TextTests.cs b/test/Npgsql.Tests/Types/TextTests.cs index 6d4adae741..f93918bd54 100644 --- a/test/Npgsql.Tests/Types/TextTests.cs +++ 
b/test/Npgsql.Tests/Types/TextTests.cs @@ -19,29 +19,29 @@ public class TextTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase { [Test] public Task Text_as_string() - => AssertType("foo", "foo", "text", NpgsqlDbType.Text, DbType.String); + => AssertType("foo", "foo", "text", DbType.String); [Test] public Task Text_as_array_of_chars() - => AssertType("foo".ToCharArray(), "foo", "text", NpgsqlDbType.Text, DbType.String, isDefaultForReading: false); + => AssertType("foo".ToCharArray(), "foo", "text", DbType.String, isDefaultForReading: false); [Test] public Task Text_as_ArraySegment_of_chars() - => AssertTypeWrite(new ArraySegment("foo".ToCharArray()), "foo", "text", NpgsqlDbType.Text, DbType.String, + => AssertTypeWrite(new ArraySegment("foo".ToCharArray()), "foo", "text", DbType.String, isDefault: false); [Test] public Task Text_as_array_of_bytes() - => AssertType(Encoding.UTF8.GetBytes("foo"), "foo", "text", NpgsqlDbType.Text, DbType.String, isDefault: false); + => AssertType(Encoding.UTF8.GetBytes("foo"), "foo", "text", DbType.String, isDefault: false); [Test] public Task Text_as_ReadOnlyMemory_of_bytes() - => AssertTypeWrite(new ReadOnlyMemory(Encoding.UTF8.GetBytes("foo")), "foo", "text", NpgsqlDbType.Text, DbType.String, + => AssertTypeWrite(new ReadOnlyMemory(Encoding.UTF8.GetBytes("foo")), "foo", "text", DbType.String, isDefault: false); [Test] public Task Char_as_char() - => AssertType('f', "f", "character", NpgsqlDbType.Char, inferredDbType: DbType.String, isDefault: false); + => AssertType('f', "f", "character", inferredDbType: DbType.String, isDefault: false); [Test] public async Task Citext_as_string() @@ -49,12 +49,12 @@ public async Task Citext_as_string() await using var conn = await OpenConnectionAsync(); await EnsureExtensionAsync(conn, "citext"); - await AssertType("foo", "foo", "citext", NpgsqlDbType.Citext, inferredDbType: DbType.String, isDefaultForWriting: false); + await AssertType("foo", "foo", "citext", inferredDbType: 
DbType.String, isDefaultForWriting: false); } [Test] public Task Text_as_MemoryStream() - => AssertTypeWrite(() => new MemoryStream("foo"u8.ToArray()), "foo", "text", NpgsqlDbType.Text, DbType.String, isDefault: false); + => AssertTypeWrite(() => new MemoryStream("foo"u8.ToArray()), "foo", "text", DbType.String, isDefault: false); [Test] public async Task Text_long() @@ -64,7 +64,7 @@ public async Task Text_long() builder.Append('X', conn.Settings.WriteBufferSize); var value = builder.ToString(); - await AssertType(value, value, "text", NpgsqlDbType.Text, DbType.String); + await AssertType(value, value, "text", DbType.String); } [Test, Description("Tests that strings are truncated when the NpgsqlParameter's Size is set")] @@ -102,10 +102,10 @@ public async Task Null_character() } [Test, Description("Tests some types which are aliased to strings")] - [TestCase("character varying", NpgsqlDbType.Varchar)] - [TestCase("name", NpgsqlDbType.Name)] - public Task Aliased_postgres_types(string pgTypeName, NpgsqlDbType npgsqlDbType) - => AssertType("foo", "foo", pgTypeName, npgsqlDbType, inferredDbType: DbType.String, isDefaultForWriting: false); + [TestCase("character varying")] + [TestCase("name")] + public Task Aliased_postgres_types(string pgTypeName) + => AssertType("foo", "foo", pgTypeName, inferredDbType: DbType.String, isDefaultForWriting: false); [Test] [TestCase(DbType.AnsiString)] From b43902a5f6477c8fe54cc132039752836fd607d8 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Sat, 3 Jan 2026 11:28:09 +0300 Subject: [PATCH 678/761] Year++ (#6402) --- Directory.Build.props | 2 +- LICENSE | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Directory.Build.props b/Directory.Build.props index 081cb72aa6..482dbaf297 100644 --- a/Directory.Build.props +++ b/Directory.Build.props @@ -10,7 +10,7 @@ true true - Copyright 2025 © The Npgsql Development Team + Copyright 2026 © The Npgsql Development Team Npgsql PostgreSQL https://github.com/npgsql/npgsql diff 
--git a/LICENSE b/LICENSE index c551cb7b0c..5f0d26b868 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2002-2025, Npgsql +Copyright (c) 2002-2026, Npgsql Permission to use, copy, modify, and distribute this software and its documentation for any purpose, without fee, and without a written agreement From fd232f79b00909b5923d8a625aaf71699e0f668b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Jan 2026 21:12:46 +0000 Subject: [PATCH 679/761] Bump NodaTime from 3.2.3 to 3.3.0 (#6403) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 72f3cdab82..91e9b5b7c7 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -16,7 +16,7 @@ - + From 847efc11804c649f0fc2d5b74ceb923e6cbe66a6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 8 Jan 2026 07:51:06 +0100 Subject: [PATCH 680/761] Bump NUnit3TestAdapter from 6.0.1 to 6.1.0 (#6405) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 91e9b5b7c7..0cece6ea56 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -34,7 +34,7 @@ - + From 17db0ac73ef3cd78ae2938e8b4025e51e5304198 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Wed, 14 Jan 2026 07:58:21 +0100 Subject: [PATCH 681/761] Bump some dependencies (#6411) --- Directory.Packages.props | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 0cece6ea56..495ba87262 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -1,11 +1,11 @@ - 10.0.1 - 10.0.1 + 10.0.2 + 10.0.2 - 10.0.1 - 10.0.1 + 10.0.2 + 10.0.2 @@ -21,7 +21,7 @@ - + From a82f3565c0162d414d2c3628c508325a3ab6b6bc Mon Sep 17 00:00:00 2001 From: 
"dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 15 Jan 2026 07:48:26 +0100 Subject: [PATCH 682/761] Bump actions/setup-dotnet from 5.0.1 to 5.1.0 (#6412) --- .github/workflows/build.yml | 6 +++--- .github/workflows/native-aot.yml | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 2324059911..34537b0ca8 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -85,7 +85,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v5.0.1 + uses: actions/setup-dotnet@v5.1.0 - name: Build run: dotnet build -c ${{ matrix.config }} @@ -354,7 +354,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v5.0.1 + uses: actions/setup-dotnet@v5.1.0 - name: Pack run: dotnet pack --configuration Release --property:PackageOutputPath="$PWD/nupkgs" --version-suffix "ci.$(date -u +%Y%m%dT%H%M%S)+sha.${GITHUB_SHA:0:9}" -p:ContinuousIntegrationBuild=true @@ -386,7 +386,7 @@ jobs: uses: actions/checkout@v6 - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v5.0.1 + uses: actions/setup-dotnet@v5.1.0 - name: Pack run: dotnet pack --configuration Release --property:PackageOutputPath="$PWD/nupkgs" -p:ContinuousIntegrationBuild=true diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index 909b19966e..25b193a17f 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -110,7 +110,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v5.0.1 + uses: actions/setup-dotnet@v5.1.0 - name: Write script run: echo "$AOT_Compat" > test-aot-compatibility.ps1 @@ -144,7 +144,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v5.0.1 + uses: actions/setup-dotnet@v5.1.0 - name: Start PostgreSQL run: | From 242b5a2cd5bf5cd98d16e1c8e4cdd60ae405540a Mon Sep 17 
00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 18 Jan 2026 12:34:15 +0100 Subject: [PATCH 683/761] Bump Microsoft.Data.SqlClient from 6.1.3 to 6.1.4 (#6414) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 495ba87262..ff5076762b 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -44,7 +44,7 @@ - + From a306f94c11abb3a399ab70230e931ba631a5a525 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 21 Jan 2026 23:17:31 +0100 Subject: [PATCH 684/761] Bump OpenTelemetry.Api from 1.14.0 to 1.15.0 (#6420) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index ff5076762b..1adf2b695f 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -11,7 +11,7 @@ - + From f7cf29d26aa9a5c6f6ea53e527c62992b9189321 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 21 Jan 2026 23:17:59 +0100 Subject: [PATCH 685/761] Bump OpenTelemetry and OpenTelemetry.Exporter.InMemory (#6421) --- Directory.Packages.props | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 1adf2b695f..140a32607a 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -39,8 +39,8 @@ - - + + From e222c911bc25b0495e21d03df7fe6c63ef8b41e4 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Fri, 23 Jan 2026 13:48:08 +0300 Subject: [PATCH 686/761] Improve fallback handling for GSS session encryption when native library is missing (#6422) Improves #6416 --- src/Npgsql/Internal/NpgsqlConnector.cs | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs 
b/src/Npgsql/Internal/NpgsqlConnector.cs index 4c18372699..dc864904f5 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -676,7 +676,22 @@ internal async ValueTask GSSEncrypt(bool async, bool isRequ try { - var data = authentication.GetOutgoingBlob(ReadOnlySpan.Empty, out var statusCode)!; + byte[]? data; + NegotiateAuthenticationStatusCode statusCode; + + try + { + data = authentication.GetOutgoingBlob(ReadOnlySpan.Empty, out statusCode)!; + } + catch (TypeInitializationException) + { + // On UNIX .NET throws TypeInitializationException if it's unable to load the native library + if (isRequired) + throw new NpgsqlException("Unable to load native library to negotiate GSS encryption"); + + return GssEncryptionResult.GetCredentialFailure; + } + if (statusCode != NegotiateAuthenticationStatusCode.ContinueNeeded) { // Unable to retrieve credentials From c937447a45cadbc1d131ffd39d085b64ea80724e Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Mon, 26 Jan 2026 18:56:09 +0300 Subject: [PATCH 687/761] Fix wrapping OperationCancelledException for physical open (#6425) Fixes #6404 --- src/Npgsql/Internal/NpgsqlConnector.cs | 44 +++++++++++++++----------- 1 file changed, 25 insertions(+), 19 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index dc864904f5..0b4d49890d 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -608,6 +608,10 @@ static async Task OpenCore( using var cancellationRegistration = conn.StartCancellableOperation(cancellationToken, attemptPgCancellation: false); await conn.Authenticate(username, timeout, async, cancellationToken).ConfigureAwait(false); } + catch (OperationCanceledException) + { + throw; + } // We handle any exception here because on Windows while receiving a response from Postgres // We might hit connection reset, in which case the actual error will be lost // And we only read some IO 
error @@ -730,7 +734,7 @@ internal async ValueTask GSSEncrypt(bool async, bool isRequ var lengthBuffer = new byte[4]; - await WriteGssEncryptMessage(async, data, lengthBuffer).ConfigureAwait(false); + await WriteGssEncryptMessage(async, data, lengthBuffer, cancellationToken).ConfigureAwait(false); while (true) { @@ -765,7 +769,7 @@ internal async ValueTask GSSEncrypt(bool async, bool isRequ break; } - await WriteGssEncryptMessage(async, data, lengthBuffer).ConfigureAwait(false); + await WriteGssEncryptMessage(async, data, lengthBuffer, cancellationToken).ConfigureAwait(false); } _stream = new GSSStream(_stream, authentication); @@ -777,7 +781,7 @@ internal async ValueTask GSSEncrypt(bool async, bool isRequ ConnectionLogger.LogTrace("GSS encryption successful"); return GssEncryptionResult.Success; - async ValueTask WriteGssEncryptMessage(bool async, byte[] data, byte[] lengthBuffer) + async ValueTask WriteGssEncryptMessage(bool async, byte[] data, byte[] lengthBuffer, CancellationToken cancellationToken) { BinaryPrimitives.WriteInt32BigEndian(lengthBuffer, data.Length); @@ -795,7 +799,7 @@ async ValueTask WriteGssEncryptMessage(bool async, byte[] data, byte[] lengthBuf } } } - catch (Exception e) + catch (Exception e) when (e is not OperationCanceledException) { throw new NpgsqlException("Exception while performing GSS encryption", e); } @@ -1242,12 +1246,16 @@ internal async Task NegotiateEncryption(SslMode sslMode, NpgsqlTimeout timeout, sslStream.AuthenticateAsClient(sslStreamOptions); _stream = sslStream; + sslStream = null; } - catch (Exception e) + catch (Exception e) when (e is not OperationCanceledException) { - sslStream.Dispose(); throw new NpgsqlException("Exception while performing SSL handshake", e); } + finally + { + sslStream?.Dispose(); + } ReadBuffer.Underlying = _stream; WriteBuffer.Underlying = _stream; @@ -1355,26 +1363,24 @@ async Task ConnectAsync(NpgsqlTimeout timeout, CancellationToken cancellationTok else { IPAddress[] ipAddresses = []; + 
using var combinedCts = timeout.IsSet ? CancellationTokenSource.CreateLinkedTokenSource(cancellationToken) : null; + combinedCts?.CancelAfter(timeout.CheckAndGetTimeLeft()); + var combinedToken = combinedCts?.Token ?? cancellationToken; try { - using var combinedCts = timeout.IsSet ? CancellationTokenSource.CreateLinkedTokenSource(cancellationToken) : null; - combinedCts?.CancelAfter(timeout.CheckAndGetTimeLeft()); - var combinedToken = combinedCts?.Token ?? cancellationToken; - try - { - ipAddresses = await Dns.GetHostAddressesAsync(Host, combinedToken).ConfigureAwait(false); - } - catch (OperationCanceledException) - { - cancellationToken.ThrowIfCancellationRequested(); - Debug.Assert(timeout.HasExpired); - ThrowHelper.ThrowNpgsqlExceptionWithInnerTimeoutException("The operation has timed out"); - } + ipAddresses = await Dns.GetHostAddressesAsync(Host, combinedToken).ConfigureAwait(false); + } + catch (OperationCanceledException) + { + cancellationToken.ThrowIfCancellationRequested(); + Debug.Assert(timeout.HasExpired); + ThrowHelper.ThrowNpgsqlExceptionWithInnerTimeoutException("The operation has timed out"); } catch (SocketException ex) { throw new NpgsqlException(ex.Message, ex); } + endpoints = IPAddressesToEndpoints(ipAddresses, Port); } From f66537e7c72ab923e60c25f4717eb9b2ff10088c Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Mon, 2 Feb 2026 13:13:51 +0300 Subject: [PATCH 688/761] Fix reusing NpgsqlBatch with auto prepare (#6433) Fixes #6432 --- src/Npgsql/NpgsqlBatchCommand.cs | 8 ++++++-- src/Npgsql/NpgsqlCommand.cs | 13 +++++++------ src/Npgsql/NpgsqlDataReader.cs | 2 -- test/Npgsql.Tests/AutoPrepareTests.cs | 27 +++++++++++++++++++++++++++ 4 files changed, 40 insertions(+), 10 deletions(-) diff --git a/src/Npgsql/NpgsqlBatchCommand.cs b/src/Npgsql/NpgsqlBatchCommand.cs index 17cec381b2..2812fdbead 100644 --- a/src/Npgsql/NpgsqlBatchCommand.cs +++ b/src/Npgsql/NpgsqlBatchCommand.cs @@ -172,7 +172,7 @@ internal PreparedStatement? 
PreparedStatement get => _preparedStatement is { State: PreparedState.Unprepared } ? _preparedStatement = null : _preparedStatement; - set => _preparedStatement = value; + private set => _preparedStatement = value; } PreparedStatement? _preparedStatement; @@ -274,7 +274,11 @@ internal void ApplyCommandComplete(CommandCompleteMessage msg) OID = msg.OID; } - internal void ResetPreparation() => ConnectorPreparedOn = null; + internal void ResetPreparation() + { + ConnectorPreparedOn = null; + PreparedStatement = null; + } internal void PopulateOutputParameters(NpgsqlDataReader reader, ILogger logger) { diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index ffbf86029f..8ddbb2e5fb 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -406,7 +406,12 @@ internal CommandState State } } - internal void ResetPreparation() => _connectorPreparedOn = null; + internal void ResetPreparation() + { + _connectorPreparedOn = null; + foreach (var s in InternalBatchCommands) + s.ResetPreparation(); + } #endregion State management @@ -873,7 +878,7 @@ async Task Unprepare(bool async, CancellationToken cancellationToken = default) if (!pStatement.IsExplicit) connector.PreparedStatementManager.AutoPrepared[pStatement.AutoPreparedSlotIndex] = null; - batchCommand.PreparedStatement = null; + batchCommand.ResetPreparation(); } } @@ -1441,8 +1446,6 @@ internal virtual async ValueTask ExecuteReader(bool async, Com { if (batchCommand.ConnectorPreparedOn != connector) { - foreach (var s in InternalBatchCommands) - s.ResetPreparation(); ResetPreparation(); goto case false; } @@ -1455,8 +1458,6 @@ internal virtual async ValueTask ExecuteReader(bool async, Com if (_connectorPreparedOn != connector) { // The command was prepared, but since then the connector has changed. Detach all prepared statements. 
- foreach (var s in InternalBatchCommands) - s.PreparedStatement = null; ResetPreparation(); goto case false; } diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 94499c14ae..55753dc7f6 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -559,8 +559,6 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo { preparedStatement.State = PreparedState.Invalidated; Command.ResetPreparation(); - foreach (var s in Command.InternalBatchCommands) - s.ResetPreparation(); } } diff --git a/test/Npgsql.Tests/AutoPrepareTests.cs b/test/Npgsql.Tests/AutoPrepareTests.cs index b35fe7c5d3..97a46ad277 100644 --- a/test/Npgsql.Tests/AutoPrepareTests.cs +++ b/test/Npgsql.Tests/AutoPrepareTests.cs @@ -620,6 +620,33 @@ public async Task Auto_prepared_statement_invalidation() Assert.DoesNotThrowAsync(() => command.ExecuteNonQueryAsync()); } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/6432")] + public async Task Reuse_batch_with_different_connectors() + { + await using var dataSource = CreateDataSource(csb => + { + csb.MaxAutoPrepare = 10; + csb.AutoPrepareMinUsages = 2; + }); + await using var batch = new NpgsqlBatch(); + batch.BatchCommands.Add(new NpgsqlBatchCommand("SELECT 1")); + await using (var connection = await dataSource.OpenConnectionAsync()) + { + batch.Connection = connection; + + for (var i = 0; i < 2; i++) + await batch.ExecuteNonQueryAsync(); + } + + dataSource.Clear(); + + await using (var connection = await dataSource.OpenConnectionAsync()) + { + batch.Connection = connection; + await batch.ExecuteNonQueryAsync(); + } + } + void DumpPreparedStatements(NpgsqlConnection conn) { using var cmd = new NpgsqlCommand("SELECT name,statement FROM pg_prepared_statements", conn); From 2328a2cd9ae23ada84c4ed6b6b22f497373438ab Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Mon, 2 Feb 2026 13:25:03 +0300 Subject: [PATCH 689/761] Do not clear pool while establishing connection if 
we'll retry it (#6431) Fixes #6427 --- src/Npgsql/Internal/NpgsqlConnector.cs | 10 +++++-- test/Npgsql.Tests/ConnectionTests.cs | 29 +++++++++++++++++++ test/Npgsql.Tests/Support/PgPostmasterMock.cs | 18 ++++++++++-- 3 files changed, 51 insertions(+), 6 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 0b4d49890d..20b28684d3 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -582,7 +582,7 @@ internal async Task Open(NpgsqlTimeout timeout, bool async, CancellationToken ca { if (activity is not null) NpgsqlActivitySource.SetException(activity, e); - Break(e); + Break(e, markHostAsOfflineOnConnecting: true); throw; } @@ -2429,14 +2429,16 @@ internal Exception UnexpectedMessageReceived(BackendMessageCode received) /// Note that fatal errors during the Open phase do *not* pass through here. /// /// The exception that caused the break. + /// Whether we treat host as down, even if we're still connecting to PostgreSQL instance. /// The exception given in for chaining calls. - internal Exception Break(Exception reason) + internal Exception Break(Exception reason, bool markHostAsOfflineOnConnecting = false) { Debug.Assert(!IsClosed); Monitor.Enter(SyncObj); - if (State == ConnectorState.Broken) + var state = State; + if (state == ConnectorState.Broken) { // We're already broken. // Exit SingleUseLock to unblock other threads (like cancellation). @@ -2471,7 +2473,9 @@ internal Exception Break(Exception reason) // Note we only set the cluster to offline and clear the pool if the connection is being broken (we're in this method), // *and* the exception indicates that the PG cluster really is down; the latter includes any IO/timeout issue, // but does not include e.g. authentication failure or timeouts with disabled cancellation. 
+ // We also do not treat host as down if we're still connecting, as we might retry without GSS/TLS if (reason is NpgsqlException { IsTransient: true } ne && + (state != ConnectorState.Connecting || markHostAsOfflineOnConnecting) && (ne.InnerException is not TimeoutException || Settings.CancellationTimeout != -1) || reason is PostgresException pe && PostgresErrorCodes.IsCriticalFailure(pe)) { diff --git a/test/Npgsql.Tests/ConnectionTests.cs b/test/Npgsql.Tests/ConnectionTests.cs index 106daae81f..f776e3bc4b 100644 --- a/test/Npgsql.Tests/ConnectionTests.cs +++ b/test/Npgsql.Tests/ConnectionTests.cs @@ -14,6 +14,7 @@ using System.Threading.Tasks; using Npgsql.Internal; using Npgsql.PostgresTypes; +using Npgsql.Tests.Support; using Npgsql.Util; using NpgsqlTypes; using NUnit.Framework; @@ -1594,6 +1595,34 @@ public async Task Sync_open_blocked_same_thread() } } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/6427")] + public async Task Gss_encryption_retry_does_not_clear_pool() + { + if (IsMultiplexing) + return; + + var csb = new NpgsqlConnectionStringBuilder(ConnectionString) + { + GssEncryptionMode = GssEncryptionMode.Prefer + }; + // Break connection on gss encryption request to force the client to create a new connection and retry again + // This emulates the behavior of older versions of PostgreSQL or its forks, like Supabase + await using var postmaster = PgPostmasterMock.Start(csb.ConnectionString, breakOnGssEncryptionRequest: true); + await using var dataSource = CreateDataSource(postmaster.ConnectionString); + + int processID; + await using (var conn = await dataSource.OpenConnectionAsync()) + { + processID = conn.ProcessID; + } + + // The second time we get a connection from the pool we should ge the exact same connection + await using (var conn = await dataSource.OpenConnectionAsync()) + { + Assert.That(conn.ProcessID, Is.EqualTo(processID)); + } + } + #region Physical connection initialization [Test] diff --git 
a/test/Npgsql.Tests/Support/PgPostmasterMock.cs b/test/Npgsql.Tests/Support/PgPostmasterMock.cs index 178de2d01d..d9a93531a1 100644 --- a/test/Npgsql.Tests/Support/PgPostmasterMock.cs +++ b/test/Npgsql.Tests/Support/PgPostmasterMock.cs @@ -29,6 +29,7 @@ class PgPostmasterMock : IAsyncDisposable readonly bool _completeCancellationImmediately; readonly string? _startupErrorCode; + readonly bool _breakOnGssEncryptionRequest; ChannelWriter> _pendingRequestsWriter { get; } ChannelReader> _pendingRequestsReader { get; } @@ -49,9 +50,10 @@ internal static PgPostmasterMock Start( string? connectionString = null, bool completeCancellationImmediately = true, MockState state = MockState.MultipleHostsDisabled, - string? startupErrorCode = null) + string? startupErrorCode = null, + bool breakOnGssEncryptionRequest = false) { - var mock = new PgPostmasterMock(connectionString, completeCancellationImmediately, state, startupErrorCode); + var mock = new PgPostmasterMock(connectionString, completeCancellationImmediately, state, startupErrorCode, breakOnGssEncryptionRequest); mock.AcceptClients(); return mock; } @@ -60,7 +62,8 @@ internal PgPostmasterMock( string? connectionString = null, bool completeCancellationImmediately = true, MockState state = MockState.MultipleHostsDisabled, - string? startupErrorCode = null) + string? 
startupErrorCode = null, + bool breakOnGssEncryptionRequest = false) { var pendingRequestsChannel = Channel.CreateUnbounded>(); _pendingRequestsReader = pendingRequestsChannel.Reader; @@ -71,6 +74,7 @@ internal PgPostmasterMock( _completeCancellationImmediately = completeCancellationImmediately; State = state; _startupErrorCode = startupErrorCode; + _breakOnGssEncryptionRequest = breakOnGssEncryptionRequest; _socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp); var endpoint = new IPEndPoint(IPAddress.Loopback, 0); @@ -151,6 +155,14 @@ async Task Accept(bool completeCancellationImmediat var request = readBuffer.ReadInt32(); if (request == GssRequest) { + if (_breakOnGssEncryptionRequest) + { + readBuffer.Dispose(); + writeBuffer.Dispose(); + await stream.DisposeAsync(); + return default; + } + writeBuffer.WriteByte((byte)'N'); await writeBuffer.Flush(async: true); From b4183e2cb6a0cf759e5e0f7d17a81102631ad929 Mon Sep 17 00:00:00 2001 From: KeltorHD <35000839+KeltorHD@users.noreply.github.com> Date: Sat, 7 Feb 2026 11:21:32 +0300 Subject: [PATCH 690/761] Fix pg9 (#6438) --- src/Npgsql/Schema/DbColumnSchemaGenerator.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Npgsql/Schema/DbColumnSchemaGenerator.cs b/src/Npgsql/Schema/DbColumnSchemaGenerator.cs index ed7afd822b..e41ede3101 100644 --- a/src/Npgsql/Schema/DbColumnSchemaGenerator.cs +++ b/src/Npgsql/Schema/DbColumnSchemaGenerator.cs @@ -39,7 +39,7 @@ CASE WHEN atthasdef THEN (SELECT pg_get_expr(adbin, cls.oid) FROM pg_attrdef WHE CASE WHEN ((cls.relkind = ANY (ARRAY['r'::""char"", 'p'::""char""])) OR ((cls.relkind = ANY (ARRAY['v'::""char"", 'f'::""char""])) AND pg_column_is_updatable((cls.oid)::regclass, attr.attnum, false))) - AND attr.attidentity NOT IN ('a') THEN 'true'::boolean + {(pgVersion.IsGreaterOrEqual(10) ? 
"AND attr.attidentity NOT IN ('a')" : "")} THEN 'true'::boolean ELSE 'false'::boolean END AS is_updatable, EXISTS ( From 0675b3ac2dff24b5afcd8a4996580fcdd36502d3 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Sat, 7 Feb 2026 09:49:19 +0100 Subject: [PATCH 691/761] Some SQL query cleanup (#6439) --- src/Npgsql/Schema/DbColumnSchemaGenerator.cs | 93 ++++++++++---------- 1 file changed, 48 insertions(+), 45 deletions(-) diff --git a/src/Npgsql/Schema/DbColumnSchemaGenerator.cs b/src/Npgsql/Schema/DbColumnSchemaGenerator.cs index e41ede3101..3abda51fae 100644 --- a/src/Npgsql/Schema/DbColumnSchemaGenerator.cs +++ b/src/Npgsql/Schema/DbColumnSchemaGenerator.cs @@ -31,66 +31,69 @@ internal DbColumnSchemaGenerator(NpgsqlConnection connection, RowDescriptionMess #region Columns queries static string GenerateColumnsQuery(Version pgVersion, string columnFieldFilter) => - $@"SELECT - typ.oid AS typoid, nspname, relname, attname, attrelid, attnum, attnotnull, - {(pgVersion.IsGreaterOrEqual(10) ? "attidentity != ''" : "FALSE")} AS isidentity, - CASE WHEN typ.typtype = 'd' THEN typ.typtypmod ELSE atttypmod END AS typmod, - CASE WHEN atthasdef THEN (SELECT pg_get_expr(adbin, cls.oid) FROM pg_attrdef WHERE adrelid = cls.oid AND adnum = attr.attnum) ELSE NULL END AS default, - CASE WHEN ((cls.relkind = ANY (ARRAY['r'::""char"", 'p'::""char""])) - OR ((cls.relkind = ANY (ARRAY['v'::""char"", 'f'::""char""])) - AND pg_column_is_updatable((cls.oid)::regclass, attr.attnum, false))) - {(pgVersion.IsGreaterOrEqual(10) ? "AND attr.attidentity NOT IN ('a')" : "")} THEN 'true'::boolean - ELSE 'false'::boolean - END AS is_updatable, - EXISTS ( - SELECT * FROM pg_index - WHERE pg_index.indrelid = cls.oid AND - pg_index.indisprimary AND - attnum = ANY (indkey) - ) AS isprimarykey, - EXISTS ( - SELECT * FROM pg_index - WHERE pg_index.indrelid = cls.oid AND - pg_index.indisunique AND - pg_index.{(pgVersion.IsGreaterOrEqual(11) ? 
"indnkeyatts" : "indnatts")} = 1 AND - attnum = pg_index.indkey[0] - ) AS isunique + $""" +SELECT + typ.oid AS typoid, nspname, relname, attname, attrelid, attnum, attnotnull, + {(pgVersion.IsGreaterOrEqual(10) ? "attidentity != ''" : "FALSE")} AS isidentity, + CASE WHEN typ.typtype = 'd' THEN typ.typtypmod ELSE atttypmod END AS typmod, + CASE WHEN atthasdef THEN (SELECT pg_get_expr(adbin, cls.oid) FROM pg_attrdef WHERE adrelid = cls.oid AND adnum = attr.attnum) ELSE NULL END AS default, + ((cls.relkind = ANY (ARRAY['r'::"char", 'p'::"char"])) + OR ((cls.relkind = ANY (ARRAY['v'::"char", 'f'::"char"])) + AND pg_column_is_updatable((cls.oid)::regclass, attr.attnum, false))) + {(pgVersion.IsGreaterOrEqual(10) ? "AND attr.attidentity NOT IN ('a')" : "")} + AS is_updatable, + EXISTS ( + SELECT * FROM pg_index + WHERE pg_index.indrelid = cls.oid AND + pg_index.indisprimary AND + attnum = ANY (indkey) + ) AS isprimarykey, + EXISTS ( + SELECT * FROM pg_index + WHERE pg_index.indrelid = cls.oid AND + pg_index.indisunique AND + pg_index.{(pgVersion.IsGreaterOrEqual(11) ? "indnkeyatts" : "indnatts")} = 1 AND + attnum = pg_index.indkey[0] + ) AS isunique FROM pg_attribute AS attr JOIN pg_type AS typ ON attr.atttypid = typ.oid JOIN pg_class AS cls ON cls.oid = attr.attrelid JOIN pg_namespace AS ns ON ns.oid = cls.relnamespace WHERE - atttypid <> 0 AND - relkind IN ('r', 'v', 'm') AND - NOT attisdropped AND - nspname NOT IN ('pg_catalog', 'information_schema') AND - attnum > 0 AND - ({columnFieldFilter}) -ORDER BY attnum"; + atttypid <> 0 AND + relkind IN ('r', 'v', 'm') AND + NOT attisdropped AND + nspname NOT IN ('pg_catalog', 'information_schema') AND + attnum > 0 AND + ({columnFieldFilter}) +ORDER BY attnum +"""; /// /// Stripped-down version of , mainly to support Amazon Redshift. 
/// static string GenerateOldColumnsQuery(string columnFieldFilter) => - $@"SELECT - typ.oid AS typoid, nspname, relname, attname, attrelid, attnum, attnotnull, - CASE WHEN typ.typtype = 'd' THEN typ.typtypmod ELSE atttypmod END AS typmod, - CASE WHEN atthasdef THEN (SELECT pg_get_expr(adbin, cls.oid) FROM pg_attrdef WHERE adrelid = cls.oid AND adnum = attr.attnum) ELSE NULL END AS default, - TRUE AS is_updatable, /* Supported only since PG 8.2 */ - FALSE AS isprimarykey, /* Can't do ANY() on pg_index.indkey which is int2vector */ - FALSE AS isunique /* Can't do ANY() on pg_index.indkey which is int2vector */ + $""" +SELECT + typ.oid AS typoid, nspname, relname, attname, attrelid, attnum, attnotnull, + CASE WHEN typ.typtype = 'd' THEN typ.typtypmod ELSE atttypmod END AS typmod, + CASE WHEN atthasdef THEN (SELECT pg_get_expr(adbin, cls.oid) FROM pg_attrdef WHERE adrelid = cls.oid AND adnum = attr.attnum) ELSE NULL END AS default, + TRUE AS is_updatable, /* Supported only since PG 8.2 */ + FALSE AS isprimarykey, /* Can't do ANY() on pg_index.indkey which is int2vector */ + FALSE AS isunique /* Can't do ANY() on pg_index.indkey which is int2vector */ FROM pg_attribute AS attr JOIN pg_type AS typ ON attr.atttypid = typ.oid JOIN pg_class AS cls ON cls.oid = attr.attrelid JOIN pg_namespace AS ns ON ns.oid = cls.relnamespace WHERE - atttypid <> 0 AND - relkind IN ('r', 'v', 'm') AND - NOT attisdropped AND - nspname NOT IN ('pg_catalog', 'information_schema') AND - attnum > 0 AND - ({columnFieldFilter}) -ORDER BY attnum"; + atttypid <> 0 AND + relkind IN ('r', 'v', 'm') AND + NOT attisdropped AND + nspname NOT IN ('pg_catalog', 'information_schema') AND + attnum > 0 AND + ({columnFieldFilter}) +ORDER BY attnum +"""; #endregion Column queries From 792a2735296accd24b3cc0d7cfa3204087f39d6d Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Mon, 9 Feb 2026 14:04:00 +0100 Subject: [PATCH 692/761] Reset connection when closing within TransactionScope (#6424) Fixes #3735 --- 
src/Npgsql/Internal/NpgsqlConnector.cs | 35 +++++++++++++++++++++ src/Npgsql/NpgsqlConnection.cs | 12 +++---- src/Npgsql/NpgsqlDataSource.cs | 1 - test/Npgsql.Tests/MultipleHostsTests.cs | 2 ++ test/Npgsql.Tests/SystemTransactionTests.cs | 28 +++++++++++++++++ 5 files changed, 70 insertions(+), 8 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 20b28684d3..347c0e0d52 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -2644,8 +2644,14 @@ void Cleanup() _certificates = null; } + [MemberNotNull(nameof(_resetWithoutDeallocateMessage))] void GenerateResetMessage() { + // Generate a reset message that resets connection state without using DISCARD ALL. + // This is used in two scenarios: + // 1. When closing a pooled connection that has prepared statements (DISCARD ALL would deallocate them) + // 2. When closing a connection within an enlisted System.Transactions transaction (DISCARD ALL cannot + // run inside a transaction block, but its component commands can) var sb = new StringBuilder("SET SESSION AUTHORIZATION DEFAULT;RESET ALL;"); _resetWithoutDeallocateResponseCount = 2; if (DatabaseInfo.SupportsCloseAll) @@ -2753,6 +2759,35 @@ internal async Task Reset(bool async) } } + /// + /// Called when a pooled connection with an enlisted System.Transactions transaction is closed. + /// Since we're inside a transaction block, we cannot send DISCARD ALL; + /// we prepend a reset message that only includes commands that can safely run within a transaction. + /// + internal void ResetWithinEnlistedTransaction() + { + // We start user action in case a keeplive happens concurrently, or a concurrent user command (bug) + using var _ = StartUserAction(attemptPgCancellation: false); + + // Our buffer may contain unsent prepended messages, so clear it out. 
+ WriteBuffer.Clear(); + PendingPrependedResponses = 0; + + ResetReadBuffer(); + + if (_sendResetOnClose) + { + if (_resetWithoutDeallocateMessage is null) + { + GenerateResetMessage(); + } + + PrependInternalMessage(_resetWithoutDeallocateMessage, _resetWithoutDeallocateResponseCount); + } + + DataReader.UnbindIfNecessary(); + } + /// /// The connector may have allocated an oversize read buffer, to hold big rows in non-sequential reading. /// This switches us back to the original one and returns the buffer to . diff --git a/src/Npgsql/NpgsqlConnection.cs b/src/Npgsql/NpgsqlConnection.cs index 0621c91193..25319858d9 100644 --- a/src/Npgsql/NpgsqlConnection.cs +++ b/src/Npgsql/NpgsqlConnection.cs @@ -5,7 +5,6 @@ using System.Data.Common; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; -using System.IO; using System.Net.Security; using System.Net.Sockets; using System.Runtime.CompilerServices; @@ -844,14 +843,13 @@ async Task CloseAsync(bool async) if (EnlistedTransaction != null) { - // A System.Transactions transaction is still in progress - - connector.Connection = null; - - // Close the connection and disconnect it from the resource manager but leave the + // A System.Transactions transaction is still in progress. + // Close the connection and disconnect it from the resource manager and reset the connector, but leave the // connector in an enlisted pending list in the data source. If another connection is opened within // the same transaction scope, we will reuse this connector to avoid escalating to a distributed - // transaction + // transaction. 
+ connector.ResetWithinEnlistedTransaction(); + connector.Connection = null; _dataSource?.AddPendingEnlistedConnector(connector, EnlistedTransaction); EnlistedTransaction = null; diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index dabbc978a5..e9311a16c4 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -48,7 +48,6 @@ internal sealed class ReloadableState(NpgsqlDatabaseInfo databaseInfo, PgSeriali public IDbTypeResolver? DbTypeResolver { get; } = dbTypeResolver; } - internal TransportSecurityHandler TransportSecurityHandler { get; } internal Action? SslClientAuthenticationOptionsCallback { get; } diff --git a/test/Npgsql.Tests/MultipleHostsTests.cs b/test/Npgsql.Tests/MultipleHostsTests.cs index b357417bb1..94ba45dd00 100644 --- a/test/Npgsql.Tests/MultipleHostsTests.cs +++ b/test/Npgsql.Tests/MultipleHostsTests.cs @@ -873,6 +873,8 @@ public async Task Transaction_enlist_reuses_connection(string targetSessionAttri TargetSessionAttributes = targetSessionAttributes, ServerCompatibilityMode = ServerCompatibilityMode.NoTypeLoading, MaxPoolSize = 10, + // Our mock PG server doesn't know how to handle the reset messages + NoResetOnClose = true, }; using var _ = CreateTempPool(csb, out var connString); diff --git a/test/Npgsql.Tests/SystemTransactionTests.cs b/test/Npgsql.Tests/SystemTransactionTests.cs index c5bced6bfa..2363fb170a 100644 --- a/test/Npgsql.Tests/SystemTransactionTests.cs +++ b/test/Npgsql.Tests/SystemTransactionTests.cs @@ -258,6 +258,34 @@ public void Reuse_connection_rollback() AssertNumberOfRows(0, tableName); } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/3735")] + public void Reuse_connection_resets_temp_tables() + { + // When a connection is closed inside a TransactionScope and then reopened, + // temp tables should be discarded. 
+ using var dataSource = CreateDataSource(csb => csb.Enlist = true); + using (new TransactionScope()) + using (var conn = dataSource.CreateConnection()) + { + conn.Open(); + var processId = conn.ProcessID; + + // Create a temp table + conn.ExecuteNonQuery("CREATE TEMP TABLE temp_test (id INT)"); + + conn.Close(); + + // Reopen - should get the same physical connection but with reset state + conn.Open(); + Assert.That(conn.ProcessID, Is.EqualTo(processId), "Should reuse the same physical connection"); + + // The temp table should have been discarded + Assert.That(() => conn.ExecuteScalar("SELECT COUNT(*) FROM temp_test"), + Throws.Exception.TypeOf() + .With.Property(nameof(PostgresException.SqlState)).EqualTo(PostgresErrorCodes.UndefinedTable)); + } + } + [Test, Ignore("Timeout doesn't seem to fire on .NET Core / Linux")] public void Timeout_triggers_rollback_while_busy() { From aed30af7895487e42136c9a8735bf4ad21de40fe Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Tue, 10 Feb 2026 19:32:29 +0100 Subject: [PATCH 693/761] Fix per instance nullability converter resolver info code (#6435) --- src/Npgsql/Internal/TypeInfoMapping.cs | 2 +- test/Npgsql.Tests/Types/ArrayTests.cs | 106 ++++++++++++++++++++++- test/Npgsql.Tests/Types/DateTimeTests.cs | 20 ++++- 3 files changed, 123 insertions(+), 5 deletions(-) diff --git a/src/Npgsql/Internal/TypeInfoMapping.cs b/src/Npgsql/Internal/TypeInfoMapping.cs index 64b14dff73..1fc028153f 100644 --- a/src/Npgsql/Internal/TypeInfoMapping.cs +++ b/src/Npgsql/Internal/TypeInfoMapping.cs @@ -640,7 +640,7 @@ PgTypeInfo CreateComposedPerInstance(PgTypeInfo innerTypeInfo, PgTypeInfo nullab (PgResolverTypeInfo)nullableInnerTypeInfo); return new PgResolverTypeInfo(innerTypeInfo.Options, resolver, - innerTypeInfo.Options.GetCanonicalTypeId(new DataTypeName(dataTypeName))) { SupportsWriting = false }; + innerTypeInfo.Options.GetCanonicalTypeId(new DataTypeName(dataTypeName)), unboxedType: typeof(Array)) { SupportsWriting = false }; } } diff 
--git a/test/Npgsql.Tests/Types/ArrayTests.cs b/test/Npgsql.Tests/Types/ArrayTests.cs index 7b4e45e1f4..59ef2ceffe 100644 --- a/test/Npgsql.Tests/Types/ArrayTests.cs +++ b/test/Npgsql.Tests/Types/ArrayTests.cs @@ -3,6 +3,7 @@ using System.Collections.Generic; using System.Collections.Immutable; using System.Data; +using System.Diagnostics; using System.Text; using System.Threading.Tasks; using Npgsql.Internal.Converters; @@ -141,7 +142,110 @@ public async Task Value_type_array_nullabilities(ArrayNullabilityMode mode) Assert.That(value, Is.EqualTo(new int?[,]{{5, null},{6, 7}})); break; default: - throw new ArgumentOutOfRangeException(nameof(mode), mode, null); + throw new UnreachableException($"Unknown case {mode}"); + } + } + + [Test, Description("Checks that PG arrays containing nulls are returned as set via ValueTypeArrayMode.")] + [TestCase(ArrayNullabilityMode.Always)] + [TestCase(ArrayNullabilityMode.Never)] + [TestCase(ArrayNullabilityMode.PerInstance)] + public async Task Value_type_array_nullabilities_converter_resolver(ArrayNullabilityMode mode) + { + await using var dataSource = CreateDataSource(csb => + { + csb.ArrayNullabilityMode = mode; + csb.Timezone = "Europe/Berlin"; + }); + await using var conn = await dataSource.OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand( +""" +SELECT onedim, twodim FROM (VALUES +('{"1998-04-12 15:26:38+02"}'::timestamptz[],'{{"1998-04-12 15:26:38+02"},{"1998-04-13 15:26:38+02"}}'::timestamptz[][]), +('{"1998-04-14 15:26:38+02", NULL}'::timestamptz[],'{{"1998-04-14 15:26:38+02", NULL},{"1998-04-15 15:26:38+02", "1998-04-16 15:26:38+02"}}'::timestamptz[][])) AS x(onedim,twodim) +""", conn); + await using var reader = await cmd.ExecuteReaderAsync(); + + switch (mode) + { + case ArrayNullabilityMode.Never: + reader.Read(); + var value = reader.GetValue(0); + Assert.That(reader.GetFieldType(0), Is.EqualTo(typeof(Array))); + Assert.That(value.GetType(), Is.EqualTo(typeof(DateTime[]))); + Assert.That(value, 
Is.EqualTo(new []{new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc)})); + value = reader.GetValue(1); + Assert.That(reader.GetFieldType(1), Is.EqualTo(typeof(Array))); + Assert.That(value.GetType(), Is.EqualTo(typeof(DateTime[,]))); + Assert.That(value, Is.EqualTo(new [,] + { + { new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc) }, + { new DateTime(1998, 4, 13, 13, 26, 38, DateTimeKind.Utc) } + })); + reader.Read(); + Assert.That(reader.GetFieldType(0), Is.EqualTo(typeof(Array))); + Assert.That(() => reader.GetValue(0), Throws.Exception.TypeOf()); + Assert.That(reader.GetFieldType(1), Is.EqualTo(typeof(Array))); + Assert.That(() => reader.GetValue(1), Throws.Exception.TypeOf()); + break; + case ArrayNullabilityMode.Always: + reader.Read(); + value = reader.GetValue(0); + Assert.That(reader.GetFieldType(0), Is.EqualTo(typeof(Array))); + Assert.That(value.GetType(), Is.EqualTo(typeof(DateTime?[]))); + Assert.That(value, Is.EqualTo(new DateTime?[]{new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc)})); + value = reader.GetValue(1); + Assert.That(reader.GetFieldType(1), Is.EqualTo(typeof(Array))); + Assert.That(value.GetType(), Is.EqualTo(typeof(DateTime?[,]))); + Assert.That(value, Is.EqualTo(new DateTime?[,] + { + { new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc) }, + { new DateTime(1998, 4, 13, 13, 26, 38, DateTimeKind.Utc) } + })); + reader.Read(); + value = reader.GetValue(0); + Assert.That(reader.GetFieldType(0), Is.EqualTo(typeof(Array))); + Assert.That(value.GetType(), Is.EqualTo(typeof(DateTime?[]))); + Assert.That(value, Is.EqualTo(new DateTime?[]{ new DateTime(1998, 4, 14, 13, 26, 38, DateTimeKind.Utc), null })); + value = reader.GetValue(1); + Assert.That(reader.GetFieldType(1), Is.EqualTo(typeof(Array))); + Assert.That(value.GetType(), Is.EqualTo(typeof(DateTime?[,]))); + Assert.That(value, Is.EqualTo(new DateTime?[,] + { + { new DateTime(1998, 4, 14, 13, 26, 38, DateTimeKind.Utc), null }, + { new DateTime(1998, 4, 15, 13, 26, 38, 
DateTimeKind.Utc), new DateTime(1998, 4, 16, 13, 26, 38, DateTimeKind.Utc) } + })); + break; + case ArrayNullabilityMode.PerInstance: + reader.Read(); + value = reader.GetValue(0); + Assert.That(reader.GetFieldType(0), Is.EqualTo(typeof(Array))); + Assert.That(value.GetType(), Is.EqualTo(typeof(DateTime[]))); + Assert.That(value, Is.EqualTo(new []{new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc)})); + value = reader.GetValue(1); + Assert.That(reader.GetFieldType(1), Is.EqualTo(typeof(Array))); + Assert.That(value.GetType(), Is.EqualTo(typeof(DateTime[,]))); + Assert.That(value, Is.EqualTo(new [,] + { + { new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc) }, + { new DateTime(1998, 4, 13, 13, 26, 38, DateTimeKind.Utc) } + })); + reader.Read(); + value = reader.GetValue(0); + Assert.That(reader.GetFieldType(0), Is.EqualTo(typeof(Array))); + Assert.That(value.GetType(), Is.EqualTo(typeof(DateTime?[]))); + Assert.That(value, Is.EqualTo(new DateTime?[]{ new DateTime(1998, 4, 14, 13, 26, 38, DateTimeKind.Utc), null })); + value = reader.GetValue(1); + Assert.That(reader.GetFieldType(1), Is.EqualTo(typeof(Array))); + Assert.That(value.GetType(), Is.EqualTo(typeof(DateTime?[,]))); + Assert.That(value, Is.EqualTo(new DateTime?[,] + { + { new DateTime(1998, 4, 14, 13, 26, 38, DateTimeKind.Utc), null }, + { new DateTime(1998, 4, 15, 13, 26, 38, DateTimeKind.Utc), new DateTime(1998, 4, 16, 13, 26, 38, DateTimeKind.Utc) } + })); + break; + default: + throw new UnreachableException($"Unknown case {mode}"); } } diff --git a/test/Npgsql.Tests/Types/DateTimeTests.cs b/test/Npgsql.Tests/Types/DateTimeTests.cs index 99736fb104..d9bed9baac 100644 --- a/test/Npgsql.Tests/Types/DateTimeTests.cs +++ b/test/Npgsql.Tests/Types/DateTimeTests.cs @@ -454,15 +454,29 @@ public void NpgsqlParameterNpgsqlDbType_is_value_dependent_timestamp_or_timestam [Test] public async Task Array_of_nullable_timestamptz() - => await AssertType( + { + await using var datasource = CreateDataSource(csb 
=> + { + csb.ArrayNullabilityMode = ArrayNullabilityMode.PerInstance; + csb.Timezone = "Europe/Berlin"; + }); + await AssertType(datasource, new DateTime?[] { new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), null }, @"{""1998-04-12 15:26:38+02"",NULL}", - "timestamp with time zone[]", - isDefault: false); + "timestamp with time zone[]"); + + await AssertType(datasource, + new DateTime?[] + { + new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), + }, + @"{""1998-04-12 15:26:38+02""}", + "timestamp with time zone[]", isDefaultForReading: false); // we write DateTime?[], but will read DateTime[] from GetValue + } #endregion From a8c9b0dcad23d7b9ad17105b3fd42ab4771be8ca Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Feb 2026 23:13:15 +0100 Subject: [PATCH 694/761] Bump Microsoft.SourceLink.GitHub from 10.0.102 to 10.0.103 (#6445) --- updated-dependencies: - dependency-name: Microsoft.SourceLink.GitHub dependency-version: 10.0.103 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 140a32607a..a9df51c90f 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -21,7 +21,7 @@ - + From 329350a9bebd67428733113f7c8c127c630ac66b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Feb 2026 23:35:16 +0100 Subject: [PATCH 695/761] Bump Microsoft.Extensions.DependencyInjection.Abstractions from 10.0.2 to 10.0.3 (#6441) --- Directory.Packages.props | 2 +- src/Npgsql/Npgsql.csproj | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index a9df51c90f..be27332022 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -1,7 +1,7 @@ 10.0.2 - 10.0.2 + 10.0.3 10.0.2 diff --git a/src/Npgsql/Npgsql.csproj b/src/Npgsql/Npgsql.csproj index d06e91fcc8..a4e47f12cf 100644 --- a/src/Npgsql/Npgsql.csproj +++ b/src/Npgsql/Npgsql.csproj @@ -33,6 +33,7 @@ + True True From 85e23d7394c371c0c29b99537e9e0a10b55a0226 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Wed, 11 Feb 2026 07:41:02 +0100 Subject: [PATCH 696/761] Bump extensions versions to 10.0.3 (#6448) --- Directory.Packages.props | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index be27332022..8a4e80616b 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -1,11 +1,11 @@ - 10.0.2 + 10.0.3 10.0.3 - 10.0.2 - 10.0.2 + 10.0.3 + 10.0.3 From 15dd093055562a0842314c4fd794417491135a19 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 19 Feb 2026 08:09:27 +0100 Subject: [PATCH 697/761] Bump Scriban.Signed from 6.5.2 to 6.5.3 (#6455) --- 
Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 8a4e80616b..735a8607b3 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -25,7 +25,7 @@ - + From 594b5d333915141528425645eb151d8435921b48 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Mon, 23 Feb 2026 02:10:16 +0100 Subject: [PATCH 698/761] [Tests] Fully rework type mapping assertion helpers (#6314) --- src/Npgsql/NpgsqlTypes/NpgsqlDbType.cs | 6 - test/Npgsql.PluginTests/JsonNetTests.cs | 50 +- .../Npgsql.PluginTests/LegacyNodaTimeTests.cs | 22 +- .../NetTopologySuiteTests.cs | 2 +- .../NodaTimeInfinityTests.cs | 7 +- test/Npgsql.PluginTests/NodaTimeTests.cs | 206 +++--- test/Npgsql.Tests/CommandParameterTests.cs | 6 +- test/Npgsql.Tests/CommandTests.cs | 4 +- test/Npgsql.Tests/GlobalTypeMapperTests.cs | 34 +- test/Npgsql.Tests/LargeObjectTests.cs | 2 +- test/Npgsql.Tests/Support/TestBase.cs | 675 +++++++++++------- test/Npgsql.Tests/Types/ArrayTests.cs | 8 +- test/Npgsql.Tests/Types/BitStringTests.cs | 11 +- test/Npgsql.Tests/Types/ByteaTests.cs | 33 +- test/Npgsql.Tests/Types/CompositeTests.cs | 114 ++- test/Npgsql.Tests/Types/CubeTests.cs | 18 +- .../Types/DateTimeInfinityTests.cs | 29 +- test/Npgsql.Tests/Types/DateTimeTests.cs | 102 ++- test/Npgsql.Tests/Types/DomainTests.cs | 3 +- test/Npgsql.Tests/Types/EnumTests.cs | 37 +- .../Npgsql.Tests/Types/FullTextSearchTests.cs | 4 +- test/Npgsql.Tests/Types/HstoreTests.cs | 8 +- test/Npgsql.Tests/Types/JsonDynamicTests.cs | 49 +- test/Npgsql.Tests/Types/JsonPathTests.cs | 4 +- test/Npgsql.Tests/Types/JsonTests.cs | 69 +- test/Npgsql.Tests/Types/LTreeTests.cs | 35 +- .../Npgsql.Tests/Types/LegacyDateTimeTests.cs | 25 +- test/Npgsql.Tests/Types/MiscTypeTests.cs | 27 +- test/Npgsql.Tests/Types/MoneyTests.cs | 4 +- test/Npgsql.Tests/Types/MultirangeTests.cs | 29 +- test/Npgsql.Tests/Types/NetworkTypeTests.cs | 14 +- 
test/Npgsql.Tests/Types/NumericTests.cs | 25 +- test/Npgsql.Tests/Types/NumericTypeTests.cs | 87 ++- test/Npgsql.Tests/Types/RangeTests.cs | 14 +- test/Npgsql.Tests/Types/TextTests.cs | 34 +- 35 files changed, 969 insertions(+), 828 deletions(-) diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlDbType.cs b/src/Npgsql/NpgsqlTypes/NpgsqlDbType.cs index f9b952e479..ab8d1480f8 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlDbType.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlDbType.cs @@ -633,10 +633,6 @@ public static DbType ToDbType(this NpgsqlDbType npgsqlDbType) NpgsqlDbType.Char => DbType.String, NpgsqlDbType.Name => DbType.String, NpgsqlDbType.Citext => DbType.String, - NpgsqlDbType.Refcursor => DbType.Object, - NpgsqlDbType.Jsonb => DbType.Object, - NpgsqlDbType.Json => DbType.Object, - NpgsqlDbType.JsonPath => DbType.Object, // Date/time types NpgsqlDbType.Timestamp => LegacyTimestampBehavior ? DbType.DateTime : DbType.DateTime2, @@ -649,8 +645,6 @@ public static DbType ToDbType(this NpgsqlDbType npgsqlDbType) NpgsqlDbType.Boolean => DbType.Boolean, NpgsqlDbType.Uuid => DbType.Guid, - NpgsqlDbType.Unknown => DbType.Object, - _ => DbType.Object }; diff --git a/test/Npgsql.PluginTests/JsonNetTests.cs b/test/Npgsql.PluginTests/JsonNetTests.cs index c02bc3aa91..f20704e52f 100644 --- a/test/Npgsql.PluginTests/JsonNetTests.cs +++ b/test/Npgsql.PluginTests/JsonNetTests.cs @@ -1,10 +1,9 @@ using Newtonsoft.Json; using Newtonsoft.Json.Linq; using Npgsql.Tests; -using NpgsqlTypes; using NUnit.Framework; using System; -using System.Text; +using System.Data; using System.Threading.Tasks; // ReSharper disable AccessToModifiedClosure @@ -25,9 +24,8 @@ public Task Roundtrip_object() JsonDataSource, new Foo { Bar = 8 }, IsJsonb ? 
@"{""Bar"": 8}" : @"{""Bar"":8}", - dataTypeName, - isDefault: false, - isDataTypeInferredFromValue: false); + dataTypeName, dataTypeInference: DataTypeInference.Nothing, + valueTypeEqualsFieldType: false); [Test, IssueLink("https://github.com/npgsql/npgsql/issues/3085")] public Task Roundtrip_string() @@ -35,9 +33,8 @@ public Task Roundtrip_string() JsonDataSource, @"{""p"": 1}", @"{""p"": 1}", - dataTypeName, - isDefaultForWriting: false, - isDataTypeInferredFromValue: false); + dataTypeName, dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.String)); [Test, IssueLink("https://github.com/npgsql/npgsql/issues/3085")] public Task Roundtrip_char_array() @@ -45,19 +42,17 @@ public Task Roundtrip_char_array() JsonDataSource, @"{""p"": 1}".ToCharArray(), @"{""p"": 1}", - dataTypeName, - isDefault: false, - isDataTypeInferredFromValue: false); + dataTypeName, dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.String), valueTypeEqualsFieldType: false); [Test, IssueLink("https://github.com/npgsql/npgsql/issues/3085")] public Task Roundtrip_byte_array() => AssertType( JsonDataSource, - Encoding.ASCII.GetBytes(@"{""p"": 1}"), + @"{""p"": 1}"u8.ToArray(), @"{""p"": 1}", - dataTypeName, - isDefault: false, - isDataTypeInferredFromValue: false); + dataTypeName, dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.Binary), valueTypeEqualsFieldType: false); [Test] public Task Roundtrip_JObject() @@ -65,11 +60,8 @@ public Task Roundtrip_JObject() JsonDataSource, new JObject { ["Bar"] = 8 }, IsJsonb ? 
@"{""Bar"": 8}" : @"{""Bar"":8}", - dataTypeName, - // By default we map JObject to jsonb - isDefaultForWriting: IsJsonb, - isDefaultForReading: false, - isDataTypeInferredFromValue: false); + dataTypeName, dataTypeInference: DataTypeInference.Nothing, + valueTypeEqualsFieldType: false); [Test] public Task Roundtrip_JArray() @@ -77,11 +69,8 @@ public Task Roundtrip_JArray() JsonDataSource, new JArray(new[] { 1, 2, 3 }), IsJsonb ? "[1, 2, 3]" : "[1,2,3]", - dataTypeName, - // By default we map JArray to jsonb - isDefaultForWriting: IsJsonb, - isDefaultForReading: false, - isDataTypeInferredFromValue: false); + dataTypeName, dataTypeInference: DataTypeInference.Nothing, + valueTypeEqualsFieldType: false); [Test] public async Task Deserialize_failure() @@ -112,8 +101,7 @@ await AssertType( new Foo { Bar = 8 }, IsJsonb ? @"{""Bar"": 8}" : @"{""Bar"":8}", dataTypeName, - isDefaultForReading: false, - isDataTypeInferredFromValue: false); + dataTypeInference: DataTypeInference.Nothing, valueTypeEqualsFieldType: false); } [Test] @@ -130,9 +118,8 @@ await AssertType( dataSource, new[] { 1, 2, 3 }, IsJsonb ? "[1, 2, 3]" : "[1,2,3]", - dataTypeName, - isDefaultForReading: false, - isDataTypeInferredFromValue: false); + dataTypeName, dataTypeInference: DataTypeInference.Mismatch, + valueTypeEqualsFieldType: false, skipArrayCheck: true); // there is no value only mapping for int[][] } class DateWrapper @@ -159,8 +146,7 @@ await AssertType( new DateWrapper { Date = new DateTime(2018, 04, 20) }, IsJsonb ? 
"{\"Date\": \"The 20th of April, 2018\"}" : "{\"Date\":\"The 20th of April, 2018\"}", dataTypeName, - isDefault: false, - isDataTypeInferredFromValue: false); + dataTypeInference: DataTypeInference.Nothing, valueTypeEqualsFieldType: false); } [Test] diff --git a/test/Npgsql.PluginTests/LegacyNodaTimeTests.cs b/test/Npgsql.PluginTests/LegacyNodaTimeTests.cs index 9df00dcdb1..ff177b38a4 100644 --- a/test/Npgsql.PluginTests/LegacyNodaTimeTests.cs +++ b/test/Npgsql.PluginTests/LegacyNodaTimeTests.cs @@ -18,38 +18,32 @@ public async Task Timestamp_as_ZonedDateTime() => await AssertType( new LocalDateTime(1998, 4, 12, 13, 26, 38, 789).InZoneLeniently(DateTimeZoneProviders.Tzdb[TimeZone]), "1998-04-12 13:26:38.789+02", - "timestamp with time zone", - DbType.DateTimeOffset, - isDataTypeInferredFromValue: false, isDefault: false); + "timestamp with time zone", dataTypeInference: DataTypeInference.Nothing, + dbType: new(DbType.DateTimeOffset, DbType.Object), valueTypeEqualsFieldType: false); [Test] public Task Timestamp_as_Instant() => AssertType( new LocalDateTime(1998, 4, 12, 13, 26, 38, 789).InUtc().ToInstant(), "1998-04-12 13:26:38.789", - "timestamp without time zone", - DbType.DateTime, - isDataTypeInferredFromValue: false); + "timestamp without time zone", dataTypeInference: DataTypeInference.Nothing, + dbType: new(DbType.DateTime, DbType.Object)); [Test] public Task Timestamp_as_LocalDateTime() => AssertType( new LocalDateTime(1998, 4, 12, 13, 26, 38, 789), "1998-04-12 13:26:38.789", - "timestamp without time zone", - DbType.DateTime, - isDefaultForReading: false, - isDataTypeInferredFromValue: false); + "timestamp without time zone", dataTypeInference: DataTypeInference.Nothing, + dbType: new(DbType.DateTime, DbType.Object), valueTypeEqualsFieldType: false); [Test] public Task Timestamptz_as_Instant() => AssertType( new LocalDateTime(1998, 4, 12, 13, 26, 38, 789).InUtc().ToInstant(), "1998-04-12 15:26:38.789+02", - "timestamp with time zone", - 
DbType.DateTimeOffset, - isDefaultForWriting: false, - isDataTypeInferredFromValue: false); + "timestamp with time zone", dataTypeInference: DataTypeInference.Nothing, + dbType: new(DbType.DateTimeOffset, DbType.Object)); [Test] public async Task Timestamptz_ZonedDateTime_infinite_values_are_not_supported() diff --git a/test/Npgsql.PluginTests/NetTopologySuiteTests.cs b/test/Npgsql.PluginTests/NetTopologySuiteTests.cs index 34f701e4a4..54a1a91026 100644 --- a/test/Npgsql.PluginTests/NetTopologySuiteTests.cs +++ b/test/Npgsql.PluginTests/NetTopologySuiteTests.cs @@ -160,7 +160,7 @@ await AssertType( new Geometry[] { point }, '{' + GetSqlLiteral(point) + '}', "geometry[]", - isDataTypeInferredFromValue: false); + dataTypeInference: DataTypeInference.Nothing); } [Test] diff --git a/test/Npgsql.PluginTests/NodaTimeInfinityTests.cs b/test/Npgsql.PluginTests/NodaTimeInfinityTests.cs index 9fe78a989e..52068898d2 100644 --- a/test/Npgsql.PluginTests/NodaTimeInfinityTests.cs +++ b/test/Npgsql.PluginTests/NodaTimeInfinityTests.cs @@ -26,13 +26,12 @@ await AssertType( new DateInterval(LocalDate.MinIsoValue, LocalDate.MaxIsoValue), "[-infinity,infinity]", "daterange", - isDataTypeInferredFromValue: false, skipArrayCheck: true); // NpgsqlRange[] is mapped to multirange by default, not array; test separately + dataTypeInference: DataTypeInference.Nothing, skipArrayCheck: true); // NpgsqlRange[] is mapped to multirange by default, not array; test separately await AssertType( new [] {new DateInterval(LocalDate.MinIsoValue, LocalDate.MaxIsoValue)}, """{"[-infinity,infinity]"}""", - "daterange[]", - isDefault: false, skipArrayCheck: true); + "daterange[]", dataTypeInference: DataTypeInference.Nothing, skipArrayCheck: true); await using var conn = await OpenConnectionAsync(); if (conn.PostgreSqlVersion < new Version(14, 0)) @@ -41,7 +40,7 @@ await AssertType( await AssertType( new [] {new DateInterval(LocalDate.MinIsoValue, LocalDate.MaxIsoValue)}, """{[-infinity,infinity]}""", - 
"datemultirange", isDataTypeInferredFromValue: false, skipArrayCheck: true); + "datemultirange", dataTypeInference: DataTypeInference.Nothing, skipArrayCheck: true); } [Test] diff --git a/test/Npgsql.PluginTests/NodaTimeTests.cs b/test/Npgsql.PluginTests/NodaTimeTests.cs index c348660331..557e841b5e 100644 --- a/test/Npgsql.PluginTests/NodaTimeTests.cs +++ b/test/Npgsql.PluginTests/NodaTimeTests.cs @@ -29,8 +29,9 @@ public class NodaTimeTests : MultiplexingTestBase, IDisposable [Test, TestCaseSource(nameof(TimestampValues))] public Task Timestamp_as_LocalDateTime(LocalDateTime localDateTime, string sqlLiteral) - => AssertType(localDateTime, sqlLiteral, "timestamp without time zone", DbType.DateTime2, - isDataTypeInferredFromValue: false); + => AssertType(localDateTime, sqlLiteral, + "timestamp without time zone", dataTypeInference: DataTypeInference.Nothing, + dbType: new(DbType.DateTime2, DbType.Object)); [Test] public Task Timestamp_as_unspecified_DateTime() @@ -38,17 +39,15 @@ public Task Timestamp_as_unspecified_DateTime() new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Unspecified), "1998-04-12 13:26:38", "timestamp without time zone", - DbType.DateTime2, - isDefaultForReading: false); + dbType: DbType.DateTime2, valueTypeEqualsFieldType: false); [Test] public Task Timestamp_as_long() => AssertType( -54297202000000, "1998-04-12 13:26:38", - "timestamp without time zone", - DbType.DateTime2, - isDefault: false); + "timestamp without time zone", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.DateTime2, DbType.Int64), valueTypeEqualsFieldType: false); [Test] public Task Timestamp_cannot_use_as_Instant() @@ -91,7 +90,7 @@ await AssertType( new(1998, 4, 12, 15, 26, 38)), """["1998-04-12 13:26:38","1998-04-12 15:26:38"]""", "tsrange", - isDataTypeInferredFromValue: false, skipArrayCheck: true); // NpgsqlRange[] is mapped to multirange by default, not array; test separately + dataTypeInference: DataTypeInference.Nothing, skipArrayCheck: true); 
// NpgsqlRange[] is mapped to multirange by default, not array; test separately await AssertType( new [] { new NpgsqlRange( @@ -99,7 +98,8 @@ await AssertType( new(1998, 4, 12, 15, 26, 38)), }, """{"[\"1998-04-12 13:26:38\",\"1998-04-12 15:26:38\"]"}""", "tsrange[]", - isDefault: false, skipArrayCheck: true); + dataTypeInference: DataTypeInference.Nothing, + skipArrayCheck: true); await using var conn = await OpenConnectionAsync(); if (conn.PostgreSqlVersion < new Version(14, 0)) @@ -110,7 +110,7 @@ await AssertType( new(1998, 4, 12, 13, 26, 38), new(1998, 4, 12, 15, 26, 38)), }, """{["1998-04-12 13:26:38","1998-04-12 15:26:38"]}""", - "tsmultirange", isDataTypeInferredFromValue: false, skipArrayCheck: true); + "tsmultirange", dataTypeInference: DataTypeInference.Nothing, skipArrayCheck: true); } [Test] @@ -131,7 +131,7 @@ await AssertType( }, """{["1998-04-12 13:26:38","1998-04-12 15:26:38"],["1998-04-13 13:26:38","1998-04-13 15:26:38"]}""", "tsmultirange", - isDataTypeInferredFromValue: false); + dataTypeInference: DataTypeInference.Nothing); } #endregion Timestamp without time zone @@ -152,55 +152,49 @@ await AssertType( [Test, TestCaseSource(nameof(TimestamptzValues))] public Task Timestamptz_as_Instant(Instant instant, string sqlLiteral) - => AssertType(instant, sqlLiteral, "timestamp with time zone", DbType.DateTime, - isDataTypeInferredFromValue: false); + => AssertType(instant, sqlLiteral, + "timestamp with time zone", dataTypeInference: DataTypeInference.Nothing, + dbType: new(DbType.DateTime, DbType.Object)); [Test] public Task Timestamptz_as_ZonedDateTime() => AssertType( new LocalDateTime(1998, 4, 12, 13, 26, 38).InUtc(), "1998-04-12 15:26:38+02", - "timestamp with time zone", - DbType.DateTime, - isDataTypeInferredFromValue: false, - isDefaultForReading: false); + "timestamp with time zone", dataTypeInference: DataTypeInference.Nothing, + dbType: new(DbType.DateTime, DbType.Object), valueTypeEqualsFieldType: false); [Test] public Task 
Timestamptz_as_OffsetDateTime() => AssertType( new LocalDateTime(1998, 4, 12, 13, 26, 38).WithOffset(Offset.Zero), "1998-04-12 15:26:38+02", - "timestamp with time zone", - DbType.DateTime, - isDataTypeInferredFromValue: false, - isDefaultForReading: false); + "timestamp with time zone", dataTypeInference: DataTypeInference.Nothing, + dbType: new(DbType.DateTime, DbType.Object), valueTypeEqualsFieldType: false); [Test] public Task Timestamptz_as_utc_DateTime() => AssertType( new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), "1998-04-12 15:26:38+02", - "timestamp with time zone", - DbType.DateTime, - isDefaultForReading: false); + "timestamp with time zone", dataTypeInference: DataTypeInference.Mismatch, + dbType: DbType.DateTime, valueTypeEqualsFieldType: false); [Test] public Task Timestamptz_as_DateTimeOffset() => AssertType( new DateTimeOffset(1998, 4, 12, 13, 26, 38, TimeSpan.Zero), "1998-04-12 15:26:38+02", - "timestamp with time zone", - DbType.DateTime, - isDefaultForReading: false); + "timestamp with time zone", dataTypeInference: DataTypeInference.Mismatch, + dbType: DbType.DateTime, valueTypeEqualsFieldType: false); [Test] public Task Timestamptz_as_long() => AssertType( -54297202000000, "1998-04-12 15:26:38+02", - "timestamp with time zone", - DbType.DateTime, - isDefault: false); + "timestamp with time zone", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.DateTime, DbType.Int64), valueTypeEqualsFieldType: false); [Test] public Task Timestamptz_cannot_use_as_LocalDateTime() @@ -232,15 +226,14 @@ await AssertType( new LocalDateTime(1998, 4, 12, 15, 26, 38).InUtc().ToInstant()), """["1998-04-12 15:26:38+02","1998-04-12 17:26:38+02")""", "tstzrange", - isDataTypeInferredFromValue: false, skipArrayCheck: true); // NpgsqlRange[] is mapped to multirange by default, not array; test separately + dataTypeInference: DataTypeInference.Nothing, skipArrayCheck: true); // NpgsqlRange[] is mapped to multirange by default, not array; test 
separately await AssertType( new [] { new Interval( new LocalDateTime(1998, 4, 12, 13, 26, 38).InUtc().ToInstant(), new LocalDateTime(1998, 4, 12, 15, 26, 38).InUtc().ToInstant()), }, """{"[\"1998-04-12 15:26:38+02\",\"1998-04-12 17:26:38+02\")"}""", - "tstzrange[]", - isDefault: false, skipArrayCheck: true); + "tstzrange[]", dataTypeInference: DataTypeInference.Nothing, skipArrayCheck: true); await using var conn = await OpenConnectionAsync(); if (conn.PostgreSqlVersion < new Version(14, 0)) @@ -251,7 +244,7 @@ await AssertType( new LocalDateTime(1998, 4, 12, 13, 26, 38).InUtc().ToInstant(), new LocalDateTime(1998, 4, 12, 15, 26, 38).InUtc().ToInstant()), }, """{["1998-04-12 15:26:38+02","1998-04-12 17:26:38+02")}""", - "tstzmultirange", isDataTypeInferredFromValue: false, skipArrayCheck: true); + "tstzmultirange", dataTypeInference: DataTypeInference.Nothing, skipArrayCheck: true); } [Test] @@ -259,14 +252,14 @@ public Task Tstzrange_with_no_end_as_Interval() => AssertType( new Interval(new LocalDateTime(1998, 4, 12, 13, 26, 38).InUtc().ToInstant(), null), """["1998-04-12 15:26:38+02",)""", - "tstzrange", isDataTypeInferredFromValue: false, skipArrayCheck: true); + "tstzrange", dataTypeInference: DataTypeInference.Nothing, skipArrayCheck: true); [Test] public Task Tstzrange_with_no_start_as_Interval() => AssertType( new Interval(null, new LocalDateTime(1998, 4, 12, 13, 26, 38).InUtc().ToInstant()), """(,"1998-04-12 15:26:38+02")""", - "tstzrange", isDataTypeInferredFromValue: false, skipArrayCheck: true); + "tstzrange", dataTypeInference: DataTypeInference.Nothing, skipArrayCheck: true); [Test] public Task Tstzrange_with_no_start_or_end_as_Interval() @@ -274,7 +267,7 @@ public Task Tstzrange_with_no_start_or_end_as_Interval() new Interval(null, null), """(,)""", "tstzrange", - isDataTypeInferredFromValue: false, skipArrayCheck: true); + dataTypeInference: DataTypeInference.Nothing, skipArrayCheck: true); [Test] public Task Tstzrange_as_NpgsqlRange_of_Instant() @@ 
-283,9 +276,9 @@ public Task Tstzrange_as_NpgsqlRange_of_Instant() new LocalDateTime(1998, 4, 12, 13, 26, 38).InUtc().ToInstant(), new LocalDateTime(1998, 4, 12, 15, 26, 38).InUtc().ToInstant()), """["1998-04-12 15:26:38+02","1998-04-12 17:26:38+02"]""", - "tstzrange", - isDataTypeInferredFromValue: false, - isDefaultForReading: false, skipArrayCheck: true); + "tstzrange", dataTypeInference: DataTypeInference.Nothing, + valueTypeEqualsFieldType: false, + skipArrayCheck: true); [Test] public Task Tstzrange_as_NpgsqlRange_of_ZonedDateTime() @@ -294,9 +287,9 @@ public Task Tstzrange_as_NpgsqlRange_of_ZonedDateTime() new LocalDateTime(1998, 4, 12, 13, 26, 38).InUtc(), new LocalDateTime(1998, 4, 12, 15, 26, 38).InUtc()), """["1998-04-12 15:26:38+02","1998-04-12 17:26:38+02"]""", - "tstzrange", - isDataTypeInferredFromValue: false, - isDefaultForReading: false, skipArrayCheck: true); + "tstzrange", dataTypeInference: DataTypeInference.Nothing, + valueTypeEqualsFieldType: false, + skipArrayCheck: true); [Test] public Task Tstzrange_as_NpgsqlRange_of_OffsetDateTime() @@ -305,9 +298,9 @@ public Task Tstzrange_as_NpgsqlRange_of_OffsetDateTime() new LocalDateTime(1998, 4, 12, 13, 26, 38).WithOffset(Offset.Zero), new LocalDateTime(1998, 4, 12, 15, 26, 38).WithOffset(Offset.Zero)), """["1998-04-12 15:26:38+02","1998-04-12 17:26:38+02"]""", - "tstzrange", - isDataTypeInferredFromValue: false, - isDefaultForReading: false, skipArrayCheck: true); + "tstzrange", dataTypeInference: DataTypeInference.Nothing, + valueTypeEqualsFieldType: false, + skipArrayCheck: true); [Test] public async Task Tstzmultirange_as_array_of_Interval() @@ -326,8 +319,7 @@ await AssertType( new LocalDateTime(1998, 4, 13, 15, 26, 38).InUtc().ToInstant()), }, """{["1998-04-12 15:26:38+02","1998-04-12 17:26:38+02"),["1998-04-13 15:26:38+02","1998-04-13 17:26:38+02")}""", - "tstzmultirange", - isDataTypeInferredFromValue: false); + "tstzmultirange", dataTypeInference: DataTypeInference.Nothing); } [Test] @@ 
-347,9 +339,8 @@ await AssertType( new LocalDateTime(1998, 4, 13, 15, 26, 38).InUtc().ToInstant()), }, """{["1998-04-12 15:26:38+02","1998-04-12 17:26:38+02"],["1998-04-13 15:26:38+02","1998-04-13 17:26:38+02"]}""", - "tstzmultirange", - isDataTypeInferredFromValue: false, - isDefaultForReading: false); + "tstzmultirange", dataTypeInference: DataTypeInference.Nothing, + valueTypeEqualsFieldType: false); } [Test] @@ -369,9 +360,8 @@ await AssertType( new LocalDateTime(1998, 4, 13, 15, 26, 38).InUtc()), }, """{["1998-04-12 15:26:38+02","1998-04-12 17:26:38+02"],["1998-04-13 15:26:38+02","1998-04-13 17:26:38+02"]}""", - "tstzmultirange", - isDataTypeInferredFromValue: false, - isDefaultForReading: false); + "tstzmultirange", dataTypeInference: DataTypeInference.Nothing, + valueTypeEqualsFieldType: false); } [Test] @@ -391,9 +381,8 @@ await AssertType( new LocalDateTime(1998, 4, 13, 15, 26, 38).WithOffset(Offset.Zero)), }, """{["1998-04-12 15:26:38+02","1998-04-12 17:26:38+02"],["1998-04-13 15:26:38+02","1998-04-13 17:26:38+02"]}""", - "tstzmultirange", - isDataTypeInferredFromValue: false, - isDefaultForReading: false); + "tstzmultirange", dataTypeInference: DataTypeInference.Nothing, + valueTypeEqualsFieldType: false); } [Test] @@ -421,9 +410,7 @@ await AssertType( null) }, """{"[\"1998-04-12 15:26:38+02\",\"1998-04-12 17:26:38+02\")","[\"1998-04-13 15:26:38+02\",\"1998-04-13 17:26:38+02\")","[\"1998-04-13 15:26:38+02\",)","(,\"1998-04-13 15:26:38+02\")","(,)"}""", - "tstzrange[]", - isDataTypeInferredFromValue: false, - isDefaultForWriting: false); + "tstzrange[]", dataTypeInference: DataTypeInference.Nothing); } [Test] @@ -442,9 +429,8 @@ await AssertType( new LocalDateTime(1998, 4, 13, 15, 26, 38).InUtc().ToInstant()), }, """{"[\"1998-04-12 15:26:38+02\",\"1998-04-12 17:26:38+02\"]","[\"1998-04-13 15:26:38+02\",\"1998-04-13 17:26:38+02\"]"}""", - "tstzrange[]", - isDataTypeInferredFromValue: false, - isDefault: false); + "tstzrange[]", dataTypeInference: 
DataTypeInference.Nothing, + valueTypeEqualsFieldType: false); } #endregion Timestamp with time zone @@ -453,16 +439,21 @@ await AssertType( [Test] public Task Date_as_LocalDate() - => AssertType(new LocalDate(2020, 10, 1), "2020-10-01", "date", DbType.Date, - isDataTypeInferredFromValue: false); + => AssertType(new LocalDate(2020, 10, 1), "2020-10-01", + "date", dataTypeInference: DataTypeInference.Nothing, + dbType: new(DbType.Date, DbType.Object)); [Test] public Task Date_as_DateTime() - => AssertType(new DateTime(2020, 10, 1), "2020-10-01", "date", DbType.Date, isDefault: false); + => AssertType(new DateTime(2020, 10, 1), "2020-10-01", + "date", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Date, DbType.DateTime2), valueTypeEqualsFieldType: false); [Test] public Task Date_as_int() - => AssertType(7579, "2020-10-01", "date", DbType.Date, isDefault: false); + => AssertType(7579, "2020-10-01", + "date", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Date, DbType.Int32), valueTypeEqualsFieldType: false); [Test] public async Task Daterange_as_DateInterval() @@ -471,13 +462,13 @@ await AssertType( new DateInterval(new(2002, 3, 4), new(2002, 3, 6)), "[2002-03-04,2002-03-07)", "daterange", - isDataTypeInferredFromValue: false, skipArrayCheck: true); // DateInterval[] is mapped to multirange by default, not array; test separately + dataTypeInference: DataTypeInference.Nothing, skipArrayCheck: true); // DateInterval[] is mapped to multirange by default, not array; test separately await AssertType( new [] {new DateInterval(new(2002, 3, 4), new(2002, 3, 6))}, """{"[2002-03-04,2002-03-07)"}""", - "daterange[]", - isDefault: false, skipArrayCheck: true); + "daterange[]", dataTypeInference: DataTypeInference.Nothing, + skipArrayCheck: true); await using var conn = await OpenConnectionAsync(); if (conn.PostgreSqlVersion < new Version(14, 0)) @@ -486,7 +477,8 @@ await AssertType( await AssertType( new [] {new DateInterval(new(2002, 3, 
4), new(2002, 3, 6))}, """{[2002-03-04,2002-03-07)}""", - "datemultirange", isDataTypeInferredFromValue: false, skipArrayCheck: true); + "datemultirange", dataTypeInference: DataTypeInference.Nothing, + skipArrayCheck: true); } [Test] @@ -495,15 +487,16 @@ public async Task Daterange_as_NpgsqlRange_of_LocalDate() await AssertType( new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false), "[2002-03-04,2002-03-06)", - "daterange", - isDataTypeInferredFromValue: false, - isDefaultForReading: false, skipArrayCheck: true); // NpgsqlRange[] is mapped to multirange by default, not array; test separately + "daterange", dataTypeInference: DataTypeInference.Nothing, + valueTypeEqualsFieldType: false, + skipArrayCheck: true); // NpgsqlRange[] is mapped to multirange by default, not array; test separately await AssertType( new [] { new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false) }, """{"[2002-03-04,2002-03-06)"}""", - "daterange[]", - isDefault: false, skipArrayCheck: true); + "daterange[]", dataTypeInference: DataTypeInference.Nothing, + valueTypeEqualsFieldType: false, + skipArrayCheck: true); await using var conn = await OpenConnectionAsync(); if (conn.PostgreSqlVersion < new Version(14, 0)) @@ -512,7 +505,9 @@ await AssertType( await AssertType( new [] { new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false) }, """{[2002-03-04,2002-03-06)}""", - "datemultirange", isDefault: false, skipArrayCheck: true); + "datemultirange", dataTypeInference: DataTypeInference.Nothing, + valueTypeEqualsFieldType: false, + skipArrayCheck: true); } [Test] @@ -529,7 +524,7 @@ await AssertType( }, "{[2002-03-04,2002-03-06),[2002-03-08,2002-03-11)}", "datemultirange", - isDataTypeInferredFromValue: false); + dataTypeInference: DataTypeInference.Nothing); } [Test] @@ -545,14 +540,13 @@ await AssertType( new NpgsqlRange(new(2002, 3, 8), true, new(2002, 3, 11), false) }, "{[2002-03-04,2002-03-06),[2002-03-08,2002-03-11)}", - "datemultirange", - isDefaultForReading: 
false, - isDataTypeInferredFromValue: false); + "datemultirange", dataTypeInference: DataTypeInference.Nothing, + valueTypeEqualsFieldType: false); } [Test] public Task Date_as_DateOnly() - => AssertType(new DateOnly(2020, 10, 1), "2020-10-01", "date", DbType.Date, isDefaultForReading: false); + => AssertType(new DateOnly(2020, 10, 1), "2020-10-01", "date", dbType: DbType.Date, valueTypeEqualsFieldType: false); [Test] public async Task Daterange_as_NpgsqlRange_of_DateOnly() @@ -561,13 +555,15 @@ await AssertType( new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false), "[2002-03-04,2002-03-06)", "daterange", - isDefaultForReading: false, skipArrayCheck: true); + valueTypeEqualsFieldType: false, + skipArrayCheck: true); await AssertType( new [] { new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false) }, """{"[2002-03-04,2002-03-06)"}""", - "daterange[]", - isDefault: false, skipArrayCheck: true); + "daterange[]", dataTypeInference: DataTypeInference.Mismatch, + valueTypeEqualsFieldType: false, + skipArrayCheck: true); await using var conn = await OpenConnectionAsync(); if (conn.PostgreSqlVersion < new Version(14, 0)) @@ -576,7 +572,9 @@ await AssertType( await AssertType( new [] { new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false) }, """{[2002-03-04,2002-03-06)}""", - "datemultirange", isDefault: false, skipArrayCheck: true); + "datemultirange", dataTypeInference: DataTypeInference.Mismatch, + valueTypeEqualsFieldType: false, + skipArrayCheck: true); } [Test] @@ -591,8 +589,7 @@ await AssertType( new DateInterval(new(2002, 3, 8), new(2002, 3, 10)) }, """{"[2002-03-04,2002-03-06)","[2002-03-08,2002-03-11)"}""", - "daterange[]", - isDefaultForWriting: false); + "daterange[]", dataTypeInference: DataTypeInference.Nothing); } [Test] @@ -607,8 +604,7 @@ await AssertType( new NpgsqlRange(new(2002, 3, 8), true, new(2002, 3, 11), false) }, """{"[2002-03-04,2002-03-06)","[2002-03-08,2002-03-11)"}""", - "daterange[]", - isDefault: false); + 
"daterange[]", dataTypeInference: DataTypeInference.Nothing, valueTypeEqualsFieldType: false); } #endregion Date @@ -617,26 +613,25 @@ await AssertType( [Test] public Task Time_as_LocalTime() - => AssertType(new LocalTime(10, 45, 34, 500), "10:45:34.5", "time without time zone", DbType.Time, - isDataTypeInferredFromValue: false); + => AssertType(new LocalTime(10, 45, 34, 500), "10:45:34.5", + "time without time zone", dataTypeInference: DataTypeInference.Nothing, + dbType: new(DbType.Time, DbType.Object)); [Test] public Task Time_as_TimeSpan() => AssertType( new TimeSpan(0, 10, 45, 34, 500), "10:45:34.5", - "time without time zone", - DbType.Time, - isDefault: false); + "time without time zone", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Time, DbType.Object), valueTypeEqualsFieldType: false); [Test] public Task Time_as_TimeOnly() => AssertType( new TimeOnly(10, 45, 34, 500), "10:45:34.5", - "time without time zone", - DbType.Time, - isDefaultForReading: false); + "time without time zone", dataTypeInference: DataTypeInference.Mismatch, + dbType: DbType.Time, valueTypeEqualsFieldType: false); #endregion Time @@ -648,7 +643,7 @@ public Task TimeTz_as_OffsetTime() new OffsetTime(new LocalTime(1, 2, 3, 4).PlusNanoseconds(5000), Offset.FromHoursAndMinutes(3, 30) + Offset.FromSeconds(5)), "01:02:03.004005+03:30:05", "time with time zone", - isDataTypeInferredFromValue: false); + dataTypeInference: DataTypeInference.Nothing); [Test] public async Task TimeTz_as_DateTimeOffset() @@ -656,13 +651,13 @@ public async Task TimeTz_as_DateTimeOffset() await AssertTypeRead( "13:03:45.51+02", "time with time zone", - new DateTimeOffset(1, 1, 2, 13, 3, 45, 510, TimeSpan.FromHours(2)), isDefault: false); + new DateTimeOffset(1, 1, 2, 13, 3, 45, 510, TimeSpan.FromHours(2)), valueTypeEqualsFieldType: false); await AssertTypeWrite( new DateTimeOffset(1, 1, 1, 13, 3, 45, 510, TimeSpan.FromHours(2)), "13:03:45.51+02", - "time with time zone", - isDefault: false); + 
"time with time zone", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.DateTime)); } #endregion Time with time zone @@ -686,7 +681,7 @@ public Task Interval_as_Period() }.Build().Normalize(), "1 year 2 mons 25 days 05:06:07.008009", "interval", - isDataTypeInferredFromValue: false); + dataTypeInference: DataTypeInference.Nothing); [Test] public Task Interval_as_Duration() @@ -694,9 +689,8 @@ public Task Interval_as_Duration() Duration.FromDays(5) + Duration.FromMinutes(4) + Duration.FromSeconds(3) + Duration.FromMilliseconds(2) + Duration.FromNanoseconds(1000), "5 days 00:04:03.002001", - "interval", - isDefaultForReading: false, - isDataTypeInferredFromValue: false); + "interval", dataTypeInference: DataTypeInference.Nothing, + valueTypeEqualsFieldType: false); [Test] public async Task Interval_as_Duration_with_months_fails() diff --git a/test/Npgsql.Tests/CommandParameterTests.cs b/test/Npgsql.Tests/CommandParameterTests.cs index 783484a8fa..a0d88dccd6 100644 --- a/test/Npgsql.Tests/CommandParameterTests.cs +++ b/test/Npgsql.Tests/CommandParameterTests.cs @@ -195,10 +195,8 @@ public async Task Parameter_must_be_set(bool genericParam) [Test] public async Task Object_generic_param_does_runtime_lookup() { - await AssertTypeWrite(1, "1", "integer", DbType.Int32, DbType.Int32, isDefault: false, - isDataTypeInferredFromValue: true, skipArrayCheck: true); - await AssertTypeWrite(new[] {1, 1}, "{1,1}", "integer[]", isDefault: false, - isDataTypeInferredFromValue: true, skipArrayCheck: true); + await AssertTypeWrite(1, "1", "integer", dbType: DbType.Int32, skipArrayCheck: true); + await AssertTypeWrite(new[] {1, 1}, "{1,1}", "integer[]", skipArrayCheck: true); } [Test] diff --git a/test/Npgsql.Tests/CommandTests.cs b/test/Npgsql.Tests/CommandTests.cs index 70488be12e..3288cfe6af 100644 --- a/test/Npgsql.Tests/CommandTests.cs +++ b/test/Npgsql.Tests/CommandTests.cs @@ -1649,7 +1649,7 @@ await server // Just to make sure we have enough 
space await server.FlushAsync(); await server - .WriteDataRow(Encoding.ASCII.GetBytes("abc")) + .WriteDataRow("abc"u8.ToArray()) .WriteCommandComplete() .WriteReadyForQuery() .WriteParameterStatus("SomeKey", "SomeValue") @@ -1670,7 +1670,7 @@ await server .WriteParseComplete() .WriteBindComplete() .WriteRowDescription(new FieldDescription(TextOid)) - .WriteDataRow(Encoding.ASCII.GetBytes("abc")) + .WriteDataRow("abc"u8.ToArray()) .WriteCommandComplete() .WriteReadyForQuery() .FlushAsync(); diff --git a/test/Npgsql.Tests/GlobalTypeMapperTests.cs b/test/Npgsql.Tests/GlobalTypeMapperTests.cs index 2385ba0710..51f950045e 100644 --- a/test/Npgsql.Tests/GlobalTypeMapperTests.cs +++ b/test/Npgsql.Tests/GlobalTypeMapperTests.cs @@ -1,4 +1,5 @@ using System; +using System.Data; using System.Threading.Tasks; using Npgsql.Internal; using Npgsql.Internal.Postgres; @@ -26,17 +27,22 @@ public async Task MapEnum() await connection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS ENUM ('sad', 'ok', 'happy')"); await connection.ReloadTypesAsync(); - await AssertType(connection, Mood.Happy, "happy", type, isDataTypeInferredFromValue: false); + await AssertType(connection, Mood.Happy, "happy", type, dataTypeInference: DataTypeInference.Nothing); } NpgsqlConnection.GlobalTypeMapper.UnmapEnum(type); // Global mapping changes have no effect on already-built data sources - await AssertType(dataSource1, Mood.Happy, "happy", type, isDataTypeInferredFromValue: false); + await AssertType(dataSource1, Mood.Happy, "happy", type, dataTypeInference: DataTypeInference.Nothing); + await AssertType(dataSource1, "happy", "happy", + type, dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.String), valueTypeEqualsFieldType: false); // But they do affect new data sources await using var dataSource2 = CreateDataSource(); - await AssertType(dataSource2, "happy", "happy", type, isDataTypeInferredFromValue: false, isDefault: false); + Assert.ThrowsAsync(() => 
AssertType(dataSource2, Mood.Happy, "happy", + type, dataTypeInference: DataTypeInference.Nothing)); + await AssertType(dataSource2, "happy", "happy", "text", dbType: DbType.String); } [Test] @@ -55,17 +61,21 @@ public async Task MapEnum_NonGeneric() await connection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS ENUM ('sad', 'ok', 'happy')"); await connection.ReloadTypesAsync(); - await AssertType(connection, Mood.Happy, "happy", type, isDataTypeInferredFromValue: false); + await AssertType(connection, Mood.Happy, "happy", type, dataTypeInference: DataTypeInference.Nothing); } NpgsqlConnection.GlobalTypeMapper.UnmapEnum(typeof(Mood), type); // Global mapping changes have no effect on already-built data sources - await AssertType(dataSource1, Mood.Happy, "happy", type, isDataTypeInferredFromValue: false); + await AssertType(dataSource1, Mood.Happy, "happy", type, dataTypeInference: DataTypeInference.Nothing); + await AssertType(dataSource1, "happy", "happy", + type, dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.String), valueTypeEqualsFieldType: false); // But they do affect new data sources await using var dataSource2 = CreateDataSource(); - Assert.ThrowsAsync(() => AssertType(dataSource2, Mood.Happy, "happy", type, isDataTypeInferredFromValue: false)); + Assert.ThrowsAsync(() => AssertType(dataSource2, Mood.Happy, "happy", type, dataTypeInference: DataTypeInference.Nothing)); + await AssertType(dataSource2, "happy", "happy", "text", dbType: DbType.String); } finally { @@ -86,17 +96,25 @@ public async Task Reset() { await connection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS ENUM ('sad', 'ok', 'happy')"); await connection.ReloadTypesAsync(); + + await AssertType(connection, Mood.Happy, "happy", type, dataTypeInference: DataTypeInference.Nothing); } // A global mapping change has no effects on data sources which have already been built NpgsqlConnection.GlobalTypeMapper.Reset(); // Global mapping changes have no effect on 
already-built data sources - await AssertType(dataSource1, Mood.Happy, "happy", type, isDataTypeInferredFromValue: false); + await AssertType(dataSource1, Mood.Happy, "happy", type, dataTypeInference: DataTypeInference.Nothing); + await AssertType(dataSource1, "happy", "happy", + type, dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.String), valueTypeEqualsFieldType: false); // But they do affect new data sources await using var dataSource2 = CreateDataSource(); - await AssertType(dataSource2, "happy", "happy", type, isDataTypeInferredFromValue: false, isDefault: false); + Assert.ThrowsAsync(() => AssertType(dataSource2, Mood.Happy, "happy", type, dataTypeInference: DataTypeInference.Nothing)); + await AssertType(dataSource2, "happy", "happy", + type, dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.String)); } [Test] diff --git a/test/Npgsql.Tests/LargeObjectTests.cs b/test/Npgsql.Tests/LargeObjectTests.cs index a252385be4..bdb1c51084 100644 --- a/test/Npgsql.Tests/LargeObjectTests.cs +++ b/test/Npgsql.Tests/LargeObjectTests.cs @@ -17,7 +17,7 @@ public void Test() var oid = manager.Create(); using (var stream = manager.OpenReadWrite(oid)) { - var buf = Encoding.UTF8.GetBytes("Hello"); + var buf = "Hello"u8.ToArray(); stream.Write(buf, 0, buf.Length); stream.Seek(0, System.IO.SeekOrigin.Begin); var buf2 = new byte[buf.Length]; diff --git a/test/Npgsql.Tests/Support/TestBase.cs b/test/Npgsql.Tests/Support/TestBase.cs index 11ebb5003e..5d690004b6 100644 --- a/test/Npgsql.Tests/Support/TestBase.cs +++ b/test/Npgsql.Tests/Support/TestBase.cs @@ -11,6 +11,7 @@ using Npgsql.Tests.Support; using NpgsqlTypes; using NUnit.Framework; +using NUnit.Framework.Constraints; namespace Npgsql.Tests; @@ -30,256 +31,266 @@ public abstract class TestBase #region Type testing - public async Task AssertType( + public Task AssertType( T value, string sqlLiteral, - string pgTypeName, - DbType? dbType = null, - DbType? 
inferredDbType = null, - bool isDefaultForReading = true, - bool isDefaultForWriting = true, - bool? isDefault = null, - bool isDataTypeInferredFromValue = true, + string dataTypeName, + DataTypeInference? dataTypeInference = null, + DbTypes? dbType = null, Func? comparer = null, + bool valueTypeEqualsFieldType = true, bool skipArrayCheck = false) - { - await using var connection = await OpenConnectionAsync(); - return await AssertType( - connection, value, sqlLiteral, pgTypeName, dbType, inferredDbType, isDefaultForReading, isDefaultForWriting, - isDefault, isDataTypeInferredFromValue, comparer, skipArrayCheck); - } + => AssertTypeCore(OpenConnectionAsync(), disposeConnection: true, () => value, sqlLiteral, dataTypeName, dataTypeInference, + dbType, comparer, valueTypeEqualsFieldType, skipArrayCheck); - public async Task AssertType( + public Task AssertType( NpgsqlDataSource dataSource, T value, string sqlLiteral, - string pgTypeName, - DbType? dbType = null, - DbType? inferredDbType = null, - bool isDefaultForReading = true, - bool isDefaultForWriting = true, - bool? isDefault = null, - bool isDataTypeInferredFromValue = true, + string dataTypeName, + DataTypeInference? dataTypeInference = null, + DbTypes? dbType = null, Func? comparer = null, + bool valueTypeEqualsFieldType = true, bool skipArrayCheck = false) - { - await using var connection = await dataSource.OpenConnectionAsync(); + => AssertTypeCore(dataSource.OpenConnectionAsync(), disposeConnection: true, () => value, sqlLiteral, dataTypeName, dataTypeInference, + dbType, comparer, valueTypeEqualsFieldType, skipArrayCheck); - return await AssertType(connection, value, sqlLiteral, pgTypeName, dbType, inferredDbType, isDefaultForReading, - isDefaultForWriting, isDefault, isDataTypeInferredFromValue, comparer, skipArrayCheck); - } - - public async Task AssertType( + public Task AssertType( NpgsqlConnection connection, T value, string sqlLiteral, - string pgTypeName, - DbType? dbType = null, - DbType? 
inferredDbType = null, - bool isDefaultForReading = true, - bool isDefaultForWriting = true, - bool? isDefault = null, - bool isDataTypeInferredFromValue = true, + string dataTypeName, + DataTypeInference? dataTypeInference = null, + DbTypes? dbType = null, Func? comparer = null, + bool valueTypeEqualsFieldType = true, bool skipArrayCheck = false) - { - if (isDefault is not null) - isDefaultForReading = isDefaultForWriting = isDefault.Value; + => AssertTypeCore(new(connection), disposeConnection: false, () => value, sqlLiteral, dataTypeName, dataTypeInference, + dbType, comparer, valueTypeEqualsFieldType, skipArrayCheck); - await AssertTypeWrite(connection, () => value, sqlLiteral, pgTypeName, dbType, inferredDbType, isDefaultForWriting, isDataTypeInferredFromValue, skipArrayCheck); - return await AssertTypeRead(connection, sqlLiteral, pgTypeName, value, isDefaultForReading, comparer, fieldType: null, skipArrayCheck); - } + public Task AssertType( + Func valueFactory, + string sqlLiteral, + string dataTypeName, + DataTypeInference? dataTypeInference = null, + DbTypes? dbType = null, + Func? comparer = null, + bool valueTypeEqualsFieldType = true, + bool skipArrayCheck = false) + => AssertTypeCore(OpenConnectionAsync(), disposeConnection: true, valueFactory, sqlLiteral, dataTypeName, dataTypeInference, + dbType, comparer, valueTypeEqualsFieldType, skipArrayCheck); - public async Task AssertTypeRead(string sqlLiteral, string pgTypeName, T expected, bool isDefault = true, bool skipArrayCheck = false) - { - await using var connection = await OpenConnectionAsync(); - return await AssertTypeRead(connection, sqlLiteral, pgTypeName, expected, isDefault, comparer: null, fieldType: null, skipArrayCheck); - } + public Task AssertType( + NpgsqlDataSource dataSource, + Func valueFactory, + string sqlLiteral, + string dataTypeName, + DataTypeInference? dataTypeInference = null, + DbTypes? dbType = null, + Func? 
comparer = null, + bool valueTypeEqualsFieldType = true, + bool skipArrayCheck = false) + => AssertTypeCore(dataSource.OpenConnectionAsync(), disposeConnection: true, valueFactory, sqlLiteral, dataTypeName, dataTypeInference, + dbType, comparer, valueTypeEqualsFieldType, skipArrayCheck); - public async Task AssertTypeRead(NpgsqlDataSource dataSource, string sqlLiteral, string pgTypeName, T expected, - bool isDefault = true, Func? comparer = null, Type? fieldType = null, bool skipArrayCheck = false) - { - await using var connection = await dataSource.OpenConnectionAsync(); - return await AssertTypeRead(connection, sqlLiteral, pgTypeName, expected, isDefault, comparer, fieldType, skipArrayCheck); - } + public Task AssertType( + NpgsqlConnection connection, + Func valueFactory, + string sqlLiteral, + string dataTypeName, + DataTypeInference? dataTypeInference = null, + DbTypes? dbType = null, + Func? comparer = null, + bool valueTypeEqualsFieldType = true, + bool skipArrayCheck = false) + => AssertTypeCore(new(connection), disposeConnection: false, valueFactory, sqlLiteral, dataTypeName, dataTypeInference, + dbType, comparer, valueTypeEqualsFieldType, skipArrayCheck); - public async Task AssertTypeWrite( - NpgsqlDataSource dataSource, - T value, - string expectedSqlLiteral, - string pgTypeName, - DbType? dbType = null, - DbType? inferredDbType = null, - bool isDefault = true, - bool isDataTypeInferredFromValue = true, + static async Task AssertTypeCore( + ValueTask connectionTask, + bool disposeConnection, + Func valueFactory, + string sqlLiteral, + string dataTypeName, + DataTypeInference? dataTypeInference = null, + DbTypes? dbType = null, + Func? comparer = null, + bool valueTypeEqualsFieldType = true, bool skipArrayCheck = false) { - await using var connection = await dataSource.OpenConnectionAsync(); + var connection = await connectionTask; + await using var _ = disposeConnection ? 
connection : null; - await AssertTypeWrite(connection, () => value, expectedSqlLiteral, pgTypeName, dbType, inferredDbType, isDefault, - isDataTypeInferredFromValue, skipArrayCheck); + await AssertTypeWriteCore(new(connection), disposeConnection: false, valueFactory, sqlLiteral, + dataTypeName, dataTypeInference, dbType, skipArrayCheck); + return await AssertTypeReadCore(new(connection), disposeConnection: false, sqlLiteral, dataTypeName, valueFactory(), + valueTypeEqualsFieldType, comparer, skipArrayCheck); } public Task AssertTypeWrite( T value, - string expectedSqlLiteral, - string pgTypeName, - DbType? dbType = null, - DbType? inferredDbType = null, - bool isDefault = true, - bool isDataTypeInferredFromValue = true, + string sqlLiteral, + string dataTypeName, + DataTypeInference? dataTypeInference = null, + DbTypes? dbType = null, bool skipArrayCheck = false) - => AssertTypeWrite(() => value, expectedSqlLiteral, pgTypeName, dbType, inferredDbType, isDefault, - isDataTypeInferredFromValue, skipArrayCheck); + => AssertTypeWriteCore(OpenConnectionAsync(), disposeConnection: true, () => value, sqlLiteral, + dataTypeName, dataTypeInference, dbType, skipArrayCheck); - public async Task AssertTypeWrite( - Func valueFactory, - string expectedSqlLiteral, - string pgTypeName, - DbType? dbType = null, - DbType? inferredDbType = null, - bool isDefault = true, - bool isDataTypeInferredFromValue = true, + public Task AssertTypeWrite( + NpgsqlDataSource dataSource, + T value, + string sqlLiteral, + string dataTypeName, + DataTypeInference? dataTypeInference = null, + DbTypes? 
dbType = null, bool skipArrayCheck = false) - { - await using var connection = await OpenConnectionAsync(); - await AssertTypeWrite(connection, valueFactory, expectedSqlLiteral, pgTypeName, dbType, inferredDbType, isDefault, isDataTypeInferredFromValue, skipArrayCheck); - } + => AssertTypeWriteCore(dataSource.OpenConnectionAsync(), disposeConnection: true, () => value, sqlLiteral, + dataTypeName, dataTypeInference, dbType, skipArrayCheck); - internal static async Task AssertTypeRead( + public Task AssertTypeWrite( NpgsqlConnection connection, + T value, string sqlLiteral, - string pgTypeName, - T expected, - bool isDefault = true, - Func? comparer = null, - Type? fieldType = null, + string dataTypeName, + DataTypeInference? dataTypeInference = null, + DbTypes? dbType = null, bool skipArrayCheck = false) - { - var result = await AssertTypeReadCore(connection, sqlLiteral, pgTypeName, expected, isDefault, comparer); - - // Check the corresponding array type as well - if (!skipArrayCheck && !pgTypeName.EndsWith("[]", StringComparison.Ordinal)) - { - await AssertTypeReadCore( - connection, - ArrayLiteral(sqlLiteral), - pgTypeName + "[]", - new[] { expected, expected }, - isDefault, - comparer is null ? null : (array1, array2) => comparer(array1[0], array2[0]) && comparer(array1[1], array2[1])); - } + => AssertTypeWriteCore(new(connection), disposeConnection: false, () => value, sqlLiteral, dataTypeName, dataTypeInference, + dbType, skipArrayCheck); - return result; - } - - internal static async Task AssertTypeReadCore( - NpgsqlConnection connection, + public Task AssertTypeWrite( + Func valueFactory, string sqlLiteral, - string pgTypeName, - T expected, - bool isDefault = true, - Func? comparer = null, - Type? 
fieldType = null) - { - if (sqlLiteral.Contains('\'')) - sqlLiteral = sqlLiteral.Replace("'", "''"); - - await using var cmd = new NpgsqlCommand($"SELECT '{sqlLiteral}'::{pgTypeName}", connection); - await using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SequentialAccess); - await reader.ReadAsync(); - - var truncatedSqlLiteral = sqlLiteral.Length > 40 ? sqlLiteral[..40] + "..." : sqlLiteral; - - var dataTypeName = reader.GetDataTypeName(0); - var dotIndex = dataTypeName.IndexOf('.'); - if (dotIndex > -1 && dataTypeName.Substring(0, dotIndex) is "pg_catalog" or "public") - dataTypeName = dataTypeName.Substring(dotIndex + 1); - - // For composite type with dots, postgres works only with quoted name - scheme."My.type.name" - // but npgsql converts it to name without quotes - var pgTypeNameWithoutQuotes = dataTypeName.Replace("\"", string.Empty); - Assert.That(dataTypeName, Is.EqualTo(pgTypeNameWithoutQuotes), - $"Got wrong result from GetDataTypeName when reading '{truncatedSqlLiteral}'"); - - if (isDefault) - { - // For arrays, GetFieldType always returns typeof(Array), since PG arrays can have arbitrary dimensionality - Assert.That(reader.GetFieldType(0), Is.EqualTo(dataTypeName.EndsWith("[]") ? typeof(Array) : fieldType ?? typeof(T)), - $"Got wrong result from GetFieldType when reading '{truncatedSqlLiteral}'"); - } - - var actual = isDefault ? (T)reader.GetValue(0) : reader.GetFieldValue(0); - - Assert.That(actual, comparer is null ? Is.EqualTo(expected) : Is.EqualTo(expected).Using(new SimpleComparer(comparer)), - $"Got wrong result from GetFieldValue value when reading '{truncatedSqlLiteral}'"); + string dataTypeName, + DataTypeInference? dataTypeInference = null, + DbTypes? 
dbType = null, + bool skipArrayCheck = false) + => AssertTypeWriteCore(OpenConnectionAsync(), disposeConnection: true, valueFactory, sqlLiteral, + dataTypeName, dataTypeInference, dbType, skipArrayCheck); - return actual; - } + public Task AssertTypeWrite( + NpgsqlDataSource dataSource, + Func valueFactory, + string sqlLiteral, + string dataTypeName, + DataTypeInference? dataTypeInference = null, + DbTypes? dbType = null, + bool skipArrayCheck = false) + => AssertTypeWriteCore(dataSource.OpenConnectionAsync(), disposeConnection: true, valueFactory, sqlLiteral, + dataTypeName, dataTypeInference, dbType, skipArrayCheck); - internal static async Task AssertTypeWrite( + public Task AssertTypeWrite( NpgsqlConnection connection, Func valueFactory, - string expectedSqlLiteral, - string pgTypeName, - DbType? dbType = null, - DbType? inferredDbType = null, - bool isDefault = true, - bool isDataTypeInferredFromValue = true, + string sqlLiteral, + string dataTypeName, + DataTypeInference? dataTypeInference = null, + DbTypes? dbType = null, bool skipArrayCheck = false) + => AssertTypeWriteCore(new(connection), disposeConnection: false, valueFactory, sqlLiteral, + dataTypeName, dataTypeInference, dbType, skipArrayCheck); + + static async Task AssertTypeWriteCore( + ValueTask connectionTask, + bool disposeConnection, + Func valueFactory, + string sqlLiteral, + string dataTypeName, + DataTypeInference? dataTypeInference, + DbTypes? dbType, + bool skipArrayCheck) { + var connection = await connectionTask; + await using var _ = disposeConnection ? connection : null; + await AssertTypeWriteCore( - connection, valueFactory, expectedSqlLiteral, pgTypeName, dbType, inferredDbType, isDefault, - isDataTypeInferredFromValue); + connection, valueFactory, sqlLiteral, + dataTypeName, dataTypeInference ?? 
DataTypeInference.Match, + dbType); // Check the corresponding array type as well - if (!skipArrayCheck && !pgTypeName.EndsWith("[]", StringComparison.Ordinal)) + if (!skipArrayCheck && !dataTypeName.EndsWith("[]", StringComparison.Ordinal)) { await AssertTypeWriteCore( connection, () => new[] { valueFactory(), valueFactory() }, - ArrayLiteral(expectedSqlLiteral), - pgTypeName + "[]", - dbType: null, - inferredDbType: null, - isDefault, - isDataTypeInferredFromValue); + ArrayLiteral(sqlLiteral), + dataTypeName + "[]", dataTypeInference ?? DataTypeInference.Match, + expectedDbTypes: null); } } + public enum DataTypeInference + { + /// + /// Data type is inferred from the CLR value and matches the data type under test. + /// + Match, + + /// + /// Data type is inferred from the CLR value but differs from the data type under test. + /// + /// + /// Used when we get some inferred data type (e.g. CLR strings are inferred to be 'text') but this does not match the data type (e.g. 'json') under test. + /// + Mismatch, + + /// + /// Data type can not be inferred from the CLR value. + /// + /// + /// This is for CLR types that are statically unknown to Npgsql (plugin types: NodaTime/NTS, composite types, enums...), + /// or where we specifically don't want to infer a data type because there's no good option + /// (e.g. uint can be mapped to 'oid/xid/cid', but we don't want any of these as a default/inferred data type) + /// + Nothing, + } + + public readonly struct DbTypes(DbType dataTypeMappedDbType, DbType valueInferredDbType, DbType dbTypeToSet) + { + public DbType DataTypeMappedDbType { get; } = dataTypeMappedDbType; + public DbType ValueInferredDbType { get; } = valueInferredDbType; + + // The DbType to explicitly set on the parameter. Usually same as ValueInferredDbType, + // It differs when testing DbType aliases (e.g. VarNumeric → DbType.Decimal) as we want to test those also work correctly. 
+ public DbType DbTypeToSet { get; } = dbTypeToSet; + + public DbTypes(DbType dataTypeMappedDbType, DbType valueInferredDbType) + : this(dataTypeMappedDbType, valueInferredDbType, valueInferredDbType) {} + + public static implicit operator DbTypes(DbType dbType) => new(dbType, dbType, dbType); + } + static async Task AssertTypeWriteCore( NpgsqlConnection connection, Func valueFactory, - string expectedSqlLiteral, - string pgTypeName, - DbType? dbType = null, - DbType? inferredDbType = null, - bool isDefault = true, - bool isDataTypeInferredFromValue = true) + string sqlLiteral, + string dataTypeName, + DataTypeInference dataTypeInference, + DbTypes? expectedDbTypes) { - var npgsqlDbType = DataTypeName.FromDisplayName(pgTypeName).ToNpgsqlDbType(); - - // TODO: Interferes with both multiplexing and connection-specific mapping (used e.g. in NodaTime) - // Reset the type mapper to make sure we're resolving this type with a clean slate (for isolation, just in case) - // connection.TypeMapper.Reset(); + var npgsqlDbType = DataTypeName.FromDisplayName(dataTypeName).ToNpgsqlDbType(); // Strip any facet information (length/precision/scale) - var parenIndex = pgTypeName.IndexOf('('); - // var pgTypeNameWithoutFacets = parenIndex > -1 ? pgTypeName[..parenIndex] : pgTypeName; - var pgTypeNameWithoutFacets = parenIndex > -1 - ? pgTypeName[..parenIndex] + pgTypeName[(pgTypeName.IndexOf(')') + 1)..] - : pgTypeName; + var parenIndex = dataTypeName.IndexOf('('); + var dataTypeNameWithoutFacets = parenIndex > -1 + ? dataTypeName[..parenIndex] + dataTypeName[(dataTypeName.IndexOf(')') + 1)..] 
+ : dataTypeName; // For composite type with dots in name, Postgresql returns name with quotes - scheme."My.type.name" // but for npgsql mapping we should use names without quotes - scheme.My.type.name - var pgTypeNameWithoutFacetsAndQuotes = pgTypeNameWithoutFacets.Replace("\"", string.Empty); + var dataTypeNameWithoutFacetsAndQuotes = dataTypeNameWithoutFacets.Replace("\"", string.Empty); // We test the following scenarios (between 2 and 5 in total): - // 1. With NpgsqlDbType explicitly set - // 2. With DataTypeName explicitly set - // 3. With DbType explicitly set (if one was provided) - // 4. With only the value set (if it's the default) - // 5. With only the value set, using generic NpgsqlParameter (if it's the default) + // 1. With value and DataTypeName set + // 2. With value and NpgsqlDbType set (when available) + // 3. With value and DbType explicitly set + // 4. With only the value set + // 5. With only the value set, using generic NpgsqlParameter + + // We only actually attempt to write to the database with a set DataTypeName, NpgsqlDbType, or when data type inference is exact. 
var errorIdentifierIndex = -1; var errorIdentifier = new Dictionary(); @@ -288,45 +299,42 @@ static async Task AssertTypeWriteCore( NpgsqlParameter p; // With data type name - p = new NpgsqlParameter { Value = valueFactory(), DataTypeName = pgTypeNameWithoutFacetsAndQuotes }; + p = new NpgsqlParameter { Value = valueFactory(), DataTypeName = dataTypeNameWithoutFacetsAndQuotes }; + errorIdentifier[++errorIdentifierIndex] = $"Value and DataTypeName={dataTypeNameWithoutFacetsAndQuotes}"; + DataTypeAsserts(); cmd.Parameters.Add(p); - errorIdentifier[++errorIdentifierIndex] = $"DataTypeName={pgTypeNameWithoutFacetsAndQuotes}"; - CheckInference(); // With NpgsqlDbType if (npgsqlDbType is not null) { p = new NpgsqlParameter { Value = valueFactory(), NpgsqlDbType = npgsqlDbType.Value }; + errorIdentifier[++errorIdentifierIndex] = $"Value and NpgsqlDbType={npgsqlDbType}"; + DataTypeAsserts(); cmd.Parameters.Add(p); - errorIdentifier[++errorIdentifierIndex] = $"NpgsqlDbType={npgsqlDbType}"; - CheckInference(); } - // With DbType - if (dbType is not null) - { - p = new NpgsqlParameter { Value = valueFactory(), DbType = dbType.Value }; + // With DbType, if none was supplied we verify it's DbType.Object. 
+ p = new NpgsqlParameter { Value = valueFactory() }; + errorIdentifier[++errorIdentifierIndex] = $"Value and DbType={expectedDbTypes?.DbTypeToSet}"; + if (expectedDbTypes?.DbTypeToSet is { } expectedDbType) + p.DbType = expectedDbType; + DbTypeAsserts(); + if (dataTypeInference is DataTypeInference.Match) cmd.Parameters.Add(p); - errorIdentifier[++errorIdentifierIndex] = $"DbType={dbType}"; - CheckInference(dbTypeApplied: true); - } - if (isDefault) - { - // With (non-generic) value only - p = new NpgsqlParameter { Value = valueFactory() }; + // With (non-generic) value only + p = new NpgsqlParameter { Value = valueFactory() }; + errorIdentifier[++errorIdentifierIndex] = $"Value (type {p.Value!.GetType().Name}, non-generic)"; + ValueAsserts(); + if (dataTypeInference is DataTypeInference.Match) cmd.Parameters.Add(p); - errorIdentifier[++errorIdentifierIndex] = $"Value only (type {p.Value!.GetType().Name}, non-generic)"; - CheckInference(valueSolelyApplied: true); - // With (generic) value only - p = new NpgsqlParameter { TypedValue = valueFactory() }; + // With (generic) value only + p = new NpgsqlParameter { TypedValue = valueFactory() }; + errorIdentifier[++errorIdentifierIndex] = $"Value (type {p.Value!.GetType().Name}, generic)"; + ValueAsserts(); + if (dataTypeInference is DataTypeInference.Match) cmd.Parameters.Add(p); - errorIdentifier[++errorIdentifierIndex] = $"Value only (type {p.Value!.GetType().Name}, generic)"; - CheckInference(valueSolelyApplied: true); - } - - Debug.Assert(cmd.Parameters.Count == errorIdentifierIndex + 1); cmd.CommandText = "SELECT " + string.Join(", ", Enumerable.Range(1, cmd.Parameters.Count).Select(i => "pg_typeof($1)::text, $1::text".Replace("$1", $"${i}"))); @@ -336,72 +344,208 @@ static async Task AssertTypeWriteCore( for (var i = 0; i < cmd.Parameters.Count * 2; i += 2) { - Assert.That(reader[i], Is.EqualTo(pgTypeNameWithoutFacets), $"Got wrong PG type name when writing with {errorIdentifier[i / 2]}"); - 
Assert.That(reader[i+1], Is.EqualTo(expectedSqlLiteral), $"Got wrong SQL literal when writing with {errorIdentifier[i / 2]}"); + var error = errorIdentifier[i / 2]; + Assert.That(reader[i], Is.EqualTo(dataTypeNameWithoutFacets), $"Got wrong data type name when writing with {error}"); + Assert.That(reader[i+1], Is.EqualTo(sqlLiteral), $"Got wrong SQL literal when writing with {error}"); } - void CheckInference(bool dbTypeApplied = false, bool valueSolelyApplied = false) + void DataTypeAsserts() { - if (!valueSolelyApplied || isDataTypeInferredFromValue) - Assert.That(p.DataTypeName, Is.EqualTo(pgTypeNameWithoutFacetsAndQuotes), - () => $"Got wrong inferred DataTypeName when inferring with {errorIdentifier[errorIdentifierIndex]}"); - - if (!valueSolelyApplied || isDataTypeInferredFromValue) - Assert.That(p.NpgsqlDbType, Is.EqualTo(npgsqlDbType ?? NpgsqlDbType.Unknown), - () => $"Got wrong inferred NpgsqlDbType when inferring with {errorIdentifier[errorIdentifierIndex]}"); - - DbType expectedDbType; - if (dbTypeApplied) - expectedDbType = dbType.GetValueOrDefault(); - else if (!valueSolelyApplied || isDataTypeInferredFromValue) - expectedDbType = inferredDbType ?? dbType ?? DbType.Object; - else - expectedDbType = DbType.Object; + var expectedDataTypeName = dataTypeNameWithoutFacetsAndQuotes; + var expectedNpgsqlDbType = npgsqlDbType ?? NpgsqlDbType.Unknown; + + var expectedDbType = expectedDbTypes?.DataTypeMappedDbType ?? DbType.Object; + + AssertParameterProperties(expectedDataTypeName, expectedNpgsqlDbType, expectedDbType); + } + + void DbTypeAsserts() + { + // If DbType was set it overrules any value based data type inference. + // As DbType.Object never has any mapping either we check for null/Unknown when DbType.Object was set. + var (expectedDataTypeName, expectedNpgsqlDbType) = + expectedDbTypes is { DbTypeToSet: DbType.Object } + ? (null, NpgsqlDbType.Unknown) + : GetInferredDataType(); + + var expectedDbType = expectedDbTypes?.DbTypeToSet ?? 
DbType.Object; + + AssertParameterProperties(expectedDataTypeName, expectedNpgsqlDbType, expectedDbType); + } + + void ValueAsserts() + { + var (expectedDataTypeName, expectedNpgsqlDbType) = GetInferredDataType(); + + var expectedDbType = expectedDbTypes?.ValueInferredDbType ?? DbType.Object; + + AssertParameterProperties(expectedDataTypeName, expectedNpgsqlDbType, expectedDbType); + } + + void AssertParameterProperties(string? expectedDataTypeName, NpgsqlDbType expectedNpgsqlDbType, DbType expectedDbType) + { + Assert.That(p.DataTypeName, Is.EqualTo(expectedDataTypeName), + $"Got wrong DataTypeName when checking with {errorIdentifier[errorIdentifierIndex]}"); + Assert.That(p.NpgsqlDbType, Is.EqualTo(expectedNpgsqlDbType), + $"Got wrong NpgsqlDbType when checking with {errorIdentifier[errorIdentifierIndex]}"); Assert.That(p.DbType, Is.EqualTo(expectedDbType), - () => $"Got wrong inferred DbType when inferring with {errorIdentifier[errorIdentifierIndex]}"); + $"Got wrong DbType when checking with {errorIdentifier[errorIdentifierIndex]}"); } + + (string? ExpectedDataTypeName, NpgsqlDbType ExpectedNpgsqlDbType) GetInferredDataType() + => dataTypeInference switch + { + DataTypeInference.Match => + (dataTypeNameWithoutFacetsAndQuotes, npgsqlDbType ?? NpgsqlDbType.Unknown), + DataTypeInference.Mismatch => + // Only respect Mismatch if the type is well known (for now that means it has an NpgsqlDbType). + // Otherwise use the exact values so we'll error with the right details. + p.NpgsqlDbType is not NpgsqlDbType.Unknown + ? (p.DataTypeName, p.NpgsqlDbType) + : (dataTypeNameWithoutFacetsAndQuotes, npgsqlDbType ?? NpgsqlDbType.Unknown), + DataTypeInference.Nothing => + (null, NpgsqlDbType.Unknown), + _ => throw new UnreachableException($"Unknown case {dataTypeInference}") + }; } - public async Task AssertTypeUnsupported(T value, string sqlLiteral, string pgTypeName, NpgsqlDataSource? 
dataSource = null) + public Task AssertTypeRead(string sqlLiteral, string dataTypeName, T value, + bool valueTypeEqualsFieldType = true, Func? comparer = null, bool skipArrayCheck = false) + => AssertTypeReadCore(OpenConnectionAsync(), disposeConnection: true, sqlLiteral, dataTypeName, + value, valueTypeEqualsFieldType, comparer, skipArrayCheck); + + public Task AssertTypeRead(NpgsqlDataSource dataSource, string sqlLiteral, string dataTypeName, T value, + bool valueTypeEqualsFieldType = true, Func? comparer = null, bool skipArrayCheck = false) + => AssertTypeReadCore(dataSource.OpenConnectionAsync(), disposeConnection: true, sqlLiteral, dataTypeName, + value, valueTypeEqualsFieldType, comparer, skipArrayCheck); + + public Task AssertTypeRead(NpgsqlConnection connection, string sqlLiteral, string dataTypeName, T value, + bool valueTypeEqualsFieldType = true, Func? comparer = null, bool skipArrayCheck = false) + => AssertTypeReadCore(new(connection), disposeConnection: false, sqlLiteral, dataTypeName, + value, valueTypeEqualsFieldType, comparer, skipArrayCheck); + + static async Task AssertTypeReadCore( + ValueTask connectionTask, + bool disposeConnection, + string sqlLiteral, + string dataTypeName, + T value, + bool valueTypeEqualsFieldType, + Func? comparer, + bool skipArrayCheck) { - await AssertTypeUnsupportedRead(sqlLiteral, pgTypeName, dataSource); - await AssertTypeUnsupportedWrite(value, pgTypeName, dataSource); + var connection = await connectionTask; + await using var _ = disposeConnection ? connection : null; + + var result = await AssertTypeReadCore(connection, sqlLiteral, dataTypeName, value, valueTypeEqualsFieldType, comparer); + + // Check the corresponding array type as well + if (!skipArrayCheck && !dataTypeName.EndsWith("[]", StringComparison.Ordinal)) + { + await AssertTypeReadCore( + connection, + ArrayLiteral(sqlLiteral), + dataTypeName + "[]", + new[] { value, value }, + valueTypeEqualsFieldType, + comparer is null ? 
null : (array1, array2) => array1.SequenceEqual(array2, CreateEqualityComparer(comparer!))); + } + return result; } - public async Task AssertTypeUnsupportedRead(string sqlLiteral, string pgTypeName, NpgsqlDataSource? dataSource = null) + static async Task AssertTypeReadCore( + NpgsqlConnection connection, + string sqlLiteral, + string dataTypeName, + T value, + bool valueTypeEqualsFieldType, + Func? comparer) { - dataSource ??= DataSource; + if (sqlLiteral.Contains('\'')) + sqlLiteral = sqlLiteral.Replace("'", "''"); - await using var conn = await dataSource.OpenConnectionAsync(); - // Make sure we don't poison the connection with a fault, potentially terminating other perfectly passing tests as well. - await using var tx = dataSource.Settings.Multiplexing ? await conn.BeginTransactionAsync() : null; - await using var cmd = new NpgsqlCommand($"SELECT '{sqlLiteral}'::{pgTypeName}", conn); + await using var cmd = new NpgsqlCommand($"SELECT '{sqlLiteral}'::{dataTypeName}", connection); await using var reader = await cmd.ExecuteReaderAsync(); await reader.ReadAsync(); - return Assert.Throws(() => reader.GetValue(0))!; + var truncatedSqlLiteral = sqlLiteral.Length > 40 ? sqlLiteral[..40] + "..." : sqlLiteral; + + var actualDataTypeName = reader.GetDataTypeName(0); + var dotIndex = actualDataTypeName.IndexOf('.'); + if (dotIndex > -1 && actualDataTypeName.Substring(0, dotIndex) is "pg_catalog" or "public") + actualDataTypeName = actualDataTypeName.Substring(dotIndex + 1); + + // For composite type with dots, postgres works only with quoted name - scheme."My.type.name" + // but npgsql converts it to name without quotes + var dataTypeNameWithoutQuotes = dataTypeName.Replace("\"", string.Empty); + Assert.That(actualDataTypeName, Is.EqualTo(dataTypeNameWithoutQuotes), + $"Got wrong result from GetDataTypeName when reading '{truncatedSqlLiteral}'"); + + // For arrays, GetFieldType always returns typeof(Array), since PG arrays can have arbitrary dimensionality. 
+ var isArrayTest = actualDataTypeName.EndsWith("[]", StringComparison.Ordinal) && typeof(T).IsArray; + Assert.That(reader.GetFieldType(0), + (valueTypeEqualsFieldType || isArrayTest ? new ConstraintExpression() : Is.Not) + .EqualTo(isArrayTest ? typeof(Array) : typeof(T)), + $"Got wrong result from GetFieldType when reading '{truncatedSqlLiteral}'"); + + T actual; + if (valueTypeEqualsFieldType) + { + actual = (T)reader.GetValue(0); + Assert.That(actual, comparer is null ? Is.EqualTo(value) : Is.EqualTo(value).Using(CreateEqualityComparer(comparer!)), + $"Got wrong result from GetValue() value when reading '{truncatedSqlLiteral}'"); + + actual = (T)reader.GetFieldValue(0); + Assert.That(actual, comparer is null ? Is.EqualTo(value) : Is.EqualTo(value).Using(CreateEqualityComparer(comparer)), + $"Got wrong result from GetFieldValue() value when reading '{truncatedSqlLiteral}'"); + + return actual; + } + + actual = reader.GetFieldValue(0); + + Assert.That(actual, comparer is null ? Is.EqualTo(value) : Is.EqualTo(value).Using(CreateEqualityComparer(comparer!)), + $"Got wrong result from GetFieldValue() value when reading '{truncatedSqlLiteral}'"); + + return actual; + } + + static EqualityComparer CreateEqualityComparer(Func comparer) + => EqualityComparer.Create((x, y) => + { + if (x is null && y is null) + return true; + if (x is null || y is null) + return false; + return comparer(x, y); + }); + + public async Task AssertTypeUnsupported(T value, string sqlLiteral, string dataTypeName, NpgsqlDataSource? dataSource = null, bool skipArrayCheck = false) + { + await AssertTypeUnsupportedRead(sqlLiteral, dataTypeName, dataSource, skipArrayCheck); + await AssertTypeUnsupportedWrite(value, dataTypeName, dataSource, skipArrayCheck); } - public Task AssertTypeUnsupportedRead(string sqlLiteral, string pgTypeName, + public Task AssertTypeUnsupportedRead(string sqlLiteral, string dataTypeName, NpgsqlDataSource? 
dataSource = null, bool skipArrayCheck = false) - => AssertTypeUnsupportedRead(sqlLiteral, pgTypeName, dataSource); + => AssertTypeUnsupportedRead(sqlLiteral, dataTypeName, dataSource, skipArrayCheck); - public async Task AssertTypeUnsupportedRead(string sqlLiteral, string pgTypeName, + public async Task AssertTypeUnsupportedRead(string sqlLiteral, string dataTypeName, NpgsqlDataSource? dataSource = null, bool skipArrayCheck = false) where TException : Exception { - var result = await AssertTypeUnsupportedReadCore(sqlLiteral, pgTypeName, dataSource); + var result = await AssertTypeUnsupportedReadCore(sqlLiteral, dataTypeName, dataSource); // Check the corresponding array type as well - if (!skipArrayCheck && !pgTypeName.EndsWith("[]", StringComparison.Ordinal)) + if (!skipArrayCheck && !dataTypeName.EndsWith("[]", StringComparison.Ordinal)) { - await AssertTypeUnsupportedReadCore(ArrayLiteral(sqlLiteral), pgTypeName + "[]", dataSource); + await AssertTypeUnsupportedReadCore(ArrayLiteral(sqlLiteral), dataTypeName + "[]", dataSource); } return result; } - async Task AssertTypeUnsupportedReadCore(string sqlLiteral, string pgTypeName, NpgsqlDataSource? dataSource = null) + async Task AssertTypeUnsupportedReadCore(string sqlLiteral, string dataTypeName, NpgsqlDataSource? dataSource = null) where TException : Exception { dataSource ??= DataSource; @@ -409,33 +553,36 @@ async Task AssertTypeUnsupportedReadCore(string sqlLi await using var conn = await dataSource.OpenConnectionAsync(); // Make sure we don't poison the connection with a fault, potentially terminating other perfectly passing tests as well. await using var tx = dataSource.Settings.Multiplexing ? 
await conn.BeginTransactionAsync() : null; - await using var cmd = new NpgsqlCommand($"SELECT '{sqlLiteral}'::{pgTypeName}", conn); + await using var cmd = new NpgsqlCommand($"SELECT '{sqlLiteral}'::{dataTypeName}", conn); await using var reader = await cmd.ExecuteReaderAsync(); await reader.ReadAsync(); - return Assert.Throws(() => reader.GetFieldValue(0))!; + return Assert.Throws(() => + { + _ = typeof(T) == typeof(object) ? reader.GetValue(0) : reader.GetFieldValue(0); + })!; } - public Task AssertTypeUnsupportedWrite(T value, string? pgTypeName = null, NpgsqlDataSource? dataSource = null, + public Task AssertTypeUnsupportedWrite(T value, string? dataTypeName = null, NpgsqlDataSource? dataSource = null, bool skipArrayCheck = false) - => AssertTypeUnsupportedWrite(value, pgTypeName, dataSource, skipArrayCheck: false); + => AssertTypeUnsupportedWrite(value, dataTypeName, dataSource, skipArrayCheck); - public async Task AssertTypeUnsupportedWrite(T value, string? pgTypeName = null, + public async Task AssertTypeUnsupportedWrite(T value, string? dataTypeName = null, NpgsqlDataSource? dataSource = null, bool skipArrayCheck = false) where TException : Exception { - var result = await AssertTypeUnsupportedWriteCore(value, pgTypeName, dataSource); + var result = await AssertTypeUnsupportedWriteCore(value, dataTypeName, dataSource); // Check the corresponding array type as well - if (!skipArrayCheck && !pgTypeName?.EndsWith("[]", StringComparison.Ordinal) == true) + if (!skipArrayCheck && !dataTypeName?.EndsWith("[]", StringComparison.Ordinal) == true) { - await AssertTypeUnsupportedWriteCore([value, value], pgTypeName + "[]", dataSource); + await AssertTypeUnsupportedWriteCore([value, value], dataTypeName + "[]", dataSource); } return result; } - async Task AssertTypeUnsupportedWriteCore(T value, string? pgTypeName = null, NpgsqlDataSource? dataSource = null) + async Task AssertTypeUnsupportedWriteCore(T value, string? dataTypeName = null, NpgsqlDataSource? 
dataSource = null) where TException : Exception { dataSource ??= DataSource; @@ -448,22 +595,12 @@ async Task AssertTypeUnsupportedWriteCore(T value, st Parameters = { new() { Value = value } } }; - if (pgTypeName is not null) - cmd.Parameters[0].DataTypeName = pgTypeName; + if (dataTypeName is not null) + cmd.Parameters[0].DataTypeName = dataTypeName; return Assert.ThrowsAsync(() => cmd.ExecuteReaderAsync())!; } - class SimpleComparer(Func comparerDelegate) : IEqualityComparer - { - public bool Equals(T? x, T? y) - => x is null - ? y is null - : y is not null && comparerDelegate(x, y); - - public int GetHashCode(T obj) => throw new NotSupportedException(); - } - // For array quoting rules, see array_out in https://github.com/postgres/postgres/blob/master/src/backend/utils/adt/arrayfuncs.c static string ArrayLiteral(string elementLiteral) { diff --git a/test/Npgsql.Tests/Types/ArrayTests.cs b/test/Npgsql.Tests/Types/ArrayTests.cs index 59ef2ceffe..82209de20f 100644 --- a/test/Npgsql.Tests/Types/ArrayTests.cs +++ b/test/Npgsql.Tests/Types/ArrayTests.cs @@ -36,8 +36,8 @@ public class ArrayTests(MultiplexingMode multiplexingMode) : MultiplexingTestBas ]; [Test, TestCaseSource(nameof(ArrayTestCases))] - public Task Arrays(T array, string sqlLiteral, string pgTypeName) - => AssertType(array, sqlLiteral, pgTypeName); + public Task Arrays(T array, string sqlLiteral, string dataTypeName) + => AssertType(array, sqlLiteral, dataTypeName); [Test] public async Task NullableInts() @@ -257,7 +257,7 @@ public async Task Write_empty_multidimensional_array() [Test] public async Task Generic_List() => await AssertType( - new List { 1, 2, 3 }, "{1,2,3}", "integer[]", isDefaultForReading: false); + new List { 1, 2, 3 }, "{1,2,3}", "integer[]", valueTypeEqualsFieldType: false); [Test] public async Task Write_IList_implementation() @@ -267,7 +267,7 @@ public async Task Write_IList_implementation() [Test] public void Read_IList_implementation_throws() => Assert.ThrowsAsync(() => - 
AssertTypeRead("{1,2,3}", "integer[]", ImmutableArray.Create(1, 2, 3), isDefault: false)); + AssertTypeRead("{1,2,3}", "integer[]", ImmutableArray.Create(1, 2, 3), valueTypeEqualsFieldType: false)); [Test] public async Task Generic_IList() diff --git a/test/Npgsql.Tests/Types/BitStringTests.cs b/test/Npgsql.Tests/Types/BitStringTests.cs index c3dddaf01b..7e64238b69 100644 --- a/test/Npgsql.Tests/Types/BitStringTests.cs +++ b/test/Npgsql.Tests/Types/BitStringTests.cs @@ -1,6 +1,7 @@ using System; using System.Collections; using System.Collections.Specialized; +using System.Data; using System.Threading.Tasks; using NpgsqlTypes; using NUnit.Framework; @@ -30,7 +31,7 @@ public async Task BitArray(string sqlLiteral) await AssertType(bitArray, sqlLiteral, "bit varying"); if (len > 0) - await AssertType(bitArray, sqlLiteral, $"bit({len})", isDefaultForWriting: false); + await AssertType(bitArray, sqlLiteral, $"bit({len})", dataTypeInference: DataTypeInference.Mismatch); } [Test] @@ -47,7 +48,7 @@ public async Task BitArray_long() [Test] public Task BitVector32() => AssertType( - new BitVector32(4), "00000000000000000000000000000100", "bit varying", isDefaultForReading: false); + new BitVector32(4), "00000000000000000000000000000100", "bit varying", valueTypeEqualsFieldType: false); [Test] public Task BitVector32_too_long() @@ -55,7 +56,7 @@ public Task BitVector32_too_long() [Test] public Task Bool() - => AssertType(true, "1", "bit(1)", isDefault: false); + => AssertType(true, "1", "bit(1)", dataTypeInference: DataTypeInference.Mismatch, dbType: new(DbType.Object, DbType.Boolean)); [Test] public async Task Bitstring_with_multiple_bits_as_bool_throws() @@ -118,7 +119,9 @@ public async Task Array_of_single_bits_and_null() [Test] public Task As_string() - => AssertType("010101", "010101", "bit varying", isDefault: false); + => AssertType("010101", "010101", + "bit varying", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.String), 
valueTypeEqualsFieldType: false); [Test] public Task Write_as_string_validation() diff --git a/test/Npgsql.Tests/Types/ByteaTests.cs b/test/Npgsql.Tests/Types/ByteaTests.cs index 9829cdb289..9bff02c273 100644 --- a/test/Npgsql.Tests/Types/ByteaTests.cs +++ b/test/Npgsql.Tests/Types/ByteaTests.cs @@ -21,7 +21,7 @@ public class ByteaTests(MultiplexingMode multiplexingMode) : MultiplexingTestBas [TestCase(new byte[] { 1, 2, 3, 4, 5 }, "\\x0102030405", TestName = "Bytea")] [TestCase(new byte[] { }, "\\x", TestName = "Bytea_empty")] public Task Bytea(byte[] byteArray, string sqlLiteral) - => AssertType(byteArray, sqlLiteral, "bytea", DbType.Binary); + => AssertType(byteArray, sqlLiteral, "bytea", dbType: DbType.Binary); [Test] public async Task Bytea_long() @@ -38,24 +38,26 @@ public async Task Bytea_long() [Test] public Task AsMemory() => AssertType( - new Memory([1, 2, 3]), "\\x010203", "bytea", DbType.Binary, isDefault: false, - comparer: (left, right) => left.Span.SequenceEqual(right.Span)); + new Memory([1, 2, 3]), "\\x010203", "bytea", dbType: DbType.Binary, + comparer: (left, right) => left.Span.SequenceEqual(right.Span), + valueTypeEqualsFieldType: false); [Test] public Task AsReadOnlyMemory() => AssertType( - new ReadOnlyMemory([1, 2, 3]), "\\x010203", "bytea", DbType.Binary, isDefault: false, - comparer: (left, right) => left.Span.SequenceEqual(right.Span)); + new ReadOnlyMemory([1, 2, 3]), "\\x010203", "bytea", dbType: DbType.Binary, + comparer: (left, right) => left.Span.SequenceEqual(right.Span), + valueTypeEqualsFieldType: false); [Test] public Task AsArraySegment() - => AssertType( - new ArraySegment([1, 2, 3]), "\\x010203", "bytea", DbType.Binary, isDefault: false); + => AssertType(new ArraySegment([1, 2, 3]), "\\x010203", + "bytea", dbType: DbType.Binary, valueTypeEqualsFieldType: false); [Test] public Task Write_as_MemoryStream() => AssertTypeWrite( - () => new MemoryStream([1, 2, 3]), "\\x010203", "bytea", DbType.Binary, isDefault: false); + () => new 
MemoryStream([1, 2, 3]), "\\x010203", "bytea", dbType: DbType.Binary); [Test] public Task Write_as_MemoryStream_truncated() @@ -67,8 +69,7 @@ public Task Write_as_MemoryStream_truncated() return ms; }; - return AssertTypeWrite( - msFactory, "\\x020304", "bytea", DbType.Binary, isDefault: false); + return AssertTypeWrite(valueFactory: msFactory, "\\x020304", "bytea", dbType: DbType.Binary); } [Test] @@ -85,8 +86,7 @@ public Task Write_as_MemoryStream_exposableArray() return ms; }; - return AssertTypeWrite( - msFactory, "\\x020304", "bytea", DbType.Binary, isDefault: false); + return AssertTypeWrite(valueFactory: msFactory, "\\x020304", "bytea", dbType: DbType.Binary); } [Test] @@ -97,8 +97,7 @@ public async Task Write_as_MemoryStream_long() rnd.NextBytes(bytes); var expectedSql = "\\x" + ToHex(bytes); - await AssertTypeWrite( - () => new MemoryStream(bytes), expectedSql, "bytea", DbType.Binary, isDefault: false); + await AssertTypeWrite(() => new MemoryStream(bytes), expectedSql, "bytea", dbType: DbType.Binary); } [Test] @@ -110,8 +109,7 @@ public async Task Write_as_FileStream() { await File.WriteAllBytesAsync(filePath, [1, 2, 3]); - await AssertTypeWrite( - () => FileStreamFactory(filePath, fsList), "\\x010203", "bytea", DbType.Binary, isDefault: false); + await AssertTypeWrite(() => FileStreamFactory(filePath, fsList), "\\x010203", "bytea", dbType: DbType.Binary); } finally { @@ -146,8 +144,7 @@ public async Task Write_as_FileStream_long() await File.WriteAllBytesAsync(filePath, bytes); var expectedSql = "\\x" + ToHex(bytes); - await AssertTypeWrite( - () => FileStreamFactory(filePath, fsList), expectedSql, "bytea", DbType.Binary, isDefault: false); + await AssertTypeWrite(() => FileStreamFactory(filePath, fsList), expectedSql, "bytea", dbType: DbType.Binary); } finally { diff --git a/test/Npgsql.Tests/Types/CompositeTests.cs b/test/Npgsql.Tests/Types/CompositeTests.cs index a5f783b9ab..8c8c336478 100644 --- a/test/Npgsql.Tests/Types/CompositeTests.cs +++ 
b/test/Npgsql.Tests/Types/CompositeTests.cs @@ -30,7 +30,7 @@ await AssertType( new SomeComposite { SomeText = "foo", X = 8 }, "(8,foo)", type, - isDataTypeInferredFromValue: false); + dataTypeInference: DataTypeInference.Nothing); } [Test] @@ -52,7 +52,7 @@ await AssertType( new SomeComposite { SomeText = "foo", X = 8 }, "(8,foo)", type, - isDataTypeInferredFromValue: false); + dataTypeInference: DataTypeInference.Nothing); } [Test] @@ -73,7 +73,7 @@ await AssertType( new SomeComposite { SomeText = "foo", X = 8 }, "(8,foo)", type, - isDataTypeInferredFromValue: false); + dataTypeInference: DataTypeInference.Nothing); } class CustomTranslator : INpgsqlNameTranslator @@ -106,7 +106,7 @@ await AssertType( new SomeComposite { SomeText = "foo", X = 8 }, "(8,foo)", type, - isDataTypeInferredFromValue: false); + dataTypeInference: DataTypeInference.Nothing); } finally { @@ -139,7 +139,7 @@ await AssertType( new SomeCompositeContainer { A = 8, Containee = new() { SomeText = "foo", X = 9 } }, @"(8,""(9,foo)"")", containerType, - isDataTypeInferredFromValue: false); + dataTypeInference: DataTypeInference.Nothing); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1168")] @@ -160,7 +160,7 @@ await AssertType( new SomeComposite { SomeText = "foo", X = 8 }, "(8,foo)", $"{schema}.some_composite", - isDataTypeInferredFromValue: false); + dataTypeInference: DataTypeInference.Nothing); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4365")] @@ -190,16 +190,14 @@ await AssertType( new SomeCompositeContainer { A = 8, Containee = new() { SomeText = "foo", X = 9 } }, @"(8,""(9,foo)"")", $"{secondSchemaName}.container", - isDataTypeInferredFromValue: false, - isDefaultForWriting: false); + dataTypeInference: DataTypeInference.Nothing); await AssertType( connection, new SomeCompositeContainer { A = 8, Containee = new() { SomeText = "foo", X = 9 } }, @"(8,""(9,foo)"")", $"{firstSchemaName}.container", - isDataTypeInferredFromValue: false, - isDefaultForWriting: 
true); + dataTypeInference: DataTypeInference.Nothing); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5972")] @@ -221,7 +219,7 @@ await AssertType( new SomeComposite { SomeText = "foobar", X = 10 }, "(10,foobar)", $"{schema}.\"{typename}\"", - isDataTypeInferredFromValue: false); + dataTypeInference: DataTypeInference.Nothing); } [Test] @@ -242,7 +240,7 @@ await AssertType( new SomeCompositeStruct { SomeText = "foo", X = 8 }, "(8,foo)", type, - isDataTypeInferredFromValue: false); + dataTypeInference: DataTypeInference.Nothing); } [Test] @@ -263,7 +261,7 @@ await AssertType( new SomeComposite[] { new() { SomeText = "foo", X = 8 }, new() { SomeText = "bar", X = 9 }}, @"{""(8,foo)"",""(9,bar)""}", type + "[]", - isDataTypeInferredFromValue: false); + dataTypeInference: DataTypeInference.Nothing); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/859")] @@ -285,7 +283,7 @@ await AssertType( new NameTranslationComposite { Simple = 2, TwoWords = 3, SomeClrName = 4 }, "(2,3,4)", type, - isDataTypeInferredFromValue: false); + dataTypeInference: DataTypeInference.Nothing); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/856")] @@ -309,7 +307,7 @@ await AssertType( new Address { PostalCode = "12345", Street = "Main St." 
}, @"(""Main St."",12345)", compositeType, - isDataTypeInferredFromValue: false); + dataTypeInference: DataTypeInference.Nothing); } [Test] @@ -331,7 +329,7 @@ await AssertType( new SomeCompositeWithArray { Ints = [1, 2, 3, 4] }, @"(""{1,2,3,4}"")", compositeType, - isDataTypeInferredFromValue: false, + dataTypeInference: DataTypeInference.Nothing, comparer: (actual, expected) => actual.Ints!.SequenceEqual(expected.Ints!)); } @@ -357,7 +355,7 @@ await AssertType( new SomeCompositeWithEnum { EnumValue = SomeCompositeWithEnum.TestEnum.Value2 }, @"(value2)", compositeType, - isDataTypeInferredFromValue: false, + dataTypeInference: DataTypeInference.Nothing, comparer: (actual, expected) => actual.EnumValue == expected.EnumValue); } @@ -380,7 +378,7 @@ await AssertType( new SomeCompositeWithIPAddress { Address = IPAddress.Loopback }, @"(127.0.0.1)", compositeType, - isDataTypeInferredFromValue: false, + dataTypeInference: DataTypeInference.Nothing, comparer: (actual, expected) => actual.Address!.Equals(expected.Address)); } @@ -406,7 +404,7 @@ await AssertType( }, """("{""1970-01-01 00:00:00"",""1970-01-02 00:00:00""}")""", compositeType, - isDataTypeInferredFromValue: false, + dataTypeInference: DataTypeInference.Nothing, comparer: (actual, expected) => actual.DateTimes!.SequenceEqual(expected.DateTimes!)); } @@ -430,7 +428,7 @@ await adminConnection.ExecuteNonQueryAsync($@" new SomeCompositeWithConverterResolverType { DateTimes = [DateTime.UnixEpoch] }, // UTC DateTime """("{""1970-01-01 01:00:00"",""1970-01-02 01:00:00""}")""", compositeType, - isDataTypeInferredFromValue: false, + dataTypeInference: DataTypeInference.Nothing, comparer: (actual, expected) => actual.DateTimes!.SequenceEqual(expected.DateTimes!))); } @@ -460,11 +458,9 @@ public async Task Table_as_composite([Values] bool enabled) Task DoAssertion() => AssertType( - connection, - new SomeComposite { SomeText = "foo", X = 8 }, - "(8,foo)", - table, - isDataTypeInferredFromValue: false); + dataSource, + 
new SomeComposite { SomeText = "foo", X = 8 }, "(8,foo)", + table, dataTypeInference: DataTypeInference.Nothing); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1267")] @@ -478,14 +474,11 @@ public async Task Table_as_composite_with_deleted_columns() dataSourceBuilder.ConfigureTypeLoading(b => b.EnableTableCompositesLoading()); dataSourceBuilder.MapComposite(table); await using var dataSource = dataSourceBuilder.Build(); - await using var connection = await dataSource.OpenConnectionAsync(); await AssertType( - connection, - new SomeComposite { SomeText = "foo", X = 8 }, - "(8,foo)", - table, - isDataTypeInferredFromValue: false); + dataSource, + new SomeComposite { SomeText = "foo", X = 8 }, "(8,foo)", + table, dataTypeInference: DataTypeInference.Nothing); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1125")] @@ -499,21 +492,16 @@ public async Task Nullable_property_in_class_composite() var dataSourceBuilder = CreateDataSourceBuilder(); dataSourceBuilder.MapComposite(type); await using var dataSource = dataSourceBuilder.Build(); - await using var connection = await dataSource.OpenConnectionAsync(); await AssertType( - connection, - new ClassWithNullableProperty { Foo = 8 }, - "(8)", - type, - isDataTypeInferredFromValue: false); + dataSource, + new ClassWithNullableProperty { Foo = 8 }, "(8)", + type, dataTypeInference: DataTypeInference.Nothing); await AssertType( - connection, - new ClassWithNullableProperty { Foo = null }, - "()", - type, - isDataTypeInferredFromValue: false); + dataSource, + new ClassWithNullableProperty { Foo = null }, "()", + type, dataTypeInference: DataTypeInference.Nothing); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1125")] @@ -527,21 +515,16 @@ public async Task Nullable_property_in_struct_composite() var dataSourceBuilder = CreateDataSourceBuilder(); dataSourceBuilder.MapComposite(type); await using var dataSource = dataSourceBuilder.Build(); - await using var connection = await 
dataSource.OpenConnectionAsync(); await AssertType( - connection, - new StructWithNullableProperty { Foo = 8 }, - "(8)", - type, - isDataTypeInferredFromValue: false); + dataSource, + new StructWithNullableProperty { Foo = 8 }, "(8)", + type, dataTypeInference: DataTypeInference.Nothing); await AssertType( - connection, - new StructWithNullableProperty { Foo = null }, - "()", - type, - isDataTypeInferredFromValue: false); + dataSource, + new StructWithNullableProperty { Foo = null }, "()", + type, dataTypeInference: DataTypeInference.Nothing); } [Test] @@ -592,14 +575,12 @@ public async Task DuplicateConstructorParameters() var dataSourceBuilder = CreateDataSourceBuilder(); dataSourceBuilder.MapComposite(type); await using var dataSource = dataSourceBuilder.Build(); - await using var connection = await dataSource.OpenConnectionAsync(); var ex = Assert.ThrowsAsync(async () => await AssertType( - connection, + dataSource, new DuplicateOneLongOneBool(true, 1), "(1,t)", - type, - isDataTypeInferredFromValue: false)); + type, dataTypeInference: DataTypeInference.Nothing)); Assert.That(ex!.InnerException, Is.TypeOf()); } @@ -614,10 +595,9 @@ public async Task PartialConstructorMissingSetter() var dataSourceBuilder = CreateDataSourceBuilder(); dataSourceBuilder.MapComposite(type); await using var dataSource = dataSourceBuilder.Build(); - await using var connection = await dataSource.OpenConnectionAsync(); var ex = Assert.ThrowsAsync(async () => await AssertTypeRead( - connection, + dataSource, "(1,t)", type, new MissingSetterOneLongOneBool(true, 1))); @@ -635,14 +615,11 @@ public async Task PartialConstructorWorks() var dataSourceBuilder = CreateDataSourceBuilder(); dataSourceBuilder.MapComposite(type); await using var dataSource = dataSourceBuilder.Build(); - await using var connection = await dataSource.OpenConnectionAsync(); await AssertType( - connection, - new OneLongOneBool(1) { BooleanValue = true }, - "(1,t)", - type, - isDataTypeInferredFromValue: false); + 
dataSource, + new OneLongOneBool(1) { BooleanValue = true }, "(1,t)", + type, dataTypeInference: DataTypeInference.Nothing); } [Test] @@ -658,7 +635,6 @@ public async Task CompositeOverRange() dataSourceBuilder.MapComposite(type); dataSourceBuilder.EnableUnmappedTypes(); await using var dataSource = dataSourceBuilder.Build(); - await using var connection = await dataSource.OpenConnectionAsync(); var composite1 = new SomeComposite { @@ -673,12 +649,10 @@ public async Task CompositeOverRange() }; await AssertType( - connection, + dataSource, new NpgsqlRange(composite1, composite2), "[\"(8,foo)\",\"(42,bar)\"]", - rangeType, - isDataTypeInferredFromValue: false, - isDefaultForWriting: false); + rangeType, dataTypeInference: DataTypeInference.Nothing); } #region Test Types diff --git a/test/Npgsql.Tests/Types/CubeTests.cs b/test/Npgsql.Tests/Types/CubeTests.cs index f571707377..4b59a85370 100644 --- a/test/Npgsql.Tests/Types/CubeTests.cs +++ b/test/Npgsql.Tests/Types/CubeTests.cs @@ -22,7 +22,7 @@ public class CubeTests : MultiplexingTestBase [Test, TestCaseSource(nameof(CubeValues))] public Task Cube(NpgsqlCube cube, string sqlLiteral) - => AssertType(cube, sqlLiteral, "cube", isDefault: true, isDataTypeInferredFromValue: false); + => AssertType(cube, sqlLiteral, "cube", dataTypeInference: DataTypeInference.Nothing); [Test] public void Cube_Constructor_SingleValue() @@ -153,8 +153,7 @@ await AssertType( data, @"{""(1, 2),(3, 4)"",""(5, 6)"",""(1),(2)""}", "cube[]", - isDefault: true, - isDataTypeInferredFromValue: false); + dataTypeInference: DataTypeInference.Nothing); } [Test] @@ -170,8 +169,7 @@ public Task Cube_NegativeValues() new NpgsqlCube(new[] { -1.0, -2.0, -3.0 }, new[] { -4.0, -5.0, -6.0 }), "(-1, -2, -3),(-4, -5, -6)", "cube", - isDefault: true, - isDataTypeInferredFromValue: false); + dataTypeInference: DataTypeInference.Nothing); [Test] public void Cube_Equality_HashCode() @@ -198,8 +196,7 @@ public Task Cube_ZeroValues() new NpgsqlCube(0.0, 0.0), "(0)", 
"cube", - isDefault: true, - isDataTypeInferredFromValue: false); + dataTypeInference: DataTypeInference.Nothing); [Test] public Task Cube_MaxDimensions() @@ -220,8 +217,7 @@ public Task Cube_MaxDimensions() new NpgsqlCube(lowerLeft, upperRight), expected, "cube", - isDefault: true, - isDataTypeInferredFromValue: false); + dataTypeInference: DataTypeInference.Nothing); } [Test] @@ -247,7 +243,7 @@ public async Task NpgsqlSlimSourceBuilder_EnableCube() dataSourceBuilder.EnableCube(); await using var dataSource = dataSourceBuilder.Build(); - await AssertType(dataSource, new NpgsqlCube(1.0, 2.0), "(1),(2)", "cube", isDefaultForWriting: false, skipArrayCheck: true); + await AssertType(dataSource, new NpgsqlCube(1.0, 2.0), "(1),(2)", "cube", dataTypeInference: DataTypeInference.Nothing, skipArrayCheck: true); } [Test] @@ -258,7 +254,7 @@ public async Task NpgsqlSlimSourceBuilder_EnableArrays() dataSourceBuilder.EnableArrays(); await using var dataSource = dataSourceBuilder.Build(); - await AssertType(dataSource, new NpgsqlCube(1.0, 2.0), "(1),(2)", "cube", isDefaultForWriting: false); + await AssertType(dataSource, new NpgsqlCube(1.0, 2.0), "(1),(2)", "cube", dataTypeInference: DataTypeInference.Nothing); } [OneTimeSetUp] diff --git a/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs b/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs index 594d32f31a..834ad346e9 100644 --- a/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs +++ b/test/Npgsql.Tests/Types/DateTimeInfinityTests.cs @@ -60,29 +60,30 @@ public sealed class DateTimeInfinityTests : TestBase, IDisposable [Test, TestCaseSource(nameof(TimestampDateTimeValues))] public Task Timestamp_DateTime(DateTime dateTime, string sqlLiteral, string infinityConvertedSqlLiteral) => AssertType(dateTime, DisableDateTimeInfinityConversions ? 
sqlLiteral : infinityConvertedSqlLiteral, - "timestamp without time zone", DbType.DateTime2, - comparer: MaxValuePrecisionLenientComparer, - isDefault: true); + "timestamp without time zone", + dbType: DbType.DateTime2, + comparer: MaxValuePrecisionLenientComparer); [Test, TestCaseSource(nameof(TimestampTzDateTimeValues))] public Task TimestampTz_DateTime(DateTime dateTime, string sqlLiteral, string infinityConvertedSqlLiteral) - => AssertType(new(dateTime.Ticks, DateTimeKind.Utc), DisableDateTimeInfinityConversions ? sqlLiteral : infinityConvertedSqlLiteral, - "timestamp with time zone", DbType.DateTime, DbType.DateTime, - comparer: MaxValuePrecisionLenientComparer, - isDefault: true); + => AssertType(new DateTime(dateTime.Ticks, DateTimeKind.Utc), DisableDateTimeInfinityConversions ? sqlLiteral : infinityConvertedSqlLiteral, + "timestamp with time zone", + dbType: DbType.DateTime, + comparer: MaxValuePrecisionLenientComparer); [Test, TestCaseSource(nameof(TimestampTzDateTimeOffsetValues))] public Task TimestampTz_DateTimeOffset(DateTimeOffset dateTime, string sqlLiteral, string infinityConvertedSqlLiteral) => AssertType(dateTime, DisableDateTimeInfinityConversions ? sqlLiteral : infinityConvertedSqlLiteral, - "timestamp with time zone", DbType.DateTime, DbType.DateTime, + "timestamp with time zone", + dbType: DbType.DateTime, comparer: (expected, actual) => MaxValuePrecisionLenientComparer(expected.DateTime, actual.DateTime), - isDefault: false); + valueTypeEqualsFieldType: false); [Test, TestCaseSource(nameof(DateDateTimeValues))] public Task Date_DateTime(DateTime dateTime, string sqlLiteral, string infinityConvertedSqlLiteral) => AssertType(DisableDateTimeInfinityConversions ? dateTime.Date : dateTime, DisableDateTimeInfinityConversions ? 
sqlLiteral : infinityConvertedSqlLiteral, - "date", DbType.Date, - isDefault: false); + "date", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Date, DbType.DateTime2), valueTypeEqualsFieldType: false); static readonly TestCaseData[] DateOnlyDateTimeValues = [ @@ -96,9 +97,9 @@ public Task Date_DateTime(DateTime dateTime, string sqlLiteral, string infinityC [Test, TestCaseSource(nameof(DateOnlyDateTimeValues))] public Task Date_DateOnly(DateOnly dateTime, string sqlLiteral, string infinityConvertedSqlLiteral) - => AssertType(dateTime, - DisableDateTimeInfinityConversions ? sqlLiteral : infinityConvertedSqlLiteral, "date", DbType.Date, - isDefault: false); + => AssertType(dateTime, DisableDateTimeInfinityConversions ? sqlLiteral : infinityConvertedSqlLiteral, + "date", + dbType: DbType.Date); NpgsqlDataSource? _dataSource; protected override NpgsqlDataSource DataSource => _dataSource ??= CreateDataSource(csb => csb.Timezone = "UTC"); diff --git a/test/Npgsql.Tests/Types/DateTimeTests.cs b/test/Npgsql.Tests/Types/DateTimeTests.cs index d9bed9baac..e81da27ee0 100644 --- a/test/Npgsql.Tests/Types/DateTimeTests.cs +++ b/test/Npgsql.Tests/Types/DateTimeTests.cs @@ -15,20 +15,26 @@ public class DateTimeTests : TestBase [Test] public Task Date_as_DateOnly() - => AssertType(new DateOnly(2020, 10, 1), "2020-10-01", "date", DbType.Date); + => AssertType(new DateOnly(2020, 10, 1), "2020-10-01", "date", dbType: DbType.Date); [Test] public Task Date_as_DateTime() - => AssertType(new DateTime(2020, 10, 1), "2020-10-01", "date", DbType.Date, isDefault: false); + => AssertType(new DateTime(2020, 10, 1), "2020-10-01", + "date", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Date, DbType.DateTime2), valueTypeEqualsFieldType: false); [Test] public Task Date_as_DateTime_with_date_and_time_before_2000() - => AssertTypeWrite(new DateTime(1980, 10, 1, 11, 0, 0), "1980-10-01", "date", DbType.Date, isDefault: false); + => AssertTypeWrite(new 
DateTime(1980, 10, 1, 11, 0, 0), "1980-10-01", + "date", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Date, DbType.DateTime2)); // Internal PostgreSQL representation (days since 2020-01-01), for out-of-range values. [Test] public Task Date_as_int() - => AssertType(7579, "2020-10-01", "date", DbType.Date, isDefault: false); + => AssertType(7579, "2020-10-01", + "date", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Date, DbType.Int32), valueTypeEqualsFieldType: false); [Test] public Task Daterange_as_NpgsqlRange_of_DateOnly() @@ -47,16 +53,16 @@ public Task Daterange_array_as_NpgsqlRange_of_DateOnly_array() new NpgsqlRange(new(2002, 3, 8), true, new(2002, 3, 9), false) }, """{"[2002-03-04,2002-03-06)","[2002-03-08,2002-03-09)"}""", - "daterange[]", - isDefaultForWriting: false); + "daterange[]", dataTypeInference: DataTypeInference.Mismatch, + valueTypeEqualsFieldType: false); [Test] public Task Daterange_as_NpgsqlRange_of_DateTime() => AssertType( new NpgsqlRange(new(2002, 3, 4), true, new(2002, 3, 6), false), "[2002-03-04,2002-03-06)", - "daterange", - isDefault: false); + "daterange", dataTypeInference: DataTypeInference.Mismatch, + valueTypeEqualsFieldType: false); [Test] public async Task Datemultirange_as_array_of_NpgsqlRange_of_DateOnly() @@ -87,8 +93,8 @@ await AssertType( new NpgsqlRange(new(2002, 3, 8), true, new(2002, 3, 11), false) }, "{[2002-03-04,2002-03-06),[2002-03-08,2002-03-11)}", - "datemultirange", - isDefault: false); + "datemultirange", dataTypeInference: DataTypeInference.Mismatch, + valueTypeEqualsFieldType: false); } #endregion @@ -97,20 +103,15 @@ await AssertType( [Test] public Task Time_as_TimeOnly() - => AssertType( - new TimeOnly(10, 45, 34, 500), - "10:45:34.5", + => AssertType(new TimeOnly(10, 45, 34, 500), "10:45:34.5", "time without time zone", - DbType.Time); + dbType: DbType.Time); [Test] public Task Time_as_TimeSpan() - => AssertType( - new TimeSpan(0, 10, 45, 34, 500), - "10:45:34.5", 
- "time without time zone", - DbType.Time, - isDefault: false); + => AssertType(new TimeSpan(0, 10, 45, 34, 500), "10:45:34.5", + "time without time zone", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Time, DbType.Object), valueTypeEqualsFieldType: false); #endregion @@ -130,7 +131,9 @@ public Task Time_as_TimeSpan() [Test, TestCaseSource(nameof(TimeTzValues))] public Task TimeTz_as_DateTimeOffset(DateTimeOffset time, string sqlLiteral) - => AssertType(time, sqlLiteral, "time with time zone", isDefault: false); + => AssertType(time, sqlLiteral, + "time with time zone", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.DateTime)); #endregion @@ -149,13 +152,13 @@ public Task TimeTz_as_DateTimeOffset(DateTimeOffset time, string sqlLiteral) [Test, TestCaseSource(nameof(TimestampValues))] public async Task Timestamp_as_DateTime(DateTime dateTime, string sqlLiteral) { - await AssertType(dateTime, sqlLiteral, "timestamp without time zone", DbType.DateTime2, + await AssertType(dateTime, sqlLiteral, "timestamp without time zone", dbType: DbType.DateTime2, // Explicitly check kind as well. 
comparer: (actual, expected) => actual.Kind == expected.Kind && actual.Equals(expected)); await AssertType( new List { dateTime, dateTime }, $$"""{"{{sqlLiteral}}","{{sqlLiteral}}"}""", "timestamp without time zone[]", - isDefaultForReading: false); + valueTypeEqualsFieldType: false); } [Test] @@ -164,12 +167,9 @@ public Task Timestamp_cannot_write_utc_DateTime() [Test] public Task Timestamp_as_long() - => AssertType( - -54297202000000, - "1998-04-12 13:26:38", - "timestamp without time zone", - DbType.DateTime2, - isDefault: false); + => AssertType(-54297202000000, "1998-04-12 13:26:38", + "timestamp without time zone", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.DateTime2, DbType.Int64), valueTypeEqualsFieldType: false); [Test] public Task Timestamp_cannot_use_as_DateTimeOffset() @@ -202,7 +202,7 @@ public Task Tsrange_array_as_NpgsqlRange_of_DateTime_array() }, """{"[\"1998-04-12 13:26:38\",\"1998-04-12 15:26:38\"]","[\"1998-04-13 13:26:38\",\"1998-04-13 15:26:38\"]"}""", "tsrange[]", - isDefault: false); + dataTypeInference: DataTypeInference.Mismatch); [Test] public async Task Tsmultirange_as_array_of_NpgsqlRange_of_DateTime() @@ -243,23 +243,25 @@ await AssertType( [Test, TestCaseSource(nameof(TimestampTzWriteValues))] public async Task Timestamptz_as_DateTime(DateTime dateTime, string sqlLiteral) { - await AssertType(dateTime, sqlLiteral, "timestamp with time zone", DbType.DateTime, + await AssertType(dateTime, sqlLiteral, "timestamp with time zone", dbType: DbType.DateTime, // Explicitly check kind as well. 
comparer: (actual, expected) => actual.Kind == expected.Kind && actual.Equals(expected)); await AssertType( new List { dateTime, dateTime }, $$"""{"{{sqlLiteral}}","{{sqlLiteral}}"}""", "timestamp with time zone[]", - isDefaultForReading: false); + valueTypeEqualsFieldType: false); } [Test] public async Task Timestamptz_infinity_as_DateTime() { - await AssertType(DateTime.MinValue, "-infinity", "timestamp with time zone", DbType.DateTime, - isDefault: false); - await AssertType(DateTime.MaxValue, "infinity", "timestamp with time zone", DbType.DateTime, - isDefault: false); + await AssertType(DateTime.MinValue, "-infinity", + "timestamp with time zone", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.DateTime, DbType.DateTime2)); + await AssertType(DateTime.MaxValue, "infinity", + "timestamp with time zone", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.DateTime, DbType.DateTime2)); } [Test] @@ -276,8 +278,8 @@ public async Task Timestamptz_as_DateTimeOffset_utc() new DateTimeOffset(1998, 4, 12, 13, 26, 38, TimeSpan.Zero), "1998-04-12 15:26:38+02", "timestamp with time zone", - DbType.DateTime, - isDefaultForReading: false); + dbType: DbType.DateTime, + valueTypeEqualsFieldType: false); Assert.That(dateTimeOffset.Offset, Is.EqualTo(TimeSpan.Zero)); } @@ -287,10 +289,8 @@ public Task Timestamptz_as_DateTimeOffset_utc_with_DbType_DateTimeOffset() => AssertTypeWrite( new DateTimeOffset(1998, 4, 12, 13, 26, 38, TimeSpan.Zero), "1998-04-12 15:26:38+02", - "timestamp with time zone", - DbType.DateTimeOffset, - inferredDbType: DbType.DateTime, - isDefault: false); + "timestamp with time zone", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.DateTime, DbType.DateTime, DbType.DateTimeOffset)); [Test] public Task Timestamptz_cannot_write_non_utc_DateTimeOffset() @@ -298,12 +298,9 @@ public Task Timestamptz_cannot_write_non_utc_DateTimeOffset() [Test] public Task Timestamptz_as_long() - => AssertType( - 
-54297202000000, - "1998-04-12 15:26:38+02", - "timestamp with time zone", - DbType.DateTime, - isDefault: false); + => AssertType(-54297202000000, "1998-04-12 15:26:38+02", + "timestamp with time zone", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.DateTime, DbType.Int64), valueTypeEqualsFieldType: false); [Test] public async Task Timestamptz_array_as_DateTimeOffset_array() @@ -316,7 +313,7 @@ public async Task Timestamptz_array_as_DateTimeOffset_array() }, """{"1998-04-12 15:26:38+02","1999-04-12 15:26:38+02"}""", "timestamp with time zone[]", - isDefaultForReading: false); + valueTypeEqualsFieldType: false); Assert.That(dateTimeOffsets[0].Offset, Is.EqualTo(TimeSpan.Zero)); Assert.That(dateTimeOffsets[1].Offset, Is.EqualTo(TimeSpan.Zero)); @@ -346,7 +343,7 @@ public Task Tstzrange_array_as_NpgsqlRange_of_DateTime_array() }, """{"[\"1998-04-12 15:26:38+02\",\"1998-04-12 17:26:38+02\"]","[\"1998-04-13 15:26:38+02\",\"1998-04-13 17:26:38+02\"]"}""", "tstzrange[]", - isDefault: false); + dataTypeInference: DataTypeInference.Mismatch); [Test] public async Task Tstzmultirange_as_array_of_NpgsqlRange_of_DateTime() @@ -475,7 +472,7 @@ await AssertType(datasource, new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc), }, @"{""1998-04-12 15:26:38+02""}", - "timestamp with time zone[]", isDefaultForReading: false); // we write DateTime?[], but will read DateTime[] from GetValue + "timestamp with time zone[]", valueTypeEqualsFieldType: false); // we write DateTime?[], but will read DateTime[] from GetValue } #endregion @@ -509,8 +506,7 @@ public Task Interval_write_as_TimeSpan_truncates_ticks() public Task Interval_as_NpgsqlInterval() => AssertType( new NpgsqlInterval(2, 15, 7384005000), - "2 mons 15 days 02:03:04.005", "interval", - isDefaultForReading: false); + "2 mons 15 days 02:03:04.005", "interval", valueTypeEqualsFieldType: false); [Test] public Task Interval_with_months_cannot_read_as_TimeSpan() diff --git 
a/test/Npgsql.Tests/Types/DomainTests.cs b/test/Npgsql.Tests/Types/DomainTests.cs index dbfd147c39..7b9a7e6571 100644 --- a/test/Npgsql.Tests/Types/DomainTests.cs +++ b/test/Npgsql.Tests/Types/DomainTests.cs @@ -95,7 +95,6 @@ await AssertType( new NpgsqlRange(1, 2), "[1,2]", rangeType, - isDataTypeInferredFromValue: false, - isDefaultForWriting: false); + dataTypeInference: DataTypeInference.Mismatch); } } diff --git a/test/Npgsql.Tests/Types/EnumTests.cs b/test/Npgsql.Tests/Types/EnumTests.cs index 5f01534bcf..30eb522b5d 100644 --- a/test/Npgsql.Tests/Types/EnumTests.cs +++ b/test/Npgsql.Tests/Types/EnumTests.cs @@ -1,5 +1,6 @@ using System; using System.Collections.Generic; +using System.Data; using System.Threading.Tasks; using Npgsql.NameTranslation; using Npgsql.PostgresTypes; @@ -26,7 +27,7 @@ public async Task Data_source_mapping() dataSourceBuilder.MapEnum(type); await using var dataSource = dataSourceBuilder.Build(); - await AssertType(dataSource, Mood.Happy, "happy", type, isDataTypeInferredFromValue: false); + await AssertType(dataSource, Mood.Happy, "happy", type, dataTypeInference: DataTypeInference.Nothing); } [Test] @@ -43,7 +44,7 @@ public async Task Data_source_unmap() await using var dataSource = dataSourceBuilder.Build(); Assert.That(isUnmapSuccessful); - Assert.ThrowsAsync(() => AssertType(dataSource, Mood.Happy, "happy", type, isDataTypeInferredFromValue: false)); + Assert.ThrowsAsync(() => AssertType(dataSource, Mood.Happy, "happy", type, dataTypeInference: DataTypeInference.Nothing)); } [Test] @@ -56,7 +57,7 @@ public async Task Data_source_mapping_non_generic() var dataSourceBuilder = CreateDataSourceBuilder(); dataSourceBuilder.MapEnum(typeof(Mood), type); await using var dataSource = dataSourceBuilder.Build(); - await AssertType(dataSource, Mood.Happy, "happy", type, isDataTypeInferredFromValue: false); + await AssertType(dataSource, Mood.Happy, "happy", type, dataTypeInference: DataTypeInference.Nothing); } [Test] @@ -73,7 +74,7 @@ public 
async Task Data_source_unmap_non_generic() await using var dataSource = dataSourceBuilder.Build(); Assert.That(isUnmapSuccessful); - Assert.ThrowsAsync(() => AssertType(dataSource, Mood.Happy, "happy", type, isDataTypeInferredFromValue: false)); + Assert.ThrowsAsync(() => AssertType(dataSource, Mood.Happy, "happy", type, dataTypeInference: DataTypeInference.Nothing)); } [Test] @@ -91,7 +92,7 @@ await adminConnection.ExecuteNonQueryAsync($@" dataSourceBuilder.MapEnum(type2); await using var dataSource = dataSourceBuilder.Build(); - await AssertType(dataSource, new[] { Mood.Ok, Mood.Sad }, "{ok,sad}", type1 + "[]", isDataTypeInferredFromValue: false); + await AssertType(dataSource, new[] { Mood.Ok, Mood.Sad }, "{ok,sad}", type1 + "[]", dataTypeInference: DataTypeInference.Nothing); } [Test] @@ -105,7 +106,7 @@ public async Task Array() dataSourceBuilder.MapEnum(type); await using var dataSource = dataSourceBuilder.Build(); - await AssertType(dataSource, new[] { Mood.Ok, Mood.Happy }, "{ok,happy}", type + "[]", isDataTypeInferredFromValue: false); + await AssertType(dataSource, new[] { Mood.Ok, Mood.Happy }, "{ok,happy}", type + "[]", dataTypeInference: DataTypeInference.Nothing); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/859")] @@ -119,9 +120,9 @@ public async Task Name_translation_default_snake_case() dataSourceBuilder.MapEnum(enumName1); await using var dataSource = dataSourceBuilder.Build(); - await AssertType(dataSource, NameTranslationEnum.Simple, "simple", enumName1, isDataTypeInferredFromValue: false); - await AssertType(dataSource, NameTranslationEnum.TwoWords, "two_words", enumName1, isDataTypeInferredFromValue: false); - await AssertType(dataSource, NameTranslationEnum.SomeClrName, "some_database_name", enumName1, isDataTypeInferredFromValue: false); + await AssertType(dataSource, NameTranslationEnum.Simple, "simple", enumName1, dataTypeInference: DataTypeInference.Nothing); + await AssertType(dataSource, NameTranslationEnum.TwoWords, 
"two_words", enumName1, dataTypeInference: DataTypeInference.Nothing); + await AssertType(dataSource, NameTranslationEnum.SomeClrName, "some_database_name", enumName1, dataTypeInference: DataTypeInference.Nothing); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/859")] @@ -135,9 +136,9 @@ public async Task Name_translation_null() dataSourceBuilder.MapEnum(type, nameTranslator: new NpgsqlNullNameTranslator()); await using var dataSource = dataSourceBuilder.Build(); - await AssertType(dataSource, NameTranslationEnum.Simple, "Simple", type, isDataTypeInferredFromValue: false); - await AssertType(dataSource, NameTranslationEnum.TwoWords, "TwoWords", type, isDataTypeInferredFromValue: false); - await AssertType(dataSource, NameTranslationEnum.SomeClrName, "some_database_name", type, isDataTypeInferredFromValue: false); + await AssertType(dataSource, NameTranslationEnum.Simple, "Simple", type, dataTypeInference: DataTypeInference.Nothing); + await AssertType(dataSource, NameTranslationEnum.TwoWords, "TwoWords", type, dataTypeInference: DataTypeInference.Nothing); + await AssertType(dataSource, NameTranslationEnum.SomeClrName, "some_database_name", type, dataTypeInference: DataTypeInference.Nothing); } [Test] @@ -152,8 +153,8 @@ await connection.ExecuteNonQueryAsync(@$" CREATE TYPE {type2} AS ENUM ('value1', 'value2');"); await connection.ReloadTypesAsync(); - await AssertType(connection, Mood.Happy, "happy", type1, isDataTypeInferredFromValue: false, isDefault: false); - await AssertType(connection, AnotherEnum.Value2, "value2", type2, isDataTypeInferredFromValue: false, isDefault: false); + await AssertType(connection, Mood.Happy, "happy", type1, dataTypeInference: DataTypeInference.Nothing, valueTypeEqualsFieldType: false); + await AssertType(connection, AnotherEnum.Value2, "value2", type2, dataTypeInference: DataTypeInference.Nothing, valueTypeEqualsFieldType: false); } [Test] @@ -186,7 +187,9 @@ public async Task Unmapped_enum_as_string() await 
connection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS ENUM ('sad', 'ok', 'happy')"); await connection.ReloadTypesAsync(); - await AssertType(connection, "happy", "happy", type, isDataTypeInferredFromValue: false, isDefaultForWriting: false); + await AssertType(connection, "happy", "happy", type, + dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.String)); } enum NameTranslationEnum @@ -212,8 +215,8 @@ await adminConnection.ExecuteNonQueryAsync($@" dataSourceBuilder.MapEnum($"{schema2}.my_enum"); await using var dataSource = dataSourceBuilder.Build(); - await AssertType(dataSource, Enum1.One, "one", $"{schema1}.my_enum", isDataTypeInferredFromValue: false); - await AssertType(dataSource, Enum2.Alpha, "alpha", $"{schema2}.my_enum", isDataTypeInferredFromValue: false); + await AssertType(dataSource, Enum1.One, "one", $"{schema1}.my_enum", dataTypeInference: DataTypeInference.Nothing); + await AssertType(dataSource, Enum2.Alpha, "alpha", $"{schema2}.my_enum", dataTypeInference: DataTypeInference.Nothing); } enum Enum1 { One } diff --git a/test/Npgsql.Tests/Types/FullTextSearchTests.cs b/test/Npgsql.Tests/Types/FullTextSearchTests.cs index de04814346..5f63826a99 100644 --- a/test/Npgsql.Tests/Types/FullTextSearchTests.cs +++ b/test/Npgsql.Tests/Types/FullTextSearchTests.cs @@ -69,7 +69,7 @@ public async Task Full_text_search_not_supported_by_default_on_NpgsqlSlimSourceB Assert.That(exception.InnerException, Is.InstanceOf()); Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); - exception = await AssertTypeUnsupportedWrite(new NpgsqlTsQueryLexeme("a"), pgTypeName: null, dataSource); + exception = await AssertTypeUnsupportedWrite(new NpgsqlTsQueryLexeme("a"), dataTypeName: null, dataSource); Assert.That(exception.InnerException, Is.InstanceOf()); Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); @@ -77,7 +77,7 @@ public async Task 
Full_text_search_not_supported_by_default_on_NpgsqlSlimSourceB Assert.That(exception.InnerException, Is.InstanceOf()); Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); - exception = await AssertTypeUnsupportedWrite(NpgsqlTsVector.Parse("'1'"), pgTypeName: null, dataSource); + exception = await AssertTypeUnsupportedWrite(NpgsqlTsVector.Parse("'1'"), dataTypeName: null, dataSource); Assert.That(exception.InnerException, Is.InstanceOf()); Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); } diff --git a/test/Npgsql.Tests/Types/HstoreTests.cs b/test/Npgsql.Tests/Types/HstoreTests.cs index 20706dfd39..779aa8fad1 100644 --- a/test/Npgsql.Tests/Types/HstoreTests.cs +++ b/test/Npgsql.Tests/Types/HstoreTests.cs @@ -17,11 +17,11 @@ public Task Hstore() {"cd", "hello"} }, @"""a""=>""3"", ""b""=>NULL, ""cd""=>""hello""", - "hstore", isDataTypeInferredFromValue: false); + "hstore", dataTypeInference: DataTypeInference.Nothing); [Test] public Task Hstore_empty() - => AssertType(new Dictionary(), @"", "hstore", isDataTypeInferredFromValue: false); + => AssertType(new Dictionary(), @"", "hstore", dataTypeInference: DataTypeInference.Nothing); [Test] public Task Hstore_as_ImmutableDictionary() @@ -36,7 +36,7 @@ public Task Hstore_as_ImmutableDictionary() immutableDictionary, @"""a""=>""3"", ""b""=>NULL, ""cd""=>""hello""", "hstore", - isDefaultForReading: false, isDataTypeInferredFromValue: false); + dataTypeInference: DataTypeInference.Nothing, valueTypeEqualsFieldType: false); } [Test] @@ -50,7 +50,7 @@ public Task Hstore_as_IDictionary() }, @"""a""=>""3"", ""b""=>NULL, ""cd""=>""hello""", "hstore", - isDefaultForReading: false, isDataTypeInferredFromValue: false); + dataTypeInference: DataTypeInference.Nothing, valueTypeEqualsFieldType: false); [OneTimeSetUp] public async Task SetUp() diff --git a/test/Npgsql.Tests/Types/JsonDynamicTests.cs b/test/Npgsql.Tests/Types/JsonDynamicTests.cs index 1a7337ea18..ecc3fb5a66 100644 --- 
a/test/Npgsql.Tests/Types/JsonDynamicTests.cs +++ b/test/Npgsql.Tests/Types/JsonDynamicTests.cs @@ -26,7 +26,8 @@ public async Task As_poco() ? """{"Date": "2019-09-01T00:00:00", "Summary": "Partly cloudy", "TemperatureC": 10}""" : """{"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}""", PostgresType, - isDefault: false); + dataTypeInference: DataTypeInference.Nothing, + valueTypeEqualsFieldType: false); [Test] public async Task As_poco_long() @@ -46,7 +47,8 @@ await AssertType( ? $$"""{"Date": "2019-09-01T00:00:00", "Summary": "{{bigString}}", "TemperatureC": 10}""" : $$"""{"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"{{bigString}}"}""", PostgresType, - isDefault: false); + dataTypeInference: DataTypeInference.Nothing, + valueTypeEqualsFieldType: false); } [Test] @@ -122,7 +124,7 @@ await AssertTypeWrite( ? """{"date": "2019-09-01T00:00:00", "summary": "Partly cloudy", "temperatureC": 10}""" : """{"date":"2019-09-01T00:00:00","temperatureC":10,"summary":"Partly cloudy"}""", PostgresType, - isDefault: false); + dataTypeInference: DataTypeInference.Nothing); } [Test, Ignore("TODO We should not change the default type for json/jsonb, it makes little sense.")] @@ -147,8 +149,7 @@ await AssertType( ? """{"Date": "2019-09-01T00:00:00", "Summary": "Partly cloudy", "TemperatureC": 10}""" : """{"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}""", PostgresType, - isDefaultForReading: false, - isDataTypeInferredFromValue: false); + dataTypeInference: DataTypeInference.Nothing); } #region Polymorphic @@ -178,8 +179,8 @@ public async Task Poco_polymorphic_mapping() ? 
"""{"Date": "2019-09-01T00:00:00", "$type": "extended", "Summary": "Partly cloudy", "TemperatureC": 10, "TemperatureF": 49}""" : """{"$type":"extended","TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}"""; - await AssertTypeWrite(dataSource, value, sql, PostgresType, isDataTypeInferredFromValue: false); - await AssertTypeRead(dataSource, sql, PostgresType, value, isDefault: false); + await AssertTypeWrite(dataSource, value, sql, PostgresType, dataTypeInference: DataTypeInference.Nothing); + await AssertTypeRead(dataSource, sql, PostgresType, value, valueTypeEqualsFieldType: false); } [Test] @@ -208,9 +209,9 @@ public async Task Poco_polymorphic_mapping_read_parents() : """{"$type":"extended","TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}"""; await AssertTypeWrite(dataSource, value, sql, PostgresType, - isDataTypeInferredFromValue: false); + dataTypeInference: DataTypeInference.Nothing); - await AssertTypeRead(dataSource, sql, PostgresType, value, isDefault: false); + await AssertTypeRead(dataSource, sql, PostgresType, value, valueTypeEqualsFieldType: false); await AssertTypeRead(dataSource, sql, PostgresType, new DerivedWeatherForecast { @@ -218,8 +219,8 @@ await AssertTypeRead(dataSource, sql, PostgresType, Summary = "Partly cloudy", TemperatureC = 10 }, - isDefault: false); - await AssertTypeRead(dataSource, sql, PostgresType, value, isDefault: false); + valueTypeEqualsFieldType: false); + await AssertTypeRead(dataSource, sql, PostgresType, value, valueTypeEqualsFieldType: false); } [Test] @@ -247,8 +248,8 @@ public async Task Poco_exact_polymorphic_mapping() ? 
"""{"Date": "2019-09-01T00:00:00", "Summary": "Partly cloudy", "TemperatureC": 10, "TemperatureF": 49}""" : """{"TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}"""; - await AssertTypeWrite(dataSource, value, sql, PostgresType, isDataTypeInferredFromValue: false); - await AssertTypeRead(dataSource, sql, PostgresType, value, isDefault: false); + await AssertTypeWrite(dataSource, value, sql, PostgresType, dataTypeInference: DataTypeInference.Nothing); + await AssertTypeRead(dataSource, sql, PostgresType, value, valueTypeEqualsFieldType: false); } [Test] @@ -275,7 +276,7 @@ public async Task Poco_unspecified_polymorphic_mapping() ? """{"Date": "2019-09-01T00:00:00", "$type": "extended", "Summary": "Partly cloudy", "TemperatureC": 10, "TemperatureF": 49}""" : """{"$type":"extended","TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}"""; - await AssertTypeWrite(dataSource, value, sql, PostgresType, isDefault: false); + await AssertTypeWrite(dataSource, value, sql, PostgresType, dataTypeInference: DataTypeInference.Nothing); // Reading as DerivedWeatherForecast should not cause us to get an instance of ExtendedDerivedWeatherForecast (as it doesn't define JsonDerivedType) await AssertTypeRead(dataSource, sql, PostgresType, @@ -285,8 +286,8 @@ await AssertTypeRead(dataSource, sql, PostgresType, Summary = "Partly cloudy", TemperatureC = 10 }, - isDefault: false); - await AssertTypeRead(dataSource, sql, PostgresType, value, isDefault: false); + valueTypeEqualsFieldType: false); + await AssertTypeRead(dataSource, sql, PostgresType, value, valueTypeEqualsFieldType: false); } [Test] @@ -314,7 +315,7 @@ public async Task Poco_polymorphic_mapping_without_AllowOutOfOrderMetadataProper ? 
"""{"Date": "2019-09-01T00:00:00", "Summary": "Partly cloudy", "TemperatureC": 10, "TemperatureF": 49}""" : """{"$type":"extended","TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}"""; - await AssertTypeWrite(dataSource, value, sql, PostgresType, isDataTypeInferredFromValue: false); + await AssertTypeWrite(dataSource, value, sql, PostgresType, dataTypeInference: DataTypeInference.Nothing); // As we have disabled polymorphism for jsonb when AllowOutOfOrderMetadataProperties = false we should be able to read it as equalt to a WeatherForecast instance. if (IsJsonb) @@ -325,7 +326,7 @@ await AssertTypeRead(dataSource, sql, PostgresType, Summary = "Partly cloudy", TemperatureC = 10 }, - isDefault: false); + valueTypeEqualsFieldType: false); // Reading as DerivedWeatherForecast should not cause us to get an instance of ExtendedDerivedWeatherForecast (as it doesn't define JsonDerivedType) await AssertTypeRead(dataSource, sql, PostgresType, @@ -335,12 +336,12 @@ await AssertTypeRead(dataSource, sql, PostgresType, Summary = "Partly cloudy", TemperatureC = 10 }, - isDefault: false); + valueTypeEqualsFieldType: false); // We won't get the original value back for jsonb as we can't support polymorphism without also enforcing AllowOutOfOrderMetadataProperties is true. // If we output $type, jsonb won't have that at the start and STJ will throw due to it appearing later in the object. So it's disabled entirely. if (!IsJsonb) - await AssertTypeRead(dataSource, sql, PostgresType, value, isDefault: false); + await AssertTypeRead(dataSource, sql, PostgresType, value, valueTypeEqualsFieldType: false); } [Test] @@ -367,7 +368,7 @@ public async Task Poco_unspecified_polymorphic_mapping_without_AllowOutOfOrderMe ? 
"""{"Date": "2019-09-01T00:00:00", "Summary": "Partly cloudy", "TemperatureC": 10, "TemperatureF": 49}""" : """{"$type":"extended","TemperatureF":49,"Date":"2019-09-01T00:00:00","TemperatureC":10,"Summary":"Partly cloudy"}"""; - await AssertTypeWrite(dataSource, value, sql, PostgresType, isDefault: false); + await AssertTypeWrite(dataSource, value, sql, PostgresType, dataTypeInference: DataTypeInference.Nothing); // As we have disabled polymorphism for jsonb when AllowOutOfOrderMetadataProperties = false we should be able to read it as equalt to a WeatherForecast instance. if (IsJsonb) @@ -378,7 +379,7 @@ await AssertTypeRead(dataSource, sql, PostgresType, Summary = "Partly cloudy", TemperatureC = 10 }, - isDefault: false); + valueTypeEqualsFieldType: false); // Reading as DerivedWeatherForecast should not cause us to get an instance of ExtendedDerivedWeatherForecast (as it doesn't define JsonDerivedType) await AssertTypeRead(dataSource, sql, PostgresType, @@ -388,12 +389,12 @@ await AssertTypeRead(dataSource, sql, PostgresType, Summary = "Partly cloudy", TemperatureC = 10 }, - isDefault: false); + valueTypeEqualsFieldType: false); // We won't get the original value back for jsonb as we can't support polymorphism without also enforcing AllowOutOfOrderMetadataProperties is true. // If we output $type, jsonb won't have that at the start and STJ will throw due to it appearing later in the object. So it's disabled entirely. 
if (!IsJsonb) - await AssertTypeRead(dataSource, sql, PostgresType, value, isDefault: false); + await AssertTypeRead(dataSource, sql, PostgresType, value, valueTypeEqualsFieldType: false); } // ReSharper disable UnusedAutoPropertyAccessor.Local diff --git a/test/Npgsql.Tests/Types/JsonPathTests.cs b/test/Npgsql.Tests/Types/JsonPathTests.cs index 2a1d3b8b52..044ecc1827 100644 --- a/test/Npgsql.Tests/Types/JsonPathTests.cs +++ b/test/Npgsql.Tests/Types/JsonPathTests.cs @@ -22,8 +22,8 @@ public async Task JsonPath(string jsonPath) using var conn = await OpenConnectionAsync(); MinimumPgVersion(conn, "12.0", "The jsonpath type was introduced in PostgreSQL 12"); await AssertType( - jsonPath, jsonPath, "jsonpath", isDefaultForWriting: false, isDataTypeInferredFromValue: false, - inferredDbType: DbType.Object); + jsonPath, jsonPath, "jsonpath", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.String)); } [Test] diff --git a/test/Npgsql.Tests/Types/JsonTests.cs b/test/Npgsql.Tests/Types/JsonTests.cs index 511544d430..8b5e36bc7e 100644 --- a/test/Npgsql.Tests/Types/JsonTests.cs +++ b/test/Npgsql.Tests/Types/JsonTests.cs @@ -1,4 +1,5 @@ using System; +using System.Data; using System.IO; using System.Text; using System.Text.Json; @@ -16,7 +17,9 @@ public class JsonTests : MultiplexingTestBase { [Test] public async Task As_string() - => await AssertType("""{"K": "V"}""", """{"K": "V"}""", PostgresType, isDefaultForWriting: false); + => await AssertType("""{"K": "V"}""", """{"K": "V"}""", + PostgresType, dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.String)); [Test] public async Task As_string_long() @@ -29,7 +32,9 @@ public async Task As_string_long() .Append(@"""}") .ToString(); - await AssertType(value, value, PostgresType, isDefaultForWriting: false); + await AssertType(value, value, + PostgresType, dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.String)); } [Test] @@ 
-45,25 +50,33 @@ public async Task As_string_with_GetTextReader() [Test] public async Task As_char_array() - => await AssertType("""{"K": "V"}""".ToCharArray(), """{"K": "V"}""", PostgresType, isDefault: false); + => await AssertType("""{"K": "V"}""".ToCharArray(), """{"K": "V"}""", + PostgresType, dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.String), valueTypeEqualsFieldType: false); [Test] public async Task As_bytes() - => await AssertType("""{"K": "V"}"""u8.ToArray(), """{"K": "V"}""", PostgresType, isDefault: false); + => await AssertType("""{"K": "V"}"""u8.ToArray(), """{"K": "V"}""", + PostgresType, dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.Binary), valueTypeEqualsFieldType: false); [Test] public async Task Write_as_ReadOnlyMemory_of_byte() - => await AssertTypeWrite(new ReadOnlyMemory("""{"K": "V"}"""u8.ToArray()), """{"K": "V"}""", PostgresType, - isDefault: false); + => await AssertTypeWrite(new ReadOnlyMemory("""{"K": "V"}"""u8.ToArray()), """{"K": "V"}""", + PostgresType, dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.Binary)); [Test] public async Task Write_as_ArraySegment_of_char() - => await AssertTypeWrite(new ArraySegment("""{"K": "V"}""".ToCharArray()), """{"K": "V"}""", PostgresType, - isDefault: false); + => await AssertTypeWrite(new ArraySegment("""{"K": "V"}""".ToCharArray()), """{"K": "V"}""", + PostgresType, dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.String)); [Test] public Task As_MemoryStream() - => AssertTypeWrite(() => new MemoryStream("""{"K": "V"}"""u8.ToArray()), """{"K": "V"}""", PostgresType, isDefault: false); + => AssertTypeWrite(() => new MemoryStream("""{"K": "V"}"""u8.ToArray()), """{"K": "V"}""", + PostgresType, dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.Binary)); [Test] public async Task As_JsonDocument() @@ -71,8 +84,9 @@ public async 
Task As_JsonDocument() JsonDocument.Parse("""{"K": "V"}"""), IsJsonb ? """{"K": "V"}""" : """{"K":"V"}""", PostgresType, - isDefault: false, - comparer: (x, y) => x.RootElement.GetProperty("K").GetString() == y.RootElement.GetProperty("K").GetString()); + dataTypeInference: DataTypeInference.Mismatch, + comparer: (x, y) => x.RootElement.GetProperty("K").GetString() == y.RootElement.GetProperty("K").GetString(), + valueTypeEqualsFieldType: false); [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5540")] public async Task As_JsonDocument_with_null_root() @@ -80,8 +94,9 @@ public async Task As_JsonDocument_with_null_root() JsonDocument.Parse("null"), "null", PostgresType, - isDefault: false, + dataTypeInference: DataTypeInference.Mismatch, comparer: (x, y) => x.RootElement.ValueKind == y.RootElement.ValueKind, + valueTypeEqualsFieldType: false, skipArrayCheck: true); [Test] @@ -90,8 +105,9 @@ public async Task As_JsonElement_with_null_root() JsonDocument.Parse("null").RootElement, "null", PostgresType, - isDefault: false, + dataTypeInference: DataTypeInference.Mismatch, comparer: (x, y) => x.ValueKind == y.ValueKind, + valueTypeEqualsFieldType: false, skipArrayCheck: true); [Test] @@ -111,27 +127,24 @@ public Task Roundtrip_string() => AssertType( @"{""p"": 1}", @"{""p"": 1}", - PostgresType, - isDefault: false, - isDataTypeInferredFromValue: false); + PostgresType, dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.String), valueTypeEqualsFieldType: true); [Test] public Task Roundtrip_char_array() => AssertType( @"{""p"": 1}".ToCharArray(), @"{""p"": 1}", - PostgresType, - isDefault: false, - isDataTypeInferredFromValue: false); + PostgresType, dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.String), valueTypeEqualsFieldType: false); [Test] public Task Roundtrip_byte_array() => AssertType( - Encoding.ASCII.GetBytes(@"{""p"": 1}"), + @"{""p"": 1}"u8.ToArray(), @"{""p"": 1}", - PostgresType, - 
isDefault: false, - isDataTypeInferredFromValue: false); + PostgresType, dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.Binary), valueTypeEqualsFieldType: false); [Test] [IssueLink("https://github.com/npgsql/npgsql/issues/2811")] @@ -166,9 +179,8 @@ public Task Roundtrip_JsonObject() IsJsonb ? """{"Bar": 8}""" : """{"Bar":8}""", PostgresType, // By default we map JsonObject to jsonb - isDefaultForWriting: IsJsonb, - isDefaultForReading: false, - isDataTypeInferredFromValue: false, + dataTypeInference: IsJsonb ? DataTypeInference.Match : DataTypeInference.Mismatch, + valueTypeEqualsFieldType: false, comparer: (x, y) => x.ToString() == y.ToString()); [Test] @@ -178,9 +190,8 @@ public Task Roundtrip_JsonArray() IsJsonb ? "[1, 2, 3]" : "[1,2,3]", PostgresType, // By default we map JsonArray to jsonb - isDefaultForWriting: IsJsonb, - isDefaultForReading: false, - isDataTypeInferredFromValue: false, + dataTypeInference: IsJsonb ? DataTypeInference.Match : DataTypeInference.Mismatch, + valueTypeEqualsFieldType: false, comparer: (x, y) => x.ToString() == y.ToString()); [Test] diff --git a/test/Npgsql.Tests/Types/LTreeTests.cs b/test/Npgsql.Tests/Types/LTreeTests.cs index 035debff5c..d0ab3dbf29 100644 --- a/test/Npgsql.Tests/Types/LTreeTests.cs +++ b/test/Npgsql.Tests/Types/LTreeTests.cs @@ -1,4 +1,5 @@ -using System.Threading.Tasks; +using System.Data; +using System.Threading.Tasks; using Npgsql.Properties; using NpgsqlTypes; using NUnit.Framework; @@ -9,15 +10,21 @@ public class LTreeTests(MultiplexingMode multiplexingMode) : MultiplexingTestBas { [Test] public Task LQuery() - => AssertType("Top.Science.*", "Top.Science.*", "lquery", isDefaultForWriting: false); + => AssertType("Top.Science.*", "Top.Science.*", + "lquery", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.String)); [Test] public Task LTree() - => AssertType("Top.Science.Astronomy", "Top.Science.Astronomy", "ltree", isDefaultForWriting: 
false); + => AssertType("Top.Science.Astronomy", "Top.Science.Astronomy", + "ltree", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.String)); [Test] public Task LTxtQuery() - => AssertType("Science & Astronomy", "Science & Astronomy", "ltxtquery", isDefaultForWriting: false); + => AssertType("Science & Astronomy", "Science & Astronomy", + "ltxtquery", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.String)); [Test] public async Task LTree_not_supported_by_default_on_NpgsqlSlimSourceBuilder() @@ -36,24 +43,18 @@ public async Task LTree_not_supported_by_default_on_NpgsqlSlimSourceBuilder() } [Test] - public async Task NpgsqlSlimSourceBuilder_EnableLTree() + public async Task NpgsqlSlimSourceBuilder_EnableLTree([Values] bool withArrays) { var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); dataSourceBuilder.EnableLTree(); + if (withArrays) + dataSourceBuilder.EnableArrays(); await using var dataSource = dataSourceBuilder.Build(); - await AssertType(dataSource, "Top.Science.Astronomy", "Top.Science.Astronomy", "ltree", isDefaultForWriting: false, skipArrayCheck: true); - } - - [Test] - public async Task NpgsqlSlimSourceBuilder_EnableArrays() - { - var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); - dataSourceBuilder.EnableLTree(); - dataSourceBuilder.EnableArrays(); - await using var dataSource = dataSourceBuilder.Build(); - - await AssertType(dataSource, "Top.Science.Astronomy", "Top.Science.Astronomy", "ltree", isDefaultForWriting: false); + await AssertType(dataSource, "Top.Science.Astronomy", "Top.Science.Astronomy", + "ltree", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Object, DbType.String), + skipArrayCheck: !withArrays); } [OneTimeSetUp] diff --git a/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs b/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs index ef6e2f0936..9dd2e9e9fb 100644 --- 
a/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs +++ b/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs @@ -2,7 +2,6 @@ using System.Data; using System.Threading.Tasks; using Npgsql.Internal.ResolverFactories; -using NpgsqlTypes; using NUnit.Framework; using static Npgsql.Util.Statics; @@ -18,7 +17,7 @@ public Task Timestamp_with_all_DateTime_kinds([Values] DateTimeKind kind) new DateTime(1998, 4, 12, 13, 26, 38, 789, kind), "1998-04-12 13:26:38.789", "timestamp without time zone", - DbType.DateTime); + dbType: DbType.DateTime); [Test] public async Task Timestamp_read_as_Unspecified_DateTime() @@ -32,7 +31,7 @@ public async Task Timestamp_read_as_Unspecified_DateTime() public async Task Timestamptz_negative_infinity() { var dto = await AssertType(DateTimeOffset.MinValue, "-infinity", "timestamp with time zone", - DbType.DateTimeOffset, isDefaultForReading: false); + dbType: DbType.DateTimeOffset, valueTypeEqualsFieldType: false); Assert.That(dto.Offset, Is.EqualTo(TimeSpan.Zero)); } @@ -40,8 +39,8 @@ public async Task Timestamptz_negative_infinity() public async Task Timestamptz_infinity() { var dto = await AssertType( - DateTimeOffset.MaxValue, "infinity", "timestamp with time zone", DbType.DateTimeOffset, - isDefaultForReading: false); + DateTimeOffset.MaxValue, "infinity", "timestamp with time zone", dbType: DbType.DateTimeOffset, + valueTypeEqualsFieldType: false); Assert.That(dto.Offset, Is.EqualTo(TimeSpan.Zero)); } @@ -50,11 +49,9 @@ public async Task Timestamptz_infinity() [TestCase(DateTimeKind.Unspecified, TestName = "Timestamptz_write_unspecified_DateTime_does_not_convert")] public Task Timestamptz_write_utc_DateTime_does_not_convert(DateTimeKind kind) => AssertTypeWrite( - new DateTime(1998, 4, 12, 13, 26, 38, 789, kind), - "1998-04-12 15:26:38.789+02", - "timestamp with time zone", - DbType.DateTimeOffset, - isDefault: false); + new DateTime(1998, 4, 12, 13, 26, 38, 789, kind), "1998-04-12 15:26:38.789+02", + "timestamp with time zone", 
dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.DateTimeOffset, DbType.DateTime)); [Test] public Task Timestamptz_local_DateTime_converts() @@ -64,11 +61,9 @@ public Task Timestamptz_local_DateTime_converts() var dateTime = new DateTime(1998, 4, 12, 13, 26, 38, 789, DateTimeKind.Utc).ToLocalTime(); return AssertType( - dateTime, - "1998-04-12 15:26:38.789+02", - "timestamp with time zone", - DbType.DateTimeOffset, - isDefaultForWriting: false); + dateTime, "1998-04-12 15:26:38.789+02", + "timestamp with time zone", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.DateTimeOffset, DbType.DateTime)); } NpgsqlDataSource _dataSource = null!; diff --git a/test/Npgsql.Tests/Types/MiscTypeTests.cs b/test/Npgsql.Tests/Types/MiscTypeTests.cs index 1fc0c68b36..ce638ebfef 100644 --- a/test/Npgsql.Tests/Types/MiscTypeTests.cs +++ b/test/Npgsql.Tests/Types/MiscTypeTests.cs @@ -14,8 +14,8 @@ class MiscTypeTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(mu [Test] public async Task Boolean() { - await AssertType(true, "true", "boolean", DbType.Boolean, skipArrayCheck: true); - await AssertType(false, "false", "boolean", DbType.Boolean, skipArrayCheck: true); + await AssertType(true, "true", "boolean", dbType: DbType.Boolean, skipArrayCheck: true); + await AssertType(false, "false", "boolean", dbType: DbType.Boolean, skipArrayCheck: true); // The literal representations for bools inside array are different ({t,f} instead of true/false, so we check separately. 
await AssertType(new[] { true, false }, "{t,f}", "boolean[]"); @@ -26,7 +26,7 @@ public Task Uuid() => AssertType( new Guid("a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11"), "a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11", - "uuid", DbType.Guid); + "uuid", dbType: DbType.Guid); [Test, Description("Makes sure that the PostgreSQL 'unknown' type (OID 705) is read properly")] public async Task Read_unknown() @@ -172,24 +172,33 @@ public async Task Send_unknown() [Test] public async Task ObjectArray() { - await AssertTypeWrite(new object?[] { (short)4, null, (long)5, 6 }, "{4,NULL,5,6}", "integer[]", isDefault: false); - await AssertTypeWrite(new object?[] { "text", null, DBNull.Value, "chars".ToCharArray(), 'c' }, "{text,NULL,NULL,chars,c}", "text[]", isDefault: false); + await AssertTypeWrite(new object?[] { (short)4, null, (long)5, 6 }, "{4,NULL,5,6}", + "integer[]", dataTypeInference: DataTypeInference.Nothing); + await AssertTypeWrite(new object?[] { "text", null, DBNull.Value, "chars".ToCharArray(), 'c' }, "{text,NULL,NULL,chars,c}", + "text[]", dataTypeInference: DataTypeInference.Nothing); await using var dataSource = CreateDataSource(b => b.ConnectionStringBuilder.Timezone = "Europe/Berlin"); - await AssertTypeWrite(dataSource, new object?[] { DateTime.UnixEpoch, null, DBNull.Value, DateTime.UnixEpoch.AddDays(1) }, "{\"1970-01-01 01:00:00+01\",NULL,NULL,\"1970-01-02 01:00:00+01\"}", "timestamp with time zone[]", isDefault: false); + await AssertTypeWrite(dataSource, new object?[] { DateTime.UnixEpoch, null, DBNull.Value, DateTime.UnixEpoch.AddDays(1) }, + "{\"1970-01-01 01:00:00+01\",NULL,NULL,\"1970-01-02 01:00:00+01\"}", + "timestamp with time zone[]", dataTypeInference: DataTypeInference.Nothing); Assert.ThrowsAsync(() => AssertTypeWrite(dataSource, new object?[] { DateTime.Now, null, DBNull.Value, DateTime.UnixEpoch.AddDays(1) - }, "{\"1970-01-01 01:00:00+01\",NULL,NULL,\"1970-01-02 01:00:00+01\"}", "timestamp with time zone[]", isDefault: false)); + }, "{\"1970-01-01 
01:00:00+01\",NULL,NULL,\"1970-01-02 01:00:00+01\"}", "timestamp with time zone[]", + dataTypeInference: DataTypeInference.Nothing)); } [Test] public Task Int2Vector() - => AssertType(new short[] { 4, 5, 6 }, "4 5 6", "int2vector", isDefault: false); + => AssertType(new short[] { 4, 5, 6 }, "4 5 6", + "int2vector", dataTypeInference: DataTypeInference.Mismatch, + // int2vector mappings require a data type name, so passing a value of type short[][] will result in no mapping. + skipArrayCheck: true); [Test] public Task Oidvector() - => AssertType(new uint[] { 4, 5, 6 }, "4 5 6", "oidvector", isDefault: false); + => AssertType(new uint[] { 4, 5, 6 }, "4 5 6", + "oidvector", dataTypeInference: DataTypeInference.Nothing); [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1138")] public async Task Void() diff --git a/test/Npgsql.Tests/Types/MoneyTests.cs b/test/Npgsql.Tests/Types/MoneyTests.cs index 3351f0abaf..8f277a6e34 100644 --- a/test/Npgsql.Tests/Types/MoneyTests.cs +++ b/test/Npgsql.Tests/Types/MoneyTests.cs @@ -27,7 +27,9 @@ public async Task Money(string sqlLiteral, decimal money) { using var conn = await OpenConnectionAsync(); await conn.ExecuteNonQueryAsync("SET lc_monetary='C'"); - await AssertType(conn, money, sqlLiteral, "money", DbType.Currency, isDefault: false); + await AssertType(conn, money, sqlLiteral, + "money", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Currency, DbType.Decimal)); } [Test] diff --git a/test/Npgsql.Tests/Types/MultirangeTests.cs b/test/Npgsql.Tests/Types/MultirangeTests.cs index 7553e07471..53eb6e210a 100644 --- a/test/Npgsql.Tests/Types/MultirangeTests.cs +++ b/test/Npgsql.Tests/Types/MultirangeTests.cs @@ -1,7 +1,6 @@ using System; using System.Collections.Generic; using System.Data; -using System.Linq; using System.Threading.Tasks; using Npgsql.Properties; using NpgsqlTypes; @@ -21,7 +20,7 @@ public class MultirangeTests : TestBase new(3, true, false, 7, false, false), new(9, true, false, 0, false, 
true) }, - "{[3,7),[9,)}", "int4multirange", true, true, default(NpgsqlRange)) + "{[3,7),[9,)}", "int4multirange", DataTypeInference.Match, true, default(NpgsqlRange)) .SetName("Int"), // int8multirange @@ -31,7 +30,7 @@ public class MultirangeTests : TestBase new(3, true, false, 7, false, false), new(9, true, false, 0, false, true) }, - "{[3,7),[9,)}", "int8multirange", true, true, default(NpgsqlRange)) + "{[3,7),[9,)}", "int8multirange", DataTypeInference.Match, true, default(NpgsqlRange)) .SetName("Long"), // nummultirange @@ -42,7 +41,7 @@ public class MultirangeTests : TestBase new(3, true, false, 7, true, false), new(9, false, false, 0, false, true) }, - "{[3,7],(9,)}", "nummultirange", true, true, default(NpgsqlRange)) + "{[3,7],(9,)}", "nummultirange", DataTypeInference.Match, true, default(NpgsqlRange)) .SetName("Decimal"), // daterange @@ -52,7 +51,7 @@ public class MultirangeTests : TestBase new(new(2020, 1, 1), true, false, new(2020, 1, 5), false, false), new(new(2020, 1, 10), true, false, default, false, true) }, - "{[2020-01-01,2020-01-05),[2020-01-10,)}", "datemultirange", true, false, default(NpgsqlRange)) + "{[2020-01-01,2020-01-05),[2020-01-10,)}", "datemultirange", DataTypeInference.Match, true, default(NpgsqlRange)) .SetName("DateTime DateMultirange"), // tsmultirange @@ -62,7 +61,7 @@ public class MultirangeTests : TestBase new(new(2020, 1, 1), true, false, new(2020, 1, 5), false, false), new(new(2020, 1, 10), true, false, default, false, true) }, - """{["2020-01-01 00:00:00","2020-01-05 00:00:00"),["2020-01-10 00:00:00",)}""", "tsmultirange", true, true, default(NpgsqlRange)) + """{["2020-01-01 00:00:00","2020-01-05 00:00:00"),["2020-01-10 00:00:00",)}""", "tsmultirange", DataTypeInference.Match, true, default(NpgsqlRange)) .SetName("DateTime TimestampMultirange"), // tstzmultirange @@ -72,7 +71,7 @@ public class MultirangeTests : TestBase new(new(2020, 1, 1, 0, 0, 0, kind: DateTimeKind.Utc), true, false, new(2020, 1, 5, 0, 0, 0, kind: 
DateTimeKind.Utc), false, false), new(new(2020, 1, 10, 0, 0, 0, kind: DateTimeKind.Utc), true, false, default, false, true) }, - """{["2020-01-01 01:00:00+01","2020-01-05 01:00:00+01"),["2020-01-10 01:00:00+01",)}""", "tstzmultirange", true, true, default(NpgsqlRange)) + """{["2020-01-01 01:00:00+01","2020-01-05 01:00:00+01"),["2020-01-10 01:00:00+01",)}""", "tstzmultirange", DataTypeInference.Match, true, default(NpgsqlRange)) .SetName("DateTime TimestampTzMultirange"), new TestCaseData( @@ -81,23 +80,23 @@ public class MultirangeTests : TestBase new(new(2020, 1, 1), true, false, new(2020, 1, 5), false, false), new(new(2020, 1, 10), true, false, default, false, true) }, - "{[2020-01-01,2020-01-05),[2020-01-10,)}", "datemultirange", false, false, default(NpgsqlRange)) + "{[2020-01-01,2020-01-05),[2020-01-10,)}", "datemultirange", DataTypeInference.Mismatch, true, default(NpgsqlRange)) .SetName("DateOnly") ]; [Test, TestCaseSource(nameof(MultirangeTestCases))] public Task Multirange_as_array( - T multirangeAsArray, string sqlLiteral, string pgTypeName, bool isDefaultForReading, bool isDefaultForWriting, TRange _) - => AssertType(multirangeAsArray, sqlLiteral, pgTypeName, isDefaultForReading: isDefaultForReading, - isDefaultForWriting: isDefaultForWriting); + T multirangeAsArray, string sqlLiteral, string dataTypeName, DataTypeInference datatypeDataTypeInference, bool valueTypeEqualsFieldType, TRange _) + => AssertType(multirangeAsArray, sqlLiteral, dataTypeName, + dataTypeInference: datatypeDataTypeInference, valueTypeEqualsFieldType: valueTypeEqualsFieldType); [Test, TestCaseSource(nameof(MultirangeTestCases))] public Task Multirange_as_list( - T multirangeAsArray, string sqlLiteral, string pgTypeName, bool isDefaultForReading, bool isDefaultForWriting, TRange _) + T multirangeAsArray, string sqlLiteral, string dataTypeName, DataTypeInference datatypeDataTypeInference, bool valueTypeEqualsFieldType, TRange _) where T : IList => AssertType( - new 
List(multirangeAsArray), - sqlLiteral, pgTypeName, isDefaultForReading: false, isDefaultForWriting: isDefaultForWriting); + new List(multirangeAsArray), sqlLiteral, dataTypeName, + dataTypeInference: datatypeDataTypeInference, valueTypeEqualsFieldType: false); [Test] public async Task Unmapped_multirange_with_mapped_subtype() @@ -152,7 +151,7 @@ public async Task Unmapped_multirange_supported_only_with_EnableUnmappedTypes() Assert.That(exception.InnerException, Is.InstanceOf()); Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); - exception = await AssertTypeUnsupportedRead("""{["bar","foo"],["moo","zoo"]}""", + exception = await AssertTypeUnsupportedRead("""{["bar","foo"],["moo","zoo"]}""", multirangeTypeName); Assert.That(exception.InnerException, Is.InstanceOf()); Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); diff --git a/test/Npgsql.Tests/Types/NetworkTypeTests.cs b/test/Npgsql.Tests/Types/NetworkTypeTests.cs index edd8f1dee6..9a15555989 100644 --- a/test/Npgsql.Tests/Types/NetworkTypeTests.cs +++ b/test/Npgsql.Tests/Types/NetworkTypeTests.cs @@ -65,7 +65,7 @@ public Task NpgsqlCidr_as_Cidr() new NpgsqlCidr(IPAddress.Parse("192.168.1.0"), netmask: 24), "192.168.1.0/24", "cidr", - isDefaultForReading: false); + valueTypeEqualsFieldType: false); #pragma warning restore CS0618 [Test] @@ -74,7 +74,7 @@ public Task Inet_v4_as_NpgsqlInet() new NpgsqlInet(IPAddress.Parse("192.168.1.1"), 24), "192.168.1.1/24", "inet", - isDefaultForReading: false); + valueTypeEqualsFieldType: false); [Test] public Task Inet_v6_as_NpgsqlInet() @@ -82,7 +82,7 @@ public Task Inet_v6_as_NpgsqlInet() new NpgsqlInet(IPAddress.Parse("2001:1db8:85a3:1142:1000:8a2e:1370:7334"), 24), "2001:1db8:85a3:1142:1000:8a2e:1370:7334/24", "inet", - isDefaultForReading: false); + valueTypeEqualsFieldType: false); [Test] public Task Macaddr() @@ -95,8 +95,8 @@ public async Task Macaddr8() if (conn.PostgreSqlVersion < new Version(10, 0)) 
Assert.Ignore("macaddr8 only supported on PostgreSQL 10 and above"); - await AssertType(PhysicalAddress.Parse("08-00-2B-01-02-03-04-05"), "08:00:2b:01:02:03:04:05", "macaddr8", - isDefaultForWriting: false); + await AssertType(PhysicalAddress.Parse("08-00-2B-01-02-03-04-05"), "08:00:2b:01:02:03:04:05", + "macaddr8", dataTypeInference: DataTypeInference.Mismatch); } [Test] @@ -106,8 +106,8 @@ public async Task Macaddr8_write_with_6_bytes() if (conn.PostgreSqlVersion < new Version(10, 0)) Assert.Ignore("macaddr8 only supported on PostgreSQL 10 and above"); - await AssertTypeWrite(PhysicalAddress.Parse("08-00-2B-01-02-03"), "08:00:2b:ff:fe:01:02:03", "macaddr8", - isDefault: false); + await AssertTypeWrite(PhysicalAddress.Parse("08-00-2B-01-02-03"), "08:00:2b:ff:fe:01:02:03", + "macaddr8", dataTypeInference: DataTypeInference.Mismatch); } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/835")] diff --git a/test/Npgsql.Tests/Types/NumericTests.cs b/test/Npgsql.Tests/Types/NumericTests.cs index 5b6c366b96..2a651b3b87 100644 --- a/test/Npgsql.Tests/Types/NumericTests.cs +++ b/test/Npgsql.Tests/Types/NumericTests.cs @@ -3,7 +3,6 @@ using System.Linq; using System.Numerics; using System.Threading.Tasks; -using NpgsqlTypes; using NUnit.Framework; namespace Npgsql.Tests.Types; @@ -114,15 +113,21 @@ public async Task Write(string query, decimal expected) [Test] public async Task Numeric() { - await AssertType(5.5m, "5.5", "numeric", DbType.Decimal); - await AssertTypeWrite(5.5m, "5.5", "numeric", DbType.VarNumeric, inferredDbType: DbType.Decimal); - - await AssertType((short)8, "8", "numeric", DbType.Decimal, isDefault: false); - await AssertType(8, "8", "numeric", DbType.Decimal, isDefault: false); - await AssertType((byte)8, "8", "numeric", DbType.Decimal, isDefault: false); - await AssertType(8F, "8", "numeric", DbType.Decimal, isDefault: false); - await AssertType(8D, "8", "numeric", DbType.Decimal, isDefault: false); - await AssertType(8M, "8", "numeric", 
DbType.Decimal, isDefault: false); + await AssertType(5.5m, "5.5", "numeric", dbType: DbType.Decimal); + await AssertTypeWrite(5.5m, "5.5", "numeric", dbType: new(DbType.Decimal, DbType.Decimal, DbType.VarNumeric)); + + await AssertType((short)8, "8", "numeric", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Decimal, DbType.Int16), valueTypeEqualsFieldType: false); + await AssertType(8, "8", "numeric", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Decimal, DbType.Int32), valueTypeEqualsFieldType: false); + await AssertType(8L, "8", "numeric", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Decimal, DbType.Int64), valueTypeEqualsFieldType: false); + await AssertType((byte)8, "8", "numeric", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Decimal, DbType.Int16), valueTypeEqualsFieldType: false, skipArrayCheck: true); + await AssertType(8F, "8", "numeric", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Decimal, DbType.Single), valueTypeEqualsFieldType: false); + await AssertType(8D, "8", "numeric", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Decimal, DbType.Double), valueTypeEqualsFieldType: false); } [Test, Description("Tests that when Numeric value does not fit in a System.Decimal and reader is in ReaderState.InResult, the value was read wholly and it is safe to continue reading")] diff --git a/test/Npgsql.Tests/Types/NumericTypeTests.cs b/test/Npgsql.Tests/Types/NumericTypeTests.cs index 406e1b7f44..f3e535152e 100644 --- a/test/Npgsql.Tests/Types/NumericTypeTests.cs +++ b/test/Npgsql.Tests/Types/NumericTypeTests.cs @@ -18,59 +18,82 @@ public class NumericTypeTests(MultiplexingMode multiplexingMode) : MultiplexingT [Test] public async Task Int16() { - await AssertType((short)8, "8", "smallint", DbType.Int16); + await AssertType((short)8, "8", "smallint", dbType: DbType.Int16); // Clr byte/sbyte maps to 'int2' as there is no byte type in 
PostgreSQL, byte[] maps to bytea however. - await AssertType((byte)8, "8", "smallint", DbType.Int16, isDefaultForReading: false, skipArrayCheck: true); - await AssertType((sbyte)8, "8", "smallint", DbType.Int16, isDefaultForReading: false); + await AssertType((byte)8, "8", "smallint", dataTypeInference: DataTypeInference.Mismatch, + dbType: DbType.Int16, valueTypeEqualsFieldType: false, skipArrayCheck: true); + await AssertType((sbyte)8, "8", "smallint", dataTypeInference: DataTypeInference.Mismatch, + dbType: DbType.Int16, valueTypeEqualsFieldType: false); - await AssertType(8, "8", "smallint", DbType.Int16, isDefault: false); - await AssertType(8L, "8", "smallint", DbType.Int16, isDefault: false); - await AssertType(8F, "8", "smallint", DbType.Int16, isDefault: false); - await AssertType(8D, "8", "smallint", DbType.Int16, isDefault: false); - await AssertType(8M, "8", "smallint", DbType.Int16, isDefault: false); + await AssertType(8, "8", "smallint", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Int16, DbType.Int32), valueTypeEqualsFieldType: false); + await AssertType(8L, "8", "smallint", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Int16, DbType.Int64), valueTypeEqualsFieldType: false); + await AssertType(8F, "8", "smallint", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Int16, DbType.Single), valueTypeEqualsFieldType: false); + await AssertType(8D, "8", "smallint", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Int16, DbType.Double), valueTypeEqualsFieldType: false); + await AssertType(8M, "8", "smallint", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Int16, DbType.Decimal), valueTypeEqualsFieldType: false); } [Test] public async Task Int32() { - await AssertType(8, "8", "integer", DbType.Int32); + await AssertType(8, "8", "integer", dbType: DbType.Int32); - await AssertType((short)8, "8", "integer", DbType.Int32, isDefault: false); - await 
AssertType(8L, "8", "integer", DbType.Int32, isDefault: false); - await AssertType((byte)8, "8", "integer", DbType.Int32, isDefault: false); - await AssertType(8F, "8", "integer", DbType.Int32, isDefault: false); - await AssertType(8D, "8", "integer", DbType.Int32, isDefault: false); - await AssertType(8M, "8", "integer", DbType.Int32, isDefault: false); + await AssertType((short)8, "8", "integer", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Int32, DbType.Int16), valueTypeEqualsFieldType: false); + await AssertType(8L, "8", "integer", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Int32, DbType.Int64), valueTypeEqualsFieldType: false); + await AssertType((byte)8, "8", "integer", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Int32, DbType.Int16), valueTypeEqualsFieldType: false, skipArrayCheck: true); // byte[] maps to bytea + await AssertType((sbyte)8, "8", "integer", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Int32, DbType.Int16), valueTypeEqualsFieldType: false); + await AssertType(8F, "8", "integer", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Int32, DbType.Single), valueTypeEqualsFieldType: false); + await AssertType(8D, "8", "integer", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Int32, DbType.Double), valueTypeEqualsFieldType: false); + await AssertType(8M, "8", "integer", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Int32, DbType.Decimal), valueTypeEqualsFieldType: false); } [Test, Description("Tests some types which are aliased to UInt32")] [TestCase("oid", TestName="OID")] [TestCase("xid", TestName="XID")] [TestCase("cid", TestName="CID")] - public Task UInt32(string pgTypeName) - => AssertType(8u, "8", pgTypeName, isDefaultForWriting: false); + public Task UInt32(string dataTypeName) + => AssertType(8u, "8", dataTypeName, dataTypeInference: DataTypeInference.Nothing); [Test] [TestCase("xid8", 
TestName="XID8")] - public async Task UInt64(string pgTypeName) + public async Task UInt64(string dataTypeName) { await using var conn = await OpenConnectionAsync(); MinimumPgVersion(conn, "13.0", "The xid8 type was introduced in PostgreSQL 13"); - await AssertType(8ul, "8", pgTypeName, isDefaultForWriting: false); + await AssertType(8ul, "8", dataTypeName, dataTypeInference: DataTypeInference.Nothing); } [Test] public async Task Int64() { - await AssertType(8L, "8", "bigint", DbType.Int64); + await AssertType(8L, "8", "bigint", dbType: DbType.Int64); - await AssertType((short)8, "8", "bigint", DbType.Int64, isDefault: false); - await AssertType(8, "8", "bigint", DbType.Int64, isDefault: false); - await AssertType((byte)8, "8", "bigint", DbType.Int64, isDefault: false); - await AssertType(8F, "8", "bigint", DbType.Int64, isDefault: false); - await AssertType(8D, "8", "bigint", DbType.Int64, isDefault: false); - await AssertType(8M, "8", "bigint", DbType.Int64, isDefault: false); + await AssertType((short)8, "8", "bigint", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Int64, DbType.Int16), valueTypeEqualsFieldType: false); + await AssertType(8, "8", "bigint", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Int64, DbType.Int32), valueTypeEqualsFieldType: false); + await AssertType((byte)8, "8", "bigint", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Int64, DbType.Int16), valueTypeEqualsFieldType: false, skipArrayCheck: true); // byte[] maps to bytea + await AssertType((sbyte)8, "8", "bigint", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Int64, DbType.Int16), valueTypeEqualsFieldType: false); + await AssertType(8F, "8", "bigint", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Int64, DbType.Single), valueTypeEqualsFieldType: false); + await AssertType(8D, "8", "bigint", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Int64, DbType.Double), 
valueTypeEqualsFieldType: false); + await AssertType(8M, "8", "bigint", dataTypeInference: DataTypeInference.Mismatch, + dbType: new(DbType.Int64, DbType.Decimal), valueTypeEqualsFieldType: false); } [Test] @@ -83,7 +106,7 @@ public async Task Double(double value, string sqlLiteral) await using var conn = await OpenConnectionAsync(); MinimumPgVersion(conn, "12.0"); - await AssertType(value, sqlLiteral, "double precision", DbType.Double); + await AssertType(value, sqlLiteral, "double precision", dbType: DbType.Double); } [Test] @@ -92,19 +115,19 @@ public async Task Double(double value, string sqlLiteral) [TestCase(float.PositiveInfinity, "Infinity", TestName = "Float_PositiveInfinity")] [TestCase(float.NegativeInfinity, "-Infinity", TestName = "Float_NegativeInfinity")] public Task Float(float value, string sqlLiteral) - => AssertType(value, sqlLiteral, "real", DbType.Single); + => AssertType(value, sqlLiteral, "real", dbType: DbType.Single); [Test] [TestCase(short.MaxValue + 1, "smallint")] [TestCase(int.MaxValue + 1L, "integer")] [TestCase(long.MaxValue + 1D, "bigint")] - public Task Write_overflow(T value, string pgTypeName) - => AssertTypeUnsupportedWrite(value, pgTypeName); + public Task Write_overflow(T value, string dataTypeName) + => AssertTypeUnsupportedWrite(value, dataTypeName); [Test] [TestCase((short)0, short.MaxValue + 1D, "int")] [TestCase(0, int.MaxValue + 1D, "bigint")] [TestCase(0L, long.MaxValue + 1D, "decimal")] - public Task Read_overflow(T _, double value, string pgTypeName) - => AssertTypeUnsupportedRead(value.ToString(CultureInfo.InvariantCulture), pgTypeName); + public Task Read_overflow(T _, double value, string dataTypeName) + => AssertTypeUnsupportedRead(value.ToString(CultureInfo.InvariantCulture), dataTypeName); } diff --git a/test/Npgsql.Tests/Types/RangeTests.cs b/test/Npgsql.Tests/Types/RangeTests.cs index 6997cd9bb4..773815463d 100644 --- a/test/Npgsql.Tests/Types/RangeTests.cs +++ b/test/Npgsql.Tests/Types/RangeTests.cs @@ -2,7 
+2,6 @@ using System.ComponentModel; using System.Data; using System.Globalization; -using System.Linq; using System.Threading.Tasks; using Npgsql.Properties; using Npgsql.Util; @@ -54,8 +53,8 @@ class RangeTests : MultiplexingTestBase // See more test cases in DateTimeTests [Test, TestCaseSource(nameof(RangeTestCases))] - public Task Range(T range, string sqlLiteral, string pgTypeName) - => AssertType(range, sqlLiteral, pgTypeName, + public Task Range(T range, string sqlLiteral, string dataTypeName) + => AssertType(range, sqlLiteral, dataTypeName, // NpgsqlRange[] is mapped to multirange by default, not array, so the built-in AssertType testing for arrays fails // (see below) skipArrayCheck: true); @@ -63,8 +62,8 @@ public Task Range(T range, string sqlLiteral, string pgTypeName) // This re-executes the same scenario as above, but with isDefaultForWriting: false and without skipArrayCheck: true. // This tests coverage of range arrays (as opposed to multiranges). [Test, TestCaseSource(nameof(RangeTestCases))] - public Task Range_array(T range, string sqlLiteral, string pgTypeName) - => AssertType(range, sqlLiteral, pgTypeName, isDefaultForWriting: false); + public Task Range_array(T range, string sqlLiteral, string dataTypeName) + => AssertType(range, sqlLiteral, dataTypeName, dataTypeInference: DataTypeInference.Mismatch); [Test] public void Equality_finite() @@ -210,7 +209,7 @@ public async Task Unmapped_range_supported_only_with_EnableUnmappedTypes() Assert.That(exception.InnerException, Is.InstanceOf()); Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); - exception = await AssertTypeUnsupportedRead("""["bar","foo"]""", rangeType); + exception = await AssertTypeUnsupportedRead("""["bar","foo"]""", rangeType); Assert.That(exception.InnerException, Is.InstanceOf()); Assert.That(exception.InnerException!.Message, Is.EqualTo(errorMessage)); @@ -244,8 +243,7 @@ await AssertType( }, """{"[3,4)","[5,6)"}""", "int4range[]", - isDefaultForWriting: 
!supportsMultirange, - isDataTypeInferredFromValue: false); + dataTypeInference: supportsMultirange ? DataTypeInference.Mismatch : DataTypeInference.Match); } [Test] diff --git a/test/Npgsql.Tests/Types/TextTests.cs b/test/Npgsql.Tests/Types/TextTests.cs index f93918bd54..75de705c6c 100644 --- a/test/Npgsql.Tests/Types/TextTests.cs +++ b/test/Npgsql.Tests/Types/TextTests.cs @@ -19,29 +19,33 @@ public class TextTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase { [Test] public Task Text_as_string() - => AssertType("foo", "foo", "text", DbType.String); + => AssertType("foo", "foo", "text", dbType: DbType.String); [Test] public Task Text_as_array_of_chars() - => AssertType("foo".ToCharArray(), "foo", "text", DbType.String, isDefaultForReading: false); + => AssertType("foo".ToCharArray(), "foo", "text", dataTypeInference: DataTypeInference.Mismatch, + dbType: DbType.String, valueTypeEqualsFieldType: false); [Test] public Task Text_as_ArraySegment_of_chars() - => AssertTypeWrite(new ArraySegment("foo".ToCharArray()), "foo", "text", DbType.String, - isDefault: false); + => AssertTypeWrite(new ArraySegment("foo".ToCharArray()), "foo", "text", dbType: DbType.String); [Test] public Task Text_as_array_of_bytes() - => AssertType(Encoding.UTF8.GetBytes("foo"), "foo", "text", DbType.String, isDefault: false); + => AssertType("foo"u8.ToArray(), "foo", "text", dataTypeInference: DataTypeInference.Mismatch, + new(DbType.String, DbType.Binary), valueTypeEqualsFieldType: false); [Test] public Task Text_as_ReadOnlyMemory_of_bytes() - => AssertTypeWrite(new ReadOnlyMemory(Encoding.UTF8.GetBytes("foo")), "foo", "text", DbType.String, - isDefault: false); + => AssertTypeWrite(new ReadOnlyMemory("foo"u8.ToArray()), "foo", + "text", dataTypeInference: DataTypeInference.Mismatch, + new(DbType.String, DbType.Binary)); [Test] public Task Char_as_char() - => AssertType('f', "f", "character", inferredDbType: DbType.String, isDefault: false); + => AssertType('f', "f", + 
"character", dataTypeInference: DataTypeInference.Mismatch, + dbType: DbType.String, valueTypeEqualsFieldType: false, skipArrayCheck: true); // char[] maps to text [Test] public async Task Citext_as_string() @@ -49,12 +53,16 @@ public async Task Citext_as_string() await using var conn = await OpenConnectionAsync(); await EnsureExtensionAsync(conn, "citext"); - await AssertType("foo", "foo", "citext", inferredDbType: DbType.String, isDefaultForWriting: false); + await AssertType("foo", "foo", + "citext", dataTypeInference: DataTypeInference.Mismatch, + dbType: DbType.String); } [Test] public Task Text_as_MemoryStream() - => AssertTypeWrite(() => new MemoryStream("foo"u8.ToArray()), "foo", "text", DbType.String, isDefault: false); + => AssertTypeWrite(() => new MemoryStream("foo"u8.ToArray()), "foo", + "text", dataTypeInference: DataTypeInference.Mismatch, + new(DbType.String, DbType.Binary)); [Test] public async Task Text_long() @@ -64,7 +72,7 @@ public async Task Text_long() builder.Append('X', conn.Settings.WriteBufferSize); var value = builder.ToString(); - await AssertType(value, value, "text", DbType.String); + await AssertType(value, value, "text", dbType: DbType.String); } [Test, Description("Tests that strings are truncated when the NpgsqlParameter's Size is set")] @@ -104,8 +112,8 @@ public async Task Null_character() [Test, Description("Tests some types which are aliased to strings")] [TestCase("character varying")] [TestCase("name")] - public Task Aliased_postgres_types(string pgTypeName) - => AssertType("foo", "foo", pgTypeName, inferredDbType: DbType.String, isDefaultForWriting: false); + public Task Aliased_postgres_types(string dataTypeName) + => AssertType("foo", "foo", dataTypeName, dataTypeInference: DataTypeInference.Mismatch, dbType: DbType.String); [Test] [TestCase(DbType.AnsiString)] From 87426760ca30507ea7619090871472b5fc9ea138 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 25 
Feb 2026 10:19:01 +0100 Subject: [PATCH 699/761] Bump Microsoft.NET.Test.Sdk from 18.0.1 to 18.3.0 (#6460) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 735a8607b3..a197b8c29e 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -33,7 +33,7 @@ - + From bef0fafa5d6f0b793bc2f2b263667c7a40e1312f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 27 Feb 2026 10:26:54 +0100 Subject: [PATCH 700/761] Bump actions/upload-artifact from 6 to 7 (#6461) --- .github/workflows/build.yml | 6 +++--- .github/workflows/native-aot.yml | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 34537b0ca8..29158fb563 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -306,7 +306,7 @@ jobs: shell: bash - name: Upload Test Hang Dumps - uses: actions/upload-artifact@v6 + uses: actions/upload-artifact@v7 if: failure() with: name: test-hang-dumps @@ -360,7 +360,7 @@ jobs: run: dotnet pack --configuration Release --property:PackageOutputPath="$PWD/nupkgs" --version-suffix "ci.$(date -u +%Y%m%dT%H%M%S)+sha.${GITHUB_SHA:0:9}" -p:ContinuousIntegrationBuild=true - name: Upload artifacts (nupkg) - uses: actions/upload-artifact@v6 + uses: actions/upload-artifact@v7 with: name: Npgsql.CI path: nupkgs @@ -392,7 +392,7 @@ jobs: run: dotnet pack --configuration Release --property:PackageOutputPath="$PWD/nupkgs" -p:ContinuousIntegrationBuild=true - name: Upload artifacts - uses: actions/upload-artifact@v6 + uses: actions/upload-artifact@v7 with: name: Npgsql.Release path: nupkgs diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index 25b193a17f..cdc4d77ab5 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -173,21 +173,21 @@ jobs: run: dotnet run 
--project test/MStatDumper/MStatDumper.csproj -c release -f ${{ matrix.tfm }} -- "test/Npgsql.NativeAotTests/obj/Release/${{ matrix.tfm }}/linux-x64/native/Npgsql.NativeAotTests.mstat" md >> $GITHUB_STEP_SUMMARY - name: Upload mstat - uses: actions/upload-artifact@v6 + uses: actions/upload-artifact@v7 with: name: npgsql.mstat path: "test/Npgsql.NativeAotTests/obj/Release/${{ matrix.tfm }}/linux-x64/native/Npgsql.NativeAotTests.mstat" retention-days: 3 - name: Upload codedgen dgml - uses: actions/upload-artifact@v6 + uses: actions/upload-artifact@v7 with: name: npgsql.codegen.dgml.xml path: "test/Npgsql.NativeAotTests/obj/Release/${{ matrix.tfm }}/linux-x64/native/Npgsql.NativeAotTests.codegen.dgml.xml" retention-days: 3 - name: Upload scan dgml - uses: actions/upload-artifact@v6 + uses: actions/upload-artifact@v7 with: name: npgsql.scan.dgml.xml path: "test/Npgsql.NativeAotTests/obj/Release/${{ matrix.tfm }}/linux-x64/native/Npgsql.NativeAotTests.scan.dgml.xml" From 2fea28143ae5bfae388f4ae722ea4ef08400ef6a Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Sat, 28 Feb 2026 08:17:15 +0100 Subject: [PATCH 701/761] Remove multiplexing (#6457) Closes #6449 --- src/Npgsql/Internal/NpgsqlConnector.cs | 216 +-------- src/Npgsql/LogMessages.cs | 24 - src/Npgsql/MultiplexingDataSource.cs | 422 ------------------ src/Npgsql/NpgsqlBinaryExporter.cs | 1 - src/Npgsql/NpgsqlBinaryImporter.cs | 1 - src/Npgsql/NpgsqlCommand.cs | 308 +++++-------- src/Npgsql/NpgsqlConnection.cs | 404 ++++------------- src/Npgsql/NpgsqlConnectionStringBuilder.cs | 46 -- src/Npgsql/NpgsqlDataReader.cs | 30 +- src/Npgsql/NpgsqlEventId.cs | 4 +- src/Npgsql/NpgsqlEventSource.cs | 49 -- src/Npgsql/NpgsqlMultiHostDataSource.cs | 1 - src/Npgsql/NpgsqlRawCopyStream.cs | 1 - src/Npgsql/NpgsqlSchema.cs | 4 +- src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 10 +- src/Npgsql/NpgsqlTransaction.cs | 4 +- src/Npgsql/PoolingDataSource.cs | 10 +- src/Npgsql/PublicAPI.Unshipped.txt | 4 + 
.../Replication/ReplicationConnection.cs | 5 +- src/Npgsql/Util/ManualResetValueTaskSource.cs | 21 - test/Npgsql.PluginTests/NodaTimeTests.cs | 5 +- test/Npgsql.Tests/AuthenticationTests.cs | 6 +- test/Npgsql.Tests/BatchTests.cs | 10 +- test/Npgsql.Tests/BugTests.cs | 45 -- test/Npgsql.Tests/CommandParameterTests.cs | 5 +- test/Npgsql.Tests/CommandTests.cs | 109 +---- test/Npgsql.Tests/ConnectionTests.cs | 107 +---- test/Npgsql.Tests/CopyTests.cs | 37 +- test/Npgsql.Tests/DataSourceTests.cs | 28 +- test/Npgsql.Tests/LoggingTests.cs | 38 +- test/Npgsql.Tests/PoolTests.cs | 24 - test/Npgsql.Tests/PrepareTests.cs | 11 - test/Npgsql.Tests/ReaderNewSchemaTests.cs | 2 - test/Npgsql.Tests/ReaderOldSchemaTests.cs | 3 - test/Npgsql.Tests/ReaderTests.cs | 99 +--- test/Npgsql.Tests/SecurityTests.cs | 16 +- test/Npgsql.Tests/Support/AssemblySetUp.cs | 1 - .../Support/MultiplexingTestBase.cs | 37 -- test/Npgsql.Tests/Support/TestBase.cs | 7 +- test/Npgsql.Tests/TestUtil.cs | 2 +- test/Npgsql.Tests/TracingTests.cs | 98 +--- test/Npgsql.Tests/TransactionTests.cs | 80 +--- test/Npgsql.Tests/Types/ArrayTests.cs | 8 +- test/Npgsql.Tests/Types/BitStringTests.cs | 2 +- test/Npgsql.Tests/Types/ByteaTests.cs | 2 +- test/Npgsql.Tests/Types/CompositeTests.cs | 7 +- test/Npgsql.Tests/Types/CubeTests.cs | 4 +- test/Npgsql.Tests/Types/DateTimeTests.cs | 1 - test/Npgsql.Tests/Types/DomainTests.cs | 5 +- test/Npgsql.Tests/Types/EnumTests.cs | 2 +- .../Npgsql.Tests/Types/FullTextSearchTests.cs | 2 +- test/Npgsql.Tests/Types/GeometricTypeTests.cs | 2 +- test/Npgsql.Tests/Types/HstoreTests.cs | 2 +- test/Npgsql.Tests/Types/InternalTypeTests.cs | 4 +- test/Npgsql.Tests/Types/JsonDynamicTests.cs | 11 +- test/Npgsql.Tests/Types/JsonPathTests.cs | 2 +- test/Npgsql.Tests/Types/JsonTests.cs | 11 +- test/Npgsql.Tests/Types/LTreeTests.cs | 2 +- .../Npgsql.Tests/Types/LegacyDateTimeTests.cs | 1 - test/Npgsql.Tests/Types/MiscTypeTests.cs | 5 +- test/Npgsql.Tests/Types/MultirangeTests.cs | 2 - 
test/Npgsql.Tests/Types/NetworkTypeTests.cs | 2 +- test/Npgsql.Tests/Types/NumericTests.cs | 2 +- test/Npgsql.Tests/Types/NumericTypeTests.cs | 2 +- test/Npgsql.Tests/Types/RangeTests.cs | 6 +- test/Npgsql.Tests/Types/RecordTests.cs | 2 +- test/Npgsql.Tests/Types/TextTests.cs | 2 +- 67 files changed, 340 insertions(+), 2086 deletions(-) delete mode 100644 src/Npgsql/MultiplexingDataSource.cs delete mode 100644 src/Npgsql/Util/ManualResetValueTaskSource.cs delete mode 100644 test/Npgsql.Tests/Support/MultiplexingTestBase.cs diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 347c0e0d52..63b26f3878 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -16,7 +16,6 @@ using System.Security.Cryptography.X509Certificates; using System.Text; using System.Threading; -using System.Threading.Channels; using System.Threading.Tasks; using Npgsql.BackendMessages; using Npgsql.Util; @@ -186,38 +185,6 @@ internal string InferredUserName /// volatile Exception? _breakReason; - /// - /// - /// Used by the pool to indicate that I/O is currently in progress on this connector, so that another write - /// isn't started concurrently. Note that since we have only one write loop, this is only ever usedto - /// protect against an over-capacity writes into a connector that's currently *asynchronously* writing. - /// - /// - /// It is guaranteed that the currently-executing - /// Specifically, reading may occur - and the connector may even be returned to the pool - before this is - /// released. 
- /// - /// - internal volatile int MultiplexAsyncWritingLock; - - /// - internal void FlagAsNotWritableForMultiplexing() - { - Debug.Assert(Settings.Multiplexing); - Debug.Assert(CommandsInFlightCount > 0 || IsBroken || IsClosed, - $"About to mark multiplexing connector as non-writable, but {nameof(CommandsInFlightCount)} is {CommandsInFlightCount}"); - - Interlocked.Exchange(ref MultiplexAsyncWritingLock, 1); - } - - /// - internal void FlagAsWritableForMultiplexing() - { - Debug.Assert(Settings.Multiplexing); - if (Interlocked.CompareExchange(ref MultiplexAsyncWritingLock, 0, 1) != 1) - throw new Exception("Multiplexing lock was not taken when releasing. Please report a bug."); - } - /// /// A lock that's taken while a cancellation is being delivered; new queries are blocked until the /// cancellation is delivered. This reduces the chance that a cancellation meant for a previous @@ -411,24 +378,6 @@ internal NpgsqlConnector(NpgsqlDataSource dataSource, NpgsqlConnection conn) // TODO: Not just for automatic preparation anymore... PreparedStatementManager = new PreparedStatementManager(this); - - if (Settings.Multiplexing) - { - // Note: It's OK for this channel to be unbounded: each command enqueued to it is accompanied by sending - // it to PostgreSQL. If we overload it, a TCP zero window will make us block on the networking side - // anyway. - // Note: the in-flight channel can probably be single-writer, but that doesn't actually do anything - // at this point. And we currently rely on being able to complete the channel at any point (from - // Break). We may want to revisit this if an optimized, SingleWriter implementation is introduced. 
- var commandsInFlightChannel = Channel.CreateUnbounded( - new UnboundedChannelOptions { SingleReader = true }); - CommandsInFlightReader = commandsInFlightChannel.Reader; - CommandsInFlightWriter = commandsInFlightChannel.Writer; - - // TODO: Properly implement this - if (_isKeepAliveEnabled) - throw new NotImplementedException("Keepalive not yet implemented for multiplexing"); - } } #endregion @@ -513,7 +462,7 @@ internal async Task Open(NpgsqlTimeout timeout, bool async, CancellationToken ca // ReloadTypes. We update them here before returning the connector from the pool. ReloadableState = DataSource.CurrentReloadableState; - if (Settings.Pooling && Settings is { Multiplexing: false, NoResetOnClose: false } && DatabaseInfo.SupportsDiscard) + if (Settings.Pooling && Settings is { NoResetOnClose: false } && DatabaseInfo.SupportsDiscard) { _sendResetOnClose = true; GenerateResetMessage(); @@ -521,22 +470,6 @@ internal async Task Open(NpgsqlTimeout timeout, bool async, CancellationToken ca OpenTimestamp = DateTime.UtcNow; - if (Settings.Multiplexing) - { - // Start an infinite async loop, which processes incoming multiplexing traffic. - // It is intentionally not awaited and will run as long as the connector is alive. - // The CommandsInFlightWriter channel is completed in Cleanup, which should cause this task - // to complete. - // Make sure we do not flow AsyncLocals like Activity.Current - using var __ = ExecutionContext.SuppressFlow(); - _ = Task.Run(MultiplexingReadLoop, CancellationToken.None) - .ContinueWith(t => - { - // Note that we *must* observe the exception if the task is faulted. - ConnectionLogger.LogError(t.Exception!, "Exception bubbled out of multiplexing read loop", Id); - }, TaskContinuationOptions.OnlyOnFaulted); - } - if (_isKeepAliveEnabled) { // Start the keep alive mechanism to work by scheduling the timer. @@ -1478,110 +1411,6 @@ void SetSocketOptions(Socket socket) #endregion - #region I/O - - readonly ChannelReader? 
CommandsInFlightReader; - internal readonly ChannelWriter? CommandsInFlightWriter; - - internal volatile int CommandsInFlightCount; - - internal ManualResetValueTaskSource ReaderCompleted { get; } = - new() { RunContinuationsAsynchronously = true }; - - async Task MultiplexingReadLoop() - { - Debug.Assert(Settings.Multiplexing); - Debug.Assert(CommandsInFlightReader != null); - - NpgsqlCommand? command = null; - var commandsRead = 0; - - try - { - while (await CommandsInFlightReader.WaitToReadAsync().ConfigureAwait(false)) - { - commandsRead = 0; - Debug.Assert(!InTransaction); - - while (CommandsInFlightReader.TryRead(out command)) - { - commandsRead++; - - await ReadBuffer.Ensure(5, true).ConfigureAwait(false); - - // We have a resultset for the command - hand back control to the command (which will - // return it to the user) - ReaderCompleted.Reset(); - command.ExecutionCompletion.SetResult(this); - - // Now wait until that command's reader is disposed. Note that RunContinuationsAsynchronously is - // true, so that the user code calling NpgsqlDataReader.Dispose will not continue executing - // synchronously here. The prevents issues if the code after the next command's execution - // completion blocks. - await new ValueTask(ReaderCompleted, ReaderCompleted.Version).ConfigureAwait(false); - Debug.Assert(!InTransaction); - } - - // Atomically update the commands in-flight counter, and check if it reached 0. If so, the - // connector is idle and can be returned. - // Note that this is racing with over-capacity writing, which can select any connector at any - // time (see MultiplexingWriteLoop), and we must make absolutely sure that if a connector is - // returned to the pool, it is *never* written to unless properly dequeued from the Idle channel. 
- if (Interlocked.Add(ref CommandsInFlightCount, -commandsRead) == 0) - { - // There's a race condition where the continuation of an asynchronous multiplexing write may not - // have executed yet, and the flush may still be in progress. We know all I/O has already - // been sent - because the reader has already consumed the entire resultset. So we wait until - // the connector's write lock has been released (long waiting will never occur here). - SpinWait.SpinUntil(() => MultiplexAsyncWritingLock == 0 || IsBroken); - - ResetReadBuffer(); - DataSource.Return(this); - } - } - - ConnectionLogger.LogTrace("Exiting multiplexing read loop", Id); - } - catch (Exception e) - { - Debug.Assert(IsBroken); - - // Decrement the commands already dequeued from the in-flight counter - Interlocked.Add(ref CommandsInFlightCount, -commandsRead); - - // When a connector is broken, the causing exception is stored on it. We fail commands with - // that exception - rather than the one thrown here - since the break may have happened during - // writing, and we want to bubble that one up. - - // Drain any pending in-flight commands and fail them. Note that some have only been written - // to the buffer, and not sent to the server. - command?.ExecutionCompletion.SetException(_breakReason!); - try - { - while (true) - { - var pendingCommand = await CommandsInFlightReader.ReadAsync().ConfigureAwait(false); - - // TODO: the exception we have here is sometimes just the result of the write loop breaking - // the connector, so it doesn't represent the actual root cause. - pendingCommand.ExecutionCompletion.SetException(new NpgsqlException("A previous command on this connection caused an error requiring all pending commands on this connection to be aborted", _breakReason!)); - } - } - catch (ChannelClosedException) - { - // All good, drained to the channel and failed all commands - } - - // "Return" the connector to the pool to for cleanup (e.g. 
update total connector count) - DataSource.Return(this); - - ConnectionLogger.LogError(e, "Exception in multiplexing read loop", Id); - } - - Debug.Assert(CommandsInFlightCount == 0); - } - - #endregion #region Frontend message processing @@ -1954,17 +1783,8 @@ void ProcessNewTransactionStatus(TransactionStatus newStatus) switch (newStatus) { case TransactionStatus.Idle: - return; case TransactionStatus.InTransactionBlock: case TransactionStatus.InFailedTransactionBlock: - // In multiplexing mode, we can't support transaction in SQL: the connector must be removed from the - // writable connectors list, otherwise other commands may get written to it. So the user must tell us - // about the transaction via BeginTransaction. - if (Connection is null) - { - Debug.Assert(Settings.Multiplexing); - ThrowHelper.ThrowNotSupportedException("In multiplexing mode, transactions must be started with BeginTransaction"); - } return; case TransactionStatus.Pending: ThrowHelper.ThrowInvalidOperationException($"Internal Npgsql bug: invalid TransactionStatus {nameof(TransactionStatus.Pending)} received, should be frontend-only"); @@ -2076,7 +1896,7 @@ internal void ResetCancellation() internal void PerformImmediateUserCancellation() { var connection = Connection; - if (connection is null || connection.ConnectorBindingScope == ConnectorBindingScope.Reader || UserCancellationRequested) + if (connection is null || UserCancellationRequested) return; // Take the lock first to make sure there is no concurrent Break. @@ -2499,11 +2319,9 @@ internal Exception Break(Exception reason, bool markHostAsOfflineOnConnecting = // On the other hand leaving the state Open could indicate to the user that the connection is functional. 
// (see https://github.com/npgsql/npgsql/issues/3705#issuecomment-839908772) Connection = null; - if (connection.ConnectorBindingScope != ConnectorBindingScope.None) - Return(); + Return(); connection.EnlistedTransaction = null; connection.Connector = null; - connection.ConnectorBindingScope = ConnectorBindingScope.None; } connection.FullState = ConnectionState.Broken; @@ -2523,19 +2341,6 @@ void FullCleanup() { lock (CleanupLock) { - if (Settings.Multiplexing) - { - FlagAsNotWritableForMultiplexing(); - - // Note that in multiplexing, this could be called from the read loop, while the write loop is - // writing into the channel. To make sure this race condition isn't a problem, the channel currently - // isn't set up with SingleWriter (since at this point it doesn't do anything). - CommandsInFlightWriter!.Complete(); - - // The connector's read loop has a continuation to observe and log any exception coming out - // (see Open) - } - ConnectionLogger.LogTrace("Cleaning up connector", Id); Cleanup(); @@ -2693,8 +2498,6 @@ void GenerateResetMessage() /// internal async Task Reset(bool async) { - bool endBindingScope; - // We start user action in case a keeplive happens concurrently, or a concurrent user command (bug) using (StartUserAction(attemptPgCancellation: false)) { @@ -2711,21 +2514,17 @@ internal async Task Reset(bool async) switch (TransactionStatus) { case TransactionStatus.Idle: - // There is an undisposed transaction on multiplexing connection - endBindingScope = Connection?.ConnectorBindingScope == ConnectorBindingScope.Transaction; break; case TransactionStatus.Pending: // BeginTransaction() was called, but was left in the write buffer and not yet sent to server. // Just clear the transaction state. 
ProcessNewTransactionStatus(TransactionStatus.Idle); ClearTransaction(); - endBindingScope = true; break; case TransactionStatus.InTransactionBlock: case TransactionStatus.InFailedTransactionBlock: await Rollback(async).ConfigureAwait(false); ClearTransaction(); - endBindingScope = true; break; default: ThrowHelper.ThrowInvalidOperationException($"Internal Npgsql bug: unexpected value {TransactionStatus} of enum {nameof(TransactionStatus)}. Please file a bug."); @@ -2750,13 +2549,6 @@ internal async Task Reset(bool async) DataReader.UnbindIfNecessary(); } - - if (endBindingScope) - { - // Connection is null if a connection enlisted in a TransactionScope was closed before the - // TransactionScope completed - the connector is still enlisted, but has no connection. - Connection?.EndBindingScope(ConnectorBindingScope.Transaction); - } } /// @@ -3216,8 +3008,6 @@ void ReadParameterStatus(ReadOnlySpan incomingName, ReadOnlySpan inc switch (name) { case "standard_conforming_strings": - if (value != "on" && Settings.Multiplexing) - throw Break(new NotSupportedException("standard_conforming_strings must be on with multiplexing")); UseConformingStrings = value == "on"; return; diff --git a/src/Npgsql/LogMessages.cs b/src/Npgsql/LogMessages.cs index 349b91b4b5..757f972764 100644 --- a/src/Npgsql/LogMessages.cs +++ b/src/Npgsql/LogMessages.cs @@ -26,12 +26,6 @@ static partial class LogMessages Message = "Opened connection to {Host}:{Port}/{Database}")] internal static partial void OpenedConnection(ILogger logger, string Host, int Port, string Database, string ConnectionString, int ConnectorId); - [LoggerMessage( - EventId = NpgsqlEventId.OpenedConnection, - Level = LogLevel.Debug, - Message = "Opened multiplexing connection to {Host}:{Port}/{Database}")] - internal static partial void OpenedMultiplexingConnection(ILogger logger, string Host, int Port, string Database, string ConnectionString); - [LoggerMessage( EventId = NpgsqlEventId.ClosingConnection, Level = 
LogLevel.Trace, @@ -44,12 +38,6 @@ static partial class LogMessages Message = "Closed connection to {Host}:{Port}/{Database}")] internal static partial void ClosedConnection(ILogger logger, string Host, int Port, string Database, string ConnectionString, int ConnectorId); - [LoggerMessage( - EventId = NpgsqlEventId.ClosedConnection, - Level = LogLevel.Debug, - Message = "Closed multiplexing connection to {Host}:{Port}/{Database}")] - internal static partial void ClosedMultiplexingConnection(ILogger logger, string Host, int Port, string Database, string ConnectionString); - [LoggerMessage( EventId = NpgsqlEventId.OpeningPhysicalConnection, Level = LogLevel.Trace, @@ -134,12 +122,6 @@ static partial class LogMessages Message = "Exception while closing connector")] internal static partial void ExceptionWhenClosingPhysicalConnection(ILogger logger, int ConnectorId, Exception exception); - [LoggerMessage( - EventId = NpgsqlEventId.ExceptionWhenOpeningConnectionForMultiplexing, - Level = LogLevel.Error, - Message = "Exception opening a connection for multiplexing")] - internal static partial void ExceptionWhenOpeningConnectionForMultiplexing(ILogger logger, Exception exception); - [LoggerMessage( Level = LogLevel.Trace, Message = "Start user action")] @@ -254,12 +236,6 @@ internal static partial void BatchExecutionCompletedWithParameters( Message = "Deriving Parameters for query: {CommandText}")] internal static partial void DerivingParameters(ILogger logger, string CommandText, int ConnectorId); - [LoggerMessage( - EventId = NpgsqlEventId.ExceptionWhenWritingMultiplexedCommands, - Level = LogLevel.Error, - Message = "Exception while writing multiplexed commands")] - internal static partial void ExceptionWhenWritingMultiplexedCommands(ILogger logger, int ConnectorId, Exception exception); - [LoggerMessage( Level = LogLevel.Trace, Message = "Cleaning up reader")] diff --git a/src/Npgsql/MultiplexingDataSource.cs b/src/Npgsql/MultiplexingDataSource.cs deleted file mode 
100644 index 8c529cf7e7..0000000000 --- a/src/Npgsql/MultiplexingDataSource.cs +++ /dev/null @@ -1,422 +0,0 @@ -using System; -using System.Diagnostics; -using System.Runtime.CompilerServices; -using System.Threading; -using System.Threading.Channels; -using System.Threading.Tasks; -using Microsoft.Extensions.Logging; -using Npgsql.Internal; -using Npgsql.Util; - -namespace Npgsql; - -sealed class MultiplexingDataSource : PoolingDataSource -{ - readonly ILogger _connectionLogger; - readonly ILogger _commandLogger; - - readonly bool _autoPrepare; - - readonly ChannelReader _multiplexCommandReader; - internal ChannelWriter MultiplexCommandWriter { get; } - - readonly Task _multiplexWriteLoop; - - /// - /// When multiplexing is enabled, determines the maximum number of outgoing bytes to buffer before - /// flushing to the network. - /// - readonly int _writeCoalescingBufferThresholdBytes; - - // TODO: Make this configurable - const int MultiplexingCommandChannelBound = 4096; - - internal MultiplexingDataSource( - NpgsqlConnectionStringBuilder settings, - NpgsqlDataSourceConfiguration dataSourceConfig) - : base(settings, dataSourceConfig) - { - Debug.Assert(Settings.Multiplexing); - - // TODO: Validate multiplexing options are set only when Multiplexing is on - - _autoPrepare = settings.MaxAutoPrepare > 0; - - _writeCoalescingBufferThresholdBytes = Settings.WriteCoalescingBufferThresholdBytes; - - var multiplexCommandChannel = Channel.CreateBounded( - new BoundedChannelOptions(MultiplexingCommandChannelBound) - { - FullMode = BoundedChannelFullMode.Wait, - SingleReader = true - }); - _multiplexCommandReader = multiplexCommandChannel.Reader; - MultiplexCommandWriter = multiplexCommandChannel.Writer; - - _connectionLogger = dataSourceConfig.LoggingConfiguration.ConnectionLogger; - _commandLogger = dataSourceConfig.LoggingConfiguration.CommandLogger; - - // Make sure we do not flow AsyncLocals like Activity.Current - using var _ = ExecutionContext.SuppressFlow(); - 
_multiplexWriteLoop = Task.Run(MultiplexingWriteLoop, CancellationToken.None) - .ContinueWith(t => - { - if (t.IsFaulted) - { - // Note that MultiplexingWriteLoop should never throw an exception - everything should be caught and handled internally. - _connectionLogger.LogError(t.Exception, "Exception in multiplexing write loop, this is an Npgsql bug, please file an issue."); - } - }); - } - - async Task MultiplexingWriteLoop() - { - // This method is async, but only ever yields when there are no pending commands in the command channel. - // No I/O should ever be performed asynchronously, as that would block further writing for the entire - // application; whenever I/O cannot complete immediately, we chain a callback with ContinueWith and move - // on to the next connector. - Debug.Assert(_multiplexCommandReader != null); - - var stats = new MultiplexingStats(); - - while (true) - { - NpgsqlConnector? connector; - NpgsqlCommand? command; - - try - { - // Get a first command out. - if (!_multiplexCommandReader.TryRead(out command)) - command = await _multiplexCommandReader.ReadAsync().ConfigureAwait(false); - } - catch (ChannelClosedException) - { - return; - } - - try - { - // First step is to get a connector on which to execute - var spinwait = new SpinWait(); - while (true) - { - if (TryGetIdleConnector(out connector)) - { - // See increment under over-capacity mode below - Interlocked.Increment(ref connector.CommandsInFlightCount); - break; - } - - // At no point should we ever have an activity here - Debug.Assert(Activity.Current is null); - // Set current activity as the one from the command - // So child activities from physical open are bound to it - Activity.Current = command.CurrentActivity; - - try - { - connector = await OpenNewConnector( - command.InternalConnection!, - new NpgsqlTimeout(TimeSpan.FromSeconds(Settings.Timeout)), - async: true, - CancellationToken.None).ConfigureAwait(false); - } - finally - { - Activity.Current = null; - } - - if 
(connector != null) - { - // Managed to create a new connector - connector.Connection = null; - - // See increment under over-capacity mode below - Interlocked.Increment(ref connector.CommandsInFlightCount); - - break; - } - - // There were no idle connectors and we're at max capacity, so we can't open a new one. - // Enter over-capacity mode - find an unlocked connector with the least currently in-flight - // commands and sent on it, even though there are already pending commands. - var minInFlight = int.MaxValue; - foreach (var c in Connectors) - { - if (c?.MultiplexAsyncWritingLock == 0 && c.CommandsInFlightCount < minInFlight) - { - minInFlight = c.CommandsInFlightCount; - connector = c; - } - } - - // There could be no writable connectors (all stuck in transaction or flushing). - if (connector == null) - { - // TODO: This is problematic - when absolutely all connectors are both busy *and* currently - // performing (async) I/O, this will spin-wait. - // We could call WaitAsync, but that would wait for an idle connector, whereas we want any - // writeable (non-writing) connector even if it has in-flight commands. Maybe something - // with better back-off. - // On the other hand, this is exactly *one* thread doing spin-wait, maybe not that bad. - spinwait.SpinOnce(); - continue; - } - - // We may be in a race condition with the connector read loop, which may be currently returning - // the connector to the Idle channel (because it has completed all commands). - // Increment the in-flight count to make sure the connector isn't returned as idle. - var newInFlight = Interlocked.Increment(ref connector.CommandsInFlightCount); - if (newInFlight == 1) - { - // The connector's in-flight was 0, so it was idle - abort over-capacity read - // and retry the normal flow. 
- Interlocked.Decrement(ref connector.CommandsInFlightCount); - spinwait.SpinOnce(); - continue; - } - - break; - } - } - catch (Exception exception) - { - LogMessages.ExceptionWhenOpeningConnectionForMultiplexing(_connectionLogger, exception); - - // Fail the first command in the channel as a way of bubbling the exception up to the user - command.ExecutionCompletion.SetException(exception); - - continue; - } - - // We now have a ready connector, and can start writing commands to it. - Debug.Assert(connector != null); - - try - { - stats.Reset(); - connector.FlagAsNotWritableForMultiplexing(); - command.TraceCommandEnrich(connector); - - // Read queued commands and write them to the connector's buffer, for as long as we're - // under our write threshold and timer delay. - // Note we already have one command we read above, and have already updated the connector's - // CommandsInFlightCount. Now write that command. - var first = true; - bool writtenSynchronously; - do - { - if (first) - first = false; - else - Interlocked.Increment(ref connector.CommandsInFlightCount); - writtenSynchronously = WriteCommand(connector, command, ref stats); - } while (connector.WriteBuffer.WritePosition < _writeCoalescingBufferThresholdBytes && - writtenSynchronously && - _multiplexCommandReader.TryRead(out command)); - - // If all commands were written synchronously (good path), complete the write here, flushing - // and updating statistics. If not, CompleteRewrite is scheduled to run later, when the async - // operations complete, so skip it and continue. - if (writtenSynchronously) - Flush(connector, ref stats); - } - catch (Exception ex) - { - FailWrite(connector, ex); - } - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - bool WriteCommand(NpgsqlConnector connector, NpgsqlCommand command, ref MultiplexingStats stats) - { - // Note: this method *never* awaits on I/O - doing so would suspend all outgoing multiplexing commands - // for the entire pool. 
In the normal/fast case, writing the command is purely synchronous (serialize - // to buffer in memory), and the actual flush will occur at the level above. For cases where the - // command overflows the buffer, async I/O is done, and we schedule continuations separately - - // but the main thread continues to handle other commands on other connectors. - - var fullyPrepared = _autoPrepare; - - if (_autoPrepare) - { - // TODO: Need to log based on numPrepared like in non-multiplexing mode... - for (var i = 0; i < command.InternalBatchCommands.Count; i++) - if (!command.InternalBatchCommands[i].TryAutoPrepare(connector)) - fullyPrepared = false; - } - - if (command.CurrentActivity is not null && fullyPrepared) - { - command.CurrentActivity.SetTag("db.npgsql.prepared", true); - } - - var written = connector.CommandsInFlightWriter!.TryWrite(command); - Debug.Assert(written, $"Failed to enqueue command to {connector.CommandsInFlightWriter}"); - - // Purposefully don't wait for I/O to complete - var task = command.Write(connector, async: true, flush: false); - stats.NumCommands++; - - switch (task.Status) - { - case TaskStatus.RanToCompletion: - return true; - - case TaskStatus.Faulted: - task.GetAwaiter().GetResult(); // Throw the exception - return true; - - case TaskStatus.WaitingForActivation: - case TaskStatus.Running: - { - // Asynchronous completion, which means the writing is flushing to network and there's actual I/O - // (i.e. a big command which overflowed our buffer). - // We don't (ever) await in the write loop, so remove the connector from the writable list (as it's - // still flushing) and schedule a continuation to continue taking care of this connector. - // The write loop continues to the next connector. - - // Create a copy of the statistics and purposefully box it via the closure. We need a separate - // copy of the stats for the async writing that will continue in parallel with this loop. 
- var clonedStats = stats.Clone(); - - // ReSharper disable once MethodSupportsCancellation - task.ContinueWith((t, o) => - { - var conn = (NpgsqlConnector)o!; - - if (t.IsFaulted) - { - FailWrite(conn, t.Exception!.InnerException!); - return; - } - - // There's almost certainly more buffered outgoing data for the command, after the flush - // occurred. Complete the write, which will flush again (and update statistics). - try - { - Flush(conn, ref clonedStats); - } - catch (Exception e) - { - FailWrite(conn, e); - } - }, connector); - - return false; - } - - default: - Debug.Fail("When writing command to connector, task is in invalid state " + task.Status); - ThrowHelper.ThrowNpgsqlException("When writing command to connector, task is in invalid state " + task.Status); - return false; - } - } - - void Flush(NpgsqlConnector connector, ref MultiplexingStats stats) - { - var task = connector.Flush(async: true); - switch (task.Status) - { - case TaskStatus.RanToCompletion: - CompleteWrite(connector, ref stats); - return; - - case TaskStatus.Faulted: - task.GetAwaiter().GetResult(); // Throw the exception - return; - - case TaskStatus.WaitingForActivation: - case TaskStatus.Running: - { - // Asynchronous completion - the flush didn't complete immediately (e.g. TCP zero window). - - // Create a copy of the statistics and purposefully box it via the closure. We need a separate - // copy of the stats for the async writing that will continue in parallel with this loop. 
- var clonedStats = stats.Clone(); - - task.ContinueWith((t, o) => - { - var conn = (NpgsqlConnector)o!; - if (t.IsFaulted) - { - FailWrite(conn, t.Exception!.InnerException!); - return; - } - - CompleteWrite(conn, ref clonedStats); - }, connector); - - return; - } - - default: - Debug.Fail("When flushing, task is in invalid state " + task.Status); - ThrowHelper.ThrowNpgsqlException("When flushing, task is in invalid state " + task.Status); - return; - } - } - - void FailWrite(NpgsqlConnector connector, Exception exception) - { - // Note that all commands already passed validation. This means any error here is either an unrecoverable network issue - // (in which case we're already broken), or some other issue while writing (e.g. invalid UTF8 characters in the SQL query) - - // unrecoverable in any case. - - // All commands enqueued in CommandsInFlightWriter will be drained by the reader and failed. - // Note that some of these commands where only written to the connector's buffer, but never - // actually sent - because of a later exception. - // In theory, we could track commands that were only enqueued and not sent, and retry those - // (on another connector), but that would add some book-keeping and complexity, and in any case - // if one connector was broken, chances are that all are (networking). - Debug.Assert(connector.IsBroken); - - LogMessages.ExceptionWhenWritingMultiplexedCommands(_commandLogger, connector.Id, exception); - } - - static void CompleteWrite(NpgsqlConnector connector, ref MultiplexingStats stats) - { - // All I/O has completed, mark this connector as safe for writing again. - // This will allow the connector to be returned to the pool by its read loop, and also to be selected - // for over-capacity write. 
- connector.FlagAsWritableForMultiplexing(); - - NpgsqlEventSource.Log.MultiplexingBatchSent(stats.NumCommands, Stopwatch.GetElapsedTime(stats.StartTimestamp).Ticks); - } - - // ReSharper disable once FunctionNeverReturns - } - - protected override void DisposeBase() - { - MultiplexCommandWriter.Complete(new ObjectDisposedException(nameof(MultiplexingDataSource))); - _multiplexWriteLoop.GetAwaiter().GetResult(); - base.DisposeBase(); - } - - protected override async ValueTask DisposeAsyncBase() - { - MultiplexCommandWriter.Complete(new ObjectDisposedException(nameof(MultiplexingDataSource))); - await _multiplexWriteLoop.ConfigureAwait(false); - await base.DisposeAsyncBase().ConfigureAwait(false); - } - - struct MultiplexingStats - { - internal long StartTimestamp; - internal int NumCommands; - - internal void Reset() - { - NumCommands = 0; - StartTimestamp = Stopwatch.GetTimestamp(); - } - - internal MultiplexingStats Clone() - { - var clone = new MultiplexingStats { StartTimestamp = StartTimestamp, NumCommands = NumCommands }; - return clone; - } - } -} diff --git a/src/Npgsql/NpgsqlBinaryExporter.cs b/src/Npgsql/NpgsqlBinaryExporter.cs index 65c616f496..033517c79d 100644 --- a/src/Npgsql/NpgsqlBinaryExporter.cs +++ b/src/Npgsql/NpgsqlBinaryExporter.cs @@ -543,7 +543,6 @@ void Cleanup() if (!ReferenceEquals(connector, null)) { connector.CurrentCopyOperation = null; - _connector.Connection?.EndBindingScope(ConnectorBindingScope.Copy); _connector = null!; } diff --git a/src/Npgsql/NpgsqlBinaryImporter.cs b/src/Npgsql/NpgsqlBinaryImporter.cs index 60a1f09daf..08f2a90844 100644 --- a/src/Npgsql/NpgsqlBinaryImporter.cs +++ b/src/Npgsql/NpgsqlBinaryImporter.cs @@ -556,7 +556,6 @@ void Cleanup() { connector.EndUserAction(); connector.CurrentCopyOperation = null; - connector.Connection?.EndBindingScope(ConnectorBindingScope.Copy); _connector = null; } diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index 8ddbb2e5fb..1e3f4a1f04 100644 --- 
a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -13,7 +13,6 @@ using NpgsqlTypes; using static Npgsql.Util.Statics; using System.Diagnostics.CodeAnalysis; -using System.Threading.Channels; using Microsoft.Extensions.Logging; using Npgsql.Internal; using Npgsql.Properties; @@ -470,12 +469,11 @@ internal void DeriveParameters() { var conn = CheckAndGetConnection(); Debug.Assert(conn is not null); + var connector = conn.Connector!; if (string.IsNullOrEmpty(CommandText)) throw new InvalidOperationException("CommandText property has not been initialized"); - using var _ = conn.StartTemporaryBindingScope(out var connector); - foreach (var s in InternalBatchCommands) if (s.PreparedStatement?.IsExplicit == true) throw new NpgsqlException("Deriving parameters isn't supported for commands that are already prepared."); @@ -678,8 +676,6 @@ Task Prepare(bool async, CancellationToken cancellationToken = default) { var connection = CheckAndGetConnection(); Debug.Assert(connection is not null); - if (connection.Settings.Multiplexing) - throw new NotSupportedException("Explicit preparation not supported with multiplexing"); var connector = connection.Connector!; var logger = connector.CommandLogger; @@ -844,8 +840,6 @@ async Task Unprepare(bool async, CancellationToken cancellationToken = default) { var connection = CheckAndGetConnection(); Debug.Assert(connection is not null); - if (connection.Settings.Multiplexing) - throw new NotSupportedException("Explicit preparation not supported with multiplexing"); var forall = true; foreach (var statement in InternalBatchCommands) @@ -890,7 +884,7 @@ async Task Unprepare(bool async, CancellationToken cancellationToken = default) #region Query analysis - internal void ProcessRawQuery(SqlQueryParser? parser, bool standardConformingStrings, NpgsqlBatchCommand? batchCommand) + internal void ProcessRawQuery(SqlQueryParser parser, bool standardConformingStrings, NpgsqlBatchCommand? 
batchCommand) { var (commandText, commandType, parameters) = batchCommand is null ? (CommandText, CommandType, _parameters) @@ -944,9 +938,6 @@ internal void ProcessRawQuery(SqlQueryParser? parser, bool standardConformingStr if (!EnableSqlRewriting) ThrowHelper.ThrowNotSupportedException($"Named parameters are not supported when Npgsql.{nameof(EnableSqlRewriting)} is disabled"); - // The parser is cached on NpgsqlConnector - unless we're in multiplexing mode. - parser ??= new SqlQueryParser(); - if (batchCommand is null) { parser.ParseRawQuery(this, standardConformingStrings); @@ -1072,7 +1063,7 @@ async Task WriteExecute(NpgsqlConnector connector, bool async, bool flush, Cance var syncCaller = !async; for (var i = 0; i < InternalBatchCommands.Count; i++) { - // The following is only for deadlock avoidance when doing sync I/O (so never in multiplexing) + // The following is only for deadlock avoidance when doing sync I/O if (syncCaller && ShouldSchedule(ref async, i)) await new TaskSchedulerAwaitable(ConstrainedConcurrencyScheduler); @@ -1391,16 +1382,12 @@ protected override async Task ExecuteDbDataReaderAsync(CommandBeha public new Task ExecuteReaderAsync(CommandBehavior behavior, CancellationToken cancellationToken = default) => ExecuteReader(async: true, behavior, cancellationToken).AsTask(); - // TODO: Maybe pool these? - internal ManualResetValueTaskSource ExecutionCompletion { get; } - = new(); - internal virtual async ValueTask ExecuteReader(bool async, CommandBehavior behavior, CancellationToken cancellationToken) { var conn = CheckAndGetConnection(); _behavior = behavior; - NpgsqlConnector? 
connector; + NpgsqlConnector connector; if (_connector is not null) { Debug.Assert(conn is null); @@ -1411,229 +1398,162 @@ internal virtual async ValueTask ExecuteReader(bool async, Com else { Debug.Assert(conn is not null); - conn.TryGetBoundConnector(out connector); + connector = conn.Connector!; } try { - if (connector is not null) - { - var logger = connector.CommandLogger; - var reloadableState = connector.ReloadableState; + var logger = connector.CommandLogger; + var reloadableState = connector.ReloadableState; - cancellationToken.ThrowIfCancellationRequested(); - // We cannot pass a token here, as we'll cancel a non-send query - // Also, we don't pass the cancellation token to StartUserAction, since that would make it scope to the entire action (command execution) - // whereas it should only be scoped to the Execute method. - connector.StartUserAction(ConnectorState.Executing, this, CancellationToken.None); + cancellationToken.ThrowIfCancellationRequested(); + // We cannot pass a token here, as we'll cancel a non-send query + // Also, we don't pass the cancellation token to StartUserAction, since that would make it scope to the entire action (command execution) + // whereas it should only be scoped to the Execute method. + connector.StartUserAction(ConnectorState.Executing, this, CancellationToken.None); - Task? sendTask; + Task? 
sendTask; - var validateParameterValues = !behavior.HasFlag(CommandBehavior.SchemaOnly); - long startTimestamp; + var validateParameterValues = !behavior.HasFlag(CommandBehavior.SchemaOnly); + long startTimestamp; - try - { - var fullyPrepared = false; + try + { + var fullyPrepared = false; - switch (IsExplicitlyPrepared) + switch (IsExplicitlyPrepared) + { + case true: + Debug.Assert(_connectorPreparedOn != null); + if (WrappingBatch is not null) { - case true: - Debug.Assert(_connectorPreparedOn != null); - if (WrappingBatch is not null) + foreach (var batchCommand in InternalBatchCommands) { - foreach (var batchCommand in InternalBatchCommands) + if (batchCommand.ConnectorPreparedOn != connector) { - if (batchCommand.ConnectorPreparedOn != connector) - { - ResetPreparation(); - goto case false; - } - - batchCommand._parameters?.ProcessParameters(reloadableState, validateParameterValues, batchCommand.CommandType); - } - } - else - { - if (_connectorPreparedOn != connector) - { - // The command was prepared, but since then the connector has changed. Detach all prepared statements. ResetPreparation(); goto case false; } - _parameters?.ProcessParameters(reloadableState, validateParameterValues, CommandType); - } - - NpgsqlEventSource.Log.CommandStartPrepared(); - connector.DataSource.MetricsReporter.CommandStartPrepared(); - fullyPrepared = true; - break; - - case false: - var numPrepared = 0; - if (WrappingBatch is not null) + batchCommand._parameters?.ProcessParameters(reloadableState, validateParameterValues, batchCommand.CommandType); + } + } + else + { + if (_connectorPreparedOn != connector) { - for (var i = 0; i < InternalBatchCommands.Count; i++) - { - var batchCommand = InternalBatchCommands[i]; + // The command was prepared, but since then the connector has changed. Detach all prepared statements. 
+ ResetPreparation(); + goto case false; + } + _parameters?.ProcessParameters(reloadableState, validateParameterValues, CommandType); + } - batchCommand._parameters?.ProcessParameters(reloadableState, validateParameterValues, batchCommand.CommandType); - ProcessRawQuery(connector.SqlQueryParser, connector.UseConformingStrings, batchCommand); + NpgsqlEventSource.Log.CommandStartPrepared(); + connector.DataSource.MetricsReporter.CommandStartPrepared(); + fullyPrepared = true; + break; - if (connector.Settings.MaxAutoPrepare > 0 && batchCommand.TryAutoPrepare(connector)) - { - batchCommand.ConnectorPreparedOn = connector; - numPrepared++; - } - } - } - else + case false: + var numPrepared = 0; + + if (WrappingBatch is not null) + { + for (var i = 0; i < InternalBatchCommands.Count; i++) { - _parameters?.ProcessParameters(reloadableState, validateParameterValues, CommandType); - ProcessRawQuery(connector.SqlQueryParser, connector.UseConformingStrings, batchCommand: null); + var batchCommand = InternalBatchCommands[i]; - if (connector.Settings.MaxAutoPrepare > 0) - for (var i = 0; i < InternalBatchCommands.Count; i++) - if (InternalBatchCommands[i].TryAutoPrepare(connector)) - numPrepared++; - } + batchCommand._parameters?.ProcessParameters(reloadableState, validateParameterValues, batchCommand.CommandType); + ProcessRawQuery(connector.SqlQueryParser, connector.UseConformingStrings, batchCommand); - if (numPrepared > 0) - { - _connectorPreparedOn = connector; - if (numPrepared == InternalBatchCommands.Count) + if (connector.Settings.MaxAutoPrepare > 0 && batchCommand.TryAutoPrepare(connector)) { - NpgsqlEventSource.Log.CommandStartPrepared(); - connector.DataSource.MetricsReporter.CommandStartPrepared(); - fullyPrepared = true; + batchCommand.ConnectorPreparedOn = connector; + numPrepared++; } } - - break; } + else + { + _parameters?.ProcessParameters(reloadableState, validateParameterValues, CommandType); + ProcessRawQuery(connector.SqlQueryParser, 
connector.UseConformingStrings, batchCommand: null); - // If a cancellation is in progress, wait for it to "complete" before proceeding (#615) - // We do it before changing the state because we only allow sending cancellation request if State == InProgress - connector.ResetCancellation(); - - State = CommandState.InProgress; + if (connector.Settings.MaxAutoPrepare > 0) + for (var i = 0; i < InternalBatchCommands.Count; i++) + if (InternalBatchCommands[i].TryAutoPrepare(connector)) + numPrepared++; + } - if (logger.IsEnabled(LogLevel.Information)) + if (numPrepared > 0) { - connector.QueryLogStopWatch.Restart(); - - if (logger.IsEnabled(LogLevel.Debug)) - LogExecutingCompleted(connector, executing: true); + _connectorPreparedOn = connector; + if (numPrepared == InternalBatchCommands.Count) + { + NpgsqlEventSource.Log.CommandStartPrepared(); + connector.DataSource.MetricsReporter.CommandStartPrepared(); + fullyPrepared = true; + } } - NpgsqlEventSource.Log.CommandStart(CommandText); - startTimestamp = connector.DataSource.MetricsReporter.ReportCommandStart(); - TraceCommandStart(connector.DataSource.Configuration.TracingOptions, fullyPrepared); - TraceCommandEnrich(connector); - - // We do not wait for the entire send to complete before proceeding to reading - - // the sending continues in parallel with the user's reading. Waiting for the - // entire send to complete would trigger a deadlock for multi-statement commands, - // where PostgreSQL sends large results for the first statement, while we're sending large - // parameter data for the second. See #641. - // Instead, all sends for non-first statements are performed asynchronously (even if the user requested sync), - // in a special synchronization context to prevents a dependency on the thread pool (which would also trigger - // deadlocks). - sendTask = Write(connector, async, flush: true, CancellationToken.None); - - // The following is a hack. 
It raises an exception if one was thrown in the first phases - // of the send (i.e. in parts of the send that executed synchronously). Exceptions may - // still happen later and aren't properly handled. See #1323. - if (sendTask.IsFaulted) - sendTask.GetAwaiter().GetResult(); - } - catch - { - connector.EndUserAction(); - throw; + break; } - // TODO: DRY the following with multiplexing, but be careful with the cancellation registration... - var reader = connector.DataReader; - reader.Init(this, behavior, InternalBatchCommands, startTimestamp, sendTask); - connector.CurrentReader = reader; - if (async) - await reader.NextResultAsync(cancellationToken).ConfigureAwait(false); - else - reader.NextResult(); - - TraceReceivedFirstResponse(connector.DataSource.Configuration.TracingOptions); - - return reader; - } - else - { - Debug.Assert(conn is not null); - Debug.Assert(conn.Settings.Multiplexing); + // If a cancellation is in progress, wait for it to "complete" before proceeding (#615) + // We do it before changing the state because we only allow sending cancellation request if State == InProgress + connector.ResetCancellation(); - // The connection isn't bound to a connector - it's multiplexing time. - var dataSource = (MultiplexingDataSource)conn.NpgsqlDataSource; - var reloadableState = dataSource.CurrentReloadableState; + State = CommandState.InProgress; - if (!async) + if (logger.IsEnabled(LogLevel.Information)) { - // The waiting on the ExecutionCompletion ManualResetValueTaskSource is necessarily - // asynchronous, so allowing sync would mean sync-over-async. 
- ThrowHelper.ThrowNotSupportedException("Synchronous command execution is not supported when multiplexing is on"); - } + connector.QueryLogStopWatch.Restart(); - if (WrappingBatch is not null) - { - foreach (var batchCommand in InternalBatchCommands) - { - batchCommand._parameters?.ProcessParameters(reloadableState, validateValues: true, batchCommand.CommandType); - ProcessRawQuery(null, standardConformingStrings: true, batchCommand); - } + if (logger.IsEnabled(LogLevel.Debug)) + LogExecutingCompleted(connector, executing: true); } - else - { - _parameters?.ProcessParameters(reloadableState, validateValues: true, CommandType); - ProcessRawQuery(null, standardConformingStrings: true, batchCommand: null); - } - - State = CommandState.InProgress; - // In multiplexing, we don't yet know whether the command will execute as prepared or not; that will be determined later. - TraceCommandStart(conn.NpgsqlDataSource.Configuration.TracingOptions, prepared: null); + NpgsqlEventSource.Log.CommandStart(CommandText); + startTimestamp = connector.DataSource.MetricsReporter.ReportCommandStart(); + TraceCommandStart(connector.DataSource.Configuration.TracingOptions, fullyPrepared); + TraceCommandEnrich(connector); + + // We do not wait for the entire send to complete before proceeding to reading - + // the sending continues in parallel with the user's reading. Waiting for the + // entire send to complete would trigger a deadlock for multi-statement commands, + // where PostgreSQL sends large results for the first statement, while we're sending large + // parameter data for the second. See #641. + // Instead, all sends for non-first statements are performed asynchronously (even if the user requested sync), + // in a special synchronization context to prevents a dependency on the thread pool (which would also trigger + // deadlocks). + sendTask = Write(connector, async, flush: true, CancellationToken.None); + + // The following is a hack. 
It raises an exception if one was thrown in the first phases + // of the send (i.e. in parts of the send that executed synchronously). Exceptions may + // still happen later and aren't properly handled. See #1323. + if (sendTask.IsFaulted) + sendTask.GetAwaiter().GetResult(); + } + catch + { + connector.EndUserAction(); + throw; + } - // TODO: Experiment: do we want to wait on *writing* here, or on *reading*? - // Previous behavior was to wait on reading, which throw the exception from ExecuteReader (and not from - // the first read). But waiting on writing would allow us to do sync writing and async reading. - ExecutionCompletion.Reset(); - try - { - await dataSource.MultiplexCommandWriter.WriteAsync(this, cancellationToken).ConfigureAwait(false); - } - catch (ChannelClosedException ex) - { - Debug.Assert(ex.InnerException is not null); - throw ex.InnerException; - } - connector = await new ValueTask(ExecutionCompletion, ExecutionCompletion.Version).ConfigureAwait(false); - // TODO: Overload of StartBindingScope? 
- conn.Connector = connector; - connector.Connection = conn; - conn.ConnectorBindingScope = ConnectorBindingScope.Reader; - - var reader = connector.DataReader; - reader.Init(this, behavior, InternalBatchCommands); - connector.CurrentReader = reader; + var reader = connector.DataReader; + reader.Init(this, behavior, InternalBatchCommands, startTimestamp, sendTask); + connector.CurrentReader = reader; + if (async) await reader.NextResultAsync(cancellationToken).ConfigureAwait(false); + else + reader.NextResult(); - TraceReceivedFirstResponse(connector.DataSource.Configuration.TracingOptions); + TraceReceivedFirstResponse(connector.DataSource.Configuration.TracingOptions); - return reader; - } + return reader; } catch (Exception e) { - var reader = connector?.CurrentReader; + var reader = connector.CurrentReader; if (e is not NpgsqlOperationInProgressException && reader is not null) await reader.Cleanup(async).ConfigureAwait(false); diff --git a/src/Npgsql/NpgsqlConnection.cs b/src/Npgsql/NpgsqlConnection.cs index 25319858d9..9619e938bd 100644 --- a/src/Npgsql/NpgsqlConnection.cs +++ b/src/Npgsql/NpgsqlConnection.cs @@ -45,8 +45,7 @@ public sealed class NpgsqlConnection : DbConnection, ICloneable, IComponent ConnectionState _fullState; /// - /// The physical connection to the database. This is when the connection is closed, - /// and also when it is open in multiplexing mode and unbound (e.g. not in a transaction). + /// The physical connection to the database. This is when the connection is closed. /// internal NpgsqlConnector? Connector { get; set; } @@ -101,12 +100,6 @@ public INpgsqlTypeMapper TypeMapper /// internal const int TimeoutLimit = 1024; - /// - /// Tracks when this connection was bound to a physical connector (e.g. at open-time, when a transaction - /// was started...). 
- /// - internal ConnectorBindingScope ConnectorBindingScope { get; set; } - ILogger _connectionLogger = default!; // Initialized in Open, shouldn't be used otherwise static readonly StateChangeEventArgs ClosedToOpenEventArgs = new(ConnectionState.Closed, ConnectionState.Open); @@ -138,7 +131,6 @@ internal NpgsqlConnection(NpgsqlDataSource dataSource, NpgsqlConnector connector Connector = connector; connector.Connection = this; - ConnectorBindingScope = ConnectorBindingScope.Connection; FullState = ConnectionState.Open; } @@ -246,37 +238,10 @@ internal Task Open(bool async, CancellationToken cancellationToken) if (_connectionLogger.IsEnabled(LogLevel.Trace)) LogMessages.OpeningConnection(_connectionLogger, Settings.Host!, Settings.Port, Settings.Database!, _userFacingConnectionString); - if (Settings.Multiplexing) - { - if (Settings.Enlist && Transaction.Current != null) - { - // TODO: Keep in mind that the TransactionScope can be disposed - ThrowHelper.ThrowNotSupportedException(); - } - - // We're opening in multiplexing mode, without a transaction. We don't actually do anything. - - // If we've never connected with this connection string, open a physical connector in order to generate - // any exception (bad user/password, IP address...). This reproduces the standard error behavior. - if (!_dataSource.IsBootstrapped) - { - FullState = ConnectionState.Connecting; - return PerformMultiplexingStartupCheck(async, cancellationToken); - } - - if (_connectionLogger.IsEnabled(LogLevel.Debug)) - LogMessages.OpenedMultiplexingConnection(_connectionLogger, Settings.Host!, Settings.Port, Settings.Database!, _userFacingConnectionString); - FullState = ConnectionState.Open; - - return Task.CompletedTask; - } - return OpenAsync(async, cancellationToken); async Task OpenAsync(bool async, CancellationToken cancellationToken) { - Debug.Assert(!Settings.Multiplexing); - FullState = ConnectionState.Connecting; NpgsqlConnector? 
connector = null; try @@ -301,7 +266,6 @@ async Task OpenAsync(bool async, CancellationToken cancellationToken) Debug.Assert(connector.Connection is null, $"Connection for opened connector '{Connector?.Id.ToString() ?? "???"}' is bound to another connection"); - ConnectorBindingScope = ConnectorBindingScope.Connection; connector.Connection = this; Connector = connector; @@ -314,7 +278,6 @@ async Task OpenAsync(bool async, CancellationToken cancellationToken) catch { FullState = ConnectionState.Closed; - ConnectorBindingScope = ConnectorBindingScope.None; Connector = null; EnlistedTransaction = null; @@ -328,25 +291,6 @@ async Task OpenAsync(bool async, CancellationToken cancellationToken) } } - async Task PerformMultiplexingStartupCheck(bool async, CancellationToken cancellationToken) - { - try - { - var timeout = new NpgsqlTimeout(TimeSpan.FromSeconds(ConnectionTimeout)); - - _ = await StartBindingScope(ConnectorBindingScope.Connection, timeout, async, cancellationToken).ConfigureAwait(false); - EndBindingScope(ConnectorBindingScope.Connection); - - LogMessages.OpenedMultiplexingConnection(_connectionLogger, Settings.Host!, Settings.Port, Settings.Database!, _userFacingConnectionString); - - FullState = ConnectionState.Open; - } - catch - { - FullState = ConnectionState.Closed; - throw; - } - } } #endregion Open / Init @@ -624,31 +568,21 @@ async ValueTask BeginTransaction(bool async, IsolationLevel l ThrowHelper.ThrowNotSupportedException($"Unsupported IsolationLevel: {nameof(IsolationLevel.Chaos)}"); CheckReady(); - if (Connector is { InTransaction: true }) + var connector = Connector; + if (connector is { InTransaction: true }) ThrowHelper.ThrowInvalidOperationException("A transaction is already in progress; nested/concurrent transactions aren't supported."); // There was a committed/rolled back transaction, but it was not disposed - var connector = ConnectorBindingScope == ConnectorBindingScope.Transaction - ? 
Connector - : await StartBindingScope(ConnectorBindingScope.Transaction, NpgsqlTimeout.Infinite, async, cancellationToken).ConfigureAwait(false); Debug.Assert(connector != null); - try - { - // Note that beginning a transaction doesn't actually send anything to the backend (only prepends). - // But we start a user action to check the cancellation token and generate exceptions - using var _ = connector.StartUserAction(cancellationToken); + // Note that beginning a transaction doesn't actually send anything to the backend (only prepends). + // But we start a user action to check the cancellation token and generate exceptions + using var _ = connector.StartUserAction(cancellationToken); - connector.Transaction ??= new NpgsqlTransaction(connector); - connector.Transaction.Init(level); - return connector.Transaction; - } - catch - { - EndBindingScope(ConnectorBindingScope.Transaction); - throw; - } + connector.Transaction ??= new NpgsqlTransaction(connector); + connector.Transaction.Init(level); + return connector.Transaction; } /// @@ -698,9 +632,6 @@ protected override async ValueTask BeginDbTransactionAsync(Isolat /// public override void EnlistTransaction(Transaction? transaction) { - if (Settings.Multiplexing) - throw new NotSupportedException("Ambient transactions aren't yet implemented for multiplexing"); - if (EnlistedTransaction != null) { if (EnlistedTransaction.Equals(transaction)) @@ -719,14 +650,11 @@ public override void EnlistTransaction(Transaction? transaction) } CheckReady(); - var connector = StartBindingScope(ConnectorBindingScope.Transaction); + var connector = Connector!; EnlistedTransaction = transaction; if (transaction == null) - { - EndBindingScope(ConnectorBindingScope.Transaction); return; - } // Until #1378 is implemented, we have no recovery, and so no need to enlist as a durable resource manager // (or as promotable single phase). @@ -740,7 +668,7 @@ public override void EnlistTransaction(Transaction? 
transaction) EnlistedTransaction = transaction; LogMessages.EnlistedVolatileResourceManager( - Connector!.LoggingConfiguration.TransactionLogger, + connector.LoggingConfiguration.TransactionLogger, transaction.TransactionInformation.LocalIdentifier, connector.Id); } @@ -793,28 +721,12 @@ internal Task Close(bool async) throw new ArgumentOutOfRangeException("Unknown connection state: " + FullState); } - // TODO: The following shouldn't exist - we need to flow down the regular path to close any - // open reader / COPY. See test CloseDuringRead with multiplexing. - if (Settings.Multiplexing && ConnectorBindingScope == ConnectorBindingScope.None) - { - // TODO: Consider falling through to the regular reset logic. This adds some unneeded conditions - // and assignment but actual perf impact should be negligible (measure). - Debug.Assert(Connector == null); - ReleaseCloseLock(); - - FullState = ConnectionState.Closed; - LogMessages.ClosedMultiplexingConnection(_connectionLogger, Settings.Host!, Settings.Port, Settings.Database!, _userFacingConnectionString); - - return Task.CompletedTask; - } - return CloseAsync(async); } async Task CloseAsync(bool async) { Debug.Assert(Connector != null); - Debug.Assert(ConnectorBindingScope != ConnectorBindingScope.None); try { @@ -825,16 +737,6 @@ async Task CloseAsync(bool async) { // This method could re-enter connection.Close() due to an underlying connection failure. 
await connector.CloseOngoingOperations(async).ConfigureAwait(false); - - if (ConnectorBindingScope == ConnectorBindingScope.None) - { - Debug.Assert(Settings.Multiplexing); - Debug.Assert(Connector is null); - - FullState = ConnectionState.Closed; - LogMessages.ClosedMultiplexingConnection(_connectionLogger, Settings.Host!, Settings.Port, Settings.Database!, _userFacingConnectionString); - return; - } } Debug.Assert(connector.IsReady || connector.IsBroken, $"Connector is not ready or broken during close, it's {connector.State}"); @@ -859,7 +761,6 @@ async Task CloseAsync(bool async) if (Settings.Pooling) { // Clear the buffer, roll back any pending transaction and prepend a reset message if needed - // Also returns the connector to the pool, if there is an open transaction and multiplexing is on // Note that we're doing this only for pooled connections await connector.Reset(async).ConfigureAwait(false); } @@ -870,23 +771,12 @@ async Task CloseAsync(bool async) connector.Transaction?.UnbindIfNecessary(); } - if (Settings.Multiplexing) - { - // We've already closed ongoing operations rolled back any transaction and the connector is already in the pool, - // so we must be unbound. Nothing to do. - Debug.Assert(ConnectorBindingScope == ConnectorBindingScope.None, - $"When closing a multiplexed connection, the connection was supposed to be unbound, but {nameof(ConnectorBindingScope)} was {ConnectorBindingScope}"); - } - else - { - connector.Connection = null; - connector.Return(); - } + connector.Connection = null; + connector.Return(); } LogMessages.ClosedConnection(_connectionLogger, Settings.Host!, Settings.Port, Settings.Database!, _userFacingConnectionString, connector.Id); Connector = null; - ConnectorBindingScope = ConnectorBindingScope.None; FullState = ConnectionState.Closed; } finally @@ -984,22 +874,50 @@ internal void OnNotification(NpgsqlNotificationEventArgs e) /// /// Returns whether SSL is being used for the connection. 
/// - internal bool IsSslEncrypted => CheckOpenAndRunInTemporaryScope(c => c.IsSslEncrypted); + internal bool IsSslEncrypted + { + get + { + CheckOpen(); + return Connector!.IsSslEncrypted; + } + } /// /// Returns whether GSS encryption is being used for the connection. /// - internal bool IsGssEncrypted => CheckOpenAndRunInTemporaryScope(c => c.IsGssEncrypted); + internal bool IsGssEncrypted + { + get + { + CheckOpen(); + return Connector!.IsGssEncrypted; + } + } /// /// Returns whether SCRAM-SHA256 is being user for the connection /// - internal bool IsScram => CheckOpenAndRunInTemporaryScope(c => c.IsScram); + internal bool IsScram + { + get + { + CheckOpen(); + return Connector!.IsScram; + } + } /// /// Returns whether SCRAM-SHA256-PLUS is being user for the connection /// - internal bool IsScramPlus => CheckOpenAndRunInTemporaryScope(c => c.IsScramPlus); + internal bool IsScramPlus + { + get + { + CheckOpen(); + return Connector!.IsScramPlus; + } + } /// /// Selects the local Secure Sockets Layer (SSL) certificate used for authentication. @@ -1055,7 +973,14 @@ internal void OnNotification(NpgsqlNotificationEventArgs e) /// /// [Browsable(false)] - public Version PostgreSqlVersion => CheckOpenAndRunInTemporaryScope(c => c.DatabaseInfo.Version); + public Version PostgreSqlVersion + { + get + { + CheckOpen(); + return Connector!.DatabaseInfo.Version; + } + } /// /// The PostgreSQL server version as returned by the server_version option. @@ -1063,8 +988,14 @@ internal void OnNotification(NpgsqlNotificationEventArgs e) /// This can only be called when the connection is open. /// /// - public override string ServerVersion => CheckOpenAndRunInTemporaryScope( - c => c.DatabaseInfo.ServerVersion); + public override string ServerVersion + { + get + { + CheckOpen(); + return Connector!.DatabaseInfo.ServerVersion; + } + } /// /// Process id of backend server. 
@@ -1077,10 +1008,7 @@ public int ProcessID get { CheckOpen(); - - return TryGetBoundConnector(out var connector) - ? connector.BackendProcessId - : throw new InvalidOperationException("No bound physical connection (using multiplexing)"); + return Connector!.BackendProcessId; } } @@ -1090,13 +1018,27 @@ public int ProcessID /// Meant for use by type plugins (e.g. NodaTime) /// [Browsable(false)] - public bool HasIntegerDateTimes => CheckOpenAndRunInTemporaryScope(c => c.DatabaseInfo.HasIntegerDateTimes); + public bool HasIntegerDateTimes + { + get + { + CheckOpen(); + return Connector!.DatabaseInfo.HasIntegerDateTimes; + } + } /// /// The connection's timezone as reported by PostgreSQL, in the IANA/Olson database format. /// [Browsable(false)] - public string Timezone => CheckOpenAndRunInTemporaryScope(c => c.Timezone); + public string Timezone + { + get + { + CheckOpen(); + return Connector!.Timezone; + } + } /// /// Holds all PostgreSQL parameters received for this connection. Is updated if the values change @@ -1104,7 +1046,13 @@ public int ProcessID /// [Browsable(false)] public IReadOnlyDictionary PostgresParameters - => CheckOpenAndRunInTemporaryScope(c => c.PostgresParameters); + { + get + { + CheckOpen(); + return Connector!.PostgresParameters; + } + } #endregion Backend version, capabilities, settings @@ -1140,7 +1088,7 @@ async Task BeginBinaryImport(bool async, string copyFromCo throw new ArgumentException("Must contain a COPY FROM STDIN command!", nameof(copyFromCommand)); CheckReady(); - var connector = StartBindingScope(ConnectorBindingScope.Copy); + var connector = Connector!; LogMessages.StartingBinaryImport(connector.LoggingConfiguration.CopyLogger, connector.Id); // no point in passing a cancellationToken here, as we register the cancellation in the Init method @@ -1199,7 +1147,7 @@ async Task BeginBinaryExport(bool async, string copyToComm throw new ArgumentException("Must contain a COPY TO STDOUT command!", nameof(copyToCommand)); CheckReady(); 
- var connector = StartBindingScope(ConnectorBindingScope.Copy); + var connector = Connector!; LogMessages.StartingBinaryExport(connector.LoggingConfiguration.CopyLogger, connector.Id); // no point in passing a cancellationToken here, as we register the cancellation in the Init method @@ -1264,7 +1212,7 @@ async Task BeginTextImport(bool async, string copyFromComm throw new ArgumentException("Must contain a COPY FROM STDIN command!", nameof(copyFromCommand)); CheckReady(); - var connector = StartBindingScope(ConnectorBindingScope.Copy); + var connector = Connector!; LogMessages.StartingTextImport(connector.LoggingConfiguration.CopyLogger, connector.Id); // no point in passing a cancellationToken here, as we register the cancellation in the Init method @@ -1330,7 +1278,7 @@ async Task BeginTextExport(bool async, string copyToComman throw new ArgumentException("Must contain a COPY TO STDOUT command!", nameof(copyToCommand)); CheckReady(); - var connector = StartBindingScope(ConnectorBindingScope.Copy); + var connector = Connector!; LogMessages.StartingTextExport(connector.LoggingConfiguration.CopyLogger, connector.Id); // no point in passing a cancellationToken here, as we register the cancellation in the Init method @@ -1396,7 +1344,7 @@ async Task BeginRawBinaryCopy(bool async, string copyComman throw new ArgumentException("Must contain a COPY TO STDOUT OR COPY FROM STDIN command!", nameof(copyCommand)); CheckReady(); - var connector = StartBindingScope(ConnectorBindingScope.Copy); + var connector = Connector!; LogMessages.StartingRawCopy(connector.LoggingConfiguration.CopyLogger, connector.Id); // no point in passing a cancellationToken here, as we register the cancellation in the Init method @@ -1452,8 +1400,6 @@ public bool Wait(int timeout) { if (timeout != -1 && timeout < 0) throw new ArgumentException("Argument must be -1, 0 or positive", nameof(timeout)); - if (Settings.Multiplexing) - throw new NotSupportedException($"{nameof(Wait)} isn't supported in 
multiplexing mode"); CheckReady(); @@ -1495,9 +1441,6 @@ public bool Wait(int timeout) /// true if an asynchronous message was received, false if timed out. public Task WaitAsync(int timeout, CancellationToken cancellationToken = default) { - if (Settings.Multiplexing) - throw new NotSupportedException($"{nameof(Wait)} isn't supported in multiplexing mode"); - CheckReady(); LogMessages.StartingWait(_connectionLogger, timeout, Connector!.Id); @@ -1591,121 +1534,6 @@ internal void CheckReady() #endregion State checks - #region Connector binding - - /// - /// Checks whether the connection is currently bound to a connector, and if so, returns it via - /// . - /// - internal bool TryGetBoundConnector([NotNullWhen(true)] out NpgsqlConnector? connector) - { - if (ConnectorBindingScope == ConnectorBindingScope.None) - { - Debug.Assert(Connector == null, $"Binding scope is None but {Connector} exists"); - connector = null; - return false; - } - Debug.Assert(Connector != null, $"Binding scope is {ConnectorBindingScope} but {Connector} is null"); - Debug.Assert(Connector.Connection == this, $"Bound connector {Connector} does not reference this connection"); - connector = Connector; - return true; - } - - /// - /// Binds this connection to a physical connector. This happens when opening a non-multiplexing connection, - /// or when starting a transaction on a multiplexed connection. - /// - internal ValueTask StartBindingScope( - ConnectorBindingScope scope, NpgsqlTimeout timeout, bool async, CancellationToken cancellationToken) - { - // If the connection is around bound at a higher scope, we do nothing (e.g. copy operation started - // within a transaction on a multiplexing connection). - // Note that if we're in an ambient transaction, that means we're already bound and so we do nothing here. 
- if (ConnectorBindingScope != ConnectorBindingScope.None) - { - Debug.Assert(Connector != null, $"Connection bound with scope {ConnectorBindingScope} but has no connector"); - Debug.Assert(scope != ConnectorBindingScope, $"Binding scopes aren't reentrant ({ConnectorBindingScope})"); - return new ValueTask(Connector); - } - - return StartBindingScopeAsync(); - - async ValueTask StartBindingScopeAsync() - { - try - { - Debug.Assert(Settings.Multiplexing); - Debug.Assert(_dataSource != null); - - var connector = await _dataSource.Get(this, timeout, async, cancellationToken).ConfigureAwait(false); - Connector = connector; - connector.Connection = this; - ConnectorBindingScope = scope; - return connector; - } - catch - { - FullState = ConnectionState.Broken; - throw; - } - } - } - - internal NpgsqlConnector StartBindingScope(ConnectorBindingScope scope) - => StartBindingScope(scope, NpgsqlTimeout.Infinite, async: false, CancellationToken.None) - .GetAwaiter().GetResult(); - - internal EndScopeDisposable StartTemporaryBindingScope(out NpgsqlConnector connector) - { - connector = StartBindingScope(ConnectorBindingScope.Temporary); - return new EndScopeDisposable(this); - } - - internal async ValueTask<(EndScopeDisposable, NpgsqlConnector)> StartTemporaryBindingScopeAsync(CancellationToken cancellationToken) - { - var connector = await StartBindingScope(ConnectorBindingScope.Temporary, NpgsqlTimeout.Infinite, async: true, cancellationToken).ConfigureAwait(false); - return (new EndScopeDisposable(this), connector); - } - - internal T CheckOpenAndRunInTemporaryScope(Func func) - { - CheckOpen(); - - using var _ = StartTemporaryBindingScope(out var connector); - var result = func(connector); - return result; - } - - /// - /// Ends binding scope to the physical connection and returns it to the pool. Only useful with multiplexing on. 
- /// - /// - /// After this method is called, under no circumstances the physical connection (connector) should ever be used if multiplexing is on. - /// See #3249. - /// - internal void EndBindingScope(ConnectorBindingScope scope) - { - Debug.Assert(ConnectorBindingScope != ConnectorBindingScope.None || FullState == ConnectionState.Broken, - $"Ending binding scope {scope} but connection's scope is null"); - - if (scope != ConnectorBindingScope) - return; - - Debug.Assert(Connector != null, $"Ending binding scope {scope} but connector is null"); - Debug.Assert(_dataSource != null, $"Ending binding scope {scope} but _pool is null"); - Debug.Assert(Settings.Multiplexing, $"Ending binding scope {scope} but multiplexing is disabled"); - - // TODO: If enlisted transaction scope is still active, need to AddPendingEnlistedConnector, just like Close - var connector = Connector; - Connector = null; - connector.Connection = null; - connector.Transaction?.UnbindIfNecessary(); - connector.Return(); - ConnectorBindingScope = ConnectorBindingScope.None; - } - - #endregion Connector binding - #region Schema operations /// @@ -1897,9 +1725,6 @@ public override void ChangeDatabase(string dbName) /// public void UnprepareAll() { - if (Settings.Multiplexing) - throw new NotSupportedException("Explicit preparation not supported with multiplexing"); - CheckReady(); using (Connector!.StartUserAction()) @@ -1914,10 +1739,8 @@ public void ReloadTypes() { CheckReady(); - using var scope = StartTemporaryBindingScope(out var connector); - _dataSource!.Bootstrap( - connector, + Connector!, NpgsqlTimeout.Infinite, forceReload: true, async: false, @@ -1933,11 +1756,8 @@ public async Task ReloadTypesAsync(CancellationToken cancellationToken = default { CheckReady(); - var (scope, connector) = await StartTemporaryBindingScopeAsync(cancellationToken).ConfigureAwait(false); - using var _ = scope; - await _dataSource!.Bootstrap( - connector, + Connector!, NpgsqlTimeout.Infinite, forceReload: true, 
async: true, @@ -1963,46 +1783,6 @@ event EventHandler? IComponent.Disposed #endregion Misc } -enum ConnectorBindingScope -{ - /// - /// The connection is currently not bound to a connector. - /// - None, - - /// - /// The connection is bound to its connector for the scope of the entire connection - /// (i.e. non-multiplexed connection). - /// - Connection, - - /// - /// The connection is bound to its connector for the scope of a transaction. - /// - Transaction, - - /// - /// The connection is bound to its connector for the scope of a COPY operation. - /// - Copy, - - /// - /// The connection is bound to its connector for the scope of a single reader. - /// - Reader, - - /// - /// The connection is bound to its connector for an unspecified, temporary scope; the code that initiated - /// the binding is also responsible to unbind it. - /// - Temporary -} - -readonly struct EndScopeDisposable(NpgsqlConnection connection) : IDisposable -{ - public void Dispose() => connection.EndBindingScope(ConnectorBindingScope.Temporary); -} - #region Delegates /// diff --git a/src/Npgsql/NpgsqlConnectionStringBuilder.cs b/src/Npgsql/NpgsqlConnectionStringBuilder.cs index 9dd2696e51..9b79c9f064 100644 --- a/src/Npgsql/NpgsqlConnectionStringBuilder.cs +++ b/src/Npgsql/NpgsqlConnectionStringBuilder.cs @@ -1365,50 +1365,6 @@ public ArrayNullabilityMode ArrayNullabilityMode #endregion - #region Multiplexing - - /// - /// Enables multiplexing, which allows more efficient use of connections. 
- /// - [Category("Multiplexing")] - [Description("Enables multiplexing, which allows more efficient use of connections.")] - [DisplayName("Multiplexing")] - [NpgsqlConnectionStringProperty] - [DefaultValue(false)] - public bool Multiplexing - { - get => _multiplexing; - set - { - _multiplexing = value; - SetValue(nameof(Multiplexing), value); - } - } - bool _multiplexing; - - /// - /// When multiplexing is enabled, determines the maximum number of outgoing bytes to buffer before - /// flushing to the network. - /// - [Category("Multiplexing")] - [Description("When multiplexing is enabled, determines the maximum number of outgoing bytes to buffer before " + - "flushing to the network.")] - [DisplayName("Write Coalescing Buffer Threshold Bytes")] - [NpgsqlConnectionStringProperty] - [DefaultValue(1000)] - public int WriteCoalescingBufferThresholdBytes - { - get => _writeCoalescingBufferThresholdBytes; - set - { - _writeCoalescingBufferThresholdBytes = value; - SetValue(nameof(WriteCoalescingBufferThresholdBytes), value); - } - } - int _writeCoalescingBufferThresholdBytes; - - #endregion - #region Properties - Obsolete /// @@ -1500,8 +1456,6 @@ public int InternalCommandTimeout internal void PostProcessAndValidate() { ArgumentException.ThrowIfNullOrWhiteSpace(Host); - if (Multiplexing && !Pooling) - throw new ArgumentException("Pooling must be on to use multiplexing"); if (SslNegotiation == SslNegotiation.Direct && SslMode is not SslMode.Require and not SslMode.VerifyCA and not SslMode.VerifyFull) throw new ArgumentException("SSL Mode has to be Require or higher to be used with direct SSL Negotiation"); diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 55753dc7f6..27bc6675c7 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -43,8 +43,7 @@ public sealed class NpgsqlDataReader : DbDataReader, IDbColumnSchemaGenerator CommandBehavior _behavior; /// - /// In multiplexing, this is as the sending is managed 
in the write multiplexing loop, - /// and does not need to be awaited by the reader. + /// The task for writing this command's messages. Awaited on reader cleanup. /// Task? _sendTask; @@ -1014,8 +1013,7 @@ protected override void Dispose(bool disposing) catch (Exception ex) { // In the case of a PostgresException (or multiple ones, if we have error barriers), the reader's state has already been set - // to Disposed in Close above; in multiplexing, we also unbind the connector (with its reader), and at that point it can be used - // by other consumers. Therefore, we only set the state fo Disposed if the exception *wasn't* a PostgresException. + // to Disposed in Close above. Therefore, we only set the state to Disposed if the exception *wasn't* a PostgresException. if (!(ex is PostgresException || ex is NpgsqlException { InnerException: AggregateException aggregateException } && AllPostgresExceptions(aggregateException.InnerExceptions))) @@ -1043,8 +1041,7 @@ public override async ValueTask DisposeAsync() catch (Exception ex) { // In the case of a PostgresException (or multiple ones, if we have error barriers), the reader's state has already been set - // to Disposed in Close above; in multiplexing, we also unbind the connector (with its reader), and at that point it can be used - // by other consumers. Therefore, we only set the state to Disposed if the exception *wasn't* a PostgresException. + // to Disposed in Close above. Therefore, we only set the state to Disposed if the exception *wasn't* a PostgresException. if (!(ex is PostgresException || ex is NpgsqlException { InnerException: AggregateException aggregateException } && AllPostgresExceptions(aggregateException.InnerExceptions))) @@ -1142,7 +1139,7 @@ internal async Task Cleanup(bool async, bool connectionClosing = false, bool isD { LogMessages.ReaderCleanup(_commandLogger, Connector.Id); - // If multiplexing isn't on, _sendTask contains the task for the writing of this command. 
+ // _sendTask contains the task for the writing of this command. // Make sure that this task, which may have executed asynchronously and in parallel with the reading, // has completed, throwing any exceptions it generated. If we don't do this, there's the possibility of a race condition where the // user executes a new command after reader.Dispose() returns, but some additional write stuff is still finishing up from the last @@ -1189,27 +1186,10 @@ internal async Task Cleanup(bool async, bool connectionClosing = false, bool isD Connector.DataSource.MetricsReporter.ReportCommandStop(_startTimestamp); Connector.EndUserAction(); - // The reader shouldn't be unbound, if we're disposing - so the state is set prematurely if (isDisposing) State = ReaderState.Disposed; - if (_connection?.ConnectorBindingScope == ConnectorBindingScope.Reader) - { - UnbindIfNecessary(); - - // TODO: Refactor... Use proper scope - _connection.Connector = null; - Connector.Connection = null; - _connection.ConnectorBindingScope = ConnectorBindingScope.None; - - // If the reader is being closed as part of the connection closing, we don't apply - // the reader's CommandBehavior.CloseConnection - if (_behavior.HasFlag(CommandBehavior.CloseConnection) && !connectionClosing) - _connection.Close(); - - Connector.ReaderCompleted.SetResult(null); - } - else if (_behavior.HasFlag(CommandBehavior.CloseConnection) && !connectionClosing) + if (_behavior.HasFlag(CommandBehavior.CloseConnection) && !connectionClosing) { Debug.Assert(_connection is not null); _connection.Close(); diff --git a/src/Npgsql/NpgsqlEventId.cs b/src/Npgsql/NpgsqlEventId.cs index cf82ea063d..a0bf0bf30c 100644 --- a/src/Npgsql/NpgsqlEventId.cs +++ b/src/Npgsql/NpgsqlEventId.cs @@ -30,7 +30,7 @@ public static class NpgsqlEventId public const int CaughtUserExceptionInNoticeEventHandler = 1901; public const int CaughtUserExceptionInNotificationEventHandler = 1902; public const int ExceptionWhenClosingPhysicalConnection = 1903; - public 
const int ExceptionWhenOpeningConnectionForMultiplexing = 1904; + public const int ExceptionWhenOpeningConnectionForMultiplexing = 1904; // Multiplexing has been removed #endregion Connection @@ -48,7 +48,7 @@ public static class NpgsqlEventId public const int DerivingParameters = 2500; - public const int ExceptionWhenWritingMultiplexedCommands = 2600; + public const int ExceptionWhenWritingMultiplexedCommands = 2600; // Multiplexing has been removed #endregion Command diff --git a/src/Npgsql/NpgsqlEventSource.cs b/src/Npgsql/NpgsqlEventSource.cs index 00cfc1ed31..4122bbd8d5 100644 --- a/src/Npgsql/NpgsqlEventSource.cs +++ b/src/Npgsql/NpgsqlEventSource.cs @@ -31,9 +31,6 @@ sealed class NpgsqlEventSource : EventSource PollingCounter? _poolsCounter; - PollingCounter? _multiplexingAverageCommandsPerBatchCounter; - PollingCounter? _multiplexingAverageWriteTimePerBatchCounter; - long _bytesWritten; long _bytesRead; @@ -42,10 +39,6 @@ sealed class NpgsqlEventSource : EventSource long _currentCommands; long _failedCommands; - long _multiplexingBatchesSent; - long _multiplexingCommandsSent; - long _multiplexingTicksWritten; - internal NpgsqlEventSource() : base(EventSourceName) {} // NOTE @@ -99,39 +92,8 @@ internal void CommandFailed() internal bool TryTrackDataSource(string name, NpgsqlDataSource dataSource, [NotNullWhen(true)]out IDisposable? untrack) => DataSourceEvents.TryTrack(name, dataSource, out untrack); - internal void MultiplexingBatchSent(int numCommands, long elapsedTicks) - { - // TODO: CAS loop instead of 3 separate interlocked operations? 
- if (IsEnabled()) - { - Interlocked.Increment(ref _multiplexingBatchesSent); - Interlocked.Add(ref _multiplexingCommandsSent, numCommands); - Interlocked.Add(ref _multiplexingTicksWritten, elapsedTicks); - } - } - double GetDataSourceCount() => DataSourceEvents.GetDataSourceCount(); - double GetMultiplexingAverageCommandsPerBatch() - { - var batchesSent = Interlocked.Read(ref _multiplexingBatchesSent); - if (batchesSent == 0) - return -1; - - var commandsSent = (double)Interlocked.Read(ref _multiplexingCommandsSent); - return commandsSent / batchesSent; - } - - double GetMultiplexingAverageWriteTimePerBatch() - { - var batchesSent = Interlocked.Read(ref _multiplexingBatchesSent); - if (batchesSent == 0) - return -1; - - var ticksWritten = (double)Interlocked.Read(ref _multiplexingTicksWritten); - return ticksWritten / batchesSent / 1000; - } - protected override void OnEventCommand(EventCommandEventArgs command) { if (command.Command is EventCommand.Enable) @@ -189,17 +151,6 @@ protected override void OnEventCommand(EventCommandEventArgs command) DisplayName = "Connection Pools" }; - _multiplexingAverageCommandsPerBatchCounter = new PollingCounter("multiplexing-average-commands-per-batch", this, GetMultiplexingAverageCommandsPerBatch) - { - DisplayName = "Average commands per multiplexing batch" - }; - - _multiplexingAverageWriteTimePerBatchCounter = new PollingCounter("multiplexing-average-write-time-per-batch", this, GetMultiplexingAverageWriteTimePerBatch) - { - DisplayName = "Average write time per multiplexing batch", - DisplayUnits = "us" - }; - DataSourceEvents.EnableAll(); } } diff --git a/src/Npgsql/NpgsqlMultiHostDataSource.cs b/src/Npgsql/NpgsqlMultiHostDataSource.cs index 4ccc0809b5..4e7d63bddb 100644 --- a/src/Npgsql/NpgsqlMultiHostDataSource.cs +++ b/src/Npgsql/NpgsqlMultiHostDataSource.cs @@ -37,7 +37,6 @@ internal NpgsqlMultiHostDataSource(NpgsqlConnectionStringBuilder settings, Npgsq for (var i = 0; i < hosts.Length; i++) { var poolSettings = 
settings.Clone(); - Debug.Assert(!poolSettings.Multiplexing); var host = hosts[i].AsSpan().Trim(); if (NpgsqlConnectionStringBuilder.TrySplitHostPort(host, out var newHost, out var newPort)) { diff --git a/src/Npgsql/NpgsqlRawCopyStream.cs b/src/Npgsql/NpgsqlRawCopyStream.cs index bbc641ff66..981065b813 100644 --- a/src/Npgsql/NpgsqlRawCopyStream.cs +++ b/src/Npgsql/NpgsqlRawCopyStream.cs @@ -452,7 +452,6 @@ void Cleanup() LogMessages.CopyOperationCompleted(_copyLogger, _connector.Id); _connector.EndUserAction(); _connector.CurrentCopyOperation = null; - _connector.Connection?.EndBindingScope(ConnectorBindingScope.Copy); _connector = null; _readBuf = null; _writeBuf = null; diff --git a/src/Npgsql/NpgsqlSchema.cs b/src/Npgsql/NpgsqlSchema.cs index 409fe3b91e..aea2f6e925 100644 --- a/src/Npgsql/NpgsqlSchema.cs +++ b/src/Npgsql/NpgsqlSchema.cs @@ -758,7 +758,8 @@ static DataTable GetDataSourceInformation(NpgsqlConnection conn) static DataTable GetDataTypes(NpgsqlConnection conn) { - using var _ = conn.StartTemporaryBindingScope(out var connector); + conn.CheckReady(); + var connector = conn.Connector!; var table = new DataTable("DataTypes"); @@ -789,7 +790,6 @@ static DataTable GetDataTypes(NpgsqlConnection conn) // Npgsql-specific table.Columns.Add("OID", typeof(uint)); - // TODO: Support type name restriction try { diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index 358ab30b95..ebe7fd9163 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -798,11 +798,9 @@ public NpgsqlDataSource Build() return new NpgsqlMultiHostDataSource(connectionStringBuilder, config); } - return ConnectionStringBuilder.Multiplexing - ? new MultiplexingDataSource(connectionStringBuilder, config) - : ConnectionStringBuilder.Pooling - ? 
new PoolingDataSource(connectionStringBuilder, config) - : new UnpooledDataSource(connectionStringBuilder, config); + return ConnectionStringBuilder.Pooling + ? new PoolingDataSource(connectionStringBuilder, config) + : new UnpooledDataSource(connectionStringBuilder, config); } /// @@ -903,8 +901,6 @@ _loggerFactory is null void ValidateMultiHost() { - if (ConnectionStringBuilder.Multiplexing) - throw new NotSupportedException("Multiplexing is not supported with multiple hosts"); if (ConnectionStringBuilder.ReplicationMode != ReplicationMode.Off) throw new NotSupportedException("Replication is not supported with multiple hosts"); } diff --git a/src/Npgsql/NpgsqlTransaction.cs b/src/Npgsql/NpgsqlTransaction.cs index f88efbefa9..14254bdccc 100644 --- a/src/Npgsql/NpgsqlTransaction.cs +++ b/src/Npgsql/NpgsqlTransaction.cs @@ -328,7 +328,6 @@ protected override void Dispose(bool disposing) } IsDisposed = true; - _connector?.Connection?.EndBindingScope(ConnectorBindingScope.Transaction); } } @@ -345,8 +344,8 @@ public override ValueTask DisposeAsync() } IsDisposed = true; - _connector?.Connection?.EndBindingScope(ConnectorBindingScope.Transaction); } + return default; async ValueTask DisposeAsyncInternal() @@ -364,7 +363,6 @@ async ValueTask DisposeAsyncInternal() } IsDisposed = true; - _connector?.Connection?.EndBindingScope(ConnectorBindingScope.Transaction); } } diff --git a/src/Npgsql/PoolingDataSource.cs b/src/Npgsql/PoolingDataSource.cs index 18ddc1e63f..4de3cfd928 100644 --- a/src/Npgsql/PoolingDataSource.cs +++ b/src/Npgsql/PoolingDataSource.cs @@ -80,8 +80,8 @@ internal PoolingDataSource( throw new ArgumentException($"Connection can't have 'Max Pool Size' {settings.MaxPoolSize} under 'Min Pool Size' {settings.MinPoolSize}"); // We enforce Max Pool Size, so no need to to create a bounded channel (which is less efficient) - // On the consuming side, we have the multiplexing write loop but also non-multiplexing Rents - // On the producing side, we have connections 
being released back into the pool (both multiplexing and not) + // On the consuming side, we have Rents + // On the producing side, we have connections being released back into the pool var idleChannel = Channel.CreateUnbounded(); _idleConnectorReader = idleChannel.Reader; IdleConnectorWriter = idleChannel.Writer; @@ -244,10 +244,6 @@ bool CheckIdleConnector([NotNullWhen(true)] NpgsqlConnector? connector) Debug.Assert(connector.State == ConnectorState.Ready, $"Got idle connector but {nameof(connector.State)} is {connector.State}"); - Debug.Assert(connector.CommandsInFlightCount == 0, - $"Got idle connector but {nameof(connector.CommandsInFlightCount)} is {connector.CommandsInFlightCount}"); - Debug.Assert(connector.MultiplexAsyncWritingLock == 0, - $"Got idle connector but {nameof(connector.MultiplexAsyncWritingLock)} is 1"); return true; } @@ -310,8 +306,6 @@ bool CheckIdleConnector([NotNullWhen(true)] NpgsqlConnector? connector) internal sealed override void Return(NpgsqlConnector connector) { Debug.Assert(!connector.InTransaction); - Debug.Assert(connector.MultiplexAsyncWritingLock == 0 || connector.IsBroken || connector.IsClosed, - $"About to return multiplexing connector to the pool, but {nameof(connector.MultiplexAsyncWritingLock)} is {connector.MultiplexAsyncWritingLock}"); // If Clear/ClearAll has been been called since this connector was first opened, // throw it away. 
The same if it's broken (in which case CloseConnector is only diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index 6694c16f4f..b52e2d68fb 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -1,4 +1,8 @@ #nullable enable +*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.Multiplexing.get -> bool +*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.Multiplexing.set -> void +*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.WriteCoalescingBufferThresholdBytes.get -> int +*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.WriteCoalescingBufferThresholdBytes.set -> void abstract Npgsql.NpgsqlDataSource.Clear() -> void Npgsql.GssEncryptionMode Npgsql.GssEncryptionMode.Disable = 0 -> Npgsql.GssEncryptionMode diff --git a/src/Npgsql/Replication/ReplicationConnection.cs b/src/Npgsql/Replication/ReplicationConnection.cs index 8583c31ce0..69f7a6d010 100644 --- a/src/Npgsql/Replication/ReplicationConnection.cs +++ b/src/Npgsql/Replication/ReplicationConnection.cs @@ -81,8 +81,8 @@ private protected ReplicationConnection(string? connectionString) : this() /// /// /// Since replication connections are a special kind of connection, - /// , , - /// and + /// , + /// and /// are always disabled no matter what you set them to in your connection string. 
/// [AllowNull] @@ -95,7 +95,6 @@ public string ConnectionString { { Pooling = false, Enlist = false, - Multiplexing = false, KeepAlive = 0, ReplicationMode = ReplicationMode }; diff --git a/src/Npgsql/Util/ManualResetValueTaskSource.cs b/src/Npgsql/Util/ManualResetValueTaskSource.cs deleted file mode 100644 index 55e45aa225..0000000000 --- a/src/Npgsql/Util/ManualResetValueTaskSource.cs +++ /dev/null @@ -1,21 +0,0 @@ -using System; -using System.Threading.Tasks.Sources; - -namespace Npgsql.Util; - -sealed class ManualResetValueTaskSource : IValueTaskSource, IValueTaskSource -{ - ManualResetValueTaskSourceCore _core; // mutable struct; do not make this readonly - - public bool RunContinuationsAsynchronously { get => _core.RunContinuationsAsynchronously; set => _core.RunContinuationsAsynchronously = value; } - public short Version => _core.Version; - public void Reset() => _core.Reset(); - public void SetResult(T result) => _core.SetResult(result); - public void SetException(Exception error) => _core.SetException(error); - - public T GetResult(short token) => _core.GetResult(token); - void IValueTaskSource.GetResult(short token) => _core.GetResult(token); - public ValueTaskSourceStatus GetStatus(short token) => _core.GetStatus(token); - public void OnCompleted(Action continuation, object? 
state, short token, ValueTaskSourceOnCompletedFlags flags) - => _core.OnCompleted(continuation, state, token, flags); -} \ No newline at end of file diff --git a/test/Npgsql.PluginTests/NodaTimeTests.cs b/test/Npgsql.PluginTests/NodaTimeTests.cs index 557e841b5e..bec0b46c9b 100644 --- a/test/Npgsql.PluginTests/NodaTimeTests.cs +++ b/test/Npgsql.PluginTests/NodaTimeTests.cs @@ -13,7 +13,7 @@ namespace Npgsql.PluginTests; -public class NodaTimeTests : MultiplexingTestBase, IDisposable +public class NodaTimeTests : TestBase, IDisposable { #region Timestamp without time zone @@ -760,8 +760,7 @@ public async Task Period_write_throw_on_overflow() protected override NpgsqlDataSource DataSource { get; } - public NodaTimeTests(MultiplexingMode multiplexingMode) - : base(multiplexingMode) + public NodaTimeTests() { var builder = CreateDataSourceBuilder(); builder.UseNodaTime(); diff --git a/test/Npgsql.Tests/AuthenticationTests.cs b/test/Npgsql.Tests/AuthenticationTests.cs index 1503d7f373..a3765d41ae 100644 --- a/test/Npgsql.Tests/AuthenticationTests.cs +++ b/test/Npgsql.Tests/AuthenticationTests.cs @@ -11,7 +11,7 @@ namespace Npgsql.Tests; -public class AuthenticationTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) +public class AuthenticationTests : TestBase { [Test] [NonParallelizable] // Sets environment variable @@ -419,8 +419,6 @@ public async Task ProvidePasswordCallback_is_used() conn.Open(); Assert.That(getPasswordDelegateWasCalled, "ProvidePasswordCallback delegate not used"); - // Do this again, since with multiplexing the very first connection attempt is done via - // the non-multiplexing path, to surface any exceptions. 
NpgsqlConnection.ClearPool(conn); conn.Close(); getPasswordDelegateWasCalled = false; @@ -445,8 +443,6 @@ public void ProvidePasswordCallback_is_not_used() { conn.Open(); - // Do this again, since with multiplexing the very first connection attempt is done via - // the non-multiplexing path, to surface any exceptions. NpgsqlConnection.ClearPool(conn); conn.Close(); conn.Open(); diff --git a/test/Npgsql.Tests/BatchTests.cs b/test/Npgsql.Tests/BatchTests.cs index d1df99faca..0a8daccac7 100644 --- a/test/Npgsql.Tests/BatchTests.cs +++ b/test/Npgsql.Tests/BatchTests.cs @@ -8,11 +8,9 @@ namespace Npgsql.Tests; -[TestFixture(MultiplexingMode.NonMultiplexing, CommandBehavior.Default)] -[TestFixture(MultiplexingMode.Multiplexing, CommandBehavior.Default)] -[TestFixture(MultiplexingMode.NonMultiplexing, CommandBehavior.SequentialAccess)] -[TestFixture(MultiplexingMode.Multiplexing, CommandBehavior.SequentialAccess)] -public class BatchTests : MultiplexingTestBase, IDisposable +[TestFixture(CommandBehavior.Default)] +[TestFixture(CommandBehavior.SequentialAccess)] +public class BatchTests : TestBase, IDisposable { #region Parameters @@ -825,7 +823,7 @@ public async Task Batch_dispose_reuse() NpgsqlDataSource? 
_dataSource; protected override NpgsqlDataSource DataSource => _dataSource ??= CreateDataSource(csb => csb.IncludeFailedBatchedCommand = true); - public BatchTests(MultiplexingMode multiplexingMode, CommandBehavior behavior) : base(multiplexingMode) + public BatchTests(CommandBehavior behavior) { Behavior = behavior; IsSequential = (Behavior & CommandBehavior.SequentialAccess) != 0; diff --git a/test/Npgsql.Tests/BugTests.cs b/test/Npgsql.Tests/BugTests.cs index b3cd644afd..5c0b77b1dd 100644 --- a/test/Npgsql.Tests/BugTests.cs +++ b/test/Npgsql.Tests/BugTests.cs @@ -1333,51 +1333,6 @@ public async Task Bug3924() } } - [Test] - [IssueLink("https://github.com/npgsql/npgsql/issues/4099")] - public async Task Bug4099() - { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - Multiplexing = true, - MaxPoolSize = 1 - }; - await using var postmaster = PgPostmasterMock.Start(csb.ConnectionString); - await using var dataSource = CreateDataSource(postmaster.ConnectionString); - await using var firstConn = await dataSource.OpenConnectionAsync(); - await using var secondConn = await dataSource.OpenConnectionAsync(); - - var firstQuery = firstConn.ExecuteScalarAsync("SELECT data"); - - var server = await postmaster.WaitForServerConnection(); - await server.ExpectExtendedQuery(); - - var secondQuery = secondConn.ExecuteScalarAsync("SELECT other_data"); - await server.ExpectExtendedQuery(); - - var data = new byte[10000]; - await server - .WriteParseComplete() - .WriteBindComplete() - .WriteRowDescription(new FieldDescription(ByteaOid)) - .WriteDataRowWithFlush(data); - - var otherData = new byte[10]; - await server - .WriteCommandComplete() - .WriteReadyForQuery() - .WriteParseComplete() - .WriteBindComplete() - .WriteRowDescription(new FieldDescription(ByteaOid)) - .WriteDataRow(otherData) - .WriteCommandComplete() - .WriteReadyForQuery() - .FlushAsync(); - - Assert.That(data, Is.EquivalentTo((byte[])(await firstQuery)!)); - Assert.That(otherData, 
Is.EquivalentTo((byte[])(await secondQuery)!)); - } - [Test] [IssueLink("https://github.com/npgsql/npgsql/issues/4123")] public async Task Bug4123() diff --git a/test/Npgsql.Tests/CommandParameterTests.cs b/test/Npgsql.Tests/CommandParameterTests.cs index a0d88dccd6..adc5d311a5 100644 --- a/test/Npgsql.Tests/CommandParameterTests.cs +++ b/test/Npgsql.Tests/CommandParameterTests.cs @@ -6,7 +6,7 @@ namespace Npgsql.Tests; -public class CommandParameterTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) +public class CommandParameterTests : TestBase { [Test] [TestCase(CommandBehavior.Default)] @@ -30,9 +30,6 @@ public async Task Input_and_output_parameters(CommandBehavior behavior) [Test] public async Task Send_NpgsqlDbType_Unknown([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepared)] PrepareOrNot prepare) { - if (prepare == PrepareOrNot.Prepared && IsMultiplexing) - return; - using var conn = await OpenConnectionAsync(); using var cmd = new NpgsqlCommand("SELECT @p::TIMESTAMP", conn); cmd.CommandText = "SELECT @p::TIMESTAMP"; diff --git a/test/Npgsql.Tests/CommandTests.cs b/test/Npgsql.Tests/CommandTests.cs index 3288cfe6af..7c6888c9a7 100644 --- a/test/Npgsql.Tests/CommandTests.cs +++ b/test/Npgsql.Tests/CommandTests.cs @@ -15,7 +15,7 @@ namespace Npgsql.Tests; -public class CommandTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) +public class CommandTests : TestBase { static uint Int4Oid => PostgresMinimalDatabaseInfo.DefaultTypeCatalog.GetOid(DataTypeNames.Int4).Value; static uint TextOid => PostgresMinimalDatabaseInfo.DefaultTypeCatalog.GetOid(DataTypeNames.Text).Value; @@ -41,7 +41,7 @@ public async Task Multiple_statements(bool[] queries) { await using var cmd = conn.CreateCommand(); cmd.CommandText = sql; - if (prepare && !IsMultiplexing) + if (prepare) await cmd.PrepareAsync(); await using var reader = await cmd.ExecuteReaderAsync(); var numResultSets = queries.Count(q => q); @@ -57,9 +57,6 @@ 
public async Task Multiple_statements(bool[] queries) [Test] public async Task Multiple_statements_with_parameters([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepared)] PrepareOrNot prepare) { - if (prepare == PrepareOrNot.Prepared && IsMultiplexing) - return; - await using var conn = await OpenConnectionAsync(); await using var cmd = conn.CreateCommand(); cmd.CommandText = "SELECT @p1; SELECT @p2"; @@ -83,9 +80,6 @@ public async Task Multiple_statements_with_parameters([Values(PrepareOrNot.NotPr [Test] public async Task SingleRow_legacy_batching([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepared)] PrepareOrNot prepare) { - if (prepare == PrepareOrNot.Prepared && IsMultiplexing) - return; - using var conn = await OpenConnectionAsync(); using var cmd = new NpgsqlCommand("SELECT 1; SELECT 2", conn); if (prepare == PrepareOrNot.Prepared) @@ -169,9 +163,6 @@ public async Task Named_parameters_are_not_supported_when_EnableSqlParsing_is_di [IssueLink("https://github.com/npgsql/npgsql/issues/327")] public async Task Timeout() { - if (IsMultiplexing) - return; // Multiplexing, Timeout - await using var dataSource = CreateDataSource(csb => csb.CommandTimeout = 1); await using var conn = await dataSource.OpenConnectionAsync(); await using var cmd = CreateSleepCommand(conn, 10); @@ -186,9 +177,6 @@ public async Task Timeout() [IssueLink("https://github.com/npgsql/npgsql/issues/607")] public async Task Timeout_async_soft() { - if (IsMultiplexing) - return; // Multiplexing, Timeout - await using var dataSource = CreateDataSource(csb => csb.CommandTimeout = 1); await using var conn = await dataSource.OpenConnectionAsync(); await using var cmd = CreateSleepCommand(conn, 10); @@ -203,9 +191,6 @@ public async Task Timeout_async_soft() [IssueLink("https://github.com/npgsql/npgsql/issues/607")] public async Task Timeout_async_hard() { - if (IsMultiplexing) - return; // Multiplexing, Timeout - var builder = new NpgsqlConnectionStringBuilder(ConnectionString) { CommandTimeout 
= 1 }; await using var postmasterMock = PgPostmasterMock.Start(builder.ConnectionString); await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); @@ -266,9 +251,6 @@ public async Task Timeout_switch_connection() [Test] public async Task Prepare_timeout_hard([Values] SyncOrAsync async) { - if (IsMultiplexing) - return; // Multiplexing, Timeout - var builder = new NpgsqlConnectionStringBuilder(ConnectionString) { CommandTimeout = 1 }; await using var postmasterMock = PgPostmasterMock.Start(builder.ConnectionString); await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); @@ -301,9 +283,6 @@ public async Task Prepare_timeout_hard([Values] SyncOrAsync async) [Test, Description("Basic cancellation scenario")] public async Task Cancel() { - if (IsMultiplexing) - return; - await using var conn = await OpenConnectionAsync(); await using var cmd = CreateSleepCommand(conn, 5); @@ -321,9 +300,6 @@ public async Task Cancel() [Test] public async Task Cancel_async_immediately() { - if (IsMultiplexing) - return; // Multiplexing, cancellation - await using var conn = await OpenConnectionAsync(); await using var cmd = conn.CreateCommand(); cmd.CommandText = "SELECT 1"; @@ -340,9 +316,6 @@ public async Task Cancel_async_immediately() [Test, Description("Cancels an async query with the cancellation token, with successful PG cancellation")] public async Task Cancel_async_soft() { - if (IsMultiplexing) - return; // Multiplexing, cancellation - await using var conn = await OpenConnectionAsync(); await using var cmd = CreateSleepCommand(conn); using var cancellationSource = new CancellationTokenSource(); @@ -362,9 +335,6 @@ public async Task Cancel_async_soft() [IssueLink("https://github.com/npgsql/npgsql/issues/5191")] public async Task Cancel_async_soft_with_prepended_query() { - if (IsMultiplexing) - return; // Multiplexing, cancellation - await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); await using var 
dataSource = CreateDataSource(postmasterMock.ConnectionString); await using var conn = await dataSource.OpenConnectionAsync(); @@ -403,9 +373,6 @@ await server [Test, Description("Cancels an async query with the cancellation token, with unsuccessful PG cancellation (socket break)")] public async Task Cancel_async_hard() { - if (IsMultiplexing) - return; // Multiplexing, cancellation - await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); await using var conn = await dataSource.OpenConnectionAsync(); @@ -431,9 +398,6 @@ public async Task Cancel_async_hard() [Ignore("https://github.com/npgsql/npgsql/issues/4668")] public async Task Bug3466([Values(false, true)] bool isBroken) { - if (IsMultiplexing) - return; // Multiplexing, cancellation - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) { Pooling = false @@ -589,9 +553,6 @@ public async Task CloseConnection_with_exception() [Test] public async Task SingleRow([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepared)] PrepareOrNot prepare) { - if (prepare == PrepareOrNot.Prepared && IsMultiplexing) - return; - await using var conn = await OpenConnectionAsync(); await using var cmd = new NpgsqlCommand("SELECT 1, 2 UNION SELECT 3, 4", conn); if (prepare == PrepareOrNot.Prepared) @@ -684,17 +645,9 @@ public async Task Non_standards_conforming_strings() await using var dataSource = CreateDataSource(); await using var conn = await dataSource.OpenConnectionAsync(); - if (IsMultiplexing) - { - Assert.That(async () => await conn.ExecuteNonQueryAsync("set standard_conforming_strings=off"), - Throws.Exception.TypeOf()); - } - else - { - await conn.ExecuteNonQueryAsync("set standard_conforming_strings=off"); - Assert.That(await conn.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1)); - await conn.ExecuteNonQueryAsync("set standard_conforming_strings=on"); - } + await conn.ExecuteNonQueryAsync("set 
standard_conforming_strings=off"); + Assert.That(await conn.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1)); + await conn.ExecuteNonQueryAsync("set standard_conforming_strings=on"); } [Test] @@ -846,9 +799,6 @@ public async Task Bug1006158_output_parameters() [Test] public async Task Bug1010788_UpdateRowSource() { - if (IsMultiplexing) - return; - using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "id SERIAL PRIMARY KEY, name TEXT"); @@ -891,9 +841,6 @@ public async Task Invalid_UTF8() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/395")] public async Task Use_across_connection_change([Values(PrepareOrNot.Prepared, PrepareOrNot.NotPrepared)] PrepareOrNot prepare) { - if (prepare == PrepareOrNot.Prepared && IsMultiplexing) - return; - using var conn1 = await OpenConnectionAsync(); using var conn2 = await OpenConnectionAsync(); using var cmd = new NpgsqlCommand("SELECT 1", conn1); @@ -910,9 +857,6 @@ public async Task Use_across_connection_change([Values(PrepareOrNot.Prepared, Pr [Test] public async Task Use_after_reload_types_invalidates_cached_infos() { - if (IsMultiplexing) - return; - using var conn1 = await OpenConnectionAsync(); using var cmd = new NpgsqlCommand("SELECT 1", conn1); cmd.Prepare(); @@ -934,7 +878,7 @@ public async Task Use_after_reload_types_invalidates_cached_infos() [Test] public async Task Parameter_overflow_message_length_throws() { - // Create a separate dataSource because of Multiplexing (otherwise we can break unrelated queries) + // Create a separate data source to avoid breaking unrelated queries await using var dataSource = CreateDataSource(); await using var conn = await dataSource.OpenConnectionAsync(); await using var cmd = new NpgsqlCommand("SELECT @a, @b, @c, @d, @e, @f, @g, @h", conn); @@ -1000,7 +944,7 @@ record BigComposite [Test] public async Task Array_overflow_message_length_throws() { - // Create a separate dataSource because of Multiplexing (otherwise we can break unrelated 
queries) + // Create a separate data source to avoid breaking unrelated queries await using var dataSource = CreateDataSource(); await using var conn = await dataSource.OpenConnectionAsync(); @@ -1133,9 +1077,6 @@ public void Connection_not_open_throws() [Test] public async Task ExecuteNonQuery_Throws_PostgresException([Values] bool async) { - if (!async && IsMultiplexing) - return; - await using var conn = await OpenConnectionAsync(); var table1 = await CreateTempTable(conn, "id integer PRIMARY key, t varchar(40)"); @@ -1152,9 +1093,6 @@ public async Task ExecuteNonQuery_Throws_PostgresException([Values] bool async) [Test] public async Task ExecuteScalar_Throws_PostgresException([Values] bool async) { - if (!async && IsMultiplexing) - return; - await using var conn = await OpenConnectionAsync(); var table1 = await CreateTempTable(conn, "id integer PRIMARY key, t varchar(40)"); @@ -1171,9 +1109,6 @@ public async Task ExecuteScalar_Throws_PostgresException([Values] bool async) [Test] public async Task ExecuteReader_Throws_PostgresException([Values] bool async) { - if (!async && IsMultiplexing) - return; - await using var conn = await OpenConnectionAsync(); var table1 = await CreateTempTable(conn, "id integer PRIMARY key, t varchar(40)"); @@ -1241,9 +1176,6 @@ public void Command_recycled_resets_CommandType() [IssueLink("https://github.com/npgsql/npgsql/issues/2795")] public async Task Many_parameters([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepared)] PrepareOrNot prepare) { - if (prepare == PrepareOrNot.Prepared && IsMultiplexing) - return; - using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "some_column INT"); using var cmd = new NpgsqlCommand { Connection = conn }; @@ -1271,9 +1203,6 @@ public async Task Many_parameters([Values(PrepareOrNot.NotPrepared, PrepareOrNot [IssueLink("https://github.com/npgsql/npgsql/issues/2703")] public async Task Too_many_parameters_throws([Values(PrepareOrNot.NotPrepared, 
PrepareOrNot.Prepared)] PrepareOrNot prepare) { - if (prepare == PrepareOrNot.Prepared && IsMultiplexing) - return; - using var conn = await OpenConnectionAsync(); using var cmd = new NpgsqlCommand { Connection = conn }; var sb = new StringBuilder("SOME RANDOM SQL "); @@ -1358,9 +1287,6 @@ public async Task Batched_big_statements_do_not_deadlock() [Test] public void Batched_small_then_big_statements_do_not_deadlock_in_sync_io() { - if (IsMultiplexing) - return; // Multiplexing, sync I/O - // This makes sure we switch to async writing for batches, starting from the 2nd statement at the latest. // Otherwise, a small first first statement followed by a huge big one could cause us to deadlock, as we're stuck // synchronously sending the 2nd statement while PG is stuck sending the results of the 1st. @@ -1399,9 +1325,6 @@ public async Task Same_command_different_param_instances() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/3509"), Ignore("Flaky")] public async Task Bug3509() { - if (IsMultiplexing) - return; - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) { KeepAlive = 1, @@ -1448,9 +1371,6 @@ public async Task Cached_command_double_dispose() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4330")] public async Task Prepare_with_positional_placeholders_after_named() { - if (IsMultiplexing) - return; // Explicit preparation - await using var conn = await OpenConnectionAsync(); await using var command = new NpgsqlCommand("SELECT @p", conn); @@ -1468,9 +1388,6 @@ public async Task Prepare_with_positional_placeholders_after_named() [Description("Most of 08* errors are coming whenever there was an error while connecting to a remote server from a cluster, so the connection to the cluster is still OK")] public async Task Postgres_connection_errors_not_break_connection() { - if (IsMultiplexing) - return; - await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); await using var dataSource = 
CreateDataSource(postmasterMock.ConnectionString); await using var conn = await dataSource.OpenConnectionAsync(); @@ -1494,9 +1411,6 @@ await server [Description("Concurrent write and read failure can lead to deadlocks while cleaning up the connector.")] public async Task Concurrent_read_write_failure_deadlock() { - if (IsMultiplexing) - return; - await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); await using var conn = await dataSource.OpenConnectionAsync(); @@ -1518,9 +1432,6 @@ public async Task Concurrent_read_write_failure_deadlock() [Explicit("Flaky due to #5033")] public async Task Not_cancel_prepended_query([Values] bool failPrependedQuery) { - if (IsMultiplexing) - return; - await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); var csb = new NpgsqlConnectionStringBuilder(postmasterMock.ConnectionString) { @@ -1591,9 +1502,6 @@ await server [Test] public async Task Cancel_while_reading_from_long_running_query() { - if (IsMultiplexing) - return; - await using var conn = await OpenConnectionAsync(); await using var cmd = conn.CreateCommand(); @@ -1628,9 +1536,6 @@ SELECT generate_series(1, 1000000) AS "i" [Description("Make sure we do not lose unread messages after resetting oversize buffer")] public async Task Oversize_buffer_lost_messages() { - if (IsMultiplexing) - return; - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) { NoResetOnClose = true diff --git a/test/Npgsql.Tests/ConnectionTests.cs b/test/Npgsql.Tests/ConnectionTests.cs index f776e3bc4b..d33441488c 100644 --- a/test/Npgsql.Tests/ConnectionTests.cs +++ b/test/Npgsql.Tests/ConnectionTests.cs @@ -22,7 +22,7 @@ namespace Npgsql.Tests; -public class ConnectionTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) +public class ConnectionTests : TestBase { [Test, Description("Makes sure the connection goes through the proper state 
lifecycle")] public async Task Basic_lifecycle() @@ -72,9 +72,6 @@ public async Task Basic_lifecycle() [Test, Description("Makes sure the connection goes through the proper state lifecycle")] public async Task Broken_lifecycle([Values] bool openFromClose) { - if (IsMultiplexing) - return; - await using var dataSource = CreateDataSource(); await using var conn = dataSource.CreateConnection(); @@ -133,9 +130,6 @@ public async Task Broken_lifecycle([Values] bool openFromClose) [Test] public async Task Break_while_open() { - if (IsMultiplexing) - return; - await using var dataSource = CreateDataSource(); await using var conn = await dataSource.OpenConnectionAsync(); @@ -191,9 +185,6 @@ public async Task Connection_refused_async(bool pooled) [Test] public void Invalid_Username() { - if (IsMultiplexing) - Assert.Ignore(); - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) { Username = "unknown", Pooling = false @@ -226,9 +217,6 @@ public void Mandatory_connection_string_params() [Test, Description("Reuses the same connection instance for a failed connection, then a successful one")] public async Task Fail_connect_then_succeed([Values] bool pooling) { - if (IsMultiplexing && !pooling) // Multiplexing doesn't work without pooling - return; - var dbName = GetUniqueIdentifier(nameof(Fail_connect_then_succeed)); await using var conn1 = await OpenConnectionAsync(); await conn1.ExecuteNonQueryAsync($"DROP DATABASE IF EXISTS \"{dbName}\""); @@ -581,10 +569,6 @@ public void DataSource_property() conn.ConnectionString = csb.ConnectionString; Assert.That(conn.DataSource, Is.EqualTo($"tcp://{csb.Host}:{csb.Port}")); - // Multiplexing isn't supported with multiple hosts - if (IsMultiplexing) - return; - csb.Host = "127.0.0.1, 127.0.0.2"; conn.ConnectionString = csb.ConnectionString; Assert.That(conn.DataSource, Is.EqualTo(string.Empty)); @@ -739,9 +723,6 @@ public void Set_connection_string_to_empty() [Parallelizable(ParallelScope.None)] public async Task 
Set_Schemas_And_Load_Relevant_Types(string testSchema, string otherSchema, bool enabled) { - if (IsMultiplexing) - return; - await using var conn1 = await OpenConnectionAsync(); try { @@ -824,9 +805,6 @@ public async Task No_database_defaults_to_username() [TestCase(true, TestName = nameof(Break_connector_in_pool) + "_with_keep_alive")] public async Task Break_connector_in_pool(bool keepAlive) { - if (IsMultiplexing) - Assert.Ignore("Multiplexing, hanging"); - var dataSourceBuilder = CreateDataSourceBuilder(); dataSourceBuilder.ConnectionStringBuilder.MaxPoolSize = 1; if (keepAlive) @@ -863,9 +841,6 @@ public async Task Break_connector_in_pool(bool keepAlive) [IssueLink("https://github.com/npgsql/npgsql/issues/4603")] public async Task Reload_types_keepalive_concurrent() { - if (IsMultiplexing) - Assert.Ignore("Multiplexing doesn't support keepalive"); - await using var dataSource = CreateDataSource(csb => csb.KeepAlive = 1); await using var conn = await dataSource.OpenConnectionAsync(); @@ -915,9 +890,6 @@ public void ChangeDatabase_connection_on_closed_connection_throws() [Test, Description("Tests closing a connector while a reader is open")] public async Task Close_during_read([Values(PooledOrNot.Pooled, PooledOrNot.Unpooled)] PooledOrNot pooled) { - if (IsMultiplexing && pooled == PooledOrNot.Unpooled) - return; // Multiplexing requires pooling - await using var dataSource = CreateDataSource(csb => csb.Pooling = pooled == PooledOrNot.Pooled); await using var conn = await dataSource.OpenConnectionAsync(); await using (var cmd = new NpgsqlCommand("SELECT 1", conn)) @@ -1037,8 +1009,6 @@ await conn.ExecuteNonQueryAsync($@" [Test, Description("Makes sure that concurrent use of the connection throws an exception")] public async Task Concurrent_use_throws() { - if (IsMultiplexing) - Assert.Ignore("Multiplexing: fails"); using var conn = await OpenConnectionAsync(); using (var cmd = new NpgsqlCommand("SELECT 1", conn)) using (await cmd.ExecuteReaderAsync()) @@ -1061,9 
+1031,6 @@ public async Task Concurrent_use_throws() [IssueLink("https://github.com/npgsql/npgsql/issues/783")] public void PersistSecurityInfo_is_true([Values(true, false)] bool pooling) { - if (IsMultiplexing && !pooling) - return; - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) { PersistSecurityInfo = true, @@ -1080,9 +1047,6 @@ public void PersistSecurityInfo_is_true([Values(true, false)] bool pooling) [IssueLink("https://github.com/npgsql/npgsql/issues/783")] public void No_password_without_PersistSecurityInfo([Values(true, false)] bool pooling) { - if (IsMultiplexing && !pooling) - return; - var connString = new NpgsqlConnectionStringBuilder(ConnectionString) { Pooling = pooling @@ -1212,8 +1176,6 @@ public async Task Clone_with_data_source() [Test] public async Task DatabaseInfo_is_shared() { - if (IsMultiplexing) - return; // Create a temp pool to make sure the second connection will be new and not idle await using var dataSource = CreateDataSource(); await using var conn1 = await dataSource.OpenConnectionAsync(); @@ -1262,9 +1224,6 @@ public async Task Many_open_close_with_transaction() [IssueLink("https://github.com/npgsql/npgsql/issues/736")] public async Task Rollback_on_close() { - if (IsMultiplexing) - Assert.Ignore(); - // Npgsql 3.0.0 to 3.0.4 prepended a rollback for the next time the connector is used, as an optimization. // This caused some issues (#927) and was removed. 
@@ -1290,10 +1249,6 @@ public async Task Rollback_on_close() [IssueLink("https://github.com/npgsql/npgsql/issues/777")] public async Task Exception_during_close() { - // Pooling must be on to use multiplexing - if (IsMultiplexing) - return; - await using var dataSource = CreateDataSource(csb => csb.Pooling = false); await using var conn = await dataSource.OpenConnectionAsync(); var connectorId = conn.ProcessID; @@ -1341,9 +1296,6 @@ public async Task NoTypeLoading() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1158")] public async Task Table_named_record() { - if (IsMultiplexing) - Assert.Ignore("Multiplexing, ReloadTypes"); - using var conn = await OpenConnectionAsync(); await conn.ExecuteNonQueryAsync(@" @@ -1422,9 +1374,6 @@ await adminConn.ExecuteNonQueryAsync( [Test] public async Task Oversize_buffer() { - if (IsMultiplexing) - return; - await using var dataSource = CreateDataSource(); await using var conn = await dataSource.OpenConnectionAsync(); var csb = new NpgsqlConnectionStringBuilder(ConnectionString); @@ -1480,9 +1429,6 @@ public async Task TcpKeepalive() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/3511")] public async Task Keepalive_with_failed_transaction() { - if (IsMultiplexing) - return; - await using var dataSource = CreateDataSource(csb => csb.KeepAlive = 1); await using var conn = await dataSource.OpenConnectionAsync(); await using var tx = await conn.BeginTransactionAsync(); @@ -1502,9 +1448,6 @@ public async Task Keepalive_with_failed_transaction() [Test] public async Task Change_parameter() { - if (IsMultiplexing) - return; - using var conn = await OpenConnectionAsync(); var defaultApplicationName = conn.PostgresParameters["application_name"]; await conn.ExecuteNonQueryAsync("SET application_name = 'some_test_value'"); @@ -1547,7 +1490,7 @@ public async Task NoResetOnClose(bool noResetOnClose) await conn.CloseAsync(); await conn.OpenAsync(); Assert.That(await conn.ExecuteScalarAsync("SHOW application_name"), 
Is.EqualTo( - noResetOnClose || IsMultiplexing + noResetOnClose ? "modified" : originalApplicationName)); } @@ -1556,9 +1499,6 @@ public async Task NoResetOnClose(bool noResetOnClose) [Description("Test whether the internal NpgsqlConnection.Open method stays on the same thread with async=false")] public async Task Sync_open_blocked_same_thread() { - if (IsMultiplexing) - return; - await using var dataSource = CreateDataSource(csb => { csb.MaxPoolSize = 1; @@ -1598,9 +1538,6 @@ public async Task Sync_open_blocked_same_thread() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/6427")] public async Task Gss_encryption_retry_does_not_clear_pool() { - if (IsMultiplexing) - return; - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) { GssEncryptionMode = GssEncryptionMode.Prefer @@ -1628,9 +1565,6 @@ public async Task Gss_encryption_retry_does_not_clear_pool() [Test] public async Task PhysicalConnectionInitializer_sync() { - if (IsMultiplexing) // Sync I/O - return; - await using var adminConn = await OpenConnectionAsync(); var table = await CreateTempTable(adminConn, "ID INTEGER"); @@ -1655,11 +1589,6 @@ public async Task PhysicalConnectionInitializer_sync() [Test] public async Task PhysicalConnectionInitializer_async() { - // With multiplexing the connector might become idle at undetermined point after the query is executed. - // Which is why we ignore it. 
- if (IsMultiplexing) - return; - await using var adminConn = await OpenConnectionAsync(); var table = await CreateTempTable(adminConn, "ID INTEGER"); @@ -1684,9 +1613,6 @@ public async Task PhysicalConnectionInitializer_async() [Test] public async Task PhysicalConnectionInitializer_sync_with_break() { - if (IsMultiplexing) // Sync I/O - return; - var dataSourceBuilder = CreateDataSourceBuilder(); dataSourceBuilder.UsePhysicalConnectionInitializer( conn => @@ -1727,9 +1653,7 @@ public async Task PhysicalConnectionInitializer_async_with_break() [Test] public async Task PhysicalConnectionInitializer_async_throws_on_second_open() { - // With multiplexing a physical connection might open on NpgsqlConnection.OpenAsync (if there was no completed bootstrap beforehand) - // or on NpgsqlCommand.ExecuteReaderAsync. - // We've already tested the first case in PhysicalConnectionInitializer_async_throws above, testing the second one below. + // We've already tested a simpler case in PhysicalConnectionInitializer_async_throws above, testing a second one below. 
var count = 0; var dataSourceBuilder = CreateDataSourceBuilder(); dataSourceBuilder.UsePhysicalConnectionInitializer( @@ -1745,18 +1669,10 @@ public async Task PhysicalConnectionInitializer_async_throws_on_second_open() await using var conn1 = dataSource.CreateConnection(); Assert.DoesNotThrowAsync(async () => await conn1.OpenAsync()); - // We start a transaction specifically for multiplexing (to bind a connector to the connection) await using var tx = await conn1.BeginTransactionAsync(); await using var conn2 = dataSource.CreateConnection(); - Exception exception; - if (IsMultiplexing) - { - await conn2.OpenAsync(); - exception = Assert.ThrowsAsync(async () => await conn2.BeginTransactionAsync())!; - } - else - exception = Assert.ThrowsAsync(async () => await conn2.OpenAsync())!; + var exception = Assert.ThrowsAsync(async () => await conn2.OpenAsync())!; Assert.That(exception.Message, Is.EqualTo("INTENTIONAL FAILURE")); } @@ -1922,9 +1838,6 @@ public void Auth_methods_are_trimmed() [IssueLink("https://github.com/npgsql/npgsql/issues/4425")] public async Task Breaking_connection_while_loading_database_info() { - if (IsMultiplexing) - return; - await using var dataSource = CreateDataSource(); await using var firstConn = dataSource.CreateConnection(); @@ -2011,12 +1924,9 @@ public async Task Log_Open_Close_pooled() AssertLoggingStateContains(closedConnectionEvent, "Port", port); AssertLoggingStateContains(closedConnectionEvent, "Database", database); - if (!IsMultiplexing) - { - AssertLoggingStateContains(openedConnectionEvent, "ConnectorId", processId); - AssertLoggingStateContains(closingConnectionEvent, "ConnectorId", processId); - AssertLoggingStateContains(closedConnectionEvent, "ConnectorId", processId); - } + AssertLoggingStateContains(openedConnectionEvent, "ConnectorId", processId); + AssertLoggingStateContains(closingConnectionEvent, "ConnectorId", processId); + AssertLoggingStateContains(closedConnectionEvent, "ConnectorId", processId); var ids = new[] { 
@@ -2033,9 +1943,6 @@ public async Task Log_Open_Close_pooled() [Test] public async Task Log_Open_Close_physical() { - if (IsMultiplexing) - return; - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) { Pooling = false }; await using var dataSource = CreateLoggingDataSource(out var listLoggerProvider, csb.ToString()); await using var conn = dataSource.CreateConnection(); diff --git a/test/Npgsql.Tests/CopyTests.cs b/test/Npgsql.Tests/CopyTests.cs index eb43420917..6d1421fe82 100644 --- a/test/Npgsql.Tests/CopyTests.cs +++ b/test/Npgsql.Tests/CopyTests.cs @@ -17,7 +17,7 @@ namespace Npgsql.Tests; -public class CopyTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) +public class CopyTests : TestBase { #region Issue 2257 @@ -227,8 +227,6 @@ public async Task Wrong_table_definition_raw_binary_copy() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/2330")] public async Task Wrong_format_raw_binary_copy() { - if (IsMultiplexing) - Assert.Ignore("Multiplexing: fails"); using (var conn = await OpenConnectionAsync()) { var table = await CreateTempTable(conn, "blob BYTEA"); @@ -512,8 +510,6 @@ public async Task Wrong_table_definition_binary_import() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/2330")] public async Task Wrong_format_binary_import() { - if (IsMultiplexing) - Assert.Ignore("Multiplexing: fails"); using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "blob BYTEA"); Assert.Throws(() => conn.BeginBinaryImport($"COPY {table} (blob) FROM STDIN")); @@ -533,8 +529,6 @@ public async Task Wrong_table_definition_binary_export() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5457")] public async Task MixedOperations() { - if (IsMultiplexing) - Assert.Ignore("Multiplexing: fails"); using var conn = await OpenConnectionAsync(); using var reader = conn.BeginBinaryExport(""" @@ -558,8 +552,6 @@ public async Task MixedOperations() [Test] public async Task 
ReadMoreColumnsThanExist() { - if (IsMultiplexing) - Assert.Ignore("Multiplexing: fails"); using var conn = await OpenConnectionAsync(); using var reader = conn.BeginBinaryExport(""" @@ -585,8 +577,6 @@ public async Task ReadMoreColumnsThanExist() [Test] public async Task ReadZeroSizedColumns() { - if (IsMultiplexing) - Assert.Ignore("Multiplexing: fails"); using var conn = await OpenConnectionAsync(); using var reader = conn.BeginBinaryExport(""" @@ -617,8 +607,6 @@ public async Task ReadZeroSizedColumns() [Test] public async Task ReadConverterResolverType() { - if (IsMultiplexing) - Assert.Ignore("Multiplexing: fails"); using var conn = await OpenConnectionAsync(); using (var reader = conn.BeginBinaryExport(""" @@ -653,8 +641,6 @@ public async Task ReadConverterResolverType() [Test] public async Task StreamingRead() { - if (IsMultiplexing) - Assert.Ignore("Multiplexing: fails"); using var conn = await OpenConnectionAsync(); var str = new string('a', PgReader.MaxPreparedTextReaderSize + 1); @@ -668,8 +654,6 @@ public async Task StreamingRead() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/2330")] public async Task Wrong_format_binary_export() { - if (IsMultiplexing) - Assert.Ignore("Multiplexing: fails"); using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "blob BYTEA"); Assert.Throws(() => conn.BeginBinaryExport($"COPY {table} (blob) TO STDOUT")); @@ -679,9 +663,6 @@ public async Task Wrong_format_binary_export() [Test, NonParallelizable, IssueLink("https://github.com/npgsql/npgsql/issues/661")] public async Task Unexpected_exception_binary_import() { - if (IsMultiplexing) - return; - // Use a private data source since we terminate the connection below (affects database state) await using var dataSource = CreateDataSource(); await using var conn = await dataSource.OpenConnectionAsync(); @@ -1122,8 +1103,6 @@ await conn.ExecuteNonQueryAsync($@" [Test, IssueLink("https://github.com/npgsql/npgsql/issues/2330")] public 
async Task Wrong_table_definition_text_import() { - if (IsMultiplexing) - Assert.Ignore("Multiplexing: fails"); using var conn = await OpenConnectionAsync(); Assert.Throws(() => conn.BeginTextImport("COPY table_is_not_exist (blob) FROM STDIN")); Assert.That(conn.FullState, Is.EqualTo(ConnectionState.Open)); @@ -1133,8 +1112,6 @@ public async Task Wrong_table_definition_text_import() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/2330")] public async Task Wrong_format_text_import() { - if (IsMultiplexing) - Assert.Ignore("Multiplexing: fails"); using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "blob BYTEA"); Assert.Throws(() => conn.BeginTextImport($"COPY {table} (blob) FROM STDIN BINARY")); @@ -1144,8 +1121,6 @@ public async Task Wrong_format_text_import() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/2330")] public async Task Wrong_table_definition_text_export() { - if (IsMultiplexing) - Assert.Ignore("Multiplexing: fails"); using var conn = await OpenConnectionAsync(); Assert.Throws(() => conn.BeginTextExport("COPY table_is_not_exist (blob) TO STDOUT")); Assert.That(conn.FullState, Is.EqualTo(ConnectionState.Open)); @@ -1155,8 +1130,6 @@ public async Task Wrong_table_definition_text_export() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/2330")] public async Task Wrong_format_text_export() { - if (IsMultiplexing) - Assert.Ignore("Multiplexing: fails"); using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "blob BYTEA"); Assert.Throws(() => conn.BeginTextExport($"COPY {table} (blob) TO STDOUT BINARY")); @@ -1290,7 +1263,7 @@ public async Task Write_different_types() Assert.That(await conn.ExecuteScalarAsync($"SELECT COUNT(*) FROM {table}"), Is.EqualTo(2)); } - [Test, Description("Tests nested binding scopes in multiplexing")] + [Test] public async Task Within_transaction() { using var conn = await OpenConnectionAsync(); @@ -1333,8 +1306,7 @@ public async Task 
Within_transaction() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4199")] public async Task Copy_from_is_not_supported_in_regular_command_execution() { - // Run in a separate pool to protect other queries in multiplexing - // because we're going to break the connection on CopyInResponse + // Run in a separate pool because we're going to break the connection on CopyInResponse await using var dataSource = CreateDataSource(); await using var conn = await dataSource.OpenConnectionAsync(); var table = await CreateTempTable(conn, "foo INT"); @@ -1345,8 +1317,7 @@ public async Task Copy_from_is_not_supported_in_regular_command_execution() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4974")] public async Task Copy_to_is_not_supported_in_regular_command_execution() { - // Run in a separate pool to protect other queries in multiplexing - // because we're going to break the connection on CopyInResponse + // Run in a separate pool because we're going to break the connection on CopyInResponse await using var dataSource = CreateDataSource(); await using var conn = await dataSource.OpenConnectionAsync(); var table = await CreateTempTable(conn, "foo INT"); diff --git a/test/Npgsql.Tests/DataSourceTests.cs b/test/Npgsql.Tests/DataSourceTests.cs index f7aa537dd9..c2ef7bc9cb 100644 --- a/test/Npgsql.Tests/DataSourceTests.cs +++ b/test/Npgsql.Tests/DataSourceTests.cs @@ -289,39 +289,15 @@ public async Task As_DbDataSource([Values] bool async) } [Test] - public async Task Executing_command_on_disposed_datasource([Values] bool multiplexing) + public async Task Executing_command_on_disposed_datasource() { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - Multiplexing = multiplexing - }; - DbDataSource dataSource = NpgsqlDataSource.Create(csb.ConnectionString); + DbDataSource dataSource = NpgsqlDataSource.Create(ConnectionString); await using (var _ = await dataSource.OpenConnectionAsync()) {} await dataSource.DisposeAsync(); await using var 
command = dataSource.CreateCommand("SELECT 1"); Assert.ThrowsAsync(command.ExecuteNonQueryAsync); } - [Test, IssueLink("https://github.com/npgsql/npgsql/issues/4840")] - public async Task Multiplexing_connectionless_command_open_connection() - { - var csb = new NpgsqlConnectionStringBuilder(ConnectionString) - { - Multiplexing = true - }; - await using var dataSource = NpgsqlDataSource.Create(csb.ConnectionString); - - await using var conn = await dataSource.OpenConnectionAsync(); - await using var _ = await conn.BeginTransactionAsync(); - - await using var command = dataSource.CreateCommand(); - command.CommandText = "SELECT 1"; - - await using var reader = await command.ExecuteReaderAsync(); - Assert.That(reader.Read()); - Assert.That(reader.GetInt32(0), Is.EqualTo(1)); - } - [Test] public async Task Connection_string_builder_settings_are_frozen_on_Build() { diff --git a/test/Npgsql.Tests/LoggingTests.cs b/test/Npgsql.Tests/LoggingTests.cs index 76f13ab03c..0d5d0ee10d 100644 --- a/test/Npgsql.Tests/LoggingTests.cs +++ b/test/Npgsql.Tests/LoggingTests.cs @@ -8,7 +8,7 @@ namespace Npgsql.Tests; -public class LoggingTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) +public class LoggingTests : TestBase { [Test] public async Task Command_ExecuteScalar_single_statement_without_parameters() @@ -26,9 +26,7 @@ public async Task Command_ExecuteScalar_single_statement_without_parameters() Assert.That(executingCommandEvent.Message, Does.Contain("Command execution completed").And.Contains("SELECT 1")); AssertLoggingStateContains(executingCommandEvent, "CommandText", "SELECT 1"); AssertLoggingStateDoesNotContain(executingCommandEvent, "Parameters"); - - if (!IsMultiplexing) - AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); + AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); } [Test] @@ -51,9 +49,7 @@ public async Task Command_ExecuteScalar_single_statement_with_positional_paramet 
.And.Contains("Parameters: [8, NULL]")); AssertLoggingStateContains(executingCommandEvent, "CommandText", "SELECT $1, $2"); AssertLoggingStateContains(executingCommandEvent, "Parameters", new object[] { 8, "NULL" }); - - if (!IsMultiplexing) - AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); + AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); } [Test] @@ -80,9 +76,7 @@ public async Task Command_ExecuteScalar_single_statement__Should_unwrap_array_an .And.Contains("Parameters: [1024, [1, 2, 3], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, ...], [1, NULL], NULL, NULL]")); AssertLoggingStateContains(executingCommandEvent, "CommandText", "SELECT $1, $2, $3, $4, $5, $6"); AssertLoggingStateContains(executingCommandEvent, "Parameters", new object[] { 1024, "[1, 2, 3]", "[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, ...]", "[1, NULL]", "NULL", "NULL" }); - - if (!IsMultiplexing) - AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); + AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); } [Test] @@ -105,9 +99,7 @@ public async Task Command_ExecuteScalar_single_statement_with_named_parameters() .And.Contains("Parameters: [8, NULL]")); AssertLoggingStateContains(executingCommandEvent, "CommandText", "SELECT $1, $2"); AssertLoggingStateContains(executingCommandEvent, "Parameters", new object[] { 8, "NULL" }); - - if (!IsMultiplexing) - AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); + AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); } [Test] @@ -151,9 +143,7 @@ public async Task Command_ExecuteScalar_multiple_statement_without_parameters() Assert.That(batchCommands[1].CommandText, Is.EqualTo("SELECT 2")); Assert.That(batchCommands[1].Parameters, Is.Empty); AssertLoggingStateDoesNotContain(executingCommandEvent, "Parameters"); - - if (!IsMultiplexing) - AssertLoggingStateContains(executingCommandEvent, "ConnectorId", 
conn.ProcessID); + AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); } [Test] @@ -179,9 +169,7 @@ public async Task Command_ExecuteScalar_multiple_statement_with_parameters() Assert.That(batchCommands[1].CommandText, Is.EqualTo("SELECT $1")); Assert.That(batchCommands[1].Parameters.First(), Is.EqualTo(9)); AssertLoggingStateDoesNotContain(executingCommandEvent, "Parameters"); - - if (!IsMultiplexing) - AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); + AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); } [Test] @@ -205,9 +193,7 @@ public async Task Command_ExecuteScalar_multiple_statement_with_parameter_loggin Assert.That(batchCommands[0], Is.EqualTo("SELECT $1")); Assert.That(batchCommands[1], Is.EqualTo("SELECT $1")); AssertLoggingStateDoesNotContain(executingCommandEvent, "Parameters"); - - if (!IsMultiplexing) - AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); + AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); } [Test] @@ -230,9 +216,7 @@ public async Task Batch_ExecuteScalar_single_statement_without_parameters() Assert.That(executingCommandEvent.Message, Does.Contain("Command execution completed").And.Contains("SELECT 1")); AssertLoggingStateContains(executingCommandEvent, "CommandText", "SELECT 1"); AssertLoggingStateDoesNotContain(executingCommandEvent, "Parameters"); - - if (!IsMultiplexing) - AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); + AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); } [Test] @@ -259,9 +243,7 @@ public async Task Batch_ExecuteScalar_multiple_statements_with_parameters() Assert.That(executingCommandEvent.Message, Does.Contain("Batch execution completed").And.Contains("[(SELECT $1, [8]), (SELECT $1, 9, [9])]")); AssertLoggingStateDoesNotContain(executingCommandEvent, "CommandText"); 
AssertLoggingStateDoesNotContain(executingCommandEvent, "Parameters"); - - if (!IsMultiplexing) - AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); + AssertLoggingStateContains(executingCommandEvent, "ConnectorId", conn.ProcessID); var batchCommands = (IList<(string CommandText, IEnumerable Parameters)>)AssertLoggingStateContains(executingCommandEvent, "BatchCommands"); Assert.That(batchCommands.Count, Is.EqualTo(2)); diff --git a/test/Npgsql.Tests/PoolTests.cs b/test/Npgsql.Tests/PoolTests.cs index af0fc27096..4a3ecca261 100644 --- a/test/Npgsql.Tests/PoolTests.cs +++ b/test/Npgsql.Tests/PoolTests.cs @@ -468,29 +468,5 @@ await Task.WhenAll(Enumerable.Range(0, numParallelCommands) })); } - // When multiplexing, and the pool is totally saturated (at Max Pool Size and 0 idle connectors), we select - // the connector with the least commands in flight and execute on it. We must never select a connector with - // a pending transaction on it. - // TODO: Test not tested - [Test] - [Ignore("Multiplexing: fails")] - public async Task MultiplexedCommandDoesntGetExecutedOnTransactionedConnector() - { - await using var dataSource = CreateDataSource(csb => - { - csb.MaxPoolSize = 1; - csb.Timeout = 1; - }); - - await using var connWithTx = await dataSource.OpenConnectionAsync(); - await using var tx = await connWithTx.BeginTransactionAsync(); - // connWithTx should now be bound with the only physical connector available. 
- // Any commands execute should timeout - - await using var conn2 = await dataSource.OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT 1", conn2); - Assert.ThrowsAsync(() => cmd.ExecuteScalarAsync()); - } - #endregion } diff --git a/test/Npgsql.Tests/PrepareTests.cs b/test/Npgsql.Tests/PrepareTests.cs index fe01ef6fbc..1957d1091e 100644 --- a/test/Npgsql.Tests/PrepareTests.cs +++ b/test/Npgsql.Tests/PrepareTests.cs @@ -725,17 +725,6 @@ public void Prepare_multiple_commands_with_parameters() } } - [Test] - public void Multiplexing_not_supported() - { - using var dataSource = CreateDataSource(csb => csb.Multiplexing = true); - using var conn = dataSource.OpenConnection(); - using var cmd = new NpgsqlCommand("SELECT 1", conn); - - Assert.That(() => cmd.Prepare(), Throws.Exception.TypeOf()); - Assert.That(() => conn.UnprepareAll(), Throws.Exception.TypeOf()); - } - [Test] public async Task Explicitly_prepared_statement_invalidation([Values] bool prepareAfterError, [Values] bool unprepareAfterError) { diff --git a/test/Npgsql.Tests/ReaderNewSchemaTests.cs b/test/Npgsql.Tests/ReaderNewSchemaTests.cs index 2b28501aeb..3eafdc8a89 100644 --- a/test/Npgsql.Tests/ReaderNewSchemaTests.cs +++ b/test/Npgsql.Tests/ReaderNewSchemaTests.cs @@ -670,8 +670,6 @@ await conn.ExecuteNonQueryAsync($@" [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1553")] public async Task Domain_type() { - // if (IsMultiplexing) - // Assert.Ignore("Multiplexing: ReloadTypes"); using var conn = await OpenConnectionAsync(); await IgnoreOnRedshift(conn, "Domain types not support on Redshift"); diff --git a/test/Npgsql.Tests/ReaderOldSchemaTests.cs b/test/Npgsql.Tests/ReaderOldSchemaTests.cs index d6adcdf88f..72a6401468 100644 --- a/test/Npgsql.Tests/ReaderOldSchemaTests.cs +++ b/test/Npgsql.Tests/ReaderOldSchemaTests.cs @@ -181,9 +181,6 @@ public async Task Precision_and_scale() [Test] public async Task SchemaOnly([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepared)] 
PrepareOrNot prepare) { - // if (prepare == PrepareOrNot.Prepared && IsMultiplexing) - // return; - using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "name TEXT"); diff --git a/test/Npgsql.Tests/ReaderTests.cs b/test/Npgsql.Tests/ReaderTests.cs index 0d27bc5d03..8a4484ef9b 100644 --- a/test/Npgsql.Tests/ReaderTests.cs +++ b/test/Npgsql.Tests/ReaderTests.cs @@ -20,11 +20,9 @@ namespace Npgsql.Tests; -[TestFixture(MultiplexingMode.NonMultiplexing, CommandBehavior.Default)] -[TestFixture(MultiplexingMode.Multiplexing, CommandBehavior.Default)] -[TestFixture(MultiplexingMode.NonMultiplexing, CommandBehavior.SequentialAccess)] -[TestFixture(MultiplexingMode.Multiplexing, CommandBehavior.SequentialAccess)] -public class ReaderTests : MultiplexingTestBase +[TestFixture(CommandBehavior.Default)] +[TestFixture(CommandBehavior.SequentialAccess)] +public class ReaderTests : TestBase { static uint Int4Oid => PostgresMinimalDatabaseInfo.DefaultTypeCatalog.GetOid(DataTypeNames.Int4).Value; static uint ByteaOid => PostgresMinimalDatabaseInfo.DefaultTypeCatalog.GetOid(DataTypeNames.Bytea).Value; @@ -312,9 +310,6 @@ public async Task GetFieldType_SchemaOnly() [Test] public async Task GetPostgresType() { - if (IsMultiplexing) - Assert.Ignore("Multiplexing: Fails"); - using var conn = await OpenConnectionAsync(); PostgresType intType; using (var cmd = new NpgsqlCommand(@"SELECT 1::INTEGER AS some_column", conn)) @@ -386,7 +381,6 @@ public async Task GetDataTypeName_enum() await using var conn = await dataSource.OpenConnectionAsync(); var typeName = await GetTempTypeName(conn); await conn.ExecuteNonQueryAsync($"CREATE TYPE {typeName} AS ENUM ('one')"); - await Task.Yield(); // TODO: fix multiplexing deadlock bug conn.ReloadTypes(); await using var cmd = new NpgsqlCommand($"SELECT 'one'::{typeName}", conn); await using var reader = await cmd.ExecuteReaderAsync(Behavior); @@ -401,7 +395,6 @@ public async Task GetDataTypeName_domain() await using var 
conn = await dataSource.OpenConnectionAsync(); var typeName = await GetTempTypeName(conn); await conn.ExecuteNonQueryAsync($"CREATE DOMAIN {typeName} AS VARCHAR(10)"); - await Task.Yield(); // TODO: fix multiplexing deadlock bug conn.ReloadTypes(); await using var cmd = new NpgsqlCommand($"SELECT 'one'::{typeName}", conn); await using var reader = await cmd.ExecuteReaderAsync(Behavior); @@ -535,7 +528,7 @@ public async Task Read_past_reader_end() [Test] public async Task Reader_dispose_state_does_not_leak() { - if (IsMultiplexing || Behavior != CommandBehavior.Default) + if (Behavior != CommandBehavior.Default) return; var startReaderClosedTcs = new TaskCompletionSource(TaskCreationOptions.RunContinuationsAsynchronously); @@ -582,9 +575,6 @@ public async Task SingleResult() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/400")] public async Task Exception_thrown_from_ExecuteReaderAsync([Values(PrepareOrNot.Prepared, PrepareOrNot.NotPrepared)] PrepareOrNot prepare) { - if (prepare == PrepareOrNot.Prepared && IsMultiplexing) - return; - using var conn = await OpenConnectionAsync(); var function = await GetTempFunctionName(conn); @@ -603,9 +593,6 @@ await conn.ExecuteNonQueryAsync($@" [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1032")] public async Task Exception_thrown_from_NextResult([Values(PrepareOrNot.Prepared, PrepareOrNot.NotPrepared)] PrepareOrNot prepare) { - if (prepare == PrepareOrNot.Prepared && IsMultiplexing) - return; - using var conn = await OpenConnectionAsync(); var function = await GetTempFunctionName(conn); @@ -767,8 +754,7 @@ public async Task Reader_is_still_open() { await using var conn = await OpenConnectionAsync(); // We might get the connection, on which the second command was already prepared, so prepare wouldn't start the UserAction - if (!IsMultiplexing) - conn.UnprepareAll(); + conn.UnprepareAll(); using var cmd1 = new NpgsqlCommand("SELECT 1", conn); await using var reader1 = await 
cmd1.ExecuteReaderAsync(Behavior); Assert.That(() => conn.ExecuteNonQuery("SELECT 1"), Throws.Exception.TypeOf()); @@ -776,16 +762,12 @@ public async Task Reader_is_still_open() using var cmd2 = new NpgsqlCommand("SELECT 2", conn); Assert.That(() => cmd2.ExecuteReader(Behavior), Throws.Exception.TypeOf()); - if (!IsMultiplexing) - Assert.That(() => cmd2.Prepare(), Throws.Exception.TypeOf()); + Assert.That(() => cmd2.Prepare(), Throws.Exception.TypeOf()); } [Test] public async Task Cleans_up_ok_with_dispose_calls([Values(PrepareOrNot.Prepared, PrepareOrNot.NotPrepared)] PrepareOrNot prepare) { - if (prepare == PrepareOrNot.Prepared && IsMultiplexing) - return; - using var conn = await OpenConnectionAsync(); using var command = new NpgsqlCommand("SELECT 1", conn); using var dr = await command.ExecuteReaderAsync(Behavior); @@ -828,9 +810,6 @@ public async Task Null() [IssueLink("https://github.com/npgsql/npgsql/issues/1898")] public async Task HasRows([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepared)] PrepareOrNot prepare) { - if (prepare == PrepareOrNot.Prepared && IsMultiplexing) - return; - using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "name TEXT"); @@ -1105,10 +1084,6 @@ public async Task Nullable_scalar() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/2913")] public async Task Bug2913_reading_previous_query_messages() { - // No point in testing for multiplexing, as every query may use another connection - if (IsMultiplexing) - return; - var firstMrs = new ManualResetEventSlim(false); var secondMrs = new ManualResetEventSlim(false); @@ -1323,14 +1298,8 @@ public async Task Dispose_does_not_swallow_exceptions([Values(true, false)] bool await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); await using var conn = await dataSource.OpenConnectionAsync(); - await using var tx = IsMultiplexing ? 
await conn.BeginTransactionAsync() : null; var pgMock = await postmasterMock.WaitForServerConnection(); - if (IsMultiplexing) - pgMock - .WriteEmptyQueryResponse() - .WriteReadyForQuery(TransactionStatus.InTransactionBlock); - // Write responses for the query, but break the connection before sending CommandComplete/ReadyForQuery await pgMock .WriteParseComplete() @@ -1820,9 +1789,6 @@ public async Task GetChars_when_null() [Test] public async Task Reader_is_reused() { - if (IsMultiplexing) - Assert.Ignore("Multiplexing: Fails"); - using var conn = await OpenConnectionAsync(); NpgsqlDataReader reader1; @@ -1885,9 +1851,6 @@ public async Task GetTextReader_in_middle_of_column_throws([Values] bool async) [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5450")] public async Task EndRead_StreamActive([Values]bool async) { - if (IsMultiplexing) - return; - const int columnLength = 1; await using var conn = await OpenConnectionAsync(); @@ -1950,9 +1913,6 @@ public async Task Non_SafeReadException() [Test, Description("Cancels ReadAsync via the NpgsqlCommand.Cancel, with successful PG cancellation")] public async Task ReadAsync_cancel_command_soft() { - if (IsMultiplexing) - return; // Multiplexing, cancellation - await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); await using var conn = await dataSource.OpenConnectionAsync(); @@ -1999,9 +1959,6 @@ await pgMock [Test, Description("Cancels ReadAsync via the cancellation token, with successful PG cancellation")] public async Task ReadAsync_cancel_soft() { - if (IsMultiplexing) - return; // Multiplexing, cancellation - await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); await using var conn = await dataSource.OpenConnectionAsync(); @@ -2050,9 +2007,6 @@ await pgMock [Test, Description("Cancels NextResultAsync via 
the cancellation token, with successful PG cancellation")] public async Task NextResult_cancel_soft() { - if (IsMultiplexing) - return; // Multiplexing, cancellation - await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); await using var conn = await dataSource.OpenConnectionAsync(); @@ -2102,9 +2056,6 @@ await pgMock [Test, Description("Cancels ReadAsync via the cancellation token, with unsuccessful PG cancellation (socket break)")] public async Task ReadAsync_cancel_hard([Values(true, false)] bool passCancelledToken) { - if (IsMultiplexing) - return; // Multiplexing, cancellation - await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); await using var conn = await dataSource.OpenConnectionAsync(); @@ -2146,9 +2097,6 @@ await pgMock [Test, Description("Cancels NextResultAsync via the cancellation token, with unsuccessful PG cancellation (socket break)")] public async Task NextResultAsync_cancel_hard([Values(true, false)] bool passCancelledToken) { - if (IsMultiplexing) - return; // Multiplexing, cancellation - await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); await using var conn = await dataSource.OpenConnectionAsync(); @@ -2191,9 +2139,6 @@ await pgMock [Test, Description("Cancels sequential ReadAsGetFieldValueAsync")] public async Task GetFieldValueAsync_sequential_cancel([Values(true, false)] bool passCancelledToken) { - if (IsMultiplexing) - return; // Multiplexing, cancellation - if (!IsSequential) return; @@ -2229,9 +2174,6 @@ await pgMock [Test, Description("Cancels sequential ReadAsGetFieldValueAsync")] public async Task IsDBNullAsync_sequential_cancel([Values(true, false)] bool passCancelledToken) { - if (IsMultiplexing) - return; // Multiplexing, 
cancellation - if (!IsSequential) return; @@ -2264,24 +2206,6 @@ await pgMock Assert.That(conn.FullState, Is.EqualTo(ConnectionState.Broken)); } - [Test, Description("Cancellation does not work with the multiplexing")] - public async Task Cancel_multiplexing_disabled() - { - if (!IsMultiplexing) - return; - - await using var dataSource = CreateDataSource(); - await using var conn = await dataSource.OpenConnectionAsync(); - await using var cmd = new NpgsqlCommand("SELECT generate_series(1, 100); SELECT generate_series(1, 100)", conn); - await using var reader = await cmd.ExecuteReaderAsync(Behavior); - var cancelledToken = new CancellationToken(canceled: true); - Assert.That(await reader.ReadAsync()); - while (await reader.ReadAsync(cancelledToken)) { } - Assert.That(await reader.NextResultAsync(cancelledToken)); - while (await reader.ReadAsync(cancelledToken)) { } - Assert.That(conn.Connector!.UserCancellationRequested, Is.False); - } - #endregion Cancellation #region Timeout @@ -2289,9 +2213,6 @@ public async Task Cancel_multiplexing_disabled() [Test, Description("Timeouts sequential ReadAsGetFieldValueAsync")] public async Task GetFieldValueAsync_sequential_timeout() { - if (IsMultiplexing) - return; // Multiplexing, cancellation - if (!IsSequential) return; @@ -2329,9 +2250,6 @@ await pgMock [Test, Description("Timeouts sequential IsDBNullAsync")] public async Task IsDBNullAsync_sequential_timeout() { - if (IsMultiplexing) - return; // Multiplexing, cancellation - if (!IsSequential) return; @@ -2369,9 +2287,6 @@ await pgMock [Test, IssueLink("https://github.com/npgsql/npgsql/issues/3446")] public async Task Bug3446() { - if (IsMultiplexing) - return; // Multiplexing, cancellation - await using var postmasterMock = PgPostmasterMock.Start(ConnectionString); await using var dataSource = CreateDataSource(postmasterMock.ConnectionString); await using var conn = await dataSource.OpenConnectionAsync(); @@ -2443,7 +2358,7 @@ await pgMock readonly CommandBehavior 
Behavior; // ReSharper restore InconsistentNaming - public ReaderTests(MultiplexingMode multiplexingMode, CommandBehavior behavior) : base(multiplexingMode) + public ReaderTests(CommandBehavior behavior) { Behavior = behavior; IsSequential = (Behavior & CommandBehavior.SequentialAccess) != 0; diff --git a/test/Npgsql.Tests/SecurityTests.cs b/test/Npgsql.Tests/SecurityTests.cs index c7a595c8b8..cb591b39eb 100644 --- a/test/Npgsql.Tests/SecurityTests.cs +++ b/test/Npgsql.Tests/SecurityTests.cs @@ -232,13 +232,8 @@ public void ScramPlus_channel_binding([Values] ChannelBinding channelBinding) } [Test] - public async Task Connect_with_only_ssl_allowed_user([Values] bool multiplexing, [Values] bool keepAlive) + public async Task Connect_with_only_ssl_allowed_user([Values] bool keepAlive) { - if (multiplexing && keepAlive) - { - Assert.Ignore("Multiplexing doesn't support keepalive"); - } - try { await using var dataSource = CreateDataSource(csb => @@ -246,7 +241,6 @@ public async Task Connect_with_only_ssl_allowed_user([Values] bool multiplexing, csb.SslMode = SslMode.Allow; csb.Username = "npgsql_tests_ssl"; csb.Password = "npgsql_tests_ssl"; - csb.Multiplexing = multiplexing; csb.KeepAlive = keepAlive ? 
10 : 0; }); await using var conn = await dataSource.OpenConnectionAsync(); @@ -261,13 +255,8 @@ public async Task Connect_with_only_ssl_allowed_user([Values] bool multiplexing, [Test] [Platform(Exclude = "Win", Reason = "Postgresql doesn't close connection correctly on windows which might result in missing error message")] - public async Task Connect_with_only_non_ssl_allowed_user([Values] bool multiplexing, [Values] bool keepAlive) + public async Task Connect_with_only_non_ssl_allowed_user([Values] bool keepAlive) { - if (multiplexing && keepAlive) - { - Assert.Ignore("Multiplexing doesn't support keepalive"); - } - try { await using var dataSource = CreateDataSource(csb => @@ -275,7 +264,6 @@ public async Task Connect_with_only_non_ssl_allowed_user([Values] bool multiplex csb.SslMode = SslMode.Prefer; csb.Username = "npgsql_tests_nossl"; csb.Password = "npgsql_tests_nossl"; - csb.Multiplexing = multiplexing; csb.KeepAlive = keepAlive ? 10 : 0; }); await using var conn = await dataSource.OpenConnectionAsync(); diff --git a/test/Npgsql.Tests/Support/AssemblySetUp.cs b/test/Npgsql.Tests/Support/AssemblySetUp.cs index f1619ecec4..c7d16f0501 100644 --- a/test/Npgsql.Tests/Support/AssemblySetUp.cs +++ b/test/Npgsql.Tests/Support/AssemblySetUp.cs @@ -26,7 +26,6 @@ public void Setup() var builder = new NpgsqlConnectionStringBuilder(connString) { Pooling = false, - Multiplexing = false, Database = "postgres" }; diff --git a/test/Npgsql.Tests/Support/MultiplexingTestBase.cs b/test/Npgsql.Tests/Support/MultiplexingTestBase.cs deleted file mode 100644 index 892dd79f5e..0000000000 --- a/test/Npgsql.Tests/Support/MultiplexingTestBase.cs +++ /dev/null @@ -1,37 +0,0 @@ -using System.Collections.Concurrent; -using NUnit.Framework; - -namespace Npgsql.Tests; - -[TestFixture(MultiplexingMode.NonMultiplexing)] -[TestFixture(MultiplexingMode.Multiplexing)] -public abstract class MultiplexingTestBase : TestBase -{ - protected bool IsMultiplexing => MultiplexingMode == 
MultiplexingMode.Multiplexing; - - protected MultiplexingMode MultiplexingMode { get; } - - readonly ConcurrentDictionary<(string ConnString, bool IsMultiplexing), string> _connStringCache - = new(); - - public override string ConnectionString { get; } - - protected MultiplexingTestBase(MultiplexingMode multiplexingMode) - { - MultiplexingMode = multiplexingMode; - - // If the test requires multiplexing to be on or off, use a small cache to avoid reparsing and - // regenerating the connection string every time - ConnectionString = _connStringCache.GetOrAdd((base.ConnectionString, IsMultiplexing), - tup => new NpgsqlConnectionStringBuilder(tup.ConnString) - { - Multiplexing = tup.IsMultiplexing - }.ToString()); - } -} - -public enum MultiplexingMode -{ - NonMultiplexing, - Multiplexing -} diff --git a/test/Npgsql.Tests/Support/TestBase.cs b/test/Npgsql.Tests/Support/TestBase.cs index 5d690004b6..8fc0131889 100644 --- a/test/Npgsql.Tests/Support/TestBase.cs +++ b/test/Npgsql.Tests/Support/TestBase.cs @@ -551,8 +551,7 @@ async Task AssertTypeUnsupportedReadCore(string sqlLi dataSource ??= DataSource; await using var conn = await dataSource.OpenConnectionAsync(); - // Make sure we don't poison the connection with a fault, potentially terminating other perfectly passing tests as well. - await using var tx = dataSource.Settings.Multiplexing ? await conn.BeginTransactionAsync() : null; + await using var tx = await conn.BeginTransactionAsync(); await using var cmd = new NpgsqlCommand($"SELECT '{sqlLiteral}'::{dataTypeName}", conn); await using var reader = await cmd.ExecuteReaderAsync(); await reader.ReadAsync(); @@ -588,8 +587,7 @@ async Task AssertTypeUnsupportedWriteCore(T value, st dataSource ??= DataSource; await using var conn = await dataSource.OpenConnectionAsync(); - // Make sure we don't poison the connection with a fault, potentially terminating other perfectly passing tests as well. - await using var tx = dataSource.Settings.Multiplexing ? 
await conn.BeginTransactionAsync() : null; + await using var tx = await conn.BeginTransactionAsync(); await using var cmd = new NpgsqlCommand("SELECT $1", conn) { Parameters = { new() { Value = value } } @@ -768,7 +766,6 @@ async Task OpenConnectionInternal(bool hasLock) var builder = new NpgsqlConnectionStringBuilder(TestUtil.ConnectionString) { Pooling = false, - Multiplexing = false, Database = "postgres" }; diff --git a/test/Npgsql.Tests/TestUtil.cs b/test/Npgsql.Tests/TestUtil.cs index 85141c1cfa..0f83946ac7 100644 --- a/test/Npgsql.Tests/TestUtil.cs +++ b/test/Npgsql.Tests/TestUtil.cs @@ -19,7 +19,7 @@ public static class TestUtil /// test database. /// public const string DefaultConnectionString = - "Host=localhost;Username=npgsql_tests;Password=npgsql_tests;Database=npgsql_tests;Timeout=0;Command Timeout=0;SSL Mode=Disable;Multiplexing=False"; + "Host=localhost;Username=npgsql_tests;Password=npgsql_tests;Database=npgsql_tests;Timeout=0;Command Timeout=0;SSL Mode=Disable"; /// /// The connection string that will be used when opening the connection to the tests database. 
diff --git a/test/Npgsql.Tests/TracingTests.cs b/test/Npgsql.Tests/TracingTests.cs index f845861366..0d51e6a6f6 100644 --- a/test/Npgsql.Tests/TracingTests.cs +++ b/test/Npgsql.Tests/TracingTests.cs @@ -10,11 +10,9 @@ namespace Npgsql.Tests; [NonParallelizable] -[TestFixture(MultiplexingMode.NonMultiplexing, true)] -[TestFixture(MultiplexingMode.NonMultiplexing, false)] -[TestFixture(MultiplexingMode.Multiplexing, true)] -// Sync I/O not supported with multiplexing -public class TracingTests(MultiplexingMode multiplexingMode, bool async) : MultiplexingTestBase(multiplexingMode) +[TestFixture(true)] +[TestFixture(false)] +public class TracingTests(bool async) : TestBase { #region Physical open @@ -28,47 +26,24 @@ public async Task PhysicalOpen() : dataSource.OpenConnection(); Assert.That(activities, Has.Count.EqualTo(1)); - ValidateActivity(activities[0], connection, IsMultiplexing); - if (!IsMultiplexing) - return; - - activities.Clear(); - - // For multiplexing, we clear the pool to force next query to open another physical connection - dataSource.Clear(); - - await connection.ExecuteScalarAsync("SELECT 1"); - - Assert.That(activities, Has.Count.EqualTo(2)); - ValidateActivity(activities[0], connection, IsMultiplexing); - - // For multiplexing, query's activity can be considered as a parent for physical open's activity - Assert.That(activities[0].Parent, Is.SameAs(activities[1])); - - static void ValidateActivity(Activity activity, NpgsqlConnection conn, bool isMultiplexing) - { - Assert.That(activity.DisplayName, Is.EqualTo("CONNECT " + conn.Settings.Database)); - Assert.That(activity.OperationName, Is.EqualTo("CONNECT " + conn.Settings.Database)); - Assert.That(activity.Status, Is.EqualTo(ActivityStatusCode.Unset)); - - Assert.That(activity.Events.Count(), Is.EqualTo(0)); + var activity = activities[0]; + Assert.That(activity.DisplayName, Is.EqualTo("CONNECT " + connection.Settings.Database)); + Assert.That(activity.OperationName, Is.EqualTo("CONNECT " + 
connection.Settings.Database)); + Assert.That(activity.Status, Is.EqualTo(ActivityStatusCode.Unset)); - var tags = activity.TagObjects.ToDictionary(t => t.Key, t => t.Value); - Assert.That(tags, Has.Count.EqualTo(conn.Settings.Port == 5432 ? 5 : 6)); + Assert.That(activity.Events.Count(), Is.EqualTo(0)); - Assert.That(tags["db.system.name"], Is.EqualTo("postgresql")); - Assert.That(tags["db.namespace"], Is.EqualTo(conn.Settings.Database)); + var tags = activity.TagObjects.ToDictionary(t => t.Key, t => t.Value); + Assert.That(tags, Has.Count.EqualTo(connection.Settings.Port == 5432 ? 5 : 6)); - Assert.That(tags, Does.Not.ContainKey("db.query.text")); + Assert.That(tags["db.system.name"], Is.EqualTo("postgresql")); + Assert.That(tags["db.namespace"], Is.EqualTo(connection.Settings.Database)); - Assert.That(tags["db.npgsql.data_source"], Is.EqualTo(conn.ConnectionString)); + Assert.That(tags, Does.Not.ContainKey("db.query.text")); - if (isMultiplexing) - Assert.That(tags, Does.ContainKey("db.npgsql.connection_id")); - else - Assert.That(tags["db.npgsql.connection_id"], Is.EqualTo(conn.ProcessID)); - } + Assert.That(tags["db.npgsql.data_source"], Is.EqualTo(connection.ConnectionString)); + Assert.That(tags["db.npgsql.connection_id"], Is.EqualTo(connection.ProcessID)); } [Test] @@ -149,11 +124,7 @@ public async Task CommandExecute([Values] bool batch) Assert.That(tags["db.namespace"], Is.EqualTo(connection.Settings.Database)); Assert.That(tags["db.npgsql.data_source"], Is.EqualTo("TestTracingDataSource")); - - if (IsMultiplexing) - Assert.That(tags, Does.ContainKey("db.npgsql.connection_id")); - else - Assert.That(tags["db.npgsql.connection_id"], Is.EqualTo(connection.ProcessID)); + Assert.That(tags["db.npgsql.connection_id"], Is.EqualTo(connection.ProcessID)); } [Test] @@ -190,22 +161,12 @@ public async Task CommandExecute_error([Values] bool batch) Assert.That(activityTags["error.type"], Is.EqualTo(PostgresErrorCodes.UndefinedTable)); 
Assert.That(activityTags["db.npgsql.data_source"], Is.EqualTo(connection.ConnectionString)); - - if (IsMultiplexing) - Assert.That(activityTags, Does.ContainKey("db.npgsql.connection_id")); - else - Assert.That(activityTags["db.npgsql.connection_id"], Is.EqualTo(connection.ProcessID)); + Assert.That(activityTags["db.npgsql.connection_id"], Is.EqualTo(connection.ProcessID)); } [Test] public async Task CommandExecute_explicit_prepare([Values] bool batch) { - if (IsMultiplexing) - { - Assert.Ignore("Explicit prepare is not supported with multiplexing"); - return; - } - await using var dataSource = CreateDataSource(o => o.ConfigureTracing(o => o.EnablePhysicalOpenTracing(false))); await using var connection = await dataSource.OpenConnectionAsync(); @@ -333,10 +294,7 @@ public async Task BinaryImport() Assert.That(tags["db.npgsql.data_source"], Is.EqualTo(connection.ConnectionString)); Assert.That(tags["db.npgsql.rows"], Is.EqualTo(1)); - if (IsMultiplexing) - Assert.That(tags, Does.ContainKey("db.npgsql.connection_id")); - else - Assert.That(tags["db.npgsql.connection_id"], Is.EqualTo(connection.ProcessID)); + Assert.That(tags["db.npgsql.connection_id"], Is.EqualTo(connection.ProcessID)); } [Test] @@ -451,10 +409,7 @@ public async Task BinaryExport() Assert.That(tags["db.npgsql.data_source"], Is.EqualTo(connection.ConnectionString)); Assert.That(tags["db.npgsql.rows"], Is.EqualTo(1)); - if (IsMultiplexing) - Assert.That(tags, Does.ContainKey("db.npgsql.connection_id")); - else - Assert.That(tags["db.npgsql.connection_id"], Is.EqualTo(connection.ProcessID)); + Assert.That(tags["db.npgsql.connection_id"], Is.EqualTo(connection.ProcessID)); } [Test] @@ -558,10 +513,7 @@ public async Task RawBinaryExport() Assert.That(tags["db.npgsql.data_source"], Is.EqualTo(connection.ConnectionString)); - if (IsMultiplexing) - Assert.That(tags, Does.ContainKey("db.npgsql.connection_id")); - else - Assert.That(tags["db.npgsql.connection_id"], Is.EqualTo(connection.ProcessID)); + 
Assert.That(tags["db.npgsql.connection_id"], Is.EqualTo(connection.ProcessID)); Assert.That(tags, Does.Not.ContainKey("db.npgsql.rows")); } @@ -695,10 +647,7 @@ public async Task TextImport() Assert.That(tags["db.npgsql.data_source"], Is.EqualTo(connection.ConnectionString)); - if (IsMultiplexing) - Assert.That(tags, Does.ContainKey("db.npgsql.connection_id")); - else - Assert.That(tags["db.npgsql.connection_id"], Is.EqualTo(connection.ProcessID)); + Assert.That(tags["db.npgsql.connection_id"], Is.EqualTo(connection.ProcessID)); Assert.That(tags, Does.Not.ContainKey("db.npgsql.rows")); } @@ -743,10 +692,7 @@ public async Task TextExport() Assert.That(tags["db.npgsql.data_source"], Is.EqualTo(connection.ConnectionString)); - if (IsMultiplexing) - Assert.That(tags, Does.ContainKey("db.npgsql.connection_id")); - else - Assert.That(tags["db.npgsql.connection_id"], Is.EqualTo(connection.ProcessID)); + Assert.That(tags["db.npgsql.connection_id"], Is.EqualTo(connection.ProcessID)); Assert.That(tags, Does.Not.ContainKey("db.npgsql.rows")); } diff --git a/test/Npgsql.Tests/TransactionTests.cs b/test/Npgsql.Tests/TransactionTests.cs index 9836bac8f2..57e0cc523f 100644 --- a/test/Npgsql.Tests/TransactionTests.cs +++ b/test/Npgsql.Tests/TransactionTests.cs @@ -12,14 +12,11 @@ namespace Npgsql.Tests; -public class TransactionTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) +public class TransactionTests : TestBase { [Test, Description("Basic insert within a committed transaction")] public async Task Commit([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepared)] PrepareOrNot prepare) { - if (prepare == PrepareOrNot.Prepared && IsMultiplexing) - return; - await using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "name TEXT"); @@ -37,18 +34,12 @@ public async Task Commit([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepared Assert.That(await conn.ExecuteScalarAsync($"SELECT COUNT(*) FROM {table}"), 
Is.EqualTo(1)); } - // With multiplexing we can't assume that disposed NpgsqlTransaction will throw ObjectDisposedException - // Because disposed NpgsqlTransaction might be reused by another thread - if (!IsMultiplexing) - Assert.That(() => tx.Connection, Throws.Exception.TypeOf()); + Assert.That(() => tx.Connection, Throws.Exception.TypeOf()); } [Test, Description("Basic insert within a committed transaction")] public async Task CommitAsync([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepared)] PrepareOrNot prepare) { - if (prepare == PrepareOrNot.Prepared && IsMultiplexing) - return; - await using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "name TEXT"); @@ -66,18 +57,12 @@ public async Task CommitAsync([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Pre Assert.That(await conn.ExecuteScalarAsync($"SELECT COUNT(*) FROM {table}"), Is.EqualTo(1)); } - // With multiplexing we can't assume that disposed NpgsqlTransaction will throw ObjectDisposedException - // Because disposed NpgsqlTransaction might be reused by another thread - if (!IsMultiplexing) - Assert.That(() => tx.Connection, Throws.Exception.TypeOf()); + Assert.That(() => tx.Connection, Throws.Exception.TypeOf()); } [Test, Description("Basic insert within a rolled back transaction")] public async Task Rollback([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepared)] PrepareOrNot prepare) { - if (prepare == PrepareOrNot.Prepared && IsMultiplexing) - return; - await using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "name TEXT"); @@ -95,18 +80,12 @@ public async Task Rollback([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepar Assert.That(await conn.ExecuteScalarAsync($"SELECT COUNT(*) FROM {table}"), Is.EqualTo(0)); } - // With multiplexing we can't assume that disposed NpgsqlTransaction will throw ObjectDisposedException - // Because disposed NpgsqlTransaction might be reused by another thread - if (!IsMultiplexing) - Assert.That(() 
=> tx.Connection, Throws.Exception.TypeOf()); + Assert.That(() => tx.Connection, Throws.Exception.TypeOf()); } [Test, Description("Basic insert within a rolled back transaction")] public async Task RollbackAsync([Values(PrepareOrNot.NotPrepared, PrepareOrNot.Prepared)] PrepareOrNot prepare) { - if (prepare == PrepareOrNot.Prepared && IsMultiplexing) - return; - await using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "name TEXT"); @@ -124,10 +103,7 @@ public async Task RollbackAsync([Values(PrepareOrNot.NotPrepared, PrepareOrNot.P Assert.That(await conn.ExecuteScalarAsync($"SELECT COUNT(*) FROM {table}"), Is.EqualTo(0)); } - // With multiplexing we can't assume that disposed NpgsqlTransaction will throw ObjectDisposedException - // Because disposed NpgsqlTransaction might be reused by another thread - if (!IsMultiplexing) - Assert.That(() => tx.Connection, Throws.Exception.TypeOf()); + Assert.That(() => tx.Connection, Throws.Exception.TypeOf()); } [Test, Description("Dispose a transaction in progress, should roll back")] @@ -249,21 +225,12 @@ public async Task Default_IsolationLevel() tx.Rollback(); } - [Test, Description("Makes sure that transactions started in SQL work, except in multiplexing")] + [Test, Description("Makes sure that transactions started in SQL work")] public async Task Via_sql() { - if (IsMultiplexing) - Assert.Ignore("Multiplexing: not implemented"); - await using var conn = await OpenConnectionAsync(); var table = await CreateTempTable(conn, "name TEXT"); - if (IsMultiplexing) - { - Assert.That(async () => await conn.ExecuteNonQueryAsync("BEGIN"), Throws.Exception.TypeOf()); - return; - } - await conn.ExecuteNonQueryAsync("BEGIN"); await conn.ExecuteNonQueryAsync($"INSERT INTO {table} (name) VALUES ('X')"); await conn.ExecuteNonQueryAsync("ROLLBACK"); @@ -356,9 +323,6 @@ public async Task Failed_transaction_on_close_with_custom_timeout() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/555")] public 
async Task Transaction_on_recycled_connection() { - if (IsMultiplexing) - Assert.Ignore("Multiplexing: fails"); - // Use application name to make sure we have our very own private connection pool await using var conn = new NpgsqlConnection(ConnectionString + $";Application Name={GetUniqueIdentifier(nameof(Transaction_on_recycled_connection))}"); conn.Open(); @@ -507,12 +471,10 @@ public async Task IsCompleted_rollback_failed() public async Task Transaction_not_supported() { // TODO: rewrite to DataSource - if (IsMultiplexing) - Assert.Ignore("Need to rethink/redo dummy transaction mode"); var connString = new NpgsqlConnectionStringBuilder(ConnectionString) { - ApplicationName = nameof(Transaction_not_supported) + IsMultiplexing + ApplicationName = nameof(Transaction_not_supported) }.ToString(); NpgsqlDatabaseInfo.RegisterFactory(new NoTransactionDatabaseInfoFactory()); @@ -560,19 +522,6 @@ public async Task Transaction_not_supported() // More at #3254 public async Task Bug3248_Dispose_transaction_Rollback() { - if (!IsMultiplexing) - return; - - using var conn = await OpenConnectionAsync(); - await using (var tx = await conn.BeginTransactionAsync()) - { - Assert.That(conn.Connector, Is.Not.Null); - Assert.That(async () => await conn.ExecuteScalarAsync("SELECT * FROM \"unknown_table\"", tx: tx), - Throws.Exception.TypeOf()); - Assert.That(conn.Connector, Is.Not.Null); - } - - Assert.That(conn.Connector, Is.Null); } [Test] @@ -580,18 +529,6 @@ public async Task Bug3248_Dispose_transaction_Rollback() // More at #3254 public async Task Bug3248_Dispose_connection_Rollback() { - if (!IsMultiplexing) - return; - - var conn = await OpenConnectionAsync(); - var tx = conn.BeginTransaction(); - Assert.That(conn.Connector, Is.Not.Null); - Assert.That(async () => await conn.ExecuteScalarAsync("SELECT * FROM \"unknown_table\"", tx: tx), - Throws.Exception.TypeOf()); - Assert.That(conn.Connector, Is.Not.Null); - - await conn.DisposeAsync(); - Assert.That(conn.Connector, Is.Null); 
} [Test] @@ -696,9 +633,6 @@ public async Task Unbound_transaction_reuse() [Test, IssueLink("https://github.com/npgsql/npgsql/issues/3686")] public async Task Bug3686() { - if (IsMultiplexing) - return; - await using var dataSource = CreateDataSource(csb => csb.Pooling = false); await using var conn = await dataSource.OpenConnectionAsync(); await using var tx = await conn.BeginTransactionAsync(); diff --git a/test/Npgsql.Tests/Types/ArrayTests.cs b/test/Npgsql.Tests/Types/ArrayTests.cs index 82209de20f..cf6f27e038 100644 --- a/test/Npgsql.Tests/Types/ArrayTests.cs +++ b/test/Npgsql.Tests/Types/ArrayTests.cs @@ -21,7 +21,7 @@ namespace Npgsql.Tests.Types; /// /// https://www.postgresql.org/docs/current/static/arrays.html /// -public class ArrayTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) +public class ArrayTests : TestBase { static readonly TestCaseData[] ArrayTestCases = [ @@ -421,9 +421,6 @@ public async Task Jagged_arrays_not_supported() [Test, Description("Roundtrips one-dimensional and two-dimensional arrays of a PostgreSQL domain.")] public async Task Array_of_domain() { - if (IsMultiplexing) - Assert.Ignore("Multiplexing, ReloadTypes"); - await using var conn = await OpenConnectionAsync(); MinimumPgVersion(conn, "11.0", "Arrays of domains were introduced in PostgreSQL 11"); await conn.ExecuteNonQueryAsync("CREATE DOMAIN pg_temp.posint AS integer CHECK (VALUE > 0);"); @@ -453,9 +450,6 @@ public async Task Array_of_domain() [Test, Description("Roundtrips a PostgreSQL domain over a one-dimensional and a two-dimensional array.")] public async Task Domain_of_array() { - if (IsMultiplexing) - Assert.Ignore("Multiplexing, ReloadTypes"); - await using var conn = await OpenConnectionAsync(); MinimumPgVersion(conn, "11.0", "Domains over arrays were introduced in PostgreSQL 11"); await conn.ExecuteNonQueryAsync( diff --git a/test/Npgsql.Tests/Types/BitStringTests.cs b/test/Npgsql.Tests/Types/BitStringTests.cs index 
7e64238b69..e41ae0cc8f 100644 --- a/test/Npgsql.Tests/Types/BitStringTests.cs +++ b/test/Npgsql.Tests/Types/BitStringTests.cs @@ -14,7 +14,7 @@ namespace Npgsql.Tests.Types; /// /// https://www.postgresql.org/docs/current/static/datatype-bit.html /// -public class BitStringTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) +public class BitStringTests : TestBase { [Test] [TestCase("10110110", TestName = "BitArray")] diff --git a/test/Npgsql.Tests/Types/ByteaTests.cs b/test/Npgsql.Tests/Types/ByteaTests.cs index 9bff02c273..d87ed48216 100644 --- a/test/Npgsql.Tests/Types/ByteaTests.cs +++ b/test/Npgsql.Tests/Types/ByteaTests.cs @@ -15,7 +15,7 @@ namespace Npgsql.Tests.Types; /// /// https://www.postgresql.org/docs/current/static/datatype-binary.html /// -public class ByteaTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) +public class ByteaTests : TestBase { [Test] [TestCase(new byte[] { 1, 2, 3, 4, 5 }, "\\x0102030405", TestName = "Bytea")] diff --git a/test/Npgsql.Tests/Types/CompositeTests.cs b/test/Npgsql.Tests/Types/CompositeTests.cs index 8c8c336478..d9c1db253d 100644 --- a/test/Npgsql.Tests/Types/CompositeTests.cs +++ b/test/Npgsql.Tests/Types/CompositeTests.cs @@ -10,7 +10,7 @@ namespace Npgsql.Tests.Types; -public class CompositeTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) +public class CompositeTests : TestBase { [Test] public async Task Basic() @@ -449,8 +449,6 @@ public async Task Table_as_composite([Values] bool enabled) else { Assert.ThrowsAsync(DoAssertion); - // Start a transaction specifically for multiplexing (to bind a connector to the connection) - await using var tx = await connection.BeginTransactionAsync(); Assert.That(connection.Connector!.DatabaseInfo.CompositeTypes.SingleOrDefault(c => c.Name.Contains(table)), Is.Null); Assert.That(connection.Connector!.DatabaseInfo.ArrayTypes.SingleOrDefault(c => c.Name.Contains(table)), Is.Null); @@ -530,8 
+528,7 @@ await AssertType( [Test] public async Task PostgresType() { - // With multiplexing we can't guarantee that after ReloadTypesAsync we'll execute the query on a connection which has the new types - // Set max pool size to 1 enforce this + // Set max pool size to 1 to ensure we execute queries on the connection which has the new types await using var dataSource = CreateDataSource(connectionStringBuilderAction: csb => csb.MaxPoolSize = 1); await using var connection = await dataSource.OpenConnectionAsync(); var type1 = await GetTempTypeName(connection); diff --git a/test/Npgsql.Tests/Types/CubeTests.cs b/test/Npgsql.Tests/Types/CubeTests.cs index 4b59a85370..9c98438ab7 100644 --- a/test/Npgsql.Tests/Types/CubeTests.cs +++ b/test/Npgsql.Tests/Types/CubeTests.cs @@ -6,7 +6,7 @@ namespace Npgsql.Tests.Types; -public class CubeTests : MultiplexingTestBase +public class CubeTests : TestBase { static readonly TestCaseData[] CubeValues = { @@ -264,6 +264,4 @@ public async Task SetUp() TestUtil.MinimumPgVersion(conn, "13.0"); await TestUtil.EnsureExtensionAsync(conn, "cube"); } - - public CubeTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) { } } diff --git a/test/Npgsql.Tests/Types/DateTimeTests.cs b/test/Npgsql.Tests/Types/DateTimeTests.cs index e81da27ee0..c698514e10 100644 --- a/test/Npgsql.Tests/Types/DateTimeTests.cs +++ b/test/Npgsql.Tests/Types/DateTimeTests.cs @@ -8,7 +8,6 @@ namespace Npgsql.Tests.Types; -// Since this test suite manipulates TimeZone, it is incompatible with multiplexing public class DateTimeTests : TestBase { #region Date diff --git a/test/Npgsql.Tests/Types/DomainTests.cs b/test/Npgsql.Tests/Types/DomainTests.cs index 7b9a7e6571..0905b7b4d6 100644 --- a/test/Npgsql.Tests/Types/DomainTests.cs +++ b/test/Npgsql.Tests/Types/DomainTests.cs @@ -6,14 +6,11 @@ namespace Npgsql.Tests.Types; -public class DomainTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) +public class DomainTests : TestBase { 
[Test, Description("Resolves a domain type handler via the different pathways")] public async Task Domain_resolution() { - if (IsMultiplexing) - Assert.Ignore("Multiplexing, ReloadTypes"); - await using var dataSource = CreateDataSource(csb => csb.Pooling = false); await using var conn = await dataSource.OpenConnectionAsync(); var type = await GetTempTypeName(conn); diff --git a/test/Npgsql.Tests/Types/EnumTests.cs b/test/Npgsql.Tests/Types/EnumTests.cs index 30eb522b5d..52f512c944 100644 --- a/test/Npgsql.Tests/Types/EnumTests.cs +++ b/test/Npgsql.Tests/Types/EnumTests.cs @@ -11,7 +11,7 @@ namespace Npgsql.Tests.Types; -public class EnumTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) +public class EnumTests : TestBase { enum Mood { Sad, Ok, Happy } enum AnotherEnum { Value1, Value2 } diff --git a/test/Npgsql.Tests/Types/FullTextSearchTests.cs b/test/Npgsql.Tests/Types/FullTextSearchTests.cs index 5f63826a99..079bb7dec5 100644 --- a/test/Npgsql.Tests/Types/FullTextSearchTests.cs +++ b/test/Npgsql.Tests/Types/FullTextSearchTests.cs @@ -9,7 +9,7 @@ namespace Npgsql.Tests.Types; -public class FullTextSearchTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) +public class FullTextSearchTests : TestBase { [Test] public Task TsVector() diff --git a/test/Npgsql.Tests/Types/GeometricTypeTests.cs b/test/Npgsql.Tests/Types/GeometricTypeTests.cs index 016aee1b03..20a7606e04 100644 --- a/test/Npgsql.Tests/Types/GeometricTypeTests.cs +++ b/test/Npgsql.Tests/Types/GeometricTypeTests.cs @@ -10,7 +10,7 @@ namespace Npgsql.Tests.Types; /// /// https://www.postgresql.org/docs/current/static/datatype-geometric.html /// -class GeometricTypeTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) +class GeometricTypeTests : TestBase { [Test] public Task Point() diff --git a/test/Npgsql.Tests/Types/HstoreTests.cs b/test/Npgsql.Tests/Types/HstoreTests.cs index 779aa8fad1..2d42be4448 100644 --- 
a/test/Npgsql.Tests/Types/HstoreTests.cs +++ b/test/Npgsql.Tests/Types/HstoreTests.cs @@ -5,7 +5,7 @@ namespace Npgsql.Tests.Types; -public class HstoreTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) +public class HstoreTests : TestBase { [Test] public Task Hstore() diff --git a/test/Npgsql.Tests/Types/InternalTypeTests.cs b/test/Npgsql.Tests/Types/InternalTypeTests.cs index cd1c2190ed..21b9e8c24f 100644 --- a/test/Npgsql.Tests/Types/InternalTypeTests.cs +++ b/test/Npgsql.Tests/Types/InternalTypeTests.cs @@ -4,7 +4,7 @@ namespace Npgsql.Tests.Types; -public class InternalTypeTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) +public class InternalTypeTests : TestBase { [Test] public async Task Read_internal_char() @@ -96,4 +96,4 @@ public async Task NpgsqlLogSequenceNumber() } #endregion NpgsqlLogSequenceNumber / PgLsn -} \ No newline at end of file +} diff --git a/test/Npgsql.Tests/Types/JsonDynamicTests.cs b/test/Npgsql.Tests/Types/JsonDynamicTests.cs index ecc3fb5a66..a3e68838ac 100644 --- a/test/Npgsql.Tests/Types/JsonDynamicTests.cs +++ b/test/Npgsql.Tests/Types/JsonDynamicTests.cs @@ -7,11 +7,9 @@ namespace Npgsql.Tests.Types; -[TestFixture(MultiplexingMode.NonMultiplexing, "json")] -[TestFixture(MultiplexingMode.NonMultiplexing, "jsonb")] -[TestFixture(MultiplexingMode.Multiplexing, "json")] -[TestFixture(MultiplexingMode.Multiplexing, "jsonb")] -public class JsonDynamicTests : MultiplexingTestBase +[TestFixture("json")] +[TestFixture("jsonb")] +public class JsonDynamicTests : TestBase { [Test] public async Task As_poco() @@ -418,8 +416,7 @@ record ExtendedDerivedWeatherForecast : DerivedWeatherForecast #endregion Polymorphic - public JsonDynamicTests(MultiplexingMode multiplexingMode, string dataTypeName) - : base(multiplexingMode) + public JsonDynamicTests(string dataTypeName) { DataSource = CreateDataSource(b => b.EnableDynamicJson()); diff --git a/test/Npgsql.Tests/Types/JsonPathTests.cs 
b/test/Npgsql.Tests/Types/JsonPathTests.cs index 044ecc1827..62db50032b 100644 --- a/test/Npgsql.Tests/Types/JsonPathTests.cs +++ b/test/Npgsql.Tests/Types/JsonPathTests.cs @@ -6,7 +6,7 @@ namespace Npgsql.Tests.Types; -public class JsonPathTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) +public class JsonPathTests : TestBase { static readonly object[] ReadWriteCases = [ diff --git a/test/Npgsql.Tests/Types/JsonTests.cs b/test/Npgsql.Tests/Types/JsonTests.cs index 8b5e36bc7e..5cf8504ac4 100644 --- a/test/Npgsql.Tests/Types/JsonTests.cs +++ b/test/Npgsql.Tests/Types/JsonTests.cs @@ -9,11 +9,9 @@ namespace Npgsql.Tests.Types; -[TestFixture(MultiplexingMode.NonMultiplexing, "json")] -[TestFixture(MultiplexingMode.NonMultiplexing, "jsonb")] -[TestFixture(MultiplexingMode.Multiplexing, "json")] -[TestFixture(MultiplexingMode.Multiplexing, "jsonb")] -public class JsonTests : MultiplexingTestBase +[TestFixture("json")] +[TestFixture("jsonb")] +public class JsonTests : TestBase { [Test] public async Task As_string() @@ -226,8 +224,7 @@ public async Task Write_jsonobject_array_without_npgsqldbtype() await cmd.ExecuteNonQueryAsync(); } - public JsonTests(MultiplexingMode multiplexingMode, string dataTypeName) - : base(multiplexingMode) + public JsonTests(string dataTypeName) { if (dataTypeName == "jsonb") using (var conn = OpenConnection()) diff --git a/test/Npgsql.Tests/Types/LTreeTests.cs b/test/Npgsql.Tests/Types/LTreeTests.cs index d0ab3dbf29..c7498adf83 100644 --- a/test/Npgsql.Tests/Types/LTreeTests.cs +++ b/test/Npgsql.Tests/Types/LTreeTests.cs @@ -6,7 +6,7 @@ namespace Npgsql.Tests.Types; -public class LTreeTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) +public class LTreeTests : TestBase { [Test] public Task LQuery() diff --git a/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs b/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs index 9dd2e9e9fb..730188330e 100644 --- 
a/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs +++ b/test/Npgsql.Tests/Types/LegacyDateTimeTests.cs @@ -7,7 +7,6 @@ namespace Npgsql.Tests.Types; -// Since this test suite manipulates TimeZone, it is incompatible with multiplexing [NonParallelizable] public class LegacyDateTimeTests : TestBase { diff --git a/test/Npgsql.Tests/Types/MiscTypeTests.cs b/test/Npgsql.Tests/Types/MiscTypeTests.cs index ce638ebfef..e2bd17cf29 100644 --- a/test/Npgsql.Tests/Types/MiscTypeTests.cs +++ b/test/Npgsql.Tests/Types/MiscTypeTests.cs @@ -9,7 +9,7 @@ namespace Npgsql.Tests.Types; /// /// Tests on PostgreSQL types which don't fit elsewhere /// -class MiscTypeTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) +class MiscTypeTests : TestBase { [Test] public async Task Boolean() @@ -103,9 +103,6 @@ public async Task AllResultTypesAreUnknown() [Test, Description("Mixes and matches an unknown type with a known type")] public async Task UnknownResultTypeList() { - if (IsMultiplexing) - return; - await using var conn = await OpenConnectionAsync(); await using var cmd = new NpgsqlCommand("SELECT TRUE, 8", conn); cmd.UnknownResultTypeList = [true, false]; diff --git a/test/Npgsql.Tests/Types/MultirangeTests.cs b/test/Npgsql.Tests/Types/MultirangeTests.cs index 53eb6e210a..9bf53bf528 100644 --- a/test/Npgsql.Tests/Types/MultirangeTests.cs +++ b/test/Npgsql.Tests/Types/MultirangeTests.cs @@ -106,7 +106,6 @@ public async Task Unmapped_multirange_with_mapped_subtype() var typeName = await GetTempTypeName(conn); await conn.ExecuteNonQueryAsync($"CREATE TYPE {typeName} AS RANGE(subtype=text)"); - await Task.Yield(); // TODO: fix multiplexing deadlock bug conn.ReloadTypes(); Assert.That(await conn.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1)); @@ -133,7 +132,6 @@ public async Task Unmapped_multirange_supported_only_with_EnableUnmappedTypes() var rangeType = await GetTempTypeName(connection); var multirangeTypeName = rangeType + "_multirange"; await 
connection.ExecuteNonQueryAsync($"CREATE TYPE {rangeType} AS RANGE(subtype=text)"); - await Task.Yield(); // TODO: fix multiplexing deadlock bug await connection.ReloadTypesAsync(); var errorMessage = string.Format( diff --git a/test/Npgsql.Tests/Types/NetworkTypeTests.cs b/test/Npgsql.Tests/Types/NetworkTypeTests.cs index 9a15555989..3ddc78e87c 100644 --- a/test/Npgsql.Tests/Types/NetworkTypeTests.cs +++ b/test/Npgsql.Tests/Types/NetworkTypeTests.cs @@ -13,7 +13,7 @@ namespace Npgsql.Tests.Types; /// /// https://www.postgresql.org/docs/current/static/datatype-net-types.html /// -class NetworkTypeTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) +class NetworkTypeTests : TestBase { [Test] public Task Inet_v4_as_IPAddress() diff --git a/test/Npgsql.Tests/Types/NumericTests.cs b/test/Npgsql.Tests/Types/NumericTests.cs index 2a651b3b87..38b95cfc0e 100644 --- a/test/Npgsql.Tests/Types/NumericTests.cs +++ b/test/Npgsql.Tests/Types/NumericTests.cs @@ -7,7 +7,7 @@ namespace Npgsql.Tests.Types; -public class NumericTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) +public class NumericTests : TestBase { static readonly object[] ReadWriteCases = [ diff --git a/test/Npgsql.Tests/Types/NumericTypeTests.cs b/test/Npgsql.Tests/Types/NumericTypeTests.cs index f3e535152e..dc41a387c8 100644 --- a/test/Npgsql.Tests/Types/NumericTypeTests.cs +++ b/test/Npgsql.Tests/Types/NumericTypeTests.cs @@ -13,7 +13,7 @@ namespace Npgsql.Tests.Types; /// /// https://www.postgresql.org/docs/current/static/datatype-numeric.html /// -public class NumericTypeTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) +public class NumericTypeTests : TestBase { [Test] public async Task Int16() diff --git a/test/Npgsql.Tests/Types/RangeTests.cs b/test/Npgsql.Tests/Types/RangeTests.cs index 773815463d..23974f5583 100644 --- a/test/Npgsql.Tests/Types/RangeTests.cs +++ b/test/Npgsql.Tests/Types/RangeTests.cs @@ -11,7 
+11,7 @@ namespace Npgsql.Tests.Types; -class RangeTests : MultiplexingTestBase +class RangeTests : TestBase { static readonly TestCaseData[] RangeTestCases = [ @@ -171,7 +171,6 @@ public async Task Unmapped_range_with_mapped_subtype() var typeName = await GetTempTypeName(conn); await conn.ExecuteNonQueryAsync($"CREATE TYPE {typeName} AS RANGE(subtype=text)"); - await Task.Yield(); // TODO: fix multiplexing deadlock bug conn.ReloadTypes(); Assert.That(await conn.ExecuteScalarAsync("SELECT 1"), Is.EqualTo(1)); @@ -197,7 +196,6 @@ public async Task Unmapped_range_supported_only_with_EnableUnmappedTypes() await using var connection = await DataSource.OpenConnectionAsync(); var rangeType = await GetTempTypeName(connection); await connection.ExecuteNonQueryAsync($"CREATE TYPE {rangeType} AS RANGE(subtype=text)"); - await Task.Yield(); // TODO: fix multiplexing deadlock bug await connection.ReloadTypesAsync(); var errorMessage = string.Format( @@ -458,7 +456,7 @@ [new NpgsqlRange(May_17_2018, false, true, May_18_2018, false, false)] protected override NpgsqlDataSource DataSource { get; } - public RangeTests(MultiplexingMode multiplexingMode) : base(multiplexingMode) + public RangeTests() => DataSource = CreateDataSource(builder => { builder.ConnectionStringBuilder.Timezone = "Europe/Berlin"; diff --git a/test/Npgsql.Tests/Types/RecordTests.cs b/test/Npgsql.Tests/Types/RecordTests.cs index 86c3fd1875..2fd330badf 100644 --- a/test/Npgsql.Tests/Types/RecordTests.cs +++ b/test/Npgsql.Tests/Types/RecordTests.cs @@ -7,7 +7,7 @@ namespace Npgsql.Tests.Types; -public class RecordTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) +public class RecordTests : TestBase { [Test] [IssueLink("https://github.com/npgsql/npgsql/issues/724")] diff --git a/test/Npgsql.Tests/Types/TextTests.cs b/test/Npgsql.Tests/Types/TextTests.cs index 75de705c6c..27b9566009 100644 --- a/test/Npgsql.Tests/Types/TextTests.cs +++ b/test/Npgsql.Tests/Types/TextTests.cs @@ -15,7 
+15,7 @@ namespace Npgsql.Tests.Types; /// /// https://www.postgresql.org/docs/current/static/datatype-character.html /// -public class TextTests(MultiplexingMode multiplexingMode) : MultiplexingTestBase(multiplexingMode) +public class TextTests : TestBase { [Test] public Task Text_as_string() From 205de93744105055614e4a4518e0537156e22189 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Sat, 28 Feb 2026 09:51:38 +0200 Subject: [PATCH 702/761] Remove leftover empty tests --- test/Npgsql.Tests/TransactionTests.cs | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/test/Npgsql.Tests/TransactionTests.cs b/test/Npgsql.Tests/TransactionTests.cs index 57e0cc523f..2832ed7fa1 100644 --- a/test/Npgsql.Tests/TransactionTests.cs +++ b/test/Npgsql.Tests/TransactionTests.cs @@ -517,20 +517,6 @@ public async Task Transaction_not_supported() } } - [Test] - [IssueLink("https://github.com/npgsql/npgsql/issues/3248")] - // More at #3254 - public async Task Bug3248_Dispose_transaction_Rollback() - { - } - - [Test] - [IssueLink("https://github.com/npgsql/npgsql/issues/3248")] - // More at #3254 - public async Task Bug3248_Dispose_connection_Rollback() - { - } - [Test] [IssueLink("https://github.com/npgsql/npgsql/issues/3306")] [TestCase(true)] From 4984a2d7c548a05915bf85505226682a3bdbb271 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Mon, 2 Mar 2026 16:31:08 +0300 Subject: [PATCH 703/761] Fix using infinite timeout with replication connection (#6464) Fixes #6456 --- src/Npgsql/Replication/ReplicationConnection.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Npgsql/Replication/ReplicationConnection.cs b/src/Npgsql/Replication/ReplicationConnection.cs index 69f7a6d010..3877254521 100644 --- a/src/Npgsql/Replication/ReplicationConnection.cs +++ b/src/Npgsql/Replication/ReplicationConnection.cs @@ -891,7 +891,7 @@ void SetTimeouts(TimeSpan readTimeout, TimeSpan writeTimeout) var connector = Connector; var readBuffer = connector.ReadBuffer; if 
(readBuffer != null) - readBuffer.Timeout = readTimeout > TimeSpan.Zero ? readTimeout : TimeSpan.Zero; + readBuffer.Timeout = readTimeout > TimeSpan.Zero ? readTimeout : Timeout.InfiniteTimeSpan; var writeBuffer = connector.WriteBuffer; if (writeBuffer != null) From 8cce50629c15409aaeb3bf9943f98d234efd1ed2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Mar 2026 09:44:49 +0200 Subject: [PATCH 704/761] Bump NodaTime from 3.3.0 to 3.3.1 (#6465) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index a197b8c29e..c333548633 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -16,7 +16,7 @@ - + From 44389676c447c24d22b822ddb3b7fe86c3f9a690 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Mar 2026 00:13:12 +0200 Subject: [PATCH 705/761] Bump Scriban.Signed from 6.5.3 to 6.5.4 (#6466) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index c333548633..91cdbef89f 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -25,7 +25,7 @@ - + From bd8c2d7338c93d90709adf37103896e6decb1dd2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Mar 2026 23:57:51 +0200 Subject: [PATCH 706/761] Bump actions/setup-dotnet from 5.1.0 to 5.2.0 (#6468) --- .github/workflows/build.yml | 6 +++--- .github/workflows/native-aot.yml | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 29158fb563..924622255b 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -85,7 +85,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v5.1.0 + uses: 
actions/setup-dotnet@v5.2.0 - name: Build run: dotnet build -c ${{ matrix.config }} @@ -354,7 +354,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v5.1.0 + uses: actions/setup-dotnet@v5.2.0 - name: Pack run: dotnet pack --configuration Release --property:PackageOutputPath="$PWD/nupkgs" --version-suffix "ci.$(date -u +%Y%m%dT%H%M%S)+sha.${GITHUB_SHA:0:9}" -p:ContinuousIntegrationBuild=true @@ -386,7 +386,7 @@ jobs: uses: actions/checkout@v6 - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v5.1.0 + uses: actions/setup-dotnet@v5.2.0 - name: Pack run: dotnet pack --configuration Release --property:PackageOutputPath="$PWD/nupkgs" -p:ContinuousIntegrationBuild=true diff --git a/.github/workflows/native-aot.yml b/.github/workflows/native-aot.yml index cdc4d77ab5..3fc070a421 100644 --- a/.github/workflows/native-aot.yml +++ b/.github/workflows/native-aot.yml @@ -110,7 +110,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v5.1.0 + uses: actions/setup-dotnet@v5.2.0 - name: Write script run: echo "$AOT_Compat" > test-aot-compatibility.ps1 @@ -144,7 +144,7 @@ jobs: ${{ runner.os }}-nuget- - name: Setup .NET Core SDK - uses: actions/setup-dotnet@v5.1.0 + uses: actions/setup-dotnet@v5.2.0 - name: Start PostgreSQL run: | From e8ccd7da47a167f0a3c1c414ccc01d9efc4feeb2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Mar 2026 22:00:01 +0000 Subject: [PATCH 707/761] Bump Scriban.Signed from 6.5.4 to 6.5.5 (#6469) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 91cdbef89f..6c005e14f2 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -25,7 +25,7 @@ - + From a7a053173eb4be80c7bb927a3e850deaa628d9c5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: 
Tue, 10 Mar 2026 08:25:59 +0200 Subject: [PATCH 708/761] Bump NUnit.Analyzers from 4.11.2 to 4.12.0 (#6473) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 6c005e14f2..d360d56c2b 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -29,7 +29,7 @@ - + From fc81e741e551e3b3e9efd9aafc01c50b0acc408f Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Wed, 11 Mar 2026 01:56:11 +0100 Subject: [PATCH 709/761] Cleanup array converter (#6472) --- .../Internal/Converters/ArrayConverter.cs | 665 +++++------------- .../Internal/Converters/ArrayConverterCore.cs | 363 ++++++++++ src/Npgsql/Internal/PgTypeInfo.cs | 18 +- .../AdoTypeInfoResolverFactory.cs | 16 +- src/Npgsql/Internal/TypeInfoMapping.cs | 8 +- src/Npgsql/Util/IterationIndices.cs | 112 +++ test/Npgsql.Tests/Types/ArrayTests.cs | 16 +- 7 files changed, 671 insertions(+), 527 deletions(-) create mode 100644 src/Npgsql/Internal/Converters/ArrayConverterCore.cs create mode 100644 src/Npgsql/Util/IterationIndices.cs diff --git a/src/Npgsql/Internal/Converters/ArrayConverter.cs b/src/Npgsql/Internal/Converters/ArrayConverter.cs index 2d6d443329..cca67a870a 100644 --- a/src/Npgsql/Internal/Converters/ArrayConverter.cs +++ b/src/Npgsql/Internal/Converters/ArrayConverter.cs @@ -1,5 +1,4 @@ using System; -using System.Buffers; using System.Collections.Generic; using System.Collections.Concurrent; using System.Diagnostics; @@ -7,367 +6,30 @@ using System.Threading; using System.Threading.Tasks; using Npgsql.Internal.Postgres; +using Npgsql.Util; namespace Npgsql.Internal.Converters; -struct Indices -{ - // Public field to be able to return it by ref in GetItem. - public int One; - public int[]? 
Many { get; private init; } - public int Count { get; private init; } - - public static Indices Create(int dimensions) - => dimensions switch - { - 0 => new() { Count = dimensions, One = -1 }, - 1 => new() { Count = dimensions }, - _ => new() { Count = dimensions, Many = new int[dimensions] } - }; -} - -static class IndicesExtensions -{ - // Workaround for lack of ref returns on struct fields. - public static ref int GetItem(this ref Indices indices, int index) - { - switch (indices.Count) - { - case 0: - ThrowHelper.ThrowIndexOutOfRangeException("Cannot index into a 0-dimensional array."); - return ref Unsafe.NullRef(); - case 1: - Debug.Assert(index is 0); - Debug.Assert(indices.Many is null); - return ref indices.One; - default: - return ref indices.Many![index]; - } - } -} - -interface IElementOperations -{ - object CreateCollection(ReadOnlySpan lengths); - int GetCollectionCount(object collection, out int[]? lengths); - Size? GetSizeOrDbNull(SizeContext context, object collection, Indices indices, ref object? writeState); - ValueTask Read(bool async, PgReader reader, bool isDbNull, object collection, Indices indices, CancellationToken cancellationToken = default); - ValueTask Write(bool async, PgWriter writer, object collection, Indices indices, CancellationToken cancellationToken = default); -} - -readonly struct PgArrayConverter( - IElementOperations elemOps, - bool elemTypeDbNullable, - int? expectedDimensions, - BufferRequirements bufferRequirements, - PgTypeId elemTypeId, - int pgLowerBound = 1) -{ - public const string ReadNonNullableCollectionWithNullsExceptionMessage = - "Cannot read a non-nullable collection of elements because the returned array contains nulls. Call GetFieldValue with a nullable collection type instead."; - public const int MaxDimensions = 8; - - public bool ElemTypeDbNullable { get; } = elemTypeDbNullable; - - bool IsDbNull(object values, Indices indices) - { - object? 
state = null; - return elemOps.GetSizeOrDbNull(new(DataFormat.Binary, bufferRequirements.Write), values, indices, ref state) is null; - } - - Size GetElemsSize(object values, (Size, object?)[] elemStates, out bool anyElementState, DataFormat format, int count, Indices indices, int[]? lengths = null) - { - Debug.Assert(elemStates.Length >= count); - var totalSize = Size.Zero; - var context = new SizeContext(format, bufferRequirements.Write); - anyElementState = false; - var lastLength = lengths?[^1] ?? count; - ref var lastIndex = ref indices.GetItem(indices.Count - 1); - var i = 0; - do - { - ref var elemItem = ref elemStates[i++]; - var elemState = (object?)null; - var size = elemOps.GetSizeOrDbNull(context, values, indices, ref elemState); - anyElementState = anyElementState || elemState is not null; - elemItem = (size ?? -1, elemState); - totalSize = totalSize.Combine(size ?? 0); - } - // We can immediately continue if we didn't reach the end of the last dimension. - while (++lastIndex < lastLength || (indices.Count > 1 && CarryIndices(lengths!, indices))); - - return totalSize; - } - - Size GetFixedElemsSize(Size elemSize, object values, int count, Indices indices, int[]? lengths = null) - { - var nulls = 0; - var lastLength = lengths?[^1] ?? count; - ref var lastIndex = ref indices.GetItem(indices.Count - 1); - if (ElemTypeDbNullable) - do - { - if (IsDbNull(values, indices)) - nulls++; - } - // We can immediately continue if we didn't reach the end of the last dimension. - while (++lastIndex < lastLength || (indices.Count > 1 && CarryIndices(lengths!, indices))); - - return (count - nulls) * elemSize.Value; - } - - int GetFormatSize(int count, int dimensions) - => sizeof(int) + // Dimensions - sizeof(int) + // Flags - sizeof(int) + // Element OID - dimensions * (sizeof(int) + sizeof(int)) + // Dimensions * (array length and lower bound) - sizeof(int) * count; // Element length integers - - public Size GetSize(SizeContext context, object values, ref object? 
writeState) - { - var count = elemOps.GetCollectionCount(values, out var lengths); - var dimensions = lengths?.Length ?? 1; - if (dimensions > MaxDimensions) - ThrowHelper.ThrowArgumentException($"Postgres arrays can have at most {MaxDimensions} dimensions.", nameof(values)); - - var formatSize = Size.Create(GetFormatSize(count, dimensions)); - if (count is 0) - return formatSize; - - Size elemsSize; - var indices = Indices.Create(dimensions); - if (bufferRequirements.Write is { Kind: SizeKind.Exact } req) - { - elemsSize = GetFixedElemsSize(req, values, count, indices, lengths); - writeState = new WriteState { Count = count, Indices = indices, Lengths = lengths, ArrayPool = null, Data = default, AnyWriteState = false }; - } - else - { - var arrayPool = ArrayPool<(Size, object?)>.Shared; - var data = ArrayPool<(Size, object?)>.Shared.Rent(count); - elemsSize = GetElemsSize(values, data, out var elemStateDisposable, context.Format, count, indices, lengths); - writeState = new WriteState - { Count = count, Indices = indices, Lengths = lengths, - ArrayPool = arrayPool, Data = new(data, 0, count), AnyWriteState = elemStateDisposable }; - } - - return formatSize.Combine(elemsSize); - } - - sealed class WriteState : MultiWriteState - { - public required int Count { get; init; } - public required Indices Indices { get; init; } - public required int[]? 
Lengths { get; init; } - } - - object ReadDimsAndCreateCollection(PgReader reader, int dimensions, out int lastDimLength) - { - Debug.Assert(!reader.ShouldBuffer((sizeof(int) + sizeof(int)) * dimensions)); - - Span dimLengths = stackalloc int[MaxDimensions]; - lastDimLength = 0; - for (var i = 0; i < dimensions; i++) - { - lastDimLength = reader.ReadInt32(); - _ = reader.ReadInt32(); // Lower bound - dimLengths[i] = lastDimLength; - } - - var collection = elemOps.CreateCollection(dimLengths.Slice(0, dimensions)); - Debug.Assert(dimensions <= 1 || collection is Array a && a.Rank == dimensions); - return collection; - } - - public async ValueTask Read(bool async, PgReader reader, CancellationToken cancellationToken = default) - { - if (reader.ShouldBuffer(sizeof(int) + sizeof(int) + sizeof(uint))) - await reader.Buffer(async, sizeof(int) + sizeof(int) + sizeof(uint), cancellationToken).ConfigureAwait(false); - - var dimensions = reader.ReadInt32(); - if (dimensions > MaxDimensions) - ThrowHelper.ThrowInvalidOperationException($"Postgres arrays can have at most {MaxDimensions} dimensions."); - - var containsNulls = reader.ReadInt32() is 1; - _ = reader.ReadUInt32(); // Element OID. - - if (dimensions is not 0 && expectedDimensions is not null && dimensions != expectedDimensions) - ThrowHelper.ThrowInvalidCastException( - $"Cannot read an array value with {dimensions} dimension{(dimensions == 1 ? "" : "s")} into a " - + $"collection type with {expectedDimensions} dimension{(expectedDimensions == 1 ? "" : "s")}. " - + $"Call GetValue or a version of GetFieldValue with the commas being the expected amount of dimensions."); - - if (containsNulls && !ElemTypeDbNullable) - ThrowHelper.ThrowInvalidCastException(ReadNonNullableCollectionWithNullsExceptionMessage); - - // Make sure we can read length + lower bound N dimension times. 
- if (reader.ShouldBuffer((sizeof(int) + sizeof(int)) * dimensions)) - await reader.Buffer(async, (sizeof(int) + sizeof(int)) * dimensions, cancellationToken).ConfigureAwait(false); - - var collection = ReadDimsAndCreateCollection(reader, dimensions, out var lastDimLength); - if (dimensions is 0 || lastDimLength is 0) - return collection; - - _ = elemOps.GetCollectionCount(collection, out var dimLengths); - var indices = Indices.Create(dimensions); - - do - { - if (reader.ShouldBuffer(sizeof(int))) - await reader.Buffer(async, sizeof(int), cancellationToken).ConfigureAwait(false); - - var length = reader.ReadInt32(); - var isDbNull = length == -1; - if (!isDbNull) - { - var scope = await reader.BeginNestedRead(async, length, bufferRequirements.Read, cancellationToken).ConfigureAwait(false); - try - { - await elemOps.Read(async, reader, isDbNull, collection, indices, cancellationToken).ConfigureAwait(false); - } - finally - { - if (async) - await scope.DisposeAsync().ConfigureAwait(false); - else - scope.Dispose(); - } - } - else - await elemOps.Read(async, reader, isDbNull, collection, indices, cancellationToken).ConfigureAwait(false); - } - // We can immediately continue if we didn't reach the end of the last dimension. - while (++indices.GetItem(indices.Count - 1) < lastDimLength || (dimLengths is not null && CarryIndices(dimLengths, indices))); - - return collection; - } - - static bool CarryIndices(int[] lengths, Indices indices) - { - Debug.Assert(lengths.Length > 1); - Debug.Assert(indices.Count > 1); - - // Find the first dimension from the end that isn't at or past its length, increment it and bring all previous dimensions to zero. - for (var dim = indices.Count - 1; dim >= 0; dim--) - { - if (indices.GetItem(dim) >= lengths[dim] - 1) - continue; - - indices.Many.AsSpan().Slice(dim + 1).Clear(); - indices.GetItem(dim)++; - return true; - } - - // We're done if we can't find any dimension that isn't at its length. 
- return false; - } - - public async ValueTask Write(bool async, PgWriter writer, object values, CancellationToken cancellationToken) - { - var (count, dims, state) = writer.Current.WriteState switch - { - WriteState writeState => (writeState.Count, writeState.Lengths?.Length ?? 1 , writeState), - null => (0, values is Array a ? a.Rank : 1, null), - _ => throw new InvalidCastException($"Invalid write state, expected {typeof(WriteState).FullName}.") - }; - - if (writer.ShouldFlush(GetFormatSize(count, dims))) - await writer.Flush(async, cancellationToken).ConfigureAwait(false); - - writer.WriteInt32(dims); // Dimensions - writer.WriteInt32(0); // Flags (not really used) - writer.WriteAsOid(elemTypeId); - for (var dim = 0; dim < dims; dim++) - { - writer.WriteInt32(state?.Lengths?[dim] ?? count); - writer.WriteInt32(pgLowerBound); // Lower bound - } - - // We can stop here for empty collections. - if (state is null) - return; - - var elemTypeDbNullable = ElemTypeDbNullable; - var elemData = state.Data.Array; - - var indices = state.Indices; - if (indices.Many is not null) - Array.Clear(indices.Many, 0 , indices.Many.Length); - var lastLength = state.Lengths?[^1] ?? state.Count; - var i = state.Data.Offset; - do - { - if (writer.ShouldFlush(sizeof(int))) - await writer.Flush(async, cancellationToken).ConfigureAwait(false); - - var elem = elemData?[i++]; - var size = elem?.Size ?? (elemTypeDbNullable && IsDbNull(values, indices) ? -1 : bufferRequirements.Write); - if (size.Kind is SizeKind.Unknown) - throw new NotImplementedException(); - - var length = size.Value; - writer.WriteInt32(length); - if (length != -1) - { - using var _ = await writer.BeginNestedWrite(async, bufferRequirements.Write, length, elem?.WriteState, cancellationToken).ConfigureAwait(false); - await elemOps.Write(async, writer, values, indices, cancellationToken).ConfigureAwait(false); - } - } - // We can immediately continue if we didn't reach the end of the last dimension. 
- while (++indices.GetItem(indices.Count - 1) < lastLength || (state.Lengths is not null && CarryIndices(state.Lengths, indices))); - } - - // Using a function pointer here is safe against assembly unloading as the instance reference that the static pointer method lives on is passed along. - // As such the instance cannot be collected by the gc which means the entire assembly is prevented from unloading until we're done. - // The alternatives are: - // 1. Add a virtual method and make AwaitTask call into it (bloating the vtable of all derived types). - // 2. Using a delegate, meaning we add a static field + an alloc per T + metadata, slightly slower dispatch perf so overall strictly worse as well. - [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder))] - public static async ValueTask AwaitTask(Task task, Continuation continuation, object collection, Indices indices) - { - await task.ConfigureAwait(false); - continuation.Invoke(task, collection, indices); - // Guarantee the type stays loaded until the function pointer call is done. - GC.KeepAlive(continuation.Handle); - } - - // Split out into a struct as unsafe and async don't mix, while we do want a nicely typed function pointer signature to prevent mistakes. - public readonly unsafe struct Continuation - { - public object Handle { get; } - readonly delegate* _continuation; - - /// A reference to the type that houses the static method points to. - /// The continuation - public Continuation(object handle, delegate* continuation) - { - Handle = handle; - _continuation = continuation; - } - - public void Invoke(Task task, object collection, Indices indices) => _continuation(task, collection, indices); - } -} - abstract class ArrayConverter : PgStreamingConverter where T : notnull { - readonly PgArrayConverter _pgArrayConverter; + readonly ArrayConverterCore _arrayConverterCore; - private protected ArrayConverter(int? 
expectedDimensions, PgConverterResolution elemResolution, int pgLowerBound = 1) + ArrayConverter(int? expectedDimensions, PgConverterResolution elemResolution, int pgLowerBound = 1) { if (!elemResolution.Converter.CanConvert(DataFormat.Binary, out var bufferRequirements)) throw new NotSupportedException("Element converter has to support the binary format to be compatible."); - _pgArrayConverter = new((IElementOperations)this, elemResolution.Converter.IsDbNullable, expectedDimensions, + PgTypeInfo elementTypeInfo = null!; // TODO until https://github.com/npgsql/npgsql/pull/6316 + _arrayConverterCore = new((IElementOperations)this, elementTypeInfo, elemResolution.Converter.IsDbNullable, expectedDimensions, bufferRequirements, elemResolution.PgTypeId, pgLowerBound); } - public override T Read(PgReader reader) => (T)_pgArrayConverter.Read(async: false, reader).Result; + public override T Read(PgReader reader) => (T)_arrayConverterCore.Read(async: false, reader).Result; public override unsafe ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) { // Cheap if we have all the data. - var task = _pgArrayConverter.Read(async: true, reader, cancellationToken); + var task = _arrayConverterCore.Read(async: true, reader, cancellationToken); if (task.IsCompletedSuccessfully) return new((T)task.Result); @@ -392,213 +54,206 @@ static void UnboxAndComplete(Task task, AsyncHelpers.CompletionSource completion } public override Size GetSize(SizeContext context, T values, ref object? 
writeState) - => _pgArrayConverter.GetSize(context, values, ref writeState); + => _arrayConverterCore.GetSize(context, values, ref writeState); public override void Write(PgWriter writer, T values) - => _pgArrayConverter.Write(async: false, writer, values, CancellationToken.None).GetAwaiter().GetResult(); + => _arrayConverterCore.Write(async: false, writer, values, CancellationToken.None).GetAwaiter().GetResult(); public override ValueTask WriteAsync(PgWriter writer, T values, CancellationToken cancellationToken = default) - => _pgArrayConverter.Write(async: true, writer, values, cancellationToken); + => _arrayConverterCore.Write(async: true, writer, values, cancellationToken); - protected static int GetLengths(Array array, out int[]? lengths) - { - var dimensions = array.Rank; - - if (dimensions is 1) - { - lengths = null; - return array.Length; - } + public static ArrayConverter CreateArrayBased(PgConverterResolution elemResolution, Type? effectiveType = null, int pgLowerBound = 1) + => new ArrayBased(elemResolution, effectiveType, pgLowerBound); - lengths = new int[dimensions]; - for (var i = 0; i < lengths.Length; i++) - lengths[i] = array.GetLength(i); + public static ArrayConverter CreateListBased(PgConverterResolution elemResolution, int pgLowerBound = 1) + => new ListBased(elemResolution, pgLowerBound); - // If we have a multidim array it may throw an overflow exception for large arrays (LongLength exists for these cases) - // however anything over int.MaxValue wouldn't fit in a parameter anyway so easier to throw here than deal with a long. - return array.Length; - } -} - -sealed class ArrayBasedArrayConverter(PgConverterResolution elemResolution, Type? effectiveType = null, int pgLowerBound = 1) - : ArrayConverter(expectedDimensions: effectiveType is null ? 1 : effectiveType.IsArray ? effectiveType.GetArrayRank() : null, + sealed class ArrayBased(PgConverterResolution elemResolution, Type? 
effectiveType = null, int pgLowerBound = 1) + : ArrayConverter(expectedDimensions: effectiveType is null ? 1 : effectiveType.IsArray ? effectiveType.GetArrayRank() : null, elemResolution, pgLowerBound), IElementOperations - where T : class -{ - readonly PgConverter _elemConverter = elemResolution.GetConverter(); - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - static TElement? GetValue(object collection, Indices indices) { - Debug.Assert(indices.Count > 0); - switch (indices.Count) - { - case 1: - // Justification: exact type Unsafe.As used to avoid the cast overhead for per element calls. - Debug.Assert(collection is TElement?[]); - return Unsafe.As(collection)[indices.One]; - default: - // Justification: exact type Unsafe.As used to avoid the cast overhead for per element calls. - Debug.Assert(collection is Array); - return (TElement?)Unsafe.As(collection).GetValue(indices.Many!); - } - } + readonly PgConverter _elemConverter = elemResolution.GetConverter(); - [MethodImpl(MethodImplOptions.AggressiveInlining)] - static void SetValue(object collection, Indices indices, TElement? value) - { - Debug.Assert(indices.Count > 0); - switch (indices.Count) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + static TElement? GetValue(object collection, IterationIndices indices) { + Debug.Assert(indices.Rank > 0); + switch (indices.Rank) + { case 1: // Justification: exact type Unsafe.As used to avoid the cast overhead for per element calls. Debug.Assert(collection is TElement?[]); - Unsafe.As(collection)[indices.One] = value; - break; + return Unsafe.As(collection)[indices.One]; + case 2: + // Justification: exact type Unsafe.As used to avoid the cast overhead for per element calls. + Debug.Assert(collection is TElement?[,]); + return Unsafe.As(collection)[indices.Many![0], indices.Many![1]]; default: // Justification: exact type Unsafe.As used to avoid the cast overhead for per element calls. 
Debug.Assert(collection is Array); - Unsafe.As(collection).SetValue(value, indices.Many!); - break; + return (TElement?)Unsafe.As(collection).GetValue(indices.Many!); + } } - } - object IElementOperations.CreateCollection(ReadOnlySpan lengths) - => lengths.Length switch + [MethodImpl(MethodImplOptions.AggressiveInlining)] + static void SetValue(object collection, IterationIndices indices, TElement? value) { - 0 => Array.Empty(), - 1 when lengths[0] == 0 => Array.Empty(), - 1 => new TElement?[lengths[0]], - 2 => new TElement?[lengths[0], lengths[1]], - 3 => new TElement?[lengths[0], lengths[1], lengths[2]], - 4 => new TElement?[lengths[0], lengths[1], lengths[2], lengths[3]], - 5 => new TElement?[lengths[0], lengths[1], lengths[2], lengths[3], lengths[4]], - 6 => new TElement?[lengths[0], lengths[1], lengths[2], lengths[3], lengths[4], lengths[5]], - 7 => new TElement?[lengths[0], lengths[1], lengths[2], lengths[3], lengths[4], lengths[5], lengths[6]], - 8 => new TElement?[lengths[0], lengths[1], lengths[2], lengths[3], lengths[4], lengths[5], lengths[6], lengths[7]], - _ => throw new InvalidOperationException("Postgres arrays can have at most 8 dimensions.") - }; - - int IElementOperations.GetCollectionCount(object collection, out int[]? lengths) - => GetLengths((Array)collection, out lengths); - - Size? IElementOperations.GetSizeOrDbNull(SizeContext context, object collection, Indices indices, ref object? writeState) - => _elemConverter.GetSizeOrDbNull(context.Format, context.BufferRequirement, GetValue(collection, indices), ref writeState); - - ValueTask IElementOperations.Read(bool async, PgReader reader, bool isDbNull, object collection, Indices indices, CancellationToken cancellationToken) - { - if (!isDbNull && async && _elemConverter is PgStreamingConverter streamingConverter) - return ReadAsync(streamingConverter, reader, collection, indices, cancellationToken); - - SetValue(collection, indices, isDbNull ? 
default : _elemConverter.Read(reader)); - return new(); - } + Debug.Assert(indices.Rank > 0); + switch (indices.Rank) + { + case 1: + // Justification: exact type Unsafe.As used to avoid the cast overhead for per element calls. + Debug.Assert(collection is TElement?[]); + Unsafe.As(collection)[indices.One] = value; + break; + case 2: + // Justification: exact type Unsafe.As used to avoid the cast overhead for per element calls. + Debug.Assert(collection is TElement?[,]); + Unsafe.As(collection)[indices.Many![0], indices.Many![1]] = value; + break; + default: + // Justification: exact type Unsafe.As used to avoid the cast overhead for per element calls. + Debug.Assert(collection is Array); + Unsafe.As(collection).SetValue(value, indices.Many!); + break; + } + } - unsafe ValueTask ReadAsync(PgStreamingConverter converter, PgReader reader, object collection, Indices indices, CancellationToken cancellationToken) - { - if (converter.ReadAsyncAsTask(reader, cancellationToken, out var result) is { } task) - return PgArrayConverter.AwaitTask(task, new(this, &SetResult), collection, indices); + object IElementOperations.CreateCollection(ReadOnlySpan lengths) + => lengths.Length switch + { + 0 => Array.Empty(), + 1 => new TElement?[lengths[0]], + 2 => new TElement?[lengths[0], lengths[1]], + 3 => new TElement?[lengths[0], lengths[1], lengths[2]], + 4 => new TElement?[lengths[0], lengths[1], lengths[2], lengths[3]], + 5 => new TElement?[lengths[0], lengths[1], lengths[2], lengths[3], lengths[4]], + 6 => new TElement?[lengths[0], lengths[1], lengths[2], lengths[3], lengths[4], lengths[5]], + 7 => new TElement?[lengths[0], lengths[1], lengths[2], lengths[3], lengths[4], lengths[5], lengths[6]], + 8 => new TElement?[lengths[0], lengths[1], lengths[2], lengths[3], lengths[4], lengths[5], lengths[6], lengths[7]], + _ => throw new InvalidOperationException("Postgres arrays can have at most 8 dimensions.") + }; + + int IElementOperations.GetCollectionCount(object collection, out 
int[]? lengths) + => ArrayConverterCore.GetArrayLengths((Array)collection, out lengths); + + Size? IElementOperations.GetSizeOrDbNull(SizeContext context, object collection, IterationIndices indices, ref object? writeState) + => _elemConverter.GetSizeOrDbNull(context.Format, context.BufferRequirement, GetValue(collection, indices), ref writeState); + + ValueTask IElementOperations.Read(bool async, PgReader reader, bool isDbNull, object collection, IterationIndices indices, CancellationToken cancellationToken) + { + if (!isDbNull && async && _elemConverter is PgStreamingConverter streamingConverter) + return ReadAsync(streamingConverter, reader, collection, indices, cancellationToken); - SetValue(collection, indices, result); - return new(); + SetValue(collection, indices, isDbNull ? default : _elemConverter.Read(reader)); + return new(); + } - static void SetResult(Task task, object collection, Indices indices) + unsafe ValueTask ReadAsync(PgStreamingConverter converter, PgReader reader, object collection, IterationIndices indices, CancellationToken cancellationToken) { - // Justification: exact type Unsafe.As used to reduce generic duplication cost. - Debug.Assert(task is Task); - // Using .Result on ValueTask is equivalent to GetAwaiter().GetResult(), this removes TaskAwaiter rooting. 
- SetValue(collection, indices, new ValueTask(task: Unsafe.As>(task)).Result); - } - } + if (converter.ReadAsyncAsTask(reader, cancellationToken, out var result) is { } task) + return ArrayConverterCore.AwaitTask(task, new(this, &SetResult), collection, indices); - ValueTask IElementOperations.Write(bool async, PgWriter writer, object collection, Indices indices, CancellationToken cancellationToken) - { - if (async) - return _elemConverter.WriteAsync(writer, GetValue(collection, indices)!, cancellationToken); + SetValue(collection, indices, result); + return new(); - _elemConverter.Write(writer, GetValue(collection, indices)!); - return new(); - } -} + static void SetResult(Task task, object collection, IterationIndices indices) + { + // Justification: exact type Unsafe.As used to reduce generic duplication cost. + Debug.Assert(task is Task); + // Using .Result on ValueTask is equivalent to GetAwaiter().GetResult(), this removes TaskAwaiter rooting. + SetValue(collection, indices, new ValueTask(task: Unsafe.As>(task)).Result); + } + } -sealed class ListBasedArrayConverter(PgConverterResolution elemResolution, int pgLowerBound = 1) - : ArrayConverter(expectedDimensions: 1, elemResolution, pgLowerBound), IElementOperations - where T : class -{ - readonly PgConverter _elemConverter = elemResolution.GetConverter(); + ValueTask IElementOperations.Write(bool async, PgWriter writer, object collection, IterationIndices indices, CancellationToken cancellationToken) + { + if (async) + return _elemConverter.WriteAsync(writer, GetValue(collection, indices)!, cancellationToken); - [MethodImpl(MethodImplOptions.AggressiveInlining)] - static TElement? GetValue(object collection, int index) - { - // Justification: avoid the cast overhead for per element calls. 
- Debug.Assert(collection is IList); - return Unsafe.As>(collection)[index]; + _elemConverter.Write(writer, GetValue(collection, indices)!); + return new(); + } } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - static void SetValue(object collection, int index, TElement? value) + sealed class ListBased(PgConverterResolution elemResolution, int pgLowerBound = 1) + : ArrayConverter(expectedDimensions: 1, elemResolution, pgLowerBound), IElementOperations { - // Justification: avoid the cast overhead for per element calls. - Debug.Assert(collection is IList); - var list = Unsafe.As>(collection); - list.Insert(index, value); - } + readonly PgConverter _elemConverter = elemResolution.GetConverter(); - object IElementOperations.CreateCollection(ReadOnlySpan lengths) - => new List(lengths.Length is 0 ? 0 : lengths[0]); + [MethodImpl(MethodImplOptions.AggressiveInlining)] + static TElement? GetValue(object collection, int index) + { + // Justification: avoid the cast overhead for per element calls. + Debug.Assert(collection is IList); + return Unsafe.As>(collection)[index]; + } - int IElementOperations.GetCollectionCount(object collection, out int[]? lengths) - { - lengths = null; - return ((IList)collection).Count; - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + static void SetValue(object collection, int index, TElement? value) + { + // Justification: avoid the cast overhead for per element calls. + Debug.Assert(collection is IList); + var list = Unsafe.As>(collection); + list.Insert(index, value); + } - Size? IElementOperations.GetSizeOrDbNull(SizeContext context, object collection, Indices indices, ref object? writeState) - => _elemConverter.GetSizeOrDbNull(context.Format, context.BufferRequirement, GetValue(collection, indices.One), ref writeState); + object IElementOperations.CreateCollection(ReadOnlySpan lengths) + => new List(lengths.Length is 0 ? 
0 : lengths[0]); - ValueTask IElementOperations.Read(bool async, PgReader reader, bool isDbNull, object collection, Indices indices, CancellationToken cancellationToken) - { - Debug.Assert(indices.Count is 1); - if (!isDbNull && async && _elemConverter is PgStreamingConverter streamingConverter) - return ReadAsync(streamingConverter, reader, collection, indices, cancellationToken); + int IElementOperations.GetCollectionCount(object collection, out int[]? lengths) + { + lengths = null; + return ((IList)collection).Count; + } - SetValue(collection, indices.One, isDbNull ? default : _elemConverter.Read(reader)); - return new(); - } + Size? IElementOperations.GetSizeOrDbNull(SizeContext context, object collection, IterationIndices indices, ref object? writeState) + => _elemConverter.GetSizeOrDbNull(context.Format, context.BufferRequirement, GetValue(collection, indices.One), ref writeState); - unsafe ValueTask ReadAsync(PgStreamingConverter converter, PgReader reader, object collection, Indices indices, CancellationToken cancellationToken) - { - Debug.Assert(indices.Count is 1); - if (converter.ReadAsyncAsTask(reader, cancellationToken, out var result) is { } task) - return PgArrayConverter.AwaitTask(task, new(this, &SetResult), collection, indices); + ValueTask IElementOperations.Read(bool async, PgReader reader, bool isDbNull, object collection, IterationIndices indices, CancellationToken cancellationToken) + { + Debug.Assert(indices.Rank is 1); + if (!isDbNull && async && _elemConverter is PgStreamingConverter streamingConverter) + return ReadAsync(streamingConverter, reader, collection, indices, cancellationToken); - SetValue(collection, indices.One, result); - return new(); + SetValue(collection, indices.One, isDbNull ? 
default : _elemConverter.Read(reader)); + return new(); + } - static void SetResult(Task task, object collection, Indices indices) + unsafe ValueTask ReadAsync(PgStreamingConverter converter, PgReader reader, object collection, IterationIndices indices, CancellationToken cancellationToken) { - // Justification: exact type Unsafe.As used to reduce generic duplication cost. - Debug.Assert(task is Task); - // Using .Result on ValueTask is equivalent to GetAwaiter().GetResult(), this removes TaskAwaiter rooting. - SetValue(collection, indices.One, new ValueTask(task: Unsafe.As>(task)).Result); + Debug.Assert(indices.Rank is 1); + if (converter.ReadAsyncAsTask(reader, cancellationToken, out var result) is { } task) + return ArrayConverterCore.AwaitTask(task, new(this, &SetResult), collection, indices); + + SetValue(collection, indices.One, result); + return new(); + + static void SetResult(Task task, object collection, IterationIndices indices) + { + // Justification: exact type Unsafe.As used to reduce generic duplication cost. + Debug.Assert(task is Task); + // Using .Result on ValueTask is equivalent to GetAwaiter().GetResult(), this removes TaskAwaiter rooting. 
+ SetValue(collection, indices.One, new ValueTask(task: Unsafe.As>(task)).Result); + } } - } - ValueTask IElementOperations.Write(bool async, PgWriter writer, object collection, Indices indices, CancellationToken cancellationToken) - { - Debug.Assert(indices.Count is 1); - if (async) - return _elemConverter.WriteAsync(writer, GetValue(collection, indices.One)!, cancellationToken); + ValueTask IElementOperations.Write(bool async, PgWriter writer, object collection, IterationIndices indices, CancellationToken cancellationToken) + { + Debug.Assert(indices.Rank is 1); + if (async) + return _elemConverter.WriteAsync(writer, GetValue(collection, indices.One)!, cancellationToken); - _elemConverter.Write(writer, GetValue(collection, indices.One)!); - return new(); + _elemConverter.Write(writer, GetValue(collection, indices.One)!); + return new(); + } } } sealed class ArrayConverterResolver(PgResolverTypeInfo elementTypeInfo, Type effectiveType) : PgComposingConverterResolver(elementTypeInfo.PgTypeId is { } id ? 
elementTypeInfo.Options.GetArrayTypeId(id) : null, elementTypeInfo) - where T : class + where T : notnull { PgSerializerOptions Options => EffectiveTypeInfo.Options; @@ -608,10 +263,10 @@ sealed class ArrayConverterResolver(PgResolverTypeInfo elementTypeI protected override PgConverter CreateConverter(PgConverterResolution effectiveResolution) { if (typeof(T) == typeof(Array) || typeof(T).IsArray) - return new ArrayBasedArrayConverter(effectiveResolution, effectiveType); + return ArrayConverter.CreateArrayBased(effectiveResolution, effectiveType); if (typeof(T).IsConstructedGenericType && typeof(T).GetGenericTypeDefinition() == typeof(IList<>)) - return new ListBasedArrayConverter(effectiveResolution); + return ArrayConverter.CreateListBased(effectiveResolution); throw new NotSupportedException($"Unknown type T: {typeof(T).FullName}"); } diff --git a/src/Npgsql/Internal/Converters/ArrayConverterCore.cs b/src/Npgsql/Internal/Converters/ArrayConverterCore.cs new file mode 100644 index 0000000000..46aab4a9fe --- /dev/null +++ b/src/Npgsql/Internal/Converters/ArrayConverterCore.cs @@ -0,0 +1,363 @@ +using System; +using System.Buffers; +using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Threading; +using System.Threading.Tasks; +using Npgsql.Internal.Postgres; +using Npgsql.Util; + +namespace Npgsql.Internal.Converters; + +interface IElementOperations +{ + object CreateCollection(ReadOnlySpan lengths); + int GetCollectionCount(object collection, out int[]? lengths); + Size? GetSizeOrDbNull(SizeContext context, object collection, IterationIndices indices, ref object? 
writeState); + ValueTask Read(bool async, PgReader reader, bool isDbNull, object collection, IterationIndices indices, CancellationToken cancellationToken = default); + ValueTask Write(bool async, PgWriter writer, object collection, IterationIndices indices, CancellationToken cancellationToken = default); +} + +readonly struct ArrayConverterCore( + IElementOperations elemOps, + PgTypeInfo elementTypeInfo, + bool elemTypeDbNullable, + int? expectedDimensions, + BufferRequirements binaryRequirements, + PgTypeId elemTypeId, + int pgLowerBound = 1) +{ + // Exposed for testing + internal const string ReadNonNullableCollectionWithNullsExceptionMessage = + "Cannot read a non-nullable collection of elements because the returned array contains nulls. Call GetFieldValue with a nullable collection type instead."; + + PgTypeInfo ElementTypeInfo { get; } = elementTypeInfo; + bool ElemTypeDbNullable { get; } = elemTypeDbNullable; + + bool IsDbNull(object values, IterationIndices arrayIndices, ref object? writeState) + { + // This call will only skip GetSize if we are dealing with fixed size elements, otherwise we'll repeat sizing costs. + Debug.Assert(binaryRequirements.Write.Kind is SizeKind.Exact); + return elemOps.GetSizeOrDbNull(new(DataFormat.Binary, binaryRequirements.Write), values, arrayIndices, ref writeState) is null; + } + + public Size GetSize(SizeContext context, object values, ref object? 
writeState) + { + Debug.Assert(context.Format is DataFormat.Binary); + if (writeState is not null) + ThrowHelper.ThrowArgumentException("Unexpected write state, expected null.", nameof(writeState)); + + var metadata = PgArrayMetadata.Create(elemOps.GetCollectionCount(values, out var lengths), lengths); + if (metadata.TotalElements is 0) + { + Debug.Assert(writeState is null); + return metadata.BinaryPreambleByteCount; + } + + var size = Size.Create(metadata.BinaryPreambleByteCount + sizeof(int) * metadata.TotalElements); + var indices = metadata.CreateIndices(); + var anyWriteState = false; + ArrayPool<(Size, object?)>? arrayPool = null; + (Size Size, object? WriteState)[]? elemData = null; + if (binaryRequirements.Write is { Kind: SizeKind.Exact, Value: var elemByteCount }) + { + var nulls = 0; + var lastLength = metadata.LastDimension; + if (ElemTypeDbNullable) + { + do + { + object? elemState = null; + if (IsDbNull(values, indices, ref elemState)) + nulls++; + if (elemState is not null) + ElementTypeInfo.DisposeWriteState(elemState); + } + while (indices.TryAdvance(lastLength, metadata.DimensionLengths)); + } + + size = size.Combine((metadata.TotalElements - nulls) * elemByteCount); + } + else + { + arrayPool = ArrayPool<(Size, object?)>.Shared; + elemData = arrayPool.Rent(metadata.TotalElements); + var lastCount = metadata.LastDimension; + do + { + ref var elemState = ref elemData[indices.IndicesSum].WriteState; + var elemSize = elemOps.GetSizeOrDbNull(context, values, indices, ref elemState); + anyWriteState = anyWriteState || elemState is not null; + elemData[indices.IndicesSum].Size = elemSize ?? -1; + size = size.Combine(elemSize ?? 0); + } + // We can immediately continue if we didn't reach the end of the last dimension. 
+ while (indices.TryAdvance(lastCount, metadata.DimensionLengths)); + } + + writeState = new ArrayConverterWriteState + { + Metadata = metadata, + IterationIndices = indices, + ArrayPool = arrayPool, + Data = elemData!, + AnyWriteState = anyWriteState + }; + return size; + } + + public async ValueTask Read(bool async, PgReader reader, CancellationToken cancellationToken = default) + { + Debug.Assert(reader.Current.Format is DataFormat.Binary); + if (reader.ShouldBuffer(sizeof(int) + sizeof(int) + sizeof(uint))) + await reader.Buffer(async, sizeof(int) + sizeof(int) + sizeof(uint), cancellationToken).ConfigureAwait(false); + + var dimensions = reader.ReadInt32(); + + var flags = (PgArrayMetadata.Flags)reader.ReadInt32(); + _ = reader.ReadUInt32(); // Element OID. + + if (!ElemTypeDbNullable && flags.HasFlag(PgArrayMetadata.Flags.ContainsNulls)) + ThrowHelper.ThrowInvalidCastException(ReadNonNullableCollectionWithNullsExceptionMessage); + + // Make sure we can read length + lower bound N dimension times. + if (reader.ShouldBuffer((sizeof(int) + sizeof(int)) * dimensions)) + await reader.Buffer(async, (sizeof(int) + sizeof(int)) * dimensions, cancellationToken).ConfigureAwait(false); + + Debug.Assert(!reader.ShouldBuffer((sizeof(int) + sizeof(int)) * dimensions)); + + int[]? dimensionLengths = null; + var lastDimension = 0; + scoped Span dimensionLengthsSpan; + switch (dimensions) + { + case 0: + // At 0, if we have expected dimensions create the collection as such, works around https://github.com/npgsql/npgsql/issues/1271. + switch (expectedDimensions) + { + case null or <= 1: + dimensionLengthsSpan = Span.Empty; + break; + case { } value: + dimensionLengthsSpan = stackalloc int[value]; + dimensionLengthsSpan.Clear(); + break; + } + break; + case 1: + lastDimension = reader.ReadInt32(); + _ = reader.ReadInt32(); // Lower bound + dimensionLengthsSpan = lastDimension is 0 ? 
Span.Empty : new(ref lastDimension); + break; + default: + dimensionLengths = new int[dimensions]; + for (var i = 0; i < dimensions; i++) + { + lastDimension = reader.ReadInt32(); + _ = reader.ReadInt32(); // Lower bound + dimensionLengths[i] = lastDimension; + } + dimensionLengthsSpan = dimensionLengths.AsSpan(); + break; + } + + var collection = elemOps.CreateCollection(dimensionLengthsSpan); + if (dimensions is 0 || lastDimension is 0) + return collection; + + if (expectedDimensions is not null && dimensions != expectedDimensions) + ThrowHelper.ThrowInvalidCastException( + $"Cannot read an array value with {dimensions} dimension{(dimensions == 1 ? "" : "s")} into a " + + $"collection type with {expectedDimensions} dimension{(expectedDimensions == 1 ? "" : "s")}. " + + $"Call GetValue or a version of GetFieldValue with the commas matching the expected amount of dimensions."); + + var indices = IterationIndices.Create(dimensions); + do + { + if (reader.ShouldBuffer(sizeof(int))) + await reader.Buffer(async, sizeof(int), cancellationToken).ConfigureAwait(false); + + var length = reader.ReadInt32(); + if (length is not -1) + { + var scope = await reader.BeginNestedRead(async, length, binaryRequirements.Read, cancellationToken).ConfigureAwait(false); + try + { + await elemOps.Read(async, reader, isDbNull: false, collection, indices, cancellationToken).ConfigureAwait(false); + } + finally + { + if (async) + await scope.DisposeAsync().ConfigureAwait(false); + else + scope.Dispose(); + } + } + else + await elemOps.Read(async, reader, isDbNull: true, collection, indices, cancellationToken).ConfigureAwait(false); + } + while (indices.TryAdvance(lastDimension, dimensionLengths)); + + return collection; + } + + public async ValueTask Write(bool async, PgWriter writer, object values, CancellationToken cancellationToken) + { + Debug.Assert(writer.Current.Format is DataFormat.Binary); + var (metadata, state) = writer.Current.WriteState switch + { + ArrayConverterWriteState 
writeState => (writeState.Metadata, writeState), + null => (PgArrayMetadata.Create(0, null), null), + _ => throw new InvalidCastException($"Invalid write state, expected {typeof(ArrayConverterWriteState).FullName}.") + }; + + if (writer.ShouldFlush(metadata.BinaryPreambleByteCount)) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + writer.WriteInt32(metadata.Dimensions); // Dimensions + writer.WriteInt32(0); // Flags (not really used) + writer.WriteAsOid(elemTypeId); + for (var dim = 0; dim < metadata.Dimensions; dim++) + { + writer.WriteInt32(metadata.DimensionLengths[dim]); + writer.WriteInt32(pgLowerBound); // Lower bound + } + + // We can stop here for empty collections. + if (state is null) + return; + + var elemData = state.Data.Array; + var indices = state.IterationIndices; + indices.Reset(); + var lastCount = metadata.LastDimension; + var offset = state.Data.Offset; + do + { + if (writer.ShouldFlush(sizeof(int))) + await writer.Flush(async, cancellationToken).ConfigureAwait(false); + + var elem = elemData?[offset + indices.IndicesSum]; + object? fixedSizeWriteState = null; + var length = elemData is null + ? ElemTypeDbNullable && IsDbNull(values, indices, ref fixedSizeWriteState) ? -1 : binaryRequirements.Write.Value + : elem.GetValueOrDefault().Size.Value; + + writer.WriteInt32(length); + if (length is not -1) + { + using var _ = await writer.BeginNestedWrite(async, binaryRequirements.Write, + length, fixedSizeWriteState ?? elem?.WriteState, cancellationToken).ConfigureAwait(false); + await elemOps.Write(async, writer, values, indices, cancellationToken).ConfigureAwait(false); + } + + if (fixedSizeWriteState is not null) + ElementTypeInfo.DisposeWriteState(fixedSizeWriteState); + } + while (indices.TryAdvance(lastCount, metadata.DimensionLengths)); + } + + public static int GetArrayLengths(Array array, out int[]? 
dimensionLengths) + { + var dimensions = array.Rank; + + if (dimensions is 1) + { + dimensionLengths = null; + return array.Length; + } + + dimensionLengths = new int[dimensions]; + for (var i = 0; i < dimensionLengths.Length; i++) + dimensionLengths[i] = array.GetLength(i); + + // If we have a multidim array it may throw an overflow exception for large arrays (LongLength exists for these cases) + // however anything over int.MaxValue wouldn't fit in a parameter anyway so easier to throw here than deal with a long. + return array.Length; + } + + // Using a function pointer here is safe against assembly unloading as the instance reference that the static pointer method lives on is passed along. + // As such the instance cannot be collected by the gc which means the entire assembly is prevented from unloading until we're done. + // The alternatives are: + // 1. Add a virtual method and make AwaitTask call into it (bloating the vtable of all derived types). + // 2. Using a delegate, meaning we add a static field + an alloc per T + metadata, slightly slower dispatch perf so overall strictly worse as well. + [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder))] + public static async ValueTask AwaitTask(Task task, Continuation continuation, object collection, IterationIndices indices) + { + await task.ConfigureAwait(false); + continuation.Invoke(task, collection, indices); + // Guarantee the type stays loaded until the function pointer call is done. + GC.KeepAlive(continuation.Handle); + } + + // Split out into a struct as unsafe and async don't mix, while we do want a nicely typed function pointer signature to prevent mistakes. + public readonly unsafe struct Continuation + { + public object Handle { get; } + readonly delegate* _continuation; + + /// A reference to the type that houses the static method points to. 
+ /// The continuation + public Continuation(object handle, delegate* continuation) + { + Handle = handle; + _continuation = continuation; + } + + public void Invoke(Task task, object collection, IterationIndices indices) => _continuation(task, collection, indices); + } +} + +sealed class ArrayConverterWriteState : MultiWriteState +{ + public required PgArrayMetadata Metadata { get; init; } + public required IterationIndices IterationIndices { get; init; } +} + +readonly struct PgArrayMetadata +{ + const int MaxDimensions = 8; + + readonly int _totalElements; + readonly int[]? _dimensionLengths; + + PgArrayMetadata(int totalElements, int[]? dimensionLengths) + { + _totalElements = totalElements; + _dimensionLengths = dimensionLengths; + } + + public int TotalElements => _totalElements; + public int LastDimension => _dimensionLengths is null ? _totalElements : _dimensionLengths[^1]; + [UnscopedRef] + public ReadOnlySpan DimensionLengths + => _dimensionLengths is null ? new ReadOnlySpan(in _totalElements) : _dimensionLengths.AsSpan(); + public int Dimensions => _dimensionLengths?.Length ?? (_totalElements is 0 ? 0 : 1); + + public int BinaryPreambleByteCount => GetBinaryPreambleByteCount(TotalElements, Dimensions); + + public IterationIndices CreateIndices() => IterationIndices.Create(Dimensions); + + static int GetBinaryPreambleByteCount(int totalElements, int dimensions) + => sizeof(int) + // Dimensions + sizeof(int) + // Flags + sizeof(uint) + // Element OID + (totalElements is 0 ? 0 : dimensions * (sizeof(int) + sizeof(int))); // Dimensions * (array length and lower bound) + + public static PgArrayMetadata Create(long totalElements, int[]? 
dimensionLengths) + { + if (totalElements > int.MaxValue) + ThrowHelper.ThrowArgumentException("Postgres arrays cannot have more than int.MaxValue elements.", nameof(totalElements)); + + if (dimensionLengths?.Length is < 0 or > MaxDimensions) + ThrowHelper.ThrowArgumentException($"Postgres arrays can have at most {MaxDimensions} dimensions.", nameof(dimensionLengths)); + + return new((int)totalElements, dimensionLengths); + } + + public enum Flags + { + ContainsNulls = 1 + } +} diff --git a/src/Npgsql/Internal/PgTypeInfo.cs b/src/Npgsql/Internal/PgTypeInfo.cs index 93b90b3a70..aa3640a82e 100644 --- a/src/Npgsql/Internal/PgTypeInfo.cs +++ b/src/Npgsql/Internal/PgTypeInfo.cs @@ -73,13 +73,6 @@ private protected PgTypeInfo(PgSerializerOptions options, Type type, PgConverter public PgTypeId? PgTypeId { get; } - // Having it here so we can easily extend any behavior. - internal void DisposeWriteState(object writeState) - { - if (writeState is IDisposable disposable) - disposable.Dispose(); - } - public PgConverterResolution GetResolution(T? value) { if (this is not PgResolverTypeInfo resolverInfo) @@ -289,6 +282,17 @@ public PgConverterResolution GetDefaultResolution(PgTypeId? expectedPgTypeId) public PgConverterResolver GetConverterResolver() => converterResolver; } +// TODO until https://github.com/npgsql/npgsql/pull/6316 +static class PgTypeInfoExtensions +{ + // Having it here so we can easily extend any behavior. 
+ public static void DisposeWriteState(this PgTypeInfo typeInfo, object writeState) + { + if (writeState is IDisposable disposable) + disposable.Dispose(); + } +} + public readonly struct PgConverterResolution(PgConverter converter, PgTypeId pgTypeId) { public PgConverter Converter { get; } = converter; diff --git a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs index 8db547315f..cd1ab655bc 100644 --- a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs @@ -304,14 +304,14 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) mappings.AddType( DataTypeNames.OidVector, static (options, mapping, _) => mapping.CreateInfo(options, - new ArrayBasedArrayConverter(new(new UInt32Converter(), new PgTypeId(DataTypeNames.Oid)), pgLowerBound: 0)), + ArrayConverter.CreateArrayBased(new(new UInt32Converter(), new PgTypeId(DataTypeNames.Oid)), pgLowerBound: 0)), MatchRequirement.DataTypeName); // Int2vector mappings.AddType( DataTypeNames.Int2Vector, static (options, mapping, _) => mapping.CreateInfo(options, - new ArrayBasedArrayConverter(new(new Int2Converter(), new PgTypeId(DataTypeNames.Int2)), pgLowerBound: 0)), + ArrayConverter.CreateArrayBased(new(new Int2Converter(), new PgTypeId(DataTypeNames.Int2)), pgLowerBound: 0)), MatchRequirement.DataTypeName); // Tid @@ -406,10 +406,10 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) mappings.AddPolymorphicResolverArrayType(DataTypeNames.Varbit, static options => resolution => resolution.Converter switch { BoolBitStringConverter => PgConverterFactory.CreatePolymorphicArrayConverter( - () => new ArrayBasedArrayConverter(resolution, typeof(Array)), - () => new ArrayBasedArrayConverter(new(new NullableConverter(resolution.GetConverter()), resolution.PgTypeId), typeof(Array)), + () => 
ArrayConverter.CreateArrayBased(resolution, typeof(Array)), + () => ArrayConverter.CreateArrayBased(new(new NullableConverter(resolution.GetConverter()), resolution.PgTypeId), typeof(Array)), options), - BitArrayBitStringConverter => new ArrayBasedArrayConverter(resolution, typeof(Array)), + BitArrayBitStringConverter => ArrayConverter.CreateArrayBased(resolution, typeof(Array)), _ => throw new NotSupportedException() }); mappings.AddArrayType(DataTypeNames.Varbit); @@ -421,10 +421,10 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) mappings.AddPolymorphicResolverArrayType(DataTypeNames.Bit, static options => resolution => resolution.Converter switch { BoolBitStringConverter => PgConverterFactory.CreatePolymorphicArrayConverter( - () => new ArrayBasedArrayConverter(resolution, typeof(Array)), - () => new ArrayBasedArrayConverter(new(new NullableConverter(resolution.GetConverter()), resolution.PgTypeId), typeof(Array)), + () => ArrayConverter.CreateArrayBased(resolution, typeof(Array)), + () => ArrayConverter.CreateArrayBased(new(new NullableConverter(resolution.GetConverter()), resolution.PgTypeId), typeof(Array)), options), - BitArrayBitStringConverter => new ArrayBasedArrayConverter(resolution, typeof(Array)), + BitArrayBitStringConverter => ArrayConverter.CreateArrayBased(resolution, typeof(Array)), _ => throw new NotSupportedException() }); mappings.AddArrayType(DataTypeNames.Bit); diff --git a/src/Npgsql/Internal/TypeInfoMapping.cs b/src/Npgsql/Internal/TypeInfoMapping.cs index 1fc028153f..74d0abf0d0 100644 --- a/src/Npgsql/Internal/TypeInfoMapping.cs +++ b/src/Npgsql/Internal/TypeInfoMapping.cs @@ -705,19 +705,19 @@ static string GetArrayDataTypeName(string dataTypeName) ? 
DataTypeName.ValidatedName(dataTypeName).ToArrayName().Value : "_" + DataTypeName.FromDisplayName(dataTypeName).UnqualifiedName; - static ArrayBasedArrayConverter CreateArrayBasedConverter(TypeInfoMapping mapping, PgTypeInfo elemInfo) + static ArrayConverter CreateArrayBasedConverter(TypeInfoMapping mapping, PgTypeInfo elemInfo) { if (!elemInfo.IsBoxing) - return new ArrayBasedArrayConverter(elemInfo.GetResolution(), mapping.Type); + return ArrayConverter.CreateArrayBased(elemInfo.GetResolution(), mapping.Type); ThrowBoxingNotSupported(resolver: false); return default; } - static ListBasedArrayConverter, TElement> CreateListBasedConverter(TypeInfoMapping mapping, PgTypeInfo elemInfo) + static ArrayConverter> CreateListBasedConverter(TypeInfoMapping mapping, PgTypeInfo elemInfo) { if (!elemInfo.IsBoxing) - return new ListBasedArrayConverter, TElement>(elemInfo.GetResolution()); + return ArrayConverter>.CreateListBased(elemInfo.GetResolution()); ThrowBoxingNotSupported(resolver: false); return default; diff --git a/src/Npgsql/Util/IterationIndices.cs b/src/Npgsql/Util/IterationIndices.cs new file mode 100644 index 0000000000..8e844b6ca6 --- /dev/null +++ b/src/Npgsql/Util/IterationIndices.cs @@ -0,0 +1,112 @@ +using System; +using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; + +namespace Npgsql.Util; + +// Many array cannot be pooled until https://github.com/dotnet/runtime/issues/125325 is addressed. +struct IterationIndices +{ + long _indicesSum; + + public long IndicesSum => _indicesSum; + + public int Rank { get; private init; } + public int One => (int)_indicesSum; + public int[]? Many { get; private init; } + public int Last => Many is null ? (int)_indicesSum : Many[^1]; + + // Also accept the count for the most common case where we have a single dimension array to avoid the bounds check. 
+ [MethodImpl(MethodImplOptions.AggressiveInlining)] + public bool TryAdvance(int lastCount, ReadOnlySpan counts) + { + Debug.Assert(counts.IsEmpty || lastCount == counts[^1]); + + ref var lastIndex = ref Many is null ? ref GetIntRefFromLong(ref _indicesSum) : ref Many![^1]; + + if (lastIndex < lastCount - 1) + { + lastIndex++; + return true; + } + + return Many is not null && IncrementOrCarry(counts); + } + + [MethodImpl(MethodImplOptions.NoInlining)] + bool IncrementOrCarry(ReadOnlySpan counts) + { + Debug.Assert(counts.Length > 1); + Debug.Assert(Rank > 1); + + // Find the first dimension from the end that isn't at or past its length, increment it and bring all previous dimensions to zero. + for (var dim = Rank - 1; dim >= 0; dim--) + { + if (this[dim] >= counts[dim] - 1) + continue; + + Many.AsSpan().Slice(dim + 1).Clear(); + this[dim]++; + _indicesSum++; + return true; + } + + // We're done if we can't find any dimension that isn't at its length. + return false; + } + + public ref int this[int index] + { + [UnscopedRef] + get + { + switch (Rank) + { + case 0: + ThrowHelper.ThrowIndexOutOfRangeException("Cannot index into a 0-dimensional array."); + return ref Unsafe.NullRef(); + case 1: + Debug.Assert(index is 0); + Debug.Assert(Many is null); + return ref GetIntRefFromLong(ref _indicesSum); + default: + return ref Many![index]; + } + } + } + + public void Reset() + { + if (Many is null) + { + _indicesSum = 0; + return; + } + + Array.Clear(Many); + } + + public static IterationIndices Create(int dimensions) + { + switch (dimensions) + { + case 0: + ThrowHelper.ThrowArgumentOutOfRangeException(nameof(dimensions), "Cannot create a 0-dimensional array."); + return default; + case 1: + return new() { Rank = dimensions }; + default: + return new() + { + Rank = dimensions, + Many = new int[dimensions], + }; + } + } + + static ref int GetIntRefFromLong(ref long value) + => ref BitConverter.IsLittleEndian + ? 
ref Unsafe.As(ref value) + : ref Unsafe.Add(ref Unsafe.As(ref value), 1); // Take high 32 bits. +} diff --git a/test/Npgsql.Tests/Types/ArrayTests.cs b/test/Npgsql.Tests/Types/ArrayTests.cs index cf6f27e038..e1cdcd258b 100644 --- a/test/Npgsql.Tests/Types/ArrayTests.cs +++ b/test/Npgsql.Tests/Types/ArrayTests.cs @@ -65,7 +65,7 @@ public async Task Throws_too_many_dimensions() cmd.Parameters.AddWithValue("p", new int[1, 1, 1, 1, 1, 1, 1, 1, 1]); // 9 dimensions Assert.That( () => cmd.ExecuteScalarAsync(), - Throws.Exception.TypeOf().With.Message.EqualTo("Postgres arrays can have at most 8 dimensions. (Parameter 'values')")); + Throws.Exception.TypeOf().With.Message.EqualTo("Postgres arrays can have at most 8 dimensions. (Parameter 'dimensionLengths')")); } [Test, Description("Checks that PG arrays containing nulls are returned as set via ValueTypeArrayMode.")] @@ -311,7 +311,7 @@ public async Task Read_null_as_non_nullable_array_throws() Assert.That( () => reader.GetFieldValue(0), Throws.Exception.TypeOf() - .With.Message.EqualTo(PgArrayConverter.ReadNonNullableCollectionWithNullsExceptionMessage)); + .With.Message.EqualTo(ArrayConverterCore.ReadNonNullableCollectionWithNullsExceptionMessage)); } @@ -330,7 +330,7 @@ public async Task Read_null_as_non_nullable_list_throws() Assert.That( () => reader.GetFieldValue>(0), Throws.Exception.TypeOf() - .With.Message.EqualTo(PgArrayConverter.ReadNonNullableCollectionWithNullsExceptionMessage)); + .With.Message.EqualTo(ArrayConverterCore.ReadNonNullableCollectionWithNullsExceptionMessage)); } [Test, Description("Roundtrips a large, one-dimensional array of ints that will be chunked")] @@ -491,6 +491,16 @@ public async Task Read_two_empty_arrays() Assert.That(reader.GetFieldValue>(1), Is.Not.SameAs(reader.GetFieldValue>(0))); } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/1271")] + public async Task Generics_read_empty_multidim_array() + { + await using var conn = await OpenConnectionAsync(); + await using var 
cmd = new NpgsqlCommand("select ARRAY[[], []]::integer[]", conn); + await using var reader = await cmd.ExecuteReaderAsync(); + await reader.ReadAsync(); + Assert.That(reader.GetFieldValue(0).Length, Is.Zero); + } + [Test] public async Task Arrays_not_supported_by_default_on_NpgsqlSlimSourceBuilder() { From ec3666860a44f010ccbbda4b2b9b3475b0803d84 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Wed, 11 Mar 2026 18:25:30 +0200 Subject: [PATCH 710/761] Update dependencies (#6482) --- Directory.Packages.props | 16 ++++++++-------- global.json | 2 +- src/Npgsql/Npgsql.csproj | 1 - test/Npgsql.Tests/ReadBufferTests.cs | 1 - 4 files changed, 9 insertions(+), 11 deletions(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index d360d56c2b..23355fd6d8 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -1,11 +1,11 @@ - 10.0.3 - 10.0.3 + 10.0.4 + 10.0.4 - 10.0.3 - 10.0.3 + 10.0.4 + 10.0.4 @@ -21,14 +21,14 @@ - + - - + + - + diff --git a/global.json b/global.json index 6a288505a1..8e7ec085a0 100644 --- a/global.json +++ b/global.json @@ -1,6 +1,6 @@ { "sdk": { - "version": "10.0.100", + "version": "10.0.200", "rollForward": "latestMajor", "allowPrerelease": false } diff --git a/src/Npgsql/Npgsql.csproj b/src/Npgsql/Npgsql.csproj index a4e47f12cf..d06e91fcc8 100644 --- a/src/Npgsql/Npgsql.csproj +++ b/src/Npgsql/Npgsql.csproj @@ -33,7 +33,6 @@ - True True diff --git a/test/Npgsql.Tests/ReadBufferTests.cs b/test/Npgsql.Tests/ReadBufferTests.cs index 3169e5366d..b547787368 100644 --- a/test/Npgsql.Tests/ReadBufferTests.cs +++ b/test/Npgsql.Tests/ReadBufferTests.cs @@ -79,7 +79,6 @@ public async Task ReadNullTerminatedString_with_io() .WriteByte(0) .Write(NpgsqlWriteBuffer.UTF8Encoding.GetBytes(new string("bar"))) .WriteByte(0); - Assert.That(task.IsCompleted); Assert.That(await task, Is.EqualTo("Chunked string")); Assert.That(ReadBuffer.ReadNullTerminatedString(), Is.EqualTo("bar")); } From aa86a1d6fa45fa7b6ff2ba2e4c53930a7c94fe95 Mon Sep 
17 00:00:00 2001 From: Brian Pursley Date: Wed, 11 Mar 2026 14:28:36 -0400 Subject: [PATCH 711/761] Fix range test assertion to check for NpgsqlRange.Empty (#6485) --- test/Npgsql.Tests/Types/RangeTests.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/Npgsql.Tests/Types/RangeTests.cs b/test/Npgsql.Tests/Types/RangeTests.cs index 23974f5583..d7bdea0132 100644 --- a/test/Npgsql.Tests/Types/RangeTests.cs +++ b/test/Npgsql.Tests/Types/RangeTests.cs @@ -387,7 +387,7 @@ public void TypeConverter() var result = converter.ConvertFromString("empty"); // Assert - Assert.That(result, Is.Empty); + Assert.That(result, Is.EqualTo(NpgsqlRange.Empty)); } #endregion From 0421d2ff0aaf1dfb8a8b47d872782f1933fbe4da Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Thu, 12 Mar 2026 03:26:13 +0100 Subject: [PATCH 712/761] Fix benchmarks (#6489) --- .editorconfig | 4 ++ src/Npgsql/NpgsqlDataSourceBuilder.cs | 4 ++ src/Npgsql/NpgsqlSlimDataSourceBuilder.cs | 8 ++-- test/Directory.Build.props | 3 +- .../Npgsql.Benchmarks.csproj | 1 - test/Npgsql.Benchmarks/ResolveHandler.cs | 10 ++--- test/Npgsql.Benchmarks/TypeHandlers/Text.cs | 3 +- .../TypeHandlers/TypeHandlerBenchmarks.cs | 42 ++++++++++++------- 8 files changed, 49 insertions(+), 26 deletions(-) diff --git a/.editorconfig b/.editorconfig index a3a2e648f8..e11ed480bf 100644 --- a/.editorconfig +++ b/.editorconfig @@ -5,6 +5,10 @@ root = true [*] end_of_line = LF +[*.{csproj,props,targets}] +indent_style = space +indent_size = 2 + ; 4-column space indentation [*.cs] indent_style = space diff --git a/src/Npgsql/NpgsqlDataSourceBuilder.cs b/src/Npgsql/NpgsqlDataSourceBuilder.cs index 156885d04e..2148aec035 100644 --- a/src/Npgsql/NpgsqlDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlDataSourceBuilder.cs @@ -601,6 +601,10 @@ public NpgsqlDataSource Build() public NpgsqlMultiHostDataSource BuildMultiHost() => _internalBuilder.BuildMultiHost(); + // Used in testing. 
+ internal (NpgsqlConnectionStringBuilder, NpgsqlDataSourceConfiguration) PrepareConfiguration() + => _internalBuilder.PrepareConfiguration(); + INpgsqlTypeMapper INpgsqlTypeMapper.ConfigureJsonOptions(JsonSerializerOptions serializerOptions) => ConfigureJsonOptions(serializerOptions); diff --git a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs index ebe7fd9163..a9f7a4717e 100644 --- a/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs +++ b/src/Npgsql/NpgsqlSlimDataSourceBuilder.cs @@ -789,6 +789,7 @@ public NpgsqlSlimDataSourceBuilder UsePhysicalConnectionInitializer( /// public NpgsqlDataSource Build() { + ConnectionStringBuilder.PostProcessAndValidate(); var (connectionStringBuilder, config) = PrepareConfiguration(); if (ConnectionStringBuilder.Host!.Contains(',')) @@ -808,6 +809,7 @@ public NpgsqlDataSource Build() /// public NpgsqlMultiHostDataSource BuildMultiHost() { + ConnectionStringBuilder.PostProcessAndValidate(); var (connectionStringBuilder, config) = PrepareConfiguration(); ValidateMultiHost(); @@ -815,9 +817,9 @@ public NpgsqlMultiHostDataSource BuildMultiHost() return new(connectionStringBuilder, config); } - (NpgsqlConnectionStringBuilder, NpgsqlDataSourceConfiguration) PrepareConfiguration() + // Used in testing. 
+ internal (NpgsqlConnectionStringBuilder, NpgsqlDataSourceConfiguration) PrepareConfiguration() { - ConnectionStringBuilder.PostProcessAndValidate(); var connectionStringBuilder = ConnectionStringBuilder.Clone(); var sslClientAuthenticationOptionsCallback = _sslClientAuthenticationOptionsCallback; @@ -855,7 +857,7 @@ public NpgsqlMultiHostDataSource BuildMultiHost() } if ((_passwordProvider is not null || _periodicPasswordProvider is not null) && - (ConnectionStringBuilder.Password is not null || ConnectionStringBuilder.Passfile is not null)) + (connectionStringBuilder.Password is not null || connectionStringBuilder.Passfile is not null)) { throw new NotSupportedException(NpgsqlStrings.CannotSetBothPasswordProviderAndPassword); } diff --git a/test/Directory.Build.props b/test/Directory.Build.props index 6af6edc496..9625beb334 100644 --- a/test/Directory.Build.props +++ b/test/Directory.Build.props @@ -6,7 +6,8 @@ false - $(NoWarn);CA2252 + + $(NoWarn);CA2252;NPG9001 diff --git a/test/Npgsql.Benchmarks/Npgsql.Benchmarks.csproj b/test/Npgsql.Benchmarks/Npgsql.Benchmarks.csproj index 013bfb8a9d..55f3104673 100644 --- a/test/Npgsql.Benchmarks/Npgsql.Benchmarks.csproj +++ b/test/Npgsql.Benchmarks/Npgsql.Benchmarks.csproj @@ -4,7 +4,6 @@ portable Npgsql.Benchmarks Exe - $(NoWarn);NPG9001 NU1901;NU1902;NU1903;NU1904 diff --git a/test/Npgsql.Benchmarks/ResolveHandler.cs b/test/Npgsql.Benchmarks/ResolveHandler.cs index ead3a547ed..b36d9b51e4 100644 --- a/test/Npgsql.Benchmarks/ResolveHandler.cs +++ b/test/Npgsql.Benchmarks/ResolveHandler.cs @@ -7,7 +7,6 @@ namespace Npgsql.Benchmarks; [MemoryDiagnoser] public class ResolveHandler { - NpgsqlDataSource? 
_dataSource; PgSerializerOptions _serializerOptions = null!; [Params(0, 1, 2)] @@ -21,12 +20,11 @@ public void Setup() dataSourceBuilder.UseNodaTime(); if (NumPlugins > 1) dataSourceBuilder.UseNetTopologySuite(); - _dataSource = dataSourceBuilder.Build(); - _serializerOptions = _dataSource.CurrentReloadableState.SerializerOptions; - } - [GlobalCleanup] - public void Cleanup() => _dataSource?.Dispose(); + // Alternatively we must build a data source and get it bootstrapped against a real database. + (_, var config) = dataSourceBuilder.PrepareConfiguration(); + _serializerOptions = new PgSerializerOptions(PostgresMinimalDatabaseInfo.DefaultTypeCatalog, config.ResolverChain); + } [Benchmark] public PgTypeInfo? ResolveDefault() diff --git a/test/Npgsql.Benchmarks/TypeHandlers/Text.cs b/test/Npgsql.Benchmarks/TypeHandlers/Text.cs index 6216cdc5de..425b836838 100644 --- a/test/Npgsql.Benchmarks/TypeHandlers/Text.cs +++ b/test/Npgsql.Benchmarks/TypeHandlers/Text.cs @@ -1,6 +1,7 @@ using BenchmarkDotNet.Attributes; using System.Collections.Generic; using System.Text; +using Npgsql.Internal; using Npgsql.Internal.Converters; namespace Npgsql.Benchmarks.TypeHandlers; @@ -10,7 +11,7 @@ public class Text() : TypeHandlerBenchmarks(new StringTextConverter(Enco { protected override IEnumerable ValuesOverride() { - for (var i = 1; i <= 10000; i *= 10) + for (var i = Encoding.UTF8.GetByteCount("x"); i <= NpgsqlWriteBuffer.DefaultSize; i *= 4) yield return new string('x', i); } } diff --git a/test/Npgsql.Benchmarks/TypeHandlers/TypeHandlerBenchmarks.cs b/test/Npgsql.Benchmarks/TypeHandlers/TypeHandlerBenchmarks.cs index 9bc09dac99..d37d7bcc43 100644 --- a/test/Npgsql.Benchmarks/TypeHandlers/TypeHandlerBenchmarks.cs +++ b/test/Npgsql.Benchmarks/TypeHandlers/TypeHandlerBenchmarks.cs @@ -37,7 +37,7 @@ public override void SetLength(long value) { } public override void Write(byte[] buffer, int offset, int count) { } } - readonly PgConverter _converter; + readonly PgConverter _converter; 
readonly PgReader _reader; readonly PgWriter _writer; readonly NpgsqlWriteBuffer _writeBuffer; @@ -50,12 +50,11 @@ public override void Write(byte[] buffer, int offset, int count) { } protected TypeHandlerBenchmarks(PgConverter handler) { var stream = new EndlessStream(); - _converter = handler ?? throw new ArgumentNullException(nameof(handler)); - _readBuffer = new NpgsqlReadBuffer(null, stream, null, NpgsqlReadBuffer.MinimumSize, NpgsqlWriteBuffer.UTF8Encoding, NpgsqlWriteBuffer.RelaxedUTF8Encoding); - _writeBuffer = new NpgsqlWriteBuffer(null, stream, null, NpgsqlWriteBuffer.MinimumSize, NpgsqlWriteBuffer.UTF8Encoding); + _converter = (PgConverter)handler ?? throw new ArgumentNullException(nameof(handler)); + _readBuffer = new NpgsqlReadBuffer(null, stream, null, NpgsqlReadBuffer.DefaultSize, NpgsqlWriteBuffer.UTF8Encoding, NpgsqlWriteBuffer.RelaxedUTF8Encoding); + _writeBuffer = new NpgsqlWriteBuffer(null, stream, null, NpgsqlWriteBuffer.DefaultSize, NpgsqlWriteBuffer.UTF8Encoding) { MessageLengthValidation = false }; _reader = new PgReader(_readBuffer); - _writer = new PgWriter(new NpgsqlBufferWriter(_writeBuffer)); - _writer.Init(new PostgresMinimalDatabaseInfo()); + _writer = _writeBuffer.GetWriter(new PostgresMinimalDatabaseInfo(), FlushMode.Blocking); _converter.CanConvert(DataFormat.Binary, out _binaryRequirements); } @@ -69,26 +68,41 @@ public T Value get => _value; set { + // Workaround for https://github.com/dotnet/BenchmarkDotNet/issues/3049 + if (default(T) is null && value is null) + return; + + if (_reader.Initialized) + { + // Prevent Commit from calling Skip, which would cause us to try and use the null connector. 
+ _readBuffer.ReadPosition += _reader.CurrentRemaining; + _reader.Commit(); + } + _value = value; object state = null; - var size = _elementSize = _converter.GetSizeAsObject(new(DataFormat.Binary, _binaryRequirements.Write), value, ref state); + var size = _elementSize = _converter.GetSizeOrDbNullAsObject(DataFormat.Binary, _binaryRequirements.Write, value, ref state)!.Value; var current = new ValueMetadata { Format = DataFormat.Binary, BufferRequirement = _binaryRequirements.Write, Size = size, WriteState = state }; + _writer.BeginWrite(async: false, current, CancellationToken.None).GetAwaiter().GetResult(); _converter.WriteAsObject(_writer, value); - Buffer.BlockCopy(_writeBuffer.Buffer, 0, _readBuffer.Buffer, 0, size.Value); - _writer.Commit(size.Value); - _readBuffer.FilledBytes = size.Value; + + Buffer.BlockCopy(_writeBuffer.Buffer, 0, _readBuffer.Buffer, 0, _writeBuffer.WritePosition); _writeBuffer.WritePosition = 0; + _readBuffer.ReadPosition = 0; + _readBuffer.FilledBytes = _writeBuffer.WritePosition; + + _reader.Init(size.Value, DataFormat.Binary); } } [Benchmark] public T Read() { - _readBuffer.ReadPosition = sizeof(int); + _readBuffer.ReadPosition = 0; _reader.StartRead(_binaryRequirements.Read); - var value = ((PgConverter)_converter).Read(_reader); + var value = _converter.Read(_reader); _reader.EndRead(); return value; } @@ -96,9 +110,9 @@ public T Read() [Benchmark] public void Write() { - _writeBuffer.WritePosition = 0; + _writer.RefreshBuffer(); var current = new ValueMetadata { Format = DataFormat.Binary, BufferRequirement = _binaryRequirements.Write, Size = _elementSize, WriteState = null }; _writer.BeginWrite(async: false, current, CancellationToken.None).GetAwaiter().GetResult(); - ((PgConverter)_converter).Write(_writer, _value); + _converter.Write(_writer, _value); } } From a91ad95b8a67440b8d83cce2e7153b55483420f9 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Thu, 12 Mar 2026 03:28:42 +0100 Subject: [PATCH 713/761] Address feedback on fix 
benchmarks PR --- test/Npgsql.Benchmarks/TypeHandlers/TypeHandlerBenchmarks.cs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/Npgsql.Benchmarks/TypeHandlers/TypeHandlerBenchmarks.cs b/test/Npgsql.Benchmarks/TypeHandlers/TypeHandlerBenchmarks.cs index d37d7bcc43..e49e3b25ad 100644 --- a/test/Npgsql.Benchmarks/TypeHandlers/TypeHandlerBenchmarks.cs +++ b/test/Npgsql.Benchmarks/TypeHandlers/TypeHandlerBenchmarks.cs @@ -89,9 +89,9 @@ public T Value _writer.Commit(size.Value); Buffer.BlockCopy(_writeBuffer.Buffer, 0, _readBuffer.Buffer, 0, _writeBuffer.WritePosition); - _writeBuffer.WritePosition = 0; - _readBuffer.ReadPosition = 0; _readBuffer.FilledBytes = _writeBuffer.WritePosition; + _readBuffer.ReadPosition = 0; + _writeBuffer.WritePosition = 0; _reader.Init(size.Value, DataFormat.Binary); } From 013e7717ab8e37228d3f06dd312221e25057572b Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Thu, 12 Mar 2026 06:41:50 +0100 Subject: [PATCH 714/761] Fix delayed converter resolution for nullables (#6453) --- .../Internal/Converters/ArrayConverter.cs | 71 +++++++++---------- .../Internal/Converters/NullableConverter.cs | 6 +- test/Npgsql.Tests/Types/DateTimeTests.cs | 12 ++++ 3 files changed, 48 insertions(+), 41 deletions(-) diff --git a/src/Npgsql/Internal/Converters/ArrayConverter.cs b/src/Npgsql/Internal/Converters/ArrayConverter.cs index cca67a870a..6a9b7b0947 100644 --- a/src/Npgsql/Internal/Converters/ArrayConverter.cs +++ b/src/Npgsql/Internal/Converters/ArrayConverter.cs @@ -274,45 +274,40 @@ protected override PgConverter CreateConverter(PgConverterResolution effectiv protected override PgConverterResolution? GetEffectiveResolution(T? values, PgTypeId? expectedEffectivePgTypeId) { PgConverterResolution? 
resolution = null; - if (values is null) + switch (values) { - resolution = EffectiveTypeInfo.GetDefaultResolution(expectedEffectivePgTypeId); - } - else - { - switch (values) - { - case TElement[] array: - foreach (var value in array) - { - var result = EffectiveTypeInfo.GetResolution(value, resolution?.PgTypeId ?? expectedEffectivePgTypeId); - resolution ??= result; - } - break; - case List list: - foreach (var value in list) - { - var result = EffectiveTypeInfo.GetResolution(value, resolution?.PgTypeId ?? expectedEffectivePgTypeId); - resolution ??= result; - } - break; - case IList list: - foreach (var value in list) - { - var result = EffectiveTypeInfo.GetResolution(value, resolution?.PgTypeId ?? expectedEffectivePgTypeId); - resolution ??= result; - } - break; - case Array array: - foreach (var value in array) - { - var result = EffectiveTypeInfo.GetResolutionAsObject(value, resolution?.PgTypeId ?? expectedEffectivePgTypeId); - resolution ??= result; - } - break; - default: - throw new NotSupportedException(); - } + case TElement[] array: + foreach (var value in array) + { + var result = EffectiveTypeInfo.GetResolution(value, resolution?.PgTypeId ?? expectedEffectivePgTypeId); + resolution ??= result; + } + break; + case List list: + foreach (var value in list) + { + var result = EffectiveTypeInfo.GetResolution(value, resolution?.PgTypeId ?? expectedEffectivePgTypeId); + resolution ??= result; + } + break; + case IList list: + foreach (var value in list) + { + var result = EffectiveTypeInfo.GetResolution(value, resolution?.PgTypeId ?? expectedEffectivePgTypeId); + resolution ??= result; + } + break; + case Array array: + foreach (var value in array) + { + var result = EffectiveTypeInfo.GetResolutionAsObject(value, resolution?.PgTypeId ?? 
expectedEffectivePgTypeId); + resolution ??= result; + } + break; + case null: + break; + default: + throw new NotSupportedException(); } return resolution; diff --git a/src/Npgsql/Internal/Converters/NullableConverter.cs b/src/Npgsql/Internal/Converters/NullableConverter.cs index 57a12e005f..b4d5689da7 100644 --- a/src/Npgsql/Internal/Converters/NullableConverter.cs +++ b/src/Npgsql/Internal/Converters/NullableConverter.cs @@ -50,7 +50,7 @@ sealed class NullableConverterResolver(PgResolverTypeInfo effectiveTypeInfo) => new NullableConverter(effectiveResolution.GetConverter()); protected override PgConverterResolution? GetEffectiveResolution(T? value, PgTypeId? expectedEffectivePgTypeId) - => value is null - ? EffectiveTypeInfo.GetDefaultResolution(expectedEffectivePgTypeId) - : EffectiveTypeInfo.GetResolution(value.GetValueOrDefault(), expectedEffectivePgTypeId); + => value is { } inner + ? EffectiveTypeInfo.GetResolution(inner, expectedEffectivePgTypeId) + : null; } diff --git a/test/Npgsql.Tests/Types/DateTimeTests.cs b/test/Npgsql.Tests/Types/DateTimeTests.cs index c698514e10..397ef6f069 100644 --- a/test/Npgsql.Tests/Types/DateTimeTests.cs +++ b/test/Npgsql.Tests/Types/DateTimeTests.cs @@ -465,6 +465,18 @@ await AssertType(datasource, @"{""1998-04-12 15:26:38+02"",NULL}", "timestamp with time zone[]"); + // Make sure delayed converter resolution works when null precedes a non-null value. + // We expect the resolution of null values to not lock in the default type timestamp. + // This would cause the subsequent non-null value to fail to convert, as it requires timestamptz. 
+ await AssertType(datasource, + new DateTime?[] + { + null, + new DateTime(1998, 4, 12, 13, 26, 38, DateTimeKind.Utc) + }, + @"{NULL,""1998-04-12 15:26:38+02""}", + "timestamp with time zone[]"); + await AssertType(datasource, new DateTime?[] { From 977e835961dd6964a7d967004e3cf9f8ce5f7fc9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 12 Mar 2026 07:50:07 +0200 Subject: [PATCH 715/761] Bump Scriban.Signed from 6.5.5 to 6.5.6 (#6487) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 23355fd6d8..da809099f4 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -25,7 +25,7 @@ - + From b18dd1773909c929128b825ce5ff4a21d1a85cec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Harrtell?= Date: Thu, 12 Mar 2026 08:38:39 +0100 Subject: [PATCH 716/761] Respect handleOrdinates also when writing geometry (#6380) --- ...NetTopologySuiteTypeInfoResolverFactory.cs | 7 +- .../NetTopologySuiteTests.cs | 79 +++++++++++++++++++ 2 files changed, 85 insertions(+), 1 deletion(-) diff --git a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeInfoResolverFactory.cs b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeInfoResolverFactory.cs index e533d62207..2012490fb5 100644 --- a/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeInfoResolverFactory.cs +++ b/src/Npgsql.NetTopologySuite/Internal/NetTopologySuiteTypeInfoResolverFactory.cs @@ -20,10 +20,11 @@ sealed class NetTopologySuiteTypeInfoResolverFactory( class Resolver : IPgTypeInfoResolver { readonly PostGisReader _gisReader; + readonly PostGisWriter _gisWriter; protected readonly bool _geographyAsDefault; TypeInfoMappingCollection? 
_mappings; - protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(), _gisReader, new(), _geographyAsDefault); + protected TypeInfoMappingCollection Mappings => _mappings ??= AddMappings(new(), _gisReader, _gisWriter, _geographyAsDefault); public Resolver( CoordinateSequenceFactory? coordinateSequenceFactory, @@ -37,6 +38,10 @@ public Resolver( _geographyAsDefault = geographyAsDefault; _gisReader = new PostGisReader(coordinateSequenceFactory, precisionModel, handleOrdinates); + _gisWriter = new PostGisWriter + { + HandleOrdinates = handleOrdinates + }; } public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) diff --git a/test/Npgsql.PluginTests/NetTopologySuiteTests.cs b/test/Npgsql.PluginTests/NetTopologySuiteTests.cs index 54a1a91026..2f41a6e8cc 100644 --- a/test/Npgsql.PluginTests/NetTopologySuiteTests.cs +++ b/test/Npgsql.PluginTests/NetTopologySuiteTests.cs @@ -150,6 +150,85 @@ public async Task Write(Ordinates ordinates, Geometry geometry, string sqlRepres Assert.That(cmd.ExecuteScalar(), Is.True); } + [Test] + public async Task ReadWithHandleOrdinatesXY_FiltersZCoordinate() + { + // This test verifies that handleOrdinates IS respected during read operations + await using var conn = await OpenConnectionAsync(handleOrdinates: Ordinates.XY); + await using var cmd = conn.CreateCommand(); + cmd.CommandText = "SELECT ST_MakePoint(1, 2, 3)"; // Create a 3D point in SQL + + var result = (Point)cmd.ExecuteScalar()!; + + // The Z coordinate should be filtered out during reading based on handleOrdinates: XY + Assert.That(result.CoordinateSequence.HasZ, Is.False, + "Z coordinate was correctly filtered during read"); + Assert.That(result.X, Is.EqualTo(1d)); + Assert.That(result.Y, Is.EqualTo(2d)); + Assert.That(result.Z, Is.NaN, "Z coordinate should be NaN when filtered out"); + } + + [Test] + public async Task WriteWithHandleOrdinatesXY_ShouldFilterZCoordinate() + { + // This test verifies that when 
handleOrdinates is set to XY, + // Z coordinates are correctly filtered out during write operations. + var pointWithZ = new Point(1d, 2d, 3d); + + await using var conn = await OpenConnectionAsync(handleOrdinates: Ordinates.XY); + await using var cmd = conn.CreateCommand(); + cmd.Parameters.AddWithValue("p1", pointWithZ); + cmd.CommandText = "SELECT ST_Z(@p1::geometry)"; + + var result = cmd.ExecuteScalar(); + + // Z coordinate should be filtered out and return NULL + Assert.That(result, Is.EqualTo(DBNull.Value), + "Z coordinate should be filtered during write when handleOrdinates: Ordinates.XY"); + } + + [Test] + public async Task WriteWithHandleOrdinatesXY_ShouldFilterMCoordinate() + { + // This test verifies that when handleOrdinates is set to XY, + // M coordinates are correctly filtered out during write operations. + var pointWithM = new Point( + new DotSpatialAffineCoordinateSequence([1d, 2d], [double.NaN], [4d]), + GeometryFactory.Default); + + await using var conn = await OpenConnectionAsync(handleOrdinates: Ordinates.XY); + await using var cmd = conn.CreateCommand(); + cmd.Parameters.AddWithValue("p1", pointWithM); + cmd.CommandText = "SELECT ST_M(@p1::geometry)"; + + var result = cmd.ExecuteScalar(); + + // M coordinate should be filtered out and return NULL + Assert.That(result, Is.EqualTo(DBNull.Value), + "M coordinate should be filtered during write when handleOrdinates: Ordinates.XY"); + } + + [Test] + public async Task WriteWithHandleOrdinatesXYZ_ShouldFilterMCoordinate() + { + // This test verifies that when handleOrdinates is set to XYZ, + // M coordinates are correctly filtered out during write operations. 
+ var pointWithZM = new Point( + new DotSpatialAffineCoordinateSequence([1d, 2d], [3d], [4d]), + GeometryFactory.Default); + + await using var conn = await OpenConnectionAsync(handleOrdinates: Ordinates.XYZ); + await using var cmd = conn.CreateCommand(); + cmd.Parameters.AddWithValue("p1", pointWithZM); + cmd.CommandText = "SELECT ST_M(@p1::geometry)"; + + var result = cmd.ExecuteScalar(); + + // M coordinate should be filtered out and return NULL + Assert.That(result, Is.EqualTo(DBNull.Value), + "M coordinate should be filtered during write when handleOrdinates: Ordinates.XYZ"); + } + [Test] public async Task Array() { From ff82cd7433015abad03993b1a170ab37713f264d Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Fri, 13 Mar 2026 09:03:18 +0200 Subject: [PATCH 717/761] Bump dependencies (#6500) --- Directory.Packages.props | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index da809099f4..ef14823895 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -1,11 +1,11 @@ - 10.0.4 - 10.0.4 + 10.0.5 + 10.0.5 - 10.0.4 - 10.0.4 + 10.0.5 + 10.0.5 @@ -21,11 +21,11 @@ - + - + From 5c46b99d64015bc21e805b16a24d31a94e0670d3 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Fri, 13 Mar 2026 13:52:33 +0200 Subject: [PATCH 718/761] Remove the obsoleted large object API (#6493) Closes #6492 --- src/Npgsql/NpgsqlLargeObjectManager.cs | 242 -------------------- src/Npgsql/NpgsqlLargeObjectStream.cs | 297 ------------------------- src/Npgsql/PublicAPI.Unshipped.txt | 37 +++ test/Npgsql.Tests/LargeObjectTests.cs | 50 ----- 4 files changed, 37 insertions(+), 589 deletions(-) delete mode 100644 src/Npgsql/NpgsqlLargeObjectManager.cs delete mode 100644 src/Npgsql/NpgsqlLargeObjectStream.cs delete mode 100644 test/Npgsql.Tests/LargeObjectTests.cs diff --git a/src/Npgsql/NpgsqlLargeObjectManager.cs b/src/Npgsql/NpgsqlLargeObjectManager.cs deleted file mode 100644 index 
2bc6c02751..0000000000 --- a/src/Npgsql/NpgsqlLargeObjectManager.cs +++ /dev/null @@ -1,242 +0,0 @@ -using System; -using Npgsql.Util; -using System.Data; -using System.Text; -using System.Threading; -using System.Threading.Tasks; - -namespace Npgsql; - -/// -/// Large object manager. This class can be used to store very large files in a PostgreSQL database. -/// -[Obsolete("NpgsqlLargeObjectManager allows manipulating PostgreSQL large objects via publicly available PostgreSQL functions (lo_read, lo_write); call these yourself directly.")] -public class NpgsqlLargeObjectManager -{ - const int InvWrite = 0x00020000; - const int InvRead = 0x00040000; - - internal NpgsqlConnection Connection { get; } - - /// - /// The largest chunk size (in bytes) read and write operations will read/write each roundtrip to the network. Default 4 MB. - /// - public int MaxTransferBlockSize { get; set; } - - /// - /// Creates an NpgsqlLargeObjectManager for this connection. The connection must be opened to perform remote operations. - /// - /// - public NpgsqlLargeObjectManager(NpgsqlConnection connection) - { - Connection = connection; - MaxTransferBlockSize = 4 * 1024 * 1024; // 4MB - } - - /// - /// Execute a function - /// - internal async Task ExecuteFunction(bool async, string function, CancellationToken cancellationToken, params object[] arguments) - { - using var command = Connection.CreateCommand(); - var stringBuilder = new StringBuilder("SELECT * FROM ").Append(function).Append('('); - - for (var i = 0; i < arguments.Length; i++) - { - if (i > 0) - stringBuilder.Append(", "); - stringBuilder.Append('$').Append(i + 1); - command.Parameters.Add(new NpgsqlParameter { Value = arguments[i] }); - } - - stringBuilder.Append(')'); - command.CommandText = stringBuilder.ToString(); - - return (T)(async ? 
await command.ExecuteScalarAsync(cancellationToken).ConfigureAwait(false) : command.ExecuteScalar())!; - } - - /// - /// Execute a function that returns a byte array - /// - /// - internal async Task ExecuteFunctionGetBytes( - bool async, string function, byte[] buffer, int offset, int len, CancellationToken cancellationToken, params object[] arguments) - { - using var command = Connection.CreateCommand(); - var stringBuilder = new StringBuilder("SELECT * FROM ").Append(function).Append('('); - - for (var i = 0; i < arguments.Length; i++) - { - if (i > 0) - stringBuilder.Append(", "); - stringBuilder.Append('$').Append(i + 1); - command.Parameters.Add(new NpgsqlParameter { Value = arguments[i] }); - } - - stringBuilder.Append(')'); - command.CommandText = stringBuilder.ToString(); - - var reader = async - ? await command.ExecuteReaderAsync(CommandBehavior.SequentialAccess, cancellationToken).ConfigureAwait(false) - : command.ExecuteReader(CommandBehavior.SequentialAccess); - try - { - if (async) - await reader.ReadAsync(cancellationToken).ConfigureAwait(false); - else - reader.Read(); - - return (int)reader.GetBytes(0, 0, buffer, offset, len); - } - finally - { - if (async) - await reader.DisposeAsync().ConfigureAwait(false); - else - reader.Dispose(); - } - } - - /// - /// Create an empty large object in the database. If an oid is specified but is already in use, an PostgresException will be thrown. - /// - /// A preferred oid, or specify 0 if one should be automatically assigned - /// The oid for the large object created - /// If an oid is already in use - public uint Create(uint preferredOid = 0) => Create(preferredOid, false).GetAwaiter().GetResult(); - - // Review unused parameters - /// - /// Create an empty large object in the database. If an oid is specified but is already in use, an PostgresException will be thrown. 
- /// - /// A preferred oid, or specify 0 if one should be automatically assigned - /// - /// An optional token to cancel the asynchronous operation. The default value is . - /// - /// The oid for the large object created - /// If an oid is already in use - public Task CreateAsync(uint preferredOid, CancellationToken cancellationToken = default) - => Create(preferredOid, true, cancellationToken); - - Task Create(uint preferredOid, bool async, CancellationToken cancellationToken = default) - => ExecuteFunction(async, "lo_create", cancellationToken, (int)preferredOid); - - /// - /// Opens a large object on the backend, returning a stream controlling this remote object. - /// A transaction snapshot is taken by the backend when the object is opened with only read permissions. - /// When reading from this object, the contents reflects the time when the snapshot was taken. - /// Note that this method, as well as operations on the stream must be wrapped inside a transaction. - /// - /// Oid of the object - /// An NpgsqlLargeObjectStream - public NpgsqlLargeObjectStream OpenRead(uint oid) - => OpenRead(async: false, oid).GetAwaiter().GetResult(); - - /// - /// Opens a large object on the backend, returning a stream controlling this remote object. - /// A transaction snapshot is taken by the backend when the object is opened with only read permissions. - /// When reading from this object, the contents reflects the time when the snapshot was taken. - /// Note that this method, as well as operations on the stream must be wrapped inside a transaction. - /// - /// Oid of the object - /// - /// An optional token to cancel the asynchronous operation. The default value is . 
- /// - /// An NpgsqlLargeObjectStream - public Task OpenReadAsync(uint oid, CancellationToken cancellationToken = default) - => OpenRead(async: true, oid, cancellationToken); - - async Task OpenRead(bool async, uint oid, CancellationToken cancellationToken = default) - { - var fd = await ExecuteFunction(async, "lo_open", cancellationToken, (int)oid, InvRead).ConfigureAwait(false); - return new NpgsqlLargeObjectStream(this, fd, false); - } - - /// - /// Opens a large object on the backend, returning a stream controlling this remote object. - /// Note that this method, as well as operations on the stream must be wrapped inside a transaction. - /// - /// Oid of the object - /// An NpgsqlLargeObjectStream - public NpgsqlLargeObjectStream OpenReadWrite(uint oid) - => OpenReadWrite(async: false, oid).GetAwaiter().GetResult(); - - /// - /// Opens a large object on the backend, returning a stream controlling this remote object. - /// Note that this method, as well as operations on the stream must be wrapped inside a transaction. - /// - /// Oid of the object - /// - /// An optional token to cancel the asynchronous operation. The default value is . - /// - /// An NpgsqlLargeObjectStream - public Task OpenReadWriteAsync(uint oid, CancellationToken cancellationToken = default) - => OpenReadWrite(async: true, oid, cancellationToken); - - async Task OpenReadWrite(bool async, uint oid, CancellationToken cancellationToken = default) - { - var fd = await ExecuteFunction(async, "lo_open", cancellationToken, (int)oid, InvRead | InvWrite).ConfigureAwait(false); - return new NpgsqlLargeObjectStream(this, fd, true); - } - - /// - /// Deletes a large object on the backend. - /// - /// Oid of the object to delete - public void Unlink(uint oid) - => ExecuteFunction(async: false, "lo_unlink", CancellationToken.None, (int)oid).GetAwaiter().GetResult(); - - /// - /// Deletes a large object on the backend. 
- /// - /// Oid of the object to delete - /// - /// An optional token to cancel the asynchronous operation. The default value is . - /// - public Task UnlinkAsync(uint oid, CancellationToken cancellationToken = default) - => ExecuteFunction(async: true, "lo_unlink", cancellationToken, (int)oid); - - /// - /// Exports a large object stored in the database to a file on the backend. This requires superuser permissions. - /// - /// Oid of the object to export - /// Path to write the file on the backend - public void ExportRemote(uint oid, string path) - => ExecuteFunction(async: false, "lo_export", CancellationToken.None, (int)oid, path).GetAwaiter().GetResult(); - - /// - /// Exports a large object stored in the database to a file on the backend. This requires superuser permissions. - /// - /// Oid of the object to export - /// Path to write the file on the backend - /// - /// An optional token to cancel the asynchronous operation. The default value is . - /// - public Task ExportRemoteAsync(uint oid, string path, CancellationToken cancellationToken = default) - => ExecuteFunction(async: true, "lo_export", cancellationToken, (int)oid, path); - - /// - /// Imports a large object to be stored as a large object in the database from a file stored on the backend. This requires superuser permissions. - /// - /// Path to read the file on the backend - /// A preferred oid, or specify 0 if one should be automatically assigned - public void ImportRemote(string path, uint oid = 0) - => ExecuteFunction(async: false, "lo_import", CancellationToken.None, path, (int)oid).GetAwaiter().GetResult(); - - /// - /// Imports a large object to be stored as a large object in the database from a file stored on the backend. This requires superuser permissions. - /// - /// Path to read the file on the backend - /// A preferred oid, or specify 0 if one should be automatically assigned - /// - /// An optional token to cancel the asynchronous operation. The default value is . 
- /// - public Task ImportRemoteAsync(string path, uint oid, CancellationToken cancellationToken = default) - => ExecuteFunction(async: true, "lo_import", cancellationToken, path, (int)oid); - - /// - /// Since PostgreSQL 9.3, large objects larger than 2GB can be handled, up to 4TB. - /// This property returns true whether the PostgreSQL version is >= 9.3. - /// - public bool Has64BitSupport => Connection.PostgreSqlVersion.IsGreaterOrEqual(9, 3); -} diff --git a/src/Npgsql/NpgsqlLargeObjectStream.cs b/src/Npgsql/NpgsqlLargeObjectStream.cs deleted file mode 100644 index 09d90b164a..0000000000 --- a/src/Npgsql/NpgsqlLargeObjectStream.cs +++ /dev/null @@ -1,297 +0,0 @@ -using Npgsql.Util; -using System; -using System.IO; -using System.Threading; -using System.Threading.Tasks; - -namespace Npgsql; - -/// -/// An interface to remotely control the seekable stream for an opened large object on a PostgreSQL server. -/// Note that the OpenRead/OpenReadWrite method as well as all operations performed on this stream must be wrapped inside a database transaction. -/// -[Obsolete("NpgsqlLargeObjectStream allows manipulating PostgreSQL large objects via publicly available PostgreSQL functions (lo_read, lo_write); call these yourself directly.")] -public sealed class NpgsqlLargeObjectStream : Stream -{ - readonly NpgsqlLargeObjectManager _manager; - readonly int _fd; - long _pos; - readonly bool _writeable; - bool _disposed; - - internal NpgsqlLargeObjectStream(NpgsqlLargeObjectManager manager, int fd, bool writeable) - { - _manager = manager; - _fd = fd; - _pos = 0; - _writeable = writeable; - } - - void CheckDisposed() - { - if (_disposed) - throw new InvalidOperationException("Object disposed"); - } - - /// - /// Since PostgreSQL 9.3, large objects larger than 2GB can be handled, up to 4TB. - /// This property returns true whether the PostgreSQL version is >= 9.3. 
- /// - public bool Has64BitSupport => _manager.Connection.PostgreSqlVersion.IsGreaterOrEqual(9, 3); - - /// - /// Reads count bytes from the large object. The only case when fewer bytes are read is when end of stream is reached. - /// - /// The buffer where read data should be stored. - /// The offset in the buffer where the first byte should be read. - /// The maximum number of bytes that should be read. - /// How many bytes actually read, or 0 if end of file was already reached. - public override int Read(byte[] buffer, int offset, int count) - => Read(async: false, buffer, offset, count).GetAwaiter().GetResult(); - - /// - /// Reads count bytes from the large object. The only case when fewer bytes are read is when end of stream is reached. - /// - /// The buffer where read data should be stored. - /// The offset in the buffer where the first byte should be read. - /// The maximum number of bytes that should be read. - /// - /// An optional token to cancel the asynchronous operation. The default value is . - /// - /// How many bytes actually read, or 0 if end of file was already reached. 
- public override Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) - => Read(async: true, buffer, offset, count, cancellationToken); - - async Task Read(bool async, byte[] buffer, int offset, int count, CancellationToken cancellationToken = default) - { - ArgumentNullException.ThrowIfNull(buffer); - ArgumentOutOfRangeException.ThrowIfNegative(offset); - ArgumentOutOfRangeException.ThrowIfNegative(count); - if (buffer.Length - offset < count) - ThrowHelper.ThrowArgumentException("Invalid offset or count for this buffer"); - - CheckDisposed(); - - var chunkCount = Math.Min(count, _manager.MaxTransferBlockSize); - var read = 0; - - while (read < count) - { - var bytesRead = await _manager.ExecuteFunctionGetBytes( - async, "loread", buffer, offset + read, count - read, cancellationToken, _fd, chunkCount).ConfigureAwait(false); - _pos += bytesRead; - read += bytesRead; - if (bytesRead < chunkCount) - { - return read; - } - } - return read; - } - - /// - /// Writes count bytes to the large object. - /// - /// The buffer to write data from. - /// The offset in the buffer at which to begin copying bytes. - /// The number of bytes to write. - public override void Write(byte[] buffer, int offset, int count) - => Write(async: false, buffer, offset, count).GetAwaiter().GetResult(); - - /// - /// Writes count bytes to the large object. - /// - /// The buffer to write data from. - /// The offset in the buffer at which to begin copying bytes. - /// The number of bytes to write. - /// - /// An optional token to cancel the asynchronous operation. The default value is . 
- /// - public override Task WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) - => Write(async: true, buffer, offset, count, cancellationToken); - - async Task Write(bool async, byte[] buffer, int offset, int count, CancellationToken cancellationToken = default) - { - ArgumentNullException.ThrowIfNull(buffer); - ArgumentOutOfRangeException.ThrowIfNegative(offset); - ArgumentOutOfRangeException.ThrowIfNegative(count); - if (buffer.Length - offset < count) - ThrowHelper.ThrowArgumentException("Invalid offset or count for this buffer"); - - CheckDisposed(); - - if (!_writeable) - throw new NotSupportedException("Write cannot be called on a stream opened with no write permissions"); - - var totalWritten = 0; - - while (totalWritten < count) - { - var chunkSize = Math.Min(count - totalWritten, _manager.MaxTransferBlockSize); - var bytesWritten = await _manager.ExecuteFunction(async, "lowrite", cancellationToken, _fd, new ArraySegment(buffer, offset + totalWritten, chunkSize)).ConfigureAwait(false); - totalWritten += bytesWritten; - - if (bytesWritten != chunkSize) - throw new InvalidOperationException($"Internal Npgsql bug, please report"); - - _pos += bytesWritten; - } - } - - /// - /// CanTimeout always returns false. - /// - public override bool CanTimeout => false; - - /// - /// CanRead always returns true, unless the stream has been closed. - /// - public override bool CanRead => !_disposed; - - /// - /// CanWrite returns true if the stream was opened with write permissions, and the stream has not been closed. - /// - public override bool CanWrite => _writeable && !_disposed; - - /// - /// CanSeek always returns true, unless the stream has been closed. - /// - public override bool CanSeek => !_disposed; - - /// - /// Returns the current position in the stream. Getting the current position does not need a round-trip to the server, however setting the current position does. 
- /// - public override long Position - { - get - { - CheckDisposed(); - return _pos; - } - set => Seek(value, SeekOrigin.Begin); - } - - /// - /// Gets the length of the large object. This internally seeks to the end of the stream to retrieve the length, and then back again. - /// - public override long Length => GetLength(false).GetAwaiter().GetResult(); - - /// - /// Gets the length of the large object. This internally seeks to the end of the stream to retrieve the length, and then back again. - /// - /// - /// An optional token to cancel the asynchronous operation. The default value is . - /// - public Task GetLengthAsync(CancellationToken cancellationToken = default) => GetLength(async: true); - - async Task GetLength(bool async) - { - CheckDisposed(); - var old = _pos; - var retval = await Seek(async, 0, SeekOrigin.End).ConfigureAwait(false); - if (retval != old) - await Seek(async, old, SeekOrigin.Begin).ConfigureAwait(false); - return retval; - } - - /// - /// Seeks in the stream to the specified position. This requires a round-trip to the backend. - /// - /// A byte offset relative to the origin parameter. - /// A value of type SeekOrigin indicating the reference point used to obtain the new position. - /// - public override long Seek(long offset, SeekOrigin origin) - => Seek(async: false, offset, origin).GetAwaiter().GetResult(); - - /// - /// Seeks in the stream to the specified position. This requires a round-trip to the backend. - /// - /// A byte offset relative to the origin parameter. - /// A value of type SeekOrigin indicating the reference point used to obtain the new position. - /// - /// An optional token to cancel the asynchronous operation. The default value is . 
- /// - public Task SeekAsync(long offset, SeekOrigin origin, CancellationToken cancellationToken = default) - => Seek(async: true, offset, origin, cancellationToken); - - async Task Seek(bool async, long offset, SeekOrigin origin, CancellationToken cancellationToken = default) - { - if (origin < SeekOrigin.Begin || origin > SeekOrigin.End) - throw new ArgumentException("Invalid origin"); - if (!Has64BitSupport && offset != (int)offset) - throw new ArgumentOutOfRangeException(nameof(offset), "offset must fit in 32 bits for PostgreSQL versions older than 9.3"); - - CheckDisposed(); - - return _manager.Has64BitSupport - ? _pos = await _manager.ExecuteFunction(async, "lo_lseek64", cancellationToken, _fd, offset, (int)origin).ConfigureAwait(false) - : _pos = await _manager.ExecuteFunction(async, "lo_lseek", cancellationToken, _fd, (int)offset, (int)origin).ConfigureAwait(false); - } - - /// - /// Does nothing. - /// - public override void Flush() {} - - /// - /// Truncates or enlarges the large object to the given size. If enlarging, the large object is extended with null bytes. - /// For PostgreSQL versions earlier than 9.3, the value must fit in an Int32. - /// - /// Number of bytes to either truncate or enlarge the large object. - public override void SetLength(long value) - => SetLength(async: false, value).GetAwaiter().GetResult(); - - /// - /// Truncates or enlarges the large object to the given size. If enlarging, the large object is extended with null bytes. - /// For PostgreSQL versions earlier than 9.3, the value must fit in an Int32. - /// - /// Number of bytes to either truncate or enlarge the large object. - /// - /// An optional token to cancel the asynchronous operation. The default value is . 
- /// - public Task SetLength(long value, CancellationToken cancellationToken) - => SetLength(async: true, value, cancellationToken); - - async Task SetLength(bool async, long value, CancellationToken cancellationToken = default) - { - cancellationToken.ThrowIfCancellationRequested(); - - ArgumentOutOfRangeException.ThrowIfNegative(value); - if (!Has64BitSupport && value != (int)value) - throw new ArgumentOutOfRangeException(nameof(value), "offset must fit in 32 bits for PostgreSQL versions older than 9.3"); - - CheckDisposed(); - - if (!_writeable) - throw new NotSupportedException("SetLength cannot be called on a stream opened with no write permissions"); - - if (_manager.Has64BitSupport) - await _manager.ExecuteFunction(async, "lo_truncate64", cancellationToken, _fd, value).ConfigureAwait(false); - else - await _manager.ExecuteFunction(async, "lo_truncate", cancellationToken, _fd, (int)value).ConfigureAwait(false); - } - - /// - /// Releases resources at the backend allocated for this stream. - /// - public override void Close() - { - if (!_disposed) - { - _manager.ExecuteFunction(async: false, "lo_close", CancellationToken.None, _fd).GetAwaiter().GetResult(); - _disposed = true; - } - } - - /// - /// Releases resources at the backend allocated for this stream, iff disposing is true. - /// - /// Whether to release resources allocated at the backend. - protected override void Dispose(bool disposing) - { - if (disposing) - { - Close(); - } - } -} diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index b52e2d68fb..a51b8a3f46 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -99,6 +99,28 @@ override Npgsql.NpgsqlMultiHostDataSource.Clear() -> void Npgsql.NpgsqlDataSource.ReloadTypes() -> void Npgsql.NpgsqlDataSource.ReloadTypesAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! 
Npgsql.NpgsqlConnection.ReloadTypesAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +*REMOVED*Npgsql.NpgsqlLargeObjectManager +*REMOVED*Npgsql.NpgsqlLargeObjectManager.Create(uint preferredOid = 0) -> uint +*REMOVED*Npgsql.NpgsqlLargeObjectManager.CreateAsync(uint preferredOid, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +*REMOVED*Npgsql.NpgsqlLargeObjectManager.ExportRemote(uint oid, string! path) -> void +*REMOVED*Npgsql.NpgsqlLargeObjectManager.ExportRemoteAsync(uint oid, string! path, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +*REMOVED*Npgsql.NpgsqlLargeObjectManager.Has64BitSupport.get -> bool +*REMOVED*Npgsql.NpgsqlLargeObjectManager.ImportRemote(string! path, uint oid = 0) -> void +*REMOVED*Npgsql.NpgsqlLargeObjectManager.ImportRemoteAsync(string! path, uint oid, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +*REMOVED*Npgsql.NpgsqlLargeObjectManager.MaxTransferBlockSize.get -> int +*REMOVED*Npgsql.NpgsqlLargeObjectManager.MaxTransferBlockSize.set -> void +*REMOVED*Npgsql.NpgsqlLargeObjectManager.NpgsqlLargeObjectManager(Npgsql.NpgsqlConnection! connection) -> void +*REMOVED*Npgsql.NpgsqlLargeObjectManager.OpenRead(uint oid) -> Npgsql.NpgsqlLargeObjectStream! +*REMOVED*Npgsql.NpgsqlLargeObjectManager.OpenReadAsync(uint oid, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +*REMOVED*Npgsql.NpgsqlLargeObjectManager.OpenReadWrite(uint oid) -> Npgsql.NpgsqlLargeObjectStream! 
+*REMOVED*Npgsql.NpgsqlLargeObjectManager.OpenReadWriteAsync(uint oid, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +*REMOVED*Npgsql.NpgsqlLargeObjectManager.Unlink(uint oid) -> void +*REMOVED*Npgsql.NpgsqlLargeObjectManager.UnlinkAsync(uint oid, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +*REMOVED*Npgsql.NpgsqlLargeObjectStream +*REMOVED*Npgsql.NpgsqlLargeObjectStream.GetLengthAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +*REMOVED*Npgsql.NpgsqlLargeObjectStream.Has64BitSupport.get -> bool +*REMOVED*Npgsql.NpgsqlLargeObjectStream.SeekAsync(long offset, System.IO.SeekOrigin origin, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +*REMOVED*Npgsql.NpgsqlLargeObjectStream.SetLength(long value, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! *REMOVED*Npgsql.NpgsqlConnection.ReloadTypesAsync() -> System.Threading.Tasks.Task! *REMOVED*Npgsql.NpgsqlDataSourceBuilder.MapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! *REMOVED*Npgsql.NpgsqlDataSourceBuilder.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! @@ -130,6 +152,21 @@ NpgsqlTypes.NpgsqlCube.IsPoint.get -> bool NpgsqlTypes.NpgsqlCube.ToSubset(params int[]! indexes) -> NpgsqlTypes.NpgsqlCube NpgsqlTypes.NpgsqlCube.UpperRight.get -> System.Collections.Generic.IReadOnlyList! 
NpgsqlTypes.NpgsqlDbType.Cube = 63 -> NpgsqlTypes.NpgsqlDbType +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.CanRead.get -> bool +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.CanSeek.get -> bool +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.CanTimeout.get -> bool +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.CanWrite.get -> bool +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.Close() -> void +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.Flush() -> void +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.Length.get -> long +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.Position.get -> long +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.Position.set -> void +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.Read(byte[]! buffer, int offset, int count) -> int +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.ReadAsync(byte[]! buffer, int offset, int count, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.Seek(long offset, System.IO.SeekOrigin origin) -> long +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.SetLength(long value) -> void +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.Write(byte[]! buffer, int offset, int count) -> void +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.WriteAsync(byte[]! buffer, int offset, int count, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! override NpgsqlTypes.NpgsqlCube.Equals(object? obj) -> bool override NpgsqlTypes.NpgsqlCube.GetHashCode() -> int override NpgsqlTypes.NpgsqlCube.ToString() -> string! 
diff --git a/test/Npgsql.Tests/LargeObjectTests.cs b/test/Npgsql.Tests/LargeObjectTests.cs deleted file mode 100644 index bdb1c51084..0000000000 --- a/test/Npgsql.Tests/LargeObjectTests.cs +++ /dev/null @@ -1,50 +0,0 @@ -using System.Linq; -using System.Text; -using NUnit.Framework; - -namespace Npgsql.Tests; - -#pragma warning disable CS0618 // Large object support is obsolete - -public class LargeObjectTests : TestBase -{ - [Test] - public void Test() - { - using var conn = OpenConnection(); - using var transaction = conn.BeginTransaction(); - var manager = new NpgsqlLargeObjectManager(conn); - var oid = manager.Create(); - using (var stream = manager.OpenReadWrite(oid)) - { - var buf = "Hello"u8.ToArray(); - stream.Write(buf, 0, buf.Length); - stream.Seek(0, System.IO.SeekOrigin.Begin); - var buf2 = new byte[buf.Length]; - stream.ReadExactly(buf2, 0, buf2.Length); - Assert.That(buf.SequenceEqual(buf2)); - - Assert.That(stream.Position, Is.EqualTo(5)); - - Assert.That(stream.Length, Is.EqualTo(5)); - - stream.Seek(-1, System.IO.SeekOrigin.Current); - Assert.That(stream.ReadByte(), Is.EqualTo((int)'o')); - - manager.MaxTransferBlockSize = 3; - - stream.Write(buf, 0, buf.Length); - stream.Seek(-5, System.IO.SeekOrigin.End); - var buf3 = new byte[100]; - Assert.That(stream.Read(buf3, 0, 100), Is.EqualTo(5)); - Assert.That(buf.SequenceEqual(buf3.Take(5))); - - stream.SetLength(43); - Assert.That(stream.Length, Is.EqualTo(43)); - } - - manager.Unlink(oid); - - transaction.Rollback(); - } -} From 32b2b7d0af3278c6e43f35041f3aa6c16069a9ae Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Fri, 13 Mar 2026 14:57:47 +0200 Subject: [PATCH 719/761] Tweak dependabot config (#6501) --- .github/dependabot.yml | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 9277727063..d59cfe3375 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -3,9 +3,16 @@ updates: - package-ecosystem: 
"nuget" directory: "/" schedule: - interval: "daily" + interval: "weekly" + groups: + minor-and-patch: + patterns: ["*"] + update-types: ["minor", "patch"] - package-ecosystem: "github-actions" directory: "/" schedule: - interval: "daily" + interval: "weekly" + groups: + actions-all: + patterns: ["*"] From e7aa407e5f4f9cec7053ec8d89dc98ea3f1416d3 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Fri, 13 Mar 2026 15:24:32 +0100 Subject: [PATCH 720/761] Update OS versions for CI (#6502) --- .github/workflows/build.yml | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 924622255b..075d7e35fe 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -40,11 +40,11 @@ jobs: pg_major: 18 config: Debug test_tfm: net10.0 - - os: macos-15 + - os: macos-26 pg_major: 16 config: Release test_tfm: net10.0 - - os: windows-2022 + - os: windows-2025 pg_major: 18 config: Release test_tfm: net10.0 @@ -66,11 +66,11 @@ jobs: is_release: ${{ steps.analyze_tag.outputs.is_release }} is_prerelease: ${{ steps.analyze_tag.outputs.is_prerelease }} - # Installing PostGIS on Windows is complicated/unreliable, so we don't test on it. + # Installing PostGIS on Windows/macOS is complicated/unreliable, so we don't test on it. # The NPGSQL_TEST_POSTGIS environment variable ensures that if PostGIS isn't installed, # the PostGIS tests fail and therefore fail the build. 
env: - NPGSQL_TEST_POSTGIS: ${{ !startsWith(matrix.os, 'windows') }} + NPGSQL_TEST_POSTGIS: ${{ !startsWith(matrix.os, 'windows') && !startsWith(matrix.os, 'macos') }} steps: - name: Checkout @@ -315,7 +315,6 @@ jobs: **/*_Sequence.xml - name: Test Plugins - if: "!startsWith(matrix.os, 'macos')" run: | if [ -z "${{ matrix.pg_prerelease }}" ]; then dotnet test -c ${{ matrix.config }} -f ${{ matrix.test_tfm }} test/Npgsql.PluginTests --logger "GitHubActions;report-warnings=false" From 80973daec0fb4319dcd6d319a9321708336eca0c Mon Sep 17 00:00:00 2001 From: Brian Pursley Date: Fri, 13 Mar 2026 11:07:38 -0400 Subject: [PATCH 721/761] Support async connection close during NpgsqlDataReader cleanup and NpgsqlCommand exception handling. (#6459) --- src/Npgsql/NpgsqlCommand.cs | 2 +- src/Npgsql/NpgsqlDataReader.cs | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/Npgsql/NpgsqlCommand.cs b/src/Npgsql/NpgsqlCommand.cs index 1e3f4a1f04..c8fef976b2 100644 --- a/src/Npgsql/NpgsqlCommand.cs +++ b/src/Npgsql/NpgsqlCommand.cs @@ -1567,7 +1567,7 @@ internal virtual async ValueTask ExecuteReader(bool async, Com if ((behavior & CommandBehavior.CloseConnection) == CommandBehavior.CloseConnection) { Debug.Assert(_connector is null && conn is not null); - conn.Close(); + await conn.Close(async).ConfigureAwait(false); } throw; diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 27bc6675c7..1680bf97f4 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -70,7 +70,7 @@ public sealed class NpgsqlDataReader : DbDataReader, IDbColumnSchemaGenerator int _columnsStartPos; /// - /// The index of the column that we're on, i.e. that has already been parsed, is + /// The index of the column that we're on, i.e. that has already been parsed, /// is memory and can be retrieved. Initialized to -1, which means we're on the column /// count (which comes before the first column). 
/// @@ -1192,7 +1192,7 @@ internal async Task Cleanup(bool async, bool connectionClosing = false, bool isD if (_behavior.HasFlag(CommandBehavior.CloseConnection) && !connectionClosing) { Debug.Assert(_connection is not null); - _connection.Close(); + await _connection.Close(async).ConfigureAwait(false); } if (ReaderClosed != null) @@ -1321,7 +1321,7 @@ public override int GetValues(object[] values) /// Gets the value of the specified column as a TimeSpan, /// /// - /// PostgreSQL's interval type has has a resolution of 1 microsecond and ranges from + /// PostgreSQL's interval type has a resolution of 1 microsecond and ranges from /// -178000000 to 178000000 years, while .NET's TimeSpan has a resolution of 100 nanoseconds /// and ranges from roughly -29247 to 29247 years. /// See https://www.postgresql.org/docs/current/static/datatype-datetime.html @@ -1335,7 +1335,7 @@ public override int GetValues(object[] values) /// /// Returns a nested data reader for the requested column. - /// The column type must be a record or a to Npgsql known composite type, or an array thereof. + /// The column type must be a record or a Npgsql known composite type, or an array thereof. /// Currently only supported in non-sequential mode. /// /// The zero-based column ordinal. @@ -2101,7 +2101,7 @@ DataFormat GetDefaultInfo(int ordinal, out PgConverter converter, out Size buffe /// /// Checks that we have a RowDescription, but not necessary an actual resultset - /// (for operations which work in SchemaOnly mode. + /// (for operations which work in SchemaOnly mode). 
/// FieldDescription GetField(int ordinal) { From 9acf81a5758ba58e75a938e400272f341de050e9 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Sat, 14 Mar 2026 08:31:53 +0100 Subject: [PATCH 722/761] Add missing simple builtin reg* uint type mappings (#6503) --- src/Npgsql/Internal/Postgres/DataTypeNames.cs | 9 +++++ .../AdoTypeInfoResolverFactory.cs | 36 +++++++++++++++++-- src/Npgsql/PostgresMinimalDatabaseInfo.cs | 9 +++++ test/Npgsql.Tests/Types/InternalTypeTests.cs | 33 ++++++++--------- 4 files changed, 66 insertions(+), 21 deletions(-) diff --git a/src/Npgsql/Internal/Postgres/DataTypeNames.cs b/src/Npgsql/Internal/Postgres/DataTypeNames.cs index 6c4ca73b2f..d904c8ae33 100644 --- a/src/Npgsql/Internal/Postgres/DataTypeNames.cs +++ b/src/Npgsql/Internal/Postgres/DataTypeNames.cs @@ -79,7 +79,16 @@ static class DataTypeNames public static DataTypeName Varbit => ValidatedName("pg_catalog.varbit"); public static DataTypeName TsVector => ValidatedName("pg_catalog.tsvector"); public static DataTypeName TsQuery => ValidatedName("pg_catalog.tsquery"); + public static DataTypeName RegClass => ValidatedName("pg_catalog.regclass"); + public static DataTypeName RegCollation => ValidatedName("pg_catalog.regcollation"); public static DataTypeName RegConfig => ValidatedName("pg_catalog.regconfig"); + public static DataTypeName RegDictionary => ValidatedName("pg_catalog.regdictionary"); + public static DataTypeName RegNamespace => ValidatedName("pg_catalog.regnamespace"); + public static DataTypeName RegOper => ValidatedName("pg_catalog.regoper"); + public static DataTypeName RegOperator => ValidatedName("pg_catalog.regoperator"); + public static DataTypeName RegProc => ValidatedName("pg_catalog.regproc"); + public static DataTypeName RegProcedure => ValidatedName("pg_catalog.regprocedure"); + public static DataTypeName RegRole => ValidatedName("pg_catalog.regrole"); public static DataTypeName Uuid => ValidatedName("pg_catalog.uuid"); public static DataTypeName Xml => 
ValidatedName("pg_catalog.xml"); public static DataTypeName Json => ValidatedName("pg_catalog.json"); diff --git a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs index cd1ab655bc..5f1753bfa9 100644 --- a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs @@ -280,7 +280,23 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) MatchRequirement.DataTypeName); // UInt internal types - foreach (var dataTypeName in new[] { DataTypeNames.Oid, DataTypeNames.Xid, DataTypeNames.Cid, DataTypeNames.RegType, DataTypeNames.RegConfig }) + foreach (var dataTypeName in new[] + { + DataTypeNames.Oid, + DataTypeNames.Xid, + DataTypeNames.Cid, + DataTypeNames.RegClass, + DataTypeNames.RegCollation, + DataTypeNames.RegConfig, + DataTypeNames.RegDictionary, + DataTypeNames.RegNamespace, + DataTypeNames.RegOper, + DataTypeNames.RegOperator, + DataTypeNames.RegProc, + DataTypeNames.RegProcedure, + DataTypeNames.RegRole, + DataTypeNames.RegType + }) { mappings.AddStructType(dataTypeName, static (options, mapping, _) => mapping.CreateInfo(options, new UInt32Converter()), @@ -470,7 +486,23 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) mappings.AddArrayType>("hstore"); // UInt internal types - foreach (var dataTypeName in new[] { DataTypeNames.Oid, DataTypeNames.Xid, DataTypeNames.Cid, DataTypeNames.RegType, (string)DataTypeNames.RegConfig }) + foreach (var dataTypeName in new[] + { + DataTypeNames.Oid, + DataTypeNames.Xid, + DataTypeNames.Cid, + DataTypeNames.RegClass, + DataTypeNames.RegCollation, + DataTypeNames.RegConfig, + DataTypeNames.RegDictionary, + DataTypeNames.RegNamespace, + DataTypeNames.RegOper, + DataTypeNames.RegOperator, + DataTypeNames.RegProc, + DataTypeNames.RegProcedure, + DataTypeNames.RegRole, + DataTypeNames.RegType + }) { 
mappings.AddStructArrayType(dataTypeName); } diff --git a/src/Npgsql/PostgresMinimalDatabaseInfo.cs b/src/Npgsql/PostgresMinimalDatabaseInfo.cs index ed2ef15e81..d5141db985 100644 --- a/src/Npgsql/PostgresMinimalDatabaseInfo.cs +++ b/src/Npgsql/PostgresMinimalDatabaseInfo.cs @@ -65,7 +65,16 @@ static PostgresType[] CreateTypes(bool withMultiranges) Add(DataTypeNames.Varbit, oid: 1562, arrayOid: 1563); Add(DataTypeNames.TsVector, oid: 3614, arrayOid: 3643); Add(DataTypeNames.TsQuery, oid: 3615, arrayOid: 3645); + Add(DataTypeNames.RegClass, oid: 2205, arrayOid: 2210); + Add(DataTypeNames.RegCollation, oid: 4191, arrayOid: 4192); Add(DataTypeNames.RegConfig, oid: 3734, arrayOid: 3735); + Add(DataTypeNames.RegDictionary, oid: 3769, arrayOid: 3770); + Add(DataTypeNames.RegNamespace, oid: 4089, arrayOid: 4090); + Add(DataTypeNames.RegOper, oid: 2203, arrayOid: 2208); + Add(DataTypeNames.RegOperator, oid: 2204, arrayOid: 2209); + Add(DataTypeNames.RegProc, oid: 24, arrayOid: 1008); + Add(DataTypeNames.RegProcedure, oid: 2202, arrayOid: 2207); + Add(DataTypeNames.RegRole, oid: 4096, arrayOid: 4097); Add(DataTypeNames.Uuid, oid: 2950, arrayOid: 2951); Add(DataTypeNames.Xml, oid: 142, arrayOid: 143); Add(DataTypeNames.Json, oid: 114, arrayOid: 199); diff --git a/test/Npgsql.Tests/Types/InternalTypeTests.cs b/test/Npgsql.Tests/Types/InternalTypeTests.cs index 21b9e8c24f..3a22cefcf4 100644 --- a/test/Npgsql.Tests/Types/InternalTypeTests.cs +++ b/test/Npgsql.Tests/Types/InternalTypeTests.cs @@ -20,26 +20,21 @@ public async Task Read_internal_char() } [Test] - [TestCase(NpgsqlDbType.Oid)] - [TestCase(NpgsqlDbType.Regtype)] - [TestCase(NpgsqlDbType.Regconfig)] - public async Task Internal_uint_types(NpgsqlDbType npgsqlDbType) + [TestCase("oid")] + [TestCase("regtype")] + [TestCase("regconfig")] + [TestCase("regclass")] + [TestCase("regcollation")] + [TestCase("regdictionary")] + [TestCase("regnamespace")] + [TestCase("regoper")] + [TestCase("regoperator")] + 
[TestCase("regproc")] + [TestCase("regprocedure")] + [TestCase("regrole")] + public async Task Internal_uint_types(string postgresType) { - var postgresType = npgsqlDbType.ToString().ToLowerInvariant(); - using var conn = await OpenConnectionAsync(); - using var cmd = new NpgsqlCommand($"SELECT @max, 4294967295::{postgresType}, @eight, 8::{postgresType}", conn); - cmd.Parameters.AddWithValue("max", npgsqlDbType, uint.MaxValue); - cmd.Parameters.AddWithValue("eight", npgsqlDbType, 8u); - using var reader = await cmd.ExecuteReaderAsync(); - reader.Read(); - - for (var i = 0; i < reader.FieldCount; i++) - Assert.That(reader.GetFieldType(i), Is.EqualTo(typeof(uint))); - - Assert.That(reader.GetValue(0), Is.EqualTo(uint.MaxValue)); - Assert.That(reader.GetValue(1), Is.EqualTo(uint.MaxValue)); - Assert.That(reader.GetValue(2), Is.EqualTo(8u)); - Assert.That(reader.GetValue(3), Is.EqualTo(8u)); + await AssertType(uint.MaxValue, "4294967295", postgresType, dataTypeInference: DataTypeInference.Nothing); } [Test] From 0eb4e956af013c83e5ceee99cc5103ca8b39d34a Mon Sep 17 00:00:00 2001 From: Copilot <198982749+Copilot@users.noreply.github.com> Date: Sun, 15 Mar 2026 15:33:25 +0100 Subject: [PATCH 723/761] Extend AssertType to cover sync and async read/write converter paths (#6505) --- src/Npgsql/NpgsqlDataReader.cs | 11 ++++++ test/Npgsql.Tests/Support/TestBase.cs | 48 +++++++++++++++++++++++---- 2 files changed, 52 insertions(+), 7 deletions(-) diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 1680bf97f4..3b56704606 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -89,6 +89,17 @@ public sealed class NpgsqlDataReader : DbDataReader, IDbColumnSchemaGenerator /// bool _isRowBuffered; + /// + /// Gets or sets whether the current row is fully buffered in memory. + /// When , async reads will go through the real async converter path rather than the sync shortcut. + /// + /// Settable for testing purposes. 
+ internal bool IsRowBuffered + { + get => _isRowBuffered; + set => _isRowBuffered = value; + } + /// /// The RowDescription message for the current resultset being processed /// diff --git a/test/Npgsql.Tests/Support/TestBase.cs b/test/Npgsql.Tests/Support/TestBase.cs index 8fc0131889..b8c531139f 100644 --- a/test/Npgsql.Tests/Support/TestBase.cs +++ b/test/Npgsql.Tests/Support/TestBase.cs @@ -339,14 +339,31 @@ static async Task AssertTypeWriteCore( cmd.CommandText = "SELECT " + string.Join(", ", Enumerable.Range(1, cmd.Parameters.Count).Select(i => "pg_typeof($1)::text, $1::text".Replace("$1", $"${i}"))); - await using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SequentialAccess); - await reader.ReadAsync(); + // Async execution: tests async write paths in converters + { + await using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SequentialAccess); + await reader.ReadAsync(); + AssertWriteResults(reader); + } - for (var i = 0; i < cmd.Parameters.Count * 2; i += 2) + // Sync execution: tests sync write paths in converters. + // Reset parameter values first so that one-shot values (e.g. streams) can be re-read from the start. 
+ for (var i = 0; i < cmd.Parameters.Count; i++) + cmd.Parameters[i].Value = valueFactory(); { - var error = errorIdentifier[i / 2]; - Assert.That(reader[i], Is.EqualTo(dataTypeNameWithoutFacets), $"Got wrong data type name when writing with {error}"); - Assert.That(reader[i+1], Is.EqualTo(sqlLiteral), $"Got wrong SQL literal when writing with {error}"); + using var reader = cmd.ExecuteReader(CommandBehavior.SequentialAccess); + reader.Read(); + AssertWriteResults(reader); + } + + void AssertWriteResults(NpgsqlDataReader reader) + { + for (var i = 0; i < cmd.Parameters.Count * 2; i += 2) + { + var error = errorIdentifier[i / 2]; + Assert.That(reader[i], Is.EqualTo(dataTypeNameWithoutFacets), $"Got wrong data type name when writing with {error}"); + Assert.That(reader[i + 1], Is.EqualTo(sqlLiteral), $"Got wrong SQL literal when writing with {error}"); + } } void DataTypeAsserts() @@ -465,6 +482,8 @@ static async Task AssertTypeReadCore( sqlLiteral = sqlLiteral.Replace("'", "''"); await using var cmd = new NpgsqlCommand($"SELECT '{sqlLiteral}'::{dataTypeName}", connection); + + // Async execution: tests async and sync column reads within a single buffered query. await using var reader = await cmd.ExecuteReaderAsync(); await reader.ReadAsync(); @@ -491,6 +510,15 @@ static async Task AssertTypeReadCore( T actual; if (valueTypeEqualsFieldType) { + // Set IsRowBuffered=false before the first read so _column is still -1, ensuring GetFieldValueAsync + // goes through the real async converter path (converter.ReadAsObjectAsync) rather than the sync shortcut. + reader.IsRowBuffered = false; + actual = (T)await reader.GetFieldValueAsync(0); + Assert.That(actual, comparer is null ? Is.EqualTo(value) : Is.EqualTo(value).Using(CreateEqualityComparer(comparer)), + $"Got wrong result from GetFieldValueAsync() value when reading '{truncatedSqlLiteral}'"); + + // Restore IsRowBuffered so subsequent sync reads use the normal buffered code path. 
+ reader.IsRowBuffered = true; actual = (T)reader.GetValue(0); Assert.That(actual, comparer is null ? Is.EqualTo(value) : Is.EqualTo(value).Using(CreateEqualityComparer(comparer!)), $"Got wrong result from GetValue() value when reading '{truncatedSqlLiteral}'"); @@ -502,8 +530,14 @@ static async Task AssertTypeReadCore( return actual; } - actual = reader.GetFieldValue(0); + // See comment above about IsRowBuffered. + reader.IsRowBuffered = false; + actual = await reader.GetFieldValueAsync(0); + Assert.That(actual, comparer is null ? Is.EqualTo(value) : Is.EqualTo(value).Using(CreateEqualityComparer(comparer!)), + $"Got wrong result from GetFieldValueAsync() value when reading '{truncatedSqlLiteral}'"); + reader.IsRowBuffered = true; + actual = reader.GetFieldValue(0); Assert.That(actual, comparer is null ? Is.EqualTo(value) : Is.EqualTo(value).Using(CreateEqualityComparer(comparer!)), $"Got wrong result from GetFieldValue() value when reading '{truncatedSqlLiteral}'"); From 63cfeb7e21d75ae216bd87042638bc334f41e590 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Mon, 16 Mar 2026 08:49:25 +0100 Subject: [PATCH 724/761] Move type loading to use the data type name constructors (#6507) --- src/Npgsql/Internal/Postgres/DataTypeName.cs | 61 ++++++++++---------- src/Npgsql/PostgresDatabaseInfo.cs | 20 ++++--- src/Npgsql/PostgresTypes/PostgresType.cs | 10 ++-- test/Npgsql.Tests/DataTypeNameTests.cs | 24 +++++++- 4 files changed, 68 insertions(+), 47 deletions(-) diff --git a/src/Npgsql/Internal/Postgres/DataTypeName.cs b/src/Npgsql/Internal/Postgres/DataTypeName.cs index 9c9f43e41a..8dd91b5508 100644 --- a/src/Npgsql/Internal/Postgres/DataTypeName.cs +++ b/src/Npgsql/Internal/Postgres/DataTypeName.cs @@ -52,13 +52,15 @@ public DataTypeName(string fullyQualifiedDataTypeName) internal static DataTypeName ValidatedName(string fullyQualifiedDataTypeName) => new(fullyQualifiedDataTypeName, validated: true); + bool IsUnqualifiedDisplayName => SchemaSpan is "pg_catalog" || 
IsUnqualified; + // Includes schema unless it's pg_catalog or the schema is an invalid character used to represent an unspecified schema. public string DisplayName => - Value.StartsWith("pg_catalog", StringComparison.Ordinal) || IsUnqualified + IsUnqualifiedDisplayName ? UnqualifiedDisplayName : Schema + "." + UnqualifiedDisplayName; - public string UnqualifiedDisplayName => ToDisplayName(UnqualifiedNameSpan); + public string UnqualifiedDisplayName => ToDisplayName(UnqualifiedNameSpan, mapAliases: IsUnqualifiedDisplayName); internal ReadOnlySpan SchemaSpan => Value.AsSpan(0, _value.IndexOf('.')); public string Schema => Value.Substring(0, _value.IndexOf('.')); @@ -124,27 +126,20 @@ public DataTypeName ToDefaultMultirangeName() // Create a DataTypeName from a broader range of valid names. // including SQL aliases like 'timestamp without time zone', trailing facet info etc. - public static DataTypeName FromDisplayName(string displayName, string? schema = null) - => FromDisplayName(displayName, schema, assumeUnqualified: false); // user strings may come fully qualified. - - // This method is used during type loading, it allows us to accept friendly names in constructors, without having to preconcatenate the schema. - internal static DataTypeName FromDisplayName(string displayName, string? schema, bool assumeUnqualified) + public static DataTypeName FromDisplayName(string displayName) { var displayNameSpan = displayName.AsSpan().Trim(); var schemaEndIndex = displayNameSpan.IndexOf('.'); ReadOnlySpan schemaSpan; - if (schemaEndIndex is not -1 && !assumeUnqualified) + if (schemaEndIndex is not -1) { - if (schema is not null) - throw new ArgumentException("Schema provided for a fully qualified name."); - schemaSpan = displayNameSpan.Slice(0, schemaEndIndex); displayNameSpan = displayNameSpan.Slice(schemaEndIndex + 1); } else { - schemaSpan = schema is null ? 
$"{InvalidIdentifier}" : schema.AsSpan(); + schemaSpan = $"{InvalidIdentifier}"; } // Then we strip either of the two valid array representations to get the base type name (with or without facets). @@ -196,7 +191,7 @@ internal static DataTypeName FromDisplayName(string displayName, string? schema, var value => value }; - if (schema is null && DataTypeNames.IsWellKnownUnqualifiedName(mapped)) + if (DataTypeNames.IsWellKnownUnqualifiedName(mapped)) schemaSpan = "pg_catalog".AsSpan(); return new(string.Concat(schemaSpan, ".", isArray ? "_" : "", mapped)); @@ -207,29 +202,33 @@ internal static DataTypeName FromDisplayName(string displayName, string? schema, // Additionally array types have a '_' prefix while for readability their element type should be postfixed with '[]'. // See the table for all the aliases https://www.postgresql.org/docs/current/static/datatype.html#DATATYPE-TABLE // Alternatively some of the source lives at https://github.com/postgres/postgres/blob/c8e1ba736b2b9e8c98d37a5b77c4ed31baf94147/src/backend/utils/adt/format_type.c#L186 - static string ToDisplayName(ReadOnlySpan unqualifiedName) + static string ToDisplayName(ReadOnlySpan unqualifiedName, bool mapAliases) { var isArray = unqualifiedName.IndexOf('_') is 0; var baseTypeName = isArray ? unqualifiedName.Slice(1) : unqualifiedName; - var mappedBaseType = baseTypeName switch + string? 
mappedBaseType = null; + if (mapAliases) { - "bool" => "boolean", - "bpchar" => "character", - "decimal" => "numeric", - "float4" => "real", - "float8" => "double precision", - "int2" => "smallint", - "int4" => "integer", - "int8" => "bigint", - "time" => "time without time zone", - "timestamp" => "timestamp without time zone", - "timetz" => "time with time zone", - "timestamptz" => "timestamp with time zone", - "varbit" => "bit varying", - "varchar" => "character varying", - _ => null - }; + mappedBaseType = baseTypeName switch + { + "bool" => "boolean", + "bpchar" => "character", + "decimal" => "numeric", + "float4" => "real", + "float8" => "double precision", + "int2" => "smallint", + "int4" => "integer", + "int8" => "bigint", + "time" => "time without time zone", + "timestamp" => "timestamp without time zone", + "timetz" => "time with time zone", + "timestamptz" => "timestamp with time zone", + "varbit" => "bit varying", + "varchar" => "character varying", + _ => null + }; + } return isArray ? string.Concat(mappedBaseType ?? 
baseTypeName, "[]") diff --git a/src/Npgsql/PostgresDatabaseInfo.cs b/src/Npgsql/PostgresDatabaseInfo.cs index 1c1b518a3f..6218b0a8d6 100644 --- a/src/Npgsql/PostgresDatabaseInfo.cs +++ b/src/Npgsql/PostgresDatabaseInfo.cs @@ -8,6 +8,7 @@ using Microsoft.Extensions.Logging.Abstractions; using Npgsql.BackendMessages; using Npgsql.Internal; +using Npgsql.Internal.Postgres; using Npgsql.PostgresTypes; using Npgsql.Util; using static Npgsql.Util.Statics; @@ -523,7 +524,7 @@ bool TryAddPostgresType(PostgresTypeDefinition postgresTypeDefinition, Dictionar switch (postgresTypeDefinition.Type) { case 'b': // Normal base type - var baseType = new PostgresBaseType(postgresTypeDefinition.Namespace, postgresTypeDefinition.Name, postgresTypeDefinition.OID); + var baseType = new PostgresBaseType(postgresTypeDefinition.DataTypeName, postgresTypeDefinition.OID); byOID[baseType.OID] = baseType; return true; @@ -537,7 +538,7 @@ bool TryAddPostgresType(PostgresTypeDefinition postgresTypeDefinition, Dictionar return false; } - var arrayType = new PostgresArrayType(postgresTypeDefinition.Namespace, postgresTypeDefinition.Name, postgresTypeDefinition.OID, elementPostgresType); + var arrayType = new PostgresArrayType(postgresTypeDefinition.DataTypeName, postgresTypeDefinition.OID, elementPostgresType); byOID[arrayType.OID] = arrayType; return true; } @@ -552,7 +553,7 @@ bool TryAddPostgresType(PostgresTypeDefinition postgresTypeDefinition, Dictionar return false; } - var rangeType = new PostgresRangeType(postgresTypeDefinition.Namespace, postgresTypeDefinition.Name, postgresTypeDefinition.OID, subtypePostgresType); + var rangeType = new PostgresRangeType(postgresTypeDefinition.DataTypeName, postgresTypeDefinition.OID, subtypePostgresType); byOID[rangeType.OID] = rangeType; return true; } @@ -573,17 +574,17 @@ bool TryAddPostgresType(PostgresTypeDefinition postgresTypeDefinition, Dictionar return false; } - var multirangeType = new PostgresMultirangeType(postgresTypeDefinition.Namespace, 
postgresTypeDefinition.Name, postgresTypeDefinition.OID, rangePostgresType); + var multirangeType = new PostgresMultirangeType(postgresTypeDefinition.DataTypeName, postgresTypeDefinition.OID, rangePostgresType); byOID[multirangeType.OID] = multirangeType; return true; case 'e': // Enum - var enumType = new PostgresEnumType(postgresTypeDefinition.Namespace, postgresTypeDefinition.Name, postgresTypeDefinition.OID); + var enumType = new PostgresEnumType(postgresTypeDefinition.DataTypeName, postgresTypeDefinition.OID); byOID[enumType.OID] = enumType; return true; case 'c': // Composite - var compositeType = new PostgresCompositeType(postgresTypeDefinition.Namespace, postgresTypeDefinition.Name, postgresTypeDefinition.OID); + var compositeType = new PostgresCompositeType(postgresTypeDefinition.DataTypeName, postgresTypeDefinition.OID); byOID[compositeType.OID] = compositeType; return true; @@ -596,7 +597,7 @@ bool TryAddPostgresType(PostgresTypeDefinition postgresTypeDefinition, Dictionar return false; } - var domainType = new PostgresDomainType(postgresTypeDefinition.Namespace, postgresTypeDefinition.Name, postgresTypeDefinition.OID, basePostgresType, postgresTypeDefinition.NotNull); + var domainType = new PostgresDomainType(postgresTypeDefinition.DataTypeName, postgresTypeDefinition.OID, basePostgresType, postgresTypeDefinition.NotNull); byOID[domainType.OID] = domainType; return true; @@ -610,4 +611,7 @@ bool TryAddPostgresType(PostgresTypeDefinition postgresTypeDefinition, Dictionar } } -readonly record struct PostgresTypeDefinition(string Namespace, uint OID, string Name, char Type, bool NotNull, uint ElemTypeOID); +readonly record struct PostgresTypeDefinition(string Namespace, uint OID, string Name, char Type, bool NotNull, uint ElemTypeOID) +{ + public DataTypeName DataTypeName => DataTypeName.CreateFullyQualifiedName(Namespace + "." 
+ Name); +} diff --git a/src/Npgsql/PostgresTypes/PostgresType.cs b/src/Npgsql/PostgresTypes/PostgresType.cs index 842d1f3eea..fc88eb1304 100644 --- a/src/Npgsql/PostgresTypes/PostgresType.cs +++ b/src/Npgsql/PostgresTypes/PostgresType.cs @@ -1,4 +1,5 @@ using System; +using System.Diagnostics.CodeAnalysis; using Npgsql.Internal.Postgres; namespace Npgsql.PostgresTypes; @@ -20,13 +21,12 @@ public abstract class PostgresType /// Constructs a representation of a PostgreSQL data type. /// /// The data type's namespace (or schema). - /// The data type's name. + /// The data type's display name. /// The data type's OID. private protected PostgresType(string ns, string name, uint oid) { - DataTypeName = DataTypeName.FromDisplayName(name, ns, assumeUnqualified: true); + DataTypeName = DataTypeName.FromDisplayName(ns is null or "pg_catalog" ? name : ns + "." + name); OID = oid; - FullName = Namespace + "." + Name; } /// @@ -38,7 +38,6 @@ private protected PostgresType(DataTypeName dataTypeName, Oid oid) { DataTypeName = dataTypeName; OID = oid.Value; - FullName = Namespace + "." + Name; } #endregion @@ -67,7 +66,8 @@ private protected PostgresType(DataTypeName dataTypeName, Oid oid) /// /// The full name of the backend type, including its namespace. /// - public string FullName { get; } + [field: MaybeNull] + public string FullName => field ??= Namespace + "." 
+ Name; internal DataTypeName DataTypeName { get; } diff --git a/test/Npgsql.Tests/DataTypeNameTests.cs b/test/Npgsql.Tests/DataTypeNameTests.cs index 067eb217c4..acd209060e 100644 --- a/test/Npgsql.Tests/DataTypeNameTests.cs +++ b/test/Npgsql.Tests/DataTypeNameTests.cs @@ -60,9 +60,27 @@ public string ToDefaultMultirangeNameHasRange(string name) [TestCase("name ", "public", ExpectedResult = "public.name")] [TestCase("_name", "public", ExpectedResult = "public._name")] [TestCase("name[]", "public", ExpectedResult = "public._name")] - [TestCase("timestamp with time zone", "public", ExpectedResult = "public.timestamptz")] - [TestCase("boolean(facet_name)", "public", ExpectedResult = "public.bool")] + [TestCase("timestamp with time zone", "public", ExpectedResult = "public.timestamp with time zone")] + [TestCase("timestamp with time zone", "pg_catalog", ExpectedResult = "pg_catalog.timestamptz")] + [TestCase("timestamp with time zone", null, ExpectedResult = "pg_catalog.timestamptz")] + [TestCase("boolean(facet_name)", "public", ExpectedResult = "public.boolean(facet_name)")] + [TestCase("boolean(facet_name)", "pg_catalog", ExpectedResult = "pg_catalog.bool")] + [TestCase("boolean(facet_name)", null, ExpectedResult = "pg_catalog.bool")] [TestCase(" public.name ", null, ExpectedResult = "public.name")] + [TestCase("decimal", "public", ExpectedResult = "public.decimal")] + [TestCase("numeric", "public", ExpectedResult = "public.numeric")] public string FromDisplayName(string name, string? schema) - => DataTypeName.FromDisplayName(name, schema).Value; + => DataTypeName.FromDisplayName(schema is null or "pg_catalog" ? name : schema + "." 
+ name).Value; + + [TestCase("pg_catalog.bool", ExpectedResult = "boolean")] + [TestCase("public.bool", ExpectedResult = "bool")] + [TestCase("pg_catalog.numeric", ExpectedResult = "numeric")] + [TestCase("pg_catalog._numeric", ExpectedResult = "numeric[]")] + [TestCase("pg_catalog.decimal", ExpectedResult = "numeric")] + [TestCase("public.numeric", ExpectedResult = "numeric")] + [TestCase("public._numeric", ExpectedResult = "numeric[]")] + [TestCase("public.decimal", ExpectedResult = "decimal")] + [TestCase("public._decimal", ExpectedResult = "decimal[]")] + public string UnqualifiedDisplayName(string fullyQualifiedName) + => new DataTypeName(fullyQualifiedName).UnqualifiedDisplayName; } From 25def34b8f847226d9dba17f2e23727a306047a4 Mon Sep 17 00:00:00 2001 From: Bruce Bowyer-Smyth Date: Tue, 17 Mar 2026 17:19:49 +1000 Subject: [PATCH 725/761] Specialize UTF8 encoding in converter writing (#6297) Co-authored-by: Nino Floris --- src/Npgsql/Internal/PgWriter.cs | 84 +++++++++++++++++++-- test/Npgsql.Benchmarks/TypeHandlers/Text.cs | 2 +- 2 files changed, 79 insertions(+), 7 deletions(-) diff --git a/src/Npgsql/Internal/PgWriter.cs b/src/Npgsql/Internal/PgWriter.cs index 2d08a38e53..902c15d20b 100644 --- a/src/Npgsql/Internal/PgWriter.cs +++ b/src/Npgsql/Internal/PgWriter.cs @@ -1,3 +1,4 @@ +using Npgsql.Internal.Postgres; using System; using System.Buffers; using System.Buffers.Binary; @@ -7,9 +8,9 @@ using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Text; +using System.Text.Unicode; using System.Threading; using System.Threading.Tasks; -using Npgsql.Internal.Postgres; namespace Npgsql.Internal; @@ -21,7 +22,7 @@ enum FlushMode } // A streaming alternative to a System.IO.Stream, instead based on the preferable IBufferWriter. 
-interface IStreamingWriter: IBufferWriter +interface IStreamingWriter : IBufferWriter { void Flush(TimeSpan timeout = default); ValueTask FlushAsync(CancellationToken cancellationToken = default); @@ -273,20 +274,57 @@ public void WriteDouble(double value) public void WriteChars(ReadOnlySpan data, Encoding encoding) { + if (encoding.CodePage == Encoding.UTF8.CodePage) + { + var fallback = encoding.EncoderFallback; + // We can only emulate these well known fallbacks in the fast path. + if (EncoderFallback.ExceptionFallback.Equals(fallback) || EncoderFallback.ReplacementFallback.Equals(fallback)) + { + Utf8Core(data, replace: !EncoderFallback.ExceptionFallback.Equals(fallback), scalarMaxByteCount: 4); + return; + } + } + // If we have more chars than bytes remaining we can immediately go to the slow path. if (data.Length <= Remaining) { // If not, it's worth a shot to see if we can convert in one go. - var encodedLength = encoding.GetByteCount(data); - if (!ShouldFlush(encodedLength)) + if (!ShouldFlush(encoding.GetMaxByteCount(data.Length)) || !ShouldFlush(encoding.GetByteCount(data))) { var count = encoding.GetBytes(data, Span); Advance(count); return; } } + Core(data, encoding); + void Utf8Core(ReadOnlySpan data, bool replace, int scalarMaxByteCount) + { + while (true) + { + var status = Utf8.FromUtf16(data, Span, out var charsRead, out var bytesWritten, replaceInvalidSequences: replace, isFinalBlock: true); + Advance(bytesWritten); + + switch (status) + { + case OperationStatus.DestinationTooSmall: + Flush(); + Ensure(scalarMaxByteCount); + data = data.Slice(charsRead); + break; + case OperationStatus.InvalidData: + ThrowEncoderFallbackException(); + break; + default: + return; + } + } + + static void ThrowEncoderFallbackException() + => throw new EncoderFallbackException("Unable to translate Unicode character to specified code page"); + } + void Core(ReadOnlySpan data, Encoding encoding) { var encoder = encoding.GetEncoder(); @@ -307,13 +345,20 @@ void 
Core(ReadOnlySpan data, Encoding encoding) public ValueTask WriteCharsAsync(ReadOnlyMemory data, Encoding encoding, CancellationToken cancellationToken = default) { + if (encoding.CodePage == Encoding.UTF8.CodePage) + { + var fallback = encoding.EncoderFallback; + // We can only emulate these well known fallbacks in the fast path. + if (EncoderFallback.ExceptionFallback.Equals(fallback) || EncoderFallback.ReplacementFallback.Equals(fallback)) + return Utf8Core(data, replace: !EncoderFallback.ExceptionFallback.Equals(fallback), scalarMaxByteCount: 4, cancellationToken); + } + var dataSpan = data.Span; // If we have more chars than bytes remaining we can immediately go to the slow path. if (data.Length <= Remaining) { // If not, it's worth a shot to see if we can convert in one go. - var encodedLength = encoding.GetByteCount(dataSpan); - if (!ShouldFlush(encodedLength)) + if (!ShouldFlush(encoding.GetMaxByteCount(data.Length)) || !ShouldFlush(encoding.GetByteCount(dataSpan))) { var count = encoding.GetBytes(dataSpan, Span); Advance(count); @@ -323,6 +368,33 @@ public ValueTask WriteCharsAsync(ReadOnlyMemory data, Encoding encoding, C return Core(data, encoding, cancellationToken); + async ValueTask Utf8Core(ReadOnlyMemory data, bool replace, int scalarMaxByteCount, CancellationToken cancellationToken) + { + while (true) + { + var status = Utf8.FromUtf16(data.Span, Span, out var charsRead, out var bytesWritten, replaceInvalidSequences: replace, + isFinalBlock: true); + Advance(bytesWritten); + + switch (status) + { + case OperationStatus.DestinationTooSmall: + await FlushAsync(cancellationToken).ConfigureAwait(false); + Ensure(scalarMaxByteCount); + data = data.Slice(charsRead); + break; + case OperationStatus.InvalidData: + ThrowEncoderFallbackException(); + break; + default: + return; + } + } + + static void ThrowEncoderFallbackException() + => throw new EncoderFallbackException("Unable to translate Unicode character to specified code page"); + } + async ValueTask 
Core(ReadOnlyMemory data, Encoding encoding, CancellationToken cancellationToken) { var encoder = encoding.GetEncoder(); diff --git a/test/Npgsql.Benchmarks/TypeHandlers/Text.cs b/test/Npgsql.Benchmarks/TypeHandlers/Text.cs index 425b836838..0cbdace076 100644 --- a/test/Npgsql.Benchmarks/TypeHandlers/Text.cs +++ b/test/Npgsql.Benchmarks/TypeHandlers/Text.cs @@ -7,7 +7,7 @@ namespace Npgsql.Benchmarks.TypeHandlers; [Config(typeof(Config))] -public class Text() : TypeHandlerBenchmarks(new StringTextConverter(Encoding.UTF8)) +public class Text() : TypeHandlerBenchmarks(new StringTextConverter(NpgsqlWriteBuffer.UTF8Encoding)) { protected override IEnumerable ValuesOverride() { From f06387b0180974cf549ae63428b3d74569dcb550 Mon Sep 17 00:00:00 2001 From: Bruce Bowyer-Smyth Date: Tue, 17 Mar 2026 17:36:47 +1000 Subject: [PATCH 726/761] Devirtualize UTF8 with a specialized text converter (#6016) Co-authored-by: Nino Floris --- .../Converters/Primitive/TextConverters.cs | 93 +++++++++++-------- src/Npgsql/Internal/NpgsqlWriteBuffer.cs | 6 +- src/Npgsql/Internal/PgSerializerOptions.cs | 4 +- .../AdoTypeInfoResolverFactory.cs | 14 +-- ...ExtraConversionsTypeInfoResolverFactory.cs | 6 +- .../LTreeTypeInfoResolverFactory.cs | 6 +- test/Npgsql.Benchmarks/TypeHandlers/Text.cs | 8 +- test/Npgsql.Tests/TypeMapperTests.cs | 26 +++++- 8 files changed, 99 insertions(+), 64 deletions(-) diff --git a/src/Npgsql/Internal/Converters/Primitive/TextConverters.cs b/src/Npgsql/Internal/Converters/Primitive/TextConverters.cs index e1ef7f714a..d4c1f8834e 100644 --- a/src/Npgsql/Internal/Converters/Primitive/TextConverters.cs +++ b/src/Npgsql/Internal/Converters/Primitive/TextConverters.cs @@ -11,54 +11,69 @@ // ReSharper disable once CheckNamespace namespace Npgsql.Internal.Converters; -abstract class StringBasedTextConverter(Encoding encoding) : PgStreamingConverter +static class TextConverter { - public override T Read(PgReader reader) - => Read(async: false, reader, 
encoding).GetAwaiter().GetResult(); + public static PgConverter CreateStringConverter(Encoding encoding) + => new StringBasedTextConverter(encoding); - public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) - => Read(async: true, reader, encoding, cancellationToken); + public static PgConverter> CreateReadOnlyMemoryConverter(Encoding encoding) + => new StringBasedTextConverter, ReadOnlyMemoryConversion>(encoding); - public override Size GetSize(SizeContext context, T value, ref object? writeState) - => TextConverter.GetSize(ref context, ConvertTo(value), encoding); + sealed class StringBasedTextConverter(Encoding encoding) : PgStreamingConverter + where TConv : struct, IStringConversion + { + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.None; + return format is DataFormat.Binary or DataFormat.Text; + } - public override void Write(PgWriter writer, T value) - => writer.WriteChars(ConvertTo(value).Span, encoding); + public override T Read(PgReader reader) + { + var bytes = reader.ReadBytes(reader.CurrentRemaining); + return TConv.ConvertFrom( + ReferenceEquals(encoding, PgSerializerOptions.DefaultUtf8Encoding) + ? PgSerializerOptions.DefaultUtf8Encoding.GetString(bytes) + : encoding.GetString(bytes)); + } - public override ValueTask WriteAsync(PgWriter writer, T value, CancellationToken cancellationToken = default) - => writer.WriteCharsAsync(ConvertTo(value), encoding, cancellationToken); + [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder<>))] + public override async ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + { + var bytes = await reader.ReadBytesAsync(reader.CurrentRemaining, cancellationToken).ConfigureAwait(false); + return TConv.ConvertFrom( + ReferenceEquals(encoding, PgSerializerOptions.DefaultUtf8Encoding) + ? 
PgSerializerOptions.DefaultUtf8Encoding.GetString(bytes) + : encoding.GetString(bytes)); + } - public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) - { - bufferRequirements = BufferRequirements.None; - return format is DataFormat.Binary or DataFormat.Text; - } + public override Size GetSize(SizeContext context, T value, ref object? writeState) + => TextConverterHelpers.GetSize(ref context, TConv.ConvertTo(value), encoding); - protected abstract ReadOnlyMemory ConvertTo(T value); - protected abstract T ConvertFrom(string value); + public override void Write(PgWriter writer, T value) + => writer.WriteChars(TConv.ConvertTo(value).Span, encoding); - ValueTask Read(bool async, PgReader reader, Encoding encoding, CancellationToken cancellationToken = default) - { - return async - ? ReadAsync(reader, encoding, cancellationToken) - : new(ConvertFrom(encoding.GetString(reader.ReadBytes(reader.CurrentRemaining)))); + public override ValueTask WriteAsync(PgWriter writer, T value, CancellationToken cancellationToken = default) + => writer.WriteCharsAsync(TConv.ConvertTo(value), encoding, cancellationToken); + } - [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder<>))] - async ValueTask ReadAsync(PgReader reader, Encoding encoding, CancellationToken cancellationToken) - => ConvertFrom(encoding.GetString(await reader.ReadBytesAsync(reader.CurrentRemaining, cancellationToken).ConfigureAwait(false))); + interface IStringConversion + { + static abstract ReadOnlyMemory ConvertTo(T value); + static abstract T ConvertFrom(string value); } -} -sealed class ReadOnlyMemoryTextConverter(Encoding encoding) : StringBasedTextConverter>(encoding) -{ - protected override ReadOnlyMemory ConvertTo(ReadOnlyMemory value) => value; - protected override ReadOnlyMemory ConvertFrom(string value) => value.AsMemory(); -} + struct ReadOnlyMemoryConversion : IStringConversion> + { + public static ReadOnlyMemory ConvertTo(ReadOnlyMemory value) => value; + 
public static ReadOnlyMemory ConvertFrom(string value) => value.AsMemory(); + } -sealed class StringTextConverter(Encoding encoding) : StringBasedTextConverter(encoding) -{ - protected override ReadOnlyMemory ConvertTo(string value) => value.AsMemory(); - protected override string ConvertFrom(string value) => value; + struct StringConversion : IStringConversion + { + public static ReadOnlyMemory ConvertTo(string value) => value.AsMemory(); + public static string ConvertFrom(string value) => value; + } } abstract class ArrayBasedTextConverter(Encoding encoding) : PgStreamingConverter @@ -69,7 +84,7 @@ public override ValueTask ReadAsync(PgReader reader, CancellationToken cancel => Read(async: true, reader, encoding); public override Size GetSize(SizeContext context, T value, ref object? writeState) - => TextConverter.GetSize(ref context, ConvertTo(value), encoding); + => TextConverterHelpers.GetSize(ref context, ConvertTo(value), encoding); public override void Write(PgWriter writer, T value) => writer.WriteChars(ConvertTo(value).AsSpan(), encoding); @@ -96,7 +111,7 @@ async ValueTask ReadAsync(PgReader reader, Encoding encoding) static ArraySegment GetSegment(ReadOnlySequence bytes, Encoding encoding) { - var array = TextConverter.GetChars(encoding, bytes); + var array = TextConverterHelpers.GetChars(encoding, bytes); return new(array, 0, array.Length); } } @@ -248,7 +263,7 @@ static int ConsumeChars(TextReader reader, int? count) } // Moved out for code size/sharing. 
-static class TextConverter +static class TextConverterHelpers { public static Size GetSize(ref SizeContext context, ReadOnlyMemory value, Encoding encoding) => encoding.GetByteCount(value.Span); diff --git a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs index ea0b4b265a..3c0d91a148 100644 --- a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs @@ -20,8 +20,10 @@ sealed class NpgsqlWriteBuffer : IDisposable { #region Fields and Properties - internal static readonly UTF8Encoding UTF8Encoding = new(false, true); - internal static readonly UTF8Encoding RelaxedUTF8Encoding = new(false, false); + internal static readonly UTF8Encoding UTF8Encoding = new ThrowingUTF8Encoding(); + internal static readonly UTF8Encoding RelaxedUTF8Encoding = Encoding.UTF8 as UTF8Encoding ?? new(false, throwOnInvalidBytes: false); + + sealed class ThrowingUTF8Encoding() : UTF8Encoding(false, throwOnInvalidBytes: true); internal readonly NpgsqlConnector Connector; diff --git a/src/Npgsql/Internal/PgSerializerOptions.cs b/src/Npgsql/Internal/PgSerializerOptions.cs index 052404da5c..9a68ba1d70 100644 --- a/src/Npgsql/Internal/PgSerializerOptions.cs +++ b/src/Npgsql/Internal/PgSerializerOptions.cs @@ -11,6 +11,8 @@ namespace Npgsql.Internal; [Experimental(NpgsqlDiagnostics.ConvertersExperimental)] public sealed class PgSerializerOptions { + internal static UTF8Encoding DefaultUtf8Encoding => NpgsqlWriteBuffer.UTF8Encoding; + /// /// Used by GetSchema to be able to attempt to resolve all type catalog types without exceptions. /// @@ -48,7 +50,7 @@ internal bool IntrospectionMode internal NpgsqlDatabaseInfo DatabaseInfo { get; } public string TimeZone => _timeZoneProvider?.Invoke() ?? 
throw new NotSupportedException("TimeZone was not configured."); - public Encoding TextEncoding { get; init; } = Encoding.UTF8; + public Encoding TextEncoding { get; init; } = NpgsqlWriteBuffer.RelaxedUTF8Encoding; public IPgTypeInfoResolver TypeInfoResolver { get => _typeInfoResolver ??= new ChainTypeInfoResolver(_resolverChain); diff --git a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs index 5f1753bfa9..7bd4c2ca24 100644 --- a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs @@ -42,7 +42,7 @@ class Resolver : IPgTypeInfoResolver || options.DatabaseInfo.GetPostgresType(dataTypeName) is not PostgresEnumType) return null; - return new PgTypeInfo(options, new StringTextConverter(options.TextEncoding), dataTypeName, + return new PgTypeInfo(options, TextConverter.CreateStringConverter(options.TextEncoding), dataTypeName, unboxedType: type == typeof(object) ? typeof(string) : null); } @@ -76,7 +76,7 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) // Text // Update PgSerializerOptions.IsWellKnownTextType(Type) after any changes to this list. mappings.AddType(DataTypeNames.Text, - static (options, mapping, _) => mapping.CreateInfo(options, new StringTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text), isDefault: true); + static (options, mapping, _) => mapping.CreateInfo(options, TextConverter.CreateStringConverter(options.TextEncoding), preferredFormat: DataFormat.Text), isDefault: true); mappings.AddStructType(DataTypeNames.Text, static (options, mapping, _) => mapping.CreateInfo(options, new CharTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text)); // Uses the bytea converters, as neither type has a header. 
@@ -103,7 +103,7 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) DataTypeNames.Xml, DataTypeNames.Name, DataTypeNames.RefCursor }) { mappings.AddType(dataTypeName, - static (options, mapping, _) => mapping.CreateInfo(options, new StringTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text), isDefault: true); + static (options, mapping, _) => mapping.CreateInfo(options, TextConverter.CreateStringConverter(options.TextEncoding), preferredFormat: DataFormat.Text), isDefault: true); mappings.AddStructType(dataTypeName, static (options, mapping, _) => mapping.CreateInfo(options, new CharTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text)); // Uses the bytea converters, as neither type has a header. @@ -128,7 +128,7 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) // Jsonb const byte jsonbVersion = 1; mappings.AddType(DataTypeNames.Jsonb, - static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new StringTextConverter(options.TextEncoding))), isDefault: true); + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonbVersion, TextConverter.CreateStringConverter(options.TextEncoding))), isDefault: true); mappings.AddStructType(DataTypeNames.Jsonb, static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new CharTextConverter(options.TextEncoding)))); mappings.AddType(DataTypeNames.Jsonb, @@ -151,7 +151,7 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) // Jsonpath const byte jsonpathVersion = 1; mappings.AddType(DataTypeNames.Jsonpath, - static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonpathVersion, new StringTextConverter(options.TextEncoding))), isDefault: true); + static (options, mapping, _) => mapping.CreateInfo(options, new 
VersionPrefixedTextConverter(jsonpathVersion, TextConverter.CreateStringConverter(options.TextEncoding))), isDefault: true); //Special mappings, these have no corresponding array mapping. mappings.AddType(DataTypeNames.Jsonpath, static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonpathVersion, new TextReaderTextConverter(options.TextEncoding)), preferredFormat: DataFormat.Text, supportsWriting: false), @@ -271,7 +271,7 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) // Unknown mappings.AddType(DataTypeNames.Unknown, - static (options, mapping, _) => mapping.CreateInfo(options, new StringTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text), + static (options, mapping, _) => mapping.CreateInfo(options, TextConverter.CreateStringConverter(options.TextEncoding), preferredFormat: DataFormat.Text), MatchRequirement.DataTypeName); // Void @@ -549,7 +549,7 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) var mappings = new TypeInfoMappingCollection(); mappings.AddType(enumType.DataTypeName, - (options, mapping, _) => mapping.CreateInfo(options, new StringTextConverter(options.TextEncoding)), MatchRequirement.DataTypeName); + (options, mapping, _) => mapping.CreateInfo(options, TextConverter.CreateStringConverter(options.TextEncoding)), MatchRequirement.DataTypeName); mappings.AddArrayType(enumType.DataTypeName); return mappings.Find(type, dataTypeName, options); } diff --git a/src/Npgsql/Internal/ResolverFactories/ExtraConversionsTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/ExtraConversionsTypeInfoResolverFactory.cs index 9b5de89736..5809695b7d 100644 --- a/src/Npgsql/Internal/ResolverFactories/ExtraConversionsTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/ExtraConversionsTypeInfoResolverFactory.cs @@ -108,7 +108,7 @@ static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings) 
mappings.AddType(DataTypeNames.Text, static (options, mapping, _) => mapping.CreateInfo(options, new CharArrayTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text)); mappings.AddStructType>(DataTypeNames.Text, - static (options, mapping, _) => mapping.CreateInfo(options, new ReadOnlyMemoryTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text)); + static (options, mapping, _) => mapping.CreateInfo(options, TextConverter.CreateReadOnlyMemoryConverter(options.TextEncoding), preferredFormat: DataFormat.Text)); mappings.AddStructType>(DataTypeNames.Text, static (options, mapping, _) => mapping.CreateInfo(options, new CharArraySegmentTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text)); @@ -121,7 +121,7 @@ static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings) static (options, mapping, _) => mapping.CreateInfo(options, new CharArrayTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text)); mappings.AddStructType>(dataTypeName, - static (options, mapping, _) => mapping.CreateInfo(options, new ReadOnlyMemoryTextConverter(options.TextEncoding), + static (options, mapping, _) => mapping.CreateInfo(options, TextConverter.CreateReadOnlyMemoryConverter(options.TextEncoding), preferredFormat: DataFormat.Text)); mappings.AddStructType>(dataTypeName, static (options, mapping, _) => mapping.CreateInfo(options, new CharArraySegmentTextConverter(options.TextEncoding), @@ -133,7 +133,7 @@ static TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings) mappings.AddType(DataTypeNames.Jsonb, static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new CharArrayTextConverter(options.TextEncoding)))); mappings.AddStructType>(DataTypeNames.Jsonb, - static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter>(jsonbVersion, new ReadOnlyMemoryTextConverter(options.TextEncoding)))); + static (options, mapping, 
_) => mapping.CreateInfo(options, new VersionPrefixedTextConverter>(jsonbVersion, TextConverter.CreateReadOnlyMemoryConverter(options.TextEncoding)))); mappings.AddStructType>(DataTypeNames.Jsonb, static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter>(jsonbVersion, new CharArraySegmentTextConverter(options.TextEncoding)))); diff --git a/src/Npgsql/Internal/ResolverFactories/LTreeTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/LTreeTypeInfoResolverFactory.cs index 720d8ee78d..13d1f51cc4 100644 --- a/src/Npgsql/Internal/ResolverFactories/LTreeTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/LTreeTypeInfoResolverFactory.cs @@ -31,15 +31,15 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) { mappings.AddType("ltree", static (options, mapping, _) => mapping.CreateInfo(options, - new VersionPrefixedTextConverter(LTreeVersion, new StringTextConverter(options.TextEncoding))), + new VersionPrefixedTextConverter(LTreeVersion, TextConverter.CreateStringConverter(options.TextEncoding))), MatchRequirement.DataTypeName); mappings.AddType("lquery", static (options, mapping, _) => mapping.CreateInfo(options, - new VersionPrefixedTextConverter(LTreeVersion, new StringTextConverter(options.TextEncoding))), + new VersionPrefixedTextConverter(LTreeVersion, TextConverter.CreateStringConverter(options.TextEncoding))), MatchRequirement.DataTypeName); mappings.AddType("ltxtquery", static (options, mapping, _) => mapping.CreateInfo(options, - new VersionPrefixedTextConverter(LTreeVersion, new StringTextConverter(options.TextEncoding))), + new VersionPrefixedTextConverter(LTreeVersion, TextConverter.CreateStringConverter(options.TextEncoding))), MatchRequirement.DataTypeName); return mappings; diff --git a/test/Npgsql.Benchmarks/TypeHandlers/Text.cs b/test/Npgsql.Benchmarks/TypeHandlers/Text.cs index 0cbdace076..7d8b158ea3 100644 --- a/test/Npgsql.Benchmarks/TypeHandlers/Text.cs 
+++ b/test/Npgsql.Benchmarks/TypeHandlers/Text.cs @@ -1,13 +1,13 @@ -using BenchmarkDotNet.Attributes; +using Npgsql.Internal; +using Npgsql.Internal.Converters; using System.Collections.Generic; using System.Text; -using Npgsql.Internal; -using Npgsql.Internal.Converters; +using BenchmarkDotNet.Attributes; namespace Npgsql.Benchmarks.TypeHandlers; [Config(typeof(Config))] -public class Text() : TypeHandlerBenchmarks(new StringTextConverter(NpgsqlWriteBuffer.UTF8Encoding)) +public class Text() : TypeHandlerBenchmarks(TextConverter.CreateStringConverter(PgSerializerOptions.DefaultUtf8Encoding)) { protected override IEnumerable ValuesOverride() { diff --git a/test/Npgsql.Tests/TypeMapperTests.cs b/test/Npgsql.Tests/TypeMapperTests.cs index 469a57be01..8fd8f66346 100644 --- a/test/Npgsql.Tests/TypeMapperTests.cs +++ b/test/Npgsql.Tests/TypeMapperTests.cs @@ -2,6 +2,9 @@ using NUnit.Framework; using System; using System.Data; +using System.Runtime.CompilerServices; +using System.Text; +using System.Threading; using System.Threading.Tasks; using Npgsql.Internal.Converters; using Npgsql.Internal.Postgres; @@ -192,7 +195,7 @@ sealed class Resolver : IPgTypeInfoResolver { if (type == typeof(string) || dataTypeName?.UnqualifiedName == "citext") if (options.DatabaseInfo.TryGetPostgresTypeByName("citext", out var pgType)) - return new(options, new StringTextConverter(options.TextEncoding), options.ToCanonicalTypeId(pgType)); + return new(options, TextConverter.CreateStringConverter(options.TextEncoding), options.ToCanonicalTypeId(pgType)); return null; } @@ -241,15 +244,28 @@ sealed class GuidTextTypeInfoResolver(string typeName) : IPgTypeInfoResolver } } - sealed class GuidTextConverter(System.Text.Encoding encoding) : StringBasedTextConverter(encoding) + sealed class GuidTextConverter(Encoding encoding) : PgStreamingConverter { public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) { bufferRequirements = BufferRequirements.None; - 
return format is DataFormat.Text; + return format is DataFormat.Binary or DataFormat.Text; } - protected override Guid ConvertFrom(string value) => Guid.Parse(value); - protected override ReadOnlyMemory ConvertTo(Guid value) => value.ToString().AsMemory(); + + public override Guid Read(PgReader reader) + => Guid.Parse(encoding.GetString(reader.ReadBytes(reader.CurrentRemaining))); + + public override async ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => Guid.Parse(encoding.GetString(await reader.ReadBytesAsync(reader.CurrentRemaining, cancellationToken).ConfigureAwait(false))); + + public override Size GetSize(SizeContext context, Guid value, ref object? writeState) + => TextConverterHelpers.GetSize(ref context, value.ToString().AsMemory(), encoding); + + public override void Write(PgWriter writer, Guid value) + => writer.WriteChars(value.ToString().AsSpan(), encoding); + + public override ValueTask WriteAsync(PgWriter writer, Guid value, CancellationToken cancellationToken = default) + => writer.WriteCharsAsync(value.ToString().AsMemory(), encoding, cancellationToken); } } From 09c84605ef5facb77e4f3cddacdf28e6154a4e8d Mon Sep 17 00:00:00 2001 From: Brian Pursley Date: Tue, 17 Mar 2026 11:14:55 -0400 Subject: [PATCH 727/761] Change bad hostname tests to use .invalid reserved TLD. 
(#6510) --- test/Npgsql.Tests/ConnectionTests.cs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/Npgsql.Tests/ConnectionTests.cs b/test/Npgsql.Tests/ConnectionTests.cs index d33441488c..8a87e7fbea 100644 --- a/test/Npgsql.Tests/ConnectionTests.cs +++ b/test/Npgsql.Tests/ConnectionTests.cs @@ -307,7 +307,7 @@ public void Connect_timeout_cancel() [Test] public void Bad_hostname() { - using var dataSource = CreateDataSource(csb => csb.Host = "hostname.that.does.not.exist"); + using var dataSource = CreateDataSource(csb => csb.Host = "hostname.invalid"); using var conn = dataSource.CreateConnection(); Assert.That( @@ -323,7 +323,7 @@ public void Bad_hostname() [Test] public void Bad_hostname_async() { - using var dataSource = CreateDataSource(csb => csb.Host = "hostname.that.does.not.exist"); + using var dataSource = CreateDataSource(csb => csb.Host = "hostname.invalid"); using var conn = dataSource.CreateConnection(); Assert.That( From 9030fb9187c1cb016733ee2e19ca1fa73448cb22 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 18 Mar 2026 07:09:00 +0100 Subject: [PATCH 728/761] Bump Microsoft.Data.SqlClient from 6.1.4 to 7.0.0 (#6512) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index ef14823895..2131066551 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -44,7 +44,7 @@ - + From 67685cc4a80272b9fab55ed2e202ec2182a1c5ea Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 18 Mar 2026 07:09:26 +0100 Subject: [PATCH 729/761] Bumps Scriban.Signed from 6.5.7 to 6.5.8 --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 2131066551..fe871fc03c 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props 
@@ -25,7 +25,7 @@ - + From ba839638481507f58d9c7f78c004a633dece6cf7 Mon Sep 17 00:00:00 2001 From: Brian Pursley Date: Thu, 19 Mar 2026 02:46:43 -0400 Subject: [PATCH 730/761] Enable Npgsql.Specification.Tests in the build pipeline (#6504) --- .github/workflows/build.yml | 29 ++++++------ .../Npgsql.Specification.Tests.csproj | 4 ++ .../NpgsqlCommandTests.cs | 32 +++++++++++-- .../NpgsqlConnectionTests.cs | 17 ++++++- .../NpgsqlDataReaderTests.cs | 47 ++++++++++++++++++- 5 files changed, 107 insertions(+), 22 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 075d7e35fe..eed712eea8 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -70,7 +70,7 @@ jobs: # The NPGSQL_TEST_POSTGIS environment variable ensures that if PostGIS isn't installed, # the PostGIS tests fail and therefore fail the build. env: - NPGSQL_TEST_POSTGIS: ${{ !startsWith(matrix.os, 'windows') && !startsWith(matrix.os, 'macos') }} + NPGSQL_TEST_POSTGIS: ${{ !startsWith(matrix.os, 'windows') && !startsWith(matrix.os, 'macos') && matrix.pg_prerelease == '' }} steps: - name: Checkout @@ -108,16 +108,19 @@ jobs: sudo chmod 600 $PGDATA/{server.crt,server.key,ca.crt} sudo chown postgres $PGDATA/{server.crt,server.key,ca.crt} + # Create the npgsql_tests database + sudo -u postgres psql -c "CREATE DATABASE npgsql_tests" + # Create npgsql_tests user with md5 password 'npgsql_tests' sudo -u postgres psql -c "CREATE USER npgsql_tests SUPERUSER PASSWORD 'md5adf74603a5772843f53e812f03dacb02'" sudo -u postgres psql -c "CREATE USER npgsql_tests_ssl SUPERUSER PASSWORD 'npgsql_tests_ssl'" sudo -u postgres psql -c "CREATE USER npgsql_tests_nossl SUPERUSER PASSWORD 'npgsql_tests_nossl'" - # To disable PostGIS for prereleases (because it usually isn't available until late), surround with the following: - #if [ -z "${{ matrix.pg_prerelease }}" ]; then + # Install PostGIS if required + if [[ $NPGSQL_TEST_POSTGIS == 'true' ]]; then sudo apt-get install -qq 
postgresql-${{ matrix.pg_major }}-postgis-${{ env.postgis_version }} - #fi + fi if [ ${{ matrix.pg_major }} -ge 14 ]; then sudo sed -i "s|unix_socket_directories = '/var/run/postgresql'|unix_socket_directories = '/var/run/postgresql, @/npgsql_unix'|" $PGDATA/postgresql.conf @@ -192,6 +195,9 @@ jobs: sed -i "s|#max_prepared_transactions = 0|max_prepared_transactions = 100|" pgsql/PGDATA/postgresql.conf pgsql/bin/pg_ctl -D pgsql/PGDATA -l logfile -o '-c ssl=true -c ssl_cert_file=../server.crt -c ssl_key_file=../server.key -c ssl_ca_file=../ca.crt' start + # Create npgsql_tests database + pgsql/bin/psql -U postgres -c "CREATE DATABASE npgsql_tests" + # Create npgsql_tests user with md5 password 'npgsql_tests' pgsql/bin/psql -U postgres -c "CREATE ROLE npgsql_tests SUPERUSER LOGIN PASSWORD 'md5adf74603a5772843f53e812f03dacb02'" @@ -256,6 +262,9 @@ jobs: sleep 5 done + # Create the npgsql_tests database + psql -c "CREATE DATABASE npgsql_tests" postgres + # Create npgsql_tests user with md5 password 'npgsql_tests' psql -c "CREATE USER npgsql_tests SUPERUSER PASSWORD 'md5adf74603a5772843f53e812f03dacb02'" postgres @@ -298,11 +307,8 @@ jobs: done psql -c "CREATE USER npgsql_tests_scram SUPERUSER PASSWORD 'npgsql_tests_scram'" postgres - # TODO: Once test/Npgsql.Specification.Tests work, switch to just testing on the solution - name: Test - run: | - dotnet test -c ${{ matrix.config }} -f ${{ matrix.test_tfm }} test/Npgsql.Tests --logger "GitHubActions;report-warnings=false" --blame-crash --blame-hang-timeout 30s - dotnet test -c ${{ matrix.config }} -f ${{ matrix.test_tfm }} test/Npgsql.DependencyInjection.Tests --logger "GitHubActions;report-warnings=false" + run: dotnet test -c ${{ matrix.config }} -f ${{ matrix.test_tfm }} --logger "GitHubActions;report-warnings=false" --blame-crash --blame-hang-timeout 30s shell: bash - name: Upload Test Hang Dumps @@ -314,13 +320,6 @@ jobs: **/*.dmp **/*_Sequence.xml - - name: Test Plugins - run: | - if [ -z "${{ matrix.pg_prerelease 
}}" ]; then - dotnet test -c ${{ matrix.config }} -f ${{ matrix.test_tfm }} test/Npgsql.PluginTests --logger "GitHubActions;report-warnings=false" - fi - shell: bash - - id: analyze_tag name: Analyze tag shell: bash diff --git a/test/Npgsql.Specification.Tests/Npgsql.Specification.Tests.csproj b/test/Npgsql.Specification.Tests/Npgsql.Specification.Tests.csproj index 466d6550dd..47e39cc121 100644 --- a/test/Npgsql.Specification.Tests/Npgsql.Specification.Tests.csproj +++ b/test/Npgsql.Specification.Tests/Npgsql.Specification.Tests.csproj @@ -10,4 +10,8 @@ + + + $(NoWarn);xUnit1004 + diff --git a/test/Npgsql.Specification.Tests/NpgsqlCommandTests.cs b/test/Npgsql.Specification.Tests/NpgsqlCommandTests.cs index 8318435aa9..ea72e86bcf 100644 --- a/test/Npgsql.Specification.Tests/NpgsqlCommandTests.cs +++ b/test/Npgsql.Specification.Tests/NpgsqlCommandTests.cs @@ -1,11 +1,33 @@ using AdoNet.Specification.Tests; +using Xunit; +using Xunit.Sdk; namespace Npgsql.Specification.Tests; public sealed class NpgsqlCommandTests(NpgsqlDbFactoryFixture fixture) : CommandTestBase(fixture) { - // PostgreSQL only supports a single transaction on a given connection at a given time. As a result, - // Npgsql completely ignores DbCommand.Transaction. - public override void ExecuteReader_throws_when_transaction_required() {} - public override void ExecuteReader_throws_when_transaction_mismatched() {} -} \ No newline at end of file + public override void ExecuteReader_throws_when_transaction_required() + { + // PostgreSQL only supports a single transaction on a given connection at a given time. As a result, + // Npgsql completely ignores DbCommand.Transaction. + var ex = Assert.Throws(() => base.ExecuteReader_throws_when_transaction_required()); + Assert.Contains("No exception was thrown", ex.Message); + } + + public override void ExecuteReader_throws_when_transaction_mismatched() + { + // PostgreSQL only supports a single transaction on a given connection at a given time. 
As a result, + // Npgsql completely ignores DbCommand.Transaction. + var ex = Assert.Throws(() => base.ExecuteReader_throws_when_transaction_mismatched()); + Assert.Contains("No exception was thrown", ex.Message); + } + + // Skipped tests mark places where Npgsql currently diverges from AdoNet.Specification.Tests expectations. + // Some divergences may be by design; others may indicate compatibility gaps worth investigating. + + [Fact(Skip = "NpgsqlCommand.ExecuteReader() throws NpgsqlOperationInProgressException instead of InvalidOperationException when another reader is already open")] + public override void ExecuteReader_throws_when_reader_open() {} + + [Fact(Skip = "NpgsqlCommand.Execute() throws InvalidCastException instead of NotSupportedException for unknown ParameterValue types")] + public override void Execute_throws_for_unknown_ParameterValue_type() {} +} diff --git a/test/Npgsql.Specification.Tests/NpgsqlConnectionTests.cs b/test/Npgsql.Specification.Tests/NpgsqlConnectionTests.cs index 20f5bc2547..b103128329 100644 --- a/test/Npgsql.Specification.Tests/NpgsqlConnectionTests.cs +++ b/test/Npgsql.Specification.Tests/NpgsqlConnectionTests.cs @@ -1,5 +1,20 @@ +using System.Threading.Tasks; using AdoNet.Specification.Tests; +using Xunit; namespace Npgsql.Specification.Tests; -public sealed class NpgsqlConnectionTests(NpgsqlDbFactoryFixture fixture) : ConnectionTestBase(fixture); \ No newline at end of file +public sealed class NpgsqlConnectionTests(NpgsqlDbFactoryFixture fixture) : ConnectionTestBase(fixture) +{ + // Skipped tests mark places where Npgsql currently diverges from AdoNet.Specification.Tests expectations. + // Some divergences may be by design; others may indicate compatibility gaps worth investigating. 
+ + [Fact(Skip = "NpgsqlConnection does not support the Disposed event")] + public override void Dispose_raises_Disposed() {} + + [Fact(Skip = "NpgsqlConnection does not support the Disposed event")] + public override Task DisposeAsync_raises_Disposed() => Task.CompletedTask; + + [Fact(Skip = "NpgsqlConnection.OpenAsync() does not throw OperationCanceledException when a canceled token is passed")] + public override Task OpenAsync_is_canceled() => Task.CompletedTask; +} diff --git a/test/Npgsql.Specification.Tests/NpgsqlDataReaderTests.cs b/test/Npgsql.Specification.Tests/NpgsqlDataReaderTests.cs index 3f3c9021aa..a58546af73 100644 --- a/test/Npgsql.Specification.Tests/NpgsqlDataReaderTests.cs +++ b/test/Npgsql.Specification.Tests/NpgsqlDataReaderTests.cs @@ -1,5 +1,50 @@ +using System.Threading.Tasks; using AdoNet.Specification.Tests; +using Xunit; namespace Npgsql.Specification.Tests; -public sealed class NpgsqlDataReaderTests(NpgsqlSelectValueFixture fixture) : DataReaderTestBase(fixture); \ No newline at end of file +public sealed class NpgsqlDataReaderTests(NpgsqlSelectValueFixture fixture) : DataReaderTestBase(fixture) +{ + // Skipped tests mark places where Npgsql currently diverges from AdoNet.Specification.Tests expectations. + // Some divergences may be by design; others may indicate compatibility gaps worth investigating. 
+ + [Fact(Skip = "NpgsqlDataReader.FieldCount throws ObjectDisposedException instead of InvalidOperationException")] + public override void FieldCount_throws_when_closed() {} + + [Fact(Skip = "NpgsqlDataReader.GetBytes() throws ArgumentOutOfRangeException instead of returning 0 when dataOffset is too large")] + public override void GetBytes_reads_nothing_when_dataOffset_is_too_large() {} + + [Fact(Skip = "NpgsqlDataReader.GetChars() throws EndOfStreamException instead of returning 0 when dataOffset is too large")] + public override void GetChars_reads_nothing_when_dataOffset_is_too_large() {} + + [Fact(Skip = "NpgsqlDataReader.GetDataTypeName() throws ObjectDisposedException instead of InvalidOperationException when reader is disposed")] + public override void GetDataTypeName_throws_when_closed() {} + + [Fact(Skip = "NpgsqlDataReader.GetFieldType() throws ObjectDisposedException instead of InvalidOperationException when reader is disposed")] + public override void GetFieldType_throws_when_closed() {} + + [Fact(Skip = "NpgsqlDataReader.GetFieldValueAsync() does not throw an OperationCanceledException when a canceled token is passed")] + public override Task GetFieldValueAsync_is_canceled() => Task.CompletedTask; + + [Fact(Skip = "NpgsqlDataReader.GetName() throws ObjectDisposedException instead of InvalidOperationException when reader is disposed")] + public override void GetName_throws_when_closed() {} + + [Fact(Skip = "NpgsqlDataReader.GetTextReader() throws InvalidCastException when command text is null")] + public override void GetTextReader_returns_empty_for_null_String() {} + + [Fact(Skip = "NpgsqlDataReader.GetValue() throws ObjectDisposedException instead of InvalidOperationException when reader is disposed")] + public override void GetValue_throws_when_closed() {} + + [Fact(Skip = "NpgsqlDataReader.IsDBNull() throws ObjectDisposedException instead of InvalidOperationException when reader is disposed")] + public override void IsDBNull_throws_when_closed() {} 
+ + [Fact(Skip = "NpgsqlDataReader.IsDBNullAsync() does not throw OperationCanceledException when a canceled token is passed")] + public override Task IsDBNullAsync_is_canceled() => Task.CompletedTask; + + [Fact(Skip = "NpgsqlDataReader.NextResult() throws ObjectDisposedException instead of InvalidOperationException when reader is disposed")] + public override void NextResult_throws_when_closed() {} + + [Fact(Skip = "NpgsqlDataReader.Read() throws ObjectDisposedException instead of InvalidOperationException when reader is disposed")] + public override void Read_throws_when_closed() {} +} From a2c6353d4f63685d1cb542c7903d6409f7fa899e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 25 Mar 2026 00:01:35 +0200 Subject: [PATCH 731/761] Bump the minor-and-patch group with 2 updates (#6520) Bumps NUnit3TestAdapter from 6.1.0 to 6.2.0 Bumps Scriban.Signed from 6.5.8 to 6.6.0 --- Directory.Packages.props | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index fe871fc03c..edc57a514b 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -25,7 +25,7 @@ - + @@ -34,7 +34,7 @@ - + From 987ba5d38bddc523f8704e513198bdd42f73df7b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 24 Mar 2026 22:10:26 +0000 Subject: [PATCH 732/761] Bump Scriban.Signed from 6.5.8 to 7.0.3 (#6521) --- Directory.Packages.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index edc57a514b..8e8ca841b1 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -25,7 +25,7 @@ - + From b5a100134eb61fb97233dbda762a91a1e6e32ac6 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Wed, 25 Mar 2026 12:51:21 +0100 Subject: [PATCH 733/761] Faster seek and restart (#6508) --- src/Npgsql/Internal/PgReader.cs | 38 
++++++++++++++++++++------------- src/Npgsql/NpgsqlDataReader.cs | 11 +++++----- 2 files changed, 29 insertions(+), 20 deletions(-) diff --git a/src/Npgsql/Internal/PgReader.cs b/src/Npgsql/Internal/PgReader.cs index 5da3ea7681..44f5d7b24c 100644 --- a/src/Npgsql/Internal/PgReader.cs +++ b/src/Npgsql/Internal/PgReader.cs @@ -13,6 +13,7 @@ namespace Npgsql.Internal; [Experimental(NpgsqlDiagnostics.ConvertersExperimental)] public class PgReader { + const int DbNullSentinel = -1; const int UninitializedSentinel = -1; // We don't want to add a ton of memory pressure for large strings. @@ -61,10 +62,10 @@ internal PgReader(NpgsqlReadBuffer buffer) int FieldSize => _fieldSize; int FieldRemaining => FieldSize - FieldOffset; - internal bool FieldIsDbNull => FieldSize is -1; + internal bool FieldIsDbNull => FieldSize is DbNullSentinel; internal bool FieldAtStart => FieldOffset is 0; - internal bool IsFieldConsumed(int offset) => FieldOffset > offset; + internal bool IsFieldPastOffset(int offset) => FieldOffset > offset; // TODO refactor out internal long GetFieldStartPos(NpgsqlNestedDataReader nestedDataReader) => _fieldStartPos; @@ -362,6 +363,13 @@ public void Rewind(int count) if (StreamActive) DisposeUserActiveStream(async: false).GetAwaiter().GetResult(); + RewindCore(count); + } + + void RewindCore(int count) + { + Debug.Assert(CurrentOffset >= count); + Debug.Assert(_buffer.ReadPosition >= count); _buffer.ReadPosition -= count; } @@ -525,14 +533,13 @@ public ValueTask BeginNestedReadAsync(int size, Size bufferRequ => BeginNestedRead(async: true, size, bufferRequirement, cancellationToken); /// Seek origin is the start of Current, e.g. Seek(0) rewinds to the start. 
- internal int Seek(int offset) + internal void Seek(int offset) { - if (CurrentOffset > offset) - Rewind(CurrentOffset - offset); - else if (CurrentOffset < offset) - Consume(offset - CurrentOffset); - - return FieldRemaining; + var currentOffset = CurrentOffset; + if (currentOffset > offset) + Rewind(currentOffset - offset); + else if (currentOffset < offset) + Consume(offset - currentOffset); } public void Consume(int? count = null) @@ -622,11 +629,12 @@ internal int Restart(bool resumable) ThrowHelper.ThrowInvalidOperationException("Cannot restart a non-initialized reader."); // We resume if the reader was initialized as resumable and we're not explicitly restarting as non-resumable. - // When the field size is DbNullFieldSize (i.e. -1) we're always restarting as resumable, to allow rereading null values endlessly. - if ((Resumable && resumable) || FieldIsDbNull) + // When the field size is DbNullSentinel (i.e. -1) we're always restarting as resumable, to allow rereading null values endlessly. + var fieldSize = FieldSize; + if ((Resumable && resumable) || fieldSize is DbNullSentinel) { - _resumable = resumable || FieldIsDbNull; - return FieldSize; + _resumable = true; + return fieldSize; } // From this point on we're not resuming, we're resetting any remaining state and rewinding our position. @@ -640,10 +648,10 @@ internal int Restart(bool resumable) _fieldConsumed = false; _resumable = resumable; - Seek(0); + RewindCore(FieldOffset); Debug.Assert(Initialized); - return FieldSize; + return fieldSize; } [MethodImpl(MethodImplOptions.AggressiveInlining)] diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 3b56704606..42237e61ae 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -1420,16 +1420,17 @@ public override long GetBytes(int ordinal, long dataOffset, byte[]? 
buffer, int return columnLength; // Check whether any sequential seek is contractually sound (even though we might be able to satisfy rewinds we make sure we won't). - if (_isSequential && PgReader.IsFieldConsumed((int)dataOffset)) + var reader = PgReader; + if (_isSequential && reader.IsFieldPastOffset((int)dataOffset)) ThrowHelper.ThrowInvalidOperationException("Attempt to read a position in the column which has already been read"); // Move to offset - Debug.Assert(!PgReader.NestedInitialized, "Unexpected nested read active, Seek(0) would seek to the start of the nested data."); - var remaining = PgReader.Seek((int)dataOffset); + Debug.Assert(!reader.NestedInitialized, "Unexpected nested read active, Seek(0) would seek to the start of the nested data."); + reader.Seek((int)dataOffset); // At offset, read into buffer. - length = Math.Min(length, remaining); - PgReader.ReadBytes(new Span(buffer, bufferOffset, length)); + length = Math.Min(length, reader.CurrentRemaining); + reader.ReadBytes(new Span(buffer, bufferOffset, length)); return length; } From c90653ac06902b5c7edeefd912bade00f275fd5d Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Wed, 25 Mar 2026 12:58:38 +0100 Subject: [PATCH 734/761] Buffer requirements changes (#6481) --- src/Npgsql/Internal/BufferRequirements.cs | 23 +++++++++++++ .../Internal/Converters/ArrayConverter.cs | 2 +- .../Converters/BitStringConverters.cs | 4 ++- .../Converters/Internal/VoidConverter.cs | 5 ++- .../Networking/IPNetworkConverter.cs | 2 +- .../Converters/Primitive/NumericConverters.cs | 2 +- .../VersionPrefixedTextConverter.cs | 7 ++-- src/Npgsql/Internal/PgBufferedConverter.cs | 5 ++- src/Npgsql/Internal/PgConverter.cs | 6 ---- src/Npgsql/Internal/PgReader.cs | 34 +++++++------------ src/Npgsql/Internal/PgWriter.cs | 16 +++++---- src/Npgsql/NpgsqlNestedDataReader.cs | 2 +- test/Npgsql.Tests/ReaderTests.cs | 5 ++- 13 files changed, 65 insertions(+), 48 deletions(-) diff --git a/src/Npgsql/Internal/BufferRequirements.cs 
b/src/Npgsql/Internal/BufferRequirements.cs index 14ffabc52b..9551687426 100644 --- a/src/Npgsql/Internal/BufferRequirements.cs +++ b/src/Npgsql/Internal/BufferRequirements.cs @@ -1,5 +1,6 @@ using System; using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; namespace Npgsql.Internal; @@ -42,4 +43,26 @@ public BufferRequirements Combine(int byteCount) public override int GetHashCode() => HashCode.Combine(_read, _write); public static bool operator ==(BufferRequirements left, BufferRequirements right) => left.Equals(right); public static bool operator !=(BufferRequirements left, BufferRequirements right) => !left.Equals(right); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal static int GetMinimumBufferByteCount(Size bufferRequirement, int valueSize) + { + ArgumentOutOfRangeException.ThrowIfNegative(valueSize); + var reqByteCount = bufferRequirement.GetValueOrDefault(); + switch (bufferRequirement.Kind) + { + case SizeKind.Exact: + if (reqByteCount != valueSize) + ThrowExactMismatch(reqByteCount, valueSize); + goto default; + case SizeKind.UpperBound: + return Math.Min(valueSize, reqByteCount); + default: + return reqByteCount; + } + + static void ThrowExactMismatch(int expected, int actual) + => throw new ArgumentOutOfRangeException(nameof(bufferRequirement), + $"Exact buffer requirement size ({expected} bytes) does not match the value size ({actual} bytes)."); + } } diff --git a/src/Npgsql/Internal/Converters/ArrayConverter.cs b/src/Npgsql/Internal/Converters/ArrayConverter.cs index 6a9b7b0947..117135d8d9 100644 --- a/src/Npgsql/Internal/Converters/ArrayConverter.cs +++ b/src/Npgsql/Internal/Converters/ArrayConverter.cs @@ -322,7 +322,7 @@ sealed class PolymorphicArrayConverter( { public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) { - bufferRequirements = BufferRequirements.Create(read: sizeof(int) + sizeof(int), write: Size.Unknown); + bufferRequirements = 
BufferRequirements.Create(read: Size.CreateUpperBound(sizeof(int) + sizeof(int)), write: Size.Unknown); return format is DataFormat.Binary; } diff --git a/src/Npgsql/Internal/Converters/BitStringConverters.cs b/src/Npgsql/Internal/Converters/BitStringConverters.cs index d0d6327a20..12316bdbd2 100644 --- a/src/Npgsql/Internal/Converters/BitStringConverters.cs +++ b/src/Npgsql/Internal/Converters/BitStringConverters.cs @@ -97,9 +97,11 @@ async ValueTask Write(bool async, PgWriter writer, BitArray value, CancellationT sealed class BitVector32BitStringConverter : PgBufferedConverter { + static int MaxSize => sizeof(int) + sizeof(int); + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) { - bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(int) + sizeof(int)); + bufferRequirements = BufferRequirements.Create(read: Size.CreateUpperBound(MaxSize), write: MaxSize); return format is DataFormat.Binary; } diff --git a/src/Npgsql/Internal/Converters/Internal/VoidConverter.cs b/src/Npgsql/Internal/Converters/Internal/VoidConverter.cs index 45b48df5b5..a91c39ae9b 100644 --- a/src/Npgsql/Internal/Converters/Internal/VoidConverter.cs +++ b/src/Npgsql/Internal/Converters/Internal/VoidConverter.cs @@ -6,7 +6,10 @@ namespace Npgsql.Internal.Converters.Internal; sealed class VoidConverter : PgBufferedConverter { public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) - => CanConvertBufferedDefault(DataFormat.Binary, out bufferRequirements); // Text is identical + { + bufferRequirements = BufferRequirements.CreateFixedSize(0); + return true; + } protected override object? ReadCore(PgReader reader) => null; protected override void WriteCore(PgWriter writer, object? 
value) => throw new NotSupportedException(); diff --git a/src/Npgsql/Internal/Converters/Networking/IPNetworkConverter.cs b/src/Npgsql/Internal/Converters/Networking/IPNetworkConverter.cs index 6fc7b5401e..0f1cac0935 100644 --- a/src/Npgsql/Internal/Converters/Networking/IPNetworkConverter.cs +++ b/src/Npgsql/Internal/Converters/Networking/IPNetworkConverter.cs @@ -7,7 +7,7 @@ namespace Npgsql.Internal.Converters; sealed class IPNetworkConverter : PgBufferedConverter { public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) - => CanConvertBufferedDefault(format, out bufferRequirements); + => NpgsqlInetConverter.CanConvertImpl(format, out bufferRequirements); public override Size GetSize(SizeContext context, IPNetwork value, ref object? writeState) => NpgsqlInetConverter.GetSizeImpl(context, value.BaseAddress, ref writeState); diff --git a/src/Npgsql/Internal/Converters/Primitive/NumericConverters.cs b/src/Npgsql/Internal/Converters/Primitive/NumericConverters.cs index 79a82a1bfa..7d61d677e9 100644 --- a/src/Npgsql/Internal/Converters/Primitive/NumericConverters.cs +++ b/src/Npgsql/Internal/Converters/Primitive/NumericConverters.cs @@ -67,7 +67,7 @@ public override void Write(PgWriter writer, BigInteger value) public override ValueTask WriteAsync(PgWriter writer, BigInteger value, CancellationToken cancellationToken = default) { - if (writer.ShouldFlush(writer.Current.Size)) + if (writer.ShouldFlush(writer.Current.Size.Value)) return AsyncCore(writer, value, cancellationToken); // If we don't need a flush and can write buffered we delegate to our sync write method which won't flush in such a case. 
diff --git a/src/Npgsql/Internal/Converters/VersionPrefixedTextConverter.cs b/src/Npgsql/Internal/Converters/VersionPrefixedTextConverter.cs index 8dc981a47e..5252617506 100644 --- a/src/Npgsql/Internal/Converters/VersionPrefixedTextConverter.cs +++ b/src/Npgsql/Internal/Converters/VersionPrefixedTextConverter.cs @@ -73,10 +73,9 @@ public static async ValueTask ReadVersion(bool async, byte expectedVersion, PgRe throw new InvalidCastException($"Unknown wire format version: {actualVersion}"); } - // No need for a nested read, all text converters will read CurrentRemaining bytes. - // We only need to buffer data if we're binary, otherwise the caller would have had to do so - // as we directly expose the underlying text converter requirements for the text data format. - await reader.Buffer(async, textConverterReadRequirement, cancellationToken).ConfigureAwait(false); + var byteCount = BufferRequirements.GetMinimumBufferByteCount(textConverterReadRequirement, reader.CurrentRemaining); + if (reader.ShouldBuffer(byteCount)) + await reader.Buffer(async, byteCount, cancellationToken).ConfigureAwait(false); } public static bool CanConvert(PgConverter textConverter, DataFormat format, out BufferRequirements textConverterRequirements, out BufferRequirements bufferRequirements) diff --git a/src/Npgsql/Internal/PgBufferedConverter.cs b/src/Npgsql/Internal/PgBufferedConverter.cs index beced6d589..12b9851c8d 100644 --- a/src/Npgsql/Internal/PgBufferedConverter.cs +++ b/src/Npgsql/Internal/PgBufferedConverter.cs @@ -16,8 +16,7 @@ public override Size GetSize(SizeContext context, T value, ref object? writeStat public sealed override T Read(PgReader reader) { - // We check FieldAtStart to speed up simple value reads, as field level buffering was handled by reader.StartRead() already. 
- if (!reader.FieldAtStart && reader.ShouldBufferCurrent()) + if (reader.ShouldBufferCurrent()) ThrowIORequired(reader.CurrentBufferRequirement); return ReadCore(reader); @@ -31,7 +30,7 @@ internal sealed override ValueTask ReadAsObject(bool async, PgReader rea public sealed override void Write(PgWriter writer, T value) { - if (!writer.BufferingWrite && writer.ShouldFlush(writer.CurrentBufferRequirement)) + if (writer.ShouldFlushCurrent()) ThrowIORequired(writer.CurrentBufferRequirement); WriteCore(writer, value); diff --git a/src/Npgsql/Internal/PgConverter.cs b/src/Npgsql/Internal/PgConverter.cs index 627c4dc979..0d1eb000c3 100644 --- a/src/Npgsql/Internal/PgConverter.cs +++ b/src/Npgsql/Internal/PgConverter.cs @@ -87,12 +87,6 @@ private protected static bool ThrowInvalidNullValue() private protected bool ThrowDbNullPredicateOutOfRange() => throw new UnreachableException($"Unknown case {DbNullPredicateKind.ToString()}"); - - protected bool CanConvertBufferedDefault(DataFormat format, out BufferRequirements bufferRequirements) - { - bufferRequirements = BufferRequirements.Value; - return format is DataFormat.Binary; - } } public abstract class PgConverter : PgConverter diff --git a/src/Npgsql/Internal/PgReader.cs b/src/Npgsql/Internal/PgReader.cs index 44f5d7b24c..ee117aa874 100644 --- a/src/Npgsql/Internal/PgReader.cs +++ b/src/Npgsql/Internal/PgReader.cs @@ -458,19 +458,21 @@ internal void StartRead(Size bufferRequirement) { Debug.Assert(FieldSize >= 0); _fieldBufferRequirement = bufferRequirement; - if (ShouldBuffer(bufferRequirement)) - BufferNoInlined(bufferRequirement); + var byteCount = BufferRequirements.GetMinimumBufferByteCount(bufferRequirement, FieldSize); + if (ShouldBuffer(byteCount)) + BufferNoInlined(byteCount); [MethodImpl(MethodImplOptions.NoInlining)] - void BufferNoInlined(Size bufferRequirement) - => Buffer(bufferRequirement); + void BufferNoInlined(int byteCount) + => Buffer(byteCount); } internal ValueTask StartReadAsync(Size 
bufferRequirement, CancellationToken cancellationToken) { Debug.Assert(FieldSize >= 0); _fieldBufferRequirement = bufferRequirement; - return ShouldBuffer(bufferRequirement) ? BufferAsync(bufferRequirement, cancellationToken) : new(); + var byteCount = BufferRequirements.GetMinimumBufferByteCount(bufferRequirement, FieldSize); + return ShouldBuffer(byteCount) ? BufferAsync(byteCount, cancellationToken) : new(); } internal void EndRead() @@ -522,7 +524,9 @@ internal async ValueTask BeginNestedRead(bool async, int size, _currentBufferRequirement = bufferRequirement; _currentStartPos = FieldOffset; - await Buffer(async, bufferRequirement, cancellationToken).ConfigureAwait(false); + var byteCount = BufferRequirements.GetMinimumBufferByteCount(bufferRequirement, size); + if (ShouldBuffer(byteCount)) + await Buffer(async, byteCount, cancellationToken).ConfigureAwait(false); return new NestedReadScope(async, this, previousSize, previousStartPos, previousBufferRequirement); } @@ -740,16 +744,10 @@ byte[] RentArray(int count) return array; } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - int GetBufferRequirementByteCount(Size bufferRequirement) - => bufferRequirement is { Kind: SizeKind.UpperBound } - ? Math.Min(CurrentRemaining, bufferRequirement.Value) - : bufferRequirement.GetValueOrDefault(); - - internal bool ShouldBufferCurrent() => ShouldBuffer(CurrentBufferRequirement); + // We check FieldAtStart to speed up simple value reads, as field level buffering was handled by reader.StartRead() already. 
+ internal bool ShouldBufferCurrent() + => !FieldAtStart && ShouldBuffer(BufferRequirements.GetMinimumBufferByteCount(CurrentBufferRequirement, CurrentRemaining)); - public bool ShouldBuffer(Size bufferRequirement) - => ShouldBuffer(GetBufferRequirementByteCount(bufferRequirement)); public bool ShouldBuffer(int byteCount) { return _buffer.ReadBytesLeft < byteCount && ShouldBufferSlow(byteCount); @@ -768,16 +766,10 @@ bool ShouldBufferSlow(int byteCount) } } - public void Buffer(Size bufferRequirement) - => Buffer(GetBufferRequirementByteCount(bufferRequirement)); public void Buffer(int byteCount) => _buffer.Ensure(byteCount); - public ValueTask BufferAsync(Size bufferRequirement, CancellationToken cancellationToken) - => BufferAsync(GetBufferRequirementByteCount(bufferRequirement), cancellationToken); public ValueTask BufferAsync(int byteCount, CancellationToken cancellationToken) => _buffer.EnsureAsync(byteCount); - internal ValueTask Buffer(bool async, Size bufferRequirement, CancellationToken cancellationToken) - => Buffer(async, GetBufferRequirementByteCount(bufferRequirement), cancellationToken); internal ValueTask Buffer(bool async, int byteCount, CancellationToken cancellationToken) { if (async) diff --git a/src/Npgsql/Internal/PgWriter.cs b/src/Npgsql/Internal/PgWriter.cs index 902c15d20b..572197dbd4 100644 --- a/src/Npgsql/Internal/PgWriter.cs +++ b/src/Npgsql/Internal/PgWriter.cs @@ -189,7 +189,9 @@ internal void Commit(int? 
expectedByteCount = null) internal ValueTask BeginWrite(bool async, ValueMetadata current, CancellationToken cancellationToken) { _current = current; - if (ShouldFlush(current.BufferRequirement)) + + var bufferRequirementByteCount = BufferRequirements.GetMinimumBufferByteCount(current.BufferRequirement, current.Size.GetValueOrDefault()); + if (ShouldFlush(bufferRequirementByteCount)) return Flush(async, cancellationToken); return new(); @@ -456,6 +458,7 @@ async ValueTask Core(bool allowMixedIO, ReadOnlyMemory buffer, Cancellatio } } } + /// /// Gets a that can be used to write to the underlying buffer. /// @@ -464,10 +467,9 @@ async ValueTask Core(bool allowMixedIO, ReadOnlyMemory buffer, Cancellatio public Stream GetStream(bool allowMixedIO = false) => new PgWriterStream(this, allowMixedIO); - public bool ShouldFlush(Size bufferRequirement) - => ShouldFlush(bufferRequirement is { Kind: SizeKind.UpperBound } - ? Math.Min(Current.Size.Value, bufferRequirement.Value) - : bufferRequirement.GetValueOrDefault()); + // We also check pos != offset to speed up simple value writes, as field level buffering was handled by writer.StartWrite() already. + public bool ShouldFlushCurrent() + => !BufferingWrite && _pos != _offset && ShouldFlush(BufferRequirements.GetMinimumBufferByteCount(Current.BufferRequirement, Current.Size.GetValueOrDefault())); public bool ShouldFlush(int byteCount) => Remaining < byteCount && FlushMode is not FlushMode.None; @@ -526,10 +528,10 @@ internal ValueTask BeginNestedWrite(bool async, Size bufferReq { Debug.Assert(bufferRequirement != -1); - // ShouldFlush depends on the current size for upper bound requirements, so we must set it beforehand. 
+ var bufferRequirementByteCount = BufferRequirements.GetMinimumBufferByteCount(bufferRequirement, byteCount); _current = new() { Format = _current.Format, Size = byteCount, BufferRequirement = bufferRequirement, WriteState = state }; - if (ShouldFlush(bufferRequirement)) + if (ShouldFlush(bufferRequirementByteCount)) return Core(async, cancellationToken); return new(new NestedWriteScope()); diff --git a/src/Npgsql/NpgsqlNestedDataReader.cs b/src/Npgsql/NpgsqlNestedDataReader.cs index cda412d1a5..14ec9945a1 100644 --- a/src/Npgsql/NpgsqlNestedDataReader.cs +++ b/src/Npgsql/NpgsqlNestedDataReader.cs @@ -188,7 +188,7 @@ public override long GetBytes(int ordinal, long dataOffset, byte[]? buffer, int if (buffer is null) return columnLen; - using var _ = PgReader.BeginNestedRead(columnLen, Size.Zero); + using var _ = PgReader.BeginNestedRead(columnLen, Size.Unknown); // Move to offset PgReader.Seek((int)dataOffset); diff --git a/test/Npgsql.Tests/ReaderTests.cs b/test/Npgsql.Tests/ReaderTests.cs index 8a4484ef9b..971de29076 100644 --- a/test/Npgsql.Tests/ReaderTests.cs +++ b/test/Npgsql.Tests/ReaderTests.cs @@ -2396,7 +2396,10 @@ public override Size GetSize(SizeContext context, int value, ref object? 
writeSt => throw new NotSupportedException(); public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) - => CanConvertBufferedDefault(format, out bufferRequirements); + { + bufferRequirements = BufferRequirements.Value; + return format is DataFormat.Binary; + } protected override void WriteCore(PgWriter writer, int value) => throw new NotSupportedException(); From 53b6ad3b4d4e8e2d953b14e3b76495d21d20edc3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 2 Apr 2026 13:01:07 +0300 Subject: [PATCH 735/761] Bump the minor-and-patch group with 5 updates (#6526) Bumps GitHubActionsTestLogger from 3.0.1 to 3.0.2 Bumps OpenTelemetry from 1.15.0 to 1.15.1 Bumps OpenTelemetry.Api from 1.15.0 to 1.15.1 Bumps OpenTelemetry.Exporter.InMemory from 1.15.0 to 1.15.1 Bumps Scriban.Signed from 7.0.3 to 7.0.6 --- Directory.Packages.props | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 8e8ca841b1..a6bb41adef 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -11,7 +11,7 @@ - + @@ -25,7 +25,7 @@ - + @@ -37,10 +37,10 @@ - + - - + + From 3698a477b3f52808e3c4ecf9ba53e3db8c8b6e5c Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Fri, 3 Apr 2026 19:36:55 +0300 Subject: [PATCH 736/761] Add back parameterless overload of ReloadTypesAsync (#6522) --- src/Npgsql/NpgsqlConnection.cs | 9 ++++++++- src/Npgsql/PublicAPI.Shipped.txt | 1 + src/Npgsql/PublicAPI.Unshipped.txt | 2 -- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/src/Npgsql/NpgsqlConnection.cs b/src/Npgsql/NpgsqlConnection.cs index 9619e938bd..d4334582e0 100644 --- a/src/Npgsql/NpgsqlConnection.cs +++ b/src/Npgsql/NpgsqlConnection.cs @@ -1752,7 +1752,14 @@ public void ReloadTypes() /// Flushes the type cache for this connection's connection string and reloads the types for this connection only. 
/// Type changes will appear for other connections only after they are re-opened from the pool. /// - public async Task ReloadTypesAsync(CancellationToken cancellationToken = default) + public Task ReloadTypesAsync() + => ReloadTypesAsync(CancellationToken.None); + + /// + /// Flushes the type cache for this connection's connection string and reloads the types for this connection only. + /// Type changes will appear for other connections only after they are re-opened from the pool. + /// + public async Task ReloadTypesAsync(CancellationToken cancellationToken) { CheckReady(); diff --git a/src/Npgsql/PublicAPI.Shipped.txt b/src/Npgsql/PublicAPI.Shipped.txt index 3ec604ddc0..84bb317e6f 100644 --- a/src/Npgsql/PublicAPI.Shipped.txt +++ b/src/Npgsql/PublicAPI.Shipped.txt @@ -402,6 +402,7 @@ Npgsql.NpgsqlConnection.ProvidePasswordCallback.get -> Npgsql.ProvidePasswordCal Npgsql.NpgsqlConnection.ProvidePasswordCallback.set -> void Npgsql.NpgsqlConnection.ReloadTypes() -> void Npgsql.NpgsqlConnection.ReloadTypesAsync() -> System.Threading.Tasks.Task! +Npgsql.NpgsqlConnection.ReloadTypesAsync(System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! Npgsql.NpgsqlConnection.Timezone.get -> string! Npgsql.NpgsqlConnection.TypeMapper.get -> Npgsql.TypeMapping.INpgsqlTypeMapper! Npgsql.NpgsqlConnection.UnprepareAll() -> void diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index a51b8a3f46..3fb4a49954 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -98,7 +98,6 @@ override Npgsql.NpgsqlDataReader.GetColumnSchemaAsync(System.Threading.Cancellat override Npgsql.NpgsqlMultiHostDataSource.Clear() -> void Npgsql.NpgsqlDataSource.ReloadTypes() -> void Npgsql.NpgsqlDataSource.ReloadTypesAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! 
-Npgsql.NpgsqlConnection.ReloadTypesAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! *REMOVED*Npgsql.NpgsqlLargeObjectManager *REMOVED*Npgsql.NpgsqlLargeObjectManager.Create(uint preferredOid = 0) -> uint *REMOVED*Npgsql.NpgsqlLargeObjectManager.CreateAsync(uint preferredOid, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! @@ -121,7 +120,6 @@ Npgsql.NpgsqlConnection.ReloadTypesAsync(System.Threading.CancellationToken canc *REMOVED*Npgsql.NpgsqlLargeObjectStream.Has64BitSupport.get -> bool *REMOVED*Npgsql.NpgsqlLargeObjectStream.SeekAsync(long offset, System.IO.SeekOrigin origin, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! *REMOVED*Npgsql.NpgsqlLargeObjectStream.SetLength(long value, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! -*REMOVED*Npgsql.NpgsqlConnection.ReloadTypesAsync() -> System.Threading.Tasks.Task! *REMOVED*Npgsql.NpgsqlDataSourceBuilder.MapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! *REMOVED*Npgsql.NpgsqlDataSourceBuilder.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! *REMOVED*Npgsql.NpgsqlDataSourceBuilder.MapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! 
From 387817f6b8de5908787ac66fdde2bbf70d0f875c Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Sat, 4 Apr 2026 19:49:22 +0200 Subject: [PATCH 737/761] Merge PublicAPI.Unshipped into PublicAPI.Shipped for major release Move all unshipped API entries into shipped files and reset unshipped files to start fresh after the major version release. Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- .../PublicAPI.Shipped.txt | 6 +- .../PublicAPI.Unshipped.txt | 6 +- src/Npgsql.GeoJSON/PublicAPI.Shipped.txt | 6 +- src/Npgsql.GeoJSON/PublicAPI.Unshipped.txt | 4 +- src/Npgsql.Json.NET/PublicAPI.Shipped.txt | 3 +- src/Npgsql.Json.NET/PublicAPI.Unshipped.txt | 3 +- .../PublicAPI.Shipped.txt | 3 +- .../PublicAPI.Unshipped.txt | 3 +- src/Npgsql.NodaTime/PublicAPI.Shipped.txt | 3 +- src/Npgsql.NodaTime/PublicAPI.Unshipped.txt | 3 +- src/Npgsql/PublicAPI.Shipped.txt | 794 ++++++++++-------- src/Npgsql/PublicAPI.Unshipped.txt | 182 +--- 12 files changed, 450 insertions(+), 566 deletions(-) diff --git a/src/Npgsql.DependencyInjection/PublicAPI.Shipped.txt b/src/Npgsql.DependencyInjection/PublicAPI.Shipped.txt index 4066bf5273..86c0b922d4 100644 --- a/src/Npgsql.DependencyInjection/PublicAPI.Shipped.txt +++ b/src/Npgsql.DependencyInjection/PublicAPI.Shipped.txt @@ -1,18 +1,22 @@ -#nullable enable +#nullable enable Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? 
serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! 
dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! 
dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! 
connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! 
+static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! 
dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! 
diff --git a/src/Npgsql.DependencyInjection/PublicAPI.Unshipped.txt b/src/Npgsql.DependencyInjection/PublicAPI.Unshipped.txt index 34f2d889e9..7dc5c58110 100644 --- a/src/Npgsql.DependencyInjection/PublicAPI.Unshipped.txt +++ b/src/Npgsql.DependencyInjection/PublicAPI.Unshipped.txt @@ -1,5 +1 @@ -#nullable enable -static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! -static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddMultiHostNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! -static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! 
dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! -static Microsoft.Extensions.DependencyInjection.NpgsqlServiceCollectionExtensions.AddNpgsqlSlimDataSource(this Microsoft.Extensions.DependencyInjection.IServiceCollection! serviceCollection, string! connectionString, System.Action! dataSourceBuilderAction, Microsoft.Extensions.DependencyInjection.ServiceLifetime connectionLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Transient, Microsoft.Extensions.DependencyInjection.ServiceLifetime dataSourceLifetime = Microsoft.Extensions.DependencyInjection.ServiceLifetime.Singleton, object? serviceKey = null) -> Microsoft.Extensions.DependencyInjection.IServiceCollection! +#nullable enable diff --git a/src/Npgsql.GeoJSON/PublicAPI.Shipped.txt b/src/Npgsql.GeoJSON/PublicAPI.Shipped.txt index 7f92ef111d..2281dba39d 100644 --- a/src/Npgsql.GeoJSON/PublicAPI.Shipped.txt +++ b/src/Npgsql.GeoJSON/PublicAPI.Shipped.txt @@ -1,4 +1,4 @@ -#nullable enable +#nullable enable Npgsql.GeoJSON.CrsMap Npgsql.GeoJSON.CrsMapExtensions Npgsql.GeoJSONOptions @@ -10,4 +10,6 @@ Npgsql.NpgsqlGeoJSONExtensions static Npgsql.GeoJSON.CrsMapExtensions.GetCrsMap(this Npgsql.NpgsqlDataSource! dataSource) -> Npgsql.GeoJSON.CrsMap! static Npgsql.GeoJSON.CrsMapExtensions.GetCrsMapAsync(this Npgsql.NpgsqlDataSource! dataSource) -> System.Threading.Tasks.Task! static Npgsql.NpgsqlGeoJSONExtensions.UseGeoJson(this Npgsql.TypeMapping.INpgsqlTypeMapper! mapper, Npgsql.GeoJSON.CrsMap! crsMap, Npgsql.GeoJSONOptions options = Npgsql.GeoJSONOptions.None, bool geographyAsDefault = false) -> Npgsql.TypeMapping.INpgsqlTypeMapper! 
-static Npgsql.NpgsqlGeoJSONExtensions.UseGeoJson(this Npgsql.TypeMapping.INpgsqlTypeMapper! mapper, Npgsql.GeoJSONOptions options = Npgsql.GeoJSONOptions.None, bool geographyAsDefault = false) -> Npgsql.TypeMapping.INpgsqlTypeMapper! \ No newline at end of file +static Npgsql.NpgsqlGeoJSONExtensions.UseGeoJson(this Npgsql.TypeMapping.INpgsqlTypeMapper! mapper, Npgsql.GeoJSONOptions options = Npgsql.GeoJSONOptions.None, bool geographyAsDefault = false) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +static Npgsql.NpgsqlGeoJSONExtensions.UseGeoJson(this TMapper mapper, Npgsql.GeoJSON.CrsMap! crsMap, Npgsql.GeoJSONOptions options = Npgsql.GeoJSONOptions.None, bool geographyAsDefault = false) -> TMapper +static Npgsql.NpgsqlGeoJSONExtensions.UseGeoJson(this TMapper mapper, Npgsql.GeoJSONOptions options = Npgsql.GeoJSONOptions.None, bool geographyAsDefault = false) -> TMapper diff --git a/src/Npgsql.GeoJSON/PublicAPI.Unshipped.txt b/src/Npgsql.GeoJSON/PublicAPI.Unshipped.txt index 34de07f0d7..7dc5c58110 100644 --- a/src/Npgsql.GeoJSON/PublicAPI.Unshipped.txt +++ b/src/Npgsql.GeoJSON/PublicAPI.Unshipped.txt @@ -1,3 +1 @@ -#nullable enable -static Npgsql.NpgsqlGeoJSONExtensions.UseGeoJson(this TMapper mapper, Npgsql.GeoJSON.CrsMap! crsMap, Npgsql.GeoJSONOptions options = Npgsql.GeoJSONOptions.None, bool geographyAsDefault = false) -> TMapper -static Npgsql.NpgsqlGeoJSONExtensions.UseGeoJson(this TMapper mapper, Npgsql.GeoJSONOptions options = Npgsql.GeoJSONOptions.None, bool geographyAsDefault = false) -> TMapper +#nullable enable diff --git a/src/Npgsql.Json.NET/PublicAPI.Shipped.txt b/src/Npgsql.Json.NET/PublicAPI.Shipped.txt index 912eb76bcb..f83708dc0e 100644 --- a/src/Npgsql.Json.NET/PublicAPI.Shipped.txt +++ b/src/Npgsql.Json.NET/PublicAPI.Shipped.txt @@ -1,3 +1,4 @@ -#nullable enable +#nullable enable Npgsql.NpgsqlJsonNetExtensions static Npgsql.NpgsqlJsonNetExtensions.UseJsonNet(this Npgsql.TypeMapping.INpgsqlTypeMapper! mapper, Newtonsoft.Json.JsonSerializerSettings? 
settings = null, System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +static Npgsql.NpgsqlJsonNetExtensions.UseJsonNet(this TMapper mapper, Newtonsoft.Json.JsonSerializerSettings? settings = null, System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> TMapper diff --git a/src/Npgsql.Json.NET/PublicAPI.Unshipped.txt b/src/Npgsql.Json.NET/PublicAPI.Unshipped.txt index f4557570e1..7dc5c58110 100644 --- a/src/Npgsql.Json.NET/PublicAPI.Unshipped.txt +++ b/src/Npgsql.Json.NET/PublicAPI.Unshipped.txt @@ -1,2 +1 @@ -#nullable enable -static Npgsql.NpgsqlJsonNetExtensions.UseJsonNet(this TMapper mapper, Newtonsoft.Json.JsonSerializerSettings? settings = null, System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> TMapper +#nullable enable diff --git a/src/Npgsql.NetTopologySuite/PublicAPI.Shipped.txt b/src/Npgsql.NetTopologySuite/PublicAPI.Shipped.txt index a9ca3382e6..d1e505b58e 100644 --- a/src/Npgsql.NetTopologySuite/PublicAPI.Shipped.txt +++ b/src/Npgsql.NetTopologySuite/PublicAPI.Shipped.txt @@ -1,3 +1,4 @@ -#nullable enable +#nullable enable Npgsql.NpgsqlNetTopologySuiteExtensions static Npgsql.NpgsqlNetTopologySuiteExtensions.UseNetTopologySuite(this Npgsql.TypeMapping.INpgsqlTypeMapper! mapper, NetTopologySuite.Geometries.CoordinateSequenceFactory? coordinateSequenceFactory = null, NetTopologySuite.Geometries.PrecisionModel? precisionModel = null, NetTopologySuite.Geometries.Ordinates handleOrdinates = NetTopologySuite.Geometries.Ordinates.None, bool geographyAsDefault = false) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +static Npgsql.NpgsqlNetTopologySuiteExtensions.UseNetTopologySuite(this TMapper mapper, NetTopologySuite.Geometries.CoordinateSequenceFactory? coordinateSequenceFactory = null, NetTopologySuite.Geometries.PrecisionModel? 
precisionModel = null, NetTopologySuite.Geometries.Ordinates handleOrdinates = NetTopologySuite.Geometries.Ordinates.None, bool geographyAsDefault = false) -> TMapper diff --git a/src/Npgsql.NetTopologySuite/PublicAPI.Unshipped.txt b/src/Npgsql.NetTopologySuite/PublicAPI.Unshipped.txt index ab78bca1af..7dc5c58110 100644 --- a/src/Npgsql.NetTopologySuite/PublicAPI.Unshipped.txt +++ b/src/Npgsql.NetTopologySuite/PublicAPI.Unshipped.txt @@ -1,2 +1 @@ -#nullable enable -static Npgsql.NpgsqlNetTopologySuiteExtensions.UseNetTopologySuite(this TMapper mapper, NetTopologySuite.Geometries.CoordinateSequenceFactory? coordinateSequenceFactory = null, NetTopologySuite.Geometries.PrecisionModel? precisionModel = null, NetTopologySuite.Geometries.Ordinates handleOrdinates = NetTopologySuite.Geometries.Ordinates.None, bool geographyAsDefault = false) -> TMapper +#nullable enable diff --git a/src/Npgsql.NodaTime/PublicAPI.Shipped.txt b/src/Npgsql.NodaTime/PublicAPI.Shipped.txt index 998522184e..3bdfb77065 100644 --- a/src/Npgsql.NodaTime/PublicAPI.Shipped.txt +++ b/src/Npgsql.NodaTime/PublicAPI.Shipped.txt @@ -1,3 +1,4 @@ -#nullable enable +#nullable enable Npgsql.NpgsqlNodaTimeExtensions static Npgsql.NpgsqlNodaTimeExtensions.UseNodaTime(this Npgsql.TypeMapping.INpgsqlTypeMapper! mapper) -> Npgsql.TypeMapping.INpgsqlTypeMapper! 
+static Npgsql.NpgsqlNodaTimeExtensions.UseNodaTime(this TMapper mapper) -> TMapper diff --git a/src/Npgsql.NodaTime/PublicAPI.Unshipped.txt b/src/Npgsql.NodaTime/PublicAPI.Unshipped.txt index f1ab4e3c0c..7dc5c58110 100644 --- a/src/Npgsql.NodaTime/PublicAPI.Unshipped.txt +++ b/src/Npgsql.NodaTime/PublicAPI.Unshipped.txt @@ -1,2 +1 @@ -#nullable enable -static Npgsql.NpgsqlNodaTimeExtensions.UseNodaTime(this TMapper mapper) -> TMapper +#nullable enable diff --git a/src/Npgsql/PublicAPI.Shipped.txt b/src/Npgsql/PublicAPI.Shipped.txt index 84bb317e6f..6d888f6a3f 100644 --- a/src/Npgsql/PublicAPI.Shipped.txt +++ b/src/Npgsql/PublicAPI.Shipped.txt @@ -1,245 +1,4 @@ -#nullable enable -abstract Npgsql.Replication.PgOutput.Messages.UpdateMessage.NewRow.get -> Npgsql.Replication.PgOutput.ReplicationTuple! -abstract NpgsqlTypes.NpgsqlTsQuery.Equals(NpgsqlTypes.NpgsqlTsQuery? other) -> bool -const Npgsql.NpgsqlConnection.DefaultPort = 5432 -> int -const Npgsql.PostgresErrorCodes.ActiveSqlTransaction = "25001" -> string! -const Npgsql.PostgresErrorCodes.AdminShutdown = "57P01" -> string! -const Npgsql.PostgresErrorCodes.AmbiguousAlias = "42P09" -> string! -const Npgsql.PostgresErrorCodes.AmbiguousColumn = "42702" -> string! -const Npgsql.PostgresErrorCodes.AmbiguousFunction = "42725" -> string! -const Npgsql.PostgresErrorCodes.AmbiguousParameter = "42P08" -> string! -const Npgsql.PostgresErrorCodes.ArraySubscriptError = "2202E" -> string! -const Npgsql.PostgresErrorCodes.AssertFailure = "P0004" -> string! -const Npgsql.PostgresErrorCodes.BadCopyFileFormat = "22P04" -> string! -const Npgsql.PostgresErrorCodes.BranchTransactionAlreadyActive = "25002" -> string! -const Npgsql.PostgresErrorCodes.CannotCoerce = "42846" -> string! -const Npgsql.PostgresErrorCodes.CannotConnectNow = "57P03" -> string! -const Npgsql.PostgresErrorCodes.CantChangeRuntimeParam = "55P02" -> string! -const Npgsql.PostgresErrorCodes.CardinalityViolation = "21000" -> string! 
-const Npgsql.PostgresErrorCodes.CaseNotFound = "20000" -> string! -const Npgsql.PostgresErrorCodes.CharacterNotInRepertoire = "22021" -> string! -const Npgsql.PostgresErrorCodes.CheckViolation = "23514" -> string! -const Npgsql.PostgresErrorCodes.CollationMismatch = "42P21" -> string! -const Npgsql.PostgresErrorCodes.ConfigFileError = "F0000" -> string! -const Npgsql.PostgresErrorCodes.ConfigurationLimitExceeded = "53400" -> string! -const Npgsql.PostgresErrorCodes.ConnectionDoesNotExist = "08003" -> string! -const Npgsql.PostgresErrorCodes.ConnectionException = "08000" -> string! -const Npgsql.PostgresErrorCodes.ConnectionFailure = "08006" -> string! -const Npgsql.PostgresErrorCodes.ContainingSqlNotPermittedExternalRoutineException = "38001" -> string! -const Npgsql.PostgresErrorCodes.CrashShutdown = "57P02" -> string! -const Npgsql.PostgresErrorCodes.DatabaseDropped = "57P04" -> string! -const Npgsql.PostgresErrorCodes.DataCorrupted = "XX001" -> string! -const Npgsql.PostgresErrorCodes.DataException = "22000" -> string! -const Npgsql.PostgresErrorCodes.DatatypeMismatch = "42804" -> string! -const Npgsql.PostgresErrorCodes.DatetimeFieldOverflow = "22008" -> string! -const Npgsql.PostgresErrorCodes.DeadlockDetected = "40P01" -> string! -const Npgsql.PostgresErrorCodes.DependentObjectsStillExist = "2BP01" -> string! -const Npgsql.PostgresErrorCodes.DependentPrivilegeDescriptorsStillExist = "2B000" -> string! -const Npgsql.PostgresErrorCodes.DeprecatedFeatureWarning = "01P01" -> string! -const Npgsql.PostgresErrorCodes.DiagnosticsException = "0Z000" -> string! -const Npgsql.PostgresErrorCodes.DiskFull = "53100" -> string! -const Npgsql.PostgresErrorCodes.DivisionByZero = "22012" -> string! -const Npgsql.PostgresErrorCodes.DuplicateAlias = "42712" -> string! -const Npgsql.PostgresErrorCodes.DuplicateColumn = "42701" -> string! -const Npgsql.PostgresErrorCodes.DuplicateCursor = "42P03" -> string! -const Npgsql.PostgresErrorCodes.DuplicateDatabase = "42P04" -> string! 
-const Npgsql.PostgresErrorCodes.DuplicateFile = "58P02" -> string! -const Npgsql.PostgresErrorCodes.DuplicateFunction = "42723" -> string! -const Npgsql.PostgresErrorCodes.DuplicateObject = "42710" -> string! -const Npgsql.PostgresErrorCodes.DuplicatePreparedStatement = "42P05" -> string! -const Npgsql.PostgresErrorCodes.DuplicateSchema = "42P06" -> string! -const Npgsql.PostgresErrorCodes.DuplicateTable = "42P07" -> string! -const Npgsql.PostgresErrorCodes.DynamicResultSetsReturnedWarning = "0100C" -> string! -const Npgsql.PostgresErrorCodes.ErrorInAssignment = "22005" -> string! -const Npgsql.PostgresErrorCodes.EscapeCharacterConflict = "2200B" -> string! -const Npgsql.PostgresErrorCodes.EventTriggerProtocolViolatedExternalRoutineInvocationException = "39P03" -> string! -const Npgsql.PostgresErrorCodes.ExclusionViolation = "23P01" -> string! -const Npgsql.PostgresErrorCodes.ExternalRoutineException = "38000" -> string! -const Npgsql.PostgresErrorCodes.ExternalRoutineInvocationException = "39000" -> string! -const Npgsql.PostgresErrorCodes.FdwColumnNameNotFound = "HV005" -> string! -const Npgsql.PostgresErrorCodes.FdwDynamicParameterValueNeeded = "HV002" -> string! -const Npgsql.PostgresErrorCodes.FdwError = "HV000" -> string! -const Npgsql.PostgresErrorCodes.FdwFunctionSequenceError = "HV010" -> string! -const Npgsql.PostgresErrorCodes.FdwInconsistentDescriptorInformation = "HV021" -> string! -const Npgsql.PostgresErrorCodes.FdwInvalidAttributeValue = "HV024" -> string! -const Npgsql.PostgresErrorCodes.FdwInvalidColumnName = "HV007" -> string! -const Npgsql.PostgresErrorCodes.FdwInvalidColumnNumber = "HV008" -> string! -const Npgsql.PostgresErrorCodes.FdwInvalidDataType = "HV004" -> string! -const Npgsql.PostgresErrorCodes.FdwInvalidDataTypeDescriptors = "HV006" -> string! -const Npgsql.PostgresErrorCodes.FdwInvalidDescriptorFieldIdentifier = "HV091" -> string! -const Npgsql.PostgresErrorCodes.FdwInvalidHandle = "HV00B" -> string! 
-const Npgsql.PostgresErrorCodes.FdwInvalidOptionIndex = "HV00C" -> string! -const Npgsql.PostgresErrorCodes.FdwInvalidOptionName = "HV00D" -> string! -const Npgsql.PostgresErrorCodes.FdwInvalidStringFormat = "HV00A" -> string! -const Npgsql.PostgresErrorCodes.FdwInvalidStringLengthOrBufferLength = "HV090" -> string! -const Npgsql.PostgresErrorCodes.FdwInvalidUseOfNullPointer = "HV009" -> string! -const Npgsql.PostgresErrorCodes.FdwNoSchemas = "HV00P" -> string! -const Npgsql.PostgresErrorCodes.FdwOptionNameNotFound = "HV00J" -> string! -const Npgsql.PostgresErrorCodes.FdwOutOfMemory = "HV001" -> string! -const Npgsql.PostgresErrorCodes.FdwReplyHandle = "HV00K" -> string! -const Npgsql.PostgresErrorCodes.FdwSchemaNotFound = "HV00Q" -> string! -const Npgsql.PostgresErrorCodes.FdwTableNotFound = "HV00R" -> string! -const Npgsql.PostgresErrorCodes.FdwTooManyHandles = "HV014" -> string! -const Npgsql.PostgresErrorCodes.FdwUnableToCreateExecution = "HV00L" -> string! -const Npgsql.PostgresErrorCodes.FdwUnableToCreateReply = "HV00M" -> string! -const Npgsql.PostgresErrorCodes.FdwUnableToEstablishConnection = "HV00N" -> string! -const Npgsql.PostgresErrorCodes.FeatureNotSupported = "0A000" -> string! -const Npgsql.PostgresErrorCodes.FloatingPointException = "22P01" -> string! -const Npgsql.PostgresErrorCodes.ForeignKeyViolation = "23503" -> string! -const Npgsql.PostgresErrorCodes.FunctionExecutedNoReturnStatementSqlRoutineException = "2F005" -> string! -const Npgsql.PostgresErrorCodes.GroupingError = "42803" -> string! -const Npgsql.PostgresErrorCodes.HeldCursorRequiresSameIsolationLevel = "25008" -> string! -const Npgsql.PostgresErrorCodes.IdleSessionTimeout = "57P05" -> string! -const Npgsql.PostgresErrorCodes.ImplicitZeroBitPaddingWarning = "01008" -> string! -const Npgsql.PostgresErrorCodes.InappropriateAccessModeForBranchTransaction = "25003" -> string! -const Npgsql.PostgresErrorCodes.InappropriateIsolationLevelForBranchTransaction = "25004" -> string! 
-const Npgsql.PostgresErrorCodes.IndeterminateCollation = "42P22" -> string! -const Npgsql.PostgresErrorCodes.IndeterminateDatatype = "42P18" -> string! -const Npgsql.PostgresErrorCodes.IndexCorrupted = "XX002" -> string! -const Npgsql.PostgresErrorCodes.IndicatorOverflow = "22022" -> string! -const Npgsql.PostgresErrorCodes.InFailedSqlTransaction = "25P02" -> string! -const Npgsql.PostgresErrorCodes.InsufficientPrivilege = "42501" -> string! -const Npgsql.PostgresErrorCodes.InsufficientResources = "53000" -> string! -const Npgsql.PostgresErrorCodes.IntegrityConstraintViolation = "23000" -> string! -const Npgsql.PostgresErrorCodes.InternalError = "XX000" -> string! -const Npgsql.PostgresErrorCodes.IntervalFieldOverflow = "22015" -> string! -const Npgsql.PostgresErrorCodes.InvalidArgumentForLogarithm = "2201E" -> string! -const Npgsql.PostgresErrorCodes.InvalidArgumentForNthValueFunction = "22016" -> string! -const Npgsql.PostgresErrorCodes.InvalidArgumentForNtileFunction = "22014" -> string! -const Npgsql.PostgresErrorCodes.InvalidArgumentForPowerFunction = "2201F" -> string! -const Npgsql.PostgresErrorCodes.InvalidArgumentForWidthBucketFunction = "2201G" -> string! -const Npgsql.PostgresErrorCodes.InvalidAuthorizationSpecification = "28000" -> string! -const Npgsql.PostgresErrorCodes.InvalidBinaryRepresentation = "22P03" -> string! -const Npgsql.PostgresErrorCodes.InvalidCatalogName = "3D000" -> string! -const Npgsql.PostgresErrorCodes.InvalidCharacterValueForCast = "22018" -> string! -const Npgsql.PostgresErrorCodes.InvalidColumnDefinition = "42611" -> string! -const Npgsql.PostgresErrorCodes.InvalidColumnReference = "42P10" -> string! -const Npgsql.PostgresErrorCodes.InvalidCursorDefinition = "42P11" -> string! -const Npgsql.PostgresErrorCodes.InvalidCursorName = "34000" -> string! -const Npgsql.PostgresErrorCodes.InvalidCursorState = "24000" -> string! -const Npgsql.PostgresErrorCodes.InvalidDatabaseDefinition = "42P12" -> string! 
-const Npgsql.PostgresErrorCodes.InvalidDatetimeFormat = "22007" -> string! -const Npgsql.PostgresErrorCodes.InvalidEscapeCharacter = "22019" -> string! -const Npgsql.PostgresErrorCodes.InvalidEscapeOctet = "2200D" -> string! -const Npgsql.PostgresErrorCodes.InvalidEscapeSequence = "22025" -> string! -const Npgsql.PostgresErrorCodes.InvalidForeignKey = "42830" -> string! -const Npgsql.PostgresErrorCodes.InvalidFunctionDefinition = "42P13" -> string! -const Npgsql.PostgresErrorCodes.InvalidGrantOperation = "0LP01" -> string! -const Npgsql.PostgresErrorCodes.InvalidGrantor = "0L000" -> string! -const Npgsql.PostgresErrorCodes.InvalidIndicatorParameterValue = "22010" -> string! -const Npgsql.PostgresErrorCodes.InvalidLocatorSpecification = "0F001" -> string! -const Npgsql.PostgresErrorCodes.InvalidName = "42602" -> string! -const Npgsql.PostgresErrorCodes.InvalidObjectDefinition = "42P17" -> string! -const Npgsql.PostgresErrorCodes.InvalidParameterValue = "22023" -> string! -const Npgsql.PostgresErrorCodes.InvalidPassword = "28P01" -> string! -const Npgsql.PostgresErrorCodes.InvalidPreparedStatementDefinition = "42P14" -> string! -const Npgsql.PostgresErrorCodes.InvalidRecursion = "42P19" -> string! -const Npgsql.PostgresErrorCodes.InvalidRegularExpression = "2201B" -> string! -const Npgsql.PostgresErrorCodes.InvalidRoleSpecification = "0P000" -> string! -const Npgsql.PostgresErrorCodes.InvalidRowCountInLimitClause = "2201W" -> string! -const Npgsql.PostgresErrorCodes.InvalidRowCountInResultOffsetClause = "2201X" -> string! -const Npgsql.PostgresErrorCodes.InvalidSavepointSpecification = "3B001" -> string! -const Npgsql.PostgresErrorCodes.InvalidSchemaDefinition = "42P15" -> string! -const Npgsql.PostgresErrorCodes.InvalidSchemaName = "3F000" -> string! -const Npgsql.PostgresErrorCodes.InvalidSqlStatementName = "26000" -> string! -const Npgsql.PostgresErrorCodes.InvalidSqlstateReturnedExternalRoutineInvocationException = "39001" -> string! 
-const Npgsql.PostgresErrorCodes.InvalidTableDefinition = "42P16" -> string! -const Npgsql.PostgresErrorCodes.InvalidTablesampleArgument = "2202H" -> string! -const Npgsql.PostgresErrorCodes.InvalidTablesampleRepeat = "2202G" -> string! -const Npgsql.PostgresErrorCodes.InvalidTextRepresentation = "22P02" -> string! -const Npgsql.PostgresErrorCodes.InvalidTimeZoneDisplacementValue = "22009" -> string! -const Npgsql.PostgresErrorCodes.InvalidTransactionInitiation = "0B000" -> string! -const Npgsql.PostgresErrorCodes.InvalidTransactionState = "25000" -> string! -const Npgsql.PostgresErrorCodes.InvalidTransactionTermination = "2D000" -> string! -const Npgsql.PostgresErrorCodes.InvalidUseOfEscapeCharacter = "2200C" -> string! -const Npgsql.PostgresErrorCodes.InvalidXmlComment = "2200S" -> string! -const Npgsql.PostgresErrorCodes.InvalidXmlContent = "2200N" -> string! -const Npgsql.PostgresErrorCodes.InvalidXmlDocument = "2200M" -> string! -const Npgsql.PostgresErrorCodes.InvalidXmlProcessingInstruction = "2200T" -> string! -const Npgsql.PostgresErrorCodes.IoError = "58030" -> string! -const Npgsql.PostgresErrorCodes.LocatorException = "0F000" -> string! -const Npgsql.PostgresErrorCodes.LockFileExists = "F0001" -> string! -const Npgsql.PostgresErrorCodes.LockNotAvailable = "55P03" -> string! -const Npgsql.PostgresErrorCodes.ModifyingSqlDataNotPermittedExternalRoutineException = "38002" -> string! -const Npgsql.PostgresErrorCodes.ModifyingSqlDataNotPermittedSqlRoutineException = "2F002" -> string! -const Npgsql.PostgresErrorCodes.MostSpecificTypeMismatch = "2200G" -> string! -const Npgsql.PostgresErrorCodes.NameTooLong = "42622" -> string! -const Npgsql.PostgresErrorCodes.NoActiveSqlTransaction = "25P01" -> string! -const Npgsql.PostgresErrorCodes.NoActiveSqlTransactionForBranchTransaction = "25005" -> string! -const Npgsql.PostgresErrorCodes.NoAdditionalDynamicResultSetsReturned = "02001" -> string! -const Npgsql.PostgresErrorCodes.NoData = "02000" -> string! 
-const Npgsql.PostgresErrorCodes.NoDataFound = "P0002" -> string! -const Npgsql.PostgresErrorCodes.NonstandardUseOfEscapeCharacter = "22P06" -> string! -const Npgsql.PostgresErrorCodes.NotAnXmlDocument = "2200L" -> string! -const Npgsql.PostgresErrorCodes.NotNullViolation = "23502" -> string! -const Npgsql.PostgresErrorCodes.NullValueEliminatedInSetFunctionWarning = "01003" -> string! -const Npgsql.PostgresErrorCodes.NullValueNoIndicatorParameter = "22002" -> string! -const Npgsql.PostgresErrorCodes.NullValueNotAllowed = "22004" -> string! -const Npgsql.PostgresErrorCodes.NullValueNotAllowedExternalRoutineInvocationException = "39004" -> string! -const Npgsql.PostgresErrorCodes.NumericValueOutOfRange = "22003" -> string! -const Npgsql.PostgresErrorCodes.ObjectInUse = "55006" -> string! -const Npgsql.PostgresErrorCodes.ObjectNotInPrerequisiteState = "55000" -> string! -const Npgsql.PostgresErrorCodes.OperatorIntervention = "57000" -> string! -const Npgsql.PostgresErrorCodes.OutOfMemory = "53200" -> string! -const Npgsql.PostgresErrorCodes.PlpgsqlError = "P0000" -> string! -const Npgsql.PostgresErrorCodes.PrivilegeNotGrantedWarning = "01007" -> string! -const Npgsql.PostgresErrorCodes.PrivilegeNotRevokedWarning = "01006" -> string! -const Npgsql.PostgresErrorCodes.ProgramLimitExceeded = "54000" -> string! -const Npgsql.PostgresErrorCodes.ProhibitedSqlStatementAttemptedExternalRoutineException = "38003" -> string! -const Npgsql.PostgresErrorCodes.ProhibitedSqlStatementAttemptedSqlRoutineException = "2F003" -> string! -const Npgsql.PostgresErrorCodes.ProtocolViolation = "08P01" -> string! -const Npgsql.PostgresErrorCodes.QueryCanceled = "57014" -> string! -const Npgsql.PostgresErrorCodes.RaiseException = "P0001" -> string! -const Npgsql.PostgresErrorCodes.ReadingSqlDataNotPermittedExternalRoutineException = "38004" -> string! -const Npgsql.PostgresErrorCodes.ReadingSqlDataNotPermittedSqlRoutineException = "2F004" -> string! 
-const Npgsql.PostgresErrorCodes.ReadOnlySqlTransaction = "25006" -> string! -const Npgsql.PostgresErrorCodes.ReservedName = "42939" -> string! -const Npgsql.PostgresErrorCodes.RestrictViolation = "23001" -> string! -const Npgsql.PostgresErrorCodes.SavepointException = "3B000" -> string! -const Npgsql.PostgresErrorCodes.SchemaAndDataStatementMixingNotSupported = "25007" -> string! -const Npgsql.PostgresErrorCodes.SerializationFailure = "40001" -> string! -const Npgsql.PostgresErrorCodes.SnapshotFailure = "72000" -> string! -const Npgsql.PostgresErrorCodes.SqlClientUnableToEstablishSqlConnection = "08001" -> string! -const Npgsql.PostgresErrorCodes.SqlRoutineException = "2F000" -> string! -const Npgsql.PostgresErrorCodes.SqlServerRejectedEstablishmentOfSqlConnection = "08004" -> string! -const Npgsql.PostgresErrorCodes.SqlStatementNotYetComplete = "03000" -> string! -const Npgsql.PostgresErrorCodes.SrfProtocolViolatedExternalRoutineInvocationException = "39P02" -> string! -const Npgsql.PostgresErrorCodes.StackedDiagnosticsAccessedWithoutActiveHandler = "0Z002" -> string! -const Npgsql.PostgresErrorCodes.StatementCompletionUnknown = "40003" -> string! -const Npgsql.PostgresErrorCodes.StatementTooComplex = "54001" -> string! -const Npgsql.PostgresErrorCodes.StringDataLengthMismatch = "22026" -> string! -const Npgsql.PostgresErrorCodes.StringDataRightTruncation = "22001" -> string! -const Npgsql.PostgresErrorCodes.StringDataRightTruncationWarning = "01004" -> string! -const Npgsql.PostgresErrorCodes.SubstringError = "22011" -> string! -const Npgsql.PostgresErrorCodes.SuccessfulCompletion = "00000" -> string! -const Npgsql.PostgresErrorCodes.SyntaxError = "42601" -> string! -const Npgsql.PostgresErrorCodes.SyntaxErrorOrAccessRuleViolation = "42000" -> string! -const Npgsql.PostgresErrorCodes.SystemError = "58000" -> string! -const Npgsql.PostgresErrorCodes.TooManyArguments = "54023" -> string! -const Npgsql.PostgresErrorCodes.TooManyColumns = "54011" -> string! 
-const Npgsql.PostgresErrorCodes.TooManyConnections = "53300" -> string! -const Npgsql.PostgresErrorCodes.TooManyRows = "P0003" -> string! -const Npgsql.PostgresErrorCodes.TransactionIntegrityConstraintViolation = "40002" -> string! -const Npgsql.PostgresErrorCodes.TransactionResolutionUnknown = "08007" -> string! -const Npgsql.PostgresErrorCodes.TransactionRollback = "40000" -> string! -const Npgsql.PostgresErrorCodes.TriggeredActionException = "09000" -> string! -const Npgsql.PostgresErrorCodes.TriggeredDataChangeViolation = "27000" -> string! -const Npgsql.PostgresErrorCodes.TriggerProtocolViolatedExternalRoutineInvocationException = "39P01" -> string! -const Npgsql.PostgresErrorCodes.TrimError = "22027" -> string! -const Npgsql.PostgresErrorCodes.UndefinedColumn = "42703" -> string! -const Npgsql.PostgresErrorCodes.UndefinedFile = "58P01" -> string! -const Npgsql.PostgresErrorCodes.UndefinedFunction = "42883" -> string! -const Npgsql.PostgresErrorCodes.UndefinedObject = "42704" -> string! -const Npgsql.PostgresErrorCodes.UndefinedParameter = "42P02" -> string! -const Npgsql.PostgresErrorCodes.UndefinedTable = "42P01" -> string! -const Npgsql.PostgresErrorCodes.UniqueViolation = "23505" -> string! -const Npgsql.PostgresErrorCodes.UnterminatedCString = "22024" -> string! -const Npgsql.PostgresErrorCodes.UntranslatableCharacter = "22P05" -> string! -const Npgsql.PostgresErrorCodes.Warning = "01000" -> string! -const Npgsql.PostgresErrorCodes.WindowingError = "42P20" -> string! -const Npgsql.PostgresErrorCodes.WithCheckOptionViolation = "44000" -> string! -const Npgsql.PostgresErrorCodes.WrongObjectType = "42809" -> string! -const Npgsql.PostgresErrorCodes.ZeroLengthCharacterString = "2200F" -> string! 
+#nullable enable Npgsql.ArrayNullabilityMode Npgsql.ArrayNullabilityMode.Always = 1 -> Npgsql.ArrayNullabilityMode Npgsql.ArrayNullabilityMode.Never = 0 -> Npgsql.ArrayNullabilityMode @@ -253,6 +12,10 @@ Npgsql.ChannelBinding Npgsql.ChannelBinding.Disable = 0 -> Npgsql.ChannelBinding Npgsql.ChannelBinding.Prefer = 1 -> Npgsql.ChannelBinding Npgsql.ChannelBinding.Require = 2 -> Npgsql.ChannelBinding +Npgsql.GssEncryptionMode +Npgsql.GssEncryptionMode.Disable = 0 -> Npgsql.GssEncryptionMode +Npgsql.GssEncryptionMode.Prefer = 1 -> Npgsql.GssEncryptionMode +Npgsql.GssEncryptionMode.Require = 2 -> Npgsql.GssEncryptionMode Npgsql.INpgsqlNameTranslator Npgsql.INpgsqlNameTranslator.TranslateMemberName(string! clrName) -> string! Npgsql.INpgsqlNameTranslator.TranslateTypeName(string! clrName) -> string! @@ -261,8 +24,8 @@ Npgsql.NameTranslation.NpgsqlNullNameTranslator.NpgsqlNullNameTranslator() -> vo Npgsql.NameTranslation.NpgsqlNullNameTranslator.TranslateMemberName(string! clrName) -> string! Npgsql.NameTranslation.NpgsqlNullNameTranslator.TranslateTypeName(string! clrName) -> string! Npgsql.NameTranslation.NpgsqlSnakeCaseNameTranslator -Npgsql.NameTranslation.NpgsqlSnakeCaseNameTranslator.NpgsqlSnakeCaseNameTranslator(bool legacyMode, System.Globalization.CultureInfo? culture = null) -> void Npgsql.NameTranslation.NpgsqlSnakeCaseNameTranslator.NpgsqlSnakeCaseNameTranslator(System.Globalization.CultureInfo? culture = null) -> void +Npgsql.NameTranslation.NpgsqlSnakeCaseNameTranslator.NpgsqlSnakeCaseNameTranslator(bool legacyMode, System.Globalization.CultureInfo? culture = null) -> void Npgsql.NameTranslation.NpgsqlSnakeCaseNameTranslator.TranslateMemberName(string! clrName) -> string! Npgsql.NameTranslation.NpgsqlSnakeCaseNameTranslator.TranslateTypeName(string! clrName) -> string! 
Npgsql.NoticeEventHandler @@ -327,8 +90,8 @@ Npgsql.NpgsqlBinaryImporter.Write(T value) -> void Npgsql.NpgsqlBinaryImporter.Write(T value, NpgsqlTypes.NpgsqlDbType npgsqlDbType) -> void Npgsql.NpgsqlBinaryImporter.Write(T value, string! dataTypeName) -> void Npgsql.NpgsqlBinaryImporter.WriteAsync(T value, NpgsqlTypes.NpgsqlDbType npgsqlDbType, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -Npgsql.NpgsqlBinaryImporter.WriteAsync(T value, string! dataTypeName, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! Npgsql.NpgsqlBinaryImporter.WriteAsync(T value, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +Npgsql.NpgsqlBinaryImporter.WriteAsync(T value, string! dataTypeName, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! Npgsql.NpgsqlBinaryImporter.WriteNull() -> void Npgsql.NpgsqlBinaryImporter.WriteNullAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! Npgsql.NpgsqlBinaryImporter.WriteRow(params object?[]! values) -> void @@ -372,15 +135,16 @@ Npgsql.NpgsqlConnection.BeginBinaryImport(string! copyFromCommand) -> Npgsql.Npg Npgsql.NpgsqlConnection.BeginBinaryImportAsync(string! copyFromCommand, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! Npgsql.NpgsqlConnection.BeginRawBinaryCopy(string! copyCommand) -> Npgsql.NpgsqlRawCopyStream! Npgsql.NpgsqlConnection.BeginRawBinaryCopyAsync(string! copyCommand, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! 
-Npgsql.NpgsqlConnection.BeginTextExport(string! copyToCommand) -> System.IO.TextReader! -Npgsql.NpgsqlConnection.BeginTextExportAsync(string! copyToCommand, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -Npgsql.NpgsqlConnection.BeginTextImport(string! copyFromCommand) -> System.IO.TextWriter! -Npgsql.NpgsqlConnection.BeginTextImportAsync(string! copyFromCommand, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +Npgsql.NpgsqlConnection.BeginTextExport(string! copyToCommand) -> Npgsql.NpgsqlCopyTextReader! +Npgsql.NpgsqlConnection.BeginTextExportAsync(string! copyToCommand, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +Npgsql.NpgsqlConnection.BeginTextImport(string! copyFromCommand) -> Npgsql.NpgsqlCopyTextWriter! +Npgsql.NpgsqlConnection.BeginTextImportAsync(string! copyFromCommand, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! Npgsql.NpgsqlConnection.BeginTransaction() -> Npgsql.NpgsqlTransaction! Npgsql.NpgsqlConnection.BeginTransaction(System.Data.IsolationLevel level) -> Npgsql.NpgsqlTransaction! Npgsql.NpgsqlConnection.BeginTransactionAsync(System.Data.IsolationLevel level, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.ValueTask Npgsql.NpgsqlConnection.BeginTransactionAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.ValueTask Npgsql.NpgsqlConnection.CloneWith(string! connectionString) -> Npgsql.NpgsqlConnection! +Npgsql.NpgsqlConnection.CloneWithAsync(string! 
connectionString, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.ValueTask Npgsql.NpgsqlConnection.CommandTimeout.get -> int Npgsql.NpgsqlConnection.CreateBatch() -> Npgsql.NpgsqlBatch! Npgsql.NpgsqlConnection.CreateCommand() -> Npgsql.NpgsqlCommand! @@ -393,8 +157,8 @@ Npgsql.NpgsqlConnection.Notification -> Npgsql.NotificationEventHandler? Npgsql.NpgsqlConnection.NpgsqlConnection() -> void Npgsql.NpgsqlConnection.NpgsqlConnection(string? connectionString) -> void Npgsql.NpgsqlConnection.Port.get -> int -Npgsql.NpgsqlConnection.PostgresParameters.get -> System.Collections.Generic.IReadOnlyDictionary! Npgsql.NpgsqlConnection.PostgreSqlVersion.get -> System.Version! +Npgsql.NpgsqlConnection.PostgresParameters.get -> System.Collections.Generic.IReadOnlyDictionary! Npgsql.NpgsqlConnection.ProcessID.get -> int Npgsql.NpgsqlConnection.ProvideClientCertificatesCallback.get -> Npgsql.ProvideClientCertificatesCallback? Npgsql.NpgsqlConnection.ProvideClientCertificatesCallback.set -> void @@ -403,6 +167,8 @@ Npgsql.NpgsqlConnection.ProvidePasswordCallback.set -> void Npgsql.NpgsqlConnection.ReloadTypes() -> void Npgsql.NpgsqlConnection.ReloadTypesAsync() -> System.Threading.Tasks.Task! Npgsql.NpgsqlConnection.ReloadTypesAsync(System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! +Npgsql.NpgsqlConnection.SslClientAuthenticationOptionsCallback.get -> System.Action? +Npgsql.NpgsqlConnection.SslClientAuthenticationOptionsCallback.set -> void Npgsql.NpgsqlConnection.Timezone.get -> string! Npgsql.NpgsqlConnection.TypeMapper.get -> Npgsql.TypeMapping.INpgsqlTypeMapper! Npgsql.NpgsqlConnection.UnprepareAll() -> void @@ -410,11 +176,11 @@ Npgsql.NpgsqlConnection.UserCertificateValidationCallback.get -> System.Net.Secu Npgsql.NpgsqlConnection.UserCertificateValidationCallback.set -> void Npgsql.NpgsqlConnection.UserName.get -> string? 
Npgsql.NpgsqlConnection.Wait() -> void -Npgsql.NpgsqlConnection.Wait(int timeout) -> bool Npgsql.NpgsqlConnection.Wait(System.TimeSpan timeout) -> bool -Npgsql.NpgsqlConnection.WaitAsync(int timeout, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +Npgsql.NpgsqlConnection.Wait(int timeout) -> bool Npgsql.NpgsqlConnection.WaitAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! Npgsql.NpgsqlConnection.WaitAsync(System.TimeSpan timeout, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +Npgsql.NpgsqlConnection.WaitAsync(int timeout, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! Npgsql.NpgsqlConnectionStringBuilder Npgsql.NpgsqlConnectionStringBuilder.Add(System.Collections.Generic.KeyValuePair item) -> void Npgsql.NpgsqlConnectionStringBuilder.ApplicationName.get -> string? @@ -448,12 +214,16 @@ Npgsql.NpgsqlConnectionStringBuilder.Encoding.set -> void Npgsql.NpgsqlConnectionStringBuilder.Enlist.get -> bool Npgsql.NpgsqlConnectionStringBuilder.Enlist.set -> void Npgsql.NpgsqlConnectionStringBuilder.GetEnumerator() -> System.Collections.Generic.IEnumerator>! +Npgsql.NpgsqlConnectionStringBuilder.GssEncryptionMode.get -> Npgsql.GssEncryptionMode +Npgsql.NpgsqlConnectionStringBuilder.GssEncryptionMode.set -> void Npgsql.NpgsqlConnectionStringBuilder.Host.get -> string? 
Npgsql.NpgsqlConnectionStringBuilder.Host.set -> void Npgsql.NpgsqlConnectionStringBuilder.HostRecheckSeconds.get -> int Npgsql.NpgsqlConnectionStringBuilder.HostRecheckSeconds.set -> void Npgsql.NpgsqlConnectionStringBuilder.IncludeErrorDetail.get -> bool Npgsql.NpgsqlConnectionStringBuilder.IncludeErrorDetail.set -> void +Npgsql.NpgsqlConnectionStringBuilder.IncludeFailedBatchedCommand.get -> bool +Npgsql.NpgsqlConnectionStringBuilder.IncludeFailedBatchedCommand.set -> void Npgsql.NpgsqlConnectionStringBuilder.IncludeRealm.get -> bool Npgsql.NpgsqlConnectionStringBuilder.IncludeRealm.set -> void Npgsql.NpgsqlConnectionStringBuilder.InternalCommandTimeout.get -> int @@ -475,8 +245,6 @@ Npgsql.NpgsqlConnectionStringBuilder.MaxPoolSize.get -> int Npgsql.NpgsqlConnectionStringBuilder.MaxPoolSize.set -> void Npgsql.NpgsqlConnectionStringBuilder.MinPoolSize.get -> int Npgsql.NpgsqlConnectionStringBuilder.MinPoolSize.set -> void -Npgsql.NpgsqlConnectionStringBuilder.Multiplexing.get -> bool -Npgsql.NpgsqlConnectionStringBuilder.Multiplexing.set -> void Npgsql.NpgsqlConnectionStringBuilder.NoResetOnClose.get -> bool Npgsql.NpgsqlConnectionStringBuilder.NoResetOnClose.set -> void Npgsql.NpgsqlConnectionStringBuilder.NpgsqlConnectionStringBuilder() -> void @@ -497,6 +265,8 @@ Npgsql.NpgsqlConnectionStringBuilder.Port.set -> void Npgsql.NpgsqlConnectionStringBuilder.ReadBufferSize.get -> int Npgsql.NpgsqlConnectionStringBuilder.ReadBufferSize.set -> void Npgsql.NpgsqlConnectionStringBuilder.Remove(System.Collections.Generic.KeyValuePair item) -> bool +Npgsql.NpgsqlConnectionStringBuilder.RequireAuth.get -> string? +Npgsql.NpgsqlConnectionStringBuilder.RequireAuth.set -> void Npgsql.NpgsqlConnectionStringBuilder.RootCertificate.get -> string? Npgsql.NpgsqlConnectionStringBuilder.RootCertificate.set -> void Npgsql.NpgsqlConnectionStringBuilder.SearchPath.get -> string? @@ -513,6 +283,8 @@ Npgsql.NpgsqlConnectionStringBuilder.SslKey.get -> string? 
Npgsql.NpgsqlConnectionStringBuilder.SslKey.set -> void Npgsql.NpgsqlConnectionStringBuilder.SslMode.get -> Npgsql.SslMode Npgsql.NpgsqlConnectionStringBuilder.SslMode.set -> void +Npgsql.NpgsqlConnectionStringBuilder.SslNegotiation.get -> Npgsql.SslNegotiation +Npgsql.NpgsqlConnectionStringBuilder.SslNegotiation.set -> void Npgsql.NpgsqlConnectionStringBuilder.SslPassword.get -> string? Npgsql.NpgsqlConnectionStringBuilder.SslPassword.set -> void Npgsql.NpgsqlConnectionStringBuilder.TargetSessionAttributes.get -> string? @@ -534,15 +306,17 @@ Npgsql.NpgsqlConnectionStringBuilder.Username.set -> void Npgsql.NpgsqlConnectionStringBuilder.Values.get -> System.Collections.Generic.ICollection! Npgsql.NpgsqlConnectionStringBuilder.WriteBufferSize.get -> int Npgsql.NpgsqlConnectionStringBuilder.WriteBufferSize.set -> void -Npgsql.NpgsqlConnectionStringBuilder.WriteCoalescingBufferThresholdBytes.get -> int -Npgsql.NpgsqlConnectionStringBuilder.WriteCoalescingBufferThresholdBytes.set -> void Npgsql.NpgsqlCopyTextReader Npgsql.NpgsqlCopyTextReader.Cancel() -> void Npgsql.NpgsqlCopyTextReader.CancelAsync() -> System.Threading.Tasks.Task! Npgsql.NpgsqlCopyTextReader.DisposeAsync() -> System.Threading.Tasks.ValueTask +Npgsql.NpgsqlCopyTextReader.Timeout.get -> int +Npgsql.NpgsqlCopyTextReader.Timeout.set -> void Npgsql.NpgsqlCopyTextWriter Npgsql.NpgsqlCopyTextWriter.Cancel() -> void Npgsql.NpgsqlCopyTextWriter.CancelAsync() -> System.Threading.Tasks.Task! +Npgsql.NpgsqlCopyTextWriter.Timeout.get -> int +Npgsql.NpgsqlCopyTextWriter.Timeout.set -> void Npgsql.NpgsqlDataAdapter Npgsql.NpgsqlDataAdapter.DeleteCommand.get -> Npgsql.NpgsqlCommand? Npgsql.NpgsqlDataAdapter.DeleteCommand.set -> void @@ -560,7 +334,6 @@ Npgsql.NpgsqlDataAdapter.UpdateCommand.get -> Npgsql.NpgsqlCommand? Npgsql.NpgsqlDataAdapter.UpdateCommand.set -> void Npgsql.NpgsqlDataReader Npgsql.NpgsqlDataReader.GetColumnSchema() -> System.Collections.ObjectModel.ReadOnlyCollection! 
-Npgsql.NpgsqlDataReader.GetColumnSchemaAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task!>! Npgsql.NpgsqlDataReader.GetData(int ordinal) -> Npgsql.NpgsqlNestedDataReader! Npgsql.NpgsqlDataReader.GetDataTypeOID(int ordinal) -> uint Npgsql.NpgsqlDataReader.GetPostgresType(int ordinal) -> Npgsql.PostgresTypes.PostgresType! @@ -578,11 +351,15 @@ Npgsql.NpgsqlDataSource.CreateConnection() -> Npgsql.NpgsqlConnection! Npgsql.NpgsqlDataSource.OpenConnection() -> Npgsql.NpgsqlConnection! Npgsql.NpgsqlDataSource.OpenConnectionAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.ValueTask Npgsql.NpgsqlDataSource.Password.set -> void +Npgsql.NpgsqlDataSource.ReloadTypes() -> void +Npgsql.NpgsqlDataSource.ReloadTypesAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! Npgsql.NpgsqlDataSourceBuilder Npgsql.NpgsqlDataSourceBuilder.AddTypeInfoResolverFactory(Npgsql.Internal.PgTypeInfoResolverFactory! factory) -> void Npgsql.NpgsqlDataSourceBuilder.Build() -> Npgsql.NpgsqlDataSource! Npgsql.NpgsqlDataSourceBuilder.BuildMultiHost() -> Npgsql.NpgsqlMultiHostDataSource! Npgsql.NpgsqlDataSourceBuilder.ConfigureJsonOptions(System.Text.Json.JsonSerializerOptions! serializerOptions) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.ConfigureTracing(System.Action! configureAction) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.ConfigureTypeLoading(System.Action! configureAction) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.ConnectionString.get -> string! Npgsql.NpgsqlDataSourceBuilder.ConnectionStringBuilder.get -> Npgsql.NpgsqlConnectionStringBuilder! Npgsql.NpgsqlDataSourceBuilder.DefaultNameTranslator.get -> Npgsql.INpgsqlNameTranslator! 
@@ -591,10 +368,10 @@ Npgsql.NpgsqlDataSourceBuilder.EnableDynamicJson(System.Type![]? jsonbClrTypes = Npgsql.NpgsqlDataSourceBuilder.EnableParameterLogging(bool parameterLoggingEnabled = true) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.EnableRecordsAsTuples() -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.EnableUnmappedTypes() -> Npgsql.NpgsqlDataSourceBuilder! -Npgsql.NpgsqlDataSourceBuilder.MapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! -Npgsql.NpgsqlDataSourceBuilder.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! -Npgsql.NpgsqlDataSourceBuilder.MapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! -Npgsql.NpgsqlDataSourceBuilder.MapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +Npgsql.NpgsqlDataSourceBuilder.MapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.MapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.MapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.Name.get -> string? Npgsql.NpgsqlDataSourceBuilder.Name.set -> void Npgsql.NpgsqlDataSourceBuilder.NpgsqlDataSourceBuilder(string? 
connectionString = null) -> void @@ -606,44 +383,28 @@ Npgsql.NpgsqlDataSourceBuilder.UseClientCertificate(System.Security.Cryptography Npgsql.NpgsqlDataSourceBuilder.UseClientCertificates(System.Security.Cryptography.X509Certificates.X509CertificateCollection? clientCertificates) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.UseClientCertificatesCallback(System.Action? clientCertificatesCallback) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.UseLoggerFactory(Microsoft.Extensions.Logging.ILoggerFactory? loggerFactory) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.UseNegotiateOptionsCallback(System.Action? negotiateOptionsCallback) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.UsePasswordProvider(System.Func? passwordProvider, System.Func>? passwordProviderAsync) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.UsePeriodicPasswordProvider(System.Func>? passwordProvider, System.TimeSpan successRefreshInterval, System.TimeSpan failureRefreshInterval) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.UsePhysicalConnectionInitializer(System.Action? connectionInitializer, System.Func? connectionInitializerAsync) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.UseRootCertificate(System.Security.Cryptography.X509Certificates.X509Certificate2? rootCertificate) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.UseRootCertificateCallback(System.Func? rootCertificateCallback) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.UseRootCertificates(System.Security.Cryptography.X509Certificates.X509Certificate2Collection? rootCertificates) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.UseRootCertificatesCallback(System.Func? rootCertificateCallback) -> Npgsql.NpgsqlDataSourceBuilder! +Npgsql.NpgsqlDataSourceBuilder.UseSslClientAuthenticationOptionsCallback(System.Action? 
sslClientAuthenticationOptionsCallback) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlDataSourceBuilder.UseUserCertificateValidationCallback(System.Net.Security.RemoteCertificateValidationCallback! userCertificateValidationCallback) -> Npgsql.NpgsqlDataSourceBuilder! Npgsql.NpgsqlException Npgsql.NpgsqlException.BatchCommand.get -> Npgsql.NpgsqlBatchCommand? Npgsql.NpgsqlException.BatchCommand.set -> void Npgsql.NpgsqlException.NpgsqlException() -> void +Npgsql.NpgsqlException.NpgsqlException(System.Runtime.Serialization.SerializationInfo! info, System.Runtime.Serialization.StreamingContext context) -> void Npgsql.NpgsqlException.NpgsqlException(string? message) -> void Npgsql.NpgsqlException.NpgsqlException(string? message, System.Exception? innerException) -> void -Npgsql.NpgsqlException.NpgsqlException(System.Runtime.Serialization.SerializationInfo! info, System.Runtime.Serialization.StreamingContext context) -> void Npgsql.NpgsqlFactory Npgsql.NpgsqlFactory.GetService(System.Type! serviceType) -> object? -Npgsql.NpgsqlLargeObjectManager -Npgsql.NpgsqlLargeObjectManager.Create(uint preferredOid = 0) -> uint -Npgsql.NpgsqlLargeObjectManager.CreateAsync(uint preferredOid, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -Npgsql.NpgsqlLargeObjectManager.ExportRemote(uint oid, string! path) -> void -Npgsql.NpgsqlLargeObjectManager.ExportRemoteAsync(uint oid, string! path, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -Npgsql.NpgsqlLargeObjectManager.Has64BitSupport.get -> bool -Npgsql.NpgsqlLargeObjectManager.ImportRemote(string! path, uint oid = 0) -> void -Npgsql.NpgsqlLargeObjectManager.ImportRemoteAsync(string! path, uint oid, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! 
-Npgsql.NpgsqlLargeObjectManager.MaxTransferBlockSize.get -> int -Npgsql.NpgsqlLargeObjectManager.MaxTransferBlockSize.set -> void -Npgsql.NpgsqlLargeObjectManager.NpgsqlLargeObjectManager(Npgsql.NpgsqlConnection! connection) -> void -Npgsql.NpgsqlLargeObjectManager.OpenRead(uint oid) -> Npgsql.NpgsqlLargeObjectStream! -Npgsql.NpgsqlLargeObjectManager.OpenReadAsync(uint oid, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -Npgsql.NpgsqlLargeObjectManager.OpenReadWrite(uint oid) -> Npgsql.NpgsqlLargeObjectStream! -Npgsql.NpgsqlLargeObjectManager.OpenReadWriteAsync(uint oid, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -Npgsql.NpgsqlLargeObjectManager.Unlink(uint oid) -> void -Npgsql.NpgsqlLargeObjectManager.UnlinkAsync(uint oid, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -Npgsql.NpgsqlLargeObjectStream -Npgsql.NpgsqlLargeObjectStream.GetLengthAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -Npgsql.NpgsqlLargeObjectStream.Has64BitSupport.get -> bool -Npgsql.NpgsqlLargeObjectStream.SeekAsync(long offset, System.IO.SeekOrigin origin, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -Npgsql.NpgsqlLargeObjectStream.SetLength(long value, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! 
Npgsql.NpgsqlLoggingConfiguration +Npgsql.NpgsqlMetricsOptions +Npgsql.NpgsqlMetricsOptions.NpgsqlMetricsOptions() -> void Npgsql.NpgsqlMultiHostDataSource Npgsql.NpgsqlMultiHostDataSource.ClearDatabaseStates() -> void Npgsql.NpgsqlMultiHostDataSource.CreateConnection(Npgsql.TargetSessionAttributes targetSessionAttributes) -> Npgsql.NpgsqlConnection! @@ -656,8 +417,8 @@ Npgsql.NpgsqlNoticeEventArgs Npgsql.NpgsqlNoticeEventArgs.Notice.get -> Npgsql.PostgresNotice! Npgsql.NpgsqlNotificationEventArgs Npgsql.NpgsqlNotificationEventArgs.Channel.get -> string! -Npgsql.NpgsqlNotificationEventArgs.Payload.get -> string! Npgsql.NpgsqlNotificationEventArgs.PID.get -> int +Npgsql.NpgsqlNotificationEventArgs.Payload.get -> string! Npgsql.NpgsqlOperationInProgressException Npgsql.NpgsqlOperationInProgressException.CommandInProgress.get -> Npgsql.NpgsqlCommand? Npgsql.NpgsqlOperationInProgressException.NpgsqlOperationInProgressException(Npgsql.NpgsqlCommand! command) -> void @@ -675,10 +436,10 @@ Npgsql.NpgsqlParameter.NpgsqlParameter(string! parameterName, System.Data.DbType Npgsql.NpgsqlParameter.NpgsqlParameter(string? parameterName, NpgsqlTypes.NpgsqlDbType parameterType) -> void Npgsql.NpgsqlParameter.NpgsqlParameter(string? parameterName, NpgsqlTypes.NpgsqlDbType parameterType, int size) -> void Npgsql.NpgsqlParameter.NpgsqlParameter(string? parameterName, NpgsqlTypes.NpgsqlDbType parameterType, int size, string? sourceColumn) -> void -Npgsql.NpgsqlParameter.NpgsqlParameter(string? parameterName, object? value) -> void Npgsql.NpgsqlParameter.NpgsqlParameter(string? parameterName, System.Data.DbType parameterType) -> void Npgsql.NpgsqlParameter.NpgsqlParameter(string? parameterName, System.Data.DbType parameterType, int size) -> void Npgsql.NpgsqlParameter.NpgsqlParameter(string? parameterName, System.Data.DbType parameterType, int size, string? sourceColumn) -> void +Npgsql.NpgsqlParameter.NpgsqlParameter(string? parameterName, object? 
value) -> void Npgsql.NpgsqlParameter.NpgsqlValue.get -> object? Npgsql.NpgsqlParameter.NpgsqlValue.set -> void Npgsql.NpgsqlParameter.PostgresType.get -> Npgsql.PostgresTypes.PostgresType? @@ -710,12 +471,12 @@ Npgsql.NpgsqlParameterCollection.IndexOf(Npgsql.NpgsqlParameter! item) -> int Npgsql.NpgsqlParameterCollection.Insert(int index, Npgsql.NpgsqlParameter! item) -> void Npgsql.NpgsqlParameterCollection.Remove(Npgsql.NpgsqlParameter! item) -> bool Npgsql.NpgsqlParameterCollection.Remove(string! parameterName) -> void +Npgsql.NpgsqlParameterCollection.ToArray() -> Npgsql.NpgsqlParameter![]! +Npgsql.NpgsqlParameterCollection.TryGetValue(string! parameterName, out Npgsql.NpgsqlParameter? parameter) -> bool Npgsql.NpgsqlParameterCollection.this[int index].get -> Npgsql.NpgsqlParameter! Npgsql.NpgsqlParameterCollection.this[int index].set -> void Npgsql.NpgsqlParameterCollection.this[string! parameterName].get -> Npgsql.NpgsqlParameter! Npgsql.NpgsqlParameterCollection.this[string! parameterName].set -> void -Npgsql.NpgsqlParameterCollection.ToArray() -> Npgsql.NpgsqlParameter![]! -Npgsql.NpgsqlParameterCollection.TryGetValue(string! parameterName, out Npgsql.NpgsqlParameter? parameter) -> bool Npgsql.NpgsqlRawCopyStream Npgsql.NpgsqlRawCopyStream.Cancel() -> void Npgsql.NpgsqlRawCopyStream.CancelAsync() -> System.Threading.Tasks.Task! @@ -730,27 +491,33 @@ Npgsql.NpgsqlSlimDataSourceBuilder.AddTypeInfoResolverFactory(Npgsql.Internal.Pg Npgsql.NpgsqlSlimDataSourceBuilder.Build() -> Npgsql.NpgsqlDataSource! Npgsql.NpgsqlSlimDataSourceBuilder.BuildMultiHost() -> Npgsql.NpgsqlMultiHostDataSource! Npgsql.NpgsqlSlimDataSourceBuilder.ConfigureJsonOptions(System.Text.Json.JsonSerializerOptions! serializerOptions) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.ConfigureTracing(System.Action! configureAction) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.ConfigureTypeLoading(System.Action! 
configureAction) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.ConnectionString.get -> string! Npgsql.NpgsqlSlimDataSourceBuilder.ConnectionStringBuilder.get -> Npgsql.NpgsqlConnectionStringBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.DefaultNameTranslator.get -> Npgsql.INpgsqlNameTranslator! Npgsql.NpgsqlSlimDataSourceBuilder.DefaultNameTranslator.set -> void Npgsql.NpgsqlSlimDataSourceBuilder.EnableArrays() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableCube() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableDynamicJson(System.Type![]? jsonbClrTypes = null, System.Type![]? jsonClrTypes = null) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableExtraConversions() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableFullTextSearch() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableGeometricTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableIntegratedSecurity() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableJsonTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableLTree() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableMultiranges() -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.EnableNetworkTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableParameterLogging(bool parameterLoggingEnabled = true) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableRanges() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableRecords() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableRecordsAsTuples() -> Npgsql.NpgsqlSlimDataSourceBuilder! 
Npgsql.NpgsqlSlimDataSourceBuilder.EnableTransportSecurity() -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.EnableUnmappedTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder.MapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! -Npgsql.NpgsqlSlimDataSourceBuilder.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! -Npgsql.NpgsqlSlimDataSourceBuilder.MapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! -Npgsql.NpgsqlSlimDataSourceBuilder.MapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! +Npgsql.NpgsqlSlimDataSourceBuilder.MapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.MapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.MapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.Name.get -> string? Npgsql.NpgsqlSlimDataSourceBuilder.Name.set -> void Npgsql.NpgsqlSlimDataSourceBuilder.NpgsqlSlimDataSourceBuilder(string? 
connectionString = null) -> void @@ -762,16 +529,34 @@ Npgsql.NpgsqlSlimDataSourceBuilder.UseClientCertificate(System.Security.Cryptogr Npgsql.NpgsqlSlimDataSourceBuilder.UseClientCertificates(System.Security.Cryptography.X509Certificates.X509CertificateCollection? clientCertificates) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UseClientCertificatesCallback(System.Action? clientCertificatesCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UseLoggerFactory(Microsoft.Extensions.Logging.ILoggerFactory? loggerFactory) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.UseNegotiateOptionsCallback(System.Action? negotiateOptionsCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UsePasswordProvider(System.Func? passwordProvider, System.Func>? passwordProviderAsync) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UsePeriodicPasswordProvider(System.Func>? passwordProvider, System.TimeSpan successRefreshInterval, System.TimeSpan failureRefreshInterval) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UsePhysicalConnectionInitializer(System.Action? connectionInitializer, System.Func? connectionInitializerAsync) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UseRootCertificate(System.Security.Cryptography.X509Certificates.X509Certificate2? rootCertificate) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UseRootCertificateCallback(System.Func? rootCertificateCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.UseRootCertificates(System.Security.Cryptography.X509Certificates.X509Certificate2Collection? rootCertificates) -> Npgsql.NpgsqlSlimDataSourceBuilder! +Npgsql.NpgsqlSlimDataSourceBuilder.UseRootCertificatesCallback(System.Func? rootCertificateCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! 
+Npgsql.NpgsqlSlimDataSourceBuilder.UseSslClientAuthenticationOptionsCallback(System.Action? sslClientAuthenticationOptionsCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! Npgsql.NpgsqlSlimDataSourceBuilder.UseUserCertificateValidationCallback(System.Net.Security.RemoteCertificateValidationCallback! userCertificateValidationCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlTracingOptions -Npgsql.NpgsqlTracingOptions.NpgsqlTracingOptions() -> void +Npgsql.NpgsqlTracingOptionsBuilder +Npgsql.NpgsqlTracingOptionsBuilder.ConfigureBatchEnrichmentCallback(System.Action? batchEnrichmentCallback) -> Npgsql.NpgsqlTracingOptionsBuilder! +Npgsql.NpgsqlTracingOptionsBuilder.ConfigureBatchFilter(System.Func? batchFilter) -> Npgsql.NpgsqlTracingOptionsBuilder! +Npgsql.NpgsqlTracingOptionsBuilder.ConfigureBatchSpanNameProvider(System.Func? batchSpanNameProvider) -> Npgsql.NpgsqlTracingOptionsBuilder! +Npgsql.NpgsqlTracingOptionsBuilder.ConfigureCommandEnrichmentCallback(System.Action? commandEnrichmentCallback) -> Npgsql.NpgsqlTracingOptionsBuilder! +Npgsql.NpgsqlTracingOptionsBuilder.ConfigureCommandFilter(System.Func? commandFilter) -> Npgsql.NpgsqlTracingOptionsBuilder! +Npgsql.NpgsqlTracingOptionsBuilder.ConfigureCommandSpanNameProvider(System.Func? commandSpanNameProvider) -> Npgsql.NpgsqlTracingOptionsBuilder! +Npgsql.NpgsqlTracingOptionsBuilder.ConfigureCopyOperationEnrichmentCallback(System.Action? copyOperationEnrichmentCallback) -> Npgsql.NpgsqlTracingOptionsBuilder! +Npgsql.NpgsqlTracingOptionsBuilder.ConfigureCopyOperationFilter(System.Func? copyOperationFilter) -> Npgsql.NpgsqlTracingOptionsBuilder! +Npgsql.NpgsqlTracingOptionsBuilder.ConfigureCopyOperationSpanNameProvider(System.Func? copyOperationSpanNameProvider) -> Npgsql.NpgsqlTracingOptionsBuilder! +Npgsql.NpgsqlTracingOptionsBuilder.EnableFirstResponseEvent(bool enable = true) -> Npgsql.NpgsqlTracingOptionsBuilder! 
+Npgsql.NpgsqlTracingOptionsBuilder.EnablePhysicalOpenTracing(bool enable = true) -> Npgsql.NpgsqlTracingOptionsBuilder! Npgsql.NpgsqlTransaction Npgsql.NpgsqlTransaction.Connection.get -> Npgsql.NpgsqlConnection? +Npgsql.NpgsqlTypeLoadingOptionsBuilder +Npgsql.NpgsqlTypeLoadingOptionsBuilder.EnableTableCompositesLoading(bool enable = true) -> Npgsql.NpgsqlTypeLoadingOptionsBuilder! +Npgsql.NpgsqlTypeLoadingOptionsBuilder.EnableTypeLoading(bool enable = true) -> Npgsql.NpgsqlTypeLoadingOptionsBuilder! +Npgsql.NpgsqlTypeLoadingOptionsBuilder.SetTypeLoadingSchemas(params System.Collections.Generic.IEnumerable? schemas) -> Npgsql.NpgsqlTypeLoadingOptionsBuilder! Npgsql.PostgresErrorCodes Npgsql.PostgresException Npgsql.PostgresException.ColumnName.get -> string? @@ -920,10 +705,11 @@ Npgsql.Replication.PgOutput.Messages.LogicalDecodingMessage.Prefix.get -> string Npgsql.Replication.PgOutput.Messages.OriginMessage Npgsql.Replication.PgOutput.Messages.OriginMessage.OriginCommitLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber Npgsql.Replication.PgOutput.Messages.OriginMessage.OriginName.get -> string! +Npgsql.Replication.PgOutput.Messages.ParallelStreamAbortMessage +Npgsql.Replication.PgOutput.Messages.ParallelStreamAbortMessage.AbortLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber +Npgsql.Replication.PgOutput.Messages.ParallelStreamAbortMessage.AbortTimestamp.get -> System.DateTime Npgsql.Replication.PgOutput.Messages.PgOutputReplicationMessage Npgsql.Replication.PgOutput.Messages.PgOutputReplicationMessage.PgOutputReplicationMessage() -> void -Npgsql.Replication.PgOutput.Messages.PreparedTransactionControlMessage -Npgsql.Replication.PgOutput.Messages.PreparedTransactionControlMessage.TransactionGid.get -> string! 
Npgsql.Replication.PgOutput.Messages.PrepareMessage Npgsql.Replication.PgOutput.Messages.PrepareMessage.Flags.get -> Npgsql.Replication.PgOutput.Messages.PrepareMessage.PrepareFlags Npgsql.Replication.PgOutput.Messages.PrepareMessage.PrepareFlags @@ -932,6 +718,8 @@ Npgsql.Replication.PgOutput.Messages.PrepareMessageBase Npgsql.Replication.PgOutput.Messages.PrepareMessageBase.PrepareEndLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber Npgsql.Replication.PgOutput.Messages.PrepareMessageBase.PrepareLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber Npgsql.Replication.PgOutput.Messages.PrepareMessageBase.TransactionPrepareTimestamp.get -> System.DateTime +Npgsql.Replication.PgOutput.Messages.PreparedTransactionControlMessage +Npgsql.Replication.PgOutput.Messages.PreparedTransactionControlMessage.TransactionGid.get -> string! Npgsql.Replication.PgOutput.Messages.RelationMessage Npgsql.Replication.PgOutput.Messages.RelationMessage.Column Npgsql.Replication.PgOutput.Messages.RelationMessage.Column.Column() -> void @@ -980,12 +768,12 @@ Npgsql.Replication.PgOutput.Messages.StreamPrepareMessage.StreamPrepareFlags.Non Npgsql.Replication.PgOutput.Messages.StreamStartMessage Npgsql.Replication.PgOutput.Messages.StreamStartMessage.StreamSegmentIndicator.get -> byte Npgsql.Replication.PgOutput.Messages.StreamStopMessage -Npgsql.Replication.PgOutput.Messages.TransactionalMessage -Npgsql.Replication.PgOutput.Messages.TransactionalMessage.TransactionalMessage() -> void -Npgsql.Replication.PgOutput.Messages.TransactionalMessage.TransactionXid.get -> uint? Npgsql.Replication.PgOutput.Messages.TransactionControlMessage Npgsql.Replication.PgOutput.Messages.TransactionControlMessage.TransactionControlMessage() -> void Npgsql.Replication.PgOutput.Messages.TransactionControlMessage.TransactionXid.get -> uint +Npgsql.Replication.PgOutput.Messages.TransactionalMessage +Npgsql.Replication.PgOutput.Messages.TransactionalMessage.TransactionXid.get -> uint? 
+Npgsql.Replication.PgOutput.Messages.TransactionalMessage.TransactionalMessage() -> void Npgsql.Replication.PgOutput.Messages.TruncateMessage Npgsql.Replication.PgOutput.Messages.TruncateMessage.Options.get -> Npgsql.Replication.PgOutput.Messages.TruncateMessage.TruncateOptions Npgsql.Replication.PgOutput.Messages.TruncateMessage.Relations.get -> System.Collections.Generic.IReadOnlyList! @@ -999,26 +787,38 @@ Npgsql.Replication.PgOutput.Messages.TypeMessage.Namespace.get -> string! Npgsql.Replication.PgOutput.Messages.TypeMessage.TypeId.get -> uint Npgsql.Replication.PgOutput.Messages.UpdateMessage Npgsql.Replication.PgOutput.Messages.UpdateMessage.Relation.get -> Npgsql.Replication.PgOutput.Messages.RelationMessage! +Npgsql.Replication.PgOutput.PgOutputProtocolVersion +Npgsql.Replication.PgOutput.PgOutputProtocolVersion.V1 = 1 -> Npgsql.Replication.PgOutput.PgOutputProtocolVersion +Npgsql.Replication.PgOutput.PgOutputProtocolVersion.V2 = 2 -> Npgsql.Replication.PgOutput.PgOutputProtocolVersion +Npgsql.Replication.PgOutput.PgOutputProtocolVersion.V3 = 3 -> Npgsql.Replication.PgOutput.PgOutputProtocolVersion +Npgsql.Replication.PgOutput.PgOutputProtocolVersion.V4 = 4 -> Npgsql.Replication.PgOutput.PgOutputProtocolVersion Npgsql.Replication.PgOutput.PgOutputReplicationOptions Npgsql.Replication.PgOutput.PgOutputReplicationOptions.Binary.get -> bool? Npgsql.Replication.PgOutput.PgOutputReplicationOptions.Equals(Npgsql.Replication.PgOutput.PgOutputReplicationOptions? other) -> bool Npgsql.Replication.PgOutput.PgOutputReplicationOptions.Messages.get -> bool? -Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PgOutputReplicationOptions(string! publicationName, ulong protocolVersion, bool? binary = null, bool? streaming = null, bool? messages = null, bool? twoPhase = null) -> void +Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PgOutputReplicationOptions(System.Collections.Generic.IEnumerable! 
publicationNames, Npgsql.Replication.PgOutput.PgOutputProtocolVersion protocolVersion, bool? binary = null, Npgsql.Replication.PgOutput.PgOutputStreamingMode? streamingMode = null, bool? messages = null, bool? twoPhase = null) -> void Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PgOutputReplicationOptions(System.Collections.Generic.IEnumerable! publicationNames, ulong protocolVersion, bool? binary = null, bool? streaming = null, bool? messages = null, bool? twoPhase = null) -> void -Npgsql.Replication.PgOutput.PgOutputReplicationOptions.ProtocolVersion.get -> ulong +Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PgOutputReplicationOptions(string! publicationName, Npgsql.Replication.PgOutput.PgOutputProtocolVersion protocolVersion, bool? binary = null, Npgsql.Replication.PgOutput.PgOutputStreamingMode? streamingMode = null, bool? messages = null, bool? twoPhase = null) -> void +Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PgOutputReplicationOptions(string! publicationName, ulong protocolVersion, bool? binary = null, bool? streaming = null, bool? messages = null, bool? twoPhase = null) -> void +Npgsql.Replication.PgOutput.PgOutputReplicationOptions.ProtocolVersion.get -> Npgsql.Replication.PgOutput.PgOutputProtocolVersion Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PublicationNames.get -> System.Collections.Generic.List! -Npgsql.Replication.PgOutput.PgOutputReplicationOptions.Streaming.get -> bool? +Npgsql.Replication.PgOutput.PgOutputReplicationOptions.StreamingMode.get -> Npgsql.Replication.PgOutput.PgOutputStreamingMode? Npgsql.Replication.PgOutput.PgOutputReplicationOptions.TwoPhase.get -> bool? Npgsql.Replication.PgOutput.PgOutputReplicationSlot Npgsql.Replication.PgOutput.PgOutputReplicationSlot.PgOutputReplicationSlot(Npgsql.Replication.PgOutput.PgOutputReplicationSlot! 
slot) -> void Npgsql.Replication.PgOutput.PgOutputReplicationSlot.PgOutputReplicationSlot(Npgsql.Replication.ReplicationSlotOptions options) -> void Npgsql.Replication.PgOutput.PgOutputReplicationSlot.PgOutputReplicationSlot(string! slotName) -> void +Npgsql.Replication.PgOutput.PgOutputStreamingMode +Npgsql.Replication.PgOutput.PgOutputStreamingMode.Off = 0 -> Npgsql.Replication.PgOutput.PgOutputStreamingMode +Npgsql.Replication.PgOutput.PgOutputStreamingMode.On = 1 -> Npgsql.Replication.PgOutput.PgOutputStreamingMode +Npgsql.Replication.PgOutput.PgOutputStreamingMode.Parallel = 2 -> Npgsql.Replication.PgOutput.PgOutputStreamingMode Npgsql.Replication.PgOutput.ReplicationTuple Npgsql.Replication.PgOutput.ReplicationTuple.NumColumns.get -> ushort Npgsql.Replication.PgOutput.ReplicationValue Npgsql.Replication.PgOutput.ReplicationValue.Get(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.ValueTask Npgsql.Replication.PgOutput.ReplicationValue.Get(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.ValueTask Npgsql.Replication.PgOutput.ReplicationValue.GetDataTypeName() -> string! +Npgsql.Replication.PgOutput.ReplicationValue.GetFieldName() -> string! Npgsql.Replication.PgOutput.ReplicationValue.GetFieldType() -> System.Type! Npgsql.Replication.PgOutput.ReplicationValue.GetPostgresType() -> Npgsql.PostgresTypes.PostgresType! Npgsql.Replication.PgOutput.ReplicationValue.GetStream() -> System.IO.Stream! @@ -1165,8 +965,8 @@ Npgsql.Schema.NpgsqlDbColumn.TypeOID.get -> uint Npgsql.Schema.NpgsqlDbColumn.UdtAssemblyQualifiedName.get -> string? 
Npgsql.Schema.NpgsqlDbColumn.UdtAssemblyQualifiedName.set -> void Npgsql.ServerCompatibilityMode -Npgsql.ServerCompatibilityMode.None = 0 -> Npgsql.ServerCompatibilityMode Npgsql.ServerCompatibilityMode.NoTypeLoading = 2 -> Npgsql.ServerCompatibilityMode +Npgsql.ServerCompatibilityMode.None = 0 -> Npgsql.ServerCompatibilityMode Npgsql.ServerCompatibilityMode.Redshift = 1 -> Npgsql.ServerCompatibilityMode Npgsql.SslMode Npgsql.SslMode.Allow = 1 -> Npgsql.SslMode @@ -1175,6 +975,9 @@ Npgsql.SslMode.Prefer = 2 -> Npgsql.SslMode Npgsql.SslMode.Require = 3 -> Npgsql.SslMode Npgsql.SslMode.VerifyCA = 4 -> Npgsql.SslMode Npgsql.SslMode.VerifyFull = 5 -> Npgsql.SslMode +Npgsql.SslNegotiation +Npgsql.SslNegotiation.Direct = 1 -> Npgsql.SslNegotiation +Npgsql.SslNegotiation.Postgres = 0 -> Npgsql.SslNegotiation Npgsql.StatementType Npgsql.StatementType.Call = 11 -> Npgsql.StatementType Npgsql.StatementType.Copy = 8 -> Npgsql.StatementType @@ -1189,6 +992,7 @@ Npgsql.StatementType.Select = 1 -> Npgsql.StatementType Npgsql.StatementType.Unknown = 0 -> Npgsql.StatementType Npgsql.StatementType.Update = 4 -> Npgsql.StatementType Npgsql.TypeMapping.INpgsqlTypeMapper +Npgsql.TypeMapping.INpgsqlTypeMapper.AddDbTypeResolverFactory(Npgsql.Internal.DbTypeResolverFactory! factory) -> void Npgsql.TypeMapping.INpgsqlTypeMapper.AddTypeInfoResolverFactory(Npgsql.Internal.PgTypeInfoResolverFactory! factory) -> void Npgsql.TypeMapping.INpgsqlTypeMapper.ConfigureJsonOptions(System.Text.Json.JsonSerializerOptions! serializerOptions) -> Npgsql.TypeMapping.INpgsqlTypeMapper! Npgsql.TypeMapping.INpgsqlTypeMapper.DefaultNameTranslator.get -> Npgsql.INpgsqlNameTranslator! 
@@ -1212,6 +1016,9 @@ Npgsql.Util.NpgsqlTimeout Npgsql.Util.NpgsqlTimeout.NpgsqlTimeout() -> void NpgsqlTypes.NpgsqlBox NpgsqlTypes.NpgsqlBox.Bottom.get -> double +NpgsqlTypes.NpgsqlBox.Deconstruct(out NpgsqlTypes.NpgsqlPoint lowerLeft, out NpgsqlTypes.NpgsqlPoint upperRight) -> void +NpgsqlTypes.NpgsqlBox.Deconstruct(out double left, out double right, out double bottom, out double top) -> void +NpgsqlTypes.NpgsqlBox.Deconstruct(out double left, out double right, out double bottom, out double top, out double width, out double height) -> void NpgsqlTypes.NpgsqlBox.Equals(NpgsqlTypes.NpgsqlBox other) -> bool NpgsqlTypes.NpgsqlBox.Height.get -> double NpgsqlTypes.NpgsqlBox.IsEmpty.get -> bool @@ -1219,8 +1026,8 @@ NpgsqlTypes.NpgsqlBox.Left.get -> double NpgsqlTypes.NpgsqlBox.LowerLeft.get -> NpgsqlTypes.NpgsqlPoint NpgsqlTypes.NpgsqlBox.LowerLeft.set -> void NpgsqlTypes.NpgsqlBox.NpgsqlBox() -> void -NpgsqlTypes.NpgsqlBox.NpgsqlBox(double top, double right, double bottom, double left) -> void NpgsqlTypes.NpgsqlBox.NpgsqlBox(NpgsqlTypes.NpgsqlPoint upperRight, NpgsqlTypes.NpgsqlPoint lowerLeft) -> void +NpgsqlTypes.NpgsqlBox.NpgsqlBox(double top, double right, double bottom, double left) -> void NpgsqlTypes.NpgsqlBox.Right.get -> double NpgsqlTypes.NpgsqlBox.Top.get -> double NpgsqlTypes.NpgsqlBox.UpperRight.get -> NpgsqlTypes.NpgsqlPoint @@ -1231,27 +1038,43 @@ NpgsqlTypes.NpgsqlCidr.Address.get -> System.Net.IPAddress! NpgsqlTypes.NpgsqlCidr.Deconstruct(out System.Net.IPAddress! address, out byte netmask) -> void NpgsqlTypes.NpgsqlCidr.Netmask.get -> byte NpgsqlTypes.NpgsqlCidr.NpgsqlCidr() -> void -NpgsqlTypes.NpgsqlCidr.NpgsqlCidr(string! addr) -> void NpgsqlTypes.NpgsqlCidr.NpgsqlCidr(System.Net.IPAddress! address, byte netmask) -> void +NpgsqlTypes.NpgsqlCidr.NpgsqlCidr(string! 
addr) -> void NpgsqlTypes.NpgsqlCircle NpgsqlTypes.NpgsqlCircle.Center.get -> NpgsqlTypes.NpgsqlPoint NpgsqlTypes.NpgsqlCircle.Center.set -> void +NpgsqlTypes.NpgsqlCircle.Deconstruct(out NpgsqlTypes.NpgsqlPoint center, out double radius) -> void +NpgsqlTypes.NpgsqlCircle.Deconstruct(out double x, out double y, out double radius) -> void NpgsqlTypes.NpgsqlCircle.Equals(NpgsqlTypes.NpgsqlCircle other) -> bool NpgsqlTypes.NpgsqlCircle.NpgsqlCircle() -> void -NpgsqlTypes.NpgsqlCircle.NpgsqlCircle(double x, double y, double radius) -> void NpgsqlTypes.NpgsqlCircle.NpgsqlCircle(NpgsqlTypes.NpgsqlPoint center, double radius) -> void +NpgsqlTypes.NpgsqlCircle.NpgsqlCircle(double x, double y, double radius) -> void NpgsqlTypes.NpgsqlCircle.Radius.get -> double NpgsqlTypes.NpgsqlCircle.Radius.set -> void NpgsqlTypes.NpgsqlCircle.X.get -> double NpgsqlTypes.NpgsqlCircle.X.set -> void NpgsqlTypes.NpgsqlCircle.Y.get -> double NpgsqlTypes.NpgsqlCircle.Y.set -> void +NpgsqlTypes.NpgsqlCube +NpgsqlTypes.NpgsqlCube.Dimensions.get -> int +NpgsqlTypes.NpgsqlCube.Equals(NpgsqlTypes.NpgsqlCube other) -> bool +NpgsqlTypes.NpgsqlCube.IsPoint.get -> bool +NpgsqlTypes.NpgsqlCube.LowerLeft.get -> System.Collections.Generic.IReadOnlyList! +NpgsqlTypes.NpgsqlCube.NpgsqlCube() -> void +NpgsqlTypes.NpgsqlCube.NpgsqlCube(NpgsqlTypes.NpgsqlCube cube, double coord) -> void +NpgsqlTypes.NpgsqlCube.NpgsqlCube(NpgsqlTypes.NpgsqlCube cube, double lowerLeft, double upperRight) -> void +NpgsqlTypes.NpgsqlCube.NpgsqlCube(System.Collections.Generic.IEnumerable! coords) -> void +NpgsqlTypes.NpgsqlCube.NpgsqlCube(System.Collections.Generic.IEnumerable! lowerLeft, System.Collections.Generic.IEnumerable! upperRight) -> void +NpgsqlTypes.NpgsqlCube.NpgsqlCube(double coord) -> void +NpgsqlTypes.NpgsqlCube.NpgsqlCube(double lowerLeft, double upperRight) -> void +NpgsqlTypes.NpgsqlCube.ToSubset(params int[]! 
indexes) -> NpgsqlTypes.NpgsqlCube +NpgsqlTypes.NpgsqlCube.UpperRight.get -> System.Collections.Generic.IReadOnlyList! NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.Abstime = 33 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.Array = -2147483648 -> NpgsqlTypes.NpgsqlDbType -NpgsqlTypes.NpgsqlDbType.Bigint = 1 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.BigIntMultirange = 536870913 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.BigIntRange = 1073741825 -> NpgsqlTypes.NpgsqlDbType +NpgsqlTypes.NpgsqlDbType.Bigint = 1 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.Bit = 25 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.Boolean = 2 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.Box = 3 -> NpgsqlTypes.NpgsqlDbType @@ -1261,6 +1084,7 @@ NpgsqlTypes.NpgsqlDbType.Cid = 43 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.Cidr = 44 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.Circle = 5 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.Citext = 51 -> NpgsqlTypes.NpgsqlDbType +NpgsqlTypes.NpgsqlDbType.Cube = 63 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.Date = 7 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.DateMultirange = 536870919 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.DateRange = 1073741831 -> NpgsqlTypes.NpgsqlDbType @@ -1276,13 +1100,13 @@ NpgsqlTypes.NpgsqlDbType.IntegerRange = 1073741833 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.InternalChar = 38 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.Interval = 30 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.Json = 35 -> NpgsqlTypes.NpgsqlDbType -NpgsqlTypes.NpgsqlDbType.Jsonb = 36 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.JsonPath = 57 -> NpgsqlTypes.NpgsqlDbType -NpgsqlTypes.NpgsqlDbType.Line = 10 -> NpgsqlTypes.NpgsqlDbType +NpgsqlTypes.NpgsqlDbType.Jsonb = 36 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.LQuery = 61 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.LSeg = 11 -> 
NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.LTree = 60 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.LTxtQuery = 62 -> NpgsqlTypes.NpgsqlDbType +NpgsqlTypes.NpgsqlDbType.Line = 10 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.MacAddr = 34 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.MacAddr8 = 54 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.Money = 12 -> NpgsqlTypes.NpgsqlDbType @@ -1306,13 +1130,13 @@ NpgsqlTypes.NpgsqlDbType.Smallint = 18 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.Text = 19 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.Tid = 53 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.Time = 20 -> NpgsqlTypes.NpgsqlDbType +NpgsqlTypes.NpgsqlDbType.TimeTz = 31 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.Timestamp = 21 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.TimestampMultirange = 536870933 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.TimestampRange = 1073741845 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.TimestampTz = 26 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.TimestampTzMultirange = 536870938 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.TimestampTzRange = 1073741850 -> NpgsqlTypes.NpgsqlDbType -NpgsqlTypes.NpgsqlDbType.TimeTz = 31 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.TsQuery = 46 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.TsVector = 45 -> NpgsqlTypes.NpgsqlDbType NpgsqlTypes.NpgsqlDbType.Unknown = 40 -> NpgsqlTypes.NpgsqlDbType @@ -1327,9 +1151,9 @@ NpgsqlTypes.NpgsqlInet.Address.get -> System.Net.IPAddress! NpgsqlTypes.NpgsqlInet.Deconstruct(out System.Net.IPAddress! address, out byte netmask) -> void NpgsqlTypes.NpgsqlInet.Netmask.get -> byte NpgsqlTypes.NpgsqlInet.NpgsqlInet() -> void -NpgsqlTypes.NpgsqlInet.NpgsqlInet(string! addr) -> void NpgsqlTypes.NpgsqlInet.NpgsqlInet(System.Net.IPAddress! address) -> void NpgsqlTypes.NpgsqlInet.NpgsqlInet(System.Net.IPAddress! 
address, byte netmask) -> void +NpgsqlTypes.NpgsqlInet.NpgsqlInet(string! addr) -> void NpgsqlTypes.NpgsqlInterval NpgsqlTypes.NpgsqlInterval.Days.get -> int NpgsqlTypes.NpgsqlInterval.Equals(NpgsqlTypes.NpgsqlInterval other) -> bool @@ -1337,6 +1161,16 @@ NpgsqlTypes.NpgsqlInterval.Months.get -> int NpgsqlTypes.NpgsqlInterval.NpgsqlInterval() -> void NpgsqlTypes.NpgsqlInterval.NpgsqlInterval(int months, int days, long time) -> void NpgsqlTypes.NpgsqlInterval.Time.get -> long +NpgsqlTypes.NpgsqlLSeg +NpgsqlTypes.NpgsqlLSeg.Deconstruct(out NpgsqlTypes.NpgsqlPoint start, out NpgsqlTypes.NpgsqlPoint end) -> void +NpgsqlTypes.NpgsqlLSeg.End.get -> NpgsqlTypes.NpgsqlPoint +NpgsqlTypes.NpgsqlLSeg.End.set -> void +NpgsqlTypes.NpgsqlLSeg.Equals(NpgsqlTypes.NpgsqlLSeg other) -> bool +NpgsqlTypes.NpgsqlLSeg.NpgsqlLSeg() -> void +NpgsqlTypes.NpgsqlLSeg.NpgsqlLSeg(NpgsqlTypes.NpgsqlPoint start, NpgsqlTypes.NpgsqlPoint end) -> void +NpgsqlTypes.NpgsqlLSeg.NpgsqlLSeg(double startx, double starty, double endx, double endy) -> void +NpgsqlTypes.NpgsqlLSeg.Start.get -> NpgsqlTypes.NpgsqlPoint +NpgsqlTypes.NpgsqlLSeg.Start.set -> void NpgsqlTypes.NpgsqlLine NpgsqlTypes.NpgsqlLine.A.get -> double NpgsqlTypes.NpgsqlLine.A.set -> void @@ -1344,6 +1178,7 @@ NpgsqlTypes.NpgsqlLine.B.get -> double NpgsqlTypes.NpgsqlLine.B.set -> void NpgsqlTypes.NpgsqlLine.C.get -> double NpgsqlTypes.NpgsqlLine.C.set -> void +NpgsqlTypes.NpgsqlLine.Deconstruct(out double a, out double b, out double c) -> void NpgsqlTypes.NpgsqlLine.Equals(NpgsqlTypes.NpgsqlLine other) -> bool NpgsqlTypes.NpgsqlLine.NpgsqlLine() -> void NpgsqlTypes.NpgsqlLine.NpgsqlLine(double a, double b, double c) -> void @@ -1352,15 +1187,6 @@ NpgsqlTypes.NpgsqlLogSequenceNumber.CompareTo(NpgsqlTypes.NpgsqlLogSequenceNumbe NpgsqlTypes.NpgsqlLogSequenceNumber.Equals(NpgsqlTypes.NpgsqlLogSequenceNumber other) -> bool NpgsqlTypes.NpgsqlLogSequenceNumber.NpgsqlLogSequenceNumber() -> void 
NpgsqlTypes.NpgsqlLogSequenceNumber.NpgsqlLogSequenceNumber(ulong value) -> void -NpgsqlTypes.NpgsqlLSeg -NpgsqlTypes.NpgsqlLSeg.End.get -> NpgsqlTypes.NpgsqlPoint -NpgsqlTypes.NpgsqlLSeg.End.set -> void -NpgsqlTypes.NpgsqlLSeg.Equals(NpgsqlTypes.NpgsqlLSeg other) -> bool -NpgsqlTypes.NpgsqlLSeg.NpgsqlLSeg() -> void -NpgsqlTypes.NpgsqlLSeg.NpgsqlLSeg(double startx, double starty, double endx, double endy) -> void -NpgsqlTypes.NpgsqlLSeg.NpgsqlLSeg(NpgsqlTypes.NpgsqlPoint start, NpgsqlTypes.NpgsqlPoint end) -> void -NpgsqlTypes.NpgsqlLSeg.Start.get -> NpgsqlTypes.NpgsqlPoint -NpgsqlTypes.NpgsqlLSeg.Start.set -> void NpgsqlTypes.NpgsqlPath NpgsqlTypes.NpgsqlPath.Add(NpgsqlTypes.NpgsqlPoint item) -> void NpgsqlTypes.NpgsqlPath.Capacity.get -> int @@ -1374,12 +1200,12 @@ NpgsqlTypes.NpgsqlPath.IndexOf(NpgsqlTypes.NpgsqlPoint item) -> int NpgsqlTypes.NpgsqlPath.Insert(int index, NpgsqlTypes.NpgsqlPoint item) -> void NpgsqlTypes.NpgsqlPath.IsReadOnly.get -> bool NpgsqlTypes.NpgsqlPath.NpgsqlPath() -> void +NpgsqlTypes.NpgsqlPath.NpgsqlPath(System.Collections.Generic.IEnumerable! points) -> void +NpgsqlTypes.NpgsqlPath.NpgsqlPath(System.Collections.Generic.IEnumerable! points, bool open) -> void NpgsqlTypes.NpgsqlPath.NpgsqlPath(bool open) -> void NpgsqlTypes.NpgsqlPath.NpgsqlPath(int capacity) -> void NpgsqlTypes.NpgsqlPath.NpgsqlPath(int capacity, bool open) -> void NpgsqlTypes.NpgsqlPath.NpgsqlPath(params NpgsqlTypes.NpgsqlPoint[]! points) -> void -NpgsqlTypes.NpgsqlPath.NpgsqlPath(System.Collections.Generic.IEnumerable! points) -> void -NpgsqlTypes.NpgsqlPath.NpgsqlPath(System.Collections.Generic.IEnumerable! 
points, bool open) -> void NpgsqlTypes.NpgsqlPath.Open.get -> bool NpgsqlTypes.NpgsqlPath.Open.set -> void NpgsqlTypes.NpgsqlPath.Remove(NpgsqlTypes.NpgsqlPoint item) -> bool @@ -1387,6 +1213,7 @@ NpgsqlTypes.NpgsqlPath.RemoveAt(int index) -> void NpgsqlTypes.NpgsqlPath.this[int index].get -> NpgsqlTypes.NpgsqlPoint NpgsqlTypes.NpgsqlPath.this[int index].set -> void NpgsqlTypes.NpgsqlPoint +NpgsqlTypes.NpgsqlPoint.Deconstruct(out double x, out double y) -> void NpgsqlTypes.NpgsqlPoint.Equals(NpgsqlTypes.NpgsqlPoint other) -> bool NpgsqlTypes.NpgsqlPoint.NpgsqlPoint() -> void NpgsqlTypes.NpgsqlPoint.NpgsqlPoint(double x, double y) -> void @@ -1407,9 +1234,9 @@ NpgsqlTypes.NpgsqlPolygon.IndexOf(NpgsqlTypes.NpgsqlPoint item) -> int NpgsqlTypes.NpgsqlPolygon.Insert(int index, NpgsqlTypes.NpgsqlPoint item) -> void NpgsqlTypes.NpgsqlPolygon.IsReadOnly.get -> bool NpgsqlTypes.NpgsqlPolygon.NpgsqlPolygon() -> void +NpgsqlTypes.NpgsqlPolygon.NpgsqlPolygon(System.Collections.Generic.IEnumerable! points) -> void NpgsqlTypes.NpgsqlPolygon.NpgsqlPolygon(int capacity) -> void NpgsqlTypes.NpgsqlPolygon.NpgsqlPolygon(params NpgsqlTypes.NpgsqlPoint[]! points) -> void -NpgsqlTypes.NpgsqlPolygon.NpgsqlPolygon(System.Collections.Generic.IEnumerable! 
points) -> void NpgsqlTypes.NpgsqlPolygon.Remove(NpgsqlTypes.NpgsqlPoint item) -> bool NpgsqlTypes.NpgsqlPolygon.RemoveAt(int index) -> void NpgsqlTypes.NpgsqlPolygon.this[int index].get -> NpgsqlTypes.NpgsqlPoint @@ -1421,9 +1248,9 @@ NpgsqlTypes.NpgsqlRange.LowerBound.get -> T NpgsqlTypes.NpgsqlRange.LowerBoundInfinite.get -> bool NpgsqlTypes.NpgsqlRange.LowerBoundIsInclusive.get -> bool NpgsqlTypes.NpgsqlRange.NpgsqlRange() -> void -NpgsqlTypes.NpgsqlRange.NpgsqlRange(T lowerBound, bool lowerBoundIsInclusive, bool lowerBoundInfinite, T upperBound, bool upperBoundIsInclusive, bool upperBoundInfinite) -> void -NpgsqlTypes.NpgsqlRange.NpgsqlRange(T lowerBound, bool lowerBoundIsInclusive, T upperBound, bool upperBoundIsInclusive) -> void NpgsqlTypes.NpgsqlRange.NpgsqlRange(T lowerBound, T upperBound) -> void +NpgsqlTypes.NpgsqlRange.NpgsqlRange(T lowerBound, bool lowerBoundIsInclusive, T upperBound, bool upperBoundIsInclusive) -> void +NpgsqlTypes.NpgsqlRange.NpgsqlRange(T lowerBound, bool lowerBoundIsInclusive, bool lowerBoundInfinite, T upperBound, bool upperBoundIsInclusive, bool upperBoundInfinite) -> void NpgsqlTypes.NpgsqlRange.RangeTypeConverter NpgsqlTypes.NpgsqlRange.RangeTypeConverter.RangeTypeConverter() -> void NpgsqlTypes.NpgsqlRange.UpperBound.get -> T @@ -1431,6 +1258,7 @@ NpgsqlTypes.NpgsqlRange.UpperBoundInfinite.get -> bool NpgsqlTypes.NpgsqlRange.UpperBoundIsInclusive.get -> bool NpgsqlTypes.NpgsqlTid NpgsqlTypes.NpgsqlTid.BlockNumber.get -> uint +NpgsqlTypes.NpgsqlTid.Deconstruct(out uint blockNumber, out ushort offsetNumber) -> void NpgsqlTypes.NpgsqlTid.Equals(NpgsqlTypes.NpgsqlTid other) -> bool NpgsqlTypes.NpgsqlTid.NpgsqlTid() -> void NpgsqlTypes.NpgsqlTid.NpgsqlTid(uint blockNumber, ushort offsetNumber) -> void @@ -1494,7 +1322,6 @@ NpgsqlTypes.NpgsqlTsVector.Lexeme.Lexeme(string! text) -> void NpgsqlTypes.NpgsqlTsVector.Lexeme.Lexeme(string! text, System.Collections.Generic.List? 
wordEntryPositions) -> void NpgsqlTypes.NpgsqlTsVector.Lexeme.Text.get -> string! NpgsqlTypes.NpgsqlTsVector.Lexeme.Text.set -> void -NpgsqlTypes.NpgsqlTsVector.Lexeme.this[int index].get -> NpgsqlTypes.NpgsqlTsVector.Lexeme.WordEntryPos NpgsqlTypes.NpgsqlTsVector.Lexeme.Weight NpgsqlTypes.NpgsqlTsVector.Lexeme.Weight.A = 3 -> NpgsqlTypes.NpgsqlTsVector.Lexeme.Weight NpgsqlTypes.NpgsqlTsVector.Lexeme.Weight.B = 2 -> NpgsqlTypes.NpgsqlTsVector.Lexeme.Weight @@ -1506,10 +1333,253 @@ NpgsqlTypes.NpgsqlTsVector.Lexeme.WordEntryPos.Pos.get -> int NpgsqlTypes.NpgsqlTsVector.Lexeme.WordEntryPos.Weight.get -> NpgsqlTypes.NpgsqlTsVector.Lexeme.Weight NpgsqlTypes.NpgsqlTsVector.Lexeme.WordEntryPos.WordEntryPos() -> void NpgsqlTypes.NpgsqlTsVector.Lexeme.WordEntryPos.WordEntryPos(int pos, NpgsqlTypes.NpgsqlTsVector.Lexeme.Weight weight = NpgsqlTypes.NpgsqlTsVector.Lexeme.Weight.D) -> void +NpgsqlTypes.NpgsqlTsVector.Lexeme.this[int index].get -> NpgsqlTypes.NpgsqlTsVector.Lexeme.WordEntryPos NpgsqlTypes.NpgsqlTsVector.this[int index].get -> NpgsqlTypes.NpgsqlTsVector.Lexeme NpgsqlTypes.PgNameAttribute NpgsqlTypes.PgNameAttribute.PgName.get -> string! NpgsqlTypes.PgNameAttribute.PgNameAttribute(string! pgName) -> void +abstract Npgsql.NpgsqlDataSource.Clear() -> void +abstract Npgsql.Replication.PgOutput.Messages.UpdateMessage.NewRow.get -> Npgsql.Replication.PgOutput.ReplicationTuple! +abstract NpgsqlTypes.NpgsqlTsQuery.Equals(NpgsqlTypes.NpgsqlTsQuery? other) -> bool +const Npgsql.NpgsqlConnection.DefaultPort = 5432 -> int +const Npgsql.PostgresErrorCodes.ActiveSqlTransaction = "25001" -> string! +const Npgsql.PostgresErrorCodes.AdminShutdown = "57P01" -> string! +const Npgsql.PostgresErrorCodes.AmbiguousAlias = "42P09" -> string! +const Npgsql.PostgresErrorCodes.AmbiguousColumn = "42702" -> string! +const Npgsql.PostgresErrorCodes.AmbiguousFunction = "42725" -> string! +const Npgsql.PostgresErrorCodes.AmbiguousParameter = "42P08" -> string! 
+const Npgsql.PostgresErrorCodes.ArraySubscriptError = "2202E" -> string! +const Npgsql.PostgresErrorCodes.AssertFailure = "P0004" -> string! +const Npgsql.PostgresErrorCodes.BadCopyFileFormat = "22P04" -> string! +const Npgsql.PostgresErrorCodes.BranchTransactionAlreadyActive = "25002" -> string! +const Npgsql.PostgresErrorCodes.CannotCoerce = "42846" -> string! +const Npgsql.PostgresErrorCodes.CannotConnectNow = "57P03" -> string! +const Npgsql.PostgresErrorCodes.CantChangeRuntimeParam = "55P02" -> string! +const Npgsql.PostgresErrorCodes.CardinalityViolation = "21000" -> string! +const Npgsql.PostgresErrorCodes.CaseNotFound = "20000" -> string! +const Npgsql.PostgresErrorCodes.CharacterNotInRepertoire = "22021" -> string! +const Npgsql.PostgresErrorCodes.CheckViolation = "23514" -> string! +const Npgsql.PostgresErrorCodes.CollationMismatch = "42P21" -> string! +const Npgsql.PostgresErrorCodes.ConfigFileError = "F0000" -> string! +const Npgsql.PostgresErrorCodes.ConfigurationLimitExceeded = "53400" -> string! +const Npgsql.PostgresErrorCodes.ConnectionDoesNotExist = "08003" -> string! +const Npgsql.PostgresErrorCodes.ConnectionException = "08000" -> string! +const Npgsql.PostgresErrorCodes.ConnectionFailure = "08006" -> string! +const Npgsql.PostgresErrorCodes.ContainingSqlNotPermittedExternalRoutineException = "38001" -> string! +const Npgsql.PostgresErrorCodes.CrashShutdown = "57P02" -> string! +const Npgsql.PostgresErrorCodes.DataCorrupted = "XX001" -> string! +const Npgsql.PostgresErrorCodes.DataException = "22000" -> string! +const Npgsql.PostgresErrorCodes.DatabaseDropped = "57P04" -> string! +const Npgsql.PostgresErrorCodes.DatatypeMismatch = "42804" -> string! +const Npgsql.PostgresErrorCodes.DatetimeFieldOverflow = "22008" -> string! +const Npgsql.PostgresErrorCodes.DeadlockDetected = "40P01" -> string! +const Npgsql.PostgresErrorCodes.DependentObjectsStillExist = "2BP01" -> string! 
+const Npgsql.PostgresErrorCodes.DependentPrivilegeDescriptorsStillExist = "2B000" -> string! +const Npgsql.PostgresErrorCodes.DeprecatedFeatureWarning = "01P01" -> string! +const Npgsql.PostgresErrorCodes.DiagnosticsException = "0Z000" -> string! +const Npgsql.PostgresErrorCodes.DiskFull = "53100" -> string! +const Npgsql.PostgresErrorCodes.DivisionByZero = "22012" -> string! +const Npgsql.PostgresErrorCodes.DuplicateAlias = "42712" -> string! +const Npgsql.PostgresErrorCodes.DuplicateColumn = "42701" -> string! +const Npgsql.PostgresErrorCodes.DuplicateCursor = "42P03" -> string! +const Npgsql.PostgresErrorCodes.DuplicateDatabase = "42P04" -> string! +const Npgsql.PostgresErrorCodes.DuplicateFile = "58P02" -> string! +const Npgsql.PostgresErrorCodes.DuplicateFunction = "42723" -> string! +const Npgsql.PostgresErrorCodes.DuplicateObject = "42710" -> string! +const Npgsql.PostgresErrorCodes.DuplicatePreparedStatement = "42P05" -> string! +const Npgsql.PostgresErrorCodes.DuplicateSchema = "42P06" -> string! +const Npgsql.PostgresErrorCodes.DuplicateTable = "42P07" -> string! +const Npgsql.PostgresErrorCodes.DynamicResultSetsReturnedWarning = "0100C" -> string! +const Npgsql.PostgresErrorCodes.ErrorInAssignment = "22005" -> string! +const Npgsql.PostgresErrorCodes.EscapeCharacterConflict = "2200B" -> string! +const Npgsql.PostgresErrorCodes.EventTriggerProtocolViolatedExternalRoutineInvocationException = "39P03" -> string! +const Npgsql.PostgresErrorCodes.ExclusionViolation = "23P01" -> string! +const Npgsql.PostgresErrorCodes.ExternalRoutineException = "38000" -> string! +const Npgsql.PostgresErrorCodes.ExternalRoutineInvocationException = "39000" -> string! +const Npgsql.PostgresErrorCodes.FdwColumnNameNotFound = "HV005" -> string! +const Npgsql.PostgresErrorCodes.FdwDynamicParameterValueNeeded = "HV002" -> string! +const Npgsql.PostgresErrorCodes.FdwError = "HV000" -> string! +const Npgsql.PostgresErrorCodes.FdwFunctionSequenceError = "HV010" -> string! 
+const Npgsql.PostgresErrorCodes.FdwInconsistentDescriptorInformation = "HV021" -> string! +const Npgsql.PostgresErrorCodes.FdwInvalidAttributeValue = "HV024" -> string! +const Npgsql.PostgresErrorCodes.FdwInvalidColumnName = "HV007" -> string! +const Npgsql.PostgresErrorCodes.FdwInvalidColumnNumber = "HV008" -> string! +const Npgsql.PostgresErrorCodes.FdwInvalidDataType = "HV004" -> string! +const Npgsql.PostgresErrorCodes.FdwInvalidDataTypeDescriptors = "HV006" -> string! +const Npgsql.PostgresErrorCodes.FdwInvalidDescriptorFieldIdentifier = "HV091" -> string! +const Npgsql.PostgresErrorCodes.FdwInvalidHandle = "HV00B" -> string! +const Npgsql.PostgresErrorCodes.FdwInvalidOptionIndex = "HV00C" -> string! +const Npgsql.PostgresErrorCodes.FdwInvalidOptionName = "HV00D" -> string! +const Npgsql.PostgresErrorCodes.FdwInvalidStringFormat = "HV00A" -> string! +const Npgsql.PostgresErrorCodes.FdwInvalidStringLengthOrBufferLength = "HV090" -> string! +const Npgsql.PostgresErrorCodes.FdwInvalidUseOfNullPointer = "HV009" -> string! +const Npgsql.PostgresErrorCodes.FdwNoSchemas = "HV00P" -> string! +const Npgsql.PostgresErrorCodes.FdwOptionNameNotFound = "HV00J" -> string! +const Npgsql.PostgresErrorCodes.FdwOutOfMemory = "HV001" -> string! +const Npgsql.PostgresErrorCodes.FdwReplyHandle = "HV00K" -> string! +const Npgsql.PostgresErrorCodes.FdwSchemaNotFound = "HV00Q" -> string! +const Npgsql.PostgresErrorCodes.FdwTableNotFound = "HV00R" -> string! +const Npgsql.PostgresErrorCodes.FdwTooManyHandles = "HV014" -> string! +const Npgsql.PostgresErrorCodes.FdwUnableToCreateExecution = "HV00L" -> string! +const Npgsql.PostgresErrorCodes.FdwUnableToCreateReply = "HV00M" -> string! +const Npgsql.PostgresErrorCodes.FdwUnableToEstablishConnection = "HV00N" -> string! +const Npgsql.PostgresErrorCodes.FeatureNotSupported = "0A000" -> string! +const Npgsql.PostgresErrorCodes.FloatingPointException = "22P01" -> string! 
+const Npgsql.PostgresErrorCodes.ForeignKeyViolation = "23503" -> string! +const Npgsql.PostgresErrorCodes.FunctionExecutedNoReturnStatementSqlRoutineException = "2F005" -> string! +const Npgsql.PostgresErrorCodes.GroupingError = "42803" -> string! +const Npgsql.PostgresErrorCodes.HeldCursorRequiresSameIsolationLevel = "25008" -> string! +const Npgsql.PostgresErrorCodes.IdleSessionTimeout = "57P05" -> string! +const Npgsql.PostgresErrorCodes.ImplicitZeroBitPaddingWarning = "01008" -> string! +const Npgsql.PostgresErrorCodes.InFailedSqlTransaction = "25P02" -> string! +const Npgsql.PostgresErrorCodes.InappropriateAccessModeForBranchTransaction = "25003" -> string! +const Npgsql.PostgresErrorCodes.InappropriateIsolationLevelForBranchTransaction = "25004" -> string! +const Npgsql.PostgresErrorCodes.IndeterminateCollation = "42P22" -> string! +const Npgsql.PostgresErrorCodes.IndeterminateDatatype = "42P18" -> string! +const Npgsql.PostgresErrorCodes.IndexCorrupted = "XX002" -> string! +const Npgsql.PostgresErrorCodes.IndicatorOverflow = "22022" -> string! +const Npgsql.PostgresErrorCodes.InsufficientPrivilege = "42501" -> string! +const Npgsql.PostgresErrorCodes.InsufficientResources = "53000" -> string! +const Npgsql.PostgresErrorCodes.IntegrityConstraintViolation = "23000" -> string! +const Npgsql.PostgresErrorCodes.InternalError = "XX000" -> string! +const Npgsql.PostgresErrorCodes.IntervalFieldOverflow = "22015" -> string! +const Npgsql.PostgresErrorCodes.InvalidArgumentForLogarithm = "2201E" -> string! +const Npgsql.PostgresErrorCodes.InvalidArgumentForNthValueFunction = "22016" -> string! +const Npgsql.PostgresErrorCodes.InvalidArgumentForNtileFunction = "22014" -> string! +const Npgsql.PostgresErrorCodes.InvalidArgumentForPowerFunction = "2201F" -> string! +const Npgsql.PostgresErrorCodes.InvalidArgumentForWidthBucketFunction = "2201G" -> string! +const Npgsql.PostgresErrorCodes.InvalidAuthorizationSpecification = "28000" -> string! 
+const Npgsql.PostgresErrorCodes.InvalidBinaryRepresentation = "22P03" -> string! +const Npgsql.PostgresErrorCodes.InvalidCatalogName = "3D000" -> string! +const Npgsql.PostgresErrorCodes.InvalidCharacterValueForCast = "22018" -> string! +const Npgsql.PostgresErrorCodes.InvalidColumnDefinition = "42611" -> string! +const Npgsql.PostgresErrorCodes.InvalidColumnReference = "42P10" -> string! +const Npgsql.PostgresErrorCodes.InvalidCursorDefinition = "42P11" -> string! +const Npgsql.PostgresErrorCodes.InvalidCursorName = "34000" -> string! +const Npgsql.PostgresErrorCodes.InvalidCursorState = "24000" -> string! +const Npgsql.PostgresErrorCodes.InvalidDatabaseDefinition = "42P12" -> string! +const Npgsql.PostgresErrorCodes.InvalidDatetimeFormat = "22007" -> string! +const Npgsql.PostgresErrorCodes.InvalidEscapeCharacter = "22019" -> string! +const Npgsql.PostgresErrorCodes.InvalidEscapeOctet = "2200D" -> string! +const Npgsql.PostgresErrorCodes.InvalidEscapeSequence = "22025" -> string! +const Npgsql.PostgresErrorCodes.InvalidForeignKey = "42830" -> string! +const Npgsql.PostgresErrorCodes.InvalidFunctionDefinition = "42P13" -> string! +const Npgsql.PostgresErrorCodes.InvalidGrantOperation = "0LP01" -> string! +const Npgsql.PostgresErrorCodes.InvalidGrantor = "0L000" -> string! +const Npgsql.PostgresErrorCodes.InvalidIndicatorParameterValue = "22010" -> string! +const Npgsql.PostgresErrorCodes.InvalidLocatorSpecification = "0F001" -> string! +const Npgsql.PostgresErrorCodes.InvalidName = "42602" -> string! +const Npgsql.PostgresErrorCodes.InvalidObjectDefinition = "42P17" -> string! +const Npgsql.PostgresErrorCodes.InvalidParameterValue = "22023" -> string! +const Npgsql.PostgresErrorCodes.InvalidPassword = "28P01" -> string! +const Npgsql.PostgresErrorCodes.InvalidPreparedStatementDefinition = "42P14" -> string! +const Npgsql.PostgresErrorCodes.InvalidRecursion = "42P19" -> string! +const Npgsql.PostgresErrorCodes.InvalidRegularExpression = "2201B" -> string! 
+const Npgsql.PostgresErrorCodes.InvalidRoleSpecification = "0P000" -> string! +const Npgsql.PostgresErrorCodes.InvalidRowCountInLimitClause = "2201W" -> string! +const Npgsql.PostgresErrorCodes.InvalidRowCountInResultOffsetClause = "2201X" -> string! +const Npgsql.PostgresErrorCodes.InvalidSavepointSpecification = "3B001" -> string! +const Npgsql.PostgresErrorCodes.InvalidSchemaDefinition = "42P15" -> string! +const Npgsql.PostgresErrorCodes.InvalidSchemaName = "3F000" -> string! +const Npgsql.PostgresErrorCodes.InvalidSqlStatementName = "26000" -> string! +const Npgsql.PostgresErrorCodes.InvalidSqlstateReturnedExternalRoutineInvocationException = "39001" -> string! +const Npgsql.PostgresErrorCodes.InvalidTableDefinition = "42P16" -> string! +const Npgsql.PostgresErrorCodes.InvalidTablesampleArgument = "2202H" -> string! +const Npgsql.PostgresErrorCodes.InvalidTablesampleRepeat = "2202G" -> string! +const Npgsql.PostgresErrorCodes.InvalidTextRepresentation = "22P02" -> string! +const Npgsql.PostgresErrorCodes.InvalidTimeZoneDisplacementValue = "22009" -> string! +const Npgsql.PostgresErrorCodes.InvalidTransactionInitiation = "0B000" -> string! +const Npgsql.PostgresErrorCodes.InvalidTransactionState = "25000" -> string! +const Npgsql.PostgresErrorCodes.InvalidTransactionTermination = "2D000" -> string! +const Npgsql.PostgresErrorCodes.InvalidUseOfEscapeCharacter = "2200C" -> string! +const Npgsql.PostgresErrorCodes.InvalidXmlComment = "2200S" -> string! +const Npgsql.PostgresErrorCodes.InvalidXmlContent = "2200N" -> string! +const Npgsql.PostgresErrorCodes.InvalidXmlDocument = "2200M" -> string! +const Npgsql.PostgresErrorCodes.InvalidXmlProcessingInstruction = "2200T" -> string! +const Npgsql.PostgresErrorCodes.IoError = "58030" -> string! +const Npgsql.PostgresErrorCodes.LocatorException = "0F000" -> string! +const Npgsql.PostgresErrorCodes.LockFileExists = "F0001" -> string! +const Npgsql.PostgresErrorCodes.LockNotAvailable = "55P03" -> string! 
+const Npgsql.PostgresErrorCodes.ModifyingSqlDataNotPermittedExternalRoutineException = "38002" -> string! +const Npgsql.PostgresErrorCodes.ModifyingSqlDataNotPermittedSqlRoutineException = "2F002" -> string! +const Npgsql.PostgresErrorCodes.MostSpecificTypeMismatch = "2200G" -> string! +const Npgsql.PostgresErrorCodes.NameTooLong = "42622" -> string! +const Npgsql.PostgresErrorCodes.NoActiveSqlTransaction = "25P01" -> string! +const Npgsql.PostgresErrorCodes.NoActiveSqlTransactionForBranchTransaction = "25005" -> string! +const Npgsql.PostgresErrorCodes.NoAdditionalDynamicResultSetsReturned = "02001" -> string! +const Npgsql.PostgresErrorCodes.NoData = "02000" -> string! +const Npgsql.PostgresErrorCodes.NoDataFound = "P0002" -> string! +const Npgsql.PostgresErrorCodes.NonstandardUseOfEscapeCharacter = "22P06" -> string! +const Npgsql.PostgresErrorCodes.NotAnXmlDocument = "2200L" -> string! +const Npgsql.PostgresErrorCodes.NotNullViolation = "23502" -> string! +const Npgsql.PostgresErrorCodes.NullValueEliminatedInSetFunctionWarning = "01003" -> string! +const Npgsql.PostgresErrorCodes.NullValueNoIndicatorParameter = "22002" -> string! +const Npgsql.PostgresErrorCodes.NullValueNotAllowed = "22004" -> string! +const Npgsql.PostgresErrorCodes.NullValueNotAllowedExternalRoutineInvocationException = "39004" -> string! +const Npgsql.PostgresErrorCodes.NumericValueOutOfRange = "22003" -> string! +const Npgsql.PostgresErrorCodes.ObjectInUse = "55006" -> string! +const Npgsql.PostgresErrorCodes.ObjectNotInPrerequisiteState = "55000" -> string! +const Npgsql.PostgresErrorCodes.OperatorIntervention = "57000" -> string! +const Npgsql.PostgresErrorCodes.OutOfMemory = "53200" -> string! +const Npgsql.PostgresErrorCodes.PlpgsqlError = "P0000" -> string! +const Npgsql.PostgresErrorCodes.PrivilegeNotGrantedWarning = "01007" -> string! +const Npgsql.PostgresErrorCodes.PrivilegeNotRevokedWarning = "01006" -> string! 
+const Npgsql.PostgresErrorCodes.ProgramLimitExceeded = "54000" -> string! +const Npgsql.PostgresErrorCodes.ProhibitedSqlStatementAttemptedExternalRoutineException = "38003" -> string! +const Npgsql.PostgresErrorCodes.ProhibitedSqlStatementAttemptedSqlRoutineException = "2F003" -> string! +const Npgsql.PostgresErrorCodes.ProtocolViolation = "08P01" -> string! +const Npgsql.PostgresErrorCodes.QueryCanceled = "57014" -> string! +const Npgsql.PostgresErrorCodes.RaiseException = "P0001" -> string! +const Npgsql.PostgresErrorCodes.ReadOnlySqlTransaction = "25006" -> string! +const Npgsql.PostgresErrorCodes.ReadingSqlDataNotPermittedExternalRoutineException = "38004" -> string! +const Npgsql.PostgresErrorCodes.ReadingSqlDataNotPermittedSqlRoutineException = "2F004" -> string! +const Npgsql.PostgresErrorCodes.ReservedName = "42939" -> string! +const Npgsql.PostgresErrorCodes.RestrictViolation = "23001" -> string! +const Npgsql.PostgresErrorCodes.SavepointException = "3B000" -> string! +const Npgsql.PostgresErrorCodes.SchemaAndDataStatementMixingNotSupported = "25007" -> string! +const Npgsql.PostgresErrorCodes.SerializationFailure = "40001" -> string! +const Npgsql.PostgresErrorCodes.SnapshotFailure = "72000" -> string! +const Npgsql.PostgresErrorCodes.SqlClientUnableToEstablishSqlConnection = "08001" -> string! +const Npgsql.PostgresErrorCodes.SqlRoutineException = "2F000" -> string! +const Npgsql.PostgresErrorCodes.SqlServerRejectedEstablishmentOfSqlConnection = "08004" -> string! +const Npgsql.PostgresErrorCodes.SqlStatementNotYetComplete = "03000" -> string! +const Npgsql.PostgresErrorCodes.SrfProtocolViolatedExternalRoutineInvocationException = "39P02" -> string! +const Npgsql.PostgresErrorCodes.StackedDiagnosticsAccessedWithoutActiveHandler = "0Z002" -> string! +const Npgsql.PostgresErrorCodes.StatementCompletionUnknown = "40003" -> string! +const Npgsql.PostgresErrorCodes.StatementTooComplex = "54001" -> string! 
+const Npgsql.PostgresErrorCodes.StringDataLengthMismatch = "22026" -> string! +const Npgsql.PostgresErrorCodes.StringDataRightTruncation = "22001" -> string! +const Npgsql.PostgresErrorCodes.StringDataRightTruncationWarning = "01004" -> string! +const Npgsql.PostgresErrorCodes.SubstringError = "22011" -> string! +const Npgsql.PostgresErrorCodes.SuccessfulCompletion = "00000" -> string! +const Npgsql.PostgresErrorCodes.SyntaxError = "42601" -> string! +const Npgsql.PostgresErrorCodes.SyntaxErrorOrAccessRuleViolation = "42000" -> string! +const Npgsql.PostgresErrorCodes.SystemError = "58000" -> string! +const Npgsql.PostgresErrorCodes.TooManyArguments = "54023" -> string! +const Npgsql.PostgresErrorCodes.TooManyColumns = "54011" -> string! +const Npgsql.PostgresErrorCodes.TooManyConnections = "53300" -> string! +const Npgsql.PostgresErrorCodes.TooManyRows = "P0003" -> string! +const Npgsql.PostgresErrorCodes.TransactionIntegrityConstraintViolation = "40002" -> string! +const Npgsql.PostgresErrorCodes.TransactionResolutionUnknown = "08007" -> string! +const Npgsql.PostgresErrorCodes.TransactionRollback = "40000" -> string! +const Npgsql.PostgresErrorCodes.TriggerProtocolViolatedExternalRoutineInvocationException = "39P01" -> string! +const Npgsql.PostgresErrorCodes.TriggeredActionException = "09000" -> string! +const Npgsql.PostgresErrorCodes.TriggeredDataChangeViolation = "27000" -> string! +const Npgsql.PostgresErrorCodes.TrimError = "22027" -> string! +const Npgsql.PostgresErrorCodes.UndefinedColumn = "42703" -> string! +const Npgsql.PostgresErrorCodes.UndefinedFile = "58P01" -> string! +const Npgsql.PostgresErrorCodes.UndefinedFunction = "42883" -> string! +const Npgsql.PostgresErrorCodes.UndefinedObject = "42704" -> string! +const Npgsql.PostgresErrorCodes.UndefinedParameter = "42P02" -> string! +const Npgsql.PostgresErrorCodes.UndefinedTable = "42P01" -> string! +const Npgsql.PostgresErrorCodes.UniqueViolation = "23505" -> string! 
+const Npgsql.PostgresErrorCodes.UnterminatedCString = "22024" -> string! +const Npgsql.PostgresErrorCodes.UntranslatableCharacter = "22P05" -> string! +const Npgsql.PostgresErrorCodes.Warning = "01000" -> string! +const Npgsql.PostgresErrorCodes.WindowingError = "42P20" -> string! +const Npgsql.PostgresErrorCodes.WithCheckOptionViolation = "44000" -> string! +const Npgsql.PostgresErrorCodes.WrongObjectType = "42809" -> string! +const Npgsql.PostgresErrorCodes.ZeroLengthCharacterString = "2200F" -> string! override Npgsql.BackendMessages.FieldDescription.ToString() -> string! override Npgsql.NpgsqlBatch.Cancel() -> void override Npgsql.NpgsqlBatch.CreateDbBatchCommand() -> System.Data.Common.DbBatchCommand! @@ -1587,16 +1657,16 @@ override Npgsql.NpgsqlConnection.CloseAsync() -> System.Threading.Tasks.Task! override Npgsql.NpgsqlConnection.ConnectionString.get -> string! override Npgsql.NpgsqlConnection.ConnectionString.set -> void override Npgsql.NpgsqlConnection.ConnectionTimeout.get -> int -override Npgsql.NpgsqlConnection.Database.get -> string! override Npgsql.NpgsqlConnection.DataSource.get -> string! +override Npgsql.NpgsqlConnection.Database.get -> string! override Npgsql.NpgsqlConnection.DisposeAsync() -> System.Threading.Tasks.ValueTask override Npgsql.NpgsqlConnection.EnlistTransaction(System.Transactions.Transaction? transaction) -> void override Npgsql.NpgsqlConnection.GetSchema() -> System.Data.DataTable! override Npgsql.NpgsqlConnection.GetSchema(string? collectionName) -> System.Data.DataTable! override Npgsql.NpgsqlConnection.GetSchema(string? collectionName, string?[]? restrictions) -> System.Data.DataTable! -override Npgsql.NpgsqlConnection.GetSchemaAsync(string! collectionName, string?[]? restrictions, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -override Npgsql.NpgsqlConnection.GetSchemaAsync(string! 
collectionName, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! override Npgsql.NpgsqlConnection.GetSchemaAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +override Npgsql.NpgsqlConnection.GetSchemaAsync(string! collectionName, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +override Npgsql.NpgsqlConnection.GetSchemaAsync(string! collectionName, string?[]? restrictions, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! override Npgsql.NpgsqlConnection.Open() -> void override Npgsql.NpgsqlConnection.OpenAsync(System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! override Npgsql.NpgsqlConnection.ServerVersion.get -> string! @@ -1606,9 +1676,9 @@ override Npgsql.NpgsqlConnectionStringBuilder.ContainsKey(string! keyword) -> bo override Npgsql.NpgsqlConnectionStringBuilder.Equals(object? obj) -> bool override Npgsql.NpgsqlConnectionStringBuilder.GetHashCode() -> int override Npgsql.NpgsqlConnectionStringBuilder.Remove(string! keyword) -> bool +override Npgsql.NpgsqlConnectionStringBuilder.TryGetValue(string! keyword, out object? value) -> bool override Npgsql.NpgsqlConnectionStringBuilder.this[string! keyword].get -> object! override Npgsql.NpgsqlConnectionStringBuilder.this[string! keyword].set -> void -override Npgsql.NpgsqlConnectionStringBuilder.TryGetValue(string! keyword, out object? value) -> bool override Npgsql.NpgsqlDataReader.Close() -> void override Npgsql.NpgsqlDataReader.CloseAsync() -> System.Threading.Tasks.Task! 
override Npgsql.NpgsqlDataReader.Depth.get -> int @@ -1619,6 +1689,7 @@ override Npgsql.NpgsqlDataReader.GetByte(int ordinal) -> byte override Npgsql.NpgsqlDataReader.GetBytes(int ordinal, long dataOffset, byte[]? buffer, int bufferOffset, int length) -> long override Npgsql.NpgsqlDataReader.GetChar(int ordinal) -> char override Npgsql.NpgsqlDataReader.GetChars(int ordinal, long dataOffset, char[]? buffer, int bufferOffset, int length) -> long +override Npgsql.NpgsqlDataReader.GetColumnSchemaAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task!>! override Npgsql.NpgsqlDataReader.GetDataTypeName(int ordinal) -> string! override Npgsql.NpgsqlDataReader.GetDateTime(int ordinal) -> System.DateTime override Npgsql.NpgsqlDataReader.GetDecimal(int ordinal) -> decimal @@ -1667,21 +1738,7 @@ override Npgsql.NpgsqlFactory.CreateConnectionStringBuilder() -> System.Data.Com override Npgsql.NpgsqlFactory.CreateDataAdapter() -> System.Data.Common.DbDataAdapter! override Npgsql.NpgsqlFactory.CreateDataSource(string! connectionString) -> System.Data.Common.DbDataSource! override Npgsql.NpgsqlFactory.CreateParameter() -> System.Data.Common.DbParameter! -override Npgsql.NpgsqlLargeObjectStream.CanRead.get -> bool -override Npgsql.NpgsqlLargeObjectStream.CanSeek.get -> bool -override Npgsql.NpgsqlLargeObjectStream.CanTimeout.get -> bool -override Npgsql.NpgsqlLargeObjectStream.CanWrite.get -> bool -override Npgsql.NpgsqlLargeObjectStream.Close() -> void -override Npgsql.NpgsqlLargeObjectStream.Flush() -> void -override Npgsql.NpgsqlLargeObjectStream.Length.get -> long -override Npgsql.NpgsqlLargeObjectStream.Position.get -> long -override Npgsql.NpgsqlLargeObjectStream.Position.set -> void -override Npgsql.NpgsqlLargeObjectStream.Read(byte[]! buffer, int offset, int count) -> int -override Npgsql.NpgsqlLargeObjectStream.ReadAsync(byte[]! 
buffer, int offset, int count, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! -override Npgsql.NpgsqlLargeObjectStream.Seek(long offset, System.IO.SeekOrigin origin) -> long -override Npgsql.NpgsqlLargeObjectStream.SetLength(long value) -> void -override Npgsql.NpgsqlLargeObjectStream.Write(byte[]! buffer, int offset, int count) -> void -override Npgsql.NpgsqlLargeObjectStream.WriteAsync(byte[]! buffer, int offset, int count, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! +override Npgsql.NpgsqlMultiHostDataSource.Clear() -> void override Npgsql.NpgsqlNestedDataReader.Close() -> void override Npgsql.NpgsqlNestedDataReader.Depth.get -> int override Npgsql.NpgsqlNestedDataReader.FieldCount.get -> int @@ -1748,18 +1805,18 @@ override Npgsql.NpgsqlRawCopyStream.FlushAsync(System.Threading.CancellationToke override Npgsql.NpgsqlRawCopyStream.Length.get -> long override Npgsql.NpgsqlRawCopyStream.Position.get -> long override Npgsql.NpgsqlRawCopyStream.Position.set -> void -override Npgsql.NpgsqlRawCopyStream.Read(byte[]! buffer, int offset, int count) -> int override Npgsql.NpgsqlRawCopyStream.Read(System.Span span) -> int -override Npgsql.NpgsqlRawCopyStream.ReadAsync(byte[]! buffer, int offset, int count, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! +override Npgsql.NpgsqlRawCopyStream.Read(byte[]! buffer, int offset, int count) -> int override Npgsql.NpgsqlRawCopyStream.ReadAsync(System.Memory buffer, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.ValueTask +override Npgsql.NpgsqlRawCopyStream.ReadAsync(byte[]! buffer, int offset, int count, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! 
override Npgsql.NpgsqlRawCopyStream.ReadTimeout.get -> int override Npgsql.NpgsqlRawCopyStream.ReadTimeout.set -> void override Npgsql.NpgsqlRawCopyStream.Seek(long offset, System.IO.SeekOrigin origin) -> long override Npgsql.NpgsqlRawCopyStream.SetLength(long value) -> void -override Npgsql.NpgsqlRawCopyStream.Write(byte[]! buffer, int offset, int count) -> void override Npgsql.NpgsqlRawCopyStream.Write(System.ReadOnlySpan buffer) -> void -override Npgsql.NpgsqlRawCopyStream.WriteAsync(byte[]! buffer, int offset, int count, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! +override Npgsql.NpgsqlRawCopyStream.Write(byte[]! buffer, int offset, int count) -> void override Npgsql.NpgsqlRawCopyStream.WriteAsync(System.ReadOnlyMemory buffer, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.ValueTask +override Npgsql.NpgsqlRawCopyStream.WriteAsync(byte[]! buffer, int offset, int count, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! override Npgsql.NpgsqlRawCopyStream.WriteTimeout.get -> int override Npgsql.NpgsqlRawCopyStream.WriteTimeout.set -> void override Npgsql.NpgsqlTransaction.Commit() -> void @@ -1770,8 +1827,8 @@ override Npgsql.NpgsqlTransaction.Release(string! name) -> void override Npgsql.NpgsqlTransaction.ReleaseAsync(string! name, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! override Npgsql.NpgsqlTransaction.Rollback() -> void override Npgsql.NpgsqlTransaction.Rollback(string! name) -> void -override Npgsql.NpgsqlTransaction.RollbackAsync(string! name, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! 
override Npgsql.NpgsqlTransaction.RollbackAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +override Npgsql.NpgsqlTransaction.RollbackAsync(string! name, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! override Npgsql.NpgsqlTransaction.Save(string! name) -> void override Npgsql.NpgsqlTransaction.SaveAsync(string! name, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! override Npgsql.NpgsqlTransaction.SupportsSavepoints.get -> bool @@ -1798,18 +1855,21 @@ override NpgsqlTypes.NpgsqlCidr.ToString() -> string! override NpgsqlTypes.NpgsqlCircle.Equals(object? obj) -> bool override NpgsqlTypes.NpgsqlCircle.GetHashCode() -> int override NpgsqlTypes.NpgsqlCircle.ToString() -> string! +override NpgsqlTypes.NpgsqlCube.Equals(object? obj) -> bool +override NpgsqlTypes.NpgsqlCube.GetHashCode() -> int +override NpgsqlTypes.NpgsqlCube.ToString() -> string! override NpgsqlTypes.NpgsqlInet.ToString() -> string! override NpgsqlTypes.NpgsqlInterval.Equals(object? obj) -> bool override NpgsqlTypes.NpgsqlInterval.GetHashCode() -> int +override NpgsqlTypes.NpgsqlLSeg.Equals(object? obj) -> bool +override NpgsqlTypes.NpgsqlLSeg.GetHashCode() -> int +override NpgsqlTypes.NpgsqlLSeg.ToString() -> string! override NpgsqlTypes.NpgsqlLine.Equals(object? obj) -> bool override NpgsqlTypes.NpgsqlLine.GetHashCode() -> int override NpgsqlTypes.NpgsqlLine.ToString() -> string! override NpgsqlTypes.NpgsqlLogSequenceNumber.Equals(object? obj) -> bool override NpgsqlTypes.NpgsqlLogSequenceNumber.GetHashCode() -> int override NpgsqlTypes.NpgsqlLogSequenceNumber.ToString() -> string! -override NpgsqlTypes.NpgsqlLSeg.Equals(object? obj) -> bool -override NpgsqlTypes.NpgsqlLSeg.GetHashCode() -> int -override NpgsqlTypes.NpgsqlLSeg.ToString() -> string! 
override NpgsqlTypes.NpgsqlPath.Equals(object? obj) -> bool override NpgsqlTypes.NpgsqlPath.GetHashCode() -> int override NpgsqlTypes.NpgsqlPath.ToString() -> string! @@ -1889,13 +1949,23 @@ static NpgsqlTypes.NpgsqlCidr.explicit operator System.Net.IPAddress!(NpgsqlType static NpgsqlTypes.NpgsqlCidr.implicit operator NpgsqlTypes.NpgsqlInet(NpgsqlTypes.NpgsqlCidr cidr) -> NpgsqlTypes.NpgsqlInet static NpgsqlTypes.NpgsqlCircle.operator !=(NpgsqlTypes.NpgsqlCircle x, NpgsqlTypes.NpgsqlCircle y) -> bool static NpgsqlTypes.NpgsqlCircle.operator ==(NpgsqlTypes.NpgsqlCircle x, NpgsqlTypes.NpgsqlCircle y) -> bool +static NpgsqlTypes.NpgsqlCube.operator !=(NpgsqlTypes.NpgsqlCube x, NpgsqlTypes.NpgsqlCube y) -> bool +static NpgsqlTypes.NpgsqlCube.operator ==(NpgsqlTypes.NpgsqlCube x, NpgsqlTypes.NpgsqlCube y) -> bool static NpgsqlTypes.NpgsqlInet.explicit operator System.Net.IPAddress!(NpgsqlTypes.NpgsqlInet inet) -> System.Net.IPAddress! static NpgsqlTypes.NpgsqlInet.implicit operator NpgsqlTypes.NpgsqlInet(System.Net.IPAddress! ip) -> NpgsqlTypes.NpgsqlInet +static NpgsqlTypes.NpgsqlInet.implicit operator NpgsqlTypes.NpgsqlInet(System.Net.IPNetwork cidr) -> NpgsqlTypes.NpgsqlInet +static NpgsqlTypes.NpgsqlLSeg.operator !=(NpgsqlTypes.NpgsqlLSeg x, NpgsqlTypes.NpgsqlLSeg y) -> bool +static NpgsqlTypes.NpgsqlLSeg.operator ==(NpgsqlTypes.NpgsqlLSeg x, NpgsqlTypes.NpgsqlLSeg y) -> bool static NpgsqlTypes.NpgsqlLine.operator !=(NpgsqlTypes.NpgsqlLine x, NpgsqlTypes.NpgsqlLine y) -> bool static NpgsqlTypes.NpgsqlLine.operator ==(NpgsqlTypes.NpgsqlLine x, NpgsqlTypes.NpgsqlLine y) -> bool +static NpgsqlTypes.NpgsqlLogSequenceNumber.Larger(NpgsqlTypes.NpgsqlLogSequenceNumber value1, NpgsqlTypes.NpgsqlLogSequenceNumber value2) -> NpgsqlTypes.NpgsqlLogSequenceNumber +static NpgsqlTypes.NpgsqlLogSequenceNumber.Parse(System.ReadOnlySpan s) -> NpgsqlTypes.NpgsqlLogSequenceNumber +static NpgsqlTypes.NpgsqlLogSequenceNumber.Parse(string! 
s) -> NpgsqlTypes.NpgsqlLogSequenceNumber +static NpgsqlTypes.NpgsqlLogSequenceNumber.Smaller(NpgsqlTypes.NpgsqlLogSequenceNumber value1, NpgsqlTypes.NpgsqlLogSequenceNumber value2) -> NpgsqlTypes.NpgsqlLogSequenceNumber +static NpgsqlTypes.NpgsqlLogSequenceNumber.TryParse(System.ReadOnlySpan s, out NpgsqlTypes.NpgsqlLogSequenceNumber result) -> bool +static NpgsqlTypes.NpgsqlLogSequenceNumber.TryParse(string! s, out NpgsqlTypes.NpgsqlLogSequenceNumber result) -> bool static NpgsqlTypes.NpgsqlLogSequenceNumber.explicit operator NpgsqlTypes.NpgsqlLogSequenceNumber(ulong value) -> NpgsqlTypes.NpgsqlLogSequenceNumber static NpgsqlTypes.NpgsqlLogSequenceNumber.explicit operator ulong(NpgsqlTypes.NpgsqlLogSequenceNumber value) -> ulong -static NpgsqlTypes.NpgsqlLogSequenceNumber.Larger(NpgsqlTypes.NpgsqlLogSequenceNumber value1, NpgsqlTypes.NpgsqlLogSequenceNumber value2) -> NpgsqlTypes.NpgsqlLogSequenceNumber static NpgsqlTypes.NpgsqlLogSequenceNumber.operator !=(NpgsqlTypes.NpgsqlLogSequenceNumber value1, NpgsqlTypes.NpgsqlLogSequenceNumber value2) -> bool static NpgsqlTypes.NpgsqlLogSequenceNumber.operator +(NpgsqlTypes.NpgsqlLogSequenceNumber lsn, double nbytes) -> NpgsqlTypes.NpgsqlLogSequenceNumber static NpgsqlTypes.NpgsqlLogSequenceNumber.operator -(NpgsqlTypes.NpgsqlLogSequenceNumber first, NpgsqlTypes.NpgsqlLogSequenceNumber second) -> ulong @@ -1905,35 +1975,29 @@ static NpgsqlTypes.NpgsqlLogSequenceNumber.operator <=(NpgsqlTypes.NpgsqlLogSequ static NpgsqlTypes.NpgsqlLogSequenceNumber.operator ==(NpgsqlTypes.NpgsqlLogSequenceNumber value1, NpgsqlTypes.NpgsqlLogSequenceNumber value2) -> bool static NpgsqlTypes.NpgsqlLogSequenceNumber.operator >(NpgsqlTypes.NpgsqlLogSequenceNumber value1, NpgsqlTypes.NpgsqlLogSequenceNumber value2) -> bool static NpgsqlTypes.NpgsqlLogSequenceNumber.operator >=(NpgsqlTypes.NpgsqlLogSequenceNumber value1, NpgsqlTypes.NpgsqlLogSequenceNumber value2) -> bool -static NpgsqlTypes.NpgsqlLogSequenceNumber.Parse(string! 
s) -> NpgsqlTypes.NpgsqlLogSequenceNumber -static NpgsqlTypes.NpgsqlLogSequenceNumber.Parse(System.ReadOnlySpan s) -> NpgsqlTypes.NpgsqlLogSequenceNumber -static NpgsqlTypes.NpgsqlLogSequenceNumber.Smaller(NpgsqlTypes.NpgsqlLogSequenceNumber value1, NpgsqlTypes.NpgsqlLogSequenceNumber value2) -> NpgsqlTypes.NpgsqlLogSequenceNumber -static NpgsqlTypes.NpgsqlLogSequenceNumber.TryParse(string! s, out NpgsqlTypes.NpgsqlLogSequenceNumber result) -> bool -static NpgsqlTypes.NpgsqlLogSequenceNumber.TryParse(System.ReadOnlySpan s, out NpgsqlTypes.NpgsqlLogSequenceNumber result) -> bool -static NpgsqlTypes.NpgsqlLSeg.operator !=(NpgsqlTypes.NpgsqlLSeg x, NpgsqlTypes.NpgsqlLSeg y) -> bool -static NpgsqlTypes.NpgsqlLSeg.operator ==(NpgsqlTypes.NpgsqlLSeg x, NpgsqlTypes.NpgsqlLSeg y) -> bool static NpgsqlTypes.NpgsqlPath.operator !=(NpgsqlTypes.NpgsqlPath x, NpgsqlTypes.NpgsqlPath y) -> bool static NpgsqlTypes.NpgsqlPath.operator ==(NpgsqlTypes.NpgsqlPath x, NpgsqlTypes.NpgsqlPath y) -> bool static NpgsqlTypes.NpgsqlPoint.operator !=(NpgsqlTypes.NpgsqlPoint x, NpgsqlTypes.NpgsqlPoint y) -> bool static NpgsqlTypes.NpgsqlPoint.operator ==(NpgsqlTypes.NpgsqlPoint x, NpgsqlTypes.NpgsqlPoint y) -> bool static NpgsqlTypes.NpgsqlPolygon.operator !=(NpgsqlTypes.NpgsqlPolygon x, NpgsqlTypes.NpgsqlPolygon y) -> bool static NpgsqlTypes.NpgsqlPolygon.operator ==(NpgsqlTypes.NpgsqlPolygon x, NpgsqlTypes.NpgsqlPolygon y) -> bool -static NpgsqlTypes.NpgsqlRange.operator !=(NpgsqlTypes.NpgsqlRange x, NpgsqlTypes.NpgsqlRange y) -> bool -static NpgsqlTypes.NpgsqlRange.operator ==(NpgsqlTypes.NpgsqlRange x, NpgsqlTypes.NpgsqlRange y) -> bool static NpgsqlTypes.NpgsqlRange.Parse(string! 
value) -> NpgsqlTypes.NpgsqlRange static NpgsqlTypes.NpgsqlRange.RangeTypeConverter.Register() -> void +static NpgsqlTypes.NpgsqlRange.operator !=(NpgsqlTypes.NpgsqlRange x, NpgsqlTypes.NpgsqlRange y) -> bool +static NpgsqlTypes.NpgsqlRange.operator ==(NpgsqlTypes.NpgsqlRange x, NpgsqlTypes.NpgsqlRange y) -> bool static NpgsqlTypes.NpgsqlTid.operator !=(NpgsqlTypes.NpgsqlTid left, NpgsqlTypes.NpgsqlTid right) -> bool static NpgsqlTypes.NpgsqlTid.operator ==(NpgsqlTypes.NpgsqlTid left, NpgsqlTypes.NpgsqlTid right) -> bool +static NpgsqlTypes.NpgsqlTsQuery.Parse(string! value) -> NpgsqlTypes.NpgsqlTsQuery! static NpgsqlTypes.NpgsqlTsQuery.operator !=(NpgsqlTypes.NpgsqlTsQuery? left, NpgsqlTypes.NpgsqlTsQuery? right) -> bool static NpgsqlTypes.NpgsqlTsQuery.operator ==(NpgsqlTypes.NpgsqlTsQuery? left, NpgsqlTypes.NpgsqlTsQuery? right) -> bool -static NpgsqlTypes.NpgsqlTsQuery.Parse(string! value) -> NpgsqlTypes.NpgsqlTsQuery! -static NpgsqlTypes.NpgsqlTsVector.Lexeme.operator !=(NpgsqlTypes.NpgsqlTsVector.Lexeme left, NpgsqlTypes.NpgsqlTsVector.Lexeme right) -> bool -static NpgsqlTypes.NpgsqlTsVector.Lexeme.operator ==(NpgsqlTypes.NpgsqlTsVector.Lexeme left, NpgsqlTypes.NpgsqlTsVector.Lexeme right) -> bool static NpgsqlTypes.NpgsqlTsVector.Lexeme.WordEntryPos.operator !=(NpgsqlTypes.NpgsqlTsVector.Lexeme.WordEntryPos left, NpgsqlTypes.NpgsqlTsVector.Lexeme.WordEntryPos right) -> bool static NpgsqlTypes.NpgsqlTsVector.Lexeme.WordEntryPos.operator ==(NpgsqlTypes.NpgsqlTsVector.Lexeme.WordEntryPos left, NpgsqlTypes.NpgsqlTsVector.Lexeme.WordEntryPos right) -> bool +static NpgsqlTypes.NpgsqlTsVector.Lexeme.operator !=(NpgsqlTypes.NpgsqlTsVector.Lexeme left, NpgsqlTypes.NpgsqlTsVector.Lexeme right) -> bool +static NpgsqlTypes.NpgsqlTsVector.Lexeme.operator ==(NpgsqlTypes.NpgsqlTsVector.Lexeme left, NpgsqlTypes.NpgsqlTsVector.Lexeme right) -> bool static NpgsqlTypes.NpgsqlTsVector.Parse(string! value) -> NpgsqlTypes.NpgsqlTsVector! 
static readonly Npgsql.NpgsqlFactory.Instance -> Npgsql.NpgsqlFactory! static readonly NpgsqlTypes.NpgsqlLogSequenceNumber.Invalid -> NpgsqlTypes.NpgsqlLogSequenceNumber static readonly NpgsqlTypes.NpgsqlRange.Empty -> NpgsqlTypes.NpgsqlRange +static readonly NpgsqlTypes.NpgsqlTsVector.Empty -> NpgsqlTypes.NpgsqlTsVector! virtual Npgsql.NpgsqlCommand.Clone() -> Npgsql.NpgsqlCommand! virtual Npgsql.Replication.PgOutput.ReplicationTuple.GetAsyncEnumerator(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Collections.Generic.IAsyncEnumerator! diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index 3fb4a49954..7dc5c58110 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -1,181 +1 @@ -#nullable enable -*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.Multiplexing.get -> bool -*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.Multiplexing.set -> void -*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.WriteCoalescingBufferThresholdBytes.get -> int -*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.WriteCoalescingBufferThresholdBytes.set -> void -abstract Npgsql.NpgsqlDataSource.Clear() -> void -Npgsql.GssEncryptionMode -Npgsql.GssEncryptionMode.Disable = 0 -> Npgsql.GssEncryptionMode -Npgsql.GssEncryptionMode.Prefer = 1 -> Npgsql.GssEncryptionMode -Npgsql.GssEncryptionMode.Require = 2 -> Npgsql.GssEncryptionMode -Npgsql.TypeMapping.INpgsqlTypeMapper.AddDbTypeResolverFactory(Npgsql.Internal.DbTypeResolverFactory! factory) -> void -Npgsql.NpgsqlConnection.BeginTextExport(string! copyToCommand) -> Npgsql.NpgsqlCopyTextReader! -Npgsql.NpgsqlConnection.BeginTextExportAsync(string! copyToCommand, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -Npgsql.NpgsqlConnection.BeginTextImport(string! copyFromCommand) -> Npgsql.NpgsqlCopyTextWriter! 
-Npgsql.NpgsqlConnection.BeginTextImportAsync(string! copyFromCommand, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -Npgsql.NpgsqlConnection.CloneWithAsync(string! connectionString, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.ValueTask -Npgsql.NpgsqlConnection.SslClientAuthenticationOptionsCallback.get -> System.Action? -Npgsql.NpgsqlConnection.SslClientAuthenticationOptionsCallback.set -> void -Npgsql.NpgsqlConnectionStringBuilder.GssEncryptionMode.get -> Npgsql.GssEncryptionMode -Npgsql.NpgsqlConnectionStringBuilder.GssEncryptionMode.set -> void -Npgsql.NpgsqlConnectionStringBuilder.IncludeFailedBatchedCommand.get -> bool -Npgsql.NpgsqlConnectionStringBuilder.IncludeFailedBatchedCommand.set -> void -Npgsql.NpgsqlConnectionStringBuilder.RequireAuth.get -> string? -Npgsql.NpgsqlConnectionStringBuilder.RequireAuth.set -> void -Npgsql.NpgsqlConnectionStringBuilder.SslNegotiation.get -> Npgsql.SslNegotiation -Npgsql.NpgsqlConnectionStringBuilder.SslNegotiation.set -> void -Npgsql.NpgsqlCopyTextReader.Timeout.get -> int -Npgsql.NpgsqlCopyTextReader.Timeout.set -> void -Npgsql.NpgsqlCopyTextWriter.Timeout.get -> int -Npgsql.NpgsqlCopyTextWriter.Timeout.set -> void -Npgsql.NpgsqlDataSourceBuilder.ConfigureTypeLoading(System.Action! configureAction) -> Npgsql.NpgsqlDataSourceBuilder! -Npgsql.NpgsqlDataSourceBuilder.MapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlDataSourceBuilder! -Npgsql.NpgsqlDataSourceBuilder.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlDataSourceBuilder! -Npgsql.NpgsqlDataSourceBuilder.MapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlDataSourceBuilder! 
-Npgsql.NpgsqlDataSourceBuilder.MapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlDataSourceBuilder! -Npgsql.NpgsqlDataSourceBuilder.ConfigureTracing(System.Action! configureAction) -> Npgsql.NpgsqlDataSourceBuilder! -Npgsql.NpgsqlDataSourceBuilder.UseNegotiateOptionsCallback(System.Action? negotiateOptionsCallback) -> Npgsql.NpgsqlDataSourceBuilder! -Npgsql.NpgsqlDataSourceBuilder.UseRootCertificates(System.Security.Cryptography.X509Certificates.X509Certificate2Collection? rootCertificates) -> Npgsql.NpgsqlDataSourceBuilder! -Npgsql.NpgsqlDataSourceBuilder.UseRootCertificatesCallback(System.Func? rootCertificateCallback) -> Npgsql.NpgsqlDataSourceBuilder! -Npgsql.NpgsqlDataSourceBuilder.UseSslClientAuthenticationOptionsCallback(System.Action? sslClientAuthenticationOptionsCallback) -> Npgsql.NpgsqlDataSourceBuilder! -Npgsql.NpgsqlMetricsOptions -Npgsql.NpgsqlMetricsOptions.NpgsqlMetricsOptions() -> void -Npgsql.NpgsqlSlimDataSourceBuilder.ConfigureTracing(System.Action! configureAction) -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder.ConfigureTypeLoading(System.Action! configureAction) -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder.EnableCube() -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder.EnableGeometricTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder.EnableJsonTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder.EnableNetworkTypes() -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder.MapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlSlimDataSourceBuilder! 
-Npgsql.NpgsqlSlimDataSourceBuilder.MapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder.MapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder.UseNegotiateOptionsCallback(System.Action? negotiateOptionsCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder.UseRootCertificates(System.Security.Cryptography.X509Certificates.X509Certificate2Collection? rootCertificates) -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder.UseRootCertificatesCallback(System.Func? rootCertificateCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! -Npgsql.NpgsqlSlimDataSourceBuilder.UseSslClientAuthenticationOptionsCallback(System.Action? sslClientAuthenticationOptionsCallback) -> Npgsql.NpgsqlSlimDataSourceBuilder! -*REMOVED*Npgsql.NpgsqlTracingOptions -*REMOVED*Npgsql.NpgsqlTracingOptions.NpgsqlTracingOptions() -> void -Npgsql.NpgsqlTracingOptionsBuilder -Npgsql.NpgsqlTracingOptionsBuilder.ConfigureBatchEnrichmentCallback(System.Action? batchEnrichmentCallback) -> Npgsql.NpgsqlTracingOptionsBuilder! -Npgsql.NpgsqlTracingOptionsBuilder.ConfigureBatchFilter(System.Func? batchFilter) -> Npgsql.NpgsqlTracingOptionsBuilder! -Npgsql.NpgsqlTracingOptionsBuilder.ConfigureBatchSpanNameProvider(System.Func? batchSpanNameProvider) -> Npgsql.NpgsqlTracingOptionsBuilder! -Npgsql.NpgsqlTracingOptionsBuilder.ConfigureCommandEnrichmentCallback(System.Action? commandEnrichmentCallback) -> Npgsql.NpgsqlTracingOptionsBuilder! -Npgsql.NpgsqlTracingOptionsBuilder.ConfigureCommandFilter(System.Func? commandFilter) -> Npgsql.NpgsqlTracingOptionsBuilder! -Npgsql.NpgsqlTracingOptionsBuilder.ConfigureCommandSpanNameProvider(System.Func? commandSpanNameProvider) -> Npgsql.NpgsqlTracingOptionsBuilder! 
-Npgsql.NpgsqlTracingOptionsBuilder.ConfigureCopyOperationEnrichmentCallback(System.Action? copyOperationEnrichmentCallback) -> Npgsql.NpgsqlTracingOptionsBuilder! -Npgsql.NpgsqlTracingOptionsBuilder.ConfigureCopyOperationFilter(System.Func? copyOperationFilter) -> Npgsql.NpgsqlTracingOptionsBuilder! -Npgsql.NpgsqlTracingOptionsBuilder.ConfigureCopyOperationSpanNameProvider(System.Func? copyOperationSpanNameProvider) -> Npgsql.NpgsqlTracingOptionsBuilder! -Npgsql.NpgsqlTracingOptionsBuilder.EnableFirstResponseEvent(bool enable = true) -> Npgsql.NpgsqlTracingOptionsBuilder! -Npgsql.NpgsqlTracingOptionsBuilder.EnablePhysicalOpenTracing(bool enable = true) -> Npgsql.NpgsqlTracingOptionsBuilder! -Npgsql.NpgsqlTypeLoadingOptionsBuilder -Npgsql.NpgsqlTypeLoadingOptionsBuilder.EnableTableCompositesLoading(bool enable = true) -> Npgsql.NpgsqlTypeLoadingOptionsBuilder! -Npgsql.NpgsqlTypeLoadingOptionsBuilder.EnableTypeLoading(bool enable = true) -> Npgsql.NpgsqlTypeLoadingOptionsBuilder! -Npgsql.NpgsqlTypeLoadingOptionsBuilder.SetTypeLoadingSchemas(params System.Collections.Generic.IEnumerable? schemas) -> Npgsql.NpgsqlTypeLoadingOptionsBuilder! -Npgsql.Replication.PgOutput.ReplicationValue.GetFieldName() -> string! 
-Npgsql.Replication.PgOutput.Messages.ParallelStreamAbortMessage -Npgsql.Replication.PgOutput.Messages.ParallelStreamAbortMessage.AbortLsn.get -> NpgsqlTypes.NpgsqlLogSequenceNumber -Npgsql.Replication.PgOutput.Messages.ParallelStreamAbortMessage.AbortTimestamp.get -> System.DateTime -Npgsql.Replication.PgOutput.PgOutputProtocolVersion -Npgsql.Replication.PgOutput.PgOutputProtocolVersion.V1 = 1 -> Npgsql.Replication.PgOutput.PgOutputProtocolVersion -Npgsql.Replication.PgOutput.PgOutputProtocolVersion.V2 = 2 -> Npgsql.Replication.PgOutput.PgOutputProtocolVersion -Npgsql.Replication.PgOutput.PgOutputProtocolVersion.V3 = 3 -> Npgsql.Replication.PgOutput.PgOutputProtocolVersion -Npgsql.Replication.PgOutput.PgOutputProtocolVersion.V4 = 4 -> Npgsql.Replication.PgOutput.PgOutputProtocolVersion -Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PgOutputReplicationOptions(string! publicationName, Npgsql.Replication.PgOutput.PgOutputProtocolVersion protocolVersion, bool? binary = null, Npgsql.Replication.PgOutput.PgOutputStreamingMode? streamingMode = null, bool? messages = null, bool? twoPhase = null) -> void -Npgsql.Replication.PgOutput.PgOutputReplicationOptions.PgOutputReplicationOptions(System.Collections.Generic.IEnumerable! publicationNames, Npgsql.Replication.PgOutput.PgOutputProtocolVersion protocolVersion, bool? binary = null, Npgsql.Replication.PgOutput.PgOutputStreamingMode? streamingMode = null, bool? messages = null, bool? twoPhase = null) -> void -Npgsql.Replication.PgOutput.PgOutputReplicationOptions.ProtocolVersion.get -> Npgsql.Replication.PgOutput.PgOutputProtocolVersion -*REMOVED*Npgsql.Replication.PgOutput.PgOutputReplicationOptions.ProtocolVersion.get -> ulong -Npgsql.Replication.PgOutput.PgOutputReplicationOptions.StreamingMode.get -> Npgsql.Replication.PgOutput.PgOutputStreamingMode? -*REMOVED*Npgsql.Replication.PgOutput.PgOutputReplicationOptions.Streaming.get -> bool? 
-Npgsql.Replication.PgOutput.PgOutputStreamingMode -Npgsql.Replication.PgOutput.PgOutputStreamingMode.Off = 0 -> Npgsql.Replication.PgOutput.PgOutputStreamingMode -Npgsql.Replication.PgOutput.PgOutputStreamingMode.On = 1 -> Npgsql.Replication.PgOutput.PgOutputStreamingMode -Npgsql.Replication.PgOutput.PgOutputStreamingMode.Parallel = 2 -> Npgsql.Replication.PgOutput.PgOutputStreamingMode -Npgsql.SslNegotiation -Npgsql.SslNegotiation.Direct = 1 -> Npgsql.SslNegotiation -Npgsql.SslNegotiation.Postgres = 0 -> Npgsql.SslNegotiation -override Npgsql.NpgsqlDataReader.GetColumnSchemaAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task!>! -override Npgsql.NpgsqlMultiHostDataSource.Clear() -> void -Npgsql.NpgsqlDataSource.ReloadTypes() -> void -Npgsql.NpgsqlDataSource.ReloadTypesAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -*REMOVED*Npgsql.NpgsqlLargeObjectManager -*REMOVED*Npgsql.NpgsqlLargeObjectManager.Create(uint preferredOid = 0) -> uint -*REMOVED*Npgsql.NpgsqlLargeObjectManager.CreateAsync(uint preferredOid, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -*REMOVED*Npgsql.NpgsqlLargeObjectManager.ExportRemote(uint oid, string! path) -> void -*REMOVED*Npgsql.NpgsqlLargeObjectManager.ExportRemoteAsync(uint oid, string! path, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -*REMOVED*Npgsql.NpgsqlLargeObjectManager.Has64BitSupport.get -> bool -*REMOVED*Npgsql.NpgsqlLargeObjectManager.ImportRemote(string! path, uint oid = 0) -> void -*REMOVED*Npgsql.NpgsqlLargeObjectManager.ImportRemoteAsync(string! 
path, uint oid, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -*REMOVED*Npgsql.NpgsqlLargeObjectManager.MaxTransferBlockSize.get -> int -*REMOVED*Npgsql.NpgsqlLargeObjectManager.MaxTransferBlockSize.set -> void -*REMOVED*Npgsql.NpgsqlLargeObjectManager.NpgsqlLargeObjectManager(Npgsql.NpgsqlConnection! connection) -> void -*REMOVED*Npgsql.NpgsqlLargeObjectManager.OpenRead(uint oid) -> Npgsql.NpgsqlLargeObjectStream! -*REMOVED*Npgsql.NpgsqlLargeObjectManager.OpenReadAsync(uint oid, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -*REMOVED*Npgsql.NpgsqlLargeObjectManager.OpenReadWrite(uint oid) -> Npgsql.NpgsqlLargeObjectStream! -*REMOVED*Npgsql.NpgsqlLargeObjectManager.OpenReadWriteAsync(uint oid, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -*REMOVED*Npgsql.NpgsqlLargeObjectManager.Unlink(uint oid) -> void -*REMOVED*Npgsql.NpgsqlLargeObjectManager.UnlinkAsync(uint oid, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -*REMOVED*Npgsql.NpgsqlLargeObjectStream -*REMOVED*Npgsql.NpgsqlLargeObjectStream.GetLengthAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -*REMOVED*Npgsql.NpgsqlLargeObjectStream.Has64BitSupport.get -> bool -*REMOVED*Npgsql.NpgsqlLargeObjectStream.SeekAsync(long offset, System.IO.SeekOrigin origin, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -*REMOVED*Npgsql.NpgsqlLargeObjectStream.SetLength(long value, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! 
-*REMOVED*Npgsql.NpgsqlDataSourceBuilder.MapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! -*REMOVED*Npgsql.NpgsqlDataSourceBuilder.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! -*REMOVED*Npgsql.NpgsqlDataSourceBuilder.MapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! -*REMOVED*Npgsql.NpgsqlDataSourceBuilder.MapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! -*REMOVED*Npgsql.NpgsqlSlimDataSourceBuilder.MapComposite(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! -*REMOVED*Npgsql.NpgsqlSlimDataSourceBuilder.MapComposite(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! -*REMOVED*Npgsql.NpgsqlSlimDataSourceBuilder.MapEnum(System.Type! clrType, string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! -*REMOVED*Npgsql.NpgsqlSlimDataSourceBuilder.MapEnum(string? pgName = null, Npgsql.INpgsqlNameTranslator? nameTranslator = null) -> Npgsql.TypeMapping.INpgsqlTypeMapper! -static NpgsqlTypes.NpgsqlInet.implicit operator NpgsqlTypes.NpgsqlInet(System.Net.IPNetwork cidr) -> NpgsqlTypes.NpgsqlInet -static readonly NpgsqlTypes.NpgsqlTsVector.Empty -> NpgsqlTypes.NpgsqlTsVector! 
-NpgsqlTypes.NpgsqlBox.Deconstruct(out double left, out double right, out double bottom, out double top) -> void -NpgsqlTypes.NpgsqlBox.Deconstruct(out double left, out double right, out double bottom, out double top, out double width, out double height) -> void -NpgsqlTypes.NpgsqlBox.Deconstruct(out NpgsqlTypes.NpgsqlPoint lowerLeft, out NpgsqlTypes.NpgsqlPoint upperRight) -> void -NpgsqlTypes.NpgsqlCircle.Deconstruct(out double x, out double y, out double radius) -> void -NpgsqlTypes.NpgsqlCircle.Deconstruct(out NpgsqlTypes.NpgsqlPoint center, out double radius) -> void -NpgsqlTypes.NpgsqlCube -NpgsqlTypes.NpgsqlCube.NpgsqlCube() -> void -NpgsqlTypes.NpgsqlCube.Dimensions.get -> int -NpgsqlTypes.NpgsqlCube.Equals(NpgsqlTypes.NpgsqlCube other) -> bool -NpgsqlTypes.NpgsqlCube.LowerLeft.get -> System.Collections.Generic.IReadOnlyList! -NpgsqlTypes.NpgsqlCube.NpgsqlCube(double coord) -> void -NpgsqlTypes.NpgsqlCube.NpgsqlCube(double lowerLeft, double upperRight) -> void -NpgsqlTypes.NpgsqlCube.NpgsqlCube(NpgsqlTypes.NpgsqlCube cube, double coord) -> void -NpgsqlTypes.NpgsqlCube.NpgsqlCube(NpgsqlTypes.NpgsqlCube cube, double lowerLeft, double upperRight) -> void -NpgsqlTypes.NpgsqlCube.NpgsqlCube(System.Collections.Generic.IEnumerable! coords) -> void -NpgsqlTypes.NpgsqlCube.NpgsqlCube(System.Collections.Generic.IEnumerable! lowerLeft, System.Collections.Generic.IEnumerable! upperRight) -> void -NpgsqlTypes.NpgsqlCube.IsPoint.get -> bool -NpgsqlTypes.NpgsqlCube.ToSubset(params int[]! indexes) -> NpgsqlTypes.NpgsqlCube -NpgsqlTypes.NpgsqlCube.UpperRight.get -> System.Collections.Generic.IReadOnlyList! 
-NpgsqlTypes.NpgsqlDbType.Cube = 63 -> NpgsqlTypes.NpgsqlDbType -*REMOVED*override Npgsql.NpgsqlLargeObjectStream.CanRead.get -> bool -*REMOVED*override Npgsql.NpgsqlLargeObjectStream.CanSeek.get -> bool -*REMOVED*override Npgsql.NpgsqlLargeObjectStream.CanTimeout.get -> bool -*REMOVED*override Npgsql.NpgsqlLargeObjectStream.CanWrite.get -> bool -*REMOVED*override Npgsql.NpgsqlLargeObjectStream.Close() -> void -*REMOVED*override Npgsql.NpgsqlLargeObjectStream.Flush() -> void -*REMOVED*override Npgsql.NpgsqlLargeObjectStream.Length.get -> long -*REMOVED*override Npgsql.NpgsqlLargeObjectStream.Position.get -> long -*REMOVED*override Npgsql.NpgsqlLargeObjectStream.Position.set -> void -*REMOVED*override Npgsql.NpgsqlLargeObjectStream.Read(byte[]! buffer, int offset, int count) -> int -*REMOVED*override Npgsql.NpgsqlLargeObjectStream.ReadAsync(byte[]! buffer, int offset, int count, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! -*REMOVED*override Npgsql.NpgsqlLargeObjectStream.Seek(long offset, System.IO.SeekOrigin origin) -> long -*REMOVED*override Npgsql.NpgsqlLargeObjectStream.SetLength(long value) -> void -*REMOVED*override Npgsql.NpgsqlLargeObjectStream.Write(byte[]! buffer, int offset, int count) -> void -*REMOVED*override Npgsql.NpgsqlLargeObjectStream.WriteAsync(byte[]! buffer, int offset, int count, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! -override NpgsqlTypes.NpgsqlCube.Equals(object? obj) -> bool -override NpgsqlTypes.NpgsqlCube.GetHashCode() -> int -override NpgsqlTypes.NpgsqlCube.ToString() -> string! 
-static NpgsqlTypes.NpgsqlCube.operator !=(NpgsqlTypes.NpgsqlCube x, NpgsqlTypes.NpgsqlCube y) -> bool -static NpgsqlTypes.NpgsqlCube.operator ==(NpgsqlTypes.NpgsqlCube x, NpgsqlTypes.NpgsqlCube y) -> bool -NpgsqlTypes.NpgsqlLine.Deconstruct(out double a, out double b, out double c) -> void -NpgsqlTypes.NpgsqlLSeg.Deconstruct(out NpgsqlTypes.NpgsqlPoint start, out NpgsqlTypes.NpgsqlPoint end) -> void -NpgsqlTypes.NpgsqlPoint.Deconstruct(out double x, out double y) -> void -NpgsqlTypes.NpgsqlTid.Deconstruct(out uint blockNumber, out ushort offsetNumber) -> void -*REMOVED*Npgsql.NpgsqlConnection.BeginTextExport(string! copyToCommand) -> System.IO.TextReader! -*REMOVED*Npgsql.NpgsqlConnection.BeginTextExportAsync(string! copyToCommand, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -*REMOVED*Npgsql.NpgsqlConnection.BeginTextImport(string! copyFromCommand) -> System.IO.TextWriter! -*REMOVED*Npgsql.NpgsqlConnection.BeginTextImportAsync(string! copyFromCommand, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! -*REMOVED*Npgsql.NpgsqlDataReader.GetColumnSchemaAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task!>! 
+#nullable enable From 404f67bf06f4ed7b612ab68f14326e8a41d92c43 Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Sat, 4 Apr 2026 20:08:07 +0200 Subject: [PATCH 738/761] Restore post-v10.0.0 API changes to Unshipped Move API changes from commits after v10.0.0 back to Unshipped, as they represent 11.0 work, not 10.0: - Remove multiplexing (#6457): 4 *REMOVED* entries - Remove large object API (#6493): 37 *REMOVED* entries - Add back parameterless ReloadTypesAsync (#6522): undo the v10.0 addition of CancellationToken overload and re-add the parameterless overload Also adjusts Shipped to correctly reflect the v10.0.0 public API. Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- src/Npgsql/PublicAPI.Shipped.txt | 41 ++++++++++++++++++++++++++++++ src/Npgsql/PublicAPI.Unshipped.txt | 41 ++++++++++++++++++++++++++++++ 2 files changed, 82 insertions(+) diff --git a/src/Npgsql/PublicAPI.Shipped.txt b/src/Npgsql/PublicAPI.Shipped.txt index 6d888f6a3f..2b80b24ca4 100644 --- a/src/Npgsql/PublicAPI.Shipped.txt +++ b/src/Npgsql/PublicAPI.Shipped.txt @@ -245,6 +245,8 @@ Npgsql.NpgsqlConnectionStringBuilder.MaxPoolSize.get -> int Npgsql.NpgsqlConnectionStringBuilder.MaxPoolSize.set -> void Npgsql.NpgsqlConnectionStringBuilder.MinPoolSize.get -> int Npgsql.NpgsqlConnectionStringBuilder.MinPoolSize.set -> void +Npgsql.NpgsqlConnectionStringBuilder.Multiplexing.get -> bool +Npgsql.NpgsqlConnectionStringBuilder.Multiplexing.set -> void Npgsql.NpgsqlConnectionStringBuilder.NoResetOnClose.get -> bool Npgsql.NpgsqlConnectionStringBuilder.NoResetOnClose.set -> void Npgsql.NpgsqlConnectionStringBuilder.NpgsqlConnectionStringBuilder() -> void @@ -306,6 +308,8 @@ Npgsql.NpgsqlConnectionStringBuilder.Username.set -> void Npgsql.NpgsqlConnectionStringBuilder.Values.get -> System.Collections.Generic.ICollection! 
Npgsql.NpgsqlConnectionStringBuilder.WriteBufferSize.get -> int Npgsql.NpgsqlConnectionStringBuilder.WriteBufferSize.set -> void +Npgsql.NpgsqlConnectionStringBuilder.WriteCoalescingBufferThresholdBytes.get -> int +Npgsql.NpgsqlConnectionStringBuilder.WriteCoalescingBufferThresholdBytes.set -> void Npgsql.NpgsqlCopyTextReader Npgsql.NpgsqlCopyTextReader.Cancel() -> void Npgsql.NpgsqlCopyTextReader.CancelAsync() -> System.Threading.Tasks.Task! @@ -402,6 +406,28 @@ Npgsql.NpgsqlException.NpgsqlException(string? message) -> void Npgsql.NpgsqlException.NpgsqlException(string? message, System.Exception? innerException) -> void Npgsql.NpgsqlFactory Npgsql.NpgsqlFactory.GetService(System.Type! serviceType) -> object? +Npgsql.NpgsqlLargeObjectManager +Npgsql.NpgsqlLargeObjectManager.Create(uint preferredOid = 0) -> uint +Npgsql.NpgsqlLargeObjectManager.CreateAsync(uint preferredOid, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +Npgsql.NpgsqlLargeObjectManager.ExportRemote(uint oid, string! path) -> void +Npgsql.NpgsqlLargeObjectManager.ExportRemoteAsync(uint oid, string! path, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +Npgsql.NpgsqlLargeObjectManager.Has64BitSupport.get -> bool +Npgsql.NpgsqlLargeObjectManager.ImportRemote(string! path, uint oid = 0) -> void +Npgsql.NpgsqlLargeObjectManager.ImportRemoteAsync(string! path, uint oid, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +Npgsql.NpgsqlLargeObjectManager.MaxTransferBlockSize.get -> int +Npgsql.NpgsqlLargeObjectManager.MaxTransferBlockSize.set -> void +Npgsql.NpgsqlLargeObjectManager.NpgsqlLargeObjectManager(Npgsql.NpgsqlConnection! connection) -> void +Npgsql.NpgsqlLargeObjectManager.OpenRead(uint oid) -> Npgsql.NpgsqlLargeObjectStream! 
+Npgsql.NpgsqlLargeObjectManager.OpenReadAsync(uint oid, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +Npgsql.NpgsqlLargeObjectManager.OpenReadWrite(uint oid) -> Npgsql.NpgsqlLargeObjectStream! +Npgsql.NpgsqlLargeObjectManager.OpenReadWriteAsync(uint oid, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +Npgsql.NpgsqlLargeObjectManager.Unlink(uint oid) -> void +Npgsql.NpgsqlLargeObjectManager.UnlinkAsync(uint oid, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +Npgsql.NpgsqlLargeObjectStream +Npgsql.NpgsqlLargeObjectStream.GetLengthAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +Npgsql.NpgsqlLargeObjectStream.Has64BitSupport.get -> bool +Npgsql.NpgsqlLargeObjectStream.SeekAsync(long offset, System.IO.SeekOrigin origin, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +Npgsql.NpgsqlLargeObjectStream.SetLength(long value, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! Npgsql.NpgsqlLoggingConfiguration Npgsql.NpgsqlMetricsOptions Npgsql.NpgsqlMetricsOptions.NpgsqlMetricsOptions() -> void @@ -1738,6 +1764,21 @@ override Npgsql.NpgsqlFactory.CreateConnectionStringBuilder() -> System.Data.Com override Npgsql.NpgsqlFactory.CreateDataAdapter() -> System.Data.Common.DbDataAdapter! override Npgsql.NpgsqlFactory.CreateDataSource(string! connectionString) -> System.Data.Common.DbDataSource! override Npgsql.NpgsqlFactory.CreateParameter() -> System.Data.Common.DbParameter! 
+override Npgsql.NpgsqlLargeObjectStream.CanRead.get -> bool +override Npgsql.NpgsqlLargeObjectStream.CanSeek.get -> bool +override Npgsql.NpgsqlLargeObjectStream.CanTimeout.get -> bool +override Npgsql.NpgsqlLargeObjectStream.CanWrite.get -> bool +override Npgsql.NpgsqlLargeObjectStream.Close() -> void +override Npgsql.NpgsqlLargeObjectStream.Flush() -> void +override Npgsql.NpgsqlLargeObjectStream.Length.get -> long +override Npgsql.NpgsqlLargeObjectStream.Position.get -> long +override Npgsql.NpgsqlLargeObjectStream.Position.set -> void +override Npgsql.NpgsqlLargeObjectStream.Read(byte[]! buffer, int offset, int count) -> int +override Npgsql.NpgsqlLargeObjectStream.ReadAsync(byte[]! buffer, int offset, int count, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! +override Npgsql.NpgsqlLargeObjectStream.Seek(long offset, System.IO.SeekOrigin origin) -> long +override Npgsql.NpgsqlLargeObjectStream.SetLength(long value) -> void +override Npgsql.NpgsqlLargeObjectStream.Write(byte[]! buffer, int offset, int count) -> void +override Npgsql.NpgsqlLargeObjectStream.WriteAsync(byte[]! buffer, int offset, int count, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! 
override Npgsql.NpgsqlMultiHostDataSource.Clear() -> void override Npgsql.NpgsqlNestedDataReader.Close() -> void override Npgsql.NpgsqlNestedDataReader.Depth.get -> int diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index 7dc5c58110..71b0245984 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -1 +1,42 @@ #nullable enable +*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.Multiplexing.get -> bool +*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.Multiplexing.set -> void +*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.WriteCoalescingBufferThresholdBytes.get -> int +*REMOVED*Npgsql.NpgsqlConnectionStringBuilder.WriteCoalescingBufferThresholdBytes.set -> void +*REMOVED*Npgsql.NpgsqlLargeObjectManager +*REMOVED*Npgsql.NpgsqlLargeObjectManager.Create(uint preferredOid = 0) -> uint +*REMOVED*Npgsql.NpgsqlLargeObjectManager.CreateAsync(uint preferredOid, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +*REMOVED*Npgsql.NpgsqlLargeObjectManager.ExportRemote(uint oid, string! path) -> void +*REMOVED*Npgsql.NpgsqlLargeObjectManager.ExportRemoteAsync(uint oid, string! path, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +*REMOVED*Npgsql.NpgsqlLargeObjectManager.Has64BitSupport.get -> bool +*REMOVED*Npgsql.NpgsqlLargeObjectManager.ImportRemote(string! path, uint oid = 0) -> void +*REMOVED*Npgsql.NpgsqlLargeObjectManager.ImportRemoteAsync(string! path, uint oid, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +*REMOVED*Npgsql.NpgsqlLargeObjectManager.MaxTransferBlockSize.get -> int +*REMOVED*Npgsql.NpgsqlLargeObjectManager.MaxTransferBlockSize.set -> void +*REMOVED*Npgsql.NpgsqlLargeObjectManager.NpgsqlLargeObjectManager(Npgsql.NpgsqlConnection! 
connection) -> void +*REMOVED*Npgsql.NpgsqlLargeObjectManager.OpenRead(uint oid) -> Npgsql.NpgsqlLargeObjectStream! +*REMOVED*Npgsql.NpgsqlLargeObjectManager.OpenReadAsync(uint oid, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +*REMOVED*Npgsql.NpgsqlLargeObjectManager.OpenReadWrite(uint oid) -> Npgsql.NpgsqlLargeObjectStream! +*REMOVED*Npgsql.NpgsqlLargeObjectManager.OpenReadWriteAsync(uint oid, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +*REMOVED*Npgsql.NpgsqlLargeObjectManager.Unlink(uint oid) -> void +*REMOVED*Npgsql.NpgsqlLargeObjectManager.UnlinkAsync(uint oid, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +*REMOVED*Npgsql.NpgsqlLargeObjectStream +*REMOVED*Npgsql.NpgsqlLargeObjectStream.GetLengthAsync(System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +*REMOVED*Npgsql.NpgsqlLargeObjectStream.Has64BitSupport.get -> bool +*REMOVED*Npgsql.NpgsqlLargeObjectStream.SeekAsync(long offset, System.IO.SeekOrigin origin, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) -> System.Threading.Tasks.Task! +*REMOVED*Npgsql.NpgsqlLargeObjectStream.SetLength(long value, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! 
+*REMOVED*override Npgsql.NpgsqlLargeObjectStream.CanRead.get -> bool +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.CanSeek.get -> bool +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.CanTimeout.get -> bool +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.CanWrite.get -> bool +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.Close() -> void +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.Flush() -> void +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.Length.get -> long +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.Position.get -> long +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.Position.set -> void +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.Read(byte[]! buffer, int offset, int count) -> int +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.ReadAsync(byte[]! buffer, int offset, int count, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.Seek(long offset, System.IO.SeekOrigin origin) -> long +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.SetLength(long value) -> void +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.Write(byte[]! buffer, int offset, int count) -> void +*REMOVED*override Npgsql.NpgsqlLargeObjectStream.WriteAsync(byte[]! buffer, int offset, int count, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! 
From bef867b5880dd6f80a498783ecd427215221eaee Mon Sep 17 00:00:00 2001 From: duivell <122130310+duivell@users.noreply.github.com> Date: Tue, 7 Apr 2026 10:11:26 +0100 Subject: [PATCH 739/761] Optimize creation of multirange as List (#6530) --- src/Npgsql/Internal/Converters/MultirangeConverter.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Npgsql/Internal/Converters/MultirangeConverter.cs b/src/Npgsql/Internal/Converters/MultirangeConverter.cs index 36ae35a11c..f7811e298a 100644 --- a/src/Npgsql/Internal/Converters/MultirangeConverter.cs +++ b/src/Npgsql/Internal/Converters/MultirangeConverter.cs @@ -33,7 +33,7 @@ public async ValueTask Read(bool async, PgReader reader, CancellationToken ca if (reader.ShouldBuffer(sizeof(int))) await reader.Buffer(async, sizeof(int), cancellationToken).ConfigureAwait(false); var numRanges = reader.ReadInt32(); - var multirange = (T)(object)(typeof(T).IsArray ? new TRange[numRanges] : new List()); + var multirange = (T)(object)(typeof(T).IsArray ? 
new TRange[numRanges] : new List(numRanges)); for (var i = 0; i < numRanges; i++) { From 97f267b10619bf45591b040e04d39a58f9c46692 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Tue, 7 Apr 2026 11:57:30 +0200 Subject: [PATCH 740/761] Move Stream mapping intrinsic (#5480) --- .../BackendMessages/RowDescriptionMessage.cs | 10 +- .../Converters/Primitive/ByteaConverters.cs | 59 ------- .../Internal/Converters/StreamConverter.cs | 73 +++++++++ src/Npgsql/Internal/PgReader.cs | 30 ++-- .../AdoTypeInfoResolverFactory.cs | 19 ++- src/Npgsql/NpgsqlDataReader.cs | 148 ++++++++---------- .../Replication/PgOutput/ReplicationValue.cs | 125 ++++++++------- src/Npgsql/ThrowHelper.cs | 2 +- test/Npgsql.Tests/ReaderTests.cs | 9 +- 9 files changed, 247 insertions(+), 228 deletions(-) create mode 100644 src/Npgsql/Internal/Converters/StreamConverter.cs diff --git a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs index fd04ddfdaf..f27ba80b8d 100644 --- a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs +++ b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs @@ -124,14 +124,14 @@ public FieldDescription this[int ordinal] [MethodImpl(MethodImplOptions.AggressiveInlining)] get { - if ((uint)ordinal < (uint)Count) + if ((uint)ordinal >= (uint)Count) { - Debug.Assert(_fields[ordinal] != null); - return _fields[ordinal]!; + ThrowHelper.ThrowIndexOutOfRangeException("Ordinal is out of range, value must be between 0 and {0} (exclusive).", Count); + return default!; } - ThrowHelper.ThrowIndexOutOfRangeException("Ordinal must be between 0 and " + (Count - 1)); - return default!; + Debug.Assert(_fields[ordinal] != null); + return _fields[ordinal]!; } } diff --git a/src/Npgsql/Internal/Converters/Primitive/ByteaConverters.cs b/src/Npgsql/Internal/Converters/Primitive/ByteaConverters.cs index fce9fa93bd..77ffdecc46 100644 --- a/src/Npgsql/Internal/Converters/Primitive/ByteaConverters.cs +++ 
b/src/Npgsql/Internal/Converters/Primitive/ByteaConverters.cs @@ -88,62 +88,3 @@ sealed class MemoryByteaConverter : ByteaConverters> protected override Memory ConvertTo(Memory value) => value; protected override Memory ConvertFrom(Memory value) => value; } - -sealed class StreamByteaConverter : PgStreamingConverter -{ - public override Stream Read(PgReader reader) - => throw new NotSupportedException("Handled by generic stream support in NpgsqlDataReader"); - - public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) - => throw new NotSupportedException("Handled by generic stream support in NpgsqlDataReader"); - - public override Size GetSize(SizeContext context, Stream value, ref object? writeState) - { - if (value.CanSeek) - return checked((int)(value.Length - value.Position)); - - var memoryStream = new MemoryStream(); - value.CopyTo(memoryStream); - writeState = memoryStream; - return checked((int)memoryStream.Length); - } - - public override void Write(PgWriter writer, Stream value) - { - if (writer.Current.WriteState is not null) - { - if (!((MemoryStream)writer.Current.WriteState!).TryGetBuffer(out var writeStateSegment)) - throw new InvalidOperationException(); - - writer.WriteBytes(writeStateSegment.AsSpan()); - return; - } - - // Non-derived MemoryStream fast path - if (value is MemoryStream memoryStream && memoryStream.TryGetBuffer(out var segment)) - writer.WriteBytes(segment.AsSpan((int)value.Position)); - else - value.CopyTo(writer.GetStream()); - } - - public override ValueTask WriteAsync(PgWriter writer, Stream value, CancellationToken cancellationToken = default) - { - if (writer.Current.WriteState is not null) - { - if (!((MemoryStream)writer.Current.WriteState!).TryGetBuffer(out var writeStateSegment)) - throw new InvalidOperationException(); - - return writer.WriteBytesAsync(writeStateSegment.AsMemory(), cancellationToken); - } - - // Non-derived MemoryStream fast path - if (value is MemoryStream 
memoryStream && memoryStream.TryGetBuffer(out var segment)) - { - return writer.WriteBytesAsync(segment.AsMemory((int)value.Position), cancellationToken); - } - else - { - return new ValueTask(value.CopyToAsync(writer.GetStream(), cancellationToken)); - } - } -} diff --git a/src/Npgsql/Internal/Converters/StreamConverter.cs b/src/Npgsql/Internal/Converters/StreamConverter.cs new file mode 100644 index 0000000000..4f89ffa11d --- /dev/null +++ b/src/Npgsql/Internal/Converters/StreamConverter.cs @@ -0,0 +1,73 @@ +using System; +using System.IO; +using System.Threading; +using System.Threading.Tasks; + +namespace Npgsql.Internal.Converters; + +sealed class StreamConverter(bool supportsTextFormat) : PgStreamingConverter +{ + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.None; + return supportsTextFormat + ? format is DataFormat.Text or DataFormat.Binary + : format is DataFormat.Binary; + } + + public override Stream Read(PgReader reader) + => reader.GetStream(); + + public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + => new(reader.GetStream()); + + public override Size GetSize(SizeContext context, Stream value, ref object? 
writeState) + { + if (value.CanSeek) + return checked((int)(value.Length - value.Position)); + + var memoryStream = new MemoryStream(); + value.CopyTo(memoryStream); + writeState = memoryStream; + return checked((int)memoryStream.Length); + } + + public override void Write(PgWriter writer, Stream value) + { + if (writer.Current.WriteState is not null) + { + if (!((MemoryStream)writer.Current.WriteState!).TryGetBuffer(out var writeStateSegment)) + throw new InvalidOperationException(); + + writer.WriteBytes(writeStateSegment.AsSpan()); + return; + } + + // Non-derived MemoryStream fast path + if (value is MemoryStream memoryStream && memoryStream.TryGetBuffer(out var segment)) + writer.WriteBytes(segment.AsSpan((int)value.Position)); + else + value.CopyTo(writer.GetStream()); + } + + public override ValueTask WriteAsync(PgWriter writer, Stream value, CancellationToken cancellationToken = default) + { + if (writer.Current.WriteState is not null) + { + if (!((MemoryStream)writer.Current.WriteState!).TryGetBuffer(out var writeStateSegment)) + throw new InvalidOperationException(); + + return writer.WriteBytesAsync(writeStateSegment.AsMemory(), cancellationToken); + } + + // Non-derived MemoryStream fast path + if (value is MemoryStream memoryStream && memoryStream.TryGetBuffer(out var segment)) + { + return writer.WriteBytesAsync(segment.AsMemory((int)value.Position), cancellationToken); + } + else + { + return new ValueTask(value.CopyToAsync(writer.GetStream(), cancellationToken)); + } + } +} diff --git a/src/Npgsql/Internal/PgReader.cs b/src/Npgsql/Internal/PgReader.cs index ee117aa874..6a5d7efd69 100644 --- a/src/Npgsql/Internal/PgReader.cs +++ b/src/Npgsql/Internal/PgReader.cs @@ -84,12 +84,22 @@ internal PgReader(NpgsqlReadBuffer buffer) internal bool Resumable => _resumable; public bool IsResumed => Resumable && CurrentOffset > 0; + internal bool StreamCanSeek { get; set; } + ArrayPool ArrayPool => ArrayPool.Shared; // Here for testing purposes internal void 
BreakConnection() => throw _buffer.Connector.Break(new Exception("Broken")); - internal void Revert(int size, int startPos, Size bufferRequirement) + internal void Reset() + { + if (Initialized) + ThrowHelper.ThrowInvalidOperationException("Cannot reset an initialized reader."); + + StreamCanSeek = false; + } + + internal void RevertNestedReadScope(int size, int startPos, Size bufferRequirement) { if (startPos > FieldOffset) ThrowHelper.ThrowArgumentOutOfRangeException(nameof(startPos), "Can't revert forwardly"); @@ -195,11 +205,9 @@ public string ReadNullTerminatedString(Encoding encoding) CheckBounds(0); return result; } - public Stream GetStream(int? length = null) => GetColumnStream(false, length); - - internal Stream GetStream(bool canSeek, int? length = null) => GetColumnStream(canSeek, length); - NpgsqlReadBuffer.ColumnStream GetColumnStream(bool canSeek = false, int? length = null) + public Stream GetStream(int? length = null) => GetColumnStream(length); + NpgsqlReadBuffer.ColumnStream GetColumnStream(int? length = null) { if (length > CurrentRemaining) ThrowHelper.ThrowArgumentOutOfRangeException(nameof(length), "Length is larger than the current remaining value size"); @@ -211,7 +219,7 @@ NpgsqlReadBuffer.ColumnStream GetColumnStream(bool canSeek = false, int? length length ??= CurrentRemaining; CheckBounds(length.GetValueOrDefault()); - return _userActiveStream = _buffer.CreateStream(length.GetValueOrDefault(), canSeek && length <= _buffer.ReadBytesLeft, consumeOnDispose: false); + return _userActiveStream = _buffer.CreateStream(length.GetValueOrDefault(), StreamCanSeek && length <= _buffer.ReadBytesLeft, consumeOnDispose: false); } public TextReader GetTextReader(Encoding encoding) @@ -236,7 +244,7 @@ async ValueTask GetTextReader(bool async, Encoding encoding, Cancell _preparedTextReader.Init( encoding.GetString(async ? 
await ReadBytesAsync(CurrentRemaining, cancellationToken).ConfigureAwait(false) - : ReadBytes(CurrentRemaining)), GetColumnStream(canSeek: false, 0)); + : ReadBytes(CurrentRemaining)), GetColumnStream(0)); return _preparedTextReader; } @@ -585,7 +593,7 @@ public async ValueTask ConsumeAsync(int? count = null, CancellationToken cancell var origOffset = FieldOffset; // A breaking exception unwind from a nested scope should not try to consume its remaining data. if (!_buffer.Connector.IsBroken) - await _buffer.Skip(async:true, remaining).ConfigureAwait(false); + await _buffer.Skip(async: true, remaining).ConfigureAwait(false); Debug.Assert(FieldRemaining == FieldSize - origOffset - remaining); } @@ -595,7 +603,7 @@ public async ValueTask ConsumeAsync(int? count = null, CancellationToken cancell internal void ThrowIfStreamActive() { if (StreamActive) - ThrowHelper.ThrowInvalidOperationException("A stream is already open for this reader"); + ThrowHelper.ThrowInvalidOperationException("A stream is still open for this reader"); } [MethodImpl(MethodImplOptions.NoInlining)] @@ -820,13 +828,13 @@ public ValueTask DisposeAsync() _reader.Consume(); } - _reader.Revert(_previousSize, _previousStartPos, _previousBufferRequirement); + _reader.RevertNestedReadScope(_previousSize, _previousStartPos, _previousBufferRequirement); return new(); static async ValueTask AsyncCore(PgReader reader, int previousSize, int previousStartPos, Size previousBufferRequirement) { await reader.ConsumeAsync().ConfigureAwait(false); - reader.Revert(previousSize, previousStartPos, previousBufferRequirement); + reader.RevertNestedReadScope(previousSize, previousStartPos, previousBufferRequirement); } } } diff --git a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs index 7bd4c2ca24..39bea5f3fb 100644 --- a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs +++ 
b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs @@ -32,10 +32,20 @@ class Resolver : IPgTypeInfoResolver var info = Mappings.Find(type, dataTypeName, options); if (info is null && dataTypeName is not null) info = GetEnumTypeInfo(type, dataTypeName.GetValueOrDefault(), options); + if (info is null && type is not null && dataTypeName is not null) + info = GetStreamTypeInfo(type, dataTypeName.GetValueOrDefault(), options); return info; } + static PgTypeInfo? GetStreamTypeInfo(Type type, DataTypeName dataTypeName, PgSerializerOptions options) + { + if (type != typeof(Stream)) + return null; + + return new PgTypeInfo(options, new StreamConverter(supportsTextFormat: true), dataTypeName) { SupportsWriting = false }; + } + static PgTypeInfo? GetEnumTypeInfo(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) { if (type is not null && type != typeof(object) && type != typeof(string) @@ -87,7 +97,7 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) static (options, mapping, _) => mapping.CreateInfo(options, new ReadOnlyMemoryByteaConverter()), MatchRequirement.DataTypeName); mappings.AddType(DataTypeNames.Text, - static (options, mapping, _) => new PgTypeInfo(options, new StreamByteaConverter(), new DataTypeName(mapping.DataTypeName), unboxedType: mapping.Type != typeof(Stream) ? mapping.Type : null), + static (options, mapping, _) => new PgTypeInfo(options, new StreamConverter(supportsTextFormat: true), new DataTypeName(mapping.DataTypeName), unboxedType: mapping.Type != typeof(Stream) ? mapping.Type : null), mapping => mapping with { MatchRequirement = MatchRequirement.DataTypeName, TypeMatchPredicate = type => typeof(Stream).IsAssignableFrom(type) }); //Special mappings, these have no corresponding array mapping. 
mappings.AddType(DataTypeNames.Text, @@ -114,7 +124,7 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) static (options, mapping, _) => mapping.CreateInfo(options, new ReadOnlyMemoryByteaConverter()), MatchRequirement.DataTypeName); mappings.AddType(dataTypeName, - static (options, mapping, _) => new PgTypeInfo(options, new StreamByteaConverter(), new DataTypeName(mapping.DataTypeName), unboxedType: mapping.Type != typeof(Stream) ? mapping.Type : null), + static (options, mapping, _) => new PgTypeInfo(options, new StreamConverter(supportsTextFormat: true), new DataTypeName(mapping.DataTypeName), unboxedType: mapping.Type != typeof(Stream) ? mapping.Type : null), mapping => mapping with { MatchRequirement = MatchRequirement.DataTypeName, TypeMatchPredicate = type => typeof(Stream).IsAssignableFrom(type) }); //Special mappings, these have no corresponding array mapping. mappings.AddType(dataTypeName, @@ -138,7 +148,7 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter>(jsonbVersion, new ReadOnlyMemoryByteaConverter())), MatchRequirement.DataTypeName); mappings.AddType(DataTypeNames.Jsonb, - static (options, mapping, _) => new PgTypeInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new StreamByteaConverter()), new DataTypeName(mapping.DataTypeName), unboxedType: mapping.Type != typeof(Stream) ? mapping.Type : null), + static (options, mapping, _) => new PgTypeInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new StreamConverter(supportsTextFormat: true)), new DataTypeName(mapping.DataTypeName), unboxedType: mapping.Type != typeof(Stream) ? mapping.Type : null), mapping => mapping with { MatchRequirement = MatchRequirement.DataTypeName, TypeMatchPredicate = type => typeof(Stream).IsAssignableFrom(type) }); //Special mappings, these have no corresponding array mapping. 
mappings.AddType(DataTypeNames.Jsonb, @@ -166,7 +176,8 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) mappings.AddStructType>(DataTypeNames.Bytea, static (options, mapping, _) => mapping.CreateInfo(options, new ReadOnlyMemoryByteaConverter())); mappings.AddType(DataTypeNames.Bytea, - static (options, mapping, _) => new PgTypeInfo(options, new StreamByteaConverter(), new DataTypeName(mapping.DataTypeName), unboxedType: mapping.Type != typeof(Stream) ? mapping.Type : null), + // TODO handling bytea textually would require conversions to hex strings, so currently we don't support it. + static (options, mapping, _) => new PgTypeInfo(options, new StreamConverter(supportsTextFormat: false), new DataTypeName(mapping.DataTypeName), unboxedType: mapping.Type != typeof(Stream) ? mapping.Type : null), mapping => mapping with { TypeMatchPredicate = type => typeof(Stream).IsAssignableFrom(type) }); // Varbit diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 42237e61ae..6f7831ede5 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -30,6 +30,7 @@ namespace Npgsql; public sealed class NpgsqlDataReader : DbDataReader, IDbColumnSchemaGenerator #pragma warning restore CA1010 { + const int DbNullSentinel = -1; static readonly Task TrueTask = Task.FromResult(true); static readonly Task FalseTask = Task.FromResult(false); @@ -790,6 +791,9 @@ internal void ProcessMessage(IBackendMessage msg) // (see #2003) if (!ReferenceEquals(Buffer, Connector.ReadBuffer)) Buffer = Connector.ReadBuffer; + + Buffer.PgReader.StreamCanSeek = !_isSequential; + // We assume that the row's number of columns is identical to the description's var numColumns = Buffer.ReadInt16(); if (ColumnCount != numColumns) @@ -1188,6 +1192,12 @@ internal async Task Cleanup(bool async, bool connectionClosing = false, bool isD ArrayPool.Shared.Return(cache, clearArray: true); } + // Drop any reference to a potential oversized 
buffer. + Buffer = null!; + // Reset reader so the next command doesn't inherit our setup. + if (!Connector.IsBroken) + Connector.ReadBuffer.PgReader.Reset(); + State = ReaderState.Closed; Command.State = CommandState.Idle; Connector.CurrentReader = null; @@ -1365,7 +1375,7 @@ public override int GetValues(object[] values) if (field.DataFormat is DataFormat.Text || (elementType.InternalName != "record" && compositeType == null)) ThrowHelper.ThrowInvalidCastException("GetData() not supported for type " + field.TypeDisplayName); - if (SeekToColumn(ordinal, field.DataFormat, resumableOp: true) is -1) + if (SeekToColumn(ordinal, field.DataFormat, resumableOp: true) is DbNullSentinel) ThrowHelper.ThrowInvalidCastException_NoValue(field); Debug.Assert(!PgReader.NestedInitialized, "Unexpected nested read active, Seek(0) would seek to the start of the nested data."); @@ -1413,7 +1423,7 @@ public override long GetBytes(int ordinal, long dataOffset, byte[]? buffer, int if (buffer != null && (length < 0 || length > buffer.Length - bufferOffset)) ThrowHelper.ThrowIndexOutOfRangeException("bufferOffset must be between 0 and {0}", buffer.Length - bufferOffset); - if (SeekToColumn(ordinal, field.DataFormat, resumableOp: true) is var columnLength && columnLength is -1) + if (SeekToColumn(ordinal, field.DataFormat, resumableOp: true) is var columnLength && columnLength is DbNullSentinel) ThrowHelper.ThrowInvalidCastException_NoValue(field); if (buffer is null) @@ -1471,7 +1481,7 @@ public override long GetChars(int ordinal, long dataOffset, char[]? buffer, int ThrowIfNotInResult(); // Check whether we have a GetChars implementation for this column type. 
- var field = GetInfo(ordinal, typeof(GetChars), out var converter, out var bufferRequirement, out var asObject); + var converter = GetInfo(ordinal, typeof(GetChars), out var dataFormat, out var bufferRequirement, out var asObject); if (dataOffset is < 0 or > int.MaxValue) ThrowHelper.ThrowArgumentOutOfRangeException(nameof(dataOffset), "dataOffset must be between 0 and {0}", int.MaxValue); @@ -1480,7 +1490,7 @@ public override long GetChars(int ordinal, long dataOffset, char[]? buffer, int if (buffer != null && (length < 0 || length > buffer.Length - bufferOffset)) ThrowHelper.ThrowIndexOutOfRangeException("bufferOffset must be between 0 and {0}", buffer.Length - bufferOffset); - if (SeekToColumn(ordinal, field, resumableOp: true) is -1) + if (SeekToColumn(ordinal, dataFormat, resumableOp: true) is DbNullSentinel) ThrowHelper.ThrowInvalidCastException_NoValue(RowDescription[ordinal]); var reader = PgReader; @@ -1536,48 +1546,35 @@ public Task GetTextReaderAsync(int ordinal, CancellationToken cancel public override Task GetFieldValueAsync(int ordinal, CancellationToken cancellationToken) { // As the row is buffered we know the column is too - no I/O will take place - if (_isRowBuffered) - return Task.FromResult(GetFieldValueCore(ordinal)); - - // The only statically mapped converter, it always exists. 
- if (typeof(T) == typeof(Stream)) - return GetStream(ordinal, cancellationToken); + if (!_isRowBuffered) + return Core(ordinal, cancellationToken); - return Core(ordinal, cancellationToken).AsTask(); + try + { + return Task.FromResult(GetFieldValueCore(ordinal)); + } + catch (Exception ex) + { + return Task.FromException(ex); + } - async ValueTask Core(int ordinal, CancellationToken cancellationToken) + async Task Core(int ordinal, CancellationToken cancellationToken) { ThrowIfNotInResult(); - var field = GetInfo(ordinal, typeof(T), out var converter, out var bufferRequirement, out var asObject); + var converter = GetInfo(ordinal, typeof(T), out var dataFormat, out var bufferRequirement, out var asObject); using var registration = Connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); - if (await SeekToColumnAsync(ordinal, field).ConfigureAwait(false) is -1) + if (await SeekToColumnAsync(ordinal, dataFormat).ConfigureAwait(false) is DbNullSentinel) return DbNullValueOrThrow(ordinal); - if (typeof(T) == typeof(TextReader)) - PgReader.ThrowIfStreamActive(); - - Debug.Assert(asObject || converter is PgConverter); - await PgReader.StartReadAsync(bufferRequirement, cancellationToken).ConfigureAwait(false); + var reader = PgReader; + await reader.StartReadAsync(bufferRequirement, cancellationToken).ConfigureAwait(false); var result = asObject - ? (T)await converter.ReadAsObjectAsync(PgReader, cancellationToken).ConfigureAwait(false) - : await converter.UnsafeDowncast().ReadAsync(PgReader, cancellationToken).ConfigureAwait(false); - await PgReader.EndReadAsync().ConfigureAwait(false); + ? 
(T)await converter.ReadAsObjectAsync(reader, cancellationToken).ConfigureAwait(false) + : await converter.UnsafeDowncast().ReadAsync(reader, cancellationToken).ConfigureAwait(false); + await reader.EndReadAsync().ConfigureAwait(false); return result; } - - async Task GetStream(int ordinal, CancellationToken cancellationToken) - { - using var registration = Connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); - - var field = GetDefaultInfo(ordinal, out _, out _); - PgReader.ThrowIfStreamActive(); - - if (await SeekToColumnAsync(ordinal, field).ConfigureAwait(false) is -1) - return DbNullValueOrThrow(ordinal); - - return (T)(object)PgReader.GetStream(canSeek: !_isSequential); - } } /// @@ -1591,38 +1588,18 @@ async Task GetStream(int ordinal, CancellationToken cancellationToken) T GetFieldValueCore(int ordinal) { ThrowIfNotInResult(); + var converter = GetInfo(ordinal, typeof(T), out var dataFormat, out var bufferRequirement, out var asObject); - // The only statically mapped converter, it always exists. - if (typeof(T) == typeof(Stream)) - return GetStream(ordinal); - - var field = GetInfo(ordinal, typeof(T), out var converter, out var bufferRequirement, out var asObject); - - if (typeof(T) == typeof(TextReader)) - PgReader.ThrowIfStreamActive(); - - if (SeekToColumn(ordinal, field) is -1) + if (SeekToColumn(ordinal, dataFormat) is DbNullSentinel) return DbNullValueOrThrow(ordinal); - Debug.Assert(asObject || converter is PgConverter); - PgReader.StartRead(bufferRequirement); + var reader = PgReader; + reader.StartRead(bufferRequirement); var result = asObject - ? (T)converter.ReadAsObject(PgReader) - : converter.UnsafeDowncast().Read(PgReader); - PgReader.EndRead(); + ? 
(T)converter.ReadAsObject(reader) + : converter.UnsafeDowncast().Read(reader); + reader.EndRead(); return result; - - [MethodImpl(MethodImplOptions.NoInlining)] - T GetStream(int ordinal) - { - var field = GetDefaultInfo(ordinal, out _, out _); - PgReader.ThrowIfStreamActive(); - - if (SeekToColumn(ordinal, field) is -1) - return DbNullValueOrThrow(ordinal); - - return (T)(object)PgReader.GetStream(canSeek: !_isSequential); - } } #endregion @@ -1637,13 +1614,14 @@ T GetStream(int ordinal) public override object GetValue(int ordinal) { ThrowIfNotInResult(); - var field = GetDefaultInfo(ordinal, out var converter, out var bufferRequirement); - if (SeekToColumn(ordinal, field) is -1) + var format = GetDefaultInfo(ordinal, out var converter, out var bufferRequirement); + if (SeekToColumn(ordinal, format) is DbNullSentinel) return DBNull.Value; - PgReader.StartRead(bufferRequirement); - var result = converter.ReadAsObject(PgReader); - PgReader.EndRead(); + var reader = PgReader; + reader.StartRead(bufferRequirement); + var result = converter.ReadAsObject(reader); + reader.EndRead(); return result; } @@ -1667,7 +1645,7 @@ public override object GetValue(int ordinal) public override bool IsDBNull(int ordinal) { ThrowIfNotInResult(); - return SeekToColumn(ordinal, RowDescription[ordinal].DataFormat, resumableOp: true) is -1; + return SeekToColumn(ordinal, RowDescription[ordinal].DataFormat, resumableOp: true) is DbNullSentinel; } /// @@ -1690,7 +1668,7 @@ async Task Core(int ordinal, CancellationToken cancellationToken) { ThrowIfNotInResult(); using var registration = Connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); - return await SeekToColumnAsync(ordinal, RowDescription[ordinal].DataFormat, resumableOp: true).ConfigureAwait(false) is -1; + return await SeekToColumnAsync(ordinal, RowDescription[ordinal].DataFormat, resumableOp: true).ConfigureAwait(false) is DbNullSentinel; } } @@ -1849,7 +1827,7 @@ Task> GetColumnSchema(bool 
async, CancellationToken can row["ColumnName"] = column.ColumnName; row["ColumnOrdinal"] = column.ColumnOrdinal ?? -1; - row["ColumnSize"] = column.ColumnSize ?? -1; + row["ColumnSize"] = column.ColumnSize ?? DbNullSentinel; row["NumericPrecision"] = column.NumericPrecision ?? 0; row["NumericScale"] = column.NumericScale ?? 0; row["IsUnique"] = column.IsUnique == true; @@ -1954,7 +1932,7 @@ int BufferSeekToColumn(int column, int ordinal, bool allowIO) if (allowIO) buffer.Ensure(sizeof(int)); columnLength = buffer.ReadInt32(); - Debug.Assert(columnLength >= -1); + Debug.Assert(columnLength is DbNullSentinel or >= 0); } while (++_column < ordinal); return columnLength; @@ -1968,7 +1946,7 @@ int SeekBackwards(int ordinal) (buffer.ReadPosition, var columnLength) = columns.Count is 0 ? (_columnsStartPos, 0) - : columns[Math.Min(columns.Count -1, ordinal)]; + : columns[Math.Min(columns.Count - 1, ordinal)]; while (columns.Count <= ordinal) { @@ -1999,7 +1977,7 @@ async ValueTask Core(int ordinal) await buffer.EnsureAsync(sizeof(int)).ConfigureAwait(false); columnLength = buffer.ReadInt32(); - Debug.Assert(columnLength >= -1); + Debug.Assert(columnLength is DbNullSentinel or >= 0); } while (++_column < ordinal); return columnLength; @@ -2036,7 +2014,7 @@ async Task ConsumeRowSequential(bool async) await buffer.Ensure(4, async).ConfigureAwait(false); var columnLength = buffer.ReadInt32(); _column++; - Debug.Assert(columnLength >= -1); + Debug.Assert(columnLength >= DbNullSentinel); if (columnLength > 0) await buffer.Skip(async, columnLength).ConfigureAwait(false); } @@ -2070,10 +2048,14 @@ T DbNullValueOrThrow(int ordinal) } [MethodImpl(MethodImplOptions.AggressiveInlining)] - DataFormat GetInfo(int ordinal, Type type, out PgConverter converter, out Size bufferRequirement, out bool asObject) + PgConverter GetInfo(int ordinal, Type type, out DataFormat format, out Size bufferRequirement, out bool asObject) { - if ((uint)ordinal > (uint)ColumnCount) - 
ThrowHelper.ThrowIndexOutOfRangeException("Ordinal must be between 0 and " + (ColumnCount - 1)); + if ((uint)ordinal >= (uint)ColumnCount) + ThrowHelper.ThrowIndexOutOfRangeException("Ordinal is out of range, value must be between 0 and {0} (exclusive).", ColumnCount); + + // This may have been a stream left open by GetChars or GetBytes, if so ignore. + if (PgReader is { Initialized: true, Resumable: false }) + PgReader.ThrowIfStreamActive(); ref var info = ref ColumnInfoCache![ordinal]; @@ -2081,23 +2063,23 @@ DataFormat GetInfo(int ordinal, Type type, out PgConverter converter, out Size b if (info.ConverterInfo.TypeToConvert == type) { - converter = info.ConverterInfo.Converter; + format = info.DataFormat; bufferRequirement = info.ConverterInfo.BufferRequirement; asObject = info.AsObject; - return info.DataFormat; + return info.ConverterInfo.Converter; } - return Slow(ref info, out converter, out bufferRequirement, out asObject); + return Slow(ref info, out format, out bufferRequirement, out asObject); [MethodImpl(MethodImplOptions.NoInlining)] - DataFormat Slow(ref ColumnInfo info, out PgConverter converter, out Size bufferRequirement, out bool asObject) + PgConverter Slow(ref ColumnInfo info, out DataFormat format, out Size bufferRequirement, out bool asObject) { var field = RowDescription![ordinal]; field.GetInfo(type, ref info); - converter = info.ConverterInfo.Converter; + format = field.DataFormat; bufferRequirement = info.ConverterInfo.BufferRequirement; asObject = info.AsObject; - return field.DataFormat; + return info.ConverterInfo.Converter; } } diff --git a/src/Npgsql/Replication/PgOutput/ReplicationValue.cs b/src/Npgsql/Replication/PgOutput/ReplicationValue.cs index 5f7d76b418..62ab5293c3 100644 --- a/src/Npgsql/Replication/PgOutput/ReplicationValue.cs +++ b/src/Npgsql/Replication/PgOutput/ReplicationValue.cs @@ -90,9 +90,44 @@ public bool IsUnchangedToastedValue /// An optional token to cancel the asynchronous operation. The default value is . 
/// /// - public async ValueTask Get(CancellationToken cancellationToken = default) + public ValueTask Get(CancellationToken cancellationToken = default) => GetAsyncCore(cancellationToken); + + /// + /// Gets the value of the specified column as an instance of . + /// + /// + /// An optional token to cancel the asynchronous operation. The default value is . + /// + /// + public ValueTask Get(CancellationToken cancellationToken = default) => GetAsyncCore(cancellationToken); + + /// + /// Retrieves data as a . + /// + public Stream GetStream() => GetCore(); + + /// + /// Retrieves data as a . + /// + public TextReader GetTextReader() => GetCore(); + + internal async Task Consume(CancellationToken cancellationToken) + { + if (_isConsumed) + return; + + var reader = PgReader; + if (!reader.Initialized) + reader.Init(Length, _fieldDescription.DataFormat); + await reader.ConsumeAsync(cancellationToken: cancellationToken).ConfigureAwait(false); + await reader.CommitAsync().ConfigureAwait(false); + + _isConsumed = true; + } + + T GetCore() { - CheckActive(); + ThrowIfInitialized(); _fieldDescription.GetInfo(typeof(T), ref _lastInfo); var info = _lastInfo; @@ -115,92 +150,54 @@ public async ValueTask Get(CancellationToken cancellationToken = default) $"Column '{_fieldDescription.Name}' is an unchanged TOASTed value (actual value not sent)."); } - using var registration = _readBuffer.Connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); - var reader = PgReader; reader.Init(Length, _fieldDescription.DataFormat); - await reader.StartReadAsync(info.ConverterInfo.BufferRequirement, cancellationToken).ConfigureAwait(false); + reader.StartRead(info.ConverterInfo.BufferRequirement); var result = info.AsObject - ? 
(T)await info.ConverterInfo.Converter.ReadAsObjectAsync(reader, cancellationToken).ConfigureAwait(false) - : await info.ConverterInfo.Converter.UnsafeDowncast().ReadAsync(reader, cancellationToken).ConfigureAwait(false); - await reader.EndReadAsync().ConfigureAwait(false); + ? (T)info.ConverterInfo.Converter.ReadAsObject(reader) + : info.ConverterInfo.Converter.UnsafeDowncast().Read(reader); + reader.EndRead(); return result; } - /// - /// Gets the value of the specified column as an instance of . - /// - /// - /// An optional token to cancel the asynchronous operation. The default value is . - /// - /// - public ValueTask Get(CancellationToken cancellationToken = default) => Get(cancellationToken); - - /// - /// Retrieves data as a . - /// - public Stream GetStream() + async ValueTask GetAsyncCore(CancellationToken cancellationToken) { - CheckActive(); + ThrowIfInitialized(); + + _fieldDescription.GetInfo(typeof(T), ref _lastInfo); + var info = _lastInfo; switch (Kind) { case TupleDataKind.Null: - ThrowHelper.ThrowInvalidCastException_NoValue(_fieldDescription); - break; - - case TupleDataKind.UnchangedToastedValue: - throw new InvalidCastException($"Column '{_fieldDescription.Name}' is an unchanged TOASTed value (actual value not sent)."); - } - - var reader = PgReader; - reader.Init(Length, _fieldDescription.DataFormat); - return reader.GetStream(canSeek: false); - } - - /// - /// Retrieves data as a . 
- /// - public TextReader GetTextReader() - { - CheckActive(); + // When T is a Nullable (and only in that case), we support returning null + if (default(T) is null && typeof(T).IsValueType) + return default!; - ref var info = ref _lastInfo; - _fieldDescription.GetInfo(typeof(TextReader), ref info); + if (typeof(T) == typeof(object)) + return (T)(object)DBNull.Value; - switch (Kind) - { - case TupleDataKind.Null: ThrowHelper.ThrowInvalidCastException_NoValue(_fieldDescription); break; case TupleDataKind.UnchangedToastedValue: - throw new InvalidCastException($"Column '{_fieldDescription.Name}' is an unchanged TOASTed value (actual value not sent)."); + throw new InvalidCastException( + $"Column '{_fieldDescription.Name}' is an unchanged TOASTed value (actual value not sent)."); } + using var registration = _readBuffer.Connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); + var reader = PgReader; reader.Init(Length, _fieldDescription.DataFormat); - reader.StartRead(info.ConverterInfo.BufferRequirement); - var result = (TextReader)info.ConverterInfo.Converter.ReadAsObject(reader); - reader.EndRead(); + await reader.StartReadAsync(info.ConverterInfo.BufferRequirement, cancellationToken).ConfigureAwait(false); + var result = info.AsObject + ? 
(T)await info.ConverterInfo.Converter.ReadAsObjectAsync(reader, cancellationToken).ConfigureAwait(false) + : await info.ConverterInfo.Converter.UnsafeDowncast().ReadAsync(reader, cancellationToken).ConfigureAwait(false); + await reader.EndReadAsync().ConfigureAwait(false); return result; } - internal async Task Consume(CancellationToken cancellationToken) - { - if (_isConsumed) - return; - - var reader = PgReader; - if (!reader.Initialized) - reader.Init(Length, _fieldDescription.DataFormat); - await reader.ConsumeAsync(cancellationToken: cancellationToken).ConfigureAwait(false); - await reader.CommitAsync().ConfigureAwait(false); - - _isConsumed = true; - } - - void CheckActive() + void ThrowIfInitialized() { if (PgReader.Initialized) throw new InvalidOperationException("Column has already been consumed"); diff --git a/src/Npgsql/ThrowHelper.cs b/src/Npgsql/ThrowHelper.cs index dc79128537..6717458d2a 100644 --- a/src/Npgsql/ThrowHelper.cs +++ b/src/Npgsql/ThrowHelper.cs @@ -97,7 +97,7 @@ internal static void ThrowIndexOutOfRangeException(string message) => throw new IndexOutOfRangeException(message); [DoesNotReturn] - internal static void ThrowIndexOutOfRangeException(string message, object argument) + internal static void ThrowIndexOutOfRangeException(string message, int argument) => throw new IndexOutOfRangeException(string.Format(message, argument)); [DoesNotReturn] diff --git a/test/Npgsql.Tests/ReaderTests.cs b/test/Npgsql.Tests/ReaderTests.cs index 971de29076..8b566209ad 100644 --- a/test/Npgsql.Tests/ReaderTests.cs +++ b/test/Npgsql.Tests/ReaderTests.cs @@ -16,6 +16,7 @@ using Npgsql.Util; using NpgsqlTypes; using NUnit.Framework; +using NUnit.Framework.Constraints; using static Npgsql.Tests.TestUtil; namespace Npgsql.Tests; @@ -746,7 +747,13 @@ public async Task Field_index_does_not_exist() using var command = new NpgsqlCommand("SELECT 1", conn); using var dr = await command.ExecuteReaderAsync(Behavior); dr.Read(); - Assert.That(() => dr[5], 
Throws.Exception.TypeOf()); + + Assert.That(() => dr[1], AssertExpectedException()); + Assert.That(() => dr.GetValue(2), AssertExpectedException()); + Assert.That(() => dr.GetFieldValue(3), AssertExpectedException()); + + static IResolveConstraint AssertExpectedException() + => Throws.Exception.TypeOf().With.Message.StartsWith("Ordinal is out of range"); } [Test, Description("Performs some operations while a reader is still open and checks for exceptions")] From 279405af1ff5c99ea2952529c0c92f5410920a38 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Tue, 7 Apr 2026 12:16:20 +0200 Subject: [PATCH 741/761] Enable buffer bounds checks by default (#6491) --- src/Npgsql/Internal/NpgsqlConnector.cs | 4 +- src/Npgsql/Internal/NpgsqlReadBuffer.cs | 116 +++++++++--------- src/Npgsql/Internal/PgBufferedConverter.cs | 16 +-- src/Npgsql/Internal/PgReader.cs | 61 +++++---- .../TypeHandlers/TypeHandlerBenchmarks.cs | 2 +- test/Npgsql.Tests/ReaderTests.cs | 2 +- 6 files changed, 94 insertions(+), 107 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 63b26f3878..dce6e48d0e 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -1589,10 +1589,10 @@ internal ValueTask ReadMessage( Debug.Assert(msg != null, "Message is null for code: " + messageCode); - // Reset flushed bytes after any RFQ or in between potentially long running operations. + // Rebase the cumulative buffer-end counter after any RFQ or in between potentially long-running operations. // Just in case we'll hit that 15 exbibyte limit of a signed long... 
if (messageCode is BackendMessageCode.ReadyForQuery or BackendMessageCode.CopyData or BackendMessageCode.NotificationResponse) - ReadBuffer.ResetFlushedBytes(); + ReadBuffer.RebaseBufferEndPosition(); return msg; } diff --git a/src/Npgsql/Internal/NpgsqlReadBuffer.cs b/src/Npgsql/Internal/NpgsqlReadBuffer.cs index 0f91bad9d4..66221366d0 100644 --- a/src/Npgsql/Internal/NpgsqlReadBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlReadBuffer.cs @@ -23,12 +23,6 @@ sealed partial class NpgsqlReadBuffer : IDisposable { #region Fields and Properties -#if DEBUG - internal static readonly bool BufferBoundsChecks = true; -#else - internal static readonly bool BufferBoundsChecks = Statics.EnableAssertions; -#endif - public NpgsqlConnection Connection => Connector.Connection!; internal readonly NpgsqlConnector Connector; internal Stream Underlying { private get; set; } @@ -68,17 +62,22 @@ internal TimeSpan Timeout /// internal Encoding RelaxedTextEncoding { get; } - internal int ReadPosition { get; set; } - internal int ReadBytesLeft => FilledBytes - ReadPosition; + internal int ReadBytesLeft { get; private set; } + internal int ReadPosition + { + get => FilledBytes - ReadBytesLeft; + set => ReadBytesLeft = FilledBytes - value; + } internal PgReader PgReader { get; } - long _flushedBytes; // this will always fit at least one message. + // Tracks the absolute position of the end of the buffered window. + // Invariant: _bufferEndPosition == CumulativeReadPosition + ReadBytesLeft. + long _bufferEndPosition; // this will always fit at least one message. 
internal long CumulativeReadPosition - // Cast to uint to remove the sign extension (ReadPosition is never negative) - => _flushedBytes + (uint)ReadPosition; + => _bufferEndPosition - ReadBytesLeft; internal readonly byte[] Buffer; - internal int FilledBytes; + internal int FilledBytes { get; private set; } internal ReadOnlySpan Span => Buffer.AsSpan(ReadPosition, ReadBytesLeft); @@ -124,6 +123,16 @@ internal NpgsqlReadBuffer( #region I/O + // Used for testing. + internal void AddBytesToRead(int count) + { + ArgumentOutOfRangeException.ThrowIfNegative(count); + ArgumentOutOfRangeException.ThrowIfGreaterThan(FilledBytes + count, Size, nameof(count)); + FilledBytes += count; + ReadBytesLeft += count; + _bufferEndPosition = unchecked(_bufferEndPosition + count); + } + public void Ensure(int count) => Ensure(count, async: false, readingNotifications: false).GetAwaiter().GetResult(); @@ -141,7 +150,7 @@ int ReadWithTimeout(Span buffer) try { var read = Underlying.Read(buffer); - _flushedBytes = unchecked(_flushedBytes + read); + _bufferEndPosition = unchecked(_bufferEndPosition + read); NpgsqlEventSource.Log.BytesRead(read); return read; } @@ -189,7 +198,7 @@ async ValueTask ReadWithTimeoutAsync(Memory buffer, CancellationToken try { var read = await Underlying.ReadAsync(buffer, finalCt).ConfigureAwait(false); - _flushedBytes = unchecked(_flushedBytes + read); + _bufferEndPosition = unchecked(_bufferEndPosition + read); Cts.Stop(); NpgsqlEventSource.Log.BytesRead(read); return read; @@ -268,7 +277,7 @@ static async ValueTask EnsureLong( Debug.Assert(count > buffer.ReadBytesLeft); count -= buffer.ReadBytesLeft; - if (buffer.ReadPosition == buffer.FilledBytes) + if (buffer.ReadBytesLeft == 0) { buffer.ResetPosition(); } @@ -276,8 +285,6 @@ static async ValueTask EnsureLong( { Array.Copy(buffer.Buffer, buffer.ReadPosition, buffer.Buffer, 0, buffer.ReadBytesLeft); buffer.FilledBytes = buffer.ReadBytesLeft; - buffer._flushedBytes = unchecked(buffer._flushedBytes + 
buffer.ReadPosition); - buffer.ReadPosition = 0; } var finalCt = async && buffer.Timeout != InfiniteTimeSpan @@ -298,6 +305,8 @@ static async ValueTask EnsureLong( throw new EndOfStreamException(); count -= read; buffer.FilledBytes += read; + buffer.ReadBytesLeft += read; + buffer._bufferEndPosition = unchecked(buffer._bufferEndPosition + read); totalRead += read; // Most of the time, it should be fine to reset cancellation token source, so we can use it again @@ -417,13 +426,13 @@ internal void Skip(int len, bool allowIO) } Debug.Assert(ReadBytesLeft >= len); - ReadPosition += len; + ReadBytesLeft -= len; } internal void Skip(int len) { Debug.Assert(ReadBytesLeft >= len); - ReadPosition += len; + ReadBytesLeft -= len; } /// @@ -446,7 +455,7 @@ public async Task Skip(bool async, int len) await Ensure(len, async).ConfigureAwait(false); } - ReadPosition += len; + ReadBytesLeft -= len; } #endregion @@ -458,7 +467,7 @@ public byte ReadByte() { CheckBounds(sizeof(byte)); var result = Buffer[ReadPosition]; - ReadPosition += sizeof(byte); + ReadBytesLeft -= sizeof(byte); return result; } @@ -469,7 +478,7 @@ public short ReadInt16() var result = BitConverter.IsLittleEndian ? BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref Buffer[ReadPosition])) : Unsafe.ReadUnaligned(ref Buffer[ReadPosition]); - ReadPosition += sizeof(short); + ReadBytesLeft -= sizeof(short); return result; } @@ -480,7 +489,7 @@ public ushort ReadUInt16() var result = BitConverter.IsLittleEndian ? BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref Buffer[ReadPosition])) : Unsafe.ReadUnaligned(ref Buffer[ReadPosition]); - ReadPosition += sizeof(ushort); + ReadBytesLeft -= sizeof(ushort); return result; } @@ -491,7 +500,7 @@ public int ReadInt32() var result = BitConverter.IsLittleEndian ? 
BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref Buffer[ReadPosition])) : Unsafe.ReadUnaligned(ref Buffer[ReadPosition]); - ReadPosition += sizeof(int); + ReadBytesLeft -= sizeof(int); return result; } @@ -502,7 +511,7 @@ public uint ReadUInt32() var result = BitConverter.IsLittleEndian ? BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref Buffer[ReadPosition])) : Unsafe.ReadUnaligned(ref Buffer[ReadPosition]); - ReadPosition += sizeof(uint); + ReadBytesLeft -= sizeof(uint); return result; } @@ -513,7 +522,7 @@ public long ReadInt64() var result = BitConverter.IsLittleEndian ? BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref Buffer[ReadPosition])) : Unsafe.ReadUnaligned(ref Buffer[ReadPosition]); - ReadPosition += sizeof(long); + ReadBytesLeft -= sizeof(long); return result; } @@ -524,7 +533,7 @@ public ulong ReadUInt64() var result = BitConverter.IsLittleEndian ? BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref Buffer[ReadPosition])) : Unsafe.ReadUnaligned(ref Buffer[ReadPosition]); - ReadPosition += sizeof(ulong); + ReadBytesLeft -= sizeof(ulong); return result; } @@ -535,7 +544,7 @@ public float ReadSingle() var result = BitConverter.IsLittleEndian ? BitConverter.Int32BitsToSingle(BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref Buffer[ReadPosition]))) : Unsafe.ReadUnaligned(ref Buffer[ReadPosition]); - ReadPosition += sizeof(float); + ReadBytesLeft -= sizeof(float); return result; } @@ -546,36 +555,30 @@ public double ReadDouble() var result = BitConverter.IsLittleEndian ? 
BitConverter.Int64BitsToDouble(BinaryPrimitives.ReverseEndianness(Unsafe.ReadUnaligned(ref Buffer[ReadPosition]))) : Unsafe.ReadUnaligned(ref Buffer[ReadPosition]); - ReadPosition += sizeof(double); + ReadBytesLeft -= sizeof(double); return result; } + [MethodImpl(MethodImplOptions.AggressiveInlining)] void CheckBounds(int count) { - if (BufferBoundsChecks) - Core(count); - - [MethodImpl(MethodImplOptions.NoInlining)] - void Core(int count) - { - if (count > ReadBytesLeft) - ThrowHelper.ThrowInvalidOperationException("There is not enough data left in the buffer."); - } + if (count > ReadBytesLeft) + ThrowHelper.ThrowInvalidOperationException("There is not enough data left in the buffer."); } public string ReadString(int byteLen) { - Debug.Assert(byteLen <= ReadBytesLeft); + CheckBounds(byteLen); var result = TextEncoding.GetString(Buffer, ReadPosition, byteLen); - ReadPosition += byteLen; + ReadBytesLeft -= byteLen; return result; } public void ReadBytes(Span output) { - Debug.Assert(output.Length <= ReadBytesLeft); + CheckBounds(output.Length); new Span(Buffer, ReadPosition, output.Length).CopyTo(output); - ReadPosition += output.Length; + ReadBytesLeft -= output.Length; } public void ReadBytes(byte[] output, int outputOffset, int len) @@ -583,9 +586,9 @@ public void ReadBytes(byte[] output, int outputOffset, int len) public ReadOnlyMemory ReadMemory(int len) { - Debug.Assert(len <= ReadBytesLeft); + CheckBounds(len); var memory = new ReadOnlyMemory(Buffer, ReadPosition, len); - ReadPosition += len; + ReadBytesLeft -= len; return memory; } @@ -599,7 +602,7 @@ public int Read(bool commandScoped, Span output) if (readFromBuffer > 0) { Buffer.AsSpan(ReadPosition, readFromBuffer).CopyTo(output); - ReadPosition += readFromBuffer; + ReadBytesLeft -= readFromBuffer; return readFromBuffer; } @@ -616,7 +619,7 @@ public int Read(bool commandScoped, Span output) try { var read = Underlying.Read(output); - _flushedBytes = unchecked(_flushedBytes + read); + _bufferEndPosition 
= unchecked(_bufferEndPosition + read); NpgsqlEventSource.Log.BytesRead(read); return read; } @@ -632,7 +635,7 @@ public ValueTask ReadAsync(bool commandScoped, Memory output, Cancell if (readFromBuffer > 0) { Buffer.AsSpan(ReadPosition, readFromBuffer).CopyTo(output.Span); - ReadPosition += readFromBuffer; + ReadBytesLeft -= readFromBuffer; return new ValueTask(readFromBuffer); } @@ -653,7 +656,7 @@ static async ValueTask ReadAsyncLong(NpgsqlReadBuffer buffer, bool commandS try { var read = await buffer.Underlying.ReadAsync(output, cancellationToken).ConfigureAwait(false); - buffer._flushedBytes = unchecked(buffer._flushedBytes + read); + buffer._bufferEndPosition = unchecked(buffer._bufferEndPosition + read); NpgsqlEventSource.Log.BytesRead(read); return read; } @@ -701,7 +704,7 @@ public ValueTask ReadNullTerminatedString(Encoding encoding, bool async, if (index >= 0) { var result = new ValueTask(encoding.GetString(Buffer, ReadPosition, index)); - ReadPosition += index + 1; + ReadBytesLeft -= index + 1; return result; } @@ -709,7 +712,7 @@ public ValueTask ReadNullTerminatedString(Encoding encoding, bool async, async ValueTask ReadLong(Encoding encoding, bool async) { - var chunkSize = FilledBytes - ReadPosition; + var chunkSize = ReadBytesLeft; var tempBuf = ArrayPool.Shared.Rent(chunkSize + 1024); try @@ -717,7 +720,7 @@ async ValueTask ReadLong(Encoding encoding, bool async) bool foundTerminator; var byteLen = chunkSize; Array.Copy(Buffer, ReadPosition, tempBuf, 0, chunkSize); - ReadPosition += chunkSize; + ReadBytesLeft -= chunkSize; do { @@ -750,7 +753,7 @@ async ValueTask ReadLong(Encoding encoding, bool async) ReadPosition = i; } while (!foundTerminator); - ReadPosition++; + ReadBytesLeft--; return encoding.GetString(tempBuf, 0, byteLen); } finally @@ -765,7 +768,7 @@ public ReadOnlySpan GetNullTerminatedBytes() var i = Span.IndexOf((byte)0); Debug.Assert(i >= 0); var result = new ReadOnlySpan(Buffer, ReadPosition, i); - ReadPosition += i + 1; + 
ReadBytesLeft -= i + 1; return result; } @@ -791,18 +794,19 @@ public void Dispose() void ResetPosition() { - _flushedBytes = unchecked(_flushedBytes + FilledBytes); - ReadPosition = 0; + ReadBytesLeft = 0; FilledBytes = 0; } - internal void ResetFlushedBytes() => _flushedBytes = 0; + internal void RebaseBufferEndPosition() => _bufferEndPosition = ReadBytesLeft; internal void CopyTo(NpgsqlReadBuffer other) { Debug.Assert(other.Size - other.FilledBytes >= ReadBytesLeft); Array.Copy(Buffer, ReadPosition, other.Buffer, other.FilledBytes, ReadBytesLeft); other.FilledBytes += ReadBytesLeft; + other.ReadBytesLeft += ReadBytesLeft; + other._bufferEndPosition = unchecked(other._bufferEndPosition + ReadBytesLeft); } #endregion diff --git a/src/Npgsql/Internal/PgBufferedConverter.cs b/src/Npgsql/Internal/PgBufferedConverter.cs index 12b9851c8d..9a61cbb3f1 100644 --- a/src/Npgsql/Internal/PgBufferedConverter.cs +++ b/src/Npgsql/Internal/PgBufferedConverter.cs @@ -14,13 +14,7 @@ public abstract class PgBufferedConverter(bool customDbNullPredicate = false) public override Size GetSize(SizeContext context, T value, ref object? 
writeState) => throw new NotSupportedException(); - public sealed override T Read(PgReader reader) - { - if (reader.ShouldBufferCurrent()) - ThrowIORequired(reader.CurrentBufferRequirement); - - return ReadCore(reader); - } + public sealed override T Read(PgReader reader) => ReadCore(reader); public sealed override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) => new(Read(reader)); @@ -28,13 +22,7 @@ public sealed override ValueTask ReadAsync(PgReader reader, CancellationToken internal sealed override ValueTask ReadAsObject(bool async, PgReader reader, CancellationToken cancellationToken) => new(Read(reader)!); - public sealed override void Write(PgWriter writer, T value) - { - if (writer.ShouldFlushCurrent()) - ThrowIORequired(writer.CurrentBufferRequirement); - - WriteCore(writer, value); - } + public sealed override void Write(PgWriter writer, T value) => WriteCore(writer, value); public sealed override ValueTask WriteAsync(PgWriter writer, [DisallowNull] T value, CancellationToken cancellationToken = default) { diff --git a/src/Npgsql/Internal/PgReader.cs b/src/Npgsql/Internal/PgReader.cs index 6a5d7efd69..f6323c09f3 100644 --- a/src/Npgsql/Internal/PgReader.cs +++ b/src/Npgsql/Internal/PgReader.cs @@ -28,6 +28,7 @@ public class PgReader PreparedTextReader? _preparedTextReader; long _fieldStartPos; + long _fieldEndPos; Size _fieldBufferRequirement; DataFormat _fieldFormat; int _fieldSize; @@ -46,9 +47,6 @@ public class PgReader ArraySegment? _charsReadBuffer; bool _requiresCleanup; - // The field reading process of doing init/commit and startread/endread pairs is very perf sensitive. - // So this is used in Commit as a fast-path alternative to FieldRemaining to detect if the field was consumed succesfully. 
- bool _fieldConsumed; internal PgReader(NpgsqlReadBuffer buffer) { @@ -109,17 +107,14 @@ internal void RevertNestedReadScope(int size, int startPos, Size bufferRequireme _currentSize = size; } + [MethodImpl(MethodImplOptions.AggressiveInlining)] void CheckBounds(int count) { - if (NpgsqlReadBuffer.BufferBoundsChecks) - Core(count); + if (_buffer.CumulativeReadPosition > _fieldEndPos - count) + Throw(); - [MethodImpl(MethodImplOptions.NoInlining)] - void Core(int count) - { - if (count > CurrentRemaining) - ThrowHelper.ThrowIndexOutOfRangeException("Attempt to read past the end of the current field size."); - } + static void Throw() + => ThrowHelper.ThrowIndexOutOfRangeException("Attempt to read past the end of the field."); } public byte ReadByte() @@ -456,7 +451,7 @@ internal void Init(int fieldSize, DataFormat fieldFormat, bool resumable = false ThrowHelper.ThrowInvalidOperationException("Already initialized"); _fieldStartPos = _buffer.CumulativeReadPosition; - _fieldConsumed = false; + _fieldEndPos = _fieldStartPos + fieldSize; _fieldSize = fieldSize; _fieldFormat = fieldFormat; _resumable = resumable; @@ -485,35 +480,36 @@ internal ValueTask StartReadAsync(Size bufferRequirement, CancellationToken canc internal void EndRead() { - if (_resumable || StreamActive) + if (_resumable || (_requiresCleanup && StreamActive)) return; - // If it was upper bound we should consume. - if (_fieldBufferRequirement is { Kind: SizeKind.UpperBound }) + if (_buffer.CumulativeReadPosition != _fieldEndPos) { - Consume(FieldRemaining); - return; - } + // If it was upper bound we should consume. + if (_fieldBufferRequirement is { Kind: SizeKind.UpperBound }) + { + Consume(FieldRemaining); + return; + } - if (FieldOffset != FieldSize) ThrowNotConsumedExactly(); - - _fieldConsumed = true; + } } internal ValueTask EndReadAsync() { - if (_resumable || StreamActive) + if (_resumable || (_requiresCleanup && StreamActive)) return new(); - // If it was upper bound we should consume. 
- if (_fieldBufferRequirement is { Kind: SizeKind.UpperBound }) - return ConsumeAsync(FieldRemaining); + if (_buffer.CumulativeReadPosition != _fieldEndPos) + { + // If it was upper bound we should consume. + if (_fieldBufferRequirement is { Kind: SizeKind.UpperBound }) + return ConsumeAsync(FieldRemaining); - if (FieldOffset != FieldSize) ThrowNotConsumedExactly(); + } - _fieldConsumed = true; return new(); } @@ -658,7 +654,6 @@ internal int Restart(bool resumable) if (NestedInitialized) ResetCurrent(); - _fieldConsumed = false; _resumable = resumable; RewindCore(FieldOffset); @@ -681,17 +676,17 @@ internal void Commit() // We make sure to fuly consume any FieldRemaining in the event of an exception or a nested scope not being disposed. Debug.Assert(!NestedInitialized); - if (!_fieldConsumed && FieldRemaining > 0) + if (FieldRemaining > 0) Consume(); _fieldStartPos = UninitializedSentinel; Debug.Assert(!Initialized); // These will always be re-initialized by Init() + // _fieldEndPos = default; // _fieldSize = default; // _fieldFormat = default; // _resumable = default; - // _fieldConsumed = default; } [MethodImpl(MethodImplOptions.AggressiveInlining)] @@ -709,17 +704,17 @@ internal ValueTask CommitAsync() // We make sure to fuly consume any FieldRemaining in the event of an exception or a nested scope not being disposed. 
Debug.Assert(!NestedInitialized); - if (!_fieldConsumed && FieldRemaining > 0) + if (FieldRemaining > 0) return CommitAsync(); _fieldStartPos = UninitializedSentinel; Debug.Assert(!Initialized); // These will always be re-initialized by Init() + // _fieldEndPos = default; // _fieldSize = default; // _fieldFormat = default; // _resumable = default; - // _fieldConsumed = default; return new(); @@ -731,10 +726,10 @@ async ValueTask CommitAsync() Debug.Assert(!Initialized); // These will always be re-initialized by Init() + // _fieldEndPos = default; // _fieldSize = default; // _fieldFormat = default; // _resumable = default; - // _fieldConsumed = default; } } diff --git a/test/Npgsql.Benchmarks/TypeHandlers/TypeHandlerBenchmarks.cs b/test/Npgsql.Benchmarks/TypeHandlers/TypeHandlerBenchmarks.cs index e49e3b25ad..17469b3519 100644 --- a/test/Npgsql.Benchmarks/TypeHandlers/TypeHandlerBenchmarks.cs +++ b/test/Npgsql.Benchmarks/TypeHandlers/TypeHandlerBenchmarks.cs @@ -89,7 +89,7 @@ public T Value _writer.Commit(size.Value); Buffer.BlockCopy(_writeBuffer.Buffer, 0, _readBuffer.Buffer, 0, _writeBuffer.WritePosition); - _readBuffer.FilledBytes = _writeBuffer.WritePosition; + _readBuffer.AddBytesToRead(_writeBuffer.WritePosition); _readBuffer.ReadPosition = 0; _writeBuffer.WritePosition = 0; diff --git a/test/Npgsql.Tests/ReaderTests.cs b/test/Npgsql.Tests/ReaderTests.cs index 8b566209ad..ae80df8e6a 100644 --- a/test/Npgsql.Tests/ReaderTests.cs +++ b/test/Npgsql.Tests/ReaderTests.cs @@ -1862,7 +1862,7 @@ public async Task EndRead_StreamActive([Values]bool async) await using var conn = await OpenConnectionAsync(); var buffer = conn.Connector!.ReadBuffer; - buffer.FilledBytes += columnLength; + buffer.AddBytesToRead(columnLength); var reader = buffer.PgReader; reader.Init(columnLength, DataFormat.Binary, resumable: false); if (async) From e8dcaa06ead88e7aefe501e33fea7a395d1f7405 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Tue, 7 Apr 2026 21:09:50 +0200 Subject: [PATCH 
742/761] Decouple PgReader GetChars from other Streams (#6490) --- src/Npgsql/Internal/PgReader.cs | 147 +++++++++++------- src/Npgsql/NpgsqlDataReader.cs | 9 +- src/Npgsql/PreparedTextReader.cs | 14 +- src/Npgsql/Util/SubReadStream.cs | 154 ++++++++++--------- test/Npgsql.Tests/ReaderTests.cs | 250 ++++++++++++++++++++++++++++++- 5 files changed, 424 insertions(+), 150 deletions(-) diff --git a/src/Npgsql/Internal/PgReader.cs b/src/Npgsql/Internal/PgReader.cs index f6323c09f3..aa2eea9e5d 100644 --- a/src/Npgsql/Internal/PgReader.cs +++ b/src/Npgsql/Internal/PgReader.cs @@ -7,6 +7,7 @@ using System.Text; using System.Threading; using System.Threading.Tasks; +using Npgsql.Util; namespace Npgsql.Internal; @@ -24,7 +25,7 @@ public class PgReader bool _resumable; byte[]? _pooledArray; - NpgsqlReadBuffer.ColumnStream? _userActiveStream; + Stream? _userActiveStream; PreparedTextReader? _preparedTextReader; long _fieldStartPos; @@ -39,8 +40,8 @@ public class PgReader int _currentSize; // GetChars Internal state - TextReader? _charsReadReader; - int _charsRead; + TextReader? _getCharsReader; + int _getCharsRead; // GetChars User state int? _charsReadOffset; @@ -201,20 +202,38 @@ public string ReadNullTerminatedString(Encoding encoding) return result; } - public Stream GetStream(int? length = null) => GetColumnStream(length); - NpgsqlReadBuffer.ColumnStream GetColumnStream(int? length = null) + public Stream GetStream(int? length = null) => GetStreamCore(length); + Stream GetStreamCore(int? length = null, bool untracked = false) { if (length > CurrentRemaining) ThrowHelper.ThrowArgumentOutOfRangeException(nameof(length), "Length is larger than the current remaining value size"); - _requiresCleanup = true; // This will cause any previously handed out StreamReaders etc to throw, as intended. 
- if (_userActiveStream is not null) + if (!untracked && UserStreamActive) DisposeUserActiveStream(async: false).GetAwaiter().GetResult(); length ??= CurrentRemaining; - CheckBounds(length.GetValueOrDefault()); - return _userActiveStream = _buffer.CreateStream(length.GetValueOrDefault(), StreamCanSeek && length <= _buffer.ReadBytesLeft, consumeOnDispose: false); + var len = length.GetValueOrDefault(); + CheckBounds(len); + + Stream stream; + if (StreamCanSeek && len <= _buffer.ReadBytesLeft) + { + // All data is in the buffer — return an isolated view over the buffer. + stream = new SubReadStream(_buffer.Buffer, _buffer.ReadPosition, len); + _buffer.ReadPosition += len; + } + else + { + stream = _buffer.CreateStream(len, canSeek: false, consumeOnDispose: false); + } + + if (!untracked) + { + _requiresCleanup = true; + _userActiveStream = stream; + } + return stream; } public TextReader GetTextReader(Encoding encoding) @@ -223,24 +242,30 @@ public TextReader GetTextReader(Encoding encoding) public ValueTask GetTextReaderAsync(Encoding encoding, CancellationToken cancellationToken) => GetTextReader(async: true, encoding, cancellationToken); - async ValueTask GetTextReader(bool async, Encoding encoding, CancellationToken cancellationToken) + async ValueTask GetTextReader(bool async, Encoding encoding, CancellationToken cancellationToken, bool untracked = false) { - _requiresCleanup = true; if (CurrentRemaining > _buffer.ReadBytesLeft || CurrentRemaining > MaxPreparedTextReaderSize) - return new StreamReader(GetColumnStream(), encoding, detectEncodingFromByteOrderMarks: false); + return new StreamReader(GetStreamCore(untracked: untracked), encoding, detectEncodingFromByteOrderMarks: false); - if (_preparedTextReader is { IsDisposed: false }) + if (!untracked && _preparedTextReader is { IsDisposed: false }) { _preparedTextReader.Dispose(); _preparedTextReader = null; } - _preparedTextReader ??= new PreparedTextReader(); - _preparedTextReader.Init( - 
encoding.GetString(async - ? await ReadBytesAsync(CurrentRemaining, cancellationToken).ConfigureAwait(false) - : ReadBytes(CurrentRemaining)), GetColumnStream(0)); - return _preparedTextReader; + _requiresCleanup = true; + var currentOffset = CurrentOffset; + var currentRemaining = CurrentSize - currentOffset; + + // Always make a new reader for GetChars, see GetColumnStream. + var preparedTextReader = (untracked ? null : _preparedTextReader) ?? new(); + preparedTextReader.Init(encoding.GetString(async + ? await ReadBytesAsync(currentRemaining, cancellationToken).ConfigureAwait(false) + : ReadBytes(currentRemaining))); + if (!untracked) + _preparedTextReader = preparedTextReader; + + return preparedTextReader; } public ValueTask ReadBytesAsync(Memory buffer, CancellationToken cancellationToken = default) @@ -363,7 +388,7 @@ public void Rewind(int count) ThrowHelper.ThrowArgumentOutOfRangeException(nameof(count), "Attempt to rewind past the buffer start, some of this data is no longer part of the underlying buffer."); // Shut down any streaming going on on the column - if (StreamActive) + if (UserStreamActive) DisposeUserActiveStream(async: false).GetAwaiter().GetResult(); RewindCore(count); @@ -376,21 +401,23 @@ void RewindCore(int count) _buffer.ReadPosition -= count; } - /// - /// - /// - /// - /// The stream length, if any - async ValueTask DisposeUserActiveStream(bool async) + [MethodImpl(MethodImplOptions.NoInlining)] + ValueTask DisposeUserActiveStream(bool async) { - if (async) - await (_userActiveStream?.DisposeAsync() ?? 
new()).ConfigureAwait(false); - else - _userActiveStream?.Dispose(); - _userActiveStream = null; + var stream = _userActiveStream; + if (stream is not null) + { + _userActiveStream = null; + if (async) + return stream.DisposeAsync(); + + stream.Dispose(); + } + + return new(); } - internal int CharsRead => _charsRead; + internal int GetCharsRead => _getCharsRead; internal bool CharsReadActive => _charsReadOffset is not null; internal void GetCharsReadInfo(Encoding encoding, out int charsRead, out TextReader reader, out int charsOffset, out ArraySegment? buffer) @@ -398,8 +425,10 @@ internal void GetCharsReadInfo(Encoding encoding, out int charsRead, out TextRea if (!CharsReadActive) ThrowHelper.ThrowInvalidOperationException("No active chars read"); - charsRead = _charsRead; - reader = _charsReadReader ??= GetTextReader(encoding); + _requiresCleanup = true; + + charsRead = _getCharsRead; + reader = _getCharsReader ??= GetTextReader(async: false, encoding, default, untracked: true).GetAwaiter().GetResult(); charsOffset = _charsReadOffset ?? 0; buffer = _charsReadBuffer; } @@ -409,7 +438,7 @@ internal void RestartCharsRead() if (!CharsReadActive) ThrowHelper.ThrowInvalidOperationException("No active chars read"); - switch (_charsReadReader) + switch (_getCharsReader) { case PreparedTextReader reader: reader.Restart(); @@ -419,10 +448,13 @@ internal void RestartCharsRead() reader.DiscardBufferedData(); break; } - _charsRead = 0; + _getCharsRead = 0; } - internal void AdvanceCharsRead(int charsRead) => _charsRead += charsRead; + internal void AdvanceCharsRead(int charsRead) + { + _getCharsRead += charsRead; + } internal void StartCharsRead(int dataOffset, ArraySegment? 
buffer) { @@ -480,7 +512,7 @@ internal ValueTask StartReadAsync(Size bufferRequirement, CancellationToken canc internal void EndRead() { - if (_resumable || (_requiresCleanup && StreamActive)) + if (_resumable || (_requiresCleanup && UserStreamActive)) return; if (_buffer.CumulativeReadPosition != _fieldEndPos) @@ -498,7 +530,7 @@ internal void EndRead() internal ValueTask EndReadAsync() { - if (_resumable || (_requiresCleanup && StreamActive)) + if (_resumable || (_requiresCleanup && UserStreamActive)) return new(); if (_buffer.CumulativeReadPosition != _fieldEndPos) @@ -561,7 +593,7 @@ public void Consume(int? count = null) if (count > currentRemaining) ThrowHelper.ThrowArgumentOutOfRangeException(nameof(count), "Attempt to read past the end of the current field size."); - if (StreamActive) + if (UserStreamActive) DisposeUserActiveStream(async: false).GetAwaiter().GetResult(); var origOffset = FieldOffset; @@ -583,7 +615,7 @@ public async ValueTask ConsumeAsync(int? count = null, CancellationToken cancell if (count > currentRemaining) ThrowHelper.ThrowArgumentOutOfRangeException(nameof(count), "Attempt to read past the end of the current field size."); - if (StreamActive) + if (UserStreamActive) await DisposeUserActiveStream(async: true).ConfigureAwait(false); var origOffset = FieldOffset; @@ -595,17 +627,16 @@ public async ValueTask ConsumeAsync(int? 
count = null, CancellationToken cancell } [MemberNotNullWhen(true, nameof(_userActiveStream))] - bool StreamActive => _userActiveStream is { IsDisposed: false }; - internal void ThrowIfStreamActive() + bool UserStreamActive => _userActiveStream switch { - if (StreamActive) - ThrowHelper.ThrowInvalidOperationException("A stream is still open for this reader"); - } - + NpgsqlReadBuffer.ColumnStream { IsDisposed: false } => true, + SubReadStream { IsDisposed: false } => true, + _ => false + }; [MethodImpl(MethodImplOptions.NoInlining)] void Cleanup() { - if (StreamActive) + if (UserStreamActive) DisposeUserActiveStream(async: false).GetAwaiter().GetResult(); if (_pooledArray is not null) @@ -614,11 +645,17 @@ void Cleanup() _pooledArray = null; } - if (_charsReadReader is not null) + if (_getCharsReader is not null) { - _charsReadReader.Dispose(); - _charsReadReader = null; - _charsRead = default; + _getCharsReader.Dispose(); + _getCharsReader = null; + _getCharsRead = default; + } + + if (_preparedTextReader is not null) + { + _preparedTextReader.Dispose(); + _preparedTextReader = null; } _requiresCleanup = false; @@ -645,11 +682,7 @@ internal int Restart(bool resumable) return fieldSize; } - // From this point on we're not resuming, we're resetting any remaining state and rewinding our position. - - // Shut down any streaming and pooling going on on the column. - if (_requiresCleanup) - Cleanup(); + // From this point on we're not resuming, we're resetting any previous converter state and rewinding our position. if (NestedInitialized) ResetCurrent(); diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index 6f7831ede5..ea410fda64 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -1495,7 +1495,7 @@ public override long GetChars(int ordinal, long dataOffset, char[]? buffer, int var reader = PgReader; dataOffset = buffer is null ? 
0 : dataOffset; - if (_isSequential && reader.CharsRead > dataOffset) + if (_isSequential && reader.GetCharsRead > dataOffset) ThrowHelper.ThrowInvalidOperationException("Attempt to read a position in the column which has already been read"); reader.StartCharsRead(checked((int)dataOffset), @@ -1872,6 +1872,9 @@ int SeekToColumn(int ordinal, DataFormat dataFormat, bool resumableOp = false) // * If it did get initialized as resumable we only allow rereading when either of the following is true: // - The op is a resumable one again // - The op isn't resumable but the field is still entirely unconsumed + // Note: this relies on resumable reads (e.g. GetChars) always advancing ReadPosition, + // even when data could be serviced from the buffer, so that FieldAtStart correctly + // reflects whether the column has been read. if (_isSequential && (column > ordinal || (column == ordinal && (!reader.Resumable || (!resumableOp && !reader.FieldAtStart))))) ThrowInvalidSequentialSeek(column, ordinal); @@ -2053,10 +2056,6 @@ PgConverter GetInfo(int ordinal, Type type, out DataFormat format, out Size buff if ((uint)ordinal >= (uint)ColumnCount) ThrowHelper.ThrowIndexOutOfRangeException("Ordinal is out of range, value must be between 0 and {0} (exclusive).", ColumnCount); - // This may have been a stream left open by GetChars or GetBytes, if so ignore. 
- if (PgReader is { Initialized: true, Resumable: false }) - PgReader.ThrowIfStreamActive(); - ref var info = ref ColumnInfoCache![ordinal]; Debug.Assert(info.ConverterInfo.IsDefault || ReferenceEquals(Connector.SerializerOptions, info.ConverterInfo.TypeInfo.Options), "Cache is bleeding over"); diff --git a/src/Npgsql/PreparedTextReader.cs b/src/Npgsql/PreparedTextReader.cs index 80ee543d9b..86668c010f 100644 --- a/src/Npgsql/PreparedTextReader.cs +++ b/src/Npgsql/PreparedTextReader.cs @@ -2,22 +2,19 @@ using System.IO; using System.Threading; using System.Threading.Tasks; -using Npgsql.Internal; namespace Npgsql; sealed class PreparedTextReader : TextReader { string _str = null!; - NpgsqlReadBuffer.ColumnStream _stream = null!; int _position; bool _disposed; - public void Init(string str, NpgsqlReadBuffer.ColumnStream stream) + public void Init(string str) { _str = str; - _stream = stream; _disposed = false; _position = 0; } @@ -90,7 +87,7 @@ public override string ReadToEnd() public override Task ReadToEndAsync() => Task.FromResult(ReadToEnd()); void CheckDisposed() - => ObjectDisposedException.ThrowIf(_disposed || _stream.IsDisposed, this); + => ObjectDisposedException.ThrowIf(_disposed, this); public void Restart() { @@ -100,12 +97,9 @@ public void Restart() protected override void Dispose(bool disposing) { - base.Dispose(disposing); - if (disposing) - { _disposed = true; - _stream.Dispose(); - } + + base.Dispose(disposing); } } diff --git a/src/Npgsql/Util/SubReadStream.cs b/src/Npgsql/Util/SubReadStream.cs index 8d9d1b1ec5..d5eb760d2f 100644 --- a/src/Npgsql/Util/SubReadStream.cs +++ b/src/Npgsql/Util/SubReadStream.cs @@ -1,5 +1,4 @@ using System; -using System.Diagnostics; using System.IO; using System.Threading; using System.Threading.Tasks; @@ -9,31 +8,40 @@ namespace Npgsql.Util; // Adapted from 
https://github.com/dotnet/runtime/blob/83adfae6a6273d8fb4c69554aa3b1cc7cbf01c71/src/libraries/System.IO.Compression/src/System/IO/Compression/ZipCustomStreams.cs#L221 sealed class SubReadStream : Stream { - readonly long _startInSuperStream; - long _positionInSuperStream; - readonly long _endInSuperStream; - readonly Stream _superStream; + readonly long _start; + long _position; + readonly long _end; + readonly Stream? _stream; + readonly ArraySegment _buffer; readonly bool _canSeek; bool _isDisposed; + internal bool IsDisposed => _isDisposed; - public SubReadStream(Stream superStream, long maxLength) + public SubReadStream(Stream source, long maxLength) { - _startInSuperStream = -1; - _positionInSuperStream = 0; - _endInSuperStream = maxLength; - _superStream = superStream; + _start = -1; + _position = 0; + _end = maxLength; + _stream = source; _canSeek = false; - _isDisposed = false; } - public SubReadStream(Stream superStream, long startPosition, long maxLength) + public SubReadStream(Stream source, long startPosition, long maxLength) { - _startInSuperStream = startPosition; - _positionInSuperStream = startPosition; - _endInSuperStream = startPosition + maxLength; - _superStream = superStream; - _canSeek = superStream.CanSeek; - _isDisposed = false; + _start = startPosition; + _position = startPosition; + _end = startPosition + maxLength; + _stream = source; + _canSeek = source.CanSeek; + } + + public SubReadStream(byte[] buffer, int offset, int count) + { + _buffer = new ArraySegment(buffer, offset, count); + _start = 0; + _position = 0; + _end = count; + _canSeek = true; } public override long Length @@ -45,7 +53,7 @@ public override long Length if (!_canSeek) throw new NotSupportedException(); - return _endInSuperStream - _startInSuperStream; + return _end - _start; } } @@ -58,19 +66,23 @@ public override long Position if (!_canSeek) throw new NotSupportedException(); - return _positionInSuperStream - _startInSuperStream; + return _position - _start; } set { 
ThrowIfDisposed(); - throw new NotSupportedException(); + if (!_canSeek) + throw new NotSupportedException(); + + ArgumentOutOfRangeException.ThrowIfNegative(value); + _position = _start + value; } } - public override bool CanRead => _superStream.CanRead && !_isDisposed; + public override bool CanRead => _buffer.Array is not null || _stream!.CanRead; - public override bool CanSeek => false; + public override bool CanSeek => _canSeek; public override bool CanWrite => false; @@ -85,46 +97,34 @@ void ThrowIfCantRead() public override int Read(byte[] buffer, int offset, int count) { - // parameter validation sent to _superStream.Read - var origCount = count; - - ThrowIfDisposed(); - ThrowIfCantRead(); - - if (_canSeek && _superStream.Position != _positionInSuperStream) - _superStream.Seek(_positionInSuperStream, SeekOrigin.Begin); - if (_positionInSuperStream > _endInSuperStream - count) - count = (int)(_endInSuperStream - _positionInSuperStream); - - Debug.Assert(count >= 0); - Debug.Assert(count <= origCount); - - var ret = _superStream.Read(buffer, offset, count); - - _positionInSuperStream += ret; - return ret; + ValidateBufferArguments(buffer, offset, count); + return Read(new Span(buffer, offset, count)); } public override int Read(Span destination) { - // parameter validation sent to _superStream.Read - var origCount = destination.Length; - var count = destination.Length; - ThrowIfDisposed(); - ThrowIfCantRead(); - if (_canSeek && _superStream.Position != _positionInSuperStream) - _superStream.Seek(_positionInSuperStream, SeekOrigin.Begin); - if (_positionInSuperStream + count > _endInSuperStream) - count = (int)(_endInSuperStream - _positionInSuperStream); + var count = destination.Length; + if (_position + count > _end) + count = (int)(_end - _position); - Debug.Assert(count >= 0); - Debug.Assert(count <= origCount); + if (count <= 0) + return 0; - var ret = _superStream.Read(destination.Slice(0, count)); + if (_buffer.Array is not null) + { + 
_buffer.AsSpan((int)_position, count).CopyTo(destination); + _position += count; + return count; + } + + ThrowIfCantRead(); + if (_canSeek && _stream!.Position != _position) + _stream.Seek(_position, SeekOrigin.Begin); - _positionInSuperStream += ret; + var ret = _stream!.Read(destination.Slice(0, count)); + _position += ret; return ret; } @@ -143,23 +143,23 @@ public override Task ReadAsync(byte[] buffer, int offset, int count, Cancel public override ValueTask ReadAsync(Memory buffer, CancellationToken cancellationToken = default) { ThrowIfDisposed(); + + if (_buffer.Array is not null) + return new(Read(buffer.Span)); + ThrowIfCantRead(); - if (_canSeek && _superStream.Position != _positionInSuperStream) - { - _superStream.Seek(_positionInSuperStream, SeekOrigin.Begin); - } + if (_canSeek && _stream!.Position != _position) + _stream.Seek(_position, SeekOrigin.Begin); - if (_positionInSuperStream > _endInSuperStream - buffer.Length) - { - buffer = buffer.Slice(0, (int)(_endInSuperStream - _positionInSuperStream)); - } + if (_position > _end - buffer.Length) + buffer = buffer.Slice(0, (int)(_end - _position)); return Core(buffer, cancellationToken); async ValueTask Core(Memory buffer, CancellationToken cancellationToken) { - var ret = await _superStream.ReadAsync(buffer, cancellationToken).ConfigureAwait(false); - _positionInSuperStream += ret; + var ret = await _stream!.ReadAsync(buffer, cancellationToken).ConfigureAwait(false); + _position += ret; return ret; } } @@ -167,7 +167,23 @@ async ValueTask Core(Memory buffer, CancellationToken cancellationTok public override long Seek(long offset, SeekOrigin origin) { ThrowIfDisposed(); - throw new NotSupportedException(); + + if (!_canSeek) + throw new NotSupportedException(); + + var newPosition = origin switch + { + SeekOrigin.Begin => _start + offset, + SeekOrigin.Current => _position + offset, + SeekOrigin.End => _end + offset, + _ => throw new ArgumentOutOfRangeException(nameof(origin)) + }; + + if (newPosition < 
_start) + throw new IOException("An attempt was made to move the position before the beginning of the stream."); + + _position = newPosition; + return _position - _start; } public override void SetLength(long value) @@ -182,14 +198,8 @@ public override void Write(byte[] buffer, int offset, int count) throw new NotSupportedException(); } - public override void Flush() - { - ThrowIfDisposed(); - throw new NotSupportedException(); - } + public override void Flush() => ThrowIfDisposed(); - // Close the stream for reading. Note that this does NOT close the superStream (since - // the substream is just 'a chunk' of the super-stream protected override void Dispose(bool disposing) { if (disposing && !_isDisposed) diff --git a/test/Npgsql.Tests/ReaderTests.cs b/test/Npgsql.Tests/ReaderTests.cs index ae80df8e6a..babd79977d 100644 --- a/test/Npgsql.Tests/ReaderTests.cs +++ b/test/Npgsql.Tests/ReaderTests.cs @@ -1396,7 +1396,7 @@ public async Task GetBytes() } [Test] - public async Task GetStream_second_time_throws([Values(true, false)] bool isAsync) + public async Task GetStream_second_time([Values(true, false)] bool isAsync) { var expected = new byte[] { 1, 2, 3, 4, 5, 6, 7, 8 }; var streamGetter = BuildStreamGetter(isAsync); @@ -1409,8 +1409,20 @@ public async Task GetStream_second_time_throws([Values(true, false)] bool isAsyn using var stream = await streamGetter(reader, 0); - Assert.That(async () => await streamGetter(reader, 0), - Throws.Exception.TypeOf()); + if (IsSequential) + { + Assert.That(async () => await streamGetter(reader, 0), + Throws.Exception.TypeOf()); + } + else + { + // Non-sequential: getting a second stream disposes the first and returns a fresh one. 
+ using var stream2 = await streamGetter(reader, 0); + Assert.That(() => stream.Read(new byte[1]), Throws.TypeOf()); + var buf = new byte[8]; + Assert.That(stream2.Read(buf), Is.EqualTo(8)); + Assert.That(buf, Is.EqualTo(expected)); + } } [Test] @@ -1721,9 +1733,16 @@ public async Task GetTextReader([Values(true, false)] bool isAsync) textReader.Read(actual, 0, 2); Assert.That(actual[0], Is.EqualTo(expected[0])); Assert.That(actual[1], Is.EqualTo(expected[1])); - Assert.That(async () => await textReaderGetter(reader, 0), - Throws.Exception.TypeOf(), - "Sequential text reader twice on same column"); + if (IsSequential) + { + Assert.That(async () => await textReaderGetter(reader, 0), + Throws.Exception.TypeOf(), + "Sequential text reader twice on same column"); + } + else + { + Assert.That(reader.GetChars(0, 0, actual, 4, 1), Is.EqualTo(1)); + } textReader.Read(actual, 2, 1); Assert.That(actual[2], Is.EqualTo(expected[2])); textReader.Dispose(); @@ -1853,6 +1872,225 @@ public async Task GetTextReader_in_middle_of_column_throws([Values] bool async) Assert.That(() => reader.GetTextReader(0), Throws.Exception.TypeOf()); } + [Test] + public async Task GetStream_is_isolated_from_GetChars() + { + if (IsSequential) + return; + + const string str = "ABCDEFGHIJ"; + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand($"SELECT '{str}'", conn); + await using var reader = await cmd.ExecuteReaderAsync(Behavior); + await reader.ReadAsync(); + + // GetChars on the column, then get a stream — stream should start from the beginning. 
+ var charBuf = new char[5]; + Assert.That(reader.GetChars(0, 0, charBuf, 0, 5), Is.EqualTo(5)); + Assert.That(new string(charBuf), Is.EqualTo("ABCDE")); + + await using var stream = reader.GetStream(0); + Assert.That(stream.CanSeek, Is.True); + Assert.That(stream.Length, Is.EqualTo(Encoding.UTF8.GetByteCount(str))); + Assert.That(stream.Position, Is.EqualTo(0)); + } + + [Test] + public async Task GetStream_survives_reread_of_same_column() + { + if (IsSequential) + return; + + var expected = new byte[] { 1, 2, 3, 4, 5, 6, 7, 8 }; + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand($"SELECT {EncodeByteaHex(expected)}::bytea", conn); + await using var reader = await cmd.ExecuteReaderAsync(Behavior); + await reader.ReadAsync(); + + // Get stream, read partially. + var stream1 = reader.GetStream(0); + var buf = new byte[3]; + Assert.That(stream1.Read(buf), Is.EqualTo(3)); + Assert.That(buf, Is.EqualTo(new byte[] { 1, 2, 3 })); + + // Getting a second stream on same column disposes the first. + var stream2 = reader.GetStream(0); + Assert.That(() => stream1.Read(new byte[1]), Throws.TypeOf()); + + // Second stream should provide the full data. + var buf2 = new byte[8]; + Assert.That(stream2.Read(buf2), Is.EqualTo(8)); + Assert.That(buf2, Is.EqualTo(expected)); + } + + [Test] + public async Task GetTextReader_and_GetChars_interleaved() + { + if (IsSequential) + return; + + const string str = "ABCDEFGHIJKLMNOP"; + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand($"SELECT '{str}'", conn); + await using var reader = await cmd.ExecuteReaderAsync(Behavior); + await reader.ReadAsync(); + + // Get a TextReader, read some chars. + var textReader = reader.GetTextReader(0); + var buf = new char[4]; + Assert.That(textReader.Read(buf, 0, 4), Is.EqualTo(4)); + Assert.That(new string(buf), Is.EqualTo("ABCD")); + + // Now use GetChars to read from the start — should not affect the TextReader. 
+ var charsBuf = new char[6]; + Assert.That(reader.GetChars(0, 0, charsBuf, 0, 6), Is.EqualTo(6)); + Assert.That(new string(charsBuf), Is.EqualTo("ABCDEF")); + + // TextReader should still be at its original position. + Assert.That(textReader.Read(buf, 0, 4), Is.EqualTo(4)); + Assert.That(new string(buf), Is.EqualTo("EFGH")); + } + + [Test] + public async Task GetStream_and_GetChars_on_same_column() + { + if (IsSequential) + return; + + const string str = "ABCDEFGHIJ"; + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand($"SELECT '{str}'", conn); + await using var reader = await cmd.ExecuteReaderAsync(Behavior); + await reader.ReadAsync(); + + // Get a stream and read partially. + await using var stream = reader.GetStream(0); + var buf = new byte[4]; + Assert.That(stream.Read(buf), Is.EqualTo(4)); + + // GetChars on the same column — stream should remain valid. + var charBuf = new char[5]; + Assert.That(reader.GetChars(0, 0, charBuf, 0, 5), Is.EqualTo(5)); + Assert.That(new string(charBuf), Is.EqualTo("ABCDE")); + + // Stream should still be readable from where we left off. + Assert.That(stream.Read(buf), Is.EqualTo(4)); + + // GetChars at a different offset should also work. + Assert.That(reader.GetChars(0, 5, charBuf, 0, 5), Is.EqualTo(5)); + Assert.That(new string(charBuf), Is.EqualTo("FGHIJ")); + } + + [Test] + public async Task GetStream_seek_with_SubReadStream() + { + if (IsSequential) + return; + + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand("SELECT 'abcdefgh'::bytea", conn); + await using var reader = await cmd.ExecuteReaderAsync(Behavior); + await reader.ReadAsync(); + + await using var stream = reader.GetStream(0); + Assert.That(stream.CanSeek, Is.True); + + // Read, seek back, read again — isolated from ReadPosition. 
+ var buf = new byte[4]; + Assert.That(stream.Read(buf), Is.EqualTo(4)); + + stream.Position = 0; + var buf2 = new byte[4]; + Assert.That(stream.Read(buf2), Is.EqualTo(4)); + Assert.That(buf2, Is.EqualTo(buf)); + + // Seek to end, confirm empty. + stream.Seek(0, SeekOrigin.End); + Assert.That(stream.Read(buf), Is.EqualTo(0)); + } + + [Test] + public async Task GetChars_after_unconsumed_GetStream() + { + if (IsSequential) + return; + + const string str = "ABCDEFGHIJ"; + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand($"SELECT '{str}'", conn); + await using var reader = await cmd.ExecuteReaderAsync(Behavior); + await reader.ReadAsync(); + + // Get a stream but don't read from it. + var stream = reader.GetStream(0); + + // GetChars should work — SubReadStream is isolated. + var charBuf = new char[5]; + Assert.That(reader.GetChars(0, 0, charBuf, 0, 5), Is.EqualTo(5)); + Assert.That(new string(charBuf), Is.EqualTo("ABCDE")); + + // Stream is still valid. + var buf = new byte[3]; + Assert.That(stream.Read(buf), Is.EqualTo(3)); + } + + [Test] + public async Task Multiple_GetChars_calls_after_GetTextReader() + { + if (IsSequential) + return; + + const string str = "ABCDEFGHIJ"; + await using var conn = await OpenConnectionAsync(); + await using var cmd = new NpgsqlCommand($"SELECT '{str}'", conn); + await using var reader = await cmd.ExecuteReaderAsync(Behavior); + await reader.ReadAsync(); + + var textReader = reader.GetTextReader(0); + var buf = new char[2]; + textReader.Read(buf, 0, 2); + Assert.That(new string(buf), Is.EqualTo("AB")); + + // Multiple GetChars calls at different offsets should all work. 
+ var charBuf = new char[3]; + Assert.That(reader.GetChars(0, 0, charBuf, 0, 3), Is.EqualTo(3)); + Assert.That(new string(charBuf), Is.EqualTo("ABC")); + + Assert.That(reader.GetChars(0, 5, charBuf, 0, 3), Is.EqualTo(3)); + Assert.That(new string(charBuf), Is.EqualTo("FGH")); + + // TextReader should still work from where it was. + textReader.Read(buf, 0, 2); + Assert.That(new string(buf), Is.EqualTo("CD")); + } + + [Test] + public async Task Sequential_GetChars_advances_field_position([Values] bool fitsInBuffer) + { + // Invariant: GetChars must always advance ReadPosition (even when data is fully buffered), + // so that FieldAtStart correctly reflects consumption. This ensures the sequential seek + // guard blocks non-resumable re-reads after GetChars has consumed data. + // This is important for behavioral consistency across columns. As an optimization + // that skips ReadPosition advancement (reading from a view over the buffer) would pass the sequential seek guard. + if (!IsSequential) + return; + + await using var conn = await OpenConnectionAsync(); + var bufferSize = conn.Settings.ReadBufferSize; + var str = new string('x', fitsInBuffer ? 10 : bufferSize * 2); + + await using var cmd = new NpgsqlCommand($"SELECT '{str}'", conn); + await using var reader = await cmd.ExecuteReaderAsync(Behavior); + await reader.ReadAsync(); + + // GetChars consumes part of the column. + var charBuf = new char[5]; + Assert.That(reader.GetChars(0, 0, charBuf, 0, 5), Is.EqualTo(5)); + + // A non-resumable read on the same column should throw — field is no longer at start. 
+ Assert.That(() => reader.GetString(0), Throws.Exception.TypeOf()); + } + #endregion GetChars / GetTextReader [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5450")] From 77d1fb9dc6ad89daf1a2f236088e98aa0737b172 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 8 Apr 2026 08:50:11 +0300 Subject: [PATCH 743/761] Bump the minor-and-patch group with 2 updates (#6532) Bumps GitHubActionsTestLogger from 3.0.2 to 3.0.3 Bumps Microsoft.NET.Test.Sdk from 18.3.0 to 18.4.0 --- Directory.Packages.props | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index a6bb41adef..55921bd0a3 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -33,11 +33,11 @@ - + - + From 8d90c327415fb0f6d281eead328b75f60d39eb7a Mon Sep 17 00:00:00 2001 From: Shay Rojansky Date: Wed, 8 Apr 2026 13:38:52 +0300 Subject: [PATCH 744/761] Default metrics pool name to Application Name from connection string (#6533) Closes #6531 Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- src/Npgsql/NpgsqlDataSource.cs | 4 ++-- test/Npgsql.Tests/MetricTests.cs | 28 +++++++++++++++++++++++++--- 2 files changed, 27 insertions(+), 5 deletions(-) diff --git a/src/Npgsql/NpgsqlDataSource.cs b/src/Npgsql/NpgsqlDataSource.cs index e9311a16c4..a32a9480bc 100644 --- a/src/Npgsql/NpgsqlDataSource.cs +++ b/src/Npgsql/NpgsqlDataSource.cs @@ -127,12 +127,12 @@ internal NpgsqlDataSource(NpgsqlConnectionStringBuilder settings, NpgsqlDataSour ConnectionString = settings.ToString(); // The data source name is reported in tracing/metrics, so avoid leaking the password through there. - Name = name ?? settings.ToStringWithoutPassword(); + Name = name ?? settings.ApplicationName ?? settings.ToStringWithoutPassword(); } else { ConnectionString = settings.ToStringWithoutPassword(); - Name = name ?? ConnectionString; + Name = name ?? settings.ApplicationName ?? 
ConnectionString; } _password = settings.Password; diff --git a/test/Npgsql.Tests/MetricTests.cs b/test/Npgsql.Tests/MetricTests.cs index 9a8b2757e3..937019f0ee 100644 --- a/test/Npgsql.Tests/MetricTests.cs +++ b/test/Npgsql.Tests/MetricTests.cs @@ -127,6 +127,29 @@ public async Task ConnectionMax() Assert.That(tags["db.client.connection.pool.name"], Is.EqualTo(dataSource.Name)); } + [Test] + public async Task Pool_name_defaults_to_application_name() + { + var exportedItems = new List(); + using var meterProvider = Sdk.CreateMeterProviderBuilder() + .AddMeter("Npgsql") + .AddInMemoryExporter(exportedItems) + .Build(); + + var applicationName = "MetricsDataSource" + Interlocked.Increment(ref _dataSourceCounter); + var dataSourceBuilder = base.CreateDataSourceBuilder(); + dataSourceBuilder.ConnectionStringBuilder.ApplicationName = applicationName; + // Do not set the data source name - this makes the pool name default to the Application Name + await using var dataSource = dataSourceBuilder.Build(); + + meterProvider.ForceFlush(); + + var metric = exportedItems.Single(m => m.Name == "db.client.connection.max"); + var point = GetFilteredPoints(metric.GetMetricPoints(), dataSource.Name).First(); + var tags = ToDictionary(point.Tags); + Assert.That(tags["db.client.connection.pool.name"], Is.EqualTo(applicationName)); + } + [Test] public async Task Password_does_not_leak_via_datasource_name([Values] bool persistSecurityInfo) { @@ -137,10 +160,9 @@ public async Task Password_does_not_leak_via_datasource_name([Values] bool persi .Build(); var dataSourceBuilder = base.CreateDataSourceBuilder(); - dataSourceBuilder.ConnectionStringBuilder.ApplicationName = "MetricsDataSource" + Interlocked.Increment(ref _dataSourceCounter); dataSourceBuilder.ConnectionStringBuilder.PersistSecurityInfo = persistSecurityInfo; - // Do not set the data source name - this makes it default to the connection string, but without - // the password (even when Persist Security Info is true) + // Do not 
set the data source name or the application name - this makes the pool name default to the + // connection string, but without the password (even when Persist Security Info is true) await using var dataSource = dataSourceBuilder.Build(); meterProvider.ForceFlush(); From 01a773fba90a085d022739e0f1b02cf677ee4a42 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Fri, 10 Apr 2026 16:44:14 +0300 Subject: [PATCH 745/761] Fix disposing mre on gss session encryption fallback (#6536) Fixes #6506 --- src/Npgsql/Internal/NpgsqlConnector.cs | 3 ++- test/Npgsql.Tests/ConnectionTests.cs | 27 ++++++++++++++++++- test/Npgsql.Tests/Support/PgPostmasterMock.cs | 2 ++ 3 files changed, 30 insertions(+), 2 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index dce6e48d0e..d224aed84b 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -516,6 +516,7 @@ internal async Task Open(NpgsqlTimeout timeout, bool async, CancellationToken ca if (activity is not null) NpgsqlActivitySource.SetException(activity, e); Break(e, markHostAsOfflineOnConnecting: true); + FullCleanup(); throw; } @@ -2305,7 +2306,7 @@ internal Exception Break(Exception reason, bool markHostAsOfflineOnConnecting = var connection = Connection; - FullCleanup(); + Cleanup(); if (connection is not null) { diff --git a/test/Npgsql.Tests/ConnectionTests.cs b/test/Npgsql.Tests/ConnectionTests.cs index 8a87e7fbea..863130be1c 100644 --- a/test/Npgsql.Tests/ConnectionTests.cs +++ b/test/Npgsql.Tests/ConnectionTests.cs @@ -1536,27 +1536,52 @@ public async Task Sync_open_blocked_same_thread() } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/6427")] + [Platform(Include = "Win")] // Hangs on linux and mac when server closes the socket public async Task Gss_encryption_retry_does_not_clear_pool() { var csb = new NpgsqlConnectionStringBuilder(ConnectionString) { - GssEncryptionMode = GssEncryptionMode.Prefer + GssEncryptionMode 
= GssEncryptionMode.Prefer, + NoResetOnClose = false }; // Break connection on gss encryption request to force the client to create a new connection and retry again // This emulates the behavior of older versions of PostgreSQL or its forks, like Supabase await using var postmaster = PgPostmasterMock.Start(csb.ConnectionString, breakOnGssEncryptionRequest: true); await using var dataSource = CreateDataSource(postmaster.ConnectionString); + PgServerMock server; + int processID; await using (var conn = await dataSource.OpenConnectionAsync()) { processID = conn.ProcessID; + + // The next connection request isn't valid because it was retried + await postmaster.SkipNextConnection(); + + var queryTask = conn.ExecuteNonQueryAsync("SELECT 1"); + + server = await postmaster.WaitForServerConnection(); + await server.ExpectExtendedQuery(); + await server.WriteScalarResponseAndFlush(1); + await queryTask; } // The second time we get a connection from the pool we should ge the exact same connection await using (var conn = await dataSource.OpenConnectionAsync()) { Assert.That(conn.ProcessID, Is.EqualTo(processID)); + + var queryTask = conn.ExecuteNonQueryAsync("SELECT 1"); + + // We do not set NoResetOnClose=true on connection string to test query behavior after connection retry + await server.ExpectSimpleQuery("DISCARD ALL"); + await server.ExpectExtendedQuery(); + server + .WriteCommandComplete() + .WriteReadyForQuery(); + await server.WriteScalarResponseAndFlush(1); + await queryTask; } } diff --git a/test/Npgsql.Tests/Support/PgPostmasterMock.cs b/test/Npgsql.Tests/Support/PgPostmasterMock.cs index d9a93531a1..426a1519c8 100644 --- a/test/Npgsql.Tests/Support/PgPostmasterMock.cs +++ b/test/Npgsql.Tests/Support/PgPostmasterMock.cs @@ -237,6 +237,8 @@ internal async ValueTask WaitForCancellationRequest() return serverOrCancellationRequest.CancellationRequest; } + internal async ValueTask SkipNextConnection() => await _pendingRequestsReader.ReadAsync(); + public async ValueTask 
DisposeAsync() { var endpoint = _socket.LocalEndPoint as IPEndPoint; From a74334ba33052583c1fb6d20e6290874b0609fcf Mon Sep 17 00:00:00 2001 From: Daniil Dukhovniy <58626074+breakKing@users.noreply.github.com> Date: Fri, 10 Apr 2026 22:47:24 +0300 Subject: [PATCH 746/761] Fix column schema generator for partitioned tables (#6537) Co-authored-by: Shay Rojansky --- src/Npgsql/Schema/DbColumnSchemaGenerator.cs | 2 +- test/Npgsql.Tests/ReaderNewSchemaTests.cs | 30 ++++++++++++++++++++ 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/src/Npgsql/Schema/DbColumnSchemaGenerator.cs b/src/Npgsql/Schema/DbColumnSchemaGenerator.cs index 3abda51fae..20a5f1fd06 100644 --- a/src/Npgsql/Schema/DbColumnSchemaGenerator.cs +++ b/src/Npgsql/Schema/DbColumnSchemaGenerator.cs @@ -61,7 +61,7 @@ FROM pg_attribute AS attr JOIN pg_namespace AS ns ON ns.oid = cls.relnamespace WHERE atttypid <> 0 AND - relkind IN ('r', 'v', 'm') AND + relkind IN ('r', 'v', 'm', 'p') AND NOT attisdropped AND nspname NOT IN ('pg_catalog', 'information_schema') AND attnum > 0 AND diff --git a/test/Npgsql.Tests/ReaderNewSchemaTests.cs b/test/Npgsql.Tests/ReaderNewSchemaTests.cs index 3eafdc8a89..5cf0c82150 100644 --- a/test/Npgsql.Tests/ReaderNewSchemaTests.cs +++ b/test/Npgsql.Tests/ReaderNewSchemaTests.cs @@ -252,6 +252,36 @@ public async Task IsAutoIncrement_identity() Assert.That(columns[1].IsAutoIncrement, Is.True, "PG 10 IDENTITY not identified as autoincrement"); } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/5491")] + public async Task Partitioned_table() + { + await using var conn = await OpenConnectionAsync(); + await IgnoreOnRedshift(conn); + MinimumPgVersion(conn, "10.0", "Partitioned tables introduced in PostgreSQL 10"); + + var table = await GetTempTableName(conn); + await conn.ExecuteNonQueryAsync( + $""" + CREATE TABLE {table} ( + id INTEGER GENERATED BY DEFAULT AS IDENTITY NOT NULL, + event_date TIMESTAMP WITHOUT TIME ZONE NOT NULL, + user_name CHARACTER VARYING(500), + 
CONSTRAINT pk_{table} PRIMARY KEY (id, event_date) + ) PARTITION BY RANGE (event_date) + """); + + await using var cmd = new NpgsqlCommand($"SELECT id, event_date, user_name FROM {table}", conn); + await using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SchemaOnly | CommandBehavior.KeyInfo); + var columns = await GetColumnSchema(reader); + + Assert.That(columns[0].AllowDBNull, Is.False); + Assert.That(columns[0].IsAutoIncrement, Is.True); + Assert.That(columns[1].AllowDBNull, Is.False); + Assert.That(columns[1].IsAutoIncrement, Is.False); + Assert.That(columns[2].AllowDBNull, Is.True); + Assert.That(columns[2].IsAutoIncrement, Is.False); + } + [Test] public async Task IsIdentity() { From d90ced53ada92baf3349f52e7a4760eccf8e0963 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Sat, 11 Apr 2026 14:58:02 +0200 Subject: [PATCH 747/761] Fix IterationIndices indices sum bug (#6539) --- src/Npgsql/Util/IterationIndices.cs | 13 ++++++------- test/Npgsql.Tests/Types/ArrayTests.cs | 5 +++++ 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/src/Npgsql/Util/IterationIndices.cs b/src/Npgsql/Util/IterationIndices.cs index 8e844b6ca6..943b22fe1d 100644 --- a/src/Npgsql/Util/IterationIndices.cs +++ b/src/Npgsql/Util/IterationIndices.cs @@ -28,6 +28,9 @@ public bool TryAdvance(int lastCount, ReadOnlySpan counts) if (lastIndex < lastCount - 1) { lastIndex++; + // For Rank > 1 _indicesSum is a linear element counter independent from Many[^1], so we need to advance it explicitly. 
+ if (Many is not null) + _indicesSum++; return true; } @@ -78,13 +81,9 @@ public ref int this[int index] public void Reset() { - if (Many is null) - { - _indicesSum = 0; - return; - } - - Array.Clear(Many); + _indicesSum = 0; + if (Many is not null) + Array.Clear(Many); } public static IterationIndices Create(int dimensions) diff --git a/test/Npgsql.Tests/Types/ArrayTests.cs b/test/Npgsql.Tests/Types/ArrayTests.cs index e1cdcd258b..fa69d2386b 100644 --- a/test/Npgsql.Tests/Types/ArrayTests.cs +++ b/test/Npgsql.Tests/Types/ArrayTests.cs @@ -31,6 +31,11 @@ public class ArrayTests : TestBase .SetName("Empty_array"), new TestCaseData(new[,] { { 1, 2, 3 }, { 7, 8, 9 } }, "{{1,2,3},{7,8,9}}", "integer[]") .SetName("Two_dimensional_array"), + new TestCaseData( + new[,] { { "a", "bb", "ccc" }, { "dddd", "eeeee", "ffffff" } }, + """{{a,bb,ccc},{dddd,eeeee,ffffff}}""", + "text[]") + .SetName("Two_dimensional_variable_size_array"), new TestCaseData(new[] { [1, 2], new byte[] { 3, 4 } }, """{"\\x0102","\\x0304"}""", "bytea[]") .SetName("Bytea_array") ]; From 1c4e9c141736780991868eb280fb74fac49b313e Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Sun, 12 Apr 2026 01:10:13 +0200 Subject: [PATCH 748/761] Prework for read write streamlining (#6316) --- .../Composites/Metadata/CompositeFieldInfo.cs | 109 ++-- .../Internal/Converters/ArrayConverter.cs | 261 ++++++--- .../Internal/Converters/ArrayConverterCore.cs | 111 ++-- .../Converters/BitStringConverters.cs | 26 +- .../Internal/Converters/CastingConverter.cs | 32 +- .../Internal/Converters/CompositeConverter.cs | 143 ++++- .../Internal/Converters/NullableConverter.cs | 18 +- .../Internal/Converters/ObjectConverter.cs | 112 ++-- .../PolymorphicArrayTypeInfoProvider.cs | 51 ++ .../PolymorphicConverterResolver.cs | 66 --- .../Temporal/DateTimeConverterResolver.cs | 143 ----- .../Temporal/DateTimeTypeInfoProvider.cs | 169 ++++++ .../VersionPrefixedTextConverter.cs | 2 +- .../Internal/DynamicTypeInfoResolver.cs | 8 +- 
.../Internal/PgComposingConverterResolver.cs | 68 --- .../Internal/PgComposingTypeInfoProvider.cs | 76 +++ .../Internal/PgConcreteTypeInfoProvider.cs | 105 ++++ src/Npgsql/Internal/PgConverter.cs | 58 +- src/Npgsql/Internal/PgConverterResolver.cs | 111 ---- src/Npgsql/Internal/PgReader.cs | 2 +- src/Npgsql/Internal/PgSerializerOptions.cs | 2 +- src/Npgsql/Internal/PgTypeInfo.cs | 222 +++++--- .../AdoTypeInfoResolverFactory.Multirange.cs | 24 +- .../AdoTypeInfoResolverFactory.Range.cs | 12 +- .../AdoTypeInfoResolverFactory.cs | 54 +- .../NetworkTypeInfoResolverFactory.cs | 2 +- .../UnmappedTypeInfoResolverFactory.cs | 16 +- src/Npgsql/Internal/TypeInfoMapping.cs | 202 +++---- src/Npgsql/NpgsqlBinaryExporter.cs | 2 +- src/Npgsql/NpgsqlParameter.cs | 27 +- src/Npgsql/NpgsqlParameterCollection.cs | 21 +- src/Npgsql/NpgsqlParameter`.cs | 8 +- src/Npgsql/TypeMapping/GlobalTypeMapper.cs | 12 +- test/Npgsql.Tests/CopyTests.cs | 2 +- test/Npgsql.Tests/ReaderTests.cs | 2 +- test/Npgsql.Tests/TypeMapperTests.cs | 4 +- test/Npgsql.Tests/Types/ArrayTests.cs | 2 +- test/Npgsql.Tests/Types/CompositeTests.cs | 80 ++- test/Npgsql.Tests/WriteStateTests.cs | 506 ++++++++++++++++++ 39 files changed, 1945 insertions(+), 926 deletions(-) create mode 100644 src/Npgsql/Internal/Converters/PolymorphicArrayTypeInfoProvider.cs delete mode 100644 src/Npgsql/Internal/Converters/PolymorphicConverterResolver.cs delete mode 100644 src/Npgsql/Internal/Converters/Temporal/DateTimeConverterResolver.cs create mode 100644 src/Npgsql/Internal/Converters/Temporal/DateTimeTypeInfoProvider.cs delete mode 100644 src/Npgsql/Internal/PgComposingConverterResolver.cs create mode 100644 src/Npgsql/Internal/PgComposingTypeInfoProvider.cs create mode 100644 src/Npgsql/Internal/PgConcreteTypeInfoProvider.cs delete mode 100644 src/Npgsql/Internal/PgConverterResolver.cs create mode 100644 test/Npgsql.Tests/WriteStateTests.cs diff --git a/src/Npgsql/Internal/Composites/Metadata/CompositeFieldInfo.cs 
b/src/Npgsql/Internal/Composites/Metadata/CompositeFieldInfo.cs index 080d31ea68..dd394ce6bb 100644 --- a/src/Npgsql/Internal/Composites/Metadata/CompositeFieldInfo.cs +++ b/src/Npgsql/Internal/Composites/Metadata/CompositeFieldInfo.cs @@ -1,4 +1,5 @@ using System; +using System.Diagnostics; using System.Runtime.CompilerServices; using System.Threading; using System.Threading.Tasks; @@ -26,27 +27,45 @@ private protected CompositeFieldInfo(string name, PgTypeInfo typeInfo, PgTypeId PgTypeId = nominalPgTypeId; if (typeInfo.PgTypeId is null) - ThrowHelper.ThrowArgumentException("PgTypeInfo must have a PgTypeId."); + ThrowHelper.ThrowArgumentException("Type info cannot have an undecided PgTypeId.", nameof(typeInfo)); - if (!typeInfo.IsResolverInfo) + PgConcreteTypeInfo concrete; + if (typeInfo is PgConcreteTypeInfo direct) { - var resolution = typeInfo.GetResolution(); - if (typeInfo.GetBufferRequirements(resolution.Converter, DataFormat.Binary) is not { } bufferRequirements) - { - ThrowHelper.ThrowInvalidOperationException("Converter must support binary format to participate in composite types."); - return; - } - _binaryBufferRequirements = bufferRequirements; - Converter = resolution.Converter; + concrete = direct; } + else if (typeInfo is PgProviderTypeInfo providerTypeInfo) + { + // Lift the default concrete's buffer requirements and converter so the composite gets an + // accurate per-field size even when resolution is deferred. IsProviderBacked still signals that + // GetWriteInfo / GetSize must go through BindValue for per-value dispatch at bind time — + // that's where provider-backed fields (DateTime kind, late-bound, etc.) surface deterministic + // errors. The cached default is reused by GetDefaultWriteInfo on CompositeConverter's Path A, + // where per-value resolution has already completed without producing state. 
+ concrete = providerTypeInfo.GetDefaultConcreteTypeInfo(null); + IsProviderBacked = true; + } + else + { + ThrowHelper.ThrowInvalidOperationException($"Unsupported {nameof(PgTypeInfo)} '{typeInfo.GetType().FullName}' for composite field '{name}'."); + return; + } + + if (concrete.GetBufferRequirements(concrete.Converter, DataFormat.Binary) is not { } bufferRequirements) + { + ThrowHelper.ThrowInvalidOperationException("Converter must support binary format to participate in composite types."); + return; + } + _binaryBufferRequirements = bufferRequirements; + Converter = concrete.Converter; } public PgConverter GetReadInfo(out Size readRequirement) { - if (Converter is not null) + if (!IsProviderBacked) { readRequirement = _binaryBufferRequirements.Read; - return Converter; + return Converter!; } if (!PgTypeInfo.TryBind(new Field(Name, PgTypeInfo.PgTypeId.GetValueOrDefault(), -1), DataFormat.Binary, out var converterInfo)) @@ -56,14 +75,29 @@ public PgConverter GetReadInfo(out Size readRequirement) return converterInfo.Converter; } - public PgConverter GetWriteInfo(object instance, out Size writeRequirement) + public PgConverter GetWriteInfo(object instance, out Size writeRequirement, out object? writeState) { - if (Converter is null) - return BindValue(instance, out writeRequirement); + if (IsProviderBacked) + return BindValue(instance, out writeRequirement, out writeState); + writeState = null; writeRequirement = _binaryBufferRequirements.Write; - return Converter; + return Converter!; + } + /// + /// Returns a deterministic write converter for this field without running per-value dispatch — + /// for concrete fields the one-and-only converter, for provider fields the default concrete that + /// was resolved at construction. 
Used by CompositeConverter.Write's Path A, which only runs when + /// bind-time GetSize has already completed and produced no per-field state; the default converter + /// writes the same bytes as any value-dispatched variant for a decided field id and carries no + /// state to dispose. + /// + public PgConverter GetDefaultWriteInfo(out Size writeRequirement) + { + Debug.Assert(Converter is not null); + writeRequirement = _binaryBufferRequirements.Write; + return Converter; } protected ValueTask ReadAsObject(bool async, PgConverter converter, CompositeBuilder builder, PgReader reader, CancellationToken cancellationToken) @@ -98,12 +132,15 @@ protected ValueTask WriteAsObject(bool async, PgConverter converter, PgWriter wr public string Name { get; } public PgTypeId PgTypeId { get; } - public Size BinaryReadRequirement => Converter is not null ? _binaryBufferRequirements.Read : Size.Unknown; - public Size BinaryWriteRequirement => Converter is not null ? _binaryBufferRequirements.Write : Size.Unknown; + public Size BinaryReadRequirement => _binaryBufferRequirements.Read; + public Size BinaryWriteRequirement => _binaryBufferRequirements.Write; + + /// True when this field defers converter resolution to bind time via a provider. + public bool IsProviderBacked { get; } public abstract Type Type { get; } - protected abstract PgConverter BindValue(object instance, out Size writeRequirement); + protected abstract PgConverter BindValue(object instance, out Size writeRequirement, out object? 
writeState); protected abstract void AddValue(CompositeBuilder builder, object value); public abstract StrongBox CreateBox(); @@ -113,7 +150,7 @@ protected ValueTask WriteAsObject(bool async, PgConverter converter, PgWriter wr public abstract void ReadDbNull(CompositeBuilder builder); public abstract ValueTask Read(bool async, PgConverter converter, CompositeBuilder builder, PgReader reader, CancellationToken cancellationToken = default); - public abstract bool IsDbNull(PgConverter converter, object instance, ref object? writeState); + public abstract bool IsDbNull(PgConverter converter, object instance, object? writeState); public abstract Size? GetSizeOrDbNull(PgConverter converter, DataFormat format, Size writeRequirement, object instance, ref object? writeState); public abstract ValueTask Write(bool async, PgConverter converter, PgWriter writer, object instance, CancellationToken cancellationToken); } @@ -129,15 +166,17 @@ sealed class CompositeFieldInfo : CompositeFieldInfo : base(name, typeInfo, nominalPgTypeId) { if (typeInfo.Type != typeof(T)) - throw new InvalidOperationException($"PgTypeInfo type '{typeInfo.Type.FullName}' must be equal to field type '{typeof(T)}'."); + ThrowHelper.ThrowInvalidOperationException($"PgTypeInfo type '{typeInfo.Type.FullName}' must be equal to field type '{typeof(T)}'."); - if (!typeInfo.IsResolverInfo) + // Converter is populated by the base constructor for both concrete and provider type infos — + // for providers it holds the default concrete's converter. _asObject is derived from it and is + // used by AsObject's fast path when the runtime converter matches the cached default. 
+ if (Converter is not null) { - var resolution = typeInfo.GetResolution(); - var typeToConvert = resolution.Converter.TypeToConvert; + var typeToConvert = Converter.TypeToConvert; _asObject = typeToConvert != typeof(T); if (!typeToConvert.IsAssignableFrom(typeof(T))) - throw new InvalidOperationException($"Converter type '{typeToConvert.FullName}' must be assignable from field type '{typeof(T)}'."); + ThrowHelper.ThrowInvalidOperationException($"Converter type '{typeToConvert.FullName}' must be assignable from field type '{typeof(T)}'."); } _getter = getter; @@ -167,7 +206,7 @@ bool AsObject(PgConverter converter) public void Set(object instance, T value) { if (_setter is null) - throw new InvalidOperationException("Not a composite field for a clr field."); + ThrowHelper.ThrowInvalidOperationException("Not a composite field for a clr field."); _setter(instance, value); } @@ -175,7 +214,7 @@ public void Set(object instance, T value) public override void Set(object instance, StrongBox value) { if (_setter is null) - throw new InvalidOperationException("Not a composite field for a clr field."); + ThrowHelper.ThrowInvalidOperationException("Not a composite field for a clr field."); _setter(instance, ((Util.StrongBox)value).TypedValue!); } @@ -183,16 +222,18 @@ public override void Set(object instance, StrongBox value) public override void ReadDbNull(CompositeBuilder builder) { if (default(T) != null) - throw new InvalidCastException($"Type {typeof(T).FullName} does not have null as a possible value."); + ThrowHelper.ThrowInvalidCastException($"Type {typeof(T).FullName} does not have null as a possible value."); builder.AddValue((T?)default); } - protected override PgConverter BindValue(object instance, out Size writeRequirement) + protected override PgConverter BindValue(object instance, out Size writeRequirement, out object? writeState) { var value = _getter(instance); - var resolution = PgTypeInfo.IsBoxing ? 
PgTypeInfo.GetObjectResolution(value) : PgTypeInfo.GetResolution(value); - if (PgTypeInfo.GetBufferRequirements(resolution.Converter, DataFormat.Binary) is not { } bufferRequirements) + var concreteTypeInfo = PgTypeInfo.IsBoxing + ? PgTypeInfo.GetObjectConcreteTypeInfo(value, out writeState) + : PgTypeInfo.GetConcreteTypeInfo(value, out writeState); + if (concreteTypeInfo.GetBufferRequirements(concreteTypeInfo.Converter, DataFormat.Binary) is not { } bufferRequirements) { ThrowHelper.ThrowInvalidOperationException("Converter must support binary format to participate in composite types."); writeRequirement = default; @@ -200,7 +241,7 @@ protected override PgConverter BindValue(object instance, out Size writeRequirem } writeRequirement = bufferRequirements.Write; - return resolution.Converter; + return concreteTypeInfo.Converter; } protected override void AddValue(CompositeBuilder builder, object value) => builder.AddValue((T)value); @@ -231,10 +272,10 @@ async ValueTask Core(CompositeBuilder builder, ValueTask task) public override bool IsDbNullable => Converter?.IsDbNullable ?? true; - public override bool IsDbNull(PgConverter converter, object instance, ref object? writeState) + public override bool IsDbNull(PgConverter converter, object instance, object? writeState) { var value = _getter(instance); - return AsObject(converter) ? converter.IsDbNullAsObject(value, ref writeState) : ((PgConverter)converter).IsDbNull(value, ref writeState); + return AsObject(converter) ? converter.IsDbNullAsObject(value, writeState) : ((PgConverter)converter).IsDbNull(value, writeState); } public override Size? GetSizeOrDbNull(PgConverter converter, DataFormat format, Size writeRequirement, object instance, ref object? 
writeState) diff --git a/src/Npgsql/Internal/Converters/ArrayConverter.cs b/src/Npgsql/Internal/Converters/ArrayConverter.cs index 117135d8d9..29e3564f14 100644 --- a/src/Npgsql/Internal/Converters/ArrayConverter.cs +++ b/src/Npgsql/Internal/Converters/ArrayConverter.cs @@ -1,4 +1,5 @@ using System; +using System.Buffers; using System.Collections.Generic; using System.Collections.Concurrent; using System.Diagnostics; @@ -14,14 +15,13 @@ abstract class ArrayConverter : PgStreamingConverter where T : notnull { readonly ArrayConverterCore _arrayConverterCore; - ArrayConverter(int? expectedDimensions, PgConverterResolution elemResolution, int pgLowerBound = 1) + private protected ArrayConverter(int? expectedDimensions, PgConcreteTypeInfo elementTypeInfo, int pgLowerBound = 1) { - if (!elemResolution.Converter.CanConvert(DataFormat.Binary, out var bufferRequirements)) + if (!elementTypeInfo.Converter.CanConvert(DataFormat.Binary, out var bufferRequirements)) throw new NotSupportedException("Element converter has to support the binary format to be compatible."); - PgTypeInfo elementTypeInfo = null!; // TODO until https://github.com/npgsql/npgsql/pull/6316 - _arrayConverterCore = new((IElementOperations)this, elementTypeInfo, elemResolution.Converter.IsDbNullable, expectedDimensions, - bufferRequirements, elemResolution.PgTypeId, pgLowerBound); + _arrayConverterCore = new((IElementOperations)this, elementTypeInfo, elementTypeInfo.Converter.IsDbNullable, expectedDimensions, + bufferRequirements, elementTypeInfo.PgTypeId, pgLowerBound); } public override T Read(PgReader reader) => (T)_arrayConverterCore.Read(async: false, reader).Result; @@ -62,17 +62,17 @@ public override void Write(PgWriter writer, T values) public override ValueTask WriteAsync(PgWriter writer, T values, CancellationToken cancellationToken = default) => _arrayConverterCore.Write(async: true, writer, values, cancellationToken); - public static ArrayConverter CreateArrayBased(PgConverterResolution 
elemResolution, Type? effectiveType = null, int pgLowerBound = 1) - => new ArrayBased(elemResolution, effectiveType, pgLowerBound); + public static ArrayConverter CreateArrayBased(PgConcreteTypeInfo elementTypeInfo, Type? effectiveType = null, int pgLowerBound = 1) + => new ArrayBased(elementTypeInfo, effectiveType, pgLowerBound); - public static ArrayConverter CreateListBased(PgConverterResolution elemResolution, int pgLowerBound = 1) - => new ListBased(elemResolution, pgLowerBound); + public static ArrayConverter CreateListBased(PgConcreteTypeInfo elementTypeInfo, int pgLowerBound = 1) + => new ListBased(elementTypeInfo, pgLowerBound); - sealed class ArrayBased(PgConverterResolution elemResolution, Type? effectiveType = null, int pgLowerBound = 1) + sealed class ArrayBased(PgConcreteTypeInfo elementTypeInfo, Type? effectiveType = null, int pgLowerBound = 1) : ArrayConverter(expectedDimensions: effectiveType is null ? 1 : effectiveType.IsArray ? effectiveType.GetArrayRank() : null, - elemResolution, pgLowerBound), IElementOperations + elementTypeInfo, pgLowerBound), IElementOperations { - readonly PgConverter _elemConverter = elemResolution.GetConverter(); + readonly PgConverter _elemConverter = (PgConverter)elementTypeInfo.Converter; [MethodImpl(MethodImplOptions.AggressiveInlining)] static TElement? 
GetValue(object collection, IterationIndices indices) @@ -176,10 +176,10 @@ ValueTask IElementOperations.Write(bool async, PgWriter writer, object collectio } } - sealed class ListBased(PgConverterResolution elemResolution, int pgLowerBound = 1) - : ArrayConverter(expectedDimensions: 1, elemResolution, pgLowerBound), IElementOperations + sealed class ListBased(PgConcreteTypeInfo elementTypeInfo, int pgLowerBound = 1) + : ArrayConverter(expectedDimensions: 1, elementTypeInfo, pgLowerBound), IElementOperations { - readonly PgConverter _elemConverter = elemResolution.GetConverter(); + readonly PgConverter _elemConverter = (PgConverter)elementTypeInfo.Converter; [MethodImpl(MethodImplOptions.AggressiveInlining)] static TElement? GetValue(object collection, int index) @@ -250,8 +250,8 @@ ValueTask IElementOperations.Write(bool async, PgWriter writer, object collectio } } -sealed class ArrayConverterResolver(PgResolverTypeInfo elementTypeInfo, Type effectiveType) - : PgComposingConverterResolver(elementTypeInfo.PgTypeId is { } id ? elementTypeInfo.Options.GetArrayTypeId(id) : null, +sealed class ArrayTypeInfoProvider(PgProviderTypeInfo elementTypeInfo, Type effectiveType) + : PgComposingTypeInfoProvider(elementTypeInfo.PgTypeId is { } id ? 
elementTypeInfo.Options.GetArrayTypeId(id) : null, elementTypeInfo) where T : notnull { @@ -260,57 +260,170 @@ sealed class ArrayConverterResolver(PgResolverTypeInfo elementTypeI protected override PgTypeId GetEffectivePgTypeId(PgTypeId pgTypeId) => Options.GetArrayElementTypeId(pgTypeId); protected override PgTypeId GetPgTypeId(PgTypeId effectivePgTypeId) => Options.GetArrayTypeId(effectivePgTypeId); - protected override PgConverter CreateConverter(PgConverterResolution effectiveResolution) + protected override PgConverter CreateConverter(PgConcreteTypeInfo effectiveConcreteTypeInfo) { if (typeof(T) == typeof(Array) || typeof(T).IsArray) - return ArrayConverter.CreateArrayBased(effectiveResolution, effectiveType); + return ArrayConverter.CreateArrayBased(effectiveConcreteTypeInfo, effectiveType); if (typeof(T).IsConstructedGenericType && typeof(T).GetGenericTypeDefinition() == typeof(IList<>)) - return ArrayConverter.CreateListBased(effectiveResolution); + return ArrayConverter.CreateListBased(effectiveConcreteTypeInfo); throw new NotSupportedException($"Unknown type T: {typeof(T).FullName}"); } - protected override PgConverterResolution? GetEffectiveResolution(T? values, PgTypeId? expectedEffectivePgTypeId) + protected override PgConcreteTypeInfo? GetEffectiveTypeInfo(ProviderValueContext effectiveContext, T? values, ref object? writeState) { - PgConverterResolution? resolution = null; + PgConcreteTypeInfo? concreteTypeInfo = null; + PgArrayMetadata metadata; + ArrayPool<(Size, object?)>? elemDataArrayPool = null; + (Size, object? WriteState)[]? 
elemData = null; + + var index = 0; switch (values) { - case TElement[] array: - foreach (var value in array) + case TElement[] array: + metadata = PgArrayMetadata.Create(ArrayConverterCore.GetArrayLengths(array, out _), null); + foreach (var value in array) + { + var result = EffectiveTypeInfo.GetConcreteTypeInfo(effectiveContext, value, out var state); + if (state is not null && elemData is null) { - var result = EffectiveTypeInfo.GetResolution(value, resolution?.PgTypeId ?? expectedEffectivePgTypeId); - resolution ??= result; + elemDataArrayPool = ArrayPool<(Size, object?)>.Shared; + elemData = elemDataArrayPool.Rent(metadata.TotalElements); + elemData.AsSpan(0, index).Clear(); } - break; - case List list: - foreach (var value in list) + + // Always assign when elemData is allocated to avoid stale pooled array entries. + if (elemData is not null) + elemData[index].WriteState = state; + + if (result is not null) { - var result = EffectiveTypeInfo.GetResolution(value, resolution?.PgTypeId ?? expectedEffectivePgTypeId); - resolution ??= result; + if (concreteTypeInfo is null) + { + concreteTypeInfo = result; + effectiveContext = effectiveContext with { ExpectedPgTypeId = concreteTypeInfo.PgTypeId }; + } + else if (result != concreteTypeInfo) + ThrowHelper.ThrowInvalidOperationException("Array elements resolved to inconsistent concrete type infos. All elements must resolve to the same type info."); } - break; - case IList list: - foreach (var value in list) + + index++; + } + + break; + case List list: + metadata = PgArrayMetadata.Create(list.Count, null); + foreach (var value in list) + { + var result = EffectiveTypeInfo.GetConcreteTypeInfo(effectiveContext, value, out var state); + if (state is not null && elemData is null) { - var result = EffectiveTypeInfo.GetResolution(value, resolution?.PgTypeId ?? 
expectedEffectivePgTypeId); - resolution ??= result; + elemDataArrayPool = ArrayPool<(Size, object?)>.Shared; + elemData = elemDataArrayPool.Rent(metadata.TotalElements); + elemData.AsSpan(0, index).Clear(); } - break; - case Array array: - foreach (var value in array) + + // Always assign when elemData is allocated to avoid stale pooled array entries. + if (elemData is not null) + elemData[index].WriteState = state; + + if (result is not null) { - var result = EffectiveTypeInfo.GetResolutionAsObject(value, resolution?.PgTypeId ?? expectedEffectivePgTypeId); - resolution ??= result; + if (concreteTypeInfo is null) + { + concreteTypeInfo = result; + effectiveContext = effectiveContext with { ExpectedPgTypeId = concreteTypeInfo.PgTypeId }; + } + else if (result != concreteTypeInfo) + ThrowHelper.ThrowInvalidOperationException("Array elements resolved to inconsistent concrete type infos. All elements must resolve to the same type info."); } - break; - case null: - break; - default: - throw new NotSupportedException(); + + index++; + } + + break; + case IList list: + metadata = PgArrayMetadata.Create(list.Count, null); + foreach (var value in list) + { + var result = EffectiveTypeInfo.GetConcreteTypeInfo(effectiveContext, value, out var state); + if (state is not null && elemData is null) + { + elemDataArrayPool = ArrayPool<(Size, object?)>.Shared; + elemData = elemDataArrayPool.Rent(metadata.TotalElements); + elemData.AsSpan(0, index).Clear(); + } + + // Always assign when elemData is allocated to avoid stale pooled array entries. + if (elemData is not null) + elemData[index].WriteState = state; + + if (result is not null) + { + if (concreteTypeInfo is null) + { + concreteTypeInfo = result; + effectiveContext = effectiveContext with { ExpectedPgTypeId = concreteTypeInfo.PgTypeId }; + } + else if (result != concreteTypeInfo) + ThrowHelper.ThrowInvalidOperationException("Array elements resolved to inconsistent concrete type infos. 
All elements must resolve to the same type info."); + } + + index++; + } + + break; + case Array array: + metadata = PgArrayMetadata.Create(ArrayConverterCore.GetArrayLengths(array, out var dimensionLengths), dimensionLengths); + foreach (var value in array) + { + var result = EffectiveTypeInfo.GetAsObjectConcreteTypeInfo(effectiveContext, value, out var state); + if (state is not null && elemData is null) + { + elemDataArrayPool = ArrayPool<(Size, object?)>.Shared; + elemData = elemDataArrayPool.Rent(metadata.TotalElements); + elemData.AsSpan(0, index).Clear(); + } + + // Always assign when elemData is allocated to avoid stale pooled array entries. + if (elemData is not null) + elemData[index].WriteState = state; + + if (result is not null) + { + if (concreteTypeInfo is null) + { + concreteTypeInfo = result; + effectiveContext = effectiveContext with { ExpectedPgTypeId = concreteTypeInfo.PgTypeId }; + } + else if (result != concreteTypeInfo) + ThrowHelper.ThrowInvalidOperationException("Array elements resolved to inconsistent concrete type infos. 
All elements must resolve to the same type info."); + } + + index++; + } + + break; + case null: + return null; + default: + throw new NotSupportedException(); } - return resolution; + if (elemData is not null) + { + writeState = new ArrayConverterWriteState + { + Metadata = metadata, + IterationIndices = metadata.CreateIndices(), + ArrayPool = elemDataArrayPool, + Data = new(elemData, 0, index), + AnyWriteState = true + }; + } + + return concreteTypeInfo; } } @@ -356,34 +469,46 @@ public override ValueTask WriteAsync(PgWriter writer, TBase value, CancellationT => throw new NotSupportedException("Polymorphic writing is not supported"); } -sealed class PolymorphicArrayConverterResolver : PolymorphicConverterResolver +sealed class PolymorphicArrayTypeInfoProvider : PgConcreteTypeInfoProvider { - readonly PgResolverTypeInfo _effectiveInfo; - readonly PgResolverTypeInfo _effectiveNullableInfo; - readonly ConcurrentDictionary _converterCache = new(ReferenceEqualityComparer.Instance); + readonly PgProviderTypeInfo _effectiveTypeInfo; + readonly PgProviderTypeInfo _effectiveNullableTypeInfo; + readonly ConcurrentDictionary _concreteInfoCache = new(ReferenceEqualityComparer.Instance); + + public PolymorphicArrayTypeInfoProvider(PgProviderTypeInfo effectiveTypeInfo, PgProviderTypeInfo effectiveNullableTypeInfo) + { + if (effectiveTypeInfo.PgTypeId is null || effectiveNullableTypeInfo.PgTypeId is null) + throw new ArgumentException("Type info cannot have an undecided PgTypeId.", + effectiveTypeInfo.PgTypeId is null ? nameof(effectiveTypeInfo) : nameof(effectiveNullableTypeInfo)); + + _effectiveTypeInfo = effectiveTypeInfo; + _effectiveNullableTypeInfo = effectiveNullableTypeInfo; + } + + protected override PgConcreteTypeInfo GetDefaultCore(PgTypeId? pgTypeId) + => GetOrAdd(_effectiveTypeInfo.GetDefaultConcreteTypeInfo(pgTypeId), _effectiveNullableTypeInfo.GetDefaultConcreteTypeInfo(pgTypeId)); + + protected override PgConcreteTypeInfo? 
GetForValueCore(ProviderValueContext context, TBase? value, ref object? writeState) + => throw new NotSupportedException("Polymorphic writing is not supported."); - public PolymorphicArrayConverterResolver(PgResolverTypeInfo effectiveInfo, PgResolverTypeInfo effectiveNullableInfo) - : base(effectiveInfo.PgTypeId!.Value) + protected override PgConcreteTypeInfo? GetForFieldCore(Field field) { - if (effectiveInfo.PgTypeId is null || effectiveNullableInfo.PgTypeId is null) - throw new InvalidOperationException("Cannot accept undecided infos"); + var concreteTypeInfo = _effectiveTypeInfo.GetConcreteTypeInfo(field); + var concreteNullableTypeInfo = _effectiveNullableTypeInfo.GetConcreteTypeInfo(field); - _effectiveInfo = effectiveInfo; - _effectiveNullableInfo = effectiveNullableInfo; + return concreteTypeInfo is not null && concreteNullableTypeInfo is not null + ? GetOrAdd(concreteTypeInfo, concreteNullableTypeInfo) + : null; } - protected override PgConverter Get(Field? maybeField) + PgConcreteTypeInfo GetOrAdd(PgConcreteTypeInfo concreteTypeInfo, PgConcreteTypeInfo concreteNullableTypeInfo) { - var structResolution = maybeField is { } field - ? _effectiveInfo.GetResolution(field) - : _effectiveInfo.GetDefaultResolution(PgTypeId); - var nullableResolution = maybeField is { } field2 - ? 
_effectiveNullableInfo.GetResolution(field2) - : _effectiveNullableInfo.GetDefaultResolution(PgTypeId); - - (PgConverter StructConverter, PgConverter NullableConverter) state = (structResolution.Converter, nullableResolution.Converter); - return _converterCache.GetOrAdd(structResolution.Converter, - static (_, state) => new PolymorphicArrayConverter((PgConverter)state.StructConverter, (PgConverter)state.NullableConverter), + (PgConcreteTypeInfo ConcreteInfo, PgConcreteTypeInfo ConcreteNullableInfo) state = (concreteTypeInfo, concreteNullableTypeInfo); + return _concreteInfoCache.GetOrAdd(concreteTypeInfo, + static (_, state) => + new(state.ConcreteInfo.Options, + new PolymorphicArrayConverter((PgConverter)state.ConcreteInfo.Converter, (PgConverter)state.ConcreteNullableInfo.Converter), + state.ConcreteInfo.PgTypeId), state); } } diff --git a/src/Npgsql/Internal/Converters/ArrayConverterCore.cs b/src/Npgsql/Internal/Converters/ArrayConverterCore.cs index 46aab4a9fe..29fd0dfa60 100644 --- a/src/Npgsql/Internal/Converters/ArrayConverterCore.cs +++ b/src/Npgsql/Internal/Converters/ArrayConverterCore.cs @@ -35,44 +35,63 @@ readonly struct ArrayConverterCore( PgTypeInfo ElementTypeInfo { get; } = elementTypeInfo; bool ElemTypeDbNullable { get; } = elemTypeDbNullable; - bool IsDbNull(object values, IterationIndices arrayIndices, ref object? writeState) + bool IsDbNull(object values, IterationIndices arrayIndices, object? writeState) { // This call will only skip GetSize if we are dealing with fixed size elements, otherwise we'll repeat sizing costs. + // Fixed-size element converters cannot produce per-value write state, so GetSizeOrDbNull must + // leave writeState alone — any mutation is a contract violation in the element converter. 
Debug.Assert(binaryRequirements.Write.Kind is SizeKind.Exact); - return elemOps.GetSizeOrDbNull(new(DataFormat.Binary, binaryRequirements.Write), values, arrayIndices, ref writeState) is null; + var originalWriteState = writeState; + var isDbNull = elemOps.GetSizeOrDbNull(new(DataFormat.Binary, binaryRequirements.Write), values, arrayIndices, ref writeState) is null; + Debug.Assert(ReferenceEquals(writeState, originalWriteState), "Fixed-size element converter mutated writeState during a null probe."); + return isDbNull; + } + + // Sizes a single element, accumulates into running size/anyWriteState, and returns the per-slot Size (-1 sentinel for NULL). + [MethodImpl(MethodImplOptions.AggressiveInlining)] + Size SizeElement(SizeContext context, object values, IterationIndices indices, ref object? elemState, ref Size size, ref bool anyWriteState) + { + var elemSize = elemOps.GetSizeOrDbNull(context, values, indices, ref elemState); + anyWriteState = anyWriteState || elemState is not null; + size = size.Combine(elemSize ?? 0); + return elemSize ?? -1; } public Size GetSize(SizeContext context, object values, ref object? writeState) { Debug.Assert(context.Format is DataFormat.Binary); - if (writeState is not null) - ThrowHelper.ThrowArgumentException("Unexpected write state, expected null.", nameof(writeState)); - var metadata = PgArrayMetadata.Create(elemOps.GetCollectionCount(values, out var lengths), lengths); + // Try to extract state from the provider phase (if anything). Provider-level state is consumed once per binding, + // so we don't need to check for or clean up leftover iteration state — there's no path that produces it. + var providerState = writeState as ArrayConverterWriteState; + + var metadata = providerState?.Metadata ?? 
PgArrayMetadata.Create(elemOps.GetCollectionCount(values, out var lengths), lengths); if (metadata.TotalElements is 0) { - Debug.Assert(writeState is null); + // The provider phase doesn't construct write state when there are no elements to populate, so any state + // reaching this branch is stale from a prior binding and would otherwise leak through to Write as garbage. + if (writeState is not null) + ThrowHelper.ThrowArgumentException("Write state should be null for empty arrays.", nameof(writeState)); return metadata.BinaryPreambleByteCount; } var size = Size.Create(metadata.BinaryPreambleByteCount + sizeof(int) * metadata.TotalElements); - var indices = metadata.CreateIndices(); - var anyWriteState = false; - ArrayPool<(Size, object?)>? arrayPool = null; - (Size Size, object? WriteState)[]? elemData = null; + var indices = providerState?.IterationIndices ?? metadata.CreateIndices(); + var anyWriteState = providerState?.AnyWriteState ?? false; + var arrayPool = providerState?.ArrayPool; + var elemData = providerState?.Data.Array; + var fixedSizeElements = false; if (binaryRequirements.Write is { Kind: SizeKind.Exact, Value: var elemByteCount }) { + fixedSizeElements = true; var nulls = 0; var lastLength = metadata.LastDimension; if (ElemTypeDbNullable) { do { - object? elemState = null; - if (IsDbNull(values, indices, ref elemState)) + if (IsDbNull(values, indices, elemData?[indices.IndicesSum].WriteState)) nulls++; - if (elemState is not null) - ElementTypeInfo.DisposeWriteState(elemState); } while (indices.TryAdvance(lastLength, metadata.DimensionLengths)); } @@ -81,29 +100,45 @@ public Size GetSize(SizeContext context, object values, ref object? 
writeState) } else { - arrayPool = ArrayPool<(Size, object?)>.Shared; - elemData = arrayPool.Rent(metadata.TotalElements); var lastCount = metadata.LastDimension; - do + if (elemData is null) { - ref var elemState = ref elemData[indices.IndicesSum].WriteState; - var elemSize = elemOps.GetSizeOrDbNull(context, values, indices, ref elemState); - anyWriteState = anyWriteState || elemState is not null; - elemData[indices.IndicesSum].Size = elemSize ?? -1; - size = size.Combine(elemSize ?? 0); + arrayPool = ArrayPool<(Size, object?)>.Shared; + elemData = arrayPool.Rent(metadata.TotalElements); + // Own-rent: pool buffers may contain stale WriteState references, so start each state at null. + do + { + object? elemState = null; + var elemSize = SizeElement(context, values, indices, ref elemState, ref size, ref anyWriteState); + elemData[indices.IndicesSum] = (elemSize, elemState); + } + while (indices.TryAdvance(lastCount, metadata.DimensionLengths)); + } + else + { + // Provider-supplied elemData already has valid per-element WriteState, observe and extend it through the ref. + do + { + ref var elem = ref elemData[indices.IndicesSum]; + elem.Size = SizeElement(context, values, indices, ref elem.WriteState, ref size, ref anyWriteState); + } + while (indices.TryAdvance(lastCount, metadata.DimensionLengths)); } - // We can immediately continue if we didn't reach the end of the last dimension. - while (indices.TryAdvance(lastCount, metadata.DimensionLengths)); } - writeState = new ArrayConverterWriteState + var result = providerState ?? 
new() { Metadata = metadata, - IterationIndices = indices, - ArrayPool = arrayPool, - Data = elemData!, - AnyWriteState = anyWriteState + IterationIndices = indices }; + if (elemData is not null) + { + result.ArrayPool = arrayPool; + result.Data = new(elemData, 0, metadata.TotalElements); + result.AnyWriteState = anyWriteState; + } + result.FixedSizeElements = fixedSizeElements; + writeState = result; return size; } @@ -233,27 +268,24 @@ public async ValueTask Write(bool async, PgWriter writer, object values, Cancell indices.Reset(); var lastCount = metadata.LastDimension; var offset = state.Data.Offset; + var fixedSizeElements = state.FixedSizeElements; do { if (writer.ShouldFlush(sizeof(int))) await writer.Flush(async, cancellationToken).ConfigureAwait(false); - var elem = elemData?[offset + indices.IndicesSum]; - object? fixedSizeWriteState = null; - var length = elemData is null - ? ElemTypeDbNullable && IsDbNull(values, indices, ref fixedSizeWriteState) ? -1 : binaryRequirements.Write.Value - : elem.GetValueOrDefault().Size.Value; + var elem = elemData?[offset + indices.IndicesSum] ?? default; + var length = fixedSizeElements + ? ElemTypeDbNullable && IsDbNull(values, indices, elem.WriteState) ? -1 : binaryRequirements.Write.Value + : elem.Size.Value; writer.WriteInt32(length); if (length is not -1) { using var _ = await writer.BeginNestedWrite(async, binaryRequirements.Write, - length, fixedSizeWriteState ?? 
elem?.WriteState, cancellationToken).ConfigureAwait(false); + length, elem.WriteState, cancellationToken).ConfigureAwait(false); await elemOps.Write(async, writer, values, indices, cancellationToken).ConfigureAwait(false); } - - if (fixedSizeWriteState is not null) - ElementTypeInfo.DisposeWriteState(fixedSizeWriteState); } while (indices.TryAdvance(lastCount, metadata.DimensionLengths)); } @@ -313,6 +345,9 @@ sealed class ArrayConverterWriteState : MultiWriteState { public required PgArrayMetadata Metadata { get; init; } public required IterationIndices IterationIndices { get; init; } + + /// When true, all non-null elements have a fixed binary size and Data is not populated with per-element sizes. + public bool FixedSizeElements { get; set; } } readonly struct PgArrayMetadata diff --git a/src/Npgsql/Internal/Converters/BitStringConverters.cs b/src/Npgsql/Internal/Converters/BitStringConverters.cs index 12316bdbd2..5b2f868ddb 100644 --- a/src/Npgsql/Internal/Converters/BitStringConverters.cs +++ b/src/Npgsql/Internal/Converters/BitStringConverters.cs @@ -225,15 +225,23 @@ async ValueTask Write(bool async, PgWriter writer, string value, CancellationTok } } -/// Note that for BIT(1), this resolver will return a bool by default, to align with SqlClient -/// (see discussion https://github.com/npgsql/npgsql/pull/362#issuecomment-59622101). -sealed class PolymorphicBitStringConverterResolver(PgTypeId bitString) : PolymorphicConverterResolver(bitString) +/// For BIT(1) columns specifically (read from a field with TypeModifier == 1), this provider returns a bool converter +/// to align with SqlClient (see discussion https://github.com/npgsql/npgsql/pull/362#issuecomment-59622101). +/// Otherwise we return a BitArray converter. Polymorphic writing through this provider is not supported. +sealed class PolymorphicBitStringTypeInfoProvider(PgSerializerOptions options, PgTypeId bitString) : PgConcreteTypeInfoProvider { - BoolBitStringConverter? 
_boolConverter; - BitArrayBitStringConverter? _bitArrayConverter; + readonly PgConcreteTypeInfo _boolConcreteTypeInfo = new(options, new BoolBitStringConverter(), bitString); + readonly PgConcreteTypeInfo _bitArrayConcreteTypeInfo = new(options, new BitArrayBitStringConverter(), bitString); - protected override PgConverter Get(Field? field) - => field?.TypeModifier is 1 - ? _boolConverter ??= new BoolBitStringConverter() - : _bitArrayConverter ??= new BitArrayBitStringConverter(); + protected override PgConcreteTypeInfo GetDefaultCore(PgTypeId? pgTypeId) + => GetConcreteInfo(field: null); + + protected override PgConcreteTypeInfo? GetForValueCore(ProviderValueContext context, object? value, ref object? writeState) + => throw new NotSupportedException("Polymorphic writing is not supported."); + + protected override PgConcreteTypeInfo GetForFieldCore(Field field) + => GetConcreteInfo(field); + + PgConcreteTypeInfo GetConcreteInfo(Field? field) + => field?.TypeModifier is 1 ? _boolConcreteTypeInfo : _bitArrayConcreteTypeInfo; } diff --git a/src/Npgsql/Internal/Converters/CastingConverter.cs b/src/Npgsql/Internal/Converters/CastingConverter.cs index a2b83fd94c..ff94242dfb 100644 --- a/src/Npgsql/Internal/Converters/CastingConverter.cs +++ b/src/Npgsql/Internal/Converters/CastingConverter.cs @@ -10,7 +10,7 @@ namespace Npgsql.Internal.Converters; sealed class CastingConverter(PgConverter effectiveConverter) : PgConverter(effectiveConverter.DbNullPredicateKind is DbNullPredicate.Custom) { - protected override bool IsDbNullValue(T? value, ref object? writeState) => effectiveConverter.IsDbNullAsObject(value, ref writeState); + protected override bool IsDbNullValue(T? value, object? 
writeState) => effectiveConverter.IsDbNullAsObject(value, writeState); public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) => effectiveConverter.CanConvert(format, out bufferRequirements); @@ -44,18 +44,18 @@ internal override ValueTask WriteAsObject(bool async, PgWriter writer, object va } } -// Given there aren't many instantiations of converter resolvers (and it's fairly involved to write a fast one) we use the composing base class. -sealed class CastingConverterResolver(PgResolverTypeInfo effectiveResolverTypeInfo) - : PgComposingConverterResolver(effectiveResolverTypeInfo.PgTypeId, effectiveResolverTypeInfo) +// Given there aren't many instantiations of providers (and it's fairly involved to write a fast one) we use the composing base class. +sealed class CastingTypeInfoProvider(PgProviderTypeInfo effectiveProviderTypeInfo) + : PgComposingTypeInfoProvider(effectiveProviderTypeInfo.PgTypeId, effectiveProviderTypeInfo) { protected override PgTypeId GetEffectivePgTypeId(PgTypeId pgTypeId) => pgTypeId; protected override PgTypeId GetPgTypeId(PgTypeId effectivePgTypeId) => effectivePgTypeId; - protected override PgConverter CreateConverter(PgConverterResolution effectiveResolution) - => new CastingConverter(effectiveResolution.Converter); + protected override PgConverter CreateConverter(PgConcreteTypeInfo effectiveConcreteTypeInfo) + => new CastingConverter(effectiveConcreteTypeInfo.Converter); - protected override PgConverterResolution? GetEffectiveResolution(T? value, PgTypeId? expectedEffectiveTypeId) - => EffectiveTypeInfo.GetResolutionAsObject(value, expectedEffectiveTypeId); + protected override PgConcreteTypeInfo? GetEffectiveTypeInfo(ProviderValueContext effectiveContext, T? value, ref object? 
writeState) + => EffectiveTypeInfo.GetAsObjectConcreteTypeInfo(effectiveContext, value, out writeState); } static class CastingTypeInfoExtensions @@ -67,13 +67,13 @@ internal static PgTypeInfo ToNonBoxing(this PgTypeInfo typeInfo) return typeInfo; var type = typeInfo.Type; - if (typeInfo is PgResolverTypeInfo resolverTypeInfo) - return new PgResolverTypeInfo(typeInfo.Options, - (PgConverterResolver)Activator.CreateInstance(typeof(CastingConverterResolver<>).MakeGenericType(type), - resolverTypeInfo)!, typeInfo.PgTypeId); - - var resolution = typeInfo.GetResolution(); - return new PgTypeInfo(typeInfo.Options, - (PgConverter)Activator.CreateInstance(typeof(CastingConverter<>).MakeGenericType(type), resolution.Converter)!, resolution.PgTypeId); + if (typeInfo is PgProviderTypeInfo providerTypeInfo) + return new PgProviderTypeInfo(typeInfo.Options, + (PgConcreteTypeInfoProvider)Activator.CreateInstance(typeof(CastingTypeInfoProvider<>).MakeGenericType(type), + providerTypeInfo)!, typeInfo.PgTypeId); + + var concreteTypeInfo = (PgConcreteTypeInfo)typeInfo; + return new PgConcreteTypeInfo(typeInfo.Options, + (PgConverter)Activator.CreateInstance(typeof(CastingConverter<>).MakeGenericType(type), concreteTypeInfo.Converter)!, concreteTypeInfo.PgTypeId); } } diff --git a/src/Npgsql/Internal/Converters/CompositeConverter.cs b/src/Npgsql/Internal/Converters/CompositeConverter.cs index 2c985b647c..5646402636 100644 --- a/src/Npgsql/Internal/Converters/CompositeConverter.cs +++ b/src/Npgsql/Internal/Converters/CompositeConverter.cs @@ -1,5 +1,6 @@ using System; using System.Buffers; +using System.Diagnostics; using System.Threading; using System.Threading.Tasks; using Npgsql.Internal.Composites; @@ -10,14 +11,21 @@ sealed class CompositeConverter : PgStreamingConverter where T : notnull { readonly CompositeInfo _composite; readonly BufferRequirements _bufferRequirements; + // Precomputed write size from the constructor's combine pass, taken before the provider-field clamp + // 
and the upper-bound limit. When Exact, GetSize can return this directly without per-field sizing — + // the per-field loop still runs for bind-time resolution side-effects, but size is already known. + readonly Size _writeSizePrecomputed; public CompositeConverter(CompositeInfo composite) { _composite = composite; var req = BufferRequirements.CreateFixedSize(sizeof(int) + _composite.Fields.Count * (sizeof(uint) + sizeof(int))); + var anyProviderField = false; foreach (var field in _composite.Fields) { + anyProviderField = anyProviderField || field.IsProviderBacked; + var readReq = field.BinaryReadRequirement; var writeReq = field.BinaryWriteRequirement; @@ -34,6 +42,19 @@ public CompositeConverter(CompositeInfo composite) req = BufferRequirements.Create(readSuccess ? readReq : Size.Unknown, writeSuccess ? writeReq : Size.Unknown); } + // Capture the combined write size before clamping so GetSize can return it unchanged. This is the + // full requirement we know internally — externally we hide it behind an upper-bound to force GetSize + // to fire for provider-backed composites, but the number itself is still correct. + _writeSizePrecomputed = req.Write; + + // When any field defers resolution to a provider, downgrade the externally-reported write size to + // an upper bound. This is the sole mechanism by which bind-time resolution is triggered: non-exact + // writes route through GetSize, where per-field GetWriteInfo calls dispatch into providers and + // surface deterministic value-level errors (e.g. DateTime kind mismatches) at bind instead of at + // first Write. Composites with only concrete fields stay exact and skip GetSize as before. + if (anyProviderField && req.Write.Kind is SizeKind.Exact) + req = BufferRequirements.Create(req.Read, Size.CreateUpperBound(req.Write.Value)); + // We have to put a limit on the requirements we report otherwise smaller buffer sizes won't work. 
req = BufferRequirements.Create(Limit(req.Read), Limit(req.Write)); @@ -112,33 +133,87 @@ async ValueTask Read(bool async, PgReader reader, CancellationToken cancellat public override Size GetSize(SizeContext context, T value, ref object? writeState) { - var arrayPool = ArrayPool.Shared; - var data = arrayPool.Rent(_composite.Fields.Count); + var boxedInstance = (object)value; + + // When the combine pass produced an exact size, every field is individually fixed-size and + // non-nullable — the only reason we're in GetSize at all is that some field defers resolution + // to a provider and we clamped externally to force this entry. Walk fields purely for bind-time + // resolution side effects; the size is the precomputed one from the constructor. Rent lazily + // so the common DateTime-kind-style case (providers that validate but produce no state) pays + // no ElementState array allocation. + if (_writeSizePrecomputed.Kind is SizeKind.Exact) + { + ElementState[]? data = null; + for (var i = 0; i < _composite.Fields.Count; i++) + { + var field = _composite.Fields[i]; + var converter = field.GetWriteInfo(boxedInstance, out var writeRequirement, out var fieldState); + + // Skip populating the slot when the provider produced no state and the resolved converter is the same as the default. + // The common case — DateTime-kind and similar pure-validation providers — satisfies both and pays no slot allocation. + // A provider that happens to return a non-default concrete for a decided id still has its + // converter captured so Write uses it instead of demoting silently to the default. + if (fieldState is null && ReferenceEquals(converter, field.GetDefaultWriteInfo(out _))) + continue; + if (data is null) + { + data = ArrayPool.Shared.Rent(_composite.Fields.Count); + // clear any stale slots left behind by the previous pool user. 
+ Array.Clear(data, 0, _composite.Fields.Count); + } + + data[i] = new() + { + Size = writeRequirement, + WriteState = fieldState, + Converter = converter, + BufferRequirement = writeRequirement + }; + } + + if (data is null) + { + writeState = null; + return _writeSizePrecomputed; + } + + writeState = new WriteState + { + ArrayPool = ArrayPool.Shared, + Data = new(data, 0, _composite.Fields.Count), + AnyWriteState = true, + BoxedInstance = boxedInstance, + }; + return _writeSizePrecomputed; + } + + // Variable-size or nullable fields — per-field GetSizeOrDbNull is needed to compute the total, + // and per-field sizes must flow forward to Write. Always rent. + var arrayPool = ArrayPool.Shared; + var slowData = arrayPool.Rent(_composite.Fields.Count); var totalSize = Size.Create(sizeof(int) + _composite.Fields.Count * (sizeof(uint) + sizeof(int))); - var boxedInstance = (object)value; var anyWriteState = false; for (var i = 0; i < _composite.Fields.Count; i++) { var field = _composite.Fields[i]; - var converter = field.GetWriteInfo(boxedInstance, out var writeRequirement); - object? fieldState = null; - var fieldSize = field.GetSizeOrDbNull(converter, context.Format, writeRequirement, boxedInstance, ref fieldState); + var converter = field.GetWriteInfo(boxedInstance, out var writeRequirement, out var fieldState); + var fieldSizeOrNull = field.GetSizeOrDbNull(converter, context.Format, writeRequirement, boxedInstance, ref fieldState); anyWriteState = anyWriteState || fieldState is not null; - data[i] = new() + slowData[i] = new() { - Size = fieldSize ?? -1, + Size = fieldSizeOrNull ?? -1, WriteState = fieldState, Converter = converter, BufferRequirement = writeRequirement }; - totalSize = totalSize.Combine(fieldSize ?? 0); + totalSize = totalSize.Combine(fieldSizeOrNull ?? 
0); } writeState = new WriteState { ArrayPool = arrayPool, - Data = new(data, 0, _composite.Fields.Count), + Data = new(slowData, 0, _composite.Fields.Count), AnyWriteState = anyWriteState, BoxedInstance = boxedInstance, }; @@ -153,15 +228,30 @@ public override ValueTask WriteAsync(PgWriter writer, T value, CancellationToken async ValueTask Write(bool async, PgWriter writer, T value, CancellationToken cancellationToken) { - if (writer.Current.WriteState is not null and not WriteState) - throw new InvalidCastException($"Invalid write state, expected {typeof(WriteState).FullName}."); + // Null state is legitimate in two cases: + // 1. Exact-size composite — GetSize was skipped entirely. By construction of the combine pass + // this means no provider field, no variable field, no nullable field. + // 2. Clamped-by-provider composite — GetSize ran but every field's provider produced null + // state, so we skipped the WriteState allocation. All fields are individually fixed-size + // (that's what _writeSizePrecomputed.Kind is Exact guarantees), so it works the same + // way and resolution is just re-done via cached provider dispatch. + // Variable-size composites must always arrive with a populated WriteState, we can't recover + // per-field value-dependent sizes otherwise. 
+ var writeState = writer.Current.WriteState switch + { + WriteState ws => ws, + null when _writeSizePrecomputed.Kind is SizeKind.Exact => null, + null => throw new InvalidOperationException("Composite Write requires per-field data from GetSize when any field is variable-size."), + _ => throw new InvalidCastException($"Invalid write state, expected {typeof(WriteState).FullName}.") + }; + Debug.Assert(_bufferRequirements.Write.Kind is not SizeKind.Exact || writeState is null, + "Exact-size composite must not carry write state — GetSize should have been skipped."); if (writer.ShouldFlush(sizeof(int))) await writer.Flush(async, cancellationToken).ConfigureAwait(false); writer.WriteInt32(_composite.Fields.Count); - var writeState = writer.Current.WriteState as WriteState; var boxedInstance = writeState?.BoxedInstance ?? value; var data = writeState?.Data.Array; for (var i = 0; i < _composite.Fields.Count; i++) @@ -172,21 +262,27 @@ async ValueTask Write(bool async, PgWriter writer, T value, CancellationToken ca var field = _composite.Fields[i]; writer.WriteAsOid(field.PgTypeId); + // No cached slot: uses GetDefaultWriteInfo which is stateless by construction, + // so there is nothing to dispose on this path. Per-value resolution, if it was needed, + // already ran at bind-time GetSize and would have populated the slot + // A slot with a null Converter is a default(ElementState) left behind by + // GetSize's lazy-rent: fields walked before the first state-producing provider aren't + // back-filled, and Write handles them per-slot the same way a fully-unallocated data + // array is handled in the truly-exact case. ElementState elementState; - if (data?[i] is not { } state) + if (data?[i] is { Converter: not null } state) + elementState = state; + else { - var converter = field.GetWriteInfo(boxedInstance, out var writeRequirement); - object? 
fieldState = null; + var converter = field.GetDefaultWriteInfo(out var writeRequirement); elementState = new() { - Size = field.IsDbNull(converter, boxedInstance, ref fieldState) ? -1 : writeRequirement, + Size = field.IsDbNull(converter, boxedInstance, writeState: null) ? -1 : writeRequirement, WriteState = null, Converter = converter, BufferRequirement = writeRequirement, }; } - else - elementState = state; var length = elementState.Size.Value; writer.WriteInt32(length); if (length is not -1) @@ -218,15 +314,12 @@ public void Dispose() return; if (AnyWriteState) - { - for (var i = Data.Offset; i < array.Length; i++) + for (var i = Data.Offset; i < Data.Offset + Data.Count; i++) if (array[i].WriteState is IDisposable disposable) disposable.Dispose(); - Array.Clear(Data.Array, Data.Offset, Data.Count); - } - - ArrayPool?.Return(Data.Array); + Array.Clear(array, Data.Offset, Data.Count); + ArrayPool?.Return(array); } } } diff --git a/src/Npgsql/Internal/Converters/NullableConverter.cs b/src/Npgsql/Internal/Converters/NullableConverter.cs index b4d5689da7..250b98293f 100644 --- a/src/Npgsql/Internal/Converters/NullableConverter.cs +++ b/src/Npgsql/Internal/Converters/NullableConverter.cs @@ -11,8 +11,8 @@ sealed class NullableConverter(PgConverter effectiveConverter) : PgConverter(effectiveConverter.DbNullPredicateKind is DbNullPredicate.Custom) where T : struct { - protected override bool IsDbNullValue(T? value, ref object? writeState) - => value is null || effectiveConverter.IsDbNull(value.GetValueOrDefault(), ref writeState); + protected override bool IsDbNullValue(T? value, object? 
writeState) + => value is null || effectiveConverter.IsDbNull(value.GetValueOrDefault(), writeState); public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) => effectiveConverter.CanConvert(format, out bufferRequirements); @@ -39,18 +39,18 @@ internal override ValueTask WriteAsObject(bool async, PgWriter writer, object va => effectiveConverter.WriteAsObject(async, writer, value, cancellationToken); } -sealed class NullableConverterResolver(PgResolverTypeInfo effectiveTypeInfo) - : PgComposingConverterResolver(effectiveTypeInfo.PgTypeId, effectiveTypeInfo) +sealed class NullableTypeInfoProvider(PgProviderTypeInfo effectiveTypeInfo) + : PgComposingTypeInfoProvider(effectiveTypeInfo.PgTypeId, effectiveTypeInfo) where T : struct { protected override PgTypeId GetEffectivePgTypeId(PgTypeId pgTypeId) => pgTypeId; protected override PgTypeId GetPgTypeId(PgTypeId effectivePgTypeId) => effectivePgTypeId; - protected override PgConverter CreateConverter(PgConverterResolution effectiveResolution) - => new NullableConverter(effectiveResolution.GetConverter()); + protected override PgConverter CreateConverter(PgConcreteTypeInfo effectiveConcreteTypeInfo) + => new NullableConverter((PgConverter)effectiveConcreteTypeInfo.Converter); - protected override PgConverterResolution? GetEffectiveResolution(T? value, PgTypeId? expectedEffectivePgTypeId) - => value is { } inner - ? EffectiveTypeInfo.GetResolution(inner, expectedEffectivePgTypeId) + protected override PgConcreteTypeInfo? GetEffectiveTypeInfo(ProviderValueContext effectiveContext, T? value, ref object? writeState) + => value is not null + ? 
EffectiveTypeInfo.GetConcreteTypeInfo(effectiveContext, value.GetValueOrDefault(), out writeState) : null; } diff --git a/src/Npgsql/Internal/Converters/ObjectConverter.cs b/src/Npgsql/Internal/Converters/ObjectConverter.cs index 4889c60fad..1e78c8e7cf 100644 --- a/src/Npgsql/Internal/Converters/ObjectConverter.cs +++ b/src/Npgsql/Internal/Converters/ObjectConverter.cs @@ -6,22 +6,18 @@ namespace Npgsql.Internal; -sealed class ObjectConverter(PgSerializerOptions options, PgTypeId pgTypeId) : PgStreamingConverter(customDbNullPredicate: true) +sealed class ObjectConverter() : PgStreamingConverter(customDbNullPredicate: true) { - protected override bool IsDbNullValue(object? value, ref object? writeState) + protected override bool IsDbNullValue(object? value, object? writeState) { - if (value is null or DBNull) - return true; - - var typeInfo = GetTypeInfo(value.GetType()); - - object? effectiveState = null; - var converter = typeInfo.GetObjectResolution(value).Converter; - if (converter.IsDbNullAsObject(value, ref effectiveState)) - return true; + var (concreteTypeInfo, effectiveState) = writeState switch + { + PgConcreteTypeInfo info => (info, (object?)null), + WriteState ws => (ws.ConcreteTypeInfo, ws.EffectiveState), + _ => throw new InvalidOperationException("writeState cannot be null, LateBoundTypeInfoProvider is expected to pre-populate it with concrete type info.") + }; - writeState = effectiveState is not null ? new WriteState { TypeInfo = typeInfo, EffectiveState = effectiveState } : typeInfo; - return false; + return concreteTypeInfo.Converter.IsDbNullAsObject(value, effectiveState); } public override object Read(PgReader reader) => throw new NotSupportedException(); @@ -29,22 +25,16 @@ protected override bool IsDbNullValue(object? value, ref object? writeState) public override Size GetSize(SizeContext context, object value, ref object? 
writeState) { - var (typeInfo, effectiveState) = writeState switch + var (concreteTypeInfo, effectiveState) = writeState switch { - PgTypeInfo info => (info, null), - WriteState state => (state.TypeInfo, state.EffectiveState), + PgConcreteTypeInfo info => (info, (object?)null), + WriteState state => (state.ConcreteTypeInfo, state.EffectiveState), _ => throw new InvalidOperationException("Invalid state") }; - // We can call GetDefaultResolution here as validation has already happened in IsDbNullValue. - // And we know it was called due to the writeState being filled. - Debug.Assert(typeInfo.PgTypeId is not null); - var converter = typeInfo is PgResolverTypeInfo resolverTypeInfo - ? resolverTypeInfo.GetDefaultResolution(null).Converter - : typeInfo.GetResolution().Converter; - if (typeInfo.GetBufferRequirements(converter, context.Format) is not { } bufferRequirements) + if (concreteTypeInfo.GetBufferRequirements(concreteTypeInfo.Converter, context.Format) is not { } bufferRequirements) { - ThrowHelper.ThrowNotSupportedException($"Resolved converter '{converter.GetType()}' has to support the {context.Format} format to be compatible."); + ThrowHelper.ThrowNotSupportedException($"Resolved converter '{concreteTypeInfo.Converter.GetType()}' has to support the {context.Format} format to be compatible."); return default; } @@ -52,13 +42,13 @@ public override Size GetSize(SizeContext context, object value, ref object? 
writ if (bufferRequirements.Write.Kind is SizeKind.Exact) return bufferRequirements.Write; - var result = converter.GetSizeAsObject(context, value, ref effectiveState); + var result = concreteTypeInfo.Converter.GetSizeAsObject(context, value, ref effectiveState); if (effectiveState is not null) { - if (writeState is WriteState state && !ReferenceEquals(state.EffectiveState, effectiveState)) - state.EffectiveState = effectiveState; + if (writeState is WriteState s && !ReferenceEquals(s.EffectiveState, effectiveState)) + s.EffectiveState = effectiveState; else - writeState = new WriteState { TypeInfo = typeInfo, EffectiveState = effectiveState }; + writeState = new WriteState { ConcreteTypeInfo = concreteTypeInfo, EffectiveState = effectiveState }; } return result; @@ -72,31 +62,63 @@ public override ValueTask WriteAsync(PgWriter writer, object value, Cancellation async ValueTask Write(bool async, PgWriter writer, object value, CancellationToken cancellationToken) { - var (typeInfo, effectiveState) = writer.Current.WriteState switch + var (concreteTypeInfo, effectiveState) = writer.Current.WriteState switch { - PgTypeInfo info => (info, null), - WriteState state => (state.TypeInfo, state.EffectiveState), + PgConcreteTypeInfo info => (info, (object?)null), + WriteState state => (state.ConcreteTypeInfo, state.EffectiveState), _ => throw new InvalidOperationException("Invalid state") }; - // We can call GetDefaultResolution here as validation has already happened in IsDbNullValue. - // And we know it was called due to the writeState being filled. - Debug.Assert(typeInfo.PgTypeId is not null); - var converter = typeInfo is PgResolverTypeInfo resolverTypeInfo - ? 
resolverTypeInfo.GetDefaultResolution(null).Converter - : typeInfo.GetResolution().Converter; - var writeRequirement = typeInfo.GetBufferRequirements(converter, DataFormat.Binary)!.Value.Write; + var writeRequirement = concreteTypeInfo.GetBufferRequirements(concreteTypeInfo.Converter, DataFormat.Binary)!.Value.Write; using var _ = await writer.BeginNestedWrite(async, writeRequirement, writer.Current.Size.Value, effectiveState, cancellationToken).ConfigureAwait(false); - await converter.WriteAsObject(async, writer, value, cancellationToken).ConfigureAwait(false); + await concreteTypeInfo.Converter.WriteAsObject(async, writer, value, cancellationToken).ConfigureAwait(false); } - PgTypeInfo GetTypeInfo(Type type) - => options.GetTypeInfoInternal(type, pgTypeId) - ?? throw new NotSupportedException($"Writing values of '{type.FullName}' having DataTypeName '{options.DatabaseInfo.GetPostgresType(pgTypeId).DisplayName}' is not supported."); + internal sealed class WriteState : IDisposable + { + public required PgConcreteTypeInfo ConcreteTypeInfo { get; init; } + public required object? EffectiveState { get; set; } + + // EffectiveState may hold a pooled WriteState from the underlying concrete converter + // (composite, array, etc.). The outer DisposeWriteState on PgTypeInfo only sees this + // wrapper, so the wrapper is responsible for cascading disposal to the inner state. + public void Dispose() + { + if (EffectiveState is IDisposable disposable) + disposable.Dispose(); + } + } +} - sealed class WriteState +// TODO the goal is to allow this provider to return the underlying converter type info, but we're not there yet. +// At that point we don't need the ObjectConverter any longer. +sealed class LateBoundTypeInfoProvider(PgSerializerOptions options, PgTypeId typeId) : PgConcreteTypeInfoProvider +{ + readonly PgConcreteTypeInfo _defaultConcreteTypeInfo = new(options, new ObjectConverter(), typeId); + + protected override PgConcreteTypeInfo GetDefaultCore(PgTypeId? 
pgTypeId) + { + // Late binding is only supported when we've decided on a type id, so the provider's nominal typeId is the only + // legitimate answer. Upstream PgProviderTypeInfo.GetDefaultConcreteTypeInfo already throws on a mismatched id. + // Meaning, pgTypeId is either null or equal to typeId, and either way we return the cached info. + Debug.Assert(pgTypeId is null || pgTypeId == typeId); + return _defaultConcreteTypeInfo; + } + + protected override PgConcreteTypeInfo GetForValueCore(ProviderValueContext context, object? value, ref object? writeState) { - public required PgTypeInfo TypeInfo { get; init; } - public required object EffectiveState { get; set; } + if (value is null or DBNull) + { + writeState = options.UnspecifiedDBNullTypeInfo; + return GetDefaultCore(context.ExpectedPgTypeId); + } + + var typeInfo = AdoSerializerHelpers.GetTypeInfoForWriting(value.GetType(), context.ExpectedPgTypeId ?? typeId, options); + var concreteTypeInfo = typeInfo.GetObjectConcreteTypeInfo(value, out var effectiveState); + writeState = effectiveState is not null + ? new ObjectConverter.WriteState { ConcreteTypeInfo = concreteTypeInfo, EffectiveState = effectiveState } + : concreteTypeInfo; + + return GetDefault(context.ExpectedPgTypeId); } } diff --git a/src/Npgsql/Internal/Converters/PolymorphicArrayTypeInfoProvider.cs b/src/Npgsql/Internal/Converters/PolymorphicArrayTypeInfoProvider.cs new file mode 100644 index 0000000000..9084567f4a --- /dev/null +++ b/src/Npgsql/Internal/Converters/PolymorphicArrayTypeInfoProvider.cs @@ -0,0 +1,51 @@ +using System; +using System.Collections.Concurrent; +using System.Collections.Generic; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Internal.Converters; + +// Many ways to achieve strongly typed composition on top of a polymorphic element type. +// Including pushing construction through a GVM visitor pattern on the element handler, +// manual reimplementation of the element logic in the array provider, and other ways. 
+// This one however is by far the most lightweight on both the implementation duplication and code bloat axes. +sealed class PolymorphicArrayTypeInfoProvider : PgConcreteTypeInfoProvider +{ + readonly PgTypeId _pgTypeId; + readonly PgProviderTypeInfo _elementTypeInfo; + readonly Func _elementToArrayConverterFactory; + readonly PgTypeId _elementPgTypeId; + readonly ConcurrentDictionary _concreteInfoCache = new(ReferenceEqualityComparer.Instance); + + public PolymorphicArrayTypeInfoProvider(PgTypeId pgTypeId, PgProviderTypeInfo elementTypeInfo, Func elementToArrayConverterFactory) + { + if (elementTypeInfo.PgTypeId is null) + throw new ArgumentException("Type info cannot have an undecided PgTypeId.", nameof(elementTypeInfo)); + + _pgTypeId = pgTypeId; + _elementTypeInfo = elementTypeInfo; + _elementToArrayConverterFactory = elementToArrayConverterFactory; + _elementPgTypeId = elementTypeInfo.PgTypeId!.Value; + } + + protected override PgConcreteTypeInfo GetDefaultCore(PgTypeId? pgTypeId) + => GetOrAdd(_elementTypeInfo.GetDefaultConcreteTypeInfo(_elementPgTypeId)); + + protected override PgConcreteTypeInfo? GetForValueCore(ProviderValueContext context, object? value, ref object? writeState) + => throw new NotSupportedException("Polymorphic writing is not supported."); + + protected override PgConcreteTypeInfo? GetForFieldCore(Field field) + { + var elementConcreteTypeInfo = _elementTypeInfo.GetConcreteTypeInfo(field with { PgTypeId = _elementPgTypeId }); + return elementConcreteTypeInfo is not null ? 
GetOrAdd(elementConcreteTypeInfo) : null; + } + + PgConcreteTypeInfo GetOrAdd(PgConcreteTypeInfo elementConcreteTypeInfo) + { + (PolymorphicArrayTypeInfoProvider Instance, PgConcreteTypeInfo ConcreteInfo) state = (this, elementConcreteTypeInfo); + return _concreteInfoCache.GetOrAdd(elementConcreteTypeInfo, + static (_, state) => + new(state.ConcreteInfo.Options, state.Instance._elementToArrayConverterFactory(state.ConcreteInfo), state.Instance._pgTypeId), + state); + } +} diff --git a/src/Npgsql/Internal/Converters/PolymorphicConverterResolver.cs b/src/Npgsql/Internal/Converters/PolymorphicConverterResolver.cs deleted file mode 100644 index 7cf355d103..0000000000 --- a/src/Npgsql/Internal/Converters/PolymorphicConverterResolver.cs +++ /dev/null @@ -1,66 +0,0 @@ -using System; -using System.Collections.Concurrent; -using System.Collections.Generic; -using Npgsql.Internal.Postgres; - -namespace Npgsql.Internal.Converters; - -abstract class PolymorphicConverterResolver(PgTypeId pgTypeId) : PgConverterResolver -{ - protected PgTypeId PgTypeId { get; } = pgTypeId; - - protected abstract PgConverter Get(Field? field); - - public sealed override PgConverterResolution GetDefault(PgTypeId? pgTypeId) - { - if (pgTypeId is not null && pgTypeId != PgTypeId) - throw CreateUnsupportedPgTypeIdException(pgTypeId.Value); - - return new(Get(null), PgTypeId); - } - - public sealed override PgConverterResolution? Get(TBase? value, PgTypeId? expectedPgTypeId) - => new(Get(null), PgTypeId); - - public sealed override PgConverterResolution Get(Field field) - { - if (field.PgTypeId != PgTypeId) - throw CreateUnsupportedPgTypeIdException(field.PgTypeId); - - var converter = Get(field); - return new(converter, PgTypeId); - } -} - -// Many ways to achieve strongly typed composition on top of a polymorphic element type. 
-// Including pushing construction through a GVM visitor pattern on the element handler, -// manual reimplementation of the element logic in the array resolver, and other ways. -// This one however is by far the most lightweight on both the implementation duplication and code bloat axes. -sealed class ArrayPolymorphicConverterResolver : PolymorphicConverterResolver -{ - readonly PgResolverTypeInfo _elemTypeInfo; - readonly Func _elemToArrayConverterFactory; - readonly PgTypeId _elemPgTypeId; - readonly ConcurrentDictionary _converterCache = new(ReferenceEqualityComparer.Instance); - - public ArrayPolymorphicConverterResolver(PgTypeId pgTypeId, PgResolverTypeInfo elemTypeInfo, Func elemToArrayConverterFactory) - : base(pgTypeId) - { - if (elemTypeInfo.PgTypeId is null) - throw new ArgumentException("elemTypeInfo.PgTypeId must be non-null.", nameof(elemTypeInfo)); - - _elemTypeInfo = elemTypeInfo; - _elemToArrayConverterFactory = elemToArrayConverterFactory; - _elemPgTypeId = elemTypeInfo.PgTypeId!.Value; - } - - protected override PgConverter Get(Field? maybeField) - { - var elemResolution = maybeField is { } field - ? 
_elemTypeInfo.GetResolution(field with { PgTypeId = _elemPgTypeId }) - : _elemTypeInfo.GetDefaultResolution(_elemPgTypeId); - - (Func Factory, PgConverterResolution Resolution) state = (_elemToArrayConverterFactory, elemResolution); - return _converterCache.GetOrAdd(elemResolution.Converter, static (_, state) => state.Factory(state.Resolution), state); - } -} diff --git a/src/Npgsql/Internal/Converters/Temporal/DateTimeConverterResolver.cs b/src/Npgsql/Internal/Converters/Temporal/DateTimeConverterResolver.cs deleted file mode 100644 index 6ae5a783a1..0000000000 --- a/src/Npgsql/Internal/Converters/Temporal/DateTimeConverterResolver.cs +++ /dev/null @@ -1,143 +0,0 @@ -using System; -using System.Collections.Generic; -using Npgsql.Internal.Postgres; -using Npgsql.Properties; -using NpgsqlTypes; - -// ReSharper disable once CheckNamespace -namespace Npgsql.Internal.Converters; - -sealed class DateTimeConverterResolver : PgConverterResolver -{ - readonly PgSerializerOptions _options; - readonly Func, T?, PgTypeId?, PgConverterResolution?> _resolver; - readonly Func _factory; - readonly PgTypeId _timestampTz; - PgConverter? _timestampTzConverter; - readonly PgTypeId _timestamp; - PgConverter? _timestampConverter; - readonly bool _dateTimeInfinityConversions; - - internal DateTimeConverterResolver(PgSerializerOptions options, Func, T?, PgTypeId?, PgConverterResolution?> resolver, Func factory, PgTypeId timestampTz, PgTypeId timestamp, bool dateTimeInfinityConversions) - { - _options = options; - _resolver = resolver; - _factory = factory; - _timestampTz = timestampTz; - _timestamp = timestamp; - _dateTimeInfinityConversions = dateTimeInfinityConversions; - } - - public override PgConverterResolution GetDefault(PgTypeId? 
pgTypeId) - { - if (pgTypeId == _timestampTz) - return new(_timestampTzConverter ??= _factory(_timestampTz), _timestampTz); - if (pgTypeId is null || pgTypeId == _timestamp) - return new(_timestampConverter ??= _factory(_timestamp), _timestamp); - - throw CreateUnsupportedPgTypeIdException(pgTypeId.Value); - } - - public PgConverterResolution? Get(DateTime value, PgTypeId? expectedPgTypeId, bool validateOnly = false) - { - if (value.Kind is DateTimeKind.Utc) - { - // We coalesce with expectedPgTypeId to throw on unknown type ids. - return expectedPgTypeId == _timestamp - ? throw new ArgumentException( - string.Format(NpgsqlStrings.TimestampNoDateTimeUtc, _options.GetDataTypeName(_timestamp).DisplayName, _options.GetDataTypeName(_timestampTz).DisplayName), nameof(value)) - : validateOnly ? null : GetDefault(expectedPgTypeId ?? _timestampTz); - } - - // For timestamptz types we'll accept unspecified MinValue/MaxValue as well. - if (expectedPgTypeId == _timestampTz - && !(_dateTimeInfinityConversions && (value == DateTime.MinValue || value == DateTime.MaxValue))) - { - throw new ArgumentException( - string.Format(NpgsqlStrings.TimestampTzNoDateTimeUnspecified, value.Kind, _options.GetDataTypeName(_timestampTz).DisplayName), nameof(value)); - } - - // We coalesce with expectedPgTypeId to throw on unknown type ids. - return GetDefault(expectedPgTypeId ?? _timestamp); - } - - public override PgConverterResolution? Get(T? value, PgTypeId? 
expectedPgTypeId) - => _resolver(this, value, expectedPgTypeId); -} - -sealed class DateTimeConverterResolver -{ - public static DateTimeConverterResolver CreateResolver(PgSerializerOptions options, PgTypeId timestampTz, PgTypeId timestamp, bool dateTimeInfinityConversions) - => new(options, static (resolver, value, expectedPgTypeId) => resolver.Get(value, expectedPgTypeId), pgTypeId => - { - if (pgTypeId == timestampTz) - return new DateTimeConverter(dateTimeInfinityConversions, DateTimeKind.Utc); - if (pgTypeId == timestamp) - return new DateTimeConverter(dateTimeInfinityConversions, DateTimeKind.Unspecified); - - throw new NotSupportedException(); - }, timestampTz, timestamp, dateTimeInfinityConversions); - - public static DateTimeConverterResolver> CreateRangeResolver(PgSerializerOptions options, PgTypeId timestampTz, PgTypeId timestamp, bool dateTimeInfinityConversions) - => new(options, static (resolver, value, expectedPgTypeId) => - { - // Resolve both sides to make sure we end up with consistent PgTypeIds. - PgConverterResolution? resolution = null; - if (!value.LowerBoundInfinite) - resolution = resolver.Get(value.LowerBound, expectedPgTypeId); - - if (!value.UpperBoundInfinite) - { - var result = resolver.Get(value.UpperBound, resolution?.PgTypeId ?? 
expectedPgTypeId, validateOnly: resolution is not null); - resolution ??= result; - } - - return resolution; - }, pgTypeId => - { - if (pgTypeId == timestampTz) - return new RangeConverter(new DateTimeConverter(dateTimeInfinityConversions, DateTimeKind.Utc)); - if (pgTypeId == timestamp) - return new RangeConverter(new DateTimeConverter(dateTimeInfinityConversions, DateTimeKind.Unspecified)); - - throw new NotSupportedException(); - }, timestampTz, timestamp, dateTimeInfinityConversions); - - public static DateTimeConverterResolver CreateMultirangeResolver(PgSerializerOptions options, PgTypeId timestampTz, PgTypeId timestamp, bool dateTimeInfinityConversions) - where T : IList where TElement : notnull - { - if (typeof(TElement) != typeof(NpgsqlRange)) - ThrowHelper.ThrowNotSupportedException("Unsupported element type"); - - return new DateTimeConverterResolver(options, static (resolver, value, expectedPgTypeId) => - { - PgConverterResolution? resolution = null; - if (value is null) - return null; - - foreach (var element in (IList>)value) - { - PgConverterResolution? result; - if (!element.LowerBoundInfinite) - { - result = resolver.Get(element.LowerBound, resolution?.PgTypeId ?? expectedPgTypeId, validateOnly: resolution is not null); - resolution ??= result; - } - if (!element.UpperBoundInfinite) - { - result = resolver.Get(element.UpperBound, resolution?.PgTypeId ?? 
expectedPgTypeId, validateOnly: resolution is not null); - resolution ??= result; - } - } - return resolution; - }, pgTypeId => - { - if (pgTypeId == timestampTz) - return new MultirangeConverter((PgConverter)(object)new RangeConverter(new DateTimeConverter(dateTimeInfinityConversions, DateTimeKind.Utc))); - if (pgTypeId == timestamp) - return new MultirangeConverter((PgConverter)(object)new RangeConverter(new DateTimeConverter(dateTimeInfinityConversions, DateTimeKind.Unspecified))); - - throw new NotSupportedException(); - }, timestampTz, timestamp, dateTimeInfinityConversions); - } -} diff --git a/src/Npgsql/Internal/Converters/Temporal/DateTimeTypeInfoProvider.cs b/src/Npgsql/Internal/Converters/Temporal/DateTimeTypeInfoProvider.cs new file mode 100644 index 0000000000..34fdbf73ba --- /dev/null +++ b/src/Npgsql/Internal/Converters/Temporal/DateTimeTypeInfoProvider.cs @@ -0,0 +1,169 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics; +using Npgsql.Internal.Postgres; +using Npgsql.Properties; +using NpgsqlTypes; + +// ReSharper disable once CheckNamespace +namespace Npgsql.Internal.Converters; + +delegate PgConcreteTypeInfo? DateTimeTypeInfoProviderDelegate( + DateTimeTypeInfoProvider provider, ProviderValueContext context, T? value, ref object? 
writeState); + +sealed class DateTimeTypeInfoProvider : PgConcreteTypeInfoProvider +{ + readonly PgSerializerOptions _options; + readonly DateTimeTypeInfoProviderDelegate _provider; + readonly Func _factory; + readonly PgTypeId _timestampTz; + readonly PgConcreteTypeInfo _timestampTzConcreteTypeInfo; + readonly PgTypeId _timestamp; + readonly PgConcreteTypeInfo _timestampConcreteTypeInfo; + readonly bool _dateTimeInfinityConversions; + + internal DateTimeTypeInfoProvider(PgSerializerOptions options, DateTimeTypeInfoProviderDelegate provider, + Func factory, PgTypeId timestampTz, PgTypeId timestamp, bool dateTimeInfinityConversions) + { + _options = options; + _provider = provider; + _factory = factory; + _timestampTz = timestampTz; + _timestamp = timestamp; + _dateTimeInfinityConversions = dateTimeInfinityConversions; + _timestampTzConcreteTypeInfo = new(options, factory(timestampTz), timestampTz); + _timestampConcreteTypeInfo = new(options, factory(timestamp), timestamp); + } + + protected override PgConcreteTypeInfo GetDefaultCore(PgTypeId? pgTypeId) + { + if (pgTypeId == _timestampTz) + return _timestampTzConcreteTypeInfo; + if (pgTypeId is null || pgTypeId == _timestamp) + return _timestampConcreteTypeInfo; + + throw new ArgumentOutOfRangeException(nameof(pgTypeId), pgTypeId, "Unsupported PgTypeId."); + } + + protected override PgConcreteTypeInfo? GetForValueCore(ProviderValueContext context, T? value, ref object? writeState) + => _provider(this, context, value, ref writeState); + + public PgConcreteTypeInfo? Get(ProviderValueContext context, DateTime value, bool validateOnly = false) + { + Debug.Assert(!validateOnly || context.ExpectedPgTypeId is not null); + if (value.Kind is DateTimeKind.Utc) + { + // We coalesce with expectedPgTypeId to throw on unknown type ids. + return context.ExpectedPgTypeId == _timestamp + ? 
throw new ArgumentException( + string.Format(NpgsqlStrings.TimestampNoDateTimeUtc, + _options.GetDataTypeName(_timestamp).DisplayName, + _options.GetDataTypeName(_timestampTz).DisplayName), nameof(value)) + : validateOnly ? null : GetDefault(context.ExpectedPgTypeId ?? _timestampTz); + } + + // For timestamptz types we'll accept unspecified MinValue/MaxValue as well. + if (context.ExpectedPgTypeId == _timestampTz + && !(_dateTimeInfinityConversions && (value == DateTime.MinValue || value == DateTime.MaxValue))) + { + throw new ArgumentException( + string.Format(NpgsqlStrings.TimestampTzNoDateTimeUnspecified, value.Kind, + _options.GetDataTypeName(_timestampTz).DisplayName), nameof(value)); + } + + // We coalesce with expectedPgTypeId to throw on unknown type ids. + return validateOnly ? null : GetDefault(context.ExpectedPgTypeId ?? _timestamp); + } +} + +sealed class DateTimeTypeInfoProvider +{ + public static DateTimeTypeInfoProvider CreateProvider(PgSerializerOptions options, PgTypeId timestampTz, PgTypeId timestamp, bool dateTimeInfinityConversions) + => new(options, static (provider, context, value, ref writeState) => provider.Get(context, value), pgTypeId => + { + if (pgTypeId == timestampTz) + return new DateTimeConverter(dateTimeInfinityConversions, DateTimeKind.Utc); + if (pgTypeId == timestamp) + return new DateTimeConverter(dateTimeInfinityConversions, DateTimeKind.Unspecified); + + throw new NotSupportedException(); + }, timestampTz, timestamp, dateTimeInfinityConversions); + + public static DateTimeTypeInfoProvider> CreateRangeProvider( + PgSerializerOptions options, PgTypeId timestampTz, PgTypeId timestamp, bool dateTimeInfinityConversions) + => new(options, static (provider, context, value, ref writeState) => + { + // Resolve both sides to make sure we end up with consistent PgTypeIds. + PgConcreteTypeInfo? 
concreteTypeInfo = null; + if (!value.LowerBoundInfinite) + { + concreteTypeInfo = provider.Get(context, value.LowerBound); + context = context with { ExpectedPgTypeId = concreteTypeInfo?.PgTypeId ?? context.ExpectedPgTypeId }; + } + + if (!value.UpperBoundInfinite) + { + var result = provider.Get(context, value.UpperBound, validateOnly: concreteTypeInfo is not null); + concreteTypeInfo ??= result; + } + + return concreteTypeInfo; + }, pgTypeId => + { + if (pgTypeId == timestampTz) + return new RangeConverter(new DateTimeConverter(dateTimeInfinityConversions, DateTimeKind.Utc)); + if (pgTypeId == timestamp) + return new RangeConverter(new DateTimeConverter(dateTimeInfinityConversions, DateTimeKind.Unspecified)); + + throw new NotSupportedException(); + }, timestampTz, timestamp, dateTimeInfinityConversions); + + public static DateTimeTypeInfoProvider CreateMultirangeProvider( + PgSerializerOptions options, PgTypeId timestampTz, PgTypeId timestamp, bool dateTimeInfinityConversions) + where T : IList where TElement : notnull + { + if (typeof(TElement) != typeof(NpgsqlRange)) + ThrowHelper.ThrowNotSupportedException("Unsupported element type"); + + return new DateTimeTypeInfoProvider(options, static (provider, context, value, ref writeState) => + { + PgConcreteTypeInfo? concreteTypeInfo = null; + if (value is null) + return null; + + foreach (var element in (IList>)value) + { + PgConcreteTypeInfo? 
result; + if (!element.LowerBoundInfinite) + { + result = provider.Get(context, element.LowerBound, validateOnly: concreteTypeInfo is not null); + if (concreteTypeInfo is null && result is not null) + { + concreteTypeInfo = result; + context = context with { ExpectedPgTypeId = concreteTypeInfo.PgTypeId }; + } + } + if (!element.UpperBoundInfinite) + { + result = provider.Get(context, element.UpperBound, validateOnly: concreteTypeInfo is not null); + if (concreteTypeInfo is null && result is not null) + { + concreteTypeInfo = result; + context = context with { ExpectedPgTypeId = concreteTypeInfo.PgTypeId }; + } + } + } + return concreteTypeInfo; + }, pgTypeId => + { + if (pgTypeId == timestampTz) + return new MultirangeConverter( + (PgConverter)(object)new RangeConverter(new DateTimeConverter(dateTimeInfinityConversions, DateTimeKind.Utc))); + if (pgTypeId == timestamp) + return new MultirangeConverter( + (PgConverter)(object)new RangeConverter(new DateTimeConverter(dateTimeInfinityConversions, DateTimeKind.Unspecified))); + + throw new NotSupportedException(); + }, timestampTz, timestamp, dateTimeInfinityConversions); + } +} diff --git a/src/Npgsql/Internal/Converters/VersionPrefixedTextConverter.cs b/src/Npgsql/Internal/Converters/VersionPrefixedTextConverter.cs index 5252617506..3dd6a5519b 100644 --- a/src/Npgsql/Internal/Converters/VersionPrefixedTextConverter.cs +++ b/src/Npgsql/Internal/Converters/VersionPrefixedTextConverter.cs @@ -10,7 +10,7 @@ sealed class VersionPrefixedTextConverter(byte versionPrefix, PgConverter { BufferRequirements _innerRequirements; - protected override bool IsDbNullValue(T? value, ref object? writeState) => textConverter.IsDbNull(value, ref writeState); + protected override bool IsDbNullValue(T? value, object? 
writeState) => textConverter.IsDbNull(value, writeState); public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) => VersionPrefixedTextConverter.CanConvert(textConverter, format, out _innerRequirements, out bufferRequirements); diff --git a/src/Npgsql/Internal/DynamicTypeInfoResolver.cs b/src/Npgsql/Internal/DynamicTypeInfoResolver.cs index dfdb5a79e7..840bd10b37 100644 --- a/src/Npgsql/Internal/DynamicTypeInfoResolver.cs +++ b/src/Npgsql/Internal/DynamicTypeInfoResolver.cs @@ -99,7 +99,7 @@ public DynamicMappingCollection AddResolverMapping([DynamicallyAccessedMembers(D if (type.IsValueType) typeof(TypeInfoMappingCollection) - .GetMethod(nameof(TypeInfoMappingCollection.AddResolverStructType), [typeof(string), typeof(TypeInfoFactory), typeof(Func)])! + .GetMethod(nameof(TypeInfoMappingCollection.AddProviderStructType), [typeof(string), typeof(TypeInfoFactory), typeof(Func)])! .MakeGenericMethod(type).Invoke(_mappings ??= new(), [ dataTypeName, @@ -108,7 +108,7 @@ public DynamicMappingCollection AddResolverMapping([DynamicallyAccessedMembers(D ]); else typeof(TypeInfoMappingCollection) - .GetMethod(nameof(TypeInfoMappingCollection.AddResolverType), [typeof(string), typeof(TypeInfoFactory), typeof(Func)])! + .GetMethod(nameof(TypeInfoMappingCollection.AddProviderType), [typeof(string), typeof(TypeInfoFactory), typeof(Func)])! .MakeGenericMethod(type).Invoke(_mappings ??= new(), [ dataTypeName, @@ -122,11 +122,11 @@ public DynamicMappingCollection AddResolverArrayMapping([DynamicallyAccessedMemb { if (elementType.IsValueType) typeof(TypeInfoMappingCollection) - .GetMethod(nameof(TypeInfoMappingCollection.AddResolverStructArrayType), [typeof(string)])! + .GetMethod(nameof(TypeInfoMappingCollection.AddProviderStructArrayType), [typeof(string)])! 
.MakeGenericMethod(elementType).Invoke(_mappings ??= new(), [dataTypeName]); else typeof(TypeInfoMappingCollection) - .GetMethod(nameof(TypeInfoMappingCollection.AddResolverArrayType), [typeof(string)])! + .GetMethod(nameof(TypeInfoMappingCollection.AddProviderArrayType), [typeof(string)])! .MakeGenericMethod(elementType).Invoke(_mappings ??= new(), [dataTypeName]); return this; } diff --git a/src/Npgsql/Internal/PgComposingConverterResolver.cs b/src/Npgsql/Internal/PgComposingConverterResolver.cs deleted file mode 100644 index 543ef8bdbd..0000000000 --- a/src/Npgsql/Internal/PgComposingConverterResolver.cs +++ /dev/null @@ -1,68 +0,0 @@ -using System; -using System.Collections.Concurrent; -using System.Collections.Generic; -using Npgsql.Internal.Postgres; - -namespace Npgsql.Internal; - -abstract class PgComposingConverterResolver : PgConverterResolver -{ - readonly PgTypeId? _pgTypeId; - public PgResolverTypeInfo EffectiveTypeInfo { get; } - readonly ConcurrentDictionary _converters = new(ReferenceEqualityComparer.Instance); - - protected PgComposingConverterResolver(PgTypeId? pgTypeId, PgResolverTypeInfo effectiveTypeInfo) - { - if (pgTypeId is null && effectiveTypeInfo.PgTypeId is not null) - throw new ArgumentNullException(nameof(pgTypeId), $"Cannot be null if {nameof(effectiveTypeInfo)}.{nameof(PgTypeInfo.PgTypeId)} is not null."); - - _pgTypeId = pgTypeId; - EffectiveTypeInfo = effectiveTypeInfo; - } - - protected abstract PgTypeId GetEffectivePgTypeId(PgTypeId pgTypeId); - protected abstract PgTypeId GetPgTypeId(PgTypeId effectivePgTypeId); - protected abstract PgConverter CreateConverter(PgConverterResolution effectiveResolution); - protected abstract PgConverterResolution? GetEffectiveResolution(T? value, PgTypeId? expectedEffectivePgTypeId); - - public override PgConverterResolution GetDefault(PgTypeId? pgTypeId) - { - PgTypeId? effectivePgTypeId = pgTypeId is not null ? 
GetEffectiveTypeId(pgTypeId.GetValueOrDefault()) : null; - var effectiveResolution = EffectiveTypeInfo.GetDefaultResolution(effectivePgTypeId); - return new(GetOrAdd(effectiveResolution), pgTypeId ?? _pgTypeId ?? GetPgTypeId(effectiveResolution.PgTypeId)); - } - - public override PgConverterResolution? Get(T? value, PgTypeId? expectedPgTypeId) - { - PgTypeId? expectedEffectiveId = expectedPgTypeId is not null ? GetEffectiveTypeId(expectedPgTypeId.GetValueOrDefault()) : null; - if (GetEffectiveResolution(value, expectedEffectiveId) is { } resolution) - return new PgConverterResolution(GetOrAdd(resolution), expectedPgTypeId ?? _pgTypeId ?? GetPgTypeId(resolution.PgTypeId)); - - return null; - } - - public override PgConverterResolution Get(Field field) - { - var effectiveResolution = EffectiveTypeInfo.GetResolution(field with { PgTypeId = GetEffectiveTypeId(field.PgTypeId) }); - return new PgConverterResolution(GetOrAdd(effectiveResolution), field.PgTypeId); - } - - PgTypeId GetEffectiveTypeId(PgTypeId pgTypeId) - { - if (_pgTypeId == pgTypeId) - return EffectiveTypeInfo.PgTypeId.GetValueOrDefault(); - - // We have an undecided type info which is asked to resolve for a specific type id - // we'll unfortunately have to look up the effective id, this is rare though. 
- return GetEffectivePgTypeId(pgTypeId); - } - - PgConverter GetOrAdd(PgConverterResolution effectiveResolution) - { - (PgComposingConverterResolver Instance, PgConverterResolution EffectiveResolution) state = (this, effectiveResolution); - return (PgConverter)_converters.GetOrAdd( - effectiveResolution.Converter, - static (_, state) => state.Instance.CreateConverter(state.EffectiveResolution), - state); - } -} diff --git a/src/Npgsql/Internal/PgComposingTypeInfoProvider.cs b/src/Npgsql/Internal/PgComposingTypeInfoProvider.cs new file mode 100644 index 0000000000..d24b44ab3d --- /dev/null +++ b/src/Npgsql/Internal/PgComposingTypeInfoProvider.cs @@ -0,0 +1,76 @@ +using System; +using System.Collections.Concurrent; +using System.Collections.Generic; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Internal; + +abstract class PgComposingTypeInfoProvider : PgConcreteTypeInfoProvider +{ + readonly PgTypeId? _pgTypeId; + protected PgProviderTypeInfo EffectiveTypeInfo { get; } + readonly ConcurrentDictionary _concreteInfoCache = new(ReferenceEqualityComparer.Instance); + + protected PgComposingTypeInfoProvider(PgTypeId? pgTypeId, PgProviderTypeInfo effectiveTypeInfo) + { + if (pgTypeId is null && effectiveTypeInfo.PgTypeId is not null) + throw new ArgumentNullException(nameof(pgTypeId), $"Cannot be null if {nameof(effectiveTypeInfo)}.{nameof(PgTypeInfo.PgTypeId)} is not null."); + + _pgTypeId = pgTypeId; + EffectiveTypeInfo = effectiveTypeInfo; + } + + protected abstract PgTypeId GetEffectivePgTypeId(PgTypeId pgTypeId); + protected abstract PgTypeId GetPgTypeId(PgTypeId effectivePgTypeId); + protected abstract PgConverter CreateConverter(PgConcreteTypeInfo effectiveConcreteTypeInfo); + protected abstract PgConcreteTypeInfo? GetEffectiveTypeInfo(ProviderValueContext effectiveContext, T? value, ref object? writeState); + + protected override PgConcreteTypeInfo GetDefaultCore(PgTypeId? pgTypeId) + { + PgTypeId? effectiveTypeId = pgTypeId is { } id ? 
GetEffectiveTypeId(id) : null; + var concreteTypeInfo = EffectiveTypeInfo.GetDefaultConcreteTypeInfo(effectiveTypeId); + var composingPgTypeId = _pgTypeId ?? GetPgTypeId(concreteTypeInfo.PgTypeId); + return GetOrAdd(concreteTypeInfo, composingPgTypeId); + } + + protected override PgConcreteTypeInfo? GetForValueCore(ProviderValueContext context, T? value, ref object? writeState) + { + PgTypeId? effectiveTypeId = context.ExpectedPgTypeId is { } id ? GetEffectiveTypeId(id) : null; + var effectiveContext = context with { ExpectedPgTypeId = effectiveTypeId }; + if (GetEffectiveTypeInfo(effectiveContext, value, ref writeState) is { } effectiveTypeInfo) + return GetOrAdd(effectiveTypeInfo, context.ExpectedPgTypeId ?? _pgTypeId ?? GetPgTypeId(effectiveTypeInfo.PgTypeId)); + + return null; + } + + protected override PgConcreteTypeInfo? GetForFieldCore(Field field) + { + if (EffectiveTypeInfo.GetConcreteTypeInfo(field with { PgTypeId = GetEffectivePgTypeId(field.PgTypeId)}) is not { } concreteTypeInfo) + return null; + + var composingPgTypeId = _pgTypeId ?? GetPgTypeId(concreteTypeInfo.PgTypeId); + return GetOrAdd(concreteTypeInfo, composingPgTypeId); + } + + PgTypeId GetEffectiveTypeId(PgTypeId pgTypeId) + { + // If we have a _pgTypeId match we already know the effective id, and the constructor has verified it is non-null. + if (pgTypeId == _pgTypeId) + return EffectiveTypeInfo.PgTypeId.GetValueOrDefault(); + + // We have an undecided type info which is asked to resolve for a specific type id + // we'll unfortunately have to look up the effective id, this is rare though. 
+ return GetEffectivePgTypeId(pgTypeId); + } + + PgConcreteTypeInfo GetOrAdd(PgConcreteTypeInfo concreteTypeInfo, PgTypeId pgTypeId) + { + (PgComposingTypeInfoProvider Instance, PgConcreteTypeInfo ConcreteTypeInfo, PgTypeId PgTypeId) + state = (this, concreteTypeInfo, pgTypeId); + return _concreteInfoCache.GetOrAdd( + concreteTypeInfo, + static (_, state) + => new(state.ConcreteTypeInfo.Options, state.Instance.CreateConverter(state.ConcreteTypeInfo), state.PgTypeId), + state); + } +} diff --git a/src/Npgsql/Internal/PgConcreteTypeInfoProvider.cs b/src/Npgsql/Internal/PgConcreteTypeInfoProvider.cs new file mode 100644 index 0000000000..a52c977838 --- /dev/null +++ b/src/Npgsql/Internal/PgConcreteTypeInfoProvider.cs @@ -0,0 +1,105 @@ +using System; +using System.Diagnostics.CodeAnalysis; +using Npgsql.Internal.Postgres; + +namespace Npgsql.Internal; + +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] +public abstract class PgConcreteTypeInfoProvider +{ + private protected PgConcreteTypeInfoProvider() { } + + /// + /// Gets the appropriate type info solely based on PgTypeId. + /// + public PgConcreteTypeInfo GetDefault(PgTypeId? pgTypeId) + { + var result = GetDefaultCore(pgTypeId); + if (pgTypeId is { } id && result.PgTypeId != id) + ThrowPgTypeIdMismatch(nameof(GetDefaultCore)); + return result; + } + + /// + /// Gets the appropriate type info based on the given field info. + /// + public PgConcreteTypeInfo? GetForField(Field field) + { + var result = GetForFieldCore(field); + if (result is not null && result.PgTypeId != field.PgTypeId) + ThrowPgTypeIdMismatch(nameof(GetForFieldCore)); + return result; + } + + /// + /// Gets the appropriate type info based on the given value and expected type id. + /// + public PgConcreteTypeInfo? GetForValueAsObject(ProviderValueContext context, object? value, ref object? 
writeState) + { + var result = GetForValueAsObjectCore(context, value, ref writeState); + if (context.ExpectedPgTypeId is { } id && result is not null && result.PgTypeId != id) + ThrowPgTypeIdMismatch(nameof(GetForValueAsObjectCore)); + return result; + } + + /// + /// Gets the default concrete type info for a given PgTypeId. + /// + /// + /// Implementations should not return new instances of the possible infos that can be returned, instead its expected these are cached once returned. + /// Composing providers depend on this to cache their own infos - wrapping the element info - with the cache key being the element info reference. + /// + protected abstract PgConcreteTypeInfo GetDefaultCore(PgTypeId? pgTypeId); + + /// + /// Gets the concrete type info for a given field. + /// + /// + /// Implementations should not return new instances of the possible infos that can be returned, instead its expected these are cached once returned. + /// Composing providers depend on this to cache their own infos - wrapping the element info - with the cache key being the element info reference. + /// + protected virtual PgConcreteTypeInfo? GetForFieldCore(Field field) => null; + + internal abstract Type TypeToConvert { get; } + + private protected abstract PgConcreteTypeInfo? GetForValueAsObjectCore(ProviderValueContext context, object? value, ref object? writeState); + + private protected static void ThrowPgTypeIdMismatch(string methodName) + => throw new InvalidOperationException( + $"'{methodName}' incorrectly returned a different {nameof(PgTypeId)} in its concrete type info than the caller passed in."); +} + +public abstract class PgConcreteTypeInfoProvider : PgConcreteTypeInfoProvider +{ + /// + /// Gets the appropriate type info based on the given value and expected type id. + /// + public PgConcreteTypeInfo? GetForValue(ProviderValueContext context, T? value, ref object? 
writeState) + { + var result = GetForValueCore(context, value, ref writeState); + if (context.ExpectedPgTypeId is { } id && result is not null && result.PgTypeId != id) + ThrowPgTypeIdMismatch(nameof(GetForValueCore)); + return result; + } + + /// + /// Gets the concrete type info for a given value and expected type id. + /// + /// + /// Implementations should not return new instances of the possible infos that can be returned, instead its expected these are cached once returned. + /// Composing providers depend on this to cache their own infos - wrapping the element info - with the cache key being the element info reference. + /// + protected abstract PgConcreteTypeInfo? GetForValueCore(ProviderValueContext context, T? value, ref object? writeState); + + internal sealed override Type TypeToConvert => typeof(T); + + // If null was passed while it is not a valid value for T we directly return null. + // This allows concrete info to be produced by falling back to GetDefault afterwards. + private protected sealed override PgConcreteTypeInfo? GetForValueAsObjectCore(ProviderValueContext context, object? value, ref object? writeState) + => default(T) is null || value is not null ? GetForValueCore(context, (T?)value, ref writeState) : null; +} + +public readonly struct ProviderValueContext +{ + public PgTypeId? ExpectedPgTypeId { get; init; } +} diff --git a/src/Npgsql/Internal/PgConverter.cs b/src/Npgsql/Internal/PgConverter.cs index 0d1eb000c3..424f6ca16f 100644 --- a/src/Npgsql/Internal/PgConverter.cs +++ b/src/Npgsql/Internal/PgConverter.cs @@ -2,6 +2,7 @@ using System.Buffers; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; +using System.ComponentModel; using System.Runtime.CompilerServices; using System.Threading; using System.Threading.Tasks; @@ -28,18 +29,26 @@ private protected PgConverter(Type type, bool isNullDefaultValue, bool customDbN internal abstract Type TypeToConvert { get; } - internal bool IsDbNullAsObject([NotNullWhen(false)] object? 
value, ref object? writeState) + internal bool IsDbNullAsObject([NotNullWhen(false)] object? value, object? writeState) => DbNullPredicateKind switch { DbNullPredicate.Null => value is null, DbNullPredicate.None => false, DbNullPredicate.PolymorphicNull => value is null or DBNull, // We do the null check to keep the NotNullWhen(false) invariant. - DbNullPredicate.Custom => IsDbNullValueAsObject(value, ref writeState) || (value is null && ThrowInvalidNullValue()), + DbNullPredicate.Custom => IsDbNullValueAsObject(value, writeState) || (value is null && ThrowInvalidNullValue()), _ => ThrowDbNullPredicateOutOfRange() }; - private protected abstract bool IsDbNullValueAsObject(object? value, ref object? writeState); + [Obsolete("Use the overload without ref.")] + internal bool IsDbNullAsObject([NotNullWhen(false)] object? value, ref object? writeState) + => IsDbNullAsObject(value, writeState); + + private protected abstract bool IsDbNullValueAsObject(object? value, object? writeState); + + [Obsolete("Use the overload without ref.")] + private protected bool IsDbNullValueAsObject(object? value, ref object? writeState) + => IsDbNullValueAsObject(value, writeState); internal abstract Size GetSizeAsObject(SizeContext context, object value, ref object? writeState); @@ -94,24 +103,47 @@ public abstract class PgConverter : PgConverter private protected PgConverter(bool customDbNullPredicate) : base(typeof(T), default(T) is null, customDbNullPredicate) { } +#pragma warning disable CS0618 // Obsolete - delegates to ref overload for binary compat with existing overrides + protected virtual bool IsDbNullValue(T? value, object? writeState) + { + // The obsolete ref overload is kept around for binary compatibility on the signature, but + // mutating writeState during a null probe is no longer a supported behaviour. 
Detect the + // mutation via a local captured before the forward and throw — a violating override is a + // bug in the derived converter, not something to defend against here. + var originalWriteState = writeState; + var isDbNull = IsDbNullValue(value, ref writeState); + if (!ReferenceEquals(writeState, originalWriteState)) + ThrowHelper.ThrowInvalidOperationException( + $"{GetType().FullName} mutated writeState from its IsDbNullValue override. Override the overload without ref and produce write state only in GetSize."); + return isDbNull; + } +#pragma warning restore CS0618 + + [Obsolete("Use the overload without ref.")] + [EditorBrowsable(EditorBrowsableState.Never)] protected virtual bool IsDbNullValue(T? value, ref object? writeState) => throw new NotSupportedException(); // Object null semantics as follows, if T is a struct (so excluding nullable) report false for null values, don't throw on the cast. // As a result this creates symmetry with IsDbNull when we're dealing with a struct T, as it cannot be passed null at all. - private protected override bool IsDbNullValueAsObject(object? value, ref object? writeState) - => (default(T) is null || value is not null) && IsDbNullValue((T?)value, ref writeState); + private protected override bool IsDbNullValueAsObject(object? value, object? writeState) + => (default(T) is null || value is not null) && IsDbNullValue((T?)value, writeState); - public bool IsDbNull([NotNullWhen(false)] T? value, ref object? writeState) + public bool IsDbNull([NotNullWhen(false)] T? value, object? writeState) => DbNullPredicateKind switch { DbNullPredicate.Null => value is null, DbNullPredicate.None => false, DbNullPredicate.PolymorphicNull => value is null or DBNull, // We do the null check to keep the NotNullWhen(false) invariant. 
- DbNullPredicate.Custom => IsDbNullValue(value, ref writeState) || (value is null && ThrowInvalidNullValue()), + DbNullPredicate.Custom => IsDbNullValue(value, writeState) || (value is null && ThrowInvalidNullValue()), _ => ThrowDbNullPredicateOutOfRange() }; + [Obsolete("Use the overload without ref.")] + [EditorBrowsable(EditorBrowsableState.Never)] + public bool IsDbNull([NotNullWhen(false)] T? value, ref object? writeState) + => IsDbNull(value, writeState); + public abstract T Read(PgReader reader); public abstract ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default); @@ -129,7 +161,7 @@ static class PgConverterExtensions { public static Size? GetSizeOrDbNull(this PgConverter converter, DataFormat format, Size writeRequirement, T? value, ref object? writeState) { - if (converter.IsDbNull(value, ref writeState)) + if (converter.IsDbNull(value, writeState)) return null; if (writeRequirement is { Kind: SizeKind.Exact, Value: var byteCount }) @@ -152,7 +184,7 @@ static class PgConverterExtensions public static Size? GetSizeOrDbNullAsObject(this PgConverter converter, DataFormat format, Size writeRequirement, object? value, ref object? writeState) { - if (converter.IsDbNullAsObject(value, ref writeState)) + if (converter.IsDbNullAsObject(value, writeState)) return null; if (writeRequirement is { Kind: SizeKind.Exact, Value: var byteCount }) @@ -190,9 +222,9 @@ public readonly struct SizeContext(DataFormat format, Size bufferRequirement) class MultiWriteState : IDisposable { - public required ArrayPool<(Size Size, object? WriteState)>? ArrayPool { get; init; } - public required ArraySegment<(Size Size, object? WriteState)> Data { get; init; } - public required bool AnyWriteState { get; init; } + public ArrayPool<(Size Size, object? WriteState)>? ArrayPool { get; set; } + public ArraySegment<(Size Size, object? 
WriteState)> Data { get; set; } + public bool AnyWriteState { get; set; } public void Dispose() { @@ -201,7 +233,7 @@ public void Dispose() if (AnyWriteState) { - for (var i = Data.Offset; i < array.Length; i++) + for (var i = Data.Offset; i < Data.Offset + Data.Count; i++) if (array[i].WriteState is IDisposable disposable) disposable.Dispose(); diff --git a/src/Npgsql/Internal/PgConverterResolver.cs b/src/Npgsql/Internal/PgConverterResolver.cs deleted file mode 100644 index 5fbe699017..0000000000 --- a/src/Npgsql/Internal/PgConverterResolver.cs +++ /dev/null @@ -1,111 +0,0 @@ -using System; -using System.Diagnostics.CodeAnalysis; -using Npgsql.Internal.Postgres; - -namespace Npgsql.Internal; - -[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] -public abstract class PgConverterResolver -{ - private protected PgConverterResolver() { } - - /// - /// Gets the appropriate converter solely based on PgTypeId. - /// - /// - /// The converter resolution. - /// - /// Implementations should not return new instances of the possible converters that can be returned, instead its expected these are cached once used. - /// Array or other collection converters depend on this to cache their own converter - which wraps the element converter - with the cache key being the element converter reference. - /// - public abstract PgConverterResolution GetDefault(PgTypeId? pgTypeId); - - /// - /// Gets the appropriate converter to read with based on the given field info. - /// - /// - /// The converter resolution. - /// - /// Implementations should not return new instances of the possible converters that can be returned, instead its expected these are cached once used. - /// Array or other collection converters depend on this to cache their own converter - which wraps the element converter - with the cache key being the element converter reference. 
- /// - public virtual PgConverterResolution Get(Field field) => GetDefault(field.PgTypeId); - - internal abstract Type TypeToConvert { get; } - - internal abstract PgConverterResolution? GetAsObjectInternal(PgTypeInfo typeInfo, object? value, PgTypeId? expectedPgTypeId); - - internal PgConverterResolution GetDefaultInternal(bool validate, bool expectPortableTypeIds, PgTypeId? pgTypeId) - { - var resolution = GetDefault(pgTypeId); - if (validate) - Validate(nameof(GetDefault), resolution, TypeToConvert, pgTypeId, expectPortableTypeIds); - return resolution; - } - - internal PgConverterResolution GetInternal(PgTypeInfo typeInfo, Field field) - { - var resolution = Get(field); - if (typeInfo.ValidateResolution) - Validate(nameof(Get), resolution, TypeToConvert, field.PgTypeId, typeInfo.Options.PortableTypeIds); - return resolution; - } - - private protected static void Validate(string methodName, PgConverterResolution resolution, Type expectedTypeToConvert, PgTypeId? expectedPgTypeId, bool expectPortableTypeIds) - { - if (resolution.Converter is null) - throw new InvalidOperationException($"'{methodName}' returned a null {nameof(PgConverterResolution.Converter)} unexpectedly."); - - // We allow object resolvers to return any converter, this is to help: - // - Composing resolvers being able to use converter type identity (instead of everything being CastingConverter). - // - Reduce indirection by allowing disparate type converters to be returned directly. - // As a consequence any object typed resolver info is always a boxing one, to reduce the chances invalid casts to PgConverter are attempted. 
- if (expectedTypeToConvert != typeof(object) && resolution.Converter.TypeToConvert != expectedTypeToConvert) - throw new InvalidOperationException($"'{methodName}' returned a {nameof(PgConverterResolution.Converter)} of type {resolution.Converter.TypeToConvert} instead of {expectedTypeToConvert} unexpectedly."); - - if (expectPortableTypeIds && resolution.PgTypeId.IsOid || !expectPortableTypeIds && resolution.PgTypeId.IsDataTypeName) - throw new InvalidOperationException($"{methodName}' returned a resolution with a {nameof(PgConverterResolution.PgTypeId)} that was not in canonical form."); - - if (expectedPgTypeId is not null && resolution.PgTypeId != expectedPgTypeId) - throw new InvalidOperationException( - $"'{methodName}' returned a different {nameof(PgConverterResolution.PgTypeId)} than was passed in as expected." + - $" If such a mismatch occurs an exception should be thrown instead."); - } - - protected ArgumentOutOfRangeException CreateUnsupportedPgTypeIdException(PgTypeId pgTypeId) - => new(nameof(pgTypeId), pgTypeId, "Unsupported PgTypeId."); -} - -public abstract class PgConverterResolver : PgConverterResolver -{ - /// - /// Gets the appropriate converter to write with based on the given value. - /// - /// - /// - /// The converter resolution. - /// - /// Implementations should not return new instances of the possible converters that can be returned, instead its expected these are - /// cached once used. Array or other collection converters depend on this to cache their own converter - which wraps the element - /// converter - with the cache key being the element converter reference. - /// - public abstract PgConverterResolution? Get(T? value, PgTypeId? expectedPgTypeId); - - internal sealed override Type TypeToConvert => typeof(T); - - internal PgConverterResolution? GetInternal(PgTypeInfo typeInfo, T? value, PgTypeId? 
expectedPgTypeId) - { - var resolution = Get(value, expectedPgTypeId); - if (typeInfo.ValidateResolution && resolution is not null) - Validate(nameof(Get), resolution.GetValueOrDefault(), TypeToConvert, expectedPgTypeId, typeInfo.Options.PortableTypeIds); - return resolution; - } - - internal sealed override PgConverterResolution? GetAsObjectInternal(PgTypeInfo typeInfo, object? value, PgTypeId? expectedPgTypeId) - { - var resolution = Get(value is null ? default : (T)value, expectedPgTypeId); - if (typeInfo.ValidateResolution && resolution is not null) - Validate(nameof(Get), resolution.GetValueOrDefault(), TypeToConvert, expectedPgTypeId, typeInfo.Options.PortableTypeIds); - return resolution; - } -} diff --git a/src/Npgsql/Internal/PgReader.cs b/src/Npgsql/Internal/PgReader.cs index aa2eea9e5d..d9686cb5bb 100644 --- a/src/Npgsql/Internal/PgReader.cs +++ b/src/Npgsql/Internal/PgReader.cs @@ -257,7 +257,7 @@ async ValueTask GetTextReader(bool async, Encoding encoding, Cancell var currentOffset = CurrentOffset; var currentRemaining = CurrentSize - currentOffset; - // Always make a new reader for GetChars, see GetColumnStream. + // Always make a new reader for untracked usage, see GetStreamCore. var preparedTextReader = (untracked ? null : _preparedTextReader) ?? new(); preparedTextReader.Init(encoding.GetString(async ? 
await ReadBytesAsync(currentRemaining, cancellationToken).ConfigureAwait(false) diff --git a/src/Npgsql/Internal/PgSerializerOptions.cs b/src/Npgsql/Internal/PgSerializerOptions.cs index 9a68ba1d70..daa69c3993 100644 --- a/src/Npgsql/Internal/PgSerializerOptions.cs +++ b/src/Npgsql/Internal/PgSerializerOptions.cs @@ -32,7 +32,7 @@ internal PgSerializerOptions(NpgsqlDatabaseInfo databaseInfo, PgTypeInfoResolver UnspecifiedDBNullTypeInfo = new(this, new Converters.Internal.VoidConverter(), DataTypeName.Unspecified, unboxedType: typeof(DBNull)); } - internal PgTypeInfo UnspecifiedDBNullTypeInfo { get; } + internal PgConcreteTypeInfo UnspecifiedDBNullTypeInfo { get; } PostgresType? _textPgType; internal PgTypeId TextPgTypeId => ToCanonicalTypeId(_textPgType ??= DatabaseInfo.GetPostgresType(DataTypeNames.Text)); diff --git a/src/Npgsql/Internal/PgTypeInfo.cs b/src/Npgsql/Internal/PgTypeInfo.cs index aa3640a82e..73a18aa31f 100644 --- a/src/Npgsql/Internal/PgTypeInfo.cs +++ b/src/Npgsql/Internal/PgTypeInfo.cs @@ -1,11 +1,12 @@ using System; +using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using Npgsql.Internal.Postgres; namespace Npgsql.Internal; [Experimental(NpgsqlDiagnostics.ConvertersExperimental)] -public class PgTypeInfo +public abstract class PgTypeInfo { readonly bool _canBinaryConvert; readonly BufferRequirements _binaryBufferRequirements; @@ -25,7 +26,7 @@ public class PgTypeInfo SupportsWriting = true; } - public PgTypeInfo(PgSerializerOptions options, PgConverter converter, PgTypeId pgTypeId, Type? unboxedType = null) + private protected PgTypeInfo(PgSerializerOptions options, PgConverter converter, PgTypeId pgTypeId, Type? 
unboxedType = null) : this(options, converter.TypeToConvert, unboxedType) { Converter = converter; @@ -34,19 +35,16 @@ public PgTypeInfo(PgSerializerOptions options, PgConverter converter, PgTypeId p _canTextConvert = converter.CanConvert(DataFormat.Text, out _textBufferRequirements); } - private protected PgTypeInfo(PgSerializerOptions options, Type type, PgConverterResolution? resolution, Type? unboxedType = null) + private protected PgTypeInfo(PgSerializerOptions options, Type type, PgConcreteTypeInfo? defaultConcrete, Type? unboxedType = null) : this(options, type, unboxedType) { - if (resolution is { } res) + if (defaultConcrete is not null) { - // Resolutions should always be in canonical form already. - if (options.PortableTypeIds && res.PgTypeId.IsOid || !options.PortableTypeIds && res.PgTypeId.IsDataTypeName) - throw new ArgumentException("Given type id is not in canonical form. Make sure ConverterResolver implementations close over canonical ids, e.g. by calling options.GetCanonicalTypeId(pgTypeId) on the constructor arguments.", nameof(PgTypeId)); - - PgTypeId = res.PgTypeId; - Converter = res.Converter; - _canBinaryConvert = res.Converter.CanConvert(DataFormat.Binary, out _binaryBufferRequirements); - _canTextConvert = res.Converter.CanConvert(DataFormat.Text, out _textBufferRequirements); + Debug.Assert(options.PortableTypeIds && defaultConcrete.PgTypeId.IsDataTypeName || !options.PortableTypeIds && defaultConcrete.PgTypeId.IsOid); + PgTypeId = defaultConcrete.PgTypeId; + Converter = defaultConcrete.Converter; + _canBinaryConvert = defaultConcrete.Converter.CanConvert(DataFormat.Binary, out _binaryBufferRequirements); + _canTextConvert = defaultConcrete.Converter.CanConvert(DataFormat.Text, out _textBufferRequirements); } } @@ -59,57 +57,56 @@ private protected PgTypeInfo(PgSerializerOptions options, Type type, PgConverter public bool SupportsWriting { get; init; } public DataFormat? 
PreferredFormat { get; init; } - // Doubles as the storage for the converter coming from a default resolution (used to confirm whether we can use cached info). - PgConverter? Converter { get; } - [MemberNotNullWhen(false, nameof(Converter))] - [MemberNotNullWhen(false, nameof(PgTypeId))] - internal bool IsResolverInfo => GetType() == typeof(PgResolverTypeInfo); + // Doubles as the storage for the converter coming from a default provider result (used to confirm whether we can use cached info). + protected PgConverter? Converter { get; } // TODO pull validate from options + internal exempt for perf? - internal bool ValidateResolution => true; + internal bool ValidateProviderResults => true; // Used for internal converters to save on binary bloat. internal bool IsBoxing { get; } public PgTypeId? PgTypeId { get; } - public PgConverterResolution GetResolution(T? value) + // Having it here so we can easily extend any behavior. + internal void DisposeWriteState(object writeState) + { + if (writeState is IDisposable disposable) + disposable.Dispose(); + } + + public PgConcreteTypeInfo GetConcreteTypeInfo(T? value, out object? writeState) { - if (this is not PgResolverTypeInfo resolverInfo) - return new(Converter!, PgTypeId.GetValueOrDefault()); + if (this is not PgProviderTypeInfo providerTypeInfo) + { + writeState = null; + return (PgConcreteTypeInfo)this; + } - var resolution = resolverInfo.GetResolution(value, null); - return resolution ?? resolverInfo.GetDefaultResolution(null); + return providerTypeInfo.GetConcreteTypeInfo(default, value, out writeState) ?? providerTypeInfo.GetDefaultConcreteTypeInfo(null); } - // Note: this api is not called GetResolutionAsObject as the semantics are extended, DBNull is a NULL value for all object values. - public PgConverterResolution GetObjectResolution(object? value) + // Note: this api is not called GetConcreteTypeInfoAsObject as the semantics are extended, DBNull is a NULL value for all object values. 
+ public PgConcreteTypeInfo GetObjectConcreteTypeInfo(object? value, out object? writeState) { + writeState = null; switch (this) { - case { IsResolverInfo: false }: - return new(Converter, PgTypeId.GetValueOrDefault()); - case PgResolverTypeInfo resolverInfo: - PgConverterResolution? resolution = null; + case PgConcreteTypeInfo v: + return v; + case PgProviderTypeInfo providerTypeInfo: + PgConcreteTypeInfo? concreteTypeInfo = null; if (value is not DBNull) - resolution = resolverInfo.GetResolutionAsObject(value, null); - return resolution ?? resolverInfo.GetDefaultResolution(null); + concreteTypeInfo = providerTypeInfo.GetAsObjectConcreteTypeInfo(default, value, out writeState); + return concreteTypeInfo ?? providerTypeInfo.GetDefaultConcreteTypeInfo(null); default: return ThrowNotSupported(); } - static PgConverterResolution ThrowNotSupported() + static PgConcreteTypeInfo ThrowNotSupported() => throw new NotSupportedException("Should not happen, please file a bug."); } - /// Throws if the instance is a PgResolverTypeInfo. 
- internal PgConverterResolution GetResolution() - { - if (IsResolverInfo) - ThrowHelper.ThrowInvalidOperationException("Instance is a PgResolverTypeInfo."); - return new(Converter, PgTypeId.GetValueOrDefault()); - } - bool CanConvert(PgConverter converter, DataFormat format, out BufferRequirements bufferRequirements) { if (HasCachedInfo(converter)) @@ -139,22 +136,22 @@ internal bool TryBind(Field field, DataFormat format, out PgConverterInfo info) { switch (this) { - case { IsResolverInfo: false }: - if (!CanConvert(Converter, format, out var bufferRequirements)) + case PgConcreteTypeInfo v: + if (!CanConvert(v.Converter, format, out var bufferRequirements)) { info = default; return false; } - info = new(this, Converter, bufferRequirements.Read); + info = new(this, v.Converter, bufferRequirements.Read); return true; - case PgResolverTypeInfo resolverInfo: - var resolution = resolverInfo.GetResolution(field); - if (!CanConvert(resolution.Converter, format, out bufferRequirements)) + case PgProviderTypeInfo providerTypeInfo: + var concreteTypeInfo = providerTypeInfo.GetConcreteTypeInfo(field) ?? providerTypeInfo.GetDefaultConcreteTypeInfo(field.PgTypeId); + if (!CanConvert(concreteTypeInfo.Converter, format, out bufferRequirements)) { info = default; return false; } - info = new(this, resolution.Converter, bufferRequirements.Read); + info = new(this, concreteTypeInfo.Converter, bufferRequirements.Read); return true; default: throw new NotSupportedException("Should not happen, please file a bug."); @@ -172,7 +169,7 @@ internal PgConverterInfo Bind(Field field, DataFormat format) // Bind for writing. /// When result is null, the value was interpreted to be a SQL NULL. - internal PgConverterInfo? Bind(PgConverter converter, T? value, out Size size, out object? writeState, out DataFormat format, DataFormat? formatPreference = null) + internal PgConverterInfo? Bind(PgConverter converter, T? value, out Size size, ref object? writeState, out DataFormat format, DataFormat? 
formatPreference = null) { // Basically exists to catch cases like object[] resolving a polymorphic read converter, better to fail during binding than writing. if (!SupportsWriting) @@ -180,7 +177,6 @@ internal PgConverterInfo Bind(Field field, DataFormat format) format = ResolveFormat(converter, out var bufferRequirements, formatPreference ?? PreferredFormat); - writeState = null; if (converter.GetSizeOrDbNull(format, bufferRequirements.Write, value, ref writeState) is not { } sizeOrDbNull) { size = default; @@ -194,7 +190,7 @@ internal PgConverterInfo Bind(Field field, DataFormat format) // Bind for writing. // Note: this api is not called BindAsObject as the semantics are extended, DBNull is a NULL value for all object values. /// When result is null or DBNull, the value was interpreted to be a SQL NULL. - internal PgConverterInfo? BindObject(PgConverter converter, object? value, out Size size, out object? writeState, out DataFormat format, DataFormat? formatPreference = null) + internal PgConverterInfo? BindObject(PgConverter converter, object? value, ref Size size, ref object? writeState, out DataFormat format, DataFormat? formatPreference = null) { // Basically exists to catch cases like object[] resolving a polymorphic read converter, better to fail during binding than writing. if (!SupportsWriting) @@ -203,7 +199,6 @@ internal PgConverterInfo Bind(Field field, DataFormat format) format = ResolveFormat(converter, out var bufferRequirements, formatPreference ?? PreferredFormat); // Given SQL values are effectively a union of T | NULL we support DBNull.Value to signify a NULL value for all types except DBNull in this api. - writeState = null; if (value is DBNull && Type != typeof(DBNull) || converter.GetSizeOrDbNullAsObject(format, bufferRequirements.Write, value, ref writeState) is not { } sizeOrDbNull) { size = default; @@ -242,63 +237,116 @@ internal static bool GetDefaultSupportsReading(Type type, Type? 
unboxedType) => unboxedType is null || unboxedType == type; } -public sealed class PgResolverTypeInfo( +public sealed class PgProviderTypeInfo( PgSerializerOptions options, - PgConverterResolver converterResolver, + PgConcreteTypeInfoProvider typeInfoProvider, PgTypeId? pgTypeId, Type? unboxedType = null) : PgTypeInfo(options, - converterResolver.TypeToConvert, - pgTypeId is { } typeId ? ResolveDefaultId(options, converterResolver, typeId) : null, - unboxedType ?? (converterResolver.TypeToConvert == typeof(object) ? typeof(object) : null)) + typeInfoProvider.TypeToConvert, + pgTypeId is { } typeId ? GetDefault(options, typeInfoProvider, typeId) : null, + unboxedType) { - // We always mark resolvers with type object as boxing, as they may freely return converters for any type (see PgConverterResolver.Validate). + readonly PgConcreteTypeInfoProvider _typeInfoProvider = typeInfoProvider; + + // We'll always validate the default provider result, the info will be re-used so there is no real downside. + static PgConcreteTypeInfo GetDefault(PgSerializerOptions options, PgConcreteTypeInfoProvider concreteTypeInfoProvider, PgTypeId typeId) + { + var result = concreteTypeInfoProvider.GetDefault(options.GetCanonicalTypeId(typeId)); + ValidateResult(nameof(GetDefault), result, concreteTypeInfoProvider.TypeToConvert, options.PortableTypeIds); + return result; + } + + public PgConcreteTypeInfo GetDefaultConcreteTypeInfo(PgTypeId? pgTypeId) + { + if (pgTypeId is { } id && PgTypeId is { } decidedId && id != decidedId) + ThrowUnexpectedPgTypeId(nameof(pgTypeId)); + + var result = _typeInfoProvider.GetDefault(pgTypeId ?? PgTypeId); + ValidateResult(nameof(PgConcreteTypeInfoProvider.GetDefault), result); + return result; + } + + public PgConcreteTypeInfo? 
GetConcreteTypeInfo(Field field) + { + if (PgTypeId is { } decidedId && field.PgTypeId != decidedId) + ThrowUnexpectedPgTypeId(nameof(field)); - // We'll always validate the default resolution, the info will be re-used so there is no real downside. - static PgConverterResolution ResolveDefaultId(PgSerializerOptions options, PgConverterResolver converterResolver, PgTypeId typeId) - => converterResolver.GetDefaultInternal(validate: true, options.PortableTypeIds, options.GetCanonicalTypeId(typeId)); + var result = _typeInfoProvider.GetForField(field); + if (result is not null) + ValidateResult(nameof(PgConcreteTypeInfoProvider.GetForField), result); + return result; + } - public PgConverterResolution? GetResolution(T? value, PgTypeId? expectedPgTypeId) + public PgConcreteTypeInfo? GetConcreteTypeInfo(ProviderValueContext context, T? value, out object? writeState) { - return converterResolver is PgConverterResolver resolverT - ? resolverT.GetInternal(this, value, expectedPgTypeId ?? PgTypeId) + if (PgTypeId is { } pgTypeId) + { + if (context.ExpectedPgTypeId is not { } expectedId) + { + context = context with { ExpectedPgTypeId = pgTypeId }; + } + else if (pgTypeId != expectedId) + ThrowUnexpectedPgTypeId(nameof(context.ExpectedPgTypeId)); + } + + writeState = null; + var result = _typeInfoProvider is PgConcreteTypeInfoProvider providerT + ? providerT.GetForValue(context, value, ref writeState) : ThrowNotSupportedType(typeof(T)); - PgConverterResolution ThrowNotSupportedType(Type? type) + if (result is not null) + ValidateResult(nameof(PgConcreteTypeInfoProvider<>.GetForValue), result); + return result; + + PgConcreteTypeInfo ThrowNotSupportedType(Type? type) => throw new NotSupportedException(IsBoxing - ? "TypeInfo only supports boxing conversions, call GetResolutionAsObject instead." + ? $"TypeInfo only supports boxing conversions, call {nameof(GetAsObjectConcreteTypeInfo)} or {nameof(GetObjectConcreteTypeInfo)} instead." 
: $"TypeInfo is not of type {type}"); } - public PgConverterResolution? GetResolutionAsObject(object? value, PgTypeId? expectedPgTypeId) - => converterResolver.GetAsObjectInternal(this, value, expectedPgTypeId ?? PgTypeId); + public PgConcreteTypeInfo? GetAsObjectConcreteTypeInfo(ProviderValueContext context, object? value, out object? writeState) + { + if (PgTypeId is { } pgTypeId) + { + if (context.ExpectedPgTypeId is not { } expectedId) + { + context = context with { ExpectedPgTypeId = pgTypeId }; + } + else if (pgTypeId != expectedId) + ThrowUnexpectedPgTypeId(nameof(context.ExpectedPgTypeId)); + } - public PgConverterResolution GetResolution(Field field) - => converterResolver.GetInternal(this, field); + writeState = null; + var result = _typeInfoProvider.GetForValueAsObject(context, value, ref writeState); + if (result is not null) + ValidateResult(nameof(PgConcreteTypeInfoProvider.GetForValueAsObject), result); + return result; + } - public PgConverterResolution GetDefaultResolution(PgTypeId? expectedPgTypeId) - => converterResolver.GetDefaultInternal(ValidateResolution, Options.PortableTypeIds, expectedPgTypeId ?? PgTypeId); + public static PgConcreteTypeInfoProvider GetProvider(PgProviderTypeInfo instance) => instance._typeInfoProvider; - public PgConverterResolver GetConverterResolver() => converterResolver; -} + static void ThrowUnexpectedPgTypeId(string parameterName) + => throw new ArgumentException($"PgTypeId does not match the decided value on this {nameof(PgProviderTypeInfo)}.", parameterName); -// TODO until https://github.com/npgsql/npgsql/pull/6316 -static class PgTypeInfoExtensions -{ - // Having it here so we can easily extend any behavior. 
- public static void DisposeWriteState(this PgTypeInfo typeInfo, object writeState) + void ValidateResult(string methodName, PgConcreteTypeInfo result) + => ValidateResult(methodName, result, _typeInfoProvider.TypeToConvert, Options.PortableTypeIds); + + static void ValidateResult(string methodName, PgConcreteTypeInfo result, Type expectedTypeToConvert, bool expectPortableTypeIds) { - if (writeState is IDisposable disposable) - disposable.Dispose(); + if (expectedTypeToConvert != typeof(object) && result.Converter.TypeToConvert != expectedTypeToConvert) + throw new InvalidOperationException($"'{methodName}' returned a {nameof(result.Converter)} of type {result.Converter.TypeToConvert} instead of {expectedTypeToConvert} unexpectedly."); + + if (expectPortableTypeIds && result.PgTypeId.IsOid || !expectPortableTypeIds && result.PgTypeId.IsDataTypeName) + throw new InvalidOperationException($"'{methodName}' returned a concrete type info with a {nameof(result.PgTypeId)} that was not in canonical form."); } } -public readonly struct PgConverterResolution(PgConverter converter, PgTypeId pgTypeId) +public sealed class PgConcreteTypeInfo(PgSerializerOptions options, PgConverter converter, PgTypeId pgTypeId, Type? unboxedType = null) + : PgTypeInfo(options, converter, pgTypeId, unboxedType) { - public PgConverter Converter { get; } = converter; - public PgTypeId PgTypeId { get; } = pgTypeId; - - public PgConverter GetConverter() => (PgConverter)Converter; + public new PgConverter Converter => base.Converter!; + public new PgTypeId PgTypeId => base.PgTypeId.GetValueOrDefault(); } readonly struct PgConverterInfo @@ -311,9 +359,9 @@ public PgConverterInfo(PgTypeInfo pgTypeInfo, PgConverter converter, Size buffer Converter = converter; BufferRequirement = bufferRequirement; - // Object typed resolvers can return any type of converter, so we check the type of the converter instead. 
+ // Object typed providers can return any type of converter, so we check the type of the converter instead. // We cannot do this in general as we should respect the 'unboxed type' of infos, which can differ from the converter type. - if (pgTypeInfo.IsResolverInfo && pgTypeInfo.Type == typeof(object)) + if (pgTypeInfo is PgProviderTypeInfo && pgTypeInfo.Type == typeof(object)) TypeToConvert = Converter.TypeToConvert; else TypeToConvert = pgTypeInfo.Type; diff --git a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Multirange.cs b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Multirange.cs index 3d82ab03f1..8f0cf46bcf 100644 --- a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Multirange.cs +++ b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Multirange.cs @@ -73,16 +73,16 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) } else { - mappings.AddResolverType[]>(DataTypeNames.TsMultirange, + mappings.AddProviderType[]>(DataTypeNames.TsMultirange, static (options, mapping, requiresDataTypeName) => mapping.CreateInfo(options, - DateTimeConverterResolver.CreateMultirangeResolver[], NpgsqlRange>(options, + DateTimeTypeInfoProvider.CreateMultirangeProvider[], NpgsqlRange>(options, options.GetCanonicalTypeId(DataTypeNames.TsTzMultirange), options.GetCanonicalTypeId(DataTypeNames.TsMultirange), options.EnableDateTimeInfinityConversions), requiresDataTypeName), isDefault: true); - mappings.AddResolverType>>(DataTypeNames.TsMultirange, + mappings.AddProviderType>>(DataTypeNames.TsMultirange, static (options, mapping, requiresDataTypeName) => mapping.CreateInfo(options, - DateTimeConverterResolver.CreateMultirangeResolver>, NpgsqlRange>(options, + DateTimeTypeInfoProvider.CreateMultirangeProvider>, NpgsqlRange>(options, options.GetCanonicalTypeId(DataTypeNames.TsTzMultirange), options.GetCanonicalTypeId(DataTypeNames.TsMultirange), 
options.EnableDateTimeInfinityConversions), requiresDataTypeName)); @@ -125,16 +125,16 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) } else { - mappings.AddResolverType[]>(DataTypeNames.TsTzMultirange, + mappings.AddProviderType[]>(DataTypeNames.TsTzMultirange, static (options, mapping, requiresDataTypeName) => mapping.CreateInfo(options, - DateTimeConverterResolver.CreateMultirangeResolver[], NpgsqlRange>(options, + DateTimeTypeInfoProvider.CreateMultirangeProvider[], NpgsqlRange>(options, options.GetCanonicalTypeId(DataTypeNames.TsTzMultirange), options.GetCanonicalTypeId(DataTypeNames.TsMultirange), options.EnableDateTimeInfinityConversions), requiresDataTypeName), isDefault: true); - mappings.AddResolverType>>(DataTypeNames.TsTzMultirange, + mappings.AddProviderType>>(DataTypeNames.TsTzMultirange, static (options, mapping, requiresDataTypeName) => mapping.CreateInfo(options, - DateTimeConverterResolver.CreateMultirangeResolver>, NpgsqlRange>(options, + DateTimeTypeInfoProvider.CreateMultirangeProvider>, NpgsqlRange>(options, options.GetCanonicalTypeId(DataTypeNames.TsTzMultirange), options.GetCanonicalTypeId(DataTypeNames.TsMultirange), options.EnableDateTimeInfinityConversions), requiresDataTypeName)); @@ -211,8 +211,8 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) } else { - mappings.AddResolverArrayType[]>(DataTypeNames.TsMultirange); - mappings.AddResolverArrayType>>(DataTypeNames.TsMultirange); + mappings.AddProviderArrayType[]>(DataTypeNames.TsMultirange); + mappings.AddProviderArrayType>>(DataTypeNames.TsMultirange); } mappings.AddArrayType[]>(DataTypeNames.TsMultirange); @@ -228,8 +228,8 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) } else { - mappings.AddResolverArrayType[]>(DataTypeNames.TsTzMultirange); - mappings.AddResolverArrayType>>(DataTypeNames.TsTzMultirange); + mappings.AddProviderArrayType[]>(DataTypeNames.TsTzMultirange); + 
mappings.AddProviderArrayType>>(DataTypeNames.TsTzMultirange); mappings.AddArrayType[]>(DataTypeNames.TsTzMultirange); mappings.AddArrayType>>(DataTypeNames.TsTzMultirange); } diff --git a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Range.cs b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Range.cs index 17ba8c3c33..b145097881 100644 --- a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Range.cs +++ b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.Range.cs @@ -47,9 +47,9 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) } else { - mappings.AddResolverStructType>(DataTypeNames.TsRange, + mappings.AddProviderStructType>(DataTypeNames.TsRange, static (options, mapping, requiresDataTypeName) => mapping.CreateInfo(options, - DateTimeConverterResolver.CreateRangeResolver(options, + DateTimeTypeInfoProvider.CreateRangeProvider(options, options.GetCanonicalTypeId(DataTypeNames.TsTzRange), options.GetCanonicalTypeId(DataTypeNames.TsRange), options.EnableDateTimeInfinityConversions), requiresDataTypeName), @@ -72,9 +72,9 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) } else { - mappings.AddResolverStructType>(DataTypeNames.TsTzRange, + mappings.AddProviderStructType>(DataTypeNames.TsTzRange, static (options, mapping, requiresDataTypeName) => mapping.CreateInfo(options, - DateTimeConverterResolver.CreateRangeResolver(options, + DateTimeTypeInfoProvider.CreateRangeProvider(options, options.GetCanonicalTypeId(DataTypeNames.TsTzRange), options.GetCanonicalTypeId(DataTypeNames.TsRange), options.EnableDateTimeInfinityConversions), requiresDataTypeName), @@ -122,7 +122,7 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) if (Statics.LegacyTimestampBehavior) mappings.AddStructArrayType>(DataTypeNames.TsRange); else - mappings.AddResolverStructArrayType>(DataTypeNames.TsRange); + 
mappings.AddProviderStructArrayType>(DataTypeNames.TsRange); mappings.AddStructArrayType>(DataTypeNames.TsRange); // tstzrange @@ -133,7 +133,7 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) } else { - mappings.AddResolverStructArrayType>(DataTypeNames.TsTzRange); + mappings.AddProviderStructArrayType>(DataTypeNames.TsTzRange); mappings.AddStructArrayType>(DataTypeNames.TsTzRange); } mappings.AddStructArrayType>(DataTypeNames.TsTzRange); diff --git a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs index 39bea5f3fb..921d27e19e 100644 --- a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs @@ -43,7 +43,7 @@ class Resolver : IPgTypeInfoResolver if (type != typeof(Stream)) return null; - return new PgTypeInfo(options, new StreamConverter(supportsTextFormat: true), dataTypeName) { SupportsWriting = false }; + return new PgConcreteTypeInfo(options, new StreamConverter(supportsTextFormat: true), dataTypeName) { SupportsWriting = false }; } static PgTypeInfo? GetEnumTypeInfo(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) @@ -52,7 +52,7 @@ class Resolver : IPgTypeInfoResolver || options.DatabaseInfo.GetPostgresType(dataTypeName) is not PostgresEnumType) return null; - return new PgTypeInfo(options, TextConverter.CreateStringConverter(options.TextEncoding), dataTypeName, + return new PgConcreteTypeInfo(options, TextConverter.CreateStringConverter(options.TextEncoding), dataTypeName, unboxedType: type == typeof(object) ? 
typeof(string) : null); } @@ -97,7 +97,7 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) static (options, mapping, _) => mapping.CreateInfo(options, new ReadOnlyMemoryByteaConverter()), MatchRequirement.DataTypeName); mappings.AddType(DataTypeNames.Text, - static (options, mapping, _) => new PgTypeInfo(options, new StreamConverter(supportsTextFormat: true), new DataTypeName(mapping.DataTypeName), unboxedType: mapping.Type != typeof(Stream) ? mapping.Type : null), + static (options, mapping, _) => new PgConcreteTypeInfo(options, new StreamConverter(supportsTextFormat: true), new DataTypeName(mapping.DataTypeName), unboxedType: mapping.Type != typeof(Stream) ? mapping.Type : null), mapping => mapping with { MatchRequirement = MatchRequirement.DataTypeName, TypeMatchPredicate = type => typeof(Stream).IsAssignableFrom(type) }); //Special mappings, these have no corresponding array mapping. mappings.AddType(DataTypeNames.Text, @@ -124,7 +124,7 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) static (options, mapping, _) => mapping.CreateInfo(options, new ReadOnlyMemoryByteaConverter()), MatchRequirement.DataTypeName); mappings.AddType(dataTypeName, - static (options, mapping, _) => new PgTypeInfo(options, new StreamConverter(supportsTextFormat: true), new DataTypeName(mapping.DataTypeName), unboxedType: mapping.Type != typeof(Stream) ? mapping.Type : null), + static (options, mapping, _) => new PgConcreteTypeInfo(options, new StreamConverter(supportsTextFormat: true), new DataTypeName(mapping.DataTypeName), unboxedType: mapping.Type != typeof(Stream) ? mapping.Type : null), mapping => mapping with { MatchRequirement = MatchRequirement.DataTypeName, TypeMatchPredicate = type => typeof(Stream).IsAssignableFrom(type) }); //Special mappings, these have no corresponding array mapping. 
mappings.AddType(dataTypeName, @@ -148,7 +148,7 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter>(jsonbVersion, new ReadOnlyMemoryByteaConverter())), MatchRequirement.DataTypeName); mappings.AddType(DataTypeNames.Jsonb, - static (options, mapping, _) => new PgTypeInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new StreamConverter(supportsTextFormat: true)), new DataTypeName(mapping.DataTypeName), unboxedType: mapping.Type != typeof(Stream) ? mapping.Type : null), + static (options, mapping, _) => new PgConcreteTypeInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new StreamConverter(supportsTextFormat: true)), new DataTypeName(mapping.DataTypeName), unboxedType: mapping.Type != typeof(Stream) ? mapping.Type : null), mapping => mapping with { MatchRequirement = MatchRequirement.DataTypeName, TypeMatchPredicate = type => typeof(Stream).IsAssignableFrom(type) }); //Special mappings, these have no corresponding array mapping. mappings.AddType(DataTypeNames.Jsonb, @@ -177,13 +177,13 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) static (options, mapping, _) => mapping.CreateInfo(options, new ReadOnlyMemoryByteaConverter())); mappings.AddType(DataTypeNames.Bytea, // TODO handling bytea textually would require conversions to hex strings, so currently we don't support it. - static (options, mapping, _) => new PgTypeInfo(options, new StreamConverter(supportsTextFormat: false), new DataTypeName(mapping.DataTypeName), unboxedType: mapping.Type != typeof(Stream) ? mapping.Type : null), + static (options, mapping, _) => new PgConcreteTypeInfo(options, new StreamConverter(supportsTextFormat: false), new DataTypeName(mapping.DataTypeName), unboxedType: mapping.Type != typeof(Stream) ? 
mapping.Type : null), mapping => mapping with { TypeMatchPredicate = type => typeof(Stream).IsAssignableFrom(type) }); // Varbit mappings.AddType(DataTypeNames.Varbit, static (options, mapping, _) => mapping.CreateInfo(options, - new PolymorphicBitStringConverterResolver(options.GetCanonicalTypeId(DataTypeNames.Varbit)), includeDataTypeName: true, supportsWriting: false)); + new PolymorphicBitStringTypeInfoProvider(options, options.GetCanonicalTypeId(DataTypeNames.Varbit)), includeDataTypeName: true, supportsWriting: false)); mappings.AddType(DataTypeNames.Varbit, static (options, mapping, _) => mapping.CreateInfo(options, new BitArrayBitStringConverter()), isDefault: true); mappings.AddStructType(DataTypeNames.Varbit, @@ -194,7 +194,7 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) // Bit mappings.AddType(DataTypeNames.Bit, static (options, mapping, _) => mapping.CreateInfo(options, - new PolymorphicBitStringConverterResolver(options.GetCanonicalTypeId(DataTypeNames.Bit)), includeDataTypeName: true, supportsWriting: false)); + new PolymorphicBitStringTypeInfoProvider(options, options.GetCanonicalTypeId(DataTypeNames.Bit)), includeDataTypeName: true, supportsWriting: false)); mappings.AddType(DataTypeNames.Bit, static (options, mapping, _) => mapping.CreateInfo(options, new BitArrayBitStringConverter()), isDefault: true); mappings.AddStructType(DataTypeNames.Bit, @@ -211,9 +211,9 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) } else { - mappings.AddResolverStructType(DataTypeNames.Timestamp, + mappings.AddProviderStructType(DataTypeNames.Timestamp, static (options, mapping, requiresDataTypeName) => mapping.CreateInfo(options, - DateTimeConverterResolver.CreateResolver(options, options.GetCanonicalTypeId(DataTypeNames.TimestampTz), options.GetCanonicalTypeId(DataTypeNames.Timestamp), + DateTimeTypeInfoProvider.CreateProvider(options, options.GetCanonicalTypeId(DataTypeNames.TimestampTz), 
options.GetCanonicalTypeId(DataTypeNames.Timestamp), options.EnableDateTimeInfinityConversions), requiresDataTypeName), isDefault: true); } mappings.AddStructType(DataTypeNames.Timestamp, @@ -230,9 +230,9 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) } else { - mappings.AddResolverStructType(DataTypeNames.TimestampTz, + mappings.AddProviderStructType(DataTypeNames.TimestampTz, static (options, mapping, requiresDataTypeName) => mapping.CreateInfo(options, - DateTimeConverterResolver.CreateResolver(options, options.GetCanonicalTypeId(DataTypeNames.TimestampTz), options.GetCanonicalTypeId(DataTypeNames.Timestamp), + DateTimeTypeInfoProvider.CreateProvider(options, options.GetCanonicalTypeId(DataTypeNames.TimestampTz), options.GetCanonicalTypeId(DataTypeNames.Timestamp), options.EnableDateTimeInfinityConversions), requiresDataTypeName), isDefault: true); mappings.AddStructType(DataTypeNames.TimestampTz, static (options, mapping, _) => mapping.CreateInfo(options, new DateTimeOffsetConverter(options.EnableDateTimeInfinityConversions))); @@ -331,14 +331,14 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) mappings.AddType( DataTypeNames.OidVector, static (options, mapping, _) => mapping.CreateInfo(options, - ArrayConverter.CreateArrayBased(new(new UInt32Converter(), new PgTypeId(DataTypeNames.Oid)), pgLowerBound: 0)), + ArrayConverter.CreateArrayBased(new(options, new UInt32Converter(), new PgTypeId(DataTypeNames.Oid)), pgLowerBound: 0)), MatchRequirement.DataTypeName); // Int2vector mappings.AddType( DataTypeNames.Int2Vector, static (options, mapping, _) => mapping.CreateInfo(options, - ArrayConverter.CreateArrayBased(new(new Int2Converter(), new PgTypeId(DataTypeNames.Int2)), pgLowerBound: 0)), + ArrayConverter.CreateArrayBased(new(options, new Int2Converter(), new PgTypeId(DataTypeNames.Int2)), pgLowerBound: 0)), MatchRequirement.DataTypeName); // Tid @@ -430,13 +430,13 @@ static 
TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) // Varbit // Object mapping first. - mappings.AddPolymorphicResolverArrayType(DataTypeNames.Varbit, static options => resolution => resolution.Converter switch + mappings.AddPolymorphicProviderArrayType(DataTypeNames.Varbit, static options => concreteTypeInfo => concreteTypeInfo.Converter switch { BoolBitStringConverter => PgConverterFactory.CreatePolymorphicArrayConverter( - () => ArrayConverter.CreateArrayBased(resolution, typeof(Array)), - () => ArrayConverter.CreateArrayBased(new(new NullableConverter(resolution.GetConverter()), resolution.PgTypeId), typeof(Array)), + () => ArrayConverter.CreateArrayBased(concreteTypeInfo, typeof(Array)), + () => ArrayConverter.CreateArrayBased(new(options, new NullableConverter((PgConverter)concreteTypeInfo.Converter), concreteTypeInfo.PgTypeId), typeof(Array)), options), - BitArrayBitStringConverter => ArrayConverter.CreateArrayBased(resolution, typeof(Array)), + BitArrayBitStringConverter => ArrayConverter.CreateArrayBased(concreteTypeInfo, typeof(Array)), _ => throw new NotSupportedException() }); mappings.AddArrayType(DataTypeNames.Varbit); @@ -445,13 +445,13 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) // Bit // Object mapping first. 
- mappings.AddPolymorphicResolverArrayType(DataTypeNames.Bit, static options => resolution => resolution.Converter switch + mappings.AddPolymorphicProviderArrayType(DataTypeNames.Bit, static options => concreteTypeInfo => concreteTypeInfo.Converter switch { BoolBitStringConverter => PgConverterFactory.CreatePolymorphicArrayConverter( - () => ArrayConverter.CreateArrayBased(resolution, typeof(Array)), - () => ArrayConverter.CreateArrayBased(new(new NullableConverter(resolution.GetConverter()), resolution.PgTypeId), typeof(Array)), + () => ArrayConverter.CreateArrayBased(concreteTypeInfo, typeof(Array)), + () => ArrayConverter.CreateArrayBased(new(options, new NullableConverter((PgConverter)concreteTypeInfo.Converter), concreteTypeInfo.PgTypeId), typeof(Array)), options), - BitArrayBitStringConverter => ArrayConverter.CreateArrayBased(resolution, typeof(Array)), + BitArrayBitStringConverter => ArrayConverter.CreateArrayBased(concreteTypeInfo, typeof(Array)), _ => throw new NotSupportedException() }); mappings.AddArrayType(DataTypeNames.Bit); @@ -462,14 +462,14 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) if (Statics.LegacyTimestampBehavior) mappings.AddStructArrayType(DataTypeNames.Timestamp); else - mappings.AddResolverStructArrayType(DataTypeNames.Timestamp); + mappings.AddProviderStructArrayType(DataTypeNames.Timestamp); mappings.AddStructArrayType(DataTypeNames.Timestamp); // TimestampTz if (Statics.LegacyTimestampBehavior) mappings.AddStructArrayType(DataTypeNames.TimestampTz); else - mappings.AddResolverStructArrayType(DataTypeNames.TimestampTz); + mappings.AddProviderStructArrayType(DataTypeNames.TimestampTz); mappings.AddStructArrayType(DataTypeNames.TimestampTz); mappings.AddStructArrayType(DataTypeNames.TimestampTz); @@ -546,9 +546,9 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) return null; var mappings = new TypeInfoMappingCollection(); - 
mappings.AddType(pgElementType.DataTypeName, - (options, mapping, _) => mapping.CreateInfo(options, new ObjectConverter(options, elementId)), MatchRequirement.DataTypeName); - mappings.AddArrayType(pgElementType.DataTypeName); + mappings.AddProviderType(pgElementType.DataTypeName, + (options, mapping, includeDataTypeName) => mapping.CreateInfo(options, new LateBoundTypeInfoProvider(options, elementId), includeDataTypeName), MatchRequirement.DataTypeName); + mappings.AddProviderArrayType(pgElementType.DataTypeName); return mappings.Find(type, dataTypeName, options); } diff --git a/src/Npgsql/Internal/ResolverFactories/NetworkTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/NetworkTypeInfoResolverFactory.cs index 6a2af4453f..d641cde931 100644 --- a/src/Npgsql/Internal/ResolverFactories/NetworkTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/NetworkTypeInfoResolverFactory.cs @@ -32,7 +32,7 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) // inet // There are certain IPAddress values like Loopback or Any that return a *private* derived type (see https://github.com/dotnet/runtime/issues/27870). mappings.AddType(DataTypeNames.Inet, - static (options, mapping, _) => new PgTypeInfo(options, new IPAddressConverter(), new DataTypeName(mapping.DataTypeName), + static (options, mapping, _) => new PgConcreteTypeInfo(options, new IPAddressConverter(), new DataTypeName(mapping.DataTypeName), unboxedType: mapping.Type != typeof(IPAddress) ? 
mapping.Type : null), mapping => mapping with { diff --git a/src/Npgsql/Internal/ResolverFactories/UnmappedTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/UnmappedTypeInfoResolverFactory.cs index d3dcabb467..2a62e7a8ab 100644 --- a/src/Npgsql/Internal/ResolverFactories/UnmappedTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/UnmappedTypeInfoResolverFactory.cs @@ -80,9 +80,9 @@ class RangeResolver : DynamicTypeInfoResolver matchedType is null ? null : matchedType == typeof(object) ? matchedType : matchedType.GetGenericArguments()[0], options.ToCanonicalTypeId(rangeType.Subtype.GetRepresentationalType())); - // We have no generic RangeConverterResolver so we would not know how to compose a range mapping for such infos. + // We have no generic range-specific PgConcreteTypeInfoProvider so we would not know how to compose a range mapping for provider-backed sub-infos. // See https://github.com/npgsql/npgsql/issues/5268 - if (subInfo is not { IsResolverInfo: false }) + if (subInfo is not PgConcreteTypeInfo) return null; subInfo = subInfo.ToNonBoxing(); @@ -91,10 +91,10 @@ class RangeResolver : DynamicTypeInfoResolver return CreateCollection().AddMapping(matchedType ?? converterType, dataTypeName, (options, mapping, _) => - new PgTypeInfo( + new PgConcreteTypeInfo( options, (PgConverter)Activator.CreateInstance(typeof(RangeConverter<>).MakeGenericType(subInfo.Type), - subInfo.GetResolution().Converter)!, + ((PgConcreteTypeInfo)subInfo).Converter)!, new DataTypeName(mapping.DataTypeName), unboxedType: matchedType is not null && matchedType != converterType ? converterType : null ) { PreferredFormat = subInfo.PreferredFormat, SupportsWriting = subInfo.SupportsWriting }, @@ -134,9 +134,9 @@ class MultirangeResolver : DynamicTypeInfoResolver var subInfo = options.GetTypeInfoInternal(type is null ? null : elementType ?? 
typeof(object), options.ToCanonicalTypeId(multirangeType.Subrange)); - // We have no generic MultirangeConverterResolver so we would not know how to compose a range mapping for such infos. + // We have no generic multirange-specific PgConcreteTypeInfoProvider so we would not know how to compose a multirange mapping for provider-backed sub-infos. // See https://github.com/npgsql/npgsql/issues/5268 - if (subInfo is not { IsResolverInfo: false }) + if (subInfo is not PgConcreteTypeInfo) return null; subInfo = subInfo.ToNonBoxing(); @@ -145,10 +145,10 @@ class MultirangeResolver : DynamicTypeInfoResolver return CreateCollection().AddMapping(type ?? converterType, dataTypeName, (options, mapping, _) => - new PgTypeInfo( + new PgConcreteTypeInfo( options, (PgConverter)Activator.CreateInstance(typeof(MultirangeConverter<,>).MakeGenericType(converterType, subInfo.Type), - subInfo.GetResolution().Converter)!, + ((PgConcreteTypeInfo)subInfo).Converter)!, new DataTypeName(mapping.DataTypeName), unboxedType: type is not null && type != converterType ? converterType : null ) { PreferredFormat = subInfo.PreferredFormat, SupportsWriting = subInfo.SupportsWriting }, diff --git a/src/Npgsql/Internal/TypeInfoMapping.cs b/src/Npgsql/Internal/TypeInfoMapping.cs index 74d0abf0d0..81893047c3 100644 --- a/src/Npgsql/Internal/TypeInfoMapping.cs +++ b/src/Npgsql/Internal/TypeInfoMapping.cs @@ -17,7 +17,7 @@ namespace Npgsql.Internal; /// /// /// -/// Relevant for `PgResolverTypeInfo` only: whether the instance can be constructed without passing mapping.DataTypeName, an exception occurs otherwise. +/// Relevant for `PgProviderTypeInfo` only: whether the instance can be constructed without passing mapping.DataTypeName, an exception occurs otherwise. 
/// [Experimental(NpgsqlDiagnostics.ConvertersExperimental)] public delegate PgTypeInfo TypeInfoFactory(PgSerializerOptions options, TypeInfoMapping mapping, bool requiresDataTypeName); @@ -164,7 +164,7 @@ public TypeInfoMappingCollection(IEnumerable items) static string ResolveFullyQualifiedDataTypeName(DataTypeName? dataTypeName, string mappingDataTypeName, PgSerializerOptions options) { - // Make sure plugins (which match on unqualified names) and converter resolvers get the fully qualified name to canonicalize. + // Make sure plugins (which match on unqualified names) and type info providers get the fully qualified name to canonicalize. if (dataTypeName is not null) return dataTypeName.GetValueOrDefault().Value; @@ -210,7 +210,7 @@ static TypeInfoFactory CreateComposedFactory(Type mappingType, TypeInfoMapping i var readingSupported = innerInfo.SupportsReading && (supportsReading ?? PgTypeInfo.GetDefaultSupportsReading(converter.TypeToConvert, unboxedType)); var writingSupported = innerInfo.SupportsWriting && (supportsWriting ?? true); - return new PgTypeInfo(options, converter, options.GetCanonicalTypeId(new DataTypeName(mapping.DataTypeName)), unboxedType) + return new PgConcreteTypeInfo(options, converter, options.GetCanonicalTypeId(new DataTypeName(mapping.DataTypeName)), unboxedType) { PreferredFormat = preferredFormat, SupportsReading = readingSupported, @@ -219,25 +219,25 @@ static TypeInfoFactory CreateComposedFactory(Type mappingType, TypeInfoMapping i }; // Helper to eliminate generic display class duplication. - static TypeInfoFactory CreateComposedFactory(Type mappingType, TypeInfoMapping innerMapping, Func mapper, bool copyPreferredFormat = false, bool? supportsReading = null, bool? supportsWriting = null) + static TypeInfoFactory CreateComposedFactory(Type mappingType, TypeInfoMapping innerMapping, Func mapper, bool copyPreferredFormat = false, bool? supportsReading = null, bool? 
supportsWriting = null) => (options, mapping, requiresDataTypeName) => { var resolvedInnerMapping = innerMapping; if (!DataTypeName.IsFullyQualified(innerMapping.DataTypeName.AsSpan())) resolvedInnerMapping = innerMapping with { DataTypeName = new DataTypeName(mapping.DataTypeName).Schema + "." + innerMapping.DataTypeName }; - var innerInfo = (PgResolverTypeInfo)innerMapping.Factory(options, resolvedInnerMapping, requiresDataTypeName); - var resolver = mapper(mapping, innerInfo); + var innerInfo = (PgProviderTypeInfo)innerMapping.Factory(options, resolvedInnerMapping, requiresDataTypeName); + var typeInfoProvider = mapper(mapping, innerInfo); var preferredFormat = copyPreferredFormat ? innerInfo.PreferredFormat : null; - var unboxedType = ComputeUnboxedType(defaultType: mappingType, resolver.TypeToConvert, mapping.Type); - var readingSupported = innerInfo.SupportsReading && (supportsReading ?? PgTypeInfo.GetDefaultSupportsReading(resolver.TypeToConvert, unboxedType)); + var unboxedType = ComputeUnboxedType(defaultType: mappingType, typeInfoProvider.TypeToConvert, mapping.Type); + var readingSupported = innerInfo.SupportsReading && (supportsReading ?? PgTypeInfo.GetDefaultSupportsReading(typeInfoProvider.TypeToConvert, unboxedType)); var writingSupported = innerInfo.SupportsWriting && (supportsWriting ?? true); // We include the data type name if the inner info did so as well. // This way we can rely on its logic around resolvedDataTypeName, including when it ignores that flag. PgTypeId? pgTypeId = innerInfo.PgTypeId is not null ? 
options.GetCanonicalTypeId(new DataTypeName(mapping.DataTypeName)) : null; - return new PgResolverTypeInfo(options, resolver, pgTypeId, unboxedType) + return new PgProviderTypeInfo(options, typeInfoProvider, pgTypeId, unboxedType) { PreferredFormat = preferredFormat, SupportsReading = readingSupported, @@ -306,26 +306,26 @@ public void AddType(string dataTypeName, TypeInfoFactory createInfo, Func info.GetResolution().Converter, copyPreferredFormat: true)) + CreateComposedFactory(typeof(T), mapping, static (_, info) => ((PgConcreteTypeInfo)info).Converter, copyPreferredFormat: true)) { MatchRequirement = mapping.MatchRequirement }); _items.Add(mapping); } - public void AddResolverType(string dataTypeName, TypeInfoFactory createInfo, bool isDefault = false) where T : class - => AddResolverType(dataTypeName, createInfo, GetDefaultConfigure(isDefault)); + public void AddProviderType(string dataTypeName, TypeInfoFactory createInfo, bool isDefault = false) where T : class + => AddProviderType(dataTypeName, createInfo, GetDefaultConfigure(isDefault)); - public void AddResolverType(string dataTypeName, TypeInfoFactory createInfo, MatchRequirement matchRequirement) where T : class - => AddResolverType(dataTypeName, createInfo, GetDefaultConfigure(matchRequirement)); + public void AddProviderType(string dataTypeName, TypeInfoFactory createInfo, MatchRequirement matchRequirement) where T : class + => AddProviderType(dataTypeName, createInfo, GetDefaultConfigure(matchRequirement)); - public void AddResolverType(string dataTypeName, TypeInfoFactory createInfo, Func? configure) where T : class + public void AddProviderType(string dataTypeName, TypeInfoFactory createInfo, Func? configure) where T : class { var mapping = new TypeInfoMapping(typeof(T), dataTypeName, createInfo); mapping = configure?.Invoke(mapping) ?? 
mapping; if (typeof(T) != typeof(object) && mapping.MatchRequirement is MatchRequirement.DataTypeName or MatchRequirement.Single && !TryGetMapping(typeof(object), mapping.DataTypeName, out _)) _items.Add(new TypeInfoMapping(typeof(object), dataTypeName, - CreateComposedFactory(typeof(T), mapping, static (_, info) => info.GetConverterResolver(), copyPreferredFormat: true)) + CreateComposedFactory(typeof(T), mapping, static (_, info) => PgProviderTypeInfo.GetProvider(info), copyPreferredFormat: true)) { MatchRequirement = mapping.MatchRequirement }); @@ -373,16 +373,16 @@ void AddArrayType(TypeInfoMapping elementMapping, Type type, Func(string elementDataTypeName) where TElement : class - => AddResolverArrayType(GetMapping(typeof(TElement), elementDataTypeName), suppressObjectMapping: false); + public void AddProviderArrayType(string elementDataTypeName) where TElement : class + => AddProviderArrayType(GetMapping(typeof(TElement), elementDataTypeName), suppressObjectMapping: false); - public void AddResolverArrayType(string elementDataTypeName, bool suppressObjectMapping) where TElement : class - => AddResolverArrayType(GetMapping(typeof(TElement), elementDataTypeName), suppressObjectMapping); + public void AddProviderArrayType(string elementDataTypeName, bool suppressObjectMapping) where TElement : class + => AddProviderArrayType(GetMapping(typeof(TElement), elementDataTypeName), suppressObjectMapping); - public void AddResolverArrayType(TypeInfoMapping elementMapping) where TElement : class - => AddResolverArrayType(elementMapping, suppressObjectMapping: false); + public void AddProviderArrayType(TypeInfoMapping elementMapping) where TElement : class + => AddProviderArrayType(elementMapping, suppressObjectMapping: false); - public void AddResolverArrayType(TypeInfoMapping elementMapping, bool suppressObjectMapping) where TElement : class + public void AddProviderArrayType(TypeInfoMapping elementMapping, bool suppressObjectMapping) where TElement : class { // Always 
use a predicate to match all dimensions. var arrayTypeMatchPredicate = GetArrayTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? (static type => type is null || type == typeof(TElement))); @@ -390,10 +390,10 @@ public void AddResolverArrayType(TypeInfoMapping elementMapping, bool var arrayDataTypeName = GetArrayDataTypeName(elementMapping.DataTypeName); - AddResolverArrayType(elementMapping, typeof(TElement[]), CreateArrayBasedConverterResolver, arrayTypeMatchPredicate, suppressObjectMapping: suppressObjectMapping || TryGetMapping(typeof(object), arrayDataTypeName, out _)); - AddResolverArrayType(elementMapping, typeof(IList), CreateListBasedConverterResolver, listTypeMatchPredicate, suppressObjectMapping: true); + AddProviderArrayType(elementMapping, typeof(TElement[]), CreateArrayBasedTypeInfoProvider, arrayTypeMatchPredicate, suppressObjectMapping: suppressObjectMapping || TryGetMapping(typeof(object), arrayDataTypeName, out _)); + AddProviderArrayType(elementMapping, typeof(IList), CreateListBasedTypeInfoProvider, listTypeMatchPredicate, suppressObjectMapping: true); - void AddResolverArrayType(TypeInfoMapping elementMapping, Type type, Func converter, Func? typeMatchPredicate = null, bool suppressObjectMapping = false) + void AddProviderArrayType(TypeInfoMapping elementMapping, Type type, Func converter, Func? 
typeMatchPredicate = null, bool suppressObjectMapping = false) { var arrayMapping = new TypeInfoMapping(type, arrayDataTypeName, CreateComposedFactory(type, elementMapping, converter, supportsReading: true)) { @@ -415,15 +415,15 @@ void AddResolverArrayType(TypeInfoMapping elementMapping, Type type, Func(string dataTypeName, TypeInfoFactory createInfo, bool isDefault = false) where T : struct => AddStructType(typeof(T), typeof(T?), dataTypeName, createInfo, - static (_, innerInfo) => new NullableConverter(innerInfo.GetResolution().GetConverter()), GetDefaultConfigure(isDefault)); + static (_, innerInfo) => new NullableConverter((PgConverter)((PgConcreteTypeInfo)innerInfo).Converter), GetDefaultConfigure(isDefault)); public void AddStructType(string dataTypeName, TypeInfoFactory createInfo, MatchRequirement matchRequirement) where T : struct => AddStructType(typeof(T), typeof(T?), dataTypeName, createInfo, - static (_, innerInfo) => new NullableConverter(innerInfo.GetResolution().GetConverter()), GetDefaultConfigure(matchRequirement)); + static (_, innerInfo) => new NullableConverter((PgConverter)((PgConcreteTypeInfo)innerInfo).Converter), GetDefaultConfigure(matchRequirement)); public void AddStructType(string dataTypeName, TypeInfoFactory createInfo, Func? configure) where T : struct => AddStructType(typeof(T), typeof(T?), dataTypeName, createInfo, - static (_, innerInfo) => new NullableConverter(innerInfo.GetResolution().GetConverter()), configure); + static (_, innerInfo) => new NullableConverter((PgConverter)((PgConcreteTypeInfo)innerInfo).Converter), configure); // Lives outside to prevent capture of T. void AddStructType(Type type, Type nullableType, string dataTypeName, TypeInfoFactory createInfo, @@ -433,7 +433,7 @@ void AddStructType(Type type, Type nullableType, string dataTypeName, TypeInfoFa mapping = configure?.Invoke(mapping) ?? 
mapping; if (type != typeof(object) && mapping.MatchRequirement is MatchRequirement.DataTypeName or MatchRequirement.Single && !TryGetMapping(typeof(object), mapping.DataTypeName, out _)) _items.Add(new TypeInfoMapping(typeof(object), dataTypeName, - CreateComposedFactory(type, mapping, static (_, info) => info.GetResolution().Converter, copyPreferredFormat: true)) + CreateComposedFactory(type, mapping, static (_, info) => ((PgConcreteTypeInfo)info).Converter, copyPreferredFormat: true)) { MatchRequirement = mapping.MatchRequirement }); @@ -518,39 +518,39 @@ void AddStructArrayType(TypeInfoMapping elementMapping, TypeInfoMapping nullable }; }) { MatchRequirement = MatchRequirement.DataTypeName }); - PgTypeInfo CreateComposedPerInstance(PgTypeInfo innerTypeInfo, PgTypeInfo nullableInnerTypeInfo, string dataTypeName) + PgTypeInfo CreateComposedPerInstance(PgTypeInfo innerInfo, PgTypeInfo nullableInnerInfo, string dataTypeName) { var converter = new PolymorphicArrayConverter( - innerTypeInfo.GetResolution().GetConverter(), - nullableInnerTypeInfo.GetResolution().GetConverter()); + (PgConverter)((PgConcreteTypeInfo)innerInfo).Converter, + (PgConverter)((PgConcreteTypeInfo)nullableInnerInfo).Converter); - return new PgTypeInfo(innerTypeInfo.Options, converter, - innerTypeInfo.Options.GetCanonicalTypeId(new DataTypeName(dataTypeName)), unboxedType: typeof(Array)) { SupportsWriting = false }; + return new PgConcreteTypeInfo(innerInfo.Options, converter, + innerInfo.Options.GetCanonicalTypeId(new DataTypeName(dataTypeName)), unboxedType: typeof(Array)) { SupportsWriting = false }; } } - public void AddResolverStructType(string dataTypeName, TypeInfoFactory createInfo, bool isDefault = false) where T : struct - => AddResolverStructType(typeof(T), typeof(T?), dataTypeName, createInfo, - static (_, innerInfo) => new NullableConverterResolver(innerInfo), GetDefaultConfigure(isDefault)); + public void AddProviderStructType(string dataTypeName, TypeInfoFactory createInfo, bool 
isDefault = false) where T : struct + => AddProviderStructType(typeof(T), typeof(T?), dataTypeName, createInfo, + static (_, innerInfo) => new NullableTypeInfoProvider(innerInfo), GetDefaultConfigure(isDefault)); - public void AddResolverStructType(string dataTypeName, TypeInfoFactory createInfo, MatchRequirement matchRequirement) where T : struct - => AddResolverStructType(typeof(T), typeof(T?), dataTypeName, createInfo, - static (_, innerInfo) => new NullableConverterResolver(innerInfo), GetDefaultConfigure(matchRequirement)); + public void AddProviderStructType(string dataTypeName, TypeInfoFactory createInfo, MatchRequirement matchRequirement) where T : struct + => AddProviderStructType(typeof(T), typeof(T?), dataTypeName, createInfo, + static (_, innerInfo) => new NullableTypeInfoProvider(innerInfo), GetDefaultConfigure(matchRequirement)); - public void AddResolverStructType(string dataTypeName, TypeInfoFactory createInfo, Func? configure) where T : struct - => AddResolverStructType(typeof(T), typeof(T?), dataTypeName, createInfo, - static (_, innerInfo) => new NullableConverterResolver(innerInfo), configure); + public void AddProviderStructType(string dataTypeName, TypeInfoFactory createInfo, Func? configure) where T : struct + => AddProviderStructType(typeof(T), typeof(T?), dataTypeName, createInfo, + static (_, innerInfo) => new NullableTypeInfoProvider(innerInfo), configure); // Lives outside to prevent capture of T. - void AddResolverStructType(Type type, Type nullableType, string dataTypeName, TypeInfoFactory createInfo, - Func nullableConverter, Func? configure) + void AddProviderStructType(Type type, Type nullableType, string dataTypeName, TypeInfoFactory createInfo, + Func nullableConverter, Func? configure) { var mapping = new TypeInfoMapping(type, dataTypeName, createInfo); mapping = configure?.Invoke(mapping) ?? 
mapping; if (type != typeof(object) && mapping.MatchRequirement is MatchRequirement.DataTypeName or MatchRequirement.Single && !TryGetMapping(typeof(object), mapping.DataTypeName, out _)) _items.Add(new TypeInfoMapping(typeof(object), dataTypeName, - CreateComposedFactory(type, mapping, static (_, info) => info.GetConverterResolver(), copyPreferredFormat: true)) + CreateComposedFactory(type, mapping, static (_, info) => PgProviderTypeInfo.GetProvider(info), copyPreferredFormat: true)) { MatchRequirement = mapping.MatchRequirement }); @@ -567,16 +567,16 @@ void AddResolverStructType(Type type, Type nullableType, string dataTypeName, Ty }); } - public void AddResolverStructArrayType(string elementDataTypeName) where TElement : struct - => AddResolverStructArrayType(GetMapping(typeof(TElement), elementDataTypeName), GetMapping(typeof(TElement?), elementDataTypeName), suppressObjectMapping: false); + public void AddProviderStructArrayType(string elementDataTypeName) where TElement : struct + => AddProviderStructArrayType(GetMapping(typeof(TElement), elementDataTypeName), GetMapping(typeof(TElement?), elementDataTypeName), suppressObjectMapping: false); - public void AddResolverStructArrayType(string elementDataTypeName, bool suppressObjectMapping) where TElement : struct - => AddResolverStructArrayType(GetMapping(typeof(TElement), elementDataTypeName), GetMapping(typeof(TElement?), elementDataTypeName), suppressObjectMapping); + public void AddProviderStructArrayType(string elementDataTypeName, bool suppressObjectMapping) where TElement : struct + => AddProviderStructArrayType(GetMapping(typeof(TElement), elementDataTypeName), GetMapping(typeof(TElement?), elementDataTypeName), suppressObjectMapping); - public void AddResolverStructArrayType(TypeInfoMapping elementMapping, TypeInfoMapping nullableElementMapping) where TElement : struct - => AddResolverStructArrayType(elementMapping, nullableElementMapping, suppressObjectMapping: false); + public void 
AddProviderStructArrayType(TypeInfoMapping elementMapping, TypeInfoMapping nullableElementMapping) where TElement : struct + => AddProviderStructArrayType(elementMapping, nullableElementMapping, suppressObjectMapping: false); - public void AddResolverStructArrayType(TypeInfoMapping elementMapping, TypeInfoMapping nullableElementMapping, bool suppressObjectMapping) where TElement : struct + public void AddProviderStructArrayType(TypeInfoMapping elementMapping, TypeInfoMapping nullableElementMapping, bool suppressObjectMapping) where TElement : struct { // Always use a predicate to match all dimensions. var arrayTypeMatchPredicate = GetArrayTypeMatchPredicate(elementMapping.TypeMatchPredicate ?? (static type => type is null || type == typeof(TElement))); @@ -588,19 +588,19 @@ public void AddResolverStructArrayType(TypeInfoMapping elementMapping, var arrayDataTypeName = GetArrayDataTypeName(elementMapping.DataTypeName); - AddResolverStructArrayType(elementMapping, nullableElementMapping, typeof(TElement[]), typeof(TElement?[]), - CreateArrayBasedConverterResolver, - CreateArrayBasedConverterResolver, suppressObjectMapping: suppressObjectMapping || TryGetMapping(typeof(object), arrayDataTypeName, out _), arrayTypeMatchPredicate, nullableArrayTypeMatchPredicate); + AddProviderStructArrayType(elementMapping, nullableElementMapping, typeof(TElement[]), typeof(TElement?[]), + CreateArrayBasedTypeInfoProvider, + CreateArrayBasedTypeInfoProvider, suppressObjectMapping: suppressObjectMapping || TryGetMapping(typeof(object), arrayDataTypeName, out _), arrayTypeMatchPredicate, nullableArrayTypeMatchPredicate); // Don't add the object converter for the list based converter. 
- AddResolverStructArrayType(elementMapping, nullableElementMapping, typeof(IList), typeof(IList), - CreateListBasedConverterResolver, - CreateListBasedConverterResolver, suppressObjectMapping: true, listTypeMatchPredicate, nullableListTypeMatchPredicate); + AddProviderStructArrayType(elementMapping, nullableElementMapping, typeof(IList), typeof(IList), + CreateListBasedTypeInfoProvider, + CreateListBasedTypeInfoProvider, suppressObjectMapping: true, listTypeMatchPredicate, nullableListTypeMatchPredicate); } // Lives outside to prevent capture of TElement. - void AddResolverStructArrayType(TypeInfoMapping elementMapping, TypeInfoMapping nullableElementMapping, Type type, Type nullableType, - Func converter, Func nullableConverter, + void AddProviderStructArrayType(TypeInfoMapping elementMapping, TypeInfoMapping nullableElementMapping, Type type, Type nullableType, + Func converter, Func nullableConverter, bool suppressObjectMapping, Func? typeMatchPredicate, Func? nullableTypeMatchPredicate) { var arrayDataTypeName = GetArrayDataTypeName(elementMapping.DataTypeName); @@ -633,28 +633,28 @@ void AddResolverStructArrayType(TypeInfoMapping elementMapping, TypeInfoMapping _ => throw new ArgumentOutOfRangeException() }) { MatchRequirement = MatchRequirement.DataTypeName }); - PgTypeInfo CreateComposedPerInstance(PgTypeInfo innerTypeInfo, PgTypeInfo nullableInnerTypeInfo, string dataTypeName) + PgTypeInfo CreateComposedPerInstance(PgTypeInfo innerInfo, PgTypeInfo nullableInnerInfo, string dataTypeName) { - var resolver = - new PolymorphicArrayConverterResolver((PgResolverTypeInfo)innerTypeInfo, - (PgResolverTypeInfo)nullableInnerTypeInfo); + var provider = + new PolymorphicArrayTypeInfoProvider((PgProviderTypeInfo)innerInfo, + (PgProviderTypeInfo)nullableInnerInfo); - return new PgResolverTypeInfo(innerTypeInfo.Options, resolver, - innerTypeInfo.Options.GetCanonicalTypeId(new DataTypeName(dataTypeName)), unboxedType: typeof(Array)) { SupportsWriting = false }; + return 
new PgProviderTypeInfo(innerInfo.Options, provider, + innerInfo.Options.GetCanonicalTypeId(new DataTypeName(dataTypeName)), unboxedType: typeof(Array)) { SupportsWriting = false }; } } - public void AddPolymorphicResolverArrayType(string elementDataTypeName, Func> elementToArrayConverterFactory) - => AddPolymorphicResolverArrayType(GetMapping(typeof(object), elementDataTypeName), elementToArrayConverterFactory); + public void AddPolymorphicProviderArrayType(string elementDataTypeName, Func> elementToArrayConverterFactory) + => AddPolymorphicProviderArrayType(GetMapping(typeof(object), elementDataTypeName), elementToArrayConverterFactory); - public void AddPolymorphicResolverArrayType(TypeInfoMapping elementMapping, Func> elementToArrayConverterFactory) + public void AddPolymorphicProviderArrayType(TypeInfoMapping elementMapping, Func> elementToArrayConverterFactory) { - AddPolymorphicResolverArrayType(elementMapping, typeof(object), - (mapping, elemInfo) => new ArrayPolymorphicConverterResolver( - elemInfo.Options.GetCanonicalTypeId(new DataTypeName(mapping.DataTypeName)), elemInfo, elementToArrayConverterFactory(elemInfo.Options)) + AddPolymorphicProviderArrayType(elementMapping, typeof(object), + (mapping, elementInfo) => new PolymorphicArrayTypeInfoProvider( + elementInfo.Options.GetCanonicalTypeId(new DataTypeName(mapping.DataTypeName)), elementInfo, elementToArrayConverterFactory(elementInfo.Options)) , null); - void AddPolymorphicResolverArrayType(TypeInfoMapping elementMapping, Type type, Func converter, Func? typeMatchPredicate) + void AddPolymorphicProviderArrayType(TypeInfoMapping elementMapping, Type type, Func converter, Func? 
typeMatchPredicate) { var arrayDataTypeName = GetArrayDataTypeName(elementMapping.DataTypeName); var mapping = new TypeInfoMapping(type, arrayDataTypeName, @@ -708,42 +708,42 @@ static string GetArrayDataTypeName(string dataTypeName) static ArrayConverter CreateArrayBasedConverter(TypeInfoMapping mapping, PgTypeInfo elemInfo) { if (!elemInfo.IsBoxing) - return ArrayConverter.CreateArrayBased(elemInfo.GetResolution(), mapping.Type); + return ArrayConverter.CreateArrayBased((PgConcreteTypeInfo)elemInfo, mapping.Type); - ThrowBoxingNotSupported(resolver: false); + ThrowBoxingNotSupported(provider: false); return default; } static ArrayConverter> CreateListBasedConverter(TypeInfoMapping mapping, PgTypeInfo elemInfo) { if (!elemInfo.IsBoxing) - return ArrayConverter>.CreateListBased(elemInfo.GetResolution()); + return ArrayConverter>.CreateListBased((PgConcreteTypeInfo)elemInfo); - ThrowBoxingNotSupported(resolver: false); + ThrowBoxingNotSupported(provider: false); return default; } - static ArrayConverterResolver CreateArrayBasedConverterResolver(TypeInfoMapping mapping, PgResolverTypeInfo elemInfo) + static ArrayTypeInfoProvider CreateArrayBasedTypeInfoProvider(TypeInfoMapping mapping, PgProviderTypeInfo elemInfo) { if (!elemInfo.IsBoxing) - return new ArrayConverterResolver(elemInfo, mapping.Type); + return new ArrayTypeInfoProvider(elemInfo, mapping.Type); - ThrowBoxingNotSupported(resolver: true); + ThrowBoxingNotSupported(provider: true); return default; } - static ArrayConverterResolver, TElement> CreateListBasedConverterResolver(TypeInfoMapping mapping, PgResolverTypeInfo elemInfo) + static ArrayTypeInfoProvider, TElement> CreateListBasedTypeInfoProvider(TypeInfoMapping mapping, PgProviderTypeInfo elemInfo) { if (!elemInfo.IsBoxing) - return new ArrayConverterResolver, TElement>(elemInfo, mapping.Type); + return new ArrayTypeInfoProvider, TElement>(elemInfo, mapping.Type); - ThrowBoxingNotSupported(resolver: true); + ThrowBoxingNotSupported(provider: true); 
return default; } [DoesNotReturn] - static void ThrowBoxingNotSupported(bool resolver) - => throw new InvalidOperationException($"Boxing converters are not supported, manually construct a mapping over a casting converter{(resolver ? " resolver" : "")} instead."); + static void ThrowBoxingNotSupported(bool provider) + => throw new InvalidOperationException($"Boxing converters are not supported, manually construct a mapping over a casting converter{(provider ? " type info provider" : "")} instead."); } [Experimental(NpgsqlDiagnostics.ConvertersExperimental)] @@ -780,7 +780,7 @@ internal static PostgresType GetPgType(this TypeInfoMapping mapping, PgSerialize /// The converter to create a PgTypeInfo for. /// The created info instance. public static PgTypeInfo CreateInfo(this TypeInfoMapping mapping, PgSerializerOptions options, PgConverter converter) - => new(options, converter, new DataTypeName(mapping.DataTypeName)) + => new PgConcreteTypeInfo(options, converter, new DataTypeName(mapping.DataTypeName)) { PreferredFormat = null, SupportsWriting = true @@ -793,10 +793,10 @@ public static PgTypeInfo CreateInfo(this TypeInfoMapping mapping, PgSerializerOp /// The options to use. /// The converter to create a PgTypeInfo for. /// Whether to prefer a specific data format for this info, when null it defaults to the most suitable format. - /// Whether the converters returned from the given converter resolver support writing. + /// Whether the converters returned from the given provider support writing. /// The created info instance. public static PgTypeInfo CreateInfo(this TypeInfoMapping mapping, PgSerializerOptions options, PgConverter converter, DataFormat? 
preferredFormat = null, bool supportsWriting = true) - => new(options, converter, new DataTypeName(mapping.DataTypeName)) + => new PgConcreteTypeInfo(options, converter, new DataTypeName(mapping.DataTypeName)) { PreferredFormat = preferredFormat, SupportsWriting = supportsWriting @@ -805,31 +805,31 @@ public static PgTypeInfo CreateInfo(this TypeInfoMapping mapping, PgSerializerOp // NOTE: This method exists since 9.0 to be able to deprecate the method below that has optional arguments in 10.0 (potentially removing it directly or in 11.0). // It reduces how binary breaking that change will be if this method would not be there to be picked for the most common invocations. /// - /// Creates a PgResolverTypeInfo from a mapping, options, and a converter resolver. + /// Creates a PgProviderTypeInfo from a mapping, options, and a provider. /// /// The mapping to create an info for. /// The options to use. - /// The resolver to create a PgResolverTypeInfo for. - /// Whether to pass mapping.DataTypeName to the PgResolverTypeInfo constructor, mandatory when TypeInfoFactory(..., requiresDataTypeName: true). + /// The provider to create a PgProviderTypeInfo for. + /// Whether to pass mapping.DataTypeName to the PgProviderTypeInfo constructor, mandatory when TypeInfoFactory(..., requiresDataTypeName: true). /// The created info instance. - public static PgResolverTypeInfo CreateInfo(this TypeInfoMapping mapping, PgSerializerOptions options, PgConverterResolver resolver, bool includeDataTypeName) - => new(options, resolver, includeDataTypeName ? new DataTypeName(mapping.DataTypeName) : null) + public static PgProviderTypeInfo CreateInfo(this TypeInfoMapping mapping, PgSerializerOptions options, PgConcreteTypeInfoProvider provider, bool includeDataTypeName) + => new(options, provider, includeDataTypeName ? new DataTypeName(mapping.DataTypeName) : null) { PreferredFormat = null }; /// - /// Creates a PgResolverTypeInfo from a mapping, options, and a converter resolver. 
+ /// Creates a PgProviderTypeInfo from a mapping, options, and a provider. /// /// The mapping to create an info for. /// The options to use. - /// The converter resolver to create a PgResolverTypeInfo for. - /// Whether to pass mapping.DataTypeName to the PgResolverTypeInfo constructor, mandatory when TypeInfoFactory(..., requiresDataTypeName: true). + /// The provider to create a PgProviderTypeInfo for. + /// Whether to pass mapping.DataTypeName to the PgProviderTypeInfo constructor, mandatory when TypeInfoFactory(..., requiresDataTypeName: true). /// Whether to prefer a specific data format for this info, when null it defaults to the most suitable format. - /// Whether the converters returned from the given converter resolver support writing. + /// Whether the converters returned from the given provider support writing. /// The created info instance. - public static PgResolverTypeInfo CreateInfo(this TypeInfoMapping mapping, PgSerializerOptions options, PgConverterResolver resolver, bool includeDataTypeName, DataFormat? preferredFormat = null, bool supportsWriting = true) - => new(options, resolver, includeDataTypeName ? new DataTypeName(mapping.DataTypeName) : null) + public static PgProviderTypeInfo CreateInfo(this TypeInfoMapping mapping, PgSerializerOptions options, PgConcreteTypeInfoProvider provider, bool includeDataTypeName, DataFormat? preferredFormat = null, bool supportsWriting = true) + => new(options, provider, includeDataTypeName ? new DataTypeName(mapping.DataTypeName) : null) { PreferredFormat = preferredFormat, SupportsWriting = supportsWriting diff --git a/src/Npgsql/NpgsqlBinaryExporter.cs b/src/Npgsql/NpgsqlBinaryExporter.cs index 033517c79d..7d3e3cd852 100644 --- a/src/Npgsql/NpgsqlBinaryExporter.cs +++ b/src/Npgsql/NpgsqlBinaryExporter.cs @@ -361,7 +361,7 @@ PgConverterInfo CreateConverterInfo(Type type, NpgsqlDbType? npgsqlDbType = null // Binary export has no type info so we only do caller-directed interpretation of data. 
return info.Bind(new Field("?", - info.PgTypeId ?? ((PgResolverTypeInfo)info).GetDefaultResolution(null).PgTypeId, -1), DataFormat.Binary); + info.PgTypeId ?? ((PgProviderTypeInfo)info).GetDefaultConcreteTypeInfo(null).PgTypeId, -1), DataFormat.Binary); PgTypeId GetRepresentationalOrDefault(string dataTypeName) { diff --git a/src/Npgsql/NpgsqlParameter.cs b/src/Npgsql/NpgsqlParameter.cs index 8930724c92..69d4e29816 100644 --- a/src/Npgsql/NpgsqlParameter.cs +++ b/src/Npgsql/NpgsqlParameter.cs @@ -660,14 +660,14 @@ internal void ResolveTypeInfo(PgSerializerOptions options, IDbTypeResolver? dbTy } // This step isn't part of BindValue because we need to know the PgTypeId beforehand for things like SchemaOnly with null values. - // We never reuse resolutions for resolvers across executions as a mutable value itself may influence the result. + // We never reuse concrete type infos from providers across executions as a mutable value itself may influence the result. // TODO we could expose a property on a Converter/TypeInfo to indicate whether it's immutable, at that point we can reuse. - if (!previouslyResolved || typeInfo!.IsResolverInfo) + if (!previouslyResolved || typeInfo is not PgConcreteTypeInfo) { ResetBindingInfo(); - var resolution = ResolveConverter(typeInfo!); - Converter = resolution.Converter; - PgTypeId = resolution.PgTypeId; + var concreteTypeInfo = GetConcreteTypeInfo(typeInfo!); + Converter = concreteTypeInfo.Converter; + PgTypeId = concreteTypeInfo.PgTypeId; } void ThrowNoTypeInfo() @@ -685,10 +685,20 @@ void ThrowNotSupported(string dataTypeName) } // Pull from Value so we also support object typed generic params. 
- private protected virtual PgConverterResolution ResolveConverter(PgTypeInfo typeInfo) + private protected virtual PgConcreteTypeInfo GetConcreteTypeInfo(PgTypeInfo typeInfo) { _asObject = true; - return typeInfo.GetObjectResolution(Value); + return typeInfo.GetObjectConcreteTypeInfo(Value, out _writeState); + } + + /// Dispose write state produced during ResolveTypeInfo when Bind won't follow (e.g. SchemaOnly). + internal void DisposeResolutionWriteState() + { + if (_writeState is { } ws) + { + _writeState = null; + TypeInfo?.DisposeWriteState(ws); + } } /// Bind the current value to the type info, truncate (if applicable), take its size, and do any final validation before writing. @@ -759,7 +769,8 @@ private protected virtual void BindCore(DataFormat? formatPreference, bool allow if (_useSubStream && value is not null) value = _subStream = new SubReadStream((Stream)value, _size); - if (TypeInfo!.BindObject(Converter!, value, out var size, out _writeState, out var dataFormat, formatPreference) is { } info) + Size size = default; + if (TypeInfo!.BindObject(Converter!, value, ref size, ref _writeState, out var dataFormat, formatPreference) is { } info) { WriteSize = size; _bufferRequirement = info.BufferRequirement; diff --git a/src/Npgsql/NpgsqlParameterCollection.cs b/src/Npgsql/NpgsqlParameterCollection.cs index 51a40e6648..ac38f474d9 100644 --- a/src/Npgsql/NpgsqlParameterCollection.cs +++ b/src/Npgsql/NpgsqlParameterCollection.cs @@ -760,11 +760,24 @@ internal void ProcessParameters(NpgsqlDataSource.ReloadableState reloadableState break; } - p.ResolveTypeInfo(reloadableState.SerializerOptions, reloadableState.DbTypeResolver); - - if (validateValues) + // Resolution can produce a provider-level write state that normally gets consumed by Bind. + // If Bind is skipped (SchemaOnly) or either step throws, that state needs to be cleaned up + // here. 
wasBound tracks whether Bind completed successfully and took ownership; every other + // exit path (SchemaOnly, ResolveTypeInfo throws, Bind throws) disposes. + var wasBound = false; + try + { + p.ResolveTypeInfo(reloadableState.SerializerOptions, reloadableState.DbTypeResolver); + if (validateValues) + { + p.Bind(out _, out _); + wasBound = true; + } + } + finally { - p.Bind(out _, out _); + if (!wasBound) + p.DisposeResolutionWriteState(); } } } diff --git a/src/Npgsql/NpgsqlParameter`.cs b/src/Npgsql/NpgsqlParameter`.cs index 2f1e1b24bc..90cbe7f737 100644 --- a/src/Npgsql/NpgsqlParameter`.cs +++ b/src/Npgsql/NpgsqlParameter`.cs @@ -84,13 +84,13 @@ public NpgsqlParameter(string parameterName, DbType dbType) private protected override void SetOutputValueCore(NpgsqlDataReader reader, int ordinal) => TypedValue = reader.GetFieldValue(ordinal); - private protected override PgConverterResolution ResolveConverter(PgTypeInfo typeInfo) + private protected override PgConcreteTypeInfo GetConcreteTypeInfo(PgTypeInfo typeInfo) { if (typeof(T) == typeof(object) || TypeInfo!.IsBoxing) - return base.ResolveConverter(typeInfo); + return base.GetConcreteTypeInfo(typeInfo); _asObject = false; - return typeInfo.GetResolution(TypedValue); + return typeInfo.GetConcreteTypeInfo(TypedValue, out _writeState); } // We ignore allowNullReference, it's just there to control the base implementation. @@ -104,7 +104,7 @@ private protected override void BindCore(DataFormat? 
formatPreference, bool allo } var value = TypedValue; - if (TypeInfo!.Bind(Converter!.UnsafeDowncast(), value, out var size, out _writeState, out var dataFormat, formatPreference) is { } info) + if (TypeInfo!.Bind(Converter!.UnsafeDowncast(), value, out var size, ref _writeState, out var dataFormat, formatPreference) is { } info) { WriteSize = size; _bufferRequirement = info.BufferRequirement; diff --git a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs index ef3981d22f..263b8aa55a 100644 --- a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs +++ b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs @@ -101,10 +101,16 @@ PgSerializerOptions TypeMappingOptions try { var typeInfo = TypeMappingOptions.GetTypeInfoInternal(type, null); - if (typeInfo is PgResolverTypeInfo info) - dataTypeName = info.GetObjectResolution(value).PgTypeId.DataTypeName; + if (typeInfo is PgProviderTypeInfo providerInfo) + { + dataTypeName = providerInfo.GetObjectConcreteTypeInfo(value, out var state).PgTypeId.DataTypeName; + if (state is not null) + providerInfo.DisposeWriteState(state); + } else - dataTypeName = typeInfo?.GetResolution().PgTypeId.DataTypeName; + { + dataTypeName = ((PgConcreteTypeInfo?)typeInfo)?.PgTypeId.DataTypeName; + } } catch { diff --git a/test/Npgsql.Tests/CopyTests.cs b/test/Npgsql.Tests/CopyTests.cs index 6d1421fe82..6a6a8e5368 100644 --- a/test/Npgsql.Tests/CopyTests.cs +++ b/test/Npgsql.Tests/CopyTests.cs @@ -605,7 +605,7 @@ public async Task ReadZeroSizedColumns() } [Test] - public async Task ReadConverterResolverType() + public async Task ReadTypeInfoProviderType() { using var conn = await OpenConnectionAsync(); diff --git a/test/Npgsql.Tests/ReaderTests.cs b/test/Npgsql.Tests/ReaderTests.cs index babd79977d..92187756bc 100644 --- a/test/Npgsql.Tests/ReaderTests.cs +++ b/test/Npgsql.Tests/ReaderTests.cs @@ -2624,7 +2624,7 @@ sealed class Resolver(bool safe) : IPgTypeInfoResolver public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) { if (dataTypeName == DataTypeNames.Int4 && (type == typeof(int) || type is null)) - return new(options, new ExplodingTypeHandler(safe), DataTypeNames.Int4); + return new PgConcreteTypeInfo(options, new ExplodingTypeHandler(safe), DataTypeNames.Int4); return null; } diff --git a/test/Npgsql.Tests/TypeMapperTests.cs b/test/Npgsql.Tests/TypeMapperTests.cs index 8fd8f66346..6709b1a507 100644 --- a/test/Npgsql.Tests/TypeMapperTests.cs +++ b/test/Npgsql.Tests/TypeMapperTests.cs @@ -195,7 +195,7 @@ sealed class Resolver : IPgTypeInfoResolver { if (type == typeof(string) || dataTypeName?.UnqualifiedName == "citext") if (options.DatabaseInfo.TryGetPostgresTypeByName("citext", out var pgType)) - return new(options, TextConverter.CreateStringConverter(options.TextEncoding), options.ToCanonicalTypeId(pgType)); + return new PgConcreteTypeInfo(options, TextConverter.CreateStringConverter(options.TextEncoding), options.ToCanonicalTypeId(pgType)); return null; } @@ -238,7 +238,7 @@ sealed class GuidTextTypeInfoResolver(string typeName) : IPgTypeInfoResolver { if (type == typeof(Guid) || dataTypeName?.UnqualifiedName == typeName) if (options.DatabaseInfo.TryGetPostgresTypeByName(typeName, out var pgType)) - return new(options, new GuidTextConverter(options.TextEncoding), options.ToCanonicalTypeId(pgType)); + return new PgConcreteTypeInfo(options, new GuidTextConverter(options.TextEncoding), options.ToCanonicalTypeId(pgType)); return null; } diff --git a/test/Npgsql.Tests/Types/ArrayTests.cs b/test/Npgsql.Tests/Types/ArrayTests.cs index fa69d2386b..a6a272c133 100644 --- a/test/Npgsql.Tests/Types/ArrayTests.cs +++ b/test/Npgsql.Tests/Types/ArrayTests.cs @@ -155,7 +155,7 @@ public async Task Value_type_array_nullabilities(ArrayNullabilityMode mode) [TestCase(ArrayNullabilityMode.Always)] [TestCase(ArrayNullabilityMode.Never)] [TestCase(ArrayNullabilityMode.PerInstance)] - public async Task 
Value_type_array_nullabilities_converter_resolver(ArrayNullabilityMode mode) + public async Task Value_type_array_nullabilities_type_info_provider(ArrayNullabilityMode mode) { await using var dataSource = CreateDataSource(csb => { diff --git a/test/Npgsql.Tests/Types/CompositeTests.cs b/test/Npgsql.Tests/Types/CompositeTests.cs index d9c1db253d..db9814dae0 100644 --- a/test/Npgsql.Tests/Types/CompositeTests.cs +++ b/test/Npgsql.Tests/Types/CompositeTests.cs @@ -383,7 +383,7 @@ await AssertType( } [Test] - public async Task Composite_containing_converter_resolver_type() + public async Task Composite_containing_type_info_provider_type() { await using var adminConnection = await OpenConnectionAsync(); var compositeType = await GetTempTypeName(adminConnection); @@ -393,13 +393,13 @@ await adminConnection.ExecuteNonQueryAsync($@" var dataSourceBuilder = CreateDataSourceBuilder(); dataSourceBuilder.ConnectionStringBuilder.Timezone = "Europe/Berlin"; - dataSourceBuilder.MapComposite(compositeType); + dataSourceBuilder.MapComposite(compositeType); await using var dataSource = dataSourceBuilder.Build(); await using var connection = await dataSource.OpenConnectionAsync(); await AssertType( connection, - new SomeCompositeWithConverterResolverType { DateTimes = [new DateTime(DateTime.UnixEpoch.Ticks, DateTimeKind.Unspecified), new DateTime(DateTime.UnixEpoch.Ticks, DateTimeKind.Unspecified).AddDays(1) + new SomeCompositeWithTypeInfoProviderType { DateTimes = [new DateTime(DateTime.UnixEpoch.Ticks, DateTimeKind.Unspecified), new DateTime(DateTime.UnixEpoch.Ticks, DateTimeKind.Unspecified).AddDays(1) ] }, """("{""1970-01-01 00:00:00"",""1970-01-02 00:00:00""}")""", @@ -409,7 +409,7 @@ await AssertType( } [Test] - public async Task Composite_containing_converter_resolver_type_throws() + public async Task Composite_containing_type_info_provider_type_throws() { await using var adminConnection = await OpenConnectionAsync(); var compositeType = await GetTempTypeName(adminConnection); 
@@ -419,19 +419,79 @@ await adminConnection.ExecuteNonQueryAsync($@" var dataSourceBuilder = CreateDataSourceBuilder(); dataSourceBuilder.ConnectionStringBuilder.Timezone = "Europe/Berlin"; - dataSourceBuilder.MapComposite(compositeType); + dataSourceBuilder.MapComposite(compositeType); await using var dataSource = dataSourceBuilder.Build(); await using var connection = await dataSource.OpenConnectionAsync(); Assert.ThrowsAsync(() => AssertType( connection, - new SomeCompositeWithConverterResolverType { DateTimes = [DateTime.UnixEpoch] }, // UTC DateTime + new SomeCompositeWithTypeInfoProviderType { DateTimes = [DateTime.UnixEpoch] }, // UTC DateTime """("{""1970-01-01 01:00:00"",""1970-01-02 01:00:00""}")""", compositeType, dataTypeInference: DataTypeInference.Nothing, comparer: (actual, expected) => actual.DateTimes!.SequenceEqual(expected.DateTimes!))); } + // A composite whose only provider-backed field has a fixed-size default concrete (plain timestamp, + // 8 bytes). Exercises the path where the composite's combined write size is exact but gets clamped + // externally because a field defers to a provider: GetSize fires for bind-time resolution, observes + // that no field produced write state, skips the WriteState allocation, and Write proceeds to call the relevant converter. 
+ [Test] + public async Task Composite_containing_fixed_size_type_info_provider_field() + { + await using var adminConnection = await OpenConnectionAsync(); + var compositeType = await GetTempTypeName(adminConnection); + + await adminConnection.ExecuteNonQueryAsync($@" +CREATE TYPE {compositeType} AS (id int, created_at timestamp)"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.MapComposite(compositeType); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + await AssertType( + connection, + new SomeCompositeWithFixedSizeTypeInfoProviderField + { + Id = 42, + CreatedAt = new DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Unspecified) + }, + """(42,"1970-01-01 00:00:00")""", + compositeType, + dataTypeInference: DataTypeInference.Nothing, + comparer: (actual, expected) => actual.Id == expected.Id && actual.CreatedAt == expected.CreatedAt); + } + + // Companion to the above — confirms that deterministic provider-level errors (DateTime kind + // mismatch against plain timestamp) still surface when the field is fixed-size, now via the + // bind-time GetSize checkpoint instead of the first Write. 
+ [Test] + public async Task Composite_containing_fixed_size_type_info_provider_field_throws() + { + await using var adminConnection = await OpenConnectionAsync(); + var compositeType = await GetTempTypeName(adminConnection); + + await adminConnection.ExecuteNonQueryAsync($@" +CREATE TYPE {compositeType} AS (id int, created_at timestamp)"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.MapComposite(compositeType); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + Assert.ThrowsAsync(() => AssertType( + connection, + new SomeCompositeWithFixedSizeTypeInfoProviderField + { + Id = 42, + CreatedAt = DateTime.UnixEpoch // UTC — incompatible with plain timestamp + }, + """(42,"1970-01-01 00:00:00")""", + compositeType, + dataTypeInference: DataTypeInference.Nothing)); + } + [Test, IssueLink("https://github.com/npgsql/npgsql/issues/990")] public async Task Table_as_composite([Values] bool enabled) { @@ -746,11 +806,17 @@ class SomeCompositeWithIPAddress public IPAddress? Address { get; set; } } - class SomeCompositeWithConverterResolverType + class SomeCompositeWithTypeInfoProviderType { public DateTime[]? 
DateTimes { get; set; } } + class SomeCompositeWithFixedSizeTypeInfoProviderField + { + public int Id { get; set; } + public DateTime CreatedAt { get; set; } + } + record NameTranslationComposite { public int Simple { get; set; } diff --git a/test/Npgsql.Tests/WriteStateTests.cs b/test/Npgsql.Tests/WriteStateTests.cs new file mode 100644 index 0000000000..b1235e4c45 --- /dev/null +++ b/test/Npgsql.Tests/WriteStateTests.cs @@ -0,0 +1,506 @@ +using System; +using System.Data; +using System.Threading.Tasks; +using Npgsql.Internal; +using Npgsql.Internal.Converters; +using Npgsql.Internal.Postgres; +using NpgsqlTypes; +using NUnit.Framework; + +namespace Npgsql.Tests; + +/// +/// Tests that pin the write-state propagation and disposal contracts between NpgsqlParameter, +/// PgTypeInfo providers, and the converters they produce. +/// +public class WriteStateTests : TestBase +{ + [Test] + public async Task Nullable_array_write_state_flows([Values] bool fixedSize) + { + // Verifies that provider-produced write state flows through IsDbNull and Write + // for nullable array elements, both fixed-size and variable-size. 
+ var tracker = new WriteStateTracker(); + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + dataSourceBuilder.EnableArrays(); + dataSourceBuilder.AddTypeInfoResolverFactory(new WriteStateTrackingResolverFactory(fixedSize, tracker)); + await using var dataSource = dataSourceBuilder.Build(); + await using var conn = await dataSource.OpenConnectionAsync(); + + var input = new[] { 1, 2, 3 }; + + await using var cmd = new NpgsqlCommand("SELECT @p", conn); + cmd.Parameters.Add(new NpgsqlParameter { ParameterName = "p", TypedValue = input, DataTypeName = "integer[]" }); + await cmd.ExecuteNonQueryAsync(); + + if (fixedSize) + Assert.That(tracker.IsDbNullWriteStateReceived, Is.True, "IsDbNullValue did not receive write state"); + Assert.That(tracker.WriteWriteStateReceived, Is.True, "Write did not receive write state"); + } + + [Test] + public async Task Object_array_write_state_flows_through_late_bound_element([Values] bool fixedSize) + { + // Verifies write state propagation through two layers with mixed element shapes: + // outer: ArrayTypeInfoProvider + // inner: LateBoundTypeInfoProvider (object -> int or DBNull) + // int path -> WriteStateTrackingProvider (per-element wrapped WriteState) + // null path -> PgSerializerOptions.UnspecifiedDBNullTypeInfo (different concrete info entirely) + // The tracking int converter's IsDbNullValue and WriteCore must see the provider-produced + // write state after passing through the array + ObjectConverter layers, while the DBNull + // slots must flow through without disturbing the non-null slots. 
+ var tracker = new WriteStateTracker(); + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + dataSourceBuilder.EnableArrays(); + dataSourceBuilder.AddTypeInfoResolverFactory(new WriteStateTrackingResolverFactory(fixedSize, tracker)); + await using var dataSource = dataSourceBuilder.Build(); + await using var conn = await dataSource.OpenConnectionAsync(); + + var input = new object[] { 1, DBNull.Value, 2, DBNull.Value, 3 }; + + await using var cmd = new NpgsqlCommand("SELECT @p", conn); + cmd.Parameters.Add(new NpgsqlParameter { ParameterName = "p", TypedValue = input, DataTypeName = "integer[]" }); + await cmd.ExecuteNonQueryAsync(); + + Assert.That(tracker.IsDbNullWriteStateReceived, Is.True, + "IsDbNullValue did not receive write state after array + late-bound object layers"); + Assert.That(tracker.WriteWriteStateReceived, Is.True, + "Write did not receive write state after array + late-bound object layers"); + } + + [Test] + public async Task Range_write_state_flows() + { + // Verifies write state propagation through a range composition: + // RangeConverter -> tracking int subtype (GetSize populates writeState) + // The range converter must carry each bound's subtype state into BeginNestedWrite so the subtype's + // WriteCore observes the provider-produced sentinel. 
+ var tracker = new WriteStateTracker(); + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + dataSourceBuilder.EnableRanges(); + dataSourceBuilder.AddTypeInfoResolverFactory(new RangeWriteStateTrackingResolverFactory(tracker)); + await using var dataSource = dataSourceBuilder.Build(); + await using var conn = await dataSource.OpenConnectionAsync(); + + var input = new NpgsqlRange(1, 10); + + await using var cmd = new NpgsqlCommand("SELECT @p", conn); + cmd.Parameters.Add(new NpgsqlParameter> + { ParameterName = "p", TypedValue = input, DataTypeName = "int4range" }); + await cmd.ExecuteNonQueryAsync(); + + Assert.That(tracker.WriteWriteStateReceived, Is.True, + "Write did not receive write state after range -> subtype composition"); + } + + [Test] + public async Task Multirange_write_state_flows() + { + // Verifies write state propagation through a multirange composition: + // MultirangeConverter[], NpgsqlRange> -> RangeConverter -> tracking int subtype + // Three layers: multirange stores per-range state, range stores per-bound state, subtype populates bound state. 
+ var tracker = new WriteStateTracker(); + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + dataSourceBuilder.EnableRanges(); + dataSourceBuilder.EnableMultiranges(); + dataSourceBuilder.AddTypeInfoResolverFactory(new RangeWriteStateTrackingResolverFactory(tracker)); + await using var dataSource = dataSourceBuilder.Build(); + await using var conn = await dataSource.OpenConnectionAsync(); + + var input = new[] { new NpgsqlRange(1, 10), new NpgsqlRange(20, 30) }; + + await using var cmd = new NpgsqlCommand("SELECT @p", conn); + cmd.Parameters.Add(new NpgsqlParameter[]> + { ParameterName = "p", TypedValue = input, DataTypeName = "int4multirange" }); + await cmd.ExecuteNonQueryAsync(); + + Assert.That(tracker.WriteWriteStateReceived, Is.True, + "Write did not receive write state after multirange -> range -> subtype composition"); + } + + [Test] + public async Task Range_array_write_state_flows() + { + // Verifies write state propagation through an array-over-range composition: + // ArrayConverter[]> -> RangeConverter -> tracking int subtype + // The per-element array slot carries the range's WriteState, which itself nests the subtype state. 
+ var tracker = new WriteStateTracker(); + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + dataSourceBuilder.EnableArrays(); + dataSourceBuilder.EnableRanges(); + dataSourceBuilder.AddTypeInfoResolverFactory(new RangeWriteStateTrackingResolverFactory(tracker)); + await using var dataSource = dataSourceBuilder.Build(); + await using var conn = await dataSource.OpenConnectionAsync(); + + var input = new[] { new NpgsqlRange(1, 10), new NpgsqlRange(20, 30) }; + + await using var cmd = new NpgsqlCommand("SELECT @p", conn); + cmd.Parameters.Add(new NpgsqlParameter[]> + { ParameterName = "p", TypedValue = input, DataTypeName = "int4range[]" }); + await cmd.ExecuteNonQueryAsync(); + + Assert.That(tracker.WriteWriteStateReceived, Is.True, + "Write did not receive write state after array -> range -> subtype composition"); + } + + [Test] + public async Task Array_of_multirange_write_state_flows() + { + // Verifies write state propagation through an array-over-multirange composition (four layers): + // ArrayConverter[][]> -> MultirangeConverter -> RangeConverter -> tracking int subtype + // The deepest common composition shape — if any layer loses state, the subtype's WriteCore never sees it. 
+ var tracker = new WriteStateTracker(); + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + dataSourceBuilder.EnableArrays(); + dataSourceBuilder.EnableRanges(); + dataSourceBuilder.EnableMultiranges(); + dataSourceBuilder.AddTypeInfoResolverFactory(new RangeWriteStateTrackingResolverFactory(tracker)); + await using var dataSource = dataSourceBuilder.Build(); + await using var conn = await dataSource.OpenConnectionAsync(); + + var input = new[] + { + new[] { new NpgsqlRange(1, 10), new NpgsqlRange(20, 30) }, + new[] { new NpgsqlRange(40, 50) } + }; + + await using var cmd = new NpgsqlCommand("SELECT @p", conn); + cmd.Parameters.Add(new NpgsqlParameter[][]> + { ParameterName = "p", TypedValue = input, DataTypeName = "int4multirange[]" }); + await cmd.ExecuteNonQueryAsync(); + + Assert.That(tracker.WriteWriteStateReceived, Is.True, + "Write did not receive write state after array -> multirange -> range -> subtype composition"); + } + + [Test] + public async Task Composite_write_state_flows() + { + // Verifies write state propagation through a composite composition: + // CompositeConverter -> tracking int4 field converter + // The composite's per-field WriteState storage must carry the subtype's sentinel into BeginNestedWrite. + // Uses a real PG CREATE TYPE + MapComposite so the CompositeConverter is constructed by the production path. 
+ var tracker = new WriteStateTracker(); + await using var adminConnection = await OpenConnectionAsync(); + var type = await TestUtil.GetTempTypeName(adminConnection); + await adminConnection.ExecuteNonQueryAsync($"CREATE TYPE {type} AS (x int)"); + + var dataSourceBuilder = CreateDataSourceBuilder(); + dataSourceBuilder.AddTypeInfoResolverFactory(new CompositeFieldWriteStateTrackingResolverFactory(tracker)); + dataSourceBuilder.MapComposite(type); + await using var dataSource = dataSourceBuilder.Build(); + await using var connection = await dataSource.OpenConnectionAsync(); + + await using var cmd = new NpgsqlCommand("SELECT @p", connection); + cmd.Parameters.Add(new NpgsqlParameter + { + ParameterName = "p", + Value = new CompositeWithInt { X = 42 }, + DataTypeName = type + }); + await cmd.ExecuteNonQueryAsync(); + + Assert.That(tracker.WriteWriteStateReceived, Is.True, + "Write did not receive write state after composite -> field subtype composition"); + } + + [Test] + public async Task Execute_disposes_write_state() + { + // Verifies that write state produced during ResolveTypeInfo (and carried through Bind/Write) is disposed + // once the normal execution path finishes, via ResetBindingInfo in the Write finally block. 
+ var tracker = new DisposalTracker(); + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + dataSourceBuilder.AddTypeInfoResolverFactory(new DisposableWriteStateResolverFactory(tracker)); + await using var dataSource = dataSourceBuilder.Build(); + await using var conn = await dataSource.OpenConnectionAsync(); + + await using var cmd = new NpgsqlCommand("SELECT @p", conn); + cmd.Parameters.Add(new NpgsqlParameter { ParameterName = "p", TypedValue = 42, DataTypeName = "integer" }); + + await cmd.ExecuteNonQueryAsync(); + + Assert.That(tracker.Disposed, Is.True, "provider-produced write state was not disposed after normal execution"); + } + + [Test] + public async Task SchemaOnly_disposes_resolution_write_state() + { + // Verifies that write state produced during ResolveTypeInfo is disposed when Bind is skipped + // (e.g. CommandBehavior.SchemaOnly), so provider-allocated state does not leak. + var tracker = new DisposalTracker(); + var dataSourceBuilder = new NpgsqlSlimDataSourceBuilder(ConnectionString); + dataSourceBuilder.AddTypeInfoResolverFactory(new DisposableWriteStateResolverFactory(tracker)); + await using var dataSource = dataSourceBuilder.Build(); + await using var conn = await dataSource.OpenConnectionAsync(); + + await using var cmd = new NpgsqlCommand("SELECT @p", conn); + cmd.Parameters.Add(new NpgsqlParameter { ParameterName = "p", TypedValue = 42, DataTypeName = "integer" }); + + await using var reader = await cmd.ExecuteReaderAsync(CommandBehavior.SchemaOnly); + + Assert.That(tracker.Disposed, Is.True, "provider-produced write state was not disposed after SchemaOnly execution"); + } + + sealed class WriteStateTracker + { + public bool IsDbNullWriteStateReceived; + public bool WriteWriteStateReceived; + } + + sealed class WriteStateTrackingConverter(bool fixedSize, WriteStateTracker tracker, bool generatesWriteState = false) + : PgBufferedConverter(customDbNullPredicate: true) + { + public override bool CanConvert(DataFormat 
format, out BufferRequirements bufferRequirements) + { + bufferRequirements = fixedSize ? BufferRequirements.CreateFixedSize(sizeof(int)) : BufferRequirements.Create(Size.CreateUpperBound(sizeof(int))); + return format is DataFormat.Binary; + } + + protected override bool IsDbNullValue(int value, object? writeState) + { + if (writeState is not null) + tracker.IsDbNullWriteStateReceived = true; + return false; + } + + protected override int ReadCore(PgReader reader) => reader.ReadInt32(); + + protected override void WriteCore(PgWriter writer, int value) + { + if (writer.Current.WriteState is not null) + tracker.WriteWriteStateReceived = true; + writer.WriteInt32(value); + } + + public override Size GetSize(SizeContext context, int value, ref object? writeState) + { + // Range/Multirange call the subtype converter directly with a fresh null writeState, so for those tests the + // subtype must produce state from GetSize. For the array tests the provider has already populated non-null + // state and the ??= is a no-op, preserving existing behavior. + if (generatesWriteState) + writeState ??= "provider-state"; + return sizeof(int); + } + } + + sealed class WriteStateTrackingProvider(PgSerializerOptions options, bool fixedSize, WriteStateTracker tracker) : PgConcreteTypeInfoProvider + { + PgConcreteTypeInfo? _concreteTypeInfo; + + PgConcreteTypeInfo GetOrCreate() + => _concreteTypeInfo ??= new(options, new WriteStateTrackingConverter(fixedSize, tracker), options.GetCanonicalTypeId(DataTypeNames.Int4)); + + protected override PgConcreteTypeInfo GetDefaultCore(PgTypeId? pgTypeId) => GetOrCreate(); + + protected override PgConcreteTypeInfo GetForValueCore(ProviderValueContext context, int value, ref object? 
writeState) + { + writeState = "provider-state"; + return GetOrCreate(); + } + } + + sealed class WriteStateTrackingResolverFactory(bool fixedSize, WriteStateTracker tracker) : PgTypeInfoResolverFactory + { + public override IPgTypeInfoResolver CreateResolver() => new Resolver(fixedSize, tracker); + public override IPgTypeInfoResolver CreateArrayResolver() => new ArrayResolver(); + + sealed class Resolver(bool fixedSize, WriteStateTracker tracker) : IPgTypeInfoResolver + { + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + { + if (dataTypeName == DataTypeNames.Int4 && (type == typeof(int) || type is null)) + return new PgProviderTypeInfo(options, new WriteStateTrackingProvider(options, fixedSize, tracker), DataTypeNames.Int4); + + // object->int4 goes through LateBoundTypeInfoProvider which delegates back to the int resolver above, + // letting us exercise write-state propagation across the object (late-bound) element layer. + if (dataTypeName == DataTypeNames.Int4 && type == typeof(object)) + return new PgProviderTypeInfo(options, new LateBoundTypeInfoProvider(options, options.GetCanonicalTypeId(DataTypeNames.Int4)), DataTypeNames.Int4); + + return null; + } + } + + sealed class ArrayResolver : IPgTypeInfoResolver + { + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + { + if (dataTypeName != DataTypeNames.Int4.ToArrayName()) + return null; + + if (type == typeof(object[])) + { + var objectElementInfo = options.GetTypeInfo(typeof(object), DataTypeNames.Int4); + if (objectElementInfo is not PgProviderTypeInfo objectElementProviderTypeInfo) + return null; + + return new PgProviderTypeInfo(options, + new ArrayTypeInfoProvider(objectElementProviderTypeInfo, typeof(object[])), + dataTypeName); + } + + var elementInfo = options.GetTypeInfo(typeof(int), DataTypeNames.Int4); + if (elementInfo is not PgProviderTypeInfo providerTypeInfo) + return null; + + return new PgProviderTypeInfo(options, + new ArrayTypeInfoProvider(providerTypeInfo, typeof(int[])), + dataTypeName); + } + } + } + + sealed class DisposalTracker : IDisposable + { + public bool Disposed { get; private set; } + + public void Dispose() => Disposed = true; + } + + sealed class DisposableWriteStateConverter : PgBufferedConverter + { + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.CreateFixedSize(sizeof(int)); + return format is DataFormat.Binary; + } + + protected override int ReadCore(PgReader reader) => reader.ReadInt32(); + protected override void WriteCore(PgWriter writer, int value) => writer.WriteInt32(value); + } + + sealed class DisposableWriteStateProvider(PgSerializerOptions options, DisposalTracker tracker) : PgConcreteTypeInfoProvider + { + PgConcreteTypeInfo? _concreteTypeInfo; + + PgConcreteTypeInfo GetOrCreate() + => _concreteTypeInfo ??= new(options, new DisposableWriteStateConverter(), options.GetCanonicalTypeId(DataTypeNames.Int4)); + + protected override PgConcreteTypeInfo GetDefaultCore(PgTypeId? pgTypeId) => GetOrCreate(); + + protected override PgConcreteTypeInfo GetForValueCore(ProviderValueContext context, int value, ref object? 
writeState) + { + writeState = tracker; + return GetOrCreate(); + } + } + + sealed class DisposableWriteStateResolverFactory(DisposalTracker tracker) : PgTypeInfoResolverFactory + { + public override IPgTypeInfoResolver CreateResolver() => new Resolver(tracker); + public override IPgTypeInfoResolver? CreateArrayResolver() => null; + + sealed class Resolver(DisposalTracker tracker) : IPgTypeInfoResolver + { + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + { + if (dataTypeName == DataTypeNames.Int4 && (type == typeof(int) || type is null)) + return new PgProviderTypeInfo(options, new DisposableWriteStateProvider(options, tracker), DataTypeNames.Int4); + + return null; + } + } + } + + sealed class RangeWriteStateTrackingResolverFactory(WriteStateTracker tracker) : PgTypeInfoResolverFactory + { + public override IPgTypeInfoResolver CreateResolver() => new NoOpResolver(); + public override IPgTypeInfoResolver? CreateArrayResolver() => null; + + public override IPgTypeInfoResolver? CreateRangeResolver() => new RangeResolver(tracker); + public override IPgTypeInfoResolver? CreateRangeArrayResolver() => new RangeArrayResolver(tracker); + public override IPgTypeInfoResolver? CreateMultirangeResolver() => new MultirangeResolver(tracker); + public override IPgTypeInfoResolver? CreateMultirangeArrayResolver() => new MultirangeArrayResolver(tracker); + + sealed class NoOpResolver : IPgTypeInfoResolver + { + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) => null; + } + + sealed class RangeResolver(WriteStateTracker tracker) : IPgTypeInfoResolver + { + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + { + if (dataTypeName == DataTypeNames.Int4Range && (type == typeof(NpgsqlRange) || type is null)) + { + var subtype = new WriteStateTrackingConverter(fixedSize: false, tracker, generatesWriteState: true); + var range = new RangeConverter(subtype); + return new PgConcreteTypeInfo(options, range, options.GetCanonicalTypeId(DataTypeNames.Int4Range)); + } + return null; + } + } + + sealed class RangeArrayResolver(WriteStateTracker tracker) : IPgTypeInfoResolver + { + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + { + if (dataTypeName == DataTypeNames.Int4Range.ToArrayName() && (type == typeof(NpgsqlRange[]) || type is null)) + { + var subtype = new WriteStateTrackingConverter(fixedSize: false, tracker, generatesWriteState: true); + var range = new RangeConverter(subtype); + var rangeInfo = new PgConcreteTypeInfo(options, range, options.GetCanonicalTypeId(DataTypeNames.Int4Range)); + var arrayConverter = ArrayConverter[]>.CreateArrayBased>(rangeInfo, typeof(NpgsqlRange[])); + return new PgConcreteTypeInfo(options, arrayConverter, options.GetCanonicalTypeId(DataTypeNames.Int4Range.ToArrayName())); + } + return null; + } + } + + sealed class MultirangeResolver(WriteStateTracker tracker) : IPgTypeInfoResolver + { + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + { + if (dataTypeName == DataTypeNames.Int4Multirange && (type == typeof(NpgsqlRange[]) || type is null)) + { + var subtype = new WriteStateTrackingConverter(fixedSize: false, tracker, generatesWriteState: true); + var range = new RangeConverter(subtype); + var multirange = new MultirangeConverter[], NpgsqlRange>(range); + return new PgConcreteTypeInfo(options, multirange, options.GetCanonicalTypeId(DataTypeNames.Int4Multirange)); + } + return null; + } + } + + sealed class MultirangeArrayResolver(WriteStateTracker tracker) : IPgTypeInfoResolver + { + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? dataTypeName, PgSerializerOptions options) + { + if (dataTypeName == DataTypeNames.Int4Multirange.ToArrayName() && (type == typeof(NpgsqlRange[][]) || type is null)) + { + var subtype = new WriteStateTrackingConverter(fixedSize: false, tracker, generatesWriteState: true); + var range = new RangeConverter(subtype); + var multirange = new MultirangeConverter[], NpgsqlRange>(range); + var multirangeInfo = new PgConcreteTypeInfo(options, multirange, options.GetCanonicalTypeId(DataTypeNames.Int4Multirange)); + var arrayConverter = ArrayConverter[][]>.CreateArrayBased[]>(multirangeInfo, typeof(NpgsqlRange[][])); + return new PgConcreteTypeInfo(options, arrayConverter, options.GetCanonicalTypeId(DataTypeNames.Int4Multirange.ToArrayName())); + } + return null; + } + } + } + + class CompositeWithInt + { + public int X { get; set; } + } + + sealed class CompositeFieldWriteStateTrackingResolverFactory(WriteStateTracker tracker) : PgTypeInfoResolverFactory + { + public override IPgTypeInfoResolver CreateResolver() => new Resolver(tracker); + public override IPgTypeInfoResolver? CreateArrayResolver() => null; + + sealed class Resolver(WriteStateTracker tracker) : IPgTypeInfoResolver + { + public PgTypeInfo? GetTypeInfo(Type? type, DataTypeName? 
dataTypeName, PgSerializerOptions options) + { + if (dataTypeName == DataTypeNames.Int4 && (type == typeof(int) || type is null)) + { + var converter = new WriteStateTrackingConverter(fixedSize: false, tracker, generatesWriteState: true); + return new PgConcreteTypeInfo(options, converter, options.GetCanonicalTypeId(DataTypeNames.Int4)); + } + return null; + } + } + } +} From 5f26d6135c7ba0a3cc8fb83ff96ceb0aa45f9b74 Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Mon, 13 Apr 2026 15:33:16 +0300 Subject: [PATCH 749/761] Add additional security checks for GSS session encryption and GSS authentication (#6525) Closes #6523 --- src/Npgsql/Internal/NpgsqlConnector.Auth.cs | 8 +++++++- src/Npgsql/Internal/NpgsqlConnector.cs | 9 ++++++++- 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs index f837f08026..25642b0456 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs @@ -330,8 +330,14 @@ async Task AuthenticateMD5(string username, byte[] salt, bool async, Cancellatio internal async ValueTask AuthenticateGSS(bool async, CancellationToken cancellationToken) { var targetName = $"{KerberosServiceName}/{Host}"; + // See https://github.com/postgres/postgres/blob/a0dd0702e464f206b08c99a74cb58809c51aafa5/src/interfaces/libpq/fe-auth.c#L111-L123 + // We do not support delegation (TokenImpersonationLevel.Delegation) for now (#6540) + var clientOptions = new NegotiateAuthenticationClientOptions + { + TargetName = targetName, + RequireMutualAuthentication = true + }; - var clientOptions = new NegotiateAuthenticationClientOptions { TargetName = targetName }; NegotiateOptionsCallback?.Invoke(clientOptions); using var authContext = new NegotiateAuthentication(clientOptions); diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index d224aed84b..85971c8254 100644 --- 
a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -606,7 +606,14 @@ internal async ValueTask GSSEncrypt(bool async, bool isRequ ConnectionLogger.LogTrace("Negotiating GSS encryption"); var targetName = $"{KerberosServiceName}/{Host}"; - var clientOptions = new NegotiateAuthenticationClientOptions { TargetName = targetName }; + // See https://github.com/postgres/postgres/blob/a0dd0702e464f206b08c99a74cb58809c51aafa5/src/interfaces/libpq/fe-secure-gssapi.c#L651-L658 + // We do not support delegation (TokenImpersonationLevel.Delegation) for now (#6540) + var clientOptions = new NegotiateAuthenticationClientOptions + { + TargetName = targetName, + RequireMutualAuthentication = true, + RequiredProtectionLevel = ProtectionLevel.EncryptAndSign + }; NegotiateOptionsCallback?.Invoke(clientOptions); From f703ef86c745d2f0dee2a32f280840390ba33a2b Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Mon, 13 Apr 2026 16:21:59 +0300 Subject: [PATCH 750/761] Improve kerberos auth on windows (#6541) Fixes #6529 --- .../Internal/IntegratedSecurityHandler.cs | 6 +++--- src/Npgsql/Internal/NpgsqlConnector.Auth.cs | 18 ++++++++++++++---- src/Npgsql/Internal/NpgsqlConnector.cs | 2 +- 3 files changed, 18 insertions(+), 8 deletions(-) diff --git a/src/Npgsql/Internal/IntegratedSecurityHandler.cs b/src/Npgsql/Internal/IntegratedSecurityHandler.cs index 5edb826497..7589cc59e8 100644 --- a/src/Npgsql/Internal/IntegratedSecurityHandler.cs +++ b/src/Npgsql/Internal/IntegratedSecurityHandler.cs @@ -16,7 +16,7 @@ class IntegratedSecurityHandler return new(); } - public virtual ValueTask NegotiateAuthentication(bool async, NpgsqlConnector connector, CancellationToken cancellationToken) + public virtual ValueTask NegotiateAuthentication(bool async, bool isKerberos, NpgsqlConnector connector, CancellationToken cancellationToken) => throw new NotSupportedException(string.Format(NpgsqlStrings.IntegratedSecurityDisabled, 
nameof(NpgsqlSlimDataSourceBuilder.EnableIntegratedSecurity))); public virtual ValueTask GSSEncrypt(bool async, bool isRequired, NpgsqlConnector connector, CancellationToken cancellationToken) @@ -30,8 +30,8 @@ sealed class RealIntegratedSecurityHandler : IntegratedSecurityHandler public override ValueTask GetUsername(bool async, bool includeRealm, ILogger connectionLogger, CancellationToken cancellationToken) => KerberosUsernameProvider.GetUsername(async, includeRealm, connectionLogger, cancellationToken); - public override ValueTask NegotiateAuthentication(bool async, NpgsqlConnector connector, CancellationToken cancellationToken) - => connector.AuthenticateGSS(async, cancellationToken); + public override ValueTask NegotiateAuthentication(bool async, bool isKerberos, NpgsqlConnector connector, CancellationToken cancellationToken) + => connector.AuthenticateGSS(async, isKerberos, cancellationToken); public override ValueTask GSSEncrypt(bool async, bool isRequired, NpgsqlConnector connector, CancellationToken cancellationToken) => connector.GSSEncrypt(async, isRequired, cancellationToken); diff --git a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs index 25642b0456..1c2ef6c3cf 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs @@ -61,7 +61,8 @@ await AuthenticateSASL(((AuthenticationSASLMessage)msg).Mechanisms, username, as case AuthenticationRequestType.GSS: case AuthenticationRequestType.SSPI: ThrowIfNotAllowed(requiredAuthModes, msg.AuthRequestType == AuthenticationRequestType.GSS ? 
RequireAuthMode.GSS : RequireAuthMode.SSPI); - await DataSource.IntegratedSecurityHandler.NegotiateAuthentication(async, this, cancellationToken).ConfigureAwait(false); + var isKerberos = msg.AuthRequestType == AuthenticationRequestType.GSS; + await DataSource.IntegratedSecurityHandler.NegotiateAuthentication(async, isKerberos, this, cancellationToken).ConfigureAwait(false); return; case AuthenticationRequestType.GSSContinue: @@ -327,7 +328,7 @@ async Task AuthenticateMD5(string username, byte[] salt, bool async, Cancellatio await Flush(async, cancellationToken).ConfigureAwait(false); } - internal async ValueTask AuthenticateGSS(bool async, CancellationToken cancellationToken) + internal async ValueTask AuthenticateGSS(bool async, bool isKerberos, CancellationToken cancellationToken) { var targetName = $"{KerberosServiceName}/{Host}"; // See https://github.com/postgres/postgres/blob/a0dd0702e464f206b08c99a74cb58809c51aafa5/src/interfaces/libpq/fe-auth.c#L111-L123 @@ -337,15 +338,24 @@ internal async ValueTask AuthenticateGSS(bool async, CancellationToken cancellat TargetName = targetName, RequireMutualAuthentication = true }; + // If postgres requests GSS, we explicitly ask for Kerberos + // Instead of relying on SSPI on windows to pick the correct protocol (Kerberos instead of NTLM) + // Otherwise, leave Negotiate to allow SSPI to pick whatever it thinks is correct + // This behavior differs from libpq, which prefers SSPI to pick the protocol + // But mimics PGJDBC + // On UNIX only Kerberos is supported, so no need to differentiate between OSes + // TODO: PGJBC has a parameter to force SSPI. Not sure we need something like this. 
+ if (isKerberos) + clientOptions.Package = "Kerberos"; NegotiateOptionsCallback?.Invoke(clientOptions); using var authContext = new NegotiateAuthentication(clientOptions); var data = authContext.GetOutgoingBlob(ReadOnlySpan.Empty, out var statusCode)!; - if (statusCode != NegotiateAuthenticationStatusCode.ContinueNeeded) + if (statusCode is not NegotiateAuthenticationStatusCode.Completed and not NegotiateAuthenticationStatusCode.ContinueNeeded) { // Unable to retrieve credentials or some other issue - throw new NpgsqlException($"Unable to authenticate with GSS: received {statusCode} instead of the expected ContinueNeeded"); + throw new NpgsqlException($"Unable to authenticate with GSS: received {statusCode} instead of the expected ContinueNeeded or Completed"); } await WritePassword(data, 0, data.Length, async, cancellationToken).ConfigureAwait(false); await Flush(async, cancellationToken).ConfigureAwait(false); diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs b/src/Npgsql/Internal/NpgsqlConnector.cs index 85971c8254..a7880f33a1 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -637,7 +637,7 @@ internal async ValueTask GSSEncrypt(bool async, bool isRequ return GssEncryptionResult.GetCredentialFailure; } - if (statusCode != NegotiateAuthenticationStatusCode.ContinueNeeded) + if (statusCode is not NegotiateAuthenticationStatusCode.Completed and not NegotiateAuthenticationStatusCode.ContinueNeeded) { // Unable to retrieve credentials // If it's required, throw an appropriate exception From 522521635d280fafabafd1f2e757ab506719aa1a Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Mon, 13 Apr 2026 16:13:48 +0200 Subject: [PATCH 751/761] Remove obsolete UTF-8 BOMs (#6542) --- Directory.Build.props | 2 +- Npgsql.slnx.DotSettings | 2 +- src/Directory.Build.props | 2 +- src/Npgsql.DependencyInjection/Properties/AssemblyInfo.cs | 2 +- src/Npgsql.GeoJSON/CrsMap.WellKnown.cs | 2 +- 
src/Npgsql.GeoJSON/Internal/BoundingBoxBuilder.cs | 2 +- src/Npgsql.GeoJSON/Internal/CrsMapBuilder.cs | 2 +- src/Npgsql.GeoJSON/Internal/GeoJSONConverter.cs | 2 +- src/Npgsql.GeoJSON/Npgsql.GeoJSON.csproj | 2 +- src/Npgsql.GeoJSON/NpgsqlGeoJSONExtensions.cs | 2 +- src/Npgsql.GeoJSON/Properties/AssemblyInfo.cs | 2 +- src/Npgsql.Json.NET/Internal/JsonNetJsonConverter.cs | 2 +- src/Npgsql.Json.NET/Npgsql.Json.NET.csproj | 2 +- src/Npgsql.Json.NET/NpgsqlJsonNetExtensions.cs | 2 +- src/Npgsql.Json.NET/Properties/AssemblyInfo.cs | 2 +- src/Npgsql.NetTopologySuite/Npgsql.NetTopologySuite.csproj | 2 +- src/Npgsql.NetTopologySuite/NpgsqlNetTopologySuiteExtensions.cs | 2 +- src/Npgsql.NetTopologySuite/Properties/AssemblyInfo.cs | 2 +- src/Npgsql.NodaTime/Npgsql.NodaTime.csproj | 2 +- src/Npgsql.NodaTime/NpgsqlNodaTimeExtensions.cs | 2 +- src/Npgsql.NodaTime/Properties/AssemblyInfo.cs | 2 +- .../Properties/NpgsqlNodaTimeStrings.Designer.cs | 2 +- src/Npgsql.OpenTelemetry/MeterProviderBuilderExtensions.cs | 2 +- src/Npgsql.OpenTelemetry/Properties/AssemblyInfo.cs | 2 +- src/Npgsql.OpenTelemetry/TracerProviderBuilderExtensions.cs | 2 +- src/Npgsql/BackendMessages/AuthenticationMessages.cs | 2 +- src/Npgsql/BackendMessages/BackendKeyDataMessage.cs | 2 +- src/Npgsql/BackendMessages/BindCompleteMessage.cs | 2 +- src/Npgsql/BackendMessages/CloseCompletedMessage.cs | 2 +- src/Npgsql/BackendMessages/CommandCompleteMessage.cs | 2 +- src/Npgsql/BackendMessages/CopyMessages.cs | 2 +- src/Npgsql/BackendMessages/DataRowMessage.cs | 2 +- src/Npgsql/BackendMessages/EmptyQueryMessage.cs | 2 +- src/Npgsql/BackendMessages/ErrorOrNoticeMessage.cs | 2 +- src/Npgsql/BackendMessages/NoDataMessage.cs | 2 +- src/Npgsql/BackendMessages/ParameterDescriptionMessage.cs | 2 +- src/Npgsql/BackendMessages/ParseCompleteMessage.cs | 2 +- src/Npgsql/BackendMessages/PortalSuspendedMessage.cs | 2 +- src/Npgsql/BackendMessages/ReadyForQueryMessage.cs | 2 +- src/Npgsql/BackendMessages/RowDescriptionMessage.cs | 2 
+- src/Npgsql/GlobalSuppressions.cs | 2 +- src/Npgsql/ICancelable.cs | 2 +- src/Npgsql/Internal/Converters/Networking/IPNetworkConverter.cs | 2 +- src/Npgsql/Internal/INpgsqlDatabaseInfoFactory.cs | 2 +- src/Npgsql/Internal/NpgsqlConnector.Auth.cs | 2 +- src/Npgsql/Internal/NpgsqlDatabaseInfo.cs | 2 +- src/Npgsql/Internal/NpgsqlReadBuffer.Stream.cs | 2 +- src/Npgsql/Internal/NpgsqlReadBuffer.cs | 2 +- src/Npgsql/Internal/NpgsqlWriteBuffer.cs | 2 +- src/Npgsql/Internal/TransportSecurityHandler.cs | 2 +- src/Npgsql/KerberosUsernameProvider.cs | 2 +- src/Npgsql/MultiHostDataSourceWrapper.cs | 2 +- src/Npgsql/NameTranslation/INpgsqlNameTranslator.cs | 2 +- src/Npgsql/NameTranslation/NpgsqlNullNameTranslator.cs | 2 +- src/Npgsql/NameTranslation/NpgsqlSnakeCaseNameTranslator.cs | 2 +- src/Npgsql/Npgsql.csproj | 2 +- src/Npgsql/NpgsqlActivitySource.cs | 2 +- src/Npgsql/NpgsqlBatchCommand.cs | 2 +- src/Npgsql/NpgsqlCommandBuilder.cs | 2 +- src/Npgsql/NpgsqlException.cs | 2 +- src/Npgsql/NpgsqlLoggingConfiguration.cs | 2 +- src/Npgsql/NpgsqlMetricsOptions.cs | 2 +- src/Npgsql/NpgsqlMultiHostDataSource.cs | 2 +- src/Npgsql/NpgsqlNestedDataReader.cs | 2 +- src/Npgsql/NpgsqlOperationInProgressException.cs | 2 +- src/Npgsql/NpgsqlParameter`.cs | 2 +- src/Npgsql/NpgsqlTypes/NpgsqlInterval.cs | 2 +- src/Npgsql/NpgsqlTypes/NpgsqlLogSequenceNumber.cs | 2 +- src/Npgsql/NpgsqlTypes/NpgsqlRange.cs | 2 +- src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs | 2 +- src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs | 2 +- src/Npgsql/NpgsqlTypes/PgNameAttribute.cs | 2 +- src/Npgsql/PgPassFile.cs | 2 +- src/Npgsql/PoolManager.cs | 2 +- src/Npgsql/PoolingDataSource.cs | 2 +- src/Npgsql/PostgresDatabaseInfo.cs | 2 +- src/Npgsql/PostgresErrorCodes.cs | 2 +- src/Npgsql/PostgresMinimalDatabaseInfo.cs | 2 +- src/Npgsql/PostgresNotice.cs | 2 +- src/Npgsql/PostgresTypes/PostgresArrayType.cs | 2 +- src/Npgsql/PostgresTypes/PostgresBaseType.cs | 2 +- src/Npgsql/PostgresTypes/PostgresCompositeType.cs | 2 +- 
src/Npgsql/PostgresTypes/PostgresDomainType.cs | 2 +- src/Npgsql/PostgresTypes/PostgresEnumType.cs | 2 +- src/Npgsql/PostgresTypes/PostgresFacets.cs | 2 +- src/Npgsql/PostgresTypes/PostgresMultirangeType.cs | 2 +- src/Npgsql/PostgresTypes/PostgresRangeType.cs | 2 +- src/Npgsql/PostgresTypes/PostgresType.cs | 2 +- src/Npgsql/PostgresTypes/PostgresUnknownType.cs | 2 +- src/Npgsql/PregeneratedMessages.cs | 2 +- src/Npgsql/PreparedStatement.cs | 2 +- src/Npgsql/PreparedStatementManager.cs | 2 +- src/Npgsql/PreparedTextReader.cs | 2 +- src/Npgsql/Properties/AssemblyInfo.cs | 2 +- src/Npgsql/Properties/NpgsqlStrings.Designer.cs | 2 +- .../Internal/LogicalReplicationConnectionExtensions.cs | 2 +- src/Npgsql/Replication/Internal/LogicalReplicationSlot.cs | 2 +- src/Npgsql/Replication/LogicalReplicationConnection.cs | 2 +- src/Npgsql/Replication/LogicalSlotSnapshotInitMode.cs | 2 +- src/Npgsql/Replication/PgOutput/Messages/BeginMessage.cs | 2 +- src/Npgsql/Replication/PgOutput/Messages/BeginPrepareMessage.cs | 2 +- src/Npgsql/Replication/PgOutput/Messages/CommitMessage.cs | 2 +- .../Replication/PgOutput/Messages/CommitPreparedMessage.cs | 2 +- src/Npgsql/Replication/PgOutput/Messages/DeleteMessage.cs | 2 +- src/Npgsql/Replication/PgOutput/Messages/FullDeleteMessage.cs | 2 +- src/Npgsql/Replication/PgOutput/Messages/FullUpdateMessage.cs | 2 +- src/Npgsql/Replication/PgOutput/Messages/IndexUpdateMessage.cs | 2 +- src/Npgsql/Replication/PgOutput/Messages/InsertMessage.cs | 2 +- src/Npgsql/Replication/PgOutput/Messages/KeyDeleteMessage.cs | 2 +- .../Replication/PgOutput/Messages/LogicalDecodingMessage.cs | 2 +- src/Npgsql/Replication/PgOutput/Messages/OriginMessage.cs | 2 +- .../Replication/PgOutput/Messages/PgOutputReplicationMessage.cs | 2 +- src/Npgsql/Replication/PgOutput/Messages/PrepareMessage.cs | 2 +- src/Npgsql/Replication/PgOutput/Messages/PrepareMessageBase.cs | 2 +- .../PgOutput/Messages/PreparedTransactionControlMessage.cs | 2 +- 
src/Npgsql/Replication/PgOutput/Messages/RelationMessage.cs | 2 +- .../Replication/PgOutput/Messages/RelationMessageColumn.cs | 2 +- .../Replication/PgOutput/Messages/RollbackPreparedMessage.cs | 2 +- src/Npgsql/Replication/PgOutput/Messages/StreamAbortMessage.cs | 2 +- src/Npgsql/Replication/PgOutput/Messages/StreamCommitMessage.cs | 2 +- .../Replication/PgOutput/Messages/StreamPrepareMessage.cs | 2 +- src/Npgsql/Replication/PgOutput/Messages/StreamStartMessage.cs | 2 +- src/Npgsql/Replication/PgOutput/Messages/StreamStopMessage.cs | 2 +- .../Replication/PgOutput/Messages/TransactionControlMessage.cs | 2 +- .../Replication/PgOutput/Messages/TransactionalMessage.cs | 2 +- src/Npgsql/Replication/PgOutput/Messages/TruncateMessage.cs | 2 +- src/Npgsql/Replication/PgOutput/Messages/TypeMessage.cs | 2 +- src/Npgsql/Replication/PgOutput/Messages/UpdateMessage.cs | 2 +- src/Npgsql/Replication/PgOutput/PgOutputConnectionExtensions.cs | 2 +- src/Npgsql/Replication/PgOutput/PgOutputProtocolVersion.cs | 2 +- src/Npgsql/Replication/PgOutput/PgOutputReplicationOptions.cs | 2 +- src/Npgsql/Replication/PgOutput/PgOutputReplicationSlot.cs | 2 +- src/Npgsql/Replication/PgOutput/PgOutputStreamingMode.cs | 2 +- src/Npgsql/Replication/PgOutput/ReadonlyArrayBuffer.cs | 2 +- src/Npgsql/Replication/PgOutput/TupleDataKind.cs | 2 +- src/Npgsql/Replication/PhysicalReplicationConnection.cs | 2 +- src/Npgsql/Replication/PhysicalReplicationSlot.cs | 2 +- src/Npgsql/Replication/ReplicationConnection.cs | 2 +- src/Npgsql/Replication/ReplicationMessage.cs | 2 +- src/Npgsql/Replication/ReplicationSlot.cs | 2 +- src/Npgsql/Replication/ReplicationSlotOptions.cs | 2 +- src/Npgsql/Replication/ReplicationSystemIdentification.cs | 2 +- .../TestDecoding/TestDecodingConnectionExtensions.cs | 2 +- src/Npgsql/Replication/TestDecoding/TestDecodingData.cs | 2 +- src/Npgsql/Replication/TestDecoding/TestDecodingOptions.cs | 2 +- .../Replication/TestDecoding/TestDecodingReplicationSlot.cs | 2 +- 
src/Npgsql/Replication/TimelineHistoryFile.cs | 2 +- src/Npgsql/Replication/XLogDataMessage.cs | 2 +- src/Npgsql/Schema/DbColumnSchemaGenerator.cs | 2 +- src/Npgsql/Schema/NpgsqlDbColumn.cs | 2 +- src/Npgsql/SqlQueryParser.cs | 2 +- src/Npgsql/ThrowHelper.cs | 2 +- src/Npgsql/TypeMapping/GlobalTypeMapper.cs | 2 +- src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs | 2 +- src/Npgsql/Util/GSSStream.cs | 2 +- src/Npgsql/Util/LoggingEnumerable.cs | 2 +- src/Npgsql/Util/ResettableCancellationTokenSource.cs | 2 +- src/Npgsql/Util/VersionExtensions.cs | 2 +- test/MStatDumper/Program.cs | 2 +- test/Npgsql.Benchmarks/App.config | 2 +- test/Npgsql.Benchmarks/BenchmarkEnvironment.cs | 2 +- test/Npgsql.Benchmarks/CommandExecuteBenchmarks.cs | 2 +- test/Npgsql.Benchmarks/Commit.cs | 2 +- test/Npgsql.Benchmarks/ConnectionCreationBenchmarks.cs | 2 +- test/Npgsql.Benchmarks/ConnectionOpenCloseBenchmarks.cs | 2 +- test/Npgsql.Benchmarks/CopyExport.cs | 2 +- test/Npgsql.Benchmarks/CopyImport.cs | 2 +- test/Npgsql.Benchmarks/GetFieldValue.cs | 2 +- test/Npgsql.Benchmarks/Insert.cs | 2 +- test/Npgsql.Benchmarks/Npgsql.Benchmarks.csproj | 2 +- test/Npgsql.Benchmarks/Prepare.cs | 2 +- test/Npgsql.Benchmarks/Program.cs | 2 +- test/Npgsql.Benchmarks/ReadArray.cs | 2 +- test/Npgsql.Benchmarks/ReadColumns.cs | 2 +- test/Npgsql.Benchmarks/ReadRows.cs | 2 +- test/Npgsql.Benchmarks/TypeHandlers/Composite.cs | 2 +- test/Npgsql.Benchmarks/TypeHandlers/Numeric.cs | 2 +- test/Npgsql.Benchmarks/TypeHandlers/Text.cs | 2 +- test/Npgsql.Benchmarks/TypeHandlers/TypeHandlerBenchmarks.cs | 2 +- test/Npgsql.Benchmarks/TypeHandlers/Uuid.cs | 2 +- test/Npgsql.Benchmarks/UnixDomainSocket.cs | 2 +- test/Npgsql.Benchmarks/WriteVaryingNumberOfParameters.cs | 2 +- .../DependencyInjectionTests.cs | 2 +- test/Npgsql.PluginTests/GeoJSONTests.cs | 2 +- test/Npgsql.PluginTests/JsonNetTests.cs | 2 +- test/Npgsql.PluginTests/NetTopologySuiteTests.cs | 2 +- test/Npgsql.PluginTests/NodaTimeTests.cs | 2 +- 
test/Npgsql.PluginTests/Npgsql.PluginTests.csproj | 2 +- .../Npgsql.Specification.Tests.csproj | 2 +- test/Npgsql.Tests/App.config | 2 +- test/Npgsql.Tests/AsyncTests.cs | 2 +- test/Npgsql.Tests/AutoPrepareTests.cs | 2 +- test/Npgsql.Tests/BugTests.cs | 2 +- test/Npgsql.Tests/CommandBuilderTests.cs | 2 +- test/Npgsql.Tests/ConnectionStringBuilderTests.cs | 2 +- test/Npgsql.Tests/CopyTests.cs | 2 +- test/Npgsql.Tests/FunctionTests.cs | 2 +- test/Npgsql.Tests/MultipleHostsTests.cs | 2 +- test/Npgsql.Tests/NestedDataReaderTests.cs | 2 +- test/Npgsql.Tests/NotificationTests.cs | 2 +- test/Npgsql.Tests/Npgsql.Tests.csproj | 2 +- test/Npgsql.Tests/PgPassEntryTests.cs | 2 +- test/Npgsql.Tests/PgPassFileTests.cs | 2 +- test/Npgsql.Tests/PoolManagerTests.cs | 2 +- test/Npgsql.Tests/PoolTests.cs | 2 +- test/Npgsql.Tests/PrepareTests.cs | 2 +- test/Npgsql.Tests/Properties/AssemblyInfo.cs | 2 +- test/Npgsql.Tests/ReadBufferTests.cs | 2 +- test/Npgsql.Tests/ReaderNewSchemaTests.cs | 2 +- test/Npgsql.Tests/ReaderOldSchemaTests.cs | 2 +- test/Npgsql.Tests/ReaderTests.cs | 2 +- test/Npgsql.Tests/Replication/CommonLogicalReplicationTests.cs | 2 +- test/Npgsql.Tests/Replication/CommonReplicationTests.cs | 2 +- test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs | 2 +- test/Npgsql.Tests/Replication/PhysicalReplicationTests.cs | 2 +- test/Npgsql.Tests/Replication/SafeReplicationTestBase.cs | 2 +- test/Npgsql.Tests/Replication/TestDecodingReplicationTests.cs | 2 +- test/Npgsql.Tests/SchemaTests.cs | 2 +- test/Npgsql.Tests/SecurityTests.cs | 2 +- test/Npgsql.Tests/SnakeCaseNameTranslatorTests.cs | 2 +- test/Npgsql.Tests/SqlQueryParserTests.cs | 2 +- test/Npgsql.Tests/Support/AssemblySetUp.cs | 2 +- test/Npgsql.Tests/Support/PgCancellationRequest.cs | 2 +- test/Npgsql.Tests/TestMetrics.cs | 2 +- test/Npgsql.Tests/TestUtil.cs | 2 +- test/Npgsql.Tests/TracingTests.cs | 2 +- test/Npgsql.Tests/TransactionTests.cs | 2 +- test/Npgsql.Tests/TypeMapperTests.cs | 2 +- 
test/Npgsql.Tests/Types/ArrayTests.cs | 2 +- test/Npgsql.Tests/Types/BitStringTests.cs | 2 +- test/Npgsql.Tests/Types/ByteaTests.cs | 2 +- test/Npgsql.Tests/Types/CompositeTests.cs | 2 +- test/Npgsql.Tests/Types/DateTimeTests.cs | 2 +- test/Npgsql.Tests/Types/EnumTests.cs | 2 +- test/Npgsql.Tests/Types/FullTextSearchTests.cs | 2 +- test/Npgsql.Tests/Types/GeometricTypeTests.cs | 2 +- test/Npgsql.Tests/Types/InternalTypeTests.cs | 2 +- test/Npgsql.Tests/Types/JsonPathTests.cs | 2 +- test/Npgsql.Tests/Types/LTreeTests.cs | 2 +- test/Npgsql.Tests/Types/MiscTypeTests.cs | 2 +- test/Npgsql.Tests/Types/MoneyTests.cs | 2 +- test/Npgsql.Tests/Types/NetworkTypeTests.cs | 2 +- test/Npgsql.Tests/Types/NumericTests.cs | 2 +- test/Npgsql.Tests/Types/NumericTypeTests.cs | 2 +- test/Npgsql.Tests/Types/RangeTests.cs | 2 +- test/Npgsql.Tests/Types/TextTests.cs | 2 +- test/Npgsql.Tests/WriteBufferTests.cs | 2 +- 247 files changed, 247 insertions(+), 247 deletions(-) diff --git a/Directory.Build.props b/Directory.Build.props index 482dbaf297..cdd05be625 100644 --- a/Directory.Build.props +++ b/Directory.Build.props @@ -1,4 +1,4 @@ - + 11.0.0-preview.1 latest diff --git a/Npgsql.slnx.DotSettings b/Npgsql.slnx.DotSettings index 890df2d4be..7aa838ea54 100644 --- a/Npgsql.slnx.DotSettings +++ b/Npgsql.slnx.DotSettings @@ -1,4 +1,4 @@ - + DO_NOT_SHOW DO_NOT_SHOW DO_NOT_SHOW diff --git a/src/Directory.Build.props b/src/Directory.Build.props index 6e8c5bb19f..aca6ef7b02 100644 --- a/src/Directory.Build.props +++ b/src/Directory.Build.props @@ -1,4 +1,4 @@ - + diff --git a/src/Npgsql.DependencyInjection/Properties/AssemblyInfo.cs b/src/Npgsql.DependencyInjection/Properties/AssemblyInfo.cs index d128ee1ec1..f30dbdd96f 100644 --- a/src/Npgsql.DependencyInjection/Properties/AssemblyInfo.cs +++ b/src/Npgsql.DependencyInjection/Properties/AssemblyInfo.cs @@ -1,3 +1,3 @@ -using System.Runtime.CompilerServices; +using System.Runtime.CompilerServices; [module: SkipLocalsInit] diff --git 
a/src/Npgsql.GeoJSON/CrsMap.WellKnown.cs b/src/Npgsql.GeoJSON/CrsMap.WellKnown.cs index dda11bd1d7..a2d8cd0217 100644 --- a/src/Npgsql.GeoJSON/CrsMap.WellKnown.cs +++ b/src/Npgsql.GeoJSON/CrsMap.WellKnown.cs @@ -1,4 +1,4 @@ -namespace Npgsql.GeoJSON; +namespace Npgsql.GeoJSON; public partial class CrsMap { diff --git a/src/Npgsql.GeoJSON/Internal/BoundingBoxBuilder.cs b/src/Npgsql.GeoJSON/Internal/BoundingBoxBuilder.cs index c3ea8f271f..eb37a4ba60 100644 --- a/src/Npgsql.GeoJSON/Internal/BoundingBoxBuilder.cs +++ b/src/Npgsql.GeoJSON/Internal/BoundingBoxBuilder.cs @@ -1,4 +1,4 @@ -using GeoJSON.Net.Geometry; +using GeoJSON.Net.Geometry; namespace Npgsql.GeoJSON.Internal; diff --git a/src/Npgsql.GeoJSON/Internal/CrsMapBuilder.cs b/src/Npgsql.GeoJSON/Internal/CrsMapBuilder.cs index 95f45d5db3..a43300b6ef 100644 --- a/src/Npgsql.GeoJSON/Internal/CrsMapBuilder.cs +++ b/src/Npgsql.GeoJSON/Internal/CrsMapBuilder.cs @@ -1,4 +1,4 @@ -using System; +using System; namespace Npgsql.GeoJSON.Internal; diff --git a/src/Npgsql.GeoJSON/Internal/GeoJSONConverter.cs b/src/Npgsql.GeoJSON/Internal/GeoJSONConverter.cs index 5d54d16194..22c527f23a 100644 --- a/src/Npgsql.GeoJSON/Internal/GeoJSONConverter.cs +++ b/src/Npgsql.GeoJSON/Internal/GeoJSONConverter.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Buffers.Binary; using System.Collections.Concurrent; using System.Collections.ObjectModel; diff --git a/src/Npgsql.GeoJSON/Npgsql.GeoJSON.csproj b/src/Npgsql.GeoJSON/Npgsql.GeoJSON.csproj index 824c5b79e6..136d4b5635 100644 --- a/src/Npgsql.GeoJSON/Npgsql.GeoJSON.csproj +++ b/src/Npgsql.GeoJSON/Npgsql.GeoJSON.csproj @@ -1,4 +1,4 @@ - + Yoh Deadfall;Shay Rojansky GeoJSON plugin for Npgsql, allowing mapping of PostGIS geometry types to GeoJSON types. 
diff --git a/src/Npgsql.GeoJSON/NpgsqlGeoJSONExtensions.cs b/src/Npgsql.GeoJSON/NpgsqlGeoJSONExtensions.cs index 9651004a86..4984148f0f 100644 --- a/src/Npgsql.GeoJSON/NpgsqlGeoJSONExtensions.cs +++ b/src/Npgsql.GeoJSON/NpgsqlGeoJSONExtensions.cs @@ -1,4 +1,4 @@ -using Npgsql.GeoJSON; +using Npgsql.GeoJSON; using Npgsql.GeoJSON.Internal; using Npgsql.TypeMapping; diff --git a/src/Npgsql.GeoJSON/Properties/AssemblyInfo.cs b/src/Npgsql.GeoJSON/Properties/AssemblyInfo.cs index d128ee1ec1..f30dbdd96f 100644 --- a/src/Npgsql.GeoJSON/Properties/AssemblyInfo.cs +++ b/src/Npgsql.GeoJSON/Properties/AssemblyInfo.cs @@ -1,3 +1,3 @@ -using System.Runtime.CompilerServices; +using System.Runtime.CompilerServices; [module: SkipLocalsInit] diff --git a/src/Npgsql.Json.NET/Internal/JsonNetJsonConverter.cs b/src/Npgsql.Json.NET/Internal/JsonNetJsonConverter.cs index 10126d25f9..b365f65a53 100644 --- a/src/Npgsql.Json.NET/Internal/JsonNetJsonConverter.cs +++ b/src/Npgsql.Json.NET/Internal/JsonNetJsonConverter.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Globalization; using System.IO; using System.Text; diff --git a/src/Npgsql.Json.NET/Npgsql.Json.NET.csproj b/src/Npgsql.Json.NET/Npgsql.Json.NET.csproj index df6b2ccef6..32c6cb212f 100644 --- a/src/Npgsql.Json.NET/Npgsql.Json.NET.csproj +++ b/src/Npgsql.Json.NET/Npgsql.Json.NET.csproj @@ -1,4 +1,4 @@ - + Shay Rojansky Json.NET plugin for Npgsql, allowing transparent serialization/deserialization of JSON objects directly to and from the database. 
diff --git a/src/Npgsql.Json.NET/NpgsqlJsonNetExtensions.cs b/src/Npgsql.Json.NET/NpgsqlJsonNetExtensions.cs index 89c8d21603..e427a3a0fb 100644 --- a/src/Npgsql.Json.NET/NpgsqlJsonNetExtensions.cs +++ b/src/Npgsql.Json.NET/NpgsqlJsonNetExtensions.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Diagnostics.CodeAnalysis; using Npgsql.TypeMapping; using NpgsqlTypes; diff --git a/src/Npgsql.Json.NET/Properties/AssemblyInfo.cs b/src/Npgsql.Json.NET/Properties/AssemblyInfo.cs index d128ee1ec1..f30dbdd96f 100644 --- a/src/Npgsql.Json.NET/Properties/AssemblyInfo.cs +++ b/src/Npgsql.Json.NET/Properties/AssemblyInfo.cs @@ -1,3 +1,3 @@ -using System.Runtime.CompilerServices; +using System.Runtime.CompilerServices; [module: SkipLocalsInit] diff --git a/src/Npgsql.NetTopologySuite/Npgsql.NetTopologySuite.csproj b/src/Npgsql.NetTopologySuite/Npgsql.NetTopologySuite.csproj index 91a4c268a0..cce977ba3c 100644 --- a/src/Npgsql.NetTopologySuite/Npgsql.NetTopologySuite.csproj +++ b/src/Npgsql.NetTopologySuite/Npgsql.NetTopologySuite.csproj @@ -1,4 +1,4 @@ - + Shay Rojansky;Yoh Deadfall NetTopologySuite plugin for Npgsql, allowing mapping of PostGIS geometry types to NetTopologySuite types. 
diff --git a/src/Npgsql.NetTopologySuite/NpgsqlNetTopologySuiteExtensions.cs b/src/Npgsql.NetTopologySuite/NpgsqlNetTopologySuiteExtensions.cs index 76afcf886c..ea6af4bcdf 100644 --- a/src/Npgsql.NetTopologySuite/NpgsqlNetTopologySuiteExtensions.cs +++ b/src/Npgsql.NetTopologySuite/NpgsqlNetTopologySuiteExtensions.cs @@ -1,4 +1,4 @@ -using NetTopologySuite.Geometries; +using NetTopologySuite.Geometries; using Npgsql.NetTopologySuite.Internal; using Npgsql.TypeMapping; diff --git a/src/Npgsql.NetTopologySuite/Properties/AssemblyInfo.cs b/src/Npgsql.NetTopologySuite/Properties/AssemblyInfo.cs index d128ee1ec1..f30dbdd96f 100644 --- a/src/Npgsql.NetTopologySuite/Properties/AssemblyInfo.cs +++ b/src/Npgsql.NetTopologySuite/Properties/AssemblyInfo.cs @@ -1,3 +1,3 @@ -using System.Runtime.CompilerServices; +using System.Runtime.CompilerServices; [module: SkipLocalsInit] diff --git a/src/Npgsql.NodaTime/Npgsql.NodaTime.csproj b/src/Npgsql.NodaTime/Npgsql.NodaTime.csproj index 1fd5d4b767..20f1107e34 100644 --- a/src/Npgsql.NodaTime/Npgsql.NodaTime.csproj +++ b/src/Npgsql.NodaTime/Npgsql.NodaTime.csproj @@ -1,4 +1,4 @@ - + Shay Rojansky NodaTime plugin for Npgsql, allowing mapping of PostgreSQL date/time types to NodaTime types. 
diff --git a/src/Npgsql.NodaTime/NpgsqlNodaTimeExtensions.cs b/src/Npgsql.NodaTime/NpgsqlNodaTimeExtensions.cs index 585143f3fe..ba3a0225ef 100644 --- a/src/Npgsql.NodaTime/NpgsqlNodaTimeExtensions.cs +++ b/src/Npgsql.NodaTime/NpgsqlNodaTimeExtensions.cs @@ -1,4 +1,4 @@ -using Npgsql.NodaTime.Internal; +using Npgsql.NodaTime.Internal; using Npgsql.TypeMapping; // ReSharper disable once CheckNamespace diff --git a/src/Npgsql.NodaTime/Properties/AssemblyInfo.cs b/src/Npgsql.NodaTime/Properties/AssemblyInfo.cs index 2582e7fb33..290b6c190e 100644 --- a/src/Npgsql.NodaTime/Properties/AssemblyInfo.cs +++ b/src/Npgsql.NodaTime/Properties/AssemblyInfo.cs @@ -1,4 +1,4 @@ -using System.Runtime.CompilerServices; +using System.Runtime.CompilerServices; [module: SkipLocalsInit] diff --git a/src/Npgsql.NodaTime/Properties/NpgsqlNodaTimeStrings.Designer.cs b/src/Npgsql.NodaTime/Properties/NpgsqlNodaTimeStrings.Designer.cs index ab29289106..02eb03e26d 100644 --- a/src/Npgsql.NodaTime/Properties/NpgsqlNodaTimeStrings.Designer.cs +++ b/src/Npgsql.NodaTime/Properties/NpgsqlNodaTimeStrings.Designer.cs @@ -1,4 +1,4 @@ -//------------------------------------------------------------------------------ +//------------------------------------------------------------------------------ // // This code was generated by a tool. 
// diff --git a/src/Npgsql.OpenTelemetry/MeterProviderBuilderExtensions.cs b/src/Npgsql.OpenTelemetry/MeterProviderBuilderExtensions.cs index 90f81c4cc3..2b48f0b52c 100644 --- a/src/Npgsql.OpenTelemetry/MeterProviderBuilderExtensions.cs +++ b/src/Npgsql.OpenTelemetry/MeterProviderBuilderExtensions.cs @@ -1,4 +1,4 @@ -using System; +using System; using OpenTelemetry.Metrics; // ReSharper disable once CheckNamespace diff --git a/src/Npgsql.OpenTelemetry/Properties/AssemblyInfo.cs b/src/Npgsql.OpenTelemetry/Properties/AssemblyInfo.cs index d128ee1ec1..f30dbdd96f 100644 --- a/src/Npgsql.OpenTelemetry/Properties/AssemblyInfo.cs +++ b/src/Npgsql.OpenTelemetry/Properties/AssemblyInfo.cs @@ -1,3 +1,3 @@ -using System.Runtime.CompilerServices; +using System.Runtime.CompilerServices; [module: SkipLocalsInit] diff --git a/src/Npgsql.OpenTelemetry/TracerProviderBuilderExtensions.cs b/src/Npgsql.OpenTelemetry/TracerProviderBuilderExtensions.cs index 1568d2d080..f4fabc920b 100644 --- a/src/Npgsql.OpenTelemetry/TracerProviderBuilderExtensions.cs +++ b/src/Npgsql.OpenTelemetry/TracerProviderBuilderExtensions.cs @@ -1,4 +1,4 @@ -using System; +using System; using OpenTelemetry.Trace; // ReSharper disable once CheckNamespace diff --git a/src/Npgsql/BackendMessages/AuthenticationMessages.cs b/src/Npgsql/BackendMessages/AuthenticationMessages.cs index c52da80d33..fe8e6edf5b 100644 --- a/src/Npgsql/BackendMessages/AuthenticationMessages.cs +++ b/src/Npgsql/BackendMessages/AuthenticationMessages.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Generic; using Microsoft.Extensions.Logging; using Npgsql.Internal; diff --git a/src/Npgsql/BackendMessages/BackendKeyDataMessage.cs b/src/Npgsql/BackendMessages/BackendKeyDataMessage.cs index 2140048c38..cf72e619b2 100644 --- a/src/Npgsql/BackendMessages/BackendKeyDataMessage.cs +++ b/src/Npgsql/BackendMessages/BackendKeyDataMessage.cs @@ -1,4 +1,4 @@ -using Npgsql.Internal; +using Npgsql.Internal; namespace 
Npgsql.BackendMessages; diff --git a/src/Npgsql/BackendMessages/BindCompleteMessage.cs b/src/Npgsql/BackendMessages/BindCompleteMessage.cs index f6dbfce1bb..d4a9f2b2d6 100644 --- a/src/Npgsql/BackendMessages/BindCompleteMessage.cs +++ b/src/Npgsql/BackendMessages/BindCompleteMessage.cs @@ -1,4 +1,4 @@ -namespace Npgsql.BackendMessages; +namespace Npgsql.BackendMessages; sealed class BindCompleteMessage : IBackendMessage { diff --git a/src/Npgsql/BackendMessages/CloseCompletedMessage.cs b/src/Npgsql/BackendMessages/CloseCompletedMessage.cs index 9443fd3e97..522b3c46e3 100644 --- a/src/Npgsql/BackendMessages/CloseCompletedMessage.cs +++ b/src/Npgsql/BackendMessages/CloseCompletedMessage.cs @@ -1,4 +1,4 @@ -namespace Npgsql.BackendMessages; +namespace Npgsql.BackendMessages; sealed class CloseCompletedMessage : IBackendMessage { diff --git a/src/Npgsql/BackendMessages/CommandCompleteMessage.cs b/src/Npgsql/BackendMessages/CommandCompleteMessage.cs index 98154d1a7e..91bc43fff1 100644 --- a/src/Npgsql/BackendMessages/CommandCompleteMessage.cs +++ b/src/Npgsql/BackendMessages/CommandCompleteMessage.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Buffers.Text; using Npgsql.Internal; diff --git a/src/Npgsql/BackendMessages/CopyMessages.cs b/src/Npgsql/BackendMessages/CopyMessages.cs index e7d4d6935c..84dd271617 100644 --- a/src/Npgsql/BackendMessages/CopyMessages.cs +++ b/src/Npgsql/BackendMessages/CopyMessages.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Generic; using Npgsql.Internal; diff --git a/src/Npgsql/BackendMessages/DataRowMessage.cs b/src/Npgsql/BackendMessages/DataRowMessage.cs index b4fddf9789..b4b4b40279 100644 --- a/src/Npgsql/BackendMessages/DataRowMessage.cs +++ b/src/Npgsql/BackendMessages/DataRowMessage.cs @@ -1,4 +1,4 @@ -namespace Npgsql.BackendMessages; +namespace Npgsql.BackendMessages; /// /// DataRow is special in that it does not parse the actual contents of the backend message, diff --git 
a/src/Npgsql/BackendMessages/EmptyQueryMessage.cs b/src/Npgsql/BackendMessages/EmptyQueryMessage.cs index ef190f3678..d9f57d5189 100644 --- a/src/Npgsql/BackendMessages/EmptyQueryMessage.cs +++ b/src/Npgsql/BackendMessages/EmptyQueryMessage.cs @@ -1,4 +1,4 @@ -namespace Npgsql.BackendMessages; +namespace Npgsql.BackendMessages; sealed class EmptyQueryMessage : IBackendMessage { diff --git a/src/Npgsql/BackendMessages/ErrorOrNoticeMessage.cs b/src/Npgsql/BackendMessages/ErrorOrNoticeMessage.cs index 8a22139a94..f9cf4c8575 100644 --- a/src/Npgsql/BackendMessages/ErrorOrNoticeMessage.cs +++ b/src/Npgsql/BackendMessages/ErrorOrNoticeMessage.cs @@ -1,4 +1,4 @@ -using System; +using System; using Microsoft.Extensions.Logging; using Npgsql.Internal; diff --git a/src/Npgsql/BackendMessages/NoDataMessage.cs b/src/Npgsql/BackendMessages/NoDataMessage.cs index 884d5c4d5e..9ff7176cbe 100644 --- a/src/Npgsql/BackendMessages/NoDataMessage.cs +++ b/src/Npgsql/BackendMessages/NoDataMessage.cs @@ -1,4 +1,4 @@ -namespace Npgsql.BackendMessages; +namespace Npgsql.BackendMessages; sealed class NoDataMessage : IBackendMessage { diff --git a/src/Npgsql/BackendMessages/ParameterDescriptionMessage.cs b/src/Npgsql/BackendMessages/ParameterDescriptionMessage.cs index 16c4687da5..3e98ab96f3 100644 --- a/src/Npgsql/BackendMessages/ParameterDescriptionMessage.cs +++ b/src/Npgsql/BackendMessages/ParameterDescriptionMessage.cs @@ -1,4 +1,4 @@ -using System.Collections.Generic; +using System.Collections.Generic; using Npgsql.Internal; namespace Npgsql.BackendMessages; diff --git a/src/Npgsql/BackendMessages/ParseCompleteMessage.cs b/src/Npgsql/BackendMessages/ParseCompleteMessage.cs index bb011f821a..406bd9e194 100644 --- a/src/Npgsql/BackendMessages/ParseCompleteMessage.cs +++ b/src/Npgsql/BackendMessages/ParseCompleteMessage.cs @@ -1,4 +1,4 @@ -namespace Npgsql.BackendMessages; +namespace Npgsql.BackendMessages; sealed class ParseCompleteMessage : IBackendMessage { diff --git 
a/src/Npgsql/BackendMessages/PortalSuspendedMessage.cs b/src/Npgsql/BackendMessages/PortalSuspendedMessage.cs index 5da91ea831..96663eaa45 100644 --- a/src/Npgsql/BackendMessages/PortalSuspendedMessage.cs +++ b/src/Npgsql/BackendMessages/PortalSuspendedMessage.cs @@ -1,4 +1,4 @@ -namespace Npgsql.BackendMessages; +namespace Npgsql.BackendMessages; sealed class PortalSuspendedMessage : IBackendMessage { diff --git a/src/Npgsql/BackendMessages/ReadyForQueryMessage.cs b/src/Npgsql/BackendMessages/ReadyForQueryMessage.cs index 4d7225c422..64c9219342 100644 --- a/src/Npgsql/BackendMessages/ReadyForQueryMessage.cs +++ b/src/Npgsql/BackendMessages/ReadyForQueryMessage.cs @@ -1,4 +1,4 @@ -using Npgsql.Internal; +using Npgsql.Internal; namespace Npgsql.BackendMessages; diff --git a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs index f27ba80b8d..09cd464650 100644 --- a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs +++ b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Generic; using System.Diagnostics; using System.Globalization; diff --git a/src/Npgsql/GlobalSuppressions.cs b/src/Npgsql/GlobalSuppressions.cs index 580c453b9d..6507c11514 100644 --- a/src/Npgsql/GlobalSuppressions.cs +++ b/src/Npgsql/GlobalSuppressions.cs @@ -1,4 +1,4 @@ - + // This file is used by Code Analysis to maintain SuppressMessage // attributes that are applied to this project. 
// Project-level suppressions either have no target or are given diff --git a/src/Npgsql/ICancelable.cs b/src/Npgsql/ICancelable.cs index 460f17c171..27fe829563 100644 --- a/src/Npgsql/ICancelable.cs +++ b/src/Npgsql/ICancelable.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Threading.Tasks; namespace Npgsql; diff --git a/src/Npgsql/Internal/Converters/Networking/IPNetworkConverter.cs b/src/Npgsql/Internal/Converters/Networking/IPNetworkConverter.cs index 0f1cac0935..a76e645d8c 100644 --- a/src/Npgsql/Internal/Converters/Networking/IPNetworkConverter.cs +++ b/src/Npgsql/Internal/Converters/Networking/IPNetworkConverter.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Net; // ReSharper disable once CheckNamespace diff --git a/src/Npgsql/Internal/INpgsqlDatabaseInfoFactory.cs b/src/Npgsql/Internal/INpgsqlDatabaseInfoFactory.cs index ea3f0ad525..6d388c9a80 100644 --- a/src/Npgsql/Internal/INpgsqlDatabaseInfoFactory.cs +++ b/src/Npgsql/Internal/INpgsqlDatabaseInfoFactory.cs @@ -1,4 +1,4 @@ -using System.Diagnostics.CodeAnalysis; +using System.Diagnostics.CodeAnalysis; using System.Threading.Tasks; using Npgsql.Util; diff --git a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs index 1c2ef6c3cf..9d30a1dac8 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.Auth.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.Auth.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Generic; using System.Diagnostics; using System.Net.Security; diff --git a/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs b/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs index 5c700ac7e3..0bd6dc3992 100644 --- a/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs +++ b/src/Npgsql/Internal/NpgsqlDatabaseInfo.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Generic; using System.Diagnostics.CodeAnalysis; using System.Threading.Tasks; diff --git a/src/Npgsql/Internal/NpgsqlReadBuffer.Stream.cs 
b/src/Npgsql/Internal/NpgsqlReadBuffer.Stream.cs index 66f53503ed..eeee079c86 100644 --- a/src/Npgsql/Internal/NpgsqlReadBuffer.Stream.cs +++ b/src/Npgsql/Internal/NpgsqlReadBuffer.Stream.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Diagnostics; using System.IO; using System.Threading; diff --git a/src/Npgsql/Internal/NpgsqlReadBuffer.cs b/src/Npgsql/Internal/NpgsqlReadBuffer.cs index 66221366d0..47e9b3515e 100644 --- a/src/Npgsql/Internal/NpgsqlReadBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlReadBuffer.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Buffers; using System.Buffers.Binary; using System.Diagnostics; diff --git a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs index 3c0d91a148..6db8974e1e 100644 --- a/src/Npgsql/Internal/NpgsqlWriteBuffer.cs +++ b/src/Npgsql/Internal/NpgsqlWriteBuffer.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Buffers.Binary; using System.Diagnostics; using System.IO; diff --git a/src/Npgsql/Internal/TransportSecurityHandler.cs b/src/Npgsql/Internal/TransportSecurityHandler.cs index fbe8cad72e..5776bcf993 100644 --- a/src/Npgsql/Internal/TransportSecurityHandler.cs +++ b/src/Npgsql/Internal/TransportSecurityHandler.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Security.Cryptography.X509Certificates; using System.Threading; using System.Threading.Tasks; diff --git a/src/Npgsql/KerberosUsernameProvider.cs b/src/Npgsql/KerberosUsernameProvider.cs index 6963e139f0..5c1234bc74 100644 --- a/src/Npgsql/KerberosUsernameProvider.cs +++ b/src/Npgsql/KerberosUsernameProvider.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Diagnostics; using System.IO; using System.Threading; diff --git a/src/Npgsql/MultiHostDataSourceWrapper.cs b/src/Npgsql/MultiHostDataSourceWrapper.cs index 432875ae67..cc54e06b29 100644 --- a/src/Npgsql/MultiHostDataSourceWrapper.cs +++ b/src/Npgsql/MultiHostDataSourceWrapper.cs @@ -1,4 +1,4 @@ -using 
Npgsql.Internal; +using Npgsql.Internal; using Npgsql.Util; using System.Diagnostics.CodeAnalysis; using System.Threading; diff --git a/src/Npgsql/NameTranslation/INpgsqlNameTranslator.cs b/src/Npgsql/NameTranslation/INpgsqlNameTranslator.cs index 1fa188a91e..66b62dc883 100644 --- a/src/Npgsql/NameTranslation/INpgsqlNameTranslator.cs +++ b/src/Npgsql/NameTranslation/INpgsqlNameTranslator.cs @@ -1,4 +1,4 @@ -namespace Npgsql; +namespace Npgsql; /// /// A component which translates a CLR name (e.g. SomeClass) into a database name (e.g. some_class) diff --git a/src/Npgsql/NameTranslation/NpgsqlNullNameTranslator.cs b/src/Npgsql/NameTranslation/NpgsqlNullNameTranslator.cs index f754169a72..4016825871 100644 --- a/src/Npgsql/NameTranslation/NpgsqlNullNameTranslator.cs +++ b/src/Npgsql/NameTranslation/NpgsqlNullNameTranslator.cs @@ -1,4 +1,4 @@ -using System; +using System; namespace Npgsql.NameTranslation; diff --git a/src/Npgsql/NameTranslation/NpgsqlSnakeCaseNameTranslator.cs b/src/Npgsql/NameTranslation/NpgsqlSnakeCaseNameTranslator.cs index 998b5f6420..805c5d6b61 100644 --- a/src/Npgsql/NameTranslation/NpgsqlSnakeCaseNameTranslator.cs +++ b/src/Npgsql/NameTranslation/NpgsqlSnakeCaseNameTranslator.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Generic; using System.Globalization; using System.Text; diff --git a/src/Npgsql/Npgsql.csproj b/src/Npgsql/Npgsql.csproj index d06e91fcc8..5ebaefcdf5 100644 --- a/src/Npgsql/Npgsql.csproj +++ b/src/Npgsql/Npgsql.csproj @@ -1,4 +1,4 @@ - + Shay Rojansky;Nikita Kazmin;Brar Piening;Nino Floris;Yoh Deadfall;;Austin Drenski;Emil Lenngren;Francisco Figueiredo Jr.;Kenji Uno diff --git a/src/Npgsql/NpgsqlActivitySource.cs b/src/Npgsql/NpgsqlActivitySource.cs index be4e257c48..f2a005f02d 100644 --- a/src/Npgsql/NpgsqlActivitySource.cs +++ b/src/Npgsql/NpgsqlActivitySource.cs @@ -1,4 +1,4 @@ -using Npgsql.Internal; +using Npgsql.Internal; using System; using System.Data; using System.Diagnostics; diff --git 
a/src/Npgsql/NpgsqlBatchCommand.cs b/src/Npgsql/NpgsqlBatchCommand.cs index 2812fdbead..9534a54b17 100644 --- a/src/Npgsql/NpgsqlBatchCommand.cs +++ b/src/Npgsql/NpgsqlBatchCommand.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Buffers; using System.Collections.Generic; using System.Data; diff --git a/src/Npgsql/NpgsqlCommandBuilder.cs b/src/Npgsql/NpgsqlCommandBuilder.cs index d9a698c2ef..878e194d8e 100644 --- a/src/Npgsql/NpgsqlCommandBuilder.cs +++ b/src/Npgsql/NpgsqlCommandBuilder.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Data; using System.Data.Common; using System.Diagnostics.CodeAnalysis; diff --git a/src/Npgsql/NpgsqlException.cs b/src/Npgsql/NpgsqlException.cs index 9e2dfe9ee0..d437df72b6 100644 --- a/src/Npgsql/NpgsqlException.cs +++ b/src/Npgsql/NpgsqlException.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Data.Common; using System.IO; using System.Net.Sockets; diff --git a/src/Npgsql/NpgsqlLoggingConfiguration.cs b/src/Npgsql/NpgsqlLoggingConfiguration.cs index 745cf476cb..988b8b730e 100644 --- a/src/Npgsql/NpgsqlLoggingConfiguration.cs +++ b/src/Npgsql/NpgsqlLoggingConfiguration.cs @@ -1,4 +1,4 @@ -using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Logging; using Microsoft.Extensions.Logging.Abstractions; namespace Npgsql; diff --git a/src/Npgsql/NpgsqlMetricsOptions.cs b/src/Npgsql/NpgsqlMetricsOptions.cs index b4da63dc7a..b0e7332700 100644 --- a/src/Npgsql/NpgsqlMetricsOptions.cs +++ b/src/Npgsql/NpgsqlMetricsOptions.cs @@ -1,4 +1,4 @@ -namespace Npgsql; +namespace Npgsql; /// /// Options to configure Npgsql's support for OpenTelemetry metrics. 
diff --git a/src/Npgsql/NpgsqlMultiHostDataSource.cs b/src/Npgsql/NpgsqlMultiHostDataSource.cs index 4e7d63bddb..4e6f42eeae 100644 --- a/src/Npgsql/NpgsqlMultiHostDataSource.cs +++ b/src/Npgsql/NpgsqlMultiHostDataSource.cs @@ -1,4 +1,4 @@ -using Npgsql.Internal; +using Npgsql.Internal; using Npgsql.Util; using System; using System.Collections.Generic; diff --git a/src/Npgsql/NpgsqlNestedDataReader.cs b/src/Npgsql/NpgsqlNestedDataReader.cs index 14ec9945a1..b7e8a7d242 100644 --- a/src/Npgsql/NpgsqlNestedDataReader.cs +++ b/src/Npgsql/NpgsqlNestedDataReader.cs @@ -1,4 +1,4 @@ -using Npgsql.Internal; +using Npgsql.Internal; using Npgsql.PostgresTypes; using System; using System.Collections; diff --git a/src/Npgsql/NpgsqlOperationInProgressException.cs b/src/Npgsql/NpgsqlOperationInProgressException.cs index 74e7e646ff..052167ced3 100644 --- a/src/Npgsql/NpgsqlOperationInProgressException.cs +++ b/src/Npgsql/NpgsqlOperationInProgressException.cs @@ -1,4 +1,4 @@ -using Npgsql.Internal; +using Npgsql.Internal; namespace Npgsql; diff --git a/src/Npgsql/NpgsqlParameter`.cs b/src/Npgsql/NpgsqlParameter`.cs index 90cbe7f737..c406053ea3 100644 --- a/src/Npgsql/NpgsqlParameter`.cs +++ b/src/Npgsql/NpgsqlParameter`.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Data; using System.Diagnostics; using System.Runtime.CompilerServices; diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlInterval.cs b/src/Npgsql/NpgsqlTypes/NpgsqlInterval.cs index f4b51ba4a9..221a82cb27 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlInterval.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlInterval.cs @@ -1,4 +1,4 @@ -using System; +using System; // ReSharper disable once CheckNamespace namespace NpgsqlTypes; diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlLogSequenceNumber.cs b/src/Npgsql/NpgsqlTypes/NpgsqlLogSequenceNumber.cs index b9fc6da358..3520d8f734 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlLogSequenceNumber.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlLogSequenceNumber.cs @@ -1,4 +1,4 @@ -using System; +using 
System; using System.Globalization; // ReSharper disable once CheckNamespace diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlRange.cs b/src/Npgsql/NpgsqlTypes/NpgsqlRange.cs index 23b2578c13..a99dd2c537 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlRange.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlRange.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.ComponentModel; using System.Diagnostics.CodeAnalysis; using System.Globalization; diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs b/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs index 96585832f3..3e9b5995e1 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlTsQuery.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Generic; using System.Text; diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs b/src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs index 4dd1e28b08..b534c05755 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlTsVector.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections; using System.Collections.Generic; using System.Text; diff --git a/src/Npgsql/NpgsqlTypes/PgNameAttribute.cs b/src/Npgsql/NpgsqlTypes/PgNameAttribute.cs index 48cbc955e4..ffb5f3af48 100644 --- a/src/Npgsql/NpgsqlTypes/PgNameAttribute.cs +++ b/src/Npgsql/NpgsqlTypes/PgNameAttribute.cs @@ -1,4 +1,4 @@ -using System; +using System; // ReSharper disable once CheckNamespace namespace NpgsqlTypes; diff --git a/src/Npgsql/PgPassFile.cs b/src/Npgsql/PgPassFile.cs index 364d2b7409..3e1c1605be 100644 --- a/src/Npgsql/PgPassFile.cs +++ b/src/Npgsql/PgPassFile.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Generic; using System.IO; using System.Text; diff --git a/src/Npgsql/PoolManager.cs b/src/Npgsql/PoolManager.cs index d1086b5196..e93d6a856d 100644 --- a/src/Npgsql/PoolManager.cs +++ b/src/Npgsql/PoolManager.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Concurrent; namespace Npgsql; 
diff --git a/src/Npgsql/PoolingDataSource.cs b/src/Npgsql/PoolingDataSource.cs index 4de3cfd928..813fff6b22 100644 --- a/src/Npgsql/PoolingDataSource.cs +++ b/src/Npgsql/PoolingDataSource.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.Runtime.CompilerServices; diff --git a/src/Npgsql/PostgresDatabaseInfo.cs b/src/Npgsql/PostgresDatabaseInfo.cs index 6218b0a8d6..7ffe93037e 100644 --- a/src/Npgsql/PostgresDatabaseInfo.cs +++ b/src/Npgsql/PostgresDatabaseInfo.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Generic; using System.Diagnostics; using System.Globalization; diff --git a/src/Npgsql/PostgresErrorCodes.cs b/src/Npgsql/PostgresErrorCodes.cs index 98d878e12b..258d89ca45 100644 --- a/src/Npgsql/PostgresErrorCodes.cs +++ b/src/Npgsql/PostgresErrorCodes.cs @@ -1,4 +1,4 @@ -#pragma warning disable CS1591 // Missing XML comment for publicly visible type or member +#pragma warning disable CS1591 // Missing XML comment for publicly visible type or member using System; diff --git a/src/Npgsql/PostgresMinimalDatabaseInfo.cs b/src/Npgsql/PostgresMinimalDatabaseInfo.cs index d5141db985..dc4906838b 100644 --- a/src/Npgsql/PostgresMinimalDatabaseInfo.cs +++ b/src/Npgsql/PostgresMinimalDatabaseInfo.cs @@ -1,4 +1,4 @@ -using System.Collections.Generic; +using System.Collections.Generic; using System.Threading.Tasks; using Npgsql.Internal; using Npgsql.Internal.Postgres; diff --git a/src/Npgsql/PostgresNotice.cs b/src/Npgsql/PostgresNotice.cs index 6ed9c7f98d..3b619fafdf 100644 --- a/src/Npgsql/PostgresNotice.cs +++ b/src/Npgsql/PostgresNotice.cs @@ -1,4 +1,4 @@ -using System; +using System; using Microsoft.Extensions.Logging; using Npgsql.BackendMessages; using Npgsql.Internal; diff --git a/src/Npgsql/PostgresTypes/PostgresArrayType.cs b/src/Npgsql/PostgresTypes/PostgresArrayType.cs index 2f46d31cf2..f9ed4e9cd2 100644 --- a/src/Npgsql/PostgresTypes/PostgresArrayType.cs +++ 
b/src/Npgsql/PostgresTypes/PostgresArrayType.cs @@ -1,4 +1,4 @@ -using Npgsql.Internal.Postgres; +using Npgsql.Internal.Postgres; namespace Npgsql.PostgresTypes; diff --git a/src/Npgsql/PostgresTypes/PostgresBaseType.cs b/src/Npgsql/PostgresTypes/PostgresBaseType.cs index 11c289b1a8..141abf4064 100644 --- a/src/Npgsql/PostgresTypes/PostgresBaseType.cs +++ b/src/Npgsql/PostgresTypes/PostgresBaseType.cs @@ -1,4 +1,4 @@ - + using Npgsql.Internal.Postgres; namespace Npgsql.PostgresTypes; diff --git a/src/Npgsql/PostgresTypes/PostgresCompositeType.cs b/src/Npgsql/PostgresTypes/PostgresCompositeType.cs index 1663b01ebd..19942ea16e 100644 --- a/src/Npgsql/PostgresTypes/PostgresCompositeType.cs +++ b/src/Npgsql/PostgresTypes/PostgresCompositeType.cs @@ -1,4 +1,4 @@ -using System.Collections.Generic; +using System.Collections.Generic; using Npgsql.Internal.Postgres; namespace Npgsql.PostgresTypes; diff --git a/src/Npgsql/PostgresTypes/PostgresDomainType.cs b/src/Npgsql/PostgresTypes/PostgresDomainType.cs index cab9323015..89ce0350d9 100644 --- a/src/Npgsql/PostgresTypes/PostgresDomainType.cs +++ b/src/Npgsql/PostgresTypes/PostgresDomainType.cs @@ -1,4 +1,4 @@ -using Npgsql.Internal.Postgres; +using Npgsql.Internal.Postgres; namespace Npgsql.PostgresTypes; diff --git a/src/Npgsql/PostgresTypes/PostgresEnumType.cs b/src/Npgsql/PostgresTypes/PostgresEnumType.cs index 2422cb07a2..eb6a1c8f33 100644 --- a/src/Npgsql/PostgresTypes/PostgresEnumType.cs +++ b/src/Npgsql/PostgresTypes/PostgresEnumType.cs @@ -1,4 +1,4 @@ -using System.Collections.Generic; +using System.Collections.Generic; using Npgsql.Internal.Postgres; namespace Npgsql.PostgresTypes; diff --git a/src/Npgsql/PostgresTypes/PostgresFacets.cs b/src/Npgsql/PostgresTypes/PostgresFacets.cs index 4c88724965..14672ed19e 100644 --- a/src/Npgsql/PostgresTypes/PostgresFacets.cs +++ b/src/Npgsql/PostgresTypes/PostgresFacets.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Text; namespace Npgsql.PostgresTypes; diff 
--git a/src/Npgsql/PostgresTypes/PostgresMultirangeType.cs b/src/Npgsql/PostgresTypes/PostgresMultirangeType.cs index 2769df87f8..e22b836c51 100644 --- a/src/Npgsql/PostgresTypes/PostgresMultirangeType.cs +++ b/src/Npgsql/PostgresTypes/PostgresMultirangeType.cs @@ -1,4 +1,4 @@ -using Npgsql.Internal.Postgres; +using Npgsql.Internal.Postgres; namespace Npgsql.PostgresTypes; diff --git a/src/Npgsql/PostgresTypes/PostgresRangeType.cs b/src/Npgsql/PostgresTypes/PostgresRangeType.cs index a26a71afae..cd07a46936 100644 --- a/src/Npgsql/PostgresTypes/PostgresRangeType.cs +++ b/src/Npgsql/PostgresTypes/PostgresRangeType.cs @@ -1,4 +1,4 @@ -using Npgsql.Internal.Postgres; +using Npgsql.Internal.Postgres; namespace Npgsql.PostgresTypes; diff --git a/src/Npgsql/PostgresTypes/PostgresType.cs b/src/Npgsql/PostgresTypes/PostgresType.cs index fc88eb1304..f6e964b368 100644 --- a/src/Npgsql/PostgresTypes/PostgresType.cs +++ b/src/Npgsql/PostgresTypes/PostgresType.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Diagnostics.CodeAnalysis; using Npgsql.Internal.Postgres; diff --git a/src/Npgsql/PostgresTypes/PostgresUnknownType.cs b/src/Npgsql/PostgresTypes/PostgresUnknownType.cs index d7cfc983e9..fa9bf74c54 100644 --- a/src/Npgsql/PostgresTypes/PostgresUnknownType.cs +++ b/src/Npgsql/PostgresTypes/PostgresUnknownType.cs @@ -1,4 +1,4 @@ -using Npgsql.Internal.Postgres; +using Npgsql.Internal.Postgres; namespace Npgsql.PostgresTypes; diff --git a/src/Npgsql/PregeneratedMessages.cs b/src/Npgsql/PregeneratedMessages.cs index 4e6434e9c1..b1812360e6 100644 --- a/src/Npgsql/PregeneratedMessages.cs +++ b/src/Npgsql/PregeneratedMessages.cs @@ -1,4 +1,4 @@ -using System.IO; +using System.IO; using System.Text; using Npgsql.Internal; using Npgsql.Util; diff --git a/src/Npgsql/PreparedStatement.cs b/src/Npgsql/PreparedStatement.cs index 5a5a877eb2..2f63b182e7 100644 --- a/src/Npgsql/PreparedStatement.cs +++ b/src/Npgsql/PreparedStatement.cs @@ -1,4 +1,4 @@ -using System; +using 
System; using System.Collections.Generic; using System.Diagnostics; using System.Text; diff --git a/src/Npgsql/PreparedStatementManager.cs b/src/Npgsql/PreparedStatementManager.cs index 8f80223753..57c4c90af9 100644 --- a/src/Npgsql/PreparedStatementManager.cs +++ b/src/Npgsql/PreparedStatementManager.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Generic; using System.Diagnostics; using System.Text; diff --git a/src/Npgsql/PreparedTextReader.cs b/src/Npgsql/PreparedTextReader.cs index 86668c010f..d9f3dd06b4 100644 --- a/src/Npgsql/PreparedTextReader.cs +++ b/src/Npgsql/PreparedTextReader.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.IO; using System.Threading; using System.Threading.Tasks; diff --git a/src/Npgsql/Properties/AssemblyInfo.cs b/src/Npgsql/Properties/AssemblyInfo.cs index 464a017b15..eab391f2c8 100644 --- a/src/Npgsql/Properties/AssemblyInfo.cs +++ b/src/Npgsql/Properties/AssemblyInfo.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Runtime.CompilerServices; using System.Reflection; using System.Security; diff --git a/src/Npgsql/Properties/NpgsqlStrings.Designer.cs b/src/Npgsql/Properties/NpgsqlStrings.Designer.cs index d0b7839d6c..130ac94f8e 100644 --- a/src/Npgsql/Properties/NpgsqlStrings.Designer.cs +++ b/src/Npgsql/Properties/NpgsqlStrings.Designer.cs @@ -1,4 +1,4 @@ -//------------------------------------------------------------------------------ +//------------------------------------------------------------------------------ // // This code was generated by a tool. 
// diff --git a/src/Npgsql/Replication/Internal/LogicalReplicationConnectionExtensions.cs b/src/Npgsql/Replication/Internal/LogicalReplicationConnectionExtensions.cs index d66a9e55d1..ef066e1158 100644 --- a/src/Npgsql/Replication/Internal/LogicalReplicationConnectionExtensions.cs +++ b/src/Npgsql/Replication/Internal/LogicalReplicationConnectionExtensions.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; using System.Collections.Generic; using System.Runtime.CompilerServices; diff --git a/src/Npgsql/Replication/Internal/LogicalReplicationSlot.cs b/src/Npgsql/Replication/Internal/LogicalReplicationSlot.cs index 5edfa5d823..dddb0d77a4 100644 --- a/src/Npgsql/Replication/Internal/LogicalReplicationSlot.cs +++ b/src/Npgsql/Replication/Internal/LogicalReplicationSlot.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; namespace Npgsql.Replication.Internal; diff --git a/src/Npgsql/Replication/LogicalReplicationConnection.cs b/src/Npgsql/Replication/LogicalReplicationConnection.cs index 7172b8a060..8a71a0c511 100644 --- a/src/Npgsql/Replication/LogicalReplicationConnection.cs +++ b/src/Npgsql/Replication/LogicalReplicationConnection.cs @@ -1,4 +1,4 @@ -namespace Npgsql.Replication; +namespace Npgsql.Replication; /// /// Represents a logical replication connection to a PostgreSQL server. diff --git a/src/Npgsql/Replication/LogicalSlotSnapshotInitMode.cs b/src/Npgsql/Replication/LogicalSlotSnapshotInitMode.cs index 3e71c7ca7b..9c287d431b 100644 --- a/src/Npgsql/Replication/LogicalSlotSnapshotInitMode.cs +++ b/src/Npgsql/Replication/LogicalSlotSnapshotInitMode.cs @@ -1,4 +1,4 @@ -namespace Npgsql.Replication; +namespace Npgsql.Replication; /// /// Decides what to do with the snapshot created during logical slot initialization. 
diff --git a/src/Npgsql/Replication/PgOutput/Messages/BeginMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/BeginMessage.cs index 6fbfcb2c37..e64c6a6275 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/BeginMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/BeginMessage.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; namespace Npgsql.Replication.PgOutput.Messages; diff --git a/src/Npgsql/Replication/PgOutput/Messages/BeginPrepareMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/BeginPrepareMessage.cs index 288bff1e03..74840af38a 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/BeginPrepareMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/BeginPrepareMessage.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; namespace Npgsql.Replication.PgOutput.Messages; diff --git a/src/Npgsql/Replication/PgOutput/Messages/CommitMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/CommitMessage.cs index f2f0b16525..38cc1f1046 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/CommitMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/CommitMessage.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; namespace Npgsql.Replication.PgOutput.Messages; diff --git a/src/Npgsql/Replication/PgOutput/Messages/CommitPreparedMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/CommitPreparedMessage.cs index 7ed189a981..a98284644d 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/CommitPreparedMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/CommitPreparedMessage.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; namespace Npgsql.Replication.PgOutput.Messages; diff --git a/src/Npgsql/Replication/PgOutput/Messages/DeleteMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/DeleteMessage.cs index c1057dabdd..bbca233e6b 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/DeleteMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/DeleteMessage.cs 
@@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; namespace Npgsql.Replication.PgOutput.Messages; diff --git a/src/Npgsql/Replication/PgOutput/Messages/FullDeleteMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/FullDeleteMessage.cs index a426a2b6ad..cb5dec77ec 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/FullDeleteMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/FullDeleteMessage.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; using System.Threading; using System.Threading.Tasks; diff --git a/src/Npgsql/Replication/PgOutput/Messages/FullUpdateMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/FullUpdateMessage.cs index 814780cf37..572095d615 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/FullUpdateMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/FullUpdateMessage.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Threading; using System.Threading.Tasks; using Npgsql.Internal; diff --git a/src/Npgsql/Replication/PgOutput/Messages/IndexUpdateMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/IndexUpdateMessage.cs index 021458140d..26bf38c83c 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/IndexUpdateMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/IndexUpdateMessage.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Threading; using System.Threading.Tasks; using Npgsql.Internal; diff --git a/src/Npgsql/Replication/PgOutput/Messages/InsertMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/InsertMessage.cs index df413f6b21..a11a21de38 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/InsertMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/InsertMessage.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; using System.Threading; using System.Threading.Tasks; diff --git a/src/Npgsql/Replication/PgOutput/Messages/KeyDeleteMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/KeyDeleteMessage.cs index 
9b30b3e1df..5d589f8526 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/KeyDeleteMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/KeyDeleteMessage.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; using System.Threading; using System.Threading.Tasks; diff --git a/src/Npgsql/Replication/PgOutput/Messages/LogicalDecodingMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/LogicalDecodingMessage.cs index 0add6103e6..d49a908dbc 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/LogicalDecodingMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/LogicalDecodingMessage.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.IO; using NpgsqlTypes; diff --git a/src/Npgsql/Replication/PgOutput/Messages/OriginMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/OriginMessage.cs index 8356cc997a..a9be84ea26 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/OriginMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/OriginMessage.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; namespace Npgsql.Replication.PgOutput.Messages; diff --git a/src/Npgsql/Replication/PgOutput/Messages/PgOutputReplicationMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/PgOutputReplicationMessage.cs index 24de9e201f..b2ccdc73c3 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/PgOutputReplicationMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/PgOutputReplicationMessage.cs @@ -1,4 +1,4 @@ -namespace Npgsql.Replication.PgOutput.Messages; +namespace Npgsql.Replication.PgOutput.Messages; /// /// The base class of all Logical Replication Protocol Messages diff --git a/src/Npgsql/Replication/PgOutput/Messages/PrepareMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/PrepareMessage.cs index 16cd8fa36b..0f01b4c4d9 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/PrepareMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/PrepareMessage.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; 
using System; namespace Npgsql.Replication.PgOutput.Messages; diff --git a/src/Npgsql/Replication/PgOutput/Messages/PrepareMessageBase.cs b/src/Npgsql/Replication/PgOutput/Messages/PrepareMessageBase.cs index 0eda1b18d3..98f4e208f1 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/PrepareMessageBase.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/PrepareMessageBase.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; namespace Npgsql.Replication.PgOutput.Messages; diff --git a/src/Npgsql/Replication/PgOutput/Messages/PreparedTransactionControlMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/PreparedTransactionControlMessage.cs index 04f98be920..28885629c3 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/PreparedTransactionControlMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/PreparedTransactionControlMessage.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; namespace Npgsql.Replication.PgOutput.Messages; diff --git a/src/Npgsql/Replication/PgOutput/Messages/RelationMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/RelationMessage.cs index 85d83debb7..aa17bd766b 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/RelationMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/RelationMessage.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; using System.Collections.Generic; using Npgsql.BackendMessages; diff --git a/src/Npgsql/Replication/PgOutput/Messages/RelationMessageColumn.cs b/src/Npgsql/Replication/PgOutput/Messages/RelationMessageColumn.cs index 4692e4e6c4..c7806a8b1c 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/RelationMessageColumn.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/RelationMessageColumn.cs @@ -1,4 +1,4 @@ -namespace Npgsql.Replication.PgOutput.Messages; +namespace Npgsql.Replication.PgOutput.Messages; /// /// Represents a column in a Logical Replication Protocol relation message diff --git 
a/src/Npgsql/Replication/PgOutput/Messages/RollbackPreparedMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/RollbackPreparedMessage.cs index 681e7af4b6..c3f652d5ee 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/RollbackPreparedMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/RollbackPreparedMessage.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; namespace Npgsql.Replication.PgOutput.Messages; diff --git a/src/Npgsql/Replication/PgOutput/Messages/StreamAbortMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/StreamAbortMessage.cs index 20e5c4d2e3..4c9a24b06c 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/StreamAbortMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/StreamAbortMessage.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; namespace Npgsql.Replication.PgOutput.Messages; diff --git a/src/Npgsql/Replication/PgOutput/Messages/StreamCommitMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/StreamCommitMessage.cs index ae6aacc584..7dba3c3027 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/StreamCommitMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/StreamCommitMessage.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; namespace Npgsql.Replication.PgOutput.Messages; diff --git a/src/Npgsql/Replication/PgOutput/Messages/StreamPrepareMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/StreamPrepareMessage.cs index 4947e0d046..d54dedf9f2 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/StreamPrepareMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/StreamPrepareMessage.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; namespace Npgsql.Replication.PgOutput.Messages; diff --git a/src/Npgsql/Replication/PgOutput/Messages/StreamStartMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/StreamStartMessage.cs index 4b0ace1cf7..c6aeb86276 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/StreamStartMessage.cs 
+++ b/src/Npgsql/Replication/PgOutput/Messages/StreamStartMessage.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; namespace Npgsql.Replication.PgOutput.Messages; diff --git a/src/Npgsql/Replication/PgOutput/Messages/StreamStopMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/StreamStopMessage.cs index f3fd165a1e..673ba5a6d5 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/StreamStopMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/StreamStopMessage.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; namespace Npgsql.Replication.PgOutput.Messages; diff --git a/src/Npgsql/Replication/PgOutput/Messages/TransactionControlMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/TransactionControlMessage.cs index 4c3c901b2f..6c039e5475 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/TransactionControlMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/TransactionControlMessage.cs @@ -1,4 +1,4 @@ -using System; +using System; using NpgsqlTypes; namespace Npgsql.Replication.PgOutput.Messages; diff --git a/src/Npgsql/Replication/PgOutput/Messages/TransactionalMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/TransactionalMessage.cs index d5aac683a2..307e45a355 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/TransactionalMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/TransactionalMessage.cs @@ -1,4 +1,4 @@ -using System; +using System; using NpgsqlTypes; namespace Npgsql.Replication.PgOutput.Messages; diff --git a/src/Npgsql/Replication/PgOutput/Messages/TruncateMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/TruncateMessage.cs index 47837f93f3..b2ae368aa6 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/TruncateMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/TruncateMessage.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; using System.Collections.Generic; diff --git a/src/Npgsql/Replication/PgOutput/Messages/TypeMessage.cs 
b/src/Npgsql/Replication/PgOutput/Messages/TypeMessage.cs index 5e188de4fe..25747c676a 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/TypeMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/TypeMessage.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; namespace Npgsql.Replication.PgOutput.Messages; diff --git a/src/Npgsql/Replication/PgOutput/Messages/UpdateMessage.cs b/src/Npgsql/Replication/PgOutput/Messages/UpdateMessage.cs index 135ff0ddaf..a891f66ad0 100644 --- a/src/Npgsql/Replication/PgOutput/Messages/UpdateMessage.cs +++ b/src/Npgsql/Replication/PgOutput/Messages/UpdateMessage.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; using System.Collections.Generic; using System.Diagnostics; diff --git a/src/Npgsql/Replication/PgOutput/PgOutputConnectionExtensions.cs b/src/Npgsql/Replication/PgOutput/PgOutputConnectionExtensions.cs index c67af16d58..8cd5c6f3d1 100644 --- a/src/Npgsql/Replication/PgOutput/PgOutputConnectionExtensions.cs +++ b/src/Npgsql/Replication/PgOutput/PgOutputConnectionExtensions.cs @@ -1,4 +1,4 @@ -using System.Collections.Generic; +using System.Collections.Generic; using System.Threading; using System.Threading.Tasks; using NpgsqlTypes; diff --git a/src/Npgsql/Replication/PgOutput/PgOutputProtocolVersion.cs b/src/Npgsql/Replication/PgOutput/PgOutputProtocolVersion.cs index fd717b6791..c575a9c85e 100644 --- a/src/Npgsql/Replication/PgOutput/PgOutputProtocolVersion.cs +++ b/src/Npgsql/Replication/PgOutput/PgOutputProtocolVersion.cs @@ -1,4 +1,4 @@ -namespace Npgsql.Replication.PgOutput; +namespace Npgsql.Replication.PgOutput; /// /// The Logical Streaming Replication Protocol version. 
diff --git a/src/Npgsql/Replication/PgOutput/PgOutputReplicationOptions.cs b/src/Npgsql/Replication/PgOutput/PgOutputReplicationOptions.cs index b6aedba5d3..94df40222c 100644 --- a/src/Npgsql/Replication/PgOutput/PgOutputReplicationOptions.cs +++ b/src/Npgsql/Replication/PgOutput/PgOutputReplicationOptions.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Generic; using System.Globalization; diff --git a/src/Npgsql/Replication/PgOutput/PgOutputReplicationSlot.cs b/src/Npgsql/Replication/PgOutput/PgOutputReplicationSlot.cs index a873f585fc..c2517347f0 100644 --- a/src/Npgsql/Replication/PgOutput/PgOutputReplicationSlot.cs +++ b/src/Npgsql/Replication/PgOutput/PgOutputReplicationSlot.cs @@ -1,4 +1,4 @@ -using Npgsql.Replication.Internal; +using Npgsql.Replication.Internal; namespace Npgsql.Replication.PgOutput; diff --git a/src/Npgsql/Replication/PgOutput/PgOutputStreamingMode.cs b/src/Npgsql/Replication/PgOutput/PgOutputStreamingMode.cs index 935ad2792c..312f842c70 100644 --- a/src/Npgsql/Replication/PgOutput/PgOutputStreamingMode.cs +++ b/src/Npgsql/Replication/PgOutput/PgOutputStreamingMode.cs @@ -1,4 +1,4 @@ -namespace Npgsql.Replication.PgOutput; +namespace Npgsql.Replication.PgOutput; /// /// Option to enable streaming of in-progress transactions. 
diff --git a/src/Npgsql/Replication/PgOutput/ReadonlyArrayBuffer.cs b/src/Npgsql/Replication/PgOutput/ReadonlyArrayBuffer.cs index 3d22b5f5f6..dac1ecdea9 100644 --- a/src/Npgsql/Replication/PgOutput/ReadonlyArrayBuffer.cs +++ b/src/Npgsql/Replication/PgOutput/ReadonlyArrayBuffer.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections; using System.Collections.Generic; diff --git a/src/Npgsql/Replication/PgOutput/TupleDataKind.cs b/src/Npgsql/Replication/PgOutput/TupleDataKind.cs index 141e4af16e..6bd404a7d3 100644 --- a/src/Npgsql/Replication/PgOutput/TupleDataKind.cs +++ b/src/Npgsql/Replication/PgOutput/TupleDataKind.cs @@ -1,4 +1,4 @@ -namespace Npgsql.Replication.PgOutput; +namespace Npgsql.Replication.PgOutput; /// /// The kind of data transmitted for a tuple in a Logical Replication Protocol message. diff --git a/src/Npgsql/Replication/PhysicalReplicationConnection.cs b/src/Npgsql/Replication/PhysicalReplicationConnection.cs index 05d0af33ca..bad56ff357 100644 --- a/src/Npgsql/Replication/PhysicalReplicationConnection.cs +++ b/src/Npgsql/Replication/PhysicalReplicationConnection.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Generic; using System.Globalization; using System.Runtime.CompilerServices; diff --git a/src/Npgsql/Replication/PhysicalReplicationSlot.cs b/src/Npgsql/Replication/PhysicalReplicationSlot.cs index 7aba817fe2..4495b404c1 100644 --- a/src/Npgsql/Replication/PhysicalReplicationSlot.cs +++ b/src/Npgsql/Replication/PhysicalReplicationSlot.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; namespace Npgsql.Replication; diff --git a/src/Npgsql/Replication/ReplicationConnection.cs b/src/Npgsql/Replication/ReplicationConnection.cs index 3877254521..fb631b5c1c 100644 --- a/src/Npgsql/Replication/ReplicationConnection.cs +++ b/src/Npgsql/Replication/ReplicationConnection.cs @@ -1,4 +1,4 @@ -using Npgsql.BackendMessages; +using Npgsql.BackendMessages; using NpgsqlTypes; using System; using 
System.Collections.Generic; diff --git a/src/Npgsql/Replication/ReplicationMessage.cs b/src/Npgsql/Replication/ReplicationMessage.cs index be957346cb..4beac0d644 100644 --- a/src/Npgsql/Replication/ReplicationMessage.cs +++ b/src/Npgsql/Replication/ReplicationMessage.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; namespace Npgsql.Replication; diff --git a/src/Npgsql/Replication/ReplicationSlot.cs b/src/Npgsql/Replication/ReplicationSlot.cs index 1e9b3473b6..ab4fb22f84 100644 --- a/src/Npgsql/Replication/ReplicationSlot.cs +++ b/src/Npgsql/Replication/ReplicationSlot.cs @@ -1,4 +1,4 @@ -namespace Npgsql.Replication; +namespace Npgsql.Replication; /// /// Contains information about a newly-created replication slot. diff --git a/src/Npgsql/Replication/ReplicationSlotOptions.cs b/src/Npgsql/Replication/ReplicationSlotOptions.cs index 669e8711df..93141d55df 100644 --- a/src/Npgsql/Replication/ReplicationSlotOptions.cs +++ b/src/Npgsql/Replication/ReplicationSlotOptions.cs @@ -1,4 +1,4 @@ -using System; +using System; using NpgsqlTypes; namespace Npgsql.Replication; diff --git a/src/Npgsql/Replication/ReplicationSystemIdentification.cs b/src/Npgsql/Replication/ReplicationSystemIdentification.cs index 7e6673e702..4bd59890f9 100644 --- a/src/Npgsql/Replication/ReplicationSystemIdentification.cs +++ b/src/Npgsql/Replication/ReplicationSystemIdentification.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; namespace Npgsql.Replication; diff --git a/src/Npgsql/Replication/TestDecoding/TestDecodingConnectionExtensions.cs b/src/Npgsql/Replication/TestDecoding/TestDecodingConnectionExtensions.cs index 77321711d9..a09d16b8e8 100644 --- a/src/Npgsql/Replication/TestDecoding/TestDecodingConnectionExtensions.cs +++ b/src/Npgsql/Replication/TestDecoding/TestDecodingConnectionExtensions.cs @@ -1,4 +1,4 @@ -using System.Collections.Generic; +using System.Collections.Generic; using System.Threading; using System.Threading.Tasks; using NpgsqlTypes; diff 
--git a/src/Npgsql/Replication/TestDecoding/TestDecodingData.cs b/src/Npgsql/Replication/TestDecoding/TestDecodingData.cs index c887a015ad..178b0ba87e 100644 --- a/src/Npgsql/Replication/TestDecoding/TestDecodingData.cs +++ b/src/Npgsql/Replication/TestDecoding/TestDecodingData.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using System; namespace Npgsql.Replication.TestDecoding; diff --git a/src/Npgsql/Replication/TestDecoding/TestDecodingOptions.cs b/src/Npgsql/Replication/TestDecoding/TestDecodingOptions.cs index 12372cb793..4e90e19bb4 100644 --- a/src/Npgsql/Replication/TestDecoding/TestDecodingOptions.cs +++ b/src/Npgsql/Replication/TestDecoding/TestDecodingOptions.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Generic; namespace Npgsql.Replication.TestDecoding; diff --git a/src/Npgsql/Replication/TestDecoding/TestDecodingReplicationSlot.cs b/src/Npgsql/Replication/TestDecoding/TestDecodingReplicationSlot.cs index cc5c52e5a4..9e1e5db5a0 100644 --- a/src/Npgsql/Replication/TestDecoding/TestDecodingReplicationSlot.cs +++ b/src/Npgsql/Replication/TestDecoding/TestDecodingReplicationSlot.cs @@ -1,4 +1,4 @@ -using Npgsql.Replication.Internal; +using Npgsql.Replication.Internal; namespace Npgsql.Replication.TestDecoding; diff --git a/src/Npgsql/Replication/TimelineHistoryFile.cs b/src/Npgsql/Replication/TimelineHistoryFile.cs index 89a15ffd69..44934d63e5 100644 --- a/src/Npgsql/Replication/TimelineHistoryFile.cs +++ b/src/Npgsql/Replication/TimelineHistoryFile.cs @@ -1,4 +1,4 @@ -namespace Npgsql.Replication; +namespace Npgsql.Replication; /// /// Represents a PostgreSQL timeline history file diff --git a/src/Npgsql/Replication/XLogDataMessage.cs b/src/Npgsql/Replication/XLogDataMessage.cs index 6b4ecd6dcf..55e8c7ebab 100644 --- a/src/Npgsql/Replication/XLogDataMessage.cs +++ b/src/Npgsql/Replication/XLogDataMessage.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.IO; using NpgsqlTypes; diff --git 
a/src/Npgsql/Schema/DbColumnSchemaGenerator.cs b/src/Npgsql/Schema/DbColumnSchemaGenerator.cs index 20a5f1fd06..d9b1f77b7d 100644 --- a/src/Npgsql/Schema/DbColumnSchemaGenerator.cs +++ b/src/Npgsql/Schema/DbColumnSchemaGenerator.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Generic; using System.Collections.ObjectModel; using System.Data; diff --git a/src/Npgsql/Schema/NpgsqlDbColumn.cs b/src/Npgsql/Schema/NpgsqlDbColumn.cs index e4597e3d86..9ba8b04312 100644 --- a/src/Npgsql/Schema/NpgsqlDbColumn.cs +++ b/src/Npgsql/Schema/NpgsqlDbColumn.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Data.Common; using Npgsql.PostgresTypes; using NpgsqlTypes; diff --git a/src/Npgsql/SqlQueryParser.cs b/src/Npgsql/SqlQueryParser.cs index c037a51342..3d569c9bc2 100644 --- a/src/Npgsql/SqlQueryParser.cs +++ b/src/Npgsql/SqlQueryParser.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Generic; using System.Diagnostics; using System.Text; diff --git a/src/Npgsql/ThrowHelper.cs b/src/Npgsql/ThrowHelper.cs index 6717458d2a..53e8083df5 100644 --- a/src/Npgsql/ThrowHelper.cs +++ b/src/Npgsql/ThrowHelper.cs @@ -1,4 +1,4 @@ -using Npgsql.BackendMessages; +using Npgsql.BackendMessages; using System; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; diff --git a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs index 263b8aa55a..cff24a4f66 100644 --- a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs +++ b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Generic; using System.Diagnostics.CodeAnalysis; using System.Text.Json; diff --git a/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs b/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs index 3fc5d0cbf1..53088b33fd 100644 --- a/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs +++ b/src/Npgsql/TypeMapping/INpgsqlTypeMapper.cs @@ -1,4 +1,4 @@ -using System; +using System; using 
System.Diagnostics.CodeAnalysis; using System.Text.Json; using System.Text.Json.Nodes; diff --git a/src/Npgsql/Util/GSSStream.cs b/src/Npgsql/Util/GSSStream.cs index 4f98a1d1fa..b6f843f315 100644 --- a/src/Npgsql/Util/GSSStream.cs +++ b/src/Npgsql/Util/GSSStream.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Buffers; using System.Buffers.Binary; using System.IO; diff --git a/src/Npgsql/Util/LoggingEnumerable.cs b/src/Npgsql/Util/LoggingEnumerable.cs index eabc7ebdd5..4b36ce19a6 100644 --- a/src/Npgsql/Util/LoggingEnumerable.cs +++ b/src/Npgsql/Util/LoggingEnumerable.cs @@ -1,4 +1,4 @@ -using System.Collections; +using System.Collections; using System.Collections.Generic; using System.Text; diff --git a/src/Npgsql/Util/ResettableCancellationTokenSource.cs b/src/Npgsql/Util/ResettableCancellationTokenSource.cs index f4b1652e2a..3218a7e629 100644 --- a/src/Npgsql/Util/ResettableCancellationTokenSource.cs +++ b/src/Npgsql/Util/ResettableCancellationTokenSource.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Diagnostics; using System.Threading; using static System.Threading.Timeout; diff --git a/src/Npgsql/Util/VersionExtensions.cs b/src/Npgsql/Util/VersionExtensions.cs index 4501dd78d2..d2fbd67dea 100644 --- a/src/Npgsql/Util/VersionExtensions.cs +++ b/src/Npgsql/Util/VersionExtensions.cs @@ -1,4 +1,4 @@ -using System; +using System; namespace Npgsql.Util; diff --git a/test/MStatDumper/Program.cs b/test/MStatDumper/Program.cs index 9a9fe89dfb..13f0b4ab3e 100644 --- a/test/MStatDumper/Program.cs +++ b/test/MStatDumper/Program.cs @@ -1,4 +1,4 @@ -using Mono.Cecil; +using Mono.Cecil; using Mono.Cecil.Rocks; namespace MStatDumper diff --git a/test/Npgsql.Benchmarks/App.config b/test/Npgsql.Benchmarks/App.config index 88fa4027bd..614f539efc 100644 --- a/test/Npgsql.Benchmarks/App.config +++ b/test/Npgsql.Benchmarks/App.config @@ -1,4 +1,4 @@ - + diff --git a/test/Npgsql.Benchmarks/BenchmarkEnvironment.cs 
b/test/Npgsql.Benchmarks/BenchmarkEnvironment.cs index 4704cc90e3..913fd59ebe 100644 --- a/test/Npgsql.Benchmarks/BenchmarkEnvironment.cs +++ b/test/Npgsql.Benchmarks/BenchmarkEnvironment.cs @@ -1,4 +1,4 @@ -using System; +using System; namespace Npgsql.Benchmarks; diff --git a/test/Npgsql.Benchmarks/CommandExecuteBenchmarks.cs b/test/Npgsql.Benchmarks/CommandExecuteBenchmarks.cs index e2e6d4706a..77e9eccec4 100644 --- a/test/Npgsql.Benchmarks/CommandExecuteBenchmarks.cs +++ b/test/Npgsql.Benchmarks/CommandExecuteBenchmarks.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Diagnostics.CodeAnalysis; using BenchmarkDotNet.Attributes; using BenchmarkDotNet.Columns; diff --git a/test/Npgsql.Benchmarks/Commit.cs b/test/Npgsql.Benchmarks/Commit.cs index 9ab03c11db..cae0b300a6 100644 --- a/test/Npgsql.Benchmarks/Commit.cs +++ b/test/Npgsql.Benchmarks/Commit.cs @@ -1,4 +1,4 @@ -using BenchmarkDotNet.Attributes; +using BenchmarkDotNet.Attributes; using BenchmarkDotNet.Columns; using BenchmarkDotNet.Configs; diff --git a/test/Npgsql.Benchmarks/ConnectionCreationBenchmarks.cs b/test/Npgsql.Benchmarks/ConnectionCreationBenchmarks.cs index 633445ae0a..f47d4ae4fe 100644 --- a/test/Npgsql.Benchmarks/ConnectionCreationBenchmarks.cs +++ b/test/Npgsql.Benchmarks/ConnectionCreationBenchmarks.cs @@ -1,4 +1,4 @@ -using BenchmarkDotNet.Attributes; +using BenchmarkDotNet.Attributes; using BenchmarkDotNet.Columns; using BenchmarkDotNet.Configs; using Microsoft.Data.SqlClient; diff --git a/test/Npgsql.Benchmarks/ConnectionOpenCloseBenchmarks.cs b/test/Npgsql.Benchmarks/ConnectionOpenCloseBenchmarks.cs index ef5e69f62e..9f39915cfa 100644 --- a/test/Npgsql.Benchmarks/ConnectionOpenCloseBenchmarks.cs +++ b/test/Npgsql.Benchmarks/ConnectionOpenCloseBenchmarks.cs @@ -1,4 +1,4 @@ -using BenchmarkDotNet.Attributes; +using BenchmarkDotNet.Attributes; using BenchmarkDotNet.Columns; using BenchmarkDotNet.Configs; using Microsoft.Data.SqlClient; diff --git 
a/test/Npgsql.Benchmarks/CopyExport.cs b/test/Npgsql.Benchmarks/CopyExport.cs index e4ea9c0698..79f30c42c7 100644 --- a/test/Npgsql.Benchmarks/CopyExport.cs +++ b/test/Npgsql.Benchmarks/CopyExport.cs @@ -1,4 +1,4 @@ -using BenchmarkDotNet.Attributes; +using BenchmarkDotNet.Attributes; using NpgsqlTypes; namespace Npgsql.Benchmarks; diff --git a/test/Npgsql.Benchmarks/CopyImport.cs b/test/Npgsql.Benchmarks/CopyImport.cs index 486d257d6c..cfabaec2c1 100644 --- a/test/Npgsql.Benchmarks/CopyImport.cs +++ b/test/Npgsql.Benchmarks/CopyImport.cs @@ -1,4 +1,4 @@ -using BenchmarkDotNet.Attributes; +using BenchmarkDotNet.Attributes; using NpgsqlTypes; namespace Npgsql.Benchmarks; diff --git a/test/Npgsql.Benchmarks/GetFieldValue.cs b/test/Npgsql.Benchmarks/GetFieldValue.cs index 0065f4546c..be6b7bb1b8 100644 --- a/test/Npgsql.Benchmarks/GetFieldValue.cs +++ b/test/Npgsql.Benchmarks/GetFieldValue.cs @@ -1,4 +1,4 @@ -using BenchmarkDotNet.Attributes; +using BenchmarkDotNet.Attributes; using BenchmarkDotNet.Columns; using BenchmarkDotNet.Configs; diff --git a/test/Npgsql.Benchmarks/Insert.cs b/test/Npgsql.Benchmarks/Insert.cs index 2de57776d5..2caa8c6e4c 100644 --- a/test/Npgsql.Benchmarks/Insert.cs +++ b/test/Npgsql.Benchmarks/Insert.cs @@ -1,4 +1,4 @@ -using System.Text; +using System.Text; using BenchmarkDotNet.Attributes; using NpgsqlTypes; diff --git a/test/Npgsql.Benchmarks/Npgsql.Benchmarks.csproj b/test/Npgsql.Benchmarks/Npgsql.Benchmarks.csproj index 55f3104673..c42fd783ea 100644 --- a/test/Npgsql.Benchmarks/Npgsql.Benchmarks.csproj +++ b/test/Npgsql.Benchmarks/Npgsql.Benchmarks.csproj @@ -1,4 +1,4 @@ - + portable diff --git a/test/Npgsql.Benchmarks/Prepare.cs b/test/Npgsql.Benchmarks/Prepare.cs index 5648e75f98..cee771869e 100644 --- a/test/Npgsql.Benchmarks/Prepare.cs +++ b/test/Npgsql.Benchmarks/Prepare.cs @@ -1,4 +1,4 @@ -using System.Linq; +using System.Linq; using System.Reflection; using System.Text; using BenchmarkDotNet.Attributes; diff --git 
a/test/Npgsql.Benchmarks/Program.cs b/test/Npgsql.Benchmarks/Program.cs index 9a334f63b8..bd737133a0 100644 --- a/test/Npgsql.Benchmarks/Program.cs +++ b/test/Npgsql.Benchmarks/Program.cs @@ -1,4 +1,4 @@ -using BenchmarkDotNet.Running; +using BenchmarkDotNet.Running; using System.Reflection; namespace Npgsql.Benchmarks; diff --git a/test/Npgsql.Benchmarks/ReadArray.cs b/test/Npgsql.Benchmarks/ReadArray.cs index e1e5b2d8de..a69002a6a8 100644 --- a/test/Npgsql.Benchmarks/ReadArray.cs +++ b/test/Npgsql.Benchmarks/ReadArray.cs @@ -1,4 +1,4 @@ -using BenchmarkDotNet.Attributes; +using BenchmarkDotNet.Attributes; namespace Npgsql.Benchmarks; diff --git a/test/Npgsql.Benchmarks/ReadColumns.cs b/test/Npgsql.Benchmarks/ReadColumns.cs index aa10d25f1a..0eb7f0cf33 100644 --- a/test/Npgsql.Benchmarks/ReadColumns.cs +++ b/test/Npgsql.Benchmarks/ReadColumns.cs @@ -1,4 +1,4 @@ -using System.Linq; +using System.Linq; using System.Reflection; using System.Text; using BenchmarkDotNet.Attributes; diff --git a/test/Npgsql.Benchmarks/ReadRows.cs b/test/Npgsql.Benchmarks/ReadRows.cs index 7ec8d9ed09..04256249e0 100644 --- a/test/Npgsql.Benchmarks/ReadRows.cs +++ b/test/Npgsql.Benchmarks/ReadRows.cs @@ -1,4 +1,4 @@ -using BenchmarkDotNet.Attributes; +using BenchmarkDotNet.Attributes; namespace Npgsql.Benchmarks; diff --git a/test/Npgsql.Benchmarks/TypeHandlers/Composite.cs b/test/Npgsql.Benchmarks/TypeHandlers/Composite.cs index 52418a7240..9ac8a6fa96 100644 --- a/test/Npgsql.Benchmarks/TypeHandlers/Composite.cs +++ b/test/Npgsql.Benchmarks/TypeHandlers/Composite.cs @@ -1,4 +1,4 @@ - + /* Disabling for now: unmapped composite support is probably going away, and there's a good chance this * class can be simplified to a certain extent diff --git a/test/Npgsql.Benchmarks/TypeHandlers/Numeric.cs b/test/Npgsql.Benchmarks/TypeHandlers/Numeric.cs index ae5dbfe0d9..2ac09063c6 100644 --- a/test/Npgsql.Benchmarks/TypeHandlers/Numeric.cs +++ b/test/Npgsql.Benchmarks/TypeHandlers/Numeric.cs @@ -1,4 
+1,4 @@ -using System.Collections.Generic; +using System.Collections.Generic; using BenchmarkDotNet.Attributes; using Npgsql.Internal.Converters; diff --git a/test/Npgsql.Benchmarks/TypeHandlers/Text.cs b/test/Npgsql.Benchmarks/TypeHandlers/Text.cs index 7d8b158ea3..34f3226326 100644 --- a/test/Npgsql.Benchmarks/TypeHandlers/Text.cs +++ b/test/Npgsql.Benchmarks/TypeHandlers/Text.cs @@ -1,4 +1,4 @@ -using Npgsql.Internal; +using Npgsql.Internal; using Npgsql.Internal.Converters; using System.Collections.Generic; using System.Text; diff --git a/test/Npgsql.Benchmarks/TypeHandlers/TypeHandlerBenchmarks.cs b/test/Npgsql.Benchmarks/TypeHandlers/TypeHandlerBenchmarks.cs index 17469b3519..7695885bfd 100644 --- a/test/Npgsql.Benchmarks/TypeHandlers/TypeHandlerBenchmarks.cs +++ b/test/Npgsql.Benchmarks/TypeHandlers/TypeHandlerBenchmarks.cs @@ -1,4 +1,4 @@ -using BenchmarkDotNet.Attributes; +using BenchmarkDotNet.Attributes; using BenchmarkDotNet.Columns; using BenchmarkDotNet.Configs; using BenchmarkDotNet.Diagnosers; diff --git a/test/Npgsql.Benchmarks/TypeHandlers/Uuid.cs b/test/Npgsql.Benchmarks/TypeHandlers/Uuid.cs index a497a0c509..099acae43c 100644 --- a/test/Npgsql.Benchmarks/TypeHandlers/Uuid.cs +++ b/test/Npgsql.Benchmarks/TypeHandlers/Uuid.cs @@ -1,4 +1,4 @@ -using System; +using System; using BenchmarkDotNet.Attributes; using Npgsql.Internal.Converters; diff --git a/test/Npgsql.Benchmarks/UnixDomainSocket.cs b/test/Npgsql.Benchmarks/UnixDomainSocket.cs index 89c42a9a49..71748c9ba0 100644 --- a/test/Npgsql.Benchmarks/UnixDomainSocket.cs +++ b/test/Npgsql.Benchmarks/UnixDomainSocket.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.IO; using System.Linq; using BenchmarkDotNet.Attributes; diff --git a/test/Npgsql.Benchmarks/WriteVaryingNumberOfParameters.cs b/test/Npgsql.Benchmarks/WriteVaryingNumberOfParameters.cs index 429861f262..fee0e47f9c 100644 --- a/test/Npgsql.Benchmarks/WriteVaryingNumberOfParameters.cs +++ 
b/test/Npgsql.Benchmarks/WriteVaryingNumberOfParameters.cs @@ -1,4 +1,4 @@ -using System.Linq; +using System.Linq; using BenchmarkDotNet.Attributes; using NpgsqlTypes; diff --git a/test/Npgsql.DependencyInjection.Tests/DependencyInjectionTests.cs b/test/Npgsql.DependencyInjection.Tests/DependencyInjectionTests.cs index ebbf0e2388..ad7728835f 100644 --- a/test/Npgsql.DependencyInjection.Tests/DependencyInjectionTests.cs +++ b/test/Npgsql.DependencyInjection.Tests/DependencyInjectionTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Data; using System.Linq; using System.Threading.Tasks; diff --git a/test/Npgsql.PluginTests/GeoJSONTests.cs b/test/Npgsql.PluginTests/GeoJSONTests.cs index 287c1277bc..9e51a5b298 100644 --- a/test/Npgsql.PluginTests/GeoJSONTests.cs +++ b/test/Npgsql.PluginTests/GeoJSONTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Concurrent; using System.Linq; using System.Threading.Tasks; diff --git a/test/Npgsql.PluginTests/JsonNetTests.cs b/test/Npgsql.PluginTests/JsonNetTests.cs index f20704e52f..9bc8198e79 100644 --- a/test/Npgsql.PluginTests/JsonNetTests.cs +++ b/test/Npgsql.PluginTests/JsonNetTests.cs @@ -1,4 +1,4 @@ -using Newtonsoft.Json; +using Newtonsoft.Json; using Newtonsoft.Json.Linq; using Npgsql.Tests; using NUnit.Framework; diff --git a/test/Npgsql.PluginTests/NetTopologySuiteTests.cs b/test/Npgsql.PluginTests/NetTopologySuiteTests.cs index 2f41a6e8cc..7e9d8caa84 100644 --- a/test/Npgsql.PluginTests/NetTopologySuiteTests.cs +++ b/test/Npgsql.PluginTests/NetTopologySuiteTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Concurrent; using System.Linq; using System.Threading.Tasks; diff --git a/test/Npgsql.PluginTests/NodaTimeTests.cs b/test/Npgsql.PluginTests/NodaTimeTests.cs index bec0b46c9b..b678374aed 100644 --- a/test/Npgsql.PluginTests/NodaTimeTests.cs +++ b/test/Npgsql.PluginTests/NodaTimeTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using 
System.Data; using System.Threading.Tasks; using NodaTime; diff --git a/test/Npgsql.PluginTests/Npgsql.PluginTests.csproj b/test/Npgsql.PluginTests/Npgsql.PluginTests.csproj index 499373bc63..a5d594024d 100644 --- a/test/Npgsql.PluginTests/Npgsql.PluginTests.csproj +++ b/test/Npgsql.PluginTests/Npgsql.PluginTests.csproj @@ -1,4 +1,4 @@ - + diff --git a/test/Npgsql.Specification.Tests/Npgsql.Specification.Tests.csproj b/test/Npgsql.Specification.Tests/Npgsql.Specification.Tests.csproj index 47e39cc121..268d891b33 100644 --- a/test/Npgsql.Specification.Tests/Npgsql.Specification.Tests.csproj +++ b/test/Npgsql.Specification.Tests/Npgsql.Specification.Tests.csproj @@ -1,4 +1,4 @@ - + diff --git a/test/Npgsql.Tests/App.config b/test/Npgsql.Tests/App.config index dcd0a07c0b..e9e8771144 100644 --- a/test/Npgsql.Tests/App.config +++ b/test/Npgsql.Tests/App.config @@ -1,4 +1,4 @@ - + diff --git a/test/Npgsql.Tests/AsyncTests.cs b/test/Npgsql.Tests/AsyncTests.cs index 3d7ebc3300..591b94e0ca 100644 --- a/test/Npgsql.Tests/AsyncTests.cs +++ b/test/Npgsql.Tests/AsyncTests.cs @@ -1,4 +1,4 @@ -using NUnit.Framework; +using NUnit.Framework; using System.Data; using System.Threading.Tasks; using static Npgsql.Tests.TestUtil; diff --git a/test/Npgsql.Tests/AutoPrepareTests.cs b/test/Npgsql.Tests/AutoPrepareTests.cs index 97a46ad277..b7adfc8b10 100644 --- a/test/Npgsql.Tests/AutoPrepareTests.cs +++ b/test/Npgsql.Tests/AutoPrepareTests.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using NUnit.Framework; using System; using System.Data; diff --git a/test/Npgsql.Tests/BugTests.cs b/test/Npgsql.Tests/BugTests.cs index 5c0b77b1dd..7c57ce7723 100644 --- a/test/Npgsql.Tests/BugTests.cs +++ b/test/Npgsql.Tests/BugTests.cs @@ -1,4 +1,4 @@ -using Npgsql.BackendMessages; +using Npgsql.BackendMessages; using Npgsql.Tests.Support; using NpgsqlTypes; using NUnit.Framework; diff --git a/test/Npgsql.Tests/CommandBuilderTests.cs b/test/Npgsql.Tests/CommandBuilderTests.cs index 
f9643adfd5..2e6fc90d7c 100644 --- a/test/Npgsql.Tests/CommandBuilderTests.cs +++ b/test/Npgsql.Tests/CommandBuilderTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Data; using System.Threading.Tasks; using Npgsql.PostgresTypes; diff --git a/test/Npgsql.Tests/ConnectionStringBuilderTests.cs b/test/Npgsql.Tests/ConnectionStringBuilderTests.cs index 6e2d2e3a04..c95e83da16 100644 --- a/test/Npgsql.Tests/ConnectionStringBuilderTests.cs +++ b/test/Npgsql.Tests/ConnectionStringBuilderTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using NUnit.Framework; namespace Npgsql.Tests; diff --git a/test/Npgsql.Tests/CopyTests.cs b/test/Npgsql.Tests/CopyTests.cs index 6a6a8e5368..d3551517bc 100644 --- a/test/Npgsql.Tests/CopyTests.cs +++ b/test/Npgsql.Tests/CopyTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections; using System.Collections.Generic; using System.Collections.Specialized; diff --git a/test/Npgsql.Tests/FunctionTests.cs b/test/Npgsql.Tests/FunctionTests.cs index 4c3b1e10aa..e755ef746a 100644 --- a/test/Npgsql.Tests/FunctionTests.cs +++ b/test/Npgsql.Tests/FunctionTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Data; using System.Threading.Tasks; using Npgsql.PostgresTypes; diff --git a/test/Npgsql.Tests/MultipleHostsTests.cs b/test/Npgsql.Tests/MultipleHostsTests.cs index 94ba45dd00..398c0520ff 100644 --- a/test/Npgsql.Tests/MultipleHostsTests.cs +++ b/test/Npgsql.Tests/MultipleHostsTests.cs @@ -1,4 +1,4 @@ -using Npgsql.Internal; +using Npgsql.Internal; using Npgsql.Tests.Support; using NUnit.Framework; using System; diff --git a/test/Npgsql.Tests/NestedDataReaderTests.cs b/test/Npgsql.Tests/NestedDataReaderTests.cs index 7e157c3426..52531fbdd2 100644 --- a/test/Npgsql.Tests/NestedDataReaderTests.cs +++ b/test/Npgsql.Tests/NestedDataReaderTests.cs @@ -1,4 +1,4 @@ -using NUnit.Framework; +using NUnit.Framework; using System; using System.Threading.Tasks; using static Npgsql.Tests.TestUtil; diff 
--git a/test/Npgsql.Tests/NotificationTests.cs b/test/Npgsql.Tests/NotificationTests.cs index 5f6c11efcd..08e7a8f605 100644 --- a/test/Npgsql.Tests/NotificationTests.cs +++ b/test/Npgsql.Tests/NotificationTests.cs @@ -1,4 +1,4 @@ -using NUnit.Framework; +using NUnit.Framework; using System; using System.Data; using System.Threading; diff --git a/test/Npgsql.Tests/Npgsql.Tests.csproj b/test/Npgsql.Tests/Npgsql.Tests.csproj index 3714b9edaa..8e04167e9d 100644 --- a/test/Npgsql.Tests/Npgsql.Tests.csproj +++ b/test/Npgsql.Tests/Npgsql.Tests.csproj @@ -1,4 +1,4 @@ - + diff --git a/test/Npgsql.Tests/PgPassEntryTests.cs b/test/Npgsql.Tests/PgPassEntryTests.cs index db78e893ad..60678e70bf 100644 --- a/test/Npgsql.Tests/PgPassEntryTests.cs +++ b/test/Npgsql.Tests/PgPassEntryTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using NUnit.Framework; using NUnit.Framework.Constraints; diff --git a/test/Npgsql.Tests/PgPassFileTests.cs b/test/Npgsql.Tests/PgPassFileTests.cs index 593e522e89..0e7d6f46ce 100644 --- a/test/Npgsql.Tests/PgPassFileTests.cs +++ b/test/Npgsql.Tests/PgPassFileTests.cs @@ -1,4 +1,4 @@ -using System.IO; +using System.IO; using System.Linq; using NUnit.Framework; diff --git a/test/Npgsql.Tests/PoolManagerTests.cs b/test/Npgsql.Tests/PoolManagerTests.cs index afd716dab5..25b79d1bbb 100644 --- a/test/Npgsql.Tests/PoolManagerTests.cs +++ b/test/Npgsql.Tests/PoolManagerTests.cs @@ -1,4 +1,4 @@ -using NUnit.Framework; +using NUnit.Framework; namespace Npgsql.Tests; diff --git a/test/Npgsql.Tests/PoolTests.cs b/test/Npgsql.Tests/PoolTests.cs index 4a3ecca261..65901af14c 100644 --- a/test/Npgsql.Tests/PoolTests.cs +++ b/test/Npgsql.Tests/PoolTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Linq; using System.Net.Sockets; using System.Threading; diff --git a/test/Npgsql.Tests/PrepareTests.cs b/test/Npgsql.Tests/PrepareTests.cs index 1957d1091e..d09d3ac016 100644 --- a/test/Npgsql.Tests/PrepareTests.cs +++ b/test/Npgsql.Tests/PrepareTests.cs 
@@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Generic; using System.Data; using System.Linq; diff --git a/test/Npgsql.Tests/Properties/AssemblyInfo.cs b/test/Npgsql.Tests/Properties/AssemblyInfo.cs index 89a1bb2e0d..9b9dcef56f 100644 --- a/test/Npgsql.Tests/Properties/AssemblyInfo.cs +++ b/test/Npgsql.Tests/Properties/AssemblyInfo.cs @@ -1,4 +1,4 @@ -using System.Runtime.CompilerServices; +using System.Runtime.CompilerServices; using NUnit.Framework; [assembly: Parallelizable(ParallelScope.Children)] diff --git a/test/Npgsql.Tests/ReadBufferTests.cs b/test/Npgsql.Tests/ReadBufferTests.cs index b547787368..172e0d5af5 100644 --- a/test/Npgsql.Tests/ReadBufferTests.cs +++ b/test/Npgsql.Tests/ReadBufferTests.cs @@ -1,4 +1,4 @@ -using Npgsql.Internal; +using Npgsql.Internal; using NUnit.Framework; using System; using System.IO; diff --git a/test/Npgsql.Tests/ReaderNewSchemaTests.cs b/test/Npgsql.Tests/ReaderNewSchemaTests.cs index 5cf0c82150..f7e3747489 100644 --- a/test/Npgsql.Tests/ReaderNewSchemaTests.cs +++ b/test/Npgsql.Tests/ReaderNewSchemaTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Generic; using System.Collections.ObjectModel; using System.Data; diff --git a/test/Npgsql.Tests/ReaderOldSchemaTests.cs b/test/Npgsql.Tests/ReaderOldSchemaTests.cs index 72a6401468..ebb1d883fd 100644 --- a/test/Npgsql.Tests/ReaderOldSchemaTests.cs +++ b/test/Npgsql.Tests/ReaderOldSchemaTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Data; using System.Linq; using System.Threading.Tasks; diff --git a/test/Npgsql.Tests/ReaderTests.cs b/test/Npgsql.Tests/ReaderTests.cs index 92187756bc..986d4163cb 100644 --- a/test/Npgsql.Tests/ReaderTests.cs +++ b/test/Npgsql.Tests/ReaderTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Buffers.Binary; using System.Collections; using System.Data; diff --git a/test/Npgsql.Tests/Replication/CommonLogicalReplicationTests.cs 
b/test/Npgsql.Tests/Replication/CommonLogicalReplicationTests.cs index cb434edd8a..6e0f35a51a 100644 --- a/test/Npgsql.Tests/Replication/CommonLogicalReplicationTests.cs +++ b/test/Npgsql.Tests/Replication/CommonLogicalReplicationTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Data; using System.Threading.Tasks; using NUnit.Framework; diff --git a/test/Npgsql.Tests/Replication/CommonReplicationTests.cs b/test/Npgsql.Tests/Replication/CommonReplicationTests.cs index 2be1f3faff..6038e5c854 100644 --- a/test/Npgsql.Tests/Replication/CommonReplicationTests.cs +++ b/test/Npgsql.Tests/Replication/CommonReplicationTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Generic; using System.IO; using System.Runtime.CompilerServices; diff --git a/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs b/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs index a9a90842d3..802d69be4c 100644 --- a/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs +++ b/test/Npgsql.Tests/Replication/PgOutputReplicationTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Generic; using System.IO; using System.Linq; diff --git a/test/Npgsql.Tests/Replication/PhysicalReplicationTests.cs b/test/Npgsql.Tests/Replication/PhysicalReplicationTests.cs index 62c19451e9..4c4efab2e5 100644 --- a/test/Npgsql.Tests/Replication/PhysicalReplicationTests.cs +++ b/test/Npgsql.Tests/Replication/PhysicalReplicationTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Threading; using System.Threading.Tasks; using NUnit.Framework; diff --git a/test/Npgsql.Tests/Replication/SafeReplicationTestBase.cs b/test/Npgsql.Tests/Replication/SafeReplicationTestBase.cs index 77f67eaf4b..3034dee2f1 100644 --- a/test/Npgsql.Tests/Replication/SafeReplicationTestBase.cs +++ b/test/Npgsql.Tests/Replication/SafeReplicationTestBase.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Generic; using 
System.Runtime.CompilerServices; using System.Text.RegularExpressions; diff --git a/test/Npgsql.Tests/Replication/TestDecodingReplicationTests.cs b/test/Npgsql.Tests/Replication/TestDecodingReplicationTests.cs index 406be7b809..694b6b420d 100644 --- a/test/Npgsql.Tests/Replication/TestDecodingReplicationTests.cs +++ b/test/Npgsql.Tests/Replication/TestDecodingReplicationTests.cs @@ -1,4 +1,4 @@ -using System.Collections.Generic; +using System.Collections.Generic; using System.Threading; using System.Threading.Tasks; using NUnit.Framework; diff --git a/test/Npgsql.Tests/SchemaTests.cs b/test/Npgsql.Tests/SchemaTests.cs index ebe36269b0..cf8fcc9e8f 100644 --- a/test/Npgsql.Tests/SchemaTests.cs +++ b/test/Npgsql.Tests/SchemaTests.cs @@ -1,4 +1,4 @@ -using NpgsqlTypes; +using NpgsqlTypes; using NUnit.Framework; using System; using System.Data; diff --git a/test/Npgsql.Tests/SecurityTests.cs b/test/Npgsql.Tests/SecurityTests.cs index cb591b39eb..a0594fb971 100644 --- a/test/Npgsql.Tests/SecurityTests.cs +++ b/test/Npgsql.Tests/SecurityTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.IO; using System.Runtime.InteropServices; using System.Security.Authentication; diff --git a/test/Npgsql.Tests/SnakeCaseNameTranslatorTests.cs b/test/Npgsql.Tests/SnakeCaseNameTranslatorTests.cs index 9a64ccdaa2..0385e2a877 100644 --- a/test/Npgsql.Tests/SnakeCaseNameTranslatorTests.cs +++ b/test/Npgsql.Tests/SnakeCaseNameTranslatorTests.cs @@ -1,4 +1,4 @@ -using System.Collections.Generic; +using System.Collections.Generic; using System.Globalization; using System.Linq; using Npgsql.NameTranslation; diff --git a/test/Npgsql.Tests/SqlQueryParserTests.cs b/test/Npgsql.Tests/SqlQueryParserTests.cs index 1044b707fc..c01c155fd4 100644 --- a/test/Npgsql.Tests/SqlQueryParserTests.cs +++ b/test/Npgsql.Tests/SqlQueryParserTests.cs @@ -1,4 +1,4 @@ -using System.Collections.Generic; +using System.Collections.Generic; using System.Data; using System.Linq; using NUnit.Framework; diff 
--git a/test/Npgsql.Tests/Support/AssemblySetUp.cs b/test/Npgsql.Tests/Support/AssemblySetUp.cs index c7d16f0501..9c0e4d1789 100644 --- a/test/Npgsql.Tests/Support/AssemblySetUp.cs +++ b/test/Npgsql.Tests/Support/AssemblySetUp.cs @@ -1,4 +1,4 @@ -using Npgsql; +using Npgsql; using Npgsql.Tests; using NUnit.Framework; using System; diff --git a/test/Npgsql.Tests/Support/PgCancellationRequest.cs b/test/Npgsql.Tests/Support/PgCancellationRequest.cs index 6773c55dd2..928f388bd0 100644 --- a/test/Npgsql.Tests/Support/PgCancellationRequest.cs +++ b/test/Npgsql.Tests/Support/PgCancellationRequest.cs @@ -1,4 +1,4 @@ -using System.IO; +using System.IO; using Npgsql.Internal; namespace Npgsql.Tests.Support; diff --git a/test/Npgsql.Tests/TestMetrics.cs b/test/Npgsql.Tests/TestMetrics.cs index 3b6c11dbda..c90f1e484c 100644 --- a/test/Npgsql.Tests/TestMetrics.cs +++ b/test/Npgsql.Tests/TestMetrics.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Diagnostics; namespace Npgsql.Tests; diff --git a/test/Npgsql.Tests/TestUtil.cs b/test/Npgsql.Tests/TestUtil.cs index 0f83946ac7..0d5c643225 100644 --- a/test/Npgsql.Tests/TestUtil.cs +++ b/test/Npgsql.Tests/TestUtil.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Generic; using System.Data; using System.Diagnostics.CodeAnalysis; diff --git a/test/Npgsql.Tests/TracingTests.cs b/test/Npgsql.Tests/TracingTests.cs index 0d51e6a6f6..f9241c2a51 100644 --- a/test/Npgsql.Tests/TracingTests.cs +++ b/test/Npgsql.Tests/TracingTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Generic; using System.Diagnostics; using System.Linq; diff --git a/test/Npgsql.Tests/TransactionTests.cs b/test/Npgsql.Tests/TransactionTests.cs index 2832ed7fa1..ab94837a95 100644 --- a/test/Npgsql.Tests/TransactionTests.cs +++ b/test/Npgsql.Tests/TransactionTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Data; using System.Threading.Tasks; using Npgsql.Internal; diff --git 
a/test/Npgsql.Tests/TypeMapperTests.cs b/test/Npgsql.Tests/TypeMapperTests.cs index 6709b1a507..c06526bdba 100644 --- a/test/Npgsql.Tests/TypeMapperTests.cs +++ b/test/Npgsql.Tests/TypeMapperTests.cs @@ -1,4 +1,4 @@ -using Npgsql.Internal; +using Npgsql.Internal; using NUnit.Framework; using System; using System.Data; diff --git a/test/Npgsql.Tests/Types/ArrayTests.cs b/test/Npgsql.Tests/Types/ArrayTests.cs index a6a272c133..538e3f0cc2 100644 --- a/test/Npgsql.Tests/Types/ArrayTests.cs +++ b/test/Npgsql.Tests/Types/ArrayTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections; using System.Collections.Generic; using System.Collections.Immutable; diff --git a/test/Npgsql.Tests/Types/BitStringTests.cs b/test/Npgsql.Tests/Types/BitStringTests.cs index e41ae0cc8f..0ef6481ffd 100644 --- a/test/Npgsql.Tests/Types/BitStringTests.cs +++ b/test/Npgsql.Tests/Types/BitStringTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections; using System.Collections.Specialized; using System.Data; diff --git a/test/Npgsql.Tests/Types/ByteaTests.cs b/test/Npgsql.Tests/Types/ByteaTests.cs index d87ed48216..5765848f5a 100644 --- a/test/Npgsql.Tests/Types/ByteaTests.cs +++ b/test/Npgsql.Tests/Types/ByteaTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Generic; using System.Data; using System.IO; diff --git a/test/Npgsql.Tests/Types/CompositeTests.cs b/test/Npgsql.Tests/Types/CompositeTests.cs index db9814dae0..4c6ebff7eb 100644 --- a/test/Npgsql.Tests/Types/CompositeTests.cs +++ b/test/Npgsql.Tests/Types/CompositeTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Linq; using System.Net; using System.Reflection; diff --git a/test/Npgsql.Tests/Types/DateTimeTests.cs b/test/Npgsql.Tests/Types/DateTimeTests.cs index 397ef6f069..d1e5feb0df 100644 --- a/test/Npgsql.Tests/Types/DateTimeTests.cs +++ b/test/Npgsql.Tests/Types/DateTimeTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using 
System.Collections.Generic; using System.Data; using System.Threading.Tasks; diff --git a/test/Npgsql.Tests/Types/EnumTests.cs b/test/Npgsql.Tests/Types/EnumTests.cs index 52f512c944..3da3b522da 100644 --- a/test/Npgsql.Tests/Types/EnumTests.cs +++ b/test/Npgsql.Tests/Types/EnumTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections.Generic; using System.Data; using System.Threading.Tasks; diff --git a/test/Npgsql.Tests/Types/FullTextSearchTests.cs b/test/Npgsql.Tests/Types/FullTextSearchTests.cs index 079bb7dec5..10a3d320a2 100644 --- a/test/Npgsql.Tests/Types/FullTextSearchTests.cs +++ b/test/Npgsql.Tests/Types/FullTextSearchTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Collections; using System.Threading.Tasks; using Npgsql.Properties; diff --git a/test/Npgsql.Tests/Types/GeometricTypeTests.cs b/test/Npgsql.Tests/Types/GeometricTypeTests.cs index 20a7606e04..f3df5891c1 100644 --- a/test/Npgsql.Tests/Types/GeometricTypeTests.cs +++ b/test/Npgsql.Tests/Types/GeometricTypeTests.cs @@ -1,4 +1,4 @@ -using System.Threading.Tasks; +using System.Threading.Tasks; using NpgsqlTypes; using NUnit.Framework; diff --git a/test/Npgsql.Tests/Types/InternalTypeTests.cs b/test/Npgsql.Tests/Types/InternalTypeTests.cs index 3a22cefcf4..7e69a85453 100644 --- a/test/Npgsql.Tests/Types/InternalTypeTests.cs +++ b/test/Npgsql.Tests/Types/InternalTypeTests.cs @@ -1,4 +1,4 @@ -using System.Threading.Tasks; +using System.Threading.Tasks; using NpgsqlTypes; using NUnit.Framework; diff --git a/test/Npgsql.Tests/Types/JsonPathTests.cs b/test/Npgsql.Tests/Types/JsonPathTests.cs index 62db50032b..1c5f732bfd 100644 --- a/test/Npgsql.Tests/Types/JsonPathTests.cs +++ b/test/Npgsql.Tests/Types/JsonPathTests.cs @@ -1,4 +1,4 @@ -using System.Data; +using System.Data; using System.Threading.Tasks; using NpgsqlTypes; using NUnit.Framework; diff --git a/test/Npgsql.Tests/Types/LTreeTests.cs b/test/Npgsql.Tests/Types/LTreeTests.cs index c7498adf83..b47bc910f6 
100644 --- a/test/Npgsql.Tests/Types/LTreeTests.cs +++ b/test/Npgsql.Tests/Types/LTreeTests.cs @@ -1,4 +1,4 @@ -using System.Data; +using System.Data; using System.Threading.Tasks; using Npgsql.Properties; using NpgsqlTypes; diff --git a/test/Npgsql.Tests/Types/MiscTypeTests.cs b/test/Npgsql.Tests/Types/MiscTypeTests.cs index e2bd17cf29..a047fce9b2 100644 --- a/test/Npgsql.Tests/Types/MiscTypeTests.cs +++ b/test/Npgsql.Tests/Types/MiscTypeTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Data; using System.Threading.Tasks; using NpgsqlTypes; diff --git a/test/Npgsql.Tests/Types/MoneyTests.cs b/test/Npgsql.Tests/Types/MoneyTests.cs index 8f277a6e34..a0bf7f1e57 100644 --- a/test/Npgsql.Tests/Types/MoneyTests.cs +++ b/test/Npgsql.Tests/Types/MoneyTests.cs @@ -1,4 +1,4 @@ -using System.Data; +using System.Data; using System.Threading.Tasks; using NpgsqlTypes; using NUnit.Framework; diff --git a/test/Npgsql.Tests/Types/NetworkTypeTests.cs b/test/Npgsql.Tests/Types/NetworkTypeTests.cs index 3ddc78e87c..ffecfe3247 100644 --- a/test/Npgsql.Tests/Types/NetworkTypeTests.cs +++ b/test/Npgsql.Tests/Types/NetworkTypeTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Net; using System.Net.NetworkInformation; using System.Threading.Tasks; diff --git a/test/Npgsql.Tests/Types/NumericTests.cs b/test/Npgsql.Tests/Types/NumericTests.cs index 38b95cfc0e..c73617f819 100644 --- a/test/Npgsql.Tests/Types/NumericTests.cs +++ b/test/Npgsql.Tests/Types/NumericTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Data; using System.Linq; using System.Numerics; diff --git a/test/Npgsql.Tests/Types/NumericTypeTests.cs b/test/Npgsql.Tests/Types/NumericTypeTests.cs index dc41a387c8..795efc16ce 100644 --- a/test/Npgsql.Tests/Types/NumericTypeTests.cs +++ b/test/Npgsql.Tests/Types/NumericTypeTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Data; using System.Globalization; using System.Threading.Tasks; diff --git 
a/test/Npgsql.Tests/Types/RangeTests.cs b/test/Npgsql.Tests/Types/RangeTests.cs index d7bdea0132..df83c68358 100644 --- a/test/Npgsql.Tests/Types/RangeTests.cs +++ b/test/Npgsql.Tests/Types/RangeTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.ComponentModel; using System.Data; using System.Globalization; diff --git a/test/Npgsql.Tests/Types/TextTests.cs b/test/Npgsql.Tests/Types/TextTests.cs index 27b9566009..22403aa3d4 100644 --- a/test/Npgsql.Tests/Types/TextTests.cs +++ b/test/Npgsql.Tests/Types/TextTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.Data; using System.IO; using System.Text; diff --git a/test/Npgsql.Tests/WriteBufferTests.cs b/test/Npgsql.Tests/WriteBufferTests.cs index 53bf753dd6..3818b1f8fe 100644 --- a/test/Npgsql.Tests/WriteBufferTests.cs +++ b/test/Npgsql.Tests/WriteBufferTests.cs @@ -1,4 +1,4 @@ -using System; +using System; using System.IO; using Npgsql.Internal; using NUnit.Framework; From 768d1885320a5d1be3ddedfbeb7d6a8520dbb6f1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 15 Apr 2026 10:11:28 +0000 Subject: [PATCH 752/761] Bump the minor-and-patch group with 9 updates (#6543) --- .gitignore | 1 + Directory.Packages.props | 18 +++++++++--------- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/.gitignore b/.gitignore index 669922429f..da1d49e515 100644 --- a/.gitignore +++ b/.gitignore @@ -19,3 +19,4 @@ artifacts/ *.ide/ .vs/ TestResult.xml +*.lscache diff --git a/Directory.Packages.props b/Directory.Packages.props index 55921bd0a3..49157d3237 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -1,17 +1,17 @@ - 10.0.5 - 10.0.5 + 10.0.6 + 10.0.6 - 10.0.5 - 10.0.5 + 10.0.6 + 10.0.6 - + @@ -21,11 +21,11 @@ - + - + @@ -39,8 +39,8 @@ - - + + From 44f13949430e97ccf7fa5e915042be0d7d43877c Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Thu, 16 Apr 2026 12:44:33 +0200 Subject: [PATCH 753/761] Simplify 
GlobalTypeMapper locking and fix races (#6538) --- src/Npgsql/TypeMapping/GlobalTypeMapper.cs | 210 +++++++-------------- 1 file changed, 72 insertions(+), 138 deletions(-) diff --git a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs index cff24a4f66..148260dd9c 100644 --- a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs +++ b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs @@ -2,7 +2,6 @@ using System.Collections.Generic; using System.Diagnostics.CodeAnalysis; using System.Text.Json; -using System.Threading; using Npgsql.Internal; using Npgsql.Internal.Postgres; using Npgsql.Internal.ResolverFactories; @@ -14,84 +13,64 @@ sealed class GlobalTypeMapper : INpgsqlTypeMapper { readonly UserTypeMapper _userTypeMapper = new(); readonly List _pluginResolverFactories = []; - readonly ReaderWriterLockSlim _lock = new(); + readonly object _sync = new(); PgTypeInfoResolverFactory[] _typeMappingResolvers = []; internal IEnumerable GetPluginResolverFactories() { - var resolvers = new List(); - _lock.EnterReadLock(); - try - { - resolvers.AddRange(_pluginResolverFactories); - } - finally - { - _lock.ExitReadLock(); - } - - return resolvers; + lock (_sync) + return new List(_pluginResolverFactories); } internal PgTypeInfoResolverFactory? GetUserMappingsResolverFactory() { - _lock.EnterReadLock(); - try - { + lock (_sync) return _userTypeMapper.Items.Count > 0 ? _userTypeMapper : null; - } - finally - { - _lock.ExitReadLock(); - } } internal void AddGlobalTypeMappingResolvers(PgTypeInfoResolverFactory[] factories, Func? builderFactory = null, bool overwrite = false) { - // Good enough logic to prevent SlimBuilder overriding the normal Builder. - if (overwrite || factories.Length > _typeMappingResolvers.Length) + lock (_sync) { - _builderFactory = builderFactory; - _typeMappingResolvers = factories; - ResetTypeMappingCache(); + // Good enough logic to prevent SlimBuilder overriding the normal Builder. 
+ if (overwrite || factories.Length > _typeMappingResolvers.Length) + { + _builderFactory = builderFactory; + _typeMappingResolvers = factories; + _typeMappingOptions = null; + } } } - void ResetTypeMappingCache() => _typeMappingOptions = null; - PgSerializerOptions? _typeMappingOptions; Func? _builderFactory; JsonSerializerOptions? _jsonSerializerOptions; - PgSerializerOptions TypeMappingOptions + PgSerializerOptions TypeMappingOptions => _typeMappingOptions ?? BuildTypeMappingOptions(); + + PgSerializerOptions BuildTypeMappingOptions() { - get + lock (_sync) { - if (_typeMappingOptions is not null) - return _typeMappingOptions; - - _lock.EnterReadLock(); - try + if (_typeMappingOptions is { } existing) + return existing; + + var builder = _builderFactory?.Invoke() ?? new(); + builder.AppendResolverFactory(_userTypeMapper); + foreach (var factory in _pluginResolverFactories) + builder.AppendResolverFactory(factory); + foreach (var factory in _typeMappingResolvers) + builder.AppendResolverFactory(factory); + var chain = builder.Build(); + var options = new PgSerializerOptions(PostgresMinimalDatabaseInfo.DefaultTypeCatalog, chain) { - var builder = _builderFactory?.Invoke() ?? new(); - builder.AppendResolverFactory(_userTypeMapper); - foreach (var factory in _pluginResolverFactories) - builder.AppendResolverFactory(factory); - foreach (var factory in _typeMappingResolvers) - builder.AppendResolverFactory(factory); - var chain = builder.Build(); - return _typeMappingOptions = new(PostgresMinimalDatabaseInfo.DefaultTypeCatalog, chain) - { - // This means we don't ever have a missing oid for a datatypename as our canonical format is datatypenames. - PortableTypeIds = true, - // Don't throw if our catalog doesn't know the datatypename. - IntrospectionMode = true - }; - } - finally - { - _lock.ExitReadLock(); - } + // This means we don't ever have a missing oid for a datatypename as our canonical format is datatypenames. 
+ PortableTypeIds = true, + // Don't throw if our catalog doesn't know the datatypename. + IntrospectionMode = true + }; + _typeMappingOptions = options; + return options; } } @@ -127,8 +106,7 @@ static GlobalTypeMapper() /// public void AddTypeInfoResolverFactory(PgTypeInfoResolverFactory factory) { - _lock.EnterWriteLock(); - try + lock (_sync) { var type = factory.GetType(); @@ -146,53 +124,21 @@ public void AddTypeInfoResolverFactory(PgTypeInfoResolverFactory factory) } _pluginResolverFactories.Insert(0, factory); - ResetTypeMappingCache(); - } - finally - { - _lock.ExitWriteLock(); + _typeMappingOptions = null; } } public void AddDbTypeResolverFactory(DbTypeResolverFactory factory) => throw new NotSupportedException("The global type mapper does not support DbTypeResolverFactories. Call this method on a data source builder instead."); - void ReplaceTypeInfoResolverFactory(PgTypeInfoResolverFactory factory) - { - _lock.EnterWriteLock(); - try - { - var type = factory.GetType(); - - for (var i = 0; i < _pluginResolverFactories.Count; i++) - { - if (_pluginResolverFactories[i].GetType() == type) - { - _pluginResolverFactories[i] = factory; - break; - } - } - - ResetTypeMappingCache(); - } - finally - { - _lock.ExitWriteLock(); - } - } - /// public void Reset() { - _lock.EnterWriteLock(); - try + lock (_sync) { _pluginResolverFactories.Clear(); _userTypeMapper.Items.Clear(); - } - finally - { - _lock.ExitWriteLock(); + _typeMappingOptions = null; } } @@ -206,9 +152,25 @@ public INpgsqlNameTranslator DefaultNameTranslator /// public INpgsqlTypeMapper ConfigureJsonOptions(JsonSerializerOptions serializerOptions) { - _jsonSerializerOptions = serializerOptions; - // If JsonTypeInfoResolverFactory exists we replace it with a configured instance on the same index of the array. 
- ReplaceTypeInfoResolverFactory(new JsonTypeInfoResolverFactory(serializerOptions)); + lock (_sync) + { + _jsonSerializerOptions = serializerOptions; + + // If JsonTypeInfoResolverFactory exists we replace it with a configured instance on the same index of the array. + var factory = new JsonTypeInfoResolverFactory(serializerOptions); + var type = factory.GetType(); + + for (var i = 0; i < _pluginResolverFactories.Count; i++) + { + if (_pluginResolverFactories[i].GetType() == type) + { + _pluginResolverFactories[i] = factory; + break; + } + } + + _typeMappingOptions = null; + } return this; } @@ -219,7 +181,9 @@ public INpgsqlTypeMapper EnableDynamicJson( Type[]? jsonbClrTypes = null, Type[]? jsonClrTypes = null) { - AddTypeInfoResolverFactory(new JsonDynamicTypeInfoResolverFactory(jsonbClrTypes, jsonClrTypes, _jsonSerializerOptions)); + // Use a re-entered lock to add the read of _jsonSerializerOptions to the total scope. + lock (_sync) + AddTypeInfoResolverFactory(new JsonDynamicTypeInfoResolverFactory(jsonbClrTypes, jsonClrTypes, _jsonSerializerOptions)); return this; } @@ -244,33 +208,23 @@ public INpgsqlTypeMapper EnableUnmappedTypes() /// public INpgsqlTypeMapper MapEnum<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] TEnum>(string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) where TEnum : struct, Enum { - _lock.EnterWriteLock(); - try + lock (_sync) { _userTypeMapper.MapEnum(pgName, nameTranslator); - ResetTypeMappingCache(); + _typeMappingOptions = null; return this; } - finally - { - _lock.ExitWriteLock(); - } } /// public bool UnmapEnum<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields)] TEnum>(string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) where TEnum : struct, Enum { - _lock.EnterWriteLock(); - try + lock (_sync) { var removed = _userTypeMapper.UnmapEnum(pgName, nameTranslator); - ResetTypeMappingCache(); + _typeMappingOptions = null; return removed; } - finally - { - _lock.ExitWriteLock(); - } } /// @@ -278,34 +232,24 @@ public INpgsqlTypeMapper EnableUnmappedTypes() public INpgsqlTypeMapper MapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)] Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) { - _lock.EnterWriteLock(); - try + lock (_sync) { _userTypeMapper.MapEnum(clrType, pgName, nameTranslator); - ResetTypeMappingCache(); + _typeMappingOptions = null; return this; } - finally - { - _lock.ExitWriteLock(); - } } /// public bool UnmapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicFields | DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)] Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) { - _lock.EnterWriteLock(); - try + lock (_sync) { var removed = _userTypeMapper.UnmapEnum(clrType, pgName, nameTranslator); - ResetTypeMappingCache(); + _typeMappingOptions = null; return removed; } - finally - { - _lock.ExitWriteLock(); - } } /// @@ -323,17 +267,12 @@ public bool UnmapEnum([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes public INpgsqlTypeMapper MapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] Type clrType, string? pgName = null, INpgsqlNameTranslator? 
nameTranslator = null) { - _lock.EnterWriteLock(); - try + lock (_sync) { _userTypeMapper.MapComposite(clrType, pgName, nameTranslator); - ResetTypeMappingCache(); + _typeMappingOptions = null; return this; } - finally - { - _lock.ExitWriteLock(); - } } /// @@ -341,16 +280,11 @@ public INpgsqlTypeMapper MapComposite([DynamicallyAccessedMembers(DynamicallyAcc public bool UnmapComposite([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicFields)] Type clrType, string? pgName = null, INpgsqlNameTranslator? nameTranslator = null) { - _lock.EnterWriteLock(); - try + lock (_sync) { var result = _userTypeMapper.UnmapComposite(clrType, pgName, nameTranslator); - ResetTypeMappingCache(); + _typeMappingOptions = null; return result; } - finally - { - _lock.ExitWriteLock(); - } } } From 715baa7dd5a03a1bfcd049c3eb7d98184e8c74a3 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Mon, 20 Apr 2026 18:22:57 +0200 Subject: [PATCH 754/761] Add consistent nullable unwrapping to dynamic resolvers (#6548) --- .../JsonNetPocoTypeInfoResolverFactory.cs | 12 +++++-- .../Internal/DynamicTypeInfoResolver.cs | 34 +++++++++++++++---- .../JsonDynamicTypeInfoResolverFactory.cs | 12 +++++-- .../TupledRecordTypeInfoResolverFactory.cs | 11 +++--- .../UnmappedTypeInfoResolverFactory.cs | 12 ++++++- 5 files changed, 64 insertions(+), 17 deletions(-) diff --git a/src/Npgsql.Json.NET/Internal/JsonNetPocoTypeInfoResolverFactory.cs b/src/Npgsql.Json.NET/Internal/JsonNetPocoTypeInfoResolverFactory.cs index c038f17aab..8899eddb60 100644 --- a/src/Npgsql.Json.NET/Internal/JsonNetPocoTypeInfoResolverFactory.cs +++ b/src/Npgsql.Json.NET/Internal/JsonNetPocoTypeInfoResolverFactory.cs @@ -63,7 +63,9 @@ static void AddUserMappings(TypeInfoMappingCollection mappings, bool jsonb, Type || dataTypeName != JsonbDataTypeName && dataTypeName != JsonDataTypeName) return null; - return 
CreateCollection().AddMapping(type, dataTypeName, (options, mapping, _) => + var matchedType = Nullable.GetUnderlyingType(type) ?? type; + + return CreateCollection().AddMapping(matchedType, dataTypeName, (options, mapping, _) => { var jsonb = dataTypeName == JsonbDataTypeName; return mapping.CreateInfo(options, @@ -98,7 +100,12 @@ TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings, TypeIn var dynamicMappings = CreateCollection(baseMappings); foreach (var mapping in baseMappings.Items) + { + // Always handle Nullable mappings as part of the underlying type. + if (Nullable.GetUnderlyingType(mapping.Type) is not null) + continue; dynamicMappings.AddArrayMapping(mapping.Type, mapping.DataTypeName); + } mappings.AddRange(dynamicMappings.ToTypeInfoMappingCollection()); return mappings; @@ -106,9 +113,8 @@ TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings, TypeIn protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) => type is not null && IsArrayLikeType(type, out var elementType) && IsArrayDataTypeName(dataTypeName, options, out var elementDataTypeName) - ? base.GetMappings(elementType, elementDataTypeName, options)?.AddArrayMapping(elementType, elementDataTypeName) + ? base.GetMappings(elementType, elementDataTypeName, options)?.AddArrayMapping(Nullable.GetUnderlyingType(elementType) ?? elementType, elementDataTypeName) : null; } - } diff --git a/src/Npgsql/Internal/DynamicTypeInfoResolver.cs b/src/Npgsql/Internal/DynamicTypeInfoResolver.cs index 840bd10b37..d6f7abb824 100644 --- a/src/Npgsql/Internal/DynamicTypeInfoResolver.cs +++ b/src/Npgsql/Internal/DynamicTypeInfoResolver.cs @@ -55,10 +55,11 @@ internal DynamicMappingCollection(TypeInfoMappingCollection? 
baseCollection = nu public DynamicMappingCollection AddMapping([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)]Type type, string dataTypeName, TypeInfoFactory factory, Func? configureMapping = null) { - if (type.IsValueType && Nullable.GetUnderlyingType(type) is not null) - throw new NotSupportedException("Mapping nullable types is not supported, map its underlying type instead to get both."); - if (type.IsValueType) + { + if (Nullable.GetUnderlyingType(type) is not null) + throw new NotSupportedException("Mapping nullable types is not supported, map its underlying type instead to get both."); + typeof(TypeInfoMappingCollection) .GetMethod(nameof(TypeInfoMappingCollection.AddStructType), [typeof(string), typeof(TypeInfoFactory), typeof(Func)])! .MakeGenericMethod(type).Invoke(_mappings ??= new(), @@ -67,7 +68,9 @@ public DynamicMappingCollection AddMapping([DynamicallyAccessedMembers(Dynamical factory, configureMapping ]); + } else + { typeof(TypeInfoMappingCollection) .GetMethod(nameof(TypeInfoMappingCollection.AddType), [typeof(string), typeof(TypeInfoFactory), typeof(Func)])! .MakeGenericMethod(type).Invoke(_mappings ??= new(), @@ -76,28 +79,37 @@ public DynamicMappingCollection AddMapping([DynamicallyAccessedMembers(Dynamical factory, configureMapping ]); + } return this; } public DynamicMappingCollection AddArrayMapping([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)]Type elementType, string dataTypeName) { if (elementType.IsValueType) + { + if (Nullable.GetUnderlyingType(elementType) is not null) + throw new NotSupportedException("Mapping nullable types is not supported, map its underlying type instead to get both."); + typeof(TypeInfoMappingCollection) .GetMethod(nameof(TypeInfoMappingCollection.AddStructArrayType), [typeof(string)])! 
.MakeGenericMethod(elementType).Invoke(_mappings ??= new(), [dataTypeName]); + } else + { typeof(TypeInfoMappingCollection) .GetMethod(nameof(TypeInfoMappingCollection.AddArrayType), [typeof(string)])! .MakeGenericMethod(elementType).Invoke(_mappings ??= new(), [dataTypeName]); + } return this; } public DynamicMappingCollection AddResolverMapping([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)]Type type, string dataTypeName, TypeInfoFactory factory, Func? configureMapping = null) { - if (type.IsValueType && Nullable.GetUnderlyingType(type) is not null) - throw new NotSupportedException("Mapping nullable types is not supported"); - if (type.IsValueType) + { + if (Nullable.GetUnderlyingType(type) is not null) + throw new NotSupportedException("Mapping nullable types is not supported, map its underlying type instead to get both."); + typeof(TypeInfoMappingCollection) .GetMethod(nameof(TypeInfoMappingCollection.AddProviderStructType), [typeof(string), typeof(TypeInfoFactory), typeof(Func)])! .MakeGenericMethod(type).Invoke(_mappings ??= new(), @@ -106,7 +118,9 @@ public DynamicMappingCollection AddResolverMapping([DynamicallyAccessedMembers(D factory, configureMapping ]); + } else + { typeof(TypeInfoMappingCollection) .GetMethod(nameof(TypeInfoMappingCollection.AddProviderType), [typeof(string), typeof(TypeInfoFactory), typeof(Func)])! 
.MakeGenericMethod(type).Invoke(_mappings ??= new(), @@ -115,19 +129,27 @@ public DynamicMappingCollection AddResolverMapping([DynamicallyAccessedMembers(D factory, configureMapping ]); + } return this; } public DynamicMappingCollection AddResolverArrayMapping([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)]Type elementType, string dataTypeName) { if (elementType.IsValueType) + { + if (Nullable.GetUnderlyingType(elementType) is not null) + throw new NotSupportedException("Mapping nullable types is not supported, map its underlying type instead to get both."); + typeof(TypeInfoMappingCollection) .GetMethod(nameof(TypeInfoMappingCollection.AddProviderStructArrayType), [typeof(string)])! .MakeGenericMethod(elementType).Invoke(_mappings ??= new(), [dataTypeName]); + } else + { typeof(TypeInfoMappingCollection) .GetMethod(nameof(TypeInfoMappingCollection.AddProviderArrayType), [typeof(string)])! .MakeGenericMethod(elementType).Invoke(_mappings ??= new(), [dataTypeName]); + } return this; } diff --git a/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs index aca5484b77..02a456492f 100644 --- a/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/JsonDynamicTypeInfoResolverFactory.cs @@ -98,7 +98,9 @@ void AddUserMappings(bool jsonb, Type[] clrTypes) || dataTypeName != DataTypeNames.Jsonb && dataTypeName != DataTypeNames.Json) return null; - return CreateCollection().AddMapping(type, dataTypeName, (options, mapping, _) => + var matchedType = Nullable.GetUnderlyingType(type) ?? type; + + return CreateCollection().AddMapping(matchedType, dataTypeName, (options, mapping, _) => { var jsonb = dataTypeName == DataTypeNames.Jsonb; @@ -133,7 +135,8 @@ sealed class ArrayResolver(Type[]? jsonbClrTypes = null, Type[]? 
jsonClrTypes = protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) => type is not null && IsArrayLikeType(type, out var elementType) && IsArrayDataTypeName(dataTypeName, options, out var elementDataTypeName) - ? base.GetMappings(elementType, elementDataTypeName, options)?.AddArrayMapping(elementType, elementDataTypeName) + ? base.GetMappings(elementType, elementDataTypeName, options) + ?.AddArrayMapping(Nullable.GetUnderlyingType(elementType) ?? elementType, elementDataTypeName) : null; static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings, TypeInfoMappingCollection baseMappings) @@ -143,7 +146,12 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings, var dynamicMappings = CreateCollection(baseMappings); foreach (var mapping in baseMappings.Items) + { + // Always handle Nullable mappings as part of the underlying type. + if (Nullable.GetUnderlyingType(mapping.Type) is not null) + continue; dynamicMappings.AddArrayMapping(mapping.Type, mapping.DataTypeName); + } mappings.AddRange(dynamicMappings.ToTypeInfoMappingCollection()); return mappings; diff --git a/src/Npgsql/Internal/ResolverFactories/TupledRecordTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/TupledRecordTypeInfoResolverFactory.cs index 7ee00d37a7..551c2836b9 100644 --- a/src/Npgsql/Internal/ResolverFactories/TupledRecordTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/TupledRecordTypeInfoResolverFactory.cs @@ -19,12 +19,13 @@ class Resolver : DynamicTypeInfoResolver { protected override DynamicMappingCollection? GetMappings(Type? 
type, DataTypeName dataTypeName, PgSerializerOptions options) { - if (!(dataTypeName == DataTypeNames.Record && type is { IsConstructedGenericType: true, FullName: not null } && ( - type.FullName.StartsWith("System.Tuple", StringComparison.Ordinal) - || type.FullName.StartsWith("System.ValueTuple", StringComparison.Ordinal)))) + if (dataTypeName != DataTypeNames.Record || type is null || !IsTypeOrNullableOfType(type, + static type => type is { IsConstructedGenericType: true, FullName: not null } && + (type.FullName.StartsWith("System.Tuple", StringComparison.Ordinal) || + type.FullName.StartsWith("System.ValueTuple", StringComparison.Ordinal)), out var matchedType)) return null; - return CreateCollection().AddMapping(type, dataTypeName, (options, mapping, _) => + return CreateCollection().AddMapping(matchedType, dataTypeName, (options, mapping, _) => { var constructors = mapping.Type.GetConstructors(); ConstructorInfo? constructor = null; @@ -68,7 +69,7 @@ sealed class ArrayResolver : Resolver { protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) => type is not null && IsArrayLikeType(type, out var elementType) && IsArrayDataTypeName(dataTypeName, options, out var elementDataTypeName) - ? base.GetMappings(elementType, elementDataTypeName, options)?.AddArrayMapping(elementType, elementDataTypeName) + ? base.GetMappings(elementType, elementDataTypeName, options)?.AddArrayMapping(Nullable.GetUnderlyingType(elementType) ?? 
elementType, elementDataTypeName) : null; } } diff --git a/src/Npgsql/Internal/ResolverFactories/UnmappedTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/UnmappedTypeInfoResolverFactory.cs index 2a62e7a8ab..db3a5fc772 100644 --- a/src/Npgsql/Internal/ResolverFactories/UnmappedTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/UnmappedTypeInfoResolverFactory.cs @@ -58,7 +58,7 @@ sealed class EnumArrayResolver : EnumResolver { protected override DynamicMappingCollection? GetMappings(Type? type, DataTypeName dataTypeName, PgSerializerOptions options) => type is not null && IsArrayLikeType(type, out var elementType) && IsArrayDataTypeName(dataTypeName, options, out var elementDataTypeName) - ? base.GetMappings(elementType, elementDataTypeName, options)?.AddArrayMapping(elementType, elementDataTypeName) + ? base.GetMappings(elementType, elementDataTypeName, options)?.AddArrayMapping(Nullable.GetUnderlyingType(elementType) ?? elementType, elementDataTypeName) : null; } @@ -114,7 +114,12 @@ sealed class RangeArrayResolver : RangeResolver return null; var mappings = base.GetMappings(elementType, elementDataTypeName, options); + elementType ??= mappings?.Find(null, elementDataTypeName, options)?.Type; // Try to get the default mapping. + + if (elementType is not null && Nullable.GetUnderlyingType(elementType) is { } underlyingType) + elementType = underlyingType; + return elementType is null ? null : mappings?.AddArrayMapping(elementType, elementDataTypeName); } } @@ -168,7 +173,12 @@ sealed class MultirangeArrayResolver : MultirangeResolver return null; var mappings = base.GetMappings(elementType, elementDataTypeName, options); + elementType ??= mappings?.Find(null, elementDataTypeName, options)?.Type; // Try to get the default mapping. + + if (elementType is not null && Nullable.GetUnderlyingType(elementType) is { } underlyingType) + elementType = underlyingType; + return elementType is null ? 
null : mappings?.AddArrayMapping(elementType, elementDataTypeName); } } From f59c06f00a97ebe85dd7f3f90dafa43454fd3f52 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Mon, 20 Apr 2026 18:45:19 +0200 Subject: [PATCH 755/761] Align missed renames with #6316 --- src/Npgsql/Internal/DynamicTypeInfoResolver.cs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Npgsql/Internal/DynamicTypeInfoResolver.cs b/src/Npgsql/Internal/DynamicTypeInfoResolver.cs index d6f7abb824..cdc6449331 100644 --- a/src/Npgsql/Internal/DynamicTypeInfoResolver.cs +++ b/src/Npgsql/Internal/DynamicTypeInfoResolver.cs @@ -103,7 +103,7 @@ public DynamicMappingCollection AddArrayMapping([DynamicallyAccessedMembers(Dyna return this; } - public DynamicMappingCollection AddResolverMapping([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)]Type type, string dataTypeName, TypeInfoFactory factory, Func? configureMapping = null) + public DynamicMappingCollection AddProviderMapping([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)]Type type, string dataTypeName, TypeInfoFactory factory, Func? 
configureMapping = null) { if (type.IsValueType) { @@ -133,7 +133,7 @@ public DynamicMappingCollection AddResolverMapping([DynamicallyAccessedMembers(D return this; } - public DynamicMappingCollection AddResolverArrayMapping([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)]Type elementType, string dataTypeName) + public DynamicMappingCollection AddProviderArrayMapping([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)]Type elementType, string dataTypeName) { if (elementType.IsValueType) { From 2d91c29738a50b9d82a532193b07d9410fd9e218 Mon Sep 17 00:00:00 2001 From: Copilot <198982749+Copilot@users.noreply.github.com> Date: Tue, 21 Apr 2026 17:52:43 +0200 Subject: [PATCH 756/761] Fix JsonNode[] missing from basic JSON types (#6549) --- .../ResolverFactories/JsonTypeInfoResolverFactory.cs | 3 +++ test/Npgsql.Tests/Types/JsonTests.cs | 12 ++++++++++++ 2 files changed, 15 insertions(+) diff --git a/src/Npgsql/Internal/ResolverFactories/JsonTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/JsonTypeInfoResolverFactory.cs index f778bea186..6e926e49ef 100644 --- a/src/Npgsql/Internal/ResolverFactories/JsonTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/JsonTypeInfoResolverFactory.cs @@ -79,6 +79,8 @@ sealed class BasicJsonTypeInfoResolver : IJsonTypeInfoResolver return JsonMetadataServices.CreateValueInfo(options, JsonMetadataServices.JsonArrayConverter); if (type == typeof(JsonValue)) return JsonMetadataServices.CreateValueInfo(options, JsonMetadataServices.JsonValueConverter); + if (type == typeof(JsonNode)) + return JsonMetadataServices.CreateValueInfo(options, JsonMetadataServices.JsonNodeConverter); return null; } } @@ -101,6 +103,7 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) mappings.AddArrayType(dataTypeName); mappings.AddArrayType(dataTypeName); mappings.AddArrayType(dataTypeName); + 
mappings.AddArrayType(dataTypeName); } return mappings; diff --git a/test/Npgsql.Tests/Types/JsonTests.cs b/test/Npgsql.Tests/Types/JsonTests.cs index 5cf8504ac4..a113cfe2cf 100644 --- a/test/Npgsql.Tests/Types/JsonTests.cs +++ b/test/Npgsql.Tests/Types/JsonTests.cs @@ -224,6 +224,18 @@ public async Task Write_jsonobject_array_without_npgsqldbtype() await cmd.ExecuteNonQueryAsync(); } + [Test] + [IssueLink("https://github.com/npgsql/npgsql/issues/6517")] + public Task Roundtrip_JsonNode() + => AssertType( + (JsonNode)new JsonObject { ["Bar"] = 8 }, + IsJsonb ? """{"Bar": 8}""" : """{"Bar":8}""", + PostgresType, + // By default we map JsonNode to jsonb + dataTypeInference: IsJsonb ? DataTypeInference.Match : DataTypeInference.Mismatch, + valueTypeEqualsFieldType: false, + comparer: (x, y) => x.ToString() == y.ToString()); + public JsonTests(string dataTypeName) { if (dataTypeName == "jsonb") From dc5353f373fdaf482efdfa9aef70474f8766a833 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 22 Apr 2026 14:11:19 +0000 Subject: [PATCH 757/761] Bump the minor-and-patch group with 8 updates (#6553) --- Directory.Packages.props | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 49157d3237..564a04b5e6 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -1,17 +1,17 @@ - 10.0.6 - 10.0.6 + 10.0.7 + 10.0.7 - 10.0.6 - 10.0.6 + 10.0.7 + 10.0.7 - + @@ -21,7 +21,7 @@ - + @@ -39,8 +39,8 @@ - - + + From 2024d5a7f45d362d8cfa6a102c5bffe048d30edd Mon Sep 17 00:00:00 2001 From: Nikita Kazmin Date: Fri, 24 Apr 2026 12:07:51 +0300 Subject: [PATCH 758/761] Fix gss encryption on windows (#6556) Fixes #6555 --- src/Npgsql/Internal/NpgsqlConnector.cs | 4 +++- test/Npgsql.Tests/ConnectionTests.cs | 10 ++++++++-- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/src/Npgsql/Internal/NpgsqlConnector.cs 
b/src/Npgsql/Internal/NpgsqlConnector.cs index a7880f33a1..cedcfeac8b 100644 --- a/src/Npgsql/Internal/NpgsqlConnector.cs +++ b/src/Npgsql/Internal/NpgsqlConnector.cs @@ -612,7 +612,9 @@ internal async ValueTask GSSEncrypt(bool async, bool isRequ { TargetName = targetName, RequireMutualAuthentication = true, - RequiredProtectionLevel = ProtectionLevel.EncryptAndSign + RequiredProtectionLevel = ProtectionLevel.EncryptAndSign, + // GSS encryption only works with kerberos + Package = "Kerberos" }; NegotiateOptionsCallback?.Invoke(clientOptions); diff --git a/test/Npgsql.Tests/ConnectionTests.cs b/test/Npgsql.Tests/ConnectionTests.cs index 863130be1c..6b32630b9c 100644 --- a/test/Npgsql.Tests/ConnectionTests.cs +++ b/test/Npgsql.Tests/ConnectionTests.cs @@ -1536,7 +1536,7 @@ public async Task Sync_open_blocked_same_thread() } [Test, IssueLink("https://github.com/npgsql/npgsql/issues/6427")] - [Platform(Include = "Win")] // Hangs on linux and mac when server closes the socket + [Platform(Include = "Win")] // Hangs on linux and mac (probably because of missing kerberos token) public async Task Gss_encryption_retry_does_not_clear_pool() { var csb = new NpgsqlConnectionStringBuilder(ConnectionString) @@ -1547,7 +1547,13 @@ public async Task Gss_encryption_retry_does_not_clear_pool() // Break connection on gss encryption request to force the client to create a new connection and retry again // This emulates the behavior of older versions of PostgreSQL or its forks, like Supabase await using var postmaster = PgPostmasterMock.Start(csb.ConnectionString, breakOnGssEncryptionRequest: true); - await using var dataSource = CreateDataSource(postmaster.ConnectionString); + await using var dataSource = CreateDataSource(builder => + { + builder.ConnectionStringBuilder.ConnectionString = postmaster.ConnectionString; + // We use kerberos by default, which requires specific credentials to work + // Change it negotiate so SSPI on windows can use NTLM credentials + 
builder.UseNegotiateOptionsCallback(options => options.Package = "Negotiate"); + }); PgServerMock server; From 6042bed17d6f233b51de94e9c8ec9ca414d18112 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Sun, 3 May 2026 18:07:59 +0200 Subject: [PATCH 759/761] Centralization of mapping resolution and binding logic (#6559) --- .../BackendMessages/RowDescriptionMessage.cs | 126 ++-- src/Npgsql/Internal/AdoSerializerHelpers.cs | 54 +- .../Composites/Metadata/CompositeFieldInfo.cs | 45 +- .../Internal/Converters/ArrayConverter.cs | 36 +- .../Internal/Converters/ArrayConverterCore.cs | 8 +- .../Converters/BitStringConverters.cs | 4 +- .../Internal/Converters/CastingConverter.cs | 58 +- .../Internal/Converters/CompositeConverter.cs | 4 +- .../Converters/MultirangeConverter.cs | 2 +- .../Internal/Converters/NullableConverter.cs | 10 +- .../Internal/Converters/ObjectConverter.cs | 13 +- .../PolymorphicArrayTypeInfoProvider.cs | 6 +- .../Converters/Primitive/ByteaConverters.cs | 27 +- .../Converters/Primitive/TextConverters.cs | 6 + .../Internal/Converters/RangeConverter.cs | 4 +- .../Internal/Converters/RecordConverter.cs | 11 +- .../Internal/PgComposingTypeInfoProvider.cs | 16 +- src/Npgsql/Internal/PgConverter.cs | 91 ++- src/Npgsql/Internal/PgReader.cs | 17 +- src/Npgsql/Internal/PgSerializerOptions.cs | 2 +- src/Npgsql/Internal/PgTypeInfo.cs | 560 ++++++++++-------- src/Npgsql/Internal/PgWriter.cs | 58 +- src/Npgsql/Internal/Postgres/Field.cs | 2 + .../AdoTypeInfoResolverFactory.cs | 31 +- ...ExtraConversionsTypeInfoResolverFactory.cs | 4 +- .../NetworkTypeInfoResolverFactory.cs | 2 +- .../UnmappedTypeInfoResolverFactory.cs | 14 +- src/Npgsql/Internal/TypeInfoCache.cs | 6 +- src/Npgsql/Internal/TypeInfoMapping.cs | 123 ++-- src/Npgsql/NpgsqlBinaryExporter.cs | 85 ++- src/Npgsql/NpgsqlBinaryImporter.cs | 22 +- src/Npgsql/NpgsqlDataReader.cs | 174 +++--- src/Npgsql/NpgsqlNestedDataReader.cs | 103 ++-- src/Npgsql/NpgsqlParameter.cs | 386 ++++++++---- 
src/Npgsql/NpgsqlParameterCollection.cs | 22 +- src/Npgsql/NpgsqlParameter`.cs | 61 +- .../Replication/PgOutput/ReplicationValue.cs | 32 +- src/Npgsql/TypeMapping/GlobalTypeMapper.cs | 5 +- src/Npgsql/Util/TypeExtensions.cs | 22 + .../TypeHandlers/TypeHandlerBenchmarks.cs | 56 +- test/Npgsql.Tests/NpgsqlParameterTests.cs | 21 +- test/Npgsql.Tests/ReaderTests.cs | 6 +- 42 files changed, 1282 insertions(+), 1053 deletions(-) create mode 100644 src/Npgsql/Util/TypeExtensions.cs diff --git a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs index 09cd464650..a453fdbbf0 100644 --- a/src/Npgsql/BackendMessages/RowDescriptionMessage.cs +++ b/src/Npgsql/BackendMessages/RowDescriptionMessage.cs @@ -11,11 +11,11 @@ namespace Npgsql.BackendMessages; -readonly struct ColumnInfo(PgConverterInfo converterInfo, DataFormat dataFormat, bool asObject) +readonly struct ReadConversionContext(PgConcreteTypeInfo typeInfo, PgFieldBinding binding) { - public PgConverterInfo ConverterInfo { get; } = converterInfo; - public DataFormat DataFormat { get; } = dataFormat; - public bool AsObject { get; } = asObject; + public bool IsDefault => TypeInfo is null; + public PgConcreteTypeInfo TypeInfo { get; } = typeInfo; + public PgFieldBinding Binding { get; } = binding; } /// @@ -36,7 +36,7 @@ sealed class RowDescriptionMessage : IBackendMessage FieldDescription?[] _fields; readonly Dictionary _nameIndex; Dictionary? _insensitiveIndex; - ColumnInfo[]? _lastConverterInfoCache; + ReadConversionContext[]? 
_lastConverterInfoCache; internal RowDescriptionMessage(bool connectorOwned, int numFields = 10) { @@ -135,14 +135,18 @@ public FieldDescription this[int ordinal] } } - internal void SetColumnInfoCache(ReadOnlySpan values) + [MethodImpl(MethodImplOptions.NoInlining)] + internal void GetConversionContext(int ordinal, Type type, ref ReadConversionContext result) + => this[ordinal].GetConversionContext(type, ref result); + + internal void SetColumnInfoCache(ReadOnlySpan values) { if (_connectorOwned || _lastConverterInfoCache is not null) return; Interlocked.CompareExchange(ref _lastConverterInfoCache, values.ToArray(), null); } - internal void LoadColumnInfoCache(PgSerializerOptions options, ColumnInfo[] values) + internal void LoadColumnInfoCache(PgSerializerOptions options, ReadConversionContext[] values) { if (_lastConverterInfoCache is not { } cache) return; @@ -232,7 +236,7 @@ internal FieldDescription(FieldDescription source) DataFormat = source.DataFormat; PostgresType = source.PostgresType; Field = source.Field; - _objectInfo = source._objectInfo; + _objectConversionContext = source._objectConversionContext; } internal void Populate( @@ -250,7 +254,7 @@ internal void Populate( DataFormat = dataFormat; PostgresType = _serializerOptions.DatabaseInfo.FindPostgresType((Oid)TypeOID)?.GetRepresentationalType() ?? UnknownBackendType.Instance; Field = new(Name, _serializerOptions.ToCanonicalTypeId(PostgresType), TypeModifier); - _objectInfo = default; + _objectConversionContext = default; } /// @@ -290,25 +294,35 @@ internal void Populate( /// internal DataFormat DataFormat { get; set; } + /// + /// Whether this field's data was requested in text format because the user opted into UnknownResultType + /// (via NpgsqlCommand.UnknownResultTypeList or AllResultTypesAreUnknown). Bindings for such fields are + /// expected to reinterpret the text bytes through a converter that could potentially only support binary formats. 
+ /// + /// + /// DataFormat.Text today exclusively signals that we executed with an UnknownResultTypeList. + /// If we ever want to fully support DataFormat.Text we'll need to flow UnknownResultType status separately. + /// + internal bool IsUnknownResultType => DataFormat is DataFormat.Text; + internal Field Field { get; private set; } internal string TypeDisplayName => PostgresType.GetDisplayNameWithFacets(TypeModifier); internal PostgresType PostgresType { get; private set; } - internal Type FieldType => ObjectInfo.TypeToConvert; + internal Type FieldType => ObjectConversionContext.TypeInfo.Type; - ColumnInfo _objectInfo; - internal PgConverterInfo ObjectInfo + ReadConversionContext _objectConversionContext; + internal ReadConversionContext ObjectConversionContext { get { - if (!_objectInfo.ConverterInfo.IsDefault) - return _objectInfo.ConverterInfo; + if (!_objectConversionContext.IsDefault) + return _objectConversionContext; - ref var info = ref _objectInfo; - GetInfoCore(null, ref _objectInfo); - return info.ConverterInfo; + GetInfoAndBind(null, ref _objectConversionContext); + return _objectConversionContext; } } @@ -320,84 +334,74 @@ internal FieldDescription Clone() return field; } - internal void GetInfo(Type type, ref ColumnInfo lastColumnInfo) => GetInfoCore(type, ref lastColumnInfo); - void GetInfoCore(Type? type, ref ColumnInfo lastColumnInfo) + internal void GetConversionContext(Type type, ref ReadConversionContext result) => GetInfoAndBind(type, ref result); + void GetInfoAndBind(Type? 
type, ref ReadConversionContext result) { - Debug.Assert(lastColumnInfo.ConverterInfo.IsDefault || ( - ReferenceEquals(_serializerOptions, lastColumnInfo.ConverterInfo.TypeInfo.Options) && ( - IsUnknownResultType() && lastColumnInfo.ConverterInfo.TypeInfo.PgTypeId == _serializerOptions.TextPgTypeId || + Debug.Assert(result.IsDefault || ( + ReferenceEquals(_serializerOptions, result.TypeInfo.Options) && ( + IsUnknownResultType && result.TypeInfo.PgTypeId == _serializerOptions.TextPgTypeId || // Normal resolution - lastColumnInfo.ConverterInfo.TypeInfo.PgTypeId == _serializerOptions.ToCanonicalTypeId(PostgresType)) + result.TypeInfo.PgTypeId == _serializerOptions.ToCanonicalTypeId(PostgresType)) ), "Cache is bleeding over"); - if (!lastColumnInfo.ConverterInfo.IsDefault && lastColumnInfo.ConverterInfo.TypeToConvert == type) + if (result is { IsDefault: false, TypeInfo.Type: var typeToConvert } && typeToConvert == type) return; - var objectInfo = DataFormat is DataFormat.Text && type is not null ? ObjectInfo : _objectInfo.ConverterInfo; - if (objectInfo is { IsDefault: false }) + var objectInfo = DataFormat is DataFormat.Text && type is not null ? ObjectConversionContext : _objectConversionContext; + if (objectInfo.TypeInfo is not null && (typeof(object) == type || objectInfo.TypeInfo.Type == type)) { - if (typeof(object) == type) - { - lastColumnInfo = new(objectInfo, DataFormat, true); - return; - } - if (objectInfo.TypeToConvert == type) - { - // As TypeInfoMappingCollection is always adding object mappings for - // default/datatypename mappings, we'll also check Converter.TypeToConvert. - // If we have an exact match we are still able to use e.g. a converter for ints in an unboxed fashion. 
- lastColumnInfo = new(objectInfo, DataFormat, objectInfo.IsBoxingConverter && objectInfo.Converter.TypeToConvert != type); - return; - } + result = objectInfo; + return; } - GetInfoSlow(type, out lastColumnInfo); + Core(type, out result); + if (!result.IsDefault && result.Binding.DataFormat != DataFormat) + ThrowHelper.ThrowInvalidOperationException( + $"Binding for column '{Name}' produced format '{result.Binding.DataFormat}' but the field format is '{DataFormat}'."); [MethodImpl(MethodImplOptions.NoInlining)] - void GetInfoSlow(Type? type, out ColumnInfo lastColumnInfo) + void Core(Type? type, out ReadConversionContext lastReadConversionContext) { - PgConverterInfo converterInfo; + PgFieldBinding binding; switch (DataFormat) { - case DataFormat.Text when IsUnknownResultType(): + case DataFormat.Text when IsUnknownResultType: { - // Try to resolve some 'pg_catalog.text' type info for the expected clr type. + // Resolve the converter against pg_catalog.text, UnknownResultType reads text bytes + // for any column type. Every pg_catalog.text mapping we own declares text-format support, so a converter that + // can't bind to text here throws and surfaces as a missing mapping rather than getting silently reinterpreted. var typeInfo = AdoSerializerHelpers.GetTypeInfoForReading(type ?? typeof(string), _serializerOptions.TextPgTypeId, _serializerOptions); + var concreteTypeInfo = typeInfo.MakeConcreteForField(Field); + if (!concreteTypeInfo.SupportsReading) + AdoSerializerHelpers.ThrowReadingNotSupported(type, _serializerOptions, _serializerOptions.TextPgTypeId, resolved: true); - // We start binding to DataFormat.Binary as it's the broadest supported format. - // The format however is irrelevant as 'pg_catalog.text' data is identical across either. - // Given we did a resolution against 'pg_catalog.text' and not the actual field type we're in reinterpretation territory anyway. 
- if (!typeInfo.TryBind(Field, DataFormat.Binary, out converterInfo)) - converterInfo = typeInfo.Bind(Field, DataFormat.Text); - - lastColumnInfo = new(converterInfo, DataFormat, type != converterInfo.TypeToConvert || converterInfo.IsBoxingConverter); - + binding = concreteTypeInfo.BindField(DataFormat.Text); + lastReadConversionContext = new(concreteTypeInfo, binding); break; } case DataFormat.Binary or DataFormat.Text: { var typeInfo = AdoSerializerHelpers.GetTypeInfoForReading(type ?? typeof(object), _serializerOptions.ToCanonicalTypeId(PostgresType), _serializerOptions); + var concreteTypeInfo = typeInfo.MakeConcreteForField(Field); + if (!concreteTypeInfo.SupportsReading) + AdoSerializerHelpers.ThrowReadingNotSupported(type, _serializerOptions, _serializerOptions.ToCanonicalTypeId(PostgresType), resolved: true); // If we don't support the DataFormat we'll just throw. - converterInfo = typeInfo.Bind(Field, DataFormat); - lastColumnInfo = new(converterInfo, DataFormat, typeof(object) == type || converterInfo.IsBoxingConverter); + binding = concreteTypeInfo.BindField(DataFormat); + lastReadConversionContext = new(concreteTypeInfo, binding); break; } default: ThrowHelper.ThrowUnreachableException("Unknown data format {0}", DataFormat); - lastColumnInfo = default; + lastReadConversionContext = default; break; } // We delay initializing ObjectOrDefaultInfo until after the first lookup (unless it is itself the first lookup). // When passed in an unsupported type it allows the error to be more specific, instead of just having object/null to deal with. - if (_objectInfo.ConverterInfo.IsDefault && type is not null) - _ = ObjectInfo; + if (_objectConversionContext.TypeInfo is null && type is not null) + _ = ObjectConversionContext; } - - // DataFormat.Text today exclusively signals that we executed with an UnknownResultTypeList. - // If we ever want to fully support DataFormat.Text we'll need to flow UnknownResultType status separately. 
- bool IsUnknownResultType() => DataFormat is DataFormat.Text; } /// diff --git a/src/Npgsql/Internal/AdoSerializerHelpers.cs b/src/Npgsql/Internal/AdoSerializerHelpers.cs index 21010b3f99..16bfcec3fc 100644 --- a/src/Npgsql/Internal/AdoSerializerHelpers.cs +++ b/src/Npgsql/Internal/AdoSerializerHelpers.cs @@ -15,23 +15,12 @@ public static PgTypeInfo GetTypeInfoForReading(Type type, PgTypeId pgTypeId, PgS try { typeInfo = options.GetTypeInfoInternal(type, pgTypeId); - if (typeInfo is { SupportsReading: false }) - typeInfo = null; } catch (Exception ex) { inner = ex; } return typeInfo ?? ThrowReadingNotSupported(type, options, pgTypeId, inner); - - // InvalidCastException thrown to align with ADO.NET convention. - [DoesNotReturn] - static PgTypeInfo ThrowReadingNotSupported(Type? type, PgSerializerOptions options, PgTypeId pgTypeId, Exception? inner = null) - { - throw new InvalidCastException( - $"Reading{(type is null ? "" : $" as '{type.FullName}'")} is not supported for fields having DataTypeName '{options.DatabaseInfo.FindPostgresType(pgTypeId)?.DisplayName ?? "unknown"}'", - inner); - } } public static PgTypeInfo GetTypeInfoForWriting(Type? type, PgTypeId? pgTypeId, PgSerializerOptions options, NpgsqlDbType? npgsqlDbType = null) @@ -43,27 +32,40 @@ public static PgTypeInfo GetTypeInfoForWriting(Type? type, PgTypeId? pgTypeId, P try { typeInfo = options.GetTypeInfoInternal(type, pgTypeId); - if (typeInfo is { SupportsWriting: false }) - typeInfo = null; } catch (Exception ex) { inner = ex; } - return typeInfo ?? ThrowWritingNotSupported(type, options, pgTypeId, npgsqlDbType, inner); + return typeInfo ?? ThrowWritingNotSupported(type, options, pgTypeId, npgsqlDbType, inner: inner); + } - // InvalidCastException thrown to align with ADO.NET convention. - [DoesNotReturn] - static PgTypeInfo ThrowWritingNotSupported(Type? type, PgSerializerOptions options, PgTypeId? pgTypeId, NpgsqlDbType? npgsqlDbType, Exception? 
inner = null) - { - var pgTypeString = pgTypeId is null - ? "no NpgsqlDbType or DataTypeName. Try setting one of these values to the expected database type." - : npgsqlDbType is null - ? $"DataTypeName '{options.DatabaseInfo.FindPostgresType(pgTypeId.GetValueOrDefault())?.DisplayName ?? "unknown"}'" - : $"NpgsqlDbType '{npgsqlDbType}'"; + // InvalidCastException thrown to align with ADO.NET convention. + // resolved=true distinguishes the "resolution succeeded but the resolved converter opted out of this + // direction" case (e.g. read-only converters) from the "no converter could be found / resolution threw" + // case — important for diagnosing user reports. + [DoesNotReturn] + internal static PgTypeInfo ThrowReadingNotSupported(Type? type, PgSerializerOptions options, PgTypeId pgTypeId, Exception? inner = null, bool resolved = false) + { + var typeFragment = type is null ? "" : $" as '{type.FullName}'{(resolved ? " (resolved)" : "")}"; + var dataTypeNameFragment = $"DataTypeName '{options.DatabaseInfo.FindPostgresType(pgTypeId)?.DisplayName ?? "unknown"}'"; + var innerHint = inner is null ? "" : " See the inner exception for details."; - throw new InvalidCastException( - $"Writing{(type is null ? "" : $" values of '{type.FullName}'")} is not supported for parameters having {pgTypeString}.", inner); - } + throw new InvalidCastException($"Reading{typeFragment} is not supported for fields having {dataTypeNameFragment}.{innerHint}", inner); + } + + [DoesNotReturn] + internal static PgTypeInfo ThrowWritingNotSupported(Type? type, PgSerializerOptions options, PgTypeId? pgTypeId, NpgsqlDbType? npgsqlDbType = null, string? parameterName = null, Exception? inner = null, bool resolved = false) + { + var pgTypeFragment = pgTypeId is null + ? "no NpgsqlDbType or DataTypeName. Try setting one of these values to the expected database type." + : npgsqlDbType is null + ? $"DataTypeName '{options.DatabaseInfo.FindPostgresType(pgTypeId.GetValueOrDefault())?.DisplayName ?? 
"unknown"}'" + : $"NpgsqlDbType '{npgsqlDbType}'"; + var parameterFragment = parameterName is null ? "parameters" : $"parameter '{parameterName}'"; + var typeFragment = type is null ? "" : $" values of type '{type.FullName}'{(resolved ? " (resolved)" : "")}"; + var innerHint = inner is null ? "" : " See the inner exception for details."; + + throw new InvalidCastException($"Writing{typeFragment} is not supported for {parameterFragment} having {pgTypeFragment}.{innerHint}", inner); } } diff --git a/src/Npgsql/Internal/Composites/Metadata/CompositeFieldInfo.cs b/src/Npgsql/Internal/Composites/Metadata/CompositeFieldInfo.cs index dd394ce6bb..b6f41a8cc3 100644 --- a/src/Npgsql/Internal/Composites/Metadata/CompositeFieldInfo.cs +++ b/src/Npgsql/Internal/Composites/Metadata/CompositeFieldInfo.cs @@ -1,5 +1,6 @@ using System; using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; using System.Runtime.CompilerServices; using System.Threading; using System.Threading.Tasks; @@ -42,7 +43,7 @@ private protected CompositeFieldInfo(string name, PgTypeInfo typeInfo, PgTypeId // that's where provider-backed fields (DateTime kind, late-bound, etc.) surface deterministic // errors. The cached default is reused by GetDefaultWriteInfo on CompositeConverter's Path A, // where per-value resolution has already completed without producing state. 
- concrete = providerTypeInfo.GetDefaultConcreteTypeInfo(null); + concrete = providerTypeInfo.GetDefault(null); IsProviderBacked = true; } else @@ -51,7 +52,7 @@ private protected CompositeFieldInfo(string name, PgTypeInfo typeInfo, PgTypeId return; } - if (concrete.GetBufferRequirements(concrete.Converter, DataFormat.Binary) is not { } bufferRequirements) + if (!concrete.Converter.CanConvert(DataFormat.Binary, out var bufferRequirements)) { ThrowHelper.ThrowInvalidOperationException("Converter must support binary format to participate in composite types."); return; @@ -65,24 +66,29 @@ public PgConverter GetReadInfo(out Size readRequirement) if (!IsProviderBacked) { readRequirement = _binaryBufferRequirements.Read; - return Converter!; + return Converter; } - if (!PgTypeInfo.TryBind(new Field(Name, PgTypeInfo.PgTypeId.GetValueOrDefault(), -1), DataFormat.Binary, out var converterInfo)) + var concreteTypeInfo = PgTypeInfo.MakeConcreteForField(new Field(Name, PgTypeInfo.PgTypeId.GetValueOrDefault(), -1)); + if (!concreteTypeInfo.SupportsReading) + AdoSerializerHelpers.ThrowReadingNotSupported(PgTypeInfo.Type, PgTypeInfo.Options, concreteTypeInfo.PgTypeId, resolved: true); + if (!concreteTypeInfo.TryBindField(DataFormat.Binary, out var binding)) ThrowHelper.ThrowInvalidOperationException("Converter must support binary format to participate in composite types."); - readRequirement = converterInfo.BufferRequirement; - return converterInfo.Converter; + readRequirement = binding.BufferRequirement; + return concreteTypeInfo.Converter; } public PgConverter GetWriteInfo(object instance, out Size writeRequirement, out object? 
writeState) { - if (IsProviderBacked) - return BindValue(instance, out writeRequirement, out writeState); + if (!IsProviderBacked) + { + writeState = null; + writeRequirement = _binaryBufferRequirements.Write; + return Converter; + } - writeState = null; - writeRequirement = _binaryBufferRequirements.Write; - return Converter!; + return BindValue(instance, out writeRequirement, out writeState); } /// @@ -136,6 +142,7 @@ protected ValueTask WriteAsObject(bool async, PgConverter converter, PgWriter wr public Size BinaryWriteRequirement => _binaryBufferRequirements.Write; /// True when this field defers converter resolution to bind time via a provider. + [MemberNotNullWhen(false, nameof(Converter))] public bool IsProviderBacked { get; } public abstract Type Type { get; } @@ -151,7 +158,7 @@ protected ValueTask WriteAsObject(bool async, PgConverter converter, PgWriter wr public abstract void ReadDbNull(CompositeBuilder builder); public abstract ValueTask Read(bool async, PgConverter converter, CompositeBuilder builder, PgReader reader, CancellationToken cancellationToken = default); public abstract bool IsDbNull(PgConverter converter, object instance, object? writeState); - public abstract Size? GetSizeOrDbNull(PgConverter converter, DataFormat format, Size writeRequirement, object instance, ref object? writeState); + public abstract Size? IsDbNullOrGetSize(PgConverter converter, DataFormat format, Size writeRequirement, object instance, ref object? writeState); public abstract ValueTask Write(bool async, PgConverter converter, PgWriter writer, object instance, CancellationToken cancellationToken); } @@ -230,10 +237,10 @@ public override void ReadDbNull(CompositeBuilder builder) protected override PgConverter BindValue(object instance, out Size writeRequirement, out object? writeState) { var value = _getter(instance); - var concreteTypeInfo = PgTypeInfo.IsBoxing - ? 
PgTypeInfo.GetObjectConcreteTypeInfo(value, out writeState) - : PgTypeInfo.GetConcreteTypeInfo(value, out writeState); - if (concreteTypeInfo.GetBufferRequirements(concreteTypeInfo.Converter, DataFormat.Binary) is not { } bufferRequirements) + var concreteTypeInfo = PgTypeInfo.MakeConcreteForValue(value, out writeState); + if (!concreteTypeInfo.SupportsWriting) + AdoSerializerHelpers.ThrowWritingNotSupported(typeof(T), PgTypeInfo.Options, concreteTypeInfo.PgTypeId, resolved: true); + if (!concreteTypeInfo.Converter.CanConvert(DataFormat.Binary, out var bufferRequirements)) { ThrowHelper.ThrowInvalidOperationException("Converter must support binary format to participate in composite types."); writeRequirement = default; @@ -278,12 +285,12 @@ public override bool IsDbNull(PgConverter converter, object instance, object? wr return AsObject(converter) ? converter.IsDbNullAsObject(value, writeState) : ((PgConverter)converter).IsDbNull(value, writeState); } - public override Size? GetSizeOrDbNull(PgConverter converter, DataFormat format, Size writeRequirement, object instance, ref object? writeState) + public override Size? IsDbNullOrGetSize(PgConverter converter, DataFormat format, Size writeRequirement, object instance, ref object? writeState) { var value = _getter(instance); return AsObject(converter) - ? converter.GetSizeOrDbNullAsObject(format, writeRequirement, value, ref writeState) - : ((PgConverter)converter).GetSizeOrDbNull(format, writeRequirement, value, ref writeState); + ? 
converter.IsDbNullOrGetSizeAsObject(format, writeRequirement, value, ref writeState) + : ((PgConverter)converter).IsDbNullOrGetSize(format, writeRequirement, value, ref writeState); } public override ValueTask Write(bool async, PgConverter converter, PgWriter writer, object instance, CancellationToken cancellationToken) diff --git a/src/Npgsql/Internal/Converters/ArrayConverter.cs b/src/Npgsql/Internal/Converters/ArrayConverter.cs index 29e3564f14..8ae0b1e94d 100644 --- a/src/Npgsql/Internal/Converters/ArrayConverter.cs +++ b/src/Npgsql/Internal/Converters/ArrayConverter.cs @@ -137,8 +137,8 @@ object IElementOperations.CreateCollection(ReadOnlySpan lengths) int IElementOperations.GetCollectionCount(object collection, out int[]? lengths) => ArrayConverterCore.GetArrayLengths((Array)collection, out lengths); - Size? IElementOperations.GetSizeOrDbNull(SizeContext context, object collection, IterationIndices indices, ref object? writeState) - => _elemConverter.GetSizeOrDbNull(context.Format, context.BufferRequirement, GetValue(collection, indices), ref writeState); + Size? IElementOperations.IsDbNullOrGetSize(SizeContext context, object collection, IterationIndices indices, ref object? writeState) + => _elemConverter.IsDbNullOrGetSize(context.Format, context.BufferRequirement, GetValue(collection, indices), ref writeState); ValueTask IElementOperations.Read(bool async, PgReader reader, bool isDbNull, object collection, IterationIndices indices, CancellationToken cancellationToken) { @@ -207,8 +207,8 @@ int IElementOperations.GetCollectionCount(object collection, out int[]? lengths) return ((IList)collection).Count; } - Size? IElementOperations.GetSizeOrDbNull(SizeContext context, object collection, IterationIndices indices, ref object? writeState) - => _elemConverter.GetSizeOrDbNull(context.Format, context.BufferRequirement, GetValue(collection, indices.One), ref writeState); + Size? 
IElementOperations.IsDbNullOrGetSize(SizeContext context, object collection, IterationIndices indices, ref object? writeState) + => _elemConverter.IsDbNullOrGetSize(context.Format, context.BufferRequirement, GetValue(collection, indices.One), ref writeState); ValueTask IElementOperations.Read(bool async, PgReader reader, bool isDbNull, object collection, IterationIndices indices, CancellationToken cancellationToken) { @@ -250,7 +250,7 @@ ValueTask IElementOperations.Write(bool async, PgWriter writer, object collectio } } -sealed class ArrayTypeInfoProvider(PgProviderTypeInfo elementTypeInfo, Type effectiveType) +sealed class ArrayTypeInfoProvider(PgProviderTypeInfo elementTypeInfo, Type requestedMappingType) : PgComposingTypeInfoProvider(elementTypeInfo.PgTypeId is { } id ? elementTypeInfo.Options.GetArrayTypeId(id) : null, elementTypeInfo) where T : notnull @@ -260,13 +260,19 @@ sealed class ArrayTypeInfoProvider(PgProviderTypeInfo elementTypeIn protected override PgTypeId GetEffectivePgTypeId(PgTypeId pgTypeId) => Options.GetArrayElementTypeId(pgTypeId); protected override PgTypeId GetPgTypeId(PgTypeId effectivePgTypeId) => Options.GetArrayTypeId(effectivePgTypeId); - protected override PgConverter CreateConverter(PgConcreteTypeInfo effectiveConcreteTypeInfo) + protected override PgConverter CreateConverter(PgConcreteTypeInfo effectiveConcreteTypeInfo, out Type? 
requestedType) { if (typeof(T) == typeof(Array) || typeof(T).IsArray) - return ArrayConverter.CreateArrayBased(effectiveConcreteTypeInfo, effectiveType); + { + requestedType = requestedMappingType; + return ArrayConverter.CreateArrayBased(effectiveConcreteTypeInfo, requestedType); + } if (typeof(T).IsConstructedGenericType && typeof(T).GetGenericTypeDefinition() == typeof(IList<>)) + { + requestedType = requestedMappingType; return ArrayConverter.CreateListBased(effectiveConcreteTypeInfo); + } throw new NotSupportedException($"Unknown type T: {typeof(T).FullName}"); } @@ -285,7 +291,7 @@ protected override PgConverter CreateConverter(PgConcreteTypeInfo effectiveCo metadata = PgArrayMetadata.Create(ArrayConverterCore.GetArrayLengths(array, out _), null); foreach (var value in array) { - var result = EffectiveTypeInfo.GetConcreteTypeInfo(effectiveContext, value, out var state); + var result = EffectiveTypeInfo.GetForValue(effectiveContext, value, out var state); if (state is not null && elemData is null) { elemDataArrayPool = ArrayPool<(Size, object?)>.Shared; @@ -316,7 +322,7 @@ protected override PgConverter CreateConverter(PgConcreteTypeInfo effectiveCo metadata = PgArrayMetadata.Create(list.Count, null); foreach (var value in list) { - var result = EffectiveTypeInfo.GetConcreteTypeInfo(effectiveContext, value, out var state); + var result = EffectiveTypeInfo.GetForValue(effectiveContext, value, out var state); if (state is not null && elemData is null) { elemDataArrayPool = ArrayPool<(Size, object?)>.Shared; @@ -347,7 +353,7 @@ protected override PgConverter CreateConverter(PgConcreteTypeInfo effectiveCo metadata = PgArrayMetadata.Create(list.Count, null); foreach (var value in list) { - var result = EffectiveTypeInfo.GetConcreteTypeInfo(effectiveContext, value, out var state); + var result = EffectiveTypeInfo.GetForValue(effectiveContext, value, out var state); if (state is not null && elemData is null) { elemDataArrayPool = ArrayPool<(Size, object?)>.Shared; @@ 
-378,7 +384,7 @@ protected override PgConverter CreateConverter(PgConcreteTypeInfo effectiveCo metadata = PgArrayMetadata.Create(ArrayConverterCore.GetArrayLengths(array, out var dimensionLengths), dimensionLengths); foreach (var value in array) { - var result = EffectiveTypeInfo.GetAsObjectConcreteTypeInfo(effectiveContext, value, out var state); + var result = EffectiveTypeInfo.GetForValue(effectiveContext, value, out var state); if (state is not null && elemData is null) { elemDataArrayPool = ArrayPool<(Size, object?)>.Shared; @@ -486,15 +492,15 @@ public PolymorphicArrayTypeInfoProvider(PgProviderTypeInfo effectiveTypeInfo, Pg } protected override PgConcreteTypeInfo GetDefaultCore(PgTypeId? pgTypeId) - => GetOrAdd(_effectiveTypeInfo.GetDefaultConcreteTypeInfo(pgTypeId), _effectiveNullableTypeInfo.GetDefaultConcreteTypeInfo(pgTypeId)); + => GetOrAdd(_effectiveTypeInfo.GetDefault(pgTypeId), _effectiveNullableTypeInfo.GetDefault(pgTypeId)); protected override PgConcreteTypeInfo? GetForValueCore(ProviderValueContext context, TBase? value, ref object? writeState) => throw new NotSupportedException("Polymorphic writing is not supported."); protected override PgConcreteTypeInfo? GetForFieldCore(Field field) { - var concreteTypeInfo = _effectiveTypeInfo.GetConcreteTypeInfo(field); - var concreteNullableTypeInfo = _effectiveNullableTypeInfo.GetConcreteTypeInfo(field); + var concreteTypeInfo = _effectiveTypeInfo.GetForField(field); + var concreteNullableTypeInfo = _effectiveNullableTypeInfo.GetForField(field); return concreteTypeInfo is not null && concreteNullableTypeInfo is not null ? 
GetOrAdd(concreteTypeInfo, concreteNullableTypeInfo) @@ -508,7 +514,7 @@ PgConcreteTypeInfo GetOrAdd(PgConcreteTypeInfo concreteTypeInfo, PgConcreteTypeI static (_, state) => new(state.ConcreteInfo.Options, new PolymorphicArrayConverter((PgConverter)state.ConcreteInfo.Converter, (PgConverter)state.ConcreteNullableInfo.Converter), - state.ConcreteInfo.PgTypeId), + state.ConcreteInfo.PgTypeId) { SupportsWriting = false }, state); } } diff --git a/src/Npgsql/Internal/Converters/ArrayConverterCore.cs b/src/Npgsql/Internal/Converters/ArrayConverterCore.cs index 29fd0dfa60..82e7550323 100644 --- a/src/Npgsql/Internal/Converters/ArrayConverterCore.cs +++ b/src/Npgsql/Internal/Converters/ArrayConverterCore.cs @@ -14,7 +14,7 @@ interface IElementOperations { object CreateCollection(ReadOnlySpan lengths); int GetCollectionCount(object collection, out int[]? lengths); - Size? GetSizeOrDbNull(SizeContext context, object collection, IterationIndices indices, ref object? writeState); + Size? IsDbNullOrGetSize(SizeContext context, object collection, IterationIndices indices, ref object? writeState); ValueTask Read(bool async, PgReader reader, bool isDbNull, object collection, IterationIndices indices, CancellationToken cancellationToken = default); ValueTask Write(bool async, PgWriter writer, object collection, IterationIndices indices, CancellationToken cancellationToken = default); } @@ -38,11 +38,11 @@ readonly struct ArrayConverterCore( bool IsDbNull(object values, IterationIndices arrayIndices, object? writeState) { // This call will only skip GetSize if we are dealing with fixed size elements, otherwise we'll repeat sizing costs. - // Fixed-size element converters cannot produce per-value write state, so GetSizeOrDbNull must + // Fixed-size element converters cannot produce per-value write state, so IsDbNullOrGetSize must // leave writeState alone — any mutation is a contract violation in the element converter. 
Debug.Assert(binaryRequirements.Write.Kind is SizeKind.Exact); var originalWriteState = writeState; - var isDbNull = elemOps.GetSizeOrDbNull(new(DataFormat.Binary, binaryRequirements.Write), values, arrayIndices, ref writeState) is null; + var isDbNull = elemOps.IsDbNullOrGetSize(new(DataFormat.Binary, binaryRequirements.Write), values, arrayIndices, ref writeState) is null; Debug.Assert(ReferenceEquals(writeState, originalWriteState), "Fixed-size element converter mutated writeState during a null probe."); return isDbNull; } @@ -51,7 +51,7 @@ bool IsDbNull(object values, IterationIndices arrayIndices, object? writeState) [MethodImpl(MethodImplOptions.AggressiveInlining)] Size SizeElement(SizeContext context, object values, IterationIndices indices, ref object? elemState, ref Size size, ref bool anyWriteState) { - var elemSize = elemOps.GetSizeOrDbNull(context, values, indices, ref elemState); + var elemSize = elemOps.IsDbNullOrGetSize(context, values, indices, ref elemState); anyWriteState = anyWriteState || elemState is not null; size = size.Combine(elemSize ?? 0); return elemSize ?? -1; diff --git a/src/Npgsql/Internal/Converters/BitStringConverters.cs b/src/Npgsql/Internal/Converters/BitStringConverters.cs index 5b2f868ddb..f7cee926bb 100644 --- a/src/Npgsql/Internal/Converters/BitStringConverters.cs +++ b/src/Npgsql/Internal/Converters/BitStringConverters.cs @@ -230,8 +230,8 @@ async ValueTask Write(bool async, PgWriter writer, string value, CancellationTok /// Otherwise we return a BitArray converter. Polymorphic writing through this provider is not supported. 
sealed class PolymorphicBitStringTypeInfoProvider(PgSerializerOptions options, PgTypeId bitString) : PgConcreteTypeInfoProvider { - readonly PgConcreteTypeInfo _boolConcreteTypeInfo = new(options, new BoolBitStringConverter(), bitString); - readonly PgConcreteTypeInfo _bitArrayConcreteTypeInfo = new(options, new BitArrayBitStringConverter(), bitString); + readonly PgConcreteTypeInfo _boolConcreteTypeInfo = new(options, new BoolBitStringConverter(), bitString) { SupportsWriting = false }; + readonly PgConcreteTypeInfo _bitArrayConcreteTypeInfo = new(options, new BitArrayBitStringConverter(), bitString) { SupportsWriting = false }; protected override PgConcreteTypeInfo GetDefaultCore(PgTypeId? pgTypeId) => GetConcreteInfo(field: null); diff --git a/src/Npgsql/Internal/Converters/CastingConverter.cs b/src/Npgsql/Internal/Converters/CastingConverter.cs index ff94242dfb..1d7e143616 100644 --- a/src/Npgsql/Internal/Converters/CastingConverter.cs +++ b/src/Npgsql/Internal/Converters/CastingConverter.cs @@ -3,43 +3,58 @@ using System.Threading; using System.Threading.Tasks; using Npgsql.Internal.Postgres; +using Npgsql.Util; namespace Npgsql.Internal.Converters; -/// A converter to map strongly typed apis onto boxed converter results to produce a strongly typed converter over T. -sealed class CastingConverter(PgConverter effectiveConverter) - : PgConverter(effectiveConverter.DbNullPredicateKind is DbNullPredicate.Custom) +/// A converter that adapts a boxed converter's results to an exact-type converter over T, wrapping the read/write +/// paths through object to present a typed surface for a converter whose TypeToConvert is only a base of T. +[Experimental(NpgsqlDiagnostics.ConvertersExperimental)] +public sealed class CastingConverter : PgConverter { - protected override bool IsDbNullValue(T? value, object? 
writeState) => effectiveConverter.IsDbNullAsObject(value, writeState); + readonly PgConverter _effectiveConverter; + + public CastingConverter(PgConverter effectiveConverter) : base(effectiveConverter.DbNullPredicateKind is DbNullPredicate.Custom) + { + if (!typeof(T).IsInSubtypeRelationshipWith(effectiveConverter.TypeToConvert)) + throw new ArgumentException( + $"Values for the effective converter's type {effectiveConverter.TypeToConvert} cannot be cast to the type {typeof(T)} for this converter.", + nameof(effectiveConverter)); + + _effectiveConverter = effectiveConverter; + } + + protected override bool IsDbNullValue(T? value, object? writeState) => _effectiveConverter.IsDbNullAsObject(value, writeState); public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) - => effectiveConverter.CanConvert(format, out bufferRequirements); + => _effectiveConverter.CanConvert(format, out bufferRequirements); - public override T Read(PgReader reader) => (T)effectiveConverter.ReadAsObject(reader); + public override T Read(PgReader reader) => (T)_effectiveConverter.ReadAsObject(reader); public override ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) - => this.ReadAsObjectAsyncAsT(effectiveConverter, reader, cancellationToken); + => this.ReadAsObjectAsyncAsT(_effectiveConverter, reader, cancellationToken); public override Size GetSize(SizeContext context, T value, ref object? 
writeState) - => effectiveConverter.GetSizeAsObject(context, value!, ref writeState); + => _effectiveConverter.GetSizeAsObject(context, value!, ref writeState); public override void Write(PgWriter writer, T value) - => effectiveConverter.WriteAsObject(writer, value!); + => _effectiveConverter.WriteAsObject(writer, value!); public override ValueTask WriteAsync(PgWriter writer, T value, CancellationToken cancellationToken = default) - => effectiveConverter.WriteAsObjectAsync(writer, value!, cancellationToken); + => _effectiveConverter.WriteAsObjectAsync(writer, value!, cancellationToken); internal override ValueTask ReadAsObject(bool async, PgReader reader, CancellationToken cancellationToken) => async - ? effectiveConverter.ReadAsObjectAsync(reader, cancellationToken) - : new(effectiveConverter.ReadAsObject(reader)); + ? _effectiveConverter.ReadAsObjectAsync(reader, cancellationToken) + : new(_effectiveConverter.ReadAsObject(reader)); internal override ValueTask WriteAsObject(bool async, PgWriter writer, object value, CancellationToken cancellationToken) { + // Cast here to keep our T contract, and otherwise return more accurate invalid cast exceptions (as the effective converter will cast as well). 
if (async) - return effectiveConverter.WriteAsObjectAsync(writer, value, cancellationToken); + return _effectiveConverter.WriteAsObjectAsync(writer, (T)value, cancellationToken); - effectiveConverter.WriteAsObject(writer, value); + _effectiveConverter.WriteAsObject(writer, (T)value); return new(); } } @@ -51,19 +66,22 @@ sealed class CastingTypeInfoProvider(PgProviderTypeInfo effectiveProviderType protected override PgTypeId GetEffectivePgTypeId(PgTypeId pgTypeId) => pgTypeId; protected override PgTypeId GetPgTypeId(PgTypeId effectivePgTypeId) => effectivePgTypeId; - protected override PgConverter CreateConverter(PgConcreteTypeInfo effectiveConcreteTypeInfo) - => new CastingConverter(effectiveConcreteTypeInfo.Converter); + protected override PgConverter CreateConverter(PgConcreteTypeInfo effectiveConcreteTypeInfo, out Type? requestedType) + { + requestedType = null; + return new CastingConverter(effectiveConcreteTypeInfo.Converter); + } protected override PgConcreteTypeInfo? GetEffectiveTypeInfo(ProviderValueContext effectiveContext, T? value, ref object? writeState) - => EffectiveTypeInfo.GetAsObjectConcreteTypeInfo(effectiveContext, value, out writeState); + => EffectiveTypeInfo.GetForValueAsObject(effectiveContext, value, out writeState); } static class CastingTypeInfoExtensions { - [RequiresDynamicCode("Changing boxing converters to their non-boxing counterpart can require creating new generic types or methods, which requires creating code at runtime. 
This may not be AOT when AOT compiling")] - internal static PgTypeInfo ToNonBoxing(this PgTypeInfo typeInfo) + [RequiresDynamicCode("Producing an exact-type info from one without an exact type can require creating new generic types or methods at runtime, which may not work when AOT compiling.")] + internal static PgTypeInfo ToExactTypeInfo(this PgTypeInfo typeInfo) { - if (!typeInfo.IsBoxing) + if (typeInfo.HasExactType) return typeInfo; var type = typeInfo.Type; diff --git a/src/Npgsql/Internal/Converters/CompositeConverter.cs b/src/Npgsql/Internal/Converters/CompositeConverter.cs index 5646402636..59b9f75291 100644 --- a/src/Npgsql/Internal/Converters/CompositeConverter.cs +++ b/src/Npgsql/Internal/Converters/CompositeConverter.cs @@ -188,7 +188,7 @@ public override Size GetSize(SizeContext context, T value, ref object? writeStat return _writeSizePrecomputed; } - // Variable-size or nullable fields — per-field GetSizeOrDbNull is needed to compute the total, + // Variable-size or nullable fields — per-field IsDbNullOrGetSize is needed to compute the total, // and per-field sizes must flow forward to Write. Always rent. var arrayPool = ArrayPool.Shared; var slowData = arrayPool.Rent(_composite.Fields.Count); @@ -198,7 +198,7 @@ public override Size GetSize(SizeContext context, T value, ref object? 
writeStat { var field = _composite.Fields[i]; var converter = field.GetWriteInfo(boxedInstance, out var writeRequirement, out var fieldState); - var fieldSizeOrNull = field.GetSizeOrDbNull(converter, context.Format, writeRequirement, boxedInstance, ref fieldState); + var fieldSizeOrNull = field.IsDbNullOrGetSize(converter, context.Format, writeRequirement, boxedInstance, ref fieldState); anyWriteState = anyWriteState || fieldState is not null; slowData[i] = new() { diff --git a/src/Npgsql/Internal/Converters/MultirangeConverter.cs b/src/Npgsql/Internal/Converters/MultirangeConverter.cs index f7811e298a..2757c2933f 100644 --- a/src/Npgsql/Internal/Converters/MultirangeConverter.cs +++ b/src/Npgsql/Internal/Converters/MultirangeConverter.cs @@ -77,7 +77,7 @@ public override Size GetSize(SizeContext context, T value, ref object? writeStat for (var i = 0; i < value.Count; i++) { object? innerState = null; - var rangeSize = _rangeConverter.GetSizeOrDbNull(context.Format, _rangeRequirements.Write, value[i], ref innerState); + var rangeSize = _rangeConverter.IsDbNullOrGetSize(context.Format, _rangeRequirements.Write, value[i], ref innerState); anyWriteState = anyWriteState || innerState is not null; // Ranges should never be NULL. 
Debug.Assert(rangeSize.HasValue); diff --git a/src/Npgsql/Internal/Converters/NullableConverter.cs b/src/Npgsql/Internal/Converters/NullableConverter.cs index 250b98293f..48e324f59f 100644 --- a/src/Npgsql/Internal/Converters/NullableConverter.cs +++ b/src/Npgsql/Internal/Converters/NullableConverter.cs @@ -1,3 +1,4 @@ +using System; using System.Diagnostics.CodeAnalysis; using System.Threading; using System.Threading.Tasks; @@ -46,11 +47,14 @@ sealed class NullableTypeInfoProvider(PgProviderTypeInfo effectiveTypeInfo) protected override PgTypeId GetEffectivePgTypeId(PgTypeId pgTypeId) => pgTypeId; protected override PgTypeId GetPgTypeId(PgTypeId effectivePgTypeId) => effectivePgTypeId; - protected override PgConverter CreateConverter(PgConcreteTypeInfo effectiveConcreteTypeInfo) - => new NullableConverter((PgConverter)effectiveConcreteTypeInfo.Converter); + protected override PgConverter CreateConverter(PgConcreteTypeInfo effectiveConcreteTypeInfo, out Type? requestedType) + { + requestedType = null; + return new NullableConverter((PgConverter)effectiveConcreteTypeInfo.Converter); + } protected override PgConcreteTypeInfo? GetEffectiveTypeInfo(ProviderValueContext effectiveContext, T? value, ref object? writeState) => value is not null - ? EffectiveTypeInfo.GetConcreteTypeInfo(effectiveContext, value.GetValueOrDefault(), out writeState) + ? EffectiveTypeInfo.GetForValue(effectiveContext, value.GetValueOrDefault(), out writeState) : null; } diff --git a/src/Npgsql/Internal/Converters/ObjectConverter.cs b/src/Npgsql/Internal/Converters/ObjectConverter.cs index 1e78c8e7cf..fb755178d0 100644 --- a/src/Npgsql/Internal/Converters/ObjectConverter.cs +++ b/src/Npgsql/Internal/Converters/ObjectConverter.cs @@ -32,7 +32,7 @@ public override Size GetSize(SizeContext context, object value, ref object? 
writ _ => throw new InvalidOperationException("Invalid state") }; - if (concreteTypeInfo.GetBufferRequirements(concreteTypeInfo.Converter, context.Format) is not { } bufferRequirements) + if (!concreteTypeInfo.Converter.CanConvert(context.Format, out var bufferRequirements)) { ThrowHelper.ThrowNotSupportedException($"Resolved converter '{concreteTypeInfo.Converter.GetType()}' has to support the {context.Format} format to be compatible."); return default; @@ -69,7 +69,9 @@ async ValueTask Write(bool async, PgWriter writer, object value, CancellationTok _ => throw new InvalidOperationException("Invalid state") }; - var writeRequirement = concreteTypeInfo.GetBufferRequirements(concreteTypeInfo.Converter, DataFormat.Binary)!.Value.Write; + var found = concreteTypeInfo.Converter.CanConvert(DataFormat.Binary, out var bufferRequirements); + Debug.Assert(found); + var writeRequirement = bufferRequirements.Write; using var _ = await writer.BeginNestedWrite(async, writeRequirement, writer.Current.Size.Value, effectiveState, cancellationToken).ConfigureAwait(false); await concreteTypeInfo.Converter.WriteAsObject(async, writer, value, cancellationToken).ConfigureAwait(false); } @@ -113,8 +115,11 @@ protected override PgConcreteTypeInfo GetForValueCore(ProviderValueContext conte return GetDefaultCore(context.ExpectedPgTypeId); } - var typeInfo = AdoSerializerHelpers.GetTypeInfoForWriting(value.GetType(), context.ExpectedPgTypeId ?? typeId, options); - var concreteTypeInfo = typeInfo.GetObjectConcreteTypeInfo(value, out var effectiveState); + var valueType = value.GetType(); + var typeInfo = AdoSerializerHelpers.GetTypeInfoForWriting(valueType, context.ExpectedPgTypeId ?? typeId, options); + var concreteTypeInfo = typeInfo.MakeConcreteForValueAsObject(value, out var effectiveState); + if (!concreteTypeInfo.SupportsWriting) + AdoSerializerHelpers.ThrowWritingNotSupported(valueType, options, concreteTypeInfo.PgTypeId, resolved: true); writeState = effectiveState is not null ? 
new ObjectConverter.WriteState { ConcreteTypeInfo = concreteTypeInfo, EffectiveState = effectiveState } : concreteTypeInfo; diff --git a/src/Npgsql/Internal/Converters/PolymorphicArrayTypeInfoProvider.cs b/src/Npgsql/Internal/Converters/PolymorphicArrayTypeInfoProvider.cs index 9084567f4a..b52b15a266 100644 --- a/src/Npgsql/Internal/Converters/PolymorphicArrayTypeInfoProvider.cs +++ b/src/Npgsql/Internal/Converters/PolymorphicArrayTypeInfoProvider.cs @@ -5,7 +5,7 @@ namespace Npgsql.Internal.Converters; -// Many ways to achieve strongly typed composition on top of a polymorphic element type. +// Many ways to achieve exact-type composition on top of a polymorphic element type. // Including pushing construction through a GVM visitor pattern on the element handler, // manual reimplementation of the element logic in the array provider, and other ways. // This one however is by far the most lightweight on both the implementation duplication and code bloat axes. @@ -29,14 +29,14 @@ public PolymorphicArrayTypeInfoProvider(PgTypeId pgTypeId, PgProviderTypeInfo el } protected override PgConcreteTypeInfo GetDefaultCore(PgTypeId? pgTypeId) - => GetOrAdd(_elementTypeInfo.GetDefaultConcreteTypeInfo(_elementPgTypeId)); + => GetOrAdd(_elementTypeInfo.GetDefault(_elementPgTypeId)); protected override PgConcreteTypeInfo? GetForValueCore(ProviderValueContext context, object? value, ref object? writeState) => throw new NotSupportedException("Polymorphic writing is not supported."); protected override PgConcreteTypeInfo? GetForFieldCore(Field field) { - var elementConcreteTypeInfo = _elementTypeInfo.GetConcreteTypeInfo(field with { PgTypeId = _elementPgTypeId }); + var elementConcreteTypeInfo = _elementTypeInfo.GetForField(field with { PgTypeId = _elementPgTypeId }); return elementConcreteTypeInfo is not null ? 
GetOrAdd(elementConcreteTypeInfo) : null; } diff --git a/src/Npgsql/Internal/Converters/Primitive/ByteaConverters.cs b/src/Npgsql/Internal/Converters/Primitive/ByteaConverters.cs index 77ffdecc46..903dd15dec 100644 --- a/src/Npgsql/Internal/Converters/Primitive/ByteaConverters.cs +++ b/src/Npgsql/Internal/Converters/Primitive/ByteaConverters.cs @@ -1,6 +1,5 @@ using System; using System.Diagnostics; -using System.IO; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Threading; @@ -9,8 +8,16 @@ // ReSharper disable once CheckNamespace namespace Npgsql.Internal.Converters; -abstract class ByteaConverters : PgStreamingConverter +abstract class ByteaConverters(bool supportsTextFormat) : PgStreamingConverter { + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.None; + return supportsTextFormat + ? format is DataFormat.Binary or DataFormat.Text + : format is DataFormat.Binary; + } + public override T Read(PgReader reader) => Read(async: false, reader, CancellationToken.None).Result; @@ -42,7 +49,7 @@ async ValueTask Read(bool async, PgReader reader, CancellationToken cancellat protected abstract T ConvertFrom(Memory value); } -sealed class ArraySegmentByteaConverter : ByteaConverters> +sealed class ArraySegmentByteaConverter(bool supportsTextFormat) : ByteaConverters>(supportsTextFormat) { protected override Memory ConvertTo(ArraySegment value) => value; protected override ArraySegment ConvertFrom(Memory value) @@ -51,8 +58,16 @@ protected override ArraySegment ConvertFrom(Memory value) : throw new UnreachableException("Expected array-backed memory"); } -sealed class ArrayByteaConverter : PgStreamingConverter +sealed class ArrayByteaConverter(bool supportsTextFormat) : PgStreamingConverter { + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.None; + 
return supportsTextFormat + ? format is DataFormat.Binary or DataFormat.Text + : format is DataFormat.Binary; + } + public override byte[] Read(PgReader reader) { var bytes = new byte[reader.CurrentRemaining]; @@ -77,13 +92,13 @@ public override ValueTask WriteAsync(PgWriter writer, byte[] value, Cancellation => writer.WriteBytesAsync(value, cancellationToken); } -sealed class ReadOnlyMemoryByteaConverter : ByteaConverters> +sealed class ReadOnlyMemoryByteaConverter(bool supportsTextFormat) : ByteaConverters>(supportsTextFormat) { protected override Memory ConvertTo(ReadOnlyMemory value) => MemoryMarshal.AsMemory(value); protected override ReadOnlyMemory ConvertFrom(Memory value) => value; } -sealed class MemoryByteaConverter : ByteaConverters> +sealed class MemoryByteaConverter(bool supportsTextFormat) : ByteaConverters>(supportsTextFormat) { protected override Memory ConvertTo(Memory value) => value; protected override Memory ConvertFrom(Memory value) => value; diff --git a/src/Npgsql/Internal/Converters/Primitive/TextConverters.cs b/src/Npgsql/Internal/Converters/Primitive/TextConverters.cs index d4c1f8834e..e1660e90a2 100644 --- a/src/Npgsql/Internal/Converters/Primitive/TextConverters.cs +++ b/src/Npgsql/Internal/Converters/Primitive/TextConverters.cs @@ -202,6 +202,12 @@ readonly struct GetChars(int read) sealed class GetCharsTextConverter(Encoding encoding) : PgStreamingConverter { + public override bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements) + { + bufferRequirements = BufferRequirements.None; + return format is DataFormat.Binary or DataFormat.Text; + } + public override GetChars Read(PgReader reader) => reader.CharsReadActive ? 
ResumableRead(reader) diff --git a/src/Npgsql/Internal/Converters/RangeConverter.cs b/src/Npgsql/Internal/Converters/RangeConverter.cs index c378d830f7..5a7b6df9f8 100644 --- a/src/Npgsql/Internal/Converters/RangeConverter.cs +++ b/src/Npgsql/Internal/Converters/RangeConverter.cs @@ -106,7 +106,7 @@ public override Size GetSize(SizeContext context, NpgsqlRange value, r { totalSize = totalSize.Combine(sizeof(int)); var subTypeState = (object?)null; - if (_subtypeConverter.GetSizeOrDbNull(context.Format, _subtypeRequirements.Write, value.LowerBound, ref subTypeState) is { } size) + if (_subtypeConverter.IsDbNullOrGetSize(context.Format, _subtypeRequirements.Write, value.LowerBound, ref subTypeState) is { } size) { totalSize = totalSize.Combine(size); (state ??= new WriteState()).LowerBoundSize = size; @@ -120,7 +120,7 @@ public override Size GetSize(SizeContext context, NpgsqlRange value, r { totalSize = totalSize.Combine(sizeof(int)); var subTypeState = (object?)null; - if (_subtypeConverter.GetSizeOrDbNull(context.Format, _subtypeRequirements.Write, value.UpperBound, ref subTypeState) is { } size) + if (_subtypeConverter.IsDbNullOrGetSize(context.Format, _subtypeRequirements.Write, value.UpperBound, ref subTypeState) is { } size) { totalSize = totalSize.Combine(size); (state ??= new WriteState()).UpperBoundSize = size; diff --git a/src/Npgsql/Internal/Converters/RecordConverter.cs b/src/Npgsql/Internal/Converters/RecordConverter.cs index 05eabcf7cd..14fbe4aabd 100644 --- a/src/Npgsql/Internal/Converters/RecordConverter.cs +++ b/src/Npgsql/Internal/Converters/RecordConverter.cs @@ -39,16 +39,19 @@ async ValueTask Read(bool async, PgReader reader, CancellationToken cancellat var pgTypeId = options.ToCanonicalTypeId(postgresType); // TODO resolve based on types expected by _factory (pass in a Type[] during construcion) - // Only allow object polymorphism for object[] records, valuetuple records are always strongly typed. 
+ // Only allow object polymorphism for object[] records; valuetuple records always have exact types. var typeInfo = (IsObjectArrayRecord ? options.GetTypeInfo(typeof(object), pgTypeId) : options.GetDefaultTypeInfo(pgTypeId)) ?? throw new NotSupportedException( $"Reading isn't supported for record field {i} (PG type '{postgresType.DisplayName}'"); - var converterInfo = typeInfo.Bind(new Field("?", pgTypeId, -1), DataFormat.Binary); - var scope = await reader.BeginNestedRead(async, length, converterInfo.BufferRequirement, cancellationToken).ConfigureAwait(false); + var concreteTypeInfo = typeInfo.MakeConcreteForField(Field.CreateUnspecified(pgTypeId)); + if (!concreteTypeInfo.SupportsReading) + AdoSerializerHelpers.ThrowReadingNotSupported(IsObjectArrayRecord ? typeof(object) : null, options, pgTypeId, resolved: true); + var binding = concreteTypeInfo.BindField(DataFormat.Binary); + var scope = await reader.BeginNestedRead(async, length, binding.BufferRequirement, cancellationToken).ConfigureAwait(false); try { - result[i] = await converterInfo.Converter.ReadAsObject(async, reader, cancellationToken).ConfigureAwait(false); + result[i] = await concreteTypeInfo.Converter.ReadAsObject(async, reader, cancellationToken).ConfigureAwait(false); } finally { diff --git a/src/Npgsql/Internal/PgComposingTypeInfoProvider.cs b/src/Npgsql/Internal/PgComposingTypeInfoProvider.cs index d24b44ab3d..9a2d854872 100644 --- a/src/Npgsql/Internal/PgComposingTypeInfoProvider.cs +++ b/src/Npgsql/Internal/PgComposingTypeInfoProvider.cs @@ -13,6 +13,7 @@ abstract class PgComposingTypeInfoProvider : PgConcreteTypeInfoProvider protected PgComposingTypeInfoProvider(PgTypeId? 
pgTypeId, PgProviderTypeInfo effectiveTypeInfo) { + ArgumentNullException.ThrowIfNull(effectiveTypeInfo); if (pgTypeId is null && effectiveTypeInfo.PgTypeId is not null) throw new ArgumentNullException(nameof(pgTypeId), $"Cannot be null if {nameof(effectiveTypeInfo)}.{nameof(PgTypeInfo.PgTypeId)} is not null."); @@ -22,13 +23,13 @@ protected PgComposingTypeInfoProvider(PgTypeId? pgTypeId, PgProviderTypeInfo eff protected abstract PgTypeId GetEffectivePgTypeId(PgTypeId pgTypeId); protected abstract PgTypeId GetPgTypeId(PgTypeId effectivePgTypeId); - protected abstract PgConverter CreateConverter(PgConcreteTypeInfo effectiveConcreteTypeInfo); + protected abstract PgConverter CreateConverter(PgConcreteTypeInfo effectiveConcreteTypeInfo, out Type? requestedType); protected abstract PgConcreteTypeInfo? GetEffectiveTypeInfo(ProviderValueContext effectiveContext, T? value, ref object? writeState); protected override PgConcreteTypeInfo GetDefaultCore(PgTypeId? pgTypeId) { PgTypeId? effectiveTypeId = pgTypeId is { } id ? GetEffectiveTypeId(id) : null; - var concreteTypeInfo = EffectiveTypeInfo.GetDefaultConcreteTypeInfo(effectiveTypeId); + var concreteTypeInfo = EffectiveTypeInfo.GetDefault(effectiveTypeId); var composingPgTypeId = _pgTypeId ?? GetPgTypeId(concreteTypeInfo.PgTypeId); return GetOrAdd(concreteTypeInfo, composingPgTypeId); } @@ -45,7 +46,7 @@ protected override PgConcreteTypeInfo GetDefaultCore(PgTypeId? pgTypeId) protected override PgConcreteTypeInfo? GetForFieldCore(Field field) { - if (EffectiveTypeInfo.GetConcreteTypeInfo(field with { PgTypeId = GetEffectivePgTypeId(field.PgTypeId)}) is not { } concreteTypeInfo) + if (EffectiveTypeInfo.GetForField(field with { PgTypeId = GetEffectivePgTypeId(field.PgTypeId)}) is not { } concreteTypeInfo) return null; var composingPgTypeId = _pgTypeId ?? 
GetPgTypeId(concreteTypeInfo.PgTypeId); @@ -70,7 +71,14 @@ PgConcreteTypeInfo GetOrAdd(PgConcreteTypeInfo concreteTypeInfo, PgTypeId pgType return _concreteInfoCache.GetOrAdd( concreteTypeInfo, static (_, state) - => new(state.ConcreteTypeInfo.Options, state.Instance.CreateConverter(state.ConcreteTypeInfo), state.PgTypeId), + => new(state.ConcreteTypeInfo.Options, + state.Instance.CreateConverter(state.ConcreteTypeInfo, out var requestedType), + state.PgTypeId, + requestedType: requestedType) + { + SupportsReading = state.ConcreteTypeInfo.SupportsReading, + SupportsWriting = state.ConcreteTypeInfo.SupportsWriting + }, state); } } diff --git a/src/Npgsql/Internal/PgConverter.cs b/src/Npgsql/Internal/PgConverter.cs index 424f6ca16f..ddbc21a9a3 100644 --- a/src/Npgsql/Internal/PgConverter.cs +++ b/src/Npgsql/Internal/PgConverter.cs @@ -16,7 +16,10 @@ public abstract class PgConverter public bool IsDbNullable => DbNullPredicateKind is not DbNullPredicate.None; private protected PgConverter(Type type, bool isNullDefaultValue, bool customDbNullPredicate = false) - => DbNullPredicateKind = customDbNullPredicate ? DbNullPredicate.Custom : InferDbNullPredicate(type, isNullDefaultValue); + { + TypeToConvert = type; + DbNullPredicateKind = customDbNullPredicate ? DbNullPredicate.Custom : InferDbNullPredicate(type, isNullDefaultValue); + } /// /// Whether this converter can handle the given format and with which buffer requirements. @@ -27,7 +30,63 @@ private protected PgConverter(Type type, bool isNullDefaultValue, bool customDbN /// The buffer requirements should not cover database NULL reads or writes, these are handled by the caller. 
public abstract bool CanConvert(DataFormat format, out BufferRequirements bufferRequirements); - internal abstract Type TypeToConvert { get; } + internal Type TypeToConvert { get; } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + PgConverter UnsafeAs() + { + // Justification: avoid perf cost of casting to a known base class type per dispatch call. + Debug.Assert(typeof(T) == TypeToConvert); + Debug.Assert(this is PgConverter); + return Unsafe.As>(this); + } + + /// Reads a value from the reader as . + /// Dispatches to the typed converter when matches ; otherwise routes through the object-erased path. + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public T Read(PgReader reader) + => typeof(T) != TypeToConvert + ? (T)ReadAsObject(reader) + : UnsafeAs().Read(reader); + + /// Asynchronously reads a value from the reader as . + /// Dispatches to the typed converter when matches ; otherwise routes through the object-erased path. + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default) + { + if (typeof(T) != TypeToConvert) + { + var task = ReadAsObjectAsync(reader, cancellationToken); + return task.IsCompletedSuccessfully ? new((T)task.Result) : ReadAndUnboxAsync(task); + } + + return UnsafeAs().ReadAsync(reader, cancellationToken); + + [MethodImpl(MethodImplOptions.NoInlining)] + static async ValueTask ReadAndUnboxAsync(ValueTask task) + => (T)await task.ConfigureAwait(false); + } + + /// Writes a value to the writer. + /// Dispatches to the typed converter when matches ; otherwise routes through the object-erased path. + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public void Write(PgWriter writer, [DisallowNull] T value) + { + if (typeof(T) != TypeToConvert) + { + WriteAsObject(writer, value); + return; + } + UnsafeAs().Write(writer, value); + } + + /// Asynchronously writes a value to the writer. 
+ /// Dispatches to the typed converter when matches ; otherwise routes through the object-erased path. + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public ValueTask WriteAsync(PgWriter writer, [DisallowNull] T value, CancellationToken cancellationToken = default) + => typeof(T) != TypeToConvert + ? WriteAsObjectAsync(writer, value, cancellationToken) + : UnsafeAs().WriteAsync(writer, value, cancellationToken); internal bool IsDbNullAsObject([NotNullWhen(false)] object? value, object? writeState) => DbNullPredicateKind switch @@ -40,16 +99,8 @@ internal bool IsDbNullAsObject([NotNullWhen(false)] object? value, object? write _ => ThrowDbNullPredicateOutOfRange() }; - [Obsolete("Use the overload without ref.")] - internal bool IsDbNullAsObject([NotNullWhen(false)] object? value, ref object? writeState) - => IsDbNullAsObject(value, writeState); - private protected abstract bool IsDbNullValueAsObject(object? value, object? writeState); - [Obsolete("Use the overload without ref.")] - private protected bool IsDbNullValueAsObject(object? value, ref object? writeState) - => IsDbNullValueAsObject(value, writeState); - internal abstract Size GetSizeAsObject(SizeContext context, object value, ref object? writeState); internal object ReadAsObject(PgReader reader) @@ -128,6 +179,7 @@ protected virtual bool IsDbNullValue(T? value, object? writeState) private protected override bool IsDbNullValueAsObject(object? value, object? writeState) => (default(T) is null || value is not null) && IsDbNullValue((T?)value, writeState); + /// Checks whether is considered a database null by this converter. public bool IsDbNull([NotNullWhen(false)] T? value, object? writeState) => DbNullPredicateKind switch { @@ -144,22 +196,26 @@ public bool IsDbNull([NotNullWhen(false)] T? value, object? writeState) public bool IsDbNull([NotNullWhen(false)] T? value, ref object? writeState) => IsDbNull(value, writeState); + /// Reads a value from the reader. 
public abstract T Read(PgReader reader); + /// Asynchronously reads a value from the reader. public abstract ValueTask ReadAsync(PgReader reader, CancellationToken cancellationToken = default); + /// Computes the serialized size for , producing any required . public abstract Size GetSize(SizeContext context, [DisallowNull]T value, ref object? writeState); + + /// Writes a value to the writer. public abstract void Write(PgWriter writer, [DisallowNull] T value); + /// Asynchronously writes a value to the writer. public abstract ValueTask WriteAsync(PgWriter writer, [DisallowNull] T value, CancellationToken cancellationToken = default); - internal sealed override Type TypeToConvert => typeof(T); - internal sealed override Size GetSizeAsObject(SizeContext context, object value, ref object? writeState) => GetSize(context, (T)value, ref writeState); } static class PgConverterExtensions { - public static Size? GetSizeOrDbNull(this PgConverter converter, DataFormat format, Size writeRequirement, T? value, ref object? writeState) + public static Size? IsDbNullOrGetSize(this PgConverter converter, DataFormat format, Size writeRequirement, T? value, ref object? writeState) { if (converter.IsDbNull(value, writeState)) return null; @@ -182,7 +238,7 @@ static class PgConverterExtensions return size; } - public static Size? GetSizeOrDbNullAsObject(this PgConverter converter, DataFormat format, Size writeRequirement, object? value, ref object? writeState) + public static Size? IsDbNullOrGetSizeAsObject(this PgConverter converter, DataFormat format, Size writeRequirement, object? value, ref object? writeState) { if (converter.IsDbNullAsObject(value, writeState)) return null; @@ -204,13 +260,6 @@ static class PgConverterExtensions return size; } - - internal static PgConverter UnsafeDowncast(this PgConverter converter) - { - // Justification: avoid perf cost of casting to a known base class type per read/write, see callers. 
- Debug.Assert(converter is PgConverter); - return Unsafe.As>(converter); - } } [method: SetsRequiredMembers] diff --git a/src/Npgsql/Internal/PgReader.cs b/src/Npgsql/Internal/PgReader.cs index d9686cb5bb..39b99b921a 100644 --- a/src/Npgsql/Internal/PgReader.cs +++ b/src/Npgsql/Internal/PgReader.cs @@ -477,7 +477,7 @@ internal void EndCharsRead() _charsReadBuffer = null; } - internal void Init(int fieldSize, DataFormat fieldFormat, bool resumable = false) + internal void Init(DataFormat fieldFormat, int fieldSize, bool resumable = false) { if (Initialized) ThrowHelper.ThrowInvalidOperationException("Already initialized"); @@ -485,15 +485,15 @@ internal void Init(int fieldSize, DataFormat fieldFormat, bool resumable = false _fieldStartPos = _buffer.CumulativeReadPosition; _fieldEndPos = _fieldStartPos + fieldSize; _fieldSize = fieldSize; - _fieldFormat = fieldFormat; _resumable = resumable; + _fieldFormat = fieldFormat; } - internal void StartRead(Size bufferRequirement) + internal void StartRead(PgFieldBinding binding) { Debug.Assert(FieldSize >= 0); - _fieldBufferRequirement = bufferRequirement; - var byteCount = BufferRequirements.GetMinimumBufferByteCount(bufferRequirement, FieldSize); + var byteCount = BufferRequirements.GetMinimumBufferByteCount(binding.BufferRequirement, FieldSize); + _fieldBufferRequirement = binding.BufferRequirement; if (ShouldBuffer(byteCount)) BufferNoInlined(byteCount); @@ -502,11 +502,11 @@ void BufferNoInlined(int byteCount) => Buffer(byteCount); } - internal ValueTask StartReadAsync(Size bufferRequirement, CancellationToken cancellationToken) + internal ValueTask StartReadAsync(PgFieldBinding binding, CancellationToken cancellationToken) { Debug.Assert(FieldSize >= 0); - _fieldBufferRequirement = bufferRequirement; - var byteCount = BufferRequirements.GetMinimumBufferByteCount(bufferRequirement, FieldSize); + var byteCount = BufferRequirements.GetMinimumBufferByteCount(binding.BufferRequirement, FieldSize); + _fieldBufferRequirement 
= binding.BufferRequirement; return ShouldBuffer(byteCount) ? BufferAsync(byteCount, cancellationToken) : new(); } @@ -668,6 +668,7 @@ void ResetCurrent() _currentSize = UninitializedSentinel; } + [MethodImpl(MethodImplOptions.AggressiveInlining)] internal int Restart(bool resumable) { if (!Initialized) diff --git a/src/Npgsql/Internal/PgSerializerOptions.cs b/src/Npgsql/Internal/PgSerializerOptions.cs index daa69c3993..da6a5722b9 100644 --- a/src/Npgsql/Internal/PgSerializerOptions.cs +++ b/src/Npgsql/Internal/PgSerializerOptions.cs @@ -29,7 +29,7 @@ internal PgSerializerOptions(NpgsqlDatabaseInfo databaseInfo, PgTypeInfoResolver _resolverChain = resolverChain ?? new(); _timeZoneProvider = timeZoneProvider; DatabaseInfo = databaseInfo; - UnspecifiedDBNullTypeInfo = new(this, new Converters.Internal.VoidConverter(), DataTypeName.Unspecified, unboxedType: typeof(DBNull)); + UnspecifiedDBNullTypeInfo = new(this, new Converters.Internal.VoidConverter(), DataTypeName.Unspecified, requestedType: typeof(DBNull)); } internal PgConcreteTypeInfo UnspecifiedDBNullTypeInfo { get; } diff --git a/src/Npgsql/Internal/PgTypeInfo.cs b/src/Npgsql/Internal/PgTypeInfo.cs index 73a18aa31f..97417eb852 100644 --- a/src/Npgsql/Internal/PgTypeInfo.cs +++ b/src/Npgsql/Internal/PgTypeInfo.cs @@ -1,273 +1,209 @@ using System; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Threading; +using System.Threading.Tasks; using Npgsql.Internal.Postgres; +using Npgsql.Util; namespace Npgsql.Internal; [Experimental(NpgsqlDiagnostics.ConvertersExperimental)] public abstract class PgTypeInfo { - readonly bool _canBinaryConvert; - readonly BufferRequirements _binaryBufferRequirements; - - readonly bool _canTextConvert; - readonly BufferRequirements _textBufferRequirements; - - PgTypeInfo(PgSerializerOptions options, Type type, Type? unboxedType) + PgTypeInfo(PgSerializerOptions options, Type type, Type? 
requestedType) { - if (unboxedType is not null && !type.IsAssignableFrom(unboxedType)) - throw new ArgumentException("A value of unboxed type is not assignable to converter type", nameof(unboxedType)); - Options = options; - IsBoxing = unboxedType is not null; - Type = unboxedType ?? type; - SupportsReading = GetDefaultSupportsReading(type, unboxedType); - SupportsWriting = true; - } - private protected PgTypeInfo(PgSerializerOptions options, PgConverter converter, PgTypeId pgTypeId, Type? unboxedType = null) - : this(options, converter.TypeToConvert, unboxedType) - { - Converter = converter; - PgTypeId = options.GetCanonicalTypeId(pgTypeId); - _canBinaryConvert = converter.CanConvert(DataFormat.Binary, out _binaryBufferRequirements); - _canTextConvert = converter.CanConvert(DataFormat.Text, out _textBufferRequirements); + HasExactType = requestedType is null || requestedType == type; + Type = requestedType is null ? type : GetReportedType(type, requestedType) ?? type; } - private protected PgTypeInfo(PgSerializerOptions options, Type type, PgConcreteTypeInfo? defaultConcrete, Type? unboxedType = null) - : this(options, type, unboxedType) - { - if (defaultConcrete is not null) - { - Debug.Assert(options.PortableTypeIds && defaultConcrete.PgTypeId.IsDataTypeName || !options.PortableTypeIds && defaultConcrete.PgTypeId.IsOid); - PgTypeId = defaultConcrete.PgTypeId; - Converter = defaultConcrete.Converter; - _canBinaryConvert = defaultConcrete.Converter.CanConvert(DataFormat.Binary, out _binaryBufferRequirements); - _canTextConvert = defaultConcrete.Converter.CanConvert(DataFormat.Text, out _textBufferRequirements); - } - } + private protected PgTypeInfo(PgSerializerOptions options, Type type, PgTypeId? pgTypeId, Type? requestedType = null) + : this(options, type, requestedType) + => PgTypeId = pgTypeId is { } id ? 
options.GetCanonicalTypeId(id) : null; - bool HasCachedInfo(PgConverter converter) => ReferenceEquals(Converter, converter); + private protected PgTypeInfo(PgSerializerOptions options, PgConverter converter, PgTypeId pgTypeId, Type? requestedType = null) + : this(options, converter.TypeToConvert, pgTypeId, requestedType) {} public Type Type { get; } public PgSerializerOptions Options { get; } - public bool SupportsReading { get; init; } - public bool SupportsWriting { get; init; } - public DataFormat? PreferredFormat { get; init; } - - // Doubles as the storage for the converter coming from a default provider result (used to confirm whether we can use cached info). - protected PgConverter? Converter { get; } - - // TODO pull validate from options + internal exempt for perf? - internal bool ValidateProviderResults => true; - - // Used for internal converters to save on binary bloat. - internal bool IsBoxing { get; } + // True when the reported type matches the converter's type exactly (no reported type given at construction, or + // the given reported type equals the converter type). When false, the reported type is a widening of the converter + // type (e.g. Array/Stream base-type reporting, enum-underlying widening) and the caller must dispatch through the + // info — the info routes reference-variance cases through the object APIs and layout-identity cases (enum) through + // the typed path with Unsafe.As, as appropriate for the widening kind. + // Having a single converter cover multiple reported types (Arrays, Streams) reduces the number of generic + // instantiations that need to be compiled for AOT. + internal bool HasExactType { get; } public PgTypeId? PgTypeId { get; } - // Having it here so we can easily extend any behavior. - internal void DisposeWriteState(object writeState) + /// + /// Makes a for the given field. + /// + /// The field whose metadata drives the concrete type info selection. + /// The to use for the field. 
+ /// + /// When this instance is already concrete it is returned directly; otherwise the underlying provider is consulted + /// using the field's metadata (e.g. ) to select the appropriate concrete type info. + /// + public PgConcreteTypeInfo MakeConcreteForField(Field field) { - if (writeState is IDisposable disposable) - disposable.Dispose(); + if (this is PgConcreteTypeInfo concrete) + return concrete; + + // Decided providers skip GetDefault's validation. The prior GetForField call already validated + // the id. Undecided providers thread it so GetDefaultCore can dispatch on it. + var providerTypeInfo = (PgProviderTypeInfo)this; + return providerTypeInfo.GetForField(field) + ?? providerTypeInfo.GetDefault(providerTypeInfo.PgTypeId is null ? field.PgTypeId : null); } - public PgConcreteTypeInfo GetConcreteTypeInfo(T? value, out object? writeState) + /// + /// Makes a for the given value. + /// + /// The value whose content drive the concrete type info selection. + /// Contains any write state that was produced. + /// The CLR type of the value. + /// The to use for the value. + /// + /// When this instance is already concrete it is returned directly; otherwise the underlying provider is consulted + /// using the value to select the appropriate concrete type info. + /// + public PgConcreteTypeInfo MakeConcreteForValue(T? value, out object? writeState) + => MakeConcreteForValue(default, value, out writeState); + + /// + /// Makes a for the given value, with an explicit provider context. + /// + /// The context used when this instance is a provider based info. + /// The value whose content drives the concrete type info selection. + /// Contains any write state that was produced. + /// The CLR type of the value. + /// + /// When this instance is already concrete it is returned directly; otherwise the underlying provider is consulted + /// using the value and the supplied context to select the appropriate concrete type info. 
+ /// + public PgConcreteTypeInfo MakeConcreteForValue(ProviderValueContext context, T? value, out object? writeState) { - if (this is not PgProviderTypeInfo providerTypeInfo) + if (this is PgConcreteTypeInfo concrete) { writeState = null; - return (PgConcreteTypeInfo)this; + return concrete; } - return providerTypeInfo.GetConcreteTypeInfo(default, value, out writeState) ?? providerTypeInfo.GetDefaultConcreteTypeInfo(null); + // Make sure we handle the non-exact typed provider case. + // This will never cause boxing as non-exact typed infos only happen for subtype relationships, i.e. reference types. + // We make sure to fall through to GetForValue which has a better error if T is not at all related to this info. + var providerTypeInfo = (PgProviderTypeInfo)this; + var concreteTypeInfo = PgProviderTypeInfo.GetProvider(providerTypeInfo) is not PgConcreteTypeInfoProvider && providerTypeInfo.Type == typeof(T) + ? providerTypeInfo.GetForValueAsObject(context, (object?)value, out writeState) + : providerTypeInfo.GetForValue(context, value, out writeState); + + // Decided providers skip GetDefault's validation. The prior GetForValue call already validated + // the id. Undecided providers thread it so GetDefaultCore can dispatch on it. + return concreteTypeInfo ?? providerTypeInfo.GetDefault(providerTypeInfo.PgTypeId is null ? context.ExpectedPgTypeId : null); } - // Note: this api is not called GetConcreteTypeInfoAsObject as the semantics are extended, DBNull is a NULL value for all object values. - public PgConcreteTypeInfo GetObjectConcreteTypeInfo(object? value, out object? writeState) + /// + /// Makes a for the given object value. + /// + /// The untyped value whose content drives the concrete type info selection. + /// Contains any write state that was produced. + /// The to use for the value. 
+ /// + /// When this instance is already concrete it is returned directly; otherwise the underlying provider is consulted + /// using the value to select the appropriate concrete type info. + /// + public PgConcreteTypeInfo MakeConcreteForValueAsObject(object? value, out object? writeState) + => MakeConcreteForValueAsObject(default, value, out writeState); + + /// + /// Makes a for the given object value. + /// + /// The context used when this instance is a provider based info. + /// The untyped value whose content drives the concrete type info selection. + /// Contains any write state that was produced. + /// The to use for the value. + /// + /// When this instance is already concrete it is returned directly; otherwise the underlying provider is consulted + /// using the value to select the appropriate concrete type info. + /// + public PgConcreteTypeInfo MakeConcreteForValueAsObject(ProviderValueContext context, object? value, out object? writeState) { - writeState = null; - switch (this) + if (this is PgConcreteTypeInfo concrete) { - case PgConcreteTypeInfo v: - return v; - case PgProviderTypeInfo providerTypeInfo: - PgConcreteTypeInfo? concreteTypeInfo = null; - if (value is not DBNull) - concreteTypeInfo = providerTypeInfo.GetAsObjectConcreteTypeInfo(default, value, out writeState); - return concreteTypeInfo ?? 
providerTypeInfo.GetDefaultConcreteTypeInfo(null); - default: - return ThrowNotSupported(); - } - - static PgConcreteTypeInfo ThrowNotSupported() - => throw new NotSupportedException("Should not happen, please file a bug."); - } - - bool CanConvert(PgConverter converter, DataFormat format, out BufferRequirements bufferRequirements) - { - if (HasCachedInfo(converter)) - { - switch (format) - { - case DataFormat.Binary: - bufferRequirements = _binaryBufferRequirements; - return _canBinaryConvert; - case DataFormat.Text: - bufferRequirements = _textBufferRequirements; - return _canTextConvert; - } + writeState = null; + return concrete; } - return converter.CanConvert(format, out bufferRequirements); + // Decided providers skip GetDefault's validation. The prior GetForValueAsObject call already validated + // the id. Undecided providers thread it so GetDefaultCore can dispatch on it. + var providerTypeInfo = (PgProviderTypeInfo)this; + return providerTypeInfo.GetForValueAsObject(context, value, out writeState) + ?? providerTypeInfo.GetDefault(providerTypeInfo.PgTypeId is null ? context.ExpectedPgTypeId : null); } - public BufferRequirements? GetBufferRequirements(PgConverter converter, DataFormat format) - { - var success = CanConvert(converter, format, out var bufferRequirements); - return success ? bufferRequirements : null; - } - - // TryBind for reading. - internal bool TryBind(Field field, DataFormat format, out PgConverterInfo info) + // Having it here so we can easily extend any behavior. + internal void DisposeWriteState(object writeState) { - switch (this) - { - case PgConcreteTypeInfo v: - if (!CanConvert(v.Converter, format, out var bufferRequirements)) - { - info = default; - return false; - } - info = new(this, v.Converter, bufferRequirements.Read); - return true; - case PgProviderTypeInfo providerTypeInfo: - var concreteTypeInfo = providerTypeInfo.GetConcreteTypeInfo(field) ?? 
providerTypeInfo.GetDefaultConcreteTypeInfo(field.PgTypeId); - if (!CanConvert(concreteTypeInfo.Converter, format, out bufferRequirements)) - { - info = default; - return false; - } - info = new(this, concreteTypeInfo.Converter, bufferRequirements.Read); - return true; - default: - throw new NotSupportedException("Should not happen, please file a bug."); - } + if (writeState is IDisposable disposable) + disposable.Dispose(); } - // Bind for reading. - internal PgConverterInfo Bind(Field field, DataFormat format) + /// + /// Returns when it is a strict subtype of , otherwise null. + /// Throws when the two are not in a subtype relationship. + /// + protected static Type? GetReportedType(Type converterType, Type requestedType) { - if (!TryBind(field, format, out var info)) - ThrowHelper.ThrowInvalidOperationException($"Resolved converter does not support {format} format."); + if (!requestedType.IsInSubtypeRelationshipWith(converterType)) + throw new ArgumentException($"The requested type {requestedType} is not in a subtype relationship with the converter's type {converterType}.", nameof(requestedType)); - return info; + return requestedType != converterType && requestedType.IsAssignableTo(converterType) ? requestedType : null; } +} - // Bind for writing. - /// When result is null, the value was interpreted to be a SQL NULL. - internal PgConverterInfo? Bind(PgConverter converter, T? value, out Size size, ref object? writeState, out DataFormat format, DataFormat? formatPreference = null) - { - // Basically exists to catch cases like object[] resolving a polymorphic read converter, better to fail during binding than writing. - if (!SupportsWriting) - ThrowHelper.ThrowNotSupportedException($"Writing {Type} is not supported for this type info."); - - format = ResolveFormat(converter, out var bufferRequirements, formatPreference ?? 
PreferredFormat); - - if (converter.GetSizeOrDbNull(format, bufferRequirements.Write, value, ref writeState) is not { } sizeOrDbNull) - { - size = default; - return null; - } +public sealed class PgProviderTypeInfo : PgTypeInfo +{ + readonly PgConcreteTypeInfoProvider _typeInfoProvider; + readonly PgConcreteTypeInfo? _defaultConcrete; - size = sizeOrDbNull; - return new(this, converter, bufferRequirements.Write); - } + public PgProviderTypeInfo(PgSerializerOptions options, PgConcreteTypeInfoProvider typeInfoProvider, PgTypeId? pgTypeId) + : this(options, typeInfoProvider, pgTypeId, requestedType: null) + {} - // Bind for writing. - // Note: this api is not called BindAsObject as the semantics are extended, DBNull is a NULL value for all object values. - /// When result is null or DBNull, the value was interpreted to be a SQL NULL. - internal PgConverterInfo? BindObject(PgConverter converter, object? value, ref Size size, ref object? writeState, out DataFormat format, DataFormat? formatPreference = null) + internal PgProviderTypeInfo(PgSerializerOptions options, PgConcreteTypeInfoProvider typeInfoProvider, PgTypeId? pgTypeId, Type? requestedType) + : base(options, typeInfoProvider.TypeToConvert, pgTypeId, requestedType) { - // Basically exists to catch cases like object[] resolving a polymorphic read converter, better to fail during binding than writing. - if (!SupportsWriting) - throw new NotSupportedException($"Writing {Type} is not supported for this type info."); - - format = ResolveFormat(converter, out var bufferRequirements, formatPreference ?? PreferredFormat); - - // Given SQL values are effectively a union of T | NULL we support DBNull.Value to signify a NULL value for all types except DBNull in this api. 
- if (value is DBNull && Type != typeof(DBNull) || converter.GetSizeOrDbNullAsObject(format, bufferRequirements.Write, value, ref writeState) is not { } sizeOrDbNull) - { - size = default; - return null; - } + _typeInfoProvider = typeInfoProvider; - size = sizeOrDbNull; - return new(this, converter, bufferRequirements.Write); + // Always validate the default provider result, the info will be re-used so there is no real downside. + var result = typeInfoProvider.GetDefault(pgTypeId is { } id ? options.GetCanonicalTypeId(id) : null); + ValidateResult(nameof(PgConcreteTypeInfoProvider.GetDefault), result, typeInfoProvider.TypeToConvert, options.PortableTypeIds); + _defaultConcrete = result; } - DataFormat ResolveFormat(PgConverter converter, out BufferRequirements bufferRequirements, DataFormat? formatPreference = null) + public PgConcreteTypeInfo GetDefault(PgTypeId? pgTypeId) { - // First try to check for preferred support. - switch (formatPreference) + if (pgTypeId is { } id && PgTypeId is { } decidedId) { - case DataFormat.Binary when CanConvert(converter, DataFormat.Binary, out bufferRequirements): - return DataFormat.Binary; - case DataFormat.Text when CanConvert(converter, DataFormat.Text, out bufferRequirements): - return DataFormat.Text; - default: - // The common case, no preference given (or no match) means we default to binary if supported. - if (CanConvert(converter, DataFormat.Binary, out bufferRequirements)) - return DataFormat.Binary; - if (CanConvert(converter, DataFormat.Text, out bufferRequirements)) - return DataFormat.Text; + if (id != decidedId) + ThrowUnexpectedPgTypeId(nameof(pgTypeId)); - ThrowHelper.ThrowInvalidOperationException("Converter doesn't support any data format."); - bufferRequirements = default; - return default; + Debug.Assert(_defaultConcrete is not null); + return _defaultConcrete; } - } - - // We assume a boxing type info does not support reading as the converter won't be able to produce the derived type statically. 
- // Cases like Array converters unboxing to int[], int[,] etc. are the exception and the reason why SupportsReading is a settable property. - internal static bool GetDefaultSupportsReading(Type type, Type? unboxedType) - => unboxedType is null || unboxedType == type; -} - -public sealed class PgProviderTypeInfo( - PgSerializerOptions options, - PgConcreteTypeInfoProvider typeInfoProvider, - PgTypeId? pgTypeId, - Type? unboxedType = null) - : PgTypeInfo(options, - typeInfoProvider.TypeToConvert, - pgTypeId is { } typeId ? GetDefault(options, typeInfoProvider, typeId) : null, - unboxedType) -{ - readonly PgConcreteTypeInfoProvider _typeInfoProvider = typeInfoProvider; - - // We'll always validate the default provider result, the info will be re-used so there is no real downside. - static PgConcreteTypeInfo GetDefault(PgSerializerOptions options, PgConcreteTypeInfoProvider concreteTypeInfoProvider, PgTypeId typeId) - { - var result = concreteTypeInfoProvider.GetDefault(options.GetCanonicalTypeId(typeId)); - ValidateResult(nameof(GetDefault), result, concreteTypeInfoProvider.TypeToConvert, options.PortableTypeIds); - return result; - } - - public PgConcreteTypeInfo GetDefaultConcreteTypeInfo(PgTypeId? pgTypeId) - { - if (pgTypeId is { } id && PgTypeId is { } decidedId && id != decidedId) - ThrowUnexpectedPgTypeId(nameof(pgTypeId)); var result = _typeInfoProvider.GetDefault(pgTypeId ?? PgTypeId); ValidateResult(nameof(PgConcreteTypeInfoProvider.GetDefault), result); return result; } - public PgConcreteTypeInfo? GetConcreteTypeInfo(Field field) + public PgConcreteTypeInfo? GetForField(Field field) { if (PgTypeId is { } decidedId && field.PgTypeId != decidedId) ThrowUnexpectedPgTypeId(nameof(field)); @@ -278,7 +214,7 @@ public PgConcreteTypeInfo GetDefaultConcreteTypeInfo(PgTypeId? pgTypeId) return result; } - public PgConcreteTypeInfo? GetConcreteTypeInfo(ProviderValueContext context, T? value, out object? writeState) + public PgConcreteTypeInfo? 
GetForValue(ProviderValueContext context, T? value, out object? writeState) { if (PgTypeId is { } pgTypeId) { @@ -300,12 +236,12 @@ public PgConcreteTypeInfo GetDefaultConcreteTypeInfo(PgTypeId? pgTypeId) return result; PgConcreteTypeInfo ThrowNotSupportedType(Type? type) - => throw new NotSupportedException(IsBoxing - ? $"TypeInfo only supports boxing conversions, call {nameof(GetAsObjectConcreteTypeInfo)} or {nameof(GetObjectConcreteTypeInfo)} instead." - : $"TypeInfo is not of type {type}"); + => throw new NotSupportedException(type == Type + ? $"PgProviderTypeInfo does not exactly match type {type}, call {nameof(GetForValueAsObject)} instead." + : $"PgProviderTypeInfo is incompatible with type {type}"); } - public PgConcreteTypeInfo? GetAsObjectConcreteTypeInfo(ProviderValueContext context, object? value, out object? writeState) + public PgConcreteTypeInfo? GetForValueAsObject(ProviderValueContext context, object? value, out object? writeState) { if (PgTypeId is { } pgTypeId) { @@ -342,40 +278,192 @@ static void ValidateResult(string methodName, PgConcreteTypeInfo result, Type ex } } -public sealed class PgConcreteTypeInfo(PgSerializerOptions options, PgConverter converter, PgTypeId pgTypeId, Type? 
unboxedType = null) - : PgTypeInfo(options, converter, pgTypeId, unboxedType) +public sealed class PgConcreteTypeInfo : PgTypeInfo { - public new PgConverter Converter => base.Converter!; - public new PgTypeId PgTypeId => base.PgTypeId.GetValueOrDefault(); -} + readonly bool _canBinaryConvert; + readonly BufferRequirements _binaryBufferRequirements; -readonly struct PgConverterInfo -{ - readonly PgTypeInfo _typeInfo; + readonly bool _canTextConvert; + readonly BufferRequirements _textBufferRequirements; + + public PgConcreteTypeInfo(PgSerializerOptions options, PgConverter converter, PgTypeId pgTypeId) + : this(options, converter, pgTypeId, requestedType: null) + {} - public PgConverterInfo(PgTypeInfo pgTypeInfo, PgConverter converter, Size bufferRequirement) + internal PgConcreteTypeInfo(PgSerializerOptions options, PgConverter converter, PgTypeId pgTypeId, Type? requestedType) + : base(options, converter, pgTypeId, requestedType) { - _typeInfo = pgTypeInfo; Converter = converter; - BufferRequirement = bufferRequirement; + _canBinaryConvert = converter.CanConvert(DataFormat.Binary, out _binaryBufferRequirements); + _canTextConvert = converter.CanConvert(DataFormat.Text, out _textBufferRequirements); - // Object typed providers can return any type of converter, so we check the type of the converter instead. - // We cannot do this in general as we should respect the 'unboxed type' of infos, which can differ from the converter type. 
- if (pgTypeInfo is PgProviderTypeInfo && pgTypeInfo.Type == typeof(object)) - TypeToConvert = Converter.TypeToConvert; - else - TypeToConvert = pgTypeInfo.Type; + SupportsReading = GetDefaultSupportsReading(converter.TypeToConvert, requestedType); + SupportsWriting = true; } - public bool IsDefault => _typeInfo is null; + Type TypeToConvert + { + [MethodImpl(MethodImplOptions.AggressiveInlining)] + get => Converter.TypeToConvert; + } - public Type TypeToConvert { get; } + public PgConverter Converter { get; } - public PgTypeInfo TypeInfo => _typeInfo; + public bool SupportsReading { get; init; } + public bool SupportsWriting { get; init; } - public PgConverter Converter { get; } + // We assume a non-exact typed info does not support reading as the converter won't be able to produce the derived type statically. + // Cases like Array converters reading int[], int[,] etc. are the exception and the reason why SupportsReading is a settable property. + internal static bool GetDefaultSupportsReading(Type type, Type? requestedType) + => requestedType is null || GetReportedType(type, requestedType) is not { } reportedType || reportedType == type; + + public DataFormat? PreferredFormat { get; init; } + public new PgTypeId PgTypeId => base.PgTypeId.GetValueOrDefault(); + + internal bool CanReadTo(Type type) => Type == type || (!HasExactType && Type.IsAssignableTo(type)); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal T ReadFieldValue(PgReader reader, in PgFieldBinding binding) + { + reader.StartRead(binding); + var result = Converter.Read(reader); + reader.EndRead(); + return result; + } + + internal async ValueTask ReadFieldValueAsync(PgReader reader, PgFieldBinding binding, CancellationToken cancellationToken) + { + await reader.StartReadAsync(binding, cancellationToken).ConfigureAwait(false); + + // Inline copy of Converter.ReadAsync to keep everything in one async frame. + var result = typeof(T) != TypeToConvert + ? 
(T)await Converter.ReadAsObjectAsync(reader, cancellationToken).ConfigureAwait(false) + : await Unsafe.As>(Converter).ReadAsync(reader, cancellationToken).ConfigureAwait(false); + + await reader.EndReadAsync().ConfigureAwait(false); + return result; + } + + // TryBind for reading. + internal bool TryBindField(DataFormat format, out PgFieldBinding binding) + { + if (!Converter.CanConvert(format, out var bufferRequirements)) + { + binding = default; + return false; + } + binding = new(format, bufferRequirements.Read); + return true; + } + + // Bind for reading. + internal PgFieldBinding BindField(DataFormat format) + { + if (!TryBindField(format, out var info)) + ThrowHelper.ThrowInvalidOperationException($"Converter does not support {format} format."); + + return info; + } + + // Bind for writing. + /// When result is null, the value was interpreted to be a SQL NULL. + internal PgValueBinding BindParameterValue(T? value, object? writeState, DataFormat? formatPreference = null) + { + if (typeof(T) != TypeToConvert) + return BindParameterObjectValue(value, writeState, formatPreference); + + // Basically exists to catch cases like object[] resolving a polymorphic read converter, better to fail during binding than writing. + if (!SupportsWriting) + ThrowHelper.ThrowNotSupportedException($"Writing {Type} is not supported for this type info."); + + var format = ResolveFormat(out var bufferRequirements, formatPreference ?? PreferredFormat); + + Debug.Assert(Converter is PgConverter); + if (Unsafe.As>(Converter).IsDbNullOrGetSize(format, bufferRequirements.Write, value, ref writeState) is not { } size) + return new(format, bufferRequirements.Write, null, null); + + return new(format, bufferRequirements.Write, size, writeState); + } + + // Bind for writing. + // Note: this api is not called BindAsObject as the semantics are extended, DBNull is a NULL value for all object values. + /// When result is null or DBNull, the value was interpreted to be a SQL NULL. 
+ internal PgValueBinding BindParameterObjectValue(object? value, object? writeState, DataFormat? formatPreference = null) + { + // Basically exists to catch cases like object[] resolving a polymorphic read converter, better to fail during binding than writing. + if (!SupportsWriting) + ThrowHelper.ThrowNotSupportedException($"Writing {Type} is not supported for this type info."); + + var format = ResolveFormat(out var bufferRequirements, formatPreference ?? PreferredFormat); + + // Given SQL values are effectively a union of T | NULL we support DBNull.Value to signify a NULL value for all types except DBNull in this api. + if (value is DBNull && Type != typeof(DBNull) || Converter.IsDbNullOrGetSizeAsObject(format, bufferRequirements.Write, value, ref writeState) is not { } size) + { + return new(format, bufferRequirements.Write, null, null); + } + + return new(format, bufferRequirements.Write, size, writeState); + } + + DataFormat ResolveFormat(out BufferRequirements bufferRequirements, DataFormat? formatPreference = null) + { + // First try to check for preferred support. + switch (formatPreference) + { + case DataFormat.Binary when _canBinaryConvert: + bufferRequirements = _binaryBufferRequirements; + return DataFormat.Binary; + case DataFormat.Text when _canTextConvert: + bufferRequirements = _textBufferRequirements; + return DataFormat.Text; + default: + // The common case, no preference given (or no match) means we default to binary if supported. 
+ if (_canBinaryConvert) + { + bufferRequirements = _binaryBufferRequirements; + return DataFormat.Binary; + } + + if (Converter.CanConvert(DataFormat.Text, out bufferRequirements)) + { + bufferRequirements = _textBufferRequirements; + return DataFormat.Text; + } + + ThrowHelper.ThrowInvalidOperationException("Converter doesn't support any data format."); + bufferRequirements = default; + return default; + } + } +} + +readonly struct PgFieldBinding +{ + internal PgFieldBinding(DataFormat dataFormat, Size bufferRequirement) + { + DataFormat = dataFormat; + BufferRequirement = bufferRequirement; + } + + public DataFormat DataFormat { get; } public Size BufferRequirement { get; } +} + +readonly struct PgValueBinding +{ + public DataFormat DataFormat { get; } + public Size BufferRequirement { get; } + public Size? Size { get; } + public object? WriteState { get; } + + internal PgValueBinding(DataFormat dataFormat, Size bufferRequirement, Size? size, object? writeState) + { + DataFormat = dataFormat; + BufferRequirement = bufferRequirement; + Size = size; + WriteState = writeState; + } - /// Whether Converter.TypeToConvert matches PgTypeInfo.Type, if it doesn't object apis should be used. - public bool IsBoxingConverter => _typeInfo.IsBoxing; + [MemberNotNullWhen(false, nameof(Size))] + public bool IsDbNullBinding => Size is null; } diff --git a/src/Npgsql/Internal/PgWriter.cs b/src/Npgsql/Internal/PgWriter.cs index 572197dbd4..fde304f547 100644 --- a/src/Npgsql/Internal/PgWriter.cs +++ b/src/Npgsql/Internal/PgWriter.cs @@ -171,37 +171,49 @@ void Slow(int count) void Advance(int count) => _pos += count; - internal void Commit(int? 
expectedByteCount = null) + void Commit() { - _totalBytesWritten += _pos - _offset; - _writer.Advance(_pos - _offset); + var written = _pos - _offset; + _totalBytesWritten += written; + _writer.Advance(written); _offset = _pos; - - if (expectedByteCount is not null) - { - var totalBytesWritten = _totalBytesWritten; - _totalBytesWritten = 0; - if (totalBytesWritten != expectedByteCount) - ThrowHelper.ThrowInvalidOperationException($"Bytes written ({totalBytesWritten}) and expected byte count ({expectedByteCount}) don't match."); - } } - internal ValueTask BeginWrite(bool async, ValueMetadata current, CancellationToken cancellationToken) + internal void CommitAndResetTotal(int expectedByteCount) { - _current = current; + Commit(); - var bufferRequirementByteCount = BufferRequirements.GetMinimumBufferByteCount(current.BufferRequirement, current.Size.GetValueOrDefault()); - if (ShouldFlush(bufferRequirementByteCount)) - return Flush(async, cancellationToken); + var totalBytesWritten = _totalBytesWritten; + _totalBytesWritten = 0; + if (totalBytesWritten != expectedByteCount) + ThrowHelper.ThrowInvalidOperationException($"Bytes written ({totalBytesWritten}) and expected byte count ({expectedByteCount}) don't match."); + } - return new(); + internal ValueTask StartWrite(bool async, in PgValueBinding binding, CancellationToken cancellationToken) + { + if (binding.IsDbNullBinding) + ThrowHelper.ThrowArgumentException("Binding context cannot be for a DbNull.", nameof(binding)); + + var bufferRequirement = binding.BufferRequirement; + var size = binding.Size.GetValueOrDefault(); + _current = new ValueMetadata + { + Format = binding.DataFormat, + BufferRequirement = bufferRequirement, + Size = size, + // WriteState is generally null, checking for null and showing the null literal to the JIT allows us to skip the write barrier if so. + WriteState = binding.WriteState is null ? 
null : binding.WriteState + }; + + return ShouldFlush(BufferRequirements.GetMinimumBufferByteCount(bufferRequirement, size.GetValueOrDefault())) + ? Flush(async, cancellationToken) + : new(); } - public ValueMetadata Current => _current; - internal Size CurrentBufferRequirement => _current.BufferRequirement; + internal void EndWrite(Size expectedByteCount) + => CommitAndResetTotal(expectedByteCount.GetValueOrDefault()); - // When we don't know the size during writing we're using the writer buffer as a sizing mechanism. - internal bool BufferingWrite => Current.Size.Kind is SizeKind.Unknown; + public ValueMetadata Current => _current; // This method lives here to remove the chances oids will be cached on converters inadvertently when data type names should be used. // Such a mapping (for instance for array element oids) should be done per operation to ensure it is done in the context of a specific backend. @@ -467,10 +479,6 @@ async ValueTask Core(bool allowMixedIO, ReadOnlyMemory buffer, Cancellatio public Stream GetStream(bool allowMixedIO = false) => new PgWriterStream(this, allowMixedIO); - // We also check pos != offset to speed up simple value writes, as field level buffering was handled by writer.StartWrite() already. 
- public bool ShouldFlushCurrent() - => !BufferingWrite && _pos != _offset && ShouldFlush(BufferRequirements.GetMinimumBufferByteCount(Current.BufferRequirement, Current.Size.GetValueOrDefault())); - public bool ShouldFlush(int byteCount) => Remaining < byteCount && FlushMode is not FlushMode.None; public void Flush(TimeSpan timeout = default) diff --git a/src/Npgsql/Internal/Postgres/Field.cs b/src/Npgsql/Internal/Postgres/Field.cs index abd74a0bc7..bae177bd75 100644 --- a/src/Npgsql/Internal/Postgres/Field.cs +++ b/src/Npgsql/Internal/Postgres/Field.cs @@ -9,4 +9,6 @@ public readonly struct Field(string name, PgTypeId pgTypeId, int typeModifier) public string Name { get; init; } = name; public PgTypeId PgTypeId { get; init; } = pgTypeId; public int TypeModifier { get; init; } = typeModifier; + + public static Field CreateUnspecified(PgTypeId pgTypeId) => new("?", pgTypeId, -1); } diff --git a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs index 921d27e19e..46ee4e40b8 100644 --- a/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/AdoTypeInfoResolverFactory.cs @@ -52,8 +52,7 @@ class Resolver : IPgTypeInfoResolver || options.DatabaseInfo.GetPostgresType(dataTypeName) is not PostgresEnumType) return null; - return new PgConcreteTypeInfo(options, TextConverter.CreateStringConverter(options.TextEncoding), dataTypeName, - unboxedType: type == typeof(object) ? 
typeof(string) : null); + return new PgConcreteTypeInfo(options, TextConverter.CreateStringConverter(options.TextEncoding), dataTypeName, requestedType: type); } static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) @@ -91,13 +90,13 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) static (options, mapping, _) => mapping.CreateInfo(options, new CharTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text)); // Uses the bytea converters, as neither type has a header. mappings.AddType(DataTypeNames.Text, - static (options, mapping, _) => mapping.CreateInfo(options, new ArrayByteaConverter()), + static (options, mapping, _) => mapping.CreateInfo(options, new ArrayByteaConverter(supportsTextFormat: true)), MatchRequirement.DataTypeName); mappings.AddStructType>(DataTypeNames.Text, - static (options, mapping, _) => mapping.CreateInfo(options, new ReadOnlyMemoryByteaConverter()), + static (options, mapping, _) => mapping.CreateInfo(options, new ReadOnlyMemoryByteaConverter(supportsTextFormat: true)), MatchRequirement.DataTypeName); mappings.AddType(DataTypeNames.Text, - static (options, mapping, _) => new PgConcreteTypeInfo(options, new StreamConverter(supportsTextFormat: true), new DataTypeName(mapping.DataTypeName), unboxedType: mapping.Type != typeof(Stream) ? mapping.Type : null), + static (options, mapping, _) => new PgConcreteTypeInfo(options, new StreamConverter(supportsTextFormat: true), new DataTypeName(mapping.DataTypeName), requestedType: mapping.Type), mapping => mapping with { MatchRequirement = MatchRequirement.DataTypeName, TypeMatchPredicate = type => typeof(Stream).IsAssignableFrom(type) }); //Special mappings, these have no corresponding array mapping. 
mappings.AddType(DataTypeNames.Text, @@ -118,13 +117,13 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) static (options, mapping, _) => mapping.CreateInfo(options, new CharTextConverter(options.TextEncoding), preferredFormat: DataFormat.Text)); // Uses the bytea converters, as neither type has a header. mappings.AddType(dataTypeName, - static (options, mapping, _) => mapping.CreateInfo(options, new ArrayByteaConverter()), + static (options, mapping, _) => mapping.CreateInfo(options, new ArrayByteaConverter(supportsTextFormat: true)), MatchRequirement.DataTypeName); mappings.AddStructType>(dataTypeName, - static (options, mapping, _) => mapping.CreateInfo(options, new ReadOnlyMemoryByteaConverter()), + static (options, mapping, _) => mapping.CreateInfo(options, new ReadOnlyMemoryByteaConverter(supportsTextFormat: true)), MatchRequirement.DataTypeName); mappings.AddType(dataTypeName, - static (options, mapping, _) => new PgConcreteTypeInfo(options, new StreamConverter(supportsTextFormat: true), new DataTypeName(mapping.DataTypeName), unboxedType: mapping.Type != typeof(Stream) ? mapping.Type : null), + static (options, mapping, _) => new PgConcreteTypeInfo(options, new StreamConverter(supportsTextFormat: true), new DataTypeName(mapping.DataTypeName), requestedType: mapping.Type), mapping => mapping with { MatchRequirement = MatchRequirement.DataTypeName, TypeMatchPredicate = type => typeof(Stream).IsAssignableFrom(type) }); //Special mappings, these have no corresponding array mapping. 
mappings.AddType(dataTypeName, @@ -142,13 +141,13 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) mappings.AddStructType(DataTypeNames.Jsonb, static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new CharTextConverter(options.TextEncoding)))); mappings.AddType(DataTypeNames.Jsonb, - static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new ArrayByteaConverter())), + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new ArrayByteaConverter(supportsTextFormat: true))), MatchRequirement.DataTypeName); mappings.AddStructType>(DataTypeNames.Jsonb, - static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter>(jsonbVersion, new ReadOnlyMemoryByteaConverter())), + static (options, mapping, _) => mapping.CreateInfo(options, new VersionPrefixedTextConverter>(jsonbVersion, new ReadOnlyMemoryByteaConverter(supportsTextFormat: true))), MatchRequirement.DataTypeName); mappings.AddType(DataTypeNames.Jsonb, - static (options, mapping, _) => new PgConcreteTypeInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new StreamConverter(supportsTextFormat: true)), new DataTypeName(mapping.DataTypeName), unboxedType: mapping.Type != typeof(Stream) ? mapping.Type : null), + static (options, mapping, _) => new PgConcreteTypeInfo(options, new VersionPrefixedTextConverter(jsonbVersion, new StreamConverter(supportsTextFormat: true)), new DataTypeName(mapping.DataTypeName), requestedType: mapping.Type), mapping => mapping with { MatchRequirement = MatchRequirement.DataTypeName, TypeMatchPredicate = type => typeof(Stream).IsAssignableFrom(type) }); //Special mappings, these have no corresponding array mapping. 
mappings.AddType(DataTypeNames.Jsonb, @@ -172,18 +171,18 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) // Bytea mappings.AddType(DataTypeNames.Bytea, - static (options, mapping, _) => mapping.CreateInfo(options, new ArrayByteaConverter()), isDefault: true); + static (options, mapping, _) => mapping.CreateInfo(options, new ArrayByteaConverter(supportsTextFormat: false)), isDefault: true); mappings.AddStructType>(DataTypeNames.Bytea, - static (options, mapping, _) => mapping.CreateInfo(options, new ReadOnlyMemoryByteaConverter())); + static (options, mapping, _) => mapping.CreateInfo(options, new ReadOnlyMemoryByteaConverter(supportsTextFormat: false))); mappings.AddType(DataTypeNames.Bytea, // TODO handling bytea textually would require conversions to hex strings, so currently we don't support it. - static (options, mapping, _) => new PgConcreteTypeInfo(options, new StreamConverter(supportsTextFormat: false), new DataTypeName(mapping.DataTypeName), unboxedType: mapping.Type != typeof(Stream) ? 
mapping.Type : null), + static (options, mapping, _) => new PgConcreteTypeInfo(options, new StreamConverter(supportsTextFormat: false), new DataTypeName(mapping.DataTypeName), requestedType: mapping.Type), mapping => mapping with { TypeMatchPredicate = type => typeof(Stream).IsAssignableFrom(type) }); // Varbit mappings.AddType(DataTypeNames.Varbit, static (options, mapping, _) => mapping.CreateInfo(options, - new PolymorphicBitStringTypeInfoProvider(options, options.GetCanonicalTypeId(DataTypeNames.Varbit)), includeDataTypeName: true, supportsWriting: false)); + new PolymorphicBitStringTypeInfoProvider(options, options.GetCanonicalTypeId(DataTypeNames.Varbit)), includeDataTypeName: true)); mappings.AddType(DataTypeNames.Varbit, static (options, mapping, _) => mapping.CreateInfo(options, new BitArrayBitStringConverter()), isDefault: true); mappings.AddStructType(DataTypeNames.Varbit, @@ -194,7 +193,7 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) // Bit mappings.AddType(DataTypeNames.Bit, static (options, mapping, _) => mapping.CreateInfo(options, - new PolymorphicBitStringTypeInfoProvider(options, options.GetCanonicalTypeId(DataTypeNames.Bit)), includeDataTypeName: true, supportsWriting: false)); + new PolymorphicBitStringTypeInfoProvider(options, options.GetCanonicalTypeId(DataTypeNames.Bit)), includeDataTypeName: true)); mappings.AddType(DataTypeNames.Bit, static (options, mapping, _) => mapping.CreateInfo(options, new BitArrayBitStringConverter()), isDefault: true); mappings.AddStructType(DataTypeNames.Bit, diff --git a/src/Npgsql/Internal/ResolverFactories/ExtraConversionsTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/ExtraConversionsTypeInfoResolverFactory.cs index 5809695b7d..187748ccd3 100644 --- a/src/Npgsql/Internal/ResolverFactories/ExtraConversionsTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/ExtraConversionsTypeInfoResolverFactory.cs @@ -91,9 +91,9 @@ static 
TypeInfoMappingCollection AddInfos(TypeInfoMappingCollection mappings) // Bytea mappings.AddStructType>(DataTypeNames.Bytea, - static (options, mapping, _) => mapping.CreateInfo(options, new ArraySegmentByteaConverter())); + static (options, mapping, _) => mapping.CreateInfo(options, new ArraySegmentByteaConverter(supportsTextFormat: false))); mappings.AddStructType>(DataTypeNames.Bytea, - static (options, mapping, _) => mapping.CreateInfo(options, new MemoryByteaConverter())); + static (options, mapping, _) => mapping.CreateInfo(options, new MemoryByteaConverter(supportsTextFormat: false))); // Varbit mappings.AddType(DataTypeNames.Varbit, diff --git a/src/Npgsql/Internal/ResolverFactories/NetworkTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/NetworkTypeInfoResolverFactory.cs index d641cde931..3cd2d14354 100644 --- a/src/Npgsql/Internal/ResolverFactories/NetworkTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/NetworkTypeInfoResolverFactory.cs @@ -33,7 +33,7 @@ static TypeInfoMappingCollection AddMappings(TypeInfoMappingCollection mappings) // There are certain IPAddress values like Loopback or Any that return a *private* derived type (see https://github.com/dotnet/runtime/issues/27870). mappings.AddType(DataTypeNames.Inet, static (options, mapping, _) => new PgConcreteTypeInfo(options, new IPAddressConverter(), new DataTypeName(mapping.DataTypeName), - unboxedType: mapping.Type != typeof(IPAddress) ? 
mapping.Type : null), + requestedType: mapping.Type), mapping => mapping with { MatchRequirement = MatchRequirement.Single, diff --git a/src/Npgsql/Internal/ResolverFactories/UnmappedTypeInfoResolverFactory.cs b/src/Npgsql/Internal/ResolverFactories/UnmappedTypeInfoResolverFactory.cs index db3a5fc772..98e285a887 100644 --- a/src/Npgsql/Internal/ResolverFactories/UnmappedTypeInfoResolverFactory.cs +++ b/src/Npgsql/Internal/ResolverFactories/UnmappedTypeInfoResolverFactory.cs @@ -85,7 +85,8 @@ class RangeResolver : DynamicTypeInfoResolver if (subInfo is not PgConcreteTypeInfo) return null; - subInfo = subInfo.ToNonBoxing(); + subInfo = subInfo.ToExactTypeInfo(); + var subConcrete = (PgConcreteTypeInfo)subInfo; var converterType = typeof(NpgsqlRange<>).MakeGenericType(subInfo.Type); @@ -96,8 +97,8 @@ class RangeResolver : DynamicTypeInfoResolver (PgConverter)Activator.CreateInstance(typeof(RangeConverter<>).MakeGenericType(subInfo.Type), ((PgConcreteTypeInfo)subInfo).Converter)!, new DataTypeName(mapping.DataTypeName), - unboxedType: matchedType is not null && matchedType != converterType ? 
converterType : null - ) { PreferredFormat = subInfo.PreferredFormat, SupportsWriting = subInfo.SupportsWriting }, + requestedType: matchedType + ) { PreferredFormat = subConcrete.PreferredFormat, SupportsWriting = subConcrete.SupportsWriting }, mapping => mapping with { MatchRequirement = MatchRequirement.DataTypeName }); } } @@ -144,7 +145,8 @@ class MultirangeResolver : DynamicTypeInfoResolver if (subInfo is not PgConcreteTypeInfo) return null; - subInfo = subInfo.ToNonBoxing(); + subInfo = subInfo.ToExactTypeInfo(); + var subConcrete = (PgConcreteTypeInfo)subInfo; var converterType = subInfo.Type.MakeArrayType(); @@ -155,8 +157,8 @@ class MultirangeResolver : DynamicTypeInfoResolver (PgConverter)Activator.CreateInstance(typeof(MultirangeConverter<,>).MakeGenericType(converterType, subInfo.Type), ((PgConcreteTypeInfo)subInfo).Converter)!, new DataTypeName(mapping.DataTypeName), - unboxedType: type is not null && type != converterType ? converterType : null - ) { PreferredFormat = subInfo.PreferredFormat, SupportsWriting = subInfo.SupportsWriting }, + requestedType: type + ) { PreferredFormat = subConcrete.PreferredFormat, SupportsWriting = subConcrete.SupportsWriting }, mapping => mapping with { MatchRequirement = MatchRequirement.DataTypeName }); } } diff --git a/src/Npgsql/Internal/TypeInfoCache.cs b/src/Npgsql/Internal/TypeInfoCache.cs index 91a6de9295..ad98d1613f 100644 --- a/src/Npgsql/Internal/TypeInfoCache.cs +++ b/src/Npgsql/Internal/TypeInfoCache.cs @@ -124,9 +124,9 @@ static TypeInfoCache() if (type is not null && info.Type != type) { - // Types were not equal, throw for IsBoxing = false, otherwise we throw when the returned type isn't assignable to the requested type (after unboxing). 
- if (!info.IsBoxing || !info.Type.IsAssignableTo(type)) - throw new InvalidOperationException($"A CLR type '{type}' was passed but the resolved PgTypeInfo does not have an equal Type: {info.Type}."); + // Types were not equal, throw for HasExactType = true, otherwise we throw when the returned type isn't assignable to the requested type. + if (info.HasExactType || !info.Type.IsAssignableTo(type)) + throw new InvalidOperationException($"A CLR type '{type}' was passed but the resolved PgTypeInfo does not have a compatible type: {info.Type}."); } return info; diff --git a/src/Npgsql/Internal/TypeInfoMapping.cs b/src/Npgsql/Internal/TypeInfoMapping.cs index 81893047c3..7f2346add0 100644 --- a/src/Npgsql/Internal/TypeInfoMapping.cs +++ b/src/Npgsql/Internal/TypeInfoMapping.cs @@ -156,8 +156,9 @@ public TypeInfoMappingCollection(IEnumerable items) if (fallback is { } fbMapping) { + Debug.Assert(type is not null); var resolvedDataTypeName = ResolveFullyQualifiedDataTypeName(dataTypeName, fbMapping.DataTypeName, options); - return fbMapping.Factory(options, fbMapping with { Type = type!, DataTypeName = resolvedDataTypeName }, dataTypeName is not null); + return fbMapping.Factory(options, fbMapping with { Type = type, DataTypeName = resolvedDataTypeName }, dataTypeName is not null); } return null; @@ -203,14 +204,14 @@ static TypeInfoFactory CreateComposedFactory(Type mappingType, TypeInfoMapping i if (!DataTypeName.IsFullyQualified(innerMapping.DataTypeName.AsSpan())) resolvedInnerMapping = innerMapping with { DataTypeName = new DataTypeName(mapping.DataTypeName).Schema + "." + innerMapping.DataTypeName }; - var innerInfo = innerMapping.Factory(options, resolvedInnerMapping, requiresDataTypeName); - var converter = mapper(mapping, innerInfo); - var preferredFormat = copyPreferredFormat ? 
innerInfo.PreferredFormat : null; - var unboxedType = ComputeUnboxedType(defaultType: mappingType, converter.TypeToConvert, mapping.Type); - var readingSupported = innerInfo.SupportsReading && (supportsReading ?? PgTypeInfo.GetDefaultSupportsReading(converter.TypeToConvert, unboxedType)); - var writingSupported = innerInfo.SupportsWriting && (supportsWriting ?? true); + var innerConcrete = (PgConcreteTypeInfo)innerMapping.Factory(options, resolvedInnerMapping, requiresDataTypeName); + var converter = mapper(mapping, innerConcrete); + var preferredFormat = copyPreferredFormat ? innerConcrete.PreferredFormat : null; + var readingSupported = innerConcrete.SupportsReading + && (supportsReading ?? PgConcreteTypeInfo.GetDefaultSupportsReading(converter.TypeToConvert, requestedType: mapping.Type)); + var writingSupported = innerConcrete.SupportsWriting && (supportsWriting ?? true); - return new PgConcreteTypeInfo(options, converter, options.GetCanonicalTypeId(new DataTypeName(mapping.DataTypeName)), unboxedType) + return new PgConcreteTypeInfo(options, converter, options.GetCanonicalTypeId(new DataTypeName(mapping.DataTypeName)), requestedType: mapping.Type) { PreferredFormat = preferredFormat, SupportsReading = readingSupported, @@ -226,51 +227,18 @@ static TypeInfoFactory CreateComposedFactory(Type mappingType, TypeInfoMapping i if (!DataTypeName.IsFullyQualified(innerMapping.DataTypeName.AsSpan())) resolvedInnerMapping = innerMapping with { DataTypeName = new DataTypeName(mapping.DataTypeName).Schema + "." + innerMapping.DataTypeName }; - var innerInfo = (PgProviderTypeInfo)innerMapping.Factory(options, resolvedInnerMapping, requiresDataTypeName); - var typeInfoProvider = mapper(mapping, innerInfo); - var preferredFormat = copyPreferredFormat ? innerInfo.PreferredFormat : null; - var unboxedType = ComputeUnboxedType(defaultType: mappingType, typeInfoProvider.TypeToConvert, mapping.Type); - var readingSupported = innerInfo.SupportsReading && (supportsReading ?? 
PgTypeInfo.GetDefaultSupportsReading(typeInfoProvider.TypeToConvert, unboxedType)); - var writingSupported = innerInfo.SupportsWriting && (supportsWriting ?? true); + var innerInfo = innerMapping.Factory(options, resolvedInnerMapping, requiresDataTypeName); + + var providerInfo = (PgProviderTypeInfo)innerInfo; + var typeInfoProvider = mapper(mapping, providerInfo); // We include the data type name if the inner info did so as well. // This way we can rely on its logic around resolvedDataTypeName, including when it ignores that flag. PgTypeId? pgTypeId = innerInfo.PgTypeId is not null ? options.GetCanonicalTypeId(new DataTypeName(mapping.DataTypeName)) : null; - return new PgProviderTypeInfo(options, typeInfoProvider, pgTypeId, unboxedType) - { - PreferredFormat = preferredFormat, - SupportsReading = readingSupported, - SupportsWriting = writingSupported - }; + return new PgProviderTypeInfo(options, typeInfoProvider, pgTypeId, requestedType: mapping.Type); }; - static Type? ComputeUnboxedType(Type defaultType, Type converterType, Type matchedType) - { - // The minimal hierarchy that should hold for things to work is object < converterType < matchedType. - // Though these types could often be seen in a hierarchy: object < converterType < defaultType < matchedType. - // Some caveats with the latter being for instance Array being the matchedType while the defaultType is int[]. - Debug.Assert(converterType.IsAssignableFrom(matchedType) || matchedType == typeof(object)); - Debug.Assert(converterType.IsAssignableFrom(defaultType)); - - // A special case for object matches, where we return a more specific type than was matched. - // This is to report e.g. Array converters as Array when their matched type was object. - if (matchedType == typeof(object)) - return converterType; - - // This is to report e.g. Array converters as int[,,,] when their matched type was such. 
- if (matchedType != defaultType) - return matchedType; - - // If defaultType does not equal converterType we take defaultType as it's more specific. - // This is to report e.g. Array converters as int[] when their matched type was their default type. - if (defaultType != converterType) - return defaultType; - - // Keep the converter type. - return null; - } - public void Add(TypeInfoMapping mapping) => _items.Add(mapping); public void AddRange(TypeInfoMappingCollection collection) => _items.AddRange(collection._items); @@ -526,7 +494,7 @@ PgTypeInfo CreateComposedPerInstance(PgTypeInfo innerInfo, PgTypeInfo nullableIn (PgConverter)((PgConcreteTypeInfo)nullableInnerInfo).Converter); return new PgConcreteTypeInfo(innerInfo.Options, converter, - innerInfo.Options.GetCanonicalTypeId(new DataTypeName(dataTypeName)), unboxedType: typeof(Array)) { SupportsWriting = false }; + innerInfo.Options.GetCanonicalTypeId(new DataTypeName(dataTypeName)), requestedType: typeof(object)) { SupportsWriting = false }; } } @@ -640,7 +608,7 @@ PgTypeInfo CreateComposedPerInstance(PgTypeInfo innerInfo, PgTypeInfo nullableIn (PgProviderTypeInfo)nullableInnerInfo); return new PgProviderTypeInfo(innerInfo.Options, provider, - innerInfo.Options.GetCanonicalTypeId(new DataTypeName(dataTypeName)), unboxedType: typeof(Array)) { SupportsWriting = false }; + innerInfo.Options.GetCanonicalTypeId(new DataTypeName(dataTypeName)), requestedType: typeof(object)); } } @@ -707,43 +675,39 @@ static string GetArrayDataTypeName(string dataTypeName) static ArrayConverter CreateArrayBasedConverter(TypeInfoMapping mapping, PgTypeInfo elemInfo) { - if (!elemInfo.IsBoxing) - return ArrayConverter.CreateArrayBased((PgConcreteTypeInfo)elemInfo, mapping.Type); + if (!elemInfo.HasExactType) + ThrowRequiresExactType(provider: false); - ThrowBoxingNotSupported(provider: false); - return default; + return ArrayConverter.CreateArrayBased((PgConcreteTypeInfo)elemInfo, mapping.Type); } static ArrayConverter> 
CreateListBasedConverter(TypeInfoMapping mapping, PgTypeInfo elemInfo) { - if (!elemInfo.IsBoxing) - return ArrayConverter>.CreateListBased((PgConcreteTypeInfo)elemInfo); + if (!elemInfo.HasExactType) + ThrowRequiresExactType(provider: false); - ThrowBoxingNotSupported(provider: false); - return default; + return ArrayConverter>.CreateListBased((PgConcreteTypeInfo)elemInfo); } static ArrayTypeInfoProvider CreateArrayBasedTypeInfoProvider(TypeInfoMapping mapping, PgProviderTypeInfo elemInfo) { - if (!elemInfo.IsBoxing) - return new ArrayTypeInfoProvider(elemInfo, mapping.Type); + if (!elemInfo.HasExactType) + ThrowRequiresExactType(provider: true); - ThrowBoxingNotSupported(provider: true); - return default; + return new ArrayTypeInfoProvider(elemInfo, mapping.Type); } static ArrayTypeInfoProvider, TElement> CreateListBasedTypeInfoProvider(TypeInfoMapping mapping, PgProviderTypeInfo elemInfo) { - if (!elemInfo.IsBoxing) - return new ArrayTypeInfoProvider, TElement>(elemInfo, mapping.Type); + if (!elemInfo.HasExactType) + ThrowRequiresExactType(provider: true); - ThrowBoxingNotSupported(provider: true); - return default; + return new ArrayTypeInfoProvider, TElement>(elemInfo, mapping.Type); } [DoesNotReturn] - static void ThrowBoxingNotSupported(bool provider) - => throw new InvalidOperationException($"Boxing converters are not supported, manually construct a mapping over a casting converter{(provider ? " type info provider" : "")} instead."); + static void ThrowRequiresExactType(bool provider) + => throw new InvalidOperationException($"An exact-type info is required here; manually construct a mapping over a casting converter{(provider ? " type info provider" : "")} instead."); } [Experimental(NpgsqlDiagnostics.ConvertersExperimental)] @@ -812,26 +776,9 @@ public static PgTypeInfo CreateInfo(this TypeInfoMapping mapping, PgSerializerOp /// The provider to create a PgProviderTypeInfo for. 
/// Whether to pass mapping.DataTypeName to the PgProviderTypeInfo constructor, mandatory when TypeInfoFactory(..., requiresDataTypeName: true). /// The created info instance. - public static PgProviderTypeInfo CreateInfo(this TypeInfoMapping mapping, PgSerializerOptions options, PgConcreteTypeInfoProvider provider, bool includeDataTypeName) - => new(options, provider, includeDataTypeName ? new DataTypeName(mapping.DataTypeName) : null) - { - PreferredFormat = null - }; - - /// - /// Creates a PgProviderTypeInfo from a mapping, options, and a provider. - /// - /// The mapping to create an info for. - /// The options to use. - /// The provider to create a PgProviderTypeInfo for. - /// Whether to pass mapping.DataTypeName to the PgProviderTypeInfo constructor, mandatory when TypeInfoFactory(..., requiresDataTypeName: true). - /// Whether to prefer a specific data format for this info, when null it defaults to the most suitable format. - /// Whether the converters returned from the given provider support writing. - /// The created info instance. - public static PgProviderTypeInfo CreateInfo(this TypeInfoMapping mapping, PgSerializerOptions options, PgConcreteTypeInfoProvider provider, bool includeDataTypeName, DataFormat? preferredFormat = null, bool supportsWriting = true) - => new(options, provider, includeDataTypeName ? new DataTypeName(mapping.DataTypeName) : null) - { - PreferredFormat = preferredFormat, - SupportsWriting = supportsWriting - }; + public static PgTypeInfo CreateInfo(this TypeInfoMapping mapping, PgSerializerOptions options, PgConcreteTypeInfoProvider provider, bool includeDataTypeName) + { + PgTypeId? pgTypeId = includeDataTypeName ? 
new PgTypeId(new DataTypeName(mapping.DataTypeName)) : null; + return new PgProviderTypeInfo(options, provider, pgTypeId); + } } diff --git a/src/Npgsql/NpgsqlBinaryExporter.cs b/src/Npgsql/NpgsqlBinaryExporter.cs index 7d3e3cd852..bccb34507e 100644 --- a/src/Npgsql/NpgsqlBinaryExporter.cs +++ b/src/Npgsql/NpgsqlBinaryExporter.cs @@ -38,7 +38,7 @@ public sealed class NpgsqlBinaryExporter : ICancelable /// int NumColumns { get; set; } - PgConverterInfo[] _columnInfoCache; + ReadConversionContext[] _conversionContextCache; readonly ILogger _copyLogger; @@ -61,7 +61,7 @@ internal NpgsqlBinaryExporter(NpgsqlConnector connector) _connector = connector; _buf = connector.ReadBuffer; _column = BeforeRow; - _columnInfoCache = null!; + _conversionContextCache = null!; _copyLogger = connector.LoggingConfiguration.CopyLogger; } @@ -101,7 +101,7 @@ internal async Task Init(string copyToCommand, bool async, CancellationToken can _state = ExporterState.Ready; NumColumns = copyOutResponse.NumColumns; - _columnInfoCache = new PgConverterInfo[NumColumns]; + _conversionContextCache = new ReadConversionContext[NumColumns]; _rowsExported = 0; _endOfMessagePos = _buf.CumulativeReadPosition; await ReadHeader(async).ConfigureAwait(false); @@ -276,15 +276,8 @@ T Read(NpgsqlDbType? type) if (reader.FieldIsDbNull) return DbNullOrThrow(); - var info = GetInfo(typeof(T), type, out var asObject); - - reader.StartRead(info.BufferRequirement); - var result = asObject - ? (T)info.Converter.ReadAsObject(reader) - : info.Converter.UnsafeDowncast().Read(reader); - reader.EndRead(); - - return result; + var typeInfo = GetConversionContext(typeof(T), type, out var bindingContext); + return typeInfo.ReadFieldValue(reader, bindingContext); } finally { @@ -310,15 +303,8 @@ async ValueTask ReadAsync(NpgsqlDbType? 
type, CancellationToken cancellati if (reader.FieldIsDbNull) return DbNullOrThrow(); - var info = GetInfo(typeof(T), type, out var asObject); - - await reader.StartReadAsync(info.BufferRequirement, cancellationToken).ConfigureAwait(false); - var result = asObject - ? (T)await info.Converter.ReadAsObjectAsync(reader, cancellationToken).ConfigureAwait(false) - : await info.Converter.UnsafeDowncast().ReadAsync(reader, cancellationToken).ConfigureAwait(false); - await reader.EndReadAsync().ConfigureAwait(false); - - return result; + var typeInfo = GetConversionContext(typeof(T), type, out var bindingContext); + return await typeInfo.ReadFieldValueAsync(reader, bindingContext, cancellationToken).ConfigureAwait(false); } finally { @@ -337,36 +323,39 @@ static T DbNullOrThrow() throw new InvalidCastException("Column is null"); } - PgConverterInfo GetInfo(Type type, NpgsqlDbType? npgsqlDbType, out bool asObject) + PgConcreteTypeInfo GetConversionContext(Type type, NpgsqlDbType? npgsqlDbType, out PgFieldBinding binding) { - ref var cachedInfo = ref _columnInfoCache[_column]; - var converterInfo = cachedInfo.IsDefault ? cachedInfo = CreateConverterInfo(type, npgsqlDbType) : cachedInfo; - asObject = converterInfo.IsBoxingConverter; - return converterInfo; - } + ref var contextRef = ref _conversionContextCache[_column]; + var context = contextRef.IsDefault ? contextRef = GetInfoAndBind(type, npgsqlDbType) : contextRef; + binding = context.Binding; + return context.TypeInfo; - PgConverterInfo CreateConverterInfo(Type type, NpgsqlDbType? npgsqlDbType = null) - { - var options = _connector.SerializerOptions; - PgTypeId? pgTypeId = null; - if (npgsqlDbType.HasValue) + ReadConversionContext GetInfoAndBind(Type type, NpgsqlDbType? npgsqlDbType) { - pgTypeId = npgsqlDbType.Value.ToDataTypeName() is { } name - ? options.GetCanonicalTypeId(name) - // Handle plugin types via lookup. 
- : GetRepresentationalOrDefault(npgsqlDbType.Value.ToUnqualifiedDataTypeNameOrThrow()); - } - var info = options.GetTypeInfoInternal(type, pgTypeId) - ?? throw new NotSupportedException($"Reading is not supported for type '{type}'{(npgsqlDbType is null ? "" : $" and NpgsqlDbType '{npgsqlDbType}'")}"); + var options = _connector.SerializerOptions; + PgTypeId? pgTypeId = null; + if (npgsqlDbType.HasValue) + { + pgTypeId = npgsqlDbType.Value.ToDataTypeName() is { } name + ? options.GetCanonicalTypeId(name) + // Handle plugin types via lookup. + : GetRepresentationalOrDefault(npgsqlDbType.Value.ToUnqualifiedDataTypeNameOrThrow()); + } + var typeInfo = options.GetTypeInfoInternal(type, pgTypeId) + ?? throw new NotSupportedException($"Reading is not supported for type '{type}'{(npgsqlDbType is null ? "" : $" and NpgsqlDbType '{npgsqlDbType}'")}"); - // Binary export has no type info so we only do caller-directed interpretation of data. - return info.Bind(new Field("?", - info.PgTypeId ?? ((PgProviderTypeInfo)info).GetDefaultConcreteTypeInfo(null).PgTypeId, -1), DataFormat.Binary); + // Binary export has no type info so we only do caller-directed interpretation of data. + var concreteTypeInfo = typeInfo.MakeConcreteForField( + Field.CreateUnspecified(typeInfo.PgTypeId ?? 
((PgProviderTypeInfo)typeInfo).GetDefault(null).PgTypeId)); + if (!concreteTypeInfo.SupportsReading) + AdoSerializerHelpers.ThrowReadingNotSupported(type, options, concreteTypeInfo.PgTypeId, resolved: true); + return new(concreteTypeInfo, concreteTypeInfo.BindField(DataFormat.Binary)); - PgTypeId GetRepresentationalOrDefault(string dataTypeName) - { - var type = options.DatabaseInfo.GetPostgresType(dataTypeName); - return options.ToCanonicalTypeId(type.GetRepresentationalType()); + PgTypeId GetRepresentationalOrDefault(string dataTypeName) + { + var type = options.DatabaseInfo.GetPostgresType(dataTypeName); + return options.ToCanonicalTypeId(type.GetRepresentationalType()); + } } } @@ -428,7 +417,7 @@ void MoveNextColumn(bool resumableOp) _column++; _buf.Ensure(sizeof(int)); var columnLen = _buf.ReadInt32(); - PgReader.Init(columnLen, DataFormat.Binary, resumableOp); + PgReader.Init(DataFormat.Binary, columnLen, resumableOp); } async ValueTask MoveNextColumnAsync(bool resumableOp) @@ -440,7 +429,7 @@ async ValueTask MoveNextColumnAsync(bool resumableOp) _column++; await _buf.Ensure(sizeof(int), async: true).ConfigureAwait(false); var columnLen = _buf.ReadInt32(); - PgReader.Init(columnLen, DataFormat.Binary, resumableOp); + PgReader.Init(DataFormat.Binary, columnLen, resumableOp); } void ThrowIfNotOnRow() diff --git a/src/Npgsql/NpgsqlBinaryImporter.cs b/src/Npgsql/NpgsqlBinaryImporter.cs index 08f2a90844..ef73700089 100644 --- a/src/Npgsql/NpgsqlBinaryImporter.cs +++ b/src/Npgsql/NpgsqlBinaryImporter.cs @@ -6,7 +6,6 @@ using Microsoft.Extensions.Logging; using Npgsql.BackendMessages; using Npgsql.Internal; -using Npgsql.Internal.Postgres; using NpgsqlTypes; using InfiniteTimeout = System.Threading.Timeout; using static Npgsql.Util.Statics; @@ -278,14 +277,13 @@ async Task Core(bool async, T value, NpgsqlDbType? npgsqlDbType, string? dataTyp // We only retrieve previous values if anything actually changed. 
// For object typed parameters we must do so whenever setting NpgsqlParameter.Value would reset the type info. PgTypeInfo? previousTypeInfo = null; - PgConverter? previousConverter = null; - PgTypeId previousTypeId = default; + PgConcreteTypeInfo? previousConcreteTypeInfo = null; if (!newParam && ( (typeof(T) == typeof(object) && param.ShouldResetObjectTypeInfo(value)) || param._npgsqlDbType != npgsqlDbType || param._dataTypeName != dataTypeName)) { - param.GetResolutionInfo(out previousTypeInfo, out previousConverter, out previousTypeId); + param.GetResolutionInfo(out previousTypeInfo, out previousConcreteTypeInfo); if (!newParam) { param.ResetDbType(); @@ -300,14 +298,14 @@ async Task Core(bool async, T value, NpgsqlDbType? npgsqlDbType, string? dataTyp param.TypedValue = value; param.ResolveTypeInfo(_connector.SerializerOptions, _connector.DbTypeResolver); - if (previousTypeInfo is not null && previousConverter is not null && param.PgTypeId != previousTypeId) + if (previousTypeInfo is not null && previousConcreteTypeInfo is not null && param.PgTypeId != previousConcreteTypeInfo.PgTypeId) { var currentPgTypeId = param.PgTypeId; // We should only rollback values when the stored instance was used. We'll throw before writing the new instance back anyway. // Also always rolling back could set PgTypeInfos that were resolved for a type that doesn't match the T of the NpgsqlParameter. if (!newParam) - param.SetResolutionInfo(previousTypeInfo, previousConverter, previousTypeId); - throw new InvalidOperationException($"Write for column {_column} resolves to a different PostgreSQL type: {currentPgTypeId} than the first row resolved to ({previousTypeId}). " + + param.SetResolutionInfo(previousTypeInfo, previousConcreteTypeInfo); + throw new InvalidOperationException($"Write for column {_column} resolves to a different PostgreSQL type: {currentPgTypeId} than the first row resolved to ({previousConcreteTypeInfo.PgTypeId}). 
" + $"Please make sure to use clr types that resolve to the same PostgreSQL type across rows. " + $"Alternatively pass the same NpgsqlDbType or DataTypeName to ensure the PostgreSQL type ends up to be identical." ); } @@ -559,6 +557,16 @@ void Cleanup() _connector = null; } + // Deterministically release each parameter's provider-produced write state and binding state. + // ResetDbType cascades through ResetTypeInfo which disposes both; clearing the type hints is + // incidental (the params aren't reused after the importer closes). GC would eventually catch + // anything we miss, but we'd rather not leak pooled buffers held in write state. + if (_params is not null) + { + foreach (var p in _params) + p?.ResetDbType(); + } + _buf = null; _state = ImporterState.Disposed; } diff --git a/src/Npgsql/NpgsqlDataReader.cs b/src/Npgsql/NpgsqlDataReader.cs index ea410fda64..1f75aa012f 100644 --- a/src/Npgsql/NpgsqlDataReader.cs +++ b/src/Npgsql/NpgsqlDataReader.cs @@ -111,7 +111,7 @@ internal bool IsRowBuffered /// /// Stores the last converter info resolved by column, to speed up repeated reading. /// - ColumnInfo[]? ColumnInfoCache { get; set; } + ReadConversionContext[]? ConversionContextCache { get; set; } ulong? _recordsAffected; @@ -146,7 +146,7 @@ internal void Init( long startTimestamp = 0, Task? 
sendTask = null) { - Debug.Assert(ColumnInfoCache is null); + Debug.Assert(ConversionContextCache is null); Command = command; _connection = command.InternalConnection; _behavior = behavior; @@ -364,7 +364,7 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo var statementIndex = StatementIndex; if (statementIndex >= 0) { - if (RowDescription is { } description && statements[statementIndex].IsPrepared && ColumnInfoCache is { } cache) + if (RowDescription is { } description && statements[statementIndex].IsPrepared && ConversionContextCache is { } cache) description.SetColumnInfoCache(new(cache, 0, ColumnCount)); if (statementIndex is 0 && _behavior.HasFlag(CommandBehavior.SingleResult) && !isConsuming) @@ -429,16 +429,16 @@ async Task NextResult(bool async, bool isConsuming = false, CancellationTo if (RowDescription is not null) { - if (ColumnInfoCache?.Length >= ColumnCount) - Array.Clear(ColumnInfoCache, 0, ColumnCount); + if (ConversionContextCache?.Length >= ColumnCount) + Array.Clear(ConversionContextCache, 0, ColumnCount); else { - if (ColumnInfoCache is { } cache) - ArrayPool.Shared.Return(cache, clearArray: true); - ColumnInfoCache = ArrayPool.Shared.Rent(ColumnCount); + if (ConversionContextCache is { } cache) + ArrayPool.Shared.Return(cache, clearArray: true); + ConversionContextCache = ArrayPool.Shared.Rent(ColumnCount); } if (statement.IsPrepared) - RowDescription.LoadColumnInfoCache(Connector.SerializerOptions, ColumnInfoCache); + RowDescription.LoadColumnInfoCache(Connector.SerializerOptions, ConversionContextCache); } else { @@ -630,8 +630,8 @@ async ValueTask ConsumeResultSet(bool async) ProcessMessage(completedMsg); var statement = _statements[StatementIndex]; - if (statement.IsPrepared && ColumnInfoCache is not null) - RowDescription!.SetColumnInfoCache(new(ColumnInfoCache, 0, ColumnCount)); + if (statement.IsPrepared && ConversionContextCache is not null) + RowDescription!.SetColumnInfoCache(new(ConversionContextCache, 0, 
ColumnCount)); if (statement.AppendErrorBarrier ?? Command.EnableErrorBarriers) Expect(await Connector.ReadMessage(async).ConfigureAwait(false), Connector); @@ -1186,10 +1186,10 @@ internal async Task Cleanup(bool async, bool connectionClosing = false, bool isD } } - if (ColumnInfoCache is { } cache) + if (ConversionContextCache is { } cache) { - ColumnInfoCache = null; - ArrayPool.Shared.Return(cache, clearArray: true); + ConversionContextCache = null; + ArrayPool.Shared.Return(cache, clearArray: true); } // Drop any reference to a potential oversized buffer. @@ -1379,23 +1379,24 @@ public override int GetValues(object[] values) ThrowHelper.ThrowInvalidCastException_NoValue(field); Debug.Assert(!PgReader.NestedInitialized, "Unexpected nested read active, Seek(0) would seek to the start of the nested data."); - PgReader.Seek(0); + var reader = PgReader; + reader.Seek(0); - var reader = CachedFreeNestedDataReader; - if (reader != null) + var nestedReader = CachedFreeNestedDataReader; + if (nestedReader != null) { CachedFreeNestedDataReader = null; - reader.Init(compositeType); + nestedReader.Init(compositeType); } else { - reader = new NpgsqlNestedDataReader(this, null, 1, compositeType); + nestedReader = new NpgsqlNestedDataReader(this, null, 1, compositeType); } if (isArray) - reader.InitArray(); + nestedReader.InitArray(); else - reader.InitSingleRow(); - return reader; + nestedReader.InitSingleRow(); + return nestedReader; } #endregion @@ -1481,7 +1482,7 @@ public override long GetChars(int ordinal, long dataOffset, char[]? buffer, int ThrowIfNotInResult(); // Check whether we have a GetChars implementation for this column type. 
- var converter = GetInfo(ordinal, typeof(GetChars), out var dataFormat, out var bufferRequirement, out var asObject); + var context = GetConversionContext(ordinal, typeof(GetChars)); if (dataOffset is < 0 or > int.MaxValue) ThrowHelper.ThrowArgumentOutOfRangeException(nameof(dataOffset), "dataOffset must be between 0 and {0}", int.MaxValue); @@ -1490,23 +1491,17 @@ public override long GetChars(int ordinal, long dataOffset, char[]? buffer, int if (buffer != null && (length < 0 || length > buffer.Length - bufferOffset)) ThrowHelper.ThrowIndexOutOfRangeException("bufferOffset must be between 0 and {0}", buffer.Length - bufferOffset); - if (SeekToColumn(ordinal, dataFormat, resumableOp: true) is DbNullSentinel) + var reader = PgReader; + if (SeekToColumn(ordinal, context.Binding.DataFormat, resumableOp: true) is DbNullSentinel) ThrowHelper.ThrowInvalidCastException_NoValue(RowDescription[ordinal]); - var reader = PgReader; dataOffset = buffer is null ? 0 : dataOffset; if (_isSequential && reader.GetCharsRead > dataOffset) ThrowHelper.ThrowInvalidOperationException("Attempt to read a position in the column which has already been read"); reader.StartCharsRead(checked((int)dataOffset), buffer is not null ? new ArraySegment(buffer, bufferOffset, length) : (ArraySegment?)null); - - reader.StartRead(bufferRequirement); - var result = asObject - ? (GetChars)converter.ReadAsObject(reader) - : ((PgConverter)converter).Read(reader); - reader.EndRead(); - + var result = context.TypeInfo.ReadFieldValue(reader, context.Binding); reader.EndCharsRead(); return result.Read; } @@ -1561,19 +1556,15 @@ public override Task GetFieldValueAsync(int ordinal, CancellationToken can async Task Core(int ordinal, CancellationToken cancellationToken) { ThrowIfNotInResult(); - var converter = GetInfo(ordinal, typeof(T), out var dataFormat, out var bufferRequirement, out var asObject); + + var context = GetConversionContext(ordinal, type: typeof(T) == typeof(object) ? 
null : typeof(T)); using var registration = Connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); - if (await SeekToColumnAsync(ordinal, dataFormat).ConfigureAwait(false) is DbNullSentinel) - return DbNullValueOrThrow(ordinal); var reader = PgReader; - await reader.StartReadAsync(bufferRequirement, cancellationToken).ConfigureAwait(false); - var result = asObject - ? (T)await converter.ReadAsObjectAsync(reader, cancellationToken).ConfigureAwait(false) - : await converter.UnsafeDowncast().ReadAsync(reader, cancellationToken).ConfigureAwait(false); - await reader.EndReadAsync().ConfigureAwait(false); - return result; + return await SeekToColumnAsync(ordinal, context.Binding.DataFormat).ConfigureAwait(false) is DbNullSentinel + ? DbNullValueOrThrow(ordinal) + : await context.TypeInfo.ReadFieldValueAsync(reader, context.Binding, cancellationToken).ConfigureAwait(false); } } @@ -1588,18 +1579,11 @@ async Task Core(int ordinal, CancellationToken cancellationToken) T GetFieldValueCore(int ordinal) { ThrowIfNotInResult(); - var converter = GetInfo(ordinal, typeof(T), out var dataFormat, out var bufferRequirement, out var asObject); + var context = GetConversionContext(ordinal, type: typeof(T) == typeof(object) ? null : typeof(T)); - if (SeekToColumn(ordinal, dataFormat) is DbNullSentinel) - return DbNullValueOrThrow(ordinal); - - var reader = PgReader; - reader.StartRead(bufferRequirement); - var result = asObject - ? (T)converter.ReadAsObject(reader) - : converter.UnsafeDowncast().Read(reader); - reader.EndRead(); - return result; + return SeekToColumn(ordinal, context.Binding.DataFormat) is DbNullSentinel + ? DbNullValueOrThrow(ordinal) + : context.TypeInfo.ReadFieldValue(PgReader, context.Binding); } #endregion @@ -1611,20 +1595,7 @@ T GetFieldValueCore(int ordinal) /// /// The zero-based column ordinal. /// The value of the specified column. 
- public override object GetValue(int ordinal) - { - ThrowIfNotInResult(); - var format = GetDefaultInfo(ordinal, out var converter, out var bufferRequirement); - if (SeekToColumn(ordinal, format) is DbNullSentinel) - return DBNull.Value; - - var reader = PgReader; - reader.StartRead(bufferRequirement); - var result = converter.ReadAsObject(reader); - reader.EndRead(); - - return result; - } + public override object GetValue(int ordinal) => GetFieldValueCore(ordinal); /// /// Gets the value of the specified column as an instance of . @@ -1645,7 +1616,8 @@ public override object GetValue(int ordinal) public override bool IsDBNull(int ordinal) { ThrowIfNotInResult(); - return SeekToColumn(ordinal, RowDescription[ordinal].DataFormat, resumableOp: true) is DbNullSentinel; + var field = RowDescription[ordinal]; + return SeekToColumn(ordinal, field.DataFormat, resumableOp: true) is DbNullSentinel; } /// @@ -1667,8 +1639,9 @@ public override Task IsDBNullAsync(int ordinal, CancellationToken cancella async Task Core(int ordinal, CancellationToken cancellationToken) { ThrowIfNotInResult(); + var field = RowDescription[ordinal]; using var registration = Connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); - return await SeekToColumnAsync(ordinal, RowDescription[ordinal].DataFormat, resumableOp: true).ConfigureAwait(false) is DbNullSentinel; + return await SeekToColumnAsync(ordinal, field.DataFormat, resumableOp: true).ConfigureAwait(false) is DbNullSentinel; } } @@ -1860,8 +1833,8 @@ Task> GetColumnSchema(bool async, CancellationToken can #region Seeking - [MethodImpl(MethodImplOptions.AggressiveInlining)] - int SeekToColumn(int ordinal, DataFormat dataFormat, bool resumableOp = false) + [MethodImpl(MethodImplOptions.NoInlining)] + int SeekToColumn(int ordinal, DataFormat fieldFormat, bool resumableOp = false) { Debug.Assert(_isRowBuffered || _isSequential); var reader = PgReader; @@ -1883,7 +1856,7 @@ int SeekToColumn(int ordinal, 
DataFormat dataFormat, bool resumableOp = false) reader.Commit(); var columnLength = BufferSeekToColumn(column, ordinal, !_isRowBuffered); - reader.Init(columnLength, dataFormat, resumableOp); + reader.Init(fieldFormat, columnLength, resumableOp); return columnLength; static void ThrowInvalidSequentialSeek(int column, int ordinal) @@ -1892,28 +1865,27 @@ static void ThrowInvalidSequentialSeek(int column, int ordinal) $"you may only read from column ordinal '{column}' or greater."); } - ValueTask SeekToColumnAsync(int ordinal, DataFormat dataFormat, bool resumableOp = false) + ValueTask SeekToColumnAsync(int ordinal, DataFormat fieldFormat, bool resumableOp = false) { // When the row is buffered or we're rereading previous data no IO will be done. if (_isRowBuffered || _column >= ordinal) - return new(SeekToColumn(ordinal, dataFormat, resumableOp)); + return new(SeekToColumn(ordinal, fieldFormat, resumableOp)); - return Core(ordinal, dataFormat, resumableOp); + return Core(ordinal, fieldFormat, resumableOp); [AsyncMethodBuilder(typeof(PoolingAsyncValueTaskMethodBuilder<>))] - async ValueTask Core(int ordinal, DataFormat dataFormat, bool resumableOp) + async ValueTask Core(int ordinal, DataFormat fieldFormat, bool resumableOp) { Debug.Assert(!_isRowBuffered && _column < ordinal); var reader = PgReader; await reader.CommitAsync().ConfigureAwait(false); var columnLength = await BufferSeekToColumnAsync(_column, ordinal, !_isRowBuffered).ConfigureAwait(false); - reader.Init(columnLength, dataFormat, resumableOp); + reader.Init(fieldFormat, columnLength, resumableOp); return columnLength; } } - [MethodImpl(MethodImplOptions.AggressiveInlining)] int BufferSeekToColumn(int column, int ordinal, bool allowIO) { Debug.Assert(column < ordinal || !allowIO); @@ -2051,45 +2023,30 @@ T DbNullValueOrThrow(int ordinal) } [MethodImpl(MethodImplOptions.AggressiveInlining)] - PgConverter GetInfo(int ordinal, Type type, out DataFormat format, out Size bufferRequirement, out bool 
asObject) + ReadConversionContext GetConversionContext(int ordinal, Type? type) { - if ((uint)ordinal >= (uint)ColumnCount) - ThrowHelper.ThrowIndexOutOfRangeException("Ordinal is out of range, value must be between 0 and {0} (exclusive).", ColumnCount); - - ref var info = ref ColumnInfoCache![ordinal]; + ReadConversionContext context; + if (type is not null) + { + // Do the same check as the RowDescription indexer before we access the cache. + if ((uint)ordinal >= (uint)ColumnCount) + ThrowHelper.ThrowIndexOutOfRangeException("Ordinal is out of range, value must be between 0 and {0} (exclusive).", ColumnCount); - Debug.Assert(info.ConverterInfo.IsDefault || ReferenceEquals(Connector.SerializerOptions, info.ConverterInfo.TypeInfo.Options), "Cache is bleeding over"); + ref var contextRef = ref ConversionContextCache![ordinal]; - if (info.ConverterInfo.TypeToConvert == type) - { - format = info.DataFormat; - bufferRequirement = info.ConverterInfo.BufferRequirement; - asObject = info.AsObject; - return info.ConverterInfo.Converter; - } + Debug.Assert(contextRef.IsDefault || ReferenceEquals(Connector.SerializerOptions, contextRef.TypeInfo.Options), "Cache is bleeding over"); - return Slow(ref info, out format, out bufferRequirement, out asObject); + if (contextRef.TypeInfo is not { } typeInfo || !typeInfo.CanReadTo(type)) + RowDescription!.GetConversionContext(ordinal, type, ref contextRef); - [MethodImpl(MethodImplOptions.NoInlining)] - PgConverter Slow(ref ColumnInfo info, out DataFormat format, out Size bufferRequirement, out bool asObject) + context = contextRef; + } + else { - var field = RowDescription![ordinal]; - field.GetInfo(type, ref info); - format = field.DataFormat; - bufferRequirement = info.ConverterInfo.BufferRequirement; - asObject = info.AsObject; - return info.ConverterInfo.Converter; + context = RowDescription![ordinal].ObjectConversionContext; } - } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - DataFormat GetDefaultInfo(int ordinal, out 
PgConverter converter, out Size bufferRequirement) - { - var field = RowDescription![ordinal]; - - converter = field.ObjectInfo.Converter; - bufferRequirement = field.ObjectInfo.BufferRequirement; - return field.DataFormat; + return context; } /// @@ -2112,6 +2069,7 @@ void ThrowIfClosedOrDisposed() ThrowInvalidState(state); } + [MethodImpl(MethodImplOptions.AggressiveInlining)] [MemberNotNull(nameof(RowDescription))] void ThrowIfNotInResult() { diff --git a/src/Npgsql/NpgsqlNestedDataReader.cs b/src/Npgsql/NpgsqlNestedDataReader.cs index b7e8a7d242..f79942cbf3 100644 --- a/src/Npgsql/NpgsqlNestedDataReader.cs +++ b/src/Npgsql/NpgsqlNestedDataReader.cs @@ -8,6 +8,7 @@ using System.Globalization; using System.IO; using System.Runtime.CompilerServices; +using Npgsql.BackendMessages; using Npgsql.Internal.Postgres; namespace Npgsql; @@ -29,23 +30,30 @@ public sealed class NpgsqlNestedDataReader : DbDataReader int _nextRowBufferPos; ReaderState _readerState; - readonly List _columns = []; + readonly List _columns = []; long _startPos; - DataFormat Format => DataFormat.Binary; + DataFormat DataFormat => DataFormat.Binary; - readonly struct ColumnInfo(PostgresType postgresType, int bufferPos, PgTypeInfo objectOrDefaultTypeInfo, DataFormat format) + readonly struct NestedColumnInfo { - public PostgresType PostgresType { get; } = postgresType; - public int BufferPos { get; } = bufferPos; - public PgConverterInfo LastConverterInfo { get; init; } + public PostgresType PostgresType { get; } + public int BufferPos { get; } + public ReadConversionContext LastInfo { get; init; } + public PgConcreteTypeInfo ObjectTypeInfo { get; } + public PgFieldBinding ObjectBinding { get; } - public PgTypeInfo ObjectOrDefaultTypeInfo { get; } = objectOrDefaultTypeInfo; - public PgConverterInfo GetObjectOrDefaultInfo() => ObjectOrDefaultTypeInfo.Bind(Field, format); - - Field Field => new("?", ObjectOrDefaultTypeInfo.Options.PortableTypeIds ? 
PostgresType.DataTypeName : (Oid)PostgresType.OID, -1); + public NestedColumnInfo(PostgresType postgresType, int bufferPos, PgTypeInfo objectTypeInfo, DataFormat format) + { + PostgresType = postgresType; + BufferPos = bufferPos; + ObjectTypeInfo = objectTypeInfo.MakeConcreteForField(Field.CreateUnspecified(objectTypeInfo.Options.ToCanonicalTypeId(postgresType))); + if (!ObjectTypeInfo.SupportsReading) + AdoSerializerHelpers.ThrowReadingNotSupported(typeof(object), objectTypeInfo.Options, ObjectTypeInfo.PgTypeId, resolved: true); + ObjectBinding = ObjectTypeInfo.BindField(format); + } - public PgConverterInfo Bind(PgTypeInfo typeInfo) => typeInfo.Bind(Field, format); + public Field Field => Field.CreateUnspecified(ObjectTypeInfo.PgTypeId); } PgReader PgReader => _outermostReader.Buffer.PgReader; @@ -285,19 +293,18 @@ public override int GetOrdinal(string name) public override Type GetFieldType(int ordinal) { var column = CheckRowAndColumn(ordinal); - return column.GetObjectOrDefaultInfo().TypeToConvert; + return column.ObjectTypeInfo.Type; } /// public override object GetValue(int ordinal) { var columnLength = CheckRowAndColumnAndSeek(ordinal, out var column); - var info = column.GetObjectOrDefaultInfo(); if (columnLength == -1) return DBNull.Value; - using var _ = PgReader.BeginNestedRead(columnLength, info.BufferRequirement); - return info.Converter.ReadAsObject(PgReader); + using var _ = PgReader.BeginNestedRead(columnLength, column.ObjectBinding.BufferRequirement); + return column.ObjectTypeInfo.Converter.ReadAsObject(PgReader); } /// @@ -326,7 +333,7 @@ public override T GetFieldValue(int ordinal) return (T)(object)GetTextReader(ordinal); var columnLength = CheckRowAndColumnAndSeek(ordinal, out var column); - var info = GetOrAddConverterInfo(typeof(T), column, ordinal, out var asObject); + var info = GetOrAddConverterInfo(typeof(T), column, ordinal); if (columnLength == -1) { @@ -340,10 +347,8 @@ public override T GetFieldValue(int ordinal) 
ThrowHelper.ThrowInvalidCastException_NoValue(); } - using var _ = PgReader.BeginNestedRead(columnLength, info.BufferRequirement); - return asObject - ? (T)info.Converter.ReadAsObject(PgReader)! - : info.Converter.UnsafeDowncast().Read(PgReader); + using var _ = PgReader.BeginNestedRead(columnLength, info.Binding.BufferRequirement); + return info.TypeInfo.Converter.Read(PgReader); } /// @@ -371,7 +376,8 @@ public override bool Read() { var pgType = SerializerOptions.DatabaseInfo.GetPostgresType(typeOid); var pgTypeId = SerializerOptions.ToCanonicalTypeId(pgType); - _columns.Add(new ColumnInfo(pgType, bufferPos, AdoSerializerHelpers.GetTypeInfoForReading(typeof(object), pgTypeId, SerializerOptions), Format)); + _columns.Add(new NestedColumnInfo(pgType, bufferPos, + AdoSerializerHelpers.GetTypeInfoForReading(typeof(object), pgTypeId, SerializerOptions), DataFormat)); } else { @@ -379,7 +385,8 @@ public override bool Read() ? _columns[i].PostgresType : SerializerOptions.DatabaseInfo.GetPostgresType(typeOid); var pgTypeId = SerializerOptions.ToCanonicalTypeId(pgType); - _columns[i] = new ColumnInfo(pgType, bufferPos, AdoSerializerHelpers.GetTypeInfoForReading(typeof(object), pgTypeId, SerializerOptions), Format); + _columns[i] = new NestedColumnInfo(pgType, bufferPos, + AdoSerializerHelpers.GetTypeInfoForReading(typeof(object), pgTypeId, SerializerOptions), DataFormat); } var columnLen = PgReader.ReadInt32(); @@ -463,7 +470,7 @@ void CheckOnRow() throw new InvalidOperationException("No row is available"); } - ColumnInfo CheckRowAndColumn(int column) + NestedColumnInfo CheckRowAndColumn(int column) { CheckOnRow(); @@ -473,46 +480,30 @@ ColumnInfo CheckRowAndColumn(int column) return _columns[column]; } - int CheckRowAndColumnAndSeek(int ordinal, out ColumnInfo column) + int CheckRowAndColumnAndSeek(int ordinal, out NestedColumnInfo nestedColumn) { - column = CheckRowAndColumn(ordinal); - PgReader.Seek(column.BufferPos); + nestedColumn = CheckRowAndColumn(ordinal); + 
PgReader.Seek(nestedColumn.BufferPos); return PgReader.ReadInt32(); } - PgConverterInfo GetOrAddConverterInfo(Type type, ColumnInfo column, int ordinal, out bool asObject) + ReadConversionContext GetOrAddConverterInfo(Type type, NestedColumnInfo nestedColumn, int ordinal) { - if (column.LastConverterInfo is { IsDefault: false } lastInfo && lastInfo.TypeToConvert == type) - { - // As TypeInfoMappingCollection is always adding object mappings for - // default/datatypename mappings, we'll also check Converter.TypeToConvert. - // If we have an exact match we are still able to use e.g. a converter for ints in an unboxed fashion. - asObject = lastInfo.IsBoxingConverter && lastInfo.Converter.TypeToConvert != type; + if (nestedColumn.LastInfo is { IsDefault: false } lastInfo && lastInfo.TypeInfo.Type == type) return lastInfo; - } - - if (column.GetObjectOrDefaultInfo() is { IsDefault: false } odfInfo) - { - if (typeof(object) == type) - { - asObject = true; - return odfInfo; - } - - if (odfInfo.TypeToConvert == type) - { - // As TypeInfoMappingCollection is always adding object mappings for - // default/datatypename mappings, we'll also check Converter.TypeToConvert. - // If we have an exact match we are still able to use e.g. a converter for ints in an unboxed fashion. 
- asObject = odfInfo.IsBoxingConverter && odfInfo.Converter.TypeToConvert != type; - return odfInfo; - } - } - var converterInfo = column.Bind(AdoSerializerHelpers.GetTypeInfoForReading(type, SerializerOptions.ToCanonicalTypeId(column.PostgresType), SerializerOptions)); - _columns[ordinal] = column with { LastConverterInfo = converterInfo }; - asObject = converterInfo.IsBoxingConverter; - return converterInfo; + var objectInfo = (TypeInfo: nestedColumn.ObjectTypeInfo, Binding: nestedColumn.ObjectBinding); + if (objectInfo.TypeInfo is not null && (typeof(object) == type || objectInfo.TypeInfo.Type == type)) + return new(objectInfo.TypeInfo, objectInfo.Binding); + + var typeId = SerializerOptions.ToCanonicalTypeId(nestedColumn.PostgresType); + var typeInfo = AdoSerializerHelpers.GetTypeInfoForReading(type, typeId, SerializerOptions); + var concreteTypeInfo = typeInfo.MakeConcreteForField(nestedColumn.Field); + if (!concreteTypeInfo.SupportsReading) + AdoSerializerHelpers.ThrowReadingNotSupported(type, SerializerOptions, typeId, resolved: true); + var columnInfo = new ReadConversionContext(concreteTypeInfo, concreteTypeInfo.BindField(DataFormat)); + _columns[ordinal] = nestedColumn with { LastInfo = columnInfo }; + return columnInfo; } enum ReaderState diff --git a/src/Npgsql/NpgsqlParameter.cs b/src/Npgsql/NpgsqlParameter.cs index 69d4e29816..135a1be9b7 100644 --- a/src/Npgsql/NpgsqlParameter.cs +++ b/src/Npgsql/NpgsqlParameter.cs @@ -6,6 +6,7 @@ using System.Diagnostics.CodeAnalysis; using System.IO; using System.Runtime.CompilerServices; +using System.Runtime.ExceptionServices; using System.Threading; using System.Threading.Tasks; using Npgsql.Internal; @@ -35,7 +36,7 @@ public class NpgsqlParameter : DbParameter, IDbDataParameter, ICloneable private protected string _name = string.Empty; object? _value; private protected bool _useSubStream; - private protected SubReadStream? _subStream; + private protected Stream? 
_subStream; private protected string _sourceColumn; internal string TrimmedName { get; private protected set; } = PositionalName; @@ -43,15 +44,13 @@ public class NpgsqlParameter : DbParameter, IDbDataParameter, ICloneable IDbTypeResolver? _dbTypeResolver; private protected PgTypeInfo? TypeInfo { get; private set; } + private protected PgConcreteTypeInfo? ConcreteTypeInfo { get; private set; } - internal PgTypeId PgTypeId { get; private set; } - private protected PgConverter? Converter { get; private set; } + internal PgTypeId PgTypeId => ConcreteTypeInfo?.PgTypeId ?? default; - internal DataFormat Format { get; private protected set; } - private protected Size? WriteSize { get; set; } + internal DataFormat Format => _binding?.DataFormat ?? DataFormat.Binary; private protected object? _writeState; - private protected Size _bufferRequirement; - private protected bool _asObject; + private protected PgValueBinding? _binding; #endregion @@ -283,7 +282,7 @@ public override object? Value if (ShouldResetObjectTypeInfo(value)) ResetTypeInfo(); else - ResetBindingInfo(); + DisposeBindingState(); _value = value; } } @@ -483,7 +482,7 @@ public sealed override int Size if (value < -1) ThrowHelper.ThrowArgumentException($"Invalid parameter Size value '{value}'. The value must be greater than or equal to 0."); - ResetBindingInfo(); + DisposeBindingState(); _size = value; } } @@ -551,14 +550,13 @@ bool TryResolveDbTypeDataTypeName(DbType dbType, Type? type, [NotNullWhen(true)] internal void SetOutputValue(NpgsqlDataReader reader, int ordinal) { - if (GetType() == typeof(NpgsqlParameter)) + // Set Value (not _value) so we also support object typed generic params. + if (StaticValueType == typeof(object)) Value = reader.GetValue(ordinal); else - SetOutputValueCore(reader, ordinal); + SetOutputTypedValue(reader, ordinal); } - private protected virtual void SetOutputValueCore(NpgsqlDataReader reader, int ordinal) {} - internal bool ShouldResetObjectTypeInfo(object? 
value) { var currentType = TypeInfo?.Type; @@ -570,31 +568,40 @@ internal bool ShouldResetObjectTypeInfo(object? value) return valueType != typeof(DBNull) && currentType != valueType; } - internal void GetResolutionInfo(out PgTypeInfo? typeInfo, out PgConverter? converter, out PgTypeId pgTypeId) + internal void GetResolutionInfo(out PgTypeInfo? typeInfo, out PgConcreteTypeInfo? concreteTypeInfo) { typeInfo = TypeInfo; - converter = Converter; - pgTypeId = PgTypeId; + concreteTypeInfo = ConcreteTypeInfo; } - internal void SetResolutionInfo(PgTypeInfo typeInfo, PgConverter converter, PgTypeId pgTypeId) + internal void SetResolutionInfo(PgTypeInfo typeInfo, PgConcreteTypeInfo concreteTypeInfo) { - if (WriteSize is not null) - ResetBindingInfo(); + if (_binding is not null) + DisposeBindingState(); + + // Dispose any provider-produced _writeState against its current ConcreteTypeInfo before we + // overwrite it — once reassigned, the restored ConcreteTypeInfo can't dispose state produced + // by the about-to-be-discarded one. + if (_writeState is { } ws) + { + ConcreteTypeInfo?.DisposeWriteState(ws); + _writeState = null; + } TypeInfo = typeInfo; - Converter = converter; - PgTypeId = pgTypeId; + ConcreteTypeInfo = concreteTypeInfo; } /// Attempt to resolve a type info based on available (postgres) type information on the parameter. - internal void ResolveTypeInfo(PgSerializerOptions options, IDbTypeResolver? dbTypeResolver) + /// When is false (e.g. SchemaOnly), any provider-produced write state is + /// disposed immediately because no Bind call will follow to take ownership of it. + internal void ResolveTypeInfo(PgSerializerOptions options, IDbTypeResolver? dbTypeResolver, bool willBind = true) { var typeInfo = TypeInfo; + var staticValueType = StaticValueType; var previouslyResolved = ReferenceEquals(typeInfo?.Options, options); if (!previouslyResolved) { - var staticValueType = StaticValueType; var valueType = GetValueType(staticValueType); string? 
dataTypeName = null; @@ -664,10 +671,39 @@ internal void ResolveTypeInfo(PgSerializerOptions options, IDbTypeResolver? dbTy // TODO we could expose a property on a Converter/TypeInfo to indicate whether it's immutable, at that point we can reuse. if (!previouslyResolved || typeInfo is not PgConcreteTypeInfo) { - ResetBindingInfo(); - var concreteTypeInfo = GetConcreteTypeInfo(typeInfo!); - Converter = concreteTypeInfo.Converter; - PgTypeId = concreteTypeInfo.PgTypeId; + Debug.Assert(typeInfo is not null); + DisposeBindingState(); + + // Dispose any stale _writeState from a previous resolution against its current + // ConcreteTypeInfo before the branches below overwrite it — this covers the "failed resolution, + // caller fixed the value, called again" self-heal path (e.g. NpgsqlBinaryImporter's PgTypeId check). + if (_writeState is { } staleWs) + { + ConcreteTypeInfo?.DisposeWriteState(staleWs); + _writeState = null; + } + + if (staticValueType == typeof(object)) + { + // Pull from Value (not _value) so we also support object typed generic params. + var value = Value; + ConcreteTypeInfo = typeInfo.MakeConcreteForValueAsObject(value is DBNull ? null : value, out _writeState); + } + else + { + ConcreteTypeInfo = MakeConcreteTypeInfoForTypedValue(typeInfo); + } + + // If no Bind follows (SchemaOnly), release the provider-produced state immediately so + // lifecycle stays contained inside the parameter. + if ((!willBind || !ConcreteTypeInfo.SupportsWriting) && _writeState is { } ws) + { + ConcreteTypeInfo.DisposeWriteState(ws); + _writeState = null; + } + + if (!ConcreteTypeInfo.SupportsWriting) + AdoSerializerHelpers.ThrowWritingNotSupported(GetValueType(staticValueType), options, ConcreteTypeInfo.PgTypeId, _npgsqlDbType, ParameterName, resolved: true); } void ThrowNoTypeInfo() @@ -684,53 +720,117 @@ void ThrowNotSupported(string dataTypeName) $"Your database details or Npgsql type loading configuration may be incorrect. 
Alternatively your PostgreSQL installation might need to be upgraded, or an extension adding the missing data type might not have been installed."); } - // Pull from Value so we also support object typed generic params. - private protected virtual PgConcreteTypeInfo GetConcreteTypeInfo(PgTypeInfo typeInfo) - { - _asObject = true; - return typeInfo.GetObjectConcreteTypeInfo(Value, out _writeState); - } - - /// Dispose write state produced during ResolveTypeInfo when Bind won't follow (e.g. SchemaOnly). - internal void DisposeResolutionWriteState() - { - if (_writeState is { } ws) - { - _writeState = null; - TypeInfo?.DisposeWriteState(ws); - } - } - /// Bind the current value to the type info, truncate (if applicable), take its size, and do any final validation before writing. internal void Bind(out DataFormat format, out Size size, DataFormat? requiredFormat = null) { - if (TypeInfo is null) + if (TypeInfo is null || ConcreteTypeInfo is null) ThrowHelper.ThrowInvalidOperationException($"Missing type info, {nameof(ResolveTypeInfo)} needs to be called before {nameof(Bind)}."); // We might call this twice, once during validation and once during WriteBind, only compute things once. - if (WriteSize is null) + // Bind is atomic *and* self-cleaning: the local binding is only committed to _binding + // (and _writeState nulled) after every check passes, and any exception before commit disposes + // the resolution-time _writeState ourselves so callers don't need to know about it. + if (_binding is null) { if (_size > 0) - HandleSizeTruncation(); + HandleSizeTruncation(ConcreteTypeInfo); - BindCore(requiredFormat); + try + { + PgValueBinding binding; + if (_useSubStream) + { + binding = BindSubStream(); + } + else if (StaticValueType == typeof(object)) + { + // Pull from Value so we also support object typed generic params. 
+ var value = Value; + if (value is null) + ThrowHelper.ThrowInvalidOperationException($"Parameter '{ParameterName}' cannot be null, DBNull.Value should be used instead."); + + binding = ConcreteTypeInfo.BindParameterObjectValue(value, _writeState, requiredFormat); + } + else + { + binding = BindTypedValue(ConcreteTypeInfo, formatPreference: requiredFormat); + } + + // Enforce that provider-produced _writeState flows end-to-end through the binding unchanged. + // A converter that accepts _writeState as input must thread the same instance into its returned + // binding's WriteState. Swapping to a different instance is a contract violation because it + // forks the lifecycle (the resolution-time state would be orphaned and the bind-time state + // would be unowned by this parameter). + if (_writeState is not null && !ReferenceEquals(_writeState, binding.WriteState)) + ThrowHelper.ThrowInvalidOperationException( + $"Binding for parameter '{ParameterName}' replaced the provider-produced write state with a different instance. " + + "Converters must thread the write state through unchanged."); + + if (requiredFormat is not null && binding.DataFormat != requiredFormat) + ThrowHelper.ThrowNotSupportedException($"Parameter '{ParameterName}' must be written in {requiredFormat} format, but does not support this format."); + + // Binding and ownership transfer of state happen together. 
+ _binding = binding; + _writeState = null; + } + catch + { + if (_writeState is { } ws) + { + ConcreteTypeInfo.DisposeWriteState(ws); + _writeState = null; + } + if (_subStream is not null) + { + _subStream.Dispose(); + _subStream = null; + } + _useSubStream = false; + throw; + } + } + else if (requiredFormat is not null && _binding.GetValueOrDefault().DataFormat != requiredFormat) + { + ThrowHelper.ThrowNotSupportedException($"Parameter '{ParameterName}' must be written in {requiredFormat} format, but does not support this format."); } format = Format; - size = WriteSize!.Value; - if (requiredFormat is not null && format != requiredFormat) - ThrowHelper.ThrowNotSupportedException($"Parameter '{ParameterName}' must be written in {requiredFormat} format, but does not support this format."); + size = _binding.GetValueOrDefault().Size ?? -1; + + [MethodImpl(MethodImplOptions.NoInlining)] + PgValueBinding BindSubStream() + { + // Pull from Value so we also support object typed generic params. + var stream = (Stream?)Value; + Debug.Assert(stream is not null, "_useSubStream should only be true if we had a value during HandleSizeTruncation"); + int subSize; + if (stream.CanSeek) + { + var remaining = Math.Max(0, stream.Length - stream.Position); + subSize = remaining < _size ? (int)remaining : _size; + _subStream = new SubReadStream(stream, _size); + } + else + { + // TODO maybe we can move this IO. + var buffer = new byte[_size]; + var read = stream.ReadAtLeast(buffer, _size, throwOnEndOfStream: false); + subSize = Math.Min(_size, read); + _subStream = new MemoryStream(buffer, 0, subSize); + } + return new(DataFormat.Binary, 0, subSize, null); + } // Handle Size truncate behavior for a predetermined set of types and pg types. // Doesn't matter if we 'box' Value, all supported types are reference types. 
[MethodImpl(MethodImplOptions.NoInlining)] - void HandleSizeTruncation() + void HandleSizeTruncation(PgConcreteTypeInfo typeInfo) { - var type = Converter!.TypeToConvert; - if ((type != typeof(string) && type != typeof(char[]) && type != typeof(byte[]) && type != typeof(Stream)) || Value is not { } value) + var type = typeInfo.Type; + if ((type != typeof(string) && type != typeof(char[]) && type != typeof(byte[]) && !type.IsAssignableTo(typeof(Stream))) || Value is not { } value) return; - var dataTypeName = TypeInfo!.Options.GetDataTypeName(PgTypeId); + var dataTypeName = typeInfo.Options.GetDataTypeName(PgTypeId); if (dataTypeName == DataTypeNames.Text || dataTypeName == DataTypeNames.Varchar || dataTypeName == DataTypeNames.Bpchar) { if (value is string s && s.Length > _size) @@ -752,85 +852,90 @@ void HandleSizeTruncation() } else if (value is Stream) { - _asObject = true; + // Substream path abandons the resolver-produced state, we must dispose it here to prevent the no swap exception. + if (_writeState is { } ws) + { + typeInfo.DisposeWriteState(ws); + _writeState = null; + } _useSubStream = true; } } } } - private protected virtual void BindCore(DataFormat? formatPreference, bool allowNullReference = false) - { - // Pull from Value so we also support object typed generic params. 
- var value = Value; - if (value is null && !allowNullReference) - ThrowHelper.ThrowInvalidOperationException($"Parameter '{ParameterName}' cannot be null, DBNull.Value should be used instead."); - - if (_useSubStream && value is not null) - value = _subStream = new SubReadStream((Stream)value, _size); - - Size size = default; - if (TypeInfo!.BindObject(Converter!, value, ref size, ref _writeState, out var dataFormat, formatPreference) is { } info) - { - WriteSize = size; - _bufferRequirement = info.BufferRequirement; - } - else - { - WriteSize = -1; - _bufferRequirement = default; - } - - Format = dataFormat; - } - internal async ValueTask Write(bool async, PgWriter writer, CancellationToken cancellationToken) { - if (WriteSize is not { } writeSize) + if (_binding is not { } binding) { ThrowHelper.ThrowInvalidOperationException("Missing type info or binding info."); return; } + Debug.Assert(ConcreteTypeInfo is not null); try { if (writer.ShouldFlush(sizeof(int))) await writer.Flush(async, cancellationToken).ConfigureAwait(false); - writer.WriteInt32(writeSize.Value); - if (writeSize.Value is -1) - { - writer.Commit(sizeof(int)); - return; - } + var size = binding.Size?.Value ?? 
-1; + writer.WriteInt32(size); + writer.CommitAndResetTotal(sizeof(int)); - var current = new ValueMetadata + if (!binding.IsDbNullBinding) { - Format = Format, - BufferRequirement = _bufferRequirement, - Size = writeSize, - WriteState = _writeState - }; - await writer.BeginWrite(async, current, cancellationToken).ConfigureAwait(false); - await WriteValue(async, writer, cancellationToken).ConfigureAwait(false); - writer.Commit(writeSize.Value + sizeof(int)); + if (_useSubStream) + { + Debug.Assert(_subStream is not null); + if (async) + await _subStream.CopyToAsync(writer.GetStream(), cancellationToken).ConfigureAwait(false); + else + _subStream.CopyTo(writer.GetStream()); + writer.CommitAndResetTotal(size); + } + else + { + await writer.StartWrite(async, binding, cancellationToken).ConfigureAwait(false); + var typeInfo = ConcreteTypeInfo; + if (StaticValueType == typeof(object)) + { + // Pull from Value so we also support object typed generic params. + var value = Value; + Debug.Assert(value is not null); + if (async) + { + await typeInfo.Converter.WriteAsObjectAsync(writer, value, cancellationToken).ConfigureAwait(false); + } + else + { + typeInfo.Converter.WriteAsObject(writer, value); + } + } + else + { + await WriteTypedValue(async, typeInfo, writer, cancellationToken).ConfigureAwait(false); + } + writer.EndWrite(size); + } + } } finally { - ResetBindingInfo(); + DisposeBindingState(); } } - private protected virtual ValueTask WriteValue(bool async, PgWriter writer, CancellationToken cancellationToken) - { - // Pull from Value so we also support base calls from generic parameters. - var value = (_useSubStream ? 
_subStream : Value)!; - if (async) - return Converter!.WriteAsObjectAsync(writer, value, cancellationToken); + private protected virtual PgConcreteTypeInfo MakeConcreteTypeInfoForTypedValue(PgTypeInfo typeInfo) + => throw new NotSupportedException(); - Converter!.WriteAsObject(writer, value); - return new(); - } + private protected virtual PgValueBinding BindTypedValue(PgConcreteTypeInfo typeInfo, DataFormat? formatPreference) + => throw new NotSupportedException(); + + private protected virtual ValueTask WriteTypedValue(bool async, PgConcreteTypeInfo typeInfo, PgWriter writer, CancellationToken cancellationToken) + => throw new NotSupportedException(); + + private protected virtual void SetOutputTypedValue(NpgsqlDataReader reader, int ordinal) + => throw new NotSupportedException(); /// public override void ResetDbType() @@ -843,35 +948,66 @@ public override void ResetDbType() private protected void ResetTypeInfo() { + DisposeBindingState(); + + // Dispose any provider-produced _writeState as well. + if (_writeState is { } ws) + { + ConcreteTypeInfo?.DisposeWriteState(ws); + _writeState = null; + } + TypeInfo = null; - _asObject = false; - Converter = null; - PgTypeId = default; - ResetBindingInfo(); + ConcreteTypeInfo = null; } - private protected void ResetBindingInfo() + private protected void DisposeBindingState() { - if (WriteSize is null) + try { - Debug.Assert(_writeState == default && _useSubStream == default && Format == default && _bufferRequirement == default); - return; - } + if (_binding is not { } binding) + { + Debug.Assert(!_useSubStream && _subStream is null); + return; + } - if (_writeState is not null) - { - TypeInfo?.DisposeWriteState(_writeState); - _writeState = null; + // Dispose write state first as it may hold a reference to _subStream. + Debug.Assert(ConcreteTypeInfo is not null); + Exception? 
disposalException = null; + if (binding.WriteState is { } writeState) + { + try + { + ConcreteTypeInfo.DisposeWriteState(writeState); + } + catch (Exception ex) + { + disposalException = ex; + } + } + + if (_useSubStream) + { + Debug.Assert(_subStream is not null); + try + { + _subStream.Dispose(); + } + catch (Exception ex) when (disposalException is not null) + { + throw new AggregateException(disposalException, ex); + } + } + + if (disposalException is not null) + ExceptionDispatchInfo.Throw(disposalException); } - if (_useSubStream) + finally { _useSubStream = false; - _subStream?.Dispose(); _subStream = null; + _binding = null; } - WriteSize = null; - Format = default; - _bufferRequirement = default; } internal bool IsInputDirection => Direction == ParameterDirection.InputOutput || Direction == ParameterDirection.Input; diff --git a/src/Npgsql/NpgsqlParameterCollection.cs b/src/Npgsql/NpgsqlParameterCollection.cs index ac38f474d9..917eb9311b 100644 --- a/src/Npgsql/NpgsqlParameterCollection.cs +++ b/src/Npgsql/NpgsqlParameterCollection.cs @@ -760,25 +760,9 @@ internal void ProcessParameters(NpgsqlDataSource.ReloadableState reloadableState break; } - // Resolution can produce a provider-level write state that normally gets consumed by Bind. - // If Bind is skipped (SchemaOnly) or either step throws, that state needs to be cleaned up - // here. wasBound tracks whether Bind completed successfully and took ownership; every other - // exit path (SchemaOnly, ResolveTypeInfo throws, Bind throws) disposes. 
- var wasBound = false; - try - { - p.ResolveTypeInfo(reloadableState.SerializerOptions, reloadableState.DbTypeResolver); - if (validateValues) - { - p.Bind(out _, out _); - wasBound = true; - } - } - finally - { - if (!wasBound) - p.DisposeResolutionWriteState(); - } + p.ResolveTypeInfo(reloadableState.SerializerOptions, reloadableState.DbTypeResolver, willBind: validateValues); + if (validateValues) + p.Bind(out _, out _); } } diff --git a/src/Npgsql/NpgsqlParameter`.cs b/src/Npgsql/NpgsqlParameter`.cs index c406053ea3..04a1a214b1 100644 --- a/src/Npgsql/NpgsqlParameter`.cs +++ b/src/Npgsql/NpgsqlParameter`.cs @@ -1,7 +1,6 @@ using System; using System.Data; using System.Diagnostics; -using System.Runtime.CompilerServices; using System.Threading; using System.Threading.Tasks; using Npgsql.Internal; @@ -16,21 +15,19 @@ namespace Npgsql; /// The type of the value that will be stored in the parameter. public sealed class NpgsqlParameter : NpgsqlParameter { - T? _typedValue; - /// /// Gets or sets the strongly-typed value of the parameter. /// public T? 
TypedValue { - get => _typedValue; + get; set { if (typeof(T) == typeof(object) && ShouldResetObjectTypeInfo(value)) ResetTypeInfo(); else - ResetBindingInfo(); - _typedValue = value; + DisposeBindingState(); + field = value; } } @@ -81,55 +78,25 @@ public NpgsqlParameter(string parameterName, DbType dbType) #endregion Constructors - private protected override void SetOutputValueCore(NpgsqlDataReader reader, int ordinal) - => TypedValue = reader.GetFieldValue(ordinal); + private protected override PgConcreteTypeInfo MakeConcreteTypeInfoForTypedValue(PgTypeInfo typeInfo) + => typeInfo.MakeConcreteForValue(TypedValue, out _writeState); - private protected override PgConcreteTypeInfo GetConcreteTypeInfo(PgTypeInfo typeInfo) - { - if (typeof(T) == typeof(object) || TypeInfo!.IsBoxing) - return base.GetConcreteTypeInfo(typeInfo); - - _asObject = false; - return typeInfo.GetConcreteTypeInfo(TypedValue, out _writeState); - } + private protected override PgValueBinding BindTypedValue(PgConcreteTypeInfo typeInfo, DataFormat? formatPreference) + => typeInfo.BindParameterValue(TypedValue, _writeState, formatPreference); - // We ignore allowNullReference, it's just there to control the base implementation. - private protected override void BindCore(DataFormat? formatPreference, bool allowNullReference = false) + private protected override ValueTask WriteTypedValue(bool async, PgConcreteTypeInfo typeInfo, PgWriter writer, CancellationToken cancellationToken) { - if (_asObject) - { - // If we're object typed we should not support null. 
- base.BindCore(formatPreference, typeof(T) != typeof(object)); - return; - } - - var value = TypedValue; - if (TypeInfo!.Bind(Converter!.UnsafeDowncast(), value, out var size, ref _writeState, out var dataFormat, formatPreference) is { } info) - { - WriteSize = size; - _bufferRequirement = info.BufferRequirement; - } - else - { - WriteSize = -1; - _bufferRequirement = default; - } - - Format = dataFormat; - } - - private protected override ValueTask WriteValue(bool async, PgWriter writer, CancellationToken cancellationToken) - { - if (_asObject) - return base.WriteValue(async, writer, cancellationToken); - + Debug.Assert(TypedValue is not null); if (async) - return Converter!.UnsafeDowncast().WriteAsync(writer, TypedValue!, cancellationToken); + return typeInfo.Converter.WriteAsync(writer, TypedValue, cancellationToken); - Converter!.UnsafeDowncast().Write(writer, TypedValue!); + typeInfo.Converter.Write(writer, TypedValue); return new(); } + private protected override void SetOutputTypedValue(NpgsqlDataReader reader, int ordinal) + => TypedValue = reader.GetFieldValue(ordinal); + private protected override NpgsqlParameter CloneCore() => // use fields instead of properties // to avoid auto-initializing something like type_info diff --git a/src/Npgsql/Replication/PgOutput/ReplicationValue.cs b/src/Npgsql/Replication/PgOutput/ReplicationValue.cs index 62ab5293c3..6dc73f33a4 100644 --- a/src/Npgsql/Replication/PgOutput/ReplicationValue.cs +++ b/src/Npgsql/Replication/PgOutput/ReplicationValue.cs @@ -26,7 +26,7 @@ public class ReplicationValue public TupleDataKind Kind { get; private set; } FieldDescription _fieldDescription = null!; - ColumnInfo _lastInfo; + ReadConversionContext _lastConversionContext; bool _isConsumed; PgReader PgReader => _readBuffer.PgReader; @@ -38,7 +38,7 @@ internal void Reset(TupleDataKind kind, int length, FieldDescription fieldDescri Kind = kind; Length = length; _fieldDescription = fieldDescription; - _lastInfo = default; + 
_lastConversionContext = default; _isConsumed = false; } @@ -118,7 +118,7 @@ internal async Task Consume(CancellationToken cancellationToken) var reader = PgReader; if (!reader.Initialized) - reader.Init(Length, _fieldDescription.DataFormat); + reader.Init(_fieldDescription.DataFormat, Length); await reader.ConsumeAsync(cancellationToken: cancellationToken).ConfigureAwait(false); await reader.CommitAsync().ConfigureAwait(false); @@ -129,8 +129,8 @@ T GetCore() { ThrowIfInitialized(); - _fieldDescription.GetInfo(typeof(T), ref _lastInfo); - var info = _lastInfo; + _fieldDescription.GetConversionContext(typeof(T), ref _lastConversionContext); + var conversionContext = _lastConversionContext; switch (Kind) { @@ -151,21 +151,16 @@ T GetCore() } var reader = PgReader; - reader.Init(Length, _fieldDescription.DataFormat); - reader.StartRead(info.ConverterInfo.BufferRequirement); - var result = info.AsObject - ? (T)info.ConverterInfo.Converter.ReadAsObject(reader) - : info.ConverterInfo.Converter.UnsafeDowncast().Read(reader); - reader.EndRead(); - return result; + reader.Init(conversionContext.Binding.DataFormat, Length); + return conversionContext.TypeInfo.ReadFieldValue(PgReader, conversionContext.Binding); } async ValueTask GetAsyncCore(CancellationToken cancellationToken) { ThrowIfInitialized(); - _fieldDescription.GetInfo(typeof(T), ref _lastInfo); - var info = _lastInfo; + _fieldDescription.GetConversionContext(typeof(T), ref _lastConversionContext); + var conversionContext = _lastConversionContext; switch (Kind) { @@ -188,13 +183,8 @@ async ValueTask GetAsyncCore(CancellationToken cancellationToken) using var registration = _readBuffer.Connector.StartNestedCancellableOperation(cancellationToken, attemptPgCancellation: false); var reader = PgReader; - reader.Init(Length, _fieldDescription.DataFormat); - await reader.StartReadAsync(info.ConverterInfo.BufferRequirement, cancellationToken).ConfigureAwait(false); - var result = info.AsObject - ? 
(T)await info.ConverterInfo.Converter.ReadAsObjectAsync(reader, cancellationToken).ConfigureAwait(false) - : await info.ConverterInfo.Converter.UnsafeDowncast().ReadAsync(reader, cancellationToken).ConfigureAwait(false); - await reader.EndReadAsync().ConfigureAwait(false); - return result; + reader.Init(conversionContext.Binding.DataFormat, Length); + return await conversionContext.TypeInfo.ReadFieldValueAsync(PgReader, conversionContext.Binding, cancellationToken).ConfigureAwait(false); } void ThrowIfInitialized() diff --git a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs index 148260dd9c..6e28fd158f 100644 --- a/src/Npgsql/TypeMapping/GlobalTypeMapper.cs +++ b/src/Npgsql/TypeMapping/GlobalTypeMapper.cs @@ -82,9 +82,10 @@ PgSerializerOptions BuildTypeMappingOptions() var typeInfo = TypeMappingOptions.GetTypeInfoInternal(type, null); if (typeInfo is PgProviderTypeInfo providerInfo) { - dataTypeName = providerInfo.GetObjectConcreteTypeInfo(value, out var state).PgTypeId.DataTypeName; + var concreteTypeInfo = providerInfo.MakeConcreteForValueAsObject(value is DBNull ? null : value, out var state); if (state is not null) - providerInfo.DisposeWriteState(state); + concreteTypeInfo.DisposeWriteState(state); + dataTypeName = concreteTypeInfo.PgTypeId.DataTypeName; } else { diff --git a/src/Npgsql/Util/TypeExtensions.cs b/src/Npgsql/Util/TypeExtensions.cs new file mode 100644 index 0000000000..a8a298c419 --- /dev/null +++ b/src/Npgsql/Util/TypeExtensions.cs @@ -0,0 +1,22 @@ +using System; + +namespace Npgsql.Util; + +static class TypeExtensions +{ + extension(Type type) + { + /// + /// Determines whether this type and are in a subtype relationship, + /// i.e. whether one is assignable to the other in either direction. + /// + /// + /// Returns when the types are identical, when one inherits from or implements the other, + /// or more generally when an implicit reference or boxing conversion exists between them. 
+ /// + /// The type to check the relationship with. + /// if either type is assignable to the other; otherwise, . + public bool IsInSubtypeRelationshipWith(Type other) => + type.IsAssignableTo(other) || other.IsAssignableTo(type); + } +} diff --git a/test/Npgsql.Benchmarks/TypeHandlers/TypeHandlerBenchmarks.cs b/test/Npgsql.Benchmarks/TypeHandlers/TypeHandlerBenchmarks.cs index 7695885bfd..5dcf0f53cd 100644 --- a/test/Npgsql.Benchmarks/TypeHandlers/TypeHandlerBenchmarks.cs +++ b/test/Npgsql.Benchmarks/TypeHandlers/TypeHandlerBenchmarks.cs @@ -4,12 +4,11 @@ using BenchmarkDotNet.Diagnosers; using System; using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; using System.IO; using System.Threading; using Npgsql.Internal; -#nullable disable - namespace Npgsql.Benchmarks.TypeHandlers; public abstract class TypeHandlerBenchmarks @@ -44,10 +43,11 @@ public override void Write(byte[] buffer, int offset, int count) { } readonly NpgsqlReadBuffer _readBuffer; readonly BufferRequirements _binaryRequirements; - T _value; - Size _elementSize; + T? _value; + PgValueBinding _valueBinding; + PgFieldBinding _fieldBinding; - protected TypeHandlerBenchmarks(PgConverter handler) + protected TypeHandlerBenchmarks(PgConverter handler) { var stream = new EndlessStream(); _converter = (PgConverter)handler ?? 
throw new ArgumentNullException(nameof(handler)); @@ -58,11 +58,11 @@ protected TypeHandlerBenchmarks(PgConverter handler) _converter.CanConvert(DataFormat.Binary, out _binaryRequirements); } - public IEnumerable Values() => ValuesOverride(); + public IEnumerable Values() => ValuesOverride(); - protected virtual IEnumerable ValuesOverride() => [default(T)]; + protected virtual IEnumerable ValuesOverride() => [default]; - [ParamsSource(nameof(Values))] + [ParamsSource(nameof(Values)), MaybeNull] public T Value { get => _value; @@ -80,28 +80,35 @@ public T Value } _value = value; - object state = null; - var size = _elementSize = _converter.GetSizeOrDbNullAsObject(DataFormat.Binary, _binaryRequirements.Write, value, ref state)!.Value; - var current = new ValueMetadata { Format = DataFormat.Binary, BufferRequirement = _binaryRequirements.Write, Size = size, WriteState = state }; + object? writeState = null; + var size = _converter.IsDbNullOrGetSize(DataFormat.Binary, _binaryRequirements.Write, value, ref writeState); + _valueBinding = new PgValueBinding(DataFormat.Binary, _binaryRequirements.Write, size, writeState); - _writer.BeginWrite(async: false, current, CancellationToken.None).GetAwaiter().GetResult(); - _converter.WriteAsObject(_writer, value); - _writer.Commit(size.Value); + if (!_valueBinding.IsDbNullBinding) + { + _writer.StartWrite(async: false, _valueBinding, CancellationToken.None).GetAwaiter().GetResult(); + _converter.Write(_writer, value!); + _writer.EndWrite(_valueBinding.Size.Value); - Buffer.BlockCopy(_writeBuffer.Buffer, 0, _readBuffer.Buffer, 0, _writeBuffer.WritePosition); - _readBuffer.AddBytesToRead(_writeBuffer.WritePosition); - _readBuffer.ReadPosition = 0; - _writeBuffer.WritePosition = 0; + Buffer.BlockCopy(_writeBuffer.Buffer, 0, _readBuffer.Buffer, 0, _writeBuffer.WritePosition); + _readBuffer.AddBytesToRead(_writeBuffer.WritePosition); + _readBuffer.ReadPosition = 0; + _writeBuffer.WritePosition = 0; - _reader.Init(size.Value, 
DataFormat.Binary); + _reader.Init(_valueBinding.DataFormat, _valueBinding.Size.Value.GetValueOrDefault()); + _fieldBinding = new PgFieldBinding(DataFormat.Binary, _binaryRequirements.Read); + } } } [Benchmark] public T Read() { + if (_valueBinding.IsDbNullBinding) + return default!; + _readBuffer.ReadPosition = 0; - _reader.StartRead(_binaryRequirements.Read); + _reader.StartRead(_fieldBinding); var value = _converter.Read(_reader); _reader.EndRead(); return value; @@ -110,9 +117,12 @@ public T Read() [Benchmark] public void Write() { + if (_valueBinding.IsDbNullBinding) + return; + _writer.RefreshBuffer(); - var current = new ValueMetadata { Format = DataFormat.Binary, BufferRequirement = _binaryRequirements.Write, Size = _elementSize, WriteState = null }; - _writer.BeginWrite(async: false, current, CancellationToken.None).GetAwaiter().GetResult(); - _converter.Write(_writer, _value); + _writer.StartWrite(async: false, _valueBinding, CancellationToken.None).GetAwaiter().GetResult(); + _converter.Write(_writer, Value!); + _writer.EndWrite(_valueBinding.Size.GetValueOrDefault()); } } diff --git a/test/Npgsql.Tests/NpgsqlParameterTests.cs b/test/Npgsql.Tests/NpgsqlParameterTests.cs index 6070cc7266..23c59c7a95 100644 --- a/test/Npgsql.Tests/NpgsqlParameterTests.cs +++ b/test/Npgsql.Tests/NpgsqlParameterTests.cs @@ -657,17 +657,17 @@ public void DBNull_reuses_type_info([Values]bool generic) { var param = generic ? new NpgsqlParameter { Value = "value" } : new NpgsqlParameter { Value = "value" }; param.ResolveTypeInfo(DataSource.CurrentReloadableState.SerializerOptions, null); - param.GetResolutionInfo(out var typeInfo, out _, out _); + param.GetResolutionInfo(out var typeInfo, out _); Assert.That(typeInfo, Is.Not.Null); // Make sure we don't reset the type info when setting DBNull. 
param.Value = DBNull.Value; - param.GetResolutionInfo(out var secondTypeInfo, out _, out _); + param.GetResolutionInfo(out var secondTypeInfo, out _); Assert.That(secondTypeInfo, Is.SameAs(typeInfo)); // Make sure we don't resolve a different type info either. param.ResolveTypeInfo(DataSource.CurrentReloadableState.SerializerOptions, null); - param.GetResolutionInfo(out var thirdTypeInfo, out _, out _); + param.GetResolutionInfo(out var thirdTypeInfo, out _); Assert.That(thirdTypeInfo, Is.SameAs(secondTypeInfo)); } @@ -676,17 +676,16 @@ public void DBNull_followed_by_non_null_reresolves([Values]bool generic) { var param = generic ? new NpgsqlParameter { Value = DBNull.Value } : new NpgsqlParameter { Value = DBNull.Value }; param.ResolveTypeInfo(DataSource.CurrentReloadableState.SerializerOptions, null); - param.GetResolutionInfo(out var typeInfo, out _, out var pgTypeId); + param.GetResolutionInfo(out var typeInfo, out _); Assert.That(typeInfo, Is.Not.Null); - Assert.That(pgTypeId.IsUnspecified, Is.True); param.Value = "value"; - param.GetResolutionInfo(out var secondTypeInfo, out _, out _); + param.GetResolutionInfo(out var secondTypeInfo, out _); Assert.That(secondTypeInfo, Is.Null); // Make sure we don't resolve the same type info either. param.ResolveTypeInfo(DataSource.CurrentReloadableState.SerializerOptions, null); - param.GetResolutionInfo(out var thirdTypeInfo, out _, out _); + param.GetResolutionInfo(out var thirdTypeInfo, out _); Assert.That(thirdTypeInfo, Is.Not.SameAs(typeInfo)); } @@ -695,16 +694,16 @@ public void Changing_value_type_reresolves([Values]bool generic) { var param = generic ? 
new NpgsqlParameter { Value = "value" } : new NpgsqlParameter { Value = "value" }; param.ResolveTypeInfo(DataSource.CurrentReloadableState.SerializerOptions, null); - param.GetResolutionInfo(out var typeInfo, out _, out _); + param.GetResolutionInfo(out var typeInfo, out _); Assert.That(typeInfo, Is.Not.Null); param.Value = 1; - param.GetResolutionInfo(out var secondTypeInfo, out _, out _); + param.GetResolutionInfo(out var secondTypeInfo, out _); Assert.That(secondTypeInfo, Is.Null); // Make sure we don't resolve a different type info either. param.ResolveTypeInfo(DataSource.CurrentReloadableState.SerializerOptions, null); - param.GetResolutionInfo(out var thirdTypeInfo, out _, out _); + param.GetResolutionInfo(out var thirdTypeInfo, out _); Assert.That(thirdTypeInfo, Is.Not.SameAs(typeInfo)); } @@ -723,7 +722,7 @@ public void DataTypeName_prioritized_over_NpgsqlDbType([Values]bool generic) Value = "value" }; param.ResolveTypeInfo(DataSource.CurrentReloadableState.SerializerOptions, null); - param.GetResolutionInfo(out var typeInfo, out _, out _); + param.GetResolutionInfo(out var typeInfo, out _); Assert.That(typeInfo, Is.Not.Null); Assert.That(typeInfo.PgTypeId, Is.EqualTo(DataSource.CurrentReloadableState.SerializerOptions.TextPgTypeId)); } diff --git a/test/Npgsql.Tests/ReaderTests.cs b/test/Npgsql.Tests/ReaderTests.cs index 986d4163cb..9ec1d056cd 100644 --- a/test/Npgsql.Tests/ReaderTests.cs +++ b/test/Npgsql.Tests/ReaderTests.cs @@ -2102,11 +2102,11 @@ public async Task EndRead_StreamActive([Values]bool async) var buffer = conn.Connector!.ReadBuffer; buffer.AddBytesToRead(columnLength); var reader = buffer.PgReader; - reader.Init(columnLength, DataFormat.Binary, resumable: false); + reader.Init(DataFormat.Binary, columnLength, resumable: false); if (async) - await reader.StartReadAsync(Size.Unknown, CancellationToken.None); + await reader.StartReadAsync(new(DataFormat.Binary, Size.Unknown), CancellationToken.None); else - reader.StartRead(Size.Unknown); + 
reader.StartRead(new(DataFormat.Binary, Size.Unknown)); await using (var _ = reader.GetStream()) { From 6726cbd2ff0f1cb16648b261b075384699a55bc6 Mon Sep 17 00:00:00 2001 From: Nino Floris Date: Sun, 3 May 2026 18:12:48 +0200 Subject: [PATCH 760/761] Modernize NRT annotations on NpgsqlRange (#6544) --- .../Internal/DateIntervalConverter.cs | 4 +++ .../Internal/IntervalConverter.cs | 4 +++ src/Npgsql/NpgsqlTypes/NpgsqlRange.cs | 32 ++++++++----------- src/Npgsql/PublicAPI.Unshipped.txt | 3 ++ 4 files changed, 25 insertions(+), 18 deletions(-) diff --git a/src/Npgsql.NodaTime/Internal/DateIntervalConverter.cs b/src/Npgsql.NodaTime/Internal/DateIntervalConverter.cs index 1bf2d027df..b75f95d659 100644 --- a/src/Npgsql.NodaTime/Internal/DateIntervalConverter.cs +++ b/src/Npgsql.NodaTime/Internal/DateIntervalConverter.cs @@ -1,3 +1,4 @@ +using System; using System.Threading; using System.Threading.Tasks; using NodaTime; @@ -22,6 +23,9 @@ async ValueTask Read(bool async, PgReader reader, CancellationToke // ReSharper disable once MethodHasAsyncOverloadWithCancellation : rangeConverter.Read(reader); + if (range.IsEmpty) + throw new InvalidCastException("Cannot read an empty range as a NodaTime DateInterval."); + var upperBound = range.UpperBound; if (upperBound != LocalDate.MaxIsoValue || !dateTimeInfinityConversions) diff --git a/src/Npgsql.NodaTime/Internal/IntervalConverter.cs b/src/Npgsql.NodaTime/Internal/IntervalConverter.cs index f062079a4a..3c877f6119 100644 --- a/src/Npgsql.NodaTime/Internal/IntervalConverter.cs +++ b/src/Npgsql.NodaTime/Internal/IntervalConverter.cs @@ -1,3 +1,4 @@ +using System; using System.Threading; using System.Threading.Tasks; using NodaTime; @@ -21,6 +22,9 @@ async ValueTask Read(bool async, PgReader reader, CancellationToken ca // ReSharper disable once MethodHasAsyncOverloadWithCancellation : rangeConverter.Read(reader); + if (range.IsEmpty) + throw new InvalidCastException("Cannot read an empty range as a NodaTime Interval."); + // 
NodaTime Interval includes the start instant and excludes the end instant. Instant? start = range.LowerBoundInfinite ? null diff --git a/src/Npgsql/NpgsqlTypes/NpgsqlRange.cs b/src/Npgsql/NpgsqlTypes/NpgsqlRange.cs index a99dd2c537..aa6ae2cf0d 100644 --- a/src/Npgsql/NpgsqlTypes/NpgsqlRange.cs +++ b/src/Npgsql/NpgsqlTypes/NpgsqlRange.cs @@ -93,16 +93,14 @@ namespace NpgsqlTypes; public static readonly NpgsqlRange Empty = new(default, default, RangeFlags.Empty); /// - /// The lower bound of the range. Only valid when is false. + /// The lower bound of the range. Only valid when is false (i.e. the range is non-empty with a finite lower bound). /// - [MaybeNull, AllowNull] - public T LowerBound { get; } + public T? LowerBound { get; } /// - /// The upper bound of the range. Only valid when is false. + /// The upper bound of the range. Only valid when is false (i.e. the range is non-empty with a finite upper bound). /// - [MaybeNull, AllowNull] - public T UpperBound { get; } + public T? UpperBound { get; } /// /// The characteristics of the boundaries. @@ -122,11 +120,13 @@ namespace NpgsqlTypes; /// /// True if the lower bound is indefinite (i.e. infinite or unbounded); otherwise, false. /// + [MemberNotNullWhen(false, nameof(LowerBound))] public bool LowerBoundInfinite => (Flags & RangeFlags.LowerBoundInfinite) != 0; /// /// True if the upper bound is indefinite (i.e. infinite or unbounded); otherwise, false. /// + [MemberNotNullWhen(false, nameof(UpperBound))] public bool UpperBoundInfinite => (Flags & RangeFlags.UpperBoundInfinite) != 0; /// @@ -139,8 +139,8 @@ namespace NpgsqlTypes; /// /// The lower bound of the range. /// The upper bound of the range. 
- public NpgsqlRange([AllowNull] T lowerBound, [AllowNull] T upperBound) - : this(lowerBound, true, false, upperBound, true, false) { } + public NpgsqlRange(T lowerBound, T upperBound) + : this(lowerBound, lowerBoundIsInclusive: true, lowerBoundInfinite: false, upperBound, upperBoundIsInclusive: true, upperBoundInfinite: false) { } /// /// Constructs an with definite bounds. @@ -149,10 +149,8 @@ public NpgsqlRange([AllowNull] T lowerBound, [AllowNull] T upperBound) /// True if the lower bound is is part of the range (i.e. inclusive); otherwise, false. /// The upper bound of the range. /// True if the upper bound is part of the range (i.e. inclusive); otherwise, false. - public NpgsqlRange( - [AllowNull] T lowerBound, bool lowerBoundIsInclusive, - [AllowNull] T upperBound, bool upperBoundIsInclusive) - : this(lowerBound, lowerBoundIsInclusive, false, upperBound, upperBoundIsInclusive, false) { } + public NpgsqlRange(T lowerBound, bool lowerBoundIsInclusive, T upperBound, bool upperBoundIsInclusive) + : this(lowerBound, lowerBoundIsInclusive, lowerBoundInfinite: false, upperBound, upperBoundIsInclusive, upperBoundInfinite: false) { } /// /// Constructs an . @@ -163,9 +161,7 @@ public NpgsqlRange( /// The upper bound of the range. /// True if the upper bound is part of the range (i.e. inclusive); otherwise, false. /// True if the upper bound is indefinite (i.e. infinite or unbounded); otherwise, false. - public NpgsqlRange( - [AllowNull] T lowerBound, bool lowerBoundIsInclusive, bool lowerBoundInfinite, - [AllowNull] T upperBound, bool upperBoundIsInclusive, bool upperBoundInfinite) + public NpgsqlRange(T? lowerBound, bool lowerBoundIsInclusive, bool lowerBoundInfinite, T? upperBound, bool upperBoundIsInclusive, bool upperBoundInfinite) : this( lowerBound, upperBound, @@ -181,7 +177,7 @@ public NpgsqlRange( /// The lower bound of the range. /// The upper bound of the range. /// The characteristics of the range boundaries. 
- internal NpgsqlRange([AllowNull] T lowerBound, [AllowNull] T upperBound, RangeFlags flags) : this() + internal NpgsqlRange(T? lowerBound, T? upperBound, RangeFlags flags) : this() { // TODO: We need to check if the bounds are implicitly empty. E.g. '(1,1)' or '(0,0]'. // See: https://github.com/npgsql/npgsql/issues/1943. @@ -207,7 +203,7 @@ internal NpgsqlRange([AllowNull] T lowerBound, [AllowNull] T upperBound, RangeFl /// /// True if the range is implicitly empty; otherwise, false. /// - static bool IsEmptyRange([AllowNull] T lowerBound, [AllowNull] T upperBound, RangeFlags flags) + static bool IsEmptyRange(T? lowerBound, T? upperBound, RangeFlags flags) { // --------------------------------------------------------------------------------- // We only want to check for those conditions that are unambiguously erroneous: @@ -234,7 +230,7 @@ static bool IsEmptyRange([AllowNull] T lowerBound, [AllowNull] T upperBound, Ran return false; if (!HasEquatableBounds) - return lowerBound?.Equals(upperBound) ?? false; + return lowerBound.Equals(upperBound); var lower = (IEquatable)lowerBound; var upper = (IEquatable)upperBound; diff --git a/src/Npgsql/PublicAPI.Unshipped.txt b/src/Npgsql/PublicAPI.Unshipped.txt index 71b0245984..f91f353b64 100644 --- a/src/Npgsql/PublicAPI.Unshipped.txt +++ b/src/Npgsql/PublicAPI.Unshipped.txt @@ -40,3 +40,6 @@ *REMOVED*override Npgsql.NpgsqlLargeObjectStream.SetLength(long value) -> void *REMOVED*override Npgsql.NpgsqlLargeObjectStream.Write(byte[]! buffer, int offset, int count) -> void *REMOVED*override Npgsql.NpgsqlLargeObjectStream.WriteAsync(byte[]! buffer, int offset, int count, System.Threading.CancellationToken cancellationToken) -> System.Threading.Tasks.Task! +NpgsqlTypes.NpgsqlRange.LowerBound.get -> T? +NpgsqlTypes.NpgsqlRange.UpperBound.get -> T? +NpgsqlTypes.NpgsqlRange.NpgsqlRange(T? lowerBound, bool lowerBoundIsInclusive, bool lowerBoundInfinite, T? 
upperBound, bool upperBoundIsInclusive, bool upperBoundInfinite) -> void From 464effc0ee6c99198ace60a9c514fe277e3a21fd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 3 May 2026 20:26:13 +0200 Subject: [PATCH 761/761] Bump the minor-and-patch group with 3 updates (#6558) Bumps Microsoft.Data.SqlClient from 7.0.0 to 7.0.1 Bumps Microsoft.NET.Test.Sdk from 18.4.0 to 18.5.1 Bumps NodaTime from 3.3.1 to 3.3.2 --- Directory.Packages.props | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Directory.Packages.props b/Directory.Packages.props index 564a04b5e6..bbe474665f 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -16,7 +16,7 @@ - + @@ -33,7 +33,7 @@ - + @@ -44,7 +44,7 @@ - +